text stringlengths 957 885k |
|---|
def main():
    """Command-line entry point: download the latest FIRDS files, unzip them,
    convert XML to CSV via XSLT, merge the CSVs, hash each row and ingest the
    result into a PostgreSQL database."""
    import argparse
    import os
    parser = argparse.ArgumentParser(description='')
    parser.add_argument('--cutoff', type=str, help='')
    parser.add_argument('--prd', type=str, nargs='*', default='S J F O',
                        help='The CFI initial letter for each product category to be downloaded (C D E F H I J K L M O R S T). Only options, swaps, futures and forwards are downloaded by default.')
    parser.add_argument('--cleanup', type=str, nargs='*', help='File extensions of downloaded files to be cleaned up after data ingestion.', default='')
    parser.add_argument('--sep', type=str, help='The CSV field separator. Semicolon by default.', default=';')
    requiredNamed = parser.add_argument_group('required named arguments')
    requiredNamed.add_argument('--wdir', type=str, help='Working directory path (used for downloads and conversions).',
                               required=True)
    requiredNamed.add_argument('--hst', type=str, help='The hostname for the pgSQL DBMS.', required=True)
    requiredNamed.add_argument('--dbn', type=str, help='The database name for which the data ingestion is desired.',
                               required=True)
    requiredNamed.add_argument('--uid', type=str, help='The username of the user peforming the ingestion.',
                               required=True)
    requiredNamed.add_argument('--pwd', type=str, help='The password of the user performing the ingestion.',
                               required=True)
    args = parser.parse_args()
    wdir = os.path.expanduser(args.wdir + '/')
    pth = os.path.dirname(os.path.realpath(__file__))
    cutoff = args.cutoff
    # BUG FIX: with nargs='*' argparse yields a LIST when --prd is given on the
    # command line, but the default here is a plain string, so the original
    # args.prd.split(" ") crashed for any user-supplied value. Accept both.
    prd = args.prd.split(" ") if isinstance(args.prd, str) else args.prd
    hst = args.hst
    dbn = args.dbn
    uid = args.uid
    pwd = args.pwd
    sep = args.sep
    print("Fetching the available files from FIRDS web service...")
    # download_files returns the publication dates (FULINS first, DLTINS second)
    n = list(map(lambda x: x.strftime("%Y%m%d"), download_files(cutoff, prd, wdir)))
    print("Most recent files downloaded. Now proceeding...")
    for file in os.listdir(wdir):
        dlt = isDLT(file)
        ful = isFUL(file)
        if file.endswith('.zip') and (dlt or ful):
            print(r"--> Unzipping and flattening file {}...".format(str(file)))
            z = os.path.join(wdir, file)
            unzip_files(z, os.path.dirname(z))
            no_ext = z[:-3]  # strips 'zip' but keeps the trailing dot
            xml = "{}{}".format(no_ext, 'xml')
            xsl = pth + '/FUL.xsl' if ful else pth + '/DLT.xsl'
            csv = "{}{}".format(no_ext, 'csv')
            to_csv(xml, xsl, csv)
            print(r'--> CSV correctly generated for file {}'.format(file))
    m_ful = wdir + r'/merge_FULINS_' + n[0] + '.csv'
    m_dlt = wdir + r'/merge_DLTINS_' + n[1] + '.csv'
    dest = {'fulins': {'from': wdir + r'/FULINS*.csv', 'to': m_ful, 'hr': [0, 5]},
            'dltins': {'from': wdir + r'/DLTINS*.csv', 'to': m_dlt, 'hr': [1, 6]}}
    success = {}
    print("Merging available CSV files.")
    for key, value in dest.items():
        success[key] = merge_mult_csv(value['from'], value['to'])
        if success[key]:
            print(r"--> Merging {} files succeeded.".format(key))
            print("Now proceeding to hash table rows...")
            # Hash each row over the <ID, Venue> columns listed in value['hr']
            insert_hashes(value['to'], prd, sep, value['hr'])
            print(r"--> Hashing completed.")
            # Create the table if missing, otherwise truncate and reload it
            print("Ingesting {} data into pgSQL table.".format(key))
            ingest_db(hst, dbn, key, uid, pwd, value['to'], sep)
    if len(args.cleanup) > 0:
        print("Now removing leftover files.")
        cleanup(wdir, args.cleanup)
def download_files(cutoff, prods, dest_path):
    """Download the FIRDS files published since the cut-off into dest_path.

    Returns the pair of publication dates reported by the FIRDS list query.
    """
    import firds2dl as f
    if cutoff is None:
        # No explicit cut-off: fall back to the persisted last-run timestamp
        # (readDate raises when the 'lastrun' file does not exist).
        last_run = f.readDate('lastrun')
    else:
        # An ISO8601 timestamp on the command line overrides the last run date.
        from datetime import datetime
        last_run = datetime.strptime(cutoff, f.ISOfmt)
    result = f.getList(last_run, prods, 0, 500)
    f.downloadLinks(result[0], dest_path)
    return result[1], result[2]
def merge_mult_csv(pth, out):
    """Concatenate every CSV file matching the glob pattern `pth` into `out`.

    The header line is kept from the first file only; subsequent files have
    their first line discarded. Returns True when at least one file was
    merged, False (and writes nothing) otherwise.
    """
    import shutil
    import glob
    # BUG FIX: glob order is OS-dependent, so the surviving header and the row
    # order were nondeterministic. Sort for reproducible output.
    allFiles = sorted(glob.glob(pth))
    if not allFiles:
        print("No files to be merged.")
        return False
    with open(out, 'wb') as outfile:
        for i, fname in enumerate(allFiles):
            print("####### About to merge file {}".format(fname))
            with open(fname, 'rb') as infile:
                if i != 0:
                    infile.readline()  # Throw away header on all but first file
                # Block copy rest of file from input to output without parsing
                shutil.copyfileobj(infile, outfile)
    return True
def unzip_files(zip_path, dest_path):
    """Extract every member of the ZIP archive at `zip_path` into `dest_path`."""
    import zipfile
    archive = zipfile.ZipFile(zip_path, "r")
    try:
        archive.extractall(dest_path)
    finally:
        archive.close()
def to_csv(xml, xsl, csv):
    """Transform the XML file at `xml` into a CSV file at `csv` by applying
    the XSL stylesheet at `xsl`. Thin wrapper around the project-local
    xml2csv module."""
    import xml2csv as x
    x.transform(xml, xsl, csv)
def insert_hashes(file, prd, sep, rng):
    """Rewrite the CSV `file` in place, keeping only rows whose CFI code
    starts with one of the letters in `prd`, and appending an MD5 hash column
    computed over the columns indexed by `rng` (e.g. the <ID, Venue> pair).

    Parameters
    ----------
    file : str  -- path of the CSV file to rewrite.
    prd : iterable of str -- accepted CFI initial letters.
    sep : str   -- CSV field separator.
    rng : iterable of int -- column indices folded into the hash.
    """
    from tempfile import NamedTemporaryFile
    import shutil
    import csv
    import hashlib
    # newline='' is required by the csv module to avoid spurious blank lines
    # on platforms with newline translation.
    tempfile = NamedTemporaryFile(mode='w', delete=False, newline='')
    with open(file, 'r', newline='') as csvfile, tempfile:
        reader = csv.reader(csvfile, delimiter=sep)
        writer = csv.writer(tempfile, delimiter=sep)
        header = next(reader)
        nc = header.index('CFI_CODE')
        writer.writerow(header + ['hash'])
        for row in reader:
            # BUG FIX: guard empty CFI codes (row[nc][0] raised IndexError).
            if not row[nc] or row[nc][0] not in prd:
                continue
            subset = ''.join("{}".format(row[i]) for i in rng)
            h = hashlib.md5(subset.encode('utf-8')).hexdigest()
            row.append(h)
            writer.writerow(row)
    # Atomically replace the original file with the filtered/hashed version.
    shutil.move(tempfile.name, file)
def ingest_db(host, dbname, tbl, uid, pwd, csvfile, separator):
    """Load `csvfile` into the pgSQL table `tbl`, creating the table from the
    CSV header when it does not exist yet (wrapper around the project-local
    csv2pg module).

    BUG FIX: the original leaked both the file handle and the DB connection
    when any csv2pg call raised; both are now released deterministically.
    """
    import csv2pg as c
    dsn = "host = '{}' dbname = '{}' user = '{}' password = '{}'".format(host, dbname, uid, pwd)
    conn = c.connectDb(dsn)
    try:
        with open(csvfile, 'r') as f:
            has_table = c.existsTable(conn, tbl)
            if not has_table:
                # Derive the schema from the CSV header, then rewind for the load.
                h = c.readCSV(f)
                c.createTable(*h, conn, tbl)
                print("Tabella creata, ora inizio ingestion.")
                f.seek(0)
            c.loadCSV(conn, f, tbl, separator, has_table)
            print("Ingestion completata.")
    finally:
        conn.close()
def cleanup(dir, ext):
    """Remove every file in `dir` (non-recursive) whose name ends with one of
    the given extensions.

    Parameters
    ----------
    dir : str -- directory to scan.
    ext : iterable of str -- extensions WITHOUT the leading dot.
    """
    import os
    # Remove files with specified extension
    for file in os.listdir(dir):
        file = os.path.join(dir, file)
        for x in ext:
            if file.endswith('.' + str(x)):
                os.remove(file)
                # BUG FIX: stop after the first match; with overlapping
                # extensions (e.g. 'gz' and 'tar.gz') the original tried to
                # remove the same file twice and raised FileNotFoundError.
                break
def isFUL(file):
    """Return True when the file name belongs to a FIRDS full dump (FULINS)."""
    return "FULINS" in file
def isDLT(file):
    """Return True when the file name belongs to a FIRDS delta file (DLTINS)."""
    return "DLTINS" in file
def string_contains(s, c):
    """Return True when the substring `c` occurs anywhere in `s`."""
    return s.find(c) >= 0
# Run the full ingestion pipeline only when executed as a script.
if __name__ == '__main__':
    main()
|
<reponame>worldmaker18349276/K-AIKO<filename>kaiko/menu/profiles.py<gh_stars>1-10
import os
import traceback
import tempfile
import subprocess
from pathlib import Path
from kaiko.utils import config as cfg
from kaiko.utils import biparsers as bp
from kaiko.utils import commands as cmd
from kaiko.utils import datanodes as dn
def exists(program):
    """Return True when `program` can be located on the PATH.

    Uses `where` on Windows and `which` elsewhere, discarding their output.
    """
    locator = "where" if os.name == 'nt' else "which"
    status = subprocess.call([locator, program],
                             stdout=subprocess.DEVNULL,
                             stderr=subprocess.DEVNULL)
    return status == 0
@dn.datanode
def edit(text, editor):
    """Datanode task: dump `text` into a temp file, open it with the external
    `editor`, wait for the editor to exit, and return the edited content.
    """
    with tempfile.NamedTemporaryFile(mode='w+', suffix=".tmp") as file:
        file.write(text)
        file.flush()
        with dn.subprocess_task([editor, file.name]) as task:
            yield from task.join((yield))
        # BUG FIX: the original re-opened the temp file and never closed the
        # handle; read it through a context manager instead.
        with open(file.name, mode='r') as edited:
            return edited.read()
class ProfileTypeError(Exception):
    """Raised when a profile path exists but has the wrong file type."""
    pass
class ProfileNameError(Exception):
    """Raised when a profile name is invalid or already in use."""
    pass
class ProfileManager:
    """Profile manager for Configurable type.
    Attributes
    ----------
    logger : KAIKOLogger
    config_type : type
        The Configurable type to manage.
    path : Path
        The path of profiles directory.
    profiles : list of str
        A list of names of profiles.
    default_name : str or None
        The name of default configuration.
    current_name : str
        The name of current configuration.
    current : Configurable
        The current configuration.
    """
    default_meta = ".default-config"
    extension = ".config"
    settings_name = "settings"
    def __init__(self, config_type, path, logger):
        """Constructor.
        Parameters
        ----------
        config_type : type
            The Configurable type to manage.
        path : str or Path
            The path of profiles directory.
        Raises
        ------
        ProfileTypeError
            If file type is wrong.
        DecodeError
            If decoding fails.
        """
        if isinstance(path, str):
            path = Path(path)
        self.logger = logger
        self.config_type = config_type
        self.path = path
        self.profiles = []
        self.default_name = None
        self.current_name = None
        self.current = None
        self._profiles_mtime = None
        self.update()
    def is_uptodate(self):
        """Return True when the cached profile list matches the directory mtime."""
        return self._profiles_mtime == os.stat(str(self.path)).st_mtime
    def update(self):
        """Update the list of profiles."""
        self._profiles_mtime = os.stat(str(self.path)).st_mtime
        logger = self.logger
        self.profiles = []
        self.default_name = None
        logger.print(f"Update profiles...", prefix="data")
        if not self.path.exists():
            with logger.warn():
                logger.print(f"The profile directory doesn't exist: {logger.emph(self.path.as_uri())}")
            return
        if not self.path.is_dir():
            with logger.warn():
                logger.print(f"Wrong file type for profile directory: {logger.emph(self.path.as_uri())}")
            return
        # every *.config file in the directory is a profile
        for subpath in self.path.iterdir():
            if subpath.suffix == self.extension:
                self.profiles.append(subpath.stem)
        default_meta_path = self.path / self.default_meta
        if default_meta_path.exists():
            if not default_meta_path.is_file():
                with logger.warn():
                    logger.print(f"Wrong file type for default profile: {logger.emph(default_meta_path.as_uri())}")
                return
            self.default_name = default_meta_path.read_text()
    def set_default(self):
        """Set the current configuration as default configuration."""
        logger = self.logger
        if self.current_name is None:
            raise ValueError("No profile")
        # BUG FIX: the original logged self.default_name (the OLD default,
        # possibly None) before assignment; the profile actually being
        # promoted is current_name. Also log only after the None check.
        logger.print(f"Set {logger.emph(self.current_name)} as the default configuration...", prefix="data")
        if not self.path.exists():
            with logger.warn():
                logger.print(f"No such profile directory: {logger.emph(self.path.as_uri())}")
            return
        self.default_name = self.current_name
        default_meta_path = self.path / self.default_meta
        if default_meta_path.exists() and not default_meta_path.is_file():
            with logger.warn():
                logger.print(f"Wrong file type for default profile: {logger.emph(default_meta_path.as_uri())}")
            return
        default_meta_path.write_text(self.default_name)
    def save(self):
        """Save the current configuration."""
        logger = self.logger
        if self.current_name is None:
            raise ValueError("No profile")
        current_path = self.path / (self.current_name + self.extension)
        logger.print(f"Save configuration to {logger.emph(current_path.as_uri())}...", prefix="data")
        if not self.path.exists():
            with logger.warn():
                logger.print(f"The profile directory doesn't exist: {logger.emph(self.path.as_uri())}")
            return
        if current_path.exists() and not current_path.is_file():
            with logger.warn():
                logger.print(f"Wrong file type for profile: {logger.emph(current_path.as_uri())}")
            return
        try:
            self.current.write(current_path, self.settings_name)
        except bp.EncodeError:
            with logger.warn():
                logger.print("Fail to encode configuration")
                logger.print(traceback.format_exc(), end="")
        self.update()
    def load(self):
        """Load the current configuration."""
        logger = self.logger
        if self.current_name is None:
            raise ValueError("No profile")
        current_path = self.path / (self.current_name + self.extension)
        logger.print(f"Load configuration from {logger.emph(current_path.as_uri())}...", prefix="data")
        # a missing profile file silently yields a fresh default configuration
        self.current = self.config_type()
        if not current_path.exists():
            return
        if not current_path.is_file():
            with logger.warn():
                logger.print(f"Wrong file type for profile: {logger.emph(current_path.as_uri())}")
            return
        try:
            self.current = self.config_type.read(current_path, name=self.settings_name)
        except bp.DecodeError:
            with logger.warn():
                logger.print("Fail to decode configuration")
                logger.print(traceback.format_exc(), end="")
    def use(self, name=None):
        """change the current profile of configuration.
        Parameters
        ----------
        name : str, optional
            The name of profile, or None for default.
        """
        logger = self.logger
        if name is None:
            if self.default_name is None:
                with logger.warn():
                    logger.print("No default profile")
                return
            name = self.default_name
        if name not in self.profiles:
            with logger.warn():
                logger.print(f"No such profile: {logger.emph(name)}")
            return
        self.current_name = name
        self.load()
    def new(self, name=None, clone=None):
        """make a new profile of configuration.
        Parameters
        ----------
        name : str, optional
            The name of profile.
        clone : str, optional
            The name of profile to clone.
        """
        logger = self.logger
        logger.print("Make new configuration...")
        if clone is not None and clone not in self.profiles:
            with logger.warn():
                logger.print(f"No such profile: {logger.emph(clone)}")
            return
        # BUG FIX: generate a fresh name BEFORE validating it; the original
        # called name.isprintable() first, which raised AttributeError
        # whenever name was None (making the default-name branch unreachable).
        if name is None:
            name = "new profile"
            n = 1
            while name in self.profiles:
                n += 1
                name = f"new profile ({str(n)})"
        if not name.isprintable() or "/" in name:
            with logger.warn():
                logger.print(f"Invalid profile name: {logger.emph(name)}")
            return
        if name in self.profiles:
            with logger.warn():
                logger.print(f"This profile name {logger.emph(name)} already exists.")
            return
        if clone is None:
            self.current_name = name
            self.current = self.config_type()
        else:
            # load the clone's settings, then rebind them to the new name
            self.current_name = clone
            self.load()
            self.current_name = name
    def delete(self, name):
        """Delete a profile.
        Parameters
        ----------
        name : str
            The name of profile to delete.
        """
        logger = self.logger
        target_path = self.path / (name + self.extension)
        logger.print(f"Delete configuration {logger.emph(target_path.as_uri())}...", prefix="data")
        if name not in self.profiles:
            with logger.warn():
                logger.print(f"No such profile: {logger.emph(name)}")
            return
        self.profiles.remove(name)
        if target_path.exists():
            if not target_path.is_file():
                with logger.warn():
                    logger.print(f"Wrong file type for profile: {logger.emph(target_path.as_uri())}")
                return
            target_path.unlink()
    def rename(self, name):
        """Rename the current profile.
        Parameters
        ----------
        name : str
            The new name of profile.
        """
        logger = self.logger
        if self.current_name is None:
            raise ValueError("No profile")
        if self.current_name == name:
            return
        current_path = self.path / (self.current_name + self.extension)
        target_path = self.path / (name + self.extension)
        current_name = logger.emph(current_path.as_uri())
        target_name = logger.emph(target_path.as_uri())
        logger.print(f"Rename configuration file {current_name} to {target_name}...", prefix="data")
        if not name.isprintable() or "/" in name:
            with logger.warn():
                logger.print(f"Invalid profile name: {logger.emph(name)}")
            return
        if name in self.profiles:
            with logger.warn():
                logger.print(f"This profile name {logger.emph(name)} already exists.")
            return
        if self.current_name in self.profiles:
            self.profiles.remove(self.current_name)
            self.profiles.append(name)
        if current_path.exists():
            current_path.rename(target_path)
        if self.current_name == self.default_name:
            # keep the default-profile metadata in sync with the new name
            self.current_name = name
            self.set_default()
        else:
            self.current_name = name
class FieldParser(cmd.ArgumentParser):
    """Command-line argument parser for configuration field names.

    Decodes a token into a field reference of `config_type` via the field
    biparser, and derives completion suggestions from decode failures.
    """
    def __init__(self, config_type):
        self.config_type = config_type
        self.biparser = cfg.FieldBiparser(config_type)
    def parse(self, token):
        """Parse `token` into a field reference; raise CommandParseError if unknown."""
        try:
            return self.biparser.decode(token)[0]
        except bp.DecodeError:
            raise cmd.CommandParseError("No such field")
    def suggest(self, token):
        """Return completion suggestions for a partial field token."""
        try:
            self.biparser.decode(token)
        except bp.DecodeError as e:
            # the decode error carries the failure index and the expected
            # continuations, which become the completion candidates
            sugg = cmd.fit(token, [token[:e.index] + ex for ex in e.expected])
        else:
            # the token already decodes cleanly -> nothing to suggest
            sugg = []
        return sugg
    def info(self, token):
        """Return the documentation string of the field denoted by `token`."""
        fields = self.parse(token)
        return self.config_type.get_field_doc(fields)
class ConfigCommand:
    """Command group exposing configuration inspection and profile management.

    `config` appears to be a ProfileManager (it exposes config_type,
    settings_name, current, profiles, ...) -- confirm against the caller.
    """
    def __init__(self, config, logger):
        self.config = config
        self.logger = logger
    # configuration
    @cmd.function_command
    def show(self):
        """Show configuration."""
        biparser = cfg.ConfigurationBiparser(self.config.config_type, name=self.config.settings_name)
        text = biparser.encode(self.config.current)
        self.logger.print(f"profile name: {self.logger.emph(self.config.current_name)}")
        self.logger.print(text)
    @cmd.function_command
    def has(self, field):
        """Check whether this field is set in the configuration.
        usage: \x1b[94mconfig\x1b[m \x1b[94mhas\x1b[m \x1b[92m{field}\x1b[m
                                      ╱
                            The field name.
        """
        return self.config.current.has(field)
    @cmd.function_command
    def get(self, field):
        """Get the value of this field in the configuration.
        usage: \x1b[94mconfig\x1b[m \x1b[94mget\x1b[m \x1b[92m{field}\x1b[m
                                      ╱
                            The field name.
        """
        return self.config.current.get(field)
    @cmd.function_command
    def set(self, field, value):
        """Set this field in the configuration.
        usage: \x1b[94mconfig\x1b[m \x1b[94mset\x1b[m \x1b[92m{field}\x1b[m \x1b[92m{value}\x1b[m
                                      ╱         ╲
                            The field name.   The value.
        """
        self.config.current.set(field, value)
    @cmd.function_command
    def unset(self, field):
        """Unset this field in the configuration.
        usage: \x1b[94mconfig\x1b[m \x1b[94munset\x1b[m \x1b[92m{field}\x1b[m
                                      ╱
                            The field name.
        """
        self.config.current.unset(field)
    @cmd.function_command
    @dn.datanode
    def edit(self, field):
        """Edit the value of this field in the configuration.
        usage: \x1b[94mconfig\x1b[m \x1b[94medit\x1b[m \x1b[92m{field}\x1b[m
                                      ╱
                            The field name.
        """
        # NOTE(review): reads the editor command from the *settings* being
        # edited (current.menu.editor) -- confirm this field path exists in
        # the Configurable schema.
        editor = self.config.current.menu.editor
        field_type = self.config.config_type.get_field_type(field)
        biparser = bp.from_type_hint(field_type, multiline=True)
        if self.config.current.has(field):
            value = self.config.current.get(field)
            value_str = biparser.encode(value)
        else:
            value_str = ""
        yield
        # open editor
        if not exists(editor):
            with self.logger.warn():
                self.logger.print(f"Unknown editor: {editor}")
            return
        with edit(value_str, editor) as edit_task:
            yield from edit_task.join((yield))
            res_str = edit_task.result
        # parse result
        res_str = res_str.strip()
        if res_str == "":
            # an emptied buffer means "unset this field"
            self.config.current.unset(field)
            return
        try:
            res, _ = biparser.decode(res_str)
        except bp.DecodeError as e:
            with self.logger.warn():
                self.logger.print("Invalid syntax:")
                self.logger.print(e)
        else:
            self.config.current.set(field, res)
    # shared arg parser for every {field} argument above
    @get.arg_parser("field")
    @has.arg_parser("field")
    @unset.arg_parser("field")
    @set.arg_parser("field")
    @edit.arg_parser("field")
    def _field_parser(self):
        return FieldParser(self.config.config_type)
    @set.arg_parser("value")
    def _set_value_parser(self, field):
        # the value parser depends on the already-parsed field's type
        annotation = self.config.config_type.get_field_type(field)
        default = self.config.current.get(field)
        return cmd.LiteralParser(annotation, default)
    # profiles
    @cmd.function_command
    def profiles(self):
        """Show all profiles.
        usage: \x1b[94mconfig\x1b[m \x1b[94mprofiles\x1b[m
        """
        logger = self.logger
        if not self.config.is_uptodate():
            self.config.update()
        for profile in self.config.profiles:
            note = ""
            if profile == self.config.default_name:
                note += " (default)"
            if profile == self.config.current_name:
                note += " (current)"
            logger.print(logger.emph(profile + self.config.extension) + note)
    @cmd.function_command
    def reload(self):
        """Reload configuration."""
        logger = self.logger
        if not self.config.is_uptodate():
            self.config.update()
        self.config.load()
    @cmd.function_command
    def save(self):
        """Save configuration."""
        logger = self.logger
        if not self.config.is_uptodate():
            self.config.update()
        self.config.save()
    @cmd.function_command
    def set_default(self):
        """Set the current configuration profile as default.
        usage: \x1b[94mconfig\x1b[m \x1b[94mset_default\x1b[m
        """
        if not self.config.is_uptodate():
            self.config.update()
        self.config.set_default()
    @cmd.function_command
    def use(self, profile):
        """Change the current configuration profile.
        usage: \x1b[94mconfig\x1b[m \x1b[94muse\x1b[m \x1b[92m{profile}\x1b[m
                                      ╱
                            The profile name.
        """
        if not self.config.is_uptodate():
            self.config.update()
        self.config.use(profile)
    @cmd.function_command
    def rename(self, profile):
        """Rename current configuration profile.
        usage: \x1b[94mconfig\x1b[m \x1b[94mrename\x1b[m \x1b[92m{profile}\x1b[m
                                      ╱
                            The profile name.
        """
        if not self.config.is_uptodate():
            self.config.update()
        self.config.rename(profile)
    @cmd.function_command
    def new(self, profile, clone=None):
        """Make new configuration profile.
        usage: \x1b[94mconfig\x1b[m \x1b[94mnew\x1b[m \x1b[92m{profile}\x1b[m [\x1b[95m--clone\x1b[m \x1b[92m{PROFILE}\x1b[m]
                                      ╱                    ╲
                            The profile name.   The profile to be cloned.
        """
        if not self.config.is_uptodate():
            self.config.update()
        self.config.new(profile, clone)
    @cmd.function_command
    def delete(self, profile):
        """Delete a configuration profile.
        usage: \x1b[94mconfig\x1b[m \x1b[94mdelete\x1b[m \x1b[92m{profile}\x1b[m
                                      ╱
                            The profile name.
        """
        if not self.config.is_uptodate():
            self.config.update()
        self.config.delete(profile)
    # profile-name arg parsers: raw text for new names, option list for existing
    @rename.arg_parser("profile")
    @new.arg_parser("profile")
    def _new_profile_parser(self):
        return cmd.RawParser()
    @new.arg_parser("clone")
    @use.arg_parser("profile")
    @delete.arg_parser("profile")
    def _old_profile_parser(self, *_, **__):
        return cmd.OptionParser(self.config.profiles,
                                desc="It should be the name of the profile that exists in the configuration.")
|
"""
nnabla trainers and models
"""
import os
import time
from functools import partial
#https://neon.nervanasys.com/index.html/models.html
import neon as nn
class Trainer(object):
    """Training harness driving a symbol-building model through the mxnet
    module API, timing forward/backward passes per batch.

    NOTE(review): this file imports `neon as nn` but every call below uses
    `mx` (and `tqdm`), neither of which is imported -- presumably
    `import mxnet as mx` and `from tqdm import tqdm` were intended; this
    class cannot run as written. Confirm against the original project.
    """
    def __init__(self, model, ngpu, options=None):
        # model: callable mapping an input symbol to the network output symbol
        self.model = model
        self.ngpu = ngpu
        self.gpu_mode = True if ngpu >= 1 else False
        if self.gpu_mode:
            self.gpus = [mx.gpu(i) for i in range(ngpu)]
        # NOTE(review): options=None default crashes on the subscripts below;
        # callers must always pass a dict with these keys.
        if options['benchmark_mode']:
            os.environ['MXNET_CUDNN_AUTOTUNE_DEFAULT'] = '1'
        self.progressbar = options['progressbar']
    def set_optimizer(self, opt_type, opt_conf):
        # Only plain SGD with cross-entropy tracking is supported.
        if opt_type == 'SGD':
            self.opt_type = 'sgd'
            self.lr = opt_conf['lr']
            self.metric = mx.metric.CrossEntropy()
        else:
            raise NotImplementedError
    def run(self, iterator, mode='train'):
        """Run one pass over `iterator` and return per-batch timing dict
        {batch_index: {'forward': s, 'backward': s, 'total': s}}."""
        report = dict()
        # setup mxnet module
        data = mx.sym.var('data')
        module = mx.mod.Module(symbol=self.model(data),
                               context=self.gpus,
                               data_names=['data'],
                               label_names=['softmax_label'])
        B = iterator.batch_size
        C, H, W = iterator.image_shape
        data_shape = (B, C, H, W)
        label_shape = (B,)
        # https://mxnet.incubator.apache.org/tutorials/basic/data.html
        # NOTE(review): zip() is a lazy iterator in Python 3; if mxnet's bind
        # expects a list of pairs this may need list(zip(...)) -- verify.
        module.bind(data_shapes=zip(['data'], [data_shape]),
                    label_shapes=zip(['softmax_label'], [label_shape]))
        module.init_params(initializer=mx.init.Xavier(magnitude=2.))
        module.init_optimizer(optimizer=self.opt_type,
                              optimizer_params=(('learning_rate', self.lr),))
        self.metric.reset()
        ## end setup
        if self.progressbar:
            iterator = tqdm(iterator)
        for idx, (x, t) in enumerate(iterator):
            total_s = time.perf_counter()
            # split the batch into per-sample NDArrays before re-batching
            x = [mx.nd.array(x[i, ...].reshape(1, C, H, W)) for i in range(B)]
            t = [mx.nd.array([t[i]]) for i in range(B)]
            batch = mx.io.DataBatch(x, t)
            forward_s = time.perf_counter()
            module.forward(batch, is_train=True)
            forward_e = time.perf_counter()
            module.update_metric(self.metric, batch.label)
            backward_s = time.perf_counter()
            module.backward()
            backward_e = time.perf_counter()
            module.update()
            total_e = time.perf_counter()
            report[idx] = dict(
                forward=forward_e - forward_s,
                backward=backward_e - backward_s,
                total=total_e - total_s
            )
        return report
class CNN(object):
    """Callable wrapper binding the module-level `cnn` symbol builder to a
    fixed set of hyper-parameters, so instances behave like cnn(x)."""
    def __init__(self, channel, xdim, ydim, output_num):
        # Pre-bind every hyper-parameter once at construction time.
        self.cnn = partial(
            cnn,
            channel=channel,
            xdim=xdim,
            ydim=ydim,
            output_num=output_num,
        )
    def get_func(self):
        """Return the bound builder function itself."""
        return self.cnn
    def __call__(self, x):
        return self.cnn(x)
def cnn(x, channel, xdim, ydim, output_num):
    """Build a convolutional network symbol on top of input symbol `x`:
    four conv/relu stages with two max-poolings, then three fully-connected
    layers ending in a softmax output with `output_num` classes.

    NOTE(review): `mx` is not imported in this file (it imports `neon as nn`);
    presumably `import mxnet as mx` was intended -- confirm.
    NOTE(review): the `channel` and `ydim` parameters are accepted but never
    used; only `xdim` (first conv kernel height) and `output_num` matter.
    """
    net = mx.sym.Convolution(data=x, kernel=(xdim, 3), num_filter=180)
    net = mx.sym.Activation(data=net, act_type='relu')
    net = mx.sym.Convolution(data=net, kernel=(1, 3), num_filter=180)
    net = mx.sym.Activation(data=net, act_type='relu')
    net = mx.sym.Pooling(data=net, pool_type='max', kernel=(1, 2), stride=(2, 2))
    net = mx.sym.Convolution(data=net, kernel=(1, 3), num_filter=180)
    net = mx.sym.Activation(data=net, act_type='relu')
    net = mx.sym.Convolution(data=net, kernel=(1, 3), num_filter=180)
    net = mx.sym.Activation(data=net, act_type='relu')
    net = mx.sym.Pooling(data=net, pool_type='max', kernel=(1, 2), stride=(2, 2))
    net = mx.sym.Convolution(data=net, kernel=(1, 2), num_filter=180)
    net = mx.sym.Activation(data=net, act_type='relu')
    net = mx.sym.Convolution(data=net, kernel=(1, 1), num_filter=180)
    net = mx.sym.Activation(data=net, act_type='relu')
    net = mx.sym.flatten(data=net)
    net = mx.sym.FullyConnected(data=net, num_hidden=2048)
    net = mx.sym.Activation(data=net, act_type='relu')
    net = mx.sym.FullyConnected(data=net, num_hidden=2048)
    net = mx.sym.Activation(data=net, act_type='relu')
    net = mx.sym.FullyConnected(data=net, num_hidden=output_num)
    net = mx.sym.SoftmaxOutput(data=net, name='softmax')
    #a = mx.viz.plot_network(net)
    #a.render('cnn.net')
    return net
|
<filename>distances_rkhs.py
"""
Pairwise distance functions between time series in a RKHS
=========================================================
They all have the following prototype:
function(K, T1, T2, **kwargs)
"""
import numpy as np
from scipy.linalg import solve, eigvals, inv
from scipy.signal import correlate2d
# mean-element-based ----------------------------------------------------------
def distance_mean_elements(K, T1, T2):
    """Squared RKHS distance between the mean elements of two time series.

    Parameters
    ----------
    K: (T1+T2) x (T1+T2) array,
        between frames kernel matrix (series-1 frames first, then series-2)
    T1: int,
        duration of time series 1
    T2: int,
        duration of time series 2

    Returns
    -------
    dme2: double,
        squared distance between the mean-elements in RKHS
    """
    within_1 = K[:T1, :T1].mean()
    within_2 = K[T1:, T1:].mean()
    cross = K[:T1, T1:].mean()
    # ||mu_1 - mu_2||^2 expanded via the kernel trick
    return within_1 + within_2 - 2.0 * cross
def distance_me_squared(K, T1, T2):
    """Squared HS distance between the squared mean elements of two series.

    Parameters
    ----------
    K: (T1+T2) x (T1+T2) array,
        between frames kernel matrix
    T1: int,
        duration of time series 1
    T2: int,
        duration of time series 2

    Returns
    -------
    dme2: double,
        squared HS distance between the mean-elements squared
    """
    within_1 = K[:T1, :T1].mean()
    within_2 = K[T1:, T1:].mean()
    cross = K[:T1, T1:].mean()
    # same expansion as distance_mean_elements, with each term squared
    return within_1 ** 2 + within_2 ** 2 - 2.0 * cross ** 2
def distance_mahalanobis(K, T1, T2, regul=1e-3):
    """ Compute the squared distance between mean elements of two time series
    Parameters
    ----------
    K: (T1+T2) x (T1+T2) array,
        between frames kernel matrix
    T1: int,
        duration of time series 1
    T2: int,
        duration of time series 2
    regul: double, optional, default: 1e-3,
        regularization parameter
    Returns
    -------
    dmpc2: double,
        squared Mahalanobis distance between time-series in RKHS
    """
    # normalization vector: m^T K m gives the mean-element difference
    n = T1 + T2
    m = np.zeros((n, 1), dtype=np.double)
    m[:T1, :] = -1.0 / T1
    m[T1:, :] = 1.0 / T2
    # centering matrix: block-diagonal projectors removing each series' mean
    PiT1 = np.eye(T1, dtype=np.double) - 1.0 / T1
    PiT2 = np.eye(T2, dtype=np.double) - 1.0 / T2
    N = np.vstack([np.hstack([PiT1, np.zeros((T1, T2), dtype=np.double)]),
                   np.hstack([np.zeros((T2, T1), dtype=np.double), PiT2])])
    # compute the distance
    mTK = np.dot(m.T, K)
    me = np.dot(mTK, m)  # difference between mean elements
    mTKN = np.dot(mTK, N)
    NTK = np.dot(N.T, K)
    # regularized pooled covariance in the kernel representation
    A = regul * np.eye(n) + 1.0 / n * np.dot(NTK, N)
    AinvNTK = solve(A, NTK, overwrite_a=True)  # A^{-1} N.T K
    AinvNTKm = np.dot(AinvNTK, m)
    # inverse applied via the matrix-inversion identity, scaled by 1/regul
    dmpc2 = 1.0 / regul * (me - 1.0 / n * np.dot(mTKN, AinvNTKm))
    return dmpc2[0, 0]
# alignment-based -------------------------------------------------------------
def distance_aligned_frames_truncated(K, T1, T2, tau=0):
    """Squared distance between aligned frames of two equal-length series.

    Parameters
    ----------
    K: (T1+T2) x (T1+T2) array,
        between frames kernel matrix
    T1: int,
        duration of time series 1
    T2: int,
        duration of time series 2
    tau: int, optional, default: 0,
        temporal shift (in frames) applied to series 2 before alignment,
        using truncation (equivalent to zero padding)

    Returns
    -------
    dme2: double,
        squared distance between aligned frames in the RKHS

    Notes
    -----
    dme2 = k(x,x) - 1/(T-tau) * sum_{t=0}^{T-tau} k(x1_t, x2_{t+tau})
    assuming a constant base kernel value k(x,x), read from K[0,0].
    """
    assert T1 == T2, "the series should be of same duration"
    T = T1
    cross_kernel = K[:T, T:]  # k(x1_s, x2_t) block
    # the tau-th diagonal holds the aligned pairs k(x1_t, x2_{t+tau})
    aligned = np.diag(cross_kernel, k=tau)
    return K[0, 0] - aligned.mean()
def distance_aligned_frames_cyclic(K, T1, T2, tau=0):
    """Squared distance between aligned frames, with cyclic shift padding.

    Parameters
    ----------
    K: (T1+T2) x (T1+T2) array,
        between frames kernel matrix
    T1: int,
        duration of time series 1
    T2: int,
        duration of time series 2
    tau: positive int, optional, default: 0,
        temporal shift (in frames) applied to series 2 before alignment,
        using "cyclic" padding

    Returns
    -------
    dme2: double,
        squared distance between aligned frames in the RKHS

    Notes
    -----
    dme2 = k(x,x) - 1/T * sum_{t=0}^{T} k(x1_t, x2_{(t+tau) % T})
    assuming a constant base kernel value k(x,x), read from K[0,0].
    """
    assert T1 == T2, "the series should be of same duration"
    T = T1
    cross_kernel = K[:T, T:]  # k(x1_s, x2_t) block
    if tau == 0:
        wrapped_sum = cross_kernel.trace()
    else:
        # the shifted diagonal plus its wrap-around complement
        wrapped_sum = cross_kernel.trace(offset=tau) + cross_kernel.trace(offset=tau - T)
    return K[0, 0] - wrapped_sum / float(T)
# auto-covariance-based -------------------------------------------------------
def distance_hsac_truncated(K, T1, T2, tau=1):
    """Squared HS distance between autocovariance operators (truncated).

    || scov^{(y)}_tau - scov^{(x)}_tau ||_HS^2 computed from the kernel
    matrix via Hadamard-product traces.

    Parameters
    ----------
    K: (T1+T2) x (T1+T2) array,
        between frames kernel matrix
    T1: int,
        duration of time series 1
    T2: int,
        duration of time series 2
    tau: int, optional, default: 1,
        lag, ie time shift used in the auto-covariance computation

    Returns
    -------
    dhsac: double,
        squared Hilbert-Schmidt norm of the difference between the
        auto-covariance operators of the two series

    Notes
    -----
    Truncated version between X[:-tau] and X[tau:] (equivalent to zero
    padding).
    """
    assert tau <= min(T1 / 2.0, T2 / 2.0), "Too big tau"
    # Gram blocks over the lag-free (truncated) frames of each series
    head1 = K[:T1 - tau, :T1 - tau]
    head2 = K[T1:T1 + T2 - tau, T1:T1 + T2 - tau]
    cross = K[:T1 - tau, T1:T1 + T2 - tau]
    # Gram blocks over the tau-shifted frames
    tail1 = K[tau:T1, tau:T1]
    tail2 = K[T1 + tau:, T1 + tau:]
    cross_tau = K[tau:T1, T1 + tau:]
    # traces via Hadamard products (using the symmetry of K); no transpose on
    # the cross term because K21tau.T == K12tau
    return (np.mean(head1 * tail1)
            + np.mean(head2 * tail2)
            - 2 * np.mean(cross * cross_tau))
def distance_hsac_cyclic(K, T1, T2, tau=1):
    """Squared HS distance between autocovariance operators (cyclic).

    || scov^{(y)}_tau - scov^{(x)}_tau ||_HS^2 computed from the kernel
    matrix via Hadamard-product traces, with cyclic shift padding.

    Parameters
    ----------
    K: (T1+T2) x (T1+T2) array,
        between frames kernel matrix
    T1: int,
        duration of time series 1
    T2: int,
        duration of time series 2
    tau: int, optional, default: 1,
        lag, ie time shift used in the auto-covariance computation

    Returns
    -------
    dhsac: double,
        squared Hilbert-Schmidt norm of the difference between the
        auto-covariance operators of the two series

    Notes
    -----
    Cyclic version between X and [ X[tau:], X[:tau] ]. Artefacts may arise
    if the two series were not synchronized and comprised of the same number
    of periods.
    """
    assert tau <= min(T1 / 2.0, T2 / 2.0), "Too big tau"
    G1 = K[:T1, :T1]
    G2 = K[T1:, T1:]
    G12 = K[:T1, T1:]
    # index maps realizing a circular permutation by tau frames
    roll1 = np.arange(tau, T1 + tau) % T1
    roll2 = np.arange(tau, T2 + tau) % T2
    # no copy needed: any prior centering is independent of frame order
    G1s = G1[np.ix_(roll1, roll1)]
    G2s = G2[np.ix_(roll2, roll2)]
    G12s = G12[np.ix_(roll1, roll2)]
    # traces via Hadamard products (no transpose: K21tau.T == K12tau)
    return np.mean(G1 * G1s) + np.mean(G2 * G2s) - 2 * np.mean(G12 * G12s)
# TODO use incomplete Cholesky decomposition (ST & C chap. 6, p. 175)
def hsnorm_cross_correlation(K, T1, T2, regul=1e-3):
""" Compute the squared Hilbert-Schmidt norm of the cross-correlation
This *similarity* measures the strength of the cross-correlation between
two series, i.e. the degree to which you can linearly (in feature space!)
predict one knowing the other (0 => not linked).
Parameters
----------
K: (T1+T2) x (T1+T2) array,
between frames kernel matrix
T1: int,
duration of time series 1
T2: int,
duration of time series 2
regul: double, optional, default: 1e-3,
regularization parameter
Returns
-------
hscorr: double,
squared Hilbert-Schmidt norm of the cross-correlation operator
between time series 1 and 2, in the RKHS induced by a base kernel
Notes
-----
This is computed as a trace by solving a generalized eigenvalue problem
equivalent to the one appearing in kernel CCA.
"""
assert T1 == T2, "the series should be of same duration"
T = T1
# define the gram matrices of the series
K1 = K[:T, :T]
K2 = K[T:, T:]
# build right-hand-side symetric matrix of the gen. eigenvalue problem
A = np.zeros(K.shape)
K1_K2 = np.dot(K1, K2)
A[:T, T:] = K1_K2 # upper triangular part
A[T:, :T] = K1_K2.T # lower triangular part (symetric)
# build left-hand-side symetric matrix of the gen. eigenvalue problem
B = np.zeros(K.shape)
B[:T, :T] = (1.0 - regul) * np.dot(K1, K1) + regul * K1
B[T:, T:] = (1.0 - regul) * np.dot(K2, K2) + regul * K2
# get the eigen-values (w) of Av = wBv (generalized eigenvalue problem)
tr = float(np.mean(eigvals(A, B, overwrite_a=True)))
return tr
def distance_autocor_truncated(K, T1, T2, tau=1, regul=1e-3):
    """ Compute the squared HS distance between the autocorrelation operators of
    two time series
    Parameters
    ----------
    K: (T1+T2) x (T1+T2) array,
        between frames kernel matrix (assumed to be centered!)
    T1: int,
        duration of time series 1
    T2: int,
        duration of time series 2
    tau: int, optional, default: 1
        lag, ie time shift used in the auto-covariance computation
    regul: float, optional, default: 1e-3
        regularization parameter for the inverse computation
    Returns
    -------
    dacor: double,
        squared Hilbert-Schmidt norm of the difference between the
        auto-correlation operators, in the RKHS induced by 'frame_kern', of
        the two time series
    Notes
    -----
    Truncated version.
    """
    assert tau <= min(T1 / 2.0, T2 / 2.0), "Too big tau"
    # define the truncated matrices of the non-shifted series
    K1 = K[:T1 - tau, :T1 - tau]
    K2 = K[T1:T1 + T2 - tau, T1:T1 + T2 - tau]
    K12 = K[:T1 - tau, T1:T1 + T2 - tau]
    # define the truncated matrices of the shifted series
    K1tau = K[tau:T1, tau:T1]
    K2tau = K[T1 + tau:, T1 + tau:]
    K12tau = K[tau:T1, T1 + tau:]
    # compute the regularized inverse terms
    # (assume_a='pos' replaces the sym_pos flag removed in SciPy 1.11;
    #  the left-hand matrix is symmetric positive definite by construction)
    N1 = regul * np.eye(T1 - tau) - solve(
        (T1 - tau) * np.eye(T1 - tau) + 1.0 / regul * K1, K1, assume_a='pos')
    N2 = regul * np.eye(T2 - tau) - solve(
        (T2 - tau) * np.eye(T2 - tau) + 1.0 / regul * K2, K2, assume_a='pos')
    KK1 = np.dot(np.dot(N1.T, K1), np.dot(N1, K1tau))
    KK2 = np.dot(np.dot(N2.T, K2), np.dot(N2, K2tau))
    KK12 = np.dot(np.dot(N1.T, K12), np.dot(N2, K12tau.T))
    # compute the different traces
    tr1 = 1.0 / ((regul ** 4) * (T1 - tau) ** 2) * KK1.trace()
    tr2 = 1.0 / ((regul ** 4) * (T2 - tau) ** 2) * KK2.trace()
    tr12 = 1.0 / ((regul ** 4) * (T1 - tau) * (T2 - tau)) * KK12.trace()
    return tr1 + tr2 - 2.0 * tr12
def distance_autocor_cyclic(K, T1, T2, tau=1, regul=1e-3):
    """ Compute the squared HS distance between the autocorrelation operators of
    two time series
    Parameters
    ----------
    K: (T1+T2) x (T1+T2) array,
        between frames kernel matrix (assumed to be centered!)
    T1: int,
        duration of time series 1
    T2: int,
        duration of time series 2
    tau: int or float, optional, default: 1
        lag, ie time shift used in the auto-covariance computation;
        a value < 0.5 is interpreted as a fraction of each series length
    regul: float, optional, default: 1e-3
        regularization parameter for the inverse computation
    Returns
    -------
    dacor: double,
        squared Hilbert-Schmidt norm of the difference between the
        auto-correlation operators, in the RKHS induced by 'frame_kern', of
        the two time series
    Notes
    -----
    Cyclic version.
    """
    # define per-series tau
    if tau < 0.5:
        # tau as a fraction of series length (at least one frame)
        tau1 = max(1, int(T1 * tau + 0.5))
        tau2 = max(1, int(T2 * tau + 0.5))
    elif 1 <= tau < min(T1 / 2.0, T2 / 2.0):
        # constant tau: same for each series
        tau1 = tau2 = int(tau)
    else:
        raise ValueError("Too big tau")
    # define the (non-truncated) matrices of the non-shifted series
    K1 = K[:T1, :T1]
    K2 = K[T1:, T1:]
    K12 = K[:T1, T1:]
    # circular permutation of tau frames
    idxs1 = np.arange(tau1, T1 + tau1) % T1
    idxs2 = np.arange(tau2, T2 + tau2) % T2
    # Note: no need for copy as we re-use the previous centering (indep. of frame order)
    K1tau = K1[np.ix_(idxs1, idxs1)]
    K2tau = K2[np.ix_(idxs2, idxs2)]
    K12tau = K12[np.ix_(idxs1, idxs2)]
    # compute the regularized inverse terms
    # (assume_a='pos' replaces the sym_pos flag removed in SciPy 1.11;
    #  the left-hand matrix is symmetric positive definite by construction)
    N1 = regul * np.eye(T1) - solve(
        T1 * np.eye(T1) + 1.0 / regul * K1, K1, assume_a='pos')
    N2 = regul * np.eye(T2) - solve(
        T2 * np.eye(T2) + 1.0 / regul * K2, K2, assume_a='pos')
    KK1 = np.dot(np.dot(N1.T, K1), np.dot(N1, K1tau))
    KK2 = np.dot(np.dot(N2.T, K2), np.dot(N2, K2tau))
    KK12 = np.dot(np.dot(N1.T, K12), np.dot(N2, K12tau.T))
    # compute the different traces
    tr1 = 1.0 / ((regul ** 4) * T1 ** 2) * KK1.trace()
    tr2 = 1.0 / ((regul ** 4) * T2 ** 2) * KK2.trace()
    tr12 = 1.0 / ((regul ** 4) * T1 * T2) * KK12.trace()
    # TODO: check if more efficient to use Hadamard products?
    return tr1 + tr2 - 2.0 * tr12
def hsdotprod_autocor_truncated(K, T1, T2, tau=1, regul=1e-3):
    """ Compute the Hilbert-Schmidt inner-product between the autocorrelation
    operators of two time series (**similarity**, not a distance)
    Parameters
    ----------
    K: (T1+T2) x (T1+T2) array,
        between frames kernel matrix (assumed to be centered!)
    T1: int,
        duration of time series 1
    T2: int,
        duration of time series 2
    tau: int, optional, default: 1
        lag, ie time shift used in the auto-covariance computation
    regul: float, optional, default: 1e-3
        regularization parameter for the inverse computation
    Returns
    -------
    hsdp: double,
        Hilbert-Schmidt inner product between the auto-correlation operators,
        in the RKHS induced by 'frame_kern', of the two time series
    Notes
    -----
    Truncated version.
    """
    assert tau <= min(T1 / 2.0, T2 / 2.0), "Too big tau"
    # define the truncated matrices of the non-shifted series
    K1 = K[:T1 - tau, :T1 - tau]
    K2 = K[T1:T1 + T2 - tau, T1:T1 + T2 - tau]
    K12 = K[:T1 - tau, T1:T1 + T2 - tau]
    # define the truncated cross matrix of the shifted series
    # (only the cross term is needed for the inner product)
    K12tau = K[tau:T1, T1 + tau:]
    # compute the regularized inverse terms
    # (assume_a='pos' replaces the sym_pos flag removed in SciPy 1.11;
    #  the left-hand matrix is symmetric positive definite by construction)
    N1 = regul * np.eye(T1 - tau) - solve(
        (T1 - tau) * np.eye(T1 - tau) + 1.0 / regul * K1, K1, assume_a='pos')
    N2 = regul * np.eye(T2 - tau) - solve(
        (T2 - tau) * np.eye(T2 - tau) + 1.0 / regul * K2, K2, assume_a='pos')
    KK12 = np.dot(np.dot(N1.T, K12), np.dot(N2, K12tau.T))
    # compute the trace
    hsdp = 1.0 / ((regul ** 4) * (T1 - tau) * (T2 - tau)) * KK12.trace()
    return hsdp
def hsdotprod_autocor_cyclic(K, T1, T2, tau=1, regul=1e-3):
    """ Compute the Hilbert-Schmidt inner-product between the autocorrelation
    operators of two time series (**similarity**, not a distance)
    Parameters
    ----------
    K: (T1+T2) x (T1+T2) array,
        between frames kernel matrix (assumed to be centered!)
    T1: int,
        duration of time series 1
    T2: int,
        duration of time series 2
    tau: int or float, optional, default: 1
        lag, ie time shift used in the auto-covariance computation;
        a value < 0.5 is interpreted as a fraction of each series length
    regul: float, optional, default: 1e-3
        regularization parameter for the inverse computation
    Returns
    -------
    hsdp: double,
        Hilbert-Schmidt inner product between the auto-correlation operators,
        in the RKHS induced by 'frame_kern', of the two time series
    Notes
    -----
    Cyclic version.
    """
    # define per-series tau
    if tau < 0.5:
        # tau as a fraction of series length
        tau1 = max(1, int(T1 * tau + 0.5))
        tau2 = max(1, int(T2 * tau + 0.5))
    elif 1 <= tau < min(T1 / 2.0, T2 / 2.0):
        # constant tau: same for each series
        tau1 = tau2 = int(tau)
    else:
        raise ValueError("Too big tau")
    # define the (non-truncated) matrices of the non-shifted series
    K1 = K[:T1, :T1]
    K2 = K[T1:, T1:]
    K12 = K[:T1, T1:]
    # circular permutation of tau frames
    idxs1 = np.arange(tau1, T1 + tau1) % T1
    idxs2 = np.arange(tau2, T2 + tau2) % T2
    # Note: no need for copy as we re-use the previous centering (indep. of frame order)
    K12tau = K12[np.ix_(idxs1, idxs2)]
    # compute the regularized inverse terms
    # (assume_a='pos' replaces the sym_pos flag removed in SciPy 1.11;
    #  the left-hand matrix is symmetric positive definite by construction)
    N1 = regul * np.eye(T1) - solve(
        T1 * np.eye(T1) + 1.0 / regul * K1, K1, assume_a='pos')
    N2 = regul * np.eye(T2) - solve(
        T2 * np.eye(T2) + 1.0 / regul * K2, K2, assume_a='pos')
    KK12 = np.dot(np.dot(N1.T, K12), np.dot(N2, K12tau.T))
    # compute the trace
    hsdp = 1.0 / ((regul ** 4) * T1 * T2) * KK12.trace()
    return hsdp
# auto-regressive-model-based -------------------------------------------------
def distance_predictive_codings(K, T1, T2, tau=1, regul=1e-3):
    """ Compute the squared HS distance between the parameters of AR(p) models
    (in feature space) of two time series
    Parameters
    ----------
    K: (T1+T2) x (T1+T2) array,
        between frames kernel matrix (assumed to be centered!)
    T1: int,
        duration of time series 1
    T2: int,
        duration of time series 2
    tau: int, optional, default: 1
        order of the AR models (use tau past frames)
    regul: float, optional, default: 1e-3
        regularization parameter for the inverse computation
    Returns
    -------
    dpc: double,
        squared Hilbert-Schmidt norm of the difference between the AR(p) models
        learned by kernel ridge regression in the RKHS induced by 'frame_kern'
    """
    order = int(tau)
    assert 1 <= order < min(T1 / 2.0, T2 / 2.0), \
        "Too big p (p=%d >= %d or %d)" % (order, T1 / 2.0, T2 / 2.0)
    # Gram blocks of the joint kernel matrix
    G1 = K[:T1, :T1]
    G2 = K[T1:, T1:]
    G12 = K[:T1, T1:]
    # banded sums over p-long windows, via 2D correlation with a p x p identity
    eye_p = np.eye(order)
    S1 = correlate2d(G1[:-1, :-1], eye_p, mode='valid')
    S2 = correlate2d(G2[:-1, :-1], eye_p, mode='valid')
    S21 = correlate2d(G12.T[:-1, :-1], eye_p, mode='valid')
    # regularized inverses of the windowed Gram sums
    # TODO: rewrite formula better (to replace inv with solve and convolutions by products?)
    Q1 = inv(regul * np.eye(T1 - order) + S1)
    Q2 = inv(regul * np.eye(T2 - order) + S2)
    # product terms whose traces yield the squared norms / inner product
    P1 = np.dot(np.dot(Q1, G1[order:, order:]), np.dot(Q1, S1))
    P2 = np.dot(np.dot(Q2, G2[order:, order:]), np.dot(Q2, S2))
    P12 = np.dot(np.dot(Q1, G12[order:, order:]), np.dot(Q2, S21))
    # expand ||A1 - A2||^2 = tr(P1) + tr(P2) - 2 tr(P12), per-series normalized
    return 1.0 / T1 * P1.trace() + 1.0 / T2 * P2.trace() - 2.0 / T1 * P12.trace()
def distance_dual_predictive_codings(K, T1, T2, tau=1, regul=1e-3):
    """ Compute the squared HS distance between the dual parameters of AR(p)
    models (in feature space) of two time series
    Parameters
    ----------
    K: (T1+T2) x (T1+T2) array,
        between frames kernel matrix (assumed to be centered!)
    T1: int,
        duration of time series 1
    T2: int,
        duration of time series 2
    tau: int, optional, default: 1
        order of the AR models (use tau past frames)
    regul: float, optional, default: 1e-3
        regularization parameter for the inverse computation
    Returns
    -------
    ddpc: double,
        squared Hilbert-Schmidt norm of the difference between the dual
        parameters of AR(p) models learned by kernel ridge regression in the
        RKHS induced by 'frame_kern'
    """
    order = int(tau)
    assert 1 <= order < min(T1 / 2.0, T2 / 2.0), \
        "Too big p (p=%d >= %d or %d)" % (order, T1 / 2.0, T2 / 2.0)
    # Gram blocks of the joint kernel matrix
    G1 = K[:T1, :T1]
    G2 = K[T1:, T1:]
    G12 = K[:T1, T1:]
    # banded sums over p-long windows, via 2D correlation with a p x p identity
    eye_p = np.eye(order)
    S1 = correlate2d(G1[:-1, :-1], eye_p, mode='valid')
    S2 = correlate2d(G2[:-1, :-1], eye_p, mode='valid')
    # regularized inverses of the windowed Gram sums
    # XXX incomplete Cholesky would be better but is 3x slower...
    Q1 = inv(regul * np.eye(T1 - order) + S1)
    Q2 = inv(regul * np.eye(T2 - order) + S2)
    # product terms whose traces yield the squared norms / inner product
    P1 = np.dot(np.dot(Q1, G1[order:, order:]), Q1)
    P2 = np.dot(np.dot(Q2, G2[order:, order:]), Q2)
    P12 = np.dot(np.dot(Q1, G12[order:, order:]), Q2)
    # expand the squared difference of dual parameters into three traces
    return 1.0 / T1 * P1.trace() + 1.0 / T2 * P2.trace() - 2.0 / T1 * P12.trace()
# FOR DEBUG PURPOSES
def distance_hsac_decomp(K, T1, T2, tau=1, mode="truncated"):
    """ Return the components 1/T**2 * (tr1, tr2, tr12) of HSAC
    mode {"truncated"/"cyclic"} defines way to compute HSAC
    (debug helper: exposes the three traces instead of their combination)
    """
    assert mode in ["truncated", "cyclic"], "Unknown HSAC mode (%s)" % mode
    assert T1 == T2, "the series should be of same duration"
    assert tau <= T1 / 2.0, "Too big tau"
    T = T1
    if mode == "truncated":
        # drop the first/last tau frames to align each series with its shift
        blocks = (K[:T - tau, :T - tau],
                  K[T:T + T - tau, T:T + T - tau],
                  K[:T - tau, T:T + T - tau])
        shifted = (K[tau:T, tau:T],
                   K[T + tau:, T + tau:],
                   K[tau:T, T + tau:])
        # normalization factor
        nzf = 1.0 / ((T - tau) * (T - tau))
    elif mode == "cyclic":
        # full blocks, shifted by a circular permutation of tau frames
        blocks = (K[:T, :T], K[T:, T:], K[:T, T:])
        # no copy needed: the centering does not depend on frame order
        wrapped = np.arange(tau, T + tau) % T
        roll = np.ix_(wrapped, wrapped)
        shifted = tuple(blk[roll] for blk in blocks)
        # normalization factor
        nzf = 1.0 / (T * T)
    # traces via Hadamard products (transpose of the shifted block!)
    tr1, tr2, tr12 = (nzf * (blk * sh.T).sum()
                      for blk, sh in zip(blocks, shifted))
    return (tr1, tr2, tr12)
def _get_centered_gram(kern_mat, is_sym=True):
""" Center (NOT in place) the Gram (kernel) matrix in the feature space
Mathematical operation: K <- PKP where P = eye(n) - 1/n ones((n,n))
Parameters
----------
kern_mat: (n,n) symmetric positve semi-definite kernel matrix
is_sym: boolean (default: True), assume the matrix is symmetric
Returns
-------
cmat: the centered gram matrix
"""
# number of rows and cols
nr, nc = kern_mat.shape
assert not is_sym or nr == nc, "Matrix cannot be symmetric if not square!"
# mean of the columns of the original matrix (as (nc,) row vector)
cms = np.mean(kern_mat, 0).reshape((1, nc))
# mean of the rows (as (nr,1) column vector)
if is_sym:
rms = cms.reshape((nr, 1))
else:
rms = np.mean(kern_mat, 1).reshape((nr, 1))
# mean of the means over columns
mcm = np.mean(cms) # precomputed once for efficiency
# return the centered matrix (using array broadcasting)
return kern_mat + mcm - cms - rms
|
from datetime import datetime, timedelta
from random import randrange
from uuid import uuid4
from flask import Blueprint, jsonify, request, url_for
from flask_login import current_user, login_required
from ..helper import admin_required_decorator as admin_required
from ..helper.youtube import build_youtube_api
from ..models import Callback, Channel
from ..tasks import channels_renew
api_channel_blueprint = Blueprint("api_channel", __name__)
@api_channel_blueprint.route("/search")
@login_required
def search():
    """Search YouTube for channels matching the ``query`` request argument.

    Returns a JSON list of {title, id, thumbnail} dicts, at most 30 items.
    """
    query = request.args.get("query")
    response = (
        build_youtube_api()
        .search()
        .list(part="snippet", maxResults=30, q=query, type="channel")
        .execute()
    )
    # keep only the fields the client needs from each search hit
    # (removed a dead `results = response` assignment that was immediately
    #  overwritten by the comprehension below)
    results = [
        {
            "title": item["snippet"]["title"],
            "id": item["snippet"]["channelId"],
            "thumbnail": item["snippet"]["thumbnails"]["high"]["url"],
        }
        for item in response["items"]
    ]
    return jsonify(results)
@api_channel_blueprint.route("/renew-all")
@login_required
def renew_all():
    """Renew Subscription Info, Both Hub and Info

    Query args:
        execution: 0 (default) -> renew all channels now in one batch task;
            any other value -> schedule one task per channel near its
            expiration; -2 additionally randomizes each countdown to
            spread the load.
    Returns JSON: either {"id", "status"} for the batch task, or a mapping
    of channel id -> scheduled task id.
    """
    execution = int(request.args.to_dict().get("execution", 0))
    # four days, in seconds: renewal interval passed to per-channel tasks
    interval = 60 * 60 * 24 * 4
    if execution == 0:
        # single batch task renewing every channel immediately
        task = channels_renew.apply_async(
            args=[[channel.id for channel in Channel.query.all()]]
        )
        response = {
            "id": task.id,
            "status": url_for("api_task.status", task_id=task.id),
        }
    else:
        response = {}
        for channel in Channel.query.all():
            expiration = channel.expiration
            if expiration is None:
                # Expiration is not available yet (Channel just init)
                # Set ETA to four days later
                countdown = 60 * 60 * 24 * 4
            elif expiration > datetime.now() + timedelta(days=1):
                # Expiration is more than one day
                # Set ETA to one day before expiration
                countdown = expiration - timedelta(days=1) - datetime.now()
                countdown = countdown.total_seconds()
            else:
                # Expiration is less than one day
                # Set ETA to now
                countdown = 0
            if execution == -2 and countdown > 0:
                # jitter the countdown so the tasks do not all fire at once
                countdown = randrange(int(countdown))
            task = channels_renew.apply_async(
                args=[[channel.id], interval],
                countdown=countdown,
                task_id=f"renew_{channel.id}_{str(uuid4())[:8]}",
            )
            response[channel.id] = task.id
    return jsonify(response)
@api_channel_blueprint.route("/callbacks")
@login_required
@admin_required
def callbacks_all():
    """Return callbacks received in the last ``days`` days (default 3),
    newest first, as a JSON list of dicts (admin only)."""
    days = int(request.args.to_dict().get("days", 3))
    cutoff = (datetime.now() - timedelta(days=days)).strftime("%Y-%m-%d")
    recent = Callback.query.filter(Callback.timestamp >= cutoff).order_by(
        Callback.timestamp.desc()
    )
    return jsonify([dict(callback) for callback in recent])
@api_channel_blueprint.route("/<channel_id>/status")
@login_required
def status(channel_id):
    """From Hub fetch Status"""
    target = Channel.query.get_or_404(channel_id)
    return jsonify(target.refresh())
@api_channel_blueprint.route("/<channel_id>/subscribe")
@login_required
def subscribe(channel_id):
    """Subscribe the current user to the given channel."""
    result = current_user.subscribe_to(channel_id)
    return jsonify(result)
@api_channel_blueprint.route("/<channel_id>/unsubscribe")
@login_required
def unsubscribe(channel_id):
    """Unsubscribe to a Channel"""
    # NOTE(review): "unbsubscribe" looks misspelled, but it must match the
    # method name on the user model -- confirm there before renaming either.
    return jsonify(current_user.unbsubscribe(channel_id))
@api_channel_blueprint.route("/<channel_id>/fetch-videos")
@login_required
@admin_required
def fetch_videos(channel_id):
    """Trigger a video fetch for one channel (admin only)."""
    # TODO: deprecate this
    target = Channel.query.get_or_404(channel_id)
    return jsonify(target.fetch_videos())
@api_channel_blueprint.route("/<channel_id>/callbacks")
@login_required
@admin_required
def callbacks(channel_id):
    """Return up to 50 callbacks for one channel as JSON (admin only)."""
    target = Channel.query.get_or_404(channel_id)
    recent = target.callbacks.limit(50)
    return jsonify([dict(callback) for callback in recent])
|
<gh_stars>0
import gym
from gym import spaces
from gym.envs.registration import EnvSpec
from gym.utils import seeding
import numpy as np
import pandas as pd
from enum import Enum
import matplotlib.pyplot as plt
from typing import Union, Tuple
class Actions(Enum):
    """Discrete trading actions available to the agent."""
    Short = 0
    Long = 1
    Flat = 2
class Positions(Enum):
    """Market positions an agent can hold."""
    Short = 0
    Long = 1
    Neutral = 2

    def opposite(self):
        """Return Short for Long; Long for any other position (incl. Neutral)."""
        return Positions.Long if self != Positions.Long else Positions.Short
class TradingEnv(gym.Env):
    """Daily single-asset trading environment (gym.Env).

    Observations are the last ``window_size`` close-price percent changes;
    actions are ``Actions`` (Short/Long/Flat); the step reward is the signed
    percent price move since the previous tick, net of commission.
    Price data is loaded from a hard-coded CSV path by ``_process_data``.
    """
    metadata = {'render.modes': ['human']}
    spec = EnvSpec("TradingEnv-v0")
    def __init__(self, window_size, commission_perc=0.01, random_ofs_on_reset=True, date_range: Union[None, Tuple[str, str]] = None):
        """
        Args:
            window_size (int): number of past ticks in each observation
            commission_perc (float): commission as a fraction of price
            random_ofs_on_reset (bool): start each episode at a random offset
            date_range: optional (start, end) date strings to restrict data
        """
        self.seed()
        # self.df = df
        self.window_size = window_size
        self.frame_bound = [self.window_size, None]
        self.date_range = date_range
        # _process_data also sets self.df (checked by the assert below)
        self.dates, self.prices, self.signal_features = self._process_data()
        assert self.df.ndim == 2
        self.commission_perc = commission_perc
        self.random_ofs_on_reset = random_ofs_on_reset
        # observation is a flat window of signal features
        self.shape = (1 * self.window_size, )
        # print(self.shape)
        # Define action and observation spaces
        self.action_space = spaces.Discrete(n=len(Actions))
        self.observation_space = spaces.Box(
            low=-np.inf,
            high=np.inf,
            shape=self.shape,
            dtype=np.float32
        )
        # Episode
        self._start_tick = self.window_size - 1
        self._end_tick = len(self.prices) - 1
        self._offset = 0
        self._done = None
        self._current_tick = None
        self._prev_tick = None
        self._position = None
        self._position_history = None
        self._total_reward = None
        self._total_profit = None
        self._first_rendering = None
        self.history = None
    def reset(self) -> np.ndarray:
        """
        Reset environment internals and return first observation
        """
        if self.random_ofs_on_reset:
            # random episode start; bound leaves room for ~10 windows of data
            # NOTE(review): offset is not clamped against _end_tick -- confirm
            self._offset = self.np_random.choice(
                self.prices.shape[0] - self.window_size * 10)
        else:
            self._offset = 0
        self._done = False
        self._current_tick = self._start_tick + self._offset
        self._prev_tick = self._current_tick - 1
        # pad the history so its indices line up with tick numbers
        self._position_history = ((self.window_size - 1) * [None])
        self._total_reward = 0.0
        self._total_profit = 1.0  # unit
        self._first_rendering = True
        self.history = {}
        return self._get_observation()
    def step(self, action) -> Tuple[np.ndarray, float, bool, dict]:
        """
        Take action in environment
        Args:
            action (Actions): Action to take in current step
        Returns:
            Tuple: Tuple of (observation, step_reward, self._done, info)
        """
        self._done = False
        # print(
        #     f'Taking step in tick {self._current_tick} ({self.dates[self._current_tick].date()})')
        self._prev_tick = self._current_tick
        self._current_tick += 1  # Increase current tick
        if self._current_tick == self._end_tick:
            self._done = True
        # Calculate immediate reward for current action
        step_reward = self._calculate_reward(action)
        self._total_reward += step_reward
        self._update_profit(action)
        # the raw action value is recorded as the held position
        self._position = action
        self._position_history.append(self._position)
        # Get new observation and update history
        observation = self._get_observation()
        info = dict(
            total_reward=self._total_reward,
            total_profit=self._total_profit,
            position=self._position,
            offset=self._offset
        )
        self._update_history(info)
        return observation, step_reward, self._done, info
    def _get_observation(self) -> np.ndarray:
        """Return the feature window ending at (and including) the current tick."""
        return self.signal_features[(self._current_tick + 1 - self.window_size):self._current_tick + 1]
    def _update_history(self, info):
        """Append each info value to its running list in self.history."""
        if not self.history:
            # lazily create one list per info key on the first step
            self.history = {key: [] for key in info.keys()}
        for key, value in info.items():
            self.history[key].append(value)
    def _process_data(self) -> Tuple[pd.DatetimeIndex, np.ndarray, np.ndarray]:
        """Load the BTC CSV and return (dates, prices, signal_features).

        Side effect: sets self.df. The CSV path is hard-coded and relative
        to the process working directory.
        """
        self.df = pd.read_csv('../data/BTC_history_n.csv',
                              sep=',', parse_dates=True, index_col=0)
        if self.date_range is not None:
            # restrict (and pad) the data to a continuous daily date range
            self.df = self.df.reindex(pd.date_range(
                start=self.date_range[0], end=self.date_range[1], freq='d'))
        dates = self.df.index
        prices = self.df.loc[:, 'close'].to_numpy(dtype=np.float32)
        # day-over-day relative change; NaNs (first row, gaps) become 0.0
        pct_change = self.df.loc[:, 'close'].pct_change().replace(
            np.nan, 0.0).to_numpy(dtype=np.float32)
        if self.frame_bound[1] is None:
            self.frame_bound[1] = len(prices)
        assert self.frame_bound[0] - self.window_size == 0
        prices = prices[self.frame_bound[0] -
                        self.window_size:self.frame_bound[1]]
        # diff = np.insert(np.diff(prices), 0, 0)
        diff = pct_change
        # signal_features = np.column_stack((prices, diff))
        signal_features = diff
        return dates, prices, signal_features
    def _calculate_reward(self, action) -> float:
        """
        Calculate step reward based on action
        Args:
            action (Actions): Action taken in time step
        Returns:
            float: Step reward
        """
        step_reward = 0.0
        # Flat earns nothing; Long/Short earn the signed percent move
        if action in [Actions.Long.value, Actions.Short.value]:
            current_price = self.prices[self._current_tick]
            last_trade_price = self.prices[self._prev_tick]
            price_diff = current_price - last_trade_price
            if action == Actions.Short.value:
                price_diff *= -1
            # commission is charged on both legs (entry and exit prices)
            perc_diff = (price_diff - self.commission_perc * (
                last_trade_price + current_price)) / last_trade_price
            step_reward = perc_diff
        return step_reward
    @staticmethod
    def calculate_rel_profit(price_diff: float, shares: float, prev_total_profit: float, current_price: float, commission_perc: float):
        """Profit of one step relative to the previous total profit,
        net of the commission on the current price."""
        relative_profit = (
            shares * (price_diff - commission_perc * current_price)) / prev_total_profit
        return relative_profit
    @staticmethod
    def calculate_shares(total_profit: float, commission_perc: float, last_trade_price: float):
        """Number of shares affordable with the current capital after commission."""
        return total_profit * (1 - commission_perc) / last_trade_price
    def _update_profit(self, action):
        """Compound self._total_profit with the relative profit of this step."""
        trade = False
        if action in [Actions.Long.value, Actions.Short.value]:
            trade = True
        if trade or self._done:
            current_price = self.prices[self._current_tick]
            last_trade_price = self.prices[self._prev_tick]
            price_diff = current_price - last_trade_price
            # perc_diff = (price_diff - self.commission_perc *
            #              last_trade_price) / last_trade_price
            # Calculate relative number of shares bought based on previous total profit
            shares = self.calculate_shares(
                total_profit=self._total_profit, commission_perc=self.commission_perc, last_trade_price=last_trade_price)
            rel_profit = self.calculate_rel_profit(price_diff=price_diff,
                                                   shares=shares, prev_total_profit=self._total_profit, current_price=current_price, commission_perc=self.commission_perc)
            # Update total profit
            self._total_profit = self._total_profit * (1 + rel_profit)
    def max_possible_profit(self) -> float:  # Trade fees are ignored
        """Profit of a perfect-foresight long-only strategy over the series."""
        current_tick = self._start_tick
        last_trade_tick = current_tick - 1
        profit = 1.0
        while current_tick <= self._end_tick:
            position = None
            # advance through each monotone run of prices
            if self.prices[current_tick] < self.prices[current_tick - 1]:
                while (current_tick <= self._end_tick and
                       self.prices[current_tick] < self.prices[current_tick - 1]):
                    current_tick += 1
                position = Positions.Short
            else:
                while (current_tick <= self._end_tick and
                       self.prices[current_tick] >= self.prices[current_tick - 1]):
                    current_tick += 1
                position = Positions.Long
            # only rising runs contribute (long-only accounting)
            if position == Positions.Long:
                current_price = self.prices[current_tick - 1]
                last_trade_price = self.prices[last_trade_tick]
                shares = profit / last_trade_price
                profit = shares * current_price
            last_trade_tick = current_tick - 1
        return profit
    def seed(self, seed=None) -> list:
        """Seed the env RNG; returns [seed, derived_seed]."""
        # NOTE(review): gym.utils.seeding.hash_seed was removed in newer gym
        # releases -- confirm the pinned gym version supports it.
        self.np_random, seed = seeding.np_random(seed)
        seed2 = seeding.hash_seed(seed + 1) % 2 ** 31
        return [seed, seed2]
    def close(self):
        """Close the matplotlib figure used for rendering."""
        plt.close()
    def render(self, mode='human'):
        """Incrementally plot prices with the current position marker."""
        def _plot_position(position, tick):
            # NOTE(review): compares against Positions values although
            # _position stores raw action values; the numeric values happen
            # to coincide (Short=0, Long=1) -- confirm this is intended.
            color = None
            if position == Positions.Short.value:
                color = 'red'
            elif position == Positions.Long.value:
                color = 'green'
            if color:
                plt.scatter(tick, self.prices[tick], color=color)
        if self._first_rendering:
            # draw the full price curve once, then only add markers
            self._first_rendering = False
            plt.cla()
            plt.plot(self.prices)
            start_position = self._position_history[self._start_tick]
            _plot_position(start_position, self._start_tick)
        _plot_position(self._position, self._current_tick)
        plt.suptitle(
            "Total Reward: %.6f" % self._total_reward + ' ~ ' +
            "Total Profit: %.6f" % self._total_profit
        )
        plt.pause(0.01)
    def render_all(self, mode='human'):
        """Plot the whole price series with all recorded positions."""
        window_ticks = np.arange(len(self._position_history))
        # print(f'Position history:')
        # print(self._position_history)
        fig, ax = plt.subplots(figsize=(18, 12))
        plot = plt.plot(self.dates, self.prices)
        short_ticks = []
        long_ticks = []
        for i, tick in enumerate(window_ticks):
            if self._position_history[i] == Positions.Short.value:
                short_ticks.append(tick)
            elif self._position_history[i] == Positions.Long.value:
                long_ticks.append(tick)
        plt.plot(self.dates[short_ticks], self.prices[short_ticks], 'ro')
        plt.plot(self.dates[long_ticks], self.prices[long_ticks], 'go')
        plt.suptitle(
            "Total Reward: %.6f" % self._total_reward + ' ~ ' +
            "Total Profit: %.6f" % self._total_profit
        )
    def save_rendering(self, filepath):
        """Save the current matplotlib figure to filepath."""
        plt.savefig(filepath)
    def pause_rendering(self):
        """Block and show the current matplotlib figure."""
        plt.show()
|
#!/usr/bin/env python3
"""
A script for calculating the best uptake and depuration constants given historical data as input.
"""
import argparse
import pandas as pd
import numpy as np
from math import isnan,sqrt
from statistics import mean
import os
def calc_rms(x,y):
    """
    Root mean square deviation between two equally long sequences x and y.
    returns rms = √[ ∑(x-y)² / n ]
    Normalizing by n (instead of a plain sum of squares) makes values
    comparable between datasets of different lengths.
    """
    squared_errors = [(a - b) ** 2 for a, b in zip(x, y)]
    return sqrt(sum(squared_errors) / len(squared_errors))
def calc_sos(x,y):
    """
    Sum-of-squared error between two sequences of numbers, x and y.
    returns sos = ∑(x-y)²
    """
    return sum((a - b) ** 2 for a, b in zip(x, y))
def calc_rms_df(df, measured_tox_col, modeled_tox_col):
    """
    Root mean square deviation between two columns of a DataFrame,
    ignoring rows where either value is missing.
    returns √[ ∑(x-y)² / n ]
    """
    paired = df[[measured_tox_col, modeled_tox_col]].dropna()
    observed = paired[measured_tox_col].tolist()
    modeled = paired[modeled_tox_col].tolist()
    # rms over the complete (observed, modeled) pairs only
    return sqrt(sum((a - b) ** 2 for a, b in zip(observed, modeled)) / len(observed))
def load_dataset(fname,src=None):
    """Load a csv with date, ESP, and TOX data columns as a DataFrame.

    The first column is parsed as a date index, fully-empty rows are dropped,
    and missing days are reinserted so the index is a monotonic daily range.
    """
    if src is not None:
        fname = os.path.join(src, fname)
    # first column becomes the DatetimeIndex; all-NaN rows carry no data
    frame = pd.read_csv(fname, index_col=0, parse_dates=True).dropna(how="all")
    # reindex over a continuous daily range so gaps appear as NaN rows
    frame = frame.reindex(pd.date_range(start=frame.index[0], end=frame.index[-1]))
    frame.index.name = 'date'
    return frame
## THE MODEL ##
def model_tox_series(df, cell_col, tox_col, tox_col_model, tox_const, cell_const, T0=None, lag=0):
    """
    Given a dataset and c1 c2 constants, generates modeled toxicity data
    following the following equation for all known and interpolated cell_count values,
    Starting with the first measured toxicity value.
    T(t+1) = c1 * T(t) + c2 * C(t+1-tau)
    df is the data frame containing ESP and TOX measured shellfish toxicity
    cell_col and tox_col are the column names of that ESP and shellfish toxicity data, respectively
    tox_col_model is the name to be given to the newly generated model data
    tox_const and cell_const are c1 and c2 respectively.
        tox_const = c1 = 1-gamma, where gamma is the depuration value
        cell_const = c2 = beta, where beta is the uptake value
    lag is the estimated time it takes for cells identified at an ESP to reach a downstream shellfish site,
        in integer days. Typically this value is 0 days or 1 days.
    T0 is the initial model toxicity value.
        If None, the measured or interpolated toxicity value from 1 day prior to the first esp datum is used.
    Returns: an rms value,
             the input dataframe with additional tox_col_model column,
             list of measured tox data,
             list of modeled tox data.
    """
    # Time Displacements
    aday = pd.Timedelta(days=1)
    lag = pd.Timedelta(days=lag)
    # Set up model column and interpolation
    df = df[[cell_col,tox_col]].interpolate(limit_area='inside')
    # use numpy directly: the pd.np alias was removed in pandas 2.0
    df[tox_col_model] = np.nan
    # determining starting conditions: first date with both a toxicity value
    # and a (lag-adjusted) next-day cell count available
    for date in df.index:
        t0=date
        if T0 is None: # use measured (or interpolated) value
            T_start=df.loc[date,tox_col]
        else: # use provided start value
            T_start = T0
        C0=df.loc[date+aday-lag,cell_col]
        if not isnan(C0) and not isnan(T_start):
            df.loc[t0+aday,tox_col_model] = tox_const*T_start + cell_const*C0
            break
    # applying the model until cell counts run out
    for date in df[t0+aday:].index:
        Ti = df.loc[date,tox_col_model]
        Ci = df.loc[date+aday-lag,cell_col]
        if isnan(Ci): break
        df.loc[date+aday,tox_col_model] = tox_const*Ti + cell_const*Ci
    # calculating the root mean square deviation
    rms = calc_rms_df(df, tox_col, tox_col_model)
    # reporting list of real and modeled toxicity values for cumu_rms
    df_tox = df[[tox_col, tox_col_model]].dropna()
    tox_real = df_tox[tox_col].tolist()
    tox_model = df_tox[tox_col_model].tolist()
    return rms, df, tox_real, tox_model
def calculate_RMS_table(input_tups,beta_gammas, T0=None, output='df'):
    """
    Calculates RMS for all year-location-pairs for all beta_gammas.
    Additionally calculates cumulative RMS across year-location-pairs for all beta_gammas.
    Input tups must be a list of tuples with 4 elements: model_column_label,esp_column_label,tox_column_label,dataframe
    beta_gammas is a list of beta and gamma tuples
    T0 is the initial model toxicity value. Default is None (select value automatically)
    output changes the output format. If 'df', a dataframe is returned.
        If any other string, a csv with that filename is created (and None returned).
        If anything else, eg None, a dict is returned.
    """
    print('Calculating RMS')
    # one row per (beta, gamma) pair in the output table
    betas,gammas = list(zip(*beta_gammas))
    df_data = dict(betas=betas,gammas=gammas)
    # accumulate real/modeled toxicity values across all datasets per pair
    cumu_toxlist = dict()
    for a,g in beta_gammas:
        cumu_toxlist[(a,g)] = dict(real=[],model=[])
    for col,esp,tox,sub_df in input_tups:
        print(' ',col)
        beta_gamma_results = []
        for a,g in beta_gammas:
            # model constants: c1 = 1 - gamma (depuration), c2 = beta (uptake)
            rms,_,toxlist_real,toxlist_model = model_tox_series(sub_df, esp, tox, col, tox_const=1-g, cell_const=a, T0=T0)
            beta_gamma_results.append(rms)
            cumu_toxlist[(a,g)]['real'].extend(toxlist_real)
            cumu_toxlist[(a,g)]['model'].extend(toxlist_model)
        df_data[col] = beta_gamma_results
    print(' ','Cumulative RMS (cumu_rms)')
    # cumulative RMS pools every dataset's values before computing the rms
    for a,g in beta_gammas:
        cumu_toxlist[(a,g)]['rms'] = calc_rms(cumu_toxlist[(a,g)]['real'],cumu_toxlist[(a,g)]['model'])
        #cumu_toxlist[(a,g)]['sos'] = calc_sos(cumu_toxlist[(a,g)]['real'],cumu_toxlist[(a,g)]['model'])
    df_data['cumu_rms'] = [cumu_toxlist[(a,g)]['rms'] for a,g in beta_gammas]
    print()
    if output=='df':
        df = pd.DataFrame(df_data)
        return df.set_index(['betas','gammas'])
    elif isinstance(output,str):
        # any other string is treated as a csv output filename
        df = pd.DataFrame(df_data)
        df = df.set_index(['betas','gammas'])
        df.to_csv(output)
    else:
        return df_data
def annotate(df, outfile=None, print_final=[1,2,3]):
    """
    Adds footer and final columns with value means to input dataframe df
    Footer includes best-rms on a per-column basis and the associated beta and gamma values
    If outfile is specified, the table is saved to the entered filename.
    outfile may also be "stdout" in which case the a reduced summary table is displayed
    print_final is optional and (if outfile is not "stdout") shows the final results for method1, method2, and method3.
    Returns the modified df.

    Note: print_final is a mutable default, but it is only read, never mutated.
    """
    print('Annotating Table...')
    df = df.copy()
    #df = df.set_index(['betas','gammas'])
    # set the cumulative column aside; per-dataset mins are computed without it
    cumu_series = df['cumu_rms']
    df = df.drop('cumu_rms',axis=1)
    # method 1: mean of the per-dataset best rms values (and of their argmins)
    valmin_mean1 = df.min().mean()
    beta_mins,gamma_mins = list(zip(*df.idxmin().tolist()))
    betamin_mean1,gammamin_mean1 = mean(beta_mins),mean(gamma_mins)
    # spacer column plus the three summary columns
    df[''] = pd.Series()
    df['method1_mean'] = pd.Series()
    df['method2_mean'] = df.mean(axis=1)
    df['method3_cumu'] = cumu_series
    # recompute argmins per column; all-NaN columns yield (nan, nan)
    beta_mins,gamma_mins = list(zip(*[tup if isinstance(tup,tuple) else (np.nan,np.nan) for tup in df.idxmin().tolist()]))
    # NOTE(review): DataFrame.append was removed in pandas 2.0 -- confirm the
    # pinned pandas version is < 2.0 before upgrading.
    df = df.append(pd.Series(name=('','')))
    # footer rows: best rms per column and the (beta, gamma) achieving it
    df.loc[('best','rms'),:] = df.min()
    df.loc[('best','beta'),:] = beta_mins
    df.loc[('best','gamma'),:] = gamma_mins
    # method 1 footer cells are the means computed before the extra columns
    df.loc[('best','rms'),'method1_mean'] = valmin_mean1
    df.loc[('best','beta'),'method1_mean'] = betamin_mean1
    df.loc[('best','gamma'),'method1_mean'] = gammamin_mean1
    valmin_mean2 = df.loc[('best','rms'),'method2_mean']
    betamin_mean2 = df.loc[('best','beta'),'method2_mean']
    gammamin_mean2 = df.loc[('best','gamma'),'method2_mean']
    valmin_cumu = df.loc[('best','rms'),'method3_cumu']
    betamin_cumu = df.loc[('best','beta'),'method3_cumu']
    gammamin_cumu = df.loc[('best','gamma'),'method3_cumu']
    print('Outputting table to:', outfile)
    if outfile=='stdout':
        # reduced summary: just the footer rows, transposed
        df_short = df.loc['best'].T.fillna('')
        df_short.columns.name=''
        print(df_short)
    elif outfile:
        df.to_csv(outfile)
        if print_final: print('Final Results:')
        if print_final==[3]:
            print(' least-rms={:.3f}, beta-gamma=({:.3f},{:.3f})'.format(valmin_cumu,betamin_cumu,gammamin_cumu))
        elif print_final:
            if 1 in print_final: print(' Method1: mean-rms={:.3f}, beta-gamma=({:.3f},{:.3f})'.format(valmin_mean1,betamin_mean1,gammamin_mean1))
            if 2 in print_final: print(' Method2: mean-rms={:.3f}, beta-gamma=({:.3f},{:.3f})'.format(valmin_mean2,betamin_mean2,gammamin_mean2))
            if 3 in print_final: print(' Method3: least-rms={:.3f}, beta-gamma=({:.3f},{:.3f})'.format(valmin_cumu,betamin_cumu,gammamin_cumu))
    return df
def _expand_range(arg, label):
    """Expand a CLI constant argument into a list of values.

    arg is either a 1-item list (a locked-in constant, returned unchanged)
    or a 3-item [start, stop, step] list expanded inclusively of `stop`.
    label is the human-readable name ('Uptake'/'Depuration') used in the
    progress printout.
    """
    if len(arg) == 3:
        start, stop, step = arg
        # stop + step*0.999 makes the endpoint inclusive while guarding
        # against a float-rounding overshoot producing one extra step.
        values = [round(v, 6) for v in np.arange(start, stop + step * 0.999, step)]
        print('{} Range: {} to {} ({} steps)'.format(label, start, stop, len(values)))
        return values
    print('{} = {}'.format(label, arg[0]))
    return arg


def parse_input_args(targets, beta_arg, gamma_arg, src=None):
    """
    Converts cli input values to a format usable by calculate_RMS_table.
    Also checks that input args are valid.
    targets is a list of strings where each string has the following format: "YEARfile,ESP column,TOX column"
    beta_arg and gamma_arg are either a single-item list containing a float, or a 3 item list with 3 floats: START STOP STEP
    Returns (input_tups, beta_gammas) where input_tups holds one
    (column-label, esp-col, tox-col, sub-dataframe) tuple per target and
    beta_gammas is the cross product of all beta and gamma values.
    Raises AssertionError when a file or column does not exist.
    """
    print('validating input...')
    input_tups = []
    for target in targets:
        print('  ', target)
        fname, esp, tox = target.split(',')
        if not fname.lower().endswith('.csv'):
            fname = fname + '.csv'
        if src is not None:
            fname = os.path.join(src, fname)
        # Column label: "<year>:<esp>:<tox>" with spaces normalized away.
        year = os.path.splitext(os.path.basename(fname))[0]
        col = '{}:{}:{}'.format(year, esp, tox.replace('TOX ', '')).replace(' ', '_')
        assert os.path.isfile(fname), '{} was not found'.format(fname)
        sub_df = load_dataset(fname)
        assert esp in sub_df.columns, '"{}" is not a valid column in {}'.format(esp, fname)
        assert tox in sub_df.columns, '"{}" is not a valid column in {}'.format(tox, fname)
        sub_df = sub_df[[esp, tox]]
        input_tups.append((col, esp, tox, sub_df))
    print()
    # Single shared helper replaces the previously duplicated beta/gamma
    # range-expansion blocks.
    betas = _expand_range(beta_arg, 'Uptake')
    gammas = _expand_range(gamma_arg, 'Depuration')
    beta_gammas = [(a, g) for a in betas for g in gammas]
    print()
    return input_tups, beta_gammas
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("input", nargs="+", metavar="YEAR,ESP,TOX", help='List of "YEAR,ESP,TOX" labels. "YEAR" is the filename of a YEAR.csv file found in SRC, "ESP" and "TOX" are column-headers from YEAR.csv. Note that each triplet should be space-delimited and surrounded by quotes "". This positional argument will also accept a configuration file in the described format, newline-delimited.')
parser.add_argument("--src", help="Path to directory with input csv datafiles. Default is the current working directory.")
groupB = parser.add_mutually_exclusive_group()
groupB.add_argument("-b", "--betas", dest='beta_args', default=[0,0.05,0.001],
nargs=3, type=float, metavar=('START','STOP','STEP'),
help='Range of uptake constants to asses. Default is "0 0.05 0.001". Mutually-exclusive with UPTAKE.')
groupB.add_argument("-u","--uptake", metavar='UPTAKE',dest='beta_args', nargs=1, type=float, help='Single locked-in uptake constant. Mutually-exclusive with --betas.')
groupG = parser.add_mutually_exclusive_group()
groupG.add_argument("-d","--depuration",metavar='DEPURATION',dest='gamma_args',nargs=1,type=float, default=[0.1],
help='Single locked-in depuration constant. Default is "0.1". Mutually-exclusive with --gammas.')
groupG.add_argument("-g", "--gammas", dest='gamma_args', #default=[0,0.2, 0.005],
nargs=3, type=float, metavar=('START','STOP','STEP'),
help='Range of depuration constants to asses. Eg: "0 0.2 0.005". Mutually-exclusive with DEPURATION.')
parser.add_argument("-o","--outfile", default='stdout', help='The file to output the final csv table to. If not specified, a summary table is displayed.')
parser.add_argument("-i","--model-init", metavar='I', type=float, default=None,
help='If included, sets the initial modeled toxicity to the specified value, eg 0. If not included (default), the measured or interpolated toxicity from 1-day prior to the first ESP datapoint is used.')
parser.add_argument("-m", "--method", metavar='N', type=int, nargs="+", choices=[1,2,3],
help=argparse.SUPPRESS)
#help='Determins the final shown command-line output text. The output csv includes all 3 methods of determining best uptake and depuration values, this merely determines what is shown onscreen when the process completes. For example "1 2 3" shows the final result using all 3 methods. If OUTFILE is specified, default is "3".')
args = parser.parse_args()
if args.outfile!='stdout' and args.method is None:
args.method=[3]
if os.path.isfile(args.input[0]):
config_file = args.input[0]
print('loading from inputs from file: ' + config_file)
args.input = []
with open(config_file) as f:
for line in f:
line=line.strip()
if line.replace(',','')=='': continue
args.input.append(line.strip())
## Parse and Validate Inputs
input_tups, beta_gammas = parse_input_args(args.input, args.beta_args, args.gamma_args, args.src)
## Calculate Results
df = calculate_RMS_table(input_tups, beta_gammas, T0=args.model_init)
df = annotate(df, outfile=args.outfile, print_final=args.method)
|
<filename>tracker/materials/db.py
import datetime
from typing import Optional
from uuid import UUID
import sqlalchemy.sql as sa
from sqlalchemy.engine import RowMapping
from tracker.common import database, models
from tracker.common.log import logger
from tracker.materials import schemas
async def get_materials(*,
                        materials_ids: Optional[list[UUID]] = None) -> list[RowMapping]:
    """Fetch material rows, optionally restricted to the given ids."""
    count_label = str(len(materials_ids)) if materials_ids else 'all'
    logger.info("Getting %s materials", count_label)

    stmt = sa.select(models.Materials)
    if materials_ids:
        # UUIDs are stored as strings in the material_id column.
        id_strings = [str(id_) for id_ in materials_ids]
        stmt = stmt.where(models.Materials.c.material_id.in_(id_strings))

    async with database.session() as ses:
        return (await ses.execute(stmt)).all()
async def get_title(*,
                    material_id: UUID) -> str:
    """Return the title of the material, or '' if it does not exist."""
    logger.info("Getting title for material_id=%s", material_id)

    materials = await get_materials(materials_ids=[material_id])
    try:
        return materials[0].title
    except IndexError:
        # Lazy %-formatting to match the logging style used everywhere else
        # in this module (and to defer formatting when the level is filtered).
        logger.warning("Material material_id=%s not found", material_id)
        return ''
async def does_material_exist(*,
                              material_id: UUID) -> bool:
    """True if a material row with this id exists."""
    logger.debug("Whether material_id=%s exists", material_id)

    stmt = (sa.select(models.Materials.c.material_id)
            .where(models.Materials.c.material_id == str(material_id)))

    async with database.session() as ses:
        found = await ses.scalar(stmt)
    return found is not None
async def is_material_reading(*,
                              material_id: UUID) -> bool:
    """True if the material has been started but not yet completed."""
    logger.debug("Whether material_id=%s is reading",
                 material_id)

    # NB: `!= None` / `== None` are intentional here -- SQLAlchemy renders
    # them as SQL `IS NOT NULL` / `IS NULL`; do not change to `is not None`.
    stmt = (sa.select(models.Materials.c.material_id)
            .join(models.Statuses,
                  models.Materials.c.material_id == models.Statuses.c.material_id)
            .where(models.Statuses.c.begin != None)
            .where(models.Statuses.c.end == None)
            .where(models.Materials.c.material_id == str(material_id)))

    async with database.session() as ses:
        return await ses.scalar(stmt) is not None
async def is_material_assigned(*,
                               material_id: UUID) -> bool:
    """True if the material was ever started (reading or completed)."""
    logger.debug("Whether material_id=%s reading or completed",
                 material_id)

    # NB: `!= None` is intentional -- SQLAlchemy renders it as `IS NOT NULL`.
    stmt = (sa.select(models.Materials.c.material_id)
            .join(models.Statuses,
                  models.Materials.c.material_id == models.Statuses.c.material_id)
            .where(models.Statuses.c.begin != None)
            .where(models.Materials.c.material_id == str(material_id)))

    async with database.session() as ses:
        return await ses.scalar(stmt) is not None
async def get_free_materials() -> list[models.Materials]:
    """Return materials that have no status row (never assigned)."""
    logger.debug("Getting free materials")

    assigned_condition = sa.select(1) \
        .select_from(models.Statuses) \
        .where(models.Statuses.c.material_id == models.Materials.c.material_id)

    # Fixed: a stray trailing backslash after this .where() joined it with
    # the `async with` line below, making the module unparsable.
    stmt = sa.select(models.Materials)\
        .where(~sa.exists(assigned_condition))

    async with database.session() as ses:
        return (await ses.execute(stmt)).all()
async def get_completed_materials() -> list[RowMapping]:
    """Return materials joined with their statuses where reading is finished."""
    logger.debug("Getting completed materials")

    # Fixed: `models.Statuses.end` lacked the `.c` column accessor used
    # everywhere else in this module (Table objects expose columns via .c).
    # `!= None` is intentional SQLAlchemy for `IS NOT NULL`.
    stmt = sa.select([models.Materials,
                      models.Statuses]) \
        .join(models.Statuses,
              models.Materials.c.material_id == models.Statuses.c.material_id) \
        .where(models.Statuses.c.end != None)

    async with database.session() as ses:
        return (await ses.execute(stmt)).mappings().all()
async def get_status(*,
                     status_ids: Optional[list[int]] = None) -> list[models.Statuses]:
    """Fetch status rows, optionally restricted to the given ids."""
    count_label = str(len(status_ids)) if status_ids else 'all'
    logger.debug("Getting %s statuses", count_label)

    stmt = sa.select(models.Statuses)
    if status_ids:
        stmt = stmt.where(models.Statuses.c.status_id.in_(status_ids))

    async with database.session() as ses:
        return (await ses.execute(stmt)).all()
async def get_material_status(*, # type: ignore
                              material_id: UUID) -> Optional[RowMapping]:
    """Return the status row for the material, or None if there is none."""
    logger.debug("Getting status for material_id=%s",
                 material_id)

    stmt = (sa.select(models.Statuses)
            .where(models.Statuses.c.material_id == str(material_id)))

    async with database.session() as ses:
        row = (await ses.execute(stmt)).first()
    if row:
        return row
    return None
async def add_material(*,
                       material: schemas.Material) -> None:
    """Insert a new material row built from the given schema object."""
    logger.debug("Adding material")

    stmt = models.Materials.insert().values({
        "title": material.title,
        "authors": material.authors,
        "pages": material.pages,
        "tags": material.tags,
    })

    async with database.session() as ses:
        await ses.execute(stmt)
    logger.debug("Material added")
async def start_material(*,
                         material_id: UUID,
                         start_date: Optional[datetime.date] = None) -> None:
    """Mark the material as started by inserting a status row.

    Raises:
        ValueError: if start_date is in the future.
    """
    start_date = start_date or database.today().date()
    logger.debug("Starting material_id=%s at %s",
                 material_id, start_date)

    # Fixed: compare date to date. database.today() appears to return a
    # datetime (note the .date() call above), and `date > datetime` raises
    # TypeError -- TODO confirm database.today()'s return type.
    if start_date > database.today().date():
        raise ValueError("Start date must be less than today")

    # Fixed: the Statuses column is `begin` (see the queries above); the
    # previous "start_date" key did not match any column.
    values = {
        "material_id": str(material_id),
        "begin": start_date
    }

    stmt = models.Statuses\
        .insert().values(values)

    async with database.session() as ses:
        await ses.execute(stmt)
    logger.debug("Material material_id=%s started at %s",
                 material_id, start_date)
async def complete_material(*,
                            material_id: UUID,
                            completion_date: Optional[datetime.date] = None) -> None:
    """Mark the material as completed by setting its status `end` date.

    Raises:
        ValueError: if the material was never assigned, is already
            completed, or was started after completion_date.
    """
    # Use a date (not a datetime) so the comparison with the stored `begin`
    # date below is well-typed; start_material defaults the same way.
    completion_date = completion_date or database.today().date()
    logger.debug("Completing material_id=%s at %s",
                 material_id, completion_date)

    get_status_stmt = sa.select(models.Statuses)\
        .where(models.Statuses.c.material_id == str(material_id))

    update_status_stmt = models.Statuses\
        .update().values(end=completion_date)\
        .where(models.Statuses.c.material_id == str(material_id))

    async with database.session() as ses:
        status = (await ses.execute(get_status_stmt)).mappings().first()
        # Fixed: ValueError does not lazy-format %s args like logging does,
        # and the row's columns are `begin`/`end` (see the update above),
        # not `started_at`/`completed_at`.
        if status is None:
            raise ValueError(f"Material_id={material_id} not assigned")
        if status["end"] is not None:
            raise ValueError(f"Material_id={material_id} already completed")
        if status["begin"] > completion_date:
            raise ValueError("Completion date must not precede the start date")
        await ses.execute(update_status_stmt)
    logger.debug("Material_id=%s completed at %s",
                 material_id, completion_date)
|
<filename>tests/integration/test_integration_scan.py
import cloudpassage
import datetime
import os
import pytest
from cloudpassage.utility import Utility as utility
config_file_name = "portal.yaml.local"
tests_dir = os.path.abspath(os.path.join(os.path.dirname(__file__), "../"))
config_file = os.path.join(tests_dir, "configs/", config_file_name)
session_info = cloudpassage.ApiKeyManager(config_file=config_file)
key_id = session_info.key_id
secret_key = session_info.secret_key
api_hostname = session_info.api_hostname
api_port = session_info.api_port
class TestIntegrationScan:
    """Integration tests for cloudpassage.Scan against a live Halo account.

    Most tests require pre-existing servers and scan results in the target
    account; see the individual docstrings for prerequisites.
    """

    def _build_session(self):
        # Shared session factory for the object builders below; keeps the
        # credential plumbing in one place.
        return cloudpassage.HaloSession(key_id, secret_key,
                                        api_host=api_hostname,
                                        api_port=api_port,
                                        integration_string="SDK-Smoke")

    def get_fim_scan_with_findings(self):
        """Return the id of a completed FIM scan with findings, else None."""
        scan_type = "fim"
        # NOTE(review): a "completed_clean" scan would not normally carry
        # findings -- confirm whether "completed_with_errors" was intended.
        scan_status = "completed_clean"
        scanner = self.build_scan_object()
        report = scanner.scan_history(module=scan_type, status=scan_status)
        for item in report:
            # Fixed: the original compared `>= 0`, which is always true and
            # returned the first scan whether or not it had any findings.
            if (item["critical_findings_count"] > 0 or
                    item["non_critical_findings_count"] > 0):
                return item["id"]
        return None

    def build_scan_object(self):
        return cloudpassage.Scan(self._build_session())

    def build_server_group_object(self):
        return cloudpassage.ServerGroup(self._build_session())

    def build_server_object(self):
        return cloudpassage.Server(self._build_session())

    def get_svm_target(self):
        """Return a server id from the first group with active members."""
        s_group = self.build_server_group_object()
        for group in s_group.list_all():
            if group["server_counts"]["active"] > 0:
                members = s_group.list_members(group["id"])
                return members[0]["id"]
        return None

    def get_csm_target(self):
        """Return a server id from a group with active members and CSM policies."""
        s_group = self.build_server_group_object()
        for group in s_group.list_all():
            has_members = group["server_counts"]["active"] > 0
            if has_members and len(group["policy_ids"]) > 0:
                members = s_group.list_members(group["id"])
                return members[0]["id"]
        return None

    def get_fim_target(self):
        """Return a server id from a group with active members and FIM policies."""
        s_group = self.build_server_group_object()
        for group in s_group.list_all():
            has_members = group["server_counts"]["active"] > 0
            if has_members and len(group["fim_policy_ids"]) > 0:
                members = s_group.list_members(group["id"])
                return members[0]["id"]
        return None

    def get_sam_target(self):
        """Return the id of an active non-Windows server, else None."""
        s_group = self.build_server_group_object()
        for group in s_group.list_all():
            if group["server_counts"]["active"] > 0:
                for member in s_group.list_members(group["id"]):
                    if member["platform"] != "windows":
                        # Fixed: the original assigned members[0]["id"]
                        # regardless of which member passed the platform
                        # check, and its break only left the inner loop.
                        return member["id"]
        return None

    def test_instantiation(self):
        session = cloudpassage.HaloSession(key_id, secret_key,
                                           api_host=api_hostname,
                                           api_port=api_port)
        assert cloudpassage.Scan(session)

    def test_bad_scan_type(self):
        session = cloudpassage.HaloSession(key_id, secret_key,
                                           api_host=api_hostname,
                                           api_port=api_port)
        scanner = cloudpassage.Scan(session)
        s_group = cloudpassage.ServerGroup(session)
        scan_type = "barfola"
        server_id = s_group.list_all()[0]["id"]
        with pytest.raises(cloudpassage.CloudPassageValidation) as e:
            scanner.initiate_scan(server_id, scan_type)
        assert 'Unsupported scan type: barfola' in str(e)

    def test_bad_server_id(self):
        session = cloudpassage.HaloSession(key_id, secret_key,
                                           api_host=api_hostname,
                                           api_port=api_port)
        scanner = cloudpassage.Scan(session)
        scan_type = "svm"
        server_id = "ABC123"
        with pytest.raises(cloudpassage.CloudPassageResourceExistence) as e:
            scanner.initiate_scan(server_id, scan_type)
        assert server_id in str(e)

    def test_sam_historical_is_unsupported(self):
        rejected = False
        session = cloudpassage.HaloSession(key_id, secret_key,
                                           api_host=api_hostname,
                                           api_port=api_port)
        scanner = cloudpassage.Scan(session)
        server = cloudpassage.Server(session)
        scan_type = "sam"
        server_id = server.list_all()[0]["id"]
        try:
            scanner.last_scan_results(server_id, scan_type)
        except cloudpassage.CloudPassageValidation:
            rejected = True
        assert rejected

    def test_scan_type_valid(self):
        valid_types = ["svm", "sva", "csm", "sca", "fim", "sam", "sv"]
        invalid_types = ["death_stare", "lids"]
        session = cloudpassage.HaloSession(key_id, secret_key,
                                           api_host=api_hostname,
                                           api_port=api_port)
        scanner = cloudpassage.Scan(session)
        for v in valid_types:
            assert scanner.scan_type_supported(v)
        for i in invalid_types:
            assert not scanner.scan_type_supported(i)

    def test_sv_initiate(self):
        scanner = self.build_scan_object()
        target_id = self.get_sam_target()
        command = scanner.initiate_scan(target_id, "sv")
        assert command["id"]

    def test_sca_initiate(self):
        sca_aliases = ["sca", "csm"]
        scanner = self.build_scan_object()
        target_id = self.get_csm_target()
        for alias in sca_aliases:
            command = scanner.initiate_scan(target_id, alias)
            assert command["id"]

    def test_sca_retrieve(self):
        sca_aliases = ["sca", "csm"]
        scanner = self.build_scan_object()
        target_id = self.get_csm_target()
        for alias in sca_aliases:
            report = scanner.last_scan_results(target_id, alias)
            assert report["id"]

    def test_fim_initiate(self):
        scanner = self.build_scan_object()
        target_id = self.get_fim_target()
        command = scanner.initiate_scan(target_id, 'fim')
        assert command["id"]

    def test_fim_retrieve(self):
        scanner = self.build_scan_object()
        target_id = self.get_fim_target()
        report = scanner.last_scan_results(target_id, 'fim')
        assert report["id"]

    def test_svm_initiate(self):
        svm_aliases = ["svm", "sva"]
        scanner = self.build_scan_object()
        target_id = self.get_svm_target()
        for alias in svm_aliases:
            command = scanner.initiate_scan(target_id, alias)
            assert command["id"]

    def test_svm_retrieve(self):
        svm_aliases = ["svm", "sva"]
        scanner = self.build_scan_object()
        target_id = self.get_svm_target()
        for alias in svm_aliases:
            report = scanner.last_scan_results(target_id, alias)
            assert report["id"]

    def test_sam_initiate(self):
        scanner = self.build_scan_object()
        target_id = self.get_sam_target()
        command = scanner.initiate_scan(target_id, "sam")
        assert command["id"]

    def test_scan_history(self):
        scanner = self.build_scan_object()
        report = scanner.scan_history()
        assert report[0]["id"]

    """
    def test_scan_history_by_serverid(self):
        scanner = self.build_scan_object()
        target_id = self.get_sam_target()
        report = scanner.scan_history(server_id=target_id)
        assert report[0]["server_id"] == target_id
    """

    def test_scan_history_by_single_scan_type(self):
        """This test requires a completed SAM scan. If you don't have one
        in your account, this scan will fail.
        """
        scan_type = "sam"
        scanner = self.build_scan_object()
        report = scanner.scan_history(module=scan_type, max_pages=2)
        assert report[0]["module"] == scan_type

    def test_scan_history_by_multi_scan_type(self):
        """This test requires a completed SAM and SVM scan. If your account
        doesn't have results from both of these scan types, the test will
        fail.
        """
        scan_types = ["sam", "svm"]
        scanner = self.build_scan_object()
        report = scanner.scan_history(module=scan_types, max_pages=2)
        assert report[0]["module"] in scan_types

    def test_scan_history_by_single_status(self):
        """This test requires scan results in your account with a status
        of completed_clean. If you don't have any scan results with
        this status, this test will fail.
        """
        scan_status = "completed_clean"
        scanner = self.build_scan_object()
        report = scanner.scan_history(status=scan_status, max_pages=2)
        assert report[0]["status"] == scan_status

    """
    def test_scan_history_by_multi_status(self):
        scan_status = ["completed_clean", "completed_with_errors"]
        scanner = self.build_scan_object()
        target_id = self.get_sam_target()
        report = scanner.scan_history(status=scan_status, max_pages=2)
        assert report[0]["status"] in scan_status
    def test_scan_details(self):
        scanner = self.build_scan_object()
        target_id = self.get_fim_target()
        report = scanner.scan_history(server_id=target_id)
        details = scanner.scan_details(report[0]["id"])
        assert "id" in details
    """

    def test_fim_findings_details(self):
        """This test requires a FIM scan with findings. If you don't
        have a FIM scan with resulting findings, this test will fail.
        """
        target_fim_scan_id = self.get_fim_scan_with_findings()
        scanner = self.build_scan_object()
        details = scanner.scan_details(target_fim_scan_id)
        findings = details["findings"]
        target_finding = findings[0]["id"]
        target_findings_body = scanner.findings(target_fim_scan_id,
                                                target_finding)
        assert "id" in target_findings_body

    def test_scan_history_by_date(self):
        """This test requires scan results in your account, produced in the
        last week. If no such records exist, this test will fail.
        """
        scan = self.build_scan_object()
        until = utility.time_string_now()
        since = datetime.datetime.utcnow() - datetime.timedelta(weeks=1)
        scan_list = scan.scan_history(max_pages=2, since=since, until=until)
        assert "id" in scan_list[0]
class TestIntegrationCveException:
    """Integration tests for cloudpassage.CveException."""

    def create_cve_exception_object(self):
        halo_session = cloudpassage.HaloSession(key_id, secret_key,
                                                api_host=api_hostname,
                                                api_port=api_port,
                                                integration_string="SDK-Smoke")
        return cloudpassage.CveException(halo_session)

    def test_instantiation(self):
        assert self.create_cve_exception_object()

    def test_get_list(self):
        """Your account must have at least one CVE exception set.
        If you haven't set any CVE exceptions in your account,
        this test will fail.
        """
        cve_exc = self.create_cve_exception_object()
        exceptions = cve_exc.list_all()
        assert "id" in exceptions[0]

    def test_get_details(self):
        """Your account must have at least one CVE exception set.
        If you haven't set any CVE exceptions in your account,
        this test will fail.
        """
        cve_exc = self.create_cve_exception_object()
        exceptions = cve_exc.list_all()
        first_exception_id = exceptions[0]["id"]
        details = cve_exc.describe(first_exception_id)
        assert "id" in details
|
import torch
from collections import OrderedDict
import torchvision.transforms as transforms
from PIL import Image
import numpy as np
from .discriminator import Discriminator
from .generator import Generator
from .loss import Loss
from ..common import utils as util
from .blocks import *
import os
class DarkNight():
    """Two-domain image translation model wrapping a Generator and a
    Discriminator.

    NOTE(review): several attributes referenced below (self.n_domains,
    self.save_dir, self.gpu_ids, self.lstm, self.fc, self.softmax,
    self.hidden) are never assigned in this class -- they look inherited
    from another codebase; confirm before relying on those methods.
    """
    def __init__(self, opt):
        super(DarkNight, self).__init__()
        # Pick CUDA tensors when GPU ids were requested; also expose the
        # choice back on opt for the sub-networks.
        self.Tensor = torch.cuda.FloatTensor if opt.gpu_ids else torch.Tensor
        opt.Tensor = self.Tensor
        self.gen = Generator(opt)
        self.dis = Discriminator(opt)
        self.setup_networks(opt)
        print('*** Initialized ***')
    def setup_networks(self,opt):
        # Restore both networks from the checkpoint of opt.which_epoch.
        self.load_network(self.gen, 'G', opt.which_epoch)
        self.load_network(self.dis, 'D', opt.which_epoch)
        # NOTE(review): self.n_domains is never set in this class.
        self.fake_pools = [ImagePool(opt.pool_size) for _ in range(self.n_domains)]
        # define loss functions
        self.L1 = torch.nn.SmoothL1Loss()
        self.downsample = torch.nn.AvgPool2d(3, stride=2)
        self.criterionCycle = self.L1
        # Identity loss is computed on downsampled images.
        self.criterionIdt = lambda y,t : self.L1(self.downsample(y), self.downsample(t))
        self.criterionLatent = lambda y,t : self.L1(y, t.detach())
        # GAN criterion averages the loss over three scales of outputs.
        self.criterionGAN = lambda r,f,v : (Loss(r[0],f[0],v) + \
                                            Loss(r[1],f[1],v) + \
                                            Loss(r[2],f[2],v)) / 3
        # initialize optimizers
        self.gen.init_optimizers(torch.optim.Adam, opt.lr, (opt.beta1, 0.999))
        self.dis.init_optimizers(torch.optim.Adam, opt.lr, (opt.beta1, 0.999))
        # initialize loss storage
        self.loss_D, self.loss_G = [0]*self.n_domains, [0]*self.n_domains
        self.loss_cycle = [0]*self.n_domains
        # initialize loss multipliers (latent loss is disabled: 0 * lambda)
        self.lambda_cyc, self.lambda_enc = opt.lambda_cycle, (0 * opt.lambda_latent)
        self.lambda_idt, self.lambda_fwd = opt.lambda_identity, opt.lambda_forward
    def get_current_errors(self):
        """Return current D/G/cycle losses as plain Python numbers."""
        # Tensors are unwrapped with .item(); ints/floats pass through.
        extract = lambda l: [(i if type(i) is int or type(i) is float else i.item()) for i in l]
        D_losses, G_losses, cyc_losses = extract(self.loss_D), extract(self.loss_G), extract(self.loss_cycle)
        return OrderedDict([('D', D_losses), ('G', G_losses), ('Cyc', cyc_losses)])
    def forward(self, input):
        # NOTE(review): self.lstm/self.fc/self.softmax/self.hidden are never
        # defined here -- this method appears copy-pasted from an LSTM model
        # and would raise AttributeError if called.
        lstm_out, self.hidden = self.lstm(input, self.hidden)
        out = self.fc(lstm_out)
        out = self.softmax(out)
        return out
    def generator(opt):
        # NOTE(review): missing `self` and DarkNight.__init__ takes a single
        # opt argument, not (input_size, hidden_size, output_size) -- this
        # would raise TypeError if called.
        return DarkNight(opt.input_size, opt.hidden_size, opt.output_size)
    def discriminator(opt):
        # NOTE(review): same signature mismatch as generator() above.
        return DarkNight(opt.input_size, opt.hidden_size, 1)
    def save_network(self, network, network_label, epoch, gpu_ids):
        """Save `network` to <save_dir>/<epoch>_net_<label>, then move it
        back to the first GPU if GPUs are in use."""
        save_filename = '%d_net_%s' % (epoch, network_label)
        # NOTE(review): self.save_dir is never assigned in this class.
        save_path = os.path.join(self.save_dir, save_filename)
        network.save(save_path)
        if gpu_ids and torch.cuda.is_available():
            network.cuda(gpu_ids[0])
    def save(self, label):
        # Persist both generator and discriminator under the same label.
        self.save_network(self.gen, 'G', label, self.gpu_ids)
        self.save_network(self.dis, 'D', label, self.gpu_ids)
    def load_network(self, network, network_label, epoch):
        """Load `network` weights from <save_dir>/<epoch>_net_<label>."""
        save_filename = '%d_net_%s' % (epoch, network_label)
        save_path = os.path.join(self.save_dir, save_filename)
        network.load(save_path)
    def inference(self, image, domain, dim):
        """Translate `image` from `domain` to the opposite domain (1-domain),
        running the generator at resolution dim x dim on cuda:0."""
        image = Image.fromarray(np.uint8(image)).convert('RGB')
        # NOTE(review): PIL Images have no .shape attribute (use .size,
        # which is (width, height)) -- this line would raise AttributeError.
        h, w = image.shape[0], image.shape[1]
        p = transforms.Compose([transforms.Resize((dim, dim))])
        image = p(image)
        p = transforms.ToTensor()
        image = p(image)
        image = image.to("cuda:0")
        self.gen.cuda(0)
        image = image.reshape([1, 3, dim, dim])
        # Encode in the source domain, decode into the other domain.
        encoded = self.gen.encode(image, domain)
        fake = self.gen.decode(encoded, 1-domain)
        image = fake[0]
        # Restore the original resolution.
        p = transforms.Compose([transforms.Resize((h ,w))])
        image = p(image)
        image_numpy = image.cpu().detach().numpy()
        # Map CHW in [-1, 1] to HWC in [0, 255].
        image_numpy = (np.transpose(image_numpy, (1, 2, 0)) + 1) / 2.0 * 255.0
        if image_numpy.shape[2] < 3:
            image_numpy = np.dstack([image_numpy]*3)
        imtype=np.uint8
        image_numpy = image_numpy.astype(imtype)
        return image_numpy
from typing import List
from typing import Mapping
from typing import Optional
import logging
import pathlib
import os
import json
import shutil
import structlog
import click
from covidactnow.datapublic.common_fields import CommonFields
from covidactnow.datapublic.common_fields import FieldName
from libs import google_sheet_helpers
from libs import pipeline
from libs.datasets import combined_dataset_utils
from libs.datasets import custom_aggregations
from libs.datasets import statistical_areas
from libs.datasets.combined_datasets import (
ALL_TIMESERIES_FEATURE_DEFINITION,
ALL_FIELDS_FEATURE_DEFINITION,
)
from libs.datasets import timeseries
from libs.datasets import dataset_utils
from libs.datasets import combined_datasets
from libs.datasets.sources import forecast_hub
from libs.datasets import tail_filter
from libs.datasets.sources import zeros_filter
from libs.us_state_abbrev import ABBREV_US_UNKNOWN_COUNTY_FIPS
from pyseir import DATA_DIR
import pyseir.icu.utils
from pyseir.icu import infer_icu
# Short alias for the tail-filter implementation used by the commands below.
TailFilter = tail_filter.TailFilter
# Cumulative fields that are screened by TailFilter before derived series
# (e.g. new cases) are computed from them.
CUMULATIVE_FIELDS_TO_FILTER = [
    CommonFields.CASES,
    CommonFields.DEATHS,
    CommonFields.POSITIVE_TESTS,
    CommonFields.NEGATIVE_TESTS,
    CommonFields.TOTAL_TESTS,
    CommonFields.POSITIVE_TESTS_VIRAL,
    CommonFields.POSITIVE_CASES_VIRAL,
    CommonFields.TOTAL_TESTS_VIRAL,
    CommonFields.TOTAL_TESTS_PEOPLE_VIRAL,
    CommonFields.TOTAL_TEST_ENCOUNTERS_VIRAL,
]
# Production data bucket name.
PROD_BUCKET = "data.covidactnow.org"
# By default require 0.95 of populations from regions to include a data point in aggregate.
DEFAULT_REPORTING_RATIO = 0.95
_logger = logging.getLogger(__name__)
@click.group("data")
def main():
pass
@main.command()
@click.option("--filename", default="external_forecasts.csv")
def update_forecasts(filename):
    """Updates external forecasts to the current checked out covid data public commit"""
    # Destination lives inside the repo's data directory.
    dest_dir = dataset_utils.DATA_DIRECTORY.relative_to(dataset_utils.REPO_ROOT)
    source_root = dataset_utils.LOCAL_PUBLIC_DATA_PATH
    source_rel_path = forecast_hub.ForecastHubDataset.COMMON_DF_CSV_PATH
    shutil.copy(source_root / source_rel_path, dest_dir / filename)
    _logger.info(f"Updating External Forecasts at {dest_dir / filename}")
@main.command()
@click.option(
    "--aggregate-to-country/--no-aggregate-to-country",
    is_flag=True,
    help="Aggregate states to one USA country region",
    default=False,
)
@click.option("--state", type=str, help="For testing, a two letter state abbr")
@click.option("--fips", type=str, help="For testing, a 5 digit county fips")
def update(aggregate_to_country: bool, state: Optional[str], fips: Optional[str]):
    """Updates latest and timeseries datasets to the current checked out covid data public commit"""
    path_prefix = dataset_utils.DATA_DIRECTORY.relative_to(dataset_utils.REPO_ROOT)
    # Load every configured source dataset; `state`/`fips` optionally subset
    # the data for faster test runs.
    timeseries_field_datasets = load_datasets_by_field(
        ALL_TIMESERIES_FEATURE_DEFINITION, state=state, fips=fips
    )
    static_field_datasets = load_datasets_by_field(
        ALL_FIELDS_FEATURE_DEFINITION, state=state, fips=fips
    )
    # Merge the per-field source lists into one multi-region dataset.
    multiregion_dataset = timeseries.combined_datasets(
        timeseries_field_datasets, static_field_datasets
    )
    # Filter for stalled cumulative values before deriving NEW_CASES from CASES.
    _, multiregion_dataset = TailFilter.run(multiregion_dataset, CUMULATIVE_FIELDS_TO_FILTER,)
    # Drop vaccination series that are entirely zero.
    multiregion_dataset = zeros_filter.drop_all_zero_timeseries(
        multiregion_dataset,
        [
            CommonFields.VACCINES_DISTRIBUTED,
            CommonFields.VACCINES_ADMINISTERED,
            CommonFields.VACCINATIONS_COMPLETED,
            CommonFields.VACCINATIONS_INITIATED,
        ],
    )
    # Derived fields and cleanups; the order of these steps matters since
    # each transformation feeds the next.
    multiregion_dataset = timeseries.add_new_cases(multiregion_dataset)
    multiregion_dataset = timeseries.drop_new_case_outliers(multiregion_dataset)
    multiregion_dataset = timeseries.backfill_vaccination_initiated(multiregion_dataset)
    multiregion_dataset = timeseries.drop_regions_without_population(
        multiregion_dataset, KNOWN_LOCATION_ID_WITHOUT_POPULATION, structlog.get_logger()
    )
    # Region-specific aggregations (PR from counties, NYC, DC).
    multiregion_dataset = timeseries.aggregate_puerto_rico_from_counties(multiregion_dataset)
    multiregion_dataset = custom_aggregations.aggregate_to_new_york_city(multiregion_dataset)
    multiregion_dataset = custom_aggregations.replace_dc_county_with_state_data(multiregion_dataset)
    # Append CBSA aggregates computed from the county-level data.
    aggregator = statistical_areas.CountyToCBSAAggregator.from_local_public_data()
    cbsa_dataset = aggregator.aggregate(
        multiregion_dataset, reporting_ratio_required_to_aggregate=DEFAULT_REPORTING_RATIO
    )
    multiregion_dataset = multiregion_dataset.append_regions(cbsa_dataset)
    if aggregate_to_country:
        # Optionally also roll states up into a single USA region.
        country_dataset = timeseries.aggregate_regions(
            multiregion_dataset,
            pipeline.us_states_to_country_map(),
            reporting_ratio_required_to_aggregate=DEFAULT_REPORTING_RATIO,
        )
        multiregion_dataset = multiregion_dataset.append_regions(country_dataset)
    combined_dataset_utils.persist_dataset(multiregion_dataset, path_prefix)
@main.command()
@click.argument("output_path", type=pathlib.Path)
def aggregate_cbsa(output_path: pathlib.Path):
    # Roll county-level timeseries up to CBSA level and write them as CSV.
    dataset = combined_datasets.load_us_timeseries_dataset()
    cbsa_aggregator = statistical_areas.CountyToCBSAAggregator.from_local_public_data()
    cbsa_aggregator.aggregate(dataset).to_csv(output_path)
@main.command()
@click.argument("output_path", type=pathlib.Path)
def aggregate_states_to_country(output_path: pathlib.Path):
    # Aggregate all state regions into one USA region and write it as CSV.
    dataset = combined_datasets.load_us_timeseries_dataset()
    aggregated = timeseries.aggregate_regions(
        dataset, pipeline.us_states_to_country_map(),
    )
    aggregated.to_csv(output_path)
# Location ids that are known to lack population data; passed to
# timeseries.drop_regions_without_population by the commands above/below.
KNOWN_LOCATION_ID_WITHOUT_POPULATION = [
    # Territories other than PR
    "iso1:us#iso2:us-vi",
    "iso1:us#iso2:us-as",
    "iso1:us#iso2:us-gu",
    # Subregions of VI (fips 78xxx; the old comment said "AS" but these
    # location ids are all us-vi)
    "iso1:us#iso2:us-vi#fips:78030",
    "iso1:us#iso2:us-vi#fips:78020",
    "iso1:us#iso2:us-vi#fips:78010",
    # Retired FIPS
    "iso1:us#iso2:us-sd#fips:46113",
    "iso1:us#iso2:us-va#fips:51515",
    # All the unknown county FIPS
    *[pipeline.fips_to_location_id(f) for f in ABBREV_US_UNKNOWN_COUNTY_FIPS.values()],
]
@main.command()
@click.argument("output_path", type=pathlib.Path)
def run_population_filter(output_path: pathlib.Path):
    # Drop regions with no population data, then dump the result as CSV.
    dataset = combined_datasets.load_us_timeseries_dataset()
    log = structlog.get_logger()
    log.info("starting filter")
    filtered = timeseries.drop_regions_without_population(
        dataset, KNOWN_LOCATION_ID_WITHOUT_POPULATION, log
    )
    filtered.to_csv(output_path)
@main.command()
@click.argument("output_path", type=pathlib.Path)
def run_bad_tails_filter(output_path: pathlib.Path):
    # Apply the cumulative-field tail filter and dump the timeseries rows.
    dataset = combined_datasets.load_us_timeseries_dataset()
    log = structlog.get_logger()
    log.info("Starting filter")
    _, filtered = TailFilter.run(dataset, CUMULATIVE_FIELDS_TO_FILTER)
    log.info("Writing output")
    filtered.timeseries_rows().to_csv(output_path, index=True, float_format="%.05g")
@main.command()
@click.option("--name", envvar="DATA_AVAILABILITY_SHEET_NAME", default="Data Availability - Dev")
@click.option("--share-email")
def update_availability_report(name: str, share_email: Optional[str]):
    # Builds/updates the data-availability spreadsheet `name`, writing one
    # worksheet per data source.
    from libs.qa import data_availability

    sheet = google_sheet_helpers.open_or_create_spreadsheet(name, share_email=share_email)
    info_worksheet = google_sheet_helpers.update_info_sheet(sheet)
    data_sources_by_source_name = data_availability.load_all_latest_sources()

    # Fixed: the loop variable was previously `name`, shadowing (and
    # clobbering) the sheet-name parameter above.
    for source_name, dataset in data_sources_by_source_name.items():
        _logger.info(f"Updating {source_name}")
        report = data_availability.build_data_availability_report(dataset)
        data_availability.update_multi_field_availability_report(
            sheet, report, source_name, columns_to_drop=["source", "fips"]
        )

    # Reorder sheets with combined data first and metadata last
    COLUMN_ORDER_OVERRIDE = {data_availability.COMBINED_DATA_KEY: -5, info_worksheet.title: 5}
    worksheets = sheet.worksheets()
    worksheets = sorted(worksheets, key=lambda x: (COLUMN_ORDER_OVERRIDE.get(x.title, 0), x.title))
    sheet.reorder_worksheets(worksheets)
    _logger.info("Finished updating data availability report")
@main.command()
def update_case_based_icu_utilization_weights():
    """
    Calculate the updated States to Counties disaggregation weights and save to disk. These
    weights are used to estimate county level ICU heads-in-beds as an input for the ICU Utilization
    metric.
    The output is callable with county aggregation-level fips keys and returns a normalized [0,1]
    value such that the weights for all counties in a given state sum to unity.
    """
    output_path = os.path.join(DATA_DIR, infer_icu.ICUWeightsPath.ONE_MONTH_TRAILING_CASES.value)
    output = pyseir.icu.utils.calculate_case_based_weights()
    with open(output_path, "w") as f:
        json.dump(output, f, indent=2, sort_keys=True)
    # Fixed: log success only after the file has actually been written
    # (previously this logged before the dump, so a failed write would
    # still have reported success).
    _logger.info(f"Saved case-based ICU Utilization weights to {output_path}")
def load_datasets_by_field(
    feature_definition_config: combined_datasets.FeatureDataSourceMap, *, state, fips
) -> Mapping[FieldName, List[timeseries.MultiRegionDataset]]:
    """Loads the datasets backing each field of the feature definition.

    Each field maps to its data sources' datasets, optionally restricted to
    `state`/`fips`, with the per-field list reversed so the highest-priority
    source comes last (as expected by timeseries.combined_datasets).
    Fields with no configured sources are omitted.
    """

    def _load_dataset(data_source_cls) -> timeseries.MultiRegionDataset:
        full_dataset = data_source_cls.make_dataset()
        # Narrow the dataset only when a state or fips filter was requested.
        if not (state or fips):
            return full_dataset
        return full_dataset.get_subset(state=state, fips=fips)

    # TODO(tom): reverse the hard-coded FeatureDataSourceMap and remove the
    # reversed() behavior below.
    feature_definition = {}
    for field_name, classes in feature_definition_config.items():
        if not classes:
            continue
        loaded = [_load_dataset(cls) for cls in classes]
        loaded.reverse()
        feature_definition[field_name] = loaded
    return feature_definition
|
# Copyright 2014 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import logging
import re
import time
from cros.factory.device import device_types
from cros.factory.device import sensor_utils
from cros.factory.utils import process_utils
_GRAVITY = 9.80665
class AccelerometerException(Exception):
  """Raised when an accelerometer is absent or a sensor read fails."""
  pass
class AccelerometerController(sensor_utils.BasicSensorController):
  """Utility class for the two accelerometers.

  Attributes:
    name: the name of the accelerometer, e.g., 'cros-ec-accel', or None.
      This will be used to lookup a matched name in
      /sys/bus/iio/devices/iio:deviceX/name to get
      the corresponding iio:deviceX.
      At least one of name or location must present.
    location: the location of the accelerometer, e.g., 'base' or 'lid', or
      None. This will be used to lookup a matched location in
      /sys/bus/iio/devices/iio:deviceX/location to get
      the corresponding iio:deviceX.
      At least one of name or location must present.

  Raises:
    Raises AccelerometerException if there is no accelerometer.
  """

  def __init__(self, board, name, location):
    """Cleans up previous calibration values and stores the scan order.

    We can get raw data from below sysfs:
      /sys/bus/iio/devices/iio:deviceX/in_accel_(x|y|z)_raw.
    However, there is no guarantee that the data will have been sampled
    at the same time. So we use `iioservice_simpleclient` to query the
    sensor data.

    Args:
      board: the device interface this controller operates on.
      name: sensor name to match (e.g. 'cros-ec-accel'), or None.
      location: sensor location to match (e.g. 'base' or 'lid'), or None.
    """
    super(AccelerometerController, self).__init__(
        board, name, location, ['in_accel_x', 'in_accel_y', 'in_accel_z'],
        scale=True)
    self.location = location

  def CleanUpCalibrationValues(self):
    """Clean up calibration values.

    The sysfs trigger only captures calibrated input values, so we reset
    the calibration to allow reading raw data from a trigger.
    """
    for signal_name in self.signal_names:
      # Zero the per-axis calibration bias so subsequent reads are raw.
      self._SetSysfsValue('%s_calibbias' % signal_name, '0')

  def GetData(self, capture_count: int = 1, sample_rate: float = None):
    """Returns average values of the sensor data.

    Use `iioservice_simpleclient` to capture the sensor data.

    Args:
      capture_count: how many records to read to compute the average.
      sample_rate: sample rate in Hz to read data from accelerometers. If it is
        None, set to the maximum frequency.

    Returns:
      A dict of the format {'signal_name': average value}
      The output data is in m/s^2.
      Ex, {'in_accel_x': 0,
           'in_accel_y': 0,
           'in_accel_z': 9.8}

    Raises:
      Raises AccelerometerException if there is no calibration
      value in VPD.
    """
    # Initializes the returned dict.
    ret = {signal_name: 0.0 for signal_name in self.signal_names}

    def ToChannelName(signal_name):
      """Transform the signal names (in_accel_(x|y|z)) to the channel names used
      in iioservice (accel_(x|y|z))."""
      return signal_name[3:] if signal_name.startswith('in_') else signal_name

    iioservice_channels = [
        ToChannelName(signal_name) for signal_name in self.signal_names
    ]
    # We only test `iioservice_simpleclient` with maximum frequency in
    # sensor_iioservice_hard.go. Use maximum frequency by default to make sure
    # that our tests are using tested commands.
    if sample_rate is None:
      frequencies = self.GetSamplingFrequencies()
      # frequencies[1] is used as the maximum sampling frequency — assumes
      # GetSamplingFrequencies() returns (min, max); TODO confirm.
      sample_rate = frequencies[1]
    iioservice_cmd = [
        'iioservice_simpleclient',
        '--channels=%s' % ' '.join(iioservice_channels),
        '--frequency=%f' % sample_rate,
        # The sysfs 'dev' node reads as 'major:minor'; the part after the
        # colon is passed to iioservice as the device id.
        '--device_id=%d' % int(self._GetSysfsValue('dev').split(':')[1]),
        '--samples=1'
    ]
    logging.info('iioservice_simpleclient command: %r', iioservice_cmd)

    # Reads the captured data. The command captures a single sample per
    # invocation (--samples=1), so it is run capture_count times.
    data_captured = 0
    while data_captured < capture_count:
      # Wait roughly one sampling period between captures.
      time.sleep(1 / sample_rate)
      proc = process_utils.CheckCall(iioservice_cmd, read_stderr=True)
      for signal_name in self.signal_names:
        channel_name = ToChannelName(signal_name)
        # iioservice_simpleclient reports readings on stderr in the form
        # '<channel>: <integer>'.
        match = re.search(r'(?<={}: )-?\d+'.format(channel_name),
                          proc.stderr_data)
        if not match:
          logging.error(
              'Failed to read channel "%s" from iioservice_simpleclient. '
              'stderr:\n%s', channel_name, proc.stderr_data)
          raise AccelerometerException
        ret[signal_name] += int(match[0])
        logging.info('(%d) Getting data on channel %s: %d', data_captured,
                     channel_name, int(match[0]))
      data_captured += 1

    # Calculates average value and convert to SI unit.
    for signal_name in ret:
      ret[signal_name] = (ret[signal_name] / capture_count * self.scale)
    logging.info('Average of %d data: %s', capture_count, ret)
    return ret

  @staticmethod
  def IsWithinOffsetRange(data, orientations, spec_offset):
    """Checks whether the value of sensor data is within the spec or not.

    It is used before calibration to filter out abnormal accelerometers.

    Args:
      data: a dict containing digital output for each signal, in m/s^2.
        Ex, {'in_accel_x': 0,
             'in_accel_y': 0,
             'in_accel_z': 9.8}
      orientations: a dict indicating the orentation in gravity
        (either 0 or -/+1) of the signal.
        Ex, {'in_accel_x': 0,
             'in_accel_y': 0,
             'in_accel_z': 1}
      spec_offset: a tuple of two integers, ex: (0.5, 0.5) indicating the
        tolerance for the digital output of sensors under zero gravity and
        one gravity, respectively.

    Returns:
      True if the data is within the tolerance of the spec.
    """
    for signal_name in data:
      value = data[signal_name]
      orientation = orientations[signal_name]
      # Check the sign of the value for -/+1G orientation.
      if orientation and orientation * value < 0:
        logging.error('The orientation of %s is wrong.', signal_name)
        return False
      # Check the abs value is within the range of -/+ offset.
      # index 0 selects the zero-gravity tolerance, index 1 the one-gravity
      # tolerance from spec_offset.
      index = abs(orientation)
      ideal_value = _GRAVITY * orientation
      if abs(value - ideal_value) > spec_offset[index]:
        logging.error('Signal %s out of range: %f', signal_name, value)
        return False
    return True

  def CalculateCalibrationBias(self, data, orientations):
    """Computes the new per-axis calibration bias, in m/s^2.

    Args:
      data: a dict of measured output per signal, in m/s^2.
      orientations: a dict of expected gravity orientation (0 or -/+1)
        per signal.

    Returns:
      A dict keyed '<signal>_<location>_calibbias' with the bias values.
    """
    # Calculating calibration data.
    calib_bias = {}
    for signal_name in data:
      ideal_value = _GRAVITY * orientations[signal_name]
      # The sysfs calibbias value is stored in 1/1024 G units; convert it
      # to m/s^2 before combining with the measured data.
      current_calib_bias = (
          int(self._GetSysfsValue('%s_calibbias' % signal_name))
          * _GRAVITY / 1024)
      # Calculate the difference between the ideal value and actual value
      # then store it into _calibbias. In release image, the raw data will
      # be adjusted by _calibbias to generate the 'post-calibrated' values.
      calib_bias[signal_name + '_' + self.location + '_calibbias'] = (
          ideal_value - data[signal_name] + current_calib_bias)
    return calib_bias

  def UpdateCalibrationBias(self, calib_bias):
    """Update calibration bias to RO_VPD

    Args:
      calib_bias: a dict of calibration bias, in m/s^2.
        Ex, {'in_accel_x_base_calibbias': 0.1,
             'in_accel_y_base_calibbias': -0.2,
             'in_accel_z_base_calibbias': 0.3}
    """
    # Writes the calibration results into ro vpd.
    # The data is converted to 1/1024G unit before writing.
    logging.info('Calibration results: %s.', calib_bias)
    scaled = {k: str(int(v * 1024 / _GRAVITY)) for k, v in calib_bias.items()}
    self._device.vpd.ro.Update(scaled)
    # Mirror each written VPD entry back into the matching sysfs calibbias
    # node so the new bias takes effect immediately.
    mapping = []
    for signal_name in self.signal_names:
      mapping.append(('%s_%s_calibbias' % (signal_name, self.location),
                      '%s_calibbias' % signal_name))
    for vpd_entry, sysfs_entry in mapping:
      self._SetSysfsValue(sysfs_entry, scaled[vpd_entry])
class Accelerometer(device_types.DeviceComponent):
  """Accelerometer component module."""

  def GetController(self, location):
    """Builds an AccelerometerController for the sensor at `location`,
    using the standard 'cros-ec-accel' driver name.

    See AccelerometerController for more information.
    """
    controller = AccelerometerController(self._device, 'cros-ec-accel',
                                         location)
    return controller
|
<filename>algorithm/BrainAC.py
import numpy as np
import tensorflow as tf
import copy
# Fix the NumPy and TensorFlow RNG seeds so runs are reproducible.
np.random.seed(1)
tf.set_random_seed(1)
class ActorCritic():
    """Actor-Critic agent: owns the TF session and wires together the Actor
    (policy) and Critic (value) networks, plus state preprocessing."""

    def __init__(
            self,
            n_actions=2,
            n_features=87,
            LR_A = 0.001,   # learning rate for actor
            LR_C = 0.01,    # learning rate for critic
            reward_decay=0.95,
            prob_clip=0.06,
            output_graph=False,
    ):
        # dimension of the action space
        self.n_actions = n_actions
        # dimension of the state feature vector
        self.n_features = n_features
        # reward discount rate
        self.gamma = reward_decay
        # observations, actions and rewards collected along one trajectory
        self.ep_obs, self.ep_as, self.ep_rs = [],[],[]
        self.ep_length = []
        # launch a default TF session shared by both networks
        self.sess = tf.Session()
        # build the policy (actor) and value (critic) networks
        # self._build_net()
        self.actor = Actor(self.sess, n_features=n_features, n_actions=n_actions, lr=LR_A)
        # reportedly the critic, as the evaluation network, should use a
        # larger learning rate than the actor
        self.critic = Critic(self.sess, n_features=n_features, lr=LR_C)
        self.prob_clip = prob_clip
        # if output_graph:
        #     tf.summary.FileWriter("logs/", self.sess.graph)
        # initialize the variables in the session
        self.sess.run(tf.global_variables_initializer())

    def choose_action(self, state, length):
        """Preprocesses `state` and delegates action selection to the actor."""
        stateP = self.statePreprocess(state)
        return self.actor.choose_action(stateP, length)

    def criticLearn(self, state, reward, state_):
        """Runs one critic update for transition (state, reward, state_).

        Returns the TD error, which the caller feeds to actorLearn.
        """
        stateP = self.statePreprocess(state)
        state_P = self.statePreprocess(state_)
        return self.critic.learn(stateP, reward, state_P)

    def actorLearn(self, state, action, td_error):
        """Runs one actor update using the critic's TD error as advantage."""
        stateP = self.statePreprocess(state)
        return self.actor.learn(stateP, action, td_error)

    def statePreprocess(self,state):
        """Normalizes the positive entries of `state` and zero-pads the
        result to a fixed length of 87 (the default n_features)."""
        # keep only the positive entries of the raw state
        exist = np.array([n for n in state if n>0])
        # normalize with fixed statistics — presumably the dataset
        # mean/std; TODO confirm where these constants come from
        exist -= 1877.368
        exist /= 256.61
        exist = exist.tolist()
        # zero-pad to the fixed feature length
        while len(exist) < 87:
            exist.append(0)
        # print("STATE:", self.state,"EXIST:",exist)
        return exist
class Actor(object):
    """Policy network: maps a state to action probabilities and is trained
    to maximize log-probability weighted by the critic's TD error."""

    def __init__(self, sess, n_features, n_actions, lr=0.001):
        self.sess = sess
        # placeholders: state batch, chosen action index, and the TD error
        # supplied by the critic
        self.s = tf.placeholder(tf.float32, [None, n_features], "state")
        self.a = tf.placeholder(tf.int32, None, "act")
        self.td_error = tf.placeholder(tf.float32, None, "td_error")  # TD_error
        with tf.variable_scope('Actor'):
            l1 = tf.layers.dense(
                inputs=self.s,
                units=20,    # number of hidden units
                activation=tf.nn.relu,
                kernel_initializer=tf.random_normal_initializer(0., .1),    # weights
                bias_initializer=tf.constant_initializer(0.1),    # biases
                name='l1'
            )
            self.acts_prob = tf.layers.dense(
                inputs=l1,
                units=n_actions,    # output units
                activation=tf.nn.softmax,    # get action probabilities
                kernel_initializer=tf.random_normal_initializer(0., .1),    # weights
                bias_initializer=tf.constant_initializer(0.1),    # biases
                name='acts_prob'
            )
        with tf.variable_scope('exp_v'):
            # clip the selected probability away from 0 to avoid log(0)
            log_prob = tf.log(tf.clip_by_value(self.acts_prob[0, self.a],0.000001,1,name=None))
            self.exp_v = tf.reduce_sum(log_prob * self.td_error)  # advantage (TD_error) guided loss
            # self.exp_v = tf.reduce_mean(log_prob * self.td_error)   # advantage (TD_error) guided loss
        with tf.variable_scope('train'):
            self.train_op = tf.train.AdamOptimizer(lr).minimize(-self.exp_v)  # minimize(-exp_v) = maximize(exp_v)

    def learn(self, s, a, td):
        """Runs one gradient step for (state s, action a, TD error td) and
        returns the resulting exp_v value."""
        s = np.mat(s)
        feed_dict = {self.s: s, self.a: a, self.td_error: td}
        _, exp_v = self.sess.run([self.train_op, self.exp_v], feed_dict)
        return exp_v

    def choose_action(self, s, length):
        """Samples an action from the current policy for state `s`.

        Returns (action, (p0, p1)) — the tuple construction assumes exactly
        two actions. NOTE(review): `length` is unused here — confirm whether
        it can be dropped from the call chain.
        """
        s = np.mat(s)
        probs = self.sess.run(self.acts_prob, {self.s: s})   # get probabilities for all actions
        # print(probs)
        action = np.random.choice(np.arange(probs.shape[1]), p=probs.ravel())   # return a int
        p = probs.ravel()[0],probs.ravel()[1]
        print(p)
        return action,p
class Critic(object):
    """Value network: estimates V(s) and is trained on the squared TD error
    r + gamma * V(s') - V(s)."""

    def __init__(self, sess, n_features, lr=0.01, gamma=0.9):
        self.sess = sess
        self.gamma = gamma
        # placeholders: state batch, next-state value V(s'), and reward r
        self.s = tf.placeholder(tf.float32, [None, n_features], "state")
        self.v_ = tf.placeholder(tf.float32, None, "v_next")
        self.r = tf.placeholder(tf.float32, None, 'r')
        with tf.variable_scope('Critic'):
            l1 = tf.layers.dense(
                inputs=self.s,
                units=20,  # number of hidden units
                activation=tf.nn.relu,  # None
                # have to be linear to make sure the convergence of actor.
                # But linear approximator seems hardly learns the correct Q.
                kernel_initializer=tf.random_normal_initializer(0., .1),  # weights
                bias_initializer=tf.constant_initializer(0.1),  # biases
                name='l1'
            )
            self.v = tf.layers.dense(
                inputs=l1,
                units=1,  # output units
                activation=None,
                kernel_initializer=tf.random_normal_initializer(0., .1),  # weights
                bias_initializer=tf.constant_initializer(0.1),  # biases
                name='V'
            )
        with tf.variable_scope('squared_TD_error'):
            self.td_error = self.r + self.gamma * self.v_ - self.v
            self.loss = tf.square(self.td_error)    # TD_error = (r+gamma*V_next) - V_eval
        with tf.variable_scope('train'):
            self.train_op = tf.train.AdamOptimizer(lr).minimize(self.loss)

    def learn(self, s, r, s_):
        """Runs one critic update for transition (s, r, s_) and returns the
        TD error."""
        # a next state of [-1] appears to be the terminal-state sentinel,
        # in which case V(s') is taken to be 0 — TODO confirm with callers
        if s_[0] == -1 and len(s_) == 1:
            v_ = np.mat([0])
        else:
            s_ = np.mat(s_)
            # s_ = s_[np.newaxis, :]
            v_ = self.sess.run(self.v, {self.s: s_})
            # print("V(S):",v_)
        s = np.mat(s)
        td_error, _ = self.sess.run([self.td_error, self.train_op],
                                    {self.s: s, self.v_: v_, self.r: r})
        return td_error
# -*- coding: utf-8 -*-
import time, os
#import re, sys
from decode import decoder
from combine import csvcombine
dir_path = './' #Default path
confirm = ['true','yes','1'] #Possible responses to indicate "True"
deny = ['false','no','0'] #Possible responses to indicate "False"
# Resolve the directory containing this script so template/data paths are
# relative to the script rather than the current working directory.
try:
    dir_path = os.path.dirname(os.path.realpath(__file__))+'/'
# FIX: catch only NameError (raised when __file__ is undefined, e.g. in an
# interactive session) instead of a bare except that hid every other error.
except NameError:
    print('Failed to set dir_path programmatically!\n',
          'Parser works better if run from command line.\n',
          'Setting dir_path to: ', dir_path)
    input('Press Enter to continue...')
# Parse template.txt line by line. Lines before the "keywords" command are
# "cmd: val" settings; every non-comment line after it is a keyword entry.
kwMode = False
for line in open(dir_path+'template.txt'):
    # ignore hashed-out lines and blank lines
    if (line[:1] !='#' and line.strip()!=''):
        # try to split line into cmd, val pairs using a colon
        try:
            cmd = line.split(':')[0].strip().lower()
            val = str(line.split(':')[1]).strip()
            cmdval = True
        # otherwise, treat entire line as val
        except:
            cmd = None
            val = str(line).strip()
            cmdval = False
            pass
        # begin parsing cmd, val pairs into working variables
        if cmdval:
            # set directories for input, temp, and output
            if cmd == 'dir_self':
                dirSelf = val
            if cmd == 'dir_in':
                dirIn = val
            if cmd == 'dir_temp':
                dirTemp = val
            if cmd == 'dir_out':
                dirOut = val
            # hiMem setting should be based on file size and amount of working memory (i.e., RAM)
            # If hiMem == True, then full JSON files will be moved to working memory for parsing
            # this is faster but more memory intensive.
            # NOTE: cmd is already lower-cased when it is split above, so the
            # .lower() calls below are redundant (but harmless).
            if cmd.lower() in ['memory', 'mem', 'himem']:
                if val.lower() in ['high', 'fast'] or val in confirm:
                    hiMem = True
                else:
                    hiMem = False
            # set "start" and "stop" date strings to select input files to parse
            # format is YYYYMMDD, inclusive
            if cmd.lower() in ['start','begin', 'first']:
                start = int(val)
            if cmd.lower() in ['stop','end', 'last']:
                end = int(val)
            # set combine to dictate how output files are combined
            # NEED TO DOCUMENT THIS!
            # NOTE: the first assignment is immediately overwritten by the
            # boolean below, so combine always ends up True/False here.
            if cmd == 'combine':
                combine = val
                if val.lower() in confirm:
                    combine = True
                else:
                    combine = False
            # set the CSV file to use for emoji translations
            # NEED TO DOCUMENT THIS!
            if 'emoj' in cmd.lower():
                emojify = 1 # FIX THIS DEPENDENCY
                if '.csv' in val:
                    emojiFile = dir_path+val
                else:
                    emojiFile = None
            # set "clear" to True, to clear temp files before decoding
            # TO-DO: clear tempDir and/or outDir separately
            if cmd.lower() == 'clear':
                if val.lower() in confirm:
                    clear = True
                else:
                    clear = False
            # set "geo" to True, to retrieve only geotagged tweets
            if cmd == 'geo':
                if val.lower() in confirm:
                    geo = True
                else:
                    geo = False
            # set "test" variable (not yet implemented)
            if cmd == 'test':
                if val.lower() in confirm:
                    test = True
                else:
                    test = False
            # look for the "keywords" command (should be the last command)
            # all lines after this will be treated as keyword parameters
            if cmd in ['keywords','kws']:
                keywords = {}
                if val == '':
                    kwMode = True
                else:
                    # different types of modes may come in handy later?
                    kwMode = val.lower()
        # append all remaining lines to the keyword parameter list
        # NEED TO DOCUMENT THIS!
        if kwMode:
            if cmd:
                # keywords are not read from the initial "keywords" line
                pass
            else:
                # keyword lines have no colon, so cmd is None and the whole
                # (already stripped) line is the keyword
                if val not in keywords.keys():
                    keywords.update({val.strip() : 0})
                    print(val)
    else:
        continue
# clear EVERYTHING from "dirTemp" and "dirOut"
# FIX: `clear` is set to a boolean while parsing the template, but the old
# test compared it against the strings ['true','1','yes','clear'], which a
# bool never matches — so the cleanup step never actually ran.
if clear:
    for f in sorted(os.listdir(dirTemp)):
        os.remove(dirTemp+f)
    for f in sorted(os.listdir(dirOut)):
        os.remove(dirOut+f)
# read data files in dirIn
files = sorted(os.listdir(dirIn))
print("\nREADING TWEETS FROM " + str(start) + ' to ' + str(end) +'\n')
t = str(time.time())
for f in files:
    if f[-5:] =='.json':
        # restrict to files whose YYYYMMDD name prefix falls in [start, end]
        if int(f[:8]) >= int(start) and int(f[:8]) <= int(end):
            d = decoder(keywords, dirIn, dirTemp, dirOut,
                        hiMem, emojiFile)
            record = d.fixjson(dirIn, f, hiMem, emojiFile)
            #d.jsontocsv(record,f,geo,emojify, count=0)
# optionally merge the per-file CSV outputs into one
if combine:
    c = csvcombine(dirOut, dir_path, dirTemp)
    c.combinecsv(combine, clear)
|
<filename>examples/regular_2d_mesh_tally_example.py
# A minimal example that obtains TBR on the blanket and fast neutron flux on all
# cells in the DAGMC geometry.
# Particular emphasis is placed on explaining the openmc-dagmc-wrapper
# extensions of openmc base classes.

import openmc
import openmc_dagmc_wrapper as odw
from openmc_plasma_source import FusionRingSource

# downloads a dagmc file for use in the example
# import tarfile
# import urllib.request
# url = "https://github.com/fusion-energy/neutronics_workflow/archive/refs/tags/v0.0.2.tar.gz"
# urllib.request.urlretrieve(url, "v0.0.2.tar.gz")
# tar = tarfile.open("v0.0.2.tar.gz", "r:gz")
# tar.extractall(".")
# tar.close()

h5m_filename = "neutronics_workflow-0.0.2/example_02_multi_volume_cell_tally/stage_2_output/dagmc.h5m"

# creates a geometry object from a DAGMC geometry.
# In this case the geometry doesn't have a graveyard cell.
# So a set of 6 CSG surfaces are automatically made and added to the geometry
geometry = odw.Geometry(h5m_filename=h5m_filename)

# Creates the materials to use in the problem by linking the material
# tags in the DAGMC h5m file with material definitions in the
# neutronics-material-maker. One could also use openmc.Material or nmm.Material
# objects instead of the strings used here
materials = odw.Materials(
    h5m_filename=h5m_filename,
    correspondence_dict={
        "blanket_mat": "Li4SiO4",
        "blanket_rear_wall_mat": "Be",
        "center_column_shield_mat": "Be",
        "divertor_mat": "Be",
        "firstwall_mat": "Be",
        "inboard_tf_coils_mat": "Be",
        "pf_coil_case_mat": "Be",
        "pf_coil_mat": "Be",
        "tf_coil_mat": "Be",
    },
)

# makes use of the dagmc-bound-box package to get the corners of the bounding
# box. This will be used to set the bounding box for the tally. This can be
# expanded with the expand keyword if needed
my_bounding_box = geometry.corners()

# A MeshTally2D tally allows a set of standard tally types (made from filters
# and scores) to be applied to the DAGMC geometry. By default the mesh will be
# applied across the entire geometry and its size is automatically found.
tally1 = odw.MeshTally2D(
    tally_type="photon_effective_dose",
    plane="xy",
    bounding_box=my_bounding_box)
tally2 = odw.MeshTally2D(
    tally_type="neutron_effective_dose",
    plane="xy",
    bounding_box=my_bounding_box)

# no modifications are made to the default openmc.Tallies
tallies = openmc.Tallies([tally1, tally2])

# Creates an openmc settings object with the run mode set to 'fixed source'
# and the number of inactive particles set to zero. Setting these values
# by default means less code is needed by the user and less chance of simulating
# batches that don't contribute to the tallies
settings = odw.FusionSettings()
settings.batches = 2
settings.particles = 100
settings.photon_transport = True

# assigns a ring source of DT energy neutrons to the source using the
# openmc_plasma_source package
settings.source = FusionRingSource(fuel="DT", radius=350)

# no modifications are made to the default openmc.Model object
my_model = openmc.Model(
    materials=materials, geometry=geometry, settings=settings, tallies=tallies
)
statepoint_file = my_model.run()
|
<reponame>C3BI-pasteur-fr/ReMoTE
#!/usr/bin/env python
from lxml import etree
import json
import argparse
import requests
import os.path
import getpass
from requests.packages import urllib3
ns = {'btr':'http://biotoolsregistry.org'}
HOST = 'https://bio.tools'
SSL_VERIFY = False #verify SSL certificates
def auth(login):
    """Prompts for the password of `login` and returns a bio.tools API token.

    Raises a KeyError if the login response contains no 'token' field.
    """
    password = getpass.getpass()
    # FIX: build the JSON payload with json.dumps so quotes/backslashes in
    # the credentials are escaped correctly (the previous %-formatting
    # produced invalid JSON for such characters).
    payload = json.dumps({'username': login, 'password': password})
    resp = requests.post(HOST + '/api/auth/login', payload,
                         headers={'Accept': 'application/json',
                                  'Content-type': 'application/json'},
                         verify=SSL_VERIFY).text
    return json.loads(resp)['token']
def main():
# 1. Import XML files from a Mobyle server or from a folder containing XML files
# 2. Convert to BTR XML
# 3. Convert to BTR JSON
# 4. Register to Elixir BTR
parser = argparse.ArgumentParser(
description='Transform Mobyle1 XML to BTR XML and JSON')
group = parser.add_mutually_exclusive_group(required=True)
group.add_argument('--from_server', help="Mobyle server URI to import definitions from")
group.add_argument('--from_files', help="Mobyle XML files to import definitions from", nargs='+')
parser.add_argument('--xml_dir', help="target directory for XML files")
parser.add_argument('--login', help="registry login")
args = parser.parse_args()
if args.from_files:
filenames = args.from_files
elif args.from_server:
resp = requests.get(args.from_server+'/net_services.py')
services = json.loads(resp.text)
filenames = []
for key, value in json.loads(resp.text).items():
filenames.append(value['url'])
XSL_PATH = os.path.normpath(os.path.join(os.path.dirname(__file__),'remote.xsl'))
xslt_doc = etree.parse(XSL_PATH)
transform = etree.XSLT(xslt_doc)
params = {'mobyle_root':"'http://mobyle.pasteur.fr'",
'mobyle_contact':"'<EMAIL>'"}
if args.login:
# Disable HTTPS verification warnings from urllib3
# if setup requires it
if SSL_VERIFY==False:
urllib3.disable_warnings()
print "authenticating..."
token = auth(args.login)
print "authentication ok"
ok_cnt = 0
ko_cnt = 0
print "attempting to delete all registered services..."
resp = requests.delete(HOST + '/api/tool/%s' % args.login, headers={'Accept':'application/json', 'Content-type':'application/json', 'Authorization': 'Token %s' % token}, verify=SSL_VERIFY)
for filename in filenames:
print "processing %s..." % filename
mobyle_doc = etree.parse(filename)
xml = transform(mobyle_doc, **params)
btr_doc = xml
resource_name = filename.split('/')[-1][0:-4]
if args.xml_dir:
xml_path = os.path.join(args.xml_dir, resource_name + '.xml')
o_file = open(xml_path, 'w')
o_file.write(etree.tostring(xml, pretty_print=True))
o_file.close()
if args.login and args:
resp = requests.post(HOST + '/api/tool', etree.tostring(xml, pretty_print=True), headers={'Accept':'application/json', 'Content-type':'application/xml', 'Authorization': 'Token %s' % token}, verify=SSL_VERIFY)
if resp.status_code==201:
print "%s ok" % resource_name
ok_cnt += 1
else:
print "%s ko, error: %s" % (resource_name, resp.text)
ko_cnt += 1
if args.login:
print "import finished, ok=%s, ko=%s" % (ok_cnt, ko_cnt)
|
<gh_stars>0
import random
import numpy as np
import torch
from metrics.average_meter import AverageMeter
from metrics.average_ensemble_meter import AverageEnsembleMeter
from models.shapes_trainer import ShapesTrainer
class TrainHelper():
    """Helper bundling the train/evaluate loops plus filename and seeding
    utilities for the shapes referential game."""

    def __init__(self, device):
        # torch device used for metadata tensors and CUDA seeding
        self.device = device

    def train_one_batch(
            self,
            model: "ShapesTrainer",  # forward-ref string: project type
            batch,
            receiver_optimizer,
            meta_data,
            device,
            inference_step,
            multi_task,
            zero_shot,
            disabled_properties,
            sender_optimizer):
        """
        Train for single batch.

        Returns:
            (losses, accuracies) as produced by model.forward.
        """
        model.train()
        receiver_optimizer.zero_grad()

        target, distractors, indices, md5 = batch
        # Pick the metadata the model needs for the current training mode.
        if inference_step or multi_task:
            md = torch.tensor(meta_data[indices[:,0], :], device=device, dtype=torch.int64)
        elif zero_shot:
            md = md5
        else:
            md = None

        loss, disabled_loss, losses, accuracies, _, _, _ = model.forward(target, distractors, md)
        # retain_graph so the sender can backprop disabled_loss afterwards
        loss.backward(retain_graph=True)
        receiver_optimizer.step()

        if sender_optimizer:
            sender_optimizer.zero_grad()
            disabled_loss.backward()
            sender_optimizer.step()

        return losses, accuracies

    def evaluate(self, model, dataloader, valid_meta_data, device, inference_step, multi_task, step3, property_one, property_two, zero_shot):
        """Evaluates `model` over `dataloader`.

        Returns:
            (loss_meter, acc_meter, messages, hidden_sender_parameters,
            hidden_receiver_parameters)
        """
        # Meter shape depends on the evaluation mode.
        if multi_task:
            loss_meter = [AverageEnsembleMeter(5), AverageMeter()]
            acc_meter = [AverageEnsembleMeter(5), AverageMeter()]
        elif inference_step or step3:
            loss_meter = AverageEnsembleMeter(5)
            acc_meter = AverageEnsembleMeter(5)
        else:
            loss_meter = AverageMeter()
            acc_meter = AverageMeter()

        messages = []
        model.eval()
        hidden_sender_parameters, hidden_receiver_parameters = [], []

        for batch in dataloader:
            # note step3 takes lkey, but for zeroshot this is vmd5, thus
            # step3 and zeroshot can't be combined
            target, distractors, indices, vmd5 = batch
            if inference_step or multi_task:
                if zero_shot:
                    vmd = vmd5
                else:
                    vmd = torch.tensor(valid_meta_data[indices[:, 0], :], device=device, dtype=torch.int64)
            else:
                vmd = None

            _, _, loss2, acc, msg, h_s, h_r = model.forward(target, distractors, vmd)
            hidden_sender_parameters.append(h_s.detach().cpu().numpy())
            hidden_receiver_parameters.append(h_r.detach().cpu().numpy())

            if multi_task:
                loss_meter[0].update(loss2[0].detach().numpy())
                loss_meter[1].update(loss2[1])
                acc_meter[0].update(acc[0].detach().numpy())
                acc_meter[1].update(acc[1])
            elif step3:
                # Note that for the RANDOM step3 dict, lkey is just a random integer
                # Thus its prediction of the classes are average accuracies over smaller sets
                # FIX: in step3 mode the fourth batch element is the label
                # key; the old code referenced an undefined `lkey` here,
                # raising NameError.
                lkey = torch.tensor(list(map(int, vmd5)))
                lkey_stack = torch.stack([lkey == 0, lkey == 1, lkey == 2, lkey == 3, lkey == 4])
                acc = (torch.sum(lkey_stack.cpu().float() * acc.cpu().float(), dim=1)/torch.sum(lkey_stack.cpu().float(),dim=1)).numpy()
                loss2 = (torch.sum(lkey_stack.cpu().float() * loss2.cpu().float(), dim=1)/torch.sum(lkey_stack.cpu().float(),dim=1)).detach().numpy()
                # classes with no members produce NaN averages; zero them
                acc[np.isnan(acc)] = 0
                loss2[np.isnan(loss2)] = 0
                loss_meter.update(loss2)
                acc_meter.update(acc)
            elif inference_step:
                loss_meter.update(loss2.detach().numpy())
                acc_meter.update(acc.detach().numpy())
            else:
                loss_meter.update(loss2)
                acc_meter.update(acc)

            messages.append(msg)

        hidden_sender_parameters = np.concatenate(hidden_sender_parameters)
        hidden_receiver_parameters = np.concatenate(hidden_receiver_parameters)
        return (
            loss_meter,
            acc_meter,
            torch.cat(messages, 0),
            hidden_sender_parameters,
            hidden_receiver_parameters
        )

    def get_filename_from_baseline_params(self, params):
        """
        Generates a filename from baseline params (see baseline.py)
        """
        if params.name:
            return params.name

        name = params.dataset_type
        name += "_e_{}".format(params.embedding_size)
        name += "_h_{}".format(params.hidden_size)
        name += "_lr_{}".format(params.lr)
        name += "_max_len_{}".format(params.max_length)
        name += "_k_{}".format(params.k)
        name += "_vocab_{}".format(params.vocab_size)
        name += "_seed_{}".format(params.seed)
        name += "_btch_size_{}".format(params.batch_size)
        if params.single_model:
            name += "_single_model"
        if params.greedy:
            name += "_greedy"
        if params.debugging:
            name += "_debug"
        if params.sender_path or params.receiver_path:
            name += "_loaded_from_path"
        if params.inference_step:
            name += "_inference"
        if params.step3:
            name += "_step3"
        if params.multi_task:
            name += "_multi"
        if params.multi_task_lambda:
            name += f'_lambda_{params.multi_task_lambda}'
        if params.disabled_properties:
            name += "_disabled"
            for image_property in params.disabled_properties:
                name += f'_{int(image_property)}'
        return name

    def seed_torch(self, seed=42):
        """
        Seed random, numpy and torch with same seed
        """
        random.seed(seed)
        np.random.seed(seed)
        torch.manual_seed(seed)
        if self.device.type == 'cuda':
            torch.cuda.manual_seed(seed)
<gh_stars>0
#!/usr/bin/env python
"""
.. py:currentmodule:: experimental.nanopico.test_LogFile
.. moduleauthor:: <NAME> <<EMAIL>>
description
"""
# Script information for the file.
__author__ = "<NAME> (<EMAIL>)"
__version__ = ""
__date__ = ""
__copyright__ = "Copyright (c) 2012 <NAME>"
__license__ = ""
# Standard library modules.
import unittest
import logging
import os.path
import datetime
# Third party modules.
from nose import SkipTest
# Local modules.
# Project modules
import pyprobecurrent.nanopico.LogFile as LogFile
from pyprobecurrent import get_current_module_path
# Globals and constants variables.
class TestLogFile(unittest.TestCase):
    """
    TestCase class for the module `LogFile`.
    """

    def setUp(self):
        """Locate the nanopico test-data folder, skipping the case if absent."""
        unittest.TestCase.setUp(self)
        self._dataPath = get_current_module_path(__file__, "../../testData/nanopico")
        if not os.path.isdir(self._dataPath):
            raise SkipTest

    def tearDown(self):
        """Delegate teardown to the base TestCase."""
        unittest.TestCase.tearDown(self)

    def testSkeleton(self):
        """Sanity check that the testing framework picks up this case."""
        #self.fail("Test if the testcase is working.")
        self.assertTrue(True)

    def test_read(self):
        """`read` should load all 76 points from the sample log file."""
        sample_path = os.path.join(self._dataPath, "testCurrent.txt")
        log_file = LogFile.LogFile(sample_path)
        log_file._read(sample_path)
        self.assertEqual(76, log_file.numberPoints)
        #self.fail("Test if the testcase is working.")

    def test__extractStartTime(self):
        """`_extractStartTime` parses the US-style date/time header line."""
        header = "Logging started at 3/27/2012 8:16:32 PM"
        expected = datetime.datetime(2012, 3, 27, 20, 16, 32)
        log_file = LogFile.LogFile(None)
        self.assertEqual(expected, log_file._extractStartTime(header))
        #self.fail("Test if the testcase is working.")

    def test_multipledays(self):
        """Elapsed seconds keep increasing monotonically across midnights."""
        log_file = LogFile.LogFile(None)
        log_file._readInit("Logging started at 3/27/2012 8:16:32 PM")
        # (timestamp line, expected elapsed seconds since the start time);
        # the sequence spans two midnight roll-overs (3/27 -> 3/29).
        expectations = [
            ("8:16:33 PM", 1.0),
            ("11:59:59 PM", 27.0 + 43.0*60.0 + 3.0*60.0*60.0),
            ("8:16:32 AM", 0.0 + 0.0*60.0 + 12.0*60.0*60.0),
            ("8:16:33 AM", 1.0 + 0.0*60.0 + 12.0*60.0*60.0),
            ("8:16:31 PM", 59.0 + 59.0*60.0 + 23.0*60.0*60.0),
            ("8:16:32 PM", 0.0 + 0.0*60.0 + 24.0*60.0*60.0),
            ("8:16:33 PM", 1.0 + 0.0*60.0 + 24.0*60.0*60.0),
            ("8:16:31 PM", 59.0 + 59.0*60.0 + 47.0*60.0*60.0),
            ("8:16:32 PM", 0.0 + 0.0*60.0 + 48.0*60.0*60.0),
            ("8:16:33 PM", 1.0 + 0.0*60.0 + 48.0*60.0*60.0),
        ]
        for line, elapsed_ref in expectations:
            self.assertEqual(elapsed_ref, log_file._extractDateTime(line))
        #self.fail("Test if the testcase is working.")
if __name__ == '__main__': #pragma: no cover
    # Run this module under nose, restricting coverage reporting to the
    # LogFile module being tested.
    import nose
    import sys
    argv = sys.argv
    argv.append("--cover-package=pyMcGill.experimental.nanopico.LogFile")
    nose.runmodule(argv=argv)
|
#!/home/alus_soft/matt_EMAN2/bin/python
####################################################################################################
from EMAN2 import *
import mrcfile
import matplotlib.pyplot as plt
import numpy as np
from tqdm import tqdm
####################################################################################################
####################################################################################################
# Load the roughly aligned 3D volumes
rightFil_vol=EMData()
#rightFil_vol.read_image('rightFil_box448_matched_to2Dclass.mrc')
rightFil_vol.read_image('rightFil_currBest_it0_locSearch_20.mrc')
leftFil_vol=EMData()
#leftFil_vol.read_image('leftFil_box448_matched_to2Dclass.mrc')
leftFil_vol.read_image('leftFil_currBest_it0_locSearch_09.mrc')
#display 2D average to make sure it is correct
class_avg = EMData()
#class_avg.read_image('run_it040_classes_corrPxSize.mrcs', 0)
class_avg.read_image('refined_2Dclass_corrPxSize.mrc')
# Mask the reference with the same 350-px-diameter circular mask applied to the
# projections below, so reference and projections are compared over the same area.
class_avg = class_avg.process('mask.ringmean', {'outer_radius':(350.0)/2.0})
####################################################################################################
# This function takes two 3D volumes as inputs. It adds them and projects them, then it masks the
# projection and compares to the 2D class average reference
def compute_loss_2Fils(fil_1, fil_2, class_avg):
    """Sum the two filament volumes, project along z, mask, and score against the
    2D class-average reference with EMAN2's 'ccc' comparator.  The script below
    minimizes this value (np.argmin), so it is treated as a loss."""
    proj = (fil_1+fil_2).project('standard',Transform())
    # Same 350-px-diameter circular mask that was applied to class_avg.
    proj_masked = proj.process('mask.ringmean', {'outer_radius':(350.0)/2.0})
    # NOTE(review): `process` returns a new image and this result is discarded, so
    # the next line appears to be a no-op — was `process_inplace` intended? Confirm.
    proj.process('normalize.toimage',{'to':proj_masked})
    #display([class_avg, proj_masked, proj])
    return(class_avg.cmp('ccc',proj_masked))
def save_projection(fil_1, fil_2, class_avg, file_name):
    """Write a 2-image MRC stack (reference class average + masked projection of
    fil_1+fil_2) to `file_name` for visual side-by-side inspection."""
    proj = (fil_1+fil_2).project('standard',Transform())
    proj_masked = proj.process('mask.ringmean', {'outer_radius':(350.0)/2.0})
    # Convert both EMAN2 images to numpy arrays so they can be stacked in one file.
    proj_masked_np = EMNumPy.em2numpy(proj_masked)
    class_avg_np = EMNumPy.em2numpy(class_avg)
    #display([class_avg, proj_masked])
    with mrcfile.new(file_name,overwrite=True) as mrc:
        mrc.set_data(np.asarray([class_avg_np,proj_masked_np]))
    return
# This function takes a set of projections of 1 of the 2 filaments and adds the projection of the other
# filament's volume. This composite projection is then masked and compared to the 2D class average reference
def compute_losses(projected_fils, fil_2_vol, class_avg):
    """Score each candidate projection against `class_avg`; returns a list of
    'ccc' values, one per entry of `projected_fils`."""
    cccs = []
    # The second filament is held fixed, so project it once outside the loop.
    proj_fil2 = fil_2_vol.project('standard',Transform())
    print('Computing cross-correlational coefficient between projections and 2D class average')
    for i in tqdm(range(0, len(projected_fils))):
        proj = projected_fils[i] + proj_fil2
        proj_masked = proj.process('mask.ringmean', {'outer_radius':(350.0)/2.0})
        # NOTE(review): the result of `process` is discarded here (no-op?) — same
        # pattern as in compute_loss_2Fils; confirm whether process_inplace was meant.
        proj.process('normalize.toimage',{'to':proj_masked})
        cccs.append(class_avg.cmp('ccc',proj_masked))
    return cccs
# This function generates a set of rotated and translated projection images of a given filament with
# defined step sizes
def sample_orientations(fil, ang_step, step_size, fil_ID):
    """Generate a local search grid of rotated and translated projections of `fil`.

    Rotations: all combinations of {-1, 0, +1} * ang_step degrees on the eman
    (alt, az, phi) axes (27 orientations).  Translations: an 11x11 grid of
    {-5..5} * step_size pixel x/y shifts applied to each projection.

    Parameters:
        fil       -- EMData 3D volume to sample around.
        ang_step  -- angular step size in degrees.
        step_size -- translational step size in pixels.
        fil_ID    -- label used only in progress messages.

    Returns:
        (projs_trans, trans_holder): the 27*121 translated projection images and,
        in matching order, the full 3D transform (rotation + x/y shift) that
        produced each one, so the caller can recover the best parameters.
    """
    import copy  # bug fix: `copy.deepcopy` below raised NameError — module was never imported
    print('Generating set of rotated and translated projection images of the '+ fil_ID +' filament.')
    print('An angular step size of ' + str(ang_step) + ' degrees and a translational step size of ' +
          str(step_size) +' pixels will be used')
    ts = []; fil_copies = []; projs = []; projs_trans = []; trans_holder = []
    print('\tCopying filament to array and generating 3D transform objects')
    for i in range(-1,2):
        for j in range(-1,2):
            for k in range(-1,2):
                ts.append(Transform({'type':'eman', 'alt':ang_step*i, 'az':ang_step*j, 'phi':ang_step*k}))
                fil_copies.append(fil.copy())
    print('\tApplying transformations to filament copies')
    for t, fil_copy in zip(ts, fil_copies):
        fil_copy.transform(t)
    print('\tProjecting ' + str(len(ts)) + ' maps')
    for fil_copy in fil_copies:
        projs.append(fil_copy.project('standard', Transform()))
    print('\tTranslating projections')
    for t, proj in zip(ts, projs):
        for j in range(-5,6):
            for k in range(-5,6):
                # Record the full transform (rotation + translation) so the winning
                # parameters can be identified from np.argmin over the losses.
                trans_params = {'tx':step_size*j, 'ty':step_size*k}
                full_trans_params = copy.deepcopy(t)
                full_trans_params.set_params(trans_params)
                trans_holder.append(full_trans_params)
                # Apply the xy shift to a copy of the projection and store it.
                temp_proj = proj.copy()
                temp_proj.transform(Transform(trans_params))
                projs_trans.append(temp_proj)
    return projs_trans, trans_holder
####################################################################################################
# Define some metaparameters, and initialize some values
# ANG_STEP is in degrees, TRANS_STEP in pixels; both are halved as the search converges.
curr_loss = 0; keep_going = True; ANG_STEP = 4; TRANS_STEP = 2.0; best_loss = 0
currBest_leftFil_vol = leftFil_vol.copy(); currBest_rightFil_vol = rightFil_vol.copy()
curr_it = 0
print('The beginning loss is: ' + str(compute_loss_2Fils(currBest_leftFil_vol, currBest_rightFil_vol, class_avg)))
save_projection(currBest_leftFil_vol, currBest_rightFil_vol, class_avg, 'locSearch_refinedRef_initial.mrcs')
# Run the functions defined above iteratively
# Coordinate-descent style local search: refine the LEFT filament's pose while the
# right filament is held fixed, shrinking step sizes whenever no improvement is found.
while(keep_going):
    left_fil_copies, trans_params = sample_orientations(currBest_leftFil_vol, ANG_STEP, TRANS_STEP, 'left')
    cccs = compute_losses(left_fil_copies, currBest_rightFil_vol, class_avg)
    # Lowest 'ccc' value is treated as the best match.
    updated_trans = trans_params[np.argmin(cccs)]
    print('The transformation to apply to the current best filament orientation is: ')
    print(updated_trans)
    currBest_leftFil_vol = currBest_leftFil_vol.copy()
    currBest_leftFil_vol.transform(updated_trans)
    curr_loss = compute_loss_2Fils(currBest_leftFil_vol, currBest_rightFil_vol, class_avg)
    print('The loss from the current iteration is: ' + str(curr_loss))
    # Accept only improvements larger than 0.0005; otherwise shrink the steps,
    # and declare convergence once both steps hit their minimum.
    if(curr_loss+0.0005 < best_loss):
        best_loss = curr_loss
    elif(TRANS_STEP>1.0):
        TRANS_STEP = TRANS_STEP/2.0
        print('Decreasing translational step size to: ' + str(TRANS_STEP))
    elif(ANG_STEP>0.5):
        ANG_STEP = ANG_STEP/2.0
        #TRANS_STEP = TRANS_STEP/2.0
        print('Decreasing angular step size to: ' + str(ANG_STEP))
    elif(TRANS_STEP>0.5):
        TRANS_STEP = TRANS_STEP/2.0
        print('Decreasing translational step size to: ' + str(TRANS_STEP))
    else:
        keep_going = False
        print('Converged for the left filament. Now updating the right filament')
    # Per-iteration checkpoints of the comparison stack and the refined volume.
    save_projection(currBest_leftFil_vol, currBest_rightFil_vol, class_avg, 'locSearch_refinedRef_leftFil_'+str(curr_it).zfill(2)+'.mrcs')
    currBest_leftFil_vol.write_image( 'leftFil_currBest_it1_locSearch_'+str(curr_it).zfill(2)+'.mrc')
    curr_it = curr_it+1
print('The best loss after fitting the first filament is: ' + str(compute_loss_2Fils(currBest_leftFil_vol, currBest_rightFil_vol, class_avg)))
#save_projection(currBest_leftFil_vol, currBest_rightFil_vol, class_avg, 'locSearch_leftFil_0.mrcs')
# Switch to the right filament
# NOTE(review): best_loss is NOT reset here, so the right-filament search starts
# from the left search's best value — confirm that is the intended baseline.
keep_going = True; ANG_STEP = 4; TRANS_STEP = 2.0;
# Same local search as above, now refining the RIGHT filament with the left fixed.
while(keep_going):
    right_fil_copies, trans_params = sample_orientations(currBest_rightFil_vol, ANG_STEP, TRANS_STEP, 'right')
    cccs = compute_losses(right_fil_copies, currBest_leftFil_vol, class_avg)
    updated_trans = trans_params[np.argmin(cccs)]
    print('The transformation to apply to the current best filament orientation is: ')
    print(updated_trans)
    currBest_rightFil_vol = currBest_rightFil_vol.copy()
    currBest_rightFil_vol.transform(updated_trans)
    curr_loss = compute_loss_2Fils(currBest_rightFil_vol, currBest_leftFil_vol, class_avg)
    print('The loss from the current iteration is: ' + str(curr_loss))
    if(curr_loss+0.0005 < best_loss):
        best_loss = curr_loss
    elif(TRANS_STEP>1.0):
        TRANS_STEP = TRANS_STEP/2.0
        print('Decreasing translational step size to: ' + str(TRANS_STEP))
    elif(ANG_STEP>0.5):
        ANG_STEP = ANG_STEP/2.0
        #TRANS_STEP = TRANS_STEP/2.0
        print('Decreasing angular step size to: ' + str(ANG_STEP))
    elif(TRANS_STEP>0.5):
        TRANS_STEP = TRANS_STEP/2.0
        print('Decreasing translational step size to: ' + str(TRANS_STEP))
    else:
        keep_going = False
        print('Converged for the current filament')
    save_projection(currBest_leftFil_vol, currBest_rightFil_vol, class_avg, 'locSearch_refinedRef_rightFil_'+str(curr_it).zfill(2)+'.mrcs')
    currBest_rightFil_vol.write_image('rightFil_currBest_it1_locSearch_'+str(curr_it).zfill(2)+'.mrc')
    curr_it = curr_it+1
print('The best loss after fitting the second filament is: ' + str(compute_loss_2Fils(currBest_leftFil_vol, currBest_rightFil_vol, class_avg)))
#save_projection(currBest_leftFil_vol, currBest_rightFil_vol, class_avg, 'locSearch_rightFil_0.mrcs')
"""
left_fil_copies, trans_params = sample_orientations(leftFil_vol, ANG_STEP, TRANS_STEP, 'left')
cccs = compute_losses(left_fil_copies, rightFil_vol, class_avg)
updated_trans = trans_params[np.argmin(cccs)]
currBest_leftFil_vol = leftFil_vol.copy()
currBest_leftFil_vol.transform(updated_trans)
# Verify results are improving
compute_loss_2Fils(leftFil_vol, rightFil_vol, class_avg)
curr_loss = compute_loss_2Fils(currBest_leftFil_vol, rightFil_vol, class_avg)
print('The loss from the current iteration is: ' + str(curr_loss))
if(curr_loss+0.00001 < best_loss):
best_loss = curr_loss
elif(ANG_STEP>0.5):
ANG_STEP = ANG_STEP/2.0
#TRANS_STEP = TRANS_STEP/2.0
print('Decreasing angular step size to: ' + str(ANG_STEP))
else:
keep_going = False
"""
|
# train.py
from keras.preprocessing import sequence
import nltk
import numpy as np
import pandas as pd
import os
import random
import tensorflow as tf
from config.msrvtt_config import *
from nets.obj_feat_att_net import VideoCaptionGenerator
from utils.msrvtt_utils import get_video_data, preProBuildWordVocab
def train(prev_model_path=None):
    """Train the video-caption generator on MSR-VTT object features.

    NOTE(review): this module uses Python-2-only constructs (`print` statements,
    list-returning `map`) and TF1 sessions; it will not run under Python 3 as-is.

    prev_model_path -- optional checkpoint path to restore before training.
    """
    captions = get_video_data(video_data_path_train, video_feat_path_train)
    wordtoix, ixtoword, bias_init_vector = \
        preProBuildWordVocab(captions, word_count_threshold=3)
    # Persist the index->word mapping for use at inference time.
    np.save('./data/ixtoword', ixtoword)
    model = VideoCaptionGenerator(
        dim_image=dim_image,
        n_words=len(wordtoix),
        dim_embed=dim_embed,
        dim_hidden=dim_hidden,
        batch_size=batch_size,
        dim_obj_feats=dim_obj_feats,
        n_obj_feats=n_obj_feats,
        #encoder_max_sequence_length=encoder_step,
        decoder_max_sentence_length=decoder_step,
        bias_init_vector=bias_init_vector)
    tf_loss, tf_obj_feats, tf_video_mask, tf_caption, tf_caption_mask, _, _ = \
        model.build_model()
    sess = tf.InteractiveSession()
    saver = tf.train.Saver(max_to_keep=5)
    train_op = tf.train.AdamOptimizer(learning_rate).minimize(tf_loss)
    sess.run(tf.global_variables_initializer())
    if not prev_model_path is None:
        saver.restore(sess, prev_model_path)
    # Setup TensorBoard.
    # NOTE(review): file_writer is created but never written to or closed here.
    file_writer = tf.summary.FileWriter('logs/train.log', sess.graph)
    for epoch in range(n_epochs):
        # Select one sentence randomly per video for this epoch.
        current_videos, current_sents = list(), list()
        for video_id, sents in captions.items():
            current_videos.append(video_id)
            # Randomly select one sentence.
            current_sents.append(sents[np.random.choice(len(sents))])
        # Shuffle training data.
        videos_sents = list(zip(current_videos, current_sents))
        random.shuffle(videos_sents)
        current_videos, current_sents = zip(*videos_sents)
        len_current_videos = len(current_videos)
        # Iterate full batches only; a trailing partial batch is dropped.
        for start,end in zip(range(0, len_current_videos, batch_size),
                             range(batch_size, len_current_videos, batch_size)):
            batch_videos = current_videos[start:end]
            batch_sents = current_sents[start:end]
            # Frame feature.
            #current_feats = np.zeros((batch_size, encoder_step, dim_image))
            #current_feats_vals = map(lambda vid: np.load(os.path.join(video_feat_path, vid)), current_videos)
            # Object features: one .mp4.npy array per video.
            current_obj_feats = np.zeros((batch_size, n_obj_feats, dim_obj_feats))
            current_obj_feats_vals = map(
                lambda vid: np.load(os.path.join(video_feat_path_train, vid+'.mp4.npy')),
                batch_videos)
            current_video_masks = np.zeros((batch_size, n_obj_feats))
            #for ind, feat in enumerate(current_feats_vals):
                #interval_frame = max(feat.shape[0]/n_frame_step, 1)
                #current_feats[ind][:len(current_feats_vals[ind])] = feat[
                #    range(0, min(n_frame_step*interval_frame, max(feat.shape[0]), interval_frame), :]
            #    current_feats[ind][:len(current_feats_vals[ind])] = feat
            #    current_video_masks[ind][:len(current_feats_vals[ind])] = 1
            # Copy up to n_obj_feats object vectors per video; mask marks real rows.
            for ind, feat in enumerate(current_obj_feats_vals):
                if (not feat is None) and len(feat.shape) == 2:
                    n_obj = min(n_obj_feats, feat.shape[0])
                    current_obj_feats[ind][:n_obj] = feat[:n_obj]
                    current_video_masks[ind][:n_obj] = 1
            #for idx, cc in enumerate(batch_sents):
            #    current_captions[idx] = cc.replace('.', '').replace(',', '')
            # Map caption tokens to vocabulary indices, dropping OOV words.
            current_captions_ind = map(
                #lambda x : [wordtoix[word] for word in x.lower().split(' ') if word in wordtoix],
                lambda x : [wordtoix[word] for word in x if word in wordtoix],
                batch_sents)
            current_caption_matrix = sequence.pad_sequences(
                current_captions_ind, padding='post', maxlen=decoder_step-1, value=0)
            # Append one zero column as the end-of-sequence slot.
            current_caption_matrix = np.hstack(
                [current_caption_matrix, np.zeros([len(current_caption_matrix), 1])]).astype(int)
            current_caption_masks = np.zeros(
                (current_caption_matrix.shape[0], current_caption_matrix.shape[1]))
            # Mask covers each caption's tokens plus one extra (EOS) position.
            nonzeros = np.array(map(lambda x: (x != 0).sum()+1, current_caption_matrix))
            for ind, row in enumerate(current_caption_masks):
                row[:nonzeros[ind]] = 1
            _, loss_val = sess.run(
                [train_op, tf_loss],
                feed_dict={
                    #tf_video: current_feats,
                    tf_obj_feats: current_obj_feats,
                    tf_video_mask : current_video_masks,
                    tf_caption: current_caption_matrix,
                    tf_caption_mask: current_caption_masks
                })
            print loss_val
        # Checkpoint every 100 epochs.
        if np.mod(epoch+1, 100) == 0:
            print "Epoch ", epoch, " is done. Saving the model ..."
            saver.save(sess, os.path.join(model_path, 'model'), global_step=epoch)
if __name__=="__main__":
train(prev_model_path=init_model_path)
|
# data_generation/common_nlu/common_intent_creators.py (repo: ivanmkc/helpdesk-assistant)
from data_generation.common_nlu.parameterized_intents import (
ParameterizedIntentCreator,
)
from actions import find_objects_action as find_objects_action
# "Does X exist / tell me about X" questions; <context> fills the object
# name-or-type slot consumed by find_objects_action.
intent_is_there_a_type_creator = ParameterizedIntentCreator(
    name="intent_is_there_a_type_with_entities",
    parameterized_examples=[
        "Is there <context>?",
        "Is there <context> around here?",
        "I want to see <context>",
        "Do you know of any <context>?",
        "Any <context>?",
        "Have you heard about <context>?",
        "You wouldn't know <context>, would you?",
        "I've heard about <context>",
        "I've heard there was <context>?",
        "I've heard there was <context>. Know anything about that?",
        "Do you know about <context>",
        "Tell me about any <context>",
        "Can you point me towards <context>",
        "What is <context>?",
        "What's <context>?",
        "Tell me about <context>.",
        "I want to learn more about <context>",
        "What about other <context>",
        "What about <context>",
    ],
    entity_name=find_objects_action.SLOT_OBJECT_NAME_OR_TYPE,
)
# "Where can I find/do X" questions — a place offering <context>.
intent_is_there_a_place_with_thing_creator = ParameterizedIntentCreator(
    name="intent_is_there_a_place_with_context_with_entities",
    parameterized_examples=[
        "Is there a place to <context>?",
        "Is there somewhere to see <context>?",
        "Do you know of any places for <context>?",
        "I want to <context>?",
        "Where can I find <context>?",
        "Where can I get <context>?",
        "Where would I go for <context>?",
        "I'm looking for <context>?",
        "Is there somewhere with <context>",
        "I want to see <context>",
        "Are there <context>?",
        "Are there <context> around here?",
    ],
    entity_name=find_objects_action.SLOT_OBJECT_NAME_OR_TYPE,
)
# Opening-hours questions; resolves the "hours" attribute of the referenced object.
# NOTE(review): "happpen" below is misspelled in two examples — confirm whether the
# typo is intentional (typo tolerance in training data) before changing it.
intent_when_is_that_creator = ParameterizedIntentCreator(
    name="intent_what_hours_with_entities",
    parameterized_examples=[
        "What are the hours of <context>?",
        "What time does <context> happpen?",
        "When does <context> open?",
        "When does <context> close?",
        "What are <context> hours?",
        "When does <context> close?",
        "What time does <context> open until?",
        "What time does <context> close?",
        "Is <context> still open?",
        "When would <context> be open?",
        "When would <context> open?",
        "When would <context> close?",
        "When is that?",
        "What are the hours?",
        "What time does that happpen?",
        "When does it open?",
        "When does it close?",
        "What are the hours?",
        "When does it close?",
        "What time does it open?",
        "What time does it open until?",
        "What time does it close?",
        "Is it still open?",
        "What would be the hours?",
        "What would the hours be?",
        "Do you know when it opens?",
        "When are they open?",
        "When do they close?",
        "When would they be closing?",
        "What time does are they open?",
        "Are they open now?",
    ],
    entity_name=find_objects_action.SLOT_OBJECT_NAME_OR_TYPE,
    object_attribute="hours",
)
# Price questions; resolves the "price" attribute of the referenced object.
intent_what_price_creator = ParameterizedIntentCreator(
    name="intent_what_price_with_entities",
    parameterized_examples=[
        "How much is <context>?",
        "Is <context> expensive?",
        "Is <context> cheap?",
        "How much does <context> cost?",
        "What's the cost of <context>?",
        "What's the price of <context>?",
        "How much for <context>?",
        "What price for <context>?",
        "How much is it?",
        "Is it expensive?",
        "How much does it cost?",
        "What's the cost?",
        "What's the price?",
        "How much?",
        "What price?",
        "What cost?",
    ],
    entity_name=find_objects_action.SLOT_OBJECT_NAME_OR_TYPE,
    object_attribute="price",
)
# Duration questions; resolves the "duration" attribute of the referenced object.
intent_what_duration_creator = ParameterizedIntentCreator(
    name="intent_what_duration_with_entities",
    parameterized_examples=[
        "How long is <context>?",
        "What's the length of <context>?",
        "What's the duration of <context>?",
        "How much time does <context> take?",
        "How long?",
        "How long is it?",
        "What's the length?",
        "How much time does it take?",
    ],
    entity_name=find_objects_action.SLOT_OBJECT_NAME_OR_TYPE,
    object_attribute="duration",
)
# Directions questions; resolves the "directions" attribute of the referenced object.
intent_directions_creator = ParameterizedIntentCreator(
    name="intent_directions_with_entities",
    parameterized_examples=[
        "How do you get to <context>?",
        "What's the way to <context>?",
        "How do I go to <context>?",
        "What are the directions to <context>?",
        "Do you know where <context> is?",
        "Do you know how to get to <context>?",
        "Where is the closest <context>",
        "Where is <context>",
        "Cool! Where is it?",
        "Where is it?",
        "What is that?",
        "How do you get there?",
        "Can you give directions?",
        "How does one get there?",
        "Any idea how to get there?",
    ],
    entity_name=find_objects_action.SLOT_OBJECT_NAME_OR_TYPE,
    object_attribute="directions",
)
# Opinion questions; resolves the "opinion" attribute of the referenced object.
# NOTE(review): "What your opinion of" below is missing "'s" — confirm whether the
# grammatical slip is intentional before editing training data.
intent_opinion_creator = ParameterizedIntentCreator(
    name="intent_opinion_with_entities",
    parameterized_examples=[
        "What do you think of <context>?",
        "What your opinion of <context>?",
        "Do you like <context>?",
        "What's your opinion of <context>?",
        "Don't you like <context>?",
        "What do you think about <context>?",
        "What are your thoughts on <context>?",
        "Can I hear more about <context>?",
        "Do you have more details about <context>",
        "What do you think about it?",
        "Do you like it?",
        "What's your opinion of it?",
        "What's your opinion?",
        "What do you think?",
        "What are your thoughts?",
        "Thoughts?",
        "You like?",
    ],
    entity_name=find_objects_action.SLOT_OBJECT_NAME_OR_TYPE,
    object_attribute="opinion",
)
# "What's there to do" questions; resolves the "details" attribute.
intent_details_creator = ParameterizedIntentCreator(
    name="intent_details",
    parameterized_examples=[
        "What's there to do at <context>?",
        "What's in <context>",
        "What things do you do at <context>?",
        "Tell me more about the <context>",
        "What's there to know about <context>",
        "What's there to do?",
        "What do you do here?",
        "What can you do there",
        "Is there anything to do?",
        "What's there to do?",
        "What activities are there?",
        "What kind of things do you do at <context>",
        "What's there to do around here",
        "Tell me more about this place",
    ],
    entity_name=find_objects_action.SLOT_OBJECT_NAME_OR_TYPE,
    object_attribute="details",
)
# intent_ill_have_context_creator = ParameterizedIntentCreator(
# name="intent_i_will_have_with_entities",
# parameterized_examples=[
# "Sure, I'll get <context>",
# "I'll have <context> then",
# "<context> sounds great",
# "Yes, I'll get <context>",
# "I'll have <context>",
# "I'll take <context>",
# "Ya, <context> sounds good",
# ],
# entity_name=find_objects_action.SLOT_OBJECT_NAME_OR_TYPE,
# )
buy_synonyms = "[buy|order|get|have|need|want|book|take]"
intent_i_want_to_buy_creator = ParameterizedIntentCreator(
name="intent_i_want_to_buy_with_entities",
parameterized_examples=[
f"I want to {buy_synonyms} <number> <context>",
f"Can I {buy_synonyms} <number> <context>?",
f"I'll {buy_synonyms} <number> <context>?",
"Give me <number> <context>?",
f"I {buy_synonyms} <number> <context>?",
f"We {buy_synonyms} get <number> <context>",
"Let me [have|get] <number> <context>",
f"I {buy_synonyms} <number> <context>",
f"Sure, I'll {buy_synonyms} <number> <context>",
f"I'll {buy_synonyms} <number> <context> then",
"<number> <context> sounds great",
f"Yes, I'll {buy_synonyms} <number> <context>",
f"I'll {buy_synonyms} <number> <context>",
f"I'll {buy_synonyms} <number> <context>",
"Ya, <context> sounds good",
f"I'll {buy_synonyms} <number_only>",
f"I'll {buy_synonyms} it",
f"Can I {buy_synonyms} <number_only>?",
"Give us <number> please",
f"I'll {buy_synonyms} <number> <context>",
f"I want to {buy_synonyms} <number> <context>",
f"Can I {buy_synonyms} <context>",
f"Can I {buy_synonyms} for <number> people",
f"I {buy_synonyms} <number_only>",
f"Ok... I {buy_synonyms} to do the <context>",
f"Sure thing... I {buy_synonyms} the <context>",
],
entity_name=find_objects_action.SLOT_OBJECT_NAME_OR_TYPE,
)
# Bare "<number> <context>" utterances (e.g. elliptical answers like "two tickets").
intent_context_only_creator = ParameterizedIntentCreator(
    name="intent_context_only",
    parameterized_examples=[
        "<number> <context>",
    ],
    entity_name=find_objects_action.SLOT_OBJECT_NAME_OR_TYPE,
)
# Registry of active creators. NOTE(review): intent_i_want_to_buy_creator and
# intent_context_only_creator are defined above but not registered here (and
# several entries are commented out) — confirm this is intentional.
intent_creators = [
    # intent_is_there_a_type_creator,
    # intent_what_is_context_creator,
    # intent_what_about_context_creator,
    intent_directions_creator,
    intent_opinion_creator,
    intent_details_creator,
    # intent_is_there_a_place_with_thing_creator,
    intent_when_is_that_creator,
    intent_what_price_creator,
    intent_what_duration_creator,
    # intent_ill_have_context_creator,
]
|
# -*- coding: utf-8 -*-
# Package version, assembled semver-style from the components below.
__ver_major__ = 0
__ver_minor__ = 3
__ver_patch__ = 0
__ver_sub__ = "dev"  # NOTE(review): not folded into __version__ — confirm intended.
__version__ = "%d.%d.%d" % (__ver_major__, __ver_minor__, __ver_patch__)
"""
:authors: <NAME>
:copyright: Copyright 2019, BYaka
:license: Apache License 2.0
:license:
Copyright 2019 BYaka
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from ..utils import *
from ..DBBase import DBBase
def __init():
    # Extension entry point: returns the DB class and the name(s) it registers under.
    return DBLazyIndex, ('LazyIndex',)
class LazyChilds(DictInterface):
    """Dict-like container of (props, node) pairs whose halves are materialized
    lazily as nested LazyChilds instances on access.

    A change callback `cb(container, key, value, cb_data)` fires on item set and
    delete; unless `auto_lazy` is set, the callback is disarmed after it fires once.
    """
    # Private attributes are name-mangled (e.g. _LazyChilds__store); Python mangles
    # the double-underscore __slots__ entries the same way.
    __slots__ = ('__store', '__cb', '__cb_data', '__auto_lazy', '__is_node')
    def __init__(self, mapping=(), is_node=True, cb=None, cb_data=None, auto_lazy=False, **kwargs):
        # NOTE(review): relies on `self.workspace` being supplied by DictInterface
        # or the surrounding framework — it is not defined in this class.
        self.workspace.log(2, 'Extension `LazyIndex` not effective and deprecated!')
        #? this forces a copy of `data`; it may be better to make that optional,
        #  e.g. for calls coming from `__setitem__`
        self.__store=dict(mapping, **kwargs)
        self.__cb=None if not callable(cb) else cb
        self.__cb_data=cb_data
        self.__auto_lazy=auto_lazy
        self.__is_node=is_node
    def __copy__(self):
        # A shallow copy exposes the raw backing dict, not a LazyChilds.
        return self.__store.copy()
    def __cb_props(self, v, _1, _2, k):
        # Child "props" half changed: write the updated pair back into our store.
        _props, _node=self.get(k, (None, None))
        if _props is v or (_props is None and not v): return
        props, node=v, _node
        self.__store[k]=(props, node)
    def __cb_node(self, v, _1, _2, k):
        # Child "node" half changed: write the updated pair back into our store.
        _props, _node=self.get(k, (None, None))
        if _node is v or (_node is None and not v): return
        props, node=_props, v
        self.__store[k]=(props, node)
    def __iter__(self):
        return iter(self.__store)
    def __len__(self):
        return len(self.__store)
    def __getitem__(self, k):
        v=self.__store[k]
        if self.__is_node:
            auto_lazy=self.__auto_lazy
            props, node=v
            # Materialize missing halves as empty lazy wrappers whose callbacks
            # write back into this store. NOTE(review): the wrappers are not cached
            # here, so every access builds fresh instances for missing halves.
            if props is None:
                props=LazyChilds(is_node=False, auto_lazy=auto_lazy, cb=self.__cb_props, cb_data=k)
            if node is None:
                node=LazyChilds(is_node=True, auto_lazy=auto_lazy, cb=self.__cb_node, cb_data=k)
            return props, node
        else:
            return v
    def __setitem__(self, k, v):
        if self.__is_node:
            props, node=v
            auto_lazy=self.__auto_lazy
            # Wrap truthy halves in LazyChilds; falsy halves are normalized to None.
            # Without auto_lazy the child's cb is `False` (i.e. no callback).
            if props:
                if not isinstance(props, LazyChilds):
                    props=LazyChilds(props, is_node=False, auto_lazy=auto_lazy, cb=auto_lazy and self.__cb_props, cb_data=k)
            else: props=None
            if node:
                if not isinstance(node, LazyChilds):
                    node=LazyChilds(node, is_node=True, auto_lazy=auto_lazy, cb=auto_lazy and self.__cb_node, cb_data=k)
            else: node=None
            v=(props, node)
        self.__store[k]=v
        if self.__cb is not None:
            self.__cb(self, k, v, self.__cb_data)
            # One-shot callback unless auto_lazy keeps it armed.
            if not self.__auto_lazy: self.__cb=None
    def __delitem__(self, k):
        del self.__store[k]
        if self.__cb is not None:
            self.__cb(self, k, None, self.__cb_data)
            if not self.__auto_lazy: self.__cb=None
    def __contains__(self, k):
        return k in self.__store
    def __repr__(self):
        return '{0}({1})'.format(type(self).__name__, repr(self.__store))
class LazyChildsAuto(DictInterface):
    """Variant of the lazy container with callbacks permanently armed (auto_lazy=True).

    NOTE(review): the base class is DictInterface, yet the constructor forwards
    LazyChilds.__init__-style keywords — confirm the intended base class.
    """
    def __init__(self, mapping=(), cb=None, **kwargs):
        # Bug fix: `mapping` and `cb` were previously discarded (hard-coded to
        # `()` and None), so initial contents and the callback were silently lost.
        super(LazyChildsAuto, self).__init__(mapping=mapping, auto_lazy=True, cb=cb, **kwargs)
class DBLazyIndex(DBBase):
    """DB extension that swaps the index-node container for the lazy variant."""
    def _init(self, *args, **kwargs):
        res=super(DBLazyIndex, self)._init(*args, **kwargs)
        #! make `auto_lazy` configurable for the class (i.e. a choice between
        #  `LazyChildsAuto` and `LazyChilds`)
        # NOTE(review): the leading underscores mangle this attribute to
        # `_DBLazyIndex___indexNodeClass` — confirm DBBase actually reads that name.
        self.___indexNodeClass=LazyChilds
        self.supports.lazyIndex=True
        return res
|
#!/usr/bin/python2.7
# -*- coding: utf-8 -*-
# vim:ts=4:sw=4:softtabstop=4:smarttab:expandtab
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
slogsink is a module (that is used in conjunction with the slogsink C
program) that receives syslog messages over a network. This is not a full
syslog implementation, but a simple syslog protocol receiver that listens
on the standard syslog/udp port for incoming messages and forwards them
to your callback function.
The slogsink C program is used because the syslog port is a privileged
port, and requires root access to open. The slogsink program should have
been installed SUID to root.
"""
import os, sys, struct
import re
from errno import EAGAIN
from pycopia import socket
from pycopia import asyncio
from pycopia import UI
from pycopia import CLI
from pycopia import timelib
from pycopia.ipv4 import IPv4
# Shorthand for the current-time helper used throughout this module.
now = timelib.now
# Syslog facility codes to names (12-15 are not mapped here).
FACILITY = {
    0: "kern",
    1: "user",
    2: "mail",
    3: "daemon",
    4: "auth",
    5: "syslog",
    6: "lpr",
    7: "news",
    8: "uucp",
    9: "cron",
    10: "authpriv",
    11: "ftp",
    16: "local0",
    17: "local1",
    18: "local2",
    19: "local3",
    20: "local4",
    21: "local5",
    22: "local6",
    23: "local7",
}
# Syslog severity codes to names.
PRIORITY = {
    0: "emerg",
    1: "alert",
    2: "crit",
    3: "err",
    4: "warning",
    5: "notice",
    6: "info",
    7: "debug",
}
# Default listen addresses: the privileged syslog/udp port, and an unprivileged
# fallback used when not running as root.
_default_addr=("", 514)
_user_default_addr = ("", 10514)
class SyslogMessage(object):
    """One syslog message: text plus facility, priority, host, timestamp, and tag."""
    def __init__(self, msg, fac, pri, host=None, timestamp=None, tag=""):
        self.message = msg
        self.facility = int(fac)
        self.priority = int(pri)
        self.host = host
        self.timestamp = timestamp
        self.tag = tag
    def __str__(self):
        # Pipe-delimited display form: <timestamp>|<host>|<tag>: <message>.
        return "%.2f|%s|%s: %s" % (self.timestamp, self.host, self.tag, self.message)
    def encode(self):
        """Serialize to wire form: <PRI>TIMESTAMP HOST TAG: MSG."""
        ts = timelib.strftime("%b %d %H:%M:%S", timelib.localtime(self.timestamp))
        # Bug fix: the format string has five conversions but only four values were
        # supplied (a guaranteed TypeError); the missing field is the hostname.
        return "<%s>%s %s %s: %s" % ((self.facility<<3) + self.priority, ts, self.host, self.tag, self.message)
    def __repr__(self):
        return "%s(%r, %r, %r, %r, %r)" % (self.__class__.__name__, self.message, self.facility, self.priority,
            self.host, self.timestamp)
# Matches "<PRI>rest-of-message"; PRI encodes facility*8 + priority.
# Fix: raw string — "\d" in a plain string literal is an invalid escape sequence
# (DeprecationWarning, and an error in newer Python versions).
_MSG_RE = re.compile(r"<(\d+?)>(.*)")
def parse_message(timestamp, srcip, rawmsg):
    """Parse a raw syslog datagram into a SyslogMessage.

    timestamp -- receive time (float seconds).
    srcip     -- sender address, stored as the message host.
    rawmsg    -- the raw "<PRI>text" datagram payload.
    Raises AttributeError if `rawmsg` has no <PRI> prefix (the search returns None).
    """
    mo = _MSG_RE.search(rawmsg)
    code = int(mo.group(1))
    # PRI packs facility in the high bits and priority in the low three.
    fac, pri = code>>3, code & 0x07
    msg = mo.group(2)
    return SyslogMessage(msg, fac, pri, srcip, timestamp)
class SlogDispatcher(socket.AsyncSocket):
    """Async reader for the SUID slogsink helper's UNIX socket.

    Connects to /tmp/.slog-<port> and decodes the helper's framed records:
    4-byte source IP and 2-byte source port in network order, then a 4-byte
    payload length in host byte order, then the raw syslog message.
    """
    def __init__(self, callback, addr=_default_addr):
        super(SlogDispatcher, self).__init__(socket.AF_UNIX, socket.SOCK_STREAM)
        loc, port = addr
        self.callback = callback # should be a callable object
        self.connect("/tmp/.slog-%d" % (port,))
    def writable(self):
        return False
    def readable(self):
        return True
    def error_handler(self, ex, val, tb):
        # Python 2 print syntax — this module predates Python 3.
        print >> sys.stderr, "*** Dispatcher:", ex, val
    def read(self):
        ip = struct.unpack("!i", self.recv(4))[0] # network byte-order
        port = struct.unpack("!h", self.recv(2))[0] # network byte-order
        length = struct.unpack("i", self.recv(4))[0] # host byte-order
        msg = self.recv(length)
        # A short read here would mean a framing error from the helper.
        assert length == len(msg)
        return self.callback(parse_message(now(), IPv4(ip), msg))
class UserSlogDispatcher(socket.AsyncSocket):
    """Async UDP listener for syslog on an unprivileged port (no helper needed)."""
    def __init__(self, callback, addr=_user_default_addr):
        super(UserSlogDispatcher, self).__init__(socket.AF_INET, socket.SOCK_DGRAM)
        self.callback = callback # should be a callable object
        self.bind(addr)
    def writable(self):
        return False
    def readable(self):
        return True
    def error_handler(self, ex, val, tb):
        # Python 2 print syntax — this module predates Python 3.
        print "*** Dispatcher:", ex, val
    def read(self):
        # Drain every pending datagram without blocking; EAGAIN means "no more".
        try:
            while 1:
                msg, addr = self.recvfrom(4096, socket.MSG_DONTWAIT)
                self.callback(parse_message(now(), IPv4(addr[0]), msg))
        except socket.SocketError, err:
            if err[0] == EAGAIN:
                return
            else:
                raise
class Syslog(object):
    """A syslog program object."""
    def __init__(self, files=None):
        # Open log sinks; entries may be file objects added via addlog as well.
        self._FLIST = []
        if files:
            assert type(files) is list
            self.openlogs(files)
    logfiles = property(lambda s: s._FLIST[:])
    def addlog(self, fp):
        """Add an already-open file-like object as a log sink."""
        self._FLIST.append(fp)
    def openlog(self, fname):
        """Open `fname` for writing and add it as a sink; failure is non-fatal."""
        from pycopia import logfile
        try:
            fp = logfile.open(fname, "w")
        except: # non fatal
            ex, val, tb = sys.exc_info()
            print >>sys.stderr, "Warning: could not open %r for writing: %s (%s)." % (fname, ex, val)
        else:
            self._FLIST.append(fp)
    def openlogs(self, flist):
        for fn in flist:
            self.openlog(fn)
    def message(self, tag, msg, facility=5, priority=6):
        """Inject a locally generated message (default facility syslog/info)."""
        self.dispatch(SyslogMessage(msg, facility, priority, timestamp=now(), tag=tag))
    def write(self, msg):
        for fp in self._FLIST:
            fp.write(msg)
    def close(self):
        # Close real files only; sinks like <stdout> (names starting "<") are kept open.
        while self._FLIST:
            fn = self._FLIST.pop()
            if not fn.name.startswith("<"):
                fn.close()
    def flush(self):
        # Give the poller a chance to deliver pending events before flushing sinks.
        asyncio.poller.poll(0)
        for fp in self._FLIST:
            fp.flush()
    def dispatch(self, msg):
        """Write one SyslogMessage, newline-terminated, to every sink."""
        for fp in self._FLIST:
            fp.write(str(msg)) # XXX
            fp.write("\n")
class SyslogApp(object):
    """Interactive CLI wrapper around a Syslog instance."""
    def __init__(self, syslog, ps1="%Isyslog%N> "):
        self.syslog = syslog
        theme = UI.DefaultTheme(ps1)
        self.parser = CLI.get_cli(SyslogCLI, paged=False, theme=theme, aliases=_CMDALIASES)
        self.parser.command_setup(syslog, ps1)
    def mainloop(self, debug=False):
        """Run the CLI loop; log the exit reason, and on unexpected exceptions
        optionally drop into the post-mortem debugger when debug=True."""
        try:
            self.parser.interact()
        except KeyboardInterrupt:
            self.syslog.message("EXITING", "User interrupt")
            return
        # NOTE(review): bare except also traps SystemExit/GeneratorExit — confirm
        # that logging those as "internal error" is intended.
        except:
            exc, val, tb = sys.exc_info()
            self.syslog.message("EXCEPTION", "internal error: %s(%s)" % (exc, val))
            if debug:
                from pycopia import debugger
                debugger.post_mortem(tb)
        else:
            self.syslog.message("EXITING", "User exited")
class SyslogCLI(CLI.BaseCommands):
    # NOTE: the method docstrings double as the CLI help text, so they are left
    # exactly as written.  self._obj is the Syslog instance bound by command_setup.
    def message(self, argv):
        """message <text>
    Place a manual entry in the log file."""
        self._obj.message("USER", " ".join(argv[1:]))
    def flush(self, argv):
        """flush
    Flush all of the log files."""
        self._obj.flush()
    def ctime(self, argv):
        """ctime <timeval>
    Expand <timeval> (a float) to a readable form."""
        t = float(argv[1])
        self._print(timelib.ctime(t))
# Command aliases for the CLI: "log" invokes the "message" command.
_CMDALIASES = {"log":["message"]}
def start_slogsink(port=514):
    """Launch the privileged slogsink helper daemonized and give it time to start.

    Returns the exit status of the launching shell command.
    """
    from pycopia import scheduler
    cmd = "daemonize slogsink"
    if port != 514:
        # Non-default port must be passed through to the helper.
        cmd = "daemonize slogsink %d" % (port,)
    rv = os.system(cmd)
    # Brief pause so the helper's socket exists before we try to connect.
    scheduler.sleep(2)
    return rv
def default_logger(message):
    """Fallback callback: print each received message to stdout, one per line,
    flushing immediately so output is not buffered."""
    out = sys.stdout
    out.write("%s\n" % (message,))
    out.flush()
def get_dispatcher(addr, callback=default_logger):
    """Create, register with the asyncio poller, and return the dispatcher
    appropriate for `addr`: the SUID slogsink helper path for privileged ports
    when not running as root, otherwise a plain UDP listener."""
    port = addr[1]
    if port <= 1024 and os.getuid() > 0:
        # Not root: spawn the SUID helper to own the privileged port for us.
        start_slogsink(port)
        slogsink = SlogDispatcher(callback, addr)
    else:
        slogsink = UserSlogDispatcher(callback, addr)
    asyncio.register(slogsink)
    return slogsink
# simple syslog server
def slog(argv):
    """Minimal syslog server: print every received message until interrupted.
    Optional argv[1] overrides the default port (514)."""
    if len(argv) > 1:
        port = int(argv[1])
    else:
        port = 514
    # Uses the default_logger callback (prints to stdout).
    get_dispatcher(("", port))
    try:
        while 1:
            asyncio.pause()
    except KeyboardInterrupt:
        return
# full syslog server with command interface
def nmslog(argv):
    """
    Usage:
        nmslog [-p port] [-d] [<logfile> ...]
    Record external syslog events. An optional list of file names may be
    supplied on the command line. The received event will be written to each
    file given, including stdout. The user may also type in a message at
    anytime which will also be added to the log files.
    The -d flag enables debugging mode. The -p flag specifies a non-standard UDP
    port to listen to.
    """
    from pycopia import getopt
    port = 514
    debug = False
    try:
        opts, longopts, args = getopt.getopt(argv[1:], "p:dh?")
    except getopt.GetoptError:
        # NOTE(review): `__doc__` is the *module* docstring here, not this
        # function's usage text — `nmslog.__doc__` was probably intended.
        print __doc__
        return
    for opt, arg in opts:
        if opt == "-p":
            port = int(arg)
        # NOTE(review): "-d" is tested with `if` (not `elif`), so "-h/-?" only
        # chains off the "-d" test — confirm the intended option handling.
        if opt == "-d":
            debug = True
        elif opt in ("-?", "-h"):
            print __doc__
            return
    sl = Syslog(args)
    get_dispatcher(("", port), sl.dispatch)
    sl.addlog(sys.stdout)
    print "Logging started. Listening on UDP port %d. You may type manual entries at will." % (port,)
    sl.message("STARTING", ", ".join(map(lambda o: o.name, sl.logfiles)))
    sa = SyslogApp(sl)
    sa.mainloop(debug)
    sl.close()
if __name__ == "__main__":
    # Smoke test: parse one sample syslog datagram, print it, then start a
    # dispatcher on the default user address (parse_message, now and
    # _user_default_addr are defined earlier in this module).
    msg = parse_message(now(), "localhost", '<177>local6 alert')
    print msg
    dis = get_dispatcher(_user_default_addr)
|
# Source: ihaeyong/drama-graph -- sound-event detection preprocessing script.
import os
import pandas as pd
import numpy as np
from tqdm import tqdm
import librosa
from sklearn.utils import shuffle
from utils import extract_features, read_audio_file
delta = False
def addSmpls(train, test, samples, class_name):
    """Randomly assign one class's samples to train/test (~80/20 split).

    Appends (sample, class_name) tuples to the given train and test lists
    and returns both. The random mask is redrawn until the train share is
    between 78% and 82% of the class.
    """
    mask = np.random.rand(len(samples)) > 0.2
    while not (0.78 * len(samples) <= np.sum(mask) <= 0.82 * len(samples)):
        mask = np.random.rand(len(samples)) > 0.2
    print(class_name, ' (train/test):', np.sum(mask), (len(samples) - np.sum(mask)))
    for keep, sample in zip(mask, samples):
        if keep:
            train.append((sample, class_name))
        else:
            test.append((sample, class_name))
    return train, test
def genSets(samples, classes):
    """Bucket *samples* by substring class match, then split into train/test.

    Returns (train, test, class_distribution) where the distribution is the
    per-class sample count.
    """
    print("Generating sets...\n")
    train, test = [], []
    counts = np.zeros(len(classes))
    buckets = [[] for _ in classes]
    for name in samples:
        # A sample belongs to every class whose name appears in its filename.
        for idx, cls in enumerate(classes):
            if cls in name:
                buckets[idx].append(name)
                counts[idx] += 1
    print("Class distribution:", counts)
    print("Partitioning to test and train sets...")
    for idx, bucket in enumerate(tqdm(buckets)):
        train, test = addSmpls(train, test, bucket, classes[idx])
    print("Partitioned:", len(train), len(test))
    return train, test, counts
def get_classes(samples):
    """Return the distinct class names quoted inside the sample file names.

    Each file name is expected to contain the class between double quotes,
    e.g. 'clip"dog"01.wav' -> 'dog'.
    """
    classes = set()
    for sample in samples:
        start = sample.find('"') + 1
        end = start + sample[start:].find('"')
        classes.add(sample[start:end])
    return list(classes)
def extr(df, X, sr, bookmark):
    """Extract features for one audio clip and store them at row *bookmark*."""
    df.loc[bookmark] = [extract_features(X, sr, delta=delta)]
# Enumerate the preprocessed clips and derive the class list from their names.
samples = os.listdir('./sound_event_detection/src/pre_proc/')
print("There are {} samples.".format(len(samples)))
class_list = get_classes(samples)
# BUG FIX: the placeholders were never interpolated -- the format string and
# the values were passed to print() as separate arguments.
print('There are {} classes:\n{}'.format(len(class_list), class_list))
train, test, class_distribution = genSets(samples, class_list)
# Feature extraction and CSV export for the test split.
data_len = set()
print("Feature extraction for test set...")
test_df = pd.DataFrame(columns=['feature'])
test_labels = []
for index, sample in tqdm(enumerate(test)):
    file_name = sample[0]
    data, sr = read_audio_file('./sound_event_detection/src/pre_proc/' + file_name)
    data_len.add(len(data) / sr)
    extr(test_df, data, sr, index)
    test_labels.append(sample[1])
# BUG FIX: pd.DataFrame(list) names its single column with the *integer* 0,
# so the original rename(columns={"0": "label"}) was a silent no-op and the
# label column was written out as a bare 0. Name it explicitly instead.
test_labels = pd.DataFrame(test_labels, columns=['label'])
print("Test Labels length:", len(test_labels))
test_df3 = pd.DataFrame(test_df['feature'].values.tolist())
test_newdf = pd.concat([test_df3, test_labels], axis=1)
print("Test DF length:", len(test_newdf))
test_rnewdf = test_newdf.rename(index=str)
test_rnewdf = shuffle(test_rnewdf)
test_rnewdf = test_rnewdf.fillna(0)
print("New Test DF length:", len(test_rnewdf))
# Delta features get their own output file suffix.
token = ''
if delta:
    token = '_delta'
print("Saving {} ...".format('./sound_event_detection/src/test'+token+'.csv'))
test_rnewdf.to_csv('./sound_event_detection/src/test'+token+'.csv', sep='\t', encoding='utf-8')
# Feature extraction and CSV export for the train split (mirrors the test
# block above; `token` is set there).
data_len = set()
print("Feature extraction for train set...")
train_df = pd.DataFrame(columns=['feature'])
train_labels = []
for index, sample in tqdm(enumerate(train)):
    file_name = sample[0]
    data, sr = read_audio_file('./sound_event_detection/src/pre_proc/' + file_name)
    data_len.add(len(data) / sr)
    extr(train_df, data, sr, index)
    train_labels.append(sample[1])
# BUG FIX: same as the test split -- the label column is the integer 0, so
# rename(columns={"0": "label"}) never matched. Name the column explicitly.
train_labels = pd.DataFrame(train_labels, columns=['label'])
print("Train Labels length:", len(train_labels))
train_df3 = pd.DataFrame(train_df['feature'].values.tolist())
train_newdf = pd.concat([train_df3, train_labels], axis=1)
print("Train DF length:", len(train_newdf))
train_rnewdf = train_newdf.rename(index=str)
train_rnewdf = shuffle(train_rnewdf)
train_rnewdf = train_rnewdf.fillna(0)
print("New Train DF length:", len(train_rnewdf))
print("Saving {} ...".format('./sound_event_detection/src/train'+token+'.csv'))
train_rnewdf.to_csv('./sound_event_detection/src/train'+token+'.csv', sep='\t', encoding='utf-8')
|
# Variables y Comentarios
# Cadena de caracteres String
"""
Este es un comentario de muchas lineas que describe este programa escrito en python
TODO Debemos de limpiar este codigo mas, es decir separarlo por funciones
FIXME Debemos de corregir el codigo que a veces da errores en la linea 4
"""
palabra = "esta es una palabra"
# Numerico
numero_decimal = 3.4 # Numerico decimal
numero_entero = -2 # Numerico entero
numero_octal = 0o43 # Numero en base octal
numero_hexadecimal = 0x23 # Numero en base hexadecimal
# Imprimir tipos de variables
print("Esta es una palabra - " + palabra)
print("Este es un valor numerico decimal " + str(numero_decimal))
print("Este es un valor numerico entero " + str(numero_entero))
print("Estes es un valor hexadecimal " + str(numero_hexadecimal))
print("Este es un numero en base octal " + str(numero_octal))
# Booleano
logico = False
# Nulo
nula = None
print("Este es un tipo de valor booleano " + str(logico))
print("Este es un tipo de valor nulo " + str(nula))
print("============================================")
# Operadores Aritmeticos
# Declaracion de variables
num1 = 4
num2 = 8
# Operador de Suma
resultado_suma = num1 + num2
# Operador de Resta
resultado_resta = num1 - num2
# Operador de multiplicacion
resultado_multiplicacion = num1 * num2
# Operador de division
resultado_division = num1 / num2
# Operador
resultado_exponente = num1 ** num2
# Modulo
resultado_modulo = num1 % num2
# Division entera
resultado_division_entera = num1 // num2
# Raiz cuadrada
resultado_raiz_cuadrada = num1 ** (1/2)
print("Es resultado de la suma de 4 + 8 = " + str(resultado_suma))
print("El resultado de la resta de 4 - 8 = " + str(resultado_resta))
print("El resultado de la multiplicacion de 4 * 8 = " + str(resultado_multiplicacion))
print("El resultado de la division de 4 / 8 = " + str(resultado_division))
print("El resultado de elevar 4 a la 8 es = " + str(resultado_exponente))
print("El resultado de el modulo es = " + str(resultado_modulo))
print("El resultado de la division entera es = " + str(resultado_division_entera))
print("El resultado de la raiz cuadrada es = " + str(resultado_raiz_cuadrada))
print("============================================")
"""
Este es el tipo de operadores logicos, consultar en la documentacion oficial
https://entrenamiento-python-basico.readthedocs.io/es/latest/leccion4/operadores_logicos.html
"""
valor_a = True
valor_b = True
resultado_or = valor_a or valor_b
resultado_and = valor_a and valor_b
resultado_not = not valor_a
print("El valor de el resultado or de " + str(valor_a) + " y " + str(valor_b) + " = " + str(resultado_or))
print("El valor de el resultado and de " + str(valor_a) + " y " + str(valor_b) + " = " + str(resultado_and))
print("El valor de el resultado not de " + str(valor_a) + " = " + str(resultado_not))
# Logicas entre numeros
print("El valor de la comparacion entre 4 == 5 es " + str(4==5))
print("El valor de la comparacion entre 4 < 5 es " + str(4<5))
print("El valor de la comparacion entre 4 > 5 es " + str(4>5))
print("El valor de la comparacion entre 4 != 5 es " + str(4!=5))
print("El valor de la comparacion entre 4 <= 5 es " + str(4<=5))
print("El valor de la comparacion entre 4 >= 5 es " + str(4>=5))
|
import struct
import wtforms
from wtforms.validators import Length, NumberRange
from . import core
class BasicBinaryField(core.BinaryField):
    """Common machinery for fixed-size binary fields backed by one struct code.

    Subclasses set ``pack_string`` (a struct format code) and
    ``form_field_class``, and may declare ``initial_validators`` that encode
    the inherent limits of the serialized form (e.g. a UInt8Field can never
    hold more than 0xFF). Those validators are silently merged with any
    caller-supplied validators.
    """

    initial_validators = []

    def __init__(self, label='', validators=None, order=None, **kwargs):
        core.BinaryItem.__init__(self)
        self.size = struct.calcsize(self.pack_string)
        self.order = order
        # Copy the class-level list so we never mutate shared class state.
        combined = list(self.initial_validators)
        if validators is not None:
            combined.extend(validators)
        self.form_field = self.form_field_class(label, combined, **kwargs)

    def pack(self, data, order=None):
        # The field's own byte order wins over the caller's.
        effective = self.order or order or ''
        return self.pack_data(data, effective)

    def pack_data(self, data, order):
        return struct.pack(order + self.pack_string, data)

    def unpack(self, buffer, order=None):
        effective = self.order or order or ''
        return self.unpack_data(buffer, effective)

    def unpack_data(self, buffer, order):
        return struct.unpack(order + self.pack_string, buffer)[0]
class CharField(BasicBinaryField):
    """
    Store a single byte as a one-character ``str`` (in Python 2) or ``bytes``
    object (in Python 3).
    Attributes:
        size: always ``1``
        form_field: A :class:`wtforms.fields.StringField` instance.
    """
    form_field_class = wtforms.StringField
    # The wire format is exactly one byte, so the form value must have length 1.
    initial_validators = [Length(min=1, max=1)]
    pack_string = 'c'


class BinaryBooleanField(BasicBinaryField):
    """
    Store either ``True`` or ``False`` as ``b'\\x01'`` or ``b'\\x00'``
    (respectively).
    Attributes:
        size: always ``1``
        form_field: A :class:`wtforms.fields.BooleanField` instance.
    """
    form_field_class = wtforms.BooleanField
    pack_string = '?'


class BinaryIntegerField(BasicBinaryField):
    """
    This class should not be instantiated directly; instead, you should use
    one of its subclasses, which determine what kind of int is stored, and
    how. Those subclasses are:
    ==================== ==== =============== ================
    Name                 size Min             Max
    ==================== ==== =============== ================
    :class:`Int8Field`   1    -128            127
    :class:`UInt8Field`  1    0               255
    :class:`Int16Field`  2    -32768          32767
    :class:`UInt16Field` 2    0               65535
    :class:`Int32Field`  4    -2\\ :sup:`31`   2\\ :sup:`31` - 1
    :class:`UInt32Field` 4    0               2\\ :sup:`32` - 1
    :class:`Int64Field`  8    -2\\ :sup:`63`   2\\ :sup:`63` - 1
    :class:`UInt64Field` 8    0               2\\ :sup:`64` - 1
    ==================== ==== =============== ================
    Attributes:
        form_field: A :class:`wtforms.fields.Integerfield` instance.
    """
    form_field_class = wtforms.IntegerField

    @property
    def initial_validators(self):
        # Computed (not a class attribute) so each subclass's min/max is used.
        return [NumberRange(self.min, self.max)]


class Int8Field(BinaryIntegerField):
    """Signed 8-bit integer: one byte, -128..127."""
    pack_string = 'b'
    min = -128
    max = 127


class UInt8Field(BinaryIntegerField):
    """Unsigned 8-bit integer: one byte, 0..255."""
    pack_string = 'B'
    min = 0
    max = (2 ** 8) - 1


class Int16Field(BinaryIntegerField):
    """Signed 16-bit integer: two bytes, -32768..32767."""
    pack_string = 'h'
    min = -(2 ** 15)
    max = (2 ** 15) - 1


class UInt16Field(BinaryIntegerField):
    """Unsigned 16-bit integer: two bytes, 0..65535."""
    pack_string = 'H'
    min = 0
    max = (2 ** 16) - 1


class Int32Field(BinaryIntegerField):
    """Signed 32-bit integer: four bytes."""
    pack_string = 'i'
    min = -(2 ** 31)
    max = (2 ** 31) - 1


class UInt32Field(BinaryIntegerField):
    """Unsigned 32-bit integer: four bytes."""
    pack_string = 'I'
    min = 0
    max = (2 ** 32) - 1


class Int64Field(BinaryIntegerField):
    """Signed 64-bit integer: eight bytes."""
    pack_string = 'q'
    min = -(2 ** 63)
    max = (2 ** 63) - 1


class UInt64Field(BinaryIntegerField):
    """Unsigned 64-bit integer: eight bytes."""
    pack_string = 'Q'
    min = 0
    max = (2 ** 64) - 1


class Float32Field(BasicBinaryField):
    """
    Store a ``float`` in four bytes.
    Attributes:
        size: Always ``4``.
        form_field: A :class:`wtforms.fields.FloatField` instance.
    """
    form_field_class = wtforms.FloatField
    pack_string = 'f'


class Float64Field(BasicBinaryField):
    """
    Store a ``float`` in eight bytes.
    Attributes:
        size: Always ``8``.
        form_field: A :class:`wtforms.fields.FloatField` instance.
    """
    form_field_class = wtforms.FloatField
    pack_string = 'd'
class BytesField(BasicBinaryField):
    """
    Store *N* bytes.
    Attributes:
        max_length: Maximum number of bytes in the stored string. Note that
            this may not be equal to :attr:`size`.
        size: The :attr:`size` of a :class:`BytesField` with ``max_length``
            *N* varies based on the *length* argument used to construct it.
            If *length* is :attr:`~minform.FIXED` or
            :attr:`~minform.AUTOMATIC`, ``size`` will be *N*.
            If *length* is :attr:`~minform.EXPLICIT`, there will be one or
            more extra bytes at the beginning of the packed data, which store
            the number of bytes used by the string. This will be the smallest
            number of bytes needed to store a number up to ``max_length``. So,
            ``size`` can be *N+1*, *N+2*, *N+4*, or *N+8*. (For more
            information, see the documentation for :data:`~minform.EXPLICIT`.)
        form_field: A :class:`wtforms.fields.StringField` instance.
    """
    form_field_class = wtforms.StringField

    def __init__(self, label='', validators=None, max_length=None,
                 length=core.AUTOMATIC, order=None, **kwargs):
        # NOTE(review): this guard permits max_length == 0 even though the
        # message says "positive" -- confirm which is intended.
        if not isinstance(max_length, int) or max_length < 0:
            raise ValueError('BytesField must be created with a '
                             'positive max_length keyword argument.')
        self.order = order
        self.length = length
        self.max_length = max_length
        if self.length == core.FIXED:
            # Exactly max_length bytes, both on the wire and in the form value.
            self.initial_validators = [Length(max=max_length, min=max_length)]
            self.pack_string = '{0}s'.format(max_length)
        elif self.length == core.AUTOMATIC:
            # Up to max_length bytes; struct zero-pads shorter values.
            self.initial_validators = [Length(max=max_length)]
            self.pack_string = '{0}s'.format(max_length)
        elif self.length == core.EXPLICIT:
            # A length prefix (smallest integer field able to hold max_length)
            # precedes the payload bytes.
            self.initial_validators = [Length(max=max_length)]
            self.length_field = store_numbers_up_to(max_length, order=order)
            self.pack_string = '{0}{1}s'.format(self.length_field.pack_string,
                                                max_length)
        super(BytesField, self).__init__(label, validators, order, **kwargs)

    def pack_data(self, data, order):
        # Pre-sized buffer so the unused payload tail stays zero-filled.
        buffer = bytearray(self.size)
        length = len(data)
        if self.length == core.EXPLICIT:
            # Write the byte count into the prefix region first.
            pack_length_string = order + self.length_field.pack_string
            struct.pack_into(pack_length_string, buffer, 0, length)
            start = self.length_field.size
        else:
            start = 0
        buffer[start:start+length] = data
        return buffer

    def unpack_data(self, buffer, order):
        if self.length == core.EXPLICIT:
            # Read the declared payload length from the prefix and verify it.
            unpack_length_string = order + self.length_field.pack_string
            length = struct.unpack_from(unpack_length_string, buffer)[0]
            if length > self.max_length:
                message = "Buffer cannot contain {0} bytes.".format(length)
                raise ValueError(message)
            data_buffer = buffer[self.length_field.size:]
        else:
            length = self.max_length
            data_buffer = buffer
        data = data_buffer[:length]
        if self.length == core.AUTOMATIC:
            # AUTOMATIC pads with NULs when packing, so strip them here.
            data = data.rstrip(b'\x00')
        return data
def store_numbers_up_to(n, signed=False, **kwargs):
    """
    Return a binary integer field instance that can store numbers up to ``n``.
    If the number is too big to store, a ``ValueError`` will be raised.
    Parameters:
        n: The highest number that you expect to need to store (must be at
            most a 64-bit integer).
        signed: Return a field that can store negative numbers.
        kwargs: Additional arguments get passed into the binary field
            constructor.
    Returns:
        BinaryIntegerField: A :class:`BinaryIntegerField` that can store
            numbers up to at least ``n``.
    """
    if signed:
        candidates = (Int8Field, Int16Field, Int32Field, Int64Field)
    else:
        candidates = (UInt8Field, UInt16Field, UInt32Field, UInt64Field)
    # Pick the narrowest field whose maximum covers n.
    for field_class in candidates:
        if n <= field_class.max:
            return field_class(**kwargs)
    raise ValueError("Can't track numbers up to {0}".format(n))
|
'''
This reads images from caltech256 and makes them into tfrecords file.
'''
import tensorflow as tf
import numpy as np
from PIL import Image
import os
import shutil
# Root directory that holds Caltech256's 256_ObjectCategories folder.
main_dir = '/media/yukun/Barracuda Hard Drive 2TB/Data/Caltech256'
# Number of tfrecord shards to create; shard 0 is reserved for testing.
num_tffile = 5
def get_image(filename):
    '''
    returns the image data for the filename.
    Args:
        filepath of an image
    Returns:
        image: in string format,
        shape: [height, width, depth]
    '''
    image = np.asarray(Image.open(filename), np.uint8)
    if image.ndim < 3:
        # Grayscale: replicate the single channel into R, G and B.
        image = np.transpose(
            np.multiply(image, np.ones((3, image.shape[0], image.shape[1]))),
            (1, 2, 0)).astype(np.uint8)
    return image.tostring(), image.shape
def readimages():
'''
Goes through every image and gets the image, putting them into an array
Args:
None
Returns:
data: array of information including:
image: string
shape: int
label: index from 0 to 256 for the classes
'''
data = []
j = 0
object_dir = os.path.join(main_dir, '256_ObjectCategories')
objectCategories = os.listdir(object_dir)
for i in range(len(objectCategories)):
label = i
examp_dir = os.path.join(object_dir, objectCategories[i])
examples = os.listdir(examp_dir)
print 'compiling data: %d/%d'%(i+1, len(objectCategories))
for examp in examples:
image_dir = os.path.join(examp_dir, examp)
image, shape = get_image(image_dir)
try:
data[j]
except IndexError:
data.append([])
data[j].append([image, shape, label])
j = (j+1)%num_tffile
return data
def make_bytes(item):
    # Wrap a raw byte string in a tf.train Feature for Example serialization.
    return tf.train.Feature(bytes_list=tf.train.BytesList(value=[item]))

def make_int64(item):
    # Wrap a single integer in a tf.train Feature for Example serialization.
    return tf.train.Feature(int64_list=tf.train.Int64List(value=[item]))
def write_data(data):
    '''
    writes the data into tfrecord files
    Args:
        data:
            of size [num_tffile, total_examples/num_tffile, 3] the 3 at the end is for:
            [image, shape, label]
    Returns:
        nothing
    '''
    write_dir = os.path.join(main_dir, 'caltech-256-batches-bin')
    # Start from a clean output directory on every run.
    if os.path.exists(write_dir):
        shutil.rmtree(write_dir)
    os.mkdir(write_dir)
    for i in range(len(data)):
        # Shard 0 becomes the test batch; all others are training batches.
        if i:
            filename = os.path.join(write_dir, 'train_batch_%d.tfrecords'%(i))
        else:
            filename = os.path.join(write_dir, 'test_batch.tfrecords')
        print 'making files %d of %d'%(i+1, len(data))
        with tf.python_io.TFRecordWriter(filename) as writer:
            for j in range(len(data[i])):
                image, shape, label = data[i][j]
                height, width, depth = shape
                # One Example per image: raw bytes plus dimensions and label.
                writer.write(tf.train.Example(features=tf.train.Features(feature={
                    'image': make_bytes(image),
                    'height': make_int64(height),
                    'width': make_int64(width),
                    'depth': make_int64(depth),
                    'label': make_int64(label)
                })).SerializeToString())
def main(argv=None):
    # argv is supplied by tf.app.run() but unused here.
    data = readimages()
    write_data(data)

if __name__ == '__main__':
    tf.app.run()
|
# tests/setup.py
import unittest
class Setup(unittest.TestCase):
    """A Class to host all the Setup data to avoid a lot of copy pasting between modules"""

    def setUp(self):
        """!killmails dont have items!
        C O M P L E T E :
        uk00 = complete Killmail
        A T T A C K E R :
        uk10 = missing char
        uk11 = invalid char
        uk12 = missing corp
        uk13 = invalid corp
        uk14 = missing alliance
        uk15 = invalid alliance
        uk16 = missing ship
        uk17 = invalid ship
        uk18 = all missing names
        V I C T I M :
        uk20 = missing char
        uk21 = invalid char
        uk22 = missing corp
        uk23 = invalid corp
        uk24 = missing alliance
        uk25 = invalid alliance
        uk26 = missing ship
        uk27 = invalid ship
        uk28 = all missing names
        M U L T I P L E A T T A C K E R S :
        uk30 = complete Killmail
        uk31 = 1 missing char
        uk32 = 2 missing char
        uk33 = 1 invalid char
        uk34 = 2 invalid char
        uk35 = 1 missing corp
        uk36 = 2 missing corp
        uk37 = 1 invalid corp
        uk38 = 2 invalid corp
        uk39 = 1 missing alliance
        uk40 = 2 missing alliance
        uk41 = 1 invalid alliance
        uk42 = 2 invalid alliance
        uk43 = 1 missing ship
        uk44 = 2 missing ship
        uk45 = 1 invalid ship
        uk46 = 2 invalid ship
        M I S C :
        uk60 = missing solar system id
        uk70 = wh with 2 attacker with no alli
        """
        # NOTE(review): only uk00, uk30, uk70, uk71, uk72 and uk99 are defined
        # below; the docstring enumerates fixtures that do not exist here.

        # uk00: complete killmail -- single attacker, all ids present.
        self.uk00 = \
            {
                "package":
                    {
                        "killID": 71443648,
                        "killmail":
                            {
                                "attackers":
                                    [{
                                        "alliance_id": 1354830081,
                                        "character_id": 992181402,
                                        "corporation_id": 1324429368,
                                        "damage_done": 4110,
                                        "final_blow": True,
                                        "security_status": -7.8,
                                        "ship_type_id": 605,
                                        "weapon_type_id": 2456
                                    }],
                                "killmail_id": 71443648,
                                "killmail_time": "2018-07-24T17:56:14Z",
                                "solar_system_id": 30003681,
                                "victim":
                                    {
                                        "alliance_id": 99007362,
                                        "character_id": 2114300996,
                                        "corporation_id": 98531953,
                                        "damage_taken": 4110,
                                        "position":
                                            {
                                                "x": -456877791246.22,
                                                "y": -83876045685.746,
                                                "z": 458094309170.23
                                            },
                                        "ship_type_id": 32878
                                    }
                            },
                        "zkb":
                            {
                                "locationID": 50006982,
                                "hash": "9ab505bacad3122d8648e2c4aa9a3c80ad67eedc",
                                "fittedValue": 2543013.41,
                                "totalValue": 7521431.46,
                                "points": 1,
                                "npc": False,
                                "solo": True,
                                "awox": False,
                                "href": "https://esi.evetech.net/v1/killmails/71443648/9ab505bacad3122d8648e2c4aa9a3c80ad67eedc/"
                            }
                    }
            }

        # M U L T I P L E  A T T A C K E R S
        # uk30: complete killmail with two attackers (war kill).
        self.uk30 = \
            {
                "package":
                    {
                        "killID": 71933840,
                        "killmail":
                            {
                                "attackers":
                                    [{
                                        "alliance_id": 99005382,
                                        "character_id": 224182597,
                                        "corporation_id": 818601383,
                                        "damage_done": 959,
                                        "final_blow": False,
                                        "security_status": 4.7,
                                        "ship_type_id": 29990,
                                        "weapon_type_id": 29990
                                    }, {
                                        "alliance_id": 99005382,
                                        "character_id": 91715917,
                                        "corporation_id": 98567437,
                                        "damage_done": 778,
                                        "final_blow": True,
                                        "security_status": 0.9,
                                        "ship_type_id": 29990,
                                        "weapon_type_id": 2969
                                    }],
                                "killmail_id": 71933840,
                                "killmail_time": "2018-08-18T11:39:49Z",
                                "solar_system_id": 30000142,
                                "victim":
                                    {
                                        "alliance_id": 99003581,
                                        "character_id": 2113228085,
                                        "corporation_id": 98446928,
                                        "damage_taken": 1737,
                                        "position":
                                            {
                                                "x": -107303397020.36,
                                                "y": -18744981247.376,
                                                "z": 436489013090.49},
                                        "ship_type_id": 33468
                                    },
                                "war_id": 609116
                            },
                        "zkb":
                            {
                                "locationID": 60003760,
                                "hash": "905e1f9b42f08effd05a804b32fafc541a6d8f46",
                                "fittedValue": 65084309.91,
                                "totalValue": 227289291.33,
                                "points": 1,
                                "npc": False,
                                "solo": False,
                                "awox": False,
                                "href": "https://esi.evetech.net/v1/killmails/71933840/905e1f9b42f08effd05a804b32fafc541a6d8f46/"
                            }
                    }
            }

        # uk70: wormhole kill, two attackers, neither with an alliance.
        self.uk70 = {
            "package":
                {
                    "killID": 72012818,
                    "killmail":
                        {
                            "attackers":
                                [{
                                    "character_id": 94830824,
                                    "corporation_id": 98539465,
                                    "damage_done": 1085,
                                    "final_blow": False,
                                    "security_status": 4.6,
                                    "ship_type_id": 22456,
                                    "weapon_type_id": 22456
                                },
                                {
                                    "character_id": 95074071,
                                    "corporation_id": 98389109,
                                    "damage_done": 1002,
                                    "final_blow": True,
                                    "security_status": 3.8,
                                    "ship_type_id": 33470,
                                    "weapon_type_id": 2205
                                }],
                            "killmail_id": 72012818,
                            "killmail_time": "2018-08-22T10:41:01Z",
                            "solar_system_id": 31000153,
                            "victim":
                                {
                                    "character_id": 2112516399,
                                    "corporation_id": 1000180,
                                    "damage_taken": 2087,
                                    "faction_id": 500001,
                                    "position":
                                        {
                                            "x": -640253952528.6340332031,
                                            "y": -106856446836.6681518555,
                                            "z": 706723188313.1087646484
                                        },
                                    "ship_type_id": 33468
                                }
                        },
                    "zkb":
                        {
                            "locationID": 40358750,
                            "hash": "bliblablup",
                            "fittedValue": 10593294.36,
                            "totalValue": 71262264.86,
                            "points": 2,
                            "npc": False,
                            "solo": False,
                            "awox": False,
                            "href": "https://esi.evetech.net/latest/killmails/72012818/a9c2729c80677483802d5be43a711441fd08670d/"
                        }
                }
        }

        # uk71: NPC kill -- single faction attacker with no character ids.
        self.uk71 = {
            'package':
                {
                    'killID': 72477866,
                    'killmail':
                        {
                            'attackers':
                                [{
                                    'damage_done': 5548,
                                    'faction_id': 500021,
                                    'final_blow': True,
                                    'security_status': 0,
                                    'ship_type_id': 48930
                                }],
                            'killmail_id': 72477866,
                            'killmail_time': '2018-09-18T12:00:28Z',
                            'solar_system_id': 30002457,
                            'victim':
                                {
                                    'alliance_id': 99005338,
                                    'character_id': 93415043,
                                    'corporation_id': 98388312,
                                    'damage_taken': 5548,
                                    'position':
                                        {
                                            'x': -135325276843.07732,
                                            'y': -12429544064.941017,
                                            'z': -854682512095.4653
                                        },
                                    'ship_type_id': 589
                                }
                        },
                    'zkb':
                        {
                            'locationID': 40156317,
                            'hash': 'a80c7cacd7b04a69def30891a5fa76a19c2e72cb',
                            'fittedValue': 328402.77,
                            'totalValue': 614449.97,
                            'points': 1,
                            'npc': True,
                            'solo': False,
                            'awox': False,
                            'href': 'https://esi.evetech.net/v1/killmails/72477866/a80c7cacd7b04a69def30891a5fa76a19c2e72cb/'
                        }
                }
        }

        # uk72: second NPC kill fixture.
        self.uk72 = \
            {
                'package':
                    {
                        'killID': 72478369,
                        'killmail':
                            {
                                'attackers':
                                    [{
                                        'damage_done': 3160,
                                        'faction_id': 500021,
                                        'final_blow': True,
                                        'security_status': 0,
                                        'ship_type_id': 48799
                                    }],
                                'killmail_id': 72478369,
                                'killmail_time': '2018-09-18T13:01:24Z',
                                'solar_system_id': 30001718,
                                'victim':
                                    {
                                        'alliance_id': 99007175,
                                        'character_id': 1388637030,
                                        'corporation_id': 98017240,
                                        'damage_taken': 3160,
                                        'position':
                                            {
                                                'x': -243396761509.71545,
                                                'y': -136410538661.06255,
                                                'z': 183045971513.31302}, 'ship_type_id': 34317
                                    }
                            },
                        'zkb':
                            {
                                'locationID': 40109337,
                                'hash': 'c5fb11ff0fa76495835965ec230efa037cecca55',
                                'fittedValue': 37301202.41,
                                'totalValue': 50309060.11,
                                'points': 1,
                                'npc': True,
                                'solo': False,
                                'awox': False,
                                'href': 'https://esi.evetech.net/v1/killmails/72478369/c5fb11ff0fa76495835965ec230efa037cecca55/'
                            }
                    }
            }

        # uk99: three attackers (mixed alliances); victim has an empty items list.
        self.uk99 = \
            {
                'package':
                    {
                        'killID': 78141788,
                        'killmail':
                            {
                                'attackers':
                                    [{
                                        'character_id': 1312548849,
                                        'corporation_id': 1000009,
                                        'damage_done': 461,
                                        'final_blow': True,
                                        'security_status': 0.3,
                                        'ship_type_id': 49710,
                                        'weapon_type_id': 47915
                                    },
                                    {
                                        'character_id': 96785777,
                                        'corporation_id': 98605030,
                                        'damage_done': 0,
                                        'final_blow': False,
                                        'security_status': 4.8,
                                        'ship_type_id': 35683,
                                        'weapon_type_id': 19806
                                    },
                                    {
                                        'alliance_id': 99009279,
                                        'character_id': 96582280,
                                        'corporation_id': 98520515,
                                        'damage_done': 0,
                                        'final_blow': False,
                                        'security_status': -2.5,
                                        'ship_type_id': 29990,
                                        'weapon_type_id': 15889
                                    }],
                                'killmail_id': 78141788,
                                'killmail_time': '2019-07-31T18:17:44Z',
                                'solar_system_id': 30004759,
                                'victim':
                                    {
                                        'alliance_id': 99004425,
                                        'character_id': 96501929,
                                        'corporation_id': 1765089512,
                                        'damage_taken': 461,
                                        'items': [],
                                        'position':
                                            {
                                                'x': 5096709601888.194,
                                                'y': -2641212328751.0195,
                                                'z': 4144289825779.7705
                                            },
                                        'ship_type_id': 670
                                    }
                            },
                        'zkb':
                            {
                                'locationID': 50010993,
                                'hash': '4aded6bd6913ff6130b36255436dfd2c4b41325e',
                                'fittedValue': 10000,
                                'totalValue': 10000,
                                'points': 1,
                                'npc': False,
                                'solo': False,
                                'awox': False,
                                'href': 'https://esi.evetech.net/v1/killmails/78141788/4aded6bd6913ff6130b36255436dfd2c4b41325e/'
                            }
                    }
            }
|
# Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Built-in resource transform functions.
A resource transform function converts a JSON-serializable resource to a string
value. This module contains built-in transform functions that may be used in
resource projection and filter expressions.
NOTICE: Each TransformFoo() method is the implementation of a foo() transform
function. Even though the implementation here is in Python the usage in resource
projection and filter expressions is language agnostic. This affects the
Pythonicness of the Transform*() methods:
(1) The docstrings are used to generate external user documentation.
(2) The method prototypes are included in the documentation. In particular the
prototype formal parameter names are stylized for the documentation.
(3) The 'r', 'kwargs', and 'projection' args are not included in the external
documentation. Docstring descriptions, other than the Args: line for the
arg itself, should not mention these args. Assume the reader knows the
specific item the transform is being applied to. When in doubt refer to
the output of $ gcloud topic projections.
(4) The types of some args, like r, are not fixed until runtime. Other args
may have either a base type value or string representation of that type.
It is up to the transform implementation to silently do the string=>type
conversions. That's why you may see e.g. int(arg) in some of the methods.
(5) Unless it is documented to do so, a transform function must not raise any
exceptions related to the resource r. The `undefined' arg is used to
handle all unusual conditions, including ones that would raise exceptions.
Exceptions for arguments explicitly under the caller's control are OK.
"""
import datetime
import re
import StringIO
import urllib2
from googlecloudsdk.core.console import console_attr
from googlecloudsdk.core.resource import resource_exceptions
from googlecloudsdk.core.resource import resource_property
from googlecloudsdk.core.util import times
def TransformAlways(r):
  """Marks a transform sequence to always be applied.

  In some cases transforms are disabled. Prepending always() to a transform
  sequence causes the sequence to always be evaluated.

  Example:
    some_field.always().foo().bar() will always apply foo() and then bar().

  Args:
    r: A resource.

  Returns:
    r, unchanged. The marker is recognized at parse time and discarded; this
    function body is only a pass-through.
  """
  return r
def TransformBaseName(r, undefined=''):
  """Returns the last path component.

  Args:
    r: A URI or unix/windows file path.
    undefined: Returns this value if the resource or basename is empty.

  Returns:
    The last path component.
  """
  if not r:
    return undefined
  path = unicode(r)
  # '/' is tried before '\\'; only if no '/' exists is the windows
  # separator consulted (order matters for mixed paths).
  for sep in ('/', '\\'):
    cut = path.rfind(sep)
    if cut >= 0:
      return path[cut + 1:]
  return path or undefined
def TransformCollection(r, undefined=''):  # pylint: disable=unused-argument
  """Returns the current resource collection.

  Args:
    r: A JSON-serializable object.
    undefined: This value is returned if r or the collection is empty.

  Returns:
    The current resource collection, undefined if unknown.
  """
  # Placeholder implementation: a resource printer is expected to override
  # this, so the default always reports the collection as unknown.
  return undefined
def TransformColor(r, red=None, yellow=None, green=None, blue=None, **kwargs):
  """Colorizes the resource string value.

  The resource string is searched for an RE pattern match in Roy.G.Biv order.
  The first pattern that matches colorizes the resource string with that color.

  Args:
    r: A JSON-serializable object.
    red: Color red resource value pattern.
    yellow: Color yellow resource value pattern.
    green: Color green resource value pattern.
    blue: Color blue resource value pattern.
    **kwargs: console_attr.Colorizer() kwargs.

  Returns:
    A console_attr.Colorizer() object if any color substring matches, r
    otherwise.
  """
  text = unicode(r)
  candidates = (('red', red), ('yellow', yellow), ('green', green),
                ('blue', blue))
  for color, pattern in candidates:
    if pattern and re.search(pattern, text):
      return console_attr.Colorizer(text, color, **kwargs)
  return text
# pylint: disable=redefined-builtin, external expression expects format kwarg.
# pylint: disable=redefined-builtin, external expression expects format kwarg.
def TransformDate(r, format='%Y-%m-%dT%H:%M:%S', unit=1, undefined='',
                  tz=None, tz_default=None):
  """Formats the resource as a strftime() format.

  Interpretation is an ordered fallback chain: isoformat() object ->
  numeric timestamp -> serialized datetime attribute -> parseable
  date/time string -> broken-down time parts; `undefined` if all fail.

  Args:
    r: A timestamp number or an object with 3 or more of these fields: year,
      month, day, hour, minute, second, millisecond, microsecond, nanosecond.
    format: The strftime(3) format.
    unit: If the resource is a Timestamp then divide by _unit_ to yield seconds.
    undefined: Returns this value if the resource is not a valid time.
    tz: Return the time relative to the tz timezone if specified, the explicit
      timezone in the resource if it has one, otherwise the local timezone.
      For example, ...date(tz=EST5EDT, tz_default=UTC).
    tz_default: The default timezone if the resource does not have a timezone
      suffix.

  Returns:
    The strftime() date format for r or undefined if r does not contain a valid
    time.
  """
  # Check if r has an isoformat() method.
  try:
    r = r.isoformat()
  except (AttributeError, TypeError, ValueError):
    pass
  tz_in = times.GetTimeZone(tz_default) if tz_default else None
  # Check if r is a timestamp.
  try:
    timestamp = float(r) / float(unit)
    dt = times.GetDateTimeFromTimeStamp(timestamp, tz_in)
    return times.FormatDateTime(dt, format)
  except (TypeError, ValueError):
    pass
  # Check if r is a serialized datetime object.
  original_repr = resource_property.Get(r, ['datetime'], None)
  if original_repr and isinstance(original_repr, basestring):
    r = original_repr
  tz_out = times.GetTimeZone(tz) if tz else None
  # Check if r is a date/time string.
  try:
    dt = times.ParseDateTime(r, tz_in)
    return times.FormatDateTime(dt, format, tz_out)
  except (AttributeError, ImportError, TypeError, ValueError):
    pass

  def _FormatFromParts():
    """Returns the formatted time from broken down time parts in r.

    Raises:
      TypeError: For invalid time part errors.
      ValueError: For time conversion errors or not enough valid time parts.

    Returns:
      The formatted time from broken down time parts in r.
    """
    valid = 0
    parts = []
    now = datetime.datetime.now(tz_in)
    for part in ('year', 'month', 'day', 'hour', 'minute', 'second'):
      value = resource_property.Get(r, [part], None)
      if value is None:
        # Missing parts default to now.
        value = getattr(now, part, 0)
      else:
        valid += 1
      parts.append(int(value))
    # The last value is microseconds. Add in any subsecond parts but don't count
    # them in the validity check.
    parts.append(0)
    for i, part in enumerate(['nanosecond', 'microsecond', 'millisecond']):
      # 1000 ** (i - 1) scales each unit into microseconds (nanoseconds
      # divide by 1000, microseconds pass through, milliseconds multiply).
      value = resource_property.Get(r, [part], None)
      if value is not None:
        parts[-1] += int(int(value) * 1000 ** (i - 1))
    # year&month&day or hour&minute&second would be OK, "3" covers those and any
    # combination of 3 non-subsecond date/time parts.
    if valid < 3:
      raise ValueError
    parts.append(tz_in)
    dt = datetime.datetime(*parts)
    return times.FormatDateTime(dt, format, tz_out)

  try:
    return _FormatFromParts()
  except (TypeError, ValueError):
    pass
  # Does anyone really know what time it is?
  return undefined
def TransformDecode(r, encoding, undefined=''):
  """Returns the decoded value of the resource that was encoded by encoding.

  Args:
    r: A JSON-serializable object.
    encoding: The encoding name. *base64* and *utf-8* are supported.
    undefined: Returns this value if the decoding fails.

  Returns:
    The decoded resource.
  """
  # Try the lenient 'replace' error mode first, then the 'strict' default;
  # some codecs only support 'strict'.
  try:
    return r.decode(encoding, 'replace')
  except:  # pylint: disable=bare-except, undefined for any exception
    pass
  try:
    return r.decode(encoding, 'strict')
  except:  # pylint: disable=bare-except, undefined for any exception
    pass
  return undefined
def TransformDuration(r, unit=1, undefined=''):
  """Formats the resource as a duration string.

  Args:
    r: A JSON-serializable object.
    unit: Divide the resource numeric value by _unit_ to yield seconds.
    undefined: Returns this value if the resource is not a valid timestamp.

  Returns:
    The duration string for r or undefined if r is not a duration.
  """
  try:
    delta = datetime.timedelta(seconds=float(r) / unit)
    # Drop the spaces from timedelta's "N days, H:MM:SS" representation.
    return unicode(delta).replace(' ', '')
  except (TypeError, ValueError):
    return undefined
def TransformEncode(r, encoding, undefined=''):
  """Returns the encoded value of the resource using encoding.

  Args:
    r: A JSON-serializable object.
    encoding: The encoding name. *base64* and *utf-8* are supported.
    undefined: Returns this value if the encoding fails.

  Returns:
    The encoded resource.
  """
  # Try the lenient 'replace' error mode first, then the 'strict' default;
  # some codecs only support 'strict'. The trailing newline some codecs
  # append (e.g. base64) is stripped.
  try:
    return r.encode(encoding, 'replace').rstrip('\n')
  except:  # pylint: disable=bare-except, undefined for any exception
    pass
  try:
    return r.encode(encoding, 'strict').rstrip('\n')
  except:  # pylint: disable=bare-except, undefined for any exception
    pass
  return undefined
def TransformEnum(r, projection, enums, inverse=False, undefined=''):
  """Returns the enums dictionary description for the resource.

  Args:
    r: A JSON-serializable object.
    projection: The parent ProjectionSpec.
    enums: The name of a message enum dictionary.
    inverse: Do inverse lookup.
    undefined: Returns this value if there is no matching enum description.

  Returns:
    The enums dictionary description for the resource.
  """
  lookup_name = GetTypeDataName(enums, 'inverse-enum' if inverse else 'enum')
  descriptions = projection.symbols.get(lookup_name)
  if not descriptions and inverse:
    forward = projection.symbols.get(GetTypeDataName(enums, 'enum'))
    if forward:
      # Build the inverse mapping once and memoize it in projection.symbols.
      descriptions = dict((v, k) for k, v in forward.iteritems())
      projection.symbols[lookup_name] = descriptions
  if not descriptions:
    return undefined
  return descriptions.get(r, undefined)
def TransformError(r, message=None):
  """Raises an Error exception that does not generate a stack trace.

  Args:
    r: A JSON-serializable object.
    message: An error message. If not specified then the resource is formatted
      as the error message.

  Raises:
    Error: This will not generate a stack trace.
  """
  raise resource_exceptions.Error(unicode(r) if message is None else message)
def TransformExtract(r, *keys):
  """Extract an ordered list of values from the resource for the specified keys.

  Args:
    r: A JSON-serializable object.
    *keys: The list of keys in the resource whose associated values will be
      included in the result.

  Returns:
    The list of extracted values, [] if r is not indexable by the keys.
  """
  values = []
  try:
    for key in keys:
      if key in r:
        values.append(r[key])
  except TypeError:
    return []
  return values
def TransformFatal(r, message=None):
  """Raises an InternalError exception that generates a stack trace.

  Args:
    r: A JSON-serializable object.
    message: An error message. If not specified then the resource is formatted
      as the error message.

  Raises:
    InternalError: This generates a stack trace.
  """
  if message is None:
    message = str(r)
  raise resource_exceptions.InternalError(message)
def TransformFirstOf(r, *args):
  """Returns the first non-None .name attribute value for name in args.

  Note: despite the transform's name, any value that is not None is returned,
  including empty strings and 0.

  Args:
    r: A JSON-serializable object.
    *args: Names to check for resource attribute values,

  Returns:
    The first r.name value that is not None for name in args, '' otherwise.

  Example:
    x.firstof(bar_foo, barFoo, BarFoo, BAR_FOO) will check x.bar_foo, x.barFoo,
    x.BarFoo, and x.BAR_FOO in order for the first value that is not None.
  """
  for name in args:
    value = resource_property.Get(r, [name], None)
    if value is not None:
      return value
  return ''
def TransformFloat(r, precision=6, spec=None, undefined=''):
  """Returns the string representation of a floating point number.

  One of these formats is used (1) ". _precision_ _spec_" if _spec_ is specified
  (2) ". _precision_" unless 1e-04 <= abs(number) < 1e+09 (3) ".1f" otherwise.

  Args:
    r: A JSON-serializable object.
    precision: The maximum number of digits before and after the decimal point.
    spec: The printf(3) floating point format "e", "f" or "g" spec character.
    undefined: Returns this value if the resource is not a float.

  Returns:
    The string representation of the floating point number r.
  """
  # The default precision of 6 (vs. repr()'s 12) avoids implementation
  # dependent floating point roundoff differences in the fraction digits.
  # round(float(r), precision) is not equivalent: it only counts digits
  # immediately after the decimal point, so round(1.23e-11, 6) would be 0.
  try:
    value = float(r)
  except (TypeError, ValueError):
    return undefined
  if spec is not None:
    # Nested replacement fields build the ".{precision}{spec}" format spec.
    return '{value:.{precision}{spec}}'.format(
        value=value, precision=precision, spec=spec)
  compact = '{value:.{precision}}'.format(value=value, precision=precision)
  # The general format may produce an exponent; keep plain ".1f" notation
  # while the positive exponent is below 9.
  marker = compact.find('e+')
  if marker >= 0 and int(compact[marker + 2:]) < 9:
    return '{0:.1f}'.format(value)
  return compact
# The 'format' transform is special: it has no kwargs and the second argument
# is the ProjectionSpec of the calling projection.
def TransformFormat(r, projection, fmt, *args):
  """Formats resource key values.

  Args:
    r: A JSON-serializable object.
    projection: The parent ProjectionSpec.
    fmt: The format string with {0} ... {nargs-1} references to the resource
      attribute name arg values.
    *args: The resource attribute key expression to format. The printer
      projection symbols and aliases may be used in key expressions.

  Returns:
    The formatted string.

  Example:
    --format='value(format("{0:f.1}/{0:f.1}", q.CPU.default, q.CPU.limit))'
  """
  # Compile the keys as one parenthesized column expression, evaluate them
  # against r, then substitute the column values into fmt.
  expression = '(' + ','.join(args) + ')'
  compiled = projection.compiler(expression, by_columns=True,
                                 defaults=projection)
  return fmt.format(*compiled.Evaluate(r))
def TransformGroup(r, *args):
  """Formats a [...] grouped list.

  Each group is enclosed in [...]. The first item separator is ':', subsequent
  separators are ','.

    [item1] [item1] ...
    [item1: item2] ... [item1: item2]
    [item1: item2, item3] ... [item1: item2, item3]

  Args:
    r: A JSON-serializable object.
    *args: Optional attribute names to select from the list. Otherwise
      the string value of each list item is selected.

  Returns:
    The [...] grouped formatted list, [] if r is empty.
  """
  if not r:
    return '[]'
  buf = StringIO.StringIO()
  first_item = True
  for item in r:
    if not first_item:
      buf.write(' ')
    first_item = False
    if not args:
      buf.write('[{0}]'.format(unicode(item)))
      continue
    buf.write('[')
    for position, attr in enumerate(args):
      # ':' separates the first attribute from the rest, ',' the others.
      if position == 1:
        buf.write(': ')
      elif position > 1:
        buf.write(', ')
      if isinstance(item, dict):
        value = item.get(attr)
      else:
        value = getattr(item, attr, None)
      if value is not None:
        buf.write(unicode(value))
    buf.write(']')
  return buf.getvalue()
def TransformIf(r, expr):
  """Disables the projection key if the flag name filter expr is false.

  Args:
    r: A JSON-serializable object.
    expr: A command flag filter name expression. See `gcloud topic filters` for
      details on filter expressions. The expression variables are flag names
      without the leading *--* prefix and dashes replaced by underscores.

  Example:
    The "table(name, value.if(NOT short_format))" format will list a value
    column if the *--short-format* command line flag is not specified.

  Returns:
    r
  """
  # The expression is evaluated by the projection frontend; the transform
  # itself just passes the resource through.
  del expr
  return r
def TransformIso(r, undefined='T'):
  """Formats the resource to numeric ISO time format.

  Args:
    r: A JSON-serializable object.
    undefined: Returns this value if the resource does not have an isoformat()
      attribute.

  Returns:
    The numeric ISO time format for r or undefined if r is not a time.
  """
  # Delegate to the date transform with a fixed ISO-8601 format that keeps
  # milliseconds (%3f) and the numeric timezone offset (%Oz).
  iso_format = '%Y-%m-%dT%H:%M:%S.%3f%Oz'
  return TransformDate(r, format=iso_format, undefined=undefined)
def TransformJoin(r, sep='/', undefined=''):
  """Joins the elements of the resource list by the value of sep.

  A string resource is treated as a list of characters.

  Args:
    r: A string or list.
    sep: The separator value to use when joining.
    undefined: Returns this value if the result after joining is empty.

  Returns:
    A new string containing the resource values joined by sep.

  Example:
    "a/b/c/d".split("/").join("!") returns "a!b!c!d"
  """
  try:
    joined = sep.join([unicode(element) for element in r])
  except (AttributeError, TypeError):
    return undefined
  return joined or undefined
def TransformLen(r):
  """Returns the length of the resource if it is non-empty, 0 otherwise.

  Args:
    r: A JSON-serializable object.

  Returns:
    The length of r if r is non-empty, 0 otherwise.
  """
  try:
    size = len(r)
  except TypeError:
    # Objects with no length (numbers, None, ...) count as empty.
    size = 0
  return size
def TransformList(r, show='', undefined='', separator=','):
  """Formats a dict or list into a compact comma separated list.

  Args:
    r: A JSON-serializable object.
    show: If show=*keys* then list dict keys; if show=*values* then list dict
      values; otherwise list dict key=value pairs.
    undefined: Return this if the resource is empty.
    separator: The list item separator string.

  Returns:
    The key=value pairs for a dict or list values for a list, separated by
    separator. Returns undefined if r is empty, or r if it is not a dict or
    list.
  """
  if isinstance(r, dict):
    if show == 'keys':
      rendered = [unicode(key) for key in sorted(r)]
    elif show == 'values':
      rendered = [unicode(value) for _, value in sorted(r.iteritems())]
    else:
      rendered = [u'{k}={v}'.format(k=key, v=value)
                  for key, value in sorted(r.iteritems())]
    return separator.join(rendered)
  if isinstance(r, list):
    return separator.join([unicode(element) for element in r])
  return r or undefined
def TransformMap(r):
  """Applies the next transform in the sequence to each resource list item.

  Example:
    list_field.map().foo().bar() applies foo() to each item in list_field and
    then bar() to the resulting value. list_field.map().foo().map().bar()
    applies foo() to each item in list_field and then bar() to each item in the
    resulting list.

  Args:
    r: A resource.

  Returns:
    r.
  """
  # Parse-time decorator: recognized and discarded by the expression parser,
  # so at run time the resource passes through untouched.
  return r
def TransformResolution(r, undefined='', transpose=False):
  """Formats a human readable XY resolution.

  Args:
    r: object, A JSON-serializable object containing an x/y resolution.
    undefined: Returns this value if a recognizable resolution was not found.
    transpose: Returns the y/x resolution if True.

  Returns:
    The human readable x/y resolution for r if it contains members that
    specify width/height, col/row, col/line, or x/y resolution. Returns
    undefined if no resolution found.
  """
  names = (
      ('width', 'height'),
      ('screenx', 'screeny'),
      ('col', 'row'),
      ('col', 'line'),
      ('x', 'y'),
  )

  # Collect the candidate member names, mapping lower case name to the
  # original member name.
  mem = {}
  for m in r if isinstance(r, dict) else dir(r):
    if not m.startswith('__') and not m.endswith('__'):
      mem[m.lower()] = m

  def _Dimension(d):
    """Gets the resolution dimension for d.

    Args:
      d: The dimension name substring to get.

    Returns:
      The resolution dimension matching d or None.
    """
    for m in mem:
      if d in m:
        # Fix: look up the matched member name m, not the search substring d.
        # d need not be a key of mem (e.g. d='col' matching member 'cols'),
        # which previously raised KeyError.
        return resource_property.Get(r, [mem[m]], None)
    return None

  # Check member name pairwise matches in order from least to most ambiguous.
  for name_x, name_y in names:
    x = _Dimension(name_x)
    if x is None:
      continue
    y = _Dimension(name_y)
    if y is None:
      continue
    return ('{y} x {x}' if transpose else '{x} x {y}').format(x=x, y=y)
  return undefined
def TransformScope(r, *args):
  """Gets the /args/ suffix from a URI.

  Args:
    r: A URI.
    *args: Optional URI segment names. If not specified then 'regions', 'zones'
      is assumed.

  Returns:
    The URI segment after the first /*args/ in r, the last /-separated
    component in r if none found.

  Example:
    "https://abc/foo/projects/bar/xyz".scope("projects") returns "bar/xyz".
    "https://xyz/foo/regions/abc".scope() returns "abc".
  """
  if not r:
    return ''
  r = urllib2.unquote(unicode(r))
  if '/' not in r:
    return r
  # Checking for regions and/or zones is the most common use case.
  for scope in args or ('regions', 'zones'):
    marker = '/' + scope + '/'
    if marker in r:
      return r.split(marker)[-1]
  return r.split('/')[-1] if r.startswith('https://') else r
def TransformSegment(r, index=-1, undefined=''):
  """Returns the index-th URI path segment.

  Args:
    r: A URI path.
    index: The path segment index to return counting from 0.
    undefined: Returns this value if the resource or segment index is empty.

  Returns:
    The index-th URI path segment in r
  """
  if not r:
    return undefined
  segments = urllib2.unquote(unicode(r)).split('/')
  try:
    return segments[int(index)] or undefined
  except IndexError:
    return undefined
# pylint: disable=redefined-builtin, params match the transform spec
def TransformSize(r, zero='0', precision=1, units_in=None, units_out=None,
                  min=0):
  """Formats a human readable size in bytes.

  Args:
    r: A size in bytes.
    zero: Returns this if size==0. Ignored if None.
    precision: The number of digits displayed after the decimal point.
    units_in: A unit suffix (only the first character is checked) or unit size.
      The size is multiplied by this. The default is 1.0.
    units_out: A unit suffix (only the first character is checked) or unit size.
      The size is divided by this. The default is 1.0.
    min: Sizes < _min_ will be listed as "< _min_".

  Returns:
    A human readable scaled size in bytes.
  """

  def _ParseUnit(unit):
    """Returns (unit_suffix, unit_size) for unit; ('', 1.0) if unrecognized.

    Args:
      unit: The unit suffix (only the first character is checked), the unit
        size in bytes, or None.

    Returns:
      A (unit_suffix, unit_size) tuple.
    """
    scale = {
        'K': 2 ** 10,
        'M': 2 ** 20,
        'G': 2 ** 30,
        'T': 2 ** 40,
        'P': 2 ** 50,
    }
    # A numeric unit has no suffix; 0 falls back to 1.0.
    try:
      return ('', float(unit) or 1.0)
    except (TypeError, ValueError):
      pass
    # Otherwise only the first character selects the scale.
    try:
      suffix = unit[0].upper()
      return (suffix, scale[suffix])
    except (IndexError, KeyError, TypeError):
      pass
    return ('', 1.0)

  if not r and zero is not None:
    return zero
  try:
    size = float(r)
  except (TypeError, ValueError):
    size = 0
  floor = float(min)  # Exception OK here.
  if size < floor:
    size = floor
    prefix = '< '
  else:
    prefix = ''
  size *= _ParseUnit(units_in)[1]
  out_suffix, out_size = _ParseUnit(units_out)
  if out_suffix:
    # An explicit output unit: print the scaled number only.
    return '{{0:.{precision}f}}'.format(precision=precision).format(
        size / out_size)
  # Otherwise auto-scale to the largest unit that keeps the value < 1024.
  the_unit = 'PiB'
  for candidate in ('bytes', 'KiB', 'MiB', 'GiB', 'TiB', 'PiB'):
    if size < 1024.0:
      the_unit = candidate
      break
    size /= 1024.0
  if the_unit:
    the_unit = ' ' + the_unit
  if size == int(size):
    return '{0}{1}{2}'.format(prefix, int(size), the_unit)
  fmt = '{{0}}{{1:.{precision}f}}{{2}}'.format(precision=precision)
  return fmt.format(prefix, size, the_unit)
def TransformSlice(r, op=':', undefined=''):
  """Returns a list slice specified by op.

  The op parameter consists of up to three colon-delimeted integers: start, end,
  and step. The parameter supports half-open ranges: start and end values can
  be omitted, representing the first and last positions of the resource
  respectively.

  The step value represents the increment between items in the resource included
  in the slice. A step of 2 results in a slice that contains every other item in
  the resource.

  Negative values for start and end indicate that the positons should start from
  the last position of the resource. A negative value for step indicates that
  the slice should contain items in reverse order.

  If op contains no colons, the slice consists of the single item at the
  specified position in the resource.

  Args:
    r: A JSON-serializable string or array.
    op: The slice operation.
    undefined: Returns this value if the slice cannot be created, or the
      resulting slice is empty.

  Returns:
    A new array containing the specified slice of the resource.

  Example:
    [1,2,3].slice(1:) returns [2,3].
    [1,2,3].slice(:2) returns [1,2].
    [1,2,3].slice(-1:) returns [3].
    [1,2,3].slice(::-1) returns [3,2,1].
    [1,2,3].slice(1) returns [2].
  """
  spec = op.strip()
  if not spec:
    return undefined
  # Turn each colon-delimited field into an int, or None when omitted.
  try:
    bounds = []
    for field in spec.split(':'):
      field = field.strip()
      bounds.append(int(field) if field else None)
  except (AttributeError, TypeError, ValueError):
    return undefined
  if len(bounds) == 1:
    # A bare index i means the slice i:i+1. When i+1 == 0 (i.e. i == -1) use
    # None as the end: [-1:0] would be empty, [-1:] keeps the last element.
    bounds.append(bounds[0] + 1 or None)
  try:
    return list(r[slice(*bounds)]) or undefined
  except (TypeError, ValueError):
    return undefined
def TransformSplit(r, sep='/', undefined=''):
  """Splits a string by the value of sep.

  Args:
    r: A string.
    sep: The separator value to use when splitting.
    undefined: Returns this value if the result after splitting is empty.

  Returns:
    A new array containing the split components of the resource.

  Example:
    "a/b/c/d".split() returns ["a", "b", "c", "d"]
  """
  if r:
    try:
      return r.split(sep)
    except (AttributeError, TypeError, ValueError):
      pass
  return undefined
def TransformUri(r, undefined='.'):
  """Gets the resource URI.

  Args:
    r: A JSON-serializable object.
    undefined: Returns this if a the URI for r cannot be determined.

  Returns:
    The URI for r or undefined if not defined.
  """

  def _StringValue(attr):
    """Returns the string value for attr or None if the value is not a string.

    Args:
      attr: The attribute object to get the value from.

    Returns:
      The string value for attr or None if the value is not a string.
    """
    # attr may be a method (e.g. a message accessor); call it if so.
    try:
      attr = attr()
    except TypeError:
      pass
    if isinstance(attr, (basestring, buffer)):
      return attr
    return None

  if isinstance(r, (basestring, buffer)):
    # A string resource is its own URI if it looks like one.
    if r.startswith('https://'):
      return r
  elif r:
    for name in ('selfLink', 'SelfLink'):
      uri = _StringValue(resource_property.Get(r, [name], None))
      if uri:
        return uri
  return undefined
def TransformYesNo(r, yes=None, no='No'):
  """Returns no if the resource is empty, yes or the resource itself otherwise.

  Args:
    r: A JSON-serializable object.
    yes: If the resource is not empty then returns _yes_ or the resource itself
      if _yes_ is not defined.
    no: Returns this value if the resource is empty.

  Returns:
    yes or r if r is not empty, no otherwise.
  """
  if not r:
    return no
  return yes if yes is not None else r
# The builtin transforms. Maps the transform name usable in projection and
# filter expressions (e.g. "name.basename()") to its implementation function
# defined above.
_BUILTIN_TRANSFORMS = {
    'always': TransformAlways,
    'basename': TransformBaseName,
    'collection': TransformCollection,
    'color': TransformColor,
    'date': TransformDate,
    'decode': TransformDecode,
    'duration': TransformDuration,
    'encode': TransformEncode,
    'enum': TransformEnum,
    'error': TransformError,
    'extract': TransformExtract,
    'fatal': TransformFatal,
    'firstof': TransformFirstOf,
    'float': TransformFloat,
    'format': TransformFormat,
    'group': TransformGroup,
    'if': TransformIf,
    'iso': TransformIso,
    'join': TransformJoin,
    'len': TransformLen,
    'list': TransformList,
    'map': TransformMap,
    'resolution': TransformResolution,
    'scope': TransformScope,
    'segment': TransformSegment,
    'size': TransformSize,
    'slice': TransformSlice,
    'split': TransformSplit,
    'uri': TransformUri,
    'yesno': TransformYesNo,
}
# This dict maps API names (the leftmost dotted name in a collection) to
# (module_path, method_name) tuples where:
#   module_path: A dotted module path that contains a transform dict.
#   method_name: A method name in the module that returns the transform dict.
# The modules are imported lazily, on first use by GetTransforms() below.
_API_TO_TRANSFORMS = {
    'compute': ('googlecloudsdk.api_lib.compute.transforms', 'GetTransforms'),
    'debug': ('googlecloudsdk.api_lib.debug.transforms', 'GetTransforms'),
    'runtimeconfig': (
        'googlecloudsdk.api_lib.deployment_manager.runtime_configs.transforms',
        'GetTransforms'
    ),
    'service_registry': ('googlecloudsdk.api_lib.service_registry.transforms',
                         'GetTransforms'),
}
def GetTransforms(collection=None):
  """Returns the builtin or collection specific transform symbols dict.

  Args:
    collection: A collection, None or 'builtin' for the builtin transforms.

  Raises:
    ImportError: module_path __import__ error.
    AttributeError: module does not contain method_name.

  Returns:
    The transform symbols dict, None if there is none.
  """
  if collection in (None, 'builtin'):
    return _BUILTIN_TRANSFORMS
  # The API name is the leftmost dotted component of the collection.
  api = collection.split('.')[0]
  module_path, method_name = _API_TO_TRANSFORMS.get(api, (None, None))
  if not module_path:
    return None
  # Exceptions after this point indicate configuration/installation errors.
  module = __import__(module_path, fromlist=[method_name])
  return getattr(module, method_name)()
def GetTypeDataName(name, type_name='object'):
  """Returns the data name for name of type type_name.

  Args:
    name: The data name.
    type_name: The data type name.

  Returns:
    The data name for name of type type_name.
  """
  return '{0}::{1}'.format(name, type_name)
|
import bisect
import math
import os
import operator
import random
import six
import sys
import yaml
from collections import defaultdict
from marisa_trie import BytesTrie
from geodata.text.phrases import PhraseFilter
from geodata.encoding import safe_encode, safe_decode
from geodata.i18n.unicode_paths import DATA_DIR
from geodata.numbers.numex import NUMEX_DATA_DIR
class OrdinalSuffixTrie(PhraseFilter):
    """Trie for matching ordinal suffixes at the end of a token.

    Suffix keys are stored reversed so that a suffix match on a token becomes
    a prefix match on the reversed token.
    """

    def __init__(self, ordinal_rules):
        # Keys are the reversed suffixes; values are the '|'-joined, UTF-8
        # encoded phrase lists for each suffix.
        self.trie = BytesTrie([(safe_decode(k)[::-1], safe_decode('|').join(v).encode('utf-8')) for k, v in six.iteritems(ordinal_rules)])
        self.configured = True

    def search_substring(self, s):
        """Returns (values, length) for the longest trie prefix of s, (None, 0) if none."""
        if len(s) == 0:
            return None, 0
        # Grow the candidate prefix until the trie has no key starting with
        # it, then back off one character. If the loop never breaks, the loop
        # variable leaks out as len(s), i.e. the whole string is the prefix.
        for i in xrange(len(s) + 1):
            if not self.trie.has_keys_with_prefix(s[:i]):
                i -= 1
                break
        if i > 0:
            return (self.trie.get(s[:i]), i)
        else:
            return None, 0

    def search_suffix(self, token):
        """Returns the phrase list for token's longest matching suffix, or None."""
        # Reverse the token so the suffix lookup becomes a prefix search.
        suffix_search, suffix_len = self.search_substring(safe_decode(token[::-1]))
        if suffix_search:
            # Values were stored '|'-joined; split back into a list.
            return suffix_search[0].split('|')
        else:
            return None
class OrdinalExpressions(object):
    """Loads per-language numex cardinal/ordinal rules from YAML data files."""

    def __init__(self, base_dir=NUMEX_DATA_DIR):
        """Parses every <lang>.yaml rule file found under base_dir.

        Args:
            base_dir: directory containing per-language numex rule files.
        """
        self.cardinal_rules = {}
        # (lang, gender, category) -> cardinal word for the value 1.
        self.cardinal_rules_ones = {}
        self.ordinal_rules = {}
        # (lang, gender, category) -> OrdinalSuffixTrie of ordinal suffixes.
        self.ordinal_suffix_rules = {}
        for filename in os.listdir(base_dir):
            if not filename.endswith('.yaml'):
                continue
            lang = filename.split('.yaml')[0]
            # Fix: close the file handle deterministically (it was previously
            # opened without ever being closed).
            with open(os.path.join(base_dir, filename)) as f:
                # NOTE(review): yaml.load without an explicit Loader executes
                # arbitrary tags on untrusted input; these are package data
                # files, but consider yaml.safe_load.
                data = yaml.load(f)
            rules = data.get('rules')
            # hasattr(..., '__getslice__') is a Python 2 "is list-like" check.
            if rules is not None and hasattr(rules, '__getslice__'):
                cardinals = []
                ordinals = defaultdict(list)
                for rule in rules:
                    name = rule.get('name')
                    value = rule.get('value')
                    rule_type = rule.get('type')
                    if not name or type(value) not in (int, float) or rule_type not in ('cardinal', 'ordinal'):
                        continue
                    gender = rule.get('gender', None)
                    category = rule.get('category', None)
                    if rule_type == 'ordinal':
                        ordinals[(value, gender, category)].append(name)
                    else:
                        cardinals.append(rule)
                        if value == 1:
                            self.cardinal_rules_ones[(lang, gender, category)] = name
                self.cardinal_rules[lang] = cardinals
                self.ordinal_rules[lang] = ordinals
            ordinal_indicators = data.get('ordinal_indicators')
            if ordinal_indicators is not None and hasattr(ordinal_indicators, '__getslice__'):
                for rule_set in ordinal_indicators:
                    gender = rule_set.get('gender', None)
                    category = rule_set.get('category', None)
                    self.ordinal_suffix_rules[(lang, gender, category)] = OrdinalSuffixTrie(rule_set['suffixes'])

    def get_suffixes(self, num, lang, gender=None, category=None):
        """Returns all ordinal suffix phrases for num in lang, or None."""
        trie = self.ordinal_suffix_rules.get((lang, gender, category))
        if not trie:
            return None
        return trie.search_suffix(str(num))

    def get_suffix(self, num, lang, gender=None, category=None):
        """Returns one (randomly chosen) ordinal suffix for num, or None."""
        suffixes = self.get_suffixes(num, lang, gender=gender, category=category)
        if not suffixes:
            return None
        return random.choice(suffixes)

    def suffixed_number(self, num, lang, gender=None, category=None):
        """Returns num with an ordinal suffix appended (e.g. '3' -> '3rd'), or None."""
        suffix = self.get_suffix(num, lang, gender=gender, category=category)
        if not suffix:
            return None
        return six.u('{}{}').format(safe_decode(num), safe_decode(suffix))
# Module-level singleton: parses all numex YAML rule files at import time.
ordinal_expressions = OrdinalExpressions()
|
<gh_stars>1-10
# -*- coding: utf-8 -*-
"""
Created on Tue Mar 31 11:44:05 2020
@author: Andy
"""
import os
import re
import time
from datetime import datetime
from random import randrange
# Extract table content in each Oracle script
# Extract table content in each Oracle script
def LoadTableContent(filename):
    """Returns the column-definition lines of an Oracle DDL script.

    Reads *filename* (relative to the global oracle_path set by main()) and
    returns the lines strictly between the first line that is exactly '(' and
    the first line that is exactly ')'. Returns [] if no such pair exists.
    """
    with open(os.path.join(oracle_path, filename), 'r') as oracle:
        lines = oracle.readlines()
    start = end = None
    for index, line in enumerate(lines):
        # The content region is delimited by the first '('-only and ')'-only
        # lines, whichever order they appear in.
        if start is None and line == '(\n':
            start = index + 1
        elif end is None and line == ')\n':
            end = index
        if start is not None and end is not None:
            return lines[start:end]
    return []
# Replace texts in template as required
def ReplaceText(filenum, filelist):
for i in range(filenum):
schema = filelist[i].split('.')[0]
table = filelist[i].split('.')[1]
# Create output folder for each schema
schema_path = os.path.join(output_path, schema)
if not os.path.exists(schema_path):
os.mkdir(schema_path)
content = LoadTableContent(filelist[i])
with open(os.path.join(schema_path, table + '.sql'), 'w') as file:
first_indent = True
f = open(os.path.join(template_path,'<TEMPLATE_FILENAME>.sql'))
for line in f:
# Check if specific line is not empty
if line not in ['\n', '\r\n']:
# Check if specific line has leading spaces
if re.match(r'\s', line):
# Check if it's the first line with indent
if first_indent:
for j in content:
# Replace the leading spaces with tab
line = '\t' + j.lstrip(' ')
# NUMBER(p,0) and NUMBER(p), p >= 0
pattern_1 = re.compile(r'NUMBER\((?:[0-9]+,0|[0-9]+)\)')
if pattern_1.search(line):
nums = [int(k) for k in re.split(r'\(|,|\)', line) if k.isdigit()]
line = re.sub(r'NUMBER\((?:[0-9]+,0|[0-9]+)\)', 'DECIMAL(%s)' % nums[0], line)
# NUMBER(p,s), p >= 0, s > 0
pattern_2 = re.compile(r'NUMBER\([0-9]+,(?:[1-9]|[1-9][0-9]|[1-9][0-9][0-9])\)')
if pattern_2.search(line):
nums = [int(l) for l in re.split(r'\(|,|\)', line) if l.isdigit()]
line = re.sub(r'NUMBER\([0-9]+,(?:[1-9]|[1-9][0-9]|[1-9][0-9][0-9])\)', 'DECIMAL(%s,%s)' % (nums[0], nums[1]), line)
# VARCHAR2 to NVARCHAR
line = line.replace(' VARCHAR2', ' NVARCHAR')
# NVARCHAR2 to NVARCHAR
line = line.replace(' NVARCHAR2', ' NVARCHAR')
# Remove BYTE in VARCHAR2
line = line.replace(' BYTE', '')
# Lowercase sysdate to getdate()
line = line.replace('sysdate', 'getdate()')
# Uppercase SYSDATE to GETDATE()
line = line.replace(' SYSDATE', ' GETDATE()')
# TIMESTAMP to DATETIME2
line = line.replace(' TIMESTAMP', ' DATETIME2')
line = line.replace("to_number(to_char(getdate(),'YYYYMMDDHH24MISS'))", 'CONVERT(CHAR(20),GETDATE(),120)')
file.write(line)
# Set first_indent as False after all the contents are written
first_indent = False
else:
pass
# Check if line starts with '/'
elif line.startswith('/'):
pass
else:
# TEMPLATE_FILENAME to table name
line = line.replace("<TEMPLATE_FILENAME>", table)
file.write(line)
else:
file.write(line)
#time.sleep(randrange(480, 781)) # Time gap of 8 ~ 13 mins
def main():
    """Converts all Oracle DDL scripts under ./Oracle to MSSQL under ./MSSQL."""
    start_time = datetime.now()
    # Set all relevant paths (globals consumed by the helpers above).
    global output_path, oracle_path, template_path
    script_path = os.path.dirname(os.path.abspath(__file__))
    template_path = os.path.join(script_path, 'Templates')
    oracle_path = os.path.join(script_path, 'Oracle')
    output_path = os.path.join(script_path, 'MSSQL')
    # Create output path if not exists
    if not os.path.exists(output_path):
        os.mkdir(output_path)
    # Fix: scan the Oracle directory once (it was listed twice before).
    filelist = os.listdir(oracle_path)
    ReplaceText(len(filelist), filelist)
    end_time = datetime.now()
    time_diff = end_time - start_time
    print('Execution time:', str(time_diff))
if __name__ == '__main__':
main()
|
<filename>channels/saghe.py
# -*- coding: utf-8 -*-
# StreamOnDemand Community Edition - Kodi Addon
# ------------------------------------------------------------
# streamondemand - XBMC Plugin
# <NAME>
# http://www.mimediacenter.info/foro/viewforum.php?f=36
# ------------------------------------------------------------
import re
import datetime
import json
import urllib
from core import httptools, tmdb
from platformcode import logger
from core import scrapertools
from core.item import Item
from core.tmdb import infoSod
__channel__ = "saghe"
# Number of Bad Movies entries listed per page.
PERPAGE = 20
tmdb_key = tmdb.tmdb_auth_key #tmdb_key = '<KEY>'
# Reference time: 5 hours behind UTC (presumably to match the upstream
# site's publication timezone -- TODO confirm).
dttime = (datetime.datetime.utcnow() - datetime.timedelta(hours=5))
systime = dttime.strftime('%Y%m%d%H%M%S%f')
today_date = dttime.strftime('%Y-%m-%d')
# Rolling windows for date-based filtering: 30, 60 and 365 days back.
month_date = (dttime - datetime.timedelta(days=30)).strftime('%Y-%m-%d')
month2_date = (dttime - datetime.timedelta(days=60)).strftime('%Y-%m-%d')
year_date = (dttime - datetime.timedelta(days=365)).strftime('%Y-%m-%d')
# TMDb image CDN bases: full-size art and 500px-wide posters.
tmdb_image = 'http://image.tmdb.org/t/p/original'
tmdb_poster = 'http://image.tmdb.org/t/p/w500'
def mainlist(item):
    """Builds the channel root menu: one entry per curated TMDb list plus Bad Movies."""
    logger.info("streamondemand.saghe mainlist")
    # (menu title, TMDb list id, poster filename) for each curated list.
    tmdb_lists = [
        ("The Marvel Universe", "50941077760ee35e1500000c",
         "6t3KOEUtrIPmmtu1czzt6p2XxJy.jpg"),
        ("The DC Comics Universe", "5094147819c2955e4c00006a",
         "xWlaTLnD8NJMTT9PGOD9z5re1SL.jpg"),
        ("iMDb Top 250 Movies", "522effe419c2955e9922fcf3",
         "9O7gLzmreU0nGkIB6K3BsJbzvNv.jpg"),
        ("Rotten Tomatoes top 100 movies of all times", "5418c914c3a368462c000020",
         "zGadcmcF48gy8rKCX2ubBz2ZlbF.jpg"),
        ("Reddit top 250 movies", "54924e17c3a3683d070008c8",
         "dM2w364MScsjFf8pfMbaWUcWrR.jpg"),
        ("Sci-Fi Action", "54408e79929fb858d1000052",
         "5ig0kdWz5kxR4PHjyCgyI5khCzd.jpg"),
        ("007 - Movies", "557b152bc3a36840f5000265",
         "zlWBxz2pTA9p45kUTrI8AQiKrHm.jpg"),
        ("Disney Classic Collection", "51224e42760ee3297424a1e0",
         "vGV35HBCMhQl2phhGaQ29P08ZgM.jpg"),
    ]
    itemlist = [Item(channel=__channel__,
                     title="[COLOR yellow]%s[/COLOR]" % name,
                     action="tmdb_saghe",
                     url='http://api.themoviedb.org/3/list/%s?api_key=%s&language=it' % (list_id, tmdb_key),
                     thumbnail="https://image.tmdb.org/t/p/w180_and_h270_bestv2/%s" % poster)
                for name, list_id, poster in tmdb_lists]
    itemlist.append(Item(channel=__channel__,
                         title="[COLOR yellow]Bad Movies[/COLOR]",
                         action="badmovies",
                         url='http://www.badmovies.org/movies/',
                         thumbnail="http://www.badmovies.org/mainpage/badmovielogo_600.jpg"))
    return itemlist
def tmdb_saghe(item):
    """Build a list of movie items from a TMDb "list" API endpoint.

    Parameters
    ----------
    item : Item
        Menu item whose ``url`` points at a TMDb list endpoint returning
        JSON with an ``items`` array.

    Returns
    -------
    list of Item, or None when the download/JSON parse fails.
    """
    try:
        result = httptools.downloadpage(item.url).data
        result = json.loads(result)
        entries = result['items']
    except Exception:
        # Network or JSON failure: return no results (best-effort behaviour).
        return
    itemlist = []
    # Fixed: the original loop variable re-used the name `item`, shadowing
    # the function parameter; renamed to `entry`.
    for entry in entries:
        try:
            title = entry['title']
            title = scrapertools.decodeHtmlentities(title)
            title = title.encode('utf-8')
            poster = entry['poster_path']
            if poster == '' or poster is None:
                # A missing poster disqualifies the entry entirely.
                raise Exception()
            else:
                poster = '%s%s' % (tmdb_poster, poster)
                poster = poster.encode('utf-8')
            fanart = entry['backdrop_path']
            if fanart == '' or fanart is None: fanart = '0'
            if not fanart == '0': fanart = '%s%s' % (tmdb_image, fanart)
            fanart = fanart.encode('utf-8')
            plot = entry['overview']
            if plot == '' or plot is None: plot = '0'
            plot = scrapertools.decodeHtmlentities(plot)
            plot = plot.encode('utf-8')
            itemlist.append(
                Item(channel=__channel__,
                     action="do_search",
                     extra=urllib.quote_plus(title),
                     title="[COLOR azure]%s[/COLOR]" % title,
                     fulltitle=title,
                     plot=plot,
                     thumbnail=poster,
                     fanart=fanart,
                     folder=True))
        except Exception:
            # Skip malformed entries rather than aborting the whole list.
            pass
    return itemlist
def badmovies(item):
    """List titles scraped from badmovies.org, PERPAGE entries at a time.

    The current page number is carried inside ``item.url`` after a ``{}``
    separator (e.g. ``http://...{}2``); without the marker, page 1 is shown.
    """
    itemlist = []
    page = 1
    if '{}' in item.url:
        item.url, page = item.url.split('{}')
        page = int(page)
    # Download the listing page and isolate the results table.
    data = httptools.downloadpage(item.url).data
    data = scrapertools.find_single_match(data, '<table width="100%" cellpadding="6" cellspacing="1" class="listtab">(.*?)<tr><td align="center" valign="top">')
    # Extract every linked title.
    patron = r'">([^<]+)\s*</a>'
    matches = re.compile(patron, re.DOTALL).findall(data)
    scrapedplot = ""
    scrapedthumbnail = ""
    for i, scrapedtitle in enumerate(matches):
        # Keep only the slice of results belonging to the current page.
        if (page - 1) * PERPAGE > i: continue
        if i >= page * PERPAGE: break
        title = scrapertools.decodeHtmlentities(scrapedtitle).strip()
        itemlist.append(infoSod(
            Item(channel=__channel__,
                 extra=item.extra,
                 action="do_search",
                 title=title,
                 url=title,
                 thumbnail=scrapedthumbnail,
                 fulltitle=title,
                 show=title,
                 plot=scrapedplot,
                 folder=True), tipo='movie'))
    if len(itemlist) > 0:
        # Fixed: the original had a stray trailing comma after this append,
        # which turned the statement into a discarded one-element tuple.
        itemlist.append(
            Item(channel=__channel__,
                 action="HomePage",
                 title="[COLOR yellow]Torna Home[/COLOR]",
                 folder=True))
    if len(matches) >= page * PERPAGE:
        # More results remain: add a "next page" entry.
        next_url = item.url + '{}' + str(page + 1)
        itemlist.append(
            Item(channel=__channel__,
                 extra=item.extra,
                 action="badmovies",
                 title="[COLOR orange]Successivo >>[/COLOR]",
                 url=next_url,
                 thumbnail="http://2.bp.blogspot.com/-fE9tzwmjaeQ/UcM2apxDtjI/AAAAAAAAeeg/WKSGM2TADLM/s1600/pager+old.png",
                 folder=True))
    return itemlist
def do_search(item):
    """Delegate the search action to the shared ``buscador`` channel."""
    from channels import buscador
    result = buscador.do_search(item)
    return result
|
<reponame>ChangHoon-Sung/streamlit
# Copyright 2018-2022 Streamlit Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Common cache logic shared by st.memo and st.singleton."""
import contextlib
import functools
import hashlib
import inspect
import threading
import types
from abc import abstractmethod
from typing import Callable, List, Iterator, Tuple, Optional, Any, Union
import streamlit as st
from streamlit import util
from streamlit.caching.cache_errors import CacheKeyNotFoundError
from streamlit.logger import get_logger
from .cache_errors import (
CacheType,
CachedStFunctionWarning,
UnhashableParamError,
UnhashableTypeError,
)
from .hashing import update_hash
_LOGGER = get_logger(__name__)
class Cache:
    """Abstract key/value store backing a cached function.

    Concrete subclasses decide where values actually live; entries
    survive across individual script runs.
    """

    @abstractmethod
    def read_value(self, value_key: str) -> Any:
        """Return the cached value stored under ``value_key``.

        Raises
        ------
        CacheKeyNotFoundError
            If no entry exists for ``value_key``.
        """
        raise NotImplementedError

    @abstractmethod
    def write_value(self, value_key: str, value: Any) -> None:
        """Store ``value`` under ``value_key``, replacing any prior entry."""
        raise NotImplementedError

    @abstractmethod
    def clear(self) -> None:
        """Drop every entry held by this function cache."""
        raise NotImplementedError
class CachedFunction:
    """Per-script-run record of one function registered with a cache.

    Instances hold only configuration (the wrapped function plus its
    display options); they are recreated each script run and are not
    persistent. Subclasses supply the concrete cache type, call stack,
    and cache factory.
    """

    def __init__(
        self, func: types.FunctionType, show_spinner: bool, suppress_st_warning: bool
    ):
        self.func = func
        self.show_spinner = show_spinner
        self.suppress_st_warning = suppress_st_warning

    @property
    def cache_type(self) -> CacheType:
        """The concrete cache flavor; overridden by subclasses."""
        raise NotImplementedError

    @property
    def call_stack(self) -> "CachedFunctionCallStack":
        """The thread-local call stack used for nested-`st`-call warnings."""
        raise NotImplementedError

    def get_function_cache(self, function_key: str) -> Cache:
        """Return (creating if needed) the Cache keyed by ``function_key``."""
        raise NotImplementedError
def create_cache_wrapper(cached_func: CachedFunction) -> Callable[..., Any]:
    """Wrap ``cached_func.func`` with the shared memo/singleton plumbing.

    The returned callable looks results up by a hash of the call
    arguments, runs the underlying function only on a cache miss, and
    exposes a ``clear()`` attribute that wipes the function's cache.
    """
    func = cached_func.func
    function_key = _make_function_key(cached_func.cache_type, func)

    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        """Return the cached result, computing it only on a cache miss."""
        # The cache object is fetched on every call because caches can be
        # invalidated at any time between calls.
        cache = cached_func.get_function_cache(function_key)

        name = func.__qualname__
        arg_part = "()" if not args and not kwargs else "(...)"
        message = f"Running `{name}{arg_part}`."

        def get_or_create_cached_value():
            # The value key is derived from the actual call arguments.
            value_key = _make_value_key(cached_func.cache_type, func, *args, **kwargs)
            try:
                return_value = cache.read_value(value_key)
                _LOGGER.debug("Cache hit: %s", func)
            except CacheKeyNotFoundError:
                _LOGGER.debug("Cache miss: %s", func)
                with cached_func.call_stack.calling_cached_function(func):
                    if cached_func.suppress_st_warning:
                        with cached_func.call_stack.suppress_cached_st_function_warning():
                            return_value = func(*args, **kwargs)
                    else:
                        return_value = func(*args, **kwargs)
                cache.write_value(value_key, return_value)
            return return_value

        if cached_func.show_spinner:
            with st.spinner(message):
                return get_or_create_cached_value()
        return get_or_create_cached_value()

    def clear():
        """Clear the wrapped function's associated cache."""
        cached_func.get_function_cache(function_key).clear()

    # Mypy cannot declare attributes on function objects; suppress until
    # https://github.com/python/mypy/issues/2087 is resolved.
    wrapper.clear = clear  # type: ignore
    return wrapper
class CachedFunctionCallStack(threading.local):
    """Thread-local bookkeeping for `st` calls made inside cached functions.

    Tracks the stack of currently-executing cached functions plus a
    warning-suppression counter; used to warn users who call `st.*`
    commands from inside an @memo/@singleton function. Because this is a
    ``threading.local`` subclass, each thread sees independent state.
    """

    def __init__(self, cache_type: CacheType):
        self._cached_func_stack: List[types.FunctionType] = []
        self._suppress_st_function_warning = 0
        self._cache_type = cache_type

    def __repr__(self) -> str:
        return util.repr_(self)

    @contextlib.contextmanager
    def calling_cached_function(self, func: types.FunctionType) -> Iterator[None]:
        """Keep ``func`` on the stack for the duration of the ``with`` body."""
        self._cached_func_stack.append(func)
        try:
            yield
        finally:
            self._cached_func_stack.pop()

    @contextlib.contextmanager
    def suppress_cached_st_function_warning(self) -> Iterator[None]:
        """Temporarily disable the nested-`st`-call warning."""
        self._suppress_st_function_warning += 1
        try:
            yield
        finally:
            self._suppress_st_function_warning -= 1
            # A negative counter would mean unbalanced enter/exit calls.
            assert self._suppress_st_function_warning >= 0

    def maybe_show_cached_st_function_warning(
        self, dg: "st.delta_generator.DeltaGenerator", st_func_name: str
    ) -> None:
        """Warn about an `st` call made inside a cached function, if needed.

        DeltaGenerator's @_with_element and @_widget wrappers use this to
        warn users who call st.foo() from within an @st.cache'd function.

        Parameters
        ----------
        dg : DeltaGenerator
            The DeltaGenerator to publish the warning to.
        st_func_name : str
            The name of the Streamlit function that was called.
        """
        if self._cached_func_stack and self._suppress_st_function_warning <= 0:
            self._show_cached_st_function_warning(
                dg, st_func_name, self._cached_func_stack[-1]
            )

    def _show_cached_st_function_warning(
        self,
        dg: "st.delta_generator.DeltaGenerator",
        st_func_name: str,
        cached_func: types.FunctionType,
    ) -> None:
        # Suppress further warnings while emitting this one, otherwise the
        # warning itself (an st call) would recurse forever.
        with self.suppress_cached_st_function_warning():
            warning = CachedStFunctionWarning(
                self._cache_type, st_func_name, cached_func
            )
            dg.exception(warning)
def _make_value_key(
    cache_type: CacheType, func: types.FunctionType, *args, **kwargs
) -> str:
    """Derive the cache key for one call's arguments.

    Every argument is hashed into the key except those whose parameter
    name starts with "_", which are deliberately excluded.

    Raises
    ------
    StreamlitAPIException
        Raised (with a nicely-formatted explanation message) if an
        un-hashable argument is encountered.
    """
    # Pair each positional arg with its parameter name (None when the name
    # can't be determined), then append the keyword args. PEP 468
    # (https://www.python.org/dev/peps/pep-0468/) guarantees **kwargs
    # preserves insertion order, so the iteration is deterministic.
    arg_pairs: List[Tuple[Optional[str], Any]] = [
        (_get_positional_arg_name(func, idx), value)
        for idx, value in enumerate(args)
    ]
    arg_pairs.extend(kwargs.items())

    # Fold every (name, value) pair into the hash, skipping
    # underscore-prefixed names.
    args_hasher = hashlib.new("md5")
    for arg_name, arg_value in arg_pairs:
        if arg_name is not None and arg_name.startswith("_"):
            _LOGGER.debug("Not hashing %s because it starts with _", arg_name)
            continue
        try:
            update_hash(
                (arg_name, arg_value),
                hasher=args_hasher,
                cache_type=cache_type,
            )
        except UnhashableTypeError as exc:
            raise UnhashableParamError(cache_type, func, arg_name, arg_value, exc)

    value_key = args_hasher.hexdigest()
    _LOGGER.debug("Cache key: %s", value_key)
    return value_key
def _make_function_key(cache_type: CacheType, func: types.FunctionType) -> str:
    """Create the unique key for a function's cache.

    A function's key is stable across reruns of the app, and changes when
    the function's source code changes.
    """
    func_hasher = hashlib.new("md5")

    # Include the function's __module__ and __qualname__ strings in the hash.
    # This means that two identical functions in different modules
    # will not share a hash; it also means that two identical *nested*
    # functions in the same module will not share a hash.
    update_hash(
        (func.__module__, func.__qualname__),
        hasher=func_hasher,
        cache_type=cache_type,
    )

    # Include the function's source code in its hash. If the source code can't
    # be retrieved, fall back to the function's bytecode instead.
    source_code: Union[str, bytes]
    try:
        source_code = inspect.getsource(func)
    except OSError as e:
        # Fixed: the original used a str.format-style "{0}" placeholder,
        # which logging's %-style lazy formatting never substitutes, so the
        # error was silently dropped from the log line.
        _LOGGER.debug(
            "Failed to retrieve function's source code when building its key; falling back to bytecode. err=%s",
            e,
        )
        source_code = func.__code__.co_code

    update_hash(
        source_code,
        hasher=func_hasher,
        cache_type=cache_type,
    )

    cache_key = func_hasher.hexdigest()
    return cache_key
def _get_positional_arg_name(func: types.FunctionType, arg_index: int) -> Optional[str]:
    """Return the name of ``func``'s positional parameter at ``arg_index``.

    Yields None for an out-of-range index, or for a parameter that is not
    a named positional one (*args, **kwargs, or keyword-only).
    """
    if arg_index < 0:
        return None
    params: List[inspect.Parameter] = list(inspect.signature(func).parameters.values())
    if arg_index >= len(params):
        return None
    param = params[arg_index]
    positional_kinds = (
        inspect.Parameter.POSITIONAL_OR_KEYWORD,
        inspect.Parameter.POSITIONAL_ONLY,
    )
    return param.name if param.kind in positional_kinds else None
|
# encoding=utf8
import sys
import spacy as sp
from flask import Flask, request, jsonify, session
from flask_cors import CORS, cross_origin
import urllib.request
import urllib.parse
# --- Application and model bootstrap (runs once at import time) ---
app = Flask(__name__)
app.secret_key = b'_5#y2L"F4Q8z\n\xec]/'
CORS(app, support_credentials=True)
#print('Initiating ontology server...', file=sys.stderr)

# Load language model
#print('Preloading language model...', file=sys.stderr)
nlp = sp.load('en_default')

from spacy.lemmatizer import Lemmatizer
from spacy.lang.en import LEMMA_INDEX, LEMMA_EXC, LEMMA_RULES
lemmatizer = Lemmatizer(LEMMA_INDEX, LEMMA_EXC, LEMMA_RULES)

# Load ontology model
#print('Preloading ontology model...', file=sys.stderr)
from owlready2 import *
import os

cwd = os.getcwd()
ontology_base = 'data/ontology/'
ontology_path = ontology_base + 'maintenance.'
onto_path.append(ontology_base)
basepath = os.path.dirname(__file__)
filepath = os.path.abspath(os.path.join(basepath, ontology_path + "owl"))
ontology_prefix = basepath + '/' + ontology_path
ontology_prefix_clean = basepath + '/' + ontology_base
# Fixed: get_ontology(...).load() already loads the ontology; the original
# called ontology.load() a second time immediately afterwards, which was
# redundant.
ontology = get_ontology(filepath).load()
ontology_prefix = cwd + ontology_prefix
# Monotonic counter used to mint unique ontology individual names per request.
iteration = 0
# test_issue = ontology.MaintenanceIssue("test_issue")
# test_use = ontology.BadUse("test_use")
# sync_reasoner()
# print("TEST1: ")
# print("-------")
# print()
# print(test_issue.__class__)
# print()
# print()
# print("TEST2: ")
# print("-------")
# print()
# test_issue.causedBy = [ test_use ]
# sync_reasoner()
# print(test_issue.__class__)
# print()
# print()
#Define ontology classes
# with ontology:
# class LegalIssue(Thing):
# response_classmap = {
# ontology_prefix + 'MaintenanceIssue': 'Maintenance related issue.',
# ontology_prefix + 'MinorIssue': 'The tenant is responsible for resolving the issue because:',
# ontology_prefix + 'MajorIssue': 'The landlord is responsible for resolving the issue because:',
# ontology_prefix + 'BadUse': 'The damage is caused by the tenants negligence (<a href="http://wetten.overheid.nl/BWBR0005290/2018-06-13#Boek7_Titeldeel4_Afdeling5_ParagraafOnderafdeling1_Artikel218">Section 218, Book 7</a>).',
# ontology_prefix + 'NaturalCalamity': 'The damage has not been caused by the tenant (<a href="http://wetten.overheid.nl/BWBR0005290/2018-06-13#Boek7_Titeldeel4_Afdeling5_ParagraafOnderafdeling1_Artikel218">Section 218, Book 7</a>).',
# ontology_prefix + 'SmallObject': 'The damaged object is small (<a href="http://wetten.overheid.nl/BWBR0005290/2018-06-13#Boek7_Titeldeel4_Afdeling5_ParagraafOnderafdeling1_Artikel240">Section 240, Book 7</a>).',
# ontology_prefix + 'BigObject': 'The damaged object is large (<a href="http://wetten.overheid.nl/BWBR0005290/2018-06-13#Boek7_Titeldeel4_Afdeling5_ParagraafOnderafdeling1_Artikel240">Section 240, Book 7</a>).',
# ontology_prefix + 'HighCost': 'The damage requires extensive actions to be resolved. (<a href="http://wetten.overheid.nl/BWBR0014931/2003-08-01">Minor Repairs Decree</a>).',
# ontology_prefix + 'LowCost': 'The damage requires minor actions to be resolved (<a href="http://wetten.overheid.nl/BWBR0014931/2003-08-01">Minor Repairs Decree</a>).',
# }
# ontology_properties = {
# 'causedBy': 'Was the damage caused by your actions?',
# 'associatedWithObject': 'What object was damaged?',
# 'hasCost': 'Does this object require more than 100 EURO to fix?'
# }
# ontology_property_classes = {
# 'causedBy': ['BadUse', 'NaturalCalamity'],
# 'associatedWithObject': ['BigObject', 'SmallObject'],
# 'hasCost': ['LowCost', 'HighCost']
# }
# Map resolved classes to their chatbot response
# def GetResolvedOutput(self, resolved_class):
# return self.response_classmap.get(resolved_class)
# # Get an explenation from a resolved class
# def GetResolvedExplenation(self, resolved_class):
# explenation = []
# for property in self.ontology_properties:
# if property in dir(resolved_class):
# reasons = getattr(resolved_class, property)
# if len(reasons) > 0:
# for reason in reasons:
# class_str = str(reason.__class__).replace('\\', '/')
# if '/' not in class_str:
# class_str = ontology_prefix_clean + class_str
# explenation.append(self.response_classmap.get(class_str))
# return explenation
# # Get options
# def GetOptionsByProperty(self, propertyName):
# if propertyName not in self.ontology_property_classes:
# return False
# propertyClasses = self.ontology_property_classes[propertyName]
# options = []
# for propertyClass in propertyClasses:
# for instance in ontology[propertyClass].instances():
# instance.lemma_ = lemmatizer(instance.name, 'VERB')[0]
# options.append(instance)
# return options
# iteration = 0
# def ResolveMaintenanceIssue(self, properties):
# self.iteration += 1
# print()
# print()
# # Create an instance from the given properties
# onto_instance = ontology.LegalIssue("maintenanceissue_" + str(self.iteration))
# #print("instance: " + str(onto_instance))
# print()
# print()
# print("properties: " + str(properties))
# print()
# print()
# for property in properties:
# #print("property: " + str(property))
# #print("other: " + str(properties.get(property)))
# setattr(onto_instance, property, properties.get(property))
# # Resolve the instance
# sync_reasoner()
# resolved_class = onto_instance.__class__
# print()
# print()
# print("TEST: " + str(resolved_class))
# print()
# print()
# resolved_classes = {
# ontology_prefix + 'MinorIssue',
# ontology_prefix + 'MajorIssue',
# }
# print()
# print()
# print("prefix: " + str(ontology_prefix))
# print()
# print()
# resolved_class_str = str(resolved_class).replace('/', '\\')
# print("resolved: " + str(resolved_class_str))
# #
# #print("resolved class str" + str(resolved_class_str))
# if '\\' not in resolved_class_str:
# print("Kody1")
# resolved_class_str = ontology_prefix_clean + resolved_class_str
# if resolved_class_str in resolved_classes:
# print("Kody2")
# conclusion = self.GetResolvedOutput(resolved_class_str)
# support = self.GetResolvedExplenation(onto_instance)
# del onto_instance
# # close_world(onto_instance)
# return True, conclusion, support
# else:
# print("Kody3")
# conclusion = 'Not yet resolved! Need more facts.'
# missing = []
# for property in self.ontology_properties:
# if property not in properties:
# missing.append({property: self.ontology_properties.get(property)})
# del onto_instance
# # close_world(onto_instance)
# print()
# print()
# print("missing: " + str(missing))
# print()
# print()
# return False, conclusion, missing
# # Define possible maintenance conclusions
# class MinorIssue(LegalIssue):
# equivalent_to = [
# ontology.MaintenanceIssue
# & (ontology.causedBy.some(ontology.BadUse) |
# ontology.associatedWithObject.some(ontology.SmallObject) |
# ontology.hasCost.some(ontology.LowCost))
# ]
# class MajorIssue(LegalIssue):
# equivalent_to = [
# ontology.MaintenanceIssue
# & (ontology.causedBy.some(ontology.NaturalCalamity) &
# ontology.associatedWithObject.some(ontology.BigObject) &
# ontology.hasCost.some(ontology.HighCost))
# ]
# global sess
# sess = {}
# def set_errcnt(user, message):
# # setattr(g, '_err_cnt', message)
# sess[user] = message
# return sess[user]
# def get_errcnt(user):
# # err_cnt = getattr(g, '_err_cnt', None)
# if not user in sess:
# sess[user] = 0
# return sess[user]
# conversations = {}
# asking = 'causedBy'
# maint = LegalIssue()
from nltk import wordpunct_tokenize
from nltk.corpus import stopwords
@app.route("/language", methods=['GET', 'OPTIONS'])
@cross_origin(supports_credentials=True)
def detect_language():
    """Guess the language of the ``msg`` query parameter.

    Scores every NLTK stopword language by how many of its stopwords
    occur in the message, and returns the best-scoring language id.
    """
    user_text = request.args.get('msg')
    word_set = {token.lower() for token in wordpunct_tokenize(user_text)}
    scores = {}
    for lang in stopwords.fileids():
        lang_stopwords = set(stopwords.words(lang))
        scores[lang] = len(word_set & lang_stopwords)
    return max(scores, key=scores.get)
@app.route("/get", methods=['GET', 'OPTIONS'])
@cross_origin(supports_credentials=True)
def get_bot_response():
    """Classify a maintenance issue and answer who must resolve it.

    Query parameters (each compared against '0'):
      usage        '0' => damage caused by bad use, otherwise wear and tear
      propertyitem '0' => big object, otherwise small object
      actioncost   '0' => high cost, otherwise low cost

    Builds an ontology individual with those properties, runs the
    reasoner, and answers based on whether it is classified as a
    Major/Minor issue.
    """
    global iteration
    iteration += 1
    issue = ontology.MaintenanceIssue("issue" + str(iteration))
    if (request.args.get('usage') == '0'):
        use = ontology.BadUse("use" + str(iteration))
    else:
        use = ontology.WearAndTear("use" + str(iteration))
    if (request.args.get('propertyitem') == '0'):
        obj = ontology.BigObject("obj" + str(iteration))
    else:
        obj = ontology.SmallObject("obj" + str(iteration))
    if (request.args.get('actioncost') == '0'):
        cost = ontology.HighCost("cost" + str(iteration))
    else:
        # Fixed: the original instantiated ontology.SmallObject here, but the
        # hasCost relation pairs HighCost with LowCost (see the commented-out
        # 'hasCost': ['LowCost', 'HighCost'] mapping above).
        cost = ontology.LowCost("cost" + str(iteration))
    # link issue to properties using ontology relations
    issue.causedBy = [use]
    issue.associatedWithObject = [obj]
    issue.hasCost = [cost]
    # Run the reasoner so the issue individual is re-classified.
    sync_reasoner()
    resolved = str(issue.__class__)
    if "MajorIssue" in resolved:
        response = {'text': 'The landlord may be responsible for resolving this issue.<br> <a target = "_blank" href=https://wetten.overheid.nl/BWBR0005290/2018-09-19#Boek7_Titeldeel4_Afdeling5_ParagraafOnderafdeling1>Burgerlijk Wetboek Boek 7</a>'}
    elif "MinorIssue" in resolved:
        response = {'text': 'The tenant is generally responsible for repairing, replacing or maintaining such items in this case.<br> <a target = "_blank" href=https://wetten.overheid.nl/BWBR0005290/2018-09-19#Boek7_Titeldeel4_Afdeling5_ParagraafOnderafdeling1>Burgerlijk Wetboek Boek 7</a>'}
    else:
        response = {'text': 'I am not sure if the tenant or landlord is responsible for resolving this particular issue.<br> <a target = "_blank" href=https://wetten.overheid.nl/BWBR0005290/2018-09-19#Boek7_Titeldeel4_Afdeling5_ParagraafOnderafdeling1>Burgerlijk Wetboek Boek 7</a>'}
    return jsonify(response)
if __name__ == "__main__":
    # Serve on all interfaces, port 80 (was 5577 before).
    # print('Starting ontology server on port 80', file=sys.stderr)
    app.run(host='0.0.0.0', port=80)
    # app.run(port=80)
|
<gh_stars>1-10
# Module: Natural Language Processing
# Author: <NAME> <<EMAIL>>
# License: MIT
def setup(data,
          target=None,
          custom_stopwords=None,
          session_id=None):
    """
    Description:
    ------------
    This function initializes the environment in pycaret. setup() must called before
    executing any other function in pycaret. It takes one mandatory parameter:
    dataframe {array-like, sparse matrix} or object of type list. If a dataframe is
    passed, target column containing text must be specified. When data passed is of
    type list, no target parameter is required. All other parameters are optional.
    This module only supports English Language at this time.

    Example
    -------
    from pycaret.datasets import get_data
    kiva = get_data('kiva')

    experiment_name = setup(data = kiva, target = 'en')

    'kiva' is a pandas Dataframe.

    Parameters
    ----------
    data : {array-like, sparse matrix}, shape (n_samples, n_features) where n_samples
    is the number of samples and n_features is the number of features or object of type
    list with n length.

    target: string
    If data is of type DataFrame, name of column containing text values must be passed as
    string.

    custom_stopwords: list, default = None
    list containing custom stopwords.

    session_id: int, default = None
    If None, a random seed is generated and returned in the Information grid. The
    unique number is then distributed as a seed in all functions used during the
    experiment. This can be used for later reproducibility of the entire experiment.

    Returns:
    --------
    info grid: Information grid is printed.
    -----------
    environment: This function returns various outputs that are stored in variable
    ----------- as tuple. They are used by other functions in pycaret.

    Warnings:
    ---------
    - Some functionalities in pycaret.nlp requires you to have english language model.
      The language model is not downloaded automatically when you install pycaret.
      You will have to download two models using your Anaconda Prompt or python
      command line interface. To download the model, please type the following in
      your command line:

         python -m spacy download en_core_web_sm
         python -m textblob.download_corpora

      Once downloaded, please restart your kernel and re-run the setup.
    """

    # exception checking
    import sys

    # ignore warnings
    import warnings
    warnings.filterwarnings('ignore')

    """
    error handling starts here
    """

    # checking data type: only a DataFrame-like (has .shape) or a list is accepted
    if hasattr(data, 'shape') is False:
        if type(data) is not list:
            sys.exit('(Type Error): data passed must be of type pandas.DataFrame or list')

    # if dataframe is passed then target is mandatory
    if hasattr(data, 'shape'):
        if target is None:
            sys.exit('(Type Error): When DataFrame is passed as data param. Target column containing text must be specified in target param.')

    # checking target parameter
    if target is not None:
        if target not in data.columns:
            sys.exit('(Value Error): Target parameter doesnt exist in the data provided.')

    # custom stopwords checking
    if custom_stopwords is not None:
        if type(custom_stopwords) is not list:
            sys.exit('(Type Error): custom_stopwords must be of list type.')

    # checking session_id
    if session_id is not None:
        if type(session_id) is not int:
            sys.exit('(Type Error): session_id parameter must be an integer.')

    # check if spacy is loaded
    try:
        import spacy
        sp = spacy.load('en_core_web_sm', disable=['parser', 'ner'])
    except:
        sys.exit('(Type Error): spacy english model is not yet downloaded. See the documentation of setup to see installation guide.')

    """
    error handling ends here
    """

    # pre-load libraries
    import pandas as pd
    import ipywidgets as ipw
    from IPython.display import display, HTML, clear_output, update_display
    import datetime, time

    '''
    generate monitor starts
    '''

    # progress bar
    max_steps = 11
    total_steps = 9

    progress = ipw.IntProgress(value=0, min=0, max=max_steps, step=1, description='Processing: ')
    display(progress)

    # document count used to size the (currently disabled) sub-progress bar
    try:
        max_sub = len(data[target].values.tolist())
    except:
        max_sub = len(data)

    # sub_progress = ipw.IntProgress(value=0, min=0, max=max_sub, step=1, bar_style='', description='Sub Process: ')
    # display(sub_progress)

    timestampStr = datetime.datetime.now().strftime("%H:%M:%S")
    monitor = pd.DataFrame([['Initiated', '. . . . . . . . . . . . . . . . . .', timestampStr],
                            ['Status', '. . . . . . . . . . . . . . . . . .', 'Loading Dependencies'],
                            ['Step', '. . . . . . . . . . . . . . . . . .', 'Step 0 of ' + str(total_steps)]],
                           columns=['', ' ', ' ']).set_index('')
    display(monitor, display_id='monitor')

    '''
    generate monitor end
    '''

    # general dependencies
    import numpy as np
    import random
    import spacy
    import gensim
    import gensim.corpora as corpora
    from gensim.utils import simple_preprocess
    from gensim.models import CoherenceModel
    import spacy
    import re

    # defining global variables (shared with the rest of the pycaret.nlp module)
    global text, id2word, corpus, data_, seed, target_, experiment__

    # create an empty list for pickling later.
    try:
        experiment__.append('dummy')
        experiment__.pop()
    except:
        experiment__ = []

    # converting to dataframe if list provided
    if type(data) is list:
        data = pd.DataFrame(data, columns=['en'])
        target = 'en'

    # converting target column into list
    try:
        text = data[target].values.tolist()
        target_ = str(target)
    except:
        text = data
        target_ = 'en'

    # generate seed to be used globally
    if session_id is None:
        seed = random.randint(150, 9000)
    else:
        seed = session_id

    # copying dataframe
    if type(data) is list:
        data_ = pd.DataFrame(data)
        data_.columns = ['en']
    else:
        data_ = data.copy()

    progress.value += 1

    """
    DEFINE STOPWORDS
    """
    try:
        import nltk
        nltk.download('stopwords')
        from nltk.corpus import stopwords
        stop_words = stopwords.words('english')
    except:
        # offline fallback: a static copy of NLTK's english stopword list
        stop_words = ['ourselves', 'hers', 'between', 'yourself', 'but', 'again', 'there', 'about', 'once', 'during',
                      'out', 'very', 'having', 'with', 'they', 'own', 'an', 'be', 'some', 'for', 'do', 'its', 'yours',
                      'such', 'into', 'of', 'most', 'itself', 'other', 'off', 'is', 's', 'am', 'or', 'who', 'as', 'from',
                      'him', 'each', 'the', 'themselves', 'until', 'below', 'are', 'we', 'these', 'your', 'his', 'through',
                      'don', 'nor', 'me', 'were', 'her', 'more', 'himself', 'this', 'down', 'should', 'our', 'their', 'while',
                      'above', 'both', 'up', 'to', 'ours', 'had', 'she', 'all', 'no', 'when', 'at', 'any', 'before', 'them',
                      'same', 'and', 'been', 'have', 'in', 'will', 'on', 'does', 'yourselves', 'then', 'that', 'because', 'what',
                      'over', 'why', 'so', 'can', 'did', 'not', 'now', 'under', 'he', 'you', 'herself', 'has', 'just', 'where',
                      'too', 'only', 'myself', 'which', 'those', 'i', 'after', 'few', 'whom', 't', 'being', 'if', 'theirs', 'my',
                      'against', 'a', 'by', 'doing', 'it', 'how', 'further', 'was', 'here', 'than']

    if custom_stopwords is not None:
        stop_words = stop_words + custom_stopwords

    progress.value += 1

    """
    TEXT PRE-PROCESSING STARTS HERE
    """

    """
    STEP 1 - REMOVE NUMERIC CHARACTERS FROM THE LIST
    """
    monitor.iloc[1, 1:] = 'Removing Numeric Characters'
    monitor.iloc[2, 1:] = 'Step 1 of ' + str(total_steps)
    update_display(monitor, display_id='monitor')

    text_step1 = []

    for i in range(0, len(text)):
        review = re.sub("\d+", "", str(text[i]))
        text_step1.append(review)
        # sub_progress.value += 1

    # sub_progress.value = 0

    text = text_step1  # re-assigning
    del (text_step1)

    progress.value += 1

    """
    STEP 2 - REGULAR EXPRESSIONS
    """
    monitor.iloc[1, 1:] = 'Removing Special Characters'
    monitor.iloc[2, 1:] = 'Step 2 of ' + str(total_steps)
    update_display(monitor, display_id='monitor')

    text_step2 = []

    for i in range(0, len(text)):
        # strip non-word chars, lowercase, drop stray single letters,
        # digits, and collapse whitespace
        review = re.sub(r'\W', ' ', str(text[i]))
        review = review.lower()
        review = re.sub(r'\s+[a-z]\s+', ' ', review)
        review = re.sub(r'^[a-z]\s+', ' ', review)
        review = re.sub(r'\d+', ' ', review)
        review = re.sub(r'\s+', ' ', review)
        text_step2.append(review)
        # sub_progress.value += 1

    # sub_progress.value = 0

    text = text_step2  # re-assigning
    del (text_step2)

    progress.value += 1

    """
    STEP 3 - WORD TOKENIZATION
    """
    monitor.iloc[1, 1:] = 'Tokenizing Words'
    monitor.iloc[2, 1:] = 'Step 3 of ' + str(total_steps)
    update_display(monitor, display_id='monitor')

    text_step3 = []

    for i in text:
        review = gensim.utils.simple_preprocess(str(i), deacc=True)
        text_step3.append(review)
        # sub_progress.value += 1

    # sub_progress.value = 0

    text = text_step3
    del (text_step3)

    progress.value += 1

    """
    STEP 4 - REMOVE STOPWORDS
    """
    monitor.iloc[1, 1:] = 'Removing Stopwords'
    monitor.iloc[2, 1:] = 'Step 4 of ' + str(total_steps)
    update_display(monitor, display_id='monitor')

    text_step4 = []

    for i in text:
        ii = []
        for word in i:
            if word not in stop_words:
                ii.append(word)
        text_step4.append(ii)
        # sub_progress.value += 1

    text = text_step4
    del (text_step4)

    # sub_progress.value = 0

    progress.value += 1

    """
    STEP 5 - BIGRAM EXTRACTION
    """
    monitor.iloc[1, 1:] = 'Extracting Bigrams'
    monitor.iloc[2, 1:] = 'Step 5 of ' + str(total_steps)
    update_display(monitor, display_id='monitor')

    bigram = gensim.models.Phrases(text, min_count=5, threshold=100)
    bigram_mod = gensim.models.phrases.Phraser(bigram)

    text_step5 = []

    for i in text:
        text_step5.append(bigram_mod[i])
        # sub_progress.value += 1

    text = text_step5
    del (text_step5)

    # sub_progress.value = 0

    progress.value += 1

    """
    STEP 6 - TRIGRAM EXTRACTION
    """
    monitor.iloc[1, 1:] = 'Extracting Trigrams'
    monitor.iloc[2, 1:] = 'Step 6 of ' + str(total_steps)
    update_display(monitor, display_id='monitor')

    trigram = gensim.models.Phrases(bigram[text], threshold=100)
    trigram_mod = gensim.models.phrases.Phraser(trigram)

    text_step6 = []

    for i in text:
        text_step6.append(trigram_mod[bigram_mod[i]])
        # sub_progress.value += 1

    # sub_progress.value = 0

    text = text_step6
    del (text_step6)

    progress.value += 1

    """
    STEP 7 - LEMMATIZATION USING SPACY
    """
    monitor.iloc[1, 1:] = 'Lemmatizing'
    monitor.iloc[2, 1:] = 'Step 7 of ' + str(total_steps)
    update_display(monitor, display_id='monitor')

    nlp = spacy.load('en_core_web_sm', disable=['parser', 'ner'])
    # keep only content-word part-of-speech tags
    allowed_postags = ['NOUN', 'ADJ', 'VERB', 'ADV']

    text_step7 = []

    for i in text:
        doc = nlp(" ".join(i))
        text_step7.append([token.lemma_ for token in doc if token.pos_ in allowed_postags])
        # sub_progress.value += 1

    # sub_progress.value = 0

    text = text_step7
    del (text_step7)

    progress.value += 1

    """
    STEP 8 - CUSTOM STOPWORD REMOVER
    """
    # NOTE: this pass re-applies the full stop_words list (which by now
    # includes the custom stopwords) to catch lemmas produced in step 7.
    monitor.iloc[1, 1:] = 'Removing Custom Stopwords'
    monitor.iloc[2, 1:] = 'Step 8 of ' + str(total_steps)
    update_display(monitor, display_id='monitor')

    text_step8 = []

    for i in text:
        ii = []
        for word in i:
            if word not in stop_words:
                ii.append(word)
        text_step8.append(ii)
        # sub_progress.value += 1

    text = text_step8
    del (text_step8)

    # sub_progress.value = 0

    progress.value += 1

    """
    STEP 8 - CREATING CORPUS AND DICTIONARY
    """
    # NOTE: banner above says STEP 8 but this is the 9th step (see the
    # monitor text below).
    monitor.iloc[1, 1:] = 'Compiling Corpus'
    monitor.iloc[2, 1:] = 'Step 9 of ' + str(total_steps)
    update_display(monitor, display_id='monitor')

    # creating dictionary
    id2word = corpora.Dictionary(text)

    # creating corpus
    corpus = []
    for i in text:
        d = id2word.doc2bow(i)
        corpus.append(d)
        # sub_progress.value += 1

    # sub_progress.value = 0

    progress.value += 1

    """
    PROGRESS NOT YET TRACKED - TO BE CODED LATER
    """

    # write the fully processed text back into the working dataframe
    text_join = []

    for i in text:
        word = ' '.join(i)
        text_join.append(word)

    data_[target_] = text_join

    '''
    Final display Starts
    '''
    clear_output()

    if custom_stopwords is None:
        csw = False
    else:
        csw = True

    functions = pd.DataFrame([['session_id', seed],
                              ['# Documents', len(corpus)],
                              ['Vocab Size', len(id2word.keys())],
                              ['Custom Stopwords', csw],
                              ], columns=['Description', 'Value'])

    functions_ = functions.style.hide_index()
    display(functions_)

    '''
    Final display Ends
    '''

    # log into experiment
    experiment__.append(('Info', functions))
    experiment__.append(('Dataset', data_))
    experiment__.append(('Corpus', corpus))
    experiment__.append(('Dictionary', id2word))
    experiment__.append(('Text', text))

    return text, data_, corpus, id2word, seed, target_, experiment__
def create_model(model=None,
                 multi_core=False,
                 num_topics = None,
                 verbose=True):
    """
    Description:
    ------------
    This function creates a topic model on the corpus and dictionary compiled
    during the setup stage. setup() function must be called before using
    create_model(). This function returns a trained model object.

    Example
    -------
    from pycaret.datasets import get_data
    kiva = get_data('kiva')
    experiment_name = setup(data = kiva, target = 'en')

    lda = create_model('lda')

    This will return trained Latent Dirichlet Allocation model.

    Parameters
    ----------
    model : string, default = None
        Enter abbreviated string of the model class. List of models supported:

        Model                               Abbreviated String    Original Implementation
        ---------                           ------------------    -----------------------
        Latent Dirichlet Allocation         'lda'                 gensim/models/ldamodel.html
        Latent Semantic Indexing            'lsi'                 gensim/models/lsimodel.html
        Hierarchical Dirichlet Process      'hdp'                 gensim/models/hdpmodel.html
        Random Projections                  'rp'                  gensim/models/rpmodel.html
        Non-Negative Matrix Factorization   'nmf'                 sklearn.decomposition.NMF.html

    multi_core: Boolean, default = False
        True would utilize all CPU cores to parallelize and speed up model
        training. Only available for 'lda'. For all other models, the
        multi_core parameter is ignored.

    num_topics: integer, default = 4
        Number of topics to be created. If None, default is set to 4.

    verbose: Boolean, default = True
        Status update is not printed when verbose is set to False.

    Returns:
    --------
    model: trained model object
    ------

    Warnings:
    ---------
    None

    NOTE(review): this function reads module-level globals created by setup()
    (corpus, id2word, seed, text, experiment__); calling it without a prior
    setup() raises a NameError.
    """

    #exception checking
    import sys

    #ignore warnings
    import warnings
    warnings.filterwarnings('ignore')

    """
    error handling starts here
    """

    #checking for model parameter
    if model is None:
        sys.exit('(Value Error): Model parameter Missing. Please see docstring for list of available models.')

    #checking for allowed models
    allowed_models = ['lda', 'lsi', 'hdp', 'rp', 'nmf']
    if model not in allowed_models:
        sys.exit('(Value Error): Model Not Available. Please see docstring for list of available models.')

    #checking multicore type:
    if type(multi_core) is not bool:
        sys.exit('(Type Error): multi_core parameter can only take argument as True or False.')

    #checking round parameter (num_topics must be an integer when provided)
    if num_topics is not None:
        if type(num_topics) is not int:
            sys.exit('(Type Error): num_topics parameter only accepts integer value.')

    #checking verbose parameter
    if type(verbose) is not bool:
        sys.exit('(Type Error): Verbose parameter can only take argument as True or False.')

    """
    error handling ends here
    """

    #pre-load libraries
    import pandas as pd
    import ipywidgets as ipw
    from IPython.display import display, HTML, clear_output, update_display
    import datetime, time

    """
    monitor starts
    """

    #progress bar and monitor control; max=4 matches the number of
    #progress.value increments performed on every execution path
    timestampStr = datetime.datetime.now().strftime("%H:%M:%S")
    progress = ipw.IntProgress(value=0, min=0, max=4, step=1 , description='Processing: ')
    monitor = pd.DataFrame( [ ['Initiated' , '. . . . . . . . . . . . . . . . . .', timestampStr ],
                              ['Status' , '. . . . . . . . . . . . . . . . . .' , 'Initializing'] ],
                            columns=['', ' ', ' ']).set_index('')
    if verbose:
        display(progress)
        display(monitor, display_id = 'monitor')
    progress.value += 1

    """
    monitor starts
    """

    #define topic_model_name: full display name logged into the experiment below
    if model == 'lda':
        topic_model_name = 'Latent Dirichlet Allocation'
    elif model == 'lsi':
        topic_model_name = 'Latent Semantic Indexing'
    elif model == 'hdp':
        topic_model_name = 'Hierarchical Dirichlet Process'
    elif model == 'nmf':
        topic_model_name = 'Non-Negative Matrix Factorization'
    elif model == 'rp':
        topic_model_name = 'Random Projections'

    #defining default number of topics
    if num_topics is None:
        n_topics = 4
    else:
        n_topics = num_topics

    #monitor update
    monitor.iloc[1,1:] = 'Fitting Topic Model'
    progress.value += 1
    if verbose:
        update_display(monitor, display_id = 'monitor')

    #model fitting. NOTE: `model` is rebound from the abbreviation string to
    #the trained model object inside each branch; the if/elif chain is safe
    #because only one branch's comparison can match before rebinding.
    if model == 'lda':

        if multi_core:
            #parallel LDA across 4 worker processes
            from gensim.models.ldamulticore import LdaMulticore
            model = LdaMulticore(corpus=corpus,
                                 num_topics=n_topics,
                                 id2word=id2word,
                                 workers=4,
                                 random_state=seed,
                                 chunksize=100,
                                 passes=10,
                                 alpha= 'symmetric',
                                 per_word_topics=True)
            progress.value += 1

        else:
            #single-core LDA; alpha='auto' learns an asymmetric prior from data
            from gensim.models.ldamodel import LdaModel
            model = LdaModel(corpus=corpus,
                             num_topics=n_topics,
                             id2word=id2word,
                             random_state=seed,
                             update_every=1,
                             chunksize=100,
                             passes=10,
                             alpha='auto',
                             per_word_topics=True)
            progress.value += 1

    elif model == 'lsi':
        from gensim.models.lsimodel import LsiModel
        model = LsiModel(corpus=corpus,
                         num_topics=n_topics,
                         id2word=id2word)
        progress.value += 1

    elif model == 'hdp':
        #HDP is non-parametric; T caps the number of top-level topics
        from gensim.models import HdpModel
        model = HdpModel(corpus=corpus,
                         id2word=id2word,
                         random_state=seed,
                         chunksize=100,
                         T=n_topics)
        progress.value += 1

    elif model == 'rp':
        from gensim.models import RpModel
        model = RpModel(corpus=corpus,
                        id2word=id2word,
                        num_topics=n_topics)
        progress.value += 1

    elif model == 'nmf':
        #sklearn NMF works on a tf-idf matrix rather than a gensim corpus, so
        #the tokenized `text` global is re-joined into plain documents here
        from sklearn.feature_extraction.text import CountVectorizer, TfidfTransformer
        from sklearn.decomposition import NMF
        from sklearn.preprocessing import normalize

        text_join = []
        for i in text:
            word = ' '.join(i)
            text_join.append(word)

        progress.value += 1

        vectorizer = CountVectorizer(analyzer='word', max_features=5000)
        x_counts = vectorizer.fit_transform(text_join)
        transformer = TfidfTransformer(smooth_idf=False);
        x_tfidf = transformer.fit_transform(x_counts);
        #l1-normalize rows so each document's tf-idf weights sum to 1
        xtfidf_norm = normalize(x_tfidf, norm='l1', axis=1)
        model = NMF(n_components=n_topics, init='nndsvd', random_state=seed);
        model.fit(xtfidf_norm)
        progress.value += 1

    #storing into experiment (global log shared with setup()/assign_model())
    if verbose:
        clear_output()

    tup = (topic_model_name,model)
    experiment__.append(tup)

    return model
def assign_model(model,
                 verbose=True):
    """
    Description:
    ------------
    This function assigns each of the data point in the dataset passed during setup
    stage to one of the topic using trained model object passed as model param.
    create_model() function must be called before using assign_model().

    This function returns dataframe with topic weights, dominant topic and % of the
    dominant topic (where applicable).

    Example
    -------
    from pycaret.datasets import get_data
    kiva = get_data('kiva')
    experiment_name = setup(data = kiva, target = 'en')
    lda = create_model('lda')

    lda_df = assign_model(lda)

    This will return a dataframe with inferred topics using trained model.

    Parameters
    ----------
    model : trained model object
        A model returned by create_model(). The model family is inferred from
        the object's class name (see type sniffing below).

    verbose: Boolean, default = True
        Status update is not printed when verbose is set to False.

    Returns:
    --------
    dataframe: Returns dataframe with inferred topics using trained model object.
    ---------

    Warnings:
    ---------
    None

    NOTE(review): reads the module-level globals populated by setup()
    (text, corpus, id2word, data_, experiment__); calling it without a prior
    setup() raises a NameError.
    """

    #determine model type by substring-matching the trained object's class name.
    #NOTE(review): this is fragile — any class whose name contains e.g. 'NMF'
    #would match; acceptable here because only create_model() outputs are expected.
    if 'LdaModel' in str(type(model)):
        mod_type = 'lda'

    elif 'LdaMulticore' in str(type(model)):
        mod_type = 'lda'

    elif 'LsiModel' in str(type(model)):
        mod_type = 'lsi'

    elif 'NMF' in str(type(model)):
        mod_type = 'nmf'

    elif 'HdpModel' in str(type(model)):
        mod_type = 'hdp'

    elif 'RpModel' in str(type(model)):
        mod_type = 'rp'

    else:
        #unrecognized object; rejected by the allowed-models check below
        mod_type = None

    #exception checking
    import sys

    #ignore warnings
    import warnings
    warnings.filterwarnings('ignore')

    """
    error handling starts here
    """

    #checking for allowed models
    allowed_models = ['lda', 'lsi', 'hdp', 'rp', 'nmf']
    if mod_type not in allowed_models:
        sys.exit('(Value Error): Model Not Recognized. Please see docstring for list of available models.')

    #checking verbose parameter
    if type(verbose) is not bool:
        sys.exit('(Type Error): Verbose parameter can only take argument as True or False.')

    """
    error handling ends here
    """

    #pre-load libraries
    import numpy as np
    import pandas as pd
    import ipywidgets as ipw
    from IPython.display import display, HTML, clear_output, update_display
    import datetime, time

    #progress bar and monitor control; sized to one tick per document plus
    #the fixed setup/teardown increments
    max_progress = len(text) + 5
    timestampStr = datetime.datetime.now().strftime("%H:%M:%S")
    progress = ipw.IntProgress(value=0, min=0, max=max_progress, step=1 , description='Processing: ')
    monitor = pd.DataFrame( [ ['Initiated' , '. . . . . . . . . . . . . . . . . .', timestampStr ],
                              ['Status' , '. . . . . . . . . . . . . . . . . .' , 'Initializing'] ],
                            columns=['', ' ', ' ']).set_index('')
    if verbose:
        display(progress)
        display(monitor, display_id = 'monitor')
    progress.value += 1

    monitor.iloc[1,1:] = 'Extracting Topics from Model'

    if verbose:
        update_display(monitor, display_id = 'monitor')

    progress.value += 1

    #assignment starts here
    if mod_type == 'lda':

        #minimum_probability=0 forces a weight for every topic in every doc,
        #so `bb` below is a dense docs x topics weight matrix
        c = model.get_document_topics(corpus, minimum_probability=0)

        ls = []
        for i in range(len(c)):
            ls.append(c[i])

        #keep only the weight from each (topic_id, weight) pair
        bb = []
        for i in ls:
            bs = []
            for k in i:
                progress.value += 1
                bs.append(k[1])
            bb.append(bs)

        #dominant topic = argmax of the per-document weight vector
        Dominant_Topic = []
        for i in bb:
            max_ = max(i)
            max_ = i.index(max_)
            Dominant_Topic.append('Topic ' + str(max_))

        #share of total weight held by the dominant topic, rounded to 2 dp
        pdt = []
        for i in range(0,len(bb)):
            l = max(bb[i]) / sum(bb[i])
            pdt.append(round(l,2))

        #num_topics=999999 is an "all topics" sentinel for show_topics()
        col_names = []
        for i in range(len(model.show_topics(num_topics=999999))):
            a = 'Topic_' + str(i)
            col_names.append(a)

        progress.value += 1

        #assemble: original data + topic weights + dominant topic + its share
        bb = pd.DataFrame(bb,columns=col_names)
        bb_ = pd.concat([data_,bb], axis=1)

        dt_ = pd.DataFrame(Dominant_Topic, columns=['Dominant_Topic'])
        bb_ = pd.concat([bb_,dt_], axis=1)

        pdt_ = pd.DataFrame(pdt, columns=['Perc_Dominant_Topic'])
        bb_ = pd.concat([bb_,pdt_], axis=1)

        progress.value += 1

        if verbose:
            clear_output()

        #return bb_

    elif mod_type == 'lsi':

        col_names = []
        for i in range(0,len(model.print_topics(num_topics=999999))):
            a = 'Topic_' + str(i)
            col_names.append(a)

        df_ = pd.DataFrame()
        Dominant_Topic = []

        #LSI has no get_document_topics; project each document individually
        for i in range(0,len(text)):

            progress.value += 1
            db = id2word.doc2bow(text[i])
            db_ = model[db]
            db_array = np.array(db_)
            #column 1 of the (topic, weight) pairs is the weight vector
            db_array_ = db_array[:,1]

            max_ = max(db_array_)
            max_ = list(db_array_).index(max_)
            Dominant_Topic.append('Topic ' + str(max_))

            db_df_ = pd.DataFrame([db_array_])
            df_ = pd.concat([df_,db_df_])

        progress.value += 1

        df_.columns = col_names

        df_['Dominant_Topic'] = Dominant_Topic
        df_ = df_.reset_index(drop=True)
        bb_ = pd.concat([data_,df_], axis=1)
        progress.value += 1

        if verbose:
            clear_output()

        #return bb_

    elif mod_type == 'hdp' or mod_type == 'rp':

        rate = []
        for i in range(0,len(corpus)):
            progress.value += 1
            rate.append(model[corpus[i]])

        #model[doc] yields sparse (topic, weight) pairs; flatten them into
        #long format (doc_num, topic_num, weight) then pivot to wide
        topic_num = []
        topic_weight = []
        doc_num = []
        counter = 0
        for i in rate:
            for k in i:
                topic_num.append(k[0])
                topic_weight.append(k[1])
                doc_num.append(counter)
            counter += 1

        progress.value += 1

        #missing (doc, topic) cells mean zero weight, hence fillna(0)
        df = pd.DataFrame({'Document': doc_num, 'Topic' : topic_num, 'Topic Weight' : topic_weight}).sort_values(by='Topic')
        df = df.pivot(index='Document', columns='Topic', values='Topic Weight').fillna(0)
        df.columns = ['Topic_' + str(i) for i in df.columns]

        Dominant_Topic = []

        for i in range(0,len(df)):
            s = df.iloc[i].max()
            d = list(df.iloc[i]).index(s)
            v = df.columns[d]
            #'Topic_3' -> 'Topic 3' to match the other branches' labels
            v = v.replace("_", ' ')
            Dominant_Topic.append(v)

        df['Dominant_Topic'] = Dominant_Topic
        progress.value += 1

        if verbose:
            clear_output()

        bb_ = pd.concat([data_,df], axis=1)

        #return bb_

    elif mod_type == 'nmf':

        """
        this section will go away in future release through better handling
        """
        #NOTE(review): re-runs the same vectorization pipeline as create_model's
        #nmf branch; the vectorizer is refit here rather than reused.
        from sklearn.feature_extraction.text import CountVectorizer, TfidfTransformer
        from sklearn.decomposition import NMF
        from sklearn.preprocessing import normalize

        text_join = []

        for i in text:
            word = ' '.join(i)
            text_join.append(word)

        progress.value += 1

        vectorizer = CountVectorizer(analyzer='word', max_features=5000)
        x_counts = vectorizer.fit_transform(text_join)
        transformer = TfidfTransformer(smooth_idf=False);
        x_tfidf = transformer.fit_transform(x_counts);
        xtfidf_norm = normalize(x_tfidf, norm='l1', axis=1)

        """
        section ends
        """

        #document-topic weight matrix from the fitted NMF model
        bb = list(model.fit_transform(xtfidf_norm))

        col_names = []

        for i in range(len(bb[0])):
            a = 'Topic_' + str(i)
            col_names.append(a)

        Dominant_Topic = []

        for i in bb:
            progress.value += 1
            max_ = max(i)
            max_ = list(i).index(max_)
            Dominant_Topic.append('Topic ' + str(max_))

        pdt = []

        for i in range(0,len(bb)):
            l = max(bb[i]) / sum(bb[i])
            pdt.append(round(l,2))

        progress.value += 1

        bb = pd.DataFrame(bb, columns=col_names)
        bb_ = pd.concat([data_,bb], axis=1)

        dt_ = pd.DataFrame(Dominant_Topic, columns=['Dominant_Topic'])
        bb_ = pd.concat([bb_,dt_], axis=1)

        pdt_ = pd.DataFrame(pdt, columns=['Perc_Dominant_Topic'])
        bb_ = pd.concat([bb_,pdt_], axis=1)

        progress.value += 1

        if verbose:
            clear_output()

    #storing into experiment
    #NOTE(review): clear_output() was already called inside each branch above,
    #so this second verbose-guarded call is redundant but harmless.
    if verbose:
        clear_output()

    mod__ = str(mod_type) + ' Topic Assignment'
    tup = (mod__,bb_)
    experiment__.append(tup)

    #return bb_

    return bb_
def plot_model(model = None,
               plot = 'frequency',
               topic_num = None):
    """
    Description:
    ------------
    This function takes a trained model object (optional) and returns a plot based
    on the inferred dataset by internally calling assign_model before generating a
    plot. Where a model parameter is not passed, a plot on the entire dataset will
    be returned instead of one at the topic level. As such, plot_model can be used
    with or without model. All plots with a model parameter passed as a trained
    model object will return a plot based on the first topic i.e. 'Topic 0'. This
    can be changed using the topic_num param.

    Example:
    --------
    from pycaret.datasets import get_data
    kiva = get_data('kiva')
    experiment_name = setup(data = kiva, target = 'en')
    lda = create_model('lda')

    plot_model(lda, plot = 'frequency')

    This will return a frequency plot on a trained Latent Dirichlet Allocation
    model for all documents in 'Topic 0'. The topic number can be changed as
    follows:

    plot_model(lda, plot = 'frequency', topic_num = 'Topic 1')

    This will now return a frequency plot on a trained LDA model for all
    documents inferred in 'Topic 1'.

    Alternatively, if following is used:

    plot_model(plot = 'frequency')

    This will return frequency plot on the entire training corpus compiled
    during setup stage.

    Parameters
    ----------
    model : object, default = none
        A trained model object can be passed. Model must be created using
        create_model().

    plot : string, default = 'frequency'
        Enter abbreviation for type of plot. The current list of plots supported are:

        Name                        Abbreviated String
        ---------                   ------------------
        Word Token Frequency        'frequency'
        Word Distribution Plot      'distribution'
        Bigram Frequency Plot       'bigram'
        Trigram Frequency Plot      'trigram'
        Sentiment Polarity Plot     'sentiment'
        Part of Speech Frequency    'pos'
        t-SNE (3d) Dimension Plot   'tsne'
        Topic Model (pyLDAvis)      'topic_model'
        Topic Infer Distribution    'topic_distribution'
        Wordcloud                   'wordcloud'
        UMAP Dimensionality Plot    'umap'

    topic_num : string, default = None
        Topic number to be passed as a string. If set to None, default generation
        will be on 'Topic 0'

    Returns:
    --------
    Visual Plot: Prints the visual plot.
    ------------

    Warnings:
    ---------
    - 'pos' and 'umap' plot not available at model level. Hence the model parameter is
      ignored. The result will always be based on the entire training corpus.

    - 'topic_model' plot is based on pyLDAVis implementation. Hence its not available
      for model = 'lsi', 'rp' and 'nmf'.

    NOTE(review): reads the module-level globals populated by setup()
    (data_, target_, id2word, corpus, seed); calling it without a prior
    setup() raises a NameError.
    """

    #exception checking
    import sys

    #ignore warnings
    import warnings
    warnings.filterwarnings('ignore')

    #setting default of topic_num: a passed model defaults to topic-level plots
    #on 'Topic 0'; topic_num stays None only when no model is given, which the
    #branches below use to mean "plot the entire corpus"
    if model is not None and topic_num is None:
        topic_num = 'Topic 0'

    """
    exception handling starts here
    """

    #determine model type by substring-matching the trained object's class name.
    #NOTE(review): if `model` is not None but matches none of these branches,
    #mod_type stays unbound and the 'topic_model' guard below would raise a
    #NameError — confirm whether an else fallback is intended.
    if model is not None:

        mod = str(type(model))

        if 'LdaModel' in mod:
            mod_type = 'lda'

        elif 'LdaMulticore' in str(type(model)):
            mod_type = 'lda'

        elif 'LsiModel' in str(type(model)):
            mod_type = 'lsi'

        elif 'NMF' in str(type(model)):
            mod_type = 'nmf'

        elif 'HdpModel' in str(type(model)):
            mod_type = 'hdp'

        elif 'RpModel' in str(type(model)):
            mod_type = 'rp'

    #plot checking
    allowed_plots = ['frequency', 'distribution', 'bigram', 'trigram', 'sentiment', 'pos', 'tsne', 'topic_model',
                     'topic_distribution', 'wordcloud', 'umap']
    if plot not in allowed_plots:
        sys.exit('(Value Error): Plot Not Available. Please see docstring for list of available plots.')

    #plots without topic model: these require inference, so a model is mandatory
    if model is None:
        not_allowed_wm = ['tsne', 'topic_model', 'topic_distribution']
        if plot in not_allowed_wm:
            sys.exit('(Type Error): Model parameter Missing. Plot not supported without specific model passed in as Model param.')

    #handle topic_model plot error: pyLDAvis only supports LDA/HDP-style models
    if plot == 'topic_model':
        not_allowed_tm = ['lsi', 'rp', 'nmf']
        if mod_type in not_allowed_tm:
            sys.exit('(Type Error): Model not supported for plot = topic_model. Please see docstring for list of available models supported for topic_model.')

    """
    error handling ends here
    """

    #import dependencies
    import pandas as pd
    import numpy

    #import cufflinks: enables DataFrame.iplot() used by several branches below
    import cufflinks as cf
    cf.go_offline()
    cf.set_config_file(offline=False, world_readable=True)

    #NOTE(review): the bare `except:` blocks in the branches below swallow every
    #error and re-raise as a generic topic_num/vocab message, which can mask
    #unrelated failures (e.g. a missing optional dependency).
    if plot == 'frequency':

        try:

            from sklearn.feature_extraction.text import CountVectorizer

            #top-n unigrams by total count across the given documents
            def get_top_n_words(corpus, n=None):
                vec = CountVectorizer()
                bag_of_words = vec.fit_transform(corpus)
                sum_words = bag_of_words.sum(axis=0)
                words_freq = [(word, sum_words[0, idx]) for word, idx in vec.vocabulary_.items()]
                words_freq =sorted(words_freq, key = lambda x: x[1], reverse=True)
                return words_freq[:n]

            if topic_num is None:

                #no model passed: plot over the entire corpus
                common_words = get_top_n_words(data_[target_], n=100)
                df2 = pd.DataFrame(common_words, columns = ['Text' , 'count'])
                df2.groupby('Text').sum()['count'].sort_values(ascending=False).iplot(
                    kind='bar', yTitle='Count', linecolor='black', title='Top 100 words after removing stop words')

            else:

                #model passed: restrict to documents whose dominant topic matches
                title = str(topic_num) + ': ' + 'Top 100 words after removing stop words'
                assigned_df = assign_model(model, verbose = False)
                filtered_df = assigned_df.loc[assigned_df['Dominant_Topic'] == topic_num]
                common_words = get_top_n_words(filtered_df[target_], n=100)
                df2 = pd.DataFrame(common_words, columns = ['Text' , 'count'])
                df2.groupby('Text').sum()['count'].sort_values(ascending=False).iplot(
                    kind='bar', yTitle='Count', linecolor='black', title=title)

        except:
            sys.exit('(Value Error): Invalid topic_num param or empty Vocab. Try changing Topic Number.')

    elif plot == 'distribution':

        try:

            if topic_num is None:

                #word-count histogram over the entire corpus
                b = data_[target_].apply(lambda x: len(str(x).split()))
                b = pd.DataFrame(b)
                b[target_].iplot(
                    kind='hist',
                    bins=100,
                    xTitle='word count',
                    linecolor='black',
                    yTitle='count',
                    title='Word Count Distribution')

            else:

                title = str(topic_num) + ': ' + 'Word Count Distribution'
                assigned_df = assign_model(model, verbose = False)
                filtered_df = assigned_df.loc[assigned_df['Dominant_Topic'] == topic_num]
                b = filtered_df[target_].apply(lambda x: len(str(x).split()))
                b = pd.DataFrame(b)
                b[target_].iplot(
                    kind='hist',
                    bins=100,
                    xTitle='word count',
                    linecolor='black',
                    yTitle='count',
                    title= title)

        except:
            sys.exit('(Value Error): Invalid topic_num param or empty Vocab. Try changing Topic Number.')

    elif plot == 'bigram':

        try:

            from sklearn.feature_extraction.text import CountVectorizer

            #top-n bigrams by total count across the given documents
            def get_top_n_bigram(corpus, n=None):
                vec = CountVectorizer(ngram_range=(2, 2)).fit(corpus)
                bag_of_words = vec.transform(corpus)
                sum_words = bag_of_words.sum(axis=0)
                words_freq = [(word, sum_words[0, idx]) for word, idx in vec.vocabulary_.items()]
                words_freq =sorted(words_freq, key = lambda x: x[1], reverse=True)
                return words_freq[:n]

            if topic_num is None:

                common_words = get_top_n_bigram(data_[target_], 100)
                df3 = pd.DataFrame(common_words, columns = ['Text' , 'count'])
                df3.groupby('Text').sum()['count'].sort_values(ascending=False).iplot(
                    kind='bar', yTitle='Count', linecolor='black', title='Top 100 bigrams after removing stop words')

            else:

                title = str(topic_num) + ': ' + 'Top 100 bigrams after removing stop words'
                assigned_df = assign_model(model, verbose = False)
                filtered_df = assigned_df.loc[assigned_df['Dominant_Topic'] == topic_num]
                common_words = get_top_n_bigram(filtered_df[target_], 100)
                df3 = pd.DataFrame(common_words, columns = ['Text' , 'count'])
                df3.groupby('Text').sum()['count'].sort_values(ascending=False).iplot(
                    kind='bar', yTitle='Count', linecolor='black', title=title)

        except:
            sys.exit('(Value Error): Invalid topic_num param or empty Vocab. Try changing Topic Number.')

    elif plot == 'trigram':

        try:

            from sklearn.feature_extraction.text import CountVectorizer

            #top-n trigrams by total count across the given documents
            def get_top_n_trigram(corpus, n=None):
                vec = CountVectorizer(ngram_range=(3, 3)).fit(corpus)
                bag_of_words = vec.transform(corpus)
                sum_words = bag_of_words.sum(axis=0)
                words_freq = [(word, sum_words[0, idx]) for word, idx in vec.vocabulary_.items()]
                words_freq =sorted(words_freq, key = lambda x: x[1], reverse=True)
                return words_freq[:n]

            if topic_num is None:

                common_words = get_top_n_trigram(data_[target_], 100)
                df3 = pd.DataFrame(common_words, columns = ['Text' , 'count'])
                df3.groupby('Text').sum()['count'].sort_values(ascending=False).iplot(
                    kind='bar', yTitle='Count', linecolor='black', title='Top 100 trigrams after removing stop words')

            else:

                title = str(topic_num) + ': ' + 'Top 100 trigrams after removing stop words'
                assigned_df = assign_model(model, verbose = False)
                filtered_df = assigned_df.loc[assigned_df['Dominant_Topic'] == topic_num]
                common_words = get_top_n_trigram(filtered_df[target_], 100)
                df3 = pd.DataFrame(common_words, columns = ['Text' , 'count'])
                df3.groupby('Text').sum()['count'].sort_values(ascending=False).iplot(
                    kind='bar', yTitle='Count', linecolor='black', title=title)

        except:
            sys.exit('(Value Error): Invalid topic_num param or empty Vocab. Try changing Topic Number.')

    elif plot == 'sentiment':

        try:

            #loadies dependencies
            import plotly.graph_objects as go
            from textblob import TextBlob

            if topic_num is None:

                #TextBlob polarity is in [-1, 1]; histogram over all documents
                sentiments = data_[target_].map(lambda text: TextBlob(text).sentiment.polarity)
                sentiments = pd.DataFrame(sentiments)
                sentiments[target_].iplot(
                    kind='hist',
                    bins=50,
                    xTitle='polarity',
                    linecolor='black',
                    yTitle='count',
                    title='Sentiment Polarity Distribution')

            else:

                title = str(topic_num) + ': ' + 'Sentiment Polarity Distribution'
                assigned_df = assign_model(model, verbose = False)
                filtered_df = assigned_df.loc[assigned_df['Dominant_Topic'] == topic_num]
                sentiments = filtered_df[target_].map(lambda text: TextBlob(text).sentiment.polarity)
                sentiments = pd.DataFrame(sentiments)
                sentiments[target_].iplot(
                    kind='hist',
                    bins=50,
                    xTitle='polarity',
                    linecolor='black',
                    yTitle='count',
                    title=title)

        except:
            sys.exit('(Value Error): Invalid topic_num param or empty Vocab. Try changing Topic Number.')

    elif plot == 'pos':

        #corpus-level only (model/topic_num ignored, see docstring warnings);
        #POS-tags the vocabulary of the setup() dictionary, not the documents
        from textblob import TextBlob

        b = list(id2word.token2id.keys())
        blob = TextBlob(str(b))
        pos_df = pd.DataFrame(blob.tags, columns = ['word' , 'pos'])
        #'POS' here is the possessive-ending tag emitted by the tagger
        pos_df = pos_df.loc[pos_df['pos'] != 'POS']
        pos_df = pos_df.pos.value_counts()[:20]
        pos_df.iplot(
            kind='bar',
            xTitle='POS',
            yTitle='count',
            title='Top 20 Part-of-speech tagging for review corpus')

    elif plot == 'tsne':

        b = assign_model(model, verbose = False)
        b.dropna(axis=0, inplace=True) #droping rows where Dominant_Topic is blank

        #select only the Topic_N weight columns as t-SNE features
        c = []
        for i in b.columns:
            if 'Topic_' in i:
                a = i
                c.append(a)

        bb = b[c]

        from sklearn.manifold import TSNE
        X_embedded = TSNE(n_components=3).fit_transform(bb)

        X = pd.DataFrame(X_embedded)
        X['Dominant_Topic'] = b['Dominant_Topic']
        X.sort_values(by='Dominant_Topic', inplace=True)
        X.dropna(inplace=True)

        import plotly.express as px
        df = X
        fig = px.scatter_3d(df, x=0, y=1, z=2,
                            color='Dominant_Topic', title='3d TSNE Plot for Topic Model', opacity=0.7, width=900, height=800)
        fig.show()

    elif plot == 'topic_model':

        import pyLDAvis
        import pyLDAvis.gensim  # don't skip this
        import warnings
        warnings.filterwarnings('ignore')
        pyLDAvis.enable_notebook()
        vis = pyLDAvis.gensim.prepare(model, corpus, id2word, mds='mmds')
        display(vis)

    elif plot == 'topic_distribution':

        try:

            #gensim models: count topics via show_topics (999999 = "all")
            iter1 = len(model.show_topics(999999))

        except:

            try:
                #fall back to explicit topic-count attributes
                iter1 = model.num_topics

            except:
                #sklearn NMF exposes n_components_ instead
                iter1 = model.n_components_

        topic_name = []
        keywords = []

        for i in range(0,iter1):

            try:

                s = model.show_topic(i,topn=10)
                topic_name.append('Topic ' + str(i))

                kw = []

                #NOTE(review): the inner loop reuses `i`, shadowing the topic
                #index; harmless on the success path but the except branch
                #below then labels the topic with whatever `i` last held.
                for i in s:
                    kw.append(i[0])

                keywords.append(kw)

            except:

                keywords.append('NA')
                topic_name.append('Topic ' + str(i))

        keyword = []
        for i in keywords:
            b = ", ".join(i)
            keyword.append(b)

        kw_df = pd.DataFrame({'Topic': topic_name, 'Keyword' : keyword}).set_index('Topic')
        ass_df = assign_model(model, verbose = False)
        ass_df_pivot = ass_df.pivot_table(index='Dominant_Topic', values='Topic_0', aggfunc='count')
        df2 = ass_df_pivot.join(kw_df)
        df2 = df2.reset_index()
        df2.columns = ['Topic', 'Documents', 'Keyword']

        """
        sorting column starts
        """

        #convert 'Topic N' labels to ints so the sort is numeric, not lexical
        topic_list = list(df2['Topic'])

        s = []
        for i in range(0,len(topic_list)):
            a = int(topic_list[i].split()[1])
            s.append(a)

        df2['Topic'] = s
        df2.sort_values(by='Topic', inplace=True)
        #NOTE(review): the next three statements duplicate the sort and the
        #list() extraction verbatim; they are redundant no-ops kept as-is.
        df2.sort_values(by='Topic', inplace=True)
        topic_list = list(df2['Topic'])
        topic_list = list(df2['Topic'])

        #convert back to 'Topic N' display labels
        s = []
        for i in topic_list:
            a = 'Topic ' + str(i)
            s.append(a)

        df2['Topic'] = s
        df2.reset_index(drop=True, inplace=True)

        """
        sorting column ends
        """

        import plotly.express as px
        fig = px.bar(df2, x='Topic', y='Documents', hover_data = ['Keyword'], title='Document Distribution by Topics')
        fig.show()

    elif plot == 'wordcloud':

        try:

            from wordcloud import WordCloud, STOPWORDS
            import matplotlib.pyplot as plt

            stopwords = set(STOPWORDS)

            if topic_num is None:

                atext = " ".join(review for review in data_[target_])

            else:

                assigned_df = assign_model(model, verbose = False)
                filtered_df = assigned_df.loc[assigned_df['Dominant_Topic'] == topic_num]
                atext = " ".join(review for review in filtered_df[target_])

            wordcloud = WordCloud(width = 800, height = 800,
                                  background_color ='white',
                                  stopwords = stopwords,
                                  min_font_size = 10).generate(atext)

            # plot the WordCloud image
            plt.figure(figsize = (8, 8), facecolor = None)
            plt.imshow(wordcloud)
            plt.axis("off")
            plt.tight_layout(pad = 0)

            plt.show()

        except:
            sys.exit('(Value Error): Invalid topic_num param or empty Vocab. Try changing Topic Number.')

    elif plot == 'umap':

        #corpus-level only (model/topic_num ignored, see docstring warnings)

        #warnings
        from matplotlib.axes._axes import _log as matplotlib_axes_logger
        matplotlib_axes_logger.setLevel('ERROR')

        #loading dependencies
        from sklearn.cluster import KMeans
        from sklearn.feature_extraction.text import TfidfVectorizer
        from yellowbrick.text import UMAPVisualizer
        import matplotlib.pyplot as plt

        tfidf = TfidfVectorizer()
        docs = tfidf.fit_transform(data_[target_])

        # Instantiate the clustering model: KMeans labels color the UMAP points
        clusters = KMeans(n_clusters=5, random_state=seed)
        clusters.fit(docs)

        plt.figure(figsize=(10,6))

        umap = UMAPVisualizer(random_state=seed)
        umap.fit(docs, ["c{}".format(c) for c in clusters.labels_])
        umap.show()
def tune_model(model=None,
multi_core=False,
supervised_target=None,
estimator=None,
optimize=None,
auto_fe = True,
fold=10):
"""
Description:
------------
This function tunes the num_topics model parameter using a predefined grid with
the objective of optimizing a supervised learning metric as defined in the optimize
param. You can choose the supervised estimator from a large library available in
pycaret. By default, supervised estimator is Linear.
This function returns the tuned model object.
Example
-------
from pycaret.datasets import get_data
kiva = get_data('kiva')
experiment_name = setup(data = kiva, target = 'en')
tuned_lda = tune_model(model = 'lda', supervised_target = 'status')
This will return trained Latent Dirichlet Allocation model.
Parameters
----------
model : string, default = None
Enter abbreviated name of the model. List of available models supported:
Model Abbreviated String Original Implementation
--------- ------------------ -----------------------
Latent Dirichlet Allocation 'lda' gensim/models/ldamodel.html
Latent Semantic Indexing 'lsi' gensim/models/lsimodel.html
Hierarchical Dirichlet Process 'hdp' gensim/models/hdpmodel.html
Random Projections 'rp' gensim/models/rpmodel.html
Non-Negative Matrix Factorization 'nmf' sklearn.decomposition.NMF.html
multi_core: Boolean, default = False
True would utilize all CPU cores to parallelize and speed up model training. Only
available for 'lda'. For all other models, multi_core parameter is ignored.
supervised_target: string
Name of the target column for supervised learning. If None, the mdel coherence value
is used as the objective function.
estimator: string, default = None
Estimator Abbreviated String Task
--------- ------------------ ---------------
Logistic Regression 'lr' Classification
K Nearest Neighbour 'knn' Classification
Naives Bayes 'nb' Classification
Decision Tree 'dt' Classification
SVM (Linear) 'svm' Classification
SVM (RBF) 'rbfsvm' Classification
Gaussian Process 'gpc' Classification
Multi Level Perceptron 'mlp' Classification
Ridge Classifier 'ridge' Classification
Random Forest 'rf' Classification
Quadratic Disc. Analysis 'qda' Classification
AdaBoost 'ada' Classification
Gradient Boosting 'gbc' Classification
Linear Disc. Analysis 'lda' Classification
Extra Trees Classifier 'et' Classification
Extreme Gradient Boosting 'xgboost' Classification
Light Gradient Boosting 'lightgbm' Classification
CatBoost Classifier 'catboost' Classification
Linear Regression 'lr' Regression
Lasso Regression 'lasso' Regression
Ridge Regression 'ridge' Regression
Elastic Net 'en' Regression
Least Angle Regression 'lar' Regression
Lasso Least Angle Regression 'llar' Regression
Orthogonal Matching Pursuit 'omp' Regression
Bayesian Ridge 'br' Regression
Automatic Relevance Determ. 'ard' Regression
Passive Aggressive Regressor 'par' Regression
Random Sample Consensus 'ransac' Regression
TheilSen Regressor 'tr' Regression
Huber Regressor 'huber' Regression
Kernel Ridge 'kr' Regression
Support Vector Machine 'svm' Regression
K Neighbors Regressor 'knn' Regression
Decision Tree 'dt' Regression
Random Forest 'rf' Regression
Extra Trees Regressor 'et' Regression
AdaBoost Regressor 'ada' Regression
Gradient Boosting 'gbr' Regression
Multi Level Perceptron 'mlp' Regression
Extreme Gradient Boosting 'xgboost' Regression
Light Gradient Boosting 'lightgbm' Regression
CatBoost Regressor 'catboost' Regression
If set to None, Linear model is used by default for both classification
and regression tasks.
optimize: string, default = None
For Classification tasks:
Accuracy, AUC, Recall, Precision, F1, Kappa
For Regression tasks:
MAE, MSE, RMSE, R2, ME
If set to None, default is 'Accuracy' for classification and 'R2' for
regression tasks.
auto_fe: boolean, default = True
Automatic text feature engineering. Only used when supervised_target is
passed. When set to true, it will generate text based features such as
polarity, subjectivity, wordcounts to be used in supervised learning.
Ignored when supervised_target is set to None.
fold: integer, default = 10
Number of folds to be used in Kfold CV. Must be at least 2.
Returns:
--------
visual plot: Visual plot with k number of topics on x-axis with metric to
----------- optimize on y-axis. Coherence is used when learning is
unsupervised. Also, prints the best model metric.
model: trained model object with best K number of topics.
-----------
Warnings:
---------
- Random Projections ('rp') and Non Negative Matrix Factorization ('nmf')
is not available for unsupervised learning. Error is raised when 'rp' or
'nmf' is passed without supervised_target.
- Estimators using kernel based methods such as Kernel Ridge Regressor,
Automatic Relevance Determinant, Gaussian Process Classifier, Radial Basis
Support Vector Machine and Multi Level Perceptron may have longer training
times.
"""
"""
exception handling starts here
"""
#ignore warnings
import warnings
warnings.filterwarnings('ignore')
import sys
#checking for model parameter
if model is None:
sys.exit('(Value Error): Model parameter Missing. Please see docstring for list of available models.')
#checking for allowed models
allowed_models = ['lda', 'lsi', 'hdp', 'rp', 'nmf']
if model not in allowed_models:
sys.exit('(Value Error): Model Not Available. Please see docstring for list of available models.')
#checking multicore type:
if type(multi_core) is not bool:
sys.exit('(Type Error): multi_core parameter can only take argument as True or False.')
#check supervised target:
if supervised_target is not None:
all_col = list(data_.columns)
target = target_
all_col.remove(target)
if supervised_target not in all_col:
sys.exit('(Value Error): supervised_target not recognized. It can only be one of the following: ' + str(all_col))
#supervised target exception handling
if supervised_target is None:
models_not_allowed = ['rp', 'nmf']
if model in models_not_allowed:
sys.exit('(Type Error): Model not supported for unsupervised tuning. Either supervised_target param has to be passed or different model has to be used. Please see docstring for available models.')
#checking estimator:
if estimator is not None:
available_estimators = ['lr', 'knn', 'nb', 'dt', 'svm', 'rbfsvm', 'gpc', 'mlp', 'ridge', 'rf', 'qda', 'ada',
'gbc', 'lda', 'et', 'lasso', 'ridge', 'en', 'lar', 'llar', 'omp', 'br', 'ard', 'par',
'ransac', 'tr', 'huber', 'kr', 'svm', 'knn', 'dt', 'rf', 'et', 'ada', 'gbr',
'mlp', 'xgboost', 'lightgbm', 'catboost']
if estimator not in available_estimators:
sys.exit('(Value Error): Estimator Not Available. Please see docstring for list of available estimators.')
#checking optimize parameter
if optimize is not None:
available_optimizers = ['MAE', 'MSE', 'RMSE', 'R2', 'ME', 'Accuracy', 'AUC', 'Recall', 'Precision', 'F1', 'Kappa']
if optimize not in available_optimizers:
sys.exit('(Value Error): optimize parameter Not Available. Please see docstring for list of available parameters.')
#checking auto_fe:
if type(auto_fe) is not bool:
sys.exit('(Type Error): auto_fe parameter can only take argument as True or False.')
#checking fold parameter
if type(fold) is not int:
sys.exit('(Type Error): Fold parameter only accepts integer value.')
"""
exception handling ends here
"""
#pre-load libraries
import pandas as pd
import ipywidgets as ipw
from ipywidgets import Output
from IPython.display import display, HTML, clear_output, update_display
import datetime, time
#progress bar
max_steps = 25
progress = ipw.IntProgress(value=0, min=0, max=max_steps, step=1 , description='Processing: ')
display(progress)
timestampStr = datetime.datetime.now().strftime("%H:%M:%S")
monitor = pd.DataFrame( [ ['Initiated' , '. . . . . . . . . . . . . . . . . .', timestampStr ],
['Status' , '. . . . . . . . . . . . . . . . . .' , 'Loading Dependencies'],
['Step' , '. . . . . . . . . . . . . . . . . .', 'Initializing' ] ],
columns=['', ' ', ' ']).set_index('')
monitor_out = Output()
display(monitor_out)
with monitor_out:
display(monitor, display_id = 'monitor')
#General Dependencies
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import cross_val_predict
from sklearn import metrics
import numpy as np
import plotly.express as px
#setting up cufflinks
import cufflinks as cf
cf.go_offline()
cf.set_config_file(offline=False, world_readable=True)
progress.value += 1
#define the problem
if supervised_target is None:
problem ='unsupervised'
elif data_[supervised_target].value_counts().count() == 2:
problem = 'classification'
else:
problem = 'regression'
#define topic_model_name
if model == 'lda':
topic_model_name = 'Latent Dirichlet Allocation'
elif model == 'lsi':
topic_model_name = 'Latent Semantic Indexing'
elif model == 'hdp':
topic_model_name = 'Hierarchical Dirichlet Process'
elif model == 'nmf':
topic_model_name = 'Non-Negative Matrix Factorization'
elif model == 'rp':
topic_model_name = 'Random Projections'
#defining estimator:
if problem == 'classification' and estimator is None:
estimator = 'lr'
elif problem == 'regression' and estimator is None:
estimator = 'lr'
else:
estimator = estimator
#defining optimizer:
if optimize is None and problem == 'classification':
optimize = 'Accuracy'
elif optimize is None and problem == 'regression':
optimize = 'R2'
else:
optimize=optimize
progress.value += 1
#creating sentiments
if problem == 'classification' or problem == 'regression':
if auto_fe:
monitor.iloc[1,1:] = 'Feature Engineering'
update_display(monitor, display_id = 'monitor')
from textblob import TextBlob
monitor.iloc[2,1:] = 'Extracting Polarity'
update_display(monitor, display_id = 'monitor')
polarity = data_[target_].map(lambda text: TextBlob(text).sentiment.polarity)
monitor.iloc[2,1:] = 'Extracting Subjectivity'
update_display(monitor, display_id = 'monitor')
subjectivity = data_[target_].map(lambda text: TextBlob(text).sentiment.subjectivity)
monitor.iloc[2,1:] = 'Extracting Wordcount'
update_display(monitor, display_id = 'monitor')
word_count = [len(i) for i in text]
progress.value += 1
#defining tuning grid
param_grid = [2,4,8,16,32,64,100,200,300,400]
master = []; master_df = []
monitor.iloc[1,1:] = 'Creating Topic Model'
update_display(monitor, display_id = 'monitor')
for i in param_grid:
progress.value += 1
monitor.iloc[2,1:] = 'Fitting Model With ' + str(i) + ' Topics'
update_display(monitor, display_id = 'monitor')
#create and assign the model to dataset d
m = create_model(model=model, multi_core=multi_core, num_topics=i, verbose=False)
d = assign_model(m, verbose=False)
if problem in ['classification', 'regression'] and auto_fe:
d['Polarity'] = polarity
d['Subjectivity'] = subjectivity
d['word_count'] = word_count
master.append(m)
master_df.append(d)
#topic model creation end's here
if problem == 'unsupervised':
monitor.iloc[1,1:] = 'Evaluating Topic Model'
update_display(monitor, display_id = 'monitor')
from gensim.models import CoherenceModel
coherence = []
metric = []
counter = 0
for i in master:
progress.value += 1
monitor.iloc[2,1:] = 'Evaluating Coherence With ' + str(param_grid[counter]) + ' Topics'
update_display(monitor, display_id = 'monitor')
model = CoherenceModel(model=i, texts=text, dictionary=id2word, coherence='c_v')
model_coherence = model.get_coherence()
coherence.append(model_coherence)
metric.append('Coherence')
counter += 1
monitor.iloc[1,1:] = 'Compiling Results'
monitor.iloc[1,1:] = 'Finalizing'
update_display(monitor, display_id = 'monitor')
df = pd.DataFrame({'# Topics': param_grid, 'Score' : coherence, 'Metric': metric})
df.columns = ['# Topics', 'Score', 'Metric']
sorted_df = df.sort_values(by='Score', ascending=False)
ival = sorted_df.index[0]
best_model = master[ival]
best_model_df = master_df[ival]
fig = px.line(df, x='# Topics', y='Score', line_shape='linear',
title= 'Coherence Value and # of Topics', color='Metric')
fig.update_layout(plot_bgcolor='rgb(245,245,245)')
fig.show()
monitor = ''
update_display(monitor, display_id = 'monitor')
monitor_out.clear_output()
progress.close()
best_k = np.array(sorted_df.head(1)['# Topics'])[0]
best_m = round(np.array(sorted_df.head(1)['Score'])[0],4)
p = 'Best Model: ' + topic_model_name + ' |' + ' # Topics: ' + str(best_k) + ' | ' + 'Coherence: ' + str(best_m)
print(p)
elif problem == 'classification':
"""
defining estimator
"""
monitor.iloc[1,1:] = 'Evaluating Topic Model'
update_display(monitor, display_id = 'monitor')
if estimator == 'lr':
from sklearn.linear_model import LogisticRegression
model = LogisticRegression(random_state=seed)
full_name = 'Logistic Regression'
elif estimator == 'knn':
from sklearn.neighbors import KNeighborsClassifier
model = KNeighborsClassifier()
full_name = 'K Nearest Neighbours'
elif estimator == 'nb':
from sklearn.naive_bayes import GaussianNB
model = GaussianNB()
full_name = 'Naive Bayes'
elif estimator == 'dt':
from sklearn.tree import DecisionTreeClassifier
model = DecisionTreeClassifier(random_state=seed)
full_name = 'Decision Tree'
elif estimator == 'svm':
from sklearn.linear_model import SGDClassifier
model = SGDClassifier(max_iter=1000, tol=0.001, random_state=seed)
full_name = 'Support Vector Machine'
elif estimator == 'rbfsvm':
from sklearn.svm import SVC
model = SVC(gamma='auto', C=1, probability=True, kernel='rbf', random_state=seed)
full_name = 'RBF SVM'
elif estimator == 'gpc':
from sklearn.gaussian_process import GaussianProcessClassifier
model = GaussianProcessClassifier(random_state=seed)
full_name = 'Gaussian Process Classifier'
elif estimator == 'mlp':
from sklearn.neural_network import MLPClassifier
model = MLPClassifier(max_iter=500, random_state=seed)
full_name = 'Multi Level Perceptron'
elif estimator == 'ridge':
from sklearn.linear_model import RidgeClassifier
model = RidgeClassifier(random_state=seed)
full_name = 'Ridge Classifier'
elif estimator == 'rf':
from sklearn.ensemble import RandomForestClassifier
model = RandomForestClassifier(n_estimators=10, random_state=seed)
full_name = 'Random Forest Classifier'
elif estimator == 'qda':
from sklearn.discriminant_analysis import QuadraticDiscriminantAnalysis
model = QuadraticDiscriminantAnalysis()
full_name = 'Quadratic Discriminant Analysis'
elif estimator == 'ada':
from sklearn.ensemble import AdaBoostClassifier
model = AdaBoostClassifier(random_state=seed)
full_name = 'AdaBoost Classifier'
elif estimator == 'gbc':
from sklearn.ensemble import GradientBoostingClassifier
model = GradientBoostingClassifier(random_state=seed)
full_name = 'Gradient Boosting Classifier'
elif estimator == 'lda':
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
model = LinearDiscriminantAnalysis()
full_name = 'Linear Discriminant Analysis'
elif estimator == 'et':
from sklearn.ensemble import ExtraTreesClassifier
model = ExtraTreesClassifier(random_state=seed)
full_name = 'Extra Trees Classifier'
elif estimator == 'xgboost':
from xgboost import XGBClassifier
model = XGBClassifier(random_state=seed, n_jobs=-1, verbosity=0)
full_name = 'Extreme Gradient Boosting'
elif estimator == 'lightgbm':
import lightgbm as lgb
model = lgb.LGBMClassifier(random_state=seed)
full_name = 'Light Gradient Boosting Machine'
elif estimator == 'catboost':
from catboost import CatBoostClassifier
model = CatBoostClassifier(random_state=seed, silent=True) # Silent is True to suppress CatBoost iteration results
full_name = 'CatBoost Classifier'
progress.value += 1
"""
start model building here
"""
acc = []; auc = []; recall = []; prec = []; kappa = []; f1 = []
for i in range(0,len(master_df)):
progress.value += 1
param_grid_val = param_grid[i]
monitor.iloc[2,1:] = 'Evaluating Classifier With ' + str(param_grid_val) + ' Topics'
update_display(monitor, display_id = 'monitor')
#prepare the dataset for supervised problem
d = master_df[i]
d.dropna(axis=0, inplace=True) #droping rows where Dominant_Topic is blank
d.drop([target_], inplace=True, axis=1)
d = pd.get_dummies(d)
#split the dataset
X = d.drop(supervised_target, axis=1)
y = d[supervised_target]
#fit the model
model.fit(X,y)
#generate the prediction and evaluate metric
pred = cross_val_predict(model,X,y,cv=fold, method = 'predict')
acc_ = metrics.accuracy_score(y,pred)
acc.append(acc_)
recall_ = metrics.recall_score(y,pred)
recall.append(recall_)
precision_ = metrics.precision_score(y,pred)
prec.append(precision_)
kappa_ = metrics.cohen_kappa_score(y,pred)
kappa.append(kappa_)
f1_ = metrics.f1_score(y,pred)
f1.append(f1_)
if hasattr(model,'predict_proba'):
pred_ = cross_val_predict(model,X,y,cv=fold, method = 'predict_proba')
pred_prob = pred_[:,1]
auc_ = metrics.roc_auc_score(y,pred_prob)
auc.append(auc_)
else:
auc.append(0)
monitor.iloc[1,1:] = 'Compiling Results'
monitor.iloc[1,1:] = 'Finalizing'
update_display(monitor, display_id = 'monitor')
df = pd.DataFrame({'# Topics': param_grid, 'Accuracy' : acc, 'AUC' : auc, 'Recall' : recall,
'Precision' : prec, 'F1' : f1, 'Kappa' : kappa})
sorted_df = df.sort_values(by=optimize, ascending=False)
ival = sorted_df.index[0]
best_model = master[ival]
best_model_df = master_df[ival]
progress.value += 1
sd = pd.melt(df, id_vars=['# Topics'], value_vars=['Accuracy', 'AUC', 'Recall', 'Precision', 'F1', 'Kappa'],
var_name='Metric', value_name='Score')
fig = px.line(sd, x='# Topics', y='Score', color='Metric', line_shape='linear', range_y = [0,1])
fig.update_layout(plot_bgcolor='rgb(245,245,245)')
title= str(full_name) + ' Metrics and # of Topics'
fig.update_layout(title={'text': title, 'y':0.95,'x':0.45,'xanchor': 'center','yanchor': 'top'})
fig.show()
monitor = ''
update_display(monitor, display_id = 'monitor')
monitor_out.clear_output()
progress.close()
best_k = np.array(sorted_df.head(1)['# Topics'])[0]
best_m = round(np.array(sorted_df.head(1)[optimize])[0],4)
p = 'Best Model: ' + topic_model_name + ' |' + ' # Topics: ' + str(best_k) + ' | ' + str(optimize) + ' : ' + str(best_m)
print(p)
elif problem == 'regression':
"""
defining estimator
"""
monitor.iloc[1,1:] = 'Evaluating Topic Model'
update_display(monitor, display_id = 'monitor')
if estimator == 'lr':
from sklearn.linear_model import LinearRegression
model = LinearRegression()
full_name = 'Linear Regression'
elif estimator == 'lasso':
from sklearn.linear_model import Lasso
model = Lasso(random_state=seed)
full_name = 'Lasso Regression'
elif estimator == 'ridge':
from sklearn.linear_model import Ridge
model = Ridge(random_state=seed)
full_name = 'Ridge Regression'
elif estimator == 'en':
from sklearn.linear_model import ElasticNet
model = ElasticNet(random_state=seed)
full_name = 'Elastic Net'
elif estimator == 'lar':
from sklearn.linear_model import Lars
model = Lars()
full_name = 'Least Angle Regression'
elif estimator == 'llar':
from sklearn.linear_model import LassoLars
model = LassoLars()
full_name = 'Lasso Least Angle Regression'
elif estimator == 'omp':
from sklearn.linear_model import OrthogonalMatchingPursuit
model = OrthogonalMatchingPursuit()
full_name = 'Orthogonal Matching Pursuit'
elif estimator == 'br':
from sklearn.linear_model import BayesianRidge
model = BayesianRidge()
full_name = 'Bayesian Ridge Regression'
elif estimator == 'ard':
from sklearn.linear_model import ARDRegression
model = ARDRegression()
full_name = 'Automatic Relevance Determination'
elif estimator == 'par':
from sklearn.linear_model import PassiveAggressiveRegressor
model = PassiveAggressiveRegressor(random_state=seed)
full_name = 'Passive Aggressive Regressor'
elif estimator == 'ransac':
from sklearn.linear_model import RANSACRegressor
model = RANSACRegressor(random_state=seed)
full_name = 'Random Sample Consensus'
elif estimator == 'tr':
from sklearn.linear_model import TheilSenRegressor
model = TheilSenRegressor(random_state=seed)
full_name = 'TheilSen Regressor'
elif estimator == 'huber':
from sklearn.linear_model import HuberRegressor
model = HuberRegressor()
full_name = 'Huber Regressor'
elif estimator == 'kr':
from sklearn.kernel_ridge import KernelRidge
model = KernelRidge()
full_name = 'Kernel Ridge'
elif estimator == 'svm':
from sklearn.svm import SVR
model = SVR()
full_name = 'Support Vector Regression'
elif estimator == 'knn':
from sklearn.neighbors import KNeighborsRegressor
model = KNeighborsRegressor()
full_name = 'Nearest Neighbors Regression'
elif estimator == 'dt':
from sklearn.tree import DecisionTreeRegressor
model = DecisionTreeRegressor(random_state=seed)
full_name = 'Decision Tree Regressor'
elif estimator == 'rf':
from sklearn.ensemble import RandomForestRegressor
model = RandomForestRegressor(random_state=seed)
full_name = 'Random Forest Regressor'
elif estimator == 'et':
from sklearn.ensemble import ExtraTreesRegressor
model = ExtraTreesRegressor(random_state=seed)
full_name = 'Extra Trees Regressor'
elif estimator == 'ada':
from sklearn.ensemble import AdaBoostRegressor
model = AdaBoostRegressor(random_state=seed)
full_name = 'AdaBoost Regressor'
elif estimator == 'gbr':
from sklearn.ensemble import GradientBoostingRegressor
model = GradientBoostingRegressor(random_state=seed)
full_name = 'Gradient Boosting Regressor'
elif estimator == 'mlp':
from sklearn.neural_network import MLPRegressor
model = MLPRegressor(random_state=seed)
full_name = 'MLP Regressor'
elif estimator == 'xgboost':
from xgboost import XGBRegressor
model = XGBRegressor(random_state=seed, n_jobs=-1, verbosity=0)
full_name = 'Extreme Gradient Boosting Regressor'
elif estimator == 'lightgbm':
import lightgbm as lgb
model = lgb.LGBMRegressor(random_state=seed)
full_name = 'Light Gradient Boosting Machine'
elif estimator == 'catboost':
from catboost import CatBoostRegressor
model = CatBoostRegressor(random_state=seed, silent = True)
full_name = 'CatBoost Regressor'
progress.value += 1
"""
start model building here
"""
score = []
metric = []
for i in range(0,len(master_df)):
progress.value += 1
param_grid_val = param_grid[i]
monitor.iloc[2,1:] = 'Evaluating Regressor With ' + str(param_grid_val) + ' Topics'
update_display(monitor, display_id = 'monitor')
#prepare the dataset for supervised problem
d = master_df[i]
d.dropna(axis=0, inplace=True) #droping rows where Dominant_Topic is blank
d.drop([target_], inplace=True, axis=1)
d = pd.get_dummies(d)
#split the dataset
X = d.drop(supervised_target, axis=1)
y = d[supervised_target]
#fit the model
model.fit(X,y)
#generate the prediction and evaluate metric
pred = cross_val_predict(model,X,y,cv=fold, method = 'predict')
if optimize == 'R2':
r2_ = metrics.r2_score(y,pred)
score.append(r2_)
elif optimize == 'MAE':
mae_ = metrics.mean_absolute_error(y,pred)
score.append(mae_)
elif optimize == 'MSE':
mse_ = metrics.mean_squared_error(y,pred)
score.append(mse_)
elif optimize == 'RMSE':
mse_ = metrics.mean_squared_error(y,pred)
rmse_ = np.sqrt(mse_)
score.append(rmse_)
elif optimize == 'ME':
max_error_ = metrics.max_error(y,pred)
score.append(max_error_)
metric.append(str(optimize))
monitor.iloc[1,1:] = 'Compiling Results'
monitor.iloc[1,1:] = 'Finalizing'
update_display(monitor, display_id = 'monitor')
df = pd.DataFrame({'# Topics': param_grid, 'Score' : score, 'Metric': metric})
df.columns = ['# Topics', optimize, 'Metric']
#sorting to return best model
if optimize == 'R2':
sorted_df = df.sort_values(by=optimize, ascending=False)
else:
sorted_df = df.sort_values(by=optimize, ascending=True)
ival = sorted_df.index[0]
best_model = master[ival]
best_model_df = master_df[ival]
fig = px.line(df, x='# Topics', y=optimize, line_shape='linear',
title= str(full_name) + ' Metrics and # of Topics', color='Metric')
fig.update_layout(plot_bgcolor='rgb(245,245,245)')
progress.value += 1
monitor = ''
update_display(monitor, display_id = 'monitor')
monitor_out.clear_output()
progress.close()
fig.show()
best_k = np.array(sorted_df.head(1)['# Topics'])[0]
best_m = round(np.array(sorted_df.head(1)[optimize])[0],4)
p = 'Best Model: ' + topic_model_name + ' |' + ' # Topics: ' + str(best_k) + ' | ' + str(optimize) + ' : ' + str(best_m)
print(p)
#storing into experiment
tup = ('Best Model',best_model)
experiment__.append(tup)
return best_model
def evaluate_model(model):
    """
    Description:
    ------------
    This function displays the user interface for all the available plots
    for a given model. It internally uses the plot_model() function.

    Example:
    --------
    from pycaret.datasets import get_data
    kiva = get_data('kiva')
    experiment_name = setup(data = kiva, target = 'en')
    lda = create_model('lda')

    evaluate_model(lda)

    This will display the User Interface for all of the plots for
    given model.

    Parameters
    ----------
    model : object, default = none
    A trained model object should be passed.

    Returns:
    --------
    User Interface: Displays the user interface for plotting.

    Warnings:
    ---------
    None
    """
    from ipywidgets import widgets
    from ipywidgets.widgets import interact_manual, fixed

    # Determine how many topics the trained model holds. gensim models expose
    # show_topics() / num_topics, sklearn-style models expose n_components.
    # NOTE: catch Exception (not a bare except) so KeyboardInterrupt and
    # SystemExit are never swallowed while probing the model's API.
    try:
        n_topic_assigned = len(model.show_topics())
    except Exception:
        try:
            n_topic_assigned = model.num_topics
        except Exception:
            n_topic_assigned = model.n_components

    # Labels for the topic-selection dropdown: 'Topic 0', 'Topic 1', ...
    final_list = ['Topic ' + str(i) for i in range(n_topic_assigned)]

    a = widgets.ToggleButtons(
        options=[('Frequency Plot', 'frequency'),
                 ('Bigrams', 'bigram'),
                 ('Trigrams', 'trigram'),
                 ('Sentiment Polarity', 'sentiment'),
                 ('Word Cloud', 'wordcloud'),
                 ],
        description='Plot Type:',
        disabled=False,
        button_style='',  # 'success', 'info', 'warning', 'danger' or ''
        icons=['']
    )

    b = widgets.Dropdown(options=final_list, description='Topic #:', disabled=False)

    # Render the interactive controls; plot_model() is called on demand
    # with the selections made in the widgets above.
    interact_manual(plot_model, model=fixed(model), plot=a, topic_num=b)
def save_model(model, model_name):
    """
    Description:
    ------------
    This function saves the trained model object into the current active
    directory as a pickle file for later use.

    Example:
    --------
    from pycaret.datasets import get_data
    kiva = get_data('kiva')
    experiment_name = setup(data = kiva, target = 'en')
    lda = create_model('lda')

    save_model(lda, 'lda_model_23122019')

    This will save the model as a binary pickle file in the current
    directory.

    Parameters
    ----------
    model : object, default = none
    A trained model object should be passed.

    model_name : string, default = none
    Name of pickle file to be passed as a string.

    Returns:
    --------
    Success Message

    Warnings:
    ---------
    None
    """
    import joblib
    # The '.pkl' extension is always appended; callers pass the bare name.
    model_name = model_name + '.pkl'
    joblib.dump(model, model_name)
    print('Model Successfully Saved')
def load_model(model_name):
    """
    Description:
    ------------
    This function loads a previously saved model from the current active directory
    into the current python environment. Load object must be a pickle file.

    Example:
    --------
    saved_lda = load_model('lda_model_23122019')

    This will call the trained model in saved_lr variable using model_name param.
    The file must be in current directory.

    Parameters
    ----------
    model_name : string, default = none
    Name of pickle file to be passed as a string.

    Returns:
    --------
    Success Message

    Warnings:
    ---------
    None
    """
    import joblib
    # The '.pkl' extension is always appended; callers pass the bare name.
    model_name = model_name + '.pkl'
    # Load first, then report success — the original printed the success
    # message before loading, so it appeared even when joblib.load raised
    # (missing or corrupt file).
    model = joblib.load(model_name)
    print('Model Successfully Loaded')
    return model
def save_experiment(experiment_name=None):
    """
    Description:
    ------------
    This function saves the entire experiment into the current active directory.
    All outputs using pycaret are internally saved into a binary list which is
    pickilized when save_experiment() is used.

    Example:
    --------
    save_experiment()

    This will save the entire experiment into the current active directory.
    By default, the name of the experiment will use the session_id generated
    during setup(). To use a custom name, a string must be passed to the
    experiment_name param. For example:

    save_experiment('experiment_23122019')

    Parameters
    ----------
    experiment_name : string, default = none
    Name of pickle file to be passed as a string.

    Returns:
    --------
    Success Message

    Warnings:
    ---------
    None
    """
    # general dependencies
    import joblib
    global experiment__

    # Default to a name derived from the session seed set up by setup().
    if experiment_name is None:
        experiment_name = 'experiment_' + str(seed)

    experiment_name = experiment_name + '.pkl'
    joblib.dump(experiment__, experiment_name)
    print('Experiment Successfully Saved')
def load_experiment(experiment_name):
    """
    Description:
    ------------
    This function loads a previously saved experiment from the current active
    directory into current python environment. Load object must be a pickle file.

    Example:
    --------
    saved_experiment = load_experiment('experiment_23122019')

    This will load the entire experiment pipeline into the object
    saved_experiment. The experiment file must be in current directory.

    Parameters
    ----------
    experiment_name : string, default = none
    Name of pickle file to be passed as a string.

    Returns:
    --------
    Information Grid containing details of saved objects in experiment pipeline.

    Warnings:
    ---------
    None
    """
    # general dependencies
    import joblib
    import pandas as pd
    # Imported explicitly for consistency with the other functions in this
    # file — the original relied on `display` existing as a notebook global,
    # which raises NameError outside IPython.
    from IPython.display import display

    experiment_name = experiment_name + '.pkl'
    # The pipeline is a list of (name, object) tuples appended by experiment__.
    temp = joblib.load(experiment_name)

    name = [entry[0] for entry in temp]
    exp = [entry[-1] for entry in temp]

    # Show an index of what was restored, then return the objects themselves.
    ind = pd.DataFrame(name, columns=['Object'])
    display(ind)

    return exp
def get_topics(data, text, model=None, num_topics=4):
    """
    Magic function to get topic model in Power Query / Power BI.
    """
    # Fall back to Latent Dirichlet Allocation when no model id is given.
    chosen = 'lda' if model is None else model

    # Initialise the experiment, train the topic model, and label the data.
    setup(data=data, target=text)
    trained = create_model(model=chosen, num_topics=num_topics, verbose=False)
    dataset = assign_model(trained, verbose=False)
    return dataset
|
from mirage.core import scenario, interpreter, module
from mirage.libs import io, ble, bt, utils
from mirage.libs.ble_utils.firewall import *
import configparser,os.path,subprocess
class mitm_test(scenario.Scenario):
def onStart(self):
self.a2sEmitter = self.module.a2sEmitter
self.a2sReceiver = self.module.a2sReceiver
self.a2mEmitter = self.module.a2mEmitter
self.a2mReceiver = self.module.a2mReceiver
self.firewallManager = FirewallEventManager()
self.dependencies = ["ble_discover"] # for GATT
io.info("MITM started !")
# Load module
self.m = utils.loadModule('ble_discover')
self.m['INTERFACE'] = self.args['INTERFACE1']
self.m["START_HANDLE"] = "0x0001"
self.m["END_HANDLE"] = "0xFFFF"
self.m["FILTER_BY"] = ""
self.m["FILTER"] = ""
return True
def onEnd(self):
io.info("MITM finished")
return True
def onMasterWriteCommand(self,packet):
#Defines what apprend exactly
currentEvent = self.getEventName(packet.handle, packet.value, self.onMasterWriteCommand.__name__)
#Init counter of the number of packets if it's the first time that packet is handled
self.firewallManager.initCounters(currentEvent)
#Computes duration in seconds where last time where packet comes, 0 is default value
sinceLastEventDuration = self.firewallManager.durationSinceLastPacket(currentEvent)
if packet.handle == 0x29 and 0x2 in packet.value:
#Increment counter of one packet and update timestamp of last packet that comes
self.firewallManager.countEvent(currentEvent)
# packet is allowed
return True
if packet.handle == 0x29 and 0x0 in packet.value:
#Increment counter of one packet and update timestamp of last packet that comes
self.firewallManager.countEvent(currentEvent)
# packet is allowed
return True
else : #default case of the rule
return self.__drop(currentEvent)
def onSlaveHandleValueNotification(self,packet):
#Defines what apprend exactly
currentEvent = self.getEventName(packet.handle, packet.value, self.onSlaveHandleValueNotification.__name__)
#Init counter of the number of packets if it's the first time that packet is handled
self.firewallManager.initCounters(currentEvent)
#Computes duration in seconds where last time where packet comes, 0 is default value
sinceLastEventDuration = self.firewallManager.durationSinceLastPacket(currentEvent)
if packet.handle == 0x25 and 0x1 in packet.value:
#Increment counter of one packet and update timestamp of last packet that comes
self.firewallManager.countEvent(currentEvent)
#Check if flow of packets is allowed or not
if self.firewallManager.getCurrentCount(currentEvent) >= 2 and sinceLastEventDuration < WINDOW_SIZE_IN_SECONDS:
return self.__drop(currentEvent)
elif sinceLastEventDuration > WINDOW_SIZE_IN_SECONDS: # After a certain time counters go down
self.firewallManager.resetCounters(currentEvent)
else: # number of packet flows is inferior of limit during window
return True
else : #default case of the rule
return self.__drop(currentEvent)
# Drop packets and reset counter of packets after drops
def __drop(self, name: str):
io.info("According to our firewall policy we choose to drop the packet")
self.firewallManager.resetCounters(name)
return False
def getEventName(self, handle: hex, value: hex, handlerName: str):
return "{0}_{1}_{2} ".format(str(handle),str(value),handlerName)
"""
MIDDLE FILTERING GATT SERVER
"""
# When receiving a packet for GATT
def onMasterReadByGroupTypeRequest(self, packet):
io.info("Read By Group Type Request (from Master): startHandle = "+hex(packet.startHandle)+
" / endHandle = "+hex(packet.endHandle)+" / uuid = "+hex(packet.uuid))
io.info("Response from MITM ReadGroup ...")
(success,response) = self.server.readByGroupType(packet.startHandle, packet.endHandle, packet.uuid)
if success:
io.displayPacket(ble.BLEReadByGroupTypeResponse(attributes=response))
self.a2mEmitter.sendp(ble.BLEReadByGroupTypeResponse(attributes=response))
else:
self.a2mEmitter.sendp(ble.BLEErrorResponse(request=0x10,ecode=response,handle=packet.startHandle))
return False
# When receiving a packet for ATT
def onMasterReadByTypeRequest(self, packet):
io.info("Read By Type Request : startHandle = "+hex(packet.startHandle)+
" / endHandle = "+hex(packet.endHandle)+" / uuid = "+hex(packet.uuid))
io.info("Response from MITM ReadType ...")
(success,response) = self.server.readByType(packet.startHandle,packet.endHandle,packet.uuid)
if (response == []):
io.warning(" MITM: Empty Response ! Try again ... ")
self.a2mEmitter.sendp(ble.BLEErrorResponse(request=0x08,ecode=0, handle=packet.startHandle))
else:
if success:
io.displayPacket(ble.BLEReadByTypeResponse(attributes=response))
self.a2mEmitter.sendp(ble.BLEReadByTypeResponse(attributes=response))
else:
self.a2mEmitter.sendp(ble.BLEErrorResponse(request=0x08,ecode=response, handle=packet.startHandle))
return False
# NOT USED
# When receiving a request FindInfo
#
def onMasterFindInformationRequest(self,packet):
io.info("Find Information Request : startHandle = "+hex(packet.startHandle)+
" / endHandle = "+hex(packet.endHandle))
io.info("Response from MITM FindInfo ...")
(success,response) = self.server.findInformation(packet.startHandle,packet.endHandle)
if success:
io.displayPacket(ble.BLEFindInformationResponse(attributes=response))
self.a2mEmitter.sendp(ble.BLEFindInformationResponse(attributes=response))
else:
self.a2mEmitter.sendp(ble.BLEErrorResponse(request=0x04,ecode=response,handle=packet.startHandle))
return False
def onSlaveConnect(self, initiatorType="public"):
# Entering the GATT Entering Cloning mode
while (self.a2sEmitter.getMode() != "NORMAL"):
utils.wait(seconds=1)
print(self.a2sEmitter.getMode())
# Verify the connection type
address = utils.addressArg(self.args["TARGET"])
connectionType = self.args["CONNECTION_TYPE"]
self.responderAddress = address
self.responderAddressType = (b"\x00" if self.args["CONNECTION_TYPE"] == "public" else b"\x01")
# Connecting to Slave
io.info("MITM: Connecting to slave "+address+"...")
self.a2sEmitter.sendp(ble.BLEConnect(dstAddr=address, type=connectionType, initiatorType=initiatorType))
# Wait until connection
while not self.a2sEmitter.isConnected(): utils.wait(seconds=0.5)
# When connected, clone the GATT Server
if self.a2sEmitter.isConnected():
io.success("Connected on slave : "+self.a2sReceiver.getCurrentConnection())
# Cloning the ATT
io.info("MITM: Cloning Slave's ATT Server ...")
self.__getAttSlave("ATT_SLAVE_MITM")
# Cloning the GATT
io.info("MITM: Cloning GATT Server ... ")
self.__getGattSlave("GATT_SLAVE_MITM")
io.success("MITM: Cloning has been finished ... ")
# Starting the server
io.info("MITM: GATT/ATT starting server ...")
self.__setGattServer("GATT_SLAVE_MITM", "ATT_SLAVE_MITM")
io.success("MITM: GATT/ATT server running ... ")
else:
io.fail("MITM: No active connections !")
return False
# Check if file exists
def __fileExists(self,filename):
return os.path.isfile(filename)
# Initialize GATT Server
def __setGattServer(self, GATT_SLAVE_FILE, ATT_SLAVE_FILE,
                    rules_file='/home/pi/mirage/mirage/tables/scenario/ble_tables.txt'):
    """Build the spoofed GATT server from a previously exported GATT dump.

    GATT_SLAVE_FILE -- path of the exported GATT structure ("" for an
                       empty database).
    ATT_SLAVE_FILE  -- path of the exported ATT structure.  The ATT import
                       path is currently disabled; the parameter is kept so
                       the call sites stay unchanged.
    rules_file      -- firewall rule table.  This was a hard-coded absolute
                       path; it is now a parameter whose default is the old
                       path, so existing callers keep working.
    """
    # Fresh, empty GATT server instance that the firewall will populate.
    self.server = ble.GATT_Server()
    firewallGattServer = Firewall_GattServer()
    # Parse the filtering rules once, up front.
    io.info("MITM: Parsing of rules ...")
    (characteristicRules, serviceRules, descriptorRules,
     attributeRules, gatt_modifier_rules) = checkRules(rules_file)
    io.info("MITM: Starting MITM ATT / GATT ... ")
    # Import the GATT structure, dropping entries matched by the rules.
    # (A direct ATT-level import using attributeRules/gatt_modifier_rules
    # existed here but is disabled.)
    if GATT_SLAVE_FILE != "" and self.__fileExists(GATT_SLAVE_FILE):
        io.info("MITM: Importing GATT_SLAVE structure")
        firewallGattServer.importGATT(filename=GATT_SLAVE_FILE,
                                      forbiddenServices=serviceRules,
                                      forbiddenCharacteristics=characteristicRules,
                                      forbiddenDescriptors=descriptorRules,
                                      server=self.server)
        # Was a stray debug print(); use the module's logger for consistency.
        io.info("MITM: GATT import finished")
    else:
        io.info("MITM: No filename provided : empty database !")
# Export Slave's ATT Server
def __getAttSlave(self, ATT_SLAVE_FILE):
    """Export the slave's ATT attribute table into ATT_SLAVE_FILE."""
    target = ATT_SLAVE_FILE
    # Configure the export module: attribute-level dump into the file.
    self.m["WHAT"] = "attributes"
    self.m['ATT_FILE'] = target
    # Truncate the target so the module writes into an empty file.
    with open(target, 'w'):
        pass
    # Perform the dump.
    self.m.execute()
# Export Slave's GATT
def __getGattSlave(self, GATT_SLAVE_FILE):
    """Export the slave's full GATT structure into GATT_SLAVE_FILE."""
    target = GATT_SLAVE_FILE
    # Configure the export module: full ("all") dump into the file.
    self.m["WHAT"] = "all"
    self.m['GATT_FILE'] = target
    # Truncate the target so the module writes into an empty file.
    with open(target, 'w'):
        pass
    # Perform the dump.
    self.m.execute()
from django.conf import settings
from django.contrib.auth import get_user_model
from django.urls import reverse
from mock import patch
from rest_framework import status
from rest_framework.test import APITestCase
from referral.faker_factories import FakeCampaignFactory, faker
from referral.models import Campaign
class TestReferralWebhooks(APITestCase):
    """Endpoint tests for the referral-provider reward webhook."""

    def _create_user(self, password='<PASSWORD>'):
        # Minimal active, non-superuser account with a random email.
        self.user = get_user_model().objects.create(
            email=faker.email(),
            is_superuser=False,
            is_active=True)
        self.user.set_password(password)
        self.user.save()
        return self.user

    def _get_referral_webhook_payload(self, campaign_id=None, email=None):
        # Payload mirroring what the referral provider posts to the webhook;
        # campaign/email fall back to random values when not supplied.
        return {
            'pointType': 'referrals',
            'point': 3,
            'rewardImage': 'https://mydomain.com/rewardImage.jpg',
            'rewardName': 'Free E-Book',
            'rewardDescription': '',
            'rewardId': '5b8fdb3c6d8c774e5eab7183',
            'campaignId': campaign_id if campaign_id else faker.numerify(),
            'email': email if email else faker.email(),
            'webHookType': 'NewReward'
        }

    @patch('referral.tasks.subscriber.SubscriberGetTokenTask.apply_async')
    @patch('referral.signals_define.referral_reward_acquired.send')
    def test_referral_webhook_received_launch_signal(self, mock_signal_reward, patch_task):
        # PREPARE DATA: a campaign with one subscribed user.
        campaign = FakeCampaignFactory()
        subscriber = self._create_user()
        campaign.add_subscriber(subscriber)
        payload = self._get_referral_webhook_payload(
            campaign_id=campaign.campaign_id,
            email=subscriber.email,
        )
        endpoint = reverse('referral:reward-awared')
        # DO ACTION
        resp = self.client.post(endpoint, payload)
        # DO ASSERTIONS: accepted, and the reward signal fired.
        self.assertEqual(resp.status_code, status.HTTP_200_OK)
        self.assertTrue(mock_signal_reward.called)

    @patch('referral.signals_define.referral_reward_acquired.send')
    def test_work_payload_data_raise_404(self, mock_signal_reward):
        # PREPARE DATA: payload for a campaign that does not exist.
        payload = self._get_referral_webhook_payload()
        endpoint = reverse('referral:reward-awared')
        # DO ACTION
        resp = self.client.post(endpoint, payload)
        # ASSERTIONS: 404, no signal, campaign really absent.
        self.assertEqual(resp.status_code, status.HTTP_404_NOT_FOUND)
        self.assertFalse(mock_signal_reward.called)
        self.assertFalse(Campaign.objects.filter(campaign_id=payload.get('campaignId')).exists())
        self.assertEqual(settings.REFERRAL_NEW_REWARD_AWARED, payload.get('webHookType'))

    @patch('referral.signals_define.referral_reward_acquired.send')
    def test_work_payload_data_raise_404_if_user_is_not_subscriber(self, mock_signal_reward):
        # PREPARE DATA: existing campaign, but the email is not subscribed.
        campaign = FakeCampaignFactory()
        payload = self._get_referral_webhook_payload(
            campaign_id=campaign.campaign_id
        )
        endpoint = reverse('referral:reward-awared')
        # DO ACTION
        resp = self.client.post(endpoint, payload)
        # ASSERTIONS: 404, campaign exists, user absent, no signal.
        self.assertEqual(resp.status_code, status.HTTP_404_NOT_FOUND)
        self.assertTrue(Campaign.objects.filter(campaign_id=payload.get('campaignId')).exists())
        self.assertFalse(campaign.users.filter(email=payload.get('email')).exists())
        self.assertEqual(settings.REFERRAL_NEW_REWARD_AWARED, payload.get('webHookType'))
        self.assertFalse(mock_signal_reward.called)
|
from django.utils.translation import ugettext as _
from django.db import models
from django.utils.safestring import mark_safe
from django.core.files.storage import FileSystemStorage
from django.conf import settings
import os
# Visualization XML templates:
#
# These files are designed and uploaded by an administrator.
#
# They are basically XML files, except that they contain some Django
# template syntax variable references, so that they can be customized
# with parameters.
#
# This means that they must be "rendered" by the Django template
# engine, rather than just served statically.
#
# Therefore, they need not (shouldn't) be under MEDIA_ROOT.
#
# We will use the ordinary render_to_response on them from a simple
# view, so they must be in a directory that some template loader
# searches.
#
# By default, the sub-directory (of somewhere on the template load
# path) that is used is "visualizations/XML", but you can set
# VISUALIZATION_TEMPLATES_SUB_PATH in the project settings.
#
# By default, that path is interpreted relative to the last path in
# the TEMPLATE_DIRS project setting. If there is some reason you
# can't put this path last (such as another app requiring its path be
# last), you can specify VISUALIZATION_TEMPLATES_DIR, in your project
# settings, but be sure that it is ALSO in TEMPLATES_DIRS if you do
# so.
#
# You can also choose to have the templates subdirectory of the app
# itself used. Note that apps can be in your python's site-packages
# directory, which your web server probably SHOULD NOT have permission
# to write, so this fallback has limited utility. To choose this
# behavior you can do any of:
#
# Specify it in VISUALIZATION_TEMPLATES_DIR.
#
# Leave TEMPLATE_DIRS empty, and don't specify or use None for
# VISUALIZATION_TEMPLATES_DIR.
#
# Specify VISUALIZATION_TEMPLATES_DIR as False.
#
# All this assumes that your project settings TEMPLATE_LOADERS
# includes a suitable loader.
# Sub-directory (under some template search path) where visualization XML
# templates live; override via VISUALIZATION_TEMPLATES_SUB_PATH.
VISUALIZATION_TEMPLATES_SUB_PATH = getattr(settings,
                                           'VISUALIZATION_TEMPLATES_SUB_PATH',
                                           "visualizations/XML")
# Directory templates are uploaded to; see the module comment above for
# the fallback rules.
VISUALIZATION_TEMPLATES_DIR = getattr(settings,
                                      'VISUALIZATION_TEMPLATES_DIR', None)
if VISUALIZATION_TEMPLATES_DIR is None:
    # Not set: fall back to the last entry of TEMPLATE_DIRS.
    # BUG FIX: the original read settings.TEMPLATES_DIRS -- not a Django
    # setting name -- so the lookup always raised AttributeError, which a
    # bare `except:` silently swallowed, and this fallback never worked.
    try:
        VISUALIZATION_TEMPLATES_DIR = settings.TEMPLATE_DIRS[-1]
    except (AttributeError, IndexError):
        # Setting absent or empty: handled by the app-directory fallback.
        pass
if not VISUALIZATION_TEMPLATES_DIR:
    # Either an explicit false value, or no usable TEMPLATE_DIRS entry:
    # use this app's own templates directory.
    VISUALIZATION_TEMPLATES_DIR = os.path.join(
        os.path.dirname(os.path.abspath(__file__)),
        "templates")
# Storage backend used by Visualization.template uploads.
VISUALIZATION_TEMPLATES_STORAGE = FileSystemStorage(
    VISUALIZATION_TEMPLATES_DIR)
class Visualization(models.Model):
    """Hold information needed to render Weave visualizations."""

    # Display name of the visualization.
    name = models.CharField(_('Name'), max_length=100)
    # Optional stand-in image shown instead of the Flash object.
    thumbnail = models.ImageField(
        _('Dummy Image'), upload_to='visualizations/thumbnails/',
        max_length=255, blank=True, default='', help_text=_(
            'If set, may be shown as a stand-in for the Flash, allowing '
            'delayed loading or click to load functionality.'))
    # XML template file, rendered through the Django template engine.
    template = models.FileField(
        _('Visualization template'),
        storage=VISUALIZATION_TEMPLATES_STORAGE,
        upload_to=VISUALIZATION_TEMPLATES_SUB_PATH,
        blank=True, default='')

    def __unicode__(self):
        return self.name

    def has_thumbnail(self):
        """Return a truthy value when the thumbnail file exists on disk."""
        thumb = self.thumbnail
        if not thumb:
            return False
        path = thumb.path
        if not path:
            return path
        return os.path.isfile(path)
class Slot(models.Model):
    """One visualization placement on a profile or summary page.

    Slots are ordered by ``rank`` and grouped into rows: a slot either
    starts a new row or joins the row of the slot preceding it.
    """
    name = models.CharField(_('Name'), max_length=100)
    # Pre-Django-2.0 style ForeignKey (no on_delete argument).
    visualization = models.ForeignKey(Visualization)
    # Sort key; lower ranks come first (see Meta.ordering below).
    rank = models.IntegerField(_('Relative order'), default=500)
    new_row = models.BooleanField(_('Starts new row'), default=True,
                                  help_text=_(
        'If turned off this slot is in the same row as the slot '
        'before it in the relative order. If on, this slot begins '
        'a new row of slots.'))
    title = models.CharField(_('Row (group) title'), max_length=100,
                             blank=True, default='',
                             help_text=_(
        'Ignored if "Starts new row" is off, or if it is blank. '
        'Otherwise it is displayed as a heading for the row or '
        'section (group of rows begun by this one).'))
    vistitle = models.CharField(_('Visualization title'), max_length=100,
                                blank=True, default='',
                                help_text=_(
        'Optional title for the individual visualization (slots) '
        'within a row.'))
    # The first two values below are width and height.
    # Use a string if you need a suffix. Integers will be turned into
    # strings later (see width()/height()).
    # We can append more parameters to the value tuples as we think of
    # them.
    slot_params_by_type = {
        'Table': (707, 190),
        'Chart': (344, 288),
        'Map': (344, 288)
    }
    # Choice pairs (lowercased value, original name as label).  Built
    # BEFORE the keys are lowercased in the rebinding below -- the order
    # of these two statements matters.
    SLOT_TYPE_CHOICES = tuple(
        [(n.lower(), n) for n in slot_params_by_type.keys()]
    )
    # Rebind the table with lowercased keys so width()/height() can look
    # up the stored (lowercased) choice value directly.
    slot_params_by_type = dict(
        [(n.lower(), v) for n, v in slot_params_by_type.items()])
    slot_type = models.CharField(_('Type'), max_length=100,
                                 blank=False, default='table',
                                 choices=SLOT_TYPE_CHOICES)
    SHOWN_ON_CHOICES = (
        ('-not-shown-', '--not-shown--'),
        ('profile', 'Health Profile pages'),
        ('summary', 'Summary pages'),
    )
    shown_on = models.CharField(_('Shown on'), max_length=100,
                                blank=False, default='-not-shown-',
                                choices=SHOWN_ON_CHOICES)

    def width(self):
        """Return this slot type's width as a double-quoted, markup-safe string."""
        v = str(self.__class__.slot_params_by_type[self.slot_type][0])
        return mark_safe('"%s"' % v)

    def height(self):
        """Return this slot type's height as a double-quoted, markup-safe string."""
        v = str(self.__class__.slot_params_by_type[self.slot_type][1])
        return mark_safe('"%s"' % v)

    def __unicode__(self):
        return self.name

    class Meta:
        ordering = ('rank',)
        verbose_name = 'Profile/Summary Visualization Slot'
|
import argparse
import os, time
import numpy as np
import matplotlib as mpl
# Headless environment (no X display): fall back to the non-interactive
# Agg backend before pyplot is imported.
if not os.environ.get('DISPLAY', ''):
    print('no display found. Using non-interactive Agg backend')
    mpl.use('Agg')
import matplotlib.pyplot as plt
import torch
import models
import utils
from matplotlib.backends.backend_pdf import PdfPages
from tqdm import tqdm
# Command-line interface.  Help texts corrected: -seed's default is a
# random integer (the old text claimed 2019), and -dataset also accepts
# the implemented 'gmm' option.
parser = argparse.ArgumentParser(description='Train a generator on artificial data set.')
parser.add_argument('-lr', default=1e-3, type=float,
                    help='base learning rate; the generator uses 8x this. default 1e-3')
parser.add_argument('-batch_size', help='batch size. default 16', default=16, type=int)
parser.add_argument('-epoch', default=300, type=int,
                    help='number of training epochs. default 300')
parser.add_argument('-out', help='output folder.', type=str, required=True)
parser.add_argument('-title', help='title of the animation.', type=str, required=True)
parser.add_argument('-seed', type=int,
                    help='random seed. default: a random integer in [0, 1000).')
parser.add_argument('-dataset', default='grid', type=str,
                    help='grid, spiral, ellipse, unbalanced, gmm.')
parser.add_argument('-wasserstein', default=0, type=int,
                    help='1 to use the Wasserstein (WGAN-GP) objective, 0 for the original GAN loss.')
parser.add_argument('-c_iter', default=1, type=int,
                    help='discriminator (critic) updates per generator update.')
args = parser.parse_args()
# Select the compute device.
if torch.cuda.is_available():
    device = torch.device("cuda:0")
else:
    device = torch.device("cpu")
# Draw a seed when none was given, so the run is still reproducible from
# the recorded args.
if args.seed is None:
    args.seed = np.random.randint(0, 1000)
WASSERSTEIN = args.wasserstein == 1  # was `True if ... else False`
NUM_OF_POINTS = 2500
BATCH_SIZE = args.batch_size
NUM_OF_EPOCHS = args.epoch
out_directory = args.out
print(args)
if not os.path.exists(out_directory):
    os.makedirs(out_directory)
# Record the arguments next to the outputs.  os.path.join fixes the
# original's reliance on a trailing slash in -out, and the context
# manager closes the file instead of leaking the handle.
with open(os.path.join(out_directory, "args.txt"), "w") as args_file:
    print(args, file=args_file)
np.random.seed(args.seed)
torch.random.manual_seed(args.seed)
# Seed CUDA only when a GPU is present (older torch versions complained
# about the unconditional call on CPU-only hosts).
if torch.cuda.is_available():
    torch.cuda.manual_seed(args.seed)
# ---- Build the (NUM_OF_POINTS, 2) real-data tensor for the chosen dataset.
if args.dataset == 'grid':
    # 5x5 grid of tight Gaussian clusters on [-10, 10]^2.
    x_axis = np.linspace(-10, 10, 5)
    y_axis = np.linspace(-10, 10, 5)
    it = 0
    x = torch.empty(NUM_OF_POINTS, 2, dtype=torch.float)
    CLUSTER_SIZE = NUM_OF_POINTS // 25
    for i in range(5):
        for j in range(5):
            x[it*CLUSTER_SIZE:(it+1)*CLUSTER_SIZE, 0] = torch.randn(CLUSTER_SIZE) * 0.05 + x_axis[i]
            x[it*CLUSTER_SIZE:(it+1)*CLUSTER_SIZE, 1] = torch.randn(CLUSTER_SIZE) * 0.05 + y_axis[j]
            it += 1
elif args.dataset == 'ellipse':
    # Points on a circle of radius 4, then a random affine map -> ellipse.
    r = 4
    th = torch.rand(NUM_OF_POINTS) * np.pi * 2.0
    x = torch.empty(NUM_OF_POINTS, 2, dtype=torch.float)
    x[:, 0] = r * torch.cos(th)
    x[:, 1] = r * torch.sin(th)
    x = torch.matmul(x, torch.randn(2, 2) * 0.5) + torch.randn(2)
elif args.dataset == 'spiral':
    # Archimedean spiral with uniform noise.
    r = torch.sqrt(torch.linspace(0, 1, NUM_OF_POINTS)) * 780 * (2*np.pi)/360
    dx = -torch.cos(r)*r + torch.rand(NUM_OF_POINTS) * (0.5)
    dy = torch.sin(r)*r + torch.rand(NUM_OF_POINTS) * (0.5)
    x = torch.stack([dx, dy]).t()
elif args.dataset == 'unbalanced':
    # Two Gaussian blobs with very different spreads.  The split point was
    # hard-coded as 1250; derive it from NUM_OF_POINTS so the dataset
    # stays correct if the point count changes.
    half = NUM_OF_POINTS // 2
    x = torch.empty(NUM_OF_POINTS, 2, device=device)
    x[:half] = torch.randn(half, 2, device=device) * 0.25 + torch.tensor([-5., 5.], device=device)
    x[half:] = torch.randn(NUM_OF_POINTS - half, 2, device=device) * 2 + torch.tensor([5., -5.], device=device)
elif args.dataset == 'gmm':
    # k Gaussian components with random means and scales.
    x = torch.randn(NUM_OF_POINTS, 2, device=device)
    k = 5
    cluster_size = NUM_OF_POINTS // k
    for i in range(k):
        rand_std = torch.rand(1, 2, device=device)*2 + 0.5
        rand_mu = torch.rand(1, 2, device=device)*24 - 12
        x[i*cluster_size:(i+1)*cluster_size] = x[i*cluster_size:(i+1)*cluster_size] * rand_std + rand_mu
else:
    # Previously an unknown name crashed later with a NameError on `x`;
    # fail fast with a clear message instead.
    raise ValueError("unknown dataset: %r" % args.dataset)
x = x.to(device)
z_dim = 2
# Fixed latent batch, reused for the periodic evaluation snapshots.
z = torch.randn(NUM_OF_POINTS, z_dim, device=device)

# Pick the generator architecture from the run title.
if args.title == "HME":
    generator = models.SoftTree(in_features=z_dim, out_features=2, depth=3, projection='linear')
elif args.title == "ME":
    generator = models.MoE(in_features=z_dim, out_features=2, num_leafs=8, projection='linear')
else:
    generator = models.MLP(layer_info=[z_dim, 20, 20, 20, 20, 2], activation=torch.nn.ReLU(), normalization=None)
discriminator = models.MLP(layer_info=[2, 20, 20, 20, 20, 1], activation=torch.nn.ReLU(), normalization=None)
generator.to(device)
discriminator.to(device)

print("GENERATOR")
print(generator)
print("DISCRIMINATOR")
print(discriminator)
print("G num of params: %d" % utils.get_parameter_count(generator))
print("D num of params: %d" % utils.get_parameter_count(discriminator))

# The generator runs at 8x the discriminator's learning rate.
optimG = torch.optim.Adam(params=generator.parameters(), lr=args.lr*8, betas=(0.5, 0.999), amsgrad=True)
optimD = torch.optim.Adam(params=discriminator.parameters(), lr=args.lr, betas=(0.5, 0.999), amsgrad=True)
criterion = torch.nn.BCEWithLogitsLoss()

print("Training starts...")
size = x.shape[0]
critic_iter = args.c_iter
loop_per_epoch = size // (BATCH_SIZE * critic_iter)
total_loss = torch.zeros(NUM_OF_EPOCHS)
# Per-epoch histories collected for plotting and saving.
timesteps, d_fields = [], []
real_total, fake_total = [], []
fid_total, disc_total, gen_total = [], [], []
##
# stuff for animation: a 40x40 probe grid over which the discriminator
# field is sampled.
xv, yv = torch.meshgrid(torch.linspace(-30, 30, 40), torch.linspace(-30, 30, 40))
field = torch.stack([xv.contiguous().view(-1), yv.contiguous().view(-1)], dim=1).to(device)
##
# ---- Main GAN training loop.
for e in range(NUM_OF_EPOCHS):
    # Fresh random permutation of the real data each epoch.
    R = torch.randperm(size)
    gen_avg_loss = 0.0
    disc_avg_loss = 0.0
    g_count = 0
    d_count = 0
    start_time = time.time()
    for i in tqdm(range(loop_per_epoch)):
        # --- critic_iter discriminator updates per generator update.
        for c in range(critic_iter):
            # train discriminator with real data
            optimD.zero_grad()
            # Disjoint minibatch: each (i, c) pair addresses its own slice
            # of the shuffled index tensor.
            x_real = x[R[(critic_iter*i+c)*args.batch_size:(critic_iter*i+c+1)*args.batch_size]]
            x_real = x_real.to(device)
            d_real = discriminator(x_real)
            # wasserstein: critic maximizes its score on real samples
            if WASSERSTEIN:
                d_real_loss = -d_real.mean()
            # original: BCE against the "real" label
            else:
                d_real_loss = criterion(d_real, torch.ones_like(d_real,device=device))
            # train discriminator with fake data
            x_fake = generator(torch.randn(args.batch_size, z_dim, device=device))
            d_fake = discriminator(x_fake)
            # wasserstein: critic minimizes its score on fakes
            if WASSERSTEIN:
                d_fake_loss = d_fake.mean()
            # original: BCE against the "fake" label
            else:
                d_fake_loss = criterion(d_fake, torch.zeros_like(d_fake,device=device))
            d_loss = d_real_loss + d_fake_loss
            # wasserstein: add the gradient-penalty term (WGAN-GP style)
            if WASSERSTEIN:
                d_loss += utils.gradient_penalty(discriminator,x_real,x_fake,1.0,device)
            d_loss.backward()
            optimD.step()
            disc_avg_loss += d_loss.item()
            d_count += 1
        # train generator: freeze D so backward() does not accumulate
        # gradients for the discriminator's parameters.
        for p in discriminator.parameters():
            p.requires_grad = False
        optimG.zero_grad()
        x_fake = generator(torch.randn(args.batch_size, z_dim, device=device))
        g_fake = discriminator(x_fake)
        # wasserstein: generator maximizes the critic's score on fakes
        if WASSERSTEIN:
            g_loss = -g_fake.mean()
        else:
            g_loss = criterion(g_fake,torch.ones_like(g_fake, device=device))
        g_loss.backward()
        optimG.step()
        gen_avg_loss += g_loss.item()
        g_count += 1
        # Unfreeze D for the next round of critic updates.
        for p in discriminator.parameters():
            p.requires_grad = True
    finish_time = time.time()
    print("epoch: %d - disc loss: %.5f - gen loss: %.5f - time elapsed: %.5f" % (e+1, disc_avg_loss / d_count, gen_avg_loss / g_count, finish_time-start_time))
    gen_total.append(gen_avg_loss/g_count)
    disc_total.append(disc_avg_loss/d_count)
    # NOTE(review): the snapshot interval reuses -c_iter; presumably an
    # independent eval-interval option was intended -- confirm.
    if (e+1) % args.c_iter == 0:
        generator.eval()
        discriminator.eval()
        with torch.no_grad():
            # Discriminator confidence over the probe grid, bucketed to
            # integer percentages for the animation frames.
            ff = discriminator(field)
            ff = torch.sigmoid(ff).cpu().numpy()
            indexes = (ff*100).astype(np.int32).reshape(-1)
            d_fields.append(indexes)
            # Snapshot: first half real points, second half generated.
            data = np.zeros((size * 2,2))
            data[:size] = x.cpu()
            data[size:] = generator(z).cpu().numpy()
            timesteps.append(data)
            # 1-NN two-sample accuracies and FID between real and fake.
            fake_acc, real_acc = utils.nn_accuracy(p_real=x, p_fake=generator(z), k=5)
            fid = utils.FID_score(x.cpu(), generator(z).cpu())
            print("fake acc: %.5f - real acc: %.5f - FID: %.5f" % (fake_acc, real_acc, fid))
            fake_total.append(fake_acc)
            real_total.append(real_acc)
            fid_total.append(fid)
        discriminator.train()
        generator.train()
def _save_current_figure(pdf_path):
    """Write the current matplotlib figure to *pdf_path* and close everything."""
    pp = PdfPages(pdf_path)
    pp.savefig()
    pp.close()
    plt.close()

# Accuracy curves from the 1-NN two-sample test.
plt.plot(fake_total)
plt.plot(real_total)
plt.plot((np.array(fake_total)+np.array(real_total))*0.5, '--')
plt.legend(["fake acc.", "real acc.", "total acc."])
_save_current_figure(os.path.join(out_directory, 'accuracy.pdf'))

# Loss curves.
plt.plot(disc_total)
plt.plot(gen_total)
plt.legend(["disc. loss", "gen. loss"])
_save_current_figure(os.path.join(out_directory, 'loss.pdf'))

# FID curve.
plt.plot(fid_total)
_save_current_figure(os.path.join(out_directory, 'fid.pdf'))

# Checkpoints, moved to CPU so they load on CUDA-less hosts.
torch.save(generator.cpu().state_dict(), os.path.join(out_directory, 'gen.ckpt'))
torch.save(discriminator.cpu().state_dict(), os.path.join(out_directory, 'disc.ckpt'))

# Raw metric histories.  os.path.join fixes the original string
# concatenation, which silently produced files like "outfid.npy" when
# -out was given without a trailing slash.
np.save(os.path.join(out_directory, "fid.npy"), fid_total)
np.save(os.path.join(out_directory, "fake.npy"), fake_total)
np.save(os.path.join(out_directory, "real.npy"), real_total)
np.save(os.path.join(out_directory, "g_loss.npy"), gen_total)
np.save(os.path.join(out_directory, "d_loss.npy"), disc_total)
np.save(os.path.join(out_directory, "timesteps.npy"), timesteps)

utils.save_animation_withdisc(
    name=os.path.join(out_directory, 'animation.mp4'),
    timesteps=timesteps,
    d_field=d_fields,
    lims=(-15, 15),
    title=args.title,
    alpha=0.5)
|
from VintageousPlus.vi.utils import modes
from VintageousPlus.state import State
from VintageousPlus.tests import get_sel
from VintageousPlus.tests import first_sel
from VintageousPlus.tests import ViewTest
from VintageousPlus.ex_commands import CURRENT_LINE_RANGE
class Test_ex_copy_Copying_InNormalMode_SingleLine_DefaultStart(ViewTest):
    """:copy with the default (current-line) range, single-line source."""

    def _copy_and_read(self, buffer_text, command_line):
        # Seed the buffer, put the caret at the start of line 2, run
        # ex_copy, then return the whole buffer contents.
        self.write(buffer_text)
        self.clear_sel()
        self.add_sel(self.R((1, 0), (1, 0)))
        self.view.run_command('ex_copy', {'command_line': command_line})
        return self.view.substr(self.R(0, self.view.size()))

    def testCanCopyDefaultLineRange(self):
        actual = self._copy_and_read('abc\nxxx\nabc\nabc', 'copy3')
        self.assertEqual('abc\nxxx\nabc\nxxx\nabc', actual)

    def testCanCopyToEof(self):
        actual = self._copy_and_read('abc\nxxx\nabc\nabc', 'copy4')
        self.assertEqual('abc\nxxx\nabc\nabc\nxxx', actual)

    def testCanCopyToBof(self):
        actual = self._copy_and_read('abc\nxxx\nabc\nabc', 'copy0')
        self.assertEqual('xxx\nabc\nxxx\nabc\nabc', actual)

    def testCanCopyToEmptyLine(self):
        actual = self._copy_and_read('abc\nxxx\nabc\n\nabc', 'copy4')
        self.assertEqual('abc\nxxx\nabc\n\nxxx\nabc', actual)

    def testCanCopyToSameLine(self):
        actual = self._copy_and_read('abc\nxxx\nabc\nabc', 'copy2')
        self.assertEqual('abc\nxxx\nxxx\nabc\nabc', actual)
class Test_ex_copy_Copying_InNormalMode_MultipleLines(ViewTest):
    """:copy with an explicit two-line range ('.,.+1')."""

    def setUp(self):
        super().setUp()
        # NOTE: kept for parity with the original test; the tests below
        # drive ex_copy via 'command_line' and do not read this attribute.
        self.range = {'left_ref': '.','left_offset': 0, 'left_search_offsets': [],
                      'right_ref': '.', 'right_offset': 1, 'right_search_offsets': []}

    def _copy_and_read(self, buffer_text, command_line):
        # Seed the buffer, put the caret at the start of line 2, run
        # ex_copy, then return the whole buffer contents.
        self.write(buffer_text)
        self.clear_sel()
        self.add_sel(self.R((1, 0), (1, 0)))
        self.view.run_command('ex_copy', {'command_line': command_line})
        return self.view.substr(self.R(0, self.view.size()))

    def testCanCopyDefaultLineRange(self):
        actual = self._copy_and_read('abc\nxxx\nxxx\nabc\nabc', '.,.+1copy4')
        self.assertEqual('abc\nxxx\nxxx\nabc\nxxx\nxxx\nabc', actual)

    def testCanCopyToEof(self):
        actual = self._copy_and_read('abc\nxxx\nxxx\nabc\nabc', '.,.+1copy5')
        self.assertEqual('abc\nxxx\nxxx\nabc\nabc\nxxx\nxxx', actual)

    def testCanCopyToBof(self):
        actual = self._copy_and_read('abc\nxxx\nxxx\nabc\nabc', '.,.+1copy0')
        self.assertEqual('xxx\nxxx\nabc\nxxx\nxxx\nabc\nabc', actual)

    def testCanCopyToEmptyLine(self):
        actual = self._copy_and_read('abc\nxxx\nxxx\nabc\n\nabc', '.,.+1copy5')
        self.assertEqual('abc\nxxx\nxxx\nabc\n\nxxx\nxxx\nabc', actual)

    def testCanCopyToSameLine(self):
        actual = self._copy_and_read('abc\nxxx\nxxx\nabc\nabc', '.,.+1copy2')
        self.assertEqual('abc\nxxx\nxxx\nxxx\nxxx\nabc\nabc', actual)
class Test_ex_copy_InNormalMode_CaretPosition(ViewTest):
    """Caret placement after :copy."""

    def testCanRepositionCaret(self):
        self.write('abc\nxxx\nabc\nabc')
        self.clear_sel()
        self.add_sel(self.R((1, 0), (1, 0)))
        self.view.run_command('ex_copy', {'command_line': 'copy3'})
        # The caret should land at the start of the copied line (line 4).
        expected = [self.R((3, 0), (3, 0))]
        self.assertEqual(expected, list(self.view.sel()))
class Test_ex_copy_ModeTransition(ViewTest):
    """Mode transitions around :copy."""

    def testFromNormalModeToNormalMode(self):
        self.write('abc\nxxx\nabc\nabc')
        self.clear_sel()
        self.add_sel(self.R((1, 0), (1, 0)))
        state = State(self.view)
        state.enter_normal_mode()
        self.view.run_command('vi_enter_normal_mode')
        prev_mode = state.mode
        # NOTE(review): the other tests drive ex_copy via 'command_line';
        # this one uses an 'address' argument -- confirm ex_copy still
        # supports it.
        self.view.run_command('ex_copy', {'address': '3'})
        state = State(self.view)
        new_mode = state.mode
        # BUG FIX: the original called assertEqual(prev, new, modes.NORMAL),
        # which treats modes.NORMAL as the failure *message*, not a third
        # compared value.  Assert both properties explicitly.
        self.assertEqual(prev_mode, new_mode)
        self.assertEqual(new_mode, modes.NORMAL)

    def testFromVisualModeToNormalMode(self):
        self.write('abc\nxxx\nabc\nabc')
        self.clear_sel()
        self.add_sel(self.R((1, 0), (1, 1)))
        state = State(self.view)
        state.enter_visual_mode()
        prev_mode = state.mode
        self.view.run_command('ex_copy', {'command_line': 'copy3'})
        state = State(self.view)
        new_mode = state.mode
        # ex commands should drop back to normal mode from visual mode.
        self.assertNotEqual(prev_mode, new_mode)
        self.assertEqual(new_mode, modes.NORMAL)
|
<filename>src/openprocurement/tender/simpledefense/tests/tender_blanks.py<gh_stars>1-10
# -*- coding: utf-8 -*-
from copy import deepcopy
from datetime import timedelta
from openprocurement.tender.belowthreshold.tests.base import test_organization, test_criteria
from openprocurement.api.constants import NOT_REQUIRED_ADDITIONAL_CLASSIFICATION_FROM, CPV_ITEMS_CLASS_FROM
from openprocurement.tender.core.models import get_now
def create_tender_invalid(self):
request_path = "/tenders"
response = self.app.post(request_path, "data", status=415)
self.assertEqual(response.status, "415 Unsupported Media Type")
self.assertEqual(response.content_type, "application/json")
self.assertEqual(response.json["status"], "error")
self.assertEqual(
response.json["errors"],
[
{
"description": "Content-Type header should be one of ['application/json']",
"location": "header",
"name": "Content-Type",
}
],
)
response = self.app.post(request_path, "data", content_type="application/json", status=422)
self.assertEqual(response.status, "422 Unprocessable Entity")
self.assertEqual(response.content_type, "application/json")
self.assertEqual(response.json["status"], "error")
self.assertEqual(
response.json["errors"],
[{"description": "Expecting value: line 1 column 1 (char 0)", "location": "body", "name": "data"}],
)
response = self.app.post_json(request_path, "data", status=422)
self.assertEqual(response.status, "422 Unprocessable Entity")
self.assertEqual(response.content_type, "application/json")
self.assertEqual(response.json["status"], "error")
self.assertEqual(
response.json["errors"], [{"description": "Data not available", "location": "body", "name": "data"}]
)
response = self.app.post_json(request_path, {"not_data": {}}, status=422)
self.assertEqual(response.status, "422 Unprocessable Entity")
self.assertEqual(response.content_type, "application/json")
self.assertEqual(response.json["status"], "error")
self.assertEqual(
response.json["errors"], [{"description": "Data not available", "location": "body", "name": "data"}]
)
response = self.app.post_json(request_path, {"data": []}, status=422)
self.assertEqual(response.status, "422 Unprocessable Entity")
self.assertEqual(response.content_type, "application/json")
self.assertEqual(response.json["status"], "error")
self.assertEqual(
response.json["errors"], [{"description": "Data not available", "location": "body", "name": "data"}]
)
response = self.app.post_json(request_path, {"data": {"procurementMethodType": "invalid_value"}}, status=415)
self.assertEqual(response.status, "415 Unsupported Media Type")
self.assertEqual(response.content_type, "application/json")
self.assertEqual(response.json["status"], "error")
self.assertEqual(
response.json["errors"],
[{"description": "Not implemented", "location": "body", "name": "procurementMethodType"}],
)
response = self.app.post_json(
request_path,
{"data": {"invalid_field": "invalid_value", "procurementMethodType": "simple.defense"}},
status=422,
)
self.assertEqual(response.status, "422 Unprocessable Entity")
self.assertEqual(response.content_type, "application/json")
self.assertEqual(response.json["status"], "error")
self.assertEqual(
response.json["errors"], [{"description": "Rogue field", "location": "body", "name": "invalid_field"}]
)
response = self.app.post_json(
request_path,
{"data": {"value": "invalid_value", "procurementMethodType": "simple.defense"}},
status=422,
)
self.assertEqual(response.status, "422 Unprocessable Entity")
self.assertEqual(response.content_type, "application/json")
self.assertEqual(response.json["status"], "error")
self.assertEqual(
response.json["errors"],
[
{
"description": ["Please use a mapping for this field or Value instance instead of str."],
"location": "body",
"name": "value",
}
],
)
response = self.app.post_json(
request_path,
{"data": {"procurementMethod": "invalid_value", "procurementMethodType": "simple.defense"}},
status=422,
)
self.assertEqual(response.status, "422 Unprocessable Entity")
self.assertEqual(response.content_type, "application/json")
self.assertEqual(response.json["status"], "error")
self.assertIn(
{
"description": ["Value must be one of ['open', 'selective', 'limited']."],
"location": "body",
"name": "procurementMethod",
},
response.json["errors"],
)
self.assertIn(
{"description": ["This field is required."], "location": "body", "name": "tenderPeriod"},
response.json["errors"],
)
self.assertIn(
{"description": ["This field is required."], "location": "body", "name": "minimalStep"},
response.json["errors"],
)
self.assertIn(
{"description": ["This field is required."], "location": "body", "name": "items"}, response.json["errors"]
)
# self.assertIn({u'description': [u'This field is required.'], u'location': u'body', u'name': u'enquiryPeriod'}, response.json['errors'])
self.assertIn(
{"description": ["This field is required."], "location": "body", "name": "value"}, response.json["errors"]
)
self.assertIn(
{"description": ["This field is required."], "location": "body", "name": "items"}, response.json["errors"]
)
response = self.app.post_json(
request_path,
{"data": {"enquiryPeriod": {"endDate": "invalid_value"}, "procurementMethodType": "simple.defense"}},
status=422,
)
self.assertEqual(response.status, "422 Unprocessable Entity")
self.assertEqual(response.content_type, "application/json")
self.assertEqual(response.json["status"], "error")
self.assertEqual(
response.json["errors"],
[
{
"description": {"endDate": ["Could not parse invalid_value. Should be ISO8601."]},
"location": "body",
"name": "enquiryPeriod",
}
],
)
response = self.app.post_json(
request_path,
{
"data": {
"enquiryPeriod": {"endDate": "9999-12-31T23:59:59.999999"},
"procurementMethodType": "simple.defense",
}
},
status=422,
)
self.assertEqual(response.status, "422 Unprocessable Entity")
self.assertEqual(response.content_type, "application/json")
self.assertEqual(response.json["status"], "error")
self.assertEqual(
response.json["errors"],
[{"description": {"endDate": ["date value out of range"]}, "location": "body", "name": "enquiryPeriod"}],
)
data = self.initial_data["tenderPeriod"]
self.initial_data["tenderPeriod"] = {"startDate": "2014-10-31T00:00:00", "endDate": "2014-10-01T00:00:00"}
response = self.app.post_json(request_path, {"data": self.initial_data}, status=422)
self.initial_data["tenderPeriod"] = data
self.assertEqual(response.status, "422 Unprocessable Entity")
self.assertEqual(response.content_type, "application/json")
self.assertEqual(response.json["status"], "error")
self.assertEqual(
response.json["errors"],
[
{
"description": {"startDate": ["period should begin before its end"]},
"location": "body",
"name": "tenderPeriod",
}
],
)
# data = self.initial_data['tenderPeriod']
# self.initial_data['tenderPeriod'] = {'startDate': '2014-10-31T00:00:00', 'endDate': '2015-10-01T00:00:00'}
# response = self.app.post_json(request_path, {'data': self.initial_data}, status=422)
# self.initial_data['tenderPeriod'] = data
# self.assertEqual(response.status, '422 Unprocessable Entity')
# self.assertEqual(response.content_type, 'application/json')
# self.assertEqual(response.json['status'], 'error')
# self.assertEqual(response.json['errors'], [
# {u'description': [u'period should begin after enquiryPeriod'], u'location': u'body', u'name': u'tenderPeriod'}
# ])
now = get_now()
self.initial_data["awardPeriod"] = {"startDate": now.isoformat(), "endDate": now.isoformat()}
response = self.app.post_json(request_path, {"data": self.initial_data}, status=422)
del self.initial_data["awardPeriod"]
self.assertEqual(response.status, "422 Unprocessable Entity")
self.assertEqual(response.content_type, "application/json")
self.assertEqual(response.json["status"], "error")
self.assertEqual(
response.json["errors"],
[{"description": ["period should begin after tenderPeriod"], "location": "body", "name": "awardPeriod"}],
)
self.initial_data["auctionPeriod"] = {
"startDate": (now + timedelta(days=16)).isoformat(),
"endDate": (now + timedelta(days=16)).isoformat(),
}
self.initial_data["awardPeriod"] = {
"startDate": (now + timedelta(days=15)).isoformat(),
"endDate": (now + timedelta(days=15)).isoformat(),
}
response = self.app.post_json(request_path, {"data": self.initial_data}, status=422)
del self.initial_data["auctionPeriod"]
del self.initial_data["awardPeriod"]
self.assertEqual(response.status, "422 Unprocessable Entity")
self.assertEqual(response.content_type, "application/json")
self.assertEqual(response.json["status"], "error")
self.assertEqual(
response.json["errors"],
[{"description": ["period should begin after auctionPeriod"], "location": "body", "name": "awardPeriod"}],
)
data = self.initial_data["minimalStep"]
self.initial_data["minimalStep"] = {"amount": "1000.0"}
response = self.app.post_json(request_path, {"data": self.initial_data}, status=422)
self.initial_data["minimalStep"] = data
self.assertEqual(response.status, "422 Unprocessable Entity")
self.assertEqual(response.content_type, "application/json")
self.assertEqual(response.json["status"], "error")
self.assertEqual(
response.json["errors"],
[
{
"description": ["value should be less than value of tender"],
"location": "body",
"name": "minimalStep",
}
],
)
data = self.initial_data["minimalStep"]
self.initial_data["minimalStep"] = {"amount": "100.0", "valueAddedTaxIncluded": False}
response = self.app.post_json(request_path, {"data": self.initial_data}, status=422)
self.initial_data["minimalStep"] = data
self.assertEqual(response.status, "422 Unprocessable Entity")
self.assertEqual(response.content_type, "application/json")
self.assertEqual(response.json["status"], "error")
self.assertEqual(
response.json["errors"],
[
{
"description": [
"valueAddedTaxIncluded should be identical to valueAddedTaxIncluded of value of tender"
],
"location": "body",
"name": "minimalStep",
}
],
)
data = self.initial_data["minimalStep"]
self.initial_data["minimalStep"] = {"amount": "100.0", "currency": "USD"}
response = self.app.post_json(request_path, {"data": self.initial_data}, status=422)
self.initial_data["minimalStep"] = data
self.assertEqual(response.status, "422 Unprocessable Entity")
self.assertEqual(response.content_type, "application/json")
self.assertEqual(response.json["status"], "error")
self.assertEqual(
response.json["errors"],
[
{
"description": ["currency should be identical to currency of value of tender"],
"location": "body",
"name": "minimalStep",
}
],
)
data = self.initial_data["items"][0].pop("additionalClassifications")
if get_now() > CPV_ITEMS_CLASS_FROM:
cpv_code = self.initial_data["items"][0]["classification"]["id"]
self.initial_data["items"][0]["classification"]["id"] = "99999999-9"
status = 422 if get_now() < NOT_REQUIRED_ADDITIONAL_CLASSIFICATION_FROM else 201
response = self.app.post_json(request_path, {"data": self.initial_data}, status=status)
self.initial_data["items"][0]["additionalClassifications"] = data
if get_now() > CPV_ITEMS_CLASS_FROM:
self.initial_data["items"][0]["classification"]["id"] = cpv_code
if status == 201:
self.assertEqual(response.status, "201 Created")
self.assertEqual(response.content_type, "application/json")
else:
self.assertEqual(response.status, "422 Unprocessable Entity")
self.assertEqual(response.content_type, "application/json")
self.assertEqual(response.json["status"], "error")
self.assertEqual(
response.json["errors"],
[
{
"description": [{"additionalClassifications": ["This field is required."]}],
"location": "body",
"name": "items",
}
],
)
data = self.initial_data["items"][0]["additionalClassifications"][0]["scheme"]
self.initial_data["items"][0]["additionalClassifications"][0]["scheme"] = "Не ДКПП"
if get_now() > CPV_ITEMS_CLASS_FROM:
cpv_code = self.initial_data["items"][0]["classification"]["id"]
self.initial_data["items"][0]["classification"]["id"] = "99999999-9"
response = self.app.post_json(request_path, {"data": self.initial_data}, status=422)
self.initial_data["items"][0]["additionalClassifications"][0]["scheme"] = data
if get_now() > CPV_ITEMS_CLASS_FROM:
self.initial_data["items"][0]["classification"]["id"] = cpv_code
self.assertEqual(response.status, "422 Unprocessable Entity")
self.assertEqual(response.content_type, "application/json")
self.assertEqual(response.json["status"], "error")
if get_now() > CPV_ITEMS_CLASS_FROM:
self.assertEqual(
response.json["errors"],
[
{
"description": [
{
"additionalClassifications": [
"One of additional classifications should be one of [ДК003, ДК015, ДК018, specialNorms]."
]
}
],
"location": "body",
"name": "items",
}
],
)
else:
self.assertEqual(
response.json["errors"],
[
{
"description": [
{
"additionalClassifications": [
"One of additional classifications should be one of [ДКПП, NONE, ДК003, ДК015, ДК018]."
]
}
],
"location": "body",
"name": "items",
}
],
)
data = test_organization["contactPoint"]["telephone"]
del self.initial_data["procuringEntity"]["contactPoint"]["telephone"]
response = self.app.post_json(request_path, {"data": self.initial_data}, status=422)
self.initial_data["procuringEntity"]["contactPoint"]["telephone"] = data
self.assertEqual(response.status, "422 Unprocessable Entity")
self.assertEqual(response.content_type, "application/json")
self.assertEqual(response.json["status"], "error")
self.assertEqual(
response.json["errors"],
[
{
"description": {"contactPoint": {"email": ["telephone or email should be present"]}},
"location": "body",
"name": "procuringEntity",
}
],
)
correct_phone = self.initial_data["procuringEntity"]["contactPoint"]["telephone"]
self.initial_data["procuringEntity"]["contactPoint"]["telephone"] = "++223"
response = self.app.post_json(request_path, {"data": self.initial_data}, status=422)
self.initial_data["procuringEntity"]["contactPoint"]["telephone"] = correct_phone
self.assertEqual(response.content_type, "application/json")
self.assertEqual(response.json["status"], "error")
self.assertEqual(
response.json["errors"],
[
{
u'description': {u'contactPoint': {u'telephone': [u'wrong telephone format (could be missed +)']}},
u'location': u'body',
u'name': u'procuringEntity'
}
]
)
data = self.initial_data["items"][0].copy()
classification = data["classification"].copy()
classification["id"] = "19212310-1"
data["classification"] = classification
self.initial_data["items"] = [self.initial_data["items"][0], data]
response = self.app.post_json(request_path, {"data": self.initial_data}, status=422)
self.initial_data["items"] = self.initial_data["items"][:1]
self.assertEqual(response.status, "422 Unprocessable Entity")
self.assertEqual(response.content_type, "application/json")
self.assertEqual(response.json["status"], "error")
self.assertEqual(
response.json["errors"],
[{"description": ["CPV group of items be identical"], "location": "body", "name": "items"}],
)
data = deepcopy(self.initial_data)
del data["items"][0]["deliveryDate"]
response = self.app.post_json(request_path, {"data": data}, status=422)
self.assertEqual(response.status, "422 Unprocessable Entity")
self.assertEqual(response.content_type, "application/json")
self.assertEqual(response.json["status"], "error")
self.assertEqual(
response.json["errors"],
[{"description": [{"deliveryDate": ["This field is required."]}], "location": "body", "name": "items"}],
)
|
<reponame>slamavl/quantarhei
# -*- coding: utf-8 -*-
"""
*******************************************************************************
MODIFIED REDFIELD RATE MATRIX
*******************************************************************************
"""
import numpy
from scipy import integrate
import scipy
from quantarhei.core.implementations import implementation
from quantarhei.core.units import cm2int
from quantarhei.core.units import kB_intK
from quantarhei.qm.hilbertspace.hamiltonian import Hamiltonian
from quantarhei.qm.liouvillespace.systembathinteraction import SystemBathInteraction
import quantarhei as qr
import itertools as it
class ModifiedRedfieldRateMatrix:
    """Modified Redfield relaxation rate matrix
    Modified Redfield population relaxation rate matrix is calculated from the
    Hamiltonian and system-bath interaction. The bath
    correlation functions are Fourier transformed by FFT and the negative
    frequency part is calculated from the expected thermodynamic
    symmetry.
    Parameters
    ----------
    ham : Hamiltonian
        Hamiltonian object
    sbi : SystemBathInteraction
        SystemBathInteraction object
    time : TimeAxis
        Time axis on which the rates are evaluated (only its ``data`` array
        of time points is used)
    initialize : bool (default True)
        If true, the rates will be calculated when the object is created
    cutoff_time : float
        If cutoff time is specified, the tensor is integrated only up to the
        cutoff time
    """
    def __init__(self, ham, sbi, time, initialize=True, cutoff_time=None):
        # Validate the two mandatory quantarhei objects up front.
        if not isinstance(ham,Hamiltonian):
            raise Exception("First argument must be a Hamiltonian")
        if not isinstance(sbi,SystemBathInteraction):
            raise Exception
        self._is_initialized = False
        self._has_cutoff_time = False
        if cutoff_time is not None:
            self.cutoff_time = cutoff_time
            self._has_cutoff_time = True
        self.ham = ham
        self.sbi = sbi
        # Raw time points of the supplied TimeAxis.
        self.tt = time.data
        if initialize:
            # Rates are computed eagerly with the FFT-based routine.
            self._set_rates2()
            self._is_initialized = True
    def _set_rates2(self,force_detbalance=True):
        """Calculate the rate matrix via an FFT of the time-dependent integrand.

        Parameters
        ----------
        force_detbalance : bool (default True)
            If True, only the L > M (upper-triangle) rates are computed
            explicitly; the reverse rates are filled in from detailed
            balance at the bath temperature.
        """
        Na = self.ham.dim
        Nc = self.sbi.N
        tt = self.tt
        # Eigen problem
        hD,SS = numpy.linalg.eigh(self.ham._data) #hD=eigenvalues, SS=eigenvectors
        Nt = self.sbi.CC.timeAxis.length
        dt = self.sbi.CC.timeAxis.step
        time = self.sbi.CC.timeAxis
        # Build a symmetric time axis covering negative times as well; the
        # negative branch is the mirror image of tt (used by the FFT below).
        time_full = numpy.zeros(Nt*2)
        time_full[time._length:] = tt.copy()
        time_full[1:time._length] = -tt[:0:-1]
        time_full[0] = time_full[1] - dt
        # fill the corr. matrix (site-basis bath correlation functions;
        # only the diagonal site pairs are used — uncorrelated baths)
        Cmat = numpy.zeros((Na,Nt),dtype=numpy.complex128)
        lambdas = numpy.zeros(Na,dtype=numpy.float64)
        for ii in range(Na):
            Cmat[ii,:] = self.sbi.get_coft(ii,ii)
        for ii in range(Na-1): # assume single excitation per monomer - not correct one should replace it with: aggreg.Nb[1]
            lambdas[ii+1] = self.sbi.get_reorganization_energy(ii)
        # Rate matrix init
        rates = numpy.zeros((Na,Na),dtype=numpy.float64)
        for M in range(Na):
            if force_detbalance:
                # compute only uphill rates; downhill come from detailed balance
                minL = M+1
            else:
                minL=0
            for L in range(minL,Na):
                if M==L:
                    continue
                # Here we assume noncorrelated sites (diagonal correlation function matrix)
                # prepare individual quantities: exciton-basis correlation
                # functions obtained by weighting sites with eigenvector products
                cMMMM = numpy.dot(SS[:,M]**4,Cmat)
                cLLLL = numpy.dot(SS[:,L]**4,Cmat)
                cMLLL = numpy.dot((SS[:,L]**3)*SS[:,M],Cmat)
                cMMML = numpy.dot((SS[:,M]**3)*SS[:,L],Cmat)
                cMMLL = numpy.dot((SS[:,M]**2)*(SS[:,L]**2),Cmat)
                # Lineshape functions (double integral) and their first
                # derivatives (single integral) of the correlation functions.
                gMMMM = _c2g(time,cMMMM)
                gLLLL = _c2g(time,cLLLL)
                gMMLL = _c2g(time,cMMLL)
                hMLLL = _c2h(time,cMLLL)
                hMMML = _c2h(time,cMMML)
                # Exciton-basis reorganization energies.
                lambdaLLLL = numpy.dot(SS[:,L]**4,lambdas)
                lambdaMMLL = numpy.dot((SS[:,M]**2)*(SS[:,L]**2),lambdas)
                lambdaMLLL = numpy.dot(SS[:,M]*(SS[:,L]**3),lambdas)
                omML = hD[L]-hD[M]
                # Compute rates (time dependent function)
                om = omML-2*lambdaLLLL+2*lambdaMMLL
                ft_full = numpy.zeros(Nt*2,dtype=numpy.complex128)
                ft = numpy.exp(- gLLLL - gMMMM + 2*gMMLL)
                ft *= (cMMLL - (hMLLL - hMMML + 2*1j*lambdaMLLL)**2 )
                # Replace the time integration over very oscillating function by the fourier transform
                # Prepare the negative part of the function - because we want the real part of the positive
                # part of the function the negative has to be the complex conjugate and we get exactly what
                # we need
                ft_full[Nt:] = ft.copy()
                ft_full[1:Nt] = numpy.conj(ft[:0:-1])
                # Perform the fourier transform
                fw_full = 2*Nt*numpy.fft.fftshift(numpy.fft.ifft(numpy.fft.fftshift(ft_full)))*dt
                fw_full = numpy.real(fw_full)
                faxis = time.get_FrequencyAxis()
                # Define the result as DFunction to enable the interpolation
                Fw = qr.DFunction(x=faxis,y=fw_full)
                # The M<-L rate is the transform evaluated at frequency om.
                rates[M,L] = Fw.at(om)
        if force_detbalance:
            T = self.sbi.get_temperature()
            for M in range(Na):
                for L in range(M+1,Na):
                    if M==L:
                        continue
                    # Zero-phonon transition frequency including the
                    # reorganization shifts of both excitons.
                    om00 = hD[M]-hD[L] + numpy.dot(SS[:,L]**4,lambdas) - numpy.dot(SS[:,M]**4,lambdas)
                    # Detailed balance: reverse rate scaled by a Boltzmann factor.
                    rates[L,M] = rates[M,L] * numpy.exp(om00/kB_intK/T)
        # Compute the diagonal elements so that every column sums to zero
        # (population conservation).
        for L in range(Na):
            rates[L,L] = -numpy.sum(rates[:,L])
        self.rates = rates
    def _set_rates(self):
        """Calculate the rate matrix by direct time integration
        (delegates to :func:`ssModifiedRedfieldRateMatrix`).
        """
        Na = self.ham.dim
        Nc = self.sbi.N
        tt = self.tt
        # Eigen problem
        hD,SS = numpy.linalg.eigh(self.ham._data) #hD=eigenvalues, SS=eigenvectors
        Nt = self.sbi.CC.timeAxis.length
        # Four-index reorganization energies over the Na-1 excited states
        # (ground state excluded).
        lam4 = numpy.zeros((Na-1,Na-1,Na-1,Na-1),dtype=qr.REAL)
        #lam4 = numpy.zeros((Na,Na,Na,Na),dtype=qr.REAL)
        for a in range(Na-1):
            #for a in range(1,Na):
            for b in range(Na-1):
                #for b in range(1,Na):
                for c in range(Na-1):
                    #for c in range(1,Na):
                    for d in range(Na-1):
                        #for d in range(1,Na):
                        lam4[a,b,c,d] = self.sbi.CC.get_reorganization_energy4(a,b,c,d)
        self.sbi.CC.create_double_integral() #g(t)
        self.sbi.CC.create_one_integral() #g_dot(t)
        # Transform the correlation-function matrix to the exciton basis.
        self.sbi.CC.transform(SS)
        #g4_1value = self.sbi.CC.get_goft4(1,2,3,4)
        g4 = self.sbi.CC.get_goft_matrix() #g_{abcd}(t), dimensions (Na, Na, Na, Na, Nt-1)
        h4 = self.sbi.CC.get_hoft_matrix() #g_dot_{abcd}(t), dimensions (Na, Na, Na, Na, Nt-1)
        c4 = self.sbi.CC.get_coft_matrix() #g_dotdot_{abcd}(t) = C(t) in exciton basis, dimensions (Na, Na, Na, Na, Nt-1)
        rates = ssModifiedRedfieldRateMatrix(Na, Nc, Nt, hD, lam4, g4, h4, c4, tt)
        self.rates = rates
def ssModifiedRedfieldRateMatrix(Na, Nc, Nt, hD, lam4, g4, h4, c4, tt):
    """Modified Redfield population relaxation rates by direct time integration.

    Only the one-exciton block is treated: all arrays are indexed over the
    Na-1 excited states (the ground state is excluded).

    Parameters
    ----------
    Na : int
        Rank of the Hamiltonian (number of states including the ground state).
    Nc : int
        Number of components of the interaction Hamiltonian; kept for
        interface compatibility (not used in the calculation below).
    Nt : int
        Number of time points.
    hD : float array
        Eigenenergies of the Hamiltonian.
    lam4 : float array, shape (Na-1, Na-1, Na-1, Na-1)
        Four-index reorganization energies in the exciton basis.
    g4 : complex array, shape (Na-1, Na-1, Na-1, Na-1, >= Nt-1)
        Lineshape functions g_{abcd}(t) in the exciton basis.
    h4 : complex array
        First derivatives of the lineshape functions.
    c4 : complex array
        Second derivatives of the lineshape functions (correlation functions).
    tt : float array, length Nt
        Time points.

    Returns
    -------
    RR : complex array, shape (Na-1, Na-1)
        Relaxation rate matrix; the diagonal is filled so that every column
        sums to zero (population conservation).
    """
    print("***")
    # NOTE: the numpy.float/numpy.complex aliases used previously were
    # removed in NumPy 1.24 — the builtin types are the correct replacement.
    # Zero-phonon energies shifted by the diagonal reorganization energy.
    E_0k = numpy.zeros(Na-1, dtype=float)
    for ii in range(Na-1):
        E_0k[ii] = hD[ii] - lam4[ii, ii, ii, ii]
    F_k_t = numpy.zeros((Na-1, Nt-1), dtype=complex)
    A_k_t = numpy.zeros((Na-1, Nt-1), dtype=complex)
    N_kl_t = numpy.zeros((Na-1, Na-1, Nt-1), dtype=complex)
    # F: emission-type and A: absorption-type time-domain profiles.
    for a in range(Na-1):
        for ti in range(Nt-1):
            F_k_t[a, ti] = numpy.exp(-1j*(E_0k[a] - lam4[a, a, a, a])*tt[ti]
                                     - numpy.conjugate(g4[a, a, a, a, ti]))
            A_k_t[a, ti] = numpy.exp(-1j*(E_0k[a] + lam4[a, a, a, a])*tt[ti]
                                     - g4[a, a, a, a, ti])
    # N: the fluctuation prefactor coupling excitons a and b.
    for a in range(Na-1):
        for b in range(Na-1):
            for ti in range(Nt-1):
                N_kl_t[a, b, ti] = (
                    c4[b, a, a, b, ti]
                    - (h4[b, a, a, a, ti] - h4[b, a, b, b, ti] - 2j*lam4[b, a, b, b])
                    * (h4[a, b, a, a, ti] - h4[a, b, b, b, ti] - 2j*lam4[a, b, b, b])
                ) * numpy.exp(2*(g4[a, a, b, b, ti] + 1j*lam4[a, a, b, b]*tt[ti]))
    f = numpy.zeros((Na-1, Na-1, Nt), dtype=complex)
    RR = numpy.zeros((Na-1, Na-1), dtype=complex)
    for a in range(Na-1):
        for b in range(Na-1):
            # NOTE(review): f[a, b, Nt-1] is never assigned and stays 0 —
            # an apparent off-by-one inherited from the original code, kept
            # for numerical compatibility.
            for ti in range(Nt-1):
                f[a, b, ti] = numpy.conjugate(F_k_t[b, ti])*A_k_t[a, ti]*N_kl_t[a, b, ti]
            # integrate.simps was removed in SciPy 1.14; simpson (available
            # since SciPy 1.6) is its drop-in replacement.
            RR[a, b] = 2*numpy.real(integrate.simpson(f[a, b, :], x=tt))
    for a in range(Na-1):
        RR[a, a] = 0
    # Diagonal elements ensure each column sums to zero.
    RR_bbaa = -numpy.sum(RR, axis=0)
    print('RR_bbaa is:')
    print(RR_bbaa)
    RR = numpy.diag(RR_bbaa) + RR
    print('diag RR_bbaa is:')
    print(numpy.diag(RR_bbaa))
    print('RR is:')
    print(RR)
    print("I am called from outside")
    return RR
def _c2g(timeaxis, coft):
    """Convert a correlation function to a lineshape function.

    The lineshape function g(t) is the double time integral of the
    correlation function C(t); real and imaginary parts are integrated
    separately with exact spline antiderivatives.

    Parameters
    ----------
    timeaxis : TimeAxis
        Time axis of the correlation function; only its ``data`` array of
        time points is used.
    coft : complex numpy array
        Values of the correlation function at the points of ``timeaxis``.

    Returns
    -------
    complex numpy array
        g(t) evaluated at the points of ``timeaxis``.
    """
    # Import the submodule explicitly: a bare ``import scipy`` does not
    # guarantee that ``scipy.interpolate`` is accessible on older SciPy
    # versions (lazy subpackage loading only arrived in SciPy 1.8).
    from scipy.interpolate import UnivariateSpline
    ta = timeaxis.data

    def _double_integral(values):
        # s=0 makes the spline interpolating; antiderivative() integrates it
        # exactly, evaluated back on the original grid.
        once = UnivariateSpline(ta, values, s=0).antiderivative()(ta)
        return UnivariateSpline(ta, once, s=0).antiderivative()(ta)

    return (_double_integral(numpy.real(coft))
            + 1j*_double_integral(numpy.imag(coft)))
def _c2h(timeaxis, coft):
    """Convert a correlation function to the time derivative of the
    lineshape function.

    h(t) is the single time integral of the correlation function C(t);
    real and imaginary parts are integrated separately with exact spline
    antiderivatives.

    Parameters
    ----------
    timeaxis : TimeAxis
        Time axis of the correlation function; only its ``data`` array of
        time points is used.
    coft : complex numpy array
        Values of the correlation function at the points of ``timeaxis``.

    Returns
    -------
    complex numpy array
        h(t) evaluated at the points of ``timeaxis``.
    """
    # Explicit submodule import — ``import scipy`` alone did not load
    # scipy.interpolate before SciPy 1.8.
    from scipy.interpolate import UnivariateSpline
    ta = timeaxis.data

    def _single_integral(values):
        # Interpolating spline (s=0) integrated exactly once.
        return UnivariateSpline(ta, values, s=0).antiderivative()(ta)

    return (_single_integral(numpy.real(coft))
            + 1j*_single_integral(numpy.imag(coft)))
|
from flask import Flask, render_template, request, session, redirect, Markup, jsonify
from sqlalchemy import create_engine
from sqlalchemy.orm import scoped_session, sessionmaker
import requests
from xml.etree import ElementTree
app = Flask(__name__ ,template_folder='template')
# SECURITY: hard-coded, trivial secret key — should come from configuration
# or an environment variable in any real deployment.
app.secret_key = 'key'
app.config["SESSION_PERMANENT"] = False
app.config["SESSION_TYPE"] = "filesystem"
# SECURITY: database credentials are committed in plain text in the URL;
# move them to configuration/environment and rotate them.
# NOTE(review): the 'postgres://' scheme is rejected by SQLAlchemy >= 1.4
# ('postgresql://' is required) — confirm the pinned SQLAlchemy version.
engine = create_engine(
    'postgres://usetpxuboatswg:14291d4b50681090072617602611ea12822484a1e39cde28e9f046fc7ff50a85@ec2-54-235-86-226.'
    'compute-1.amazonaws.com:5432/d5mk3psi9hs4nb')
# scoped_session gives each thread its own SQLAlchemy session.
db = scoped_session(sessionmaker(bind=engine))
@app.route('/', methods=['GET', 'POST'])
def index():
    """Home page: require a logged-in user, show the search form, and run
    case-insensitive substring searches over ISBN, title and author on POST.
    """
    if session.get('username') is None:
        return redirect('/login')
    if request.method != 'GET':
        term = request.form.get('query').lower()
        pattern = '%' + term + '%'
        sql = ('SELECT * FROM books WHERE (LOWER(isbn) LIKE :query) OR (LOWER(title) LIKE :query) '
               'OR (LOWER(author) LIKE :query)')
        matches = db.execute(sql, {'query': pattern}).fetchall()
        if not matches:
            return render_template('error.html', message='No Books were Found!', navbar=True)
        return render_template('result.html', query=term, books=matches, navbar=True)
    return render_template('index.html', navbar=True)
@app.route('/login', methods=['GET', 'POST'])
def login():
    """Log a user in: GET renders the form, POST checks the credentials.

    Any previous session is dropped first.
    """
    session.clear()
    if request.method == 'POST':
        username = request.form.get('username')
        password = request.form.get('password')
        # SECURITY NOTE(review): passwords are stored and compared in plain
        # text here — they should be hashed (e.g. werkzeug.security) before
        # this ships anywhere real.
        user = db.execute('SELECT * FROM users WHERE (username=:username AND password=:password)',
                          {'username': username, 'password': password}).fetchone()
        if user is None:
            return render_template('error.html', message='Entered credentials not valid!')
        session["username"] = username
        return redirect('/')
    else:
        return render_template('login.html', navbar=False)
@app.route('/logout')
def logout():
    """Drop the user's session and send them back to the home page."""
    session.clear()
    return redirect('/')
@app.route('/signup', methods=['GET', 'POST'])
def signup():
    """Register a new user: GET renders the form, POST creates the account
    and logs the new user in.
    """
    # Drop any stale session before sign-up.
    session.clear()
    if request.method == 'POST':
        username = request.form.get('username')
        password = request.form.get('password')
        retype_password = request.form.get('retype_password')
        # check if passwords are the same
        if not password == retype_password:
            return render_template('error.html', message='Passwords do not match')
        # check if user is available
        avail = db.execute('SELECT username FROM users WHERE username=:username',
                           {'username': username}).fetchone()
        if avail:
            return render_template('error.html', message='Username Already Exists')
        # Write username and password to database
        # SECURITY NOTE(review): the password is inserted in plain text — it
        # should be hashed before storage.
        db.execute('INSERT INTO users(username, password) VALUES(:username, :password)',
                   {'username': username, 'password': password})
        db.commit()
        session['username'] = username
        return redirect('/')
    else:
        return render_template('signup.html', navbar=False)
@app.route('/books/<isbn>')
def book(isbn):
    """Book detail page, enriched with Goodreads metadata when available."""
    book = db.execute('SELECT * FROM books WHERE isbn=:isbn',
                      {'isbn': isbn}).fetchone()
    if book is None:
        return render_template('error.html', message='This book is not available', navbar=True)
    # SECURITY NOTE(review): the Goodreads API key is hard-coded in the URL —
    # move it to configuration.
    url = "https://www.goodreads.com/book/isbn/{}?key=JKfZcTyK1lzaCpB58Tpr8g".format(isbn)
    res = requests.get(url)
    tree = ElementTree.fromstring(res.content)
    try:
        # Positional indexing into the Goodreads XML response — brittle;
        # presumably tree[1] is the <book> element (TODO confirm against the
        # API response schema).
        description = tree[1][16].text
        image_url = tree[1][8].text
        review_count = tree[1][17][3].text
        avg_score = tree[1][18].text
        link = tree[1][24].text
    except IndexError:
        # Unexpected XML layout: fall back to local data only.
        return render_template('book.html', book=book, link=None, navbar=True)
    # Markup() marks the description as pre-rendered HTML for the template.
    description_markup = Markup(description)
    return render_template('book.html', book=book, link=link, description=description_markup,
                           image_url=image_url, review_count=review_count, avg_score=avg_score, navbar=True)
@app.route('/api/<isbn>')
def book_api(isbn):
    """JSON API for a single book, enriched with Goodreads metadata.

    Returns ``{'error': ...}`` for unknown ISBNs; Goodreads-derived fields
    fall back to empty strings when the XML layout is unexpected.
    """
    book = db.execute('SELECT * FROM books WHERE isbn=:isbn',
                      {'isbn': isbn}).fetchone()
    if book is None:
        return jsonify({'error': 'This book is not available'})
    # Build the payload once with empty Goodreads defaults; this removes the
    # duplicated dict the original constructed in both branches.
    payload = {
        'title': book.title,
        'author': book.author,
        'year': book.year,
        'isbn': book.isbn,
        'link': '',
        'description': '',
        'book_cover': '',
        'review_count': '',
        'average_rating': ''
    }
    # SECURITY NOTE(review): hard-coded Goodreads API key — move to config.
    url = "https://www.goodreads.com/book/isbn/{}?key=JKfZcTyK1lzaCpB58Tpr8g".format(isbn)
    res = requests.get(url)
    tree = ElementTree.fromstring(res.content)
    try:
        # The dict literal below is fully evaluated before update() runs, so
        # an IndexError leaves the payload untouched — same all-or-nothing
        # behavior as the original.
        payload.update({
            'description': tree[1][16].text,
            'book_cover': tree[1][8].text,
            'review_count': tree[1][17][3].text,
            'average_rating': tree[1][18].text,
            'link': tree[1][24].text,
        })
    except IndexError:
        # Unexpected XML layout: keep the empty-string defaults.
        pass
    return jsonify(payload)
@app.route('/review', methods=['GET', 'POST'])
def review():
    """Submit a book review (POST) or render the review form (GET)."""
    if request.method == 'POST':
        # Robustness fix: the original read session['username'] directly,
        # which raised KeyError (HTTP 500) for anonymous POSTs. Redirect to
        # the login page instead, consistent with index().
        username = session.get('username')
        if username is None:
            return redirect('/login')
        isbn = request.form.get('isbn')
        review = request.form.get('review')
        book = db.execute('SELECT * FROM books WHERE isbn=:isbn ',
                          {'isbn': isbn}).fetchone()
        if book is None:
            return render_template('error.html', message='Book ISBN Invalid', navbar=True)
        db.execute('INSERT INTO reviews(title, isbn, review, user_name) VALUES(:title, :isbn, :review, :username)',
                   {'title': book.title, 'isbn': isbn, 'review': review, 'username': username})
        db.commit()
        return render_template('success.html', message='Review Successfully Submitted', navbar=True)
    else:
        return render_template('review.html', navbar=True)
# pylint: disable=no-self-use,invalid-name
from allennlp.common.testing import AllenNlpTestCase
from allennlp.data.fields import TextField, ListField, SequenceLabelField, LabelField, SpanField
from allennlp.data.tokenizers.token import Token
import pytest
from propara.propara.data import ProStructDatasetReader, _find_span
FILENAME = "../fixtures/state_changes/grids.small.tsv"
class TestProParaDatasetReader(AllenNlpTestCase):
    """Tests for ProStructDatasetReader and the _find_span helper."""
    def test_find_span(self):
        """_find_span returns (start, end) token indices; per the cases below
        it is case-insensitive and uses sentinel pairs for special results."""
        sentence = [Token("My"), Token("car"), Token("is"), Token("-"), Token("grey"), Token("?")]
        # Single token
        assert _find_span([Token("car")], sentence) == (1, 1)
        # Multi token
        assert _find_span([Token("My"), Token("car")], sentence) == (0, 1)
        # Case insensitive
        assert _find_span([Token("my"), Token("car")], sentence) == (0, 1)
        # Not in sentence
        assert _find_span([Token("my"), Token("truck")], sentence) == (-1, -1)
        # Unknown
        assert _find_span([Token("?")], sentence) == (-2, -2)
        # Absent
        assert _find_span([Token("-")], sentence) == (-3, -3)
    def test_read_from_file(self):
        """Read the small fixture grid and check every field of the first
        instance against the hand-written expectation in the table below."""
        reader = ProStructDatasetReader()
        instances = list(reader.read(FILENAME))
        instance = instances[0]
        fields = instance.fields
        # 4	SID	PARTICIPANTS	plants	bacteria	sediment	oil
        # 4	PROMPT: How does oil form?	-=====	-=====	-=====	-=====
        # 4	state1	?	?	?	-
        # 4	event1	Plants die.
        # 4	state2		?	?	?	-
        # 4	event2	They are buried in sediment.
        # 4	state3	sediment	?	?	-
        # 4	event3	Bacteria is buried in the sediment.
        # 4	state4	sediment	sediment	?	-
        # 4	event4	Large amounts of sediment gradually pile on top of the original sediment.
        # 4	state5	sediment	sediment	?	-
        # 4	event5	Pressure builds up.
        # 4	state6	sediment	sediment	?	-
        # 4	event6	Heat increases.
        # 4	state7	sediment	sediment	?	-
        # 4	event7	The chemical structure of the buried sediment and plants changes.
        # 4	state8	sediment	sediment	?	-
        # 4	event8	The sediment and plants are at least one mile underground.
        # 4	state9	one mile underground	sediment	underground	-
        # 4	event9	The buried area is extremely hot.
        # 4	state10	one mile underground	sediment	underground	-
        # 4	event10	More chemical changes happen eand the buried material becomes oil.
        # 4	state11	-	-	underground	underground
        participants = fields["participants"]
        assert isinstance(participants, ListField)
        pfl = participants.field_list
        assert len(pfl) == 4
        assert all(isinstance(field, TextField) for field in pfl)
        assert all(len(field.tokens) == 1 for field in pfl)
        assert {field.tokens[0].text for field in pfl} == {'plants', 'bacteria', 'sediment', 'oil'}
        participant_strings = fields["participant_strings"].metadata
        assert participant_strings == ['plants', 'bacteria', 'sediment', 'oil']
        sentences = fields["sentences"]
        assert isinstance(sentences, ListField)
        sfl = sentences.field_list
        assert len(sfl) == 10
        assert all(isinstance(field, TextField) for field in sfl)
        sentence = sfl[0].tokens
        assert [token.text for token in sentence] == ["Plants", "die", "."]
        verbs = fields["verbs"]
        assert isinstance(verbs, ListField)
        vfl = verbs.field_list
        assert len(vfl) == 10
        assert all(isinstance(field, SequenceLabelField) for field in vfl)
        # second word is the verb
        assert vfl[0].labels == [0, 1, 0]
        actions = fields["actions"]
        assert isinstance(actions, ListField)
        afl = actions.field_list
        assert len(afl) == 4  # one per participant
        assert all(isinstance(af, ListField) for af in afl)
        assert all(len(af.field_list) == 10 for af in afl)
        af0 = afl[0]
        assert all(isinstance(action, LabelField) for action in af0.field_list)
        assert [action.label for action in af0.field_list] == [0, 3, 0, 0, 0, 0, 0, 3, 0, 2]
        starts = fields["before_locations"]
        assert isinstance(starts, ListField)
        sfl = starts.field_list
        assert len(sfl) == 4  # one per participant
        assert all(isinstance(sf, ListField) for sf in sfl)
        assert all(len(sf.field_list) == 10 for sf in sfl)
        sf0 = sfl[0]
        assert all(isinstance(span, SpanField) for span in sf0.field_list)
        # Sentinel spans follow _find_span: (-1,-1) absent from sentence,
        # (-2,-2) unknown '?', (-3,-3) absent '-'.
        assert ([(span.span_start, span.span_end) for span in sf0.field_list] ==
                [(-2, -2), (-2, -2), (5, 5), (3, 3), (-1, -1), (-1, -1), (6, 6), (1, 1), (-1, -1), (-1, -1)])
        ends = fields["after_locations"]
        assert isinstance(ends, ListField)
        efl = ends.field_list
        assert len(efl) == 4  # one per participant
        assert all(isinstance(ef, ListField) for ef in efl)
        assert all(len(ef.field_list) == 10 for ef in efl)
        ef0 = efl[0]
        assert all(isinstance(span, SpanField) for span in ef0.field_list)
        assert ([(span.span_start, span.span_end) for span in ef0.field_list] ==
                [(-2, -2), (4, 4), (5, 5), (3, 3), (-1, -1), (-1, -1), (6, 6), (7, 9), (-1, -1), (-3, -3)])
        participant_indicators = fields["participant_indicators"]
        # should be (num_participants, num_sentences, num_words)
        pifl = participant_indicators.field_list
        assert len(pifl) == 4  # one per participant
        assert all(isinstance(pif, ListField) for pif in pifl)
        assert all(len(pif.field_list) == 10 for pif in pifl)  # 10 sentences
        pif0 = pifl[0].field_list
        assert all(isinstance(pif, SequenceLabelField) for pif in pif0)
        # plants -> Plants die.
        assert pif0[0].labels == [1, 0, 0]
        # plants -> They are buried in sediment.
        assert pif0[1].labels == [0, 0, 0, 0, 0, 0]
        # plants -> The sediment and plants are at least one mile underground.
        assert pif0[7].labels == [0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0]
        # Paragraph indicators
        paragraph = fields["paragraph"]
        num_tokens = len(paragraph.tokens)
        # The paragraph is the concatenation of all sentences.
        assert num_tokens == sum(len(sf.tokens) for sf in sentences.field_list)
        # Paragraph verb indicators
        paragraph_verb_labels = fields["paragraph_verbs"].labels
        assert paragraph_verb_labels == [label for sentence_verbs in vfl for label in sentence_verbs.labels]
        # Paragraph participant indicators
        ppi = fields["paragraph_participant_indicators"]
        assert len(ppi.field_list) == len(participants.field_list)
        for i, participant_i_indicator in enumerate(ppi.field_list):
            joined_labels = [label for pif in pifl[i].field_list for label in pif.labels]
            assert joined_labels == participant_i_indicator.labels
        # Paragraph sentence indicators: for sentence k the labels are
        # 0 before it, 1 inside it, 2 after it (checked here for k=2).
        psi = fields["paragraph_sentence_indicators"]
        assert len(psi.field_list) == len(sentences.field_list)
        length0 = len(sentences.field_list[0].tokens)
        length1 = len(sentences.field_list[1].tokens)
        length2 = len(sentences.field_list[2].tokens)
        psi2 = psi.field_list[2]
        for i, label in enumerate(psi2.labels):
            if i < length0 + length1:
                assert label == 0
            elif i < length0 + length1 + length2:
                assert label == 1
            else:
                assert label == 2
|
<reponame>LSaffin/scripts
"""Cross sections of theta_e at different points along a front
"""
from string import ascii_uppercase
import numpy as np
import matplotlib.pyplot as plt
import iris.plot as iplt
from irise import convert, grid
from irise.plot.util import multilabel, add_map
from myscripts.models.um import case_studies
from myscripts.projects.thesis.fronts import plotdir
from myscripts.projects.thesis.bl.low_level_cancellation_cs import cs_cube
# Contour levels to plot variables
# theta_e is extracted on the 90000 Pa pressure surface.
levels = ('air_pressure', [90000])
# Sea-level pressure contours (hPa) and the relative-humidity contour used
# to outline moist regions on the cross sections.
plevs = np.linspace(950, 1050, 11)
rh_levs = [0.8]
# Points for IOP5 — each tuple is a cross-section line (xs, xf, ys, yf)
points = [(-8, 8, 9, 7), (-10, 6, 4, 2), (-12, 4, -1, -3)]
filename = plotdir + 'iop5_24h_front_cs.pdf'
theta_levs = np.linspace(280, 320, 9)
# Points for IOP8 (alternative case study; swap in by uncommenting)
#points = [(-6, 2, 10, 2), (-6, 2, 2, 10), (-12, -6, 5, -5)]
#filename = plotdir + 'iop8_24h_front_cs.pdf'
#theta_levs = np.linspace(275, 315, 9)
# Subplot grid layout for the figure.
nrows, ncols = 2, 2
def main(cubes, **kwargs):
    """Plot an overview map of theta_e with cross-section lines, plus the
    cross sections themselves, and save the figure to ``filename``.

    Parameters
    ----------
    cubes : iris CubeList
        Forecast diagnostics to plot.
    **kwargs :
        Unused; kept for interface compatibility with the caller.
    """
    # Pre-calculate parameters on every plot
    mslp = convert.calc('air_pressure_at_sea_level', cubes)
    mslp.convert_units('hPa')
    theta = convert.calc('equivalent_potential_temperature', cubes,
                         levels=levels)[0]
    rh = convert.calc('relative_humidity', cubes)
    fig = plt.figure(figsize=(18, 15))
    # Mask out everything outside the domain of interest.
    lon, lat = grid.true_coords(theta)
    lonmask = np.logical_or(lon < -15, lon > 5)
    latmask = np.logical_or(lat < 47.5, lat > 62.5)
    areamask = np.logical_or(lonmask, latmask)
    # Plot overview
    ax1 = plt.subplot2grid((2, 2), (0, 0))
    iplt.contourf(theta, theta_levs, cmap='coolwarm', extend='both')
    add_map()
    cs = iplt.contour(mslp, plevs, colors='k', linewidths=2)
    plt.clabel(cs, fmt='%1.0f')
    # Dashed outline of the analysis-domain mask.
    mask = theta.copy(data=areamask.astype(int))
    iplt.contour(mask, [0.5], colors='k', linestyles='--')
    count = 0
    for n, (xs, xf, ys, yf) in enumerate(points, start=1):
        # Label the cross section points (A/B, C/D, E/F)
        plt.plot([xs, xf], [ys, yf], '-kx')
        plt.text(xs, ys, ascii_uppercase[count], color='k', fontsize=20)
        count += 1
        plt.text(xf, yf, ascii_uppercase[count], color='k', fontsize=20)
        count += 1
    multilabel(plt.gca(), 0)
    # Full 3d theta_e for the cross sections.
    theta = convert.calc('equivalent_potential_temperature', cubes)
    # Plot cross sections
    titles = ['AB', 'CD', 'EF']
    coords = ['grid_longitude', 'altitude']
    for n, (xs, xf, ys, yf) in enumerate(points, start=1):
        # BUG FIX: the original used true division (row = n / ncols), which
        # yields a float in Python 3 and crashes subplot2grid; divmod gives
        # the intended integer row/column.
        row, col = divmod(n, ncols)
        ax = plt.subplot2grid((nrows, ncols), (row, col))
        theta_cs = cs_cube(theta, xs, xf, ys, yf)
        rh_cs = cs_cube(rh, xs, xf, ys, yf)
        im = iplt.contourf(theta_cs, theta_levs, coords=coords,
                           cmap='coolwarm', extend='both')
        # White contour plus stippling marks the moist (RH > 0.8) region.
        iplt.contour(rh_cs, rh_levs, coords=coords, colors='w')
        iplt.contourf(rh_cs, [0.8, 2], coords=coords,
                      colors='None', hatches=['.'])
        ax.set_ylim(0, 7)
        # Keep west on the left regardless of the section orientation.
        if xs > xf:
            ax.invert_xaxis()
            multilabel(ax, n, xreversed=True)
        else:
            multilabel(ax, n)
        ax.set_title(titles[n - 1])
        ax.set_ylabel('Height (km)')
        ax.set_xlabel('Grid Longitude')
    # Add colorbar at bottom of figure
    cbar = plt.colorbar(im, ax=fig.axes, orientation='horizontal',
                        fraction=0.05, spacing='proportional')
    # NOTE(review): the filled field is theta_e (K); 'PVU' looks like a
    # copy-paste from a PV plot — confirm the intended label.
    cbar.set_label('PVU')
    fig.savefig(filename)
    plt.show()
if __name__ == '__main__':
    # Load the IOP5b case-study forecast at T+24 h and produce the figure.
    forecast = case_studies.iop5b.copy()
    cubes = forecast.set_lead_time(hours=24)
    # vmin/vmax/cmap are accepted by main() via **kwargs but not used there.
    main(cubes, vmin=-10, vmax=10, cmap='coolwarm')
|
<filename>src/kalman/scripts/kfilter/kfilter.py
import math
import numpy as np
from gmap import GlobalMap
from gposition import GlobalPosition
from math import cos, sin
from scipy.linalg import block_diag
from threading import Lock
def scale_angle(angle):
    """Wrap *angle* (radians) into the half-open interval (-pi, pi]."""
    two_pi = 2 * math.pi
    while angle <= -math.pi:
        angle += two_pi
    while angle > math.pi:
        angle -= two_pi
    return angle
class KalmanFilter():
    def __init__(self):
        """Initialize filter state: pose estimate, map, odometry buffers
        and the noise model constants."""
        self.mutex = Lock()  # guards filter state against concurrent updates
        self.pos = GlobalPosition()  # current pose estimate (x, y, theta)
        self.global_map = GlobalMap()  # known walls used as landmarks
        # Cumulative wheel-encoder distances (new/old samples) and their
        # increments since the last filter step, per wheel.
        self.s_r_new = None
        self.s_r_old = None
        self.ds_r = None
        self.s_l_new = None
        self.s_l_old = None
        self.ds_l = None
        # Latest extracted line-segment measurements (None until received).
        self.line_segments = None
        self.b = 0.230  # wheel base, 230 mm
        self.wheel_r = 0.035  # wheel radius, 35 mm
        # Odometry noise growth constants per wheel (scale |ds| into Q).
        self.k_r = 1e-10
        self.k_l = 1e-10
        # Initial 3x3 pose covariance estimate.
        self.P = 1e-10 * np.ones(shape=(3,3))
        # Validation-gate threshold: matches accepted when d^2 <= g^2.
        self.g = 10
    def filter(self):
        """Run one predict/update cycle and return the new pose estimate.

        The whole cycle runs under the mutex, so it is safe to call from
        concurrent callbacks.
        """
        self.mutex.acquire()
        self._set_ds()
        pos_pred, P_pred = self._predict_position()
        # Any measurement?
        if self.line_segments is None or len(self.line_segments) == 0:
            # No observations: the odometry prediction becomes the estimate.
            self.pos = pos_pred
            self.P = P_pred
            self.mutex.release()
            return self.pos
        # Fix the prediction, based on measurements
        mes_pred, H = self._predict_measurement(pos_pred)
        v, R, H = self._match_prediction_and_measurement(mes_pred, H, P_pred)
        self._filter_position(pos_pred, P_pred, H, R, v)
        self.mutex.release()
        return self.pos
    def _predict_position(self):
        """EKF prediction step from wheel odometry.

        Propagates the pose with a differential-drive motion model using the
        wheel displacement increments ds_r/ds_l, and propagates the
        covariance with the model Jacobians.

        Returns
        -------
        (GlobalPosition, numpy.ndarray)
            Predicted pose and predicted 3x3 covariance.
        """
        # Path and heading increments of the differential drive.
        ds = (self.ds_r + self.ds_l) / 2
        dtheta = (self.ds_r - self.ds_l) / self.b
        # Position prediction (motion applied at the mid-step heading).
        x_pred = self.pos.x + ds * cos(self.pos.theta + dtheta / 2)
        y_pred = self.pos.y + ds * sin(self.pos.theta + dtheta / 2)
        theta_pred = scale_angle(self.pos.theta + dtheta)
        pos_pred = GlobalPosition(x_pred, y_pred, theta_pred)
        # Prediction and error
        # Q: wheel-noise covariance, growing with distance travelled.
        Q = np.array([
            [self.k_r * abs(self.ds_r), 0],
            [0, self.k_l * abs(self.ds_l)]
        ])
        # F_x: Jacobian of the motion model w.r.t. the state (x, y, theta).
        F_x = np.array([
            [1, 0, -ds * sin(self.pos.theta + dtheta/2)],
            [0, 1, ds * cos(self.pos.theta + dtheta/2)],
            [0, 0, 1],
        ])
        # F_u: Jacobian of the motion model w.r.t. the wheel displacements.
        F_u = np.array([
            [(1/2 * cos(self.pos.theta + dtheta/2) +
              ds/(2*self.b) * sin(self.pos.theta + dtheta/2)),
             (1/2 * cos(self.pos.theta + dtheta/2) -
              ds/(2*self.b) * sin(self.pos.theta + dtheta/2))],
            [(1/2 * sin(self.pos.theta + dtheta/2) -
              ds/(2*self.b) * cos(self.pos.theta + dtheta/2)),
             (1/2 * sin(self.pos.theta + dtheta/2) +
              ds/(2*self.b) * cos(self.pos.theta + dtheta/2))],
            [-1 / self.b, 1 / self.b],
        ])
        # Covariance propagation: P' = F_x P F_x^T + F_u Q F_u^T.
        P_pred = (
            np.matmul(np.matmul(F_x, self.P), F_x.T) +
            np.matmul(np.matmul(F_u, Q), F_u.T)
        )
        return pos_pred, P_pred
def _predict_measurement(self, pos_pred):
measurement_pred = []
H = []
for wall in self.global_map.walls:
alpha_pred = scale_angle(wall.angle - pos_pred.theta)
rho_pred = wall.radius - (
pos_pred.x * cos(wall.angle) + pos_pred.y * sin(wall.angle)
)
measurement_pred.append([alpha_pred, rho_pred])
H.append([[0, 0, -1], [-cos(wall.angle), -sin(wall.angle), 0]])
return measurement_pred, H
def _match_prediction_and_measurement(self, mes_pred, H_pred, P_pred):
if self.line_segments is None:
return None, None, None
v_matched = []
R_matched = []
H_matched = []
for i in range(len(mes_pred)):
m_pred_i = np.array(mes_pred[i])
H_i = np.array(H_pred[i])
for j in range(len(self.line_segments)):
m_real_j = np.array([
self.line_segments[j].angle, self.line_segments[j].radius
])
R_j = np.array([
self.line_segments[j].covariance[0:2],
self.line_segments[j].covariance[2:4]
])
v_ij = m_real_j - m_pred_i
sigma = np.matmul(np.matmul(H_i, P_pred), H_i.T) + R_j
d_ij = np.matmul(np.matmul(v_ij.T, np.linalg.inv(sigma)), v_ij)
if d_ij <= self.g**2:
v_matched.append(v_ij)
R_matched.append(R_j)
H_matched.append(H_i)
return v_matched, R_matched, H_matched
def _filter_position(self, pos_pred, P_pred, H, R, v):
if R is None or len(R) == 0 or len(R) != len(H) or len(R) != len(v):
self.P = P_pred
self.pos.set_position(pos_pred.x, pos_pred.y, pos_pred.theta)
return
# Block diagonal R
R_r = np.array(R[0])
for i in range(1, len(R)):
R_r = block_diag(R_r, R[i])
R = R_r
# Reshape H and v
v = np.reshape(v, (-1, 1))
H_r = []
for i in range(len(H)):
H_r.append(np.transpose(H[i]))
H = np.reshape(H_r, (-1, 3))
# Calculate Kalman gain and fitler position
sigma = np.matmul(np.matmul(H, P_pred), H.T) + R
self.K = np.matmul(np.matmul(P_pred, H.T), np.linalg.inv(sigma))
self.P = np.matmul((np.eye(3) - np.matmul(self.K, H)), P_pred)
pos_inovation = np.matmul(self.K, v)
self.pos.set_position(
x=pos_pred.x + pos_inovation.T[0][0],
y=pos_pred.y + pos_inovation.T[0][1],
theta=scale_angle(pos_pred.theta + pos_inovation.T[0][2])
)
def _set_ds(self):
self.ds_r = (
0.0 if self.s_r_old is None or self.s_r_new is None
else self.s_r_new - self.s_r_old
)
self.s_r_old = self.s_r_new
self.ds_l = (
0.0 if self.s_l_old is None or self.s_l_new is None
else self.s_l_new - self.s_l_old
)
self.s_l_old = self.s_l_new
def save_joint_states(self, joint_states):
self.mutex.acquire()
for i in range(len(joint_states.name)):
if joint_states.name[i] == 'wheel_right_joint':
self.s_r_new = joint_states.position[i] * self.wheel_r
if joint_states.name[i] == 'wheel_left_joint':
self.s_l_new = joint_states.position[i] * self.wheel_r
self.mutex.release()
def save_line_segments(self, line_segments):
self.mutex.acquire()
self.line_segments = line_segments.line_segments
self.mutex.release() |
import utils
import logging
import json
import requests
from threading import Thread
import pycarwings2
import time
from time import sleep
from configparser import SafeConfigParser
import sys
import datetime
from datetime import datetime
import _thread
import traceback
class LeafHandler(utils.BaseApp):
    """Voice-assistant intent handler for a Nissan LEAF (Carwings API).

    Keeps a persistent pycarwings2 session, refreshes the battery status on
    a background loop, and answers status / climate / charging intents.
    """
    #Persistent info
    PersistentSession = None  # logged-in pycarwings2 leaf handle
    LastUpdate = None         # most recent battery-status object
    LastUpdated = None        # UTC timestamp of the last update attempt
    LastSuccess = None        # UTC timestamp of the last successful update
    Updating = False          # True while a FullUpdate is in flight
    Error = False             # kill-switch for UpdateLoop (never set here --
                              # the assignment in FullUpdate is commented out)
    def __init__(self):
        # Log in once and start the periodic background refresh.
        print("Opening LEAF connection")
        self.PersistentSession = self.Login()
        #self.RequestUpdate_Threaded(self.PersistentSession)
        self.StartUpdateLoop(self.PersistentSession)
        #self.GetLatestStatus(self.PersistentSession, True)
        print("LEAF ready!")
    def ProcessIntent(self, intent, request):
        """Dispatch a named intent to its handler and return a speech reply.

        Any exception is swallowed (logged via traceback) and converted to a
        generic error message so the voice front end always gets a response.
        """
        speech = "Sorry, but there was an error processing the request."
        try:
            if intent == "LEAF_UpdateStatus":
                #Requests an update of battery stats
                self.RequestUpdate_Threaded(self.PersistentSession)
                speech = "Status update requested!"
            elif intent == "LEAF_GetStatus":
                speech = self.GetStatusResponse(self.PersistentSession)
            elif intent == "LEAF_ClimateOn":
                #Turns on the climate control for preheating/cooling
                # Two-step dialog: first ask for confirmation, then act.
                dialog_state = request.json['request']['dialogState']
                if (dialog_state == "STARTED"):
                    return utils.continue_dialog()
                elif (dialog_state == "IN_PROGRESS"):
                    choice = request.json['request']['intent']['confirmationStatus'] == "CONFIRMED"
                    speech = self.ActivateClimateControl(self.PersistentSession, choice)
            elif intent == "LEAF_ClimateOff":
                #Turns off the climate control
                self.DeactivateClimateControl(self.PersistentSession)
                speech = "Turning off climate control."
            elif intent == "LEAF_StartCharge":
                dialog_state = request.json['request']['dialogState']
                if (dialog_state == "STARTED"):
                    return self.StartCharging_Start(self.PersistentSession)
                elif (dialog_state == "IN_PROGRESS"):
                    choice = request.json['request']['intent']['confirmationStatus'] == "CONFIRMED"
                    speech = self.StartCharging_Confirm(self.PersistentSession, choice)
            else:
                speech = "Sorry, no handler was found for the intent "+intent
        except:
            speech = "Hmm, an internal error occured while processing the request."
            traceback.print_exc()
        return utils.return_speech(speech)
    def Ready(self):
        """Return True once at least one status update has completed."""
        return self.LastUpdate != None
    def GetStatsForDisplay(self):
        """Build an HTML snippet summarizing the cached LEAF status."""
        stats = ""
        if not self.Ready(): return "LEAF processor is not ready."
        stats += "LEAF processor is ready.</br>"
        #LEAF is (not) plugged in with (N) bars.
        #LEAF is charging.
        #Last updated at [time]
        stats += "LEAF is "
        if (not self.LastUpdate.is_connected):
            stats += "not "
        stats += "plugged in with {0} bars.</br>".format(self.LastUpdate.battery_remaining_amount)
        if (self.LastUpdate.is_charging): stats += "LEAF is charging.</br>"
        stats += "Last updated at {0}.</br>".format(self.LastSuccess)
        return stats
    def GetStatusResponse(self, leaf):
        #Lets the user know the status of the car
        #If not charging: "The TIE Fighter is at 58 percent charge"
        #If charging: "The TIE fighter is at 58 percent charge and will finish charging in 2 hours 30 minutes
        #if updating, let the user know
        speech = "Error"
        if self.Updating:
            speech = "The status is currently being updated. Try again in a few minutes."
        else:
            results = self.GetLatestStatus(leaf, False)
            if results:
                # Age of the cached reading, from the Carwings timestamp.
                date = datetime.strptime(results.answer["BatteryStatusRecords"]["OperationDateAndTime"], "%b %d, %Y %I:%M %p")
                minutes = int((datetime.now() - date).total_seconds() / 60)
                pluggedMsg = ""
                if (not results.is_connected):
                    pluggedMsg = "not "
                speech = "As of {0} minutes ago the leaf is at {1} bars and is {2}plugged in.".format(minutes, int(results.battery_remaining_amount), pluggedMsg)
                if (results.is_charging):
                    hours = 0
                    minutes = 0
                    # Pick whichever time-to-full estimate is populated.
                    # NOTE(review): if none of the three fields is set,
                    # time_holder stays None and .total_seconds() raises
                    # (caught by ProcessIntent's blanket except) -- confirm.
                    time_holder = None
                    if (results.time_to_full_trickle):
                        time_holder = results.time_to_full_trickle
                    elif (results.time_to_full_l2):
                        time_holder = results.time_to_full_l2
                    elif (results.time_to_full_l2_6kw):
                        time_holder = results.time_to_full_l2_6kw
                    hours = int(time_holder.total_seconds() / 3600)
                    minutes = int((time_holder.total_seconds() - (int(hours)*3600)) / 60)
                    speech += " It is charging with "
                    if (int(hours) > 0): speech += "{0} hours ".format(hours)
                    if (int(hours) > 0 and int(minutes) > 0): speech += "and "
                    if (int(minutes) > 0): speech += "{0} minutes ".format(minutes)
                    speech += "remaining."
            else:
                speech = "No results cached. Try again in a few minutes."
                self.RequestUpdate_Threaded(leaf)
        #self.RequestUpdate_Threaded(leaf)
        return speech
    #Nissan API Related
    def Login(self):
        """Read credentials from config.ini / my_config.ini and log in.

        Returns a pycarwings2 leaf object for the account's vehicle.
        NOTE(review): SafeConfigParser is deprecated (use ConfigParser), and
        logging credentials at debug level leaks the password into logs.
        """
        parser = SafeConfigParser()
        candidates = [ 'config.ini', 'my_config.ini' ]
        found = parser.read(candidates)
        username = parser.get('get-leaf-info', 'username')
        password = parser.get('get-leaf-info', 'password')
        logging.debug("login = %s , password = %s" % ( username , password) )
        print("Prepare Session")
        # Third argument is the Carwings region code.
        # NOTE(review): "<PASSWORD>" looks like a scrubbed placeholder for
        # the region string (e.g. "NNA") -- confirm before deploying.
        s = pycarwings2.Session(username, password , "<PASSWORD>")
        print("Login...")
        return s.get_leaf()
    def RequestUpdate(self, leaf, block=False):
        """Ask the car to refresh its status; optionally poll until done.

        When block is True, polls the result key every 10 s until the
        vehicle reports completion.
        """
        utils.PrintTimed("Requesting Update")
        result_key = leaf.request_update()
        if block:
            status = leaf.get_status_from_update(result_key)
            while status is None:
                sleep(10)
                status = leaf.get_status_from_update(result_key)
        utils.PrintTimed("Update Completed")
    def GetLatestStatus(self, leaf, update=False):
        """Return the cached battery status, optionally refreshing it first."""
        if update: self.SetLastUpdate(leaf)
        return self.LastUpdate
    def ActivateClimateControl(self, leaf, choice):
        """Start climate control if the user confirmed; return speech text."""
        if choice:
            leaf.start_climate_control()
            return "Activating climate control."
        return "Not starting climate control."
    def DeactivateClimateControl(self, leaf):
        """Stop the car's climate control."""
        print("Deactivating Climate Control")
        leaf.stop_climate_control()
    def RequestUpdate_Threaded(self, leaf):
        """Kick off a FullUpdate on a background thread (non-blocking)."""
        _thread.start_new_thread(self.FullUpdate, (leaf, ))
    def StartCharging_Start(self, leaf):
        #require an existing status, start charging if plugged in
        latest = self.GetLatestStatus(leaf)
        if latest == None:
            self.RequestUpdate_Threaded(leaf)
            return utils.return_speech("No status cached. Try again in a few minutes.")
        elif latest.is_charging:
            return utils.return_speech("Leaf is already charging.")
        elif not latest.is_connected:
            return utils.return_speech("The leaf is not plugged in.")
        else:
            # Plugged in and idle: ask the user to confirm.
            return utils.continue_dialog()
    def StartCharging_Confirm(self, leaf, choice):
        """Second dialog step: start charging if the user confirmed."""
        if choice:
            if leaf.start_charging():
                return "Starting charge"
            else:
                return "Could not start charge."
        return "Not starting charge. Let me know if you change your mind."
    #Update Thread/Loop
    def FullUpdate(self, leaf):
        """Request a fresh status from the car and cache it (re-entrant safe:
        no-ops if an update is already running)."""
        if self.Updating: return
        try:
            self.Updating = True
            self.RequestUpdate(leaf, True)  # blocking: poll until the car answers
            self.SetLastUpdate(leaf)
        except:
            traceback.print_exc()
            #self.Error = True
            # Record the attempt time so UpdateLoop does not retry in a
            # tight loop after a failure.
            self.LastUpdated = datetime.utcnow()
            print("Error while performing Full Update")
        finally:
            self.Updating = False
    def SetLastUpdate(self, leaf):
        """Cache the latest battery status and stamp the update times."""
        self.LastUpdate = leaf.get_latest_battery_status()
        self.LastUpdated = datetime.utcnow()
        self.LastSuccess = datetime.utcnow()
        utils.PrintTimed("Status Updated")
    def UpdateLoop(self, leaf, updateFreq, checkFreq):
        """Refresh every updateFreq minutes, checking every checkFreq minutes."""
        utils.PrintTimed("Starting update loop...")
        self.FullUpdate(leaf)
        while not self.Error:
            if (datetime.utcnow() - self.LastUpdated).total_seconds() > 60*updateFreq:
                self.FullUpdate(leaf)
            sleep(60*checkFreq)
    def StartUpdateLoop(self, leaf):
        """Run UpdateLoop on a daemon-style background thread."""
        _thread.start_new_thread(self.UpdateLoop, (leaf, 60, 1))
        #start a thread that periodically updates the status
        #updates every 60 minutes, checks if it should updated every minute
|
<filename>main.py
# -*- coding: utf-8 -*-
import argparse
from functools import partial
import re
import os
from toolz.curried import curry, memoize
from toolz import curried as tz
import pandas as pd
import numpy as np
import scipy
import networkx as nx
from gensim.summarization import keywords, summarize
import sklearn as sk
from sklearn import neighbors, cluster
import tensorflow_hub as hub
from aux.text_extract import get_all_pdf_text_concatenated
# Cache TF-Hub module downloads under the user's home cache directory.
os.environ['TFHUB_CACHE_DIR'] = os.path.expanduser("~/.cache/tfhub_modules")
avg_word_len = 4.79  # average number of characters per word in the English language
@memoize
def tfload(model_url):
    """Load the TF-Hub model at *model_url* (memoized: one load per URL)."""
    return hub.load(model_url)
@memoize
def emb(texts, model_url):
    """Embed *texts* with the TF-Hub encoder at *model_url*.

    Memoized, so *texts* must be hashable (callers pass a tuple).
    """
    return tfload(model_url)(texts)
def text_reduce_return(paragraph, upper_bound_chars, max_word_count):
    """Shorten *paragraph* to roughly *upper_bound_chars* characters.

    Short paragraphs pass through unchanged. Longer ones are summarized to
    about *max_word_count* words; if summarization yields nothing or fails
    (e.g. too few sentences), the paragraph is simply truncated.
    """
    if len(paragraph) < upper_bound_chars:
        return paragraph
    truncated = paragraph[:upper_bound_chars]
    try:
        summary = summarize(paragraph, word_count=max_word_count).replace("\n", " ")
    except ValueError:  # usually happens if there aren't multiple sentences in the paragraph
        return truncated
    return summary or truncated
@curry
def clust(g, v, n):
    """Cluster vectors *v* into *n* groups with connectivity-constrained Ward.

    *g* is the connectivity graph restricting which samples may merge.
    Returns (mean silhouette, n, labels, per-sample silhouettes, model).
    """
    clusterer = cluster.AgglomerativeClustering(
        n, connectivity=g, linkage='ward', affinity='euclidean')
    cluster_labels = clusterer.fit_predict(v)
    silhouettes = sk.metrics.silhouette_samples(v, cluster_labels, metric='cosine')
    return (silhouettes.mean(), n, cluster_labels, silhouettes, clusterer)
def main(args):
    """Build a clustered keyword map from a directory of PDFs; export CSV.

    Pipeline: extract all PDF text -> split into paragraphs -> summarize the
    long ones -> embed with a TF-Hub sentence encoder -> build a
    radius-neighbor graph -> cluster its k-core -> write a CSV shaped for
    gsheet's tree-map plotter. Returns the extracted keywords.
    """
    name_of_pdf_dir = os.path.basename(args.directory_with_pdfs)
    all_text = get_all_pdf_text_concatenated(args.directory_with_pdfs)
    # Paragraphs: split on blank lines, flatten internal line breaks.
    pars = pd.Series(all_text.split('\n\n')).str.replace('\n', ' ')
    # NOTE(review): this computes a log2 length histogram but discards the
    # result -- dead code unless kept for interactive inspection.
    pars.str.len().apply(lambda x: np.log2(x + 1)).astype(int).value_counts() # TODO, is this being stored anywhere?
    # (word, score) pairs from gensim's TextRank keyword extractor.
    text_keywords = keywords(all_text, scores=True, lemmatize=True, words=args.num_keywords)
    lower_bound_chars, upper_bound_chars = args.lower_bound_chars, args.upper_bound_chars
    # Target summary length in words: midpoint character length divided by
    # the average word length (+1 for the separating space).
    word_count = int((lower_bound_chars + upper_bound_chars) / (2 * (avg_word_len + 1)))
    lens = pars.str.len() # paragraph lengths
    nice_pars = pars[(lens >= lower_bound_chars)] # paragraphs we want to use
    nice_pars = nice_pars.apply(
        partial(text_reduce_return,
                upper_bound_chars=upper_bound_chars, max_word_count=word_count)
    )
    # tuple() so the memoized embedder receives a hashable argument.
    vecs = emb(tuple(nice_pars), args.tfhub_sentence_encoder_url).numpy()
    D = sk.metrics.pairwise_distances(vecs, metric='cosine') # pairwise distances of vectors
    # Radius = longest MST edge, so the radius graph stays connected.
    R = scipy.sparse.csgraph.minimum_spanning_tree(D).max() # reduced graph
    G = neighbors.radius_neighbors_graph(vecs, R, metric='cosine')
    core = nx.k_core(nx.Graph(G))  # densest core of the similarity graph
    # Capitalize all occurrences of keywords for easy display on the output
    # TODO, make matching case insensitive
    pattern = re.compile(f"\\b({tz.pipe(text_keywords, tz.pluck(0), '|'.join)})\\b")
    nice_pars = nice_pars.apply(
        lambda x: re.sub(pattern, lambda m: m.group().upper(), x)) # TODO add [[]] around our keywords for zettelkasten
    core_nodes = core.nodes
    core_pars = np.array(nice_pars)[core_nodes]
    core_vecs = vecs[core_nodes]
    sil_u, n, lab, sil, p = clust(nx.adjacency_matrix(core), core_vecs, 8)
    # NOTE(review): layers is computed but never used below.
    layers = nx.onion_layers(core)
    df = pd.DataFrame(
        data=[{"Label": par, "Cluster ID": cid, "Silhouette Score": ss} for par, cid, ss in zip(core_pars, lab, sil)])
    df = df[df["Silhouette Score"] > 0]  # keep well-clustered paragraphs only
    df['Cluster ID'] = df.apply(lambda row: "T" + str(row['Cluster ID']), axis=1)
    # add footer to dataframe so that csv export will be imported by gsheet's tree map plotter correctly
    # NOTE(review): DataFrame.append is deprecated in modern pandas, and the
    # for/else's else-branch always runs (there is no break in the loop).
    for cluster_id in df['Cluster ID'].unique():
        df = df.append({"Label": cluster_id, "Cluster ID": name_of_pdf_dir, "Silhouette Score": None},
                       ignore_index=True)
    else:
        df = df.append({"Label": name_of_pdf_dir, "Cluster ID": None, "Silhouette Score": None}, ignore_index=True)
    df.to_csv(args.output_filename, index=False)
    return {
        "text_keywords": text_keywords
    }
if __name__ == "__main__":
    # CLI entry point: point at a directory of PDFs, get a clustered CSV.
    arg_parser = argparse.ArgumentParser()
    # NOTE(review): the two implicitly concatenated help strings below are
    # missing a space between "PDFs" and "which" (runtime string, left as-is).
    arg_parser.add_argument("directory_with_pdfs",
                            help="Please provide the directory which contains the PDFs"
                                 "which you'd like to build an information map of.",
                            type=str)
    arg_parser.add_argument("--output_filename",
                            default="out.csv")
    arg_parser.add_argument("--num_keywords",
                            help="The number of keywords you'd like to be extracted",
                            type=int,
                            default=500)
    # Paragraph length window (characters) used to select/summarize text.
    arg_parser.add_argument("--lower_bound_chars",
                            type=int,
                            default=256)
    arg_parser.add_argument("--upper_bound_chars",
                            type=int,
                            default=512)
    arg_parser.add_argument("--tfhub_sentence_encoder_url",
                            type=str,
                            default="https://tfhub.dev/google/universal-sentence-encoder-large/5")
    args = arg_parser.parse_args()
    out = main(args)
    print(f"Number of keywords: {len(out['text_keywords'])}")
|
<filename>manifest_lambda/tests/unit/test_iiifImage.py
import unittest
import json
import os
from pathlib import Path
from iiifImage import iiifImage
# Directory containing this test file; used to locate the JSON fixture.
current_path = str(Path(__file__).parent.absolute())
# Extensions treated as non-image media by iiifImage.
media_extensions_list = ['.mp3', '.mp4', '.pdf', '.wav']
class Test(unittest.TestCase):
    """Unit tests for iiifImage (IIIF image/thumbnail/annotation builders)."""

    def setUp(self):
        """Load the shared standard.json fixture and fix a base URL."""
        self.example_standard_json = {}
        file_name = os.path.join(current_path, '..', '1934.007.001.standard.json')
        with open(file_name, 'r') as input_source:
            self.example_standard_json = json.load(input_source)
        self.iiif_base_url = 'https://my.base.url'

    def test_is_image(self):
        """ test_is_image """
        # A .tif file with no overriding mimeType counts as an image.
        sample_files_json = {
            "id": "1934.007.001/1934_007_001-v0003.tif",
            "mediaResourceId": "1934.007.001%2F1934_007_001-v0003",
            "mediaServer": "https://image-iiif-testlib.libraries.nd.edu/iiif/2"
        }
        iiif_image_class = iiifImage(sample_files_json, self.iiif_base_url, media_extensions_list)
        self.assertTrue(iiif_image_class.is_image())

    def test_is_image_pdf(self):
        """ test_is_image_pdf """
        # .pdf is in media_extensions_list, so it is not an image.
        sample_files_json = {
            "id": "1934.007.001/1934_007_001-v0003.pdf",
            "mediaResourceId": "1934.007.001%2F1934_007_001-v0003",
            "mediaServer": "https://image-iiif-testlib.libraries.nd.edu/iiif/2"
        }
        iiif_image_class = iiifImage(sample_files_json, self.iiif_base_url, media_extensions_list)
        self.assertFalse(iiif_image_class.is_image())

    def test_is_image_mimeType(self):
        """ test_is_image_mimeType """
        # An explicit non-image mimeType overrides the .tif extension.
        sample_files_json = {
            "id": "1934.007.001/1934_007_001-v0003.tif",
            "mediaResourceId": "1934.007.001%2F1934_007_001-v0003",
            "mediaServer": "https://image-iiif-testlib.libraries.nd.edu/iiif/2",
            "mimeType": "application/pdf"
        }
        iiif_image_class = iiifImage(sample_files_json, self.iiif_base_url, media_extensions_list)
        self.assertFalse(iiif_image_class.is_image())

    def test_other_things(self):
        """ test_other_things """
        # Exercises thumbnail(), _annotation_id(), _image_url_id(),
        # _service(), image() and annotation() on one sample record.
        sample_files_json = {
            "id": "1934.007.001/1934_007_001-v0003.tif",
            "mediaResourceId": "1934.007.001%2F1934_007_001-v0003",
            "mediaServer": "https://image-iiif-testlib.libraries.nd.edu/iiif/2"
        }
        iiif_image_class = iiifImage(sample_files_json, self.iiif_base_url, media_extensions_list)
        actual_thumbnail_results = iiif_image_class.thumbnail()
        expected_thumbnail_results = {
            'id': 'https://image-iiif-testlib.libraries.nd.edu/iiif/2/1934.007.001%2F1934_007_001-v0003/full/250,/0/default.jpg',
            'type': 'Image',
            'service': [
                {'id': 'https://image-iiif-testlib.libraries.nd.edu/iiif/2/1934.007.001%2F1934_007_001-v0003', 'type': 'ImageService2', 'profile': 'http://iiif.io/api/image/2/level2.json'}
            ]
        }
        self.assertEqual(expected_thumbnail_results, actual_thumbnail_results)
        actual_annotation_id = iiif_image_class._annotation_id()
        expected_annotation_id = 'https://my.base.url/annotation/1934.007.001%2F1934_007_001-v0003.tif'
        self.assertEqual(actual_annotation_id, expected_annotation_id)
        actual_image_url_id = iiif_image_class._image_url_id()
        expected_image_url_id = 'https://image-iiif-testlib.libraries.nd.edu/iiif/2/1934.007.001%2F1934_007_001-v0003'
        self.assertEqual(actual_image_url_id, expected_image_url_id)
        actual_service = iiif_image_class._service()
        expected_service = {
            'id': 'https://image-iiif-testlib.libraries.nd.edu/iiif/2/1934.007.001%2F1934_007_001-v0003',
            'profile': 'http://iiif.io/api/image/2/level2.json',
            'type': 'ImageService2'
        }
        self.assertEqual(actual_service, expected_service)
        actual_image = iiif_image_class.image()
        expected_image = {
            'id': 'https://image-iiif-testlib.libraries.nd.edu/iiif/2/1934.007.001%2F1934_007_001-v0003/full/full/0/default.jpg',
            'type': 'Image',
            'format': 'image/jpeg',
            'service': [
                {
                    'id': 'https://image-iiif-testlib.libraries.nd.edu/iiif/2/1934.007.001%2F1934_007_001-v0003',
                    'type': 'ImageService2',
                    'profile': 'http://iiif.io/api/image/2/level2.json'
                }
            ]
        }
        # Bug fix: the original used assertTrue(actual_image, expected_image),
        # which treats the second argument as the failure *message* and never
        # compares the two values. assertEqual performs the real check.
        self.assertEqual(expected_image, actual_image)
        actual_annotation = iiif_image_class.annotation('my_canvas_url_id')
        expected_annotation = {
            'body': {
                'format': 'image/jpeg',
                'id': 'https://image-iiif-testlib.libraries.nd.edu/iiif/2/1934.007.001%2F1934_007_001-v0003/full/full/0/default.jpg',
                'service': [
                    {
                        'id': 'https://image-iiif-testlib.libraries.nd.edu/iiif/2/1934.007.001%2F1934_007_001-v0003',
                        'profile': 'http://iiif.io/api/image/2/level2.json',
                        'type': 'ImageService2'
                    }
                ],
                'type': 'Image'
            },
            'id': 'https://my.base.url/annotation/1934.007.001%2F1934_007_001-v0003.tif',
            'motivation': 'painting',
            'target': 'my_canvas_url_id',
            'type': 'Annotation'
        }
        self.assertEqual(actual_annotation, expected_annotation)
def suite():
    """Build a TestSuite containing every test from the Test case."""
    loader = unittest.TestLoader()
    return loader.loadTestsFromTestCase(Test)
if __name__ == '__main__':
    # unittest.main() discovers and runs this module's tests by itself; the
    # previous bare suite() call built a TestSuite and discarded it (dead code).
    unittest.main()
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for Keras activation functions."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl.testing import parameterized
import numpy as np
from tensorflow.python.keras import activations
from tensorflow.python.keras import backend
from tensorflow.python.keras import combinations
from tensorflow.python.keras.layers import advanced_activations
from tensorflow.python.keras.layers import core
from tensorflow.python.keras.layers import serialization
from tensorflow.python.ops import nn_ops as nn
from tensorflow.python.platform import test
def _ref_softmax(values):
m = np.max(values)
e = np.exp(values - m)
return e / np.sum(e)
@combinations.generate(combinations.combine(mode=['graph', 'eager']))
class KerasActivationsTest(test.TestCase, parameterized.TestCase):
  """Checks Keras activations against NumPy/SciPy reference implementations,
  in both graph and eager modes."""

  def test_serialization(self):
    """Every built-in activation round-trips through serialize/deserialize."""
    all_activations = [
        'softmax', 'relu', 'elu', 'tanh', 'sigmoid', 'hard_sigmoid', 'linear',
        'softplus', 'softsign', 'selu', 'gelu'
    ]
    for name in all_activations:
      fn = activations.get(name)
      ref_fn = getattr(activations, name)
      assert fn == ref_fn
      config = activations.serialize(fn)
      fn = activations.deserialize(config)
      assert fn == ref_fn

  def test_serialization_v2(self):
    """v2 ops (e.g. nn.softmax_v2) serialize to their Keras names."""
    activation_map = {nn.softmax_v2: 'softmax'}
    for fn_v2_key in activation_map:
      fn_v2 = activations.get(fn_v2_key)
      config = activations.serialize(fn_v2)
      fn = activations.deserialize(config)
      assert fn.__name__ == activation_map[fn_v2_key]

  def test_serialization_with_layers(self):
    """A layer-as-activation survives layer (de)serialization."""
    activation = advanced_activations.LeakyReLU(alpha=0.1)
    layer = core.Dense(3, activation=activation)
    config = serialization.serialize(layer)
    # with custom objects
    deserialized_layer = serialization.deserialize(
        config, custom_objects={'LeakyReLU': activation})
    self.assertEqual(deserialized_layer.__class__.__name__,
                     layer.__class__.__name__)
    self.assertEqual(deserialized_layer.activation.__class__.__name__,
                     activation.__class__.__name__)
    # without custom objects
    deserialized_layer = serialization.deserialize(config)
    self.assertEqual(deserialized_layer.__class__.__name__,
                     layer.__class__.__name__)
    self.assertEqual(deserialized_layer.activation.__class__.__name__,
                     activation.__class__.__name__)

  def test_softmax(self):
    """2-D softmax matches the reference; 1-D input raises ValueError."""
    x = backend.placeholder(ndim=2)
    f = backend.function([x], [activations.softmax(x)])
    test_values = np.random.random((2, 5))
    result = f([test_values])[0]
    expected = _ref_softmax(test_values[0])
    self.assertAllClose(result[0], expected, rtol=1e-05)
    x = backend.placeholder(ndim=1)
    with self.assertRaises(ValueError):
      activations.softmax(x)

  def test_softmax_2d_axis0(self):
    """softmax(axis=0) normalizes down columns."""
    x = backend.placeholder(ndim=2)
    f = backend.function([x], [activations.softmax(x, axis=0)])
    test_values = np.random.random((2, 5))
    result = f([test_values])[0]
    expected = np.zeros((2, 5))
    for i in range(5):
      expected[:, i] = _ref_softmax(test_values[:, i])
    self.assertAllClose(result, expected, rtol=1e-05)

  def test_temporal_softmax(self):
    """Softmax on a 3-D (batch, time, features) tensor acts on the last axis."""
    x = backend.placeholder(shape=(2, 2, 3))
    f = backend.function([x], [activations.softmax(x)])
    test_values = np.random.random((2, 2, 3)) * 10
    result = f([test_values])[0]
    expected = _ref_softmax(test_values[0, 0])
    self.assertAllClose(result[0, 0], expected, rtol=1e-05)

  def test_selu(self):
    """SELU: scale*x for x>0, scale*alpha*(exp(x)-1) for x<0."""
    x = backend.placeholder(ndim=2)
    f = backend.function([x], [activations.selu(x)])
    alpha = 1.6732632423543772848170429916717
    scale = 1.0507009873554804934193349852946
    positive_values = np.array([[1, 2]], dtype=backend.floatx())
    result = f([positive_values])[0]
    self.assertAllClose(result, positive_values * scale, rtol=1e-05)
    negative_values = np.array([[-1, -2]], dtype=backend.floatx())
    result = f([negative_values])[0]
    true_result = (np.exp(negative_values) - 1) * scale * alpha
    self.assertAllClose(result, true_result)

  def test_softplus(self):
    """softplus(x) == log(1 + exp(x))."""
    def softplus(x):
      return np.log(np.ones_like(x) + np.exp(x))

    x = backend.placeholder(ndim=2)
    f = backend.function([x], [activations.softplus(x)])
    test_values = np.random.random((2, 5))
    result = f([test_values])[0]
    expected = softplus(test_values)
    self.assertAllClose(result, expected, rtol=1e-05)

  def test_softsign(self):
    """softsign(x) == x / (1 + |x|)."""
    def softsign(x):
      return np.divide(x, np.ones_like(x) + np.absolute(x))

    x = backend.placeholder(ndim=2)
    f = backend.function([x], [activations.softsign(x)])
    test_values = np.random.random((2, 5))
    result = f([test_values])[0]
    expected = softsign(test_values)
    self.assertAllClose(result, expected, rtol=1e-05)

  def test_sigmoid(self):
    """Sigmoid matches a numerically-stable NumPy reference."""
    def ref_sigmoid(x):
      # Two-branch form avoids overflow for large |x|.
      if x >= 0:
        return 1 / (1 + np.exp(-x))
      else:
        z = np.exp(x)
        return z / (1 + z)
    sigmoid = np.vectorize(ref_sigmoid)

    x = backend.placeholder(ndim=2)
    f = backend.function([x], [activations.sigmoid(x)])
    test_values = np.random.random((2, 5))
    result = f([test_values])[0]
    expected = sigmoid(test_values)
    self.assertAllClose(result, expected, rtol=1e-05)

  def test_hard_sigmoid(self):
    """hard_sigmoid: piecewise-linear clip of 0.2*x + 0.5 to [0, 1]."""
    def ref_hard_sigmoid(x):
      x = (x * 0.2) + 0.5
      z = 0.0 if x <= 0 else (1.0 if x >= 1 else x)
      return z
    hard_sigmoid = np.vectorize(ref_hard_sigmoid)

    x = backend.placeholder(ndim=2)
    f = backend.function([x], [activations.hard_sigmoid(x)])
    test_values = np.random.random((2, 5))
    result = f([test_values])[0]
    expected = hard_sigmoid(test_values)
    self.assertAllClose(result, expected, rtol=1e-05)

  def test_relu(self):
    """ReLU passes positives through and zeroes negatives."""
    x = backend.placeholder(ndim=2)
    f = backend.function([x], [activations.relu(x)])
    positive_values = np.random.random((2, 5))
    result = f([positive_values])[0]
    self.assertAllClose(result, positive_values, rtol=1e-05)
    negative_values = np.random.uniform(-1, 0, (2, 5))
    result = f([negative_values])[0]
    expected = np.zeros((2, 5))
    self.assertAllClose(result, expected, rtol=1e-05)

  def test_gelu(self):
    """GELU matches the exact (CDF) and tanh-approximate references."""
    def gelu(x, approximate=False):
      if approximate:
        return 0.5 * x * (1.0 + np.tanh(
            np.sqrt(2.0 / np.pi) * (x + 0.044715 * np.power(x, 3))))
      else:
        from scipy.stats import norm  # pylint: disable=g-import-not-at-top
        return x * norm.cdf(x)

    x = backend.placeholder(ndim=2)
    f = backend.function([x], [activations.gelu(x)])
    test_values = np.random.random((2, 5))
    result = f([test_values])[0]
    expected = gelu(test_values)
    self.assertAllClose(result, expected, rtol=1e-05)
    f = backend.function([x], [activations.gelu(x, True)])
    test_values = np.random.random((2, 5))
    result = f([test_values])[0]
    expected = gelu(test_values, True)
    self.assertAllClose(result, expected, rtol=1e-05)

  def test_elu(self):
    """ELU with alpha=0.5: identity for x>0, 0.5*(exp(x)-1) for x<0."""
    x = backend.placeholder(ndim=2)
    f = backend.function([x], [activations.elu(x, 0.5)])
    test_values = np.random.random((2, 5))
    result = f([test_values])[0]
    self.assertAllClose(result, test_values, rtol=1e-05)
    negative_values = np.array([[-1, -2]], dtype=backend.floatx())
    result = f([negative_values])[0]
    true_result = (np.exp(negative_values) - 1) / 2
    self.assertAllClose(result, true_result)

  def test_tanh(self):
    """tanh matches np.tanh."""
    test_values = np.random.random((2, 5))
    x = backend.placeholder(ndim=2)
    exp = activations.tanh(x)
    f = backend.function([x], [exp])
    result = f([test_values])[0]
    expected = np.tanh(test_values)
    self.assertAllClose(result, expected, rtol=1e-05)

  def test_exponential(self):
    """exponential matches np.exp."""
    test_values = np.random.random((2, 5))
    x = backend.placeholder(ndim=2)
    exp = activations.exponential(x)
    f = backend.function([x], [exp])
    result = f([test_values])[0]
    expected = np.exp(test_values)
    self.assertAllClose(result, expected, rtol=1e-05)

  def test_linear(self):
    """linear is the identity function."""
    x = np.random.random((10, 5))
    self.assertAllClose(x, activations.linear(x))

  def test_invalid_usage(self):
    """Unknown names raise; layer instances are accepted (with a warning)."""
    with self.assertRaises(ValueError):
      activations.get('unknown')
    # The following should be possible but should raise a warning:
    activations.get(advanced_activations.LeakyReLU())
if __name__ == '__main__':
  # Delegate to the TensorFlow test runner.
  test.main()
|
<filename>scripts/generate_farfield_scattering_movie.py
# Sweep one simulation parameter (zOffset) across frames, run the MATLAB
# far-field scattering computation per frame, plot transmitted/reflected
# fields in 2D and 3D, and stitch the frames into .mp4 movies.
import sys, os, time
from tqdm import tqdm
import numpy as np
import matlab.engine
import ott_plotting
ott_plotting.update_base_plotting_directory(\
        '/home/cblakemore/plots/ott_farfield/movies')
base_data_path = '../raw_data/movies/zsweep_test'
### Pack up all the simulation parameters into a dictionary with string keys
### that match the expected MATLAB variable names. This is the only reasonably
### compact way of passing keyword arguments via the MATLAB/Python engine API
###
### REMEMBER: Cartesian offsets are in SI base units (meters) and correspond
### to the position of the BEAM relative to the microsphere/scatterer.
### e.g. zOffset = +10um corresponds to a microsphere BELOW the focus
simulation_parameters = {
        'datapath': base_data_path, \
        'radius': 3.76e-6, \
        'n_particle': 1.39, \
        'n_medium': 1.00, \
        'wavelength': 1064.0e-9, \
        'NA': 0.095, \
        'xOffset': 0.0e-6, \
        'yOffset': 0.0e-6, \
        'zOffset': 0.0e-6, \
        'halfCone': float(np.pi/6), \
        'ntheta': 301, \
        'nphi': 151, \
        'polarisation': 'X', \
        'Nmax': 200, \
        'resimulate': True
      }
plot_parameters = {
        'beam': 'tot', \
        'rmax': 0.006, \
        'save': True, \
        'show': False, \
        'plot_2D': True, \
        'plot_3D': True, \
        'view_elev': -40.0, \
        'view_azim': 20.0, \
        'max_radiance_trans': 25.0, \
        'max_radiance_refl': 0.12, \
        'unwrap_phase': True, \
        'manual_phase_plot_lims': (-2.0*np.pi, 4.0*np.pi), \
        'label_position': True, \
        'verbose': True
      }
video_frame_rate = 10
nframes = 101
param_to_sweep = 'zOffset'
# param_array = np.linspace(0.0, -100.0, 101)
# Sweep values in microns, reversed so the movie runs from -50um to 0.
param_array = np.linspace(0.0, -50.0, nframes)[::-1]
param_scale = 1e-6  # convert the sweep values to SI meters
save_suffix = '_um'
# save_suffix = ''
movie_name = 'zsweep_0-50um'
##########################################################################
##########################################################################
##########################################################################
# NOTE(review): this initial value is immediately overwritten by the loop
# variable below; only the in-loop `param_ind += 1` (1-based frame numbers)
# has any effect, and even that is clobbered at the next iteration.
param_ind = 0
for param_ind in tqdm(range(len(param_array))):
    ### Get the current value of the swept parameter
    param_val = param_array[param_ind]
    param_str = f'{param_to_sweep}_{int(param_val):d}{save_suffix}'
    sys.stdout.flush()
    param_ind += 1  # shift to 1-based frame numbering for the filenames
    ### Adjust the savepath to included information about the current
    ### parameter values
    simulation_parameters['datapath'] \
            = os.path.join(base_data_path, param_str)
    simulation_parameters[param_to_sweep] \
            = float(param_scale*param_val)
    ### Build the MATLAB formatted argument list from the dictionary
    ### defined at the top of this script
    arglist = [[key, simulation_parameters[key]] \
               for key in simulation_parameters.keys()]
    ### Start the MATLAB engine and run the computation
    # NOTE(review): a fresh engine is started for every frame -- expensive;
    # consider hoisting start_matlab() out of the loop if state allows.
    engine = matlab.engine.start_matlab()
    engine.addpath('../lib', nargout=0)
    matlab_datapath \
        = engine.compute_far_field(\
            *[arg for argtup in arglist for arg in argtup], \
            nargout=1, background=False)
    ### Load the data that MATLAB computed and saved, handling the
    ### transmitted and reflected cases separately since they may
    ### propagate through distinct optical systems
    theta_grid_trans, r_grid_trans, efield_trans\
            = ott_plotting.load_farfield_data(\
                matlab_datapath, transmitted=True,\
                beam=plot_parameters['beam'])
    theta_grid_refl, r_grid_refl, efield_refl\
            = ott_plotting.load_farfield_data(\
                matlab_datapath, transmitted=False,\
                beam=plot_parameters['beam'])
    ray_tracing = ott_plotting.get_simple_ray_tracing_matrix()
    ### Plot everything!
    figname_trans = os.path.join(movie_name, 'trans', f'frame_{param_ind:04d}.png')
    ott_plotting.plot_2D_farfield(
        theta_grid_trans, r_grid_trans, efield_trans, simulation_parameters, \
        transmitted=True, ray_tracing_matrix=ray_tracing, \
        **{**plot_parameters, 'figname': figname_trans})
    figname_refl = os.path.join(movie_name, 'refl', f'frame_{param_ind:04d}.png')
    ott_plotting.plot_2D_farfield(
        theta_grid_refl, r_grid_refl, efield_refl, simulation_parameters, \
        transmitted=False, ray_tracing_matrix=ray_tracing, \
        **{**plot_parameters, 'figname': figname_refl})
    figname_trans_3D = os.path.join(movie_name, 'trans_3d', \
                                    f'frame_{param_ind:04d}.png')
    ott_plotting.plot_3D_farfield(
        theta_grid_trans, r_grid_trans, efield_trans, simulation_parameters, \
        transmitted=True, ray_tracing_matrix=ray_tracing, \
        **{**plot_parameters, 'figname': figname_trans_3D})
    figname_refl_3D = os.path.join(movie_name, 'refl_3d', \
                                   f'frame_{param_ind:04d}.png')
    ott_plotting.plot_3D_farfield(
        theta_grid_refl, r_grid_refl, efield_refl, simulation_parameters, \
        transmitted=False, ray_tracing_matrix=ray_tracing, \
        **{**plot_parameters, 'figname': figname_refl_3D})
print()
print('Converting .png frames to .mp4 video...')
# One movie per plot family (2D/3D x transmitted/reflected).
for movie_type in ['trans', 'refl', 'trans_3d', 'refl_3d']:
    ott_plotting.make_movie(os.path.join(movie_name, movie_type), \
                            framerate=video_frame_rate)
|
<reponame>CodingNowNow/jiaoyi<filename>testdata.py
#!/usr/bin/python
# -*- coding: utf8 -*-
# cp936
import tables
class FiveMinDataRecordH5File(tables.IsDescription):
    """PyTables row schema for one OHLC bar (prices stored as integers)."""
    datetime = tables.UInt64Col()  # bar timestamp, encoded as an integer #IGNORE:E1101
    openPrice = tables.UInt32Col() #IGNORE:E1101
    highPrice = tables.UInt32Col() #IGNORE:E1101
    lowPrice = tables.UInt32Col()  #IGNORE:E1101
    closePrice = tables.UInt32Col() #IGNORE:E1101
    transAmount = tables.UInt64Col()  # traded amount #IGNORE:E1101
    transCount = tables.UInt64Col()   # trade count #IGNORE:E1101
class IndexDataRecordH5File(tables.IsDescription):
    """PyTables row schema for an index table: a timestamp plus a start offset."""
    datetime = tables.UInt64Col() #IGNORE:E1101
    start = tables.UInt64Col()    # row offset into the data table #IGNORE:E1101
def fenge(src_file_name, dest_file_name):
src_hdf5 = tables.openFile(src_file_name, mode='r', filters=tables.Filters(complevel=9,complib='zlib', shuffle=True))
dest_hdf5 = tables.openFile(dest_file_name, mode = "w", filters=tables.Filters(complevel=9,complib='zlib', shuffle=True))
all_table = [x for x in src_hdf5.walkNodes("/data")]
for i in range(1,len(all_table)):
src_table = all_table[i]
print src_table.name
dest_table = dest_hdf5.createTable("/", src_table.name, FiveMinDataRecordH5File, src_table.name)
dest_row = dest_table.row
for x in src_table:
if x['lowPrice'] <= x['openPrice'] <=x['highPrice'] \
and x['lowPrice'] <= x['closePrice'] <= x['highPrice']:
dest_row['datetime'] = x['datetime']
dest_row['openPrice'] = x['openPrice']
dest_row['highPrice'] = x['highPrice']
dest_row['lowPrice'] = x['lowPrice']
dest_row['closePrice'] = x['closePrice']
dest_row['transAmount'] = x['transAmount']
dest_row['transCount'] = x['transCount']
dest_row.append()
dest_table.flush()
src_hdf5.close()
dest_hdf5.close()
def fenge2(src_file_name, dest_file_name, table_name_list, lastdate):
src_hdf5 = tables.openFile(src_file_name, mode='r', filters=tables.Filters(complevel=9,complib='zlib', shuffle=True))
dest_hdf5 = tables.openFile(dest_file_name, mode = "w", filters=tables.Filters(complevel=9,complib='zlib', shuffle=True))
for src_table_name in table_name_list:
print src_table_name
src_table = src_hdf5.getNode("/data", src_table_name)
try:
group = dest_hdf5.getNode("/", "data")
except:
group = dest_hdf5.createGroup("/", "data")
dest_table = dest_hdf5.createTable(group, src_table.name, FiveMinDataRecordH5File, src_table.name)
dest_row = dest_table.row
for x in src_table:
if x['datetime'] > lastdate:
break
if x['lowPrice'] <= x['openPrice'] <=x['highPrice'] \
and x['lowPrice'] <= x['closePrice'] <= x['highPrice']:
dest_row['datetime'] = x['datetime']
dest_row['openPrice'] = x['openPrice']
dest_row['highPrice'] = x['highPrice']
dest_row['lowPrice'] = x['lowPrice']
dest_row['closePrice'] = x['closePrice']
dest_row['transAmount'] = x['transAmount']
dest_row['transCount'] = x['transCount']
dest_row.append()
else:
print x['datetime']
dest_table.flush()
src_hdf5.close()
dest_hdf5.close()
def fenge3(src_file_name, dest_path):
src_hdf5 = tables.openFile(src_file_name, mode='r', filters=tables.Filters(complevel=9,complib='zlib', shuffle=True))
all_table = [x for x in src_hdf5.walkNodes("/data")]
for i in range(1,len(all_table)):
src_table = all_table[i]
print src_table.name
dest_file_name = dest_path + "/" + src_table.name[2:] + ".h5"
dest_hdf5 = tables.openFile(dest_file_name, mode = "w", filters=tables.Filters(complevel=9,complib='zlib', shuffle=True))
dest_table = dest_hdf5.createTable("/", "data", FiveMinDataRecordH5File, "data")
dest_row = dest_table.row
for x in src_table:
if x['lowPrice'] <= x['openPrice'] <=x['highPrice'] \
and x['lowPrice'] <= x['closePrice'] <= x['highPrice']:
dest_row['datetime'] = x['datetime']
dest_row['openPrice'] = x['openPrice']
dest_row['highPrice'] = x['highPrice']
dest_row['lowPrice'] = x['lowPrice']
dest_row['closePrice'] = x['closePrice']
dest_row['transAmount'] = x['transAmount']
dest_row['transCount'] = x['transCount']
dest_row.append()
dest_table.flush()
dest_hdf5.close()
src_hdf5.close()
def fenge4(src_file_name, dest_file_name, table_name_list, groupname):
src_hdf5 = tables.openFile(src_file_name, mode='r', filters=tables.Filters(complevel=9,complib='zlib', shuffle=True))
dest_hdf5 = tables.openFile(dest_file_name, mode = "w", filters=tables.Filters(complevel=9,complib='zlib', shuffle=True))
for src_table_name in table_name_list:
print src_table_name
src_table = src_hdf5.getNode(groupname, src_table_name)
dest_table = dest_hdf5.createTable(groupname, src_table.name, IndexDataRecordH5File, src_table.name)
dest_row = dest_table.row
for x in src_table:
dest_row['datetime'] = x['datetime']
dest_row['start'] = x['start']
dest_row.append()
dest_table.flush()
src_hdf5.close()
dest_hdf5.close()
def fenge5(src_file_name, dest_file_name):
src_hdf5 = tables.openFile(src_file_name, mode='r', filters=tables.Filters(complevel=9,complib='zlib', shuffle=True))
dest_hdf5 = tables.openFile(dest_file_name, mode = "w", filters=tables.Filters(complevel=9,complib='zlib', shuffle=True))
all_table = [x for x in src_hdf5.walkNodes("/data")]
for i in range(1,len(all_table)):
src_table = all_table[i]
print src_table.name
#src_table = src_hdf5.getNode("/data", src_table_name)
try:
group = dest_hdf5.getNode("/", "data")
except:
group = dest_hdf5.createGroup("/", "data")
dest_table = dest_hdf5.createTable(group, src_table.name, FiveMinDataRecordH5File, src_table.name)
dest_row = dest_table.row
for x in src_table:
if x['lowPrice'] <= x['openPrice'] <=x['highPrice'] \
and x['lowPrice'] <= x['closePrice'] <= x['highPrice']:
dest_row['datetime'] = x['datetime']
dest_row['openPrice'] = x['openPrice']
dest_row['highPrice'] = x['highPrice']
dest_row['lowPrice'] = x['lowPrice']
dest_row['closePrice'] = x['closePrice']
dest_row['transAmount'] = x['transAmount']
dest_row['transCount'] = x['transCount']
dest_row.append()
else:
print x['datetime']
dest_table.flush()
src_hdf5.close()
dest_hdf5.close()
if __name__ == "__main__":
    # Ad-hoc driver: split the monolithic per-market HDF5 files into one
    # file per stock, then report elapsed wall-clock time.
    import time
    starttime = time.time()
    # NOTE: the block below is "commented out" via a bare string literal;
    # kept unchanged. (Its fenge2 calls are missing the lastdate argument.)
    """
    src_file_name = "d:\\stock\\sh_day.h5"
    dest_file_name = "d:\\workspace\\hikyuu\\test\\data\\sh_day.h5"
    src_table_name_list = ["SH000001","SH600000","SH600004","SH600005"]
    fenge2(src_file_name, dest_file_name, src_table_name_list)
    src_file_name = "d:\\stock\\sz_day.h5"
    dest_file_name = "d:\\workspace\\hikyuu\\test\\data\\sz_day.h5"
    src_table_name_list = ["SZ000001","SZ000002","SZ000004","SZ000005"]
    fenge2(src_file_name, dest_file_name, src_table_name_list)
    src_file_name = "d:\\stock\\sh_5min.h5"
    dest_file_name = "d:\\workspace\\hikyuu\\test\\data\\sh_5min.h5"
    src_table_name_list = ["SH000001","SH600000","SH600004","SH600005"]
    fenge2(src_file_name, dest_file_name, src_table_name_list)
    src_file_name = "d:\\stock\\sz_5min.h5"
    dest_file_name = "d:\\workspace\\hikyuu\\test\\data\\sz_5min.h5"
    src_table_name_list = ["SZ000001","SZ000002","SZ000004","SZ000005"]
    fenge2(src_file_name, dest_file_name, src_table_name_list)
    src_file_name = "d:\\stock\\sh_1min.h5"
    dest_file_name = "d:\\workspace\\hikyuu\\test\\data\\sh_1min.h5"
    src_table_name_list = ["SH000001","SH600000","SH600004","SH600005"]
    fenge2(src_file_name, dest_file_name, src_table_name_list)
    src_file_name = "d:\\stock\\sz_1min.h5"
    dest_file_name = "d:\\workspace\\hikyuu\\test\\data\\sz_1min.h5"
    src_table_name_list = ["SZ000001","SZ000002","SZ000004","SZ000005"]
    fenge2(src_file_name, dest_file_name, src_table_name_list)
    """
    # Split Shanghai 1-minute, Shenzhen 5-minute and Shenzhen 1-minute data.
    src_file_name = "d:\\stock\\sh_1min.h5"
    dest_path = "d:\\stock\\sh\\1min"
    fenge3(src_file_name, dest_path)
    src_file_name = "d:\\stock\\sz_5min.h5"
    dest_path = "d:\\stock\\sz\\5min"
    fenge3(src_file_name, dest_path)
    src_file_name = "d:\\stock\\sz_1min.h5"
    dest_path = "d:\\stock\\sz\\1min"
    fenge3(src_file_name, dest_path)
    endtime = time.time()
    # Elapsed time in seconds and minutes.
    print "%.2fs" % (endtime-starttime)
    print "%.2fm" % ((endtime-starttime)/60)
# ---------------------------------------------------------------------------
import pathlib

# MNE-BIDS-pipeline configuration: Cam-CAN resting-state MEG for the
# age-prediction benchmark.
study_name = "age-prediction-benchmark"

# Input/output locations (cluster storage paths).
bids_root = pathlib.Path(
    '/storage/store/data/camcan/BIDSsep/rest')
deriv_root = pathlib.Path('/storage/store3/derivatives/camcan-bids/derivatives')
subjects_dir = pathlib.Path('/storage/store/data/camcan-mne/freesurfer')

# Source-space settings.
source_info_path_update = {'processing': 'autoreject',
                           'suffix': 'epo'}
inverse_targets = []
noise_cov = 'ad-hoc'

# Data selection.
task = 'rest'
sessions = ['rest']  # keep empty for code flow
data_type = 'meg'
ch_types = ['meg']
# The 102 magnetometers of the Elekta/Neuromag system.
analyze_channels = [
    'MEG0111', 'MEG0121', 'MEG0131', 'MEG0141', 'MEG0211',
    'MEG0221', 'MEG0231', 'MEG0241', 'MEG0311', 'MEG0321', 'MEG0331',
    'MEG0341', 'MEG0411', 'MEG0421', 'MEG0431', 'MEG0441', 'MEG0511',
    'MEG0521', 'MEG0531', 'MEG0541', 'MEG0611', 'MEG0621', 'MEG0631',
    'MEG0641', 'MEG0711', 'MEG0721', 'MEG0731', 'MEG0741', 'MEG0811',
    'MEG0821', 'MEG0911', 'MEG0921', 'MEG0931', 'MEG0941', 'MEG1011',
    'MEG1021', 'MEG1031', 'MEG1041', 'MEG1111', 'MEG1121', 'MEG1131',
    'MEG1141', 'MEG1211', 'MEG1221', 'MEG1231', 'MEG1241', 'MEG1311',
    'MEG1321', 'MEG1331', 'MEG1341', 'MEG1411', 'MEG1421', 'MEG1431',
    'MEG1441', 'MEG1511', 'MEG1521', 'MEG1531', 'MEG1541', 'MEG1611',
    'MEG1621', 'MEG1631', 'MEG1641', 'MEG1711', 'MEG1721', 'MEG1731',
    'MEG1741', 'MEG1811', 'MEG1821', 'MEG1831', 'MEG1841', 'MEG1911',
    'MEG1921', 'MEG1931', 'MEG1941', 'MEG2011', 'MEG2021', 'MEG2031',
    'MEG2041', 'MEG2111', 'MEG2121', 'MEG2131', 'MEG2141', 'MEG2211',
    'MEG2221', 'MEG2231', 'MEG2241', 'MEG2311', 'MEG2321', 'MEG2331',
    'MEG2341', 'MEG2411', 'MEG2421', 'MEG2431', 'MEG2441', 'MEG2511',
    'MEG2521', 'MEG2531', 'MEG2541', 'MEG2611', 'MEG2621', 'MEG2631',
    'MEG2641'
]

# Filtering and epoching.
l_freq = 0.1
h_freq = 49
eeg_reference = []
eog_channels = []
find_breaks = False
n_proj_eog = 1
reject = None
on_rename_missing_events = "warn"
decim = 5  # Cam-CAN has 1000 Hz; Cuban Human Brain Project 200Hz
mf_st_duration = 10.
# XXX the values below differ from our previous papers but would be in line
# with the other EEG data used in this benchmark
epochs_tmin = 0.
epochs_tmax = 10.
rest_epochs_overlap = 0.
rest_epochs_duration = 10.
baseline = None

# Maxwell filtering calibration files.
mf_cal_fname = '/storage/store/data/camcan-mne/Cam-CAN_sss_cal.dat'
mf_ctc_fname = '/storage/store/data/camcan-mne/Cam-CAN_ct_sparse.fif'
find_flat_channels_meg = True
find_noisy_channels_meg = True
use_maxwell_filter = True

# Source estimation.
run_source_estimation = True
use_template_mri = "fsaverage_small"
adjust_coreg = True

# Misc pipeline behaviour.
event_repeated = "drop"
l_trans_bandwidth = "auto"
h_trans_bandwidth = "auto"
random_state = 42
shortest_event = 1
log_level = "info"
mne_log_level = "error"
# on_error = 'continue'
on_error = "continue"
# NOTE: the original assigned N_JOBS twice (30, then 40); the later value
# was the effective one, so keep a single assignment of 40.
N_JOBS = 40
subjects = ['CC110033']
# ---------------------------------------------------------------------------
from __future__ import print_function
import inspect
import os
import subprocess
import sys
from pprint import pprint
import pytest
from hunter import Backlog
from hunter import CallPrinter
from hunter import CodePrinter
from hunter import Debugger
from hunter import ErrorSnooper
from hunter import Q
from hunter import StackPrinter
from hunter import Tracer
from hunter import VarsPrinter
from hunter import VarsSnooper
from hunter import When
from hunter import trace
from hunter import wrap
from utils import DebugCallPrinter
try:
from cStringIO import StringIO
except ImportError:
from io import StringIO
pytest_plugins = 'pytester',
def _get_func_spec(func):
if hasattr(inspect, 'signature'):
return str(inspect.signature(func))
if hasattr(inspect, 'getfullargspec'):
spec = inspect.getfullargspec(func)
else:
spec = inspect.getargspec(func)
return inspect.formatargspec(spec.args, spec.varargs)
def test_pth_activation():
    """Check that the .pth hook activates hunter from PYTHONHUNTER in a
    child interpreter and traces ``os.path.join``."""
    target_module = os.path.__name__
    env_filter = 'action=CodePrinter,module={!r},function="join"'.format(target_module)
    join_spec = _get_func_spec(os.path.join)
    sample_script = os.path.join(os.path.dirname(__file__), 'sample.py')
    result = subprocess.check_output(
        [sys.executable, sample_script],
        env=dict(os.environ, PYTHONHUNTER=env_filter),
        stderr=subprocess.STDOUT,
    )
    # The traced module and the call line for join must both appear.
    assert '{0}.py'.format(target_module).encode() in result
    assert 'call def join{0}:'.format(join_spec).encode() in result
def test_pth_sample4():
    """A plain ``CodePrinter`` filter in PYTHONHUNTER should make the child
    interpreter emit some trace output for sample4.py."""
    child_env = dict(os.environ, PYTHONHUNTER='CodePrinter')
    # Coverage hooks interfere with the .pth tracing machinery in the child.
    for cov_var in ('COVERAGE_PROCESS_START', 'COV_CORE_SOURCE'):
        child_env.pop(cov_var, None)
    sample_script = os.path.join(os.path.dirname(__file__), 'sample4.py')
    result = subprocess.check_output(
        [sys.executable, sample_script],
        env=child_env,
        stderr=subprocess.STDOUT,
    )
    assert result
def test_pth_sample2(LineMatcher):
    """End-to-end .pth activation test: run sample2.py in a child with a
    ``module='__main__'`` filter and match the full expected trace of its
    decorator machinery (exact patterns below are the fixture)."""
    env = dict(os.environ, PYTHONHUNTER="module='__main__',action=CodePrinter")
    # Coverage hooks interfere with the .pth tracing machinery in the child.
    env.pop('COVERAGE_PROCESS_START', None)
    env.pop('COV_CORE_SOURCE', None)
    output = subprocess.check_output(
        [sys.executable, os.path.join(os.path.dirname(__file__), 'sample2.py')],
        env=env,
        stderr=subprocess.STDOUT,
    )
    lm = LineMatcher(output.decode('utf-8').splitlines())
    lm.fnmatch_lines([
        '*tests*sample2.py:* call if __name__ == "__main__": #*',
        '*tests*sample2.py:* line if __name__ == "__main__": #*',
        '*tests*sample2.py:* line import functools',
        '*tests*sample2.py:* line def deco(opt):',
        '*tests*sample2.py:* line @deco(1)',
        '*tests*sample2.py:* call def deco(opt):',
        '*tests*sample2.py:* line def decorator(func):',
        '*tests*sample2.py:* line return decorator',
        '*tests*sample2.py:* return return decorator',
        '* * ... return value: <function deco*',
        '*tests*sample2.py:* line @deco(2)',
        '*tests*sample2.py:* call def deco(opt):',
        '*tests*sample2.py:* line def decorator(func):',
        '*tests*sample2.py:* line return decorator',
        '*tests*sample2.py:* return return decorator',
        '* * ... return value: <function deco*',
        '*tests*sample2.py:* line @deco(3)',
        '*tests*sample2.py:* call def deco(opt):',
        '*tests*sample2.py:* line def decorator(func):',
        '*tests*sample2.py:* line return decorator',
        '*tests*sample2.py:* return return decorator',
        '* * ... return value: <function deco*',
        '*tests*sample2.py:* call def decorator(func):',
        '*tests*sample2.py:* line @functools.wraps(func)',
        '*tests*sample2.py:* line return wrapper',
        '*tests*sample2.py:* return return wrapper',
        '* * ... return value: <function foo *',
        '*tests*sample2.py:* call def decorator(func):',
        '*tests*sample2.py:* line @functools.wraps(func)',
        '*tests*sample2.py:* line return wrapper',
        '*tests*sample2.py:* return return wrapper',
        '* * ... return value: <function foo *',
        '*tests*sample2.py:* call def decorator(func):',
        '*tests*sample2.py:* line @functools.wraps(func)',
        '*tests*sample2.py:* line return wrapper',
        '*tests*sample2.py:* return return wrapper',
        '* * ... return value: <function foo *',
        '*tests*sample2.py:* line foo(',
        "*tests*sample2.py:* line 'a*',",
        "*tests*sample2.py:* line 'b'",
        '*tests*sample2.py:* call @functools.wraps(func)',
        '* * [*] def wrapper(*args):',
        '*tests*sample2.py:* line return func(*args)',
        '*tests*sample2.py:* call @functools.wraps(func)',
        '* * [*] def wrapper(*args):',
        '*tests*sample2.py:* line return func(*args)',
        '*tests*sample2.py:* call @functools.wraps(func)',
        '* * [*] def wrapper(*args):',
        '*tests*sample2.py:* line return func(*args)',
        '*tests*sample2.py:* call @deco(1)',
        '* * | @deco(2)',
        '* * | @deco(3)',
        '* * [*] def foo(*args):',
        '*tests*sample2.py:* line return args',
        '*tests*sample2.py:* return return args',
        "* * ... return value: ('a*', 'b')",
        "*tests*sample2.py:* return return func(*args)",
        "* * ... return value: ('a*', 'b')",
        "*tests*sample2.py:* return return func(*args)",
        "* * ... return value: ('a*', 'b')",
        "*tests*sample2.py:* return return func(*args)",
        "* * ... return value: ('a*', 'b')",
        "*tests*sample2.py:* line try:",
        "*tests*sample2.py:* line None(",
        "*tests*sample2.py:* line 'a',",
        "*tests*sample2.py:* line 'b'",
        "*tests*sample2.py:* exception *",
        "* * ... exception value: *",
        "*tests*sample2.py:* line except:",
        "*tests*sample2.py:* line pass",
        "*tests*sample2.py:* return pass",
        "* ... return value: None",
    ])
def test_tracing_bare(LineMatcher):
    """Trace a trivial nested function with CodePrinter and check the
    call/line/return events for ``a`` appear in the captured stream.
    NOTE: the traced code below is the fixture -- its exact source lines
    are matched, so it must not be edited."""
    lines = StringIO()
    with trace(CodePrinter(stream=lines)):
        def a():
            return 1
        b = a()
        b = 2
        try:
            raise Exception('BOOM!')
        except Exception:
            pass
    print(lines.getvalue())
    lm = LineMatcher(lines.getvalue().splitlines())
    lm.fnmatch_lines([
        "*test_*.py* call def a():",
        "*test_*.py* line return 1",
        "*test_*.py* return return 1",
        "* ... return value: 1",
    ])
@pytest.mark.parametrize('module', ['sys', 'builtins'])
def test_profile_mode(LineMatcher, module):
    """With ``profile=True`` the tracer also reports C-function events;
    check that calls into ``sys.getsizeof`` and ``builtins.getattr`` show
    up as call/return pairs. The traced nested function is the fixture."""
    lines = StringIO()
    with trace(profile=True, action=CallPrinter(stream=lines)):
        def a():
            foo = 1
            sys.getsizeof(foo, 2)
            return getattr(a, 'b', foo)
        a()
    print(lines.getvalue())
    lm = LineMatcher(lines.getvalue().splitlines())
    if module == 'sys':
        lm.fnmatch_lines([
            '*test_integration.py:* call * > sys.getsizeof: *',
            '*test_integration.py:* return * < sys.getsizeof',
        ])
    else:
        lm.fnmatch_lines([
            "*test_integration.py:* call * > *builtin*.getattr: *",
            '*test_integration.py:* return * < *builtin*.getattr',
        ])
def test_tracing_reinstall(LineMatcher):
    """Hunter must survive user code re-installing the current trace
    function via ``sys.settrace(sys.gettrace())`` mid-trace. The traced
    nested functions are the fixture -- do not edit them."""
    lines = StringIO()
    with trace(CodePrinter(stream=lines)):
        def foo():
            a = 2
            sys.settrace(sys.gettrace())
            a = 3
        def bar():
            a = 1
            foo()
            a = 4
        bar()
    print(lines.getvalue())
    lm = LineMatcher(lines.getvalue().splitlines())
    lm.fnmatch_lines([
        "*test_*.py:* call def bar():",
        "*test_*.py:* line a = 1",
        "*test_*.py:* line foo()",
        "*test_*.py:* call def foo():",
        "*test_*.py:* line a = 2",
        "*test_*.py:* line sys.settrace(sys.gettrace())",
        "*test_*.py:* line a = 3",
        "*test_*.py:* return a = 3",
        "* ... return value: None",
        "*test_*.py:* line a = 4",
        "*test_*.py:* return a = 4",
        "* ... return value: None",
    ])
def test_tracer_autostop():
    """The tracer should stop itself inside the ``with`` block here
    (NOTE(review): presumably because the bogus predicate fails -- confirm
    against hunter's autostop semantics). Under Cython tracing builds a
    different trace function may legitimately remain installed."""
    with trace(lambda: garbage) as tracer:
        if os.environ.get("SETUPPY_CFLAGS") == "-DCYTHON_TRACE=1":
            assert sys.gettrace() is not tracer
        else:
            assert sys.gettrace() is None
@pytest.mark.skipif(sys.platform == 'win32', reason='no fork on windows')
@pytest.mark.parametrize('Action', [CodePrinter, CallPrinter])
@pytest.mark.parametrize('force_pid', [True, False])
def test_pid_prefix(LineMatcher, Action, force_pid, capfd):
    """Forked-child trace lines must always carry a ``[pid]`` prefix; the
    parent's lines carry one only when ``force_pid`` is set. The traced
    ``main`` below is the fixture -- do not edit it."""
    def main():
        a = 1
        pid = os.fork()
        if pid:
            os.waitpid(pid, 0)
        else:
            os._exit(0)  # child
    with trace(actions=[Action(force_pid=force_pid, stream=sys.stdout),
                        VarsPrinter('a', force_pid=force_pid, stream=sys.stdout)],
               stdlib=False,
               threading_support=True):
        main()
    out, err = capfd.readouterr()
    print('OUT', out)
    print('ERR', err)
    lm = LineMatcher(out.splitlines())
    # fnmatch treats [] as a character class, hence the [[]...[]] escaping.
    prefix = '[[]*[]] *' if force_pid else ''
    lm.fnmatch_lines_random([
        prefix + "MainThread *test_*.py:* line * a = 1",
        prefix + "MainThread *test_*.py:* line * if pid:",
        prefix + "MainThread *test_*.py:* line * [[]a => 1[]]",
        prefix + "MainThread *test_*.py:* line * os.waitpid(pid, 0)",
        "[[]*[]] *MainThread *test_*.py:* line * os._exit(0)  # child",
        "[[]*[]] *MainThread *test_*.py:* line * [[]a => 1[]]",
    ])
def test_debugger(LineMatcher):
    """The ``Debugger`` action must instantiate the configured debugger
    class with its kwargs and call ``set_trace`` when the predicate first
    matches (locals contain node == 'Foobar'). The traced nested ``foo``
    is the fixture -- do not edit it."""
    out = StringIO()
    calls = []
    class FakePDB:
        def __init__(self, foobar=1):
            calls.append(foobar)
        def set_trace(self, frame):
            calls.append(frame.f_code.co_name)
    with trace(
        lambda event: event.locals.get('node') == 'Foobar',
        module=__name__,
        function='foo',
        actions=[CodePrinter,
                 VarsPrinter('a', 'node', 'foo', 'test_debugger', stream=out),
                 Debugger(klass=FakePDB, foobar=2)]
    ):
        def foo():
            a = 1
            node = 'Foobar'
            node += 'x'
            a += 2
            return a
        foo()
    print(out.getvalue())
    # FakePDB was constructed with foobar=2 and dropped into 'foo'.
    assert calls == [2, 'foo']
    lm = LineMatcher(out.getvalue().splitlines())
    pprint(lm.lines)
    lm.fnmatch_lines_random([
        "* [[]test_debugger => <function test_debugger at *[]]",
        "* [[]node => 'Foobar'[]]",
        "* [[]a => 1[]]",
    ])
@pytest.mark.parametrize('depth', [2, 3, 4], ids='depth_lt={}'.format)
def test_depth_limit(LineMatcher, depth):
    """A ``depth_lt`` predicate must cut off trace output below the given
    call depth: functions one/two always appear, three/four/five only when
    the limit allows them."""
    buff = StringIO()
    from sample7 import one
    tracer = Tracer()
    predicate = When(Q(depth_lt=depth), CallPrinter(stream=buff))
    try:
        tracer.trace(predicate)
        one()
    finally:
        # Always uninstall the tracer, even if one() raises.
        tracer.stop()
    output = buff.getvalue()
    lm = LineMatcher(output.splitlines())
    lm.fnmatch_lines([
        "* call => one()",
        "* line for i in range(1): # one",
        "* line two()",
        "* call => two()",
        "* return <= two: None",
        "* line for i in range(1): # one",
        "* return <= one: None",
    ])
    if depth < 3:
        assert 'three' not in output
    if depth < 4:
        assert 'four' not in output
    if depth < 5:
        assert 'five' not in output
@pytest.mark.parametrize('depth', [2, 3, 4], ids='depth_lt={}'.format)
def test_depth_limit_subprocess(LineMatcher, depth):
    """Same depth-limit behaviour as test_depth_limit, but driven through
    the PYTHONHUNTER environment variable in a child interpreter."""
    env_spec = "action=CallPrinter,depth_lt={!r},kind_in=['call','return'],stdlib=0".format(depth + 1)
    sample_script = os.path.join(os.path.dirname(__file__), 'sample7.py')
    raw = subprocess.check_output(
        [sys.executable, sample_script],
        env=dict(os.environ, PYTHONHUNTER=env_spec, COV_CORE_DATAFILE=''),
        stderr=subprocess.STDOUT,
    )
    text = raw.decode('utf8')
    lm = LineMatcher(text.splitlines())
    lm.fnmatch_lines([
        "* call * => one()",
        "* call * => two()",
        "* return * <= two: None",
        "* return * <= one: None",
    ])
    # Functions beyond the depth cutoff must not have been traced at all.
    for name, limit in (('three', 3), ('four', 4), ('five', 5)):
        if depth < limit:
            assert '=> ' + name not in text
def test_varssnooper(LineMatcher):
    """VarsSnooper must report every local binding/rebinding (``:=`` for
    first assignment, ``old => new`` for changes) and clear its stored
    reprs afterwards. The wrapped nested ``a`` is the fixture."""
    lines = StringIO()
    snooper = VarsSnooper(stream=lines)
    @wrap(actions=[snooper, CodePrinter(stream=lines)])
    def a():
        foo = bar = b = 1
        b = 2
        foo = 3
        foo = bar = 4
        return b
    a()
    print(lines.getvalue())
    lm = LineMatcher(lines.getvalue().splitlines())
    lm.fnmatch_lines([
        "*test_*.py* line foo = bar = b = 1",
        "*test_*.py* line [[]b := 1[]]",
        "* * ... [[]bar := 1[]]",
        "* * ... [[]foo := 1[]]",
        "*test_*.py* line b = 2",
        "*test_*.py* line [[]b : 1 => 2[]]",
        "*test_*.py* line foo = 3",
        "*test_*.py* line [[]foo : 1 => 3[]]",
        "*test_*.py* line foo = bar = 4",
        "*test_*.py* line [[]bar : 1 => 4[]]",
        "* * ... [[]foo : 3 => 4[]]",
        "*test_*.py* line return b",
        "*test_*.py* return return b",
        "* * ... return value: 2",
    ])
    # The snooper must not leak state between traced functions.
    assert snooper.stored_reprs == {}
def test_errorsnooper(LineMatcher):
    """ErrorSnooper must dump a backlog + follow-up trace for each function
    that silences an exception (silenced1..4), truncate with 'too many
    lines' when max_events is exceeded, and ignore exceptions that are
    properly re-raised (notsilenced). The pattern list is the fixture."""
    lines = StringIO()
    snooper = ErrorSnooper(stream=lines, max_backlog=50, max_events=100)
    @wrap(actions=[snooper])
    def a():
        from sample8errors import notsilenced
        from sample8errors import silenced1
        from sample8errors import silenced2
        from sample8errors import silenced3
        from sample8errors import silenced4
        silenced1()
        print("Done silenced1")
        silenced2()
        print("Done silenced2")
        silenced3()
        print("Done silenced3")
        silenced4()
        print("Done silenced4")
        try:
            notsilenced()
        except ValueError:
            print("Done not silenced")
    a()
    print(lines.getvalue())
    lm = LineMatcher(lines.getvalue().splitlines())
    lm.fnmatch_lines([
        "*>>>>>>>>>>>>>>>>>>>>>> tracing silenced1 on (*RuntimeError*)",
        "*test_*.py:* line silenced1()",
        "*sample8errors.py:14 call def silenced1():",
        "*sample8errors.py:15 line try:",
        "*sample8errors.py:16 line error()",
        "*sample8errors.py:6 call def error():",
        "*sample8errors.py:7 line raise RuntimeError()",
        "*sample8errors.py:7 exception raise RuntimeError()",
        "* ... exception value: (*RuntimeError*)",
        "*sample8errors.py:7 return raise RuntimeError()",
        "* ... return value: None",
        "*sample8errors.py:16 exception error()",
        "* ... exception value: (*RuntimeError*)",
        "*sample8errors.py:17 line except Exception:",
        "*sample8errors.py:18 line pass",
        "*sample8errors.py:18 return pass",
        "* ... return value: None",
        "*---------------------- function exit",
        "*>>>>>>>>>>>>>>>>>>>>>> tracing silenced2 on (*RuntimeError*)",
        '*test_*.py:* line print("Done silenced1")',
        "*test_*.py:* line silenced2()",
        "*sample8errors.py:21 call def silenced2():",
        "*sample8errors.py:22 line try:",
        "*sample8errors.py:23 line error()",
        "*sample8errors.py:6 call def error():",
        "*sample8errors.py:7 line raise RuntimeError()",
        "*sample8errors.py:7 exception raise RuntimeError()",
        "* ... exception value: (*RuntimeError*)",
        "*sample8errors.py:7 return raise RuntimeError()",
        "* ... return value: None",
        "*sample8errors.py:23 exception error()",
        "* ... exception value: (*RuntimeError*)",
        "*sample8errors.py:24 line except Exception as exc:",
        "*sample8errors.py:25 line log(exc)",
        "*sample8errors.py:10 call def log(msg):",
        "*sample8errors.py:11 return print(msg)",
        "* ... return value: None",
        "*sample8errors.py:26 line for i in range(*):",
        "*sample8errors.py:27 line log(i)",
        "*sample8errors.py:10 call def log(msg):",
        "*sample8errors.py:11 return print(msg)",
        "* ... return value: None",
        "*sample8errors.py:26 line for i in range(*):",
        "*sample8errors.py:27 line log(i)",
        "*sample8errors.py:10 call def log(msg):",
        "*sample8errors.py:11 return print(msg)",
        "* ... return value: None",
        "*sample8errors.py:26 line for i in range(*):",
        "*sample8errors.py:27 line log(i)",
        "*sample8errors.py:10 call def log(msg):",
        "*sample8errors.py:11 return print(msg)",
        "* ... return value: None",
        "*sample8errors.py:26 line for i in range(*):",
        "*---------------------- too many lines",
        "*>>>>>>>>>>>>>>>>>>>>>> tracing silenced3 on (*RuntimeError*)",
        '*test_*.py:* line print("Done silenced2")',
        "*test_*.py:* line silenced3()",
        "*sample8errors.py:31 call def silenced3():",
        "*sample8errors.py:32 line try:",
        "*sample8errors.py:33 line error()",
        "*sample8errors.py:6 call def error():",
        "*sample8errors.py:7 line raise RuntimeError()",
        "*sample8errors.py:7 exception raise RuntimeError()",
        "* ... exception value: (*RuntimeError*)",
        "*sample8errors.py:7 return raise RuntimeError()",
        "* ... return value: None",
        "*sample8errors.py:33 exception error()",
        "* ... exception value: (*RuntimeError*)",
        '*sample8errors.py:35 line return "mwhahaha"',
        '*sample8errors.py:35 return return "mwhahaha"',
        "* ... return value: 'mwhahaha'",
        "*---------------------- function exit",
        "*>>>>>>>>>>>>>>>>>>>>>> tracing silenced4 on (*RuntimeError*)",
        '*test_*.py:* line print("Done silenced3")',
        "*test_*.py:* line silenced4()",
        "*sample8errors.py:38 call def silenced4():",
        "*sample8errors.py:39 line try:",
        "*sample8errors.py:40 line error()",
        "*sample8errors.py:6 call def error():",
        "*sample8errors.py:7 line raise RuntimeError()",
        "*sample8errors.py:7 exception raise RuntimeError()",
        "* ... exception value: (*RuntimeError*)",
        "*sample8errors.py:7 return raise RuntimeError()",
        "* ... return value: None",
        "*sample8errors.py:40 exception error()",
        "* ... exception value: (*RuntimeError*)",
        "*sample8errors.py:41 line except Exception as exc:",
        "*sample8errors.py:42 line logger.info(repr(exc))",
        "*__init__.py:* call def info(self, msg, *args, **kwargs):",
        "*sample8errors.py:42 return logger.info(repr(exc))",
        "* ... return value: None",
        "*---------------------- function exit",
    ])
def test_errorsnooper_fastmode(LineMatcher):
    """Same scenario as test_errorsnooper but with ``max_backlog=0``: only
    events *after* the silenced exception are reported, so the backlog
    lines are absent from the expected patterns."""
    lines = StringIO()
    snooper = ErrorSnooper(stream=lines, max_backlog=0, max_events=100)
    @wrap(actions=[snooper])
    def a():
        from sample8errors import notsilenced
        from sample8errors import silenced1
        from sample8errors import silenced2
        from sample8errors import silenced3
        from sample8errors import silenced4
        silenced1()
        print("Done silenced1")
        silenced2()
        print("Done silenced2")
        silenced3()
        print("Done silenced3")
        silenced4()
        print("Done silenced4")
        try:
            notsilenced()
        except ValueError:
            print("Done not silenced")
    a()
    print(lines.getvalue())
    lm = LineMatcher(lines.getvalue().splitlines())
    lm.fnmatch_lines([
        "*>>>>>>>>>>>>>>>>>>>>>> tracing silenced1 on (*RuntimeError*)",
        "*sample8errors.py:17 line except Exception:",
        "*sample8errors.py:18 line pass",
        "*sample8errors.py:18 return pass",
        "* ... return value: None",
        "*---------------------- function exit",
        "*>>>>>>>>>>>>>>>>>>>>>> tracing silenced2 on (*RuntimeError*)",
        "*sample8errors.py:24 line except Exception as exc:",
        "*sample8errors.py:25 line log(exc)",
        "*sample8errors.py:10 call def log(msg):",
        "*sample8errors.py:11 return print(msg)",
        "* ... return value: None",
        "*sample8errors.py:26 line for i in range(*):",
        "*sample8errors.py:27 line log(i)",
        "*sample8errors.py:10 call def log(msg):",
        "*sample8errors.py:11 return print(msg)",
        "* ... return value: None",
        "*sample8errors.py:26 line for i in range(*):",
        "*sample8errors.py:27 line log(i)",
        "*sample8errors.py:10 call def log(msg):",
        "*sample8errors.py:11 return print(msg)",
        "* ... return value: None",
        "*sample8errors.py:26 line for i in range(*):",
        "*sample8errors.py:27 line log(i)",
        "*sample8errors.py:10 call def log(msg):",
        "*sample8errors.py:11 return print(msg)",
        "* ... return value: None",
        "*sample8errors.py:26 line for i in range(*):",
        "*---------------------- too many lines",
        "*>>>>>>>>>>>>>>>>>>>>>> tracing silenced3 on (*RuntimeError*)",
        '*sample8errors.py:35 line return "mwhahaha"',
        '*sample8errors.py:35 return return "mwhahaha"',
        "* ... return value: 'mwhahaha'",
        "*---------------------- function exit",
        "*>>>>>>>>>>>>>>>>>>>>>> tracing silenced4 on (*RuntimeError*)",
        "*sample8errors.py:41 line except Exception as exc:",
        "*sample8errors.py:42 line logger.info(repr(exc))",
        "*__init__.py:* call def info(self, msg, *args, **kwargs):",
        "*sample8errors.py:42 return logger.info(repr(exc))",
        "* ... return value: None",
        "*---------------------- function exit",
    ])
def test_stack_printer_1(LineMatcher):
    """StackPrinter(limit=1) renders each stack frame with the file's
    basename only (no directory component)."""
    stream = StringIO()
    with trace(Q(function="five", action=StackPrinter(limit=1, stream=stream))):
        from sample7 import one
        one()
    lm = LineMatcher(stream.getvalue().splitlines())
    lm.fnmatch_lines([
        "*sample7.py:??:five <= sample7.py:??:four <= sample7.py:??:three <= sample7.py:??:two <= sample7.py:?:one <= test_integration.py:???:test_stack_printer*",
    ])
def test_stack_printer_2(LineMatcher):
    """StackPrinter(limit=2) keeps one directory level ('tests/') in each
    rendered frame path."""
    stream = StringIO()
    with trace(Q(function="five", action=StackPrinter(limit=2, stream=stream))):
        from sample7 import one
        one()
    lm = LineMatcher(stream.getvalue().splitlines())
    lm.fnmatch_lines([
        "*sample7.py:??:five <= tests/sample7.py:??:four <= tests/sample7.py:??:three <= tests/sample7.py:??:two <= tests/sample7.py:?:one <= tests/test_integration.py:???:test_stack_printer*",
    ])
@pytest.mark.parametrize('stack', [5, 6], ids="stack={}".format)
def test_backlog(LineMatcher, stack):
    """Backlog must replay the buffered events (tagged '[backlog]') leading
    up to the matching 'return i' line, excluding frames filtered out with
    ~Q(function='six'). one() is called twice to prove Backlog keeps no
    storage between runs -- hence the duplicated pattern section."""
    buff = StringIO()
    from sample7args import one
    with trace(
        Backlog(
            fullsource_has='return i', size=19, stack=stack, vars=False, action=DebugCallPrinter(' [' 'backlog' ']', stream=buff)
        ).filter(
            ~Q(function='six')
        ),
        action=DebugCallPrinter(stream=buff)
    ):
        one()
        one()  # make sure Backlog is reusable (doesn't have storage side-effects)
    output = buff.getvalue()
    import re
    print(re.sub(r'([\[\]])', r'[\1]', output))
    # print(output)
    lm = LineMatcher(output.splitlines())
    lm.fnmatch_lines([
        "depth=0 calls=-1 *sample7args.py:* call => one(a=?, b=?, c=?) [[]backlog[]]",
        "depth=1 calls=?? *sample7args.py:* line two() [[]backlog[]]",
        "depth=1 calls=?? *sample7args.py:* call => two(a=?, b=?, c=?) [[]backlog[]]",
        "depth=2 calls=?? *sample7args.py:* line for i in range(1): # two [[]backlog[]]",
        "depth=2 calls=?? *sample7args.py:* line a = b = c[[]'side'[]] = 'effect' [[]backlog[]]",
        "depth=2 calls=?? *sample7args.py:* line three() [[]backlog[]]",
        "depth=2 calls=?? *sample7args.py:* call => three(a=?, b=?, c=?) [[]backlog[]]",
        "depth=3 calls=?? *sample7args.py:* line for i in range(1): # three [[]backlog[]]",
        "depth=3 calls=?? *sample7args.py:* line a = b = c[[]'side'[]] = 'effect' [[]backlog[]]",
        "depth=3 calls=?? *sample7args.py:* line four() [[]backlog[]]",
        "depth=3 calls=?? *sample7args.py:* call => four(a=?, b=?, c=?) [[]backlog[]]",
        "depth=4 calls=?? *sample7args.py:* line for i in range(1): # four [[]backlog[]]",
        "depth=4 calls=?? *sample7args.py:* line a = b = c[[]'side'[]] = 'effect' [[]backlog[]]",
        "depth=4 calls=?? *sample7args.py:* line five() [[]backlog[]]",
        "depth=4 calls=?? *sample7args.py:* call => five(a=?, b=?, c=?) [[]backlog[]]",
        "depth=5 calls=?? *sample7args.py:* line six() [[]backlog[]]",
        "depth=5 calls=?? *sample7args.py:* line six() [[]backlog[]]",
        "depth=5 calls=?? *sample7args.py:* line six() [[]backlog[]]",
        "depth=5 calls=?? *sample7args.py:* line a = b = c[[]'side'[]] = in_five = 'effect' [[]backlog[]]",
        "depth=5 calls=?? *sample7args.py:* line for i in range(1): # five [[]backlog[]]",
        "depth=5 calls=?? *sample7args.py:* line return i # five",
        "depth=4 calls=?? *sample7args.py:* return <= five: 0",
        "depth=0 calls=-1 *sample7args.py:* call => one(a=?, b=?, c=?) [[]backlog[]]",
        "depth=1 calls=?? *sample7args.py:* line two() [[]backlog[]]",
        "depth=1 calls=?? *sample7args.py:* call => two(a=?, b=?, c=?) [[]backlog[]]",
        "depth=2 calls=?? *sample7args.py:* line for i in range(1): # two [[]backlog[]]",
        "depth=2 calls=?? *sample7args.py:* line a = b = c[[]'side'[]] = 'effect' [[]backlog[]]",
        "depth=2 calls=?? *sample7args.py:* line three() [[]backlog[]]",
        "depth=2 calls=?? *sample7args.py:* call => three(a=?, b=?, c=?) [[]backlog[]]",
        "depth=3 calls=?? *sample7args.py:* line for i in range(1): # three [[]backlog[]]",
        "depth=3 calls=?? *sample7args.py:* line a = b = c[[]'side'[]] = 'effect' [[]backlog[]]",
        "depth=3 calls=?? *sample7args.py:* line four() [[]backlog[]]",
        "depth=3 calls=?? *sample7args.py:* call => four(a=?, b=?, c=?) [[]backlog[]]",
        "depth=4 calls=?? *sample7args.py:* line for i in range(1): # four [[]backlog[]]",
        "depth=4 calls=?? *sample7args.py:* line a = b = c[[]'side'[]] = 'effect' [[]backlog[]]",
        "depth=4 calls=?? *sample7args.py:* line five() [[]backlog[]]",
        "depth=4 calls=?? *sample7args.py:* call => five(a=?, b=?, c=?) [[]backlog[]]",
        "depth=5 calls=?? *sample7args.py:* line six() [[]backlog[]]",
        "depth=5 calls=?? *sample7args.py:* line six() [[]backlog[]]",
        "depth=5 calls=?? *sample7args.py:* line six() [[]backlog[]]",
        "depth=5 calls=?? *sample7args.py:* line a = b = c[[]'side'[]] = in_five = 'effect' [[]backlog[]]",
        "depth=5 calls=?? *sample7args.py:* line for i in range(1): # five [[]backlog[]]",
        "depth=5 calls=?? *sample7args.py:* line return i # five",
        "depth=4 calls=?? *sample7args.py:* return <= five: 0",
    ])
def test_backlog_subprocess(LineMatcher):
    """Run sample7args.py in a fresh interpreter and check its traced output.

    The expected lines cover two passes through one()..five(): the first with
    placeholder args/calls counters, the second with concrete values after the
    side effects have mutated ``c``.
    """
    script = os.path.join(os.path.dirname(__file__), 'sample7args.py')
    output = subprocess.check_output(
        [sys.executable, script],
        stderr=subprocess.STDOUT,
        universal_newlines=True,
    )
    import re
    # First print a bracket-escaped copy (convenient for pasting into the
    # expectation list below), then the raw output for debugging.
    print(re.sub(r'([\[\]])', r'[\1]', output))
    print(output)
    matcher = LineMatcher(output.splitlines())
    matcher.fnmatch_lines([
        "depth=0 calls=0 *sample7args.py:4 call => one(a=123, b='234', c={'3': [[]4, '5'[]]}) [[]backlog[]]",
        "depth=1 calls=1 *sample7args.py:5 line for i in range(1):  # one [[]backlog[]]",
        "depth=1 calls=1 *sample7args.py:6 line a = b = c[[]'side'[]] = 'effect' [[]backlog[]]",
        "depth=1 calls=1 *sample7args.py:7 line two() [[]backlog[]]",
        "depth=1 calls=1 *sample7args.py:10 call => two(a=123, b='234', c={'3': [[]4, '5'[]]}) [[]backlog[]]",
        "depth=2 calls=2 *sample7args.py:11 line for i in range(1):  # two [[]backlog[]]",
        "depth=2 calls=2 *sample7args.py:12 line a = b = c[[]'side'[]] = 'effect' [[]backlog[]]",
        "depth=2 calls=2 *sample7args.py:13 line three() [[]backlog[]]",
        "depth=2 calls=2 *sample7args.py:16 call => three(a=123, b='234', c={'3': [[]4, '5'[]]}) [[]backlog[]]",
        "depth=3 calls=3 *sample7args.py:17 line for i in range(1):  # three [[]backlog[]]",
        "depth=3 calls=3 *sample7args.py:18 line a = b = c[[]'side'[]] = 'effect' [[]backlog[]]",
        "depth=3 calls=3 *sample7args.py:19 line four() [[]backlog[]]",
        "depth=3 calls=3 *sample7args.py:22 call => four(a=123, b='234', c={'3': [[]4, '5'[]]}) [[]backlog[]]",
        "depth=4 calls=4 *sample7args.py:23 line for i in range(1):  # four [[]backlog[]]",
        "depth=4 calls=4 *sample7args.py:24 line a = b = c[[]'side'[]] = 'effect' [[]backlog[]]",
        "depth=4 calls=4 *sample7args.py:25 line five() [[]backlog[]]",
        "depth=4 calls=4 *sample7args.py:28 call => five(a=123, b='234', c={'3': [[]4, '5'[]]})",
        "depth=5 calls=5 *sample7args.py:29 line six()",
        "depth=5 calls=6 *sample7args.py:30 line six()",
        "depth=5 calls=7 *sample7args.py:31 line six()",
        "depth=5 calls=8 *sample7args.py:32 line a = b = c[[]'side'[]] = in_five = 'effect'",
        "depth=5 calls=8 *sample7args.py:33 line for i in range(1):  # five",
        "depth=5 calls=8 *sample7args.py:34 line return i  # five",
        "depth=4 calls=8 *sample7args.py:34 return <= five: 0",
        "depth=0 calls=8 *sample7args.py:4 call => one(a=123, b='234', c={*'side': 'effect'*}) [[]backlog[]]",
        "depth=1 calls=9 *sample7args.py:5 line for i in range(1):  # one [[]backlog[]]",
        "depth=1 calls=9 *sample7args.py:6 line a = b = c[[]'side'[]] = 'effect' [[]backlog[]]",
        "depth=1 calls=9 *sample7args.py:7 line two() [[]backlog[]]",
        "depth=1 calls=9 *sample7args.py:10 call => two(a=123, b='234', c={*'side': 'effect'*}) [[]backlog[]]",
        "depth=2 calls=10 *sample7args.py:11 line for i in range(1):  # two [[]backlog[]]",
        "depth=2 calls=10 *sample7args.py:12 line a = b = c[[]'side'[]] = 'effect' [[]backlog[]]",
        "depth=2 calls=10 *sample7args.py:13 line three() [[]backlog[]]",
        "depth=2 calls=10 *sample7args.py:16 call => three(a=123, b='234', c={*'side': 'effect'*}) [[]backlog[]]",
        "depth=3 calls=11 *sample7args.py:17 line for i in range(1):  # three [[]backlog[]]",
        "depth=3 calls=11 *sample7args.py:18 line a = b = c[[]'side'[]] = 'effect' [[]backlog[]]",
        "depth=3 calls=11 *sample7args.py:19 line four() [[]backlog[]]",
        "depth=3 calls=11 *sample7args.py:22 call => four(a=123, b='234', c={*'side': 'effect'*}) [[]backlog[]]",
        "depth=4 calls=12 *sample7args.py:23 line for i in range(1):  # four [[]backlog[]]",
        "depth=4 calls=12 *sample7args.py:24 line a = b = c[[]'side'[]] = 'effect' [[]backlog[]]",
        "depth=4 calls=12 *sample7args.py:25 line five() [[]backlog[]]",
        "depth=4 calls=12 *sample7args.py:28 call => five(a=123, b='234', c={*'side': 'effect'*})",
        "depth=5 calls=13 *sample7args.py:29 line six()",
        "depth=5 calls=14 *sample7args.py:30 line six()",
        "depth=5 calls=15 *sample7args.py:31 line six()",
        "depth=5 calls=16 *sample7args.py:32 line a = b = c[[]'side'[]] = in_five = 'effect'",
        "depth=5 calls=16 *sample7args.py:33 line for i in range(1):  # five",
        "depth=5 calls=16 *sample7args.py:34 line return i  # five",
        "depth=4 calls=16 *sample7args.py:34 return <= five: 0",
    ])
|
<gh_stars>1-10
import numpy as np
# import pdb
def PMTcm_defaultout(n_traces, n_windows):
    """Build the default PMT analysis output dictionary.

    Every field is pre-filled with its "not yet computed" sentinel:
    NaN for float64 quantities, -1 for signed-integer flags/counters.
    nPMThit is filled with n_traces and iPMThit with 1..n_traces.

    Parameters:
        n_traces  -- number of PMT traces (rows of every array)
        n_windows -- number of integration windows (columns of the
                     per-window arrays PMT_area/PMT_area_nobs/PMT_min/PMT_max)

    Returns a dict of numpy arrays.
    """
    def _nan1(count):
        # 1-D float64 NaN array, one entry per trace.
        return np.nan + np.zeros([count], dtype=np.float64)

    def _nan2(rows, cols):
        # 2-D float64 NaN array: one row per trace, one column per window.
        return np.nan + np.zeros([rows, cols], dtype=np.float64)

    out = dict(nPMThit=n_traces + np.zeros([n_traces], dtype=np.int32),
               iPMThit=1 + np.arange(n_traces, dtype=np.int32),
               PMT_t0_sec=_nan1(n_traces),
               PMT_t0_frac=_nan1(n_traces),
               PMT_t0_fastdaq=_nan1(n_traces),
               PMT_coinc=-1 + np.zeros([n_traces], dtype=np.int16),
               PMT_baseline=_nan1(n_traces),
               PMT_baserms=_nan1(n_traces),
               PMT_sat=-1 + np.zeros([n_traces], dtype=np.int8),
               PMT_area=_nan2(n_traces, n_windows),
               PMT_area_nobs=_nan2(n_traces, n_windows),
               PMT_min=_nan2(n_traces, n_windows),
               PMT_max=_nan2(n_traces, n_windows),
               PMT_pulse_area=_nan1(n_traces),
               PMT_pulse_height=_nan1(n_traces),
               PMT_pulse_tstart=_nan1(n_traces),
               PMT_pulse_tend=_nan1(n_traces),
               PMT_pulse_tpeak=_nan1(n_traces),
               PMT_pulse_t10=_nan1(n_traces),
               PMT_pulse_t90=_nan1(n_traces),
               PMT_nimtrig_ton=_nan1(n_traces),
               PMT_nimtrig_toff=_nan1(n_traces),
               PMT_ttltrig_ton=_nan1(n_traces),
               PMT_ttltrig_toff=_nan1(n_traces),
               pmt_nphe=_nan1(n_traces),
               pmt_npeaks=-1 + np.zeros([n_traces], dtype=np.int32),
               pmt_nclusters=-1 + np.zeros([n_traces], dtype=np.int32),
               pmt_maxpeak=_nan1(n_traces),
               )
    return out
def PMTcm(ev,
          t_windows=np.float64([[100, 200], [0, 99]]),
          bssup_thresh=np.float64(0.002),
          base_samples=np.intp(80),
          pmtfda={'PMT_trigt0_sec': np.float64([-1]),
                  'PMT_trigt0_frac': np.float64([-1])},
          phe_width=np.float64(1.8e-9),
          phe_amp=np.float64(17e-3),
          phecount_thresh=np.float64(10),
          single_pe_thresh=np.float64(6e-3),
          single_pe_max=np.float64(2e-2),
          stop_hunting_thresh=np.float64(4e-3),
          breakout_thresh=np.float64(4e-3),
          isopeak_dt=np.float64(2e-8),
          max_tracecount=np.intp(1e5)
          ):
    """Per-trace PMT pulse analysis for one event.

    Reads ev['PMTtraces'] (and, when available, ev['fastDAQ']) and fills the
    output dictionary produced by PMTcm_defaultout: per-trace timing, fastDAQ
    coincidence flags, baseline/RMS, windowed areas and extrema, single-pulse
    shape parameters, NIM/TTL trigger on/off times, and a photoelectron count
    from iterative Gaussian peak hunting.

    On any failure, or when no PMT traces are loaded, returns a one-trace
    default dictionary with nPMThit == iPMThit == -1.

    NOTE: `pmtfda` uses a mutable default dict; it is only read here, never
    mutated, so the shared default is benign.
    """
    n_windows = t_windows.shape[0]
    default_output = PMTcm_defaultout(1, n_windows)
    default_output['nPMThit'][0] = -1
    default_output['iPMThit'][0] = -1
    try:
        # *** First check that we have data ***
        if not (ev['PMTtraces']['loaded'] and
                (ev['PMTtraces']['t0_frac'].shape[0] > 0)
                ):
            # default_output already carries the -1 "no data" markers.
            return default_output
        # *** Now pre-allocate the output dictionary
        n_traces = ev['PMTtraces']['traces'].shape[0]
        firsttrace = 0
        if n_traces > max_tracecount:
            # Keep only the most recent max_tracecount traces.
            firsttrace = n_traces - max_tracecount
            n_traces = max_tracecount
        out = PMTcm_defaultout(n_traces, n_windows)
        # *** Now fill out the timing outputs
        out['PMT_t0_sec'] = ev['PMTtraces']['t0_sec'][firsttrace:, 0]
        out['PMT_t0_frac'] = ev['PMTtraces']['t0_frac'][firsttrace:, 0]
        if ev['fastDAQ']['loaded'] and\
                ('PMT_trigt0_sec' in pmtfda) and\
                ('PMT_trigt0_frac' in pmtfda) and\
                (pmtfda['PMT_trigt0_frac'] >= 0):
            # Trace times relative to the fastDAQ trigger (sec + frac parts).
            rel_t0_sec = ev['PMTtraces']['t0_sec'][firsttrace:, 0] -\
                pmtfda['PMT_trigt0_sec']
            rel_t0_frac = ev['PMTtraces']['t0_frac'][firsttrace:, 0] -\
                pmtfda['PMT_trigt0_frac']
            rel_t0 = rel_t0_sec + rel_t0_frac
            out['PMT_t0_fastdaq'] = rel_t0
            fastdaq_ix = np.intp(np.round((rel_t0 -
                                           ev['fastDAQ']['time'][0]) /
                                          ev['fastDAQ']['caldata']['dt']))
            in_fastdaq = (fastdaq_ix >= 0) *\
                (fastdaq_ix < ev['fastDAQ']['time'].shape[0])
            out['PMT_coinc'][in_fastdaq] = 0
            if 'VetoCoinc' in ev['fastDAQ']:
                # Widen the veto-coincidence mask by one sample on each side.
                veto_coinc = ev['fastDAQ']['VetoCoinc'] > 1.5
                veto_coinc[:-1] = veto_coinc[:-1] + veto_coinc[1:]
                veto_coinc[1:] = veto_coinc[:-1] + veto_coinc[1:]
                out['PMT_coinc'][in_fastdaq] = out['PMT_coinc'][in_fastdaq] +\
                    np.int16(veto_coinc[fastdaq_ix[in_fastdaq]])
            if 'CAMgate' in ev['fastDAQ']:
                led_coinc = ev['fastDAQ']['CAMgate'] < -0.5  # LED is NIM trigger
                out['PMT_coinc'][in_fastdaq] = out['PMT_coinc'][in_fastdaq] +\
                    2 * np.int16(led_coinc[fastdaq_ix[in_fastdaq]])
        # Pretty soon we'll enter the trace loop, so pre-calculate some things here
        dt = ev['PMTtraces']['dt'][0, 0]
        trig_t0 = 0
        # Gaussian template on a +/- 6-unit grid at 0.1 steps (used after the
        # trace is interpolated onto a 0.1-sample grid below).
        gauss_xd = np.arange(-6, 6.001, 0.1, dtype=np.float64)
        center_ix = np.nonzero(np.abs(gauss_xd) < 1e-3)[0][0]
        phe_area = phe_width * phe_amp * np.sqrt(2 * np.pi)
        phe_width = phe_width / dt  # convert seconds -> samples
        isopeak_dt = isopeak_dt / dt
        gauss_yd = np.exp(-np.square(gauss_xd) / (2 * phe_width * phe_width))
        gauss_ydconv = np.convolve(gauss_yd, gauss_yd)
        gauss_convnorm = np.max(gauss_ydconv)
        # Entering the trace loop
        for i_t in range(n_traces):
            # First order of business: scale and merge the traces
            i_t_ev = i_t + firsttrace
            ls = ev['PMTtraces']['lost_samples'][i_t_ev, 0]
            if ls <= base_samples:  # skip bad trace
                continue
            pmtV = ev['PMTtraces']['traces'][i_t_ev, 0, :ls] *\
                ev['PMTtraces']['v_scale'][i_t_ev, 0] +\
                ev['PMTtraces']['v_offset'][i_t_ev, 0]
            coarse_data = ev['PMTtraces']['traces'][i_t_ev, 1, :ls] *\
                ev['PMTtraces']['v_scale'][i_t_ev, 1] +\
                ev['PMTtraces']['v_offset'][i_t_ev, 1]
            # Where the fine channel railed, substitute the coarse channel.
            satBool = (ev['PMTtraces']['traces'][i_t_ev, 0, :ls] <= -127) +\
                (ev['PMTtraces']['traces'][i_t_ev, 0, :ls] >= 126)
            np.copyto(pmtV, coarse_data, casting='same_kind', where=satBool)
            out['PMT_sat'][i_t] = np.int8(np.any(satBool))
            # Next, find the baseline and do subtraction/suppression
            this_base_samples = np.min([base_samples, np.intp(ls * .5)])
            bs = np.mean(pmtV[:this_base_samples])
            bs_rms = np.sqrt(np.var(pmtV[:this_base_samples]))
            out['PMT_baseline'][i_t] = bs
            out['PMT_baserms'][i_t] = bs_rms
            pmtV_bsub = pmtV - bs
            # Suppress samples near baseline, widened by one on each side.
            notbs_samples = abs(pmtV_bsub) >= bssup_thresh
            notbs_samples[1:] = notbs_samples[1:] + notbs_samples[:-1]
            notbs_samples[:-1] = notbs_samples[:-1] + notbs_samples[1:]
            pmtV_bsup = np.copy(pmtV_bsub)
            pmtV_bsup[~notbs_samples] = 0
            # Windowed calculations next -- finding areas and peaks by window
            ix_windows = np.intp(np.round((t_windows * 1e-9 - trig_t0) / dt))
            ix_windows[ix_windows < 0] = 0
            ix_windows[ix_windows > pmtV_bsub.shape[0]] = pmtV_bsub.shape[0]
            for i_w in range(n_windows):
                out['PMT_area'][i_t, i_w] = dt *\
                    np.sum(pmtV_bsup[ix_windows[i_w, 0]:ix_windows[i_w, 1]])
                out['PMT_area_nobs'][i_t, i_w] = dt *\
                    np.sum(pmtV_bsub[ix_windows[i_w, 0]:ix_windows[i_w, 1]])
                out['PMT_max'][i_t, i_w] = \
                    np.max(pmtV_bsub[ix_windows[i_w, 0]:ix_windows[i_w, 1]])
                out['PMT_min'][i_t, i_w] = \
                    np.min(pmtV_bsub[ix_windows[i_w, 0]:ix_windows[i_w, 1]])
            # Next is the single peak analysis, based on suppression threshold
            peak_ix = np.argmin(pmtV_bsub)
            peak_ix_start = np.nonzero(~notbs_samples[:peak_ix])[0]
            if peak_ix_start.shape[0] > 0:
                peak_ix_start = peak_ix_start[-1] + 1
            else:
                peak_ix_start = 0
            peak_ix_end = np.nonzero(~notbs_samples[peak_ix:])[0]
            if peak_ix_end.shape[0] > 0:
                peak_ix_end = peak_ix_end[0] + peak_ix
            else:
                peak_ix_end = pmtV_bsub.shape[0]
            out['PMT_pulse_area'][i_t] = dt *\
                np.sum(pmtV_bsub[peak_ix_start:peak_ix_end])
            out['PMT_pulse_height'][i_t] = -pmtV_bsub[peak_ix]
            out['PMT_pulse_tstart'][i_t] = trig_t0 + dt * peak_ix_start
            out['PMT_pulse_tend'][i_t] = trig_t0 + dt * (peak_ix_end - 1)
            out['PMT_pulse_tpeak'][i_t] = trig_t0 + dt * peak_ix
            # NOTE(review): cumsum is not scaled by dt while the denominator
            # (PMT_pulse_area) is -- t10/t90 fractions may be off by 1/dt;
            # preserved as-is, confirm against downstream use.
            pulse_frac = np.cumsum(pmtV_bsub[peak_ix_start:peak_ix_end]) /\
                np.max([out['PMT_pulse_area'][i_t], 1e-20])
            t10_ix = np.nonzero(pulse_frac >= 0.1)[0]
            t90_ix = np.nonzero(pulse_frac <= 0.9)[0]
            if t10_ix.shape[0] > 0:
                out['PMT_pulse_t10'][i_t] = trig_t0 +\
                    dt * (peak_ix_start + t10_ix[0])
            else:
                out['PMT_pulse_t10'][i_t] = out['PMT_pulse_tpeak'][i_t]
            if t90_ix.shape[0] > 0:
                out['PMT_pulse_t90'][i_t] = trig_t0 +\
                    dt * (peak_ix_start + t90_ix[-1])
            else:
                out['PMT_pulse_t90'][i_t] = out['PMT_pulse_tpeak'][i_t]
            # Now we do some bookkeeping for the coarse trace, in case this is LED
            nimtrig = np.nonzero(coarse_data < -0.5)[0]
            ttltrig = np.nonzero(coarse_data > 1.5)[0]
            if nimtrig.shape[0] > 0:
                out['PMT_nimtrig_ton'][i_t] = trig_t0 + dt * nimtrig[0]
                out['PMT_nimtrig_toff'][i_t] = trig_t0 + dt * nimtrig[-1]
            if ttltrig.shape[0] > 0:
                out['PMT_ttltrig_ton'][i_t] = trig_t0 + dt * ttltrig[0]
                out['PMT_ttltrig_toff'][i_t] = trig_t0 + dt * ttltrig[-1]
            # And now, phe counting *** THIS MUST BE LAST IN THE LOOP!!! ***
            if np.abs(out['PMT_area'][i_t, 0]) > (phecount_thresh * phe_area):
                # Too much light for peak counting -- estimate from area alone.
                out['pmt_nphe'][i_t] = -out['PMT_area'][i_t, 0] / phe_area
                out['pmt_maxpeak'][i_t] = out['pmt_nphe'][i_t]
                continue
            # Interpolate the first window onto a 0.1-sample grid and hunt
            # Gaussian-shaped peaks by iterative subtraction.
            old_xd = np.arange(1, pmtV_bsub.shape[0] + 1e-3, 1,
                               dtype=np.float64)
            new_xd = np.arange(old_xd[ix_windows[0, 0]],
                               old_xd[ix_windows[0, 1]], .1,
                               dtype=np.float64)
            newtrace = -np.interp(new_xd, old_xd, pmtV_bsub)
            thistrace = np.copy(newtrace)
            thisconv = np.convolve(thistrace, gauss_yd)
            thispeak = np.zeros(thistrace.shape, dtype=np.float64)
            thispeakconv = np.zeros(thisconv.shape, dtype=np.float64)
            p_amp_list = []
            p_ix_list = []
            while np.max(thistrace) > stop_hunting_thresh:
                p_ix = np.argmax(thisconv)
                p_amp = thisconv[p_ix]
                p_amp = p_amp / gauss_convnorm
                p_ix = p_ix - center_ix
                if p_amp < breakout_thresh or \
                        p_ix < center_ix or \
                        p_ix > (thistrace.shape[0] - center_ix - 1):
                    break
                # Sanity-cap the fitted amplitude with the raw local maximum.
                alt_traceslice = thistrace[(p_ix - center_ix):
                                           (p_ix - center_ix + gauss_yd.shape[0])]
                alternate_p_ix = np.argmax(alt_traceslice)
                alternate_max_p_amp = alt_traceslice[alternate_p_ix] * \
                    np.exp(0.125 / phe_width)
                alternate_p_ix = alternate_p_ix + p_ix - center_ix
                if p_amp > alternate_max_p_amp:
                    p_amp = alternate_max_p_amp
                    p_ix = alternate_p_ix
                p_amp_list.append(p_amp)
                p_ix_list.append(p_ix)
                # Subtract this peak from the trace (edge-clipped template).
                thispeak[:] = 0
                thispeak_start = p_ix - center_ix
                thispeak_end = p_ix + center_ix + 1
                if thispeak_start < 0:
                    thispeak[:thispeak_end] = p_amp *\
                        gauss_yd[-thispeak_start:]
                elif thispeak_end > thispeak.shape[0]:
                    thispeak[thispeak_start:] = p_amp *\
                        gauss_yd[:(thispeak.shape[0] - thispeak_end)]
                else:
                    thispeak[thispeak_start:thispeak_end] = p_amp *\
                        gauss_yd
                thistrace = thistrace - thispeak
                # And subtract its contribution from the convolved trace.
                thispeakconv[:] = 0
                thispeakconv_start = p_ix - center_ix
                thispeakconv_end = p_ix + 3 * center_ix + 1
                if thispeakconv_start < 0:
                    thispeakconv[:thispeakconv_end] = p_amp *\
                        gauss_ydconv[-thispeakconv_start:]
                elif thispeakconv_end > thispeakconv.shape[0]:
                    thispeakconv[thispeakconv_start:] = p_amp *\
                        gauss_ydconv[:(thispeakconv.shape[0] - thispeakconv_end)]
                else:
                    thispeakconv[thispeakconv_start:thispeakconv_end] = p_amp *\
                        gauss_ydconv
                thisconv = thisconv - thispeakconv
            if len(p_amp_list) == 0:
                out['pmt_nphe'][i_t] = 0
                out['pmt_npeaks'][i_t] = 0
                out['pmt_nclusters'][i_t] = 0
                out['pmt_maxpeak'][i_t] = 0
                continue
            p_amp_list = np.float64(p_amp_list)
            p_ix_list = np.intp(p_ix_list)
            out['pmt_maxpeak'][i_t] = p_amp_list[0] / phe_amp
            out['pmt_npeaks'][i_t] = p_amp_list.shape[0]
            # Cluster peaks in time and sum amplitudes within each cluster.
            timeorder = np.argsort(p_ix_list)
            p_ix_list = p_ix_list[timeorder]
            p_amp_list = p_amp_list[timeorder]
            peak_tdiff = np.concatenate((np.float64([np.inf]),
                                         np.diff(p_ix_list),
                                         np.float64([np.inf])))
            peak_starts = np.nonzero(peak_tdiff[:-1] > 10 * isopeak_dt)[0]
            peak_ends = np.nonzero(peak_tdiff[1:] > 10 * isopeak_dt)[0]
            cumsum_amps = np.zeros(peak_tdiff.shape[0], dtype=np.float64)
            cumsum_amps[1:] = np.cumsum(p_amp_list)
            cumsum_peaks = np.zeros(peak_starts.shape[0] + 1, dtype=np.float64)
            cumsum_peaks[:-1] = cumsum_amps[peak_starts]
            cumsum_peaks[-1] = cumsum_amps[-1]
            peak_amps = np.diff(cumsum_peaks)
            out['pmt_nclusters'][i_t] = peak_amps.shape[0]
            peak_ix_start = p_ix_list[peak_starts]
            peak_ix_end = p_ix_list[peak_ends]
            # An isolated, small cluster in the expected time region (or above
            # the single-pe threshold) counts as exactly one photoelectron.
            singlephe = (peak_starts == peak_ends) * \
                (peak_amps < single_pe_max) * \
                ((np.abs(peak_ix_start - 1050) < 100) +
                 (peak_amps > single_pe_thresh))
            phe_count = peak_amps / phe_amp
            phe_count[singlephe] = 1
            out['pmt_nphe'][i_t] = np.sum(phe_count)
        # pdb.set_trace()
        return out
    except Exception:
        # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit
        # still propagate; any analysis failure yields the default output.
        return default_output
|
<gh_stars>10-100
from jabber.objects.x_event import X_Event, X_EVENT_NS
from jabber.objects.chatstates import ChatState, CHATSTATES_NS
from common.Conversation import Conversation
from pyxmpp.message import Message
from logging import getLogger
from jabber import JabberBuddy
from util import callsback
from pyxmpp.utils import from_utf8
from jabber.objects.x_delay import X_DELAY_NS, X_Delay
import libxml2
from common import pref
from util.primitives.fmtstr import fmtstr
# Module-level logger for this conversation backend.
log = getLogger('jabber.JabberConversation')
# XHTML-IM (XEP-0071) namespaces used for rich-text message bodies.
XHTML_IM_NS = 'http://jabber.org/protocol/xhtml-im'
XHTML_NS = 'http://www.w3.org/1999/xhtml'
# Prefix -> namespace map used in XPath queries against incoming stanzas.
xdata_namespaces = {'xhtmlim': XHTML_IM_NS,
                    'xhtml' : XHTML_NS}
# Maps local typing states to XEP-0085 chat-state element names;
# anything else falls back to 'active' (see send_typing_status).
typing_chatstates = dict(typing = 'composing',
                         typed = 'paused')
class JabberConversation(Conversation):
    '''A one-to-one jabber conversation with a single buddy/JID.'''
    # def __init__(self, protocol, buddy, jid_to, jid_from,thread=None):
    def __init__(self, protocol, buddy, jid_to, thread=None):
        '''
        @param protocol: a JabberProtocol Instance
        @param buddy: the buddy you are talking to
        @param jid_to: the jid you are talking to
        @param thread: the thread id, if any (not yet used)
        '''
        if not isinstance(buddy, JabberBuddy.JabberBuddy):
            raise TypeError
        Conversation.__init__(self, protocol)
        self.buddy_to = buddy
        self.jid_to = jid_to
        # self.jid_from = jid_from
        self.buddies = protocol.buddies
        self.thread = thread
        self.name = buddy.alias
        # Start with unknown typing-notification capabilities; they are
        # learned from the peer's stanzas (see get_message_chatstate).
        self.reset_chat_states()
    # One-to-one conversation; never a multi-user chat.
    ischat = False
    @property
    def self_buddy(self):
        # The account's own buddy object, supplied by the protocol.
        return self.protocol.self_buddy
    @property
    def buddy(self): return self.buddy_to
    def reset_chat_states(self):
        # None means "unknown"; set to True once the peer demonstrates
        # support for chat states (XEP-0085) or x:event (XEP-0022).
        self.chat_states_allowed = None
        self.x_events_allowed = None
    @callsback
    def _send_message(self, message, auto = False, callback=None, **opts):
        '''Send one chat message (a fmtstr) to the current jid_to.

        Invokes callback.success() on send, callback.error(e) on failure.
        '''
        assert isinstance(message, fmtstr)
        # If the resource we were talking to went away, fall back to the
        # bare JID and forget what we learned about its capabilities.
        if self.jid_to not in self.buddy.resources:
            self.reset_chat_states()
            self.jid_to = self.buddy.jid
        # PyXMPP will escape the message for us...
        m = Message(stanza_type = 'chat', to_jid = self.jid_to, body = message.format_as('plaintext'))
        #message = unicode(message.encode('xml'))
        #assert isinstance(message, unicode)
        append_formatted_html(m, message)
        if pref('privacy.send_typing_notifications', False):
            ChatState('active').as_xml(m.xmlnode)
            X_Event(composing = True).as_xml(m.xmlnode)
        try:
            self.protocol.send_message(m)
        except Exception, e:
            callback.error(e)
        else:
            callback.success()
        #self.sent_message(message.replace('\n', '<br />'), format)
    def send_typing_status(self, status):
        '''Send a typing notification, but only via protocols the peer
        has demonstrated support for (x:event and/or chat states).'''
        if not any((self.x_events_allowed, self.chat_states_allowed)):
            return
        m = Message(to_jid = self.jid_to, stanza_type='chat')
        node = m.xmlnode
        if self.x_events_allowed:
            X_Event(composing = (status == 'typing')).as_xml(node)
        if self.chat_states_allowed:
            ChatState(typing_chatstates.get(status, 'active')).as_xml(node)
        self.protocol.send_message(m)
    def buddy_join(self, buddy):
        # Register a newly seen buddy and clear their typing status.
        if buddy not in self.room_list:
            self.room_list.append(buddy)
            self.typing_status[buddy] = None
    def incoming_message(self, buddy, message):
        '''Handle an incoming <message> stanza: deliver its body (if any)
        and update the sender's typing status.'''
        from_jid = message.get_from()
        # Lock onto the full JID that actually messaged us.
        if from_jid != self.jid_to:
            self.reset_chat_states()
            self.jid_to = from_jid
        #self.message = message
        body = get_message_body(message)
        if body:
            stamp = get_message_timestamp(message)
            if stamp:
                # A delay timestamp means this was an offline/delayed message.
                did_receive = self.received_message(buddy, body, timestamp = stamp, offline = True, content_type = 'text/html')
            else:
                did_receive = self.received_message(buddy, body, content_type = 'text/html')
            if did_receive:
                Conversation.incoming_message(self)
        # False means the peer sent the 'gone' chat state (left the chat).
        chatstate = self.get_message_chatstate(message, body)
        if chatstate is False:
            chatstate = None
            if pref('jabber.system_message.show_gone', type=bool, default=False):
                self.system_message(_('{name} has left the conversation.').format(name=from_jid))
        self.typing_status[buddy] = chatstate
    def get_message_chatstate(self, message, body):
        'Returns "typing", "typed", or None for a <message> stanza.'
        retval = None
        xevents = message.xpath_eval(u"jxe:x",{'jxe':X_EVENT_NS})
        chatstates = message.xpath_eval('cs:*', {'cs': CHATSTATES_NS})
        if chatstates:
            # Seeing any chat-state element proves XEP-0085 support.
            self.chat_states_allowed = True
            chatstate = ChatState(chatstates[0]).xml_element_name
            retval = {'composing':'typing',
                      'paused'   :'typed',
                      'gone'     : False, #left
                      'inactive' : None, #not typing or typed, not nearby?
                      'active'   : None, #not typing or typed, nearby?
                      }.get(chatstate)
        if xevents:
            found_composing = X_Event(xevents[0]).composing
            if found_composing:
                self.x_events_allowed = True
            if not chatstates:
                # Fall back to x:event only when no chat state was present.
                retval = 'typing' if found_composing and not body else None
        return retval
    @property
    def id(self):
        # return (self.buddy_to, self.jid_to, self.jid_from, self.thread)
        return (self.buddy_to,)
    def exit(self):
        '''Leave the conversation, optionally announcing 'gone', and
        deregister it from the protocol.'''
        if self.chat_states_allowed and pref('privacy.send_typing_notifications', False):
            m = Message(to_jid = self.jid_to, stanza_type='chat')
            node = m.xmlnode
            ChatState('gone').as_xml(node)
            self.protocol.send_message(m)
        self.protocol.conversations.pop(self.id, None)
        Conversation.exit(self)
def append_formatted_html(message_tag, message):
'''
Inserts an <html> node with formatted XHTML into a message tag.
message_tag a <message> stanza
message the message (a fmtstr object)
After this method completes, the message stanza has an additional <html> child node.
Also returns the <span> as a string.
'''
html = message_tag.xmlnode.newChild(None, 'html', None)
xhtml_ns = html.newNs(XHTML_IM_NS, None)
span_text = message.format_as('xhtml')
body_text = '<body xmlns="%s">%s</body>' % (XHTML_NS, span_text)
try:
message_doc = libxml2.parseDoc(body_text.encode('utf-8'))
except Exception:
import traceback;traceback.print_exc()
print 'This text failed: %r' % body_text
raise
message_node = message_doc.get_children()
message_node_copy = message_node.docCopyNode(message_tag.xmlnode.doc, 1)
html.addChild(message_node_copy)
message_doc.freeDoc()
return span_text
#TODO: do I need to unlink or free nodes here?
def get_message_timestamp(message):
    'Returns a timestamp for a <message> stanza, or None.'
    delay_nodes = message.xpath_eval(u"jxd:x",{'jxd':X_DELAY_NS})
    if not delay_nodes:
        return None
    stamp = X_Delay(delay_nodes[0]).timestamp
    if stamp is not None:
        return stamp
def get_message_body(message):
    'Returns the unicode message body from a <message> stanza.'
    jid = message.get_from()
    xdata = message.xpath_eval(u"xhtmlim:html/xhtml:body[1]/node()", xdata_namespaces)
    if xdata:
        # XHTML formatted message
        # TODO: Strip namespaces
        serialized = [child.serialize() for child in xdata]
        body = from_utf8(''.join(serialized))
    else:
        # Old style <body> message
        raw_body = message.get_body()
        body = unicode(raw_body.encode('xml')) if raw_body else None
    if body is None:
        return body
    return body.replace('\n', '<br />')
|
#!/usr/bin/env python
import numpy as np
import pandas as pd
import click as ck
from sklearn.metrics import classification_report
from sklearn.metrics.pairwise import cosine_similarity
import sys
from collections import deque
import time
import logging
from sklearn.metrics import roc_curve, auc, matthews_corrcoef
from scipy.spatial import distance
from scipy import sparse
import math
from utils import FUNC_DICT, Ontology, NAMESPACES
from matplotlib import pyplot as plt
logging.basicConfig(format='%(levelname)s:%(message)s', level=logging.INFO)
@ck.command()
@ck.option(
    '--data-root', '-dr', default='data',
    help='Prediction model')
@ck.option(
    '--ont', '-ont', default='mf',
    help='Prediction model')
@ck.option(
    '--model', '-m', default='deepgozero_blast',
    help='Prediction model')
@ck.option(
    '--combine', '-c', is_flag=True,
    help='Prediction model')
def main(data_root, ont, model, combine):
    """Evaluate GO-term predictions for one sub-ontology.

    Loads train/valid/test pickles from {data_root}/{ont}/, computes term
    information content, then sweeps prediction thresholds 0.00..1.00 to
    report Fmax, weighted Fmax, Smin, and AUPR; saves a precision-recall
    curve as PDF and pickle.
    """
    train_data_file = f'{data_root}/{ont}/train_data.pkl'
    valid_data_file = f'{data_root}/{ont}/valid_data.pkl'
    test_data_file = f'{data_root}/{ont}/predictions_{model}.pkl'
    terms_file = f'{data_root}/{ont}/terms.pkl'
    go_rels = Ontology(f'{data_root}/go.obo', with_rels=True)
    terms_df = pd.read_pickle(terms_file)
    terms = terms_df['gos'].values.flatten()
    # term -> column index in each prediction row
    terms_dict = {v: i for i, v in enumerate(terms)}
    train_df = pd.read_pickle(train_data_file)
    valid_df = pd.read_pickle(valid_data_file)
    # IC is computed over train+valid plus test annotations.
    train_df = pd.concat([train_df, valid_df])
    test_df = pd.read_pickle(test_data_file)
    annotations = train_df['prop_annotations'].values
    annotations = list(map(lambda x: set(x), annotations))
    test_annotations = test_df['prop_annotations'].values
    test_annotations = list(map(lambda x: set(x), test_annotations))
    go_rels.calculate_ic(annotations + test_annotations)
    # Print IC values of terms
    # NOTE(review): `ics` is built but never used afterwards -- confirm intent.
    ics = {}
    for term in terms:
        ics[term] = go_rels.get_ic(term)
    # Combine scores for diamond and deepgo
    alpha = 0.5
    eval_preds = []
    for i, row in enumerate(test_df.itertuples()):
        if combine:
            # 50/50 blend of BLAST-based and model predictions.
            preds = row.blast_preds * alpha + row.preds * (1 - alpha)
        else:
            preds = row.preds
        eval_preds.append(preds)
    print('Computing Fmax')
    fmax = 0.0
    tmax = 0.0
    wfmax = 0.0
    wtmax = 0.0
    avgic = 0.0
    precisions = []
    recalls = []
    smin = 1000000.0
    # NOTE(review): rus/mis are never appended to -- confirm intent.
    rus = []
    mis = []
    # Restrict evaluation to this sub-ontology, minus its root term.
    go_set = go_rels.get_namespace_terms(NAMESPACES[ont])
    go_set.remove(FUNC_DICT[ont])
    labels = test_df['prop_annotations'].values
    labels = list(map(lambda x: set(filter(lambda y: y in go_set, x)), labels))
    for t in range(0, 101):
        threshold = t / 100.0
        preds = []
        for i, row in enumerate(test_df.itertuples()):
            annots = set()
            for j, go_id in enumerate(terms):
                if eval_preds[i][j] >= threshold:
                    annots.add(go_id)
            if t == 0:
                preds.append(annots)
                continue
            # new_annots = set()
            # for go_id in annots:
            #     new_annots |= go_rels.get_anchestors(go_id)
            preds.append(annots)
        # Filter classes
        preds = list(map(lambda x: set(filter(lambda y: y in go_set, x)), preds))
        fscore, prec, rec, s, ru, mi, fps, fns, avg_ic, wf = evaluate_annotations(go_rels, labels, preds)
        print(f'AVG IC {avg_ic:.3f}')
        precisions.append(prec)
        recalls.append(rec)
        print(f'Fscore: {fscore}, Precision: {prec}, Recall: {rec} S: {s}, RU: {ru}, MI: {mi} threshold: {threshold}, WFmax: {wf}')
        if fmax < fscore:
            fmax = fscore
            tmax = threshold
            avgic = avg_ic
        if wfmax < wf:
            wfmax = wf
            wtmax = threshold
        if smin > s:
            smin = s
    if combine:
        # Mark combined runs in output filenames.
        model += '_diam'
    print(model, ont)
    print(f'Fmax: {fmax:0.3f}, Smin: {smin:0.3f}, threshold: {tmax}')
    print(f'WFmax: {wfmax:0.3f}, threshold: {wtmax}')
    precisions = np.array(precisions)
    recalls = np.array(recalls)
    # Sort by recall so the trapezoid AUPR integration is well-defined.
    sorted_index = np.argsort(recalls)
    recalls = recalls[sorted_index]
    precisions = precisions[sorted_index]
    aupr = np.trapz(precisions, recalls)
    print(f'AUPR: {aupr:0.3f}')
    print(f'AVGIC: {avgic:0.3f}')
    plt.figure()
    lw = 2
    plt.plot(recalls, precisions, color='darkorange',
             lw=lw, label=f'AUPR curve (area = {aupr:0.2f})')
    plt.xlim([0.0, 1.0])
    plt.ylim([0.0, 1.05])
    plt.xlabel('Recall')
    plt.ylabel('Precision')
    plt.title('Area Under the Precision-Recall curve')
    plt.legend(loc="lower right")
    plt.savefig(f'{data_root}/{ont}/aupr_{model}.pdf')
    df = pd.DataFrame({'precisions': precisions, 'recalls': recalls})
    df.to_pickle(f'{data_root}/{ont}/pr_{model}.pkl')
def compute_roc(labels, preds):
    """Micro-averaged ROC AUC over flattened labels and predictions."""
    false_pos, true_pos, _ = roc_curve(labels.flatten(), preds.flatten())
    return auc(false_pos, true_pos)
def compute_mcc(labels, preds):
    """Matthews correlation coefficient over flattened labels/predictions."""
    return matthews_corrcoef(labels.flatten(), preds.flatten())
def evaluate_annotations(go, real_annots, pred_annots):
    """CAFA-style evaluation of predicted annotation sets.

    Parameters:
        go          -- ontology object exposing get_ic / get_norm_ic per term
        real_annots -- list of ground-truth term sets, one per protein
        pred_annots -- list of predicted term sets, aligned with real_annots

    Proteins with an empty ground-truth set are skipped. Precision is
    averaged only over proteins with at least one prediction; recall over
    all counted proteins.

    Returns (f, p, r, s, ru, mi, fps, fns, avg_ic, wf) where s is the
    semantic distance sqrt(ru^2 + mi^2) and wf the IC-weighted F-measure.

    Fix: previously raised ZeroDivisionError when every protein had an empty
    ground-truth set; now returns all-zero metrics in that case.
    """
    total = 0
    p = 0.0
    r = 0.0
    wp = 0.0
    wr = 0.0
    p_total = 0
    ru = 0.0
    mi = 0.0
    avg_ic = 0.0
    fps = []
    fns = []
    for real, pred in zip(real_annots, pred_annots):
        if len(real) == 0:
            continue  # proteins without ground truth do not count
        tp = set(real).intersection(set(pred))
        fp = pred - tp
        fn = real - tp
        tpic = 0.0
        for go_id in tp:
            tpic += go.get_norm_ic(go_id)
            avg_ic += go.get_ic(go_id)
        fpic = 0.0
        for go_id in fp:
            fpic += go.get_norm_ic(go_id)
            mi += go.get_ic(go_id)  # misinformation
        fnic = 0.0
        for go_id in fn:
            fnic += go.get_norm_ic(go_id)
            ru += go.get_ic(go_id)  # remaining uncertainty
        fps.append(fp)
        fns.append(fn)
        tpn = len(tp)
        fpn = len(fp)
        fnn = len(fn)
        total += 1
        recall = tpn / (1.0 * (tpn + fnn))
        r += recall
        # NOTE(review): raises ZeroDivisionError if every tp/fn term has zero
        # normalized IC -- preserved from the original, confirm acceptable.
        wrecall = tpic / (tpic + fnic)
        wr += wrecall
        if len(pred) > 0:
            p_total += 1
            precision = tpn / (1.0 * (tpn + fpn))
            p += precision
            wp += tpic / (tpic + fpic)
    if total == 0:
        # No protein had ground-truth annotations: all metrics are zero.
        return 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, fps, fns, 0.0, 0.0
    avg_ic = (avg_ic + mi) / total
    ru /= total
    mi /= total
    r /= total
    wr /= total
    if p_total > 0:
        p /= p_total
        wp /= p_total
    f = 0.0
    wf = 0.0
    if p + r > 0:
        f = 2 * p * r / (p + r)
        wf = 2 * wp * wr / (wp + wr)
    s = math.sqrt(ru * ru + mi * mi)
    return f, p, r, s, ru, mi, fps, fns, avg_ic, wf
if __name__ == '__main__':
    # click parses the command-line options and invokes main().
    main()
|
from flask import request, make_response, send_from_directory
from functools import wraps
import jwt
import datetime
import os
from app import app, db
from app.models import JobPosts, JobPostsReqs, Talent
# from app.get_details import get_details
from app.utils import hash_file
from app.model_rank import model_rank
def login_required(f):
    """Decorator rejecting requests without a valid JWT.

    Reads the token from the 'x-access-token' header and validates it with
    the app's SECRET_KEY. Returns 401 when the header is absent or the token
    fails to decode; otherwise calls the wrapped view unchanged.
    """
    @wraps(f)
    def decorated(*args, **kwargs):
        token = None
        if 'x-access-token' in request.headers:
            token = request.headers['x-access-token']
        if not token:
            return {'message': 'missing token'}, 401
        try:
            # for PyJWT ver 2.1.0, decode needs algorithms parameter passed in
            # https://stackoverflow.com/questions/65451144/ibm-text-to-speech-python-decodeerror
            # Decoded payload is only validated, never used, so no binding.
            jwt.decode(token, app.config['SECRET_KEY'], algorithms=['HS256'])
        except Exception:
            # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit
            # are not swallowed; any decode/validation error means 401.
            return {'message': 'invalid token'}, 401
        return f(*args, **kwargs)
    return decorated
@app.route('/')
def root():
    # Simple unauthenticated liveness endpoint.
    return 'hello world!'
@app.route('/login', methods=['POST'])
def login():
    """Issue a 24-hour JWT for the single hard-coded admin account.

    Expects form fields 'username' and 'password'; returns {'token': ...}
    on success, or a 401 payload otherwise.
    """
    username = request.form['username']
    password = request.form['password']
    # auth = request.authorization
    # NOTE(review): credentials are hard-coded and compared in plaintext
    # ('<PASSWORD>' looks like a redacted placeholder) -- a hashed secret
    # from configuration would be safer; confirm before deployment.
    if username and password and username == 'admin' and password == '<PASSWORD>':
        token = jwt.encode({
            'user': username,
            # Token lifetime: 24 hours from issue.
            'exp': datetime.datetime.utcnow() + datetime.timedelta(hours=24)
        }, app.config['SECRET_KEY'])
        return {'token': token}
    return {'message': 'unable to login'}, 401
    # return make_response('unauthorized access', 401,
    #                      {'WWW-Authenticate': 'Basic Realm="Login Required"'})
@app.route('/dashboard')
@login_required
def dashboard():
    """Return every job post as {'jobposts': [...]} for the admin dashboard."""
    posts = [{'id': post.id,
              'title': post.title,
              'uri': post.uri,
              'applicants': post.applicants}
             for post in JobPosts.query.all()]
    return {'jobposts': posts}
@app.route('/jobpost', methods=['GET', 'POST'])
@login_required
def jobpost():
    """Create a job post (POST) or list all job posts (GET).

    POST form fields: 'title', 'tag', 'uri', and a comma-separated
    'requirements' string; each requirement becomes one JobPostsReqs row.
    """
    # handle post req
    if request.method == 'POST':
        data = request.form
        # add job post to db
        new_jobpost = JobPosts(title=data['title'], tag=data['tag'], uri=data['uri'], applicants=0)
        db.session.add(new_jobpost)
        db.session.commit()
        # add requirements to db
        # (local renamed from `id` so the builtin is not shadowed)
        # NOTE(review): re-querying for the newest id is race-prone under
        # concurrent posts; new_jobpost.id after commit would be safer.
        post_id = JobPosts.query.order_by(JobPosts.id.desc()).first().id
        for skill in data['requirements'].split(','):
            db.session.add(JobPostsReqs(job_post_id=post_id, skill=skill))
        # One commit for all requirement rows instead of one per row.
        db.session.commit()
        return {'message': 'job post submitted'}
    # handle get request
    response = {'job_posts': []}
    for post in JobPosts.query.all():
        response['job_posts'].append({
            'title': post.title,
            'uri': post.uri,
            'tag': post.tag,
            'applicants': post.applicants
        })
    return response
@app.route('/jobpost/<id>', methods=['DELETE'])
@login_required
def jobpost_id(id):
    """Delete the job post whose uri is ``id`` plus its requirement and
    applicant rows.

    (The parameter name `id` is fixed by the route placeholder.)
    """
    # NOTE(review): .first() is None when no post matches, so this raises
    # AttributeError (-> 500); a 404 would be friendlier -- confirm.
    job_post_id = JobPosts.query.filter_by(uri=id).first().id
    # Unused locals that held the delete() row counts have been dropped.
    JobPosts.query.filter_by(uri=id).delete()
    JobPostsReqs.query.filter_by(job_post_id=job_post_id).delete()
    Talent.query.filter_by(job_post_id=job_post_id).delete()
    db.session.commit()
    return {'message': 'job post successfully deleted'}
@app.route('/results/<id>')
@login_required
def results(id):
    """Score every applicant of the job post with uri ``id`` against its
    requirement list and return the ranked details."""
    job_post_id = JobPosts.query.filter_by(uri=id).first().id
    skills = [req.skill
              for req in JobPostsReqs.query.filter_by(job_post_id=job_post_id).all()]
    scored = []
    for applicant in Talent.query.filter_by(job_post_id=job_post_id).all():
        resume_path = os.path.join(app.config['UPLOAD_PATH'], applicant.file_hash + '.pdf')
        scored.append({
            'name': applicant.name,
            'email': applicant.email,
            'phone': applicant.phone,
            'filehash': applicant.file_hash,
            'score': model_rank(resume_path, skills)
        })
    return {'results': scored}
@app.route('/talent/<id>', methods=['POST'])
@login_required
def talent(id):
    """Accept an applicant's resume upload for the job post with uri ``id``.

    Validates the file extension, stores the file under its content hash,
    bumps the post's applicant counter, and records the applicant's form
    details (name/email/phone) in the Talent table.
    """
    # Leftover debug print of the incoming request has been removed.
    upload = request.files['file']
    filename = upload.filename
    if filename != '':
        if filename.split('.')[-1] not in app.config['VALID_FILE_EXT']:
            # NOTE(review): 400/415 would be more apt than 401 here -- confirm
            # no client depends on the current status code.
            return {'message': 'invalid file extension'}, 401
        # hash file and save file as ../UPLOAD_PATH/hash.pdf
        hashed = hash_file(upload)
        upload.seek(0)  # move filepointer to start after read()
        new_filename = os.path.join(app.config['UPLOAD_PATH'], hashed + '.pdf')
        upload.save(new_filename)
        job_post = JobPosts.query.filter_by(uri=id).first()
        job_post_id = job_post.id
        # increment applicants in database
        job_post.applicants += 1
        db.session.commit()
        # add talent to db
        # details = get_details(new_filename)
        new_talent = Talent(
            name=request.form['name'],
            email=request.form['email'],
            phone=request.form['phone'],
            file_hash=hashed,
            job_post_id=job_post_id
        )
        db.session.add(new_talent)
        db.session.commit()
        return {'message': 'file upload succesful'}
    return {'message': 'file upload failed'}, 401
@app.route('/uploads/<filehash>')
def uploads(filehash):
    """Serve a stored resume PDF by its content hash, forcing a download."""
    directory = '../' + app.config['UPLOAD_PATH']
    return send_from_directory(directory, filehash + '.pdf', as_attachment=True)
|
from __future__ import print_function
from __future__ import absolute_import
from __future__ import division
import sys
import array
import compas
from compas.utilities import flatten
try:
from compas.numerical.alglib.core import Array
from compas.numerical.alglib.core import Zeros
from compas.numerical.alglib.core import xalglib
except ImportError:
compas.raise_if_ironpython()
__all__ = ['SparseArray', 'SparseDiagonal']
class SparseArrayError(Exception):
    """Raised when the (rows, cols, data) input is inconsistent with the declared shape."""
    pass
class SparseArray(object):
    """Two-dimensional sparse array stored as a dict-of-dicts.

    Built from COO-style input ``ijk = (rows, cols, data)`` plus an explicit
    ``(m, n)`` shape.  Entries live in ``self._data[i][j]``; absent entries
    read as 0.0.  Products with dense ``Array`` operands delegate to alglib
    (via the ``matrix`` property); sparse-sparse products are pure Python.
    """
    # dtype keywords accepted by the constructor, mapped to value casters
    dtypes = {
        'f'    : float,
        'float': float,
        'i'    : int,
        'int'  : int
    }
    def __init__(self, ijk, shape, dtype='f'):
        """Initialise from ``(rows, cols, data)`` triples, a shape and a dtype key."""
        self.__rows = None
        self.__cols = None
        self.__data = None
        self._data = None
        self._shape = None
        self._dtype = None
        # order matters: shape and dtype must be set before the data setter runs
        self.shape = shape
        self.dtype = dtype
        self.data = ijk
    # ==========================================================================
    # descriptors
    # ==========================================================================
    @property
    def data(self):
        """The dict-of-dicts storage (not the original flat data list)."""
        return self._data
    @data.setter
    def data(self, ijk):
        m, n = self.shape
        dtype = SparseArray.dtypes[self.dtype]
        rows, cols, data = ijk
        self.__rows = rows
        self.__cols = cols
        self.__data = data
        self._data = {i: {} for i in rows}
        for i, j, k in zip(rows, cols, data):
            # every index must fall inside the declared shape
            if i >= m or j >= n:
                raise SparseArrayError('Data not compatible with shape.')
            self._data[i][j] = dtype(k)
    @property
    def matrix(self):
        """The equivalent alglib sparse matrix, converted to CRS storage."""
        m, n = self.shape
        M = xalglib.sparsecreate(m, n, len(self.__data))
        for i in self._data:
            for j in self._data[i]:
                k = self._data[i][j]
                xalglib.sparseset(M, i, j, k)
        xalglib.sparseconverttocrs(M)
        return M
    @property
    def dtype(self):
        """The dtype keyword ('f', 'float', 'i' or 'int')."""
        return self._dtype
    @dtype.setter
    def dtype(self, dtype):
        if dtype not in SparseArray.dtypes:
            raise TypeError
        self._dtype = dtype
    @property
    def shape(self):
        """The (m, n) dimensions of the array."""
        return self._shape
    @shape.setter
    def shape(self, shape):
        m, n = shape
        self._shape = m, n
    # ==========================================================================
    # customisation
    # ==========================================================================
    def __str__(self):
        return str(self._data)
    def __getitem__(self, key):
        """Support row (int), row-selection (list/slice) and tuple indexing.

        * ``A[i]`` returns row ``i`` as a dense list (missing entries are 0.0).
        * ``A[[i, j, ...]]`` and ``A[a:b]`` return a new SparseArray whose
          rows are renumbered from 0.
        * ``A[i, j]`` returns a scalar; mixed int/non-int tuples return rows,
          columns or submatrices.
        """
        m, n = self.shape
        if isinstance(key, int):
            if key >= m:
                raise KeyError
            row = []
            for j in range(n):
                if key in self._data and j in self._data[key]:
                    row.append(self._data[key][j])
                else:
                    row.append(0.0)
            return row
        if isinstance(key, list):
            keys = key
            rows, cols, data = [], [], []
            for i, key in enumerate(keys):
                if key >= m:
                    raise KeyError
                if key in self._data:
                    for j in self._data[key]:
                        rows.append(i)
                        cols.append(j)
                        data.append(self._data[key][j])
            return SparseArray((rows, cols, data), (len(keys), n))
        if isinstance(key, slice):
            # BUGFIX: under Python 3 ``range`` is not a list, so the recursive
            # lookup used to fall through every isinstance test and return None.
            keys = list(range(* key.indices(m)))
            return self[keys]
        if isinstance(key, tuple):
            i, j = key
            i_int = isinstance(i, int)
            j_int = isinstance(j, int)
            if i_int and j_int:
                if i in self._data and j in self._data[i]:
                    return self._data[i][j]
                return 0.0
            if i_int:
                return self[i][j]
            if j_int:
                return self[i].transpose()[j]
            return self[i].transpose()[j].transpose()
    def __setitem__(self, key, value):
        raise NotImplementedError
    # ==========================================================================
    # conversions
    # ==========================================================================
    def to_dense(self):
        """Return a dense Array with zeros filled in for missing entries."""
        m, n = self.shape
        data = []
        for i in range(m):
            for j in range(n):
                if i in self._data and j in self._data[i]:
                    data.append(self._data[i][j])
                else:
                    data.append(0.0)
        return Array(data, (m, n))
    def to_csc(self):
        pass
    # ==========================================================================
    # linalg
    # ==========================================================================
    def diagonal(self):
        """Return the main diagonal (length min(m, n)) as a list."""
        m, n = self.shape
        if m <= n:
            return [self[i, i] for i in range(m)]
        return [self[i, i] for i in range(n)]
    def transpose(self):
        """Return a new SparseArray with rows and columns swapped."""
        m, n = self.shape
        rows, cols, data = [], [], []
        for i in self._data:
            for j in self._data[i]:
                k = self._data[i][j]
                rows.append(j)
                cols.append(i)
                data.append(k)
        return SparseArray((rows, cols, data), (n, m))
    def _dot(self, other):
        """Sparse-sparse product A.B computed row-by-row in pure Python."""
        A = self
        B = other.transpose()
        m, n = A.shape
        k, n = B.shape
        shape = m, k
        rows, cols, data = [], [], []
        # precompute populated column keys per row for fast set intersection
        Arows = {i: set(A._data[i]) for i in A._data}
        Bcols = {j: set(B._data[j]) for j in B._data}
        for i, setrow in iter(Arows.items()):
            for j, setcol in iter(Bcols.items()):
                keys = setrow & setcol
                if keys:
                    value = sum(A._data[i][key] * B._data[j][key] for key in keys)
                    rows.append(i)
                    cols.append(j)
                    data.append(value)
        return SparseArray((rows, cols, data), shape)
    def _tdot(self, other):
        """Sparse-sparse product A^T.B."""
        A = self.transpose()
        B = other
        return A._dot(B)
    def dot(self, other):
        """A.B: dense Array operands go through alglib, sparse operands through _dot."""
        if isinstance(other, Array):
            m, n = self.shape
            n, k = other.shape
            A = self.matrix
            b = other.data
            data = xalglib.sparsemm(A, b, k, Zeros((m, k)).data)
            return Array(data, (m, k))
        return self._dot(other)
    def tdot(self, other):
        """A^T.B: dense Array operands go through alglib, sparse operands through _tdot."""
        if isinstance(other, Array):
            m, n = self.shape
            n, k = other.shape
            A = self.matrix
            b = other.data
            data = xalglib.sparsemtm(A, b, k, Zeros((n, k)).data)
            return Array(data, (n, k))
        return self._tdot(other)
    def xdot(self, B, C, a=1.0, c=1.0):
        raise NotImplementedError
# ==============================================================================
# Special
# ==============================================================================
class SparseDiagonal(SparseArray):
    """Square sparse array holding the given values on its main diagonal."""
    def __init__(self, data, dtype='f'):
        size = len(data)
        indices = range(size)
        super(SparseDiagonal, self).__init__((indices, indices, data), (size, size), dtype)
# ==============================================================================
# Main
# ==============================================================================
if __name__ == "__main__":
    import compas
    from compas.datastructures import Network
    network = Network.from_obj(compas.get('lines.obj'))
    e = network.number_of_edges()
    v = network.number_of_vertices()
    vertices = network.get_vertices_attributes('xyz')
    edges = list(network.edges())
    xyz = Array(vertices, (v, 3))
    shape = e, v
    rows, cols, data = [], [], []
    # Build the connectivity matrix C (edges x vertices): -1 at the edge's
    # start vertex, +1 at its end vertex.  Loop variables renamed so they no
    # longer shadow the vertex count ``v`` computed above.
    for index, (start, end) in enumerate(edges):
        rows.append(index)
        rows.append(index)
        cols.append(start)
        cols.append(end)
        data.append(-1)
        data.append(+1)
    C = SparseArray((rows, cols, data), shape)
    # The diagonal of Ct.C is the vertex degree; all three variants must agree.
    print(C.transpose()._dot(C).diagonal())
    print(C.transpose().dot(C).diagonal())
    print(C.tdot(C).diagonal())
|
<filename>ingestion/functions/parsing/argentina/argentina_test.py
import os
import unittest
from datetime import date
from argentina import argentina
# Placeholder provenance values injected into every parsed case by the test.
_SOURCE_ID = "placeholder_ID"
_SOURCE_URL = "placeholder_URL"
class ArgentinaTest(unittest.TestCase):
    """Regression test for argentina.parse_cases against a checked-in sample CSV."""
    def setUp(self):
        # show full diffs on assertion failure; the expected dicts are large
        self.maxDiff = None
    def test_parse(self):
        '''
        Parse sample_data.csv and compare against the expected case dicts.
        Includes imported and internally transmitted cases
        '''
        current_dir = os.path.dirname(__file__)
        sample_data_file = os.path.join(current_dir, "sample_data.csv")
        result = argentina.parse_cases(
            sample_data_file, _SOURCE_ID, _SOURCE_URL)
        # order-insensitive comparison of the parsed cases
        self.assertCountEqual(list(result), [
            {'caseReference': {'sourceId': 'placeholder_ID',
                               'sourceEntryId': '1000007',
                               'sourceUrl': 'placeholder_URL'},
             'location': {
                 "administrativeAreaLevel2": "Patiño",
                 "country": "Argentina",
                 "administrativeAreaLevel1": "Formosa",
                 "geoResolution": "Admin2",
                 "name": '<NAME>',
                 "geometry": {
                     "latitude": -24.87505855,
                     "longitude": -59.95853733
                 },
             },
             'demographics': {'ageRange': {'start': 26.0, 'end': 26.0}, 'gender': 'Male'},
             'events': [{'name': 'confirmed',
                         'value': 'Laboratory Test',
                         'dateRange': {'start': '06/25/2020', 'end': '06/25/2020'}},
                        {'name': 'outcome', 'value': 'Recovered'}],
             'travelHistory': None,
             'notes': 'Using Date of Diagnosis as the date of confirmation., Patient recovery was confirmed by a negative laboratory test., Province in charge of case reported as Formosa, Argentina., Case last updated on 09/18/2020., Case was dealt with through Public health system., Diagnostic notes: Caso confirmado por laboratorio - No Activo por criterio de laboratorio'},
            {'caseReference': {'sourceId': 'placeholder_ID',
                               'sourceEntryId': '1000010',
                               'sourceUrl': 'placeholder_URL'},
             'location': {
                 "administrativeAreaLevel2": "Comuna 7",
                 "country": "Argentina",
                 "administrativeAreaLevel1": "Ciudad Autónoma de Buenos Aires",
                 "geoResolution": "Admin2",
                 "name": '<NAME>, Ciudad Autónoma de Buenos Aires, Argentina',
                 "geometry": {
                     "latitude": -34.63655441,
                     "longitude": -58.45188686
                 },
             },
             'demographics': {'ageRange': {'start': 7.0, 'end': 7.0}, 'gender': 'Male'},
             'events': [{'name': 'confirmed',
                         'value': 'Laboratory Test',
                         'dateRange': {'start': '06/01/2020', 'end': '06/01/2020'}},
                        {'name': 'outcome', 'value': 'Recovered'}],
             'travelHistory': None,
             'notes': 'Using Date of Diagnosis as the date of confirmation., Patient recovery was confirmed by a number of days elapsing with no symptoms., Province in charge of case reported as CABA, Argentina., Case last updated on 09/18/2020., Case was dealt with through Public health system., Diagnostic notes: Caso confirmado por laboratorio - No activo (por tiempo de evolución)'},
            {'caseReference': {'sourceId': 'placeholder_ID',
                               'sourceEntryId': '1000012',
                               'sourceUrl': 'placeholder_URL'},
             'location': {
                 "country": "Argentina",
                 "administrativeAreaLevel1": "Ciudad Autónoma de Buenos Aires",
                 "geoResolution": "Admin1",
                 "name": 'Ciudad Autónoma de Buenos Aires, Argentina',
                 "geometry": {
                     "latitude": -34.61448692,
                     "longitude": -58.44590845
                 },
             },
             'demographics': {'ageRange': {'start': 46.0, 'end': 46.0}, 'gender': 'Male'},
             'events': [{'name': 'confirmed',
                         'value': 'Laboratory Test',
                         'dateRange': {'start': '05/31/2020', 'end': '05/31/2020'}},
                        {'name': 'onsetSymptoms',
                         'dateRange': {'start': '05/26/2020', 'end': '05/26/2020'}},
                        {'name': 'outcome',
                         'value': 'hospitalAdmission',
                         'dateRange': {'start': '05/31/2020', 'end': '05/31/2020'}},
                        {'name': 'outcome', 'value': 'Recovered'}],
             'symptoms': {'status': 'Symptomatic'},
             'travelHistory': None,
             'notes': 'Using Date of Diagnosis as the date of confirmation., Patient recovery was confirmed by a negative laboratory test., Province in charge of case reported as CABA, Argentina., Case last updated on 09/18/2020., Case was dealt with through Private health system., Diagnostic notes: Caso confirmado por laboratorio - No Activo por criterio de laboratorio'},
            {'caseReference': {'sourceId': 'placeholder_ID',
                               'sourceEntryId': '1000015',
                               'sourceUrl': 'placeholder_URL'},
             'location': {
                 "administrativeAreaLevel2": "Comuna 7",
                 "country": "Argentina",
                 "administrativeAreaLevel1": "Ciudad Autónoma de Buenos Aires",
                 "geoResolution": "Admin2",
                 "name": 'Comuna 7, Ciudad Autónoma de Buenos Aires, Argentina',
                 "geometry": {
                     "latitude": -34.63655441,
                     "longitude": -58.45188686
                 },
             },
             'demographics': {'ageRange': {'start': 29.0, 'end': 29.0},
                              'gender': 'Female'},
             'events': [{'name': 'confirmed',
                         'value': 'Laboratory Test',
                         'dateRange': {'start': '06/01/2020', 'end': '06/01/2020'}},
                        {'name': 'onsetSymptoms',
                         'dateRange': {'start': '05/18/2020', 'end': '05/18/2020'}},
                        {'name': 'outcome', 'value': 'Recovered'}],
             'symptoms': {'status': 'Symptomatic'},
             'travelHistory': None,
             'notes': 'Using Date of Diagnosis as the date of confirmation., Patient recovery was confirmed by a number of days elapsing with no symptoms., Province in charge of case reported as Buenos Aires, Argentina., Case last updated on 09/18/2020., Case was dealt with through Private health system., Diagnostic notes: Caso confirmado por laboratorio - No activo (por tiempo de evolución)'},
            {'caseReference': {'sourceId': 'placeholder_ID',
                               'sourceEntryId': '1039608',
                               'sourceUrl': 'placeholder_URL'},
             'location': {
                 "country": "Argentina",
                 "administrativeAreaLevel1": "Buenos Aires",
                 "geoResolution": "Admin1",
                 "name": 'Buenos Aires, Argentina',
                 "geometry": {
                     "latitude": -36.67373207,
                     "longitude": -60.55722017
                 },
             },
             'demographics': {'ageRange': {'start': 70.0, 'end': 70.0},
                              'gender': 'Male'},
             'events': [{'name': 'confirmed',
                         'value': 'Laboratory Test',
                         'dateRange': {'start': '06/10/2020', 'end': '06/10/2020'}},
                        {'name': 'onsetSymptoms',
                         'dateRange': {'start': '06/04/2020', 'end': '06/04/2020'}},
                        {'name': 'outcome', 'value': 'Recovered'}],
             'symptoms': {'status': 'Symptomatic'},
             'travelHistory': {
                 "traveledPrior30Days": True,
                 "travel": [
                     {
                         "location": {
                             "country": "Italy",
                             "geoResolution": "Country",
                             "name": 'Italy',
                             "geometry": {
                                 "latitude": 41.87194,
                                 "longitude": 12.56738
                             }
                         }
                     }
                 ]
             },
             'notes': 'Using Date of Diagnosis as the date of confirmation., Patient recovery was confirmed by a negative laboratory test., Province in charge of case reported as Buenos Aires, Argentina., Case last updated on 03/03/2021., Case was dealt with through Public health system., Diagnostic notes: Caso confirmado por laboratorio - No Activo por criterio de laboratorio'}
        ])
|
import argparse
import csv
import os
import time
from core_data_modules.cleaners import Codes
from core_data_modules.traced_data import Metadata, TracedData
from core_data_modules.traced_data.io import TracedDataJsonIO, TracedDataCSVIO
if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="Joins radio show answers with survey answers on respondents' "
                                                 "phone ids.")
    parser.add_argument("user", help="User launching this program")
    parser.add_argument("json_input_path", metavar="json-input-path",
                        help="Path to the input messages JSON file, containing a list of serialized TracedData objects")
    parser.add_argument("survey_input_path", metavar="survey-input-path",
                        help="Path to the cleaned survey JSON file, containing a list of serialized TracedData objects")
    parser.add_argument("flow_name", metavar="flow-name",
                        help="Name of activation flow from which this data was derived")
    parser.add_argument("variable_name", metavar="variable-name",
                        help="Name of message variable in flow")
    parser.add_argument("json_output_path", metavar="json-output-path",
                        help="Path to a JSON file to write processed messages to")
    parser.add_argument("csv_output_path", metavar="csv-output-path",
                        help="Path to a CSV file to write the joined dataset to")
    args = parser.parse_args()
    user = args.user
    json_input_path = args.json_input_path
    survey_input_path = args.survey_input_path
    variable_name = args.variable_name
    flow_name = args.flow_name
    json_output_path = args.json_output_path
    csv_output_path = args.csv_output_path

    def _ensure_parent_dir(path):
        # Create the parent directory of ``path`` when it has one and it is
        # missing.  The original code used ``dirname(...) is not ""`` — an
        # identity comparison with a literal, which is implementation-defined
        # and a SyntaxWarning on Python 3.8+; the correct test is equality.
        parent = os.path.dirname(path)
        if parent != "" and not os.path.exists(parent):
            os.makedirs(parent)

    message_keys = [
        "avf_phone_id",
        "{} (Run ID) - {}".format(variable_name, flow_name),
        "{} (Time) - {}".format(variable_name, flow_name),
        "{} (Text) - {}".format(variable_name, flow_name)
    ]
    survey_keys = [
        "District (Text) - wt_demog_1",
        "Gender (Text) - wt_demog_1",
        "Urban_Rural (Text) - wt_demog_1",
        "Radio_Station (Text) - wt_demog_2",
        "Age (Text) - wt_demog_2",
        "Education_Level (Text) - wt_demog_2",
        "Idp (Text) - wt_demog_2",
        "Origin_District (Text) - wt_demog_2",
        "Cholera_Vaccination (Text) - wt_practice",
        "Household_Sickness (Text) - wt_practice",
        "Trustworthy_Advisors (Text) - wt_practice"
    ]
    # Load messages
    with open(json_input_path, "r") as f:
        data = TracedDataJsonIO.import_json_to_traced_data_iterable(f)
    # Load surveys
    with open(survey_input_path, "r") as f:
        surveys = TracedDataJsonIO.import_json_to_traced_data_iterable(f)
    # Add survey data to the messages
    TracedData.update_iterable(user, "avf_phone_id", data, surveys, "survey_responses")
    # Mark missing survey entries in the raw data as true missing
    for td in data:
        for key in survey_keys:
            if key not in td:
                td.append_data({key: Codes.TRUE_MISSING}, Metadata(user, Metadata.get_call_location(), time.time()))
    # Write json output
    _ensure_parent_dir(json_output_path)
    with open(json_output_path, "w") as f:
        TracedDataJsonIO.export_traced_data_iterable_to_json(data, f, pretty_print=True)
    # Output to CSV for analysis
    export_keys = list(message_keys)
    export_keys.extend(survey_keys)
    _ensure_parent_dir(csv_output_path)
    with open(csv_output_path, "w") as f:
        TracedDataCSVIO.export_traced_data_iterable_to_csv(data, f, headers=export_keys)
|
<gh_stars>1-10
'''
Created on Dec 12, 2013
@author: Mark V Systems Limited
(c) Copyright 2013 Mark V Systems Limited, All rights reserved.
Input file parameters may be in JSON (without newlines for pretty printing as below):
[ {# current fields in JSON structure from Arelle Wrapper, per instance
"file": "file path to instance or html",
"cik": "1234567890",
"cikNameList": { "cik1": "name1", "cik2":"name2", "cik3":"name3"...},
"submissionType" : "SDR-A",
"exhibitType": "EX-99.K",
"accessionNumber":"0001125840-15-000159" ,
# new fields
"periodOfReport": "mm-dd-yyyy",
"entity.fyEnd": "mm/dd",
"voluntaryFilerFlag": true/false, # JSON Boolean or absent
"wellKnownSeasonedIssuerFlag": true/false, # JSON Boolean or absent
"shellCompanyFlag": true/false, true/false, # JSON Boolean or absent
"acceleratedFilerStatus": true/false, # JSON Boolean or absent
"smallBusinessFlag": true/false, # JSON Boolean or absent
"emergingGrowthCompanyFlag": true/false, # JSON Boolean or absent
"exTransitionPeriodFlag": true/false, # JSON Boolean or absent
# filer - use "cik" above
"invCompanyType": "N-1A" # from table of investment company types
"rptIncludeAllSeriesFlag": true/false, # JSON Boolean or absent
"rptSeriesClassInfo.seriesIds": ["S0000990666", ...] # list of EDGAR seriesId values
"newClass2.seriesIds": [] # //seriesId xpath result on submission headers
},
{"file": "file 2"...
]
For test case operation, the above fields accepted from testcase variation:
<data>
<conf:parameter name="cikName" datatype="xs:string" value="cik1:name1" xmlns="" xmlns:conf="http://edgar/2009/conformance" />
<conf:parameter name="cikName" datatype="xs:string" value="cik2:name2" xmlns="" xmlns:conf="http://edgar/2009/conformance" />
<conf:parameter name="cikName" datatype="xs:string" value="cik3:name3" xmlns="" xmlns:conf="http://edgar/2009/conformance" />
<conf:parameter name="submissionType" datatype="xs:string" value="8-K" xmlns="" xmlns:conf="http://edgar/2009/conformance" />
<conf:parameter name="periodOfReport" datatype="xs:string" value="12-31-2017" xmlns="" xmlns:conf="http://edgar/2009/conformance" />
<conf:parameter name="voluntaryFilerFlag" datatype="xs:boolean" value="true" xmlns="" xmlns:conf="http://edgar/2009/conformance" />
<conf:parameter name="coregCikFileNumber" datatype="xs:string" value="cik1:fileNbr1" xmlns="" xmlns:conf="http://edgar/2009/conformance" />
<conf:parameter name="coregCikFileNumber" datatype="xs:string" value="cik2:fileNbr2" xmlns="" xmlns:conf="http://edgar/2009/conformance" />
<conf:parameter name="coregCikFileNumber" datatype="xs:string" value="cik3:fileNbr3" xmlns="" xmlns:conf="http://edgar/2009/conformance" />
<conf:parameter name="sroId" datatype="xs:string" value="NASD" xmlns="" xmlns:conf="http://edgar/2009/conformance" />
<conf:parameter name="sroId" datatype="xs:string" value="NYSE" xmlns="" xmlns:conf="http://edgar/2009/conformance" />
...
<instance readMeFirst="true">e9999999ng-20081231.xml</instance>
 </data>
(Accession number is only needed for those EdgarRenderer output transformations of
FilingSummary.xml which require it as a parameter (such as EDGAR's internal workstations,
which have a database that requires accession number as part of the query string to retrieve
a file of a submission.)
On Windows, the input file argument must be specially quoted if passed in via Java
due to a Java bug on Windows shell interface (without the newlines for pretty printing below):
"[{\"file\":\"z:\\Documents\\dir\\gpc_gd1-20130930.htm\",
\"cik\": \"0000350001\",
\"cikNameList\": {\"0000350001\":\"BIG FUND TRUST CO\"},
\"submissionType\":\"SDR-A\", \"exhibitType\":\"EX-99.K SDR.INS\"}]"
To build cached deprecated concepts files (requires internet access):
First delete any resources/*deprecated-concept.json which you want to rebuild
arelleCmdLine --plugin validate/EFM --build-deprecated-concepts-file
In GUI mode please use formula parameters dialog to emulate the above. The parameters are named as above (with no prefix), and
an additional EdgarRenderer parameters:
noLogsInSummary or includeLogsInSummary (default) (this parameter does not need a value, just presence)
summaryXslt (use EdgarWorkstationSummarize.xslt to emulate EDGAR workstation)
reportXslt (use EdgarWorkstationInstanceReport.xslt to emulate EDGAR workstation)
'''
import os, io, json, zipfile, logging
jsonIndent = 1 # None for most compact, 0 for left aligned
from decimal import Decimal
from lxml.etree import XML, XMLSyntaxError
from arelle import ModelDocument, ModelValue, XmlUtil, FileSource
from arelle.ModelDocument import Type
from arelle.ModelValue import qname
from arelle.PluginManager import pluginClassMethods # , pluginMethodsForClasses, modulePluginInfos
from arelle.UrlUtil import authority, relativeUri
from arelle.ValidateFilingText import referencedFiles
from .Document import checkDTSdocument
from .Filing import validateFiling
try:
import regex as re
except ImportError:
import re
from collections import defaultdict
def dislosureSystemTypes(disclosureSystem, *args, **kwargs):
    """Declare the disclosure system provided by this plugin.

    Returns a tuple of (disclosure system name, flag attribute name) pairs;
    Arelle sets the named attribute on the disclosure system when EFM is
    selected.  (The misspelled function name is the registered hook name and
    must not be changed.)
    """
    return (("EFM", "EFMplugin"),)
def disclosureSystemConfigURL(disclosureSystem, *args, **kwargs):
    """Return the path of the EFM config.xml shipped alongside this plugin module."""
    plugin_dir = os.path.dirname(__file__)
    return os.path.join(plugin_dir, "config.xml")
def validateXbrlStart(val, parameters=None, *args, **kwargs):
    """Collect EFM validation parameters at the start of XBRL validation.

    Merges parameters into ``val.params`` from, in order: formula/command-line
    parameters, an ELOparams JSON blob, and the current report's entryPoint.
    No-op unless the EFM disclosure system is selected.
    """
    val.validateEFMplugin = val.validateDisclosureSystem and getattr(val.disclosureSystem, "EFMplugin", False)
    if not (val.validateEFMplugin):
        return
    val.params = {}
    parameterNames = ("CIK", "cik", "cikList", "cikNameList", "submissionType", "exhibitType", # CIK or cik both allowed
                      "periodOfReport", "entity.fyEnd", "voluntaryFilerFlag",
                      "wellKnownSeasonedIssuerFlag", "shellCompanyFlag", "acceleratedFilerStatus", "smallBusinessFlag",
                      "emergingGrowthCompanyFlag", "exTransitionPeriodFlag", "invCompanyType",
                      "rptIncludeAllSeriesFlag", "rptSeriesClassInfo.seriesIds", "newClass2.seriesIds")
    if parameters: # parameter-provided CIKs and registrant names
        for paramName in parameterNames:
            p = parameters.get(ModelValue.qname(paramName,noPrefixIsNoNamespace=True))
            # parameter entries are (type, value) pairs; skip null placeholders
            if p and len(p) == 2 and p[1] not in ("null", "None", None):
                v = p[1] # formula dialog and cmd line formula parameters may need type conversion
                if isinstance(v, str):
                    # boolean-valued flags arrive as "true"/"false" strings
                    if paramName in {"voluntaryFilerFlag", "wellKnownSeasonedIssuerFlag", "shellCompanyFlag", "acceleratedFilerStatus",
                                     "smallBusinessFlag", "emergingGrowthCompanyFlag", "exTransitionPeriodFlag", "rptIncludeAllSeriesFlag"}:
                        v = {"true":True, "false":False}.get(v)
                    # seriesIds parameters are whitespace-separated lists
                    elif paramName in {"rptSeriesClassInfo.seriesIds", "newClass2.seriesIds"}:
                        v = v.split()
                val.params[paramName] = v
        if "CIK" in val.params: # change to lower case key
            val.params["cik"] = val.params["CIK"]
            del val.params["CIK"]
        for paramName, p in parameters.items(): # allow ELOparams to be in any namespace (no xmlns="" required)
            if paramName and paramName.localName == "ELOparams" and len(p) == 2 and p[1] not in ("null", "None", None):
                try:
                    for key, value in json.loads(p[1]).items():
                        val.params[{"CIK":"cik"}.get(key,key)] = value # change upper case CIK to lower case
                except (ValueError, AttributeError, TypeError):
                    val.modelXbrl.error("arelle.testcaseVariationParameters",
                                        _("parameter ELOparams has malformed JSON %(json)s object"),
                                        modelXbrl=val.modelXbrl, json=p[1][:100])
                # only the first ELOparams parameter found is processed
                break
    # parameters may also come from report entryPoint (such as exhibitType for SDR)
    if hasattr(val.modelXbrl.modelManager, "efmFiling"):
        efmFiling = val.modelXbrl.modelManager.efmFiling
        if efmFiling.reports: # possible that there are no reports
            entryPoint = efmFiling.reports[-1].entryPoint
            for paramName in parameterNames: # cik is lower case here
                if paramName in entryPoint and entryPoint[paramName] not in (None, ""):
                    val.params[paramName] = entryPoint[paramName] # if not set uses prior value
    # exhibitType may be an attachmentType, if so remove ".INS"
    if val.params.get("exhibitType", "").endswith(".INS"):
        val.params["exhibitType"] = val.params["exhibitType"][:-4]
    if isinstance(val.params.get("cikNameList", None), str):
        # cik1, cik2, cik3 in cikList and name1|Edgar|name2|Edgar|name3 in cikNameList strings
        _filerIdentifiers = val.params["cikList"].split(",") if "cikList" in val.params else []
        _filerNames = val.params["cikNameList"].split("|Edgar|") if "cikNameList" in val.params else []
        if _filerIdentifiers:
            if len(_filerNames) not in (0, len(_filerIdentifiers)):
                val.modelXbrl.error(("EFM.6.05.24.parameters", "GFM.3.02.02"),
                    _("parameters for cikList and cikNameList different list entry counts: %(cikList)s, %(cikNameList)s"),
                    modelXbrl=val.modelXbrl, cikList=_filerIdentifiers, cikNameList=_filerNames)
            # rebuild cikNameList as a {cik: name-or-None} mapping
            if _filerNames:
                val.params["cikNameList"]=dict((_cik,_filerNames[i] if i < len(_filerNames) else None)
                                               for i, _cik in enumerate(_filerIdentifiers))
            else:
                val.params["cikNameList"]=dict((_cik,None) for _cik in _filerIdentifiers)
            del val.params["cikList"]
        elif _filerNames:
            val.modelXbrl.error(("EFM.6.05.24.parameters", "GFM.3.02.02"),
                _("parameters for cikNameList provided but missing corresponding cikList: %(cikNameList)s"),
                modelXbrl=val.modelXbrl, cikNameList=_filerNames)
            del val.params["cikNameList"] # can't process without cik's as keys
    if val.params.get("exhibitType", "") == "EX-2.01": # only applicable for edgar production and parameterized testcases
        val.EFM60303 = "EFM.6.23.01"
    else:
        val.EFM60303 = "EFM.6.03.03"
    # enable unit-of-measure validation when a standard-taxonomy UTR concept is in the DTS
    if any((concept.qname.namespaceURI in val.disclosureSystem.standardTaxonomiesDict and concept.modelDocument.inDTS)
           for concept in val.modelXbrl.nameConcepts.get("UTR",())):
        val.validateUTR = True
    modelManager = val.modelXbrl.modelManager
    if hasattr(modelManager, "efmFiling"):
        efmFiling = modelManager.efmFiling
        efmFiling.submissionType = val.params.get("submissionType")
def validateXbrlFinally(val, *args, **kwargs):
    """Run the EFM filing-level validation once the XBRL model is fully loaded."""
    if not val.validateEFMplugin:
        return
    xbrl = val.modelXbrl
    manager = xbrl.modelManager
    statusMsg = _("validating {0} filing rules").format(val.disclosureSystem.name)
    xbrl.profileActivity()
    manager.showStatus(statusMsg)
    validateFiling(val, xbrl, isEFM=True)
    xbrl.profileActivity(statusMsg, minTimeToShow=0.0)
    manager.showStatus(None)
def validateXbrlDtsDocument(val, modelDocument, isFilingDocument, *args, **kwargs):
    """Validate a single DTS document when the EFM disclosure system is active."""
    if val.validateEFMplugin:
        checkDTSdocument(val, modelDocument, isFilingDocument)
def filingStart(cntlr, options, filesource, entrypointFiles, sourceZipStream=None, responseZipStream=None, *args, **kwargs):
    """Begin an EFM filing: create the Filing object and notify renderer/crypt plugins."""
    modelManager = cntlr.modelManager
    # cntlr.addToLog("TRACE EFM filing start val={} plugin={}".format(modelManager.validateDisclosureSystem, getattr(modelManager.disclosureSystem, "EFMplugin", False)))
    if modelManager.validateDisclosureSystem and getattr(modelManager.disclosureSystem, "EFMplugin", False):
        # cntlr.addToLog("TRACE EFM filing start 2 classes={} moduleInfos={}".format(pluginMethodsForClasses, modulePluginInfos))
        modelManager.efmFiling = Filing(cntlr, options, filesource, entrypointFiles, sourceZipStream, responseZipStream)
        # this event is called for filings (of instances) as well as test cases, for test case it just keeps options accessible
        for pluginXbrlMethod in pluginClassMethods("EdgarRenderer.Filing.Start"):
            pluginXbrlMethod(cntlr, options, entrypointFiles, modelManager.efmFiling)
        # check if any entrypointFiles have an encryption is specified
        if isinstance(entrypointFiles, list):
            for pluginXbrlMethod in pluginClassMethods("Security.Crypt.Filing.Start"):
                pluginXbrlMethod(modelManager.efmFiling, options, filesource, entrypointFiles, sourceZipStream)
def guiTestcasesStart(cntlr, modelXbrl, *args, **kwargs):
    """In GUI mode, create an efmFiling when a testcase document is opened with EFM active."""
    modelManager = cntlr.modelManager
    if (cntlr.hasGui and modelXbrl.modelDocument.type in Type.TESTCASETYPES and
        modelManager.validateDisclosureSystem and getattr(modelManager.disclosureSystem, "EFMplugin", False)):
        modelManager.efmFiling = Filing(cntlr)
def testcasesStart(cntlr, options, modelXbrl, *args, **kwargs):
    """Close out any per-filing state before a testcase or RSS run begins."""
    # a test or RSS cases run is starting, in which case testcaseVariation... events have unique efmFilings
    modelManager = cntlr.modelManager
    if (hasattr(modelManager, "efmFiling") and
        modelXbrl.modelDocument.type in Type.TESTCASETYPES):
        efmFiling = modelManager.efmFiling
        efmFiling.close() # not needed, dereference
        del modelManager.efmFiling
    if not hasattr(modelXbrl, "efmOptions") and options: # may have already been set by EdgarRenderer in gui startup
        modelXbrl.efmOptions = options # save options in testcase's modelXbrl
def xbrlLoaded(cntlr, options, modelXbrl, entryPoint, *args, **kwargs):
    """Register a newly loaded instance/inline document as a report of the current filing."""
    # cntlr.addToLog("TRACE EFM xbrl loaded")
    modelManager = cntlr.modelManager
    if hasattr(modelManager, "efmFiling"):
        if modelXbrl.modelDocument.type in (Type.INSTANCE, Type.INLINEXBRL, Type.INLINEXBRLDOCUMENTSET):
            efmFiling = modelManager.efmFiling
            efmFiling.addReport(modelXbrl)
            _report = efmFiling.reports[-1]
            _report.entryPoint = entryPoint
            # first report to carry an accessionNumber/exhibitType wins
            if "accessionNumber" in entryPoint and not hasattr(efmFiling, "accessionNumber"):
                efmFiling.accessionNumber = entryPoint["accessionNumber"]
            if "exhibitType" in entryPoint and not hasattr(_report, "exhibitType"):
                _report.exhibitType = entryPoint["exhibitType"]
            efmFiling.arelleUnitTests = modelXbrl.arelleUnitTests.copy() # allow unit tests to be used after instance processing finished
        elif modelXbrl.modelDocument.type == Type.RSSFEED:
            testcasesStart(cntlr, options, modelXbrl)
def xbrlRun(cntlr, options, modelXbrl, *args, **kwargs):
    """Hand the just-validated report to the EdgarRenderer plugin, if present."""
    # cntlr.addToLog("TRACE EFM xbrl run")
    modelManager = cntlr.modelManager
    if (hasattr(modelManager, "efmFiling") and
        modelXbrl.modelDocument.type in (Type.INSTANCE, Type.INLINEXBRL, Type.INLINEXBRLDOCUMENTSET)):
        efmFiling = modelManager.efmFiling
        _report = efmFiling.reports[-1]
        if True: # HF TESTING: not (options.abortOnMajorError and len(modelXbrl.errors) > 0):
            for pluginXbrlMethod in pluginClassMethods("EdgarRenderer.Xbrl.Run"):
                pluginXbrlMethod(cntlr, options, modelXbrl, modelManager.efmFiling, _report)
def filingValidate(cntlr, options, filesource, entrypointFiles, sourceZipStream=None, responseZipStream=None, *args, **kwargs):
    """Plugin hook: filing-level validation across all reports of the filing.

    Checks performed (errors are logged via efmFiling.error, which also
    accumulates the message codes on the filing):
      * SDR filings: exactly one K SDR report; at most one L SDR report per
        entity; required extension files (schema, presentation and label
        linkbases) present; a US-GAAP standard schema referenced; and only
        SDR exhibit types attached.
      * All filings: only one exhibit type per filing, and (except EX-99)
        only one instance per exhibit type.

    Fixes vs prior revision: the "multiple exhibit types" message said
    "filling" instead of "filing"; the unused hasCal flag (calculation
    linkbase presence was tracked but never required) is removed.
    """
    # cntlr.addToLog("TRACE EFM xbrl validate")
    modelManager = cntlr.modelManager
    if hasattr(modelManager, "efmFiling"):
        efmFiling = modelManager.efmFiling
        reports = efmFiling.reports
        # check for dup inline and regular instances
        # SDR checks
        if any(report.documentType and report.documentType.endswith(" SDR")
               for report in reports):
            _kSdrs = [r for r in reports if r.documentType == "K SDR"]
            if not _kSdrs and efmFiling.submissionType in ("SDR", "SDR-A"):
                efmFiling.error("EFM.6.03.08.sdrHasNoKreports",
                                _("SDR filing has no K SDR reports"))
            elif len(_kSdrs) > 1:
                efmFiling.error("EFM.6.03.08.sdrHasMultipleKreports",
                                _("SDR filing has multiple K SDR reports for %(entities)s"),
                                {"entities": ", ".join(r.entityRegistrantName for r in _kSdrs),
                                 "edgarCode": "cp-0308-Sdr-Has-Multiple-K-Reports"},
                                (r.url for r in _kSdrs))
            # group L SDR reports by entity: CIK when meaningful, else registrant name
            _lSdrEntityReports = defaultdict(list)
            for r in reports:
                if r.documentType == "L SDR":
                    _lSdrEntityReports[r.entityCentralIndexKey if r.entityCentralIndexKey != "0000000000"
                                       else r.entityRegistrantName].append(r)
            for lSdrEntity, lSdrEntityReports in _lSdrEntityReports.items():
                if len(lSdrEntityReports) > 1:
                    efmFiling.error("EFM.6.05.24.multipleLSdrReportsForEntity",
                                    _("Filing entity has multiple L SDR reports: %(entity)s"),
                                    {"entity": lSdrEntity},
                                    (r.url for r in lSdrEntityReports))
            # check for required extension files (schema, pre, lbl);
            # the calculation linkbase is optional and therefore not tracked
            for r in reports:
                hasSch = hasPre = hasLbl = False
                for f in r.reportedFiles:
                    if f.endswith(".xsd"): hasSch = True
                    elif f.endswith("_pre.xml"): hasPre = True
                    elif f.endswith("_lab.xml"): hasLbl = True
                missingFiles = ""
                if not hasSch: missingFiles += ", schema"
                if not hasPre: missingFiles += ", presentation linkbase"
                if not hasLbl: missingFiles += ", label linkbase"
                if missingFiles:
                    efmFiling.error("EFM.6.03.02.sdrMissingFiles",
                                    _("%(docType)s report missing files: %(missingFiles)s"),
                                    {"docType": r.documentType, "missingFiles": missingFiles[2:],
                                     "edgarCode": "cp-0302-Sdr-Missing-Files"},
                                    r.url)
                if not r.hasUsGaapTaxonomy:
                    efmFiling.error("EFM.6.03.02.sdrMissingStandardSchema",
                                    _("%(documentType)s submission must use a US GAAP standard schema"),
                                    {"documentType": r.documentType,
                                     "edgarCode": "cp-0302-Sdr-Missing-Standard-Schema"},
                                    r.url)
                if hasattr(r, "exhibitType") and r.exhibitType not in ("EX-99.K SDR", "EX-99.L SDR", "EX-99.K SDR.INS", "EX-99.L SDR.INS"):
                    efmFiling.error("EFM.6.03.02.sdrHasNonSdrExhibit",
                                    _("An SDR filing contains non-SDR exhibit type %(exhibitType)s document type %(documentType)s"),
                                    {"documentType": r.documentType, "exhibitType": r.exhibitType,
                                     "edgarCode": "cp-0302-Sdr-Has-Non-Sdr-Exhibit"},
                                    r.url)
        # exhibit-type uniqueness: group reports by base exhibit type (e.g. "EX-99")
        _exhibitTypeReports = defaultdict(list)
        for r in reports:
            if hasattr(r, "exhibitType") and r.exhibitType:
                _exhibitTypeReports[r.exhibitType.partition(".")[0]].append(r)
        if len(_exhibitTypeReports) > 1:
            efmFiling.error("EFM.6.03.08",
                            _("A filing contains multiple exhibit types %(exhibitTypes)s."),
                            {"exhibitTypes": ", ".join(_exhibitTypeReports.keys())},
                            [r.url for r in reports])
        for _exhibitType, _exhibitReports in _exhibitTypeReports.items():
            if _exhibitType not in ("EX-99",) and len(_exhibitReports) > 1:
                efmFiling.error("EFM.6.03.08.moreThanOneIns",
                                _("A filing contains more than one instance for exhibit type %(exhibitType)s."),
                                {"exhibitType": _exhibitType},
                                [r.url for r in _exhibitReports])
def roleTypeName(modelXbrl, roleURI, *args, **kwargs):
    """Plugin hook: derive a presentable name for a role URI.

    While an EFM filing is active, returns the text after the last "-" of
    the role's definition with any "{transposed}" / "{unlabeled}" /
    "{elements}" markers removed; falls back to the role URI when no
    definition is available.  Returns None when no EFM filing is active.
    """
    if not hasattr(modelXbrl.modelManager, "efmFiling"):
        return None
    roleDefs = modelXbrl.roleTypes.get(roleURI, ())
    if not (roleDefs and roleDefs[0].definition):
        return roleURI
    trailingText = roleDefs[0].definition.rpartition('-')[2]
    cleaned = re.sub(r"\{\s*(transposed|unlabeled|elements)\s*\}", "", trailingText, flags=re.I)
    return cleaned.strip()
def filingEnd(cntlr, options, filesource, entrypointFiles, sourceZipStream=None, responseZipStream=None, *args, **kwargs):
    """Plugin hook: the filing is complete; render output and tear down."""
    #cntlr.addToLog("TRACE EFM filing end")
    modelManager = cntlr.modelManager
    if hasattr(modelManager, "efmFiling"):
        # let EdgarRenderer produce its output before the filing is closed
        for pluginXbrlMethod in pluginClassMethods("EdgarRenderer.Filing.End"):
            pluginXbrlMethod(cntlr, options, filesource, modelManager.efmFiling, sourceZipStream=sourceZipStream)
        #cntlr.addToLog("TRACE EdgarRenderer end")
        # save JSON file of instances and referenced documents
        # NOTE(review): filingReferences is built but never used below —
        # looks like an unfinished feature; confirm before removing.
        filingReferences = dict((report.url, report)
                                for report in modelManager.efmFiling.reports)
        modelManager.efmFiling.close()
        del modelManager.efmFiling
        #cntlr.addToLog("TRACE EFN filing end complete")
def rssItemXbrlLoaded(modelXbrl, rssWatchOptions, rssItem, *args, **kwargs):
    """Plugin hook: an RSS feed item's instance was loaded.

    Simulates the filing & command-line load events for the item.

    Bug fix: testcaseVariationXbrlLoaded requires a third positional
    argument (the variation element to scan for per-instance parameters);
    calling it with two arguments raised TypeError.  The RSS item element
    plays the role of the testcase variation here.
    """
    if hasattr(rssItem.modelXbrl, "efmOptions"):
        testcaseVariationXbrlLoaded(rssItem.modelXbrl, modelXbrl, rssItem)
def rssItemValidated(val, modelXbrl, rssItem, *args, **kwargs):
    """Plugin hook: after validation of an RSS feed item.

    Simulates the report and end-of-filing events for the item.
    """
    feedModelXbrl = rssItem.modelXbrl
    if not hasattr(feedModelXbrl, "efmOptions"):
        return
    testcaseVariationValidated(feedModelXbrl, modelXbrl)
def testcaseVariationXbrlLoaded(testcaseModelXbrl, instanceModelXbrl, modelTestcaseVariation, *args, **kwargs):
    """Plugin hook: a testcase variation's (or RSS item's) instance was loaded.

    Simulates the filing & command-line load events: creates the Filing on
    first use, registers the instance as a report of the filing, and picks
    up per-instance parameters (exhibitType) from the variation element.
    """
    modelManager = instanceModelXbrl.modelManager
    if (hasattr(testcaseModelXbrl, "efmOptions") and
        modelManager.validateDisclosureSystem and getattr(modelManager.disclosureSystem, "EFMplugin", False) and
        instanceModelXbrl.modelDocument.type in (Type.INSTANCE, Type.INLINEXBRL, Type.INLINEXBRLDOCUMENTSET)):
        cntlr = modelManager.cntlr
        options = testcaseModelXbrl.efmOptions
        entrypointFiles = [{"file":instanceModelXbrl.modelDocument.uri}]
        if not hasattr(modelManager, "efmFiling"): # first instance of filing
            modelManager.efmFiling = Filing(cntlr, options, instanceModelXbrl.fileSource, entrypointFiles, None, None, instanceModelXbrl.errorCaptureLevel)
            # this event is called for filings (of instances) as well as test cases, for test case it just keeps options accessible
            for pluginXbrlMethod in pluginClassMethods("EdgarRenderer.Filing.Start"):
                pluginXbrlMethod(cntlr, options, entrypointFiles, modelManager.efmFiling)
        modelManager.efmFiling.addReport(instanceModelXbrl)
        _report = modelManager.efmFiling.reports[-1]
        _report.entryPoint = entrypointFiles[0]
        # allow unit tests to be used after instance processing finished
        modelManager.efmFiling.arelleUnitTests = instanceModelXbrl.arelleUnitTests.copy()
        # check for parameters on instance: find the readMeFirst instance
        # element matching this instance and copy its exhibitType, if any
        for _instanceElt in XmlUtil.descendants(modelTestcaseVariation, "*", "instance", "readMeFirst", "true", False):
            if instanceModelXbrl.modelDocument.uri.endswith(_instanceElt.text):
                if _instanceElt.get("exhibitType"):
                    _report.entryPoint["exhibitType"] = _report.exhibitType = _instanceElt.get("exhibitType")
                break
def testcaseVariationXbrlValidated(testcaseModelXbrl, instanceModelXbrl, *args, **kwargs):
    """Plugin hook: a testcase variation's instance finished validating.

    Simulates the command-line Xbrl.Run event by forwarding the filing and
    its latest report to EdgarRenderer.
    """
    modelManager = instanceModelXbrl.modelManager
    if (hasattr(modelManager, "efmFiling") and
        instanceModelXbrl.modelDocument.type in (Type.INSTANCE, Type.INLINEXBRL, Type.INLINEXBRLDOCUMENTSET)):
        efmFiling = modelManager.efmFiling
        _report = modelManager.efmFiling.reports[-1]
        for pluginXbrlMethod in pluginClassMethods("EdgarRenderer.Xbrl.Run"):
            pluginXbrlMethod(modelManager.cntlr, efmFiling.options, instanceModelXbrl, efmFiling, _report)
def testcaseVariationValidated(testcaseModelXbrl, instanceModelXbrl, errors=None, *args, **kwargs):
    """Plugin hook: a testcase variation completed.

    Simulates the report-validate and end-of-filing events.  When a list is
    passed as 'errors', the filing's error codes produced by filingValidate
    are copied into it (so callers can compare with expected results).
    """
    modelManager = instanceModelXbrl.modelManager
    if (hasattr(modelManager, "efmFiling") and
        instanceModelXbrl.modelDocument.type in (Type.INSTANCE, Type.INLINEXBRL, Type.INLINEXBRLDOCUMENTSET)):
        efmFiling = modelManager.efmFiling
        if isinstance(errors, list):
            # start with a clean slate of error codes for this variation
            del efmFiling.errors[:]
        # validate report types
        filingValidate(efmFiling.cntlr, efmFiling.options, efmFiling.filesource, efmFiling.entrypointfiles, efmFiling.sourceZipStream, efmFiling.responseZipStream) # validate each report
        if isinstance(errors, list):
            errors.extend(efmFiling.errors)
        # simulate filingEnd
        filingEnd(modelManager.cntlr, efmFiling.options, modelManager.filesource, [])
        # flush logfile (assumed to be buffered, empty the buffer for next filing)
        testcaseModelXbrl.modelManager.cntlr.logHandler.flush()
def fileSourceFile(cntlr, filepath, binary, stripDeclaration):
    """Plugin hook: let a crypt/security plugin supply file contents.

    Returns the first non-None result from a Security.Crypt.FileSource.File
    plugin method, or None to let normal file access proceed.
    """
    modelManager = cntlr.modelManager
    if not hasattr(modelManager, "efmFiling"):
        return None
    for cryptFileMethod in pluginClassMethods("Security.Crypt.FileSource.File"):
        result = cryptFileMethod(cntlr, modelManager.efmFiling, filepath, binary, stripDeclaration)
        if result is not None:
            return result
    return None
def fileSourceExists(cntlr, filepath):
    """Plugin hook: let a crypt/security plugin answer file existence.

    Returns the first non-None result from a Security.Crypt.FileSource.Exists
    plugin method, or None to let normal existence checking proceed.
    """
    modelManager = cntlr.modelManager
    if not hasattr(modelManager, "efmFiling"):
        return None
    for cryptExistsMethod in pluginClassMethods("Security.Crypt.FileSource.Exists"):
        existence = cryptExistsMethod(modelManager.efmFiling, filepath)
        if existence is not None:
            return existence
    return None
def commandLineOptionExtender(parser, *args, **kwargs):
    """Plugin hook: add EFM-specific command line options to the parser."""
    # extend command line options to store to database
    parser.add_option("--build-deprecated-concepts-file",
                      action="store_true",
                      dest="buildDeprecatedConceptsFile",
                      help=_("Build EFM Validation deprecated concepts file (pre-cache before use)"))
def utilityRun(self, options, *args, **kwargs):
    """Plugin hook: run command line utility actions before any filings."""
    if options.buildDeprecatedConceptsFile:
        # import deferred so the Util module is only loaded when needed
        from .Util import buildDeprecatedConceptDatesFiles
        buildDeprecatedConceptDatesFiles(self)
class Filing:
    """One EFM filing: its reports plus output and error state.

    Collects Report objects as instances are loaded, accumulates error
    message codes, and manages the output zip (either an in-memory response
    stream or a zipOutputFile from the EdgarRenderer options).
    """
    def __init__(self, cntlr, options=None, filesource=None, entrypointfiles=None, sourceZipStream=None, responseZipStream=None, errorCaptureLevel=None):
        self.cntlr = cntlr
        self.options = options
        self.filesource = filesource
        self.entrypointfiles = entrypointfiles
        self.sourceZipStream = sourceZipStream
        self.responseZipStream = responseZipStream
        self.submissionType = None
        self.reports = []
        self.renderedFiles = set() # filing-level rendered files
        self.reportZip = None
        if responseZipStream:
            # web-service style request: write outputs into the response stream
            self.setReportZipStreamMode('w')
        else:
            try: #zipOutputFile only present with EdgarRenderer plugin options
                if options and options.zipOutputFile:
                    if not os.path.isabs(options.zipOutputFile):
                        # relative zip output goes next to the source file
                        zipOutDir = os.path.dirname(filesource.basefile)
                        zipOutFile = os.path.join(zipOutDir,options.zipOutputFile)
                    else:
                        zipOutFile = options.zipOutputFile
                    self.reportZip = zipfile.ZipFile(zipOutFile, 'w', zipfile.ZIP_DEFLATED, True)
            except AttributeError:
                # options has no zipOutputFile (EdgarRenderer not loaded)
                self.reportZip = None
        self.errorCaptureLevel = errorCaptureLevel or logging._checkLevel("INCONSISTENCY")
        self.errors = []
        self.arelleUnitTests = {} # copied from each instance loaded
        # allow encrypted-filing plugins to initialize for this filing
        for pluginXbrlMethod in pluginClassMethods("Security.Crypt.Init"):
            pluginXbrlMethod(self, options, filesource, entrypointfiles, sourceZipStream)
    def setReportZipStreamMode(self, mode): # mode is 'w', 'r', 'a'
        # required to switch in-memory zip stream between write, read, and append modes
        if self.responseZipStream:
            if self.reportZip: # already open, close and reseek underlying stream
                self.reportZip.close()
            self.responseZipStream.seek(0)
            self.reportZip = zipfile.ZipFile(self.responseZipStream, mode, zipfile.ZIP_DEFLATED, True)
    def close(self):
        ''' MetaFiling.json (not needed?) list of all files written out
        _reports = dict((report.basename, report.json) for report in self.reports)
        _reports["filing"] = {"renderedFiles": sorted(self.renderedFiles)}
        if self.options.logFile:
            _reports["filing"]["logFile"] = self.options.logFile
        if self.reportZip:
            self.reportZip.writestr("MetaFiling.json", json.dumps(_reports, sort_keys=True, indent=jsonIndent))
        else:
            try:
                if self.options.reportsFolder:
                    with open(os.path.join(self.options.reportsFolder, "MetaFiling.json"), mode='w') as f:
                        json.dump(_reports, f, sort_keys=True, indent=jsonIndent)
            except AttributeError: # no reportsFolder attribute
                pass
        '''
        # append the accumulated log to the output zip before closing
        if self.options and self.options.logFile:
            if self.reportZip and self.reportZip.fp is not None: # open zipfile
                _logFile = self.options.logFile
                _logFileExt = os.path.splitext(_logFile)[1]
                if _logFileExt == ".xml":
                    _logStr = self.cntlr.logHandler.getXml(clearLogBuffer=False) # may be saved to file later or flushed in web interface
                elif _logFileExt == ".json":
                    _logStr = self.cntlr.logHandler.getJson(clearLogBuffer=False)
                else: # no ext or _logFileExt == ".txt":
                    _logStr = self.cntlr.logHandler.getText(clearLogBuffer=False)
                self.reportZip.writestr(_logFile, _logStr)
            #else:
            #    with open(_logFile, "wt", encoding="utf-8") as fh:
            #        fh.write(_logStr)
        if self.reportZip: # ok to close if already closed
            self.reportZip.close()
        self.__dict__.clear() # dereference all contents
    def addReport(self, modelXbrl):
        # wrap the loaded instance in a Report and record it for this filing
        _report = Report(modelXbrl)
        self.reports.append(_report)
    def error(self, messageCode, message, messageArgs=None, file=None):
        """Log a filing-level error and remember its message code.

        file may be a string or an iterable of file names; names are
        relativized against the first entry point file.
        """
        if file and len(self.entrypointfiles) > 0:
            # relativize file(s)
            if isinstance(file, _STR_BASE):
                file = (file,)
            if isinstance(self.entrypointfiles[0], dict):
                _baseFile = self.entrypointfiles[0].get("file", ".")
            else:
                _baseFile = self.entrypointfiles[0]
            relFiles = [relativeUri(_baseFile, f) for f in file]
        else:
            relFiles = None
        self.cntlr.addToLog(message, messageCode=messageCode, messageArgs=messageArgs, file=relFiles, level="ERROR")
        self.errors.append(messageCode)
    @property
    def hasInlineReport(self):
        # True when any report of this filing is an inline XBRL document
        return any(getattr(report, "isInline", False) for report in self.reports)
    def writeFile(self, filepath, data):
        # write the data (string or binary); a crypt plugin may take over the write
        for pluginXbrlMethod in pluginClassMethods("Security.Crypt.Write"):
            if pluginXbrlMethod(self, filepath, data):
                return
        with io.open(filepath, "wt" if isinstance(data, str) else "wb") as fh:
            fh.write(data)
class Report:
    """Metadata for a single report (instance or inline document set) of a
    Filing: file names, DEI-style attributes, and reported/rendered files."""
    # DEI fact local names captured from the instance onto this report
    REPORT_ATTRS = {"DocumentType", "DocumentPeriodEndDate", "EntityRegistrantName",
                    "EntityCentralIndexKey", "CurrentFiscalYearEndDate", "DocumentFiscalYearFocus"}
    def lc(self, name):
        # lower-case the first character (e.g. DocumentType -> documentType)
        return name[0].lower() + name[1:]
    def __init__(self, modelXbrl):
        self.isInline = modelXbrl.modelDocument.type in (Type.INLINEXBRL, Type.INLINEXBRLDOCUMENTSET)
        self.url = modelXbrl.modelDocument.uri
        self.reportedFiles = set()
        if modelXbrl.modelDocument.type == Type.INLINEXBRLDOCUMENTSET:
            # an inline document set contributes each of its inline documents
            self.basenames = []
            self.filepaths = []
            for ixDoc in sorted(modelXbrl.modelDocument.referencesDocument.keys(), key=lambda d: d.objectIndex): # preserve order
                if ixDoc.type == Type.INLINEXBRL:
                    self.basenames.append(ixDoc.basename)
                    self.filepaths.append(ixDoc.filepath)
                    self.reportedFiles.add(ixDoc.basename)
        else:
            self.basenames = [modelXbrl.modelDocument.basename]
            self.filepaths = [modelXbrl.modelDocument.filepath]
            self.reportedFiles.add(modelXbrl.modelDocument.basename)
        # initialize the DEI attributes (documentType, ...) to None
        for attrName in Report.REPORT_ATTRS:
            setattr(self, self.lc(attrName), None)
        self.instanceName = self.basenames[0]
        # capture DEI facts from duration contexts without segments
        for f in modelXbrl.facts:
            cntx = f.context
            if cntx is not None and cntx.isStartEndPeriod and not cntx.hasSegment:
                if f.qname is not None and f.qname.localName in Report.REPORT_ATTRS and f.xValue:
                    setattr(self, self.lc(f.qname.localName), f.xValue)
        self.reportedFiles |= referencedFiles(modelXbrl)
        self.renderedFiles = set()
        self.hasUsGaapTaxonomy = False
        sourceDir = os.path.dirname(modelXbrl.modelDocument.filepath)
        # add referenced files that are xbrl-referenced local documents
        refDocUris = set()
        def addRefDocs(doc):
            # recursively collect referenced documents within the source directory
            if doc.type == Type.INLINEXBRLDOCUMENTSET:
                for ixDoc in doc.referencesDocument.keys():
                    if ixDoc.type == Type.INLINEXBRL:
                        addRefDocs(ixDoc)
                return
            for refDoc in doc.referencesDocument.keys():
                _file = refDoc.filepath  # NOTE(review): unused local
                if refDoc.uri not in refDocUris:
                    refDocUris.add(refDoc.uri)
                    if refDoc.filepath and refDoc.filepath.startswith(sourceDir):
                        self.reportedFiles.add(refDoc.filepath[len(sourceDir)+1:]) # add file name within source directory
                    addRefDocs(refDoc)
                if refDoc.type == Type.SCHEMA and refDoc.targetNamespace:
                    # detect whether a us-gaap standard taxonomy schema is referenced
                    nsAuthority = authority(refDoc.targetNamespace, includeScheme=False)
                    nsPath = refDoc.targetNamespace.split('/')
                    if len(nsPath) > 2:
                        if nsAuthority in ("fasb.org", "xbrl.us") and nsPath[-2] == "us-gaap":
                            self.hasUsGaapTaxonomy = True
        addRefDocs(modelXbrl.modelDocument)
    def close(self):
        self.__dict__.clear() # dereference all contents
    @property
    def json(self): # stringify un-jsonable attributes
        return dict((name, value if isinstance(value,(str,int,float,Decimal,list,dict))
                     else sorted(value) if isinstance(value, set)
                     else str(value))
                    for name, value in self.__dict__.items())
# Plugin registration: maps Arelle extension points to the handlers above.
__pluginInfo__ = {
    # Do not use _( ) in pluginInfo itself (it is applied later, after loading
    'name': 'Validate EFM',
    'version': '1.19.1', # SEC EDGAR release 19.1
    'description': '''EFM Validation.''',
    'license': 'Apache-2',
    'import': ('transforms/SEC',), # SEC inline can use SEC transformations
    'author': '<NAME>',
    'copyright': '(c) Copyright 2013-15 <NAME> Limited, All rights reserved.',
    # classes of mount points (required)
    # NOTE(review): 'dislosureSystemTypes' matches the (misspelled) name of
    # the function defined earlier in this module; renaming requires both.
    'DisclosureSystem.Types': dislosureSystemTypes,
    'DisclosureSystem.ConfigURL': disclosureSystemConfigURL,
    # per-document validation lifecycle
    'Validate.XBRL.Start': validateXbrlStart,
    'Validate.XBRL.Finally': validateXbrlFinally,
    'Validate.XBRL.DTS.document': validateXbrlDtsDocument,
    'ModelXbrl.RoleTypeName': roleTypeName,
    # command line / GUI filing lifecycle
    'CntlrCmdLine.Filing.Start': filingStart,
    'CntlrWinMain.Xbrl.Loaded': guiTestcasesStart,
    'Testcases.Start': testcasesStart,
    'CntlrCmdLine.Options': commandLineOptionExtender,
    'CntlrCmdLine.Utility.Run': utilityRun,
    'CntlrCmdLine.Xbrl.Loaded': xbrlLoaded,
    'CntlrCmdLine.Xbrl.Run': xbrlRun,
    'CntlrCmdLine.Filing.Validate': filingValidate,
    'CntlrCmdLine.Filing.End': filingEnd,
    # RSS watch and testcase variation simulation
    'RssItem.Xbrl.Loaded': rssItemXbrlLoaded,
    'Validate.RssItem': rssItemValidated,
    'TestcaseVariation.Xbrl.Loaded': testcaseVariationXbrlLoaded,
    'TestcaseVariation.Xbrl.Validated': testcaseVariationXbrlValidated,
    'TestcaseVariation.Validated': testcaseVariationValidated,
    # encrypted file source support
    'FileSource.File': fileSourceFile,
    'FileSource.Exists': fileSourceExists
}
|
# <reponame>wilmerm/unolet-2022
"""
Objetos para interpretar etiquetas HTML básicas.
"""
VALUES_HTML_FORMAT = {
True: "true", False: "false", None: "none",
}
class attr(object):
    """A single HTML attribute bound to a parent element.

    Values are normalised for HTML by FormatValue (e.g. True -> "true").
    """
    def __init__(self, parent, name, value=None):
        # write through __dict__ to bypass this class's __setattr__
        self.__dict__["parent"] = parent
        self.__dict__["name"] = name.lower()
        self.__dict__["value"] = self.FormatValue(value=value, name=name)
    def __str__(self):
        return self.value
    def __repr__(self):
        return self.value
    def __setattr__(self, name, value):
        # re-normalise whenever the value is reassigned
        if name == "value":
            value = self.FormatValue(value, self.name)
        return super().__setattr__(name, value)
    def FormatValue(self, value, name=None):
        """Format the value into one suitable for HTML."""
        try:
            # map Python singletons to their HTML spelling
            value = VALUES_HTML_FORMAT[value]
        except (KeyError):
            value = str(value)
        if name in ("src", "class", "style"):
            value = str(value).strip()
        if name == "checked":
            # HTML boolean attribute: any stored value renders as "true"
            value = "true"
        return value
class base(object):
    """Base class for all HTML elements."""
    has_close_tag = True
    tag_name = ""
    # Attribute names that '__setattr__' will NOT wrap as 'html.attr' objects
    no_html_attrname = ("has_close_tag", "tag_name", "parent", "inner")
    def __init__(self, parent=None, **kwargs):
        # Parent element.
        self.parent = parent
        # Child element(s).
        inner = kwargs.get("inner", [])
        if (not isinstance(inner, (list, tuple))):
            inner = [inner]
        self.inner = list(inner)
        # Assign the HTML attributes.
        # The class attribute may be given as: _class, css_class or cssclass
        for k, v in kwargs.items():
            # Skip names reserved for this Python object itself.
            if (k in (self.no_html_attrname)):
                continue
            # "class" is a reserved word in Python, so it may be supplied
            # alternatively as _class, css_class or cssclass; it is stored
            # as '_class' and rendered as 'class' when serialised to HTML.
            if (k == "css_class"):
                k = "_class"
            elif (k == "cssclass"):
                k = "_class"
            self.__dict__[k] = attr(self, k, v)
    def __str__(self):
        close_tagname = ""
        if self.has_close_tag:
            close_tagname = "</{tagname}>".format(tagname=self.tag_name)
        return "<{tagname}{attrs}>{children}{closetagname}".format(tagname=self.tag_name, attrs=self.GetAttrsString(), children=self.GetChildrenString(), closetagname=close_tagname)
    def __repr__(self):
        return str(self)
    def __setattr__(self, name, value):
        # bookkeeping names stay plain; everything else becomes an html.attr
        if name in self.no_html_attrname:
            return super().__setattr__(name, value)
        elif isinstance(value, attr):
            return super().__setattr__(name, value)
        return super().__setattr__(name, attr(self, name, value))
    def GetAttrs(self):
        """Return (name, value) pairs for attributes that are 'html.attr'
        instances — i.e. the ones rendered on the HTML element."""
        out = []
        for k, v in self.__dict__.items():
            if (not isinstance(v, attr)):
                continue
            if (k in ("_class", "cssclass", "css_class")):
                k = "class"
            out.append((k, v))
        return out
    def SetAttr(self, name, value):
        """Set an attribute (goes through __setattr__)."""
        return setattr(self, name, value)
    def GetAttrsString(self):
        """Like 'self.GetAttrs' but returns a string ready to embed in the
        opening HTML tag (leading space included, or "" when empty)."""
        out = " ".join(['%s="%s"' % (e[0], e[1]) for e in self.GetAttrs()])
        out = " %s" % out.strip()
        if out.replace(" ", "") == "":
            return ""
        return out
    def GetTagName(self):
        # NOTE(review): returns the Python class name, whereas __str__
        # renders using the 'tag_name' attribute; these can differ.
        return self.__class__.__name__
    def GetChildren(self):
        """Return this element's children, if any."""
        return self.inner
    def GetChildrenString(self):
        # concatenate the string form of every child
        t = ""
        for e in self.GetChildren():
            t += str(e)
        return t
    def AppendChild(self, child):
        self.inner.append(child)
class span(base):
    """HTML <span> element."""
    tag_name = "span"

    def __init__(self, parent=None, **kwargs):
        super().__init__(parent, **kwargs)
class a(base):
    """HTML <a> (anchor) element."""
    tag_name = "a"

    def __init__(self, parent=None, **kwargs):
        super().__init__(parent, **kwargs)
class img(base):
    """HTML <img> element (void element: no closing tag)."""
    has_close_tag = False
    tag_name = "img"

    def __init__(self, parent=None, **kwargs):
        super().__init__(parent, **kwargs)
class label(base):
    """HTML <label> element."""
    tag_name = "label"

    def __init__(self, parent=None, **kwargs):
        super().__init__(parent, **kwargs)
class input(base):
    """HTML <input> element (void element: no closing tag).

    The 'type' attribute defaults to "text" when not supplied.
    """
    has_close_tag = False
    tag_name = "input"

    def __init__(self, parent=None, **kwargs):
        super().__init__(parent, **kwargs)
        self.type = attr(self, "type", kwargs.get("type", "text"))
class checkbox_input(input):
    """HTML <input type="checkbox"> element."""
    def __init__(self, parent=None, **kwargs):
        input.__init__(self, parent, **kwargs)
        self.type = attr(self, "type", "checkbox")
    def __setattr__(self, name, value):
        # 'checked' is an HTML boolean attribute: any falsy-like value
        # removes the attribute entirely instead of storing it.
        if name == "checked":
            if value in (None, "null", "none", False, "False", "false", 0, "0"):
                # Remove this attribute.
                try:
                    self.__dict__.pop("checked")
                except (KeyError):
                    pass
                return
        return super().__setattr__(name, value)
class radio_input(input):
    """HTML <input type="radio"> element."""

    def __init__(self, parent=None, **kwargs):
        super().__init__(parent, **kwargs)
        self.type = attr(self, "type", "radio")
class select(base):
    """HTML <select> element."""
    tag_name = "select"

    def __init__(self, parent=None, **kwargs):
        super().__init__(parent, **kwargs)
class option(base):
    """HTML <option> element."""
    tag_name = "option"

    def __init__(self, parent=None, **kwargs):
        super().__init__(parent, **kwargs)
class table(base):
    """HTML <table> element."""
    tag_name = "table"

    def __init__(self, parent=None, **kwargs):
        super().__init__(parent, **kwargs)

    def GetTableFromDict(self, dic):
        """Populate this table from a dict and return it (for chaining).

        One row per key; each value (scalar values are wrapped in a list)
        contributes a (key cell, value cell) pair to that row.

        Cleanup vs prior revision: the unused loop counters i/ii and the
        redundant 'table = self' alias were removed; behavior is unchanged.
        """
        for key, value in dic.items():
            row = tr(self)
            if not isinstance(value, (list, tuple)):
                value = [value]
            for v in value:
                cell = td(row)
                cell.AppendChild(key)
                row.AppendChild(cell)
                cell = td(row)
                cell.AppendChild(v)
                row.AppendChild(cell)
            self.AppendChild(row)
        return self
class tr(base):
    """HTML <tr> (table row) element."""
    tag_name = "tr"

    def __init__(self, parent=None, **kwargs):
        super().__init__(parent, **kwargs)
class th(base):
    """HTML <th> (table header cell) element."""
    tag_name = "th"

    def __init__(self, parent=None, **kwargs):
        super().__init__(parent, **kwargs)
class td(base):
    """HTML <td> (table data cell) element."""
    tag_name = "td"

    def __init__(self, parent=None, **kwargs):
        super().__init__(parent, **kwargs)
class ul(base):
    """HTML <ul> (unordered list) element."""
    tag_name = "ul"

    def __init__(self, parent=None, **kwargs):
        super().__init__(parent, **kwargs)
class li(base):
    """HTML <li> (list item) element."""
    tag_name = "li"

    def __init__(self, parent=None, **kwargs):
        super().__init__(parent, **kwargs)
#
# Copyright (C) 2008, <NAME>
#
# http://rl-glue-ext.googlecode.com/
#
# Licensed under the Apache License, Version 2.0 (the "License"
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import rlglued.rlglue as rlglue
# Index of the episode being run; incremented by run_episode.
which_episode = 0

def run_episode(obj, step_limit):
    # Run one episode through RL-Glue and print a one-line summary.
    # step_limit of 0 means no limit (see RL-Glue RL_episode semantics).
    global which_episode
    terminal = obj.run_episode(step_limit)
    total_steps = obj.num_steps()
    total_reward = obj.reward_return()
    print "Episode " + str(which_episode) + "\t " + str(total_steps) + " steps \t" + str(
        total_reward) + " total reward\t " + str(terminal) + " natural end"
    which_episode += 1
# Main Program starts here (Python 2 script: connects to RL-Glue, exchanges
# sample messages, runs several episodes, then steps through one manually).
rl_glue = rlglue.RLGlue()
print "\n\nExperiment starting up!"
task_spec = rl_glue.init()
print "RL_init called, the environment sent task spec: " + task_spec
print "\n\n----------Sending some sample messages----------"
# Talk to the agent and environment a bit...*/
responseMessage = rl_glue.agent_message("what is your name?")
print "Agent responded to \"what is your name?\" with: " + responseMessage
responseMessage = rl_glue.agent_message("If at first you don't succeed; call it version 1.0")
print "Agent responded to \"If at first you don't succeed; call it version 1.0 \" with: " + responseMessage + "\n"
responseMessage = rl_glue.env_message("what is your name?")
print "Environment responded to \"what is your name?\" with: " + responseMessage
responseMessage = rl_glue.env_message("If at first you don't succeed; call it version 1.0")
print "Environment responded to \"If at first you don't succeed; call it version 1.0 \" with: " + responseMessage
print "\n\n----------Running a few episodes----------"
# Positive step limits truncate the episode at that many steps.
run_episode(rl_glue, 100)
run_episode(rl_glue, 100)
run_episode(rl_glue, 100)
run_episode(rl_glue, 100)
run_episode(rl_glue, 100)
run_episode(rl_glue, 1)
# Remember that stepLimit of 0 means there is no limit at all!*/
run_episode(rl_glue, 0)
rl_glue.cleanup()
print "\n\n----------Stepping through an episode----------"
# We could also start over and do another experiment */
task_spec = rl_glue.init()
# We could run one step at a time instead of one episode at a time */
# Start the episode */
start_response = rl_glue.start()
first_obs = start_response.o.intArray[0]
first_act = start_response.a.intArray[0]
print "First observation and action were: " + str(first_obs) + " and: " + str(first_act)
# Run one step */
stepResponse = rl_glue.step()
# Run until the episode ends*/
while stepResponse.terminal != 1:
    stepResponse = rl_glue.step()
    # if (stepResponse.terminal != 1)
    # Could optionally print state,action pairs */
    # printf("(%d,%d) ",stepResponse.o.intArray[0],stepResponse.a.intArray[0])*/
print "\n\n----------Summary----------"
totalSteps = rl_glue.num_steps()
totalReward = rl_glue.reward_return()
print "It ran for " + str(totalSteps) + " steps, total reward was: " + str(totalReward)
rl_glue.cleanup()
|
#-*-coding:utf-8
#########################################################################################################
# name : whois_api.py
# desc : whois open api search as number
# :
#--------------------------------------------------------------------------------------------------------
# open api url :
# url1 = "http://whois.kisa.or.kr/openapi/whois.jsp?query=AS[AS NUMBER]&key=xxxxxxxxxxxxxxxxxxxxxx&answer=json"
#########################################################################################################
from time import sleep
import sys
import urllib
import json
import datetime
import pymysql
reload(sys)
sys.setdefaultencoding('utf-8')  # Python 2: allow utf-8 in implicit str conversions

# as_number: the AS number to refresh (taken from argv when given)
as_number=0
# arg_option: 0 = refresh every row in the table, 1 = refresh only as_number
arg_option=0
argc = len(sys.argv)
if(argc >=1):
    if(argc ==1):
        # no argument: update every AS in the table
        print("All update")
        arg_option = 0
    elif(argc ==2):
        # single argument: the AS number to update
        arg_option = 1
        as_number = sys.argv[1]
    else:
        print('[Error]Argument count is wrong. \n')
        exit(1)
else:
    print('[Error]Argument is not enough. \n')
    exit(1)
# MYSQL CONNECTION
conn = pymysql.connect(host='127.0.0.1', user='testuser', password='password', db='pmacct', charset='utf8')
curs = conn.cursor(pymysql.cursors.DictCursor)

if(arg_option==0):
    # Refresh every AS number already present in the accounting table.
    # ==== select example ====
    sql = "SELECT as_dst FROM pmacct.acct_dstas_info"
    curs.execute(sql)
    # DATA Fetch
    rows = curs.fetchall()
    for row in rows:
        as_number=row['as_dst']
        print row['as_dst']
        print as_number
        ############ URL CALL & JSON PARSING
        url1 = "http://whois.kisa.or.kr/openapi/whois.jsp?query=AS"
        # NOTE(review): .format() below has no placeholders — it is a no-op
        url2 = "&key=xxxxxxxxxxxxxxxxxxxxxxxxxxx&answer=json".format(as_number)
        url3 = "{0}{1}{2}".format(url1,as_number,url2)
        print url3
        urlcall = urllib.urlopen(url3)
        data = urlcall.read()
        print data
        jdata = json.loads(data)
        countrycode = jdata["whois"]["countryCode"]
        # update the country code for this AS
        try:
            usql = "UPDATE pmacct.acct_dstas_info SET country_code='{0}' WHERE as_dst = '{1}'".format(countrycode, as_number)
            curs.execute(usql)
        except pymysql.Error as e:
            print e.message or e.args
            print "############## PYMYSQL ERROR ################"
        conn.commit()
        # update the english AS name; skip this AS when it is missing
        try:
            engname = jdata["whois"]["english"]["asName"]
        except Exception, e:
            print "############## CONTINUE 1 ################"
            continue
        try:
            usql2 = "UPDATE pmacct.acct_dstas_info SET as_eng_name= '{0}' WHERE as_dst = '{1}'".format(engname, as_number)
            curs.execute(usql2)
            print "engname:"+ engname
        except pymysql.Error as e:
            print e.message or e.args
            print "############## CONTINUE 2 ################"
            continue
            # if e.args[0] == PYMYSQL_DUPLICATE_ERROR:
            #   handle_duplicate_pymysql_exception(e, func_a)
            # else:
            #   raise
        except Exception as e:
            print "############## CONTINUE 3 : UNKNOWN ERROR ################"
            continue
        conn.commit()
        print "\n"
        # organisation name: prefer the english record, fall back to korean
        try:
            orgname = jdata["whois"]["english"]["orgInfo"]["name"]
            print "orgname:" + orgname
        except Exception, e:
            orgname = jdata["whois"]["korean"]["orgInfo"]["name"]
            print "######### orgname:" + orgname
        try:
            usql3 = "UPDATE pmacct.acct_dstas_info SET as_org_name= '{0}' WHERE as_dst = '{1}'".format(orgname, as_number)
            curs.execute(usql3)
            print "engname:"+ engname
        except pymysql.Error as e:
            print e.message or e.args
            print "############## CONTINUE 2 ################"
            continue
        conn.commit()
        # throttle requests to the open API
        sleep(1)
elif(arg_option==1):
    # Refresh only the AS number given on the command line.
    ############ URL CALL & JSON PARSING
    url1 = "http://whois.kisa.or.kr/openapi/whois.jsp?query=AS"
    # NOTE(review): .format() below has no placeholders — it is a no-op
    url2 = "&key=xxxxxxxxxxxxxxxxxxxxxxxxxxx&answer=json".format(as_number)
    url3 = "{0}{1}{2}".format(url1,as_number,url2)
    print url3
    urlcall = urllib.urlopen(url3)
    data = urlcall.read()
    print data
    jdata = json.loads(data)
    countrycode = jdata["whois"]["countryCode"]
    # update the country code for this AS
    try:
        usql = "UPDATE pmacct.acct_dstas_info SET country_code='{0}' WHERE as_dst = '{1}'".format(countrycode, as_number)
        curs.execute(usql)
    except pymysql.Error as e:
        print e.message or e.args
        print "############## PYMYSQL ERROR ################"
    conn.commit()
    # update the english AS name (no 'continue' here: not inside a loop)
    try:
        engname = jdata["whois"]["english"]["asName"]
    except Exception, e:
        print "############## EXCEPTION 1 ################"
    try:
        usql2 = "UPDATE pmacct.acct_dstas_info SET as_eng_name= '{0}' WHERE as_dst = '{1}'".format(engname, as_number)
        curs.execute(usql2)
        print "engname:"+ engname
    except pymysql.Error as e:
        print e.message or e.args
        print "############## EXCEPTION 2 ################"
        # if e.args[0] == PYMYSQL_DUPLICATE_ERROR:
        #   handle_duplicate_pymysql_exception(e, func_a)
        # else:
        #   raise
    except Exception as e:
        print "############## CONTINUE 3 : UNKNOWN ERROR ################"
        print "############## EXCEPTION 3 ################"
    conn.commit()
    print "\n"
    # organisation name: prefer the english record, fall back to korean
    try:
        orgname = jdata["whois"]["english"]["orgInfo"]["name"]
        print "orgname:" + orgname
    except Exception, e:
        orgname = jdata["whois"]["korean"]["orgInfo"]["name"]
        print "######### orgname:" + orgname
    try:
        usql3 = "UPDATE pmacct.acct_dstas_info SET as_org_name= '{0}' WHERE as_dst = '{1}'".format(orgname, as_number)
        curs.execute(usql3)
        print "engname:"+ engname
    except pymysql.Error as e:
        print e.message or e.args
        print "############## EXCEPTION 4 ################"
    conn.commit()
else:
    print('NO ARGUMENT')
    conn.close()
    exit(1)
conn.close()
|
# <reponame>thefirstofthe300/python-harvest_apiv2
# Copyright 2020 Bradbase
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os, sys
import unittest
import configparser
from dataclasses import asdict
from requests_oauthlib import OAuth2Session
from oauthlib.oauth2 import MobileApplicationClient, WebApplicationClient
import httpretty
import warnings
from dacite import from_dict
import json
sys.path.insert(0, sys.path[0]+"/..")
import harvest
from harvest.harvestdataclasses import *
"""
There is a sample test config.
Copy it, name it test_config.ini and fill it out with your test details.
tests/test_config.ini is already in .gitignore
Just in case, the test config file looks like this:
[PERSONAL ACCESS TOKEN]
url = https://api.harvestapp.com/api/v2
put_auth_in_header = True
personal_token = Bearer <PASSWORD>oflettersandnumbers
account_id = 1234567
[OAuth2 Implicit Code Grant]
uri = https://api.harvestapp.com/api/v2
client_id = aclientid
auth_url = https://id.getharvest.com/oauth2/authorize
[OAuth2 Authorization Code Grant]
uri = https://api.harvestapp.com/api/v2
client_id = aclientid
client_secret = itsmy<PASSWORD>
auth_url = https://id.getharvest.com/oauth2/authorize
token_url = https://id.getharvest.com/api/v2/oauth2/token
account_id = 1234567
"""
"""
Those who tread this path:-
These tests currently really only test that the default URL has been formed
correctly and that the datatype that gets returned can be typed into the dataclass.
Probably enough but a long way from "comprehensive".
"""
class TestEstimates(unittest.TestCase):
    """Tests for the estimate, estimate-message and estimate-item-category
    endpoints of the Harvest v2 client.

    Each test registers canned JSON payloads with httpretty, calls the
    client, and checks that the returned dataclasses equal the ones built
    directly from the same payloads via dacite.from_dict.
    """

    def setUp(self):
        # Point the client at the real API URL; httpretty intercepts all HTTP.
        personal_access_token = PersonalAccessToken('ACCOUNT_NUMBER', 'PERSONAL_ACCESS_TOKEN')
        self.harvest = harvest.Harvest('https://api.harvestapp.com/api/v2', personal_access_token)
        warnings.filterwarnings("ignore", category=ResourceWarning, message="unclosed.*") # There's a bug in httpretty ATM.
        httpretty.enable()

    def tearDown(self):
        # BUG FIX: this hook was previously named `teardown`, which unittest
        # never calls — the camelCase `tearDown` is required, otherwise
        # httpretty stayed enabled and unreset between tests.
        httpretty.reset()
        httpretty.disable()

    def test_estimate_messages(self):
        """Exercise list/create of estimate messages and the four
        mark-draft-estimate event types (send/accept/decline/re-open)."""
        estimate_message_2666236_dict = {
            "id":2666236,
            "sent_by":"<NAME>",
            "sent_by_email":"<EMAIL>",
            "sent_from":"<NAME>",
            "sent_from_email":"<EMAIL>",
            "send_me_a_copy":True,
            "created_at":"2017-08-25T21:23:40Z",
            "updated_at":"2017-08-25T21:23:40Z",
            "recipients":[
                {
                    "name":"<NAME>",
                    "email":"<EMAIL>"
                },
                {
                    "name":"<NAME>",
                    "email":"<EMAIL>"
                }
            ],
            "event_type":None,
            "subject":"Estimate #1001 from API Examples",
            "body":"---------------------------------------------\r\nEstimate Summary\r\n---------------------------------------------\r\nEstimate ID: 1001\r\nEstimate Date: 06/01/2017\r\nClient: 123 Industries\r\nP.O. Number: 5678\r\nAmount: $9,630.00\r\n\r\nYou can view the estimate here:\r\n\r\n%estimate_url%\r\n\r\nThank you!\r\n---------------------------------------------"
        }
        estimate_message_2666240_dict = {
            "id":2666240,
            "sent_by":"<NAME>",
            "sent_by_email":"<EMAIL>",
            "sent_from":"<NAME>",
            "sent_from_email":"<EMAIL>",
            "send_me_a_copy":True,
            "created_at":"2017-08-25T21:27:52Z",
            "updated_at":"2017-08-25T21:27:52Z",
            "recipients":[
                {
                    "name":"<NAME>",
                    "email":"<EMAIL>"
                },
                {
                    "name":"<NAME>",
                    "email":"<EMAIL>"
                }
            ],
            "event_type":None,
            "subject":"Estimate #1001",
            "body":"Here is our estimate."
        }
        estimate_message_2666241_dict = {
            "id":2666241,
            "sent_by":"<NAME>",
            "sent_by_email":"<EMAIL>",
            "sent_from":"<NAME>",
            "sent_from_email":"<EMAIL>",
            "send_me_a_copy":False,
            "created_at":"2017-08-23T22:25:59Z",
            "updated_at":"2017-08-23T22:25:59Z",
            "event_type":"send",
            "recipients":[],
            "subject":None,
            "body":None
        }
        estimate_message_2666244_dict = {
            "id":2666244,
            "sent_by":"<NAME>",
            "sent_by_email":"<EMAIL>",
            "sent_from":"<NAME>",
            "sent_from_email":"<EMAIL>",
            "send_me_a_copy":False,
            "created_at":"2017-08-25T21:31:55Z",
            "updated_at":"2017-08-25T21:31:55Z",
            "recipients":[],
            "event_type":"accept",
            "subject":None,
            "body":None
        }
        estimate_message_2666245_dict = {
            "id":2666245,
            "sent_by":"<NAME>",
            "sent_by_email":"<EMAIL>",
            "sent_from":"<NAME>",
            "sent_from_email":"<EMAIL>",
            "send_me_a_copy":False,
            "created_at":"2017-08-25T21:31:55Z",
            "updated_at":"2017-08-25T21:31:55Z",
            "recipients":[],
            "event_type":"decline",
            "subject":None,
            "body":None
        }
        estimate_message_2666246_dict = {
            "id":2666246,
            "sent_by":"<NAME>",
            "sent_by_email":"<EMAIL>",
            "sent_from":"<NAME>",
            "sent_from_email":"<EMAIL>",
            "send_me_a_copy":False,
            "created_at":"2017-08-25T21:31:55Z",
            "updated_at":"2017-08-25T21:31:55Z",
            "recipients":[],
            "event_type":"re-open",
            "subject":None,
            "body":None
        }
        estimate_messages_dict = {
            "estimate_messages":[estimate_message_2666236_dict],
            "per_page":100,
            "total_pages":1,
            "total_entries":1,
            "next_page":None,
            "previous_page":None,
            "page":1,
            "links":{
                "first":"https://api.harvestapp.com/v2/estimates/1439818/messages?page=1&per_page=100",
                "next":None,
                "previous":None,
                "last":"https://api.harvestapp.com/v2/estimates/1439818/messages?page=1&per_page=100"
            }
        }

        # estimate_messages
        httpretty.register_uri(httpretty.GET,
                "https://api.harvestapp.com/api/v2/estimates/1439818/messages?page=1&per_page=100",
                body=json.dumps(estimate_messages_dict),
                status=200
            )
        estimate_messages = from_dict(data_class=EstimateMessages, data=estimate_messages_dict)
        requested_estimate_messages = self.harvest.estimate_messages(estimate_id=1439818)
        self.assertEqual(requested_estimate_messages, estimate_messages)

        # estimate_message
        httpretty.register_uri(httpretty.POST,
                "https://api.harvestapp.com/api/v2/estimates/1439818/messages",
                body=json.dumps(estimate_message_2666240_dict),
                status=200
            )
        new_estimate_message = from_dict(data_class=EstimateMessage, data=estimate_message_2666240_dict)
        requested_new_estimate_message = self.harvest.create_estimate_message(estimate_id=1439818, recipients=[{"name" : "<NAME>", "email" : "<EMAIL>"}], subject="Estimate #1001", body="Here is our estimate.", send_me_a_copy=True)
        self.assertEqual(requested_new_estimate_message, new_estimate_message)

        # mark estimate as sent
        httpretty.register_uri(httpretty.POST,
                "https://api.harvestapp.com/api/v2/estimates/1439818/messages",
                body=json.dumps(estimate_message_2666241_dict),
                status=201
            )
        mark_draft_estimate = from_dict(data_class=EstimateMessage, data=estimate_message_2666241_dict)
        requested_mark_draft_estimate = self.harvest.mark_draft_estimate(estimate_id=1439818, event_type="send")
        self.assertEqual(requested_mark_draft_estimate, mark_draft_estimate)

        # mark estimate as accepted
        httpretty.register_uri(httpretty.POST,
                "https://api.harvestapp.com/api/v2/estimates/1439818/messages",
                body=json.dumps(estimate_message_2666244_dict),
                status=201
            )
        mark_draft_estimate = from_dict(data_class=EstimateMessage, data=estimate_message_2666244_dict)
        requested_mark_draft_estimate = self.harvest.mark_draft_estimate(estimate_id=1439818, event_type="accept")
        self.assertEqual(requested_mark_draft_estimate, mark_draft_estimate)

        # mark estimate as declined
        httpretty.register_uri(httpretty.POST,
                "https://api.harvestapp.com/api/v2/estimates/1439818/messages",
                body=json.dumps(estimate_message_2666245_dict),
                status=201
            )
        mark_draft_estimate = from_dict(data_class=EstimateMessage, data=estimate_message_2666245_dict)
        requested_mark_draft_estimate = self.harvest.mark_draft_estimate(estimate_id=1439818, event_type="decline")
        self.assertEqual(requested_mark_draft_estimate, mark_draft_estimate)

        # mark estimate as re-opened
        httpretty.register_uri(httpretty.POST,
                "https://api.harvestapp.com/api/v2/estimates/1439818/messages",
                body=json.dumps(estimate_message_2666246_dict),
                status=201
            )
        mark_draft_estimate = from_dict(data_class=EstimateMessage, data=estimate_message_2666246_dict)
        requested_mark_draft_estimate = self.harvest.mark_draft_estimate(estimate_id=1439818, event_type="re-open")
        self.assertEqual(requested_mark_draft_estimate, mark_draft_estimate)

        httpretty.reset()

    def test_estimates(self):
        """Exercise list/get/create/update/delete of estimates plus
        line-item add and destroy via the PATCH endpoint."""
        estimate_1439818_dict = {
            "id":1439818,
            "client_key":"<KEY>",
            "number":"1001",
            "purchase_order":"5678",
            "amount":9630.0,
            "tax":5.0,
            "tax_amount":450.0,
            "tax2":2.0,
            "tax2_amount":180.0,
            "discount":10.0,
            "discount_amount":1000.0,
            "subject":"Online Store - Phase 2",
            "notes":"Some notes about the estimate",
            "state":"sent",
            "issue_date":"2017-06-01",
            "sent_at":"2017-06-27T16:11:33Z",
            "created_at":"2017-06-27T16:11:24Z",
            "updated_at":"2017-06-27T16:13:56Z",
            "accepted_at":None,
            "declined_at":None,
            "currency":"USD",
            "client":{
                "id":5735776,
                "name":"123 Industries"
            },
            "creator":{
                "id":1782884,
                "name":"<NAME>"
            },
            "line_items":[
                {
                    "id":53334195,
                    "kind":"Service",
                    "description":"Phase 2 of the Online Store",
                    "quantity":100.00, # TODO: this is supposed to be an int. Something isn't casting int to float.
                    "unit_price":100.00, # TODO: this is supposed to be an int. Something isn't casting int to float.
                    "amount":10000.00, # TODO: this is supposed to be an int. Something isn't casting int to float.
                    "taxed":True,
                    "taxed2":True
                }
            ]
        }
        estimate_1439814_dict = {
            "id":1439814,
            "client_key":"<KEY>",
            "number":"1000",
            "purchase_order":"1234",
            "amount":21000.0,
            "tax":5.0,
            "tax_amount":1000.0,
            "tax2":None,
            "tax2_amount":0.0,
            "discount":None,
            "discount_amount":0.0,
            "subject":"Online Store - Phase 1",
            "notes":"Some notes about the estimate",
            "state":"accepted",
            "issue_date":"2017-01-01",
            "sent_at":"2017-06-27T16:10:30Z",
            "created_at":"2017-06-27T16:09:33Z",
            "updated_at":"2017-06-27T16:12:00Z",
            "accepted_at":"2017-06-27T16:10:32Z",
            "declined_at":None,
            "currency":"USD",
            "client":{
                "id":5735776,
                "name":"123 Industries"
            },
            "creator":{
                "id":1782884,
                "name":"<NAME>"
            },
            "line_items":[
                {
                    "id":57531966,
                    "kind":"Service",
                    "description":"Phase 1 of the Online Store",
                    "quantity":1.00, # TODO: this is supposed to be an int. Something isn't casting int to float.
                    "unit_price":20000.00, # TODO: this is supposed to be an int. Something isn't casting int to float.
                    "amount":20000.00, # TODO: this is supposed to be an int. Something isn't casting int to float.
                    "taxed":True,
                    "taxed2":False
                }
            ]
        }
        estimate_1439827_dict = {
            "id":1439827,
            "client_key":"<KEY>",
            "number":"1002",
            "purchase_order":None,
            "amount":5000.0,
            "tax":None,
            "tax_amount":0.0,
            "tax2":None,
            "tax2_amount":0.0,
            "discount":None,
            "discount_amount":0.0,
            "subject":"Project Quote",
            "notes":None,
            "state":"draft",
            "issue_date":None,
            "sent_at":None,
            "created_at":"2017-06-27T16:16:24Z",
            "updated_at":"2017-06-27T16:16:24Z",
            "accepted_at":None,
            "declined_at":None,
            "currency":"USD",
            "client":{
                "id":5735774,
                "name":"<NAME>"
            },
            "creator":{
                "id":1782884,
                "name":"<NAME>"
            },
            "line_items":[
                {
                    "id":53339199,
                    "kind":"Service",
                    "description":"Project Description",
                    "quantity":1.0,
                    "unit_price":5000.0,
                    "amount":5000.0,
                    "taxed":False,
                    "taxed2":False
                }
            ]
        }
        estimates_dict = {
            "estimates":[estimate_1439818_dict, estimate_1439814_dict],
            "per_page":100,
            "total_pages":1,
            "total_entries":2,
            "next_page":None,
            "previous_page":None,
            "page":1,
            "links":{
                "first":"https://api.harvestapp.com/v2/estimates?page=1&per_page=100",
                "next":None,
                "previous":None,
                "last":"https://api.harvestapp.com/v2/estimates?page=1&per_page=100"
            }
        }

        # estimates
        httpretty.register_uri(httpretty.GET,
                "https://api.harvestapp.com/api/v2/estimates?page=1&per_page=100",
                body=json.dumps(estimates_dict),
                status=200
            )
        estimates = from_dict(data_class=Estimates, data=estimates_dict)
        requested_estimates = self.harvest.estimates()
        self.assertEqual(requested_estimates, estimates)

        # get_estimte
        httpretty.register_uri(httpretty.GET,
                "https://api.harvestapp.com/api/v2/estimates/1439818",
                body=json.dumps(estimate_1439818_dict),
                status=200
            )
        estimate = from_dict(data_class=Estimate, data=estimate_1439818_dict)
        # NOTE: `get_estimte` (sic) is the method name exposed by the harvest
        # library; renaming it here would break against the published API.
        requested_estimate = self.harvest.get_estimte(estimate_id=1439818)
        self.assertEqual(requested_estimate, estimate)

        # create_estimate
        httpretty.register_uri(httpretty.POST,
                "https://api.harvestapp.com/api/v2/estimates",
                body=json.dumps(estimate_1439827_dict),
                status=201
            )
        new_estimate = from_dict(data_class=Estimate, data=estimate_1439827_dict)
        requested_new_estimate = self.harvest.create_estimate(client_id=5735774, subject="ABC Project Quote", line_items=[{"kind" : "Service", "description" : "ABC Project Quote", "unit_price" : 5000.0}])
        self.assertEqual(requested_new_estimate, new_estimate)

        # update_estimate
        estimate_1439827_dict["purchase_order"] = "2345"
        httpretty.register_uri(httpretty.PATCH,
                "https://api.harvestapp.com/api/v2/estimates/1439827",
                body=json.dumps(estimate_1439827_dict),
                status=200
            )
        new_estimate = from_dict(data_class=Estimate, data=estimate_1439827_dict)
        requested_new_estimate = self.harvest.update_estimate(estimate_id=1439827, purchase_order="2345")
        self.assertEqual(requested_new_estimate, new_estimate)

        # create_estimate_line_item
        estimate_1439827_dict["line_items"].append(
                {
                    "id":53339200,
                    "kind":"Service",
                    "description":"Another Project",
                    "quantity":1.0,
                    "unit_price":1000.0,
                    "amount":1000.0,
                    "taxed":False,
                    "taxed2":False
                }
            )
        httpretty.register_uri(httpretty.PATCH,
                "https://api.harvestapp.com/api/v2/estimates/1439827",
                body=json.dumps(estimate_1439827_dict),
                status=200
            )
        new_estimate_line_item = from_dict(data_class=Estimate, data=estimate_1439827_dict)
        requested_new_estimate_line_item = self.harvest.create_estimate_line_item(estimate_id=1439827, line_items=[{"kind" : "Service", "description" : "Another Project", "unit_price" : 1000.0}])
        self.assertEqual(requested_new_estimate_line_item, new_estimate_line_item)

        # delete_estimate_line_items (line items are destroyed through the
        # same PATCH endpoint, using the "_destroy" marker)
        del estimate_1439827_dict["line_items"][0]
        httpretty.register_uri(httpretty.PATCH,
                "https://api.harvestapp.com/api/v2/estimates/1439827",
                body=json.dumps(estimate_1439827_dict),
                status=200
            )
        new_estimate_line_item = from_dict(data_class=Estimate, data=estimate_1439827_dict)
        requested_new_estimate_line_item = self.harvest.create_estimate_line_item(estimate_id=1439827, line_items=[{"id" : 53339199, "_destroy" : True}])
        self.assertEqual(requested_new_estimate_line_item, new_estimate_line_item)

        # delete_estimate
        httpretty.register_uri(httpretty.DELETE,
                "https://api.harvestapp.com/api/v2/estimates/1439827",
                status=200
            )
        requested_deleted_estimate = self.harvest.delete_estimate(estimate_id=1439827)
        self.assertEqual(requested_deleted_estimate, None)

        httpretty.reset()

    def test_estimate_item_categories(self):
        """Exercise list/get/create/update/delete of estimate item categories."""
        estimate_item_category_1378704_dict = {
            "id":1378704,
            "name":"Product",
            "created_at":"2017-06-26T20:41:00Z",
            "updated_at":"2017-06-26T20:41:00Z"
        }
        estimate_item_category_1378703_dict = {
            "id":1378703,
            "name":"Service",
            "created_at":"2017-06-26T20:41:00Z",
            "updated_at":"2017-06-26T20:41:00Z"
        }
        estimate_item_category_1379244_dict = {
            "id":1379244,
            "name":"Hosting",
            "created_at":"2017-06-27T16:06:35Z",
            "updated_at":"2017-06-27T16:06:35Z"
        }
        estimate_item_categories_dict = {
            "estimate_item_categories":[estimate_item_category_1378704_dict, estimate_item_category_1378703_dict],
            "per_page":100,
            "total_pages":1,
            "total_entries":2,
            "next_page":None,
            "previous_page":None,
            "page":1,
            "links":{
                "first":"https://api.harvestapp.com/v2/estimate_item_categories?page=1&per_page=100",
                "next":None,
                "previous":None,
                "last":"https://api.harvestapp.com/v2/estimate_item_categories?page=1&per_page=100"
            }
        }

        # estimate_item_categories
        httpretty.register_uri(httpretty.GET,
                "https://api.harvestapp.com/api/v2/estimate_item_categories?page=1&per_page=100",
                body=json.dumps(estimate_item_categories_dict),
                status=200
            )
        estimate_item_categories = from_dict(data_class=EstimateItemCategories, data=estimate_item_categories_dict)
        requested_estimate_item_categories = self.harvest.estimate_item_categories()
        self.assertEqual(requested_estimate_item_categories, estimate_item_categories)

        # get_estimate_item_category
        httpretty.register_uri(httpretty.GET,
                "https://api.harvestapp.com/api/v2/estimate_item_categories/1378704",
                body=json.dumps(estimate_item_category_1378704_dict),
                status=200
            )
        estimate_item_category = from_dict(data_class=EstimateItemCategory, data=estimate_item_category_1378704_dict)
        requested_estimate_item_category = self.harvest.get_estimate_item_category(estimate_item_category_id=1378704)
        self.assertEqual(requested_estimate_item_category, estimate_item_category)

        # create_estimate_item_category
        httpretty.register_uri(httpretty.POST,
                "https://api.harvestapp.com/api/v2/estimate_item_categories",
                body=json.dumps(estimate_item_category_1379244_dict),
                status=201
            )
        new_estimate_item_category = from_dict(data_class=EstimateItemCategory, data=estimate_item_category_1379244_dict)
        requested_new_estimate_item_category = self.harvest.create_estimate_item_category(name="Hosting")
        self.assertEqual(requested_new_estimate_item_category, new_estimate_item_category)

        # update_estimate_item_category
        estimate_item_category_1379244_dict["name"] = "Transportation"
        httpretty.register_uri(httpretty.PATCH,
                "https://api.harvestapp.com/api/v2/estimate_item_categories/1379244",
                body=json.dumps(estimate_item_category_1379244_dict),
                status=200
            )
        updated_estimate_item_category = from_dict(data_class=EstimateItemCategory, data=estimate_item_category_1379244_dict)
        requested_updated_estimate_item_category = self.harvest.update_estimate_item_category(estimate_item_category_id=1379244, name="Transportation")
        self.assertEqual(requested_updated_estimate_item_category, updated_estimate_item_category)

        # delete_estimate_item_category
        httpretty.register_uri(httpretty.DELETE,
                "https://api.harvestapp.com/api/v2/estimate_item_categories/1379244",
                status=200
            )
        requested_deleted_estimate_item_category = self.harvest.delete_estimate_item_category(estimate_item_id=1379244)
        self.assertEqual(requested_deleted_estimate_item_category, None)

        httpretty.reset()
# Allow running this test module directly (python <file>.py) in addition
# to discovery via a test runner such as pytest or `python -m unittest`.
if __name__ == '__main__':
    unittest.main()
|
<reponame>suulcoder/ProjectGenerator
"""
Class Database is a utility for making a connection with Neo4j
Project Generator
Universidad del Valle de Guatemala
<NAME>
<NAME>
<NAME>
-Algunos proyectos almacenados en la base de datos fueron obtenidos de la siguiente fuente de información:
Sciencebuddies.(2019). Recommendation projects. Extraído de: https://www.sciencebuddies.org/science-fair-projects/topic-selection-wizard/recommendations?t=Long&p=2
"""
from neo4j import GraphDatabase, basic_auth
class Database(object):
"""Set database driver"""
def __init__(self, uri,user,password):
    """Create the Neo4j driver for `uri`, authenticating as `user`/`password`."""
    self._driver = GraphDatabase.driver(uri, auth=(user, password))
"""Close database"""
def close(self):
    """Shut down the underlying driver and release its connections."""
    self._driver.close()
"""
This method is used to write a node in database and receives 3 arguments:
_id: is the identifier that you want to be saved on database, should be an string without spaces
nodeType: Is the type of node that you want, it must be a String, its first letter must be Uppercase
arguments: It containds the atributes of the node. It must be a dictionary where the key is the name of the atribute and the value must be the value of the atribute"""
def write(self, _id, nodeType, arguments):
    """Create a node `(_id:nodeType)` and SET one property per key of `arguments`.

    Property values travel as the positional `$arguments` list parameter;
    `_id`, `nodeType` and the property names are interpolated into the
    query text, so they must come from trusted code, never user input.
    """
    values = []
    query = ""
    if nodeType is not None:
        query += "CREATE ({0}:{1})\n".format(_id, nodeType)
    for position, name in enumerate(arguments):
        values.append(arguments[name])
        query += "SET {0}.{1} = $arguments[{2}]\n".format(_id, name, position)
    with self._driver.session() as session:
        session.write_transaction(self._create, values, query)
"""
This method is used to make a conection between two nodes and receives 7 parameters:
type1 and type2: Type of node 1 and 2 and must be an string, its first letter must be Uppercase
VariableName1 and VariableName2: It has the name of the key to be checked of the nodes. It must be a string withoud spaces
variable1 and variable2: contains the value of the Variables setted above, must be a string
linkName: it has the name that will have the link. Must be an string without spaces."""
def link(self, type1, type2, variableName1, variable1, VariableName2, variable2, linkName):
    """Create a `linkName` relationship from the `type1` node whose
    `variableName1` property equals `variable1` to the `type2` node whose
    `VariableName2` property equals `variable2`."""
    query = ("MATCH (a:{0}),(b:{1})\n"
             "WHERE a.{2}= $variable1 AND b.{3}= $variable2\n"
             "CREATE (a)-[:{4}]->(b)").format(type1, type2, variableName1, VariableName2, linkName)
    with self._driver.session() as session:
        session.write_transaction(self._connect, query, variable1, variable2)
"""
This method is for delete an specific node it has 3 parameters
nodeType: it receives the type of node you want to delete, must be an string and its first letter must be uppercase
key: it receives a key or a reference that the node has.
value: it recives de value of the reference key."""
def delete(self, nodeType, key, value):
    """DETACH DELETE every `nodeType` node whose `key` property equals `value`."""
    query = "MATCH (a:{0})\nWHERE a.{1}= $value\nDETACH DELETE (a)".format(nodeType, key)
    with self._driver.session() as session:
        session.write_transaction(self._delete, query, value)
"""
This method is for delete a relationship between nodes it has 7 parameters
type1 and type2: Type of node 1 and 2 and must be an string, its first letter must be Uppercase
VariableName1 and VariableName2: It has the name of the key to be checked of the nodes. It must be a string withoud spaces
variable1 and variable2: contains the value of the Variables setted above, must be a string
linkName: it has the name that will be deleted. Must be an string without spaces."""
def deleteLink(self, type1, type2, variableName1, variable1, VariableName2, variable2, linkName):
    """Delete the `linkName` relationship pointing from the matched `type1`
    node to the matched `type2` node; the nodes themselves are kept."""
    query = ("MATCH (a:{0}),(b:{1})\n"
             "WHERE a.{2}= $variable1 AND b.{3}= $variable2\n"
             "MATCH (a)-[r:{4}]->(b)\n"
             "DELETE r").format(type1, type2, variableName1, VariableName2, linkName)
    with self._driver.session() as session:
        session.write_transaction(self._deleteLink, query, variable1, variable2)
"""
This method is for upgrade an specific atribute on a node it has 3 parameters
nodeType: it receives the type of node you want to upgrade, must be an string and its first letter must be uppercase
key: it receives a key or a reference that the node has.
value: it recives de value of the reference key.
newValue: it recieves de value that will be setted"""
def upgrade(self, nodeType, key, value, newValue):
    """On every `nodeType` node whose `key` property equals `value`,
    overwrite that same property with `newValue`."""
    query = "MATCH (a:{0})\nWHERE a.{1}= $value\nSET a.{1}= $newValue".format(nodeType, key)
    with self._driver.session() as session:
        session.write_transaction(self._upgrade, query, value, newValue)
"""
This method is used to get a node it has three parameters
nodeType: it receives the type of node where you want to get information, must be an string and its first letter must be uppercase
key: it receives a key or a reference that the node has.
value: it recives de value of the reference key.
It will return a StatementResult type, that behaives like a java-map, you have to make an iterator to get informations"""
def getNode(self, nodeType, key, value):
    """Return the query result for `nodeType` nodes whose `key` equals `value`.

    The result behaves like an iterable of records; iterate it to read the
    matched nodes.
    """
    query = "MATCH (a:{0})\nWHERE a.{1}=$value\nRETURN a".format(nodeType, key)
    with self._driver.session() as session:
        return session.write_transaction(self._getNode, query, value)
"""This method is useful to get all nodes that are connected by the same link to a node m connected with the same link to a node of reference
nodeType: it receives the type of node of reference, must be an string and its first letter must be uppercase
key: it receives a key or a reference that the node has.
value: it recives de value of the reference key.
link: receives the link name, must be a string without spaces
"""
def getNodesByOther(self, nodeType, key, value, link):
    """Return nodes `r` that share, via the same `link` relationship type,
    a common neighbour `m` with the `nodeType` node whose `key` equals
    `value` (i.e. siblings through a shared target)."""
    query = ("MATCH (a:{0})\nWHERE a.{1}=$value\n"
             "MATCH (a)-[:{2}]->(m)<-[:{2}]-(r)\nRETURN r").format(nodeType, key, link)
    with self._driver.session() as session:
        return session.write_transaction(self._getNodes, query, value)
"""This method is useful to get all nodes connect to one of reference with an specific link
nodeType: it receives the type of node of reference, must be an string and its first letter must be uppercase
key: it receives a key or a reference that the node has.
value: it recives de value of the reference key.
link: receives the link name, must be a string without spaces"""
def getNodesByLink(self, nodeType, key, value, link):
    """Return every node reached from the matched `nodeType` node through
    an outgoing `link` relationship."""
    query = "MATCH (a:{0})\nWHERE a.{1}=$value\nMATCH (a)-[:{2}]->(m)\nRETURN m".format(nodeType, key, link)
    with self._driver.session() as session:
        return session.write_transaction(self._getNodes, query, value)
"""This method is useful to get all nodes from the same type
nodeType: receives the String of the node type"""
def getAllType(self, nodeType):
    """Return every node carrying the `nodeType` label."""
    query = "MATCH (a:{0})\nRETURN a".format(nodeType)
    with self._driver.session() as session:
        return session.write_transaction(self._Default, query)
"""This method is used two now if there ara nodes on the database.
It will return None the database is empty"""
def getDefault(self):
    """Run `MATCH (n) return n` and return the result; an empty result
    indicates the database holds no nodes at all."""
    with self._driver.session() as session:
        return session.write_transaction(self._Default, "MATCH (n) return n")
"""This method is used two set the default database, you should change the string result two change it. The cod must be in Cypher"""
def setDefault(self):
if (self.getDefault().single()==None):#We check if the database is empty
result = """
CREATE (ProjectGenerator:Project {title: "Project_Generator",description:"This project is about the creation of a software to generate projects. You need to know how to code.", time:11, complexity:"medium", integrants:2 })
CREATE (SunRotation:Project {title: "Sun_Rotation",description:"Calculate the angular velocity of the sun, coding", time:7, complexity:"low", integrants:3 })
CREATE (Behaviorism:Project {title: "Behaviorism",description:"Experiment with people and theory of behaviorism", time:210, complexity:"easy", integrants:1})
CREATE (Gestalt:Project {title: "Gestalt",description:"Experiment to avoid extintion", time:2102400000, complexity:"hard", integrants:55})
CREATE (Avengers:Project {title: "Avengers",description:"Social experiment where a superhero is near of you", time:210, complexity:"medium", integrants:5})
CREATE (CACAP:Project {title: "CACAP",description:"Centro de Administracion y Control Automatico de Papel", time:210, complexity:"hard", integrants:5})
CREATE (SpaceWars:Project {title: "SpaceWars",description:"Make an space war", time:11, complexity:"medium", integrants:2 })
CREATE (Notizen:Project {title: "NOTIZEN",description:"Make an app to take notes", time:7, complexity:"low", integrants:3 })
CREATE (Simulator:Project {title: "SIMULATE_WITH_PHYSICS",description:"Simulate with unity a phenomenom of classic physics", time:210, complexity:"easy", integrants:1})
CREATE (RRasberry:Project {title: "Robot_with_Raspberry",description:"Make a robot using a rapberry", time:210, complexity:"medium", integrants:5})
CREATE (RArduino:Project {title: "Robot_with_arduino",description:"Make a robot using an arduino", time:210, complexity:"hard", integrants:5})
CREATE (Story:Project {title: "Short_Story",description:"Write a short story", time:210, complexity:"hard", integrants:5})
CREATE (ElasticConstat:Project {title: "A Simple Experiment for Determining the Elastic Constant of a Fine Wire",description:"Determining the Elastic Constant of a Fine Wire", time:210, complexity:"hard", integrants:4})
CREATE (HollywoodPhysics:Project {title: "HollywoodPhysics",description:"Analyze hollywood movies with physics", time:210, complexity:"hard", integrants:3})
CREATE (BrominationA:Project {title: "Bromination of alkanes",description:"Bromination of alkanes", time:3, complexity:"medium",integrants:2})
CREATE (Halogenation:Project {title: "A Safe Simple Halogenation Experiment",description:"halogenation of alkanes", time:4, complexity:"hard", integrants:2})
CREATE (Hydrogenation:Project {title: "Catalytic Hydrogenation of Organic Compounds without H2 Supply: An Electrochemical System",description:"Catalytic hydrogenation using a Electrochemical Cell", time:5, complexity:"hard", integrants:2})
CREATE (SN2:Project {title: "A Simple SN2 Reaction for the Undergraduate Organic Laboratory",description:"making a bimolecular sustitution ", time:5, complexity:"hard", integrants:3})
CREATE (Lisp:Project {title: "Lisp_Interpreter",description:"Recreate a lisp interpreter using java", time:210, complexity:"hard", integrants:3})
CREATE (Recommendation:Project {title: "Recomendation_Algorithm",description:"Recreate an algorithm capable to make recommendations", time:210, complexity:"hard", integrants:3})
CREATE (Sodium:Project {title: "Determination of sodium in Swiss cheese through the method of volhard",description:"Analyzing Swiss cheese", time:210, complexity:"hard", integrants:4})
CREATE (Sanitary_Napkins_absorption:Project {title: "comparison of absorption of sanitary napkins of different brands",description:"Write a short story", time:210, complexity:"hard", integrants:5})
CREATE (Aluminum_Recycling:Project {title: "Recycling of aluminum chip produced in UVG mechanics workshop",description:"Recycling aluminum for alum formation", time:110, complexity:"hard", integrants:5})
CREATE (Inhibition_Klebsiella:Project {title: "Inhibition of Klebsiella pneumoniae biofilm through chamomile extract",description:"inhibit the growth of a bacterium by means of chamomile extract", time:510, complexity:"hard", integrants:5})
CREATE (biomimic:Project {title: "Helmet simulating an armadillo",description:"make a motorcycle helmet that mimics one of the characteristics of the armadillo", time:410, complexity:"hard", integrants:5})
CREATE (Fable:Project {title: "Fable",description:"write a story and dramatize it in class", time:210, complexity:"hard", integrants:8})
CREATE (Reports:Project {title: "make reports over the internet",description:"create a program that allows to make denunciations by Internet", time:250, complexity:"hard", integrants:5})
CREATE (Sonic_Pi:Project {title: "My own music",description:"create a song using sonic pi", time:510, complexity:"hard", integrants:4})
CREATE (Pokultura:Project {title: "Pokultura",description:"a simple card game that involves culture", time:510, complexity:"medium", integrants:6})
CREATE (mechanical_workshop:Project {title: "mechanical_workshop",description:"Create a program that organizes the information of a mechanical workshop", time:210, complexity:"hard", integrants:4})
CREATE (political_parties:Project {title: "political_parties",description:"create political parties", time:310, complexity:"medium", integrants:9})
CREATE (massacres_of_the_jungle:Project {title: "massacres_of_the_jungle",description:"historical book analysis", time:210, complexity:"medium", integrants:4})
CREATE (Simon_says:Project {title: "Simon_says",description:"Simon game implementation says using ARM language", time:310, complexity:"hard", integrants:2})
CREATE (slot_machines:Project {title: "slot_machines",description:"implementation of a slot game using ARM language", time:100, complexity:"medium", integrants:2})
CREATE (Alarm_clock:Project {title: "Alarm_clock",description:"Alarm clock implemented in ARM language", time:510, complexity:"hard", integrants:2})
CREATE (Timer:Project {title: "descending_counter.",description:"Timer: descending counter", time:315, complexity:"hard", integrants:2})
CREATE (Piano:Project {title: "Piano",description:"implementation of a piano from electronic components and ARM language", time:410, complexity:"hard", integrants:2})
CREATE (Stepper:Project {title: "Stepper_motor_controller",description:"implementation of a Stepper motor controller ARM language", time:210, complexity:"hard", integrants:2})
CREATE (ALU:Project {title: "ALU",description:"Each switch will represent an arithmetic or logical operation: AND, OR, ADD and SUB", time:210, complexity:"hard", integrants:2})
CREATE (revolutions_of_a_stepper:Project {title: "revolutions_of_a_stepper",description:"implementation of a Stepper knowing the number of revolutions and direction", time:310, complexity:"hard", integrants:2})
CREATE (Angular_velocity:Project {title: "Angular_velocity",description:"direct measurement using a Smartphone", time:100, complexity:"medium", integrants:4})
CREATE (Youngs_Modulus_of_a_Marshmallow:Project {title: "Youngs_Modulus_of_a_Marshmallow",description:"determining Young's Modulus of a Marshmallow", time:50, complexity:"easy", integrants:4})
CREATE (Slipping_Tipping:Project {title: "Slipping_and_Tipping",description:"Measuring Static Friction with a Straightedge", time:110, complexity:"medium", integrants:4})
CREATE (Rotational_energy:Project {title: "Rotational_energy",description:"determinating Rotational energy in a physical pendulum", time:200, complexity:"medium", integrants:4})
CREATE (Torque:Project {title: "A_New_Twist_on_Torque Labs.",description:"Measure the force that must act on the end of a pivoted rule", time:60, complexity:"easy", integrants:5})
CREATE (Stability:Project {title: "Stability_of_a_Can_of_Soda",description:"determinating the stability of a can soda in a car", time:110, complexity:"hard", integrants:5})
CREATE (Angular_Momentum:Project {title: "Which_reaches_the_bottom_first",description:"Using your knowledge of the dynamics of rotation, determine which object will first reach the bottom of an inclined plane.", time:75, complexity:"easy", integrants:4})
CREATE (Center_of_gravity:Project {title: "Center_of_gravity_of_a_student",description:"Use two bathroom scales to determine the location of the center of gravity using two different assemblies", time:510, complexity:"hard", integrants:5})
CREATE (Sun_Rotation:Project {title: "Sun_Rotation",description:"Using the Solar & Heliospheric Observatory Satellite (SOHO) to Determine the Rotation of the Sun", time:310, complexity:"hard", integrants:5})
CREATE (Torque_Angle:Project {title: "Torque_Angle",description:"Obtain experimentally the dependence of the sinus torque of the angle.", time:210, complexity:"hard", integrants:5})
CREATE (Jumping_frogs:Project {title: "Jumping_frogs",description:"Game simulation using electronic components and ARM language", time:510, complexity:"hard", integrants:2})
CREATE (word_leak:Project {title: "word_leak",description:"Game simulation using electronic components and ARM language,the game consists in generating incomplete words and the user must complete them", time:310, complexity:"hard", integrants:2})
CREATE (four_in_line:Project {title: "four_in_line",description:"Game simulation using electronic components and ARM language", time:210, complexity:"hard", integrants:2})
CREATE (Race:Project {title: "Race_with_obstacles",description:"Game simulation using electronic components and ARM language, the game consist in get to the goal first", time:510, complexity:"hard", integrants:2})
CREATE (greater_or_lesser:Project {title: "greater_or_lesser",description:"Game simulation using electronic components and ARM language, the game consist in roll two dice and win the one with the largest number", time:320, complexity:"hard", integrants:2})
CREATE (_Pics__Word:Project {title: "2_Pics_1_Word",description:"Game simulation using electronic components and ARM language, the game consist in show images that have a common theme and below disordered words which must be ordered in relation to the images", time:210, complexity:"hard", integrants:3})
CREATE (Battleship:Project {title: "Battleship",description:"Game simulation using electronic components and ARM language, recreating the famous game of the same name", time:210, complexity:"hard", integrants:3})
CREATE (Minesweep:Project {title: "Minesweep",description:"Game simulation using electronic components and ARM language, recreating the famous game of the same name", time:310, complexity:"hard", integrants:3})
CREATE (Rabbit_Chase:Project {title: "Rabbit_Chase",description:"Game simulation using electronic components and ARM language, recreating the famous game of the same name", time:210, complexity:"hard", integrants:3})
CREATE (GO:Project {title: "GO",description:"Game simulation using electronic components and ARM language, recreating the famous game of the same name", time:210, complexity:"medium", integrants:3})
CREATE (pair_odd:Project {title: "pair_odd",description:"Game simulation using electronic components and ARM language, recreating the famous game of the same name", time:210, complexity:"hard", integrants:3})
CREATE (Uplift_count:Project {title: "Uplift_count_from_0_to_9",description:"Using circuit knowledge create an ascending counter from 0 to 9", time:210, complexity:"medium", integrants:4})
CREATE (Descending_count:Project {title: "Descending_count_of_9_to_0",description:"Using circuit knowledge create a descending counter from 9 to 0", time:210, complexity:"medium", integrants:3})
CREATE (Active_bit_shift:Project {title: "Active_bit_shift",description:"Using circuit knowledge simulate a logic circuit with 8 output bits that performs the bit shift (active bit, issay, bit on and the others off)", time:210, complexity:"hard", integrants:3})
CREATE (Inactive_bit_shift:Project {title: "Inactive_bit_shift",description:"Using circuit knowledge simulate a logic circuit with 8 output bits that performs the bit shift", time:210, complexity:"hard", integrants:3})
CREATE (Bit_accumulator:Project {title: "Bit_accumulator",description:"Using circuit knowledge simulate a logic circuit that represents a bit accumulator", time:210, complexity:"hard", integrants:3})
CREATE (traffic_light:Project {title: "traffic_light",description:"a two way traffic light", time:210, complexity:"hard", integrants:2})
CREATE (stone_paper_or_scissors:Project {title: "stone_paper_or_scissors",description:"Design and simulate a logic circuit that implements the game of stone paper or scissors, using gates,dip-switches and LEDs, which complies with the established rules: scissors beats paper, paper beats stoneand stone beats scissors", time:210, complexity:"hard", integrants:2})
CREATE (AU:Project {title: "Arithmetic_Unit",description:"Design and simulate a circuit that implements the behavior of an Arithmetic Unit", time:210, complexity:"hard", integrants:3})
CREATE (Turn_signals:Project {title: "Turn_signals",description:"Design and simulate a logic circuit that implements the behavior of the directional lights of a car", time:210, complexity:"hard", integrants:3})
CREATE (Comparator_of_Numbers:Project {title: "Comparator_of_Numbers",description:"Design and simulate a circuit that has as input two numbers from 0 to 7 signed in addition to 2.", time:210, complexity:"hard", integrants:3})
CREATE (Totito:Project {title: "Totito",description:"Design and simulate a circuit that represents the characteristic grid of the game, using LEDS, switches and gates", time:210, complexity:"hard", integrants:4})
CREATE (LLS:Project {title: "LLS",description:"Design and simulate a circuit that represents the process of logically running a binary number towards the left", time:210, complexity:"hard", integrants:3})
CREATE (LRS:Project {title: "LRS",description:"Design and simulate a circuit that represents the process of logically running a binary number towards the right", time:210, complexity:"hard", integrants:3})
CREATE (Address_decoder:Project {title: "Address_decoder",description:"Design and simulate a circuit that decodes a 3-bit binary address and selects the position correct cell within a memory", time:210, complexity:"hard", integrants:3})
CREATE (Binary_to_vowel_converter:Project {title: "Binary_to_vowel_converter",description:"Design and simulate a circuit that shows the vowels A, E, I, O, U. A binary number of 3 bits represents each vowel", time:210, complexity:"hard", integrants:3})
CREATE (Car_stopping_Distance_on_a_Tabletop:Project {title: "Car_stopping_Distance_on_a_Tabletop",description:"Write a short story", time:210, complexity:"hard", integrants:5})
CREATE (The_energetics_of_a_bouncing:Project {title: "The_energetics_of_a_bouncing ball",description:"calculating the energy of a bouncing ball", time:210, complexity:"hard", integrants:4})
CREATE (Cotton_buds:Project {title: "Cotton_buds_momentum_and_impulse",description:"determinating momentum and impulse of cotton buds", time:510, complexity:"easy", integrants:5})
CREATE (Bernoulli_Law:Project {title: "Bernoulli_Law",description:"A Bernoulli's Law Lab in a Bottle", time:210, complexity:"hard", integrants:5})
CREATE (Archimedes_Principle:Project {title: "Archimedes_Principle",description:"Microcomputer-Based Laboratory for Archimedes Principle and Density of Liquids", time:210, complexity:"hard", integrants:5})
CREATE (Radio:Project {title: "Radio",description:"Simulate a Radio using java language", time:110, complexity:"hard", integrants:2})
CREATE (Calculator:Project {title: "Calculator",description:"Simulate a calculator using java language, the calculator interface must work for all main programs", time:210, complexity:"hard", integrants:5})
CREATE (Sorts:Project {title: "Sorts",description:"using a profiler meassure the time that each sort spent sorting", time:210, complexity:"hard", integrants:2})
CREATE (Design_patterns:Project {title: "Design_patterns",description:"Simulate a post-fix calculator using factory and singleton design patterns", time:210, complexity:"hard", integrants:2})
CREATE (Simpy:Project {title: "Simpy",description:"Using simpy enviroment recreate the operation of a processor", time:210, complexity:"medium", integrants:2})
CREATE (Cards:Project {title: "Cards",description:"recreate a card game using java language", time:210, complexity:"hard", integrants:2})
CREATE (Dictionary:Project {title: "Dictionary",description:"Create a dictionary that can translate phrases with stored words", time:210, complexity:"hard", integrants:2})
CREATE (movies:Project {title: "movies",description:"recreating movie recommendations depending on the movies viewed by the user", time:510, complexity:"hard", integrants:5})
CREATE (Places:Project {title: "places",description:"recommend people places to visit in Guatemala", time:210, complexity:"hard", integrants:5})
CREATE (Hospital:Project {title: "Hospital",description:"create a hospital system that allows patients to be stored and ordered in order of priority of illness", time:210, complexity:"easy", integrants:2})
CREATE (Dicc:Project {title: "dicc",description:"Recreate a dictionary using binary trees", time:210, complexity:"hard", integrants:2})
CREATE (Hexa:Project {title: "Hexa",description:"Recreate a calculator that conver decimal numbers to hexadecimal", time:210, complexity:"hard", integrants:1})
CREATE (Planes:Project {title: "Planes",description:"create a program in java language that stores aircraft with your specifications", time:210, complexity:"hard", integrants:3})
CREATE (students:Project {title: "Students",description:"calculate and order the averages of aspiring students to enter the university", time:210, complexity:"easy", integrants:1})
CREATE (cinema:Project {title: "cinema",description:"Sort the data of a chain of movie theaters about visitors and raising money", time:210, complexity:"easy", integrants:1})
CREATE (library:Project {title: "library",description:"create a program in java language to have control of book loans in a library", time:210, complexity:"easy", integrants:1})
CREATE (ipod:Project {title: "ipod",description:"simulate the functionality of an ipod with an interface that can be used in different main programs in java language", time:210, complexity:"easy", integrants:1})
CREATE (GUI:Project {title: "GUI",description:"learn to use graphical interface in java", time:210, complexity:"easy", integrants:1})
CREATE (guards:Project {title: "guards",description:"create a program that keeps track of how many shifts a guard has to make per month", time:210, complexity:"easy", integrants:1})
CREATE (radioactive:Project {title: "radioactive",description:"create a reactivity simulator of a compound to calculate how long it will take to stop being radioactive", time:210, complexity:"medium", integrants:1})
CREATE (ticket:Project {title: "ticket",description:"create a simulator of a machine that generates tickets in java language", time:210, complexity:"easy", integrants:1})
CREATE (figures:Project {title: "figures",description:"create figures that move from side to side using bluj", time:210, complexity:"easy", integrants:1})
CREATE (Hollywood_Movies:Project {title: "Hollywood_Movies",description:"analyze Hollywood movies with statistical tools", time:210, complexity:"easy", integrants:1})
CREATE (riddle:Project {title: "riddle",description:"create a program using python language to read a text and decipher a hidden code", time:210, complexity:"medium", integrants:1})
CREATE (angle:Project {title: "angle",description:"create a program in python language that calculates the angles of a triangle", time:210, complexity:"hard", integrants:5})
CREATE (Mongo:Project {title: "Mongo",description:"Short exercise using mongo db", time:210, complexity:"easy", integrants:1})
CREATE (Texas_Holdem:Project {title: "Texas_Holdem",description:"Recreate the card game Texas Holdem using python language", time:210, complexity:"hard", integrants:1})
CREATE (Menu:Project {title: "Menu",description:"Create a menu of food using python language", time:210, complexity:"easy", integrants:1})
CREATE (Quiniela:Project {title: "Quiniela",description:"Create a program in python language that can recreate a football quiniela", time:210, complexity:"hard", integrants:1})
CREATE (Law_Sines:Project {title: "Law_Sines",description:"Create a program in python language that calculate that use the law sines", time:210, complexity:"easy", integrants:1})
CREATE (grades:Project {title: "grades",description:"create a program in python language that can calculate how much do you need to pass the class", time:210, complexity:"easy", integrants:1})
CREATE (Frog:Project {title: "Frog",description:"simulate a game based in jumping frogs using python language", time:210, complexity:"hard", integrants:1})
CREATE (canibals_and_missionaries:Project {title: "canibals_and_missionaries",description:"recreate the game canibals and missionaries using python language", time:210, complexity:"hard", integrants:1})
CREATE (bill:Project {title: "bill",description:"create a program using python language to calculate a restaurant account", time:210, complexity:"easy", integrants:1})
CREATE (Lost_items:Project {title: "Lost_items",description:"Create a program in python language that help the students in college to find their lost items", time:210, complexity:"hard", integrants:5})
CREATE (frequency:Project {title: "frequency",description:"Create a program with python language that analyze sounds frequencies", time:210, complexity:"medium", integrants:1})
CREATE (arithmetic:Project {title: "arithmetic",description:"Create a program in python language that can make all arithmetic operations", time:210, complexity:"easy", integrants:1})
CREATE (Series:Project {title: "Series",description:"create a program in java language that can recommend series depending on the user's preferences", time:210, complexity:"hard", integrants:5})
CREATE (Restaurants:Project {title: "Restaurants",description:"Create a program in java language that can recommend restaurants depending on the users tastes", time:210, complexity:"hard", integrants:5})
CREATE (Videogames:Project {title: "Videogames",description:"Create a program in java language that can recommend videogames", time:210, complexity:"hard", integrants:5})
CREATE (rurple:Project {title: "rurple",description:"Create a short maze with the robot to learn to use rurple", time:210, complexity:"easy", integrants:1})
CREATE (chocolates:Project {title: "chocolates",description:"cook chocolates using liquid nitrogen", time:210, complexity:"hard", integrants:5})
CREATE (autobiography:Project {title: "autobiography",description:"Write an autobiography using news from dates mentioned", time:210, complexity:"easy", integrants:1})
CREATE (elevator:Project {title: "elevator",description:"With electronical knowledge simulate an elevator", time:210, complexity:"hard", integrants:2})
CREATE (car_video_game:Project {title: "car_video_game",description:"Create a videogame that allow to play with physical components", time:210, complexity:"hard", integrants:5})
CREATE (Pressure:Project {title: "Pressure",description:"Change in systolic pressure before and after the ingestion of an energizing drink", time:210, complexity:"hard", integrants:5})
CREATE (classical_music:Project {title: "classical_music",description:"Effect of classical music on the memory of the elderly", time:210, complexity:"hard", integrants:5})
CREATE (caffeine:Project {title: "caffeine",description:"Compare the reaction time with and without coffee intake", time:210, complexity:"hard", integrants:5})
CREATE (decibels:Project {title: "decibels",description:"Comparison of the decibel level produced by the sound equipment of a car with closed windows and one with open windows", time:210, complexity:"hard", integrants:5})
CREATE (nicotine:Project {title: "nicotine",description:"Efecto de la nicotina en el rendimiento de la condición física", time:210, complexity:"hard", integrants:5})
CREATE (glues:Project {title: "glues",description:"Comparison between 2 types of glues", time:210, complexity:"hard", integrants:5})
CREATE (blood_sugar:Project {title: "blood_sugar",description:"Blood sugar levels after eating", time:210, complexity:"hard", integrants:5})
CREATE (drinks:Project {title: "drinks",description:"Comparison of blood sugar levels with two different beverages", time:210, complexity:"hard", integrants:5})
CREATE (paint:Project {title: "paint",description:"Effectiveness level of a paint or waterproofing layer", time:210, complexity:"hard", integrants:5})
CREATE (diet_coke:Project {title: "diet_coke",description:"There is a significant difference between the gas content between a normal cola water and its dietary counterpart", time:210, complexity:"hard", integrants:5})
CREATE (milks:Project {title: "milks",description:"Is there significant difference in boiling time between two different types of milk", time:210, complexity:"hard", integrants:5})
CREATE (Sanitary_nap:Project {title: "Sanitary_nap",description:"Is there a significant difference in absorption between two different types of sanitary napkins of the same brand", time:210, complexity:"hard", integrants:5})
CREATE (batteries:Project {title: "batteries",description:"s there a significant difference in the duration of charging between alkaline batteries compared to traditional batteries", time:210, complexity:"hard", integrants:5})
CREATE (boiling_fusion_point:Project {title: "boiling_fusion_point",description:"Determination of Boiling Point and Fusion Point", time:210, complexity:"easy", integrants:3})
CREATE (liquid_liquid_extraction:Project {title: "liquid-liquid_extraction",description:"Liquid-liquid extraction of caffeine from an energy pill", time:210, complexity:"easy", integrants:2})
CREATE (biofuel:Project {title: "biofuel",description:"Simple and Fractional Distillation Production of biofuel: Ethanol from several substrates", time:210, complexity:"hard", integrants:2})
CREATE (Steam:Project {title: "Steam",description:"Steam Trap Distillation", time:210, complexity:"hard", integrants:2})
CREATE (Chromatography:Project {title: "Chromatography",description:"Thin Layer and Column Chromatography", time:210, complexity:"hard", integrants:2})
CREATE (Re_crystallization:Project {title: "Re-crystallization",description:"Re-crystallization", time:210, complexity:"hard", integrants:2})
CREATE (Halogenation_Alquenos:Project {title: "Halogenation_Alquenos",description:"Halogenation of Alquenos", time:210, complexity:"hard", integrants:2})
CREATE (Catalytic_hydrogenation:Project {title: "Catalytic_hydrogenation",description:"Catalytic hydrogenation using an electrochemical system", time:210, complexity:"hard", integrants:3})
CREATE (SN2_strawberries:Project {title: "SN2_strawberries",description:"SN2: Synthesis of artificial flavor to strawberries and raspberries", time:210, complexity:"hard", integrants:2})
CREATE (antacid:Project {title: "antacid",description:"Titration of an antacid tablet", time:210, complexity:"easy", integrants:4})
CREATE (vinegar:Project {title: "vinegar",description:"determine acetic acid content in a vinegar sample", time:210, complexity:"easy", integrants:4})
CREATE (junk_food:Project {title: "junk_food",description:"How many calories are in junk food", time:210, complexity:"easy", integrants:5})
CREATE (Hess_law:Project {title: "Hess_law",description:"Verification of Hess's law", time:210, complexity:"hard", integrants:5})
CREATE (Neutralization:Project {title: "Short_Story",description:"Heat of Neutralization", time:210, complexity:"hard", integrants:5})
CREATE (The_Yodo_clock:Project {title: "The_Yodo_clock",description:"The Yodo clock", time:210, complexity:"hard", integrants:5})
CREATE (Le_Chatelier:Project {title: "Le_Chatelier",description:"Constant balance and Le Chatelier", time:210, complexity:"hard", integrants:5})
CREATE (kps:Project {title: "kps",description:"Measurement of the equilibrium constant of solubility of a compound", time:210, complexity:"hard", integrants:5})
CREATE (pH:Project {title: "pH",description:"Determination of ph of certain solutions", time:210, complexity:"hard", integrants:5})
CREATE (separation_mixture:Project {title: "separation_mixture",description:"Separation of compounds from a mixture", time:210, complexity:"easy", integrants:5})
CREATE (chemical_reactions:Project {title: "Short_Story",description:"types of chemical reactions", time:210, complexity:"easy", integrants:5})
CREATE (metathesis:Project {title: "metathesis",description:"Metathesis reactions in aqueous solution", time:210, complexity:"easy", integrants:5})
CREATE (hydrate:Project {title: "hydrate",description:"Determination of the formula of a hydrate", time:210, complexity:"hard", integrants:5})
CREATE (Stoichiometry:Project {title: "Stoichiometry",description:"Stoichiometry of a reaction", time:210, complexity:"easy", integrants:5})
CREATE (R_constant:Project {title: "R_constant",description:"Calculation of the constant R", time:210, complexity:"hard", integrants:5})
CREATE (Manganese_colors:Project {title: "Manganese_colors",description:"Knowing the colors of Manganese by redox titration", time:210, complexity:"easy", integrants:4})
CREATE (performance:Project {title: "performance",description:"Percentage of reaction performance aluminum recycling", time:210, complexity:"hard", integrants:4})
CREATE (Copper:Project {title: "Copper",description:"Percentage of reaction performance, Copper chemical transformations", time:210, complexity:"hard", integrants:4})
CREATE (physical_properties:Project {title: "physical_properties",description:"Relationship between chemical bonds and physical properties", time:210, complexity:"hard", integrants:4})
CREATE (parking_lot:Project {title: "parking_lot",description:"with knowledge of electronics simulate a parking lot", time:210, complexity:"hard", integrants:5})
CREATE (tetris:Project {title: "tetris",description:"simulation of the game tetris", time:210, complexity:"hard", integrants:1})
// --- Project nodes -------------------------------------------------------
// One CREATE per student project. Properties: title, description,
// time (every visible Project uses 210), complexity (easy|medium|hard),
// integrants (team size). The bound variables (water_dispenser, ...) are
// referenced later in this same query by the relationship CREATE clause,
// so they must not be renamed.
// NOTE(review): several descriptions start with "n this science project" —
// presumably a dropped leading "I" from copy-paste; confirm intended text.
CREATE (water_dispenser:Project {title: "water_dispenser",description:"with knowledge of electronics make a water_dispenser", time:210, complexity:"hard", integrants:5})
// NOTE(review): description "Write a short story" looks copy-pasted from a
// writing project — confirm the intended food_dispenser description.
CREATE (food_dispenser:Project {title: "food_dispenser",description:"Write a short story", time:210, complexity:"hard", integrants:5})
CREATE (Remote_car:Project {title: "remote_car",description:"remote control car", time:210, complexity:"hard", integrants:2})
CREATE (home:Project {title: "home",description:"home automation", time:210, complexity:"hard", integrants:5})
CREATE (Key_finder:Project {title: "Key_finder",description:"with knowledge of electronics create a key finder", time:210, complexity:"hard", integrants:3})
CREATE (irrigation_system:Project {title: "irrigation_system",description:"with knowledge of electronics create an irrigation system", time:210, complexity:"hard", integrants:3})
CREATE (Advertisements:Project {title: "advertisements",description:"create an advertising campaign to sell a product", time:210, complexity:"easy", integrants:5})
CREATE (product:Project {title: "product",description:"create your own product and sell", time:210, complexity:"hard", integrants:5})
CREATE (DNA_extraction:Project {title: "DNA_extraction",description:"make your own DNA extraction kit from household chemicals and use it to extract DNA from strawberries", time:210, complexity:"hard", integrants:1})
CREATE (green_detergents:Project {title: "green_detergents",description:"compare the toxicity of green and conventional liquid detergents using worms as test organisms", time:210, complexity:"hard", integrants:2})
CREATE (acid_rain:Project {title: "acid_rain",description:"How does acid rain affect aquatic ecosystems", time:210, complexity:"hard", integrants:2})
CREATE (Soil_erosion:Project {title: "Soil_erosion",description:"can plants stop soil erosion?", time:210, complexity:"hard", integrants:5})
CREATE (Landslides:Project {title: "Landslides",description:"What causes rocks to slide down a slope", time:210, complexity:"medium", integrants:3})
CREATE (Molecular_scissors:Project {title: "Molecular_scissors",description:"Find out which enzymes will cut, and where by making a restriction map. Then you can figure out what will happen if you change the sequence of the DNA", time:210, complexity:"hard", integrants:3})
CREATE (genome_projects:Project {title: "genome_projects",description:"All animals have a genome, but do they all have genome projects? Find out which animals are currently having their genomes sequenced and how much we know already", time:210, complexity:"hard", integrants:2})
CREATE (Cryopreservation:Project {title: "Cryopreservation",description:"Cryopreservation—storing seeds in ultra-cold liquid nitrogen—is one method for maintaining plant genetic stocks in seed banks", time:210, complexity:"hard", integrants:4})
CREATE (pets_food:Project{title: "pets_food",description:"Are you in charge of feeding your family pet? How much food do you think your pet eats compared to other kinds of pets? ", time:210, complexity:"easy", integrants:3})
// NOTE(review): another copy-pasted "Write a short story" description —
// confirm intended drugs_genetics description.
CREATE (drugs_genetics:Project {title: "drugs_genetics",description:"Write a short story", time:210, complexity:"hard", integrants:5})
CREATE (leaves_colors:Project {title: "leaves_colors",description:"In this project, you will uncover the hidden colors of fall by separating plant pigments with paper chromatography", time:210, complexity:"easy", integrants:4})
CREATE (antibodies:Project {title: "antibodies",description:"This project is a practical introduction to the human immune system in which you will learn about what antibodies are, how they are formed, and how they can be used to identify different types of cells", time:210, complexity:"hard", integrants:5})
CREATE (Stardust:Project {title: "Stardust",description:"catching stardust", time:210, complexity:"hard", integrants:5})
CREATE (heavy_metals:Project {title: "heavy_metals",description:"In this experiment, find out if one common heavy metal, copper, can be toxic to an aquatic environment", time:210, complexity:"hard", integrants:5})
// NOTE(review): "cabagge" — likely misspelling of "cabbage"; kept because the
// relationship clause later references the variable by this spelling.
CREATE (cabagge_clones:Project {title: "cabagge_clones",description:"In this science project you will get to find out by making your own cabbage clones", time:210, complexity:"hard", integrants:5})
CREATE (organic_waste:Project {title: "organic_waste",description:"Organic waste—like table scraps, agricultural waste, and human and animal waste—is biodegradable. This means, it can be chemically broken down by bacteria, fungi, or other living organisms into very small parts", time:210, complexity:"hard", integrants:5})
CREATE (kidney:Project {title: "kidney",description:" In this science project, with the help of bioinformatics databases, you will explore how a kidney could be bioengineered using stem cells", time:210, complexity:"hard", integrants:4})
// NOTE(review): third copy-pasted "Write a short story" description —
// confirm intended oxygen description.
CREATE (oxygen:Project {title: "oxygen",description:"Write a short story", time:210, complexity:"hard", integrants:5})
CREATE (prevent_erosion:Project {title: "prevent_erosion",description:"In this experiment you will learn how to prevent erosion", time:210, complexity:"hard", integrants:5})
CREATE (Sea:Project {title: "sea",description:"meassure how salty is the sea", time:210, complexity:"easy", integrants:4})
CREATE (Soil_worms:Project {title: "soil_worms",description:"In this science project, you will discover in what kind of soil it likes to do its work", time:210, complexity:"easy", integrants:4})
CREATE (earth_axis:Project {title: "earth_axis",description:"how do seasons affects earth axis", time:210, complexity:"easy", integrants:3})
// NOTE(review): title "chick_brethe" misspells "breathe" (the variable is
// spelled chick_breathe) — confirm and fix the title in a follow-up.
CREATE (chick_breathe:Project {title: "chick_brethe",description:"find out how do chicks breathe inside a shell", time:210, complexity:"medium", integrants:4})
CREATE (faucet:Project {title: "faucet",description:"how faucet can save water", time:210, complexity:"hard", integrants:5})
CREATE (DNA_onion:Project {title: "DNA_onion",description:"extracting dna of an onion", time:210, complexity:"hard", integrants:5})
CREATE (Water_from_air:Project {title: "Water_from_air",description:"In this environmental engineering science project, you will investigate one way that people living in arid regions can collect water inexpensively: dew traps", time:210, complexity:"hard", integrants:5})
CREATE (moon:Project {title: "moon",description:"How much brighter is a full moon than the other phases of the moon? How is the brightness of the moon measured", time:210, complexity:"hard", integrants:5})
CREATE (capillary:Project {title: "capillary",description:" In this science project, you will use colored water and carnations to figure out where the water goes", time:210, complexity:"easy", integrants:2})
CREATE (ballon_car:Project {title: "ballon_car",description:"Do you think you could build a car powered by nothing but air? A balloon-powered car is pushed forward by air escaping from a balloon", time:210, complexity:"hard", integrants:5})
CREATE (bubbleology:Project {title: "bubbleology",description:"In this experiment you can test if adding corn syrup or glycerin to your bubble solution will make it just as good as the stuff you can buy", time:210, complexity:"easy", integrants:1})
CREATE (e_waste:Project {title: "e_waste",description:"In this science project, you'll explore what people in your community do with electronic waste, commonly called e-waste", time:210, complexity:"hard", integrants:3})
CREATE (clean_air:Project {title: "clean_air",description:"Find out how clean the air is in this simple experiment", time:210, complexity:"easy", integrants:2})
CREATE (soil_depth:Project {title: "soil_depth",description:"With this project you can get all the dirt on soil formation, soil horizons, and the composition of different soils", time:210, complexity:"hard", integrants:5})
CREATE (absorptivity:Project {title: "absorptivity",description:"In this science project, you will test the absorptivity of different materials (called sorbents) to discover which ones are best", time:210, complexity:"hard", integrants:5})
CREATE (germs_soup:Project {title: "germs_soup",description:" In this science project, you'll investigate which parts of the hand are the most difficult to wash germs off of.", time:210, complexity:"hard", integrants:5})
CREATE (roots:Project {title: "roots",description:" In this project, you will construct simple devices that hold several germinating seeds, which allow you to watch how growing rootlets respond as you rotate the devices, effectively altering", time:210, complexity:"hard", integrants:5})
CREATE (cereal_iron:Project {title: "cereal_iron",description:"In this experiment, you will devise a way of testing foods for supplemental iron additives. Then you will use your design to test different breakfast cereals to see how much iron they contain. Which brand of cereal will have the most iron in it", time:210, complexity:"hard", integrants:1})
CREATE (cell:Project {title: "cell",description:"Does an animal with a bigger genome need a larger cell nucleus to store its DNA", time:210, complexity:"hard", integrants:5})
CREATE (mutations:Project {title: "mutations",description:"n this science project, you will explore online genetic databases to identify how a mutation in a gene can result in a dysfunctional protein, and how other mutations may have no effect", time:210, complexity:"hard", integrants:5})
CREATE (rabid:Project {title: "rabid",description:"Nevertheless, it is important to avoid animals that have rabies so that you don't get infected. So which wild animals are likely to carry rabies", time:210, complexity:"hard", integrants:5})
CREATE (desalination:Project {title: "desalination",description:"n this science project, you will make a solar desalination apparatus using readily available materials, and a power source that is free", time:210, complexity:"hard", integrants:5})
CREATE (parallax:Project {title: "parallax",description:"In this astronomy science project you will find out by exploring the link between the distance of an object and perspective", time:210, complexity:"hard", integrants:5})
CREATE (memory:Project {title: "memory",description:"This is an easy project where you can test the effect of exercise on a critical brain function: memory", time:210, complexity:"hard", integrants:2})
CREATE (microorganisms:Project {title: "microorganisms",description:"This project uses liquid cultures and agar plates to investigate the effects of different concentrations of a food preservative on microbial growth", time:210, complexity:"medium", integrants:5})
CREATE (Submarines:Project {title: "Submarines",description:"In this science project, you can investigate how submarines use stabilizing fins to move forward. You might even figure out the secrets to maneuvering a submarine", time:210, complexity:"easy", integrants:2})
CREATE (flu:Project {title: "flu",description:" In this science project, you will make a simple model to investigate how the immune system defends the human body from common illnesses", time:210, complexity:"easy", integrants:3})
CREATE (water_toxicity:Project {title: "water_toxicity",description:" One way to test for the presence of toxic compounds in a water sample is a bioassay. In a bioassay, a living organism serves as a detector for toxins—the same way canaries were used in coal mines to detect invisible toxic gases. In this project, water fleas (Daphnia magna), a freshwater crustacean, are used in a bioassay to monitor water quality", time:210, complexity:"easy", integrants:4})
CREATE (winds:Project {title: "winds",description:"Find out how wind changes air pressure to bring to objects together in this easy and fun science fair project", time:210, complexity:"easy", integrants:3})
CREATE (Stethoscope:Project {title: "Stethoscope",description:"n this science project, you will make three of your own homemade stethoscopes and figure out which stethoscope design works best and why", time:210, complexity:"easy", integrants:4})
CREATE (wifi:Project {title: "wifi",description:"In this science project, you will do an experiment to find out which materials cause the biggest drop in signal strength from a wireless router", time:210, complexity:"easy", integrants:5})
CREATE (ants:Project {title: "ants",description:"his project is an interesting way to investigate what substances are effective as ant repellents. The goal is to find substances that keep ants away, yet are safe for humans and the environment", time:210, complexity:"hard", integrants:5})
CREATE (tsunami:Project {title: "tsunami",description:" In this ocean science project, you will model a tsunami and investigate how wave velocity (speed) depends on water depth. Does it match the mathematical equation", time:210, complexity:"medium", integrants:6})
CREATE (biomass:Project {title: "biomass",description:"You can get energy out of biomass by burning it, turning it into a liquid, or by turning it into a gas called biogas", time:210, complexity:"easy", integrants:5})
CREATE (paper_fiber:Project {title: "paper_fiber",description:" If you're interested in arts and crafts, you might like this project. It uses several alternative, renewable sources of fiber to make paper, and compares the resulting papers for strength and writing quality", time:210, complexity:"hard", integrants:5})
CREATE (bug:Project {title: "bug",description:"This science project shows you how you can ask a sowbug (or pillbug) a similar question in order to learn about their preferences. Give it a try to find out what types of microenvironments these tiny crustaceans prefer", time:210, complexity:"easy", integrants:5})
CREATE (geodes:Project {title: "geodes",description:"in this geology science project, you'll see if the same expression holds true for a rock, but not just any old rock, a special type of rock called a geode, which looks rather plain and ordinary on the outside, but inside can hold crystals and beautiful colors", time:210, complexity:"easy", integrants:4})
CREATE (candy_crystals:Project {title: "candy_crystals",description:"In this science fair project you'll learn how to grow your very own rock candy and determine if using seed crystals changes the growth rate of your sugar crystals", time:210, complexity:"hard", integrants:5})
CREATE (crater:Project {title: "crater",description:"You will then analyze that data for relationships between a crater's depth and diameter. This is your chance to perform a science project as a NASA researcher would", time:210, complexity:"medium", integrants:5})
CREATE (Soil_moisture:Project {title: "Soil_moisture",description:"How can you help conserve water and prevent such waste? One way is to build an electronic soil moisture sensor", time:210, complexity:"medium", integrants:5})
CREATE (birds:Project {title: "birds",description:"You'll be able to observe birds at close range, find out what birds inhabit your area, and learn about their seed-eating preferences", time:210, complexity:"easy", integrants:4})
CREATE (bristlebot:Project {title: "bristlebot",description:"As robots become more common, it is increasingly important to use green energy sources to power them. In this project, you will build and test a popular robot called a bristlebot — a tiny robot made using toothbrushes", time:210, complexity:"easy", integrants:5})
CREATE (radiation:Project {title: "radiation",description:"you will investigate how much radiation your cell phone emits ", time:210, complexity:"hard", integrants:5})
// --- Resource nodes ------------------------------------------------------
// Materials/equipment a Project can consume via the USE_A relationship
// created later in this query. Properties: title, specifications.
// NOTE(review): title casing is inconsistent ("Computer"/"Paper" capitalized,
// "fruit"/"custom" lowercase) — confirm whether titles are matched
// case-sensitively anywhere before normalizing.
CREATE (Computer:Resource {title: "Computer", specifications: "A computer with an ide to code"})
CREATE (Unity:Resource {title: "Unity", specifications: "Software Unity"})
CREATE (Arduino:Resource {title: "Arduino", specifications: "Arduino a mini-computer"})
CREATE (AndroidStudio:Resource {title: "AndroidStudio", specifications: "Android Studio software"})
CREATE (Fruit:Resource {title: "fruit", specifications: "A fresh fruit"})
CREATE (Vegetable:Resource {title: "vegetable", specifications: "A fresh vegetable"})
CREATE (Subjects:Resource {title: "subjects", specifications: "Humans for investigation"})
CREATE (Custom:Resource {title: "custom", specifications: "a custom or suit"})
CREATE (Raspberry:Resource {title: "raspberry", specifications: "a mini-computer with raspberry"})
CREATE (Reagents:Resource {title: "reagents", specifications: "use the neccesary reagents"})
CREATE (Paper:Resource {title: "Paper", specifications: "Paper to write"})
CREATE (electrical_circuit:Resource {title: "electrical_circuit", specifications: "leds,protoboard,jumpers,resistances"})
// --- Course nodes --------------------------------------------------------
// Courses a Project can belong to via the PROJECT_FOR relationship created
// later in this query. Properties: title, Departament.
// NOTE(review): the property key "Departament" misspells "Department", but it
// is used by every Course node — renaming it must be done for all nodes and
// all queries at once, not piecemeal. Departament values are also
// inconsistently cased ("physic"/"Physics", "biology", "Computer Sciences"/
// "Computer sciences") — confirm before normalizing.
CREATE (DataStructure:Course {title: "Data Structure",Departament: "Computer Science"})
CREATE (Physics2:Course {title: "Physics 2",Departament: "Physics"})
CREATE (Physics1:Course {title: "Physics 1",Departament: "Physics"})
CREATE (Psicology:Course {title: "Basic psicology",Departament:"Psicology"})
CREATE (Humanity:Course {title: "Humanity Sciences",Departament:"Social studies"})
CREATE (Code:Course {title: "Basic coding",Departament:"Computer Sciences"})
CREATE (VideoGames:Course {title: "VideoGames",Departament:"Computer Sciences"})
CREATE (MobilePlataforms:Course {title: "Mobile Plataforms",Departament:"Computer Sciences"})
CREATE (Assembler:Course {title: "Assembler",Departament:"Computer Sciences"})
CREATE (Letters:Course {title: "Writing",Departament:"Languages"})
CREATE (Organic1:Course {title: "Organic Chemistry",Departament:"Chemistry"})
CREATE (Global_citizenship:Course {title: "Global_citizenship",Departament:"social Sciences "})
CREATE (POO:Course {title: "POO",Departament:"Computer Sciences"})
CREATE (algorithms_and_basic_programming:Course {title: "algorithms_and_basic_programming",Departament:"Computer Sciences"})
CREATE (botany1:Course {title: "botany1",Departament:"biology"})
CREATE (genetic_resources:Course {title: "genetic_resources",Departament:"biology"})
CREATE (system_and_evolution:Course {title: "system_and_evolution",Departament:"biology"})
CREATE (forest_ecology:Course {title: "forest_ecology",Departament:"biology"})
CREATE (biometrics:Course {title: "biometrics",Departament:"biology"})
CREATE (biology:Course {title: "biology",Departament:"biology"})
CREATE (histology_and_histochemistry:Course {title: "histology_and_histochemistry",Departament:"biology"})
CREATE (introduction_to_molecular_biosciences:Course {title: "introduction_to_molecular_biosciences",Departament:"molecular biotechnology"})
CREATE (biochemistry_of_macromolecules:Course {title: "biochemistry_of_macromolecules",Departament:"molecular biotechnology"})
CREATE (microbiology1:Course {title: "microbiology1",Departament:"molecular biotechnology"})
CREATE (general_inmunology:Course {title: "general_inmunology",Departament:"molecular biotechnology"})
CREATE (molecular_biology:Course {title: "molecular_biology",Departament:"molecular biotechnology"})
CREATE (bioinformatics:Course {title: "bioinformatics",Departament:"molecular biotechnology"})
CREATE (biogeography:Course {title: "biogeography",Departament:"molecular biotechnology"})
CREATE (databases:Course {title: "databases",Departament:"Computer sciences"})
CREATE (statistics1:Course {title: "statistics1",Departament:"mathematics"})
CREATE (statistics2:Course {title: "statistics2",Departament:"mathematics"})
CREATE (life_sciences:Course {title: "life_sciences",Departament:"biology"})
CREATE (IPC:Course {title: "IPC",Departament:"investigation"})
CREATE (project_management:Course {title: "project_management",Departament:"administration"})
CREATE (electrical_circuits:Course {title: "electrical_circuits",Departament:"electronics"})
CREATE (analogic_electronics:Course {title: "analogic_electronics",Departament:"electronics"})
CREATE (Design_Thinking:Course {title: "Design_Thinking",Departament:"investigation"})
CREATE (electromagnetic_theory:Course {title: "electromagnetic_theory",Departament:"physic"})
CREATE (thermodynamics1:Course {title: "thermodynamics1",Departament:"physic"})
// NOTE(review): variable is "thermodynamics" but the title is
// "thermodynamics2" — later clauses that intend the second course must use
// the variable name "thermodynamics"; confirm this is intentional.
CREATE (thermodynamics:Course {title: "thermodynamics2",Departament:"physic"})
CREATE (bioengineering:Course {title: "bioengineering",Departament:"chemistry"})
CREATE (chemistry1:Course {title: "chemistry1",Departament:"chemistry"})
CREATE (chemistry2:Course {title: "chemistry2",Departament:"chemistry"})
CREATE (general_chemistry:Course {title: "general_chemistry",Departament:"chemistry"})
CREATE (analytic_chemistry:Course {title: "analytic_chemistry",Departament:"chemistry"})
CREATE (physical_chemistry:Course {title: "physical_chemistry",Departament:"chemistry"})
CREATE (dynamic_mechanics:Course {title: "dynamic_mechanics",Departament:"mechanics"})
CREATE (draw_CAD:Course {title: "draw_CAD",Departament:"Design"})
CREATE (signals_processing:Course {title: "signals_processing",Departament:"electronics"})
CREATE (quality_managment:Course {title: "quality_managment",Departament:"chemistry"})
CREATE (industrial_biological_processes:Course {title: "industrial_biological_processes",Departament:"chemistry"})
CREATE (pharmacognosy:Course {title: "pharmacognosy",Departament:"chemistry"})
CREATE (food_chemistry:Course {title: "food_chemistry",Departament:"chemistry"})
// NOTE(review): applied_neuroscience filed under "chemistry" — confirm.
CREATE (applied_neuroscience:Course {title: "applied_neuroscience",Departament:"chemistry"})
CREATE
(ProjectGenerator)-[:PROJECT_FOR]->(DataStructure),
(Story)-[:PROJECT_FOR]->(Letters),
(Story)-[:USE_A]->(Paper),
(Story)-[:USE_A]->(Computer),
(Notizen)-[:PROJECT_FOR]->(Code),
(RRasberry)-[:PROJECT_FOR]->(Code),
(RArduino)-[:PROJECT_FOR]->(Code),
(RRasberry)-[:USE_A]->(Raspberry),
(RArduino)-[:USE_A]->(Arduino),
(RRasberry)-[:USE_A]->(Computer),
(RArduino)-[:USE_A]->(Computer),
(Simulator)-[:USE_A]->(Unity),
(Simulator)-[:USE_A]->(Computer),
(HollywoodPhysics)-[:USE_A]->(Computer),
(HollywoodPhysics)-[:USE_A]->(Paper),
(SpaceWars)-[:USE_A]->(Unity),
(SpaceWars)-[:USE_A]->(Computer),
(Notizen)-[:USE_A]->(AndroidStudio),
(Notizen)-[:USE_A]->(Computer),
(ElasticConstat)-[:USE_A]->(Paper),
(HollywoodPhysics)-[:PROJECT_FOR]->(Physics2),
(RRasberry)-[:PROJECT_FOR]->(Assembler),
(ElasticConstat)-[:PROJECT_FOR]->(Physics2),
(RArduino)-[:PROJECT_FOR]->(Assembler),
(Notizen)-[:PROJECT_FOR]->(MobilePlataforms),
(Simulator)-[:PROJECT_FOR]->(Code),
(Simulator)-[:PROJECT_FOR]->(VideoGames),
(Simulator)-[:PROJECT_FOR]->(Physics2),
(Simulator)-[:PROJECT_FOR]->(Physics1),
(SpaceWars)-[:PROJECT_FOR]->(Code),
(SpaceWars)-[:PROJECT_FOR]->(VideoGames),
(SunRotation)-[:PROJECT_FOR]->(Physics2),
(SunRotation)-[:PROJECT_FOR]->(DataStructure),
(SunRotation)-[:PROJECT_FOR]->(Code),
// Fixed: four of the five patterns here were exact duplicates of patterns
// already listed earlier in this same CREATE clause
// (ElasticConstat-USE_A->Paper, ElasticConstat-PROJECT_FOR->Physics2,
// HollywoodPhysics-USE_A->Computer, HollywoodPhysics-PROJECT_FOR->Physics2).
// CREATE, unlike MERGE, materializes a second identical relationship for
// each repeat, so only the non-duplicate pattern is kept.
(ElasticConstat)-[:USE_A]->(Computer),
(BrominationA)-[:USE_A]->(Reagents),
(BrominationA)-[:PROJECT_FOR]->(Organic1),
(Halogenation)-[:USE_A]->(Reagents),
(Halogenation)-[:PROJECT_FOR]->(Organic1),
(Hydrogenation)-[:USE_A]->(Reagents),
(Hydrogenation)-[:PROJECT_FOR]->(Organic1),
(SN2)-[:USE_A]->(Reagents),
(SN2)-[:PROJECT_FOR]->(Organic1),
(Lisp)-[:USE_A]->(Computer),
(revolutions_of_a_stepper)-[:USE_A]->(Computer),
(revolutions_of_a_stepper)-[:PROJECT_FOR]->(Assembler),
(Angular_velocity)-[:USE_A]->(Computer),
(Angular_velocity)-[:PROJECT_FOR]->(Physics2),
(Youngs_Modulus_of_a_Marshmallow)-[:USE_A]->(Paper),
(Youngs_Modulus_of_a_Marshmallow)-[:PROJECT_FOR]->(Physics2),
(Slipping_Tipping)-[:USE_A]->(Computer),
(Slipping_Tipping)-[:PROJECT_FOR]->(Physics2),
(Rotational_energy)-[:USE_A]->(Computer),
(Rotational_energy)-[:PROJECT_FOR]->(Physics2),
(Torque)-[:USE_A]->(Paper),
(Torque)-[:PROJECT_FOR]->(Physics2),
(figures)-[:USE_A]->(Computer),
(figures)-[:PROJECT_FOR]->(POO),
(Hollywood_Movies)-[:USE_A]->(Computer),
(Hollywood_Movies)-[:PROJECT_FOR]->(POO),
(riddle)-[:USE_A]->(Computer),
(riddle)-[:PROJECT_FOR]->(algorithms_and_basic_programming),
(angle)-[:USE_A]->(Computer),
(angle)-[:PROJECT_FOR]->(algorithms_and_basic_programming),
(Mongo)-[:USE_A]->(Computer),
(Mongo)-[:PROJECT_FOR]->(algorithms_and_basic_programming),
(Texas_Holdem)-[:USE_A]->(Computer),
(Texas_Holdem)-[:PROJECT_FOR]->(algorithms_and_basic_programming),
(Menu)-[:USE_A]->(Computer),
(Menu)-[:PROJECT_FOR]->(algorithms_and_basic_programming),
(Sanitary_nap)-[:USE_A]->(Paper),
(Sanitary_nap)-[:PROJECT_FOR]->(statistics1),
(boiling_fusion_point)-[:USE_A]->(Reagents),
(boiling_fusion_point)-[:PROJECT_FOR]->(Organic1),
(liquid_liquid_extraction)-[:USE_A]->(Reagents),
(liquid_liquid_extraction)-[:PROJECT_FOR]->(Organic1),
(biofuel)-[:USE_A]->(Reagents),
(biofuel)-[:PROJECT_FOR]->(Organic1),
(Steam)-[:USE_A]->(Reagents),
(Steam)-[:PROJECT_FOR]->(Organic1),
(Chromatography)-[:USE_A]->(Reagents),
// Fixed: the target was an empty node pattern "()", which would CREATE a new
// anonymous, unlabeled node instead of linking the project to a course.
// Chromatography is an organic-lab technique like Steam, Re_crystallization
// and liquid_liquid_extraction in this list, all of which use Reagents and
// map to Organic1 — so Organic1 is the intended course.
(Chromatography)-[:PROJECT_FOR]->(Organic1),
(Re_crystallization)-[:USE_A]->(Reagents),
(Re_crystallization)-[:PROJECT_FOR]->(Organic1),
(Halogenation_Alquenos)-[:USE_A]->(Reagents),
(Halogenation_Alquenos)-[:PROJECT_FOR]->(Organic1),
(Catalytic_hydrogenation)-[:USE_A]->(Reagents),
(Catalytic_hydrogenation)-[:PROJECT_FOR]->(Organic1),
(SN2_strawberries)-[:USE_A]->(Reagents),
(SN2_strawberries)-[:PROJECT_FOR]->(Organic1),
(antacid)-[:USE_A]->(Reagents),
(antacid)-[:PROJECT_FOR]->(chemistry2),
(vinegar)-[:USE_A]->(Reagents),
(vinegar)-[:PROJECT_FOR]->(chemistry2),
(junk_food)-[:USE_A]->(Reagents),
(junk_food)-[:PROJECT_FOR]->(chemistry2),
(Hess_law)-[:USE_A]->(Reagents),
(Hess_law)-[:PROJECT_FOR]->(chemistry2),
(Neutralization)-[:USE_A]->(Reagents),
(Neutralization)-[:PROJECT_FOR]->(chemistry2),
(The_Yodo_clock)-[:USE_A]->(Reagents),
(The_Yodo_clock)-[:PROJECT_FOR]->(chemistry2),
(Le_Chatelier)-[:USE_A]->(Reagents),
(Le_Chatelier)-[:PROJECT_FOR]->(chemistry2),
(kps)-[:USE_A]->(Reagents),
(kps)-[:PROJECT_FOR]->(chemistry2),
(pH)-[:USE_A]->(Reagents),
(pH)-[:PROJECT_FOR]->(chemistry2),
(separation_mixture)-[:USE_A]->(Reagents),
(separation_mixture)-[:PROJECT_FOR]->(chemistry2),
(chemical_reactions)-[:USE_A]->(Reagents),
(chemical_reactions)-[:PROJECT_FOR]->(chemistry2),
(metathesis)-[:USE_A]->(Reagents),
(metathesis)-[:PROJECT_FOR]->(chemistry1),
(flu)-[:USE_A]->(Reagents),
(flu)-[:PROJECT_FOR]->(general_inmunology),
(water_toxicity)-[:USE_A]->(Reagents),
(water_toxicity)-[:PROJECT_FOR]->(Organic1),
(winds)-[:USE_A]->(Paper),
(winds)-[:PROJECT_FOR]->(Physics1),
(Stethoscope)-[:USE_A]->(Paper),
(Stethoscope)-[:PROJECT_FOR]->(bioinformatics),
(wifi)-[:USE_A]->(Computer),
(wifi)-[:PROJECT_FOR]->(Physics2),
(ants)-[:USE_A]->(Reagents),
(ants)-[:PROJECT_FOR]->(biology),
(tsunami)-[:USE_A]->(Computer),
(tsunami)-[:PROJECT_FOR]->(Physics2),
(biomass)-[:USE_A]->(Reagents),
(biomass)-[:PROJECT_FOR]->(Physics2),
(paper_fiber)-[:USE_A]->(Paper),
(paper_fiber)-[:PROJECT_FOR]->(Organic1),
(bug)-[:USE_A]->(Reagents),
(bug)-[:PROJECT_FOR]->(biology),
(geodes)-[:USE_A]->(Computer),
(geodes)-[:PROJECT_FOR]->(biology),
(candy_crystals)-[:USE_A]->(Reagents),
(candy_crystals)-[:PROJECT_FOR]->(Organic1),
(crater)-[:USE_A]->(Computer),
(crater)-[:PROJECT_FOR]->(Physics1),
(Soil_moisture)-[:USE_A]->(Computer),
(Soil_moisture)-[:PROJECT_FOR]->(forest_ecology),
(birds)-[:USE_A]->(Paper),
(birds)-[:PROJECT_FOR]->(biology),
(bristlebot)-[:USE_A]->(Computer),
(bristlebot)-[:PROJECT_FOR]->(Physics2),
(radiation)-[:USE_A]->(Computer),
(radiation)-[:PROJECT_FOR]->(Physics2),
(hydrate)-[:USE_A]->(Reagents),
(hydrate)-[:PROJECT_FOR]->(chemistry1),
(Stoichiometry)-[:USE_A]->(Reagents),
(Stoichiometry)-[:PROJECT_FOR]->(chemistry1),
(R_constant)-[:USE_A]->(Reagents),
(R_constant)-[:PROJECT_FOR]->(chemistry1),
(Manganese_colors)-[:USE_A]->(Reagents),
(Manganese_colors)-[:PROJECT_FOR]->(chemistry1),
(performance)-[:USE_A]->(Reagents),
(performance)-[:PROJECT_FOR]->(chemistry1),
(Copper)-[:USE_A]->(Reagents),
(Copper)-[:PROJECT_FOR]->(chemistry1),
(physical_properties)-[:USE_A]->(Reagents),
(physical_properties)-[:PROJECT_FOR]->(chemistry1),
(Quiniela)-[:USE_A]->(Computer),
(Quiniela)-[:PROJECT_FOR]->(algorithms_and_basic_programming),
(Law_Sines)-[:USE_A]->(Computer),
(parking_lot)-[:USE_A]->(electrical_circuit),
(parking_lot)-[:PROJECT_FOR]->(electrical_circuits),
(tetris)-[:USE_A]->(electrical_circuit),
(tetris)-[:PROJECT_FOR]->(electrical_circuits),
(water_dispenser)-[:USE_A]->(electrical_circuit),
(water_dispenser)-[:PROJECT_FOR]->(electrical_circuits),
(food_dispenser)-[:USE_A]->(electrical_circuit),
(food_dispenser)-[:PROJECT_FOR]->(electrical_circuits),
(Remote_car)-[:USE_A]->(electrical_circuit),
(Remote_car)-[:PROJECT_FOR]->(electrical_circuits),
(home)-[:USE_A]->(electrical_circuit),
(home)-[:PROJECT_FOR]->(electrical_circuits),
(Key_finder)-[:USE_A]->(electrical_circuit),
(Key_finder)-[:PROJECT_FOR]->(electrical_circuits),
(irrigation_system)-[:USE_A]->(electrical_circuit),
(irrigation_system)-[:PROJECT_FOR]->(electrical_circuits),
(Advertisements)-[:USE_A]->(Computer),
(Advertisements)-[:PROJECT_FOR]->(Design_Thinking),
(product)-[:USE_A]->(Computer),
(product)-[:PROJECT_FOR]->(Design_Thinking),
(DNA_extraction)-[:USE_A]->(Computer),
(DNA_extraction)-[:PROJECT_FOR]->(life_sciences),
(green_detergents)-[:USE_A]->(Computer),
(green_detergents)-[:PROJECT_FOR]->(biochemistry_of_macromolecules),
(acid_rain)-[:USE_A]->(Computer),
(acid_rain)-[:PROJECT_FOR]->(biochemistry_of_macromolecules),
(Soil_erosion)-[:USE_A]->(Computer),
(Soil_erosion)-[:PROJECT_FOR]->(forest_ecology),
(Landslides)-[:USE_A]->(Computer),
(Landslides)-[:PROJECT_FOR]->(botany1),
(Law_Sines)-[:PROJECT_FOR]->(algorithms_and_basic_programming),
(grades)-[:USE_A]->(Computer),
(grades)-[:PROJECT_FOR]->(algorithms_and_basic_programming),
(Frog)-[:USE_A]->(Computer),
(Frog)-[:PROJECT_FOR]->(algorithms_and_basic_programming),
(canibals_and_missionaries)-[:USE_A]->(Computer),
(canibals_and_missionaries)-[:PROJECT_FOR]->(algorithms_and_basic_programming),
(bill)-[:USE_A]->(Computer),
(bill)-[:PROJECT_FOR]->(algorithms_and_basic_programming),
(Sea)-[:USE_A]->(Reagents),
(Sea)-[:PROJECT_FOR]->(system_and_evolution),
(Soil_worms)-[:USE_A]->(Computer),
(Soil_worms)-[:PROJECT_FOR]->(biogeography),
(earth_axis)-[:USE_A]->(Computer),
(earth_axis)-[:PROJECT_FOR]->(Physics1),
(chick_breathe)-[:USE_A]->(Reagents),
(chick_breathe)-[:PROJECT_FOR]->(biology),
(faucet)-[:USE_A]->(Reagents),
(faucet)-[:PROJECT_FOR]->(molecular_biology),
(DNA_onion)-[:USE_A]->(Reagents),
(DNA_onion)-[:PROJECT_FOR]->(biology),
(Water_from_air)-[:USE_A]->(Reagents),
(Water_from_air)-[:PROJECT_FOR]->(Organic1),
(moon)-[:USE_A]->(Computer),
(moon)-[:PROJECT_FOR]->(Physics1),
(capillary)-[:USE_A]->(Reagents),
(capillary)-[:PROJECT_FOR]->(life_sciences),
(ballon_car)-[:USE_A]->(Computer),
(ballon_car)-[:PROJECT_FOR]->(Physics2),
(bubbleology)-[:USE_A]->(Reagents),
(bubbleology)-[:PROJECT_FOR]->(biology),
(e_waste)-[:USE_A]->(Computer),
(e_waste)-[:PROJECT_FOR]->(dynamic_mechanics),
(clean_air)-[:USE_A]->(Reagents),
(clean_air)-[:PROJECT_FOR]->(Organic1),
(soil_depth)-[:USE_A]->(Computer),
(soil_depth)-[:PROJECT_FOR]->(biogeography),
(absorptivity)-[:USE_A]->(Reagents),
(absorptivity)-[:PROJECT_FOR]->(chemistry2),
(germs_soup)-[:USE_A]->(Reagents),
(germs_soup)-[:PROJECT_FOR]->(system_and_evolution),
(roots)-[:USE_A]->(Reagents),
(roots)-[:PROJECT_FOR]->(botany1),
(cereal_iron)-[:USE_A]->(Reagents),
(cereal_iron)-[:PROJECT_FOR]->(microbiology1),
(cell)-[:USE_A]->(Reagents),
(cell)-[:PROJECT_FOR]->(genetic_resources),
(mutations)-[:USE_A]->(Reagents),
(mutations)-[:PROJECT_FOR]->(genetic_resources),
(rabid)-[:USE_A]->(Reagents),
(rabid)-[:PROJECT_FOR]->(biology),
(desalination)-[:USE_A]->(Computer),
(desalination)-[:PROJECT_FOR]->(Physics2),
(parallax)-[:USE_A]->(Computer),
(parallax)-[:PROJECT_FOR]->(Physics2),
(memory)-[:USE_A]->(Reagents),
(memory)-[:PROJECT_FOR]->(general_inmunology),
(microorganisms)-[:USE_A]->(Reagents),
(microorganisms)-[:PROJECT_FOR]->(microbiology1),
(Submarines)-[:USE_A]->(Computer),
(Submarines)-[:PROJECT_FOR]->(Physics1),
(Molecular_scissors)-[:USE_A]->(Reagents),
(Molecular_scissors)-[:PROJECT_FOR]->(introduction_to_molecular_biosciences),
(genome_projects)-[:USE_A]->(Reagents),
(genome_projects)-[:PROJECT_FOR]->(genetic_resources),
(Cryopreservation)-[:USE_A]->(Reagents),
(Cryopreservation)-[:PROJECT_FOR]->(genetic_resources),
(pets_food)-[:USE_A]->(Reagents),
(pets_food)-[:PROJECT_FOR]->(biology),
(drugs_genetics)-[:USE_A]->(Reagents),
(drugs_genetics)-[:PROJECT_FOR]->(genetic_resources),
(leaves_colors)-[:USE_A]->(Reagents),
(leaves_colors)-[:PROJECT_FOR]->(genetic_resources),
// Fixed: the target was an empty node pattern "()", which would CREATE a new
// anonymous, unlabeled node instead of linking to a Resource. The sibling
// wet-lab biology/immunology projects in this list (flu, Molecular_scissors,
// leaves_colors, ...) all consume (Reagents), so that is the intended target.
(antibodies)-[:USE_A]->(Reagents),
(antibodies)-[:PROJECT_FOR]->(general_inmunology),
(Stardust)-[:USE_A]->(Computer),
(Stardust)-[:PROJECT_FOR]->(Physics1),
(heavy_metals)-[:USE_A]->(Reagents),
(heavy_metals)-[:PROJECT_FOR]->(Organic1),
(cabagge_clones)-[:USE_A]->(Reagents),
(cabagge_clones)-[:PROJECT_FOR]->(genetic_resources),
(organic_waste)-[:USE_A]->(Reagents),
(organic_waste)-[:PROJECT_FOR]->(Organic1),
(kidney)-[:USE_A]->(Reagents),
(kidney)-[:PROJECT_FOR]->(bioinformatics),
(oxygen)-[:USE_A]->(Reagents),
(oxygen)-[:PROJECT_FOR]->(Organic1),
(prevent_erosion)-[:USE_A]->(Paper),
(prevent_erosion)-[:PROJECT_FOR]->(forest_ecology),
(Lost_items)-[:USE_A]->(Computer),
(Lost_items)-[:PROJECT_FOR]->(algorithms_and_basic_programming),
(frequency)-[:USE_A]->(Computer),
(frequency)-[:PROJECT_FOR]->(algorithms_and_basic_programming),
(arithmetic)-[:USE_A]->(Computer),
(arithmetic)-[:PROJECT_FOR]->(algorithms_and_basic_programming),
(Series)-[:USE_A]->(Computer),
(Series)-[:PROJECT_FOR]->(DataStructure),
(Restaurants)-[:USE_A]->(Computer),
(Restaurants)-[:PROJECT_FOR]->(DataStructure),
(Videogames)-[:USE_A]->(Computer),
(Videogames)-[:PROJECT_FOR]->(DataStructure),
(rurple)-[:USE_A]->(Computer),
(rurple)-[:PROJECT_FOR]->(algorithms_and_basic_programming),
(Stability)-[:USE_A]->(Paper),
(Stability)-[:PROJECT_FOR]->(Physics2),
(Angular_Momentum)-[:USE_A]->(Computer),
(Angular_Momentum)-[:PROJECT_FOR]->(Physics2),
(Center_of_gravity)-[:USE_A]->(Paper),
(Center_of_gravity)-[:PROJECT_FOR]->(Physics2),
(Sun_Rotation)-[:USE_A]->(Computer),
(Sun_Rotation)-[:PROJECT_FOR]->(Physics2),
(Torque_Angle)-[:USE_A]->(Paper),
(Torque_Angle)-[:PROJECT_FOR]->(Physics2),
(Jumping_frogs)-[:USE_A]->(Computer),
(Jumping_frogs)-[:PROJECT_FOR]->(Assembler),
(word_leak)-[:USE_A]->(Computer),
(word_leak)-[:PROJECT_FOR]->(Assembler),
(four_in_line)-[:USE_A]->(Computer),
(four_in_line)-[:PROJECT_FOR]->(Assembler),
(Race)-[:USE_A]->(Computer),
(Race)-[:PROJECT_FOR]->(Assembler),
(greater_or_lesser)-[:USE_A]->(Computer),
(greater_or_lesser)-[:PROJECT_FOR]->(Assembler),
(_Pics__Word)-[:USE_A]->(Computer),
(_Pics__Word)-[:PROJECT_FOR]->(Assembler),
(Battleship)-[:USE_A]->(Computer),
(Battleship)-[:PROJECT_FOR]->(Assembler),
(Minesweep)-[:USE_A]->(Computer),
(Minesweep)-[:PROJECT_FOR]->(Assembler),
(Rabbit_Chase)-[:USE_A]->(Computer),
(Rabbit_Chase)-[:PROJECT_FOR]->(Assembler),
(GO)-[:USE_A]->(Computer),
(GO)-[:PROJECT_FOR]->(Assembler),
(pair_odd)-[:USE_A]->(Computer),
(pair_odd)-[:PROJECT_FOR]->(Assembler),
(Uplift_count)-[:USE_A]->(Computer),
(Uplift_count)-[:PROJECT_FOR]->(Assembler),
(chocolates)-[:USE_A]->(Paper),
(chocolates)-[:PROJECT_FOR]->(chemistry1),
(autobiography)-[:USE_A]->(Paper),
(autobiography)-[:PROJECT_FOR]->(Global_citizenship),
(elevator)-[:USE_A]->(electrical_circuit),
(elevator)-[:PROJECT_FOR]->(electrical_circuits),
(car_video_game)-[:USE_A]->(electrical_circuit),
(car_video_game)-[:PROJECT_FOR]->(electrical_circuits),
(Pressure)-[:USE_A]->(Paper),
(Pressure)-[:PROJECT_FOR]->(statistics1),
(classical_music)-[:USE_A]->(Computer),
(classical_music)-[:PROJECT_FOR]->(statistics1),
(caffeine)-[:USE_A]->(Paper),
(caffeine)-[:PROJECT_FOR]->(statistics1),
(decibels)-[:USE_A]->(Computer),
(decibels)-[:PROJECT_FOR]->(statistics1),
(nicotine)-[:USE_A]->(Computer),
(nicotine)-[:PROJECT_FOR]->(statistics1),
(glues)-[:USE_A]->(Paper),
(glues)-[:PROJECT_FOR]->(statistics1),
(blood_sugar)-[:USE_A]->(Paper),
(blood_sugar)-[:PROJECT_FOR]->(statistics1),
(drinks)-[:USE_A]->(Computer),
(drinks)-[:PROJECT_FOR]->(statistics1),
(paint)-[:USE_A]->(Computer),
(paint)-[:PROJECT_FOR]->(statistics1),
(diet_coke)-[:USE_A]->(Paper),
(diet_coke)-[:PROJECT_FOR]->(statistics1),
(milks)-[:USE_A]->(Paper),
(milks)-[:PROJECT_FOR]->(statistics1),
(batteries)-[:USE_A]->(Paper),
(batteries)-[:PROJECT_FOR]->(statistics1),
(Descending_count)-[:USE_A]->(Computer),
(Descending_count)-[:PROJECT_FOR]->(Assembler),
(Active_bit_shift)-[:USE_A]->(Computer),
(Active_bit_shift)-[:PROJECT_FOR]->(Assembler),
(Inactive_bit_shift)-[:USE_A]->(Computer),
(Inactive_bit_shift)-[:PROJECT_FOR]->(Assembler),
(Bit_accumulator)-[:USE_A]->(Computer),
(Bit_accumulator)-[:PROJECT_FOR]->(Assembler),
(traffic_light)-[:USE_A]->(Computer),
(traffic_light)-[:PROJECT_FOR]->(Assembler),
(stone_paper_or_scissors)-[:USE_A]->(Computer),
(stone_paper_or_scissors)-[:PROJECT_FOR]->(Assembler),
(AU)-[:USE_A]->(Computer),
(AU)-[:PROJECT_FOR]->(Assembler),
(Turn_signals)-[:USE_A]->(Computer),
(Turn_signals)-[:PROJECT_FOR]->(Assembler),
(Comparator_of_Numbers)-[:USE_A]->(Computer),
(Comparator_of_Numbers)-[:PROJECT_FOR]->(Assembler),
(Totito)-[:USE_A]->(Computer),
(Totito)-[:PROJECT_FOR]->(Assembler),
(LLS)-[:USE_A]->(Computer),
(LLS)-[:PROJECT_FOR]->(Assembler),
(LRS)-[:USE_A]->(Computer),
(LRS)-[:PROJECT_FOR]->(Assembler),
(Address_decoder)-[:USE_A]->(Computer),
(Address_decoder)-[:PROJECT_FOR]->(Assembler),
(Binary_to_vowel_converter)-[:USE_A]->(Computer),
(Binary_to_vowel_converter)-[:PROJECT_FOR]->(Assembler),
(Car_stopping_Distance_on_a_Tabletop)-[:USE_A]->(Computer),
(Car_stopping_Distance_on_a_Tabletop)-[:PROJECT_FOR]->(Physics1),
(The_energetics_of_a_bouncing)-[:USE_A]->(Computer),
(The_energetics_of_a_bouncing)-[:PROJECT_FOR]->(Physics1),
(Cotton_buds)-[:USE_A]->(Paper),
(Cotton_buds)-[:PROJECT_FOR]->(Physics1),
(Bernoulli_Law)-[:USE_A]->(Paper),
(Bernoulli_Law)-[:PROJECT_FOR]->(Physics1),
(Archimedes_Principle)-[:USE_A]->(Computer),
(Archimedes_Principle)-[:PROJECT_FOR]->(Physics1),
(Radio)-[:USE_A]->(Computer),
(Radio)-[:PROJECT_FOR]->(DataStructure),
(Calculator)-[:USE_A]->(Computer),
(Calculator)-[:PROJECT_FOR]->(DataStructure),
(Sorts)-[:USE_A]->(Computer),
(Sorts)-[:PROJECT_FOR]->(DataStructure),
(Design_patterns)-[:USE_A]->(Computer),
(Design_patterns)-[:PROJECT_FOR]->(DataStructure),
(Simpy)-[:USE_A]->(Computer),
(Simpy)-[:PROJECT_FOR]->(DataStructure),
(Cards)-[:USE_A]->(Computer),
(Cards)-[:PROJECT_FOR]->(DataStructure),
(Dictionary)-[:USE_A]->(Computer),
(Dictionary)-[:PROJECT_FOR]->(DataStructure),
(movies)-[:USE_A]->(Computer),
(movies)-[:PROJECT_FOR]->(DataStructure),
(Places)-[:USE_A]->(Computer),
(Places)-[:PROJECT_FOR]->(DataStructure),
(Hospital)-[:USE_A]->(Computer),
(Hospital)-[:PROJECT_FOR]->(DataStructure),
(Dicc)-[:USE_A]->(Computer),
(Dicc)-[:PROJECT_FOR]->(DataStructure),
(Hexa)-[:USE_A]->(Computer),
(Hexa)-[:PROJECT_FOR]->(POO),
(Planes)-[:USE_A]->(Computer),
(Planes)-[:PROJECT_FOR]->(POO),
(students)-[:USE_A]->(Computer),
(students)-[:PROJECT_FOR]->(POO),
(cinema)-[:USE_A]->(Computer),
(cinema)-[:PROJECT_FOR]->(POO),
(library)-[:USE_A]->(Computer),
(library)-[:PROJECT_FOR]->(POO),
(ipod)-[:USE_A]->(Computer),
(ipod)-[:PROJECT_FOR]->(POO),
(GUI)-[:USE_A]->(Computer),
(GUI)-[:PROJECT_FOR]->(POO),
(guards)-[:USE_A]->(Computer),
(guards)-[:PROJECT_FOR]->(POO),
(radioactive)-[:USE_A]->(Computer),
(radioactive)-[:PROJECT_FOR]->(POO),
(ticket)-[:USE_A]->(Computer),
(ticket)-[:PROJECT_FOR]->(POO),
(Lisp)-[:PROJECT_FOR]->(DataStructure),
(Recommendation)-[:USE_A]->(Computer),
(Recommendation)-[:PROJECT_FOR]->(DataStructure),
(Sodium)-[:USE_A]->(Reagents),
(Sodium)-[:PROJECT_FOR]->(Organic1),
(Sanitary_Napkins_absorption)-[:USE_A]->(Paper),
(Sanitary_Napkins_absorption)-[:PROJECT_FOR]->(statistics1),
(Aluminum_Recycling)-[:USE_A]->(Reagents),
(Aluminum_Recycling)-[:PROJECT_FOR]->(Organic1),
(Inhibition_Klebsiella)-[:USE_A]->(Reagents),
(Inhibition_Klebsiella)-[:PROJECT_FOR]->(forest_ecology),
(Gestalt)-[:USE_A]->(Computer),
(biomimic)-[:USE_A]->(Paper),
(biomimic)-[:PROJECT_FOR]->(life_sciences),
(Fable)-[:USE_A]->(Paper),
(Fable)-[:PROJECT_FOR]->(Design_Thinking),
(Reports)-[:USE_A]->(Computer),
(Reports)-[:PROJECT_FOR]->(algorithms_and_basic_programming),
(Sonic_Pi)-[:USE_A]->(Raspberry),
(Sonic_Pi)-[:PROJECT_FOR]->(Design_Thinking),
(Pokultura)-[:USE_A]->(Paper),
(Pokultura)-[:PROJECT_FOR]->(Design_Thinking),
(mechanical_workshop)-[:USE_A]->(Computer),
(mechanical_workshop)-[:PROJECT_FOR]->(POO),
(political_parties)-[:USE_A]->(Paper),
(political_parties)-[:PROJECT_FOR]->(Global_citizenship),
(massacres_of_the_jungle)-[:USE_A]->(Paper),
(massacres_of_the_jungle)-[:PROJECT_FOR]->(Global_citizenship),
(Simon_says)-[:USE_A]->(Computer),
(Simon_says)-[:PROJECT_FOR]->(Assembler),
(slot_machines)-[:USE_A]->(Computer),
(slot_machines)-[:PROJECT_FOR]->(Assembler),
(Alarm_clock)-[:USE_A]->(Computer),
(Alarm_clock)-[:PROJECT_FOR]->(Assembler),
(Timer)-[:USE_A]->(Computer),
(Timer)-[:PROJECT_FOR]->(Assembler),
(Piano)-[:USE_A]->(Computer),
(Piano)-[:PROJECT_FOR]->(Assembler),
(Stepper)-[:USE_A]->(Computer),
(Stepper)-[:PROJECT_FOR]->(Assembler),
(ALU)-[:USE_A]->(Computer),
(ALU)-[:PROJECT_FOR]->(Assembler),
(Gestalt)-[:PROJECT_FOR]->(Humanity),
(SunRotation)-[:USE_A]->(Computer),
(Gestalt)-[:USE_A]->(Subjects),
(ProjectGenerator)-[:USE_A]->(Computer),
(Behaviorism)-[:PROJECT_FOR]->(Psicology),
(Behaviorism)-[:USE_A]->(Vegetable),
(Behaviorism)-[:USE_A]->(Fruit),
(Behaviorism)-[:USE_A]->(Subjects),
(Avengers)-[:USE_A]->(Subjects),
(Avengers)-[:PROJECT_FOR]->(Psicology),
(Avengers)-[:USE_A]->(Custom),
(CACAP)-[:USE_A]->(Raspberry),
(CACAP)-[:PROJECT_FOR]->(Code)"""
with self._driver.session() as session:
return session.write_transaction(self._Default,result)
@staticmethod
def _Default(tx,result):
return tx.run(result)
@staticmethod
def _getNodes(tx,result,value):
return tx.run(result,value=value)
@staticmethod
def _getNode(tx,result,value):
return tx.run(result,value=value)
@staticmethod
def _upgrade(tx,result,value,newValue):
result = tx.run(result,value=value,newValue=newValue)
@staticmethod
def _deleteLink(tx,result,variable1,variable2):
result = tx.run(result,variable1=variable1,variable2=variable2)
@staticmethod
def _delete(tx,result,value):
result = tx.run(result,value=value)
@staticmethod
def _connect(tx,result,variable1,variable2):
result = tx.run(result,variable1=variable1,variable2=variable2)
"""This method is used by write"""
@staticmethod
def _create(tx,arguments,result):
result = tx.run(result,arguments=arguments)
|
# UpDown Triangle Co-ordinates
# This module provides sample code for working with equilateral triangles in an up-down configuration, i.e.
# ____________
# /\ /\ /
# / \ / \ /
# /____\/____\/
# /\ /\ /
# / \ / \ /
# /____\/____\/
# Each triangle is defined by three co-ordinates, a, b, c.
# b determines which row the triangle is in, and a and c the two diagonals.
# a + b + c always sums to either 1 or 2.
# There are many other possible co-ordinate schemes, but this one seems to have the simplest maths.
# Thus, the origin is a vertex, and it has 6 triangles around it:
# (1, 0, 0), (1, 1, 0), (0, 1, 0), (0, 1, 1), (0, 0, 1), (1, 0, 1)
# To find the neighbours of a down triangle, add 1 to a co-ordinate, and subtract one for neighbours of an up triangle.
from math import floor, ceil, sqrt
from settings import edge_length
from common import mod
# Precomputed sqrt(3), reused by every cartesian <-> triangle-axis conversion below.
sqrt3 = sqrt(3)
# Basics #######################################################################
def tri_center(a, b, c):
    """Returns the center of a given triangle in cartesian co-ordinates"""
    # Each unit of a, b, c moves you in the direction of one of the edges of a
    # down triangle, in linear combination (the inverse matrix to pick_tri).
    # NB: passing x, y, z summing to zero (not a valid triangle) yields the
    # cartesian co-ordinates of a grid vertex instead.
    cx = (0.5 * a + -0.5 * c)
    cy = (-sqrt3 / 6 * a + sqrt3 / 3 * b - sqrt3 / 6 * c)
    return (cx * edge_length, cy * edge_length)
def points_up(a, b, c):
    """Returns True if this is an upwards pointing triangle, otherwise False"""
    # Co-ordinates always sum to 1 (down) or 2 (up).
    total = a + b + c
    return total == 2
def tri_corners(a, b, c):
    """Returns the three corners of a given triangle in cartesian co-ordinates"""
    # Each corner is the degenerate "triangle" obtained by bumping one
    # co-ordinate towards the opposite side (+1 for up tris, -1 for down).
    bump = 1 if points_up(a, b, c) else -1
    return [
        tri_center(a + bump, b, c),
        tri_center(a, b, c + bump),
        tri_center(a, b + bump, c),
    ]
def pick_tri(x, y):
    """Returns the triangle that contains a given cartesian co-ordinate point"""
    # Dot products measure which row and diagonals the point occupies
    # (the inverse matrix to tri_center). We break symmetry with
    # floor(...) + 1 instead of ceil so corner vertices like (0, 0)
    # resolve consistently.
    a = ceil((1 * x - sqrt3 / 3 * y) / edge_length)
    b = floor((sqrt3 * 2 / 3 * y) / edge_length) + 1
    c = ceil((-1 * x - sqrt3 / 3 * y) / edge_length)
    return (a, b, c)
def tri_neighbours(a, b, c):
    """Returns the tris that share an edge with the given tri"""
    # Up triangles (sum == 2) step down by 1 on each axis; down triangles step up.
    step = -1 if a + b + c == 2 else 1
    return [
        (a + step, b, c),
        (a, b + step, c),
        (a, b, c + step),
    ]
def tri_dist(a1, b1, c1, a2, b2, c2):
    """Returns how many steps one tri is from another"""
    # L1 distance over the three axes equals the number of edge crossings.
    return sum(abs(p - q) for p, q in ((a1, a2), (b1, b2), (c1, c2)))
def tri_disc(a, b, c, r):
    """Returns the tris that are at most distance r from the given tri"""
    # This could probably be optimized more.
    # For each (da, db) pair there are exactly two candidate dc values
    # (one making the co-ordinates sum to 1, one to 2).
    base = a + b + c
    for da in range(-r, r + 1):
        for db in range(-r, r + 1):
            for dc in (1 - (base + da + db), 2 - (base + da + db)):
                if abs(da) + abs(db) + abs(dc) <= r:
                    yield (a + da, b + db, c + dc)
# Symmetry #####################################################################
def tri_rotate_60(a, b, c, n=1):
    """Rotates the given triangle n * 60 degrees counter clockwise around the origin,
    and returns the co-ordinates of the new triangle."""
    n = mod(n, 6)
    # Even rotations are pure cyclic permutations of the axes;
    # odd rotations also flip each co-ordinate via (1 - x).
    if n == 0:
        return (a, b, c)
    if n == 2:
        return (c, a, b)
    if n == 4:
        return (b, c, a)
    if n == 1:
        return (1 - b, 1 - c, 1 - a)
    if n == 3:
        return (1 - a, 1 - b, 1 - c)
    if n == 5:
        return (1 - c, 1 - a, 1 - b)
def tri_rotate_about_60(a, b, c, about_a, about_b, about_c, n=1):
    """Rotates the given triangle n * 60 degrees counter clockwise about the given tri
    and returns the co-ordinates of the new triangle.

    Fix: the original accepted `n` but never forwarded it to tri_rotate_60,
    so it always rotated by exactly one 60-degree step. `n` is now passed
    through (n=1 behaves identically to the old code).
    """
    # Translate so the pivot tri sits at the origin, rotate, translate back.
    (a, b, c) = tri_rotate_60(a - about_a, b - about_b, c - about_c, n)
    return (a + about_a, b + about_b, c + about_c)
def tri_reflect_y(a, b, c):
    """Reflects the given triangle through the x-axis
    and returns the co-ordinates of the new triangle"""
    # Swap the two diagonal axes and flip all three via (1 - x).
    mirrored = (1 - c, 1 - b, 1 - a)
    return mirrored
def tri_reflect_x(a, b, c):
    """Reflects the given triangle through the y-axis
    and returns the co-ordinates of the new triangle"""
    # Simply exchange the two diagonal axes; the row stays put.
    return (a, b, c)[::-1]
def tri_reflect_by(a, b, c, n=0):
    """Reflects the given triangle through the x-axis rotated counter clockwise by n * 30 degrees
    and returns the co-ordinates of the new triangle"""
    # Reflect first, then rotate the mirrored tri by n * 60 degrees.
    return tri_rotate_60(*tri_reflect_y(a, b, c), n)
# Shapes #######################################################################
def tri_line_intersect(x1, y1, x2, y2):
    """Returns the triangles that intersect the line specified in cartesian co-ordinates"""
    # Work in units of edge_length so the grid maths below is scale-free.
    x1 /= edge_length
    y1 /= edge_length
    x2 /= edge_length
    y2 /= edge_length
    dx = x2 - x1
    dy = y2 - y1
    # Convert from cartesian co-ordinates to the three triangle axes
    # (fa/fb/fc: position of the start point; da/db/dc: direction).
    fa = 1 * x1 - sqrt3 / 3 * y1
    fb = sqrt3 * 2 / 3 * y1
    fc = -1 * x1 - sqrt3 / 3 * y1
    da = 1 * dx - sqrt3 / 3 * dy
    db = sqrt3 * 2 / 3 * dy
    dc = -1 * dx - sqrt3 / 3 * dy
    # Now do raycasting on a 3d cube grid, except we ensure
    # we step across cells in an order that alternates
    # up/down triangles
    a = ceil(fa)
    b = floor(fb) + 1
    c = ceil(fc)
    isup = a + b + c == 2
    # Direction of travel along each axis.
    stepa = 1 if da > 0 else -1
    stepb = 1 if db > 0 else -1
    stepc = 1 if dc > 0 else -1
    # ta/tb/tc: parameter t at which the ray first crosses the next grid line on
    # each axis (inf when the ray is parallel to that axis); int(d <= 0) picks
    # the near side of the current cell. ida/idb/idc: t advance per crossing.
    ta = (a - int(da <= 0) - fa) / da if da != 0 else float('inf')
    tb = (b - int(db <= 0) - fb) / db if db != 0 else float('inf')
    tc = (c - int(dc <= 0) - fc) / dc if dc != 0 else float('inf')
    ida = abs(1 / da) if da != 0 else float('inf')
    idb = abs(1 / db) if db != 0 else float('inf')
    idc = abs(1 / dc) if dc != 0 else float('inf')
    yield (a, b, c)
    while True:
        # Pick the axis whose boundary is crossed next, but only in a direction
        # consistent with the current up/down parity; stop once t passes the
        # end of the segment (t > 1).
        if ta <= tb and ta <= tc and (stepa == 1) != isup:
            if ta > 1: return
            a += stepa
            ta += ida
        elif tb <= ta and tb <= tc and (stepb == 1) != isup:
            if tb > 1: return
            b += stepb
            tb += idb
        else:
            if tc > 1: return
            c += stepc
            tc += idc
        yield (a, b, c)
        # Each step flips between up and down triangles.
        isup = not isup
def tri_line(a1, b1, c1, a2, b2, c2):
    """Returns the tris in a shortest path from one tri to another, staying as close to the straight line as possible"""
    # Cast a ray between the two tri centers and report every tri it crosses.
    start = tri_center(a1, b1, c1)
    end = tri_center(a2, b2, c2)
    return tri_line_intersect(start[0], start[1], end[0], end[1])
def tri_rect_intersect(x, y, width, height):
    """Returns the tris that intersect the rectangle specified in cartesian co-ordinates"""
    assert width >= 0, "Rectangle should have non-negative width"
    assert height >= 0, "Rectangle should have non-negative height"
    # For consistency, we treat the triangles as exclusive of their border, and the rect as inclusive
    # Work in units of edge_length so the grid maths below is scale-free.
    x /= edge_length
    y /= edge_length
    width /= edge_length
    height /= edge_length
    # Lower and upper bound by row
    fl = sqrt3 * 2 / 3 * y
    fu = sqrt3 * 2 / 3 * (y + height)
    # Loop over all rows that the rectangle is in
    for b in range(floor(fl) + 1, ceil(fu) + 1):
        # Consider each row vs a trimmed rect
        minb = max(b - 1, fl)
        maxb = min(b, fu)
        # The smallest / largest values for the diagonals
        # can be read from the trimmed rect corners
        mina = floor(x - maxb / 2) + 1
        maxa = ceil(x + width - minb / 2)
        minc = floor(-x - width - maxb / 2) + 1
        maxc = ceil(-x - minb / 2)
        # Walk along the row left to right
        a = mina
        c = maxc
        # Each position is either a down (sum 1) or up (sum 2) triangle.
        assert a + b + c == 1 or a + b + c == 2
        while a <= maxa and c >= minc:
            yield (a, b, c)
            # Alternate advancing the two diagonals so we visit
            # down/up triangles in left-to-right order.
            if a + b + c == 1:
                a += 1
            else:
                c -= 1
import csv
import decimal
import time
from collections import defaultdict
from decimal import Decimal
from pathlib import Path
from typing import Any

import requests
import base58
import ipfshttpclient
import multiaddr
from _pysha3 import keccak_256
# Address of the on-chain competition registry contract (queried by get_competition_address).
REGISTRY_ADDRESS = "0x0Ee5AFF42564C0D293164b39D85653666ae151Eb"
# IPFS gateway multiaddr (Pinata over HTTPS) used for file downloads.
IPFS_GATEWAY = multiaddr.Multiaddr("/dns/gateway.pinata.cloud/tcp/443/https")
# JSON-RPC endpoint for reading the Polygon blockchain (see network_read).
POLYGON_GATEWAY = "https://rpc-mainnet.matic.quiknode.pro"
# Retry budget for network_read.
MAX_RETRIES = 20
# Presumably the pause (seconds) between network retries — not referenced in this chunk; confirm usage.
REST_PERIOD = 2
# Sentinel CID — presumably marks "no file set on-chain"; TODO confirm against hash_to_cid callers.
NULL_IPFS_CID = "QmNLei78zWmzUdbeRB3CiUfAizWUrbeeZh5K1rhAQKCh51"
"""classes and methods to interact with solidity contract
see https://github.com/rocketcapital-ai/competition/blob/main/contracts/Competition.sol
"""
class Competition:
    """Read-only view of one competition contract.

    Resolves the contract address from the on-chain registry by name and
    wraps the raw solidity getters with typed accessors.
    """

    def __init__(self, name: str, genesis_block: int, data_dir: Path):
        self._name = name
        self._genesis_block = genesis_block
        self._data_dir = data_dir
        # resolved once, via the registry contract
        self._address = get_competition_address(name)

    @property
    def name(self):
        return self._name

    @property
    def genesis_block(self):
        return self._genesis_block

    @property
    def address(self):
        return self._address

    def get_competition_dir(self):
        """returns the competition directory, a directory named as the competition inside the data directory"""
        return self._data_dir.joinpath(self.name)

    def get_latest_challenge_number(self) -> int:
        """calls solidity method getLatestChallengeNumber()"""
        return hex_to_int(self.call("getLatestChallengeNumber()"))

    def get_current_challenge_pool(self) -> Decimal:
        """calls solidity method getCurrentChallengeRewardsBudget()"""
        return hex_to_decimal(self.call("getCurrentChallengeRewardsBudget()"))

    def get_current_competition_pool(self) -> Decimal:
        """calls solidity method getCurrentTournamentRewardsBudget()"""
        return hex_to_decimal(self.call("getCurrentTournamentRewardsBudget()"))

    def get_current_stake_pool(self) -> Decimal:
        """calls solidity method getCurrentStakingRewardsBudget()"""
        return hex_to_decimal(self.call("getCurrentStakingRewardsBudget()"))

    def get_challenge(self, challenge_number: int) -> "Challenge":
        """Return a Challenge handle bound to this competition."""
        return Challenge(challenge_number, self)

    def call(self, fn_signature: str, *args):
        """Execute a read-only contract call against this competition's address."""
        return call(self._address, fn_signature, *args)
class Challenge:
    """One challenge (round) of a Competition.

    Stake balances and pool sizes are reconstructed by scanning the contract's
    event log from the competition's genesis block; the results are cached as
    CSV files (_stakes.csv, _pools.csv) in the challenge directory so the slow
    scan only runs once per challenge.
    """

    def __init__(self, number: int, competition: Competition):
        self._number = number
        self._competition = competition
        # these data are loaded from blockchain or from local file
        # (lazily, by _load_blockchain_info on first access)
        self._stakes = None
        self._stake_pool = None
        self._challenge_pool = None
        self._competition_pool = None

    @property
    def number(self):
        return self._number

    @property
    def competition(self):
        return self._competition

    def get_challenge_dir(self):
        """returns the challenge directory, named as the challenge number inside the competition directory"""
        return self._competition.get_competition_dir().joinpath(str(self._number))

    def get_dataset_ipfs_cid(self) -> str:
        """calls solidity method getDatasetHash() and converts hash to cid"""
        dataset_hash = self.call("getDatasetHash(uint32)", int_to_uint(self._number))
        return hash_to_cid(dataset_hash)

    def download_dataset_file(self, force=False, verbose=True) -> Path:
        """utility method to read the dataset cid and download the dataset file in the challenge directory"""
        dataset_ipfs_cid = self.get_dataset_ipfs_cid()
        challenge_dir = self.get_challenge_dir()
        return download_ipfs_file(dataset_ipfs_cid, challenge_dir, prefix="dataset-", suffix=".zip", force=force, verbose=verbose)

    def get_results_ipfs_cid(self) -> str:
        """calls solidity method getResultsHash() and converts hash to cid"""
        results_hash = self.call("getResultsHash(uint32)", int_to_uint(self._number))
        return hash_to_cid(results_hash)

    def download_results_file(self, force=False, verbose=True) -> Path:
        """utility method to read the results cid and download the results file in the challenge directory"""
        results_ipfs_cid = self.get_results_ipfs_cid()
        challenge_dir = self.get_challenge_dir()
        return download_ipfs_file(results_ipfs_cid, challenge_dir, prefix="results-", suffix=".csv", force=force, verbose=verbose)

    def get_public_key_cid(self) -> str:
        """calls solidity method getKeyHash()"""
        public_key_hash = self.call("getKeyHash(uint32)", int_to_uint(self._number))
        return hash_to_cid(public_key_hash)

    def download_public_key_file(self, force=False, verbose=True) -> Path:
        """utility method to read the public key cid and download the public key file in the challenge directory"""
        public_key_cid = self.get_public_key_cid()
        challenge_dir = self.get_challenge_dir()
        return download_ipfs_file(public_key_cid, challenge_dir, prefix="public-key-", suffix=".pem", force=force, verbose=verbose)

    def get_private_key_cid(self) -> str:
        """calls solidity method getPrivateKeyHash()"""
        private_key_hash = self.call("getPrivateKeyHash(uint32)", int_to_uint(self._number))
        return hash_to_cid(private_key_hash)

    def download_private_key_file(self, force=False, verbose=True) -> Path:
        """utility method to read the private key cid and download the private key file in the challenge directory"""
        private_key_cid = self.get_private_key_cid()
        challenge_dir = self.get_challenge_dir()
        return download_ipfs_file(private_key_cid, challenge_dir, prefix="private-key-", suffix=".pem", force=force, verbose=verbose)

    def get_submission_counter(self) -> int:
        """calls solidity method getSubmissionCounter()"""
        return hex_to_int(self.call("getSubmissionCounter(uint32)", int_to_uint(self._number)))

    def get_submitter_addresses(self, start_index: int, end_index: int) -> [str]:
        """calls solidity method getSubmitters()"""
        result = self.call("getSubmitters(uint32,uint256,uint256)", int_to_uint(self._number),
                           int_to_uint(start_index), int_to_uint(end_index))
        # skip first 130 chars (0x + start index (32 bytes) and # of items (32 bytes))
        result = result[130:]
        # each remaining 64-hex-char word is a left-padded address: drop the
        # 24 padding chars, keep the 40-char address (no "0x" prefix)
        return [result[i+24:i+64] for i in range(0, len(result), 64)]

    def get_all_submitter_addresses(self) -> [str]:
        """utility method to get all submitters"""
        num_submitters = self.get_submission_counter()
        return self.get_submitter_addresses(0, num_submitters)

    def get_phase(self) -> int:
        """calls solidity method getPhase()"""
        return hex_to_int(self.call("getPhase(uint32)",
                                    int_to_uint(self._number)))

    def get_stakes(self) -> {str: Decimal}:
        """returns all the <address, stake> pairs of the challenge"""
        if self._stakes is None:
            self._load_blockchain_info()
        return self._stakes

    def get_participant(self, address: str) -> "Participant":
        return Participant(address, self)

    def get_all_participants(self) -> ["Participant"]:
        """utility method to return all participants"""
        return [self.get_participant(address) for address in self.get_stakes()]

    def get_stake_pool(self) -> Decimal:
        """returns the stake pool"""
        if self._stake_pool is None:
            self._load_blockchain_info()
        return self._stake_pool

    def get_challenge_pool(self) -> Decimal:
        """returns the challenge pool"""
        if self._challenge_pool is None:
            self._load_blockchain_info()
        return self._challenge_pool

    def get_competition_pool(self) -> Decimal:
        """returns the competition pool"""
        if self._competition_pool is None:
            self._load_blockchain_info()
        return self._competition_pool

    def call(self, fn_signature: str, *args):
        """Delegate a read-only contract call to the owning competition."""
        return self._competition.call(fn_signature, *args)

    def _load_blockchain_info(self) -> None:
        """scans the blockchain if needed and initializes the variables"""
        stakes_info_file = self.get_challenge_dir().joinpath("_stakes.csv")
        pools_info_file = self.get_challenge_dir().joinpath("_pools.csv")
        if not stakes_info_file.exists() or not pools_info_file.exists():
            self.get_challenge_dir().mkdir(parents=True, exist_ok=True)
            # load blockchain info from blockchain and write to file
            self._scan_blockchain()
            with open(stakes_info_file, "w") as fout:
                writer = csv.writer(fout)
                writer.writerow(["address", "stake"])
                for address, value in self._stakes.items():
                    writer.writerow([address, str(value)])
            with open(pools_info_file, "w") as fout:
                writer = csv.writer(fout)
                writer.writerow(["pool", "amount"])
                writer.writerow(["stake_pool", str(self._stake_pool)])
                # NOTE(review): stray trailing comma makes this a discarded
                # 1-tuple — harmless, but should be removed.
                writer.writerow(["challenge_pool", str(self._challenge_pool)]),
                writer.writerow(["competition_pool", str(self._competition_pool)])
        else:
            # load blockchain info from file
            with open(stakes_info_file) as fin:
                reader = csv.reader(fin)
                next(reader)  # skip the header row
                self._stakes = {address: Decimal(stake) for address, stake in reader}
            with open(pools_info_file) as fin:
                reader = csv.reader(fin)
                next(reader)  # skip the header row
                csv_dict = {name: value for name, value in reader}
                self._stake_pool = Decimal(csv_dict.get("stake_pool"))
                self._challenge_pool = Decimal(csv_dict.get("challenge_pool"))
                self._competition_pool = Decimal(csv_dict.get("competition_pool"))

    def _scan_blockchain(self) -> None:
        """Replay the contract's event log to reconstruct stakes and pools for this challenge."""
        latest_challenge = self._competition.get_latest_challenge_number()
        assert self._number <= latest_challenge, f"challenge {self._number} has not been opened"
        chunk = 1000  # blocks per eth_getLogs query
        quantize = Decimal("1e-18")  # wei -> token precision (18 decimals)
        latest_block = get_latest_block()
        current_challenge = None
        # track balance of rewards pool
        total_rewards_pool_uint = 0
        # starting percentages; updated when *PercentageInWeiUpdated events fire
        current_challenge_percentage = Decimal("0.2")
        current_competition_percentage = Decimal("0.6")
        # dictionaries to track and record values by challenge
        stake_pool = {}
        challenge_pool = {}
        competition_pool = {}
        # NOTE(review): recorded per challenge but never read back in this method
        challenge_percentage = {1: current_challenge_percentage}
        competition_percentage = {1: current_competition_percentage}
        stakes = defaultdict(lambda: Decimal("0"))
        challenge = {}  # challenge number -> snapshot of stakes at submission close
        # event signatures to look out for
        sponsor_sig = str_to_fn_id("Sponsor(address,uint256,uint256)", True)
        remainder_moved_sig = str_to_fn_id("RemainderMovedToPool(uint256)", True)
        total_rewards_paid_sig = str_to_fn_id("TotalRewardsPaid(uint32,uint256,uint256,uint256)", True)
        challenge_opened_sig = str_to_fn_id("ChallengeOpened(uint32)", True)
        submission_closed_sig = str_to_fn_id("SubmissionClosed(uint32)", True)
        challenge_percentage_sig = str_to_fn_id("ChallengeRewardsPercentageInWeiUpdated(uint256)", True)
        competition_percentage_sig = str_to_fn_id("TournamentRewardsPercentageInWeiUpdated(uint256)", True)
        stake_increased_sig = str_to_fn_id("StakeIncreased(address,uint256)", True)
        stake_decreased_sig = str_to_fn_id("StakeDecreased(address,uint256)", True)
        rewards_payment_sig = str_to_fn_id("RewardsPayment(uint32,address,uint256,uint256,uint256)", True)
        print("scanning the Polygon blockchain")
        for i in range(self._competition.genesis_block, latest_block, chunk):
            print(".", end="", flush=True)  # progress indicator, one dot per chunk
            transactions = scan(i, i + chunk - 1, self._competition.address)
            for tx in transactions:
                topics = tx["topics"]
                sig = topics[0]  # topic 0 is the event signature hash
                if sig == challenge_percentage_sig:
                    current_challenge_percentage = hex_to_decimal(topics[1])
                elif sig == competition_percentage_sig:
                    current_competition_percentage = hex_to_decimal(topics[1])
                elif sig == sponsor_sig:
                    sponsored_amount = hex_to_int(topics[2])
                    pool_total = hex_to_int(topics[3])
                    total_rewards_pool_uint += sponsored_amount
                    # the event carries the contract's own running total; cross-check ours
                    assert total_rewards_pool_uint == pool_total, \
                        f"\nmisalignment in pool total; expected {total_rewards_pool_uint} but got {pool_total}"
                elif sig == remainder_moved_sig:
                    remainder_added_to_pool = hex_to_int(topics[1])
                    total_rewards_pool_uint += remainder_added_to_pool
                elif sig == total_rewards_paid_sig:
                    staking_rewards_paid = hex_to_int(topics[1])
                    challenge_rewards_paid = hex_to_int(topics[2])
                    competition_rewards_paid = hex_to_int(topics[3])
                    total_rewards_pool_uint -= staking_rewards_paid + challenge_rewards_paid + competition_rewards_paid
                elif sig == challenge_opened_sig:
                    # when a challenge is opened, this is the point where the
                    # staking, challenge and competition pools are set
                    current_challenge = hex_to_int(topics[1])
                    this_challenge_pool = Decimal(total_rewards_pool_uint) * current_challenge_percentage * quantize
                    challenge_pool[current_challenge] = this_challenge_pool.quantize(quantize, rounding=decimal.ROUND_DOWN)
                    this_competition_pool = Decimal(total_rewards_pool_uint) * current_competition_percentage * quantize
                    competition_pool[current_challenge] = this_competition_pool.quantize(quantize, rounding=decimal.ROUND_DOWN)
                    # the stake pool is whatever remains of the total pool
                    this_stake_pool = (Decimal(total_rewards_pool_uint) * quantize) - challenge_pool[current_challenge] - competition_pool[current_challenge]
                    stake_pool[current_challenge] = this_stake_pool.quantize(quantize, rounding=decimal.ROUND_DOWN)
                    challenge_percentage[current_challenge] = current_challenge_percentage
                    competition_percentage[current_challenge] = current_competition_percentage
                elif sig == stake_increased_sig:
                    # last 40 hex chars of the indexed topic are the staker address
                    address = topics[1][-40:]
                    stakes[address] += hex_to_decimal(topics[2])
                elif sig == stake_decreased_sig:
                    address = topics[1][-40:]
                    stakes[address] -= hex_to_decimal(topics[2])
                elif sig == rewards_payment_sig:
                    address = topics[1][-40:]
                    # the staking reward is in the (non-indexed) data, last 32-byte word
                    stake_reward = tx["data"][-64:]
                    stakes[address] += hex_to_decimal(topics[2]) + hex_to_decimal(topics[3]) + hex_to_decimal(stake_reward)
                elif sig == submission_closed_sig:
                    closed_submission = hex_to_int(topics[1])
                    assert current_challenge == closed_submission, \
                        f"\nmisalignment with challenge numbers; expected {current_challenge} got {closed_submission}"
                    # snapshot stakes as of submission close for this challenge
                    challenge[current_challenge] = stakes.copy()
                    # terminate if we have the information for the specified challenge
                    if current_challenge == self._number:
                        print("\nblockchain scan completed")
                        # sanity check if specified challenge is the latest challenge
                        if latest_challenge == self._number:
                            current_stake_pool = self._competition.get_current_stake_pool()
                            current_challenge_pool = self._competition.get_current_challenge_pool()
                            current_competition_pool = self._competition.get_current_competition_pool()
                            assert stake_pool[self._number] == current_stake_pool, \
                                f"stake pool misalignment; expected {stake_pool[self._number]} got {current_stake_pool}"
                            assert challenge_pool[self._number] == current_challenge_pool, \
                                f"challenge pool misalignment; expected {challenge_pool[self._number]} got {current_challenge_pool}"
                            assert competition_pool[self._number] == current_competition_pool, \
                                f"competition pool misalignment; expected {competition_pool[self._number]} got {current_competition_pool}"
                        self._stakes = challenge[self._number]
                        self._stake_pool = stake_pool[self._number]
                        self._challenge_pool = challenge_pool[self._number]
                        self._competition_pool = competition_pool[self._number]
                        return
        assert False, f"could not determine pools for challenge {self._number}"
class Participant:
    """One staker's view of a single challenge: stake, submission, scores and rewards."""

    def __init__(self, address: str, challenge: Challenge):
        self._address = address
        self._challenge = challenge

    @property
    def address(self):
        return self._address

    @property
    def challenge(self):
        return self._challenge

    def get_submitter_stake(self) -> Decimal:
        """calls solidity method getStakedAmountForChallenge()
        NOTE: it returns 0 if the staker did not submit a prediction for the challenge
        """
        return hex_to_decimal(self._call("getStakedAmountForChallenge(uint32,address)",
                                         int_to_uint(self._challenge.number),
                                         address_to_uint(self._address)))

    def get_stake(self) -> Decimal:
        """returns the stake read from the blockchain"""
        return self._challenge.get_stakes().get(self._address)

    def get_staking_reward(self) -> Decimal:
        """calls solidity method getStakingRewards()"""
        return hex_to_decimal(self._call("getStakingRewards(uint32,address)",
                                         int_to_uint(self._challenge.number),
                                         address_to_uint(self._address)))

    def get_submission_ipfs_cid(self) -> str:
        """calls solidity method getSubmission()"""
        return hash_to_cid(self._call("getSubmission(uint32,address)",
                                      int_to_uint(self._challenge.number),
                                      address_to_uint(self._address)))

    def download_submission_file(self, force=False, verbose=True) -> Path:
        """utility method to read the submission file cid and download the submission file in the challenge directory"""
        return download_ipfs_file(self.get_submission_ipfs_cid(),
                                  self._challenge.get_challenge_dir(),
                                  prefix="submission-", suffix=".zip",
                                  force=force, verbose=verbose)

    def get_challenge_reward(self) -> Decimal:
        """calls solidity method getChallengeRewards()"""
        return hex_to_decimal(self._call("getChallengeRewards(uint32,address)",
                                         int_to_uint(self._challenge.number),
                                         address_to_uint(self._address)))

    def get_competition_reward(self) -> Decimal:
        """calls solidity method getTournamentRewards()"""
        return hex_to_decimal(self._call("getTournamentRewards(uint32,address)",
                                         int_to_uint(self._challenge.number),
                                         address_to_uint(self._address)))

    def get_challenge_score(self) -> Decimal:
        """calls solidity method getChallengeScores()"""
        return hex_to_decimal(self._call("getChallengeScores(uint32,address)",
                                         int_to_uint(self._challenge.number),
                                         address_to_uint(self._address)))

    def get_competition_score(self) -> Decimal:
        """calls solidity method getTournamentScores()"""
        return hex_to_decimal(self._call("getTournamentScores(uint32,address)",
                                         int_to_uint(self._challenge.number),
                                         address_to_uint(self._address)))

    def _call(self, fn_signature: str, *args):
        """Delegate a read-only contract call to the owning challenge."""
        return self._challenge.call(fn_signature, *args)
def get_competition_address(competition_name: str) -> str:
    """Resolve a competition's contract address from the on-chain registry."""
    call_data = encode_string(competition_name)
    raw = network_read([{"to": REGISTRY_ADDRESS, "data": call_data}, "latest"])
    # the address is the last 20 bytes (40 hex chars) of the returned word
    return f"0x{raw[-40:]}"
def call(address: str, fn_signature: str, *args):
    """ABI-encode a read-only contract call and execute it via eth_call.

    *args* must already be hex-encoded 32-byte words (see int_to_uint /
    address_to_uint); they are appended after the 4-byte selector.
    """
    data = str_to_fn_id(fn_signature) + "".join(args)
    return network_read([{"to": address, "data": data}, "latest"])
def scan(from_block: int, to_block: int, address: str):
    """Fetch the event logs emitted by *address* within the given block range."""
    query = {"fromBlock": hex(from_block), "toBlock": hex(to_block), "address": address}
    return network_read([query], "eth_getLogs")
def get_latest_block() -> int:
    """Return the number of the most recent block on the chain."""
    # False => return only block header fields, not full transactions
    block = network_read(["latest", False], "eth_getBlockByNumber")
    return hex_to_int(block["number"])
def network_read(params: list, method="eth_call") -> str:
    """POST a JSON-RPC request to the Polygon gateway and return its "result".

    Retries up to MAX_RETRIES times on a non-2xx response.

    Bug fix: the retry counter was never incremented, so a persistently
    failing gateway caused an infinite loop, and the in-loop max-retries
    check (``if retries >= MAX_RETRIES``) was unreachable because the loop
    condition already guaranteed the opposite. The failure path now raises
    instead of using ``assert`` (asserts vanish under ``python -O``).

    Raises
    ------
    RuntimeError
        If every attempt fails.
    """
    payload = {"jsonrpc": "2.0", "method": method, "params": params, "id": 1}
    headers = {"Content-Type": "application/json"}
    retries = 0
    while retries < MAX_RETRIES:
        r = requests.post(POLYGON_GATEWAY, headers=headers, json=payload)
        if r.ok:
            return r.json()["result"]
        retries += 1
    raise RuntimeError("network read exceeded max retries. Please try again later")
# TODO: don't use absolute fn_id
def encode_string(param: str, fn_id="0x5d58ebc1") -> str:
    """ABI-encode a single dynamic string argument for a contract call.

    Layout (Solidity ABI): <4-byte selector><offset word (0x20)><length word>
    <data padded to 32-byte words>.

    Bug fix: the length word and the 32-byte chunking are now computed on the
    UTF-8 *bytes* of the string. The original used character counts, which
    produced malformed (negative) padding for any non-ASCII input; for pure
    ASCII input the output is byte-identical to before.
    """
    encoded = param.encode("utf-8")
    # offset of the dynamic data relative to the argument area: always 0x20
    # when the string is the only argument
    offset_word = "0" * 62 + "20"
    length_word = hex(len(encoded))[2:]
    length_word = "0" * (64 - len(length_word)) + length_word
    data_words = ""
    for i in range(0, len(encoded), 32):
        chunk_hex = encoded[i:i + 32].hex()
        data_words += chunk_hex + "0" * (64 - len(chunk_hex))
    return fn_id + offset_word + length_word + data_words
def hash_to_cid(hash_id: str) -> str:
    """Convert a raw 32-byte sha2-256 digest (hex) into an IPFS CIDv0 string."""
    digest = hash_id[2:] if hash_id[:2] == "0x" else hash_id
    # prepend the multihash header: 0x12 = sha2-256, 0x20 = 32-byte length
    multihash = int("1220" + str(digest), 16)
    return base58.b58encode_int(multihash).decode("utf-8")
def str_to_fn_id(fn_signature: str, full_sig=False) -> str:
    """Derive the solidity selector of a function signature.

    Returns the first 4 bytes of the keccak-256 hash by default, or the full
    32-byte hash when *full_sig* is true, as a 0x-prefixed hex string.
    """
    digest = keccak_256(fn_signature.encode("utf-8")).digest().hex()
    selector = digest if full_sig else digest[:8]
    return f"0x{selector}"
def int_to_uint(n: int) -> str:
    """Left-pad the hex form of *n* to a 64-hex-digit (256-bit) ABI word."""
    return hex(n)[2:].zfill(64)
def address_to_uint(address: str) -> str:
    """Left-pad a 40-hex-digit address (without its 0x prefix) to a 256-bit word."""
    return f"{'0' * 24}{address}"
def hex_to_int(hex_str: str) -> int:
    """Parse a 0x-prefixed hex string into an int.

    The parameter was renamed from ``hex`` so it no longer shadows the
    builtin; all visible callers pass it positionally.
    """
    return int(hex_str[2:], 16)
def hex_to_decimal(hex_str: str) -> Decimal:
    """Convert a 0x-prefixed hex amount in wei-style base units (18 decimals)
    into a whole-token Decimal.

    The parameter was renamed from ``hex`` so it no longer shadows the
    builtin; all visible callers pass it positionally.
    """
    return Decimal(hex_to_int(hex_str)) / Decimal("1e18")
def download_ipfs_file(cid: str, target_dir: Path,
                       verbose=True, force=False, filename=None, prefix=None, suffix=None) -> Path:
    """Download the IPFS object *cid* into *target_dir* and return its path.

    The file is stored as [prefix]<filename or cid>[suffix]; an existing file
    is reused unless *force* is true.

    Bug fix: the IPFS HTTP client was previously never closed, leaking the
    connection on every download; it is now used as a context manager.
    """
    if filename is None:
        filename = cid
    if prefix is not None:
        filename = prefix + filename
    if suffix is not None:
        filename = filename + suffix
    target_dir.mkdir(parents=True, exist_ok=True)
    # client.get() writes the object under its CID; rename to the target name
    downloaded_file = target_dir.joinpath(cid)
    target_file = target_dir.joinpath(filename)
    if not target_file.exists() or force:
        if verbose:
            print(f"downloading file {cid}")
        with ipfshttpclient.connect(addr=IPFS_GATEWAY) as client:
            client.get(cid, target_dir)
        if downloaded_file != target_file:
            downloaded_file.rename(target_file)
    return target_file
|
#!/usr/bin/env python3
from main import *
from pathlib import Path
import re
import os
from subprocess import getoutput
from monty.os import cd
import pymongo
COMPATH=Path("/WORK/nscc-gz_material_1/ICSD_vasp/abacus_calc/matgen_scf/completed")
ERRORS = Path("/WORK/nscc-gz_material_1/ICSD_vasp/abacus_calc/matgen_scf/some_errors")
def yield_stru(root):
    """Yield ``(comp(path), path)`` for every entry directly under *root*."""
    child_depth = len(root.parts) + 1
    for entry in root.rglob('*'):
        # keep only immediate children of root (depth exactly one below it)
        if len(entry.parts) == child_depth:
            yield comp(entry), entry
def comp(path):
    """Collect the relevant file paths of a completed SCF calculation directory.

    Returns a dict with the structure file, the SCF log, the calculation
    directory and the SCF output directory, or None when the calculation has
    no SCF_OUT.ABACUS directory (i.e. it did not produce output).
    """
    scf_out = path / "SCF_OUT.ABACUS"
    if not scf_out.exists():
        return None
    return {
        "stru": path / path.name,
        "scf_log": scf_out / "running_scf.log",
        "path": path,
        "scf": scf_out,
    }
def get_energy(log, stru, mid):
    """Parse energies and composition from an ABACUS running_scf.log.

    Shells out to ``grep`` for each quantity, so the exact log line formats
    are load-bearing. Returns a dict with total energy, energy per atom,
    Fermi level (as a string), a single-element flag and the composition.

    NOTE(review): *stru* is accepted but never used — confirm it can be
    dropped from callers or is kept for interface symmetry.
    """
    print(log)
    is_sim = False
    # "!FINAL_ETOT_IS <value> eV" -> total energy
    cmd1 = f"grep \'!FINAL_ETOT_IS\' {log}"
    total_e = float(getoutput(cmd1).split()[1])
    # total number of atoms in the cell
    cmd2 = f"grep \'TOTAL ATOM NUMBER\' {log}"
    n = int(getoutput(cmd2).split()[-1])
    # number of distinct species
    cmd3 = f"grep \'ntype\' {log}"
    ntype = int(getoutput(cmd3).split()[-1])
    # Fermi energy; kept as a string exactly as grep-ed from the log
    cmd4 = f"grep EFERMI {log}"
    ef = getoutput(cmd4).split()[2]
    if ntype == 1:
        # single-species (elemental) structure
        is_sim = True
    total_e_pa = total_e / n
    # species labels, one "... = <symbol>" line per type
    cmd5 = f"grep \'atom label for species\' {log}"
    syb = []
    lbs = getoutput(cmd5)
    for line in lbs.split("\n"):
        syb.append(line.split("=")[-1].strip(' '))
    # per-type atom counts, one "... = <count>" line per type
    cmd6 = f"grep \'number of atom for this type\' {log}"
    ns = getoutput(cmd6)
    num = []
    for yl in ns.split('\n'):
        num.append(int(yl.split("=")[-1]))
    # composition: symbol -> atom count
    species = dict(zip(syb, num))
    return {"id":mid, "energy": total_e, "epa": total_e_pa, "efermi": ef,"is_sim": is_sim, "symbol": species}
def get_stru(stru_filepath):
    """Parse the ABACUS STRU file at *stru_filepath* (delegates to get_structure_dat)."""
    return get_structure_dat(stru_filepath)
def get_mag(scf_dir):
    """Extract magnetism data from the SCF output directory (delegates to get_magnetism)."""
    return get_magnetism(scf_dir)
def get_band(band_dir, stru_filename, scf_log_filepath):
    """Build the band-structure data set (delegates to get_bandstructure)."""
    return get_bandstructure(band_dir, stru_filename, scf_log_filepath)
def get_dos(dos_dir):
    """Build the density-of-states data set (delegates to get_density_of_states)."""
    return get_density_of_states(dos_dir)
def get_paras(calc_dir):
    """Return (scf k-point mesh, band k-path) read from *calc_dir* via get_kpt."""
    kpt = get_kpt(calc_dir, "scf")
    kpath = get_kpt(calc_dir, "band")
    return kpt, kpath
def get_dat(raw):
    """Extract the energy record for one completed calculation.

    *raw* is the dict produced by comp(). The structure file name is expected
    to look like ``<source>_<id>-...`` (e.g. ``matgen_123-...``); the leading
    token plus ``"_id"`` becomes the identifying key of the record
    (e.g. ``{"matgen_id": 123}``).

    Returns ``(energy_record, key)`` where the key fields are also merged
    into the record.
    """
    # NOTE(review): the character class [_|-] also splits on a literal '|';
    # presumably only '_' and '-' were intended — confirm the naming scheme.
    db, stru_id, _ = re.split(r"[_|-]", raw["stru"].name)
    db += "_id"
    key = {db: int(stru_id)}
    # The structure/band/DOS/magnetism/CIF pipeline is currently disabled;
    # only the energy record is extracted and uploaded.
    #stru = get_stru(raw["stru"])
    #stru.update(key)
    #band = get_band(raw["path"], raw["stru"].name, raw["scf_log"])
    #band.update(key)
    #dos = get_dos(raw["path"])
    #dos.update(key)
    #mag = get_mag(raw["path"])
    #mag.update(key)
    #cif = get_optimized_cif(raw["stru"])
    #cif.update(key)
    #return stru, mag, band, dos, cif, key
    e = get_energy(raw["scf_log"], raw["stru"], int(stru_id))
    e.update(key)
    return e, key
def goe(e, fe, k):
    """Assemble the final upload record: id key, Fermi level, total energy
    and the formation-energy fields from *fe*.
    """
    return {**k, 'efermi': e['efermi'], 'energy': e['energy'], **fe}
def get_db():
    """Open a handle to the 'abacus_data' MongoDB database.

    MongoClient connects lazily, so this does not touch the network yet.
    """
    client = pymongo.MongoClient("12.11.70.140:10102")
    return client["abacus_data"]
def upload_dat(db, *dat):
    """Upsert the structure/band/DOS/magnetism/CIF documents for one material.

    *dat* must unpack to (stru, mag, band, dos, cif, key); *key* is the
    identifying filter (e.g. {"matgen_id": 123}) used for the upsert.
    """
    stru, mag, band, dos, cif, key = dat

    def _upload(col, data):
        # update in place when a document with this key exists, insert otherwise
        exist = col.find_one(key)
        if exist is None:
            col.insert_one(data)
        else:
            col.update_one(exist, {'$set': data})

    _upload(db["stru"], stru)
    _upload(db["bs_plotter"], band)
    _upload(db["dos_plotter"], dos)
    _upload(db["mag"], mag)
    _upload(db["cif"], cif)
    print(f"upload {key} sucessed.")
def upload_eng(db, dat):
    """Upsert one energy record into the "energy" collection.

    Bug fix: the original filtered on a global name ``key`` that is never
    defined anywhere in this script, so the first upload raised NameError.
    The identifying filter is now rebuilt from the ``<source>_id`` field(s)
    that get_dat() merges into every record (see goe()/get_dat()).
    """
    en_col = db["energy"]
    key = {k: v for k, v in dat.items() if k.endswith("_id")}

    def _upload(col, data):
        # update in place when a document with this key exists, insert otherwise
        exist = col.find_one(key)
        if exist is not None:
            col.update_one(exist, {'$set': data})
        else:
            col.insert_one(data)

    _upload(en_col, dat)
    print(f"upload sucessed.")
def calcfe(item, s):
    """Compute the formation energy (per atom) of *item* against elemental
    reference energies.

    *s* maps element symbol -> [reference energy per atom, reference
    structure id] (loaded from abacus_simple.json).

    Bug fix: the single-element branch tested ``item['is_sim'] == 'True'``
    (a string compare) while get_energy() stores a bool, so the branch was
    unreachable and elemental reference structures never received a
    formation energy of exactly 0. Both the bool and the legacy string
    value are accepted now.
    """
    if item['is_sim'] in (True, 'True'):
        # Elemental structure: compare against its own species reference;
        # the reference structure itself gets exactly 0.
        syb = list(item['symbol'].keys())[0]
        te = float(item['energy'])
        s2_id = s[syb][1]
        if int(item["id"]) == int(s2_id):
            foe = 0
        else:
            s2_e = s[syb][0]
            v = int(list(item['symbol'].values())[0])
            foe = (te - v * s2_e) / v
    else:
        # Compound: subtract each species' reference energy, then normalize
        # by the total atom count.
        syb = item['symbol']
        te = float(item['energy'])
        an = 0
        for a, v in syb.items():
            ie = int(v) * s[a][0]
            te -= ie
            an += int(v)
        foe = round(te / an, 4)
    return {"formation_energy": foe}
if __name__ == "__main__":
    import json
    # Open one MongoDB handle and reuse it for every upload.
    adb = get_db()
    print(adb, " connected!")
    c = 0
    # abacus_simple.json maps element symbol -> [reference energy/atom, reference id].
    with open("abacus_simple.json", "r") as f:
        s = json.load(f)
    for calc, i in yield_stru(COMPATH):
        if calc is not None:
            try:
                res, key = get_dat(calc)
                foe = calcfe(res, s)
            except Exception as e:
                # Count (but otherwise skip) calculations that fail to parse;
                # the mv to ERRORS is currently disabled.
                c += 1
                #os.system(f"mv {i} {ERRORS}")
                continue
            else:
                ans = goe(res, foe, key)
                print(ans)
                upload_eng(adb, ans)
    print("errors: ", c)
|
from infrastructure import *
from indputils import *
from gurobipy import GRB, Model, LinExpr
import string
import networkx as nx
import matplotlib.pyplot as plt
import copy
import random
import time
import math
import pickle
import sys
# HOME_DIR="/Users/Andrew/"
# if platform.system() == "Linux":
# HOME_DIR="/home/andrew/"
def indp(N, v_r, T=1, layers=[1, 3], controlled_layers=[1, 3], functionality={},
         forced_actions=False, fixed_nodes={}, print_cmd=True, time_limit=None,
         co_location=True, solution_pool=None):
    """
    INDP optimization problem. It also solves td-INDP if T > 1.
    Parameters
    ----------
    N : :class:`~infrastructure.InfrastructureNetwork`
        An InfrastructureNetwork instance.
    v_r : list
        Vector of the number of resources given to each layer in each timestep.
        If the size of the vector is 1, it shows the total number of resources for all layers.
    T : int, optional
        Number of time steps to optimize over. The default is 1.
    layers : list, optional
        Layer IDs of N included in the optimization. The default is [1,3]
        (1 for water and 3 for power in the Shelby County database).
    controlled_layers : list, optional
        Layer IDs that can be recovered in this optimization. Used for decentralized
        optimization. The default is [1,3].
    functionality : dict, optional
        Dictionary of nodes to functionality values for non-controlled nodes.
        Used for decentralized optimization. The default is {}.
    forced_actions : bool, optional
        If true, it forces the optimizer to repair at least one element. The default is False.
    fixed_nodes : dict, optional
        It fixes the functionality of given elements to a given value. The default is {}.
    print_cmd : bool, optional
        If true, analysis information is written to the console. The default is True.
    time_limit : int, optional
        Time limit for the optimizer to stop. The default is None.
    co_location : bool, optional
        If false, exclude geographical interdependency from the optimization. The default is True.
    solution_pool : int, optional
        The number of solutions that should be retrieved from the optimizer in addition to
        the optimal one. The default is None.
    Returns
    -------
    : list
        A list of the form ``[m, results]`` for a successful optimization where m is the
        Gurobi optimization model and results is a :class:`~indputils.INDPResults` object
        generated using :func:`collect_results`.
        If :envvar:`solution_pool` is set to a number, the function returns ``[m, results, sol_pool_results]``
        where `sol_pool_results` is dictionary of solution that should be retrieved from the
        optimizer in addition to the optimal one collected using :func:`collect_solution_pool`.
    """
    # NOTE(review): the mutable default arguments (lists/dicts) are shared
    # across calls; safe only as long as they are never mutated in here.
    start_time = time.time()
    m = Model('indp')
    m.setParam('OutputFlag', False)
    if time_limit:
        m.setParam('TimeLimit', time_limit)
    g_prime_nodes = [n[0] for n in N.G.nodes(data=True) if n[1]['data']['inf_data'].net_id in layers]
    g_prime = N.G.subgraph(g_prime_nodes)
    # Damaged nodes in whole network
    n_prime = [n for n in g_prime.nodes(data=True) if n[1]['data']['inf_data'].repaired == 0.0]
    # Nodes in controlled network.
    n_hat_nodes = [n[0] for n in g_prime.nodes(data=True) if n[1]['data']['inf_data'].net_id in controlled_layers]
    n_hat = g_prime.subgraph(n_hat_nodes)
    # Damaged nodes in controlled network.
    n_hat_prime = [n for n in n_hat.nodes(data=True) if n[1]['data']['inf_data'].repaired == 0.0]
    # Damaged arcs in whole network
    a_prime = [(u, v, a) for u, v, a in g_prime.edges(data=True) if a['data']['inf_data'].functionality == 0.0]
    # Damaged arcs in controlled network.
    a_hat_prime = [(u, v, a) for u, v, a in a_prime if n_hat.has_node(u) and n_hat.has_node(v)]
    S = N.S
    # Populate interdependencies. Add nodes to N' if they currently rely on a non-functional node.
    interdep_nodes = {}
    for u, v, a in g_prime.edges(data=True):
        if not functionality:
            if a['data']['inf_data'].is_interdep and g_prime.nodes[u]['data']['inf_data'].functionality == 0.0:
                # print "Dependency edge goes from:",u,"to",v
                if v not in interdep_nodes:
                    interdep_nodes[v] = []
                interdep_nodes[v].append((u, a['data']['inf_data'].gamma))
        else:
            # Should populate n_hat with layers that are controlled. Then go through n_hat.edges(data=True)
            # to find interdependencies.
            for t in range(T):
                if t not in interdep_nodes:
                    interdep_nodes[t] = {}
            # NOTE(review): this block sits outside the 'for t' loop above, so
            # 't' here is the leftover value T-1 — confirm that is intended.
            if n_hat.has_node(v) and a['data']['inf_data'].is_interdep:
                if functionality[t][u] == 0.0:
                    if v not in interdep_nodes[t]:
                        interdep_nodes[t][v] = []
                    interdep_nodes[t][v].append((u, a['data']['inf_data'].gamma))
    # print "N'=",[n for (n,d) in N_prime]
    for t in range(T):
        # Add geographical space variables.
        if co_location:
            for s in S:
                m.addVar(name='z_' + str(s.id) + "," + str(t), vtype=GRB.BINARY)
        # Add over/under-supply variables for each node.
        for n, d in n_hat.nodes(data=True):
            m.addVar(name='delta+_' + str(n) + "," + str(t), lb=0.0)
            m.addVar(name='delta-_' + str(n) + "," + str(t), lb=0.0)
        # Add functionality binary variables for each node in N'.
        for n, d in n_hat.nodes(data=True):
            m.addVar(name='w_' + str(n) + "," + str(t), vtype=GRB.BINARY)
            if T > 1:
                m.addVar(name='w_tilde_' + str(n) + "," + str(t), vtype=GRB.BINARY)
        # Fix node values (only for iINDP)
        m.update()
        for key, val in fixed_nodes.items():
            m.getVarByName('w_' + str(key) + "," + str(0)).lb = val
            m.getVarByName('w_' + str(key) + "," + str(0)).ub = val
        # Add flow variables for each arc.
        for u, v, a in n_hat.edges(data=True):
            m.addVar(name='x_' + str(u) + "," + str(v) + "," + str(t), lb=0.0)
        # Add functionality binary variables for each arc in A'.
        for u, v, a in a_hat_prime:
            m.addVar(name='y_' + str(u) + "," + str(v) + "," + str(t), vtype=GRB.BINARY)
            if T > 1:
                m.addVar(name='y_tilde_' + str(u) + "," + str(v) + "," + str(t), vtype=GRB.BINARY)
    m.update()
    # Populate objective function.
    obj_func = LinExpr()
    for t in range(T):
        if co_location:
            for s in S:
                obj_func += s.cost * m.getVarByName('z_' + str(s.id) + "," + str(t))
        for u, v, a in a_hat_prime:
            if T == 1:
                obj_func += (float(a['data']['inf_data'].reconstruction_cost) / 2.0) * m.getVarByName(
                    'y_' + str(u) + "," + str(v) + "," + str(t))
            else:
                obj_func += (float(a['data']['inf_data'].reconstruction_cost) / 2.0) * m.getVarByName(
                    'y_tilde_' + str(u) + "," + str(v) + "," + str(t))
        for n, d in n_hat_prime:
            if T == 1:
                obj_func += d['data']['inf_data'].reconstruction_cost * m.getVarByName('w_' + str(n) + "," + str(t))
            else:
                obj_func += d['data']['inf_data'].reconstruction_cost * m.getVarByName(
                    'w_tilde_' + str(n) + "," + str(t))
        for n, d in n_hat.nodes(data=True):
            obj_func += d['data']['inf_data'].oversupply_penalty * m.getVarByName('delta+_' + str(n) + "," + str(t))
            obj_func += d['data']['inf_data'].undersupply_penalty * m.getVarByName('delta-_' + str(n) + "," + str(t))
        for u, v, a in n_hat.edges(data=True):
            obj_func += a['data']['inf_data'].flow_cost * m.getVarByName('x_' + str(u) + "," + str(v) + "," + str(t))
    m.setObjective(obj_func, GRB.MINIMIZE)
    m.update()
    # Constraints.
    # Time-dependent constraints.
    if T > 1:
        for n, d in n_hat_prime:
            m.addConstr(m.getVarByName('w_' + str(n) + ",0"), GRB.EQUAL, 0,
                        "Initial state at node " + str(n) + "," + str(0))
        for u, v, a in a_hat_prime:
            m.addConstr(m.getVarByName('y_' + str(u) + "," + str(v) + ",0"), GRB.EQUAL, 0,
                        "Initial state at arc " + str(u) + "," + str(v) + "," + str(0))
    for t in range(T):
        # Time-dependent constraint.
        for n, d in n_hat_prime:
            if t > 0:
                wTildeSum = LinExpr()
                for t_prime in range(1, t + 1):
                    wTildeSum += m.getVarByName('w_tilde_' + str(n) + "," + str(t_prime))
                m.addConstr(m.getVarByName('w_' + str(n) + "," + str(t)), GRB.LESS_EQUAL, wTildeSum,
                            "Time dependent recovery constraint at node " + str(n) + "," + str(t))
        for u, v, a in a_hat_prime:
            if t > 0:
                yTildeSum = LinExpr()
                for t_prime in range(1, t + 1):
                    yTildeSum += m.getVarByName('y_tilde_' + str(u) + "," + str(v) + "," + str(t_prime))
                m.addConstr(m.getVarByName('y_' + str(u) + "," + str(v) + "," + str(t)), GRB.LESS_EQUAL, yTildeSum,
                            "Time dependent recovery constraint at arc " + str(u) + "," + str(v) + "," + str(t))
        # Enforce a_i,j to be fixed if a_j,i is fixed (and vice versa).
        for u, v, a in a_hat_prime:
            # print u,",",v
            m.addConstr(m.getVarByName('y_' + str(u) + "," + str(v) + "," + str(t)), GRB.EQUAL,
                        m.getVarByName('y_' + str(v) + "," + str(u) + "," + str(t)),
                        "Arc reconstruction equality (" + str(u) + "," + str(v) + "," + str(t) + ")")
            if T > 1:
                m.addConstr(m.getVarByName('y_tilde_' + str(u) + "," + str(v) + "," + str(t)), GRB.EQUAL,
                            m.getVarByName('y_tilde_' + str(v) + "," + str(u) + "," + str(t)),
                            "Arc reconstruction equality (" + str(u) + "," + str(v) + "," + str(t) + ")")
        # Conservation of flow constraint. (2) in INDP paper.
        for n, d in n_hat.nodes(data=True):
            outFlowConstr = LinExpr()
            inFlowConstr = LinExpr()
            demandConstr = LinExpr()
            for u, v, a in n_hat.out_edges(n, data=True):
                outFlowConstr += m.getVarByName('x_' + str(u) + "," + str(v) + "," + str(t))
            for u, v, a in n_hat.in_edges(n, data=True):
                inFlowConstr += m.getVarByName('x_' + str(u) + "," + str(v) + "," + str(t))
            demandConstr += d['data']['inf_data'].demand - m.getVarByName(
                'delta+_' + str(n) + "," + str(t)) + m.getVarByName('delta-_' + str(n) + "," + str(t))
            m.addConstr(outFlowConstr - inFlowConstr, GRB.EQUAL, demandConstr,
                        "Flow conservation constraint " + str(n) + "," + str(t))
        # Flow functionality constraints.
        if not functionality:
            interdep_nodes_list = interdep_nodes.keys()  # Interdepndent nodes with a damaged dependee node
        else:
            interdep_nodes_list = interdep_nodes[t].keys()  # Interdepndent nodes with a damaged dependee node
        for u, v, a in n_hat.edges(data=True):
            if (u in [n for (n, d) in n_hat_prime]) | (u in interdep_nodes_list):
                m.addConstr(m.getVarByName('x_' + str(u) + "," + str(v) + "," + str(t)), GRB.LESS_EQUAL,
                            a['data']['inf_data'].capacity * m.getVarByName('w_' + str(u) + "," + str(t)),
                            "Flow in functionality constraint(" + str(u) + "," + str(v) + "," + str(t) + ")")
            else:
                m.addConstr(m.getVarByName('x_' + str(u) + "," + str(v) + "," + str(t)), GRB.LESS_EQUAL,
                            a['data']['inf_data'].capacity * N.G.nodes[u]['data']['inf_data'].functionality,
                            "Flow in functionality constraint (" + str(u) + "," + str(v) + "," + str(t) + ")")
            if (v in [n for (n, d) in n_hat_prime]) | (v in interdep_nodes_list):
                m.addConstr(m.getVarByName('x_' + str(u) + "," + str(v) + "," + str(t)), GRB.LESS_EQUAL,
                            a['data']['inf_data'].capacity * m.getVarByName('w_' + str(v) + "," + str(t)),
                            "Flow out functionality constraint(" + str(u) + "," + str(v) + "," + str(t) + ")")
            else:
                m.addConstr(m.getVarByName('x_' + str(u) + "," + str(v) + "," + str(t)), GRB.LESS_EQUAL,
                            a['data']['inf_data'].capacity * N.G.nodes[v]['data']['inf_data'].functionality,
                            "Flow out functionality constraint (" + str(u) + "," + str(v) + "," + str(t) + ")")
            if (u, v, a) in a_hat_prime:
                m.addConstr(m.getVarByName('x_' + str(u) + "," + str(v) + "," + str(t)), GRB.LESS_EQUAL,
                            a['data']['inf_data'].capacity * m.getVarByName(
                                'y_' + str(u) + "," + str(v) + "," + str(t)),
                            "Flow arc functionality constraint (" + str(u) + "," + str(v) + "," + str(t) + ")")
            else:
                m.addConstr(m.getVarByName('x_' + str(u) + "," + str(v) + "," + str(t)), GRB.LESS_EQUAL,
                            a['data']['inf_data'].capacity * N.G[u][v]['data']['inf_data'].functionality,
                            "Flow arc functionality constraint(" + str(u) + "," + str(v) + "," + str(t) + ")")
        # Resource availability constraints.
        # v_r is either a single shared budget (int) or a dict of per-layer caps.
        is_sep_resource = False
        if isinstance(v_r, int):
            total_resource = v_r
        else:
            is_sep_resource = True
            total_resource = sum([val for _, val in v_r.items()])
            if len(v_r.keys()) != len(layers):
                sys.exit("The number of resource cap values does not match the number of layers.\n")
        resource_left_constr = LinExpr()
        if is_sep_resource:
            resource_left_constr_sep = {key: LinExpr() for key, _ in v_r.items()}
        for u, v, a in a_hat_prime:
            index_layer = a['data']['inf_data'].layer
            if T == 1:
                resource_left_constr += 0.5 * a['data']['inf_data'].resource_usage * m.getVarByName(
                    'y_' + str(u) + "," + str(v) + "," + str(t))
                if is_sep_resource:
                    resource_left_constr_sep[index_layer] += 0.5 * a['data']['inf_data'].resource_usage * m.getVarByName(
                        'y_' + str(u) + "," + str(v) + "," + str(t))
            else:
                resource_left_constr += 0.5 * a['data']['inf_data'].resource_usage * m.getVarByName(
                    'y_tilde_' + str(u) + "," + str(v) + "," + str(t))
                if is_sep_resource:
                    resource_left_constr_sep[index_layer] += 0.5 * a['data']['inf_data'].resource_usage * m.getVarByName(
                        'y_tilde_' + str(u) + "," + str(v) + "," + str(t))
        for n, d in n_hat_prime:
            index_layer = n[1]
            if T == 1:
                resource_left_constr += d['data']['inf_data'].resource_usage * m.getVarByName(
                    'w_' + str(n) + "," + str(t))
                if is_sep_resource:
                    resource_left_constr_sep[index_layer] += d['data']['inf_data'].resource_usage * m.getVarByName(
                        'w_' + str(n) + "," + str(t))
            else:
                resource_left_constr += d['data']['inf_data'].resource_usage * m.getVarByName(
                    'w_tilde_' + str(n) + "," + str(t))
                if is_sep_resource:
                    resource_left_constr_sep[index_layer] += d['data']['inf_data'].resource_usage * m.getVarByName(
                        'w_tilde_' + str(n) + "," + str(t))
        m.addConstr(resource_left_constr, GRB.LESS_EQUAL, total_resource,
                    "Resource availability constraint at " + str(t) + ".")
        if is_sep_resource:
            for k, _ in v_r.items():
                m.addConstr(resource_left_constr_sep[k], GRB.LESS_EQUAL, v_r[k],
                            "Resource availability constraint at " + str(t) + " for layer " + str(k) + ".")
        # Interdependency constraints
        infeasible_actions = []
        for n, d in n_hat.nodes(data=True):
            if not functionality:
                if n in interdep_nodes:
                    interdep_l_constr = LinExpr()
                    interdep_r_constr = LinExpr()
                    for interdep in interdep_nodes[n]:
                        src = interdep[0]
                        gamma = interdep[1]
                        if not n_hat.has_node(src):
                            infeasible_actions.append(n)
                            interdep_l_constr += 0
                        else:
                            interdep_l_constr += m.getVarByName('w_' + str(src) + "," + str(t)) * gamma
                    interdep_r_constr += m.getVarByName('w_' + str(n) + "," + str(t))
                    m.addConstr(interdep_l_constr, GRB.GREATER_EQUAL, interdep_r_constr,
                                "Interdependency constraint for node " + str(n) + "," + str(t))
            else:
                if n in interdep_nodes[t]:
                    # print interdep_nodes[t]
                    interdep_l_constr = LinExpr()
                    interdep_r_constr = LinExpr()
                    for interdep in interdep_nodes[t][n]:
                        src = interdep[0]
                        gamma = interdep[1]
                        if not n_hat.has_node(src):
                            if print_cmd:
                                print("Forcing", str(n), "to be 0 (dep. on", str(src), ")")
                            infeasible_actions.append(n)
                            interdep_l_constr += 0
                        else:
                            interdep_l_constr += m.getVarByName('w_' + str(src) + "," + str(t)) * gamma
                    interdep_r_constr += m.getVarByName('w_' + str(n) + "," + str(t))
                    m.addConstr(interdep_l_constr, GRB.GREATER_EQUAL, interdep_r_constr,
                                "Interdependency constraint for node " + str(n) + "," + str(t))
        # Forced actions (if applicable)
        if forced_actions:
            recovery_sum = LinExpr()
            feasible_nodes = [(n, d) for n, d in n_hat_prime if n not in infeasible_actions]
            if len(feasible_nodes) + len(a_hat_prime) > 0:
                for n, d in feasible_nodes:
                    if T == 1:
                        recovery_sum += m.getVarByName('w_' + str(n) + "," + str(t))
                    else:
                        recovery_sum += m.getVarByName('w_tilde_' + str(n) + "," + str(t))
                for u, v, a in a_hat_prime:
                    if T == 1:
                        recovery_sum += m.getVarByName('y_' + str(u) + "," + str(v) + "," + str(t))
                    else:
                        recovery_sum += m.getVarByName('y_tilde_' + str(u) + "," + str(v) + "," + str(t))
                m.addConstr(recovery_sum, GRB.GREATER_EQUAL, 1, "Forced action constraint")
        # Geographic space constraints
        if co_location:
            for s in S:
                for n, d in n_hat_prime:
                    if d['data']['inf_data'].in_space(s.id):
                        if T == 1:
                            m.addConstr(
                                m.getVarByName('w_' + str(n) + "," + str(t)) * d['data']['inf_data'].in_space(s.id),
                                GRB.LESS_EQUAL, m.getVarByName('z_' + str(s.id) + "," + str(t)),
                                "Geographical space constraint for node " + str(n) + "," + str(t))
                        else:
                            m.addConstr(
                                m.getVarByName('w_tilde_' + str(n) + "," + str(t)) * d['data']['inf_data'].in_space(
                                    s.id), GRB.LESS_EQUAL, m.getVarByName('z_' + str(s.id) + "," + str(t)),
                                "Geographical space constraint for node " + str(n) + "," + str(t))
                for u, v, a in a_hat_prime:
                    if a['data']['inf_data'].in_space(s.id):
                        if T == 1:
                            m.addConstr(m.getVarByName('y_' + str(u) + "," + str(v) + "," + str(t)) * a['data'][
                                'inf_data'].in_space(s.id), GRB.LESS_EQUAL,
                                        m.getVarByName('z_' + str(s.id) + "," + str(t)),
                                        "Geographical space constraint for arc (" + str(u) + "," + str(v) + ")")
                        else:
                            m.addConstr(m.getVarByName('y_tilde_' + str(u) + "," + str(v) + "," + str(t)) * a['data'][
                                'inf_data'].in_space(s.id), GRB.LESS_EQUAL,
                                        m.getVarByName('z_' + str(s.id) + "," + str(t)),
                                        "Geographical space constraint for arc (" + str(u) + "," + str(v) + ")")
    # print "Solving..."
    m.update()
    if solution_pool:
        m.setParam('PoolSearchMode', 1)
        m.setParam('PoolSolutions', 10000)
        m.setParam('PoolGap', solution_pool)
    m.optimize()
    run_time = time.time() - start_time
    # Save results.
    # Gurobi status 9 == TIME_LIMIT (an incumbent solution may still exist).
    if m.getAttr("Status") == GRB.OPTIMAL or m.status == 9:
        if m.status == 9:
            print('\nOptimizer time limit, gap = %1.3f\n' % m.MIPGap)
        results = collect_results(m, controlled_layers, T, n_hat, n_hat_prime, a_hat_prime, S, coloc=co_location)
        # NOTE(review): 't' is the leftover loop variable here, i.e. T-1.
        results.add_run_time(t, run_time)
        if solution_pool:
            sol_pool_results = collect_solution_pool(m, T, n_hat_prime, a_hat_prime)
            return [m, results, sol_pool_results]
        return [m, results]
    else:
        # NOTE(review): computeIIS() assumes the model is infeasible; on a
        # time-limit exit without a solution this may not hold — confirm.
        m.computeIIS()
        if m.status == 9:
            print(m.getAttr("Status"), ": SOLUTION NOT FOUND. (Check data and/or violated constraints).")
        print('\nThe following constraint(s) cannot be satisfied:')
        for c in m.getConstrs():
            if c.IISConstr:
                print('%s' % c.constrName)
        return None
def collect_solution_pool(m, T, n_hat_prime, a_hat_prime):
    """
    This function collect the result (list of repaired nodes and arcs) for all feasible solutions in the solution pool
    Parameters
    ----------
    m : gurobi.Model
        The object containing the solved optimization problem.
    T : int
        Number of time steps in the optimization (T=1 for iINDP, and T>=1 for td-INDP).
    n_hat_prime : list
        List of damaged nodes in controlled network.
    a_hat_prime : list
        List of damaged arcs in controlled network.
    Returns
    -------
    sol_pool_results : dict
        A dictionary containing one dictionary per solution that contain list of repaired node and arcs in the solution.
    """
    sol_pool_results = {}
    current_sol_count = 0
    for sol in range(m.SolCount):
        m.setParam('SolutionNumber', sol)
        # print(m.PoolObjVal)
        sol_pool_results[sol] = {'nodes': [], 'arcs': []}
        for t in range(T):
            # Record node recovery actions.
            for n, d in n_hat_prime:
                node_var = 'w_tilde_' + str(n) + "," + str(t)
                if T == 1:
                    node_var = 'w_' + str(n) + "," + str(t)
                if round(m.getVarByName(node_var).xn) == 1:
                    sol_pool_results[sol]['nodes'].append(n)
            # Record edge recovery actions.
            for u, v, a in a_hat_prime:
                arc_var = 'y_tilde_' + str(u) + "," + str(v) + "," + str(t)
                if T == 1:
                    arc_var = 'y_' + str(u) + "," + str(v) + "," + str(t)
                # Bug fix: use .xn (the value in pool solution 'sol', selected
                # via SolutionNumber above); .x always reads the incumbent,
                # so arcs were previously taken from the wrong solution.
                if round(m.getVarByName(arc_var).xn) == 1:
                    sol_pool_results[sol]['arcs'].append((u, v))
        # Drop solutions identical to the last distinct one kept.
        if sol > 0 and sol_pool_results[sol] == sol_pool_results[current_sol_count]:
            del sol_pool_results[sol]
        elif sol > 0:
            current_sol_count = sol
    return sol_pool_results
def collect_results(m, controlled_layers, T, n_hat, n_hat_prime, a_hat_prime, S, coloc=True):
    """
    This function computes the results (actions and costs) of the optimal solution and writes them to a
    :class:`~indputils.INDPResults` object.
    Parameters
    ----------
    m : gurobi.Model
        The object containing the solved optimization problem.
    controlled_layers : list
        Layer IDs that can be recovered in this optimization.
    T : int
        Number of time steps in the optimization (T=1 for iINDP, and T>=1 for td-INDP).
    n_hat : list
        List of Damaged nodes in whole network.
    n_hat_prime : list
        List of damaged nodes in controlled network.
    a_hat_prime : list
        List of damaged arcs in controlled network.
    S : list
        List of geographical sites.
    coloc : bool, optional
        If false, exclude geographical interdependency from the results. The default is True.
    Returns
    -------
    indp_results : INDPResults
        A :class:`~indputils.INDPResults` object containing the optimal restoration decisions.
    """
    layers = controlled_layers
    indp_results = INDPResults(layers)
    # compute total demand of all layers and each layer
    # NOTE(review): only negative demands (sinks) are accumulated; layers with
    # no sink nodes leave a 0.0 here, which would divide by zero below.
    total_demand = 0.0
    total_demand_layer = {l: 0.0 for l in layers}
    for n, d in n_hat.nodes(data=True):
        demand_value = d['data']['inf_data'].demand
        if demand_value < 0:
            total_demand += demand_value
            total_demand_layer[n[1]] += demand_value
    for t in range(T):
        node_cost = 0.0
        arc_cost = 0.0
        flow_cost = 0.0
        over_supp_cost = 0.0
        under_supp_cost = 0.0
        under_supp = 0.0
        space_prep_cost = 0.0
        node_cost_layer = {l: 0.0 for l in layers}
        arc_cost_layer = {l: 0.0 for l in layers}
        flow_cost_layer = {l: 0.0 for l in layers}
        over_supp_cost_layer = {l: 0.0 for l in layers}
        under_supp_cost_layer = {l: 0.0 for l in layers}
        under_supp_layer = {l: 0.0 for l in layers}
        space_prep_cost_layer = {l: 0.0 for l in layers}  # !!! populate this for each layer
        # Record node recovery actions.
        for n, d in n_hat_prime:
            node_var = 'w_tilde_' + str(n) + "," + str(t)
            if T == 1:
                node_var = 'w_' + str(n) + "," + str(t)
            if round(m.getVarByName(node_var).x) == 1:
                action = str(n[0]) + "." + str(n[1])
                indp_results.add_action(t, action)
        # Record edge recovery actions.
        for u, v, a in a_hat_prime:
            arc_var = 'y_tilde_' + str(u) + "," + str(v) + "," + str(t)
            if T == 1:
                arc_var = 'y_' + str(u) + "," + str(v) + "," + str(t)
            if round(m.getVarByName(arc_var).x) == 1:
                action = str(u[0]) + "." + str(u[1]) + "/" + str(v[0]) + "." + str(v[1])
                indp_results.add_action(t, action)
        # Calculate space preparation costs.
        if coloc:
            for s in S:
                space_prep_cost += s.cost * m.getVarByName('z_' + str(s.id) + "," + str(t)).x
        indp_results.add_cost(t, "Space Prep", space_prep_cost, space_prep_cost_layer)
        # Calculate arc preparation costs.
        for u, v, a in a_hat_prime:
            arc_var = 'y_tilde_' + str(u) + "," + str(v) + "," + str(t)
            if T == 1:
                arc_var = 'y_' + str(u) + "," + str(v) + "," + str(t)
            arc_cost += (a['data']['inf_data'].reconstruction_cost / 2.0) * m.getVarByName(arc_var).x
            arc_cost_layer[u[1]] += (a['data']['inf_data'].reconstruction_cost / 2.0) * m.getVarByName(arc_var).x
        indp_results.add_cost(t, "Arc", arc_cost, arc_cost_layer)
        # Calculate node preparation costs.
        for n, d in n_hat_prime:
            node_var = 'w_tilde_' + str(n) + "," + str(t)
            if T == 1:
                node_var = 'w_' + str(n) + "," + str(t)
            node_cost += d['data']['inf_data'].reconstruction_cost * m.getVarByName(node_var).x
            node_cost_layer[n[1]] += d['data']['inf_data'].reconstruction_cost * m.getVarByName(node_var).x
        indp_results.add_cost(t, "Node", node_cost, node_cost_layer)
        # Calculate under/oversupply costs.
        for n, d in n_hat.nodes(data=True):
            over_supp_cost += d['data']['inf_data'].oversupply_penalty * m.getVarByName(
                'delta+_' + str(n) + "," + str(t)).x
            over_supp_cost_layer[n[1]] += d['data']['inf_data'].oversupply_penalty * m.getVarByName(
                'delta+_' + str(n) + "," + str(t)).x
            under_supp += m.getVarByName('delta-_' + str(n) + "," + str(t)).x
            under_supp_layer[n[1]] += m.getVarByName('delta-_' + str(n) + "," + str(t)).x / total_demand_layer[n[1]]
            under_supp_cost += d['data']['inf_data'].undersupply_penalty * m.getVarByName(
                'delta-_' + str(n) + "," + str(t)).x
            under_supp_cost_layer[n[1]] += d['data']['inf_data'].undersupply_penalty * m.getVarByName(
                'delta-_' + str(n) + "," + str(t)).x
        indp_results.add_cost(t, "Over Supply", over_supp_cost, over_supp_cost_layer)
        indp_results.add_cost(t, "Under Supply", under_supp_cost, under_supp_cost_layer)
        indp_results.add_cost(t, "Under Supply Perc", under_supp / total_demand, under_supp_layer)
        # Calculate flow costs.
        for u, v, a in n_hat.edges(data=True):
            flow_cost += a['data']['inf_data'].flow_cost * m.getVarByName('x_' + str(u) + "," + str(v) + "," + str(t)).x
            flow_cost_layer[u[1]] += a['data']['inf_data'].flow_cost * m.getVarByName(
                'x_' + str(u) + "," + str(v) + "," + str(t)).x
        indp_results.add_cost(t, "Flow", flow_cost, flow_cost_layer)
        # Calculate total costs.
        total_lyr = {}
        total_nd_lyr = {}
        for l in layers:
            total_lyr[l] = flow_cost_layer[l] + arc_cost_layer[l] + node_cost_layer[l] + \
                           over_supp_cost_layer[l] + under_supp_cost_layer[l] + space_prep_cost_layer[l]
            # Bug fix: the per-layer "no disconnection" total previously added
            # the network-wide flow_cost instead of this layer's flow cost.
            total_nd_lyr[l] = space_prep_cost_layer[l] + arc_cost_layer[l] + flow_cost_layer[l] + node_cost_layer[l]
        indp_results.add_cost(t, "Total", flow_cost + arc_cost + node_cost + over_supp_cost + \
                              under_supp_cost + space_prep_cost, total_lyr)
        indp_results.add_cost(t, "Total no disconnection", space_prep_cost + arc_cost + \
                              flow_cost + node_cost, total_nd_lyr)
    return indp_results
def apply_recovery(N, indp_results, t):
    """
    Apply the restoration decisions (solution of INDP) to the network by marking
    the repaired elements as functional.

    Parameters
    ----------
    N : :class:`~infrastructure.InfrastructureNetwork`
        The model of the interdependent network.
    indp_results : INDPResults
        A :class:`~indputils.INDPResults` object containing the optimal restoration decisions.
    t : int
        The time step to which the results should apply.

    Returns
    -------
    None.

    """
    for action in indp_results[t]['actions']:
        if "/" in action:
            # Edge recovery action, encoded as "<src>/<dst>" with dotted node ids.
            endpoints = action.split("/")
            src = tuple(int(part) for part in endpoints[0].split("."))
            dst = tuple(int(part) for part in endpoints[1].split("."))
            N.G[src][dst]['data']['inf_data'].functionality = 1.0
        else:
            # Node recovery action, encoded as "<local_id>.<layer>".
            node = tuple(int(part) for part in action.split("."))
            node_data = N.G.nodes[node]['data']['inf_data']
            node_data.repaired = 1.0
            node_data.functionality = 1.0
def create_functionality_matrix(N, T, layers, actions, strategy_type="OPTIMISTIC"):
    """
    Creates a functionality map for input into the functionality parameter in the indp function.

    Parameters
    ----------
    N : :class:`~infrastructure.InfrastructureNetwork`
        An InfrastructureNetwork instance.
    T : int
        Number of time steps to optimize over.
    layers : list
        Layer IDs of N included in the optimization.
    actions : list
        An array of actions from a previous optimization. Likely taken from an
        INDPResults variable 'indp_result[t]['actions']'. Only used when
        strategy_type == "INFO_SHARE".
    strategy_type : str, optional
        If no actions are provided, assigns a default functionality. Options are:
        "OPTIMISTIC", "PESSIMISTIC", "REALISTIC" or "INFO_SHARE". The default is "OPTIMISTIC".

    Returns
    -------
    functionality : dict
        A functionality dictionary (time step -> {node: 0.0/1.0}) used for input into indp.
    """
    functionality = {}
    g_prime_nodes = [n[0] for n in N.G.nodes(data=True) if n[1]['data']['inf_data'].net_id in layers]
    g_prime = N.G.subgraph(g_prime_nodes)
    # Nodes that are still unrepaired in the initial state.
    n_prime = [n for n in g_prime.nodes(data=True) if n[1]['data']['inf_data'].repaired == 0.0]
    for t in range(T):
        functionality[t] = {}
        functional_nodes = []
        # Carry forward any node that became functional in an earlier time step.
        for t_p in range(t):
            for key in functionality[t_p]:
                if functionality[t_p][key] == 1.0:
                    functionality[t][key] = 1.0
        if strategy_type == "INFO_SHARE":
            for a in actions[t]:
                # Node actions look like "<node>.<layer>"; edge actions contain "/".
                if a and "/" not in a:
                    # BUG FIX: the original used the Python-2-only idiom
                    # string.split(a, "."), which raises under Python 3.
                    node = int(a.split(".")[0])
                    layer = int(a.split(".")[1])
                    if layer in layers:
                        functional_nodes.append((node, layer))
        for n, d in g_prime.nodes(data=True):
            if d['data']['inf_data'].net_id in layers:
                if (n, d) in n_prime and n in functional_nodes:
                    functionality[t][n] = 1.0
                elif g_prime.has_node(n) and (n, d) not in n_prime:
                    functionality[t][n] = 1.0
                else:
                    if strategy_type == "OPTIMISTIC":
                        functionality[t][n] = 1.0
                    elif strategy_type == "PESSIMISTIC":
                        functionality[t][n] = 0.0
                    elif strategy_type == "REALISTIC":
                        functionality[t][n] = d['data']['inf_data'].functionality
                    else:
                        if n not in functionality[t]:
                            functionality[t][n] = 0.0
    return functionality
def initialize_network(base_dir="../data/INDP_7-20-2015/", external_interdependency_dir=None,
                       sim_number=1, cost_scale=1, magnitude=6, sample=0, v=3,
                       infrastructure_data=True, topology='Random'):
    """
    Initializes an :class:`~infrastructure.InfrastructureNetwork` object from
    Shelby County data or synthetic networks.

    Parameters
    ----------
    base_dir : str, optional
        Base directory of Shelby County data or synthetic networks. The default is
        "../data/INDP_7-20-2015/".
    external_interdependency_dir : str, optional
        Directory of external interdependencies for Shelby County data. The default is None.
    sim_number : int, optional
        Which simulation number to use as input. The default is 1.
    cost_scale : float, optional
        Scales the cost to improve efficiency. The default is 1.
    magnitude : int, optional
        Magnitude parameter of the initial disruption. The default is 6.
    sample : int, optional
        Sample number parameter of the initial disruption. The default is 0.
    v : int, list, optional
        Number of available resources or resource cap. The default is 3.
    infrastructure_data : bool, optional
        If the data are for infrastructure. It should be set to False if a synthetic
        network is being analyzed. The default is True.
    topology : str, optional
        Topology of the synthetic network that is being analyzed. The default is 'Random'.

    Returns
    -------
    InterdepNet : :class:`~infrastructure.InfrastructureNetwork`
        The object containing the network data.
    v_temp : int, list
        Number of available resources or resource cap. Used for synthetic networks
        where each sample network has a different resource cap.
    layers_temp : list
        List of layers in the analysis. Used for synthetic networks where each sample
        network has different layers.
    """
    # Synthetic networks carry their own resource cap and layer list; for
    # infrastructure data these stay at the neutral defaults below.
    v_temp = 0
    layers_temp = []
    if infrastructure_data:
        # Shelby County (infrastructure) data set.
        InterdepNet = load_infrastructure_data(
            BASE_DIR=base_dir,
            external_interdependency_dir=external_interdependency_dir,
            sim_number=sim_number,
            cost_scale=cost_scale,
            magnitude=magnitude,
            v=v,
            data_format=infrastructure_data,
        )
    else:
        # Synthetic network sample.
        InterdepNet, v_temp, layers_temp = load_synthetic_network(
            BASE_DIR=base_dir,
            topology=topology,
            config=magnitude,
            sample=sample,
            cost_scale=cost_scale,
        )
    return InterdepNet, v_temp, layers_temp
def run_indp(params, layers=[1, 2, 3], controlled_layers=[], functionality={}, T=1, validate=False,
             save=True, suffix="", forced_actions=False, saveModel=False, print_cmd_line=True,
             co_location=True):
    """ Runs an INDP problem with specified parameters. Outputs to directory specified in params['OUTPUT_DIR'].

    :param params: Global parameters dict. Keys read here: "V" (resources: int, or
        per-layer dict), "L", "SIM_NUMBER", "MAGNITUDE", "OUTPUT_DIR", "DYNAMIC_PARAMS",
        and optionally "N" (pre-built network), "NUM_ITERATIONS" and "WINDOW_LENGTH".
    :param layers: Layers to consider in the infrastructure network.
    :param controlled_layers: Layers that can be restored; defaults to `layers` when empty.
    :param functionality: Predetermined functionality dict (see create_functionality_matrix).
    :param T: Number of timesteps to optimize over. T == 1 runs iterative INDP;
        T > 1 runs the time-dependent (td-INDP) formulation.
    :param validate: Validate solution. (Currently unused.)
    :param save: Write results to CSV under the output directory.
    :param suffix: Suffix for the output file names.
    :param forced_actions: Passed through to indp(); forces actions each step.
    :param saveModel: Save each Gurobi model and solution to file.
    :param print_cmd_line: Print iteration-count progress information.
    :param co_location: Passed through to indp(); include co-location (space) costs.
    :returns: An INDPResults object with costs, actions and components per time step.

    NOTE(review): `layers`, `controlled_layers` and `functionality` are mutable
    default arguments; they are not mutated in this function, but callers should
    not rely on their identity across calls.
    """
    # Initialize failure scenario.
    InterdepNet = None
    if "N" not in params:
        InterdepNet = initialize_network(base_dir="../data/INDP_7-20-2015/",
                                         sim_number=params['SIM_NUMBER'],
                                         magnitude=params["MAGNITUDE"])
    else:
        InterdepNet = params["N"]
    if "NUM_ITERATIONS" not in params:
        params["NUM_ITERATIONS"] = 1
    if not controlled_layers:
        controlled_layers = layers
    v_r = params["V"]
    # Output directory suffix encodes the resource cap (summed when per-layer).
    if isinstance(v_r, (int)):
        outDirSuffixRes = str(v_r)
    else:
        outDirSuffixRes = str(sum([val for _, val in v_r.items()])) + '_fixed_layer_Cap'
    indp_results = INDPResults(params["L"])
    if T == 1:
        print("--Running INDP (T=1) or iterative INDP.")
        if print_cmd_line:
            print("Num iters=", params["NUM_ITERATIONS"])
        # Run INDP for 1 time step (original INDP).
        output_dir = params["OUTPUT_DIR"] + '_L' + str(len(layers)) + '_m' + str(
            params["MAGNITUDE"]) + "_v" + outDirSuffixRes
        # Initial calculations.
        if params['DYNAMIC_PARAMS']:
            original_N = copy.deepcopy(InterdepNet)  # !!! deepcopy
            dynamic_params = create_dynamic_param(params, N=original_N)
            dynamic_parameters(InterdepNet, original_N, 0, dynamic_params)
        # Time step 0: evaluate the initial (undisrupted-resources) state.
        results = indp(InterdepNet, 0, 1, layers, controlled_layers=controlled_layers,
                       functionality=functionality, co_location=co_location)
        indp_results = results[1]
        indp_results.add_components(0, INDPComponents.calculate_components(results[0], InterdepNet,
                                                                           layers=controlled_layers))
        for i in range(params["NUM_ITERATIONS"]):
            print("-Time Step (iINDP)", i + 1, "/", params["NUM_ITERATIONS"])
            if params['DYNAMIC_PARAMS']:
                dynamic_parameters(InterdepNet, original_N, i + 1, dynamic_params)
            results = indp(InterdepNet, v_r, T, layers, controlled_layers=controlled_layers,
                           forced_actions=forced_actions, co_location=co_location)
            indp_results.extend(results[1], t_offset=i + 1)
            if saveModel:
                save_INDP_model_to_file(results[0], output_dir + "/Model", i + 1)
            # Modify network to account for recovery and calculate components.
            apply_recovery(InterdepNet, indp_results, i + 1)
            indp_results.add_components(i + 1, INDPComponents.calculate_components(results[0], InterdepNet,
                                                                                   layers=controlled_layers))
        # print "Num_iters=",params["NUM_ITERATIONS"]
    else:
        # td-INDP formulations. Includes "DELTA_T" parameter for sliding windows to increase
        # efficiency.
        # Edit 2/8/16: "Sliding window" now overlaps.
        num_time_windows = 1
        time_window_length = T
        is_first_iteration = True  # NOTE(review): assigned but never read below.
        if "WINDOW_LENGTH" in params:
            time_window_length = params["WINDOW_LENGTH"]
            num_time_windows = T
        output_dir = params["OUTPUT_DIR"] + '_L' + str(len(layers)) + "_m" + str(
            params["MAGNITUDE"]) + "_v" + outDirSuffixRes
        print("Running td-INDP (T=" + str(T) + ", Window size=" + str(time_window_length) + ")")
        # Initial percolation calculations.
        results = indp(InterdepNet, 0, 1, layers, controlled_layers=controlled_layers,
                       functionality=functionality, co_location=co_location)
        indp_results = results[1]
        indp_results.add_components(0, INDPComponents.calculate_components(results[0], InterdepNet,
                                                                           layers=controlled_layers))
        for n in range(num_time_windows):
            print("-Time window (td-INDP)", n + 1, "/", num_time_windows)
            functionality_t = {}
            # Slide functionality matrix according to sliding time window.
            if functionality:
                for t in functionality:
                    if t in range(n, time_window_length + n + 1):
                        functionality_t[t - n] = functionality[t]
                # Pad the window by repeating the last known state.
                if len(functionality_t) < time_window_length + 1:
                    diff = time_window_length + 1 - len(functionality_t)
                    max_t = max(functionality_t.keys())
                    for d in range(diff):
                        functionality_t[max_t + d + 1] = functionality_t[max_t]
            # Run td-INDP.
            results = indp(InterdepNet, v_r, time_window_length + 1, layers,
                           controlled_layers=controlled_layers,
                           functionality=functionality_t, forced_actions=forced_actions,
                           co_location=co_location)
            if saveModel:
                save_INDP_model_to_file(results[0], output_dir + "/Model", n + 1)
            if "WINDOW_LENGTH" in params:
                # Sliding-window mode: only keep the first decision step of each window.
                indp_results.extend(results[1], t_offset=n + 1, t_start=1, t_end=2)
                # Modify network for recovery actions and calculate components.
                apply_recovery(InterdepNet, results[1], 1)
                indp_results.add_components(n + 1,
                                            INDPComponents.calculate_components(results[0],
                                                                                InterdepNet, 1,
                                                                                layers=controlled_layers))
            else:
                indp_results.extend(results[1], t_offset=0)
                for t in range(1, T):
                    # Modify network to account for recovery actions.
                    apply_recovery(InterdepNet, indp_results, t)
                    indp_results.add_components(1, INDPComponents.calculate_components(results[0], InterdepNet, t,
                                                                                       layers=controlled_layers))
    # Save results of current simulation.
    if save:
        if not os.path.exists(output_dir):
            os.makedirs(output_dir)
        indp_results.to_csv(output_dir, params["SIM_NUMBER"], suffix=suffix)
        if not os.path.exists(output_dir + '/agents'):
            os.makedirs(output_dir + '/agents')
        indp_results.to_csv_layer(output_dir + '/agents', params["SIM_NUMBER"], suffix=suffix)
    return indp_results
def run_info_share(params, layers=[1, 2, 3], T=1, validate=False, suffix=""):
    """Applies rounds of information sharing between INDP runs. Assumes each layer is
    controlled by a separate player.

    params["NUM_ITERATIONS"] determines how many rounds of sharing to perform.

    NOTE: This is still not fully functional.

    :param params: Global parameters.
    :param layers: Specifies which layers to optimize over.
    :param T: Number of timesteps to optimize over.
    :param validate: (Currently not used.)
    :param suffix: Suffix appended to the per-player output file names.
    """
    # Initialize network.
    InterdepNet = None
    num_iterations = params["NUM_ITERATIONS"]
    if "N" not in params:
        InterdepNet = initialize_network(base_dir="../data/INDP_7-20-2015/",
                                         sim_number=params['SIM_NUMBER'],
                                         magnitude=params["MAGNITUDE"])
        params["N"] = InterdepNet
    else:
        InterdepNet = params["N"]
    if "NUM_ITERATIONS" not in params:
        params["NUM_ITERATIONS"] = 1
    v_r = params["V"]
    # One result container per player (layer).
    player_strategies = {player: INDPResults() for player in layers}
    # Begin Sharing Process.
    for iteration in range(num_iterations):
        results = {}
        for player in layers:
            other_players = [layer_id for layer_id in layers if layer_id != player]
            print("P=", str(player), "i=", str(iteration))
            if iteration == 0:
                # Create functionality matrix. This corresponds to "OPTIMISTIC" or "PESSIMISTIC"
                # in the Sharkey paper. OPTIMISTIC implies that interdependencies are assumed to
                # be fixed whenever a player needs them to be; PESSIMISTIC assumes
                # interdependencies never become fixed.
                functionality = create_functionality_matrix(InterdepNet, T, other_players,
                                                            actions=None,
                                                            strategy_type="OPTIMISTIC")
            else:
                print("Next iteration!")
                # Share the other players' actions from the previous round.
                shared_actions = []
                for t in range(T):
                    for layer_id in other_players:
                        shared_actions.append(player_strategies[layer_id][t]["actions"])
                functionality = create_functionality_matrix(InterdepNet, T, other_players,
                                                            actions=shared_actions,
                                                            strategy_type="INFO_SHARE")
            params["N"] = InterdepNet.copy()
            results[player] = run_indp(params, layers, controlled_layers=[player], T=T,
                                       functionality=functionality, save=True,
                                       suffix="P" + str(player) + "_i" + str(iteration) + "_" + suffix,
                                       forced_actions=True)
        for player in layers:
            player_strategies[player] = results[player]
def run_inrg(params, layers=[1, 2, 3], validate=False, player_ordering=[3, 1], suffix=""):
    """Runs the INRG (sequential-player) game: each iteration lets each player in turn
    solve a one-step INDP over its own layer, then writes per-player results to CSV.

    :param params: Global parameters (uses "OUTPUT_DIR", "MAGNITUDE", "V",
        "SIM_NUMBER", "NUM_ITERATIONS", and optionally "N").
    :param layers: Layers (players) in the game.
    :param validate: (Currently not used.)
    :param player_ordering: Fixed turn order, or "RANDOM" for a fresh shuffle each iteration.
    :param suffix: Suffix appended to the per-player output file names.
    """
    InterdepNet = None
    output_dir = params["OUTPUT_DIR"] + "_m" + str(params["MAGNITUDE"]) + "_v" + str(params["V"])
    if "N" not in params:
        InterdepNet = initialize_network(base_dir="../data/INDP_7-20-2015/",
                                         sim_number=params['SIM_NUMBER'],
                                         magnitude=params["MAGNITUDE"])
        params["N"] = InterdepNet
    else:
        InterdepNet = params["N"]
    v_r = params["V"]
    # Initialize player result variables.
    player_strategies = {player: INDPResults() for player in layers}
    num_iterations = params["NUM_ITERATIONS"]
    # Shallow copy so the per-turn runs use a single iteration without
    # clobbering the caller's NUM_ITERATIONS.
    params_temp = dict(params)
    params_temp["NUM_ITERATIONS"] = 1
    for iteration in range(num_iterations):
        current_ordering = player_ordering
        if player_ordering == "RANDOM":
            current_ordering = random.sample(layers, len(layers))
        for player in current_ordering:
            print("Iteration", iteration, ", Player", player)
            # functionality=create_functionality_matrix(InterdepNet,1,[x for x in layers if x != P],strategy_type="REALISTIC")
            results = run_indp(params_temp, layers, controlled_layers=[player], T=1, save=False,
                               suffix="P" + str(player) + "_i" + str(iteration),
                               forced_actions=True)
            if iteration == 0:
                player_strategies[player] = results
            else:
                player_strategies[player].extend(results, t_offset=iteration + 1, t_start=1, t_end=2)
    if not os.path.exists(output_dir):
        os.makedirs(output_dir)
    for player in layers:
        player_strategies[player].to_csv(output_dir, params["SIM_NUMBER"],
                                         suffix="P" + str(player) + "_" + suffix)
def create_dynamic_param(params, N=None):
    """Build time-dependent demand (population-dislocation) data per layer.

    :param params: Global parameters dict; uses 'DYNAMIC_PARAMS', "MAGNITUDE"
        and "SIM_NUMBER".
    :param N: :class:`~infrastructure.InfrastructureNetwork`; required for the
        'incore' data type (ignored for 'shelby_adopted').
    :returns: dict mapping layer id -> DataFrame with columns
        ['time', 'node', 'current pop', 'total pop'].
    """
    print("Computing dislocation data...")
    dynamic_param_dict = params['DYNAMIC_PARAMS']
    return_type = dynamic_param_dict['RETURN']
    dp_dict_col = ['time', 'node', 'current pop', 'total pop']
    dynamic_params = {}
    if dynamic_param_dict['TYPE'] == 'shelby_adopted':
        print("Reading dislocation data from file...")
        net_names = {'water': 1, 'gas': 2, 'power': 3, 'telecom': 4}
        for key, val in net_names.items():
            filename = dynamic_param_dict['DIR'] + 'dynamic_demand_' + return_type + '_' + key + '.pkl'
            with open(filename, 'rb') as f:
                dd_df = pickle.load(f)
            # Keep only the rows for this scenario and sample set.
            dynamic_params[val] = dd_df[(dd_df['sce'] == params["MAGNITUDE"]) & \
                                        (dd_df['set'] == params["SIM_NUMBER"])]
    elif dynamic_param_dict['TYPE'] == 'incore':
        testbed = dynamic_param_dict['TESTBED']
        file_dir = dynamic_param_dict['DIR'] + testbed + '/Damage_scenarios/'
        if os.path.exists(file_dir + 'pop_dislocation_data.pkl'):
            # Cached result from a previous run.
            print("Reading dislocation data from file...")
            with open(file_dir + 'pop_dislocation_data.pkl', 'rb') as f:
                dynamic_params = pickle.load(f)
            return dynamic_params
        pop_dislocation = pd.read_csv(file_dir + 'pop-dislocation-results.csv', low_memory=False)
        mapping_data = pd.read_csv(file_dir + testbed + '_interdependency_table.csv', low_memory=False)
        T = 10
        for n, d in N.G.nodes(data=True):
            if n[1] not in dynamic_params.keys():
                dynamic_params[n[1]] = pd.DataFrame(columns=dp_dict_col)
            guid = d['data']['inf_data'].guid
            # Buildings served by this node.
            serv_area = mapping_data[mapping_data['substations_guid'] == guid]
            # compute dynamic_params
            num_dislocated = {t: 0 for t in range(T + 1)}
            total_pop = 0
            for _, bldg in serv_area.iterrows():
                pop_bldg_dict = pop_dislocation[pop_dislocation['guid'] == bldg['buildings_guid']]
                for _, hh in pop_bldg_dict.iterrows():
                    total_pop += hh['numprec'] if ~np.isnan(hh['numprec']) else 0
                    if hh['dislocated']:
                        # !!! Lumberton dislocation time parameters
                        dt_params = {'DS1': 1.00, 'DS2': 2.33, 'DS3': 2.49, 'DS4': 3.62,
                                     'white': 0.78, 'black': 0.88, 'hispanic': 0.83,
                                     'income': -0.00, 'insurance': 1.06}
                        race_white = 1 if hh['race'] == 1 else 0
                        race_black = 1 if hh['race'] == 2 else 0
                        hispan = hh['hispan'] if ~np.isnan(hh['hispan']) else 0
                        # !!! verify that the explanatory variables correspond to columns in dt_params
                        linear_term = hh['insignific'] * dt_params['DS1'] + \
                                      hh['moderate'] * dt_params['DS2'] + \
                                      hh['heavy'] * dt_params['DS3'] + \
                                      hh['complete'] * dt_params['DS4'] + \
                                      race_white * dt_params['white'] + \
                                      race_black * dt_params['black'] + \
                                      hispan * dt_params['hispanic'] + \
                                      np.random.choice([0, 1], p=[.15, .85]) * dt_params[
                                          'insurance']  # !!! insurance data
                        # hh['randincome']/1000*dt_params['income']+\#!!! income data
                        disloc_time = np.exp(linear_term)
                        return_time = math.ceil(disloc_time / 7)  # !!! assume each time step is one week
                        for t in range(return_time):
                            if t <= T:
                                num_dislocated[t] += hh['numprec'] if ~np.isnan(hh['numprec']) else 0
            for t in range(T + 1):
                values = [t, n[0], total_pop - num_dislocated[t], total_pop]
                # BUG FIX: DataFrame.append was deprecated in pandas 1.4 and
                # removed in pandas 2.0; build a one-row frame and concatenate.
                new_row = pd.DataFrame([dict(zip(dp_dict_col, values))], columns=dp_dict_col)
                dynamic_params[n[1]] = pd.concat([dynamic_params[n[1]], new_row],
                                                 ignore_index=True)
        # Cache the computed data for subsequent runs.
        with open(file_dir + 'pop_dislocation_data.pkl', 'wb') as f:
            pickle.dump(dynamic_params, f)
    return dynamic_params
def dynamic_parameters(N, original_N, t, dynamic_params):
    """Rescale each demand node's (negative) demand in N for time step t by the
    fraction of its original population still present, per dynamic_params.

    :param N: network whose demands are updated in place.
    :param original_N: pristine copy of the network holding the original demands.
    :param t: time step to look up in the per-layer population tables.
    :param dynamic_params: dict layer id -> DataFrame with columns
        'node', 'time', 'current pop', 'total pop'.
    """
    for node, attrs in N.G.nodes(data=True):
        inf_data = attrs['data']['inf_data']
        # Only demand (negative-valued) nodes are rescaled; supply nodes keep theirs.
        if inf_data.demand < 0:
            layer_df = dynamic_params[inf_data.net_id]
            row_mask = (layer_df['node'] == node[0]) & (layer_df['time'] == t)
            current_pop = layer_df.loc[row_mask, 'current pop'].iloc[0]
            total_pop = layer_df.loc[row_mask, 'total pop'].iloc[0]
            original_demand = original_N.G.nodes[node]['data']['inf_data'].demand
            inf_data.demand = original_demand * current_pop / total_pop
def save_INDP_model_to_file(model, outModelDir, t, l=0, suffix=''):
    """Write a Gurobi model (.lp) and its variable values / objective to files.

    :param model: Gurobi model to save (must be solved for the solution dump).
    :param outModelDir: Directory to write into (created if missing).
    :param t: Time step, encoded in the file names.
    :param l: Layer id, encoded in the file names. The default is 0.
    :param suffix: Extra suffix for the file names.
    :returns: None.
    """
    if not os.path.exists(outModelDir):
        os.makedirs(outModelDir)
    # Write model to file
    lname = "/Model_t%d_l%d_%s.lp" % (t, l, suffix)
    model.write(outModelDir + lname)
    model.update()
    # Write solution to file. BUG FIX: use a context manager so the file handle
    # is closed even if a Gurobi attribute access raises (e.g. unsolved model).
    sname = "/Solution_t%d_l%d_%s.txt" % (t, l, suffix)
    with open(outModelDir + sname, 'w') as sol_file:
        for vv in model.getVars():
            sol_file.write('%s %g\n' % (vv.varName, vv.x))
        sol_file.write('Obj: %g' % model.objVal)
def initialize_sample_network(layers=[1, 2]):
    """ Initializes the small hand-built sample network (2 layers of 6 nodes each,
    plus an optional third layer of 6 nodes when 3 is in `layers`).

    :param layers: Layer ids to include; only the presence of 3 changes the result.
    :returns: An interdependent InfrastructureNetwork.
    """
    InterdepNet = InfrastructureNetwork("sample_network")
    # Node key is (local_id, layer); value is the node's demand (negative = demand node).
    node_to_demand_dict = {(1, 1): 5, (2, 1): -1, (3, 1): -2, (4, 1): -2, (5, 1): -4, (6, 1): 4,
                           (7, 2): -2, (8, 2): 6, (9, 2): 1, (10, 2): -5, (11, 2): 4, (12, 2): -4}
    space_to_nodes_dict = {1: [(1, 1), (7, 2)], 2: [(2, 1), (8, 2)],
                           3: [(3, 1), (5, 1), (9, 2), (11, 2)], 4: [(4, 1), (6, 1), (10, 2), (12, 2)]}
    arc_list = [((1, 1), (2, 1)), ((1, 1), (4, 1)), ((1, 1), (3, 1)), ((6, 1), (4, 1)), ((6, 1), (5, 1)),
                ((8, 2), (7, 2)), ((8, 2), (10, 2)), ((9, 2), (7, 2)), ((9, 2), (10, 2)), ((9, 2), (12, 2)),
                ((11, 2), (12, 2))]
    interdep_list = [((1, 1), (7, 2)), ((2, 1), (8, 2)), ((9, 2), (3, 1)), ((4, 1), (10, 2))]
    failed_nodes = [(1, 1), (2, 1), (3, 1), (5, 1), (6, 1),
                    (7, 2), (8, 2), (9, 2), (11, 2), (12, 2)]
    if 3 in layers:
        # Optional third layer: six extra nodes, arcs and interdependencies.
        node_to_demand_dict.update({(13, 3): 3, (14, 3): 6, (15, 3): -5, (16, 3): -6,
                                    (17, 3): 4, (18, 3): -2})
        space_to_nodes_dict[1].extend([(13, 3), (14, 3), (15, 3)])
        space_to_nodes_dict[2].extend([(16, 3), (17, 3), (18, 3)])
        arc_list.extend([((13, 3), (15, 3)), ((14, 3), (15, 3)), ((14, 3), (16, 3)),
                         ((17, 3), (15, 3)), ((17, 3), (16, 3)), ((17, 3), (18, 3))])
        interdep_list.extend([((11, 2), (17, 3)), ((9, 2), (15, 3)), ((14, 3), (8, 2)), ((14, 3), (9, 2))])
        failed_nodes.extend([(14, 3), (15, 3), (16, 3), (17, 3), (18, 3)])
    # Build nodes; global_index follows dict insertion order.
    global_index = 1
    for n in node_to_demand_dict:
        nn = InfrastructureNode(global_index, n[1], n[0])
        nn.demand = node_to_demand_dict[n]
        nn.reconstruction_cost = abs(nn.demand)
        nn.oversupply_penalty = 50
        nn.undersupply_penalty = 50
        nn.resource_usage = 1
        if n in failed_nodes:
            nn.functionality = 0.0
            nn.repaired = 0.0
        InterdepNet.G.add_node((nn.local_id, nn.net_id), data={'inf_data': nn})
        global_index += 1
    # Assign geographical spaces to nodes.
    for s in space_to_nodes_dict:
        InterdepNet.S.append(InfrastructureSpace(s, 0))
        for n in space_to_nodes_dict[s]:
            InterdepNet.G.nodes[n]['data']['inf_data'].space = s
    # Intra-layer flow arcs.
    for a in arc_list:
        aa = InfrastructureArc(a[0][0], a[1][0], a[0][1])
        aa.flow_cost = 1
        aa.capacity = 50
        InterdepNet.G.add_edge((aa.source, aa.layer), (aa.dest, aa.layer), data={'inf_data': aa})
    # Cross-layer interdependency arcs.
    for g in interdep_list:
        aa = InfrastructureInterdepArc(g[0][0], g[1][0], g[0][1], g[1][1], 1.0)
        InterdepNet.G.add_edge((aa.source, aa.source_layer), (aa.dest, aa.dest_layer), data={'inf_data': aa})
    return InterdepNet
def plot_indp_sample(params, folderSuffix="", suffix=""):
    """Plot the restoration of the sample network over time.

    Reads the actions CSV produced by a previous run (under params['OUTPUT_DIR'])
    and draws one subplot per time step, marking still-damaged nodes with an X.

    :param params: Global parameters dict; uses "L", "V", "MAGNITUDE",
        "OUTPUT_DIR" and "SIM_NUMBER".
    :param folderSuffix: Suffix of the results folder to read from.
    :param suffix: Suffix of the actions file and of the saved image name.
    """
    plt.figure(figsize=(16, 8))
    if 3 in params["L"]:
        plt.figure(figsize=(16, 10))
    InterdepNet = initialize_sample_network(layers=params["L"])
    # Fixed layout coordinates for the two base layers (overrides spring layout).
    pos = nx.spring_layout(InterdepNet.G)
    pos[(1, 1)][0] = 0.5
    pos[(7, 2)][0] = 0.5
    pos[(2, 1)][0] = 0.0
    pos[(8, 2)][0] = 0.0
    pos[(3, 1)][0] = 2.0
    pos[(9, 2)][0] = 2.0
    pos[(4, 1)][0] = 1.5
    pos[(10, 2)][0] = 1.5
    pos[(5, 1)][0] = 3.0
    pos[(11, 2)][0] = 3.0
    pos[(6, 1)][0] = 2.5
    pos[(12, 2)][0] = 2.5
    pos[(2, 1)][1] = 0.0
    pos[(4, 1)][1] = 0.0
    pos[(6, 1)][1] = 0.0
    pos[(1, 1)][1] = 1.0
    pos[(3, 1)][1] = 1.0
    pos[(5, 1)][1] = 1.0
    pos[(8, 2)][1] = 2.0
    pos[(10, 2)][1] = 2.0
    pos[(12, 2)][1] = 2.0
    pos[(7, 2)][1] = 3.0
    pos[(9, 2)][1] = 3.0
    pos[(11, 2)][1] = 3.0
    # Node groups keyed by layer; <layer>1 = undamaged, <layer>2 = damaged.
    node_dict = {1: [(1, 1), (2, 1), (3, 1), (4, 1), (5, 1), (6, 1)],
                 11: [(4, 1)],  # Undamaged
                 12: [(1, 1), (2, 1), (3, 1), (5, 1), (6, 1)],  # Damaged
                 2: [(7, 2), (8, 2), (9, 2), (10, 2), (11, 2), (12, 2)],
                 21: [(10, 2)],
                 22: [(7, 2), (8, 2), (9, 2), (11, 2), (12, 2)]}
    arc_dict = {1: [((1, 1), (2, 1)), ((1, 1), (3, 1)), ((1, 1), (4, 1)), ((6, 1), (4, 1)),
                    ((6, 1), (5, 1))],
                2: [((8, 2), (7, 2)), ((8, 2), (10, 2)), ((9, 2), (7, 2)), ((9, 2), (10, 2)),
                    ((9, 2), (12, 2)), ((11, 2), (12, 2))]}
    if 3 in params["L"]:
        # Layout and groups for the optional third layer.
        pos[(13, 3)][0] = 0.5
        pos[(14, 3)][0] = 0.0
        pos[(15, 3)][0] = 2.0
        pos[(16, 3)][0] = 1.5
        pos[(17, 3)][0] = 3.0
        pos[(18, 3)][0] = 2.5
        pos[(13, 3)][1] = 5.0
        pos[(14, 3)][1] = 4.0
        pos[(15, 3)][1] = 5.0
        pos[(16, 3)][1] = 4.0
        pos[(17, 3)][1] = 5.0
        pos[(18, 3)][1] = 4.0
        node_dict[3] = [(13, 3), (14, 3), (15, 3), (16, 3), (17, 3), (18, 3)]
        node_dict[31] = [(13, 3)]
        node_dict[32] = [(14, 3), (15, 3), (16, 3), (17, 3), (18, 3)]
        arc_dict[3] = [((13, 3), (15, 3)), ((14, 3), (15, 3)), ((14, 3), (16, 3)),
                       ((17, 3), (15, 3)), ((17, 3), (16, 3)), ((17, 3), (18, 3))]
    # Node labels: "<local_id>[<demand>]".
    labels = {}
    for n, d in InterdepNet.G.nodes(data=True):
        labels[n] = "%d[%d]" % (n[0], d['data']['inf_data'].demand)
    # Slightly offset positions used for the damage markers.
    pos_moved = {}
    for key, value in pos.items():
        pos_moved[key] = [0, 0]
        pos_moved[key][0] = pos[key][0] - 0.2
        pos_moved[key][1] = pos[key][1] + 0.2
    v_r = params["V"]
    if isinstance(v_r, (int)):
        totalResource = v_r
    else:
        totalResource = sum([val for _, val in v_r.items()])
    output_dir = params["OUTPUT_DIR"] + '_L' + str(len(params["L"])) + '_m' + str(params["MAGNITUDE"]) + "_v" + str(
        totalResource) + folderSuffix
    action_file = output_dir + "/actions_" + str(params["SIM_NUMBER"]) + "_" + suffix + ".csv"
    # Parse the actions CSV: one "<t>,<action>" row per line (header skipped).
    actions = {0: []}
    if os.path.isfile(action_file):
        with open(action_file) as f:
            lines = f.readlines()[1:]
            for line in lines:
                data = line.split(",")
                t = int(data[0])
                action = str.strip(data[1])
                if t not in actions:
                    actions[t] = []
                actions[t].append(action)
    T = max(actions.keys())
    for t, value in actions.items():
        plt.subplot(2, int((T + 1) / 2) + 1, t + 1, aspect='equal')
        plt.title('Time = %d' % t)
        # Move each repaired node from the damaged group to the undamaged group.
        for a in value:
            data = a.split(".")
            node_dict[int(data[1]) * 10 + 1].append((int(data[0]), int(data[1])))
            node_dict[int(data[1]) * 10 + 2].remove((int(data[0]), int(data[1])))
        nx.draw(InterdepNet.G, pos, node_color='w')
        nx.draw_networkx_labels(InterdepNet.G, labels=labels, pos=pos,
                                font_color='w', font_family='CMU Serif')  # ,font_weight='bold'
        nx.draw_networkx_nodes(InterdepNet.G, pos, nodelist=node_dict[1], node_color='#b51717', node_size=1100,
                               alpha=0.7)
        nx.draw_networkx_nodes(InterdepNet.G, pos, nodelist=node_dict[2], node_color='#005f98', node_size=1100,
                               alpha=0.7)
        nx.draw_networkx_nodes(InterdepNet.G, pos_moved, nodelist=node_dict[12], node_color='k', node_shape="X",
                               node_size=150)
        nx.draw_networkx_nodes(InterdepNet.G, pos_moved, nodelist=node_dict[22], node_color='k', node_shape="X",
                               node_size=150)
        nx.draw_networkx_edges(InterdepNet.G, pos, edgelist=arc_dict[1], width=1, alpha=0.9, edge_color='r')
        nx.draw_networkx_edges(InterdepNet.G, pos, edgelist=arc_dict[2], width=1, alpha=0.9, edge_color='b')
        if 3 in params["L"]:
            nx.draw_networkx_nodes(InterdepNet.G, pos, nodelist=node_dict[3], node_color='#009181', node_size=1100,
                                   alpha=0.7)
            nx.draw_networkx_nodes(InterdepNet.G, pos_moved, nodelist=node_dict[32], node_color='k', node_shape="X",
                                   node_size=150)
            nx.draw_networkx_edges(InterdepNet.G, pos, edgelist=arc_dict[3], width=1, alpha=0.9, edge_color='g')
    plt.tight_layout()
    plt.savefig(output_dir + '/plot_net' + suffix + '.png', dpi=300)
|
<filename>tests/util.py
import os
from functools import partial
import pytest
from ldap3 import Connection
from rest_tools.client import RestClient
from rest_tools.server import from_environment
import requests
from krs import bootstrap
from krs.token import get_token
from krs import ldap
from krs import rabbitmq
@pytest.fixture
def keycloak_bootstrap(monkeypatch):
    """Create a throwaway Keycloak realm and client for a test, yield an admin
    RestClient pointed at it, and tear everything down afterwards."""
    for env_name, env_value in (('KEYCLOAK_REALM', 'testrealm'),
                                ('KEYCLOAK_CLIENT_ID', 'testclient'),
                                ('USERNAME', 'admin'),
                                ('PASSWORD', '<PASSWORD>')):
        monkeypatch.setenv(env_name, env_value)
    secret = bootstrap.bootstrap()
    monkeypatch.setenv('KEYCLOAK_CLIENT_SECRET', secret)

    # make sure rabbitmq is set up for tests
    admin_token = bootstrap.get_token()
    bootstrap.add_rabbitmq_listener(realm=os.environ['KEYCLOAK_REALM'], token=admin_token)

    # Token factory bound to the test client credentials.
    token_fn = partial(get_token, os.environ['KEYCLOAK_URL'],
                       client_id='testclient',
                       client_secret=secret,
                       )
    rest_client = RestClient(
        f'{os.environ["KEYCLOAK_URL"]}/auth/admin/realms/testrealm',
        token=token_fn,
        retries=0,
    )
    yield rest_client

    # Teardown: remove the service role and the realm created above.
    admin_token = bootstrap.get_token()
    bootstrap.delete_service_role('testclient', token=admin_token)
    bootstrap.delete_realm('testrealm', token=admin_token)
@pytest.fixture
def ldap_bootstrap(monkeypatch):
    """Point the LDAP config at test-only OUs, wipe and recreate them, yield the
    LDAP helper object, and wipe them again on teardown."""
    monkeypatch.setenv('LDAP_USER_BASE', 'ou=peopleTest,dc=icecube,dc=wisc,dc=edu')
    monkeypatch.setenv('LDAP_GROUP_BASE', 'ou=groupTest,dc=icecube,dc=wisc,dc=edu')
    LDAP_GROUPS_BASE = 'ou=groupsTest,dc=icecube,dc=wisc,dc=edu'
    monkeypatch.setenv('LDAP_GROUPS_BASE', LDAP_GROUPS_BASE)
    obj = ldap.LDAP()
    config = obj.config
    # Direct admin connection used for setup/teardown, bypassing the helper.
    c = Connection(config['LDAP_URL'], user=config['LDAP_ADMIN_USER'], password=config['LDAP_ADMIN_PASSWORD'], auto_bind=True)
    def cleanup():
        # Delete all child entries of each test OU, then the OU itself.
        ret = c.search(config["LDAP_USER_BASE"], '(uid=*)', attributes=['uid'])
        if ret:
            uids = [e['uid'] for e in c.entries]
            for uid in uids:
                c.delete(f'uid={uid},{config["LDAP_USER_BASE"]}')
        c.delete(config["LDAP_USER_BASE"])
        ret = c.search(config["LDAP_GROUP_BASE"], '(cn=*)', attributes=['cn'])
        if ret:
            names = [e['cn'] for e in c.entries]
            for cn in names:
                c.delete(f'cn={cn},{config["LDAP_GROUP_BASE"]}')
        c.delete(config["LDAP_GROUP_BASE"])
        ret = c.search(LDAP_GROUPS_BASE, '(cn=*)', attributes=['cn'])
        if ret:
            names = [e['cn'] for e in c.entries]
            for cn in names:
                c.delete(f'cn={cn},{LDAP_GROUPS_BASE}')
        c.delete(LDAP_GROUPS_BASE)
    # Clear any leftovers from a previous (failed) run, then create fresh OUs.
    cleanup()
    args = {
        'ou': 'peopleTest',
    }
    c.add(config["LDAP_USER_BASE"], ['organizationalUnit', 'top'], args)
    args = {
        'ou': 'groupTest',
    }
    c.add(config["LDAP_GROUP_BASE"], ['organizationalUnit', 'top'], args)
    args = {
        'ou': 'groupsTest',
    }
    c.add(LDAP_GROUPS_BASE, ['organizationalUnit', 'top'], args)
    try:
        yield obj
    finally:
        cleanup()
@pytest.fixture(scope="session")
def rabbitmq_bootstrap():
    """Session fixture: wait until the RabbitMQ management API answers, then
    create the 'keycloak_guest' user used by the tests.

    Raises Exception if RabbitMQ does not respond within ~100 attempts.
    """
    import time  # BUG FIX: `time` is used below but was never imported in this module
    config = from_environment({
        'RABBITMQ_MGMT_URL': 'http://localhost:15672',
        'RABBITMQ_ADMIN_USER': 'admin',
        'RABBITMQ_ADMIN_PASSWORD': '<PASSWORD>',
    })
    auth = (config['RABBITMQ_ADMIN_USER'], config['RABBITMQ_ADMIN_PASSWORD'])
    # Poll the management API until it responds, pausing 1s between attempts.
    for _ in range(100):
        r = requests.get(f'{config["RABBITMQ_MGMT_URL"]}/api/users', auth=auth)
        try:
            r.raise_for_status()
        except Exception:
            time.sleep(1)
        else:
            break
    else:
        raise Exception('RabbitMQ is not responding!')
    rabbitmq.create_user('keycloak_guest', 'guest')
|
<gh_stars>10-100
from __future__ import absolute_import, unicode_literals
from collections import OrderedDict
from django.urls import reverse, reverse_lazy
from django.utils.safestring import mark_safe
from django.utils.translation import gettext_lazy as _
from arctic.generics import (
CreateView,
DeleteView,
ListView,
TemplateView,
UpdateView,
)
from arctic.generics import collapsible_gettext as _c
from .forms import AdvancedArticleSearchForm, ArticleForm, FiltersAndSearchForm
from .inlines import ImagesInline
from .models import Article, Category, Tag
class DashboardView(TemplateView):
    """Landing page ("Dashboard") of the example application."""

    template_name = "arctic/index.html"
    page_title = "Dashboard"
    permission_required = "view_dashboard"

    def get_context_data(self, **kwargs):
        # No dashboard-specific context yet; defer entirely to TemplateView.
        return super(DashboardView, self).get_context_data(**kwargs)
class ArticleListView(ListView):
    """Paginated Article list with search, filtering, CSV export, per-row
    action links and custom field rendering."""
    paginate_by = 10
    model = Article
    fields = ["title", "description", "published", "category", "tags"]
    ordering_fields = ["title", "description", "category", "published"]
    search_fields = ["title"]
    simple_search_form_class = FiltersAndSearchForm
    advanced_search_form_class = AdvancedArticleSearchForm
    breadcrumbs = (("Home", "index"), ("Article List", None))
    # (name, url name, icon) triples rendered per row.
    action_links = [
        ("detail", "articles:detail", "fa-edit"),
        ("delete", "articles:delete", "fa-trash"),
    ]
    field_links = {
        "title": "articles:detail",
        "category": ("articles:category-detail", "category_id"),
    }
    tool_links_collapse = 2
    tool_links = [
        (_("Create Article"), "articles:create", "fa-plus"),
        (_("Export CSV"), ("get_export_url", "csv"), "fa-download"),
    ]
    field_classes = {"published": ""}
    permission_required = "view_article"
    allowed_exports = ["csv"]
    def get_field_actions(self, obj):
        """Hide the delete action link for published articles."""
        actions = list(self.action_links)
        if obj.published:
            actions.pop(1)  # delete
        return actions
    def get_category_field(self, row_instance):
        """Render the category column as the category's name."""
        return row_instance.category.name
    def get_published_field(self, row_instance):
        """Render the published column as a check/minus icon."""
        symbol = "fa-check" if row_instance.published else "fa-minus"
        return mark_safe('<i class="fa {}"></i>'.format(symbol))
    def get_category_ordering_field(self):
        """Order the category column by the related category's name."""
        return "category__name"
    def get_published_field_classes(self, row_instance):
        """CSS class for the published column cell."""
        return "online" if row_instance.published else "offline"
class ArticleUpdateView(UpdateView):
    """Edit form for an Article, with an inline Images formset and a
    collapsible field layout."""
    page_title = _("Edit Article")
    permission_required = "change_article"
    model = Article
    # success_url = reverse_lazy('articles:list')
    inlines = [ImagesInline]
    inline_sort_field = "order"
    form_class = ArticleForm
    actions = [(_("Cancel"), "cancel"), (_("Save"), "submit")]
    # Fieldsets; "name|4" sets a column width, the True flag makes a set collapsed.
    layout = OrderedDict(
        [
            (_c("Basic Details"), ["title", ["category|4", "tags"]]),
            (
                _c("Body|Extra Information for this fieldset", True),
                ["description"],
            ),
            (_("Extended Details"), [["published|4", "updated_at"]]),
        ]
    )
    def get_success_url(self):
        """Return to the article's detail page after saving."""
        return reverse("articles:detail", args=(self.object.pk,))
class ArticleCreateView(CreateView):
    """Creation form for an Article; mirrors ArticleUpdateView's layout."""
    page_title = _("Create Article")
    # fields = ['title', 'description', 'tags', 'category', 'published']
    model = Article
    inlines = [ImagesInline]
    form_class = ArticleForm
    permission_required = "add_article"
    layout = OrderedDict(
        [
            (_c("Basic Details"), ["title", ["category|4", "tags"]]),
            (
                _c("Body|Extra Information for this fieldset", True),
                ["description"],
            ),
            (_("Extended Details"), [["published|4", "updated_at"]]),
        ]
    )
    def get_success_url(self):
        """Go to the new article's detail page after creation."""
        return reverse("articles:detail", args=(self.object.pk,))
class ArticleDeleteView(DeleteView):
    """Confirm-and-delete view for an Article."""
    model = Article
    # success_url = reverse_lazy('articles:list')
    permission_required = "delete_article"
class CategoryListView(ListView):
    """Sortable Category list; detail/create open in iframe modals."""
    page_title = _("Categories")
    model = Category
    fields = ["name"]
    field_links = {"name": "articles:category-detail"}
    tool_links = [(_("Create Category"), "articles:category-create")]
    # Open these URLs in modal iframes instead of full pages.
    modal_links = {
        "articles:category-detail": {"type": "iframe", "height": 377},
        "articles:category-create": {"type": "iframe", "height": 256},
    }
    permission_required = "view_category"
    sorting_field = "order"
    action_links = [("delete", "articles:category-delete", "fa-trash")]
class CategoryArticlesListView(ArticleListView):
    """Article list restricted to one category, shown as a tab of the category editor."""

    def dispatch(self, request, *args, **kwargs):
        # Capture the category pk from the URL; it is popped so the parent
        # dispatch does not receive it as an unexpected keyword argument.
        self.pk = kwargs.pop("pk")
        return super(CategoryArticlesListView, self).dispatch(
            request, *args, **kwargs
        )

    # disable some settings from the default article list
    tool_links = []
    page_title = "Edit Category: Articles"
    breadcrumbs = None
    tabs = [
        ("Detail", ("articles:category-detail", "pk")),
        ("Related Articles", ("articles:category-articles-list", "pk")),
    ]
    permission_required = "view_category"

    def get_queryset(self):
        """Restrict the inherited article queryset to the captured category."""
        qs = super(CategoryArticlesListView, self).get_queryset()
        return qs.filter(category_id=self.pk)
class CategoryUpdateView(UpdateView):
    """Edit a Category; tabbed together with its related-articles list."""

    page_title = _("Edit Category")
    model = Category
    fields = "__all__"
    # success_url = reverse_lazy('articles:category-list')
    tabs = [
        ("Detail", ("articles:category-detail", "pk")),
        ("Related Articles", ("articles:category-articles-list", "pk")),
    ]
    actions = [
        (_("Delete"), ("articles:category-delete", "pk")),
        (_("Cancel"), "cancel"),
        (_("Save"), "submit"),
    ]
    permission_required = "change_category"
class CategoryCreateView(CreateView):
    """Create a Category; cooperates with arctic's modal (iframe) workflow."""

    page_title = _("Create Category")
    model = Category
    fields = ["name"]
    permission_required = "add_category"

    def get_success_url(self):
        """Return the post-save redirect target, modal-aware."""
        # When opened inside a modal, redirect so arctic closes the iframe
        # and refreshes the parent page.
        if self.request.GET.get("inmodal"):
            return reverse("arctic:redirect_to_parent")
        # NOTE(review): calling in_modal() on the non-modal branch looks odd —
        # confirm this is the intended arctic idiom.
        return self.in_modal(str(self.success_url))  # success_url may be lazy
class CategoryDeleteView(DeleteView):
    """Delete a Category and return to the category list."""

    model = Category
    redirect = True  # presumably skips the confirmation page — TODO confirm in arctic
    success_url = reverse_lazy("articles:category-list")
    permission_required = "delete_category"
class TagListView(ListView):
    """Tag list with CSV export enabled."""

    page_title = _("Tags")
    model = Tag
    fields = ["term"]
    field_links = {"term": "articles:tag-detail"}
    tool_links = [(_("Create Tag"), "articles:tag-create")]
    permission_required = "view_tag"
    allow_csv_export = True
class TagUpdateView(UpdateView):
    """Edit a Tag and return to the tag list on success."""

    page_title = _("Edit Tag")
    model = Tag
    fields = "__all__"
    success_url = reverse_lazy("articles:tag-list")
    permission_required = "change_tag"
class TagCreateView(CreateView):
    """Create a Tag and redirect to its detail page."""

    page_title = _("Create Tag")
    model = Tag
    fields = ["term"]
    permission_required = "add_tag"

    def get_success_url(self):
        """Redirect to the detail page of the newly created tag."""
        return reverse("articles:tag-detail", args=(self.object.pk,))
class TagDeleteView(DeleteView):
    """Delete a Tag and return to the tag list."""

    model = Tag
    success_url = reverse_lazy("articles:tag-list")
    permission_required = "delete_tag"
|
import pandas as pd
import numpy as np
import os
import pandas as pd
from sklearn.neighbors import KDTree
import pickle
import random
### Building database and query files for evaluation

# Resolve all paths relative to this script's own location.
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
# Folder holding the jittered reference (database) point clouds.
base_path= "../jittered_dataset_4096/"#"../partial_dataset/"
# Folder holding the partial-scan query point clouds.
query_path = '../partial_radius_4096/'
# One sub-folder per run/sequence; sorted for a deterministic ordering.
all_folders=sorted(os.listdir(os.path.join(BASE_DIR,base_path)))
print(all_folders)
def check_in_test_set(northing, easting, points, x_width, y_width):
    """Return True when (northing, easting) lies strictly inside the
    axis-aligned rectangle of half-widths (x_width, y_width) centred on
    any of the given points."""
    return any(
        p[0] - x_width < northing < p[0] + x_width
        and p[1] - y_width < easting < p[1] + y_width
        for p in points
    )
##########################################
def output_to_file(output, filename):
    """Pickle *output* to *filename* using the highest available protocol."""
    with open(filename, 'wb') as sink:
        pickle.dump(output, sink, protocol=pickle.HIGHEST_PROTOCOL)
    print("Done ", filename)
def get_sets_dict(filename):
    """Load and return the pickled trajectories structure stored at *filename*.

    Expected layout:
    [key_dataset: {key_pointcloud: {'query': file, 'northing': value,
    'easting': value}}, ...]
    """
    with open(filename, 'rb') as src:
        loaded = pickle.load(src)
    print("Database Trajectories Loaded.")
    return loaded
database_sets = get_sets_dict('3d_jittered_spaces_evaluation_database.pickle')

# Build one KDTree per folder over the centroid coordinates of that folder.
database_trees = []
for folder in all_folders:
    print("Training tree for:", folder)
    df_locations = pd.read_csv(
        os.path.join(base_path, folder, "pointcloud_centroids_4m_0.5.csv"), sep=','
    )
    # FIX: DataFrame.append() was removed in pandas 2.0 and the old
    # row-by-row copy into an accumulator frame was quadratic; the
    # accumulator held exactly the rows of df_locations, so index the
    # coordinate columns directly instead.
    database_tree = KDTree(df_locations[['northing', 'easting', 'alting']])
    database_trees.append(database_tree)
def construct_query_sets(partial_path, pointcloud_fols, filename):
    """Build and pickle the evaluation query set for one partial-scan folder.

    Parameters
    ----------
    partial_path : str
        Folder name under the module-level ``query_path`` holding the scans.
    pointcloud_fols : str
        Sub-folder (e.g. "/pointcloud_4m_bin/") containing the .bin clouds.
    filename : str
        CSV of per-cloud centroids with northing/easting/alting columns.

    Side effects: writes '3d_jittered_{partial_path}_evaluation_query.pickle'.
    Relies on module-level ``database_trees``, ``database_sets``, ``BASE_DIR``
    and ``query_path``.
    """
    df_locations = pd.read_csv(
        os.path.join(BASE_DIR, query_path, partial_path, filename), sep=','
    )
    # Map each centroid row to its on-disk .bin point-cloud file.
    df_locations['timestamp'] = (
        partial_path + pointcloud_fols + df_locations['timestamp'].astype(str) + '.bin'
    )
    df_locations = df_locations.rename(columns={'timestamp': 'file'})

    # One query entry per point cloud. (The old code also built a KDTree over
    # the queries via deprecated DataFrame.append(); the tree was never used,
    # so that dead work is dropped.)
    test = {}
    for _, row in df_locations.iterrows():
        test[len(test)] = {
            'query': row['file'],
            'northing': row['northing'],
            'easting': row['easting'],
            'alting': row['alting'],
        }
    test_sets = [test]

    print("Database (Tree) sets:", len(database_sets), "; Test (Tree) sets:", len(test_sets))
    # For every query, record the indices of positive matches (within 20 m)
    # found in each database KDTree, keyed by the database index i.
    for i, tree in enumerate(database_trees):
        for j in range(len(test_sets)):
            for key in range(len(test_sets[j])):
                coor = np.array([[test_sets[j][key]["northing"],
                                  test_sets[j][key]["easting"],
                                  test_sets[j][key]["alting"]]])
                index = tree.query_radius(coor, r=20)  # r=4 in earlier variants
                # indices of the positive matches in database i of each query (key)
                test_sets[j][key][i] = index[0].tolist()
    output_to_file(test_sets, '3d_jittered_{}_evaluation_query.pickle'.format(partial_path))
#For Oxford
#runs_folder = "oxford/"
#train_folders = all_folders[2::2]
# Build query sets for the plain partial scans at each crop radius (0.25..2.0).
for radius in np.arange(0.25,2.1,0.25):
    #partial_path = os.path.join(BASE_DIR,query_path+'partial_radius_'+str(radius)+"_4096")# #folders.append(all_folders[index])
    partial_path = 'partial_radius_'+str(radius)+"_4096"# #folders.append(all_folders[index])
    print(partial_path)
    construct_query_sets(partial_path, "/pointcloud_4m_bin/", "pointcloud_centroids_4m.csv")#, all_folders[index])
# Same sweep for the RANSAC-filtered partial scans.
for radius in np.arange(0.25,2.1,0.25):
    partial_path = 'ransac_partial_radius_'+str(radius)+"_4096"#
    print(partial_path)
    construct_query_sets(partial_path, "/pointcloud_4m_bin/", "pointcloud_centroids_4m.csv")#, all_folders[index])
#print(all_folders)
#print("training:",train_folders)
#
|
# Dictionary mapping romanized character identifiers to their Simplified
# Chinese display names. Commented-out entries are known names that are
# deliberately excluded from the active mapping.
characterSChineseDic = {
'Reimu_Hakurei': '博丽灵梦',
'Marisa_Kirisame': '雾雨魔理沙',
# 'SinGyoku': '神玉',
'Mima': '魅魔',
# 'Kikuri': '菊理',
'Konngara': '矜羯罗',
# 'YuugenMagan': '幽幻魔眼',
# 'Elis': '依莉斯',
# 'Sariel': '萨丽爱尔',
# 'Onmyou_gyoku': '阴阳玉',
# 'Yamaaruki': '山步',
'Genjii': '玄爷',
'Rika': '里香',
# 'Meira': '明罗',
# 'Marisa': '魔梨沙',
# 'Furawa_sensha': 'Flower~战车',
# 'ibiruai_sigma': '邪眼西格玛',
'Ellen': '爱莲',
# 'Kotohime': '小兔姬',
'Kana_Anaberal': '卡娜·安娜贝拉尔',
# 'Asakura_Rikako': '朝仓理香子',
'Chiyuri_Kitashirakawa': '北白河千百合',
'Yumemi_Okazaki': '冈崎梦美',
# 'Mimichan': '咪咪号',
# 'Ru_koto': '留琴',
# 'Ma_maru_chi': '玛玛露奇',
# 'Sokuratesu': '苏格拉底',
# 'Orange': '奥莲姬',
'Kurumi': '胡桃',
# 'Elliy': '艾丽',
# 'Yuka': '幽香',
# 'Mugetu': '梦月',
# 'Gengetu': '幻月',
'Sara': '萨拉',
# 'Luize': '露易兹',
# 'Alice': '爱丽丝',
'Yuki': '雪',
'Mai': '舞',
'Yumeko': '梦子',
'Shinki': '神绮',
'Rumia': '露米娅',
'Daiyousei': '大妖精',
'Cirno': '琪露诺',
'hong_meiling': '红美铃',
'Koakuma': '小恶魔',
'Patchouli_Knowledge': '帕秋莉·诺蕾姬',
'Sakuya_Izayoi': '十六夜咲夜',
'Remilia_Scarlet': '蕾米莉亚·斯卡蕾特',
'Flandre_Scarlet': '芙兰朵露·斯卡蕾特',
'Rin_Satsuki': '冴月麟',
# 'Kedama': '毛玉',
'Letty_Whiterock': '蕾蒂·霍瓦特洛克',
'Chen': '橙',
'Alice_Margatroid': '爱丽丝·玛格特洛依德',
'Lily_White': '莉莉霍瓦特',
'Lunasa_Prismriver': '露娜萨·普莉兹姆利巴',
'Merlin_Prismriver': '梅露兰·普莉兹姆利巴',
'Lyrica_Prismriver': '莉莉卡·普莉兹姆利巴',
'Youmu_Konpaku': '魂魄妖梦',
'Yuyuko_Saigyouji': '西行寺幽幽子',
'Ran_Yakumo': '八云蓝',
'Yukari_Yakumo': '八云紫',
'Youki_Konpaku': '魂魄妖忌',
# 'Leira_Prismriver': '蕾拉·普莉兹姆利巴',
'Suika_Ibuki': '伊吹萃香',
'Wriggle_Nightbug': '莉格露·奈特巴格',
'Mystia_Lorelei': '米斯蒂娅·萝蕾拉',
'Keine_Kamishirasawa': '上白泽慧音',
'Tewi_Inaba': '因幡帝',
'Reisen_Udongein_Inaba': '铃仙·优昙华院·因幡',
'Eirin_Yagokoro': '八意永琳',
'Kaguya_Houraisan': '蓬莱山辉夜',
'fujiwara_no_mokou': '藤原妹红',
'Aya_Shameimaru': '射命丸文',
'Medicine_Melancholy': '梅蒂欣·梅兰可莉',
'Yuuka_Kazami': '风见幽香',
'Komachi_Onozuka': '小野塚小町',
'Shikieiki_Yamaxanadu': '四季映姬·亚玛撒那度',
'Shizuha_Aki': '秋静叶',
'Minoriko_Aki': '秋穰子',
'Hina_Kagiyama': '键山雏',
'nitori_kawashiro': '河城荷取',
'momiji_inubashiri': '犬走椛',
'Kochiya_Sanae': '东风谷早苗',
'Kanako_Yasaka': '八坂神奈子',
'Suwako_Moriya': '洩矢诹访子',
'Iku_Nagae': '永江衣玖',
'tenshi_hinanai': '比那名居天子',
'Kisume': '琪斯美',
'Yamame_Kurodani': '黑谷山女',
'Parsee_Mizuhashi': '水桥帕露西',
'yuugi_hoshiguma': '星熊勇仪',
'Satori_Komeiji': '古明地觉',
'Rin_Kaenbyou': '火焰猫燐',
'utsuho_reiuji': '灵乌路空',
'Koishi_Komeiji': '古明地恋',
'Nazrin': '娜兹玲',
'Kogasa_Tatara': '多多良小伞',
'ichirin_kumoi': '云居一轮',
'Unzan': '云山',
'minamitsu_murasa': '村纱水蜜',
'shou_toramaru': '寅丸星',
'byakuren_hijiri': '圣白莲',
'Nue_Houjuu': '封兽鵺',
# 'Myouren': '命莲',
# 'Oo-Namazu': '大鲶鱼',
# 'Taisai Sei-kun': '太岁星君',
# 'Daidarabotchi': '大太法师',
# 'Hisoutensoku': '非想天则',
# 'Titania': '提泰妮娅',
# 'Goliath_Doll': '歌利亚人偶',
'Hatate_Himekaidou': '姬海棠果',
'Kyouko_Kasodani': '幽谷响子',
'Yoshika_Miyako': '宫古芳香',
'seiga_kaku': '霍青娥',
'soga_no_tojiko': '苏我屠自古',
'Mononobe_no_Futo': '物部布都',
'Toyosatomimi_no_Miko': '丰聪耳神子',
'mamizou_futatsuiwa': '二岩猯藏',
'Hata_no_Kokoro': '秦心',
# 'Hata_no_Kawakatsu': '秦河胜',
'Wakasagihime': '若鹭姬',
'Sekibanki': '赤蛮奇',
'kagerou_imaizumi': '今泉影狼',
'Benben_Tsukumo': '九十九弁弁',
'Yatsuhashi_Tsukumo': '九十九八桥',
'Seija_Kijin': '鬼人正邪',
'Shinmyoumaru_Sukuna': '少名针妙丸',
'Raiko_Horikawa': '堀川雷鼓',
'Sumireko_Usami': '宇佐见堇子',
'Seiran': '清兰',
'Ringo': '铃瑚',
'Doremy_Sweet': '哆来咪·苏伊特',
'sagume_kishin': '稀神探女',
'Clownpiece': '克劳恩皮丝',
'Junko': '纯狐',
'Hecatia_Lapislazuli': '赫卡提亚·拉碧斯拉祖利',
'joon_yorigami': '依神女苑',
'shion_yorigami': '依神紫苑',
'Eternity_Larva': '爱塔妮缇拉尔瓦',
'Nemuno_Sakata': '坂田合欢',
'Aunn_Komano': '高丽野阿吽',
'Narumi_Yatadera': '矢田寺成美',
'Satono_Nishida': '尔子田里乃',
'Mai_Teireida': '丁礼田舞',
'Okina_Matara': '摩多罗隐岐奈',
# 'Ebisu_Eika': '戎璎花',
'Urumi_Ushizaki': '牛崎润美',
'Kutaka_Niwatari': '庭渡久侘歌',
# 'Yachie_Kitcho': '吉吊八千慧',
'Mayumi_Joutougu': '杖刀偶磨弓',
'Keiki_Haniyasushin': '埴安神袿姬',
# 'Saki_Kurokoma': '骊驹早鬼',
'Rinnosuke_Morichika': '森近霖之助',
'Tokiko': '朱鹭子',
# 'Kawauso_no_Shoujyo': '水獭少女',
'Sunny_Milk': '桑尼米尔克',
'Luna_Child': '露娜切露德',
'Star_Sapphire': '斯塔萨菲雅',
'Watatsuki_no_Toyohime': '绵月丰姬',
'Watatsuki_no_Yorihime': '绵月依姬',
# 'Reisen': '铃仙二号',
'Hieda_no_Akyuu': '稗田阿求',
'kasen_ibara': '茨木华扇',
# 'Ibaraki_Douji_no_Ude': '茨木童子之臂',
# 'Koutei': '黄帝',
# 'Kume': '久米',
# 'Mukou': '务光',
# 'Sendai_Shirou': '仙台四郎',
# 'Kanda': '竿打',
# 'Houso': '彭祖',
# 'Zashiki_warashi': '座敷童子',
# 'Hobgoblin': '地精',
# 'Manzairaku': '万岁乐',
'Kosuzu_Motoori': '本居小铃',
# 'Enenra': '烟烟罗',
# 'Chupacabra': '卓柏卡布拉',
# 'JyaRyu': '邪龙',
# 'Kutsutsura': '沓颊',
# 'Shoujo_no_Onryou': '怨灵少女',
# 'Yuuten_Shyounin': '祐天上人',
# 'The Fortune-teller': '易者',
# 'Anxious Moustached Villager': '长相酷似周杰伦的村民',
# 'Salt Merchant': '盐家老板',
# 'Unnamed Umatsuki': '马凭',
# 'Kanami': '观阿弥',
# 'Zeami': '世阿弥',
# 'Okunoda_Miyoi': '奥野田美宵',
}
|
# -*- coding: utf-8 -*-
# copyright: sktime developers, BSD-3-Clause License (see LICENSE file)
"""
Abstract base class for time series classifiers.
class name: BaseClassifier
Defining methods:
fitting - fit(self, X, y)
predicting - predict(self, X)
- predict_proba(self, X)
Inspection methods:
hyper-parameter inspection - get_params()
fitted parameter inspection - get_fitted_params()
State:
fitted model/strategy - by convention, any attributes ending in "_"
fitted state flag - is_fitted (property)
fitted state inspection - check_is_fitted()
"""
__all__ = [
"BaseClassifier",
]
__author__ = ["mloning", "fkiraly", "TonyBagnall", "MatthewMiddlehurst"]
import numpy as np
from sktime.base import BaseEstimator
from sktime.utils.validation import check_n_jobs
from sktime.utils.validation.panel import check_X, check_X_y
class BaseClassifier(BaseEstimator):
    """Abstract base class for time series classifiers.

    The base classifier specifies the methods and method signatures that all
    classifiers have to implement.
    """

    # Capability/behaviour tags; concrete classifiers override as needed.
    _tags = {
        "coerce-X-to-numpy": True,
        "coerce-X-to-pandas": False,
        "capability:multivariate": False,
        "capability:unequal_length": False,
        "capability:missing_values": False,
        "capability:train_estimate": False,
        "capability:contractable": False,
        "capability:multithreading": False,
    }

    def __init__(self):
        self.classes_ = []
        self.n_classes_ = 0
        self._class_dictionary = {}
        self._threads_to_use = 1
        super(BaseClassifier, self).__init__()

    def fit(self, X, y):
        """Fit time series classifier to training data.

        Parameters
        ----------
        X : 2D np.array (univariate, equal length series) of shape = [n_instances,
            series_length]
            or 3D np.array (any number of dimensions, equal length series) of shape =
            [n_instances,n_dimensions,series_length]
            or pd.DataFrame with each column a dimension, each cell a pd.Series (any
            number of dimensions, equal or unequal length series)
        y : 1D np.array of shape = [n_instances] - the class labels.

        Returns
        -------
        self :
            Reference to self.

        Notes
        -----
        Changes state by creating a fitted model that updates attributes
        ending in "_" and sets is_fitted flag to True.
        """
        coerce_to_numpy = self.get_tag("coerce-X-to-numpy")
        coerce_to_pandas = self.get_tag("coerce-X-to-pandas")
        allow_multivariate = self.get_tag("capability:multivariate")
        X, y = check_X_y(
            X,
            y,
            coerce_to_numpy=coerce_to_numpy,
            coerce_to_pandas=coerce_to_pandas,
            enforce_univariate=not allow_multivariate,
        )
        multithread = self.get_tag("capability:multithreading")
        if multithread:
            try:
                self._threads_to_use = check_n_jobs(self.n_jobs)
            except AttributeError:
                # BUGFIX: accessing a missing self.n_jobs raises AttributeError,
                # not NameError; the previous `except NameError` never fired, so
                # the informative message below was never shown.
                raise AttributeError(
                    "self.n_jobs must be set if capability:multithreading is True"
                )
        # Reset the label mapping so refitting with a different label set does
        # not keep stale entries from a previous fit.
        self._class_dictionary = {}
        self.classes_ = np.unique(y)
        self.n_classes_ = self.classes_.shape[0]
        for index, classVal in enumerate(self.classes_):
            self._class_dictionary[classVal] = index
        self._fit(X, y)
        # this should happen last
        self._is_fitted = True
        return self

    def predict(self, X) -> np.ndarray:
        """Predicts labels for sequences in X.

        Parameters
        ----------
        X : 2D np.array (univariate, equal length series) of shape = [n_instances,
            series_length]
            or 3D np.array (any number of dimensions, equal length series) of shape =
            [n_instances,n_dimensions,series_length]
            or pd.DataFrame with each column a dimension, each cell a pd.Series (any
            number of dimensions, equal or unequal length series)

        Returns
        -------
        y : 1D np.array of shape = [n_instances] - predicted class labels
        """
        self.check_is_fitted()
        coerce_to_numpy = self.get_tag("coerce-X-to-numpy")
        coerce_to_pandas = self.get_tag("coerce-X-to-pandas")
        allow_multivariate = self.get_tag("capability:multivariate")
        X = check_X(
            X,
            coerce_to_numpy=coerce_to_numpy,
            coerce_to_pandas=coerce_to_pandas,
            enforce_univariate=not allow_multivariate,
        )
        return self._predict(X)

    def predict_proba(self, X) -> np.ndarray:
        """Predicts labels probabilities for sequences in X.

        Parameters
        ----------
        X : 2D np.array (univariate, equal length series) of shape = [n_instances,
            series_length]
            or 3D np.array (any number of dimensions, equal length series) of shape =
            [n_instances,n_dimensions,series_length]
            or pd.DataFrame with each column a dimension, each cell a pd.Series (any
            number of dimensions, equal or unequal length series)

        Returns
        -------
        y : 2D array of shape = [n_instances, n_classes] - estimated class
            probabilities
        """
        self.check_is_fitted()
        coerce_to_numpy = self.get_tag("coerce-X-to-numpy")
        coerce_to_pandas = self.get_tag("coerce-X-to-pandas")
        allow_multivariate = self.get_tag("capability:multivariate")
        X = check_X(
            X,
            coerce_to_numpy=coerce_to_numpy,
            coerce_to_pandas=coerce_to_pandas,
            enforce_univariate=not allow_multivariate,
        )
        return self._predict_proba(X)

    def score(self, X, y) -> float:
        """Scores predicted labels against ground truth labels on X.

        Parameters
        ----------
        X : 2D np.array (univariate, equal length series) of shape = [n_instances,
            series_length]
            or 3D np.array (any number of dimensions, equal length series) of shape =
            [n_instances,n_dimensions,series_length]
            or pd.DataFrame with each column a dimension, each cell a pd.Series (any
            number of dimensions, equal or unequal length series)
        y : array-like, shape = [n_instances] - actual class labels

        Returns
        -------
        float, accuracy score of predict(X) vs y
        """
        from sklearn.metrics import accuracy_score

        return accuracy_score(y, self.predict(X), normalize=True)

    def _fit(self, X, y):
        """Fit time series classifier to training data.

        Abstract method, must be implemented.

        Parameters
        ----------
        X : 3D np.array, array-like or sparse matrix
            of shape = [n_instances,n_dimensions,series_length]
            or shape = [n_instances,series_length]
            or pd.DataFrame with each column a dimension, each cell a pd.Series
        y : array-like, shape = [n_instances] - the class labels

        Returns
        -------
        self :
            Reference to self.

        Notes
        -----
        Changes state by creating a fitted model that updates attributes
        ending in "_" and sets is_fitted flag to True.
        """
        raise NotImplementedError(
            "_fit is a protected abstract method, it must be implemented."
        )

    def _predict(self, X) -> np.ndarray:
        """Predicts labels for sequences in X.

        Abstract method, must be implemented.

        Parameters
        ----------
        X : 3D np.array, array-like or sparse matrix
            of shape = [n_instances,n_dimensions,series_length]
            or shape = [n_instances,series_length]
            or pd.DataFrame with each column a dimension, each cell a pd.Series

        Returns
        -------
        y : array-like, shape = [n_instances] - predicted class labels
        """
        raise NotImplementedError(
            "_predict is a protected abstract method, it must be implemented."
        )

    def _predict_proba(self, X) -> np.ndarray:
        """Predicts labels probabilities for sequences in X.

        Default behaviour is to call _predict and set the predicted class probability
        to 1, other class probabilities to 0. Override if better estimates are
        obtainable.

        Parameters
        ----------
        X : 3D np.array, array-like or sparse matrix
            of shape = [n_instances,n_dimensions,series_length]
            or shape = [n_instances,series_length]
            or pd.DataFrame with each column a dimension, each cell a pd.Series

        Returns
        -------
        y : array-like, shape = [n_instances, n_classes] - estimated probabilities
            of class membership.
        """
        # One-hot encode the hard predictions via the class -> column mapping
        # built during fit().
        dists = np.zeros((X.shape[0], self.n_classes_))
        preds = self._predict(X)
        for i in range(0, X.shape[0]):
            dists[i, self._class_dictionary[preds[i]]] = 1
        return dists
|
from os.path import dirname, join
import numpy as np
import pandas as pd
import pytest
from bambi.models import Model, Term
@pytest.fixture(scope="module")
def diabetes_data():
    """Diabetes dataset with an added coarse age-group column (0/1/2)."""
    path = join(dirname(__file__), "data", "diabetes.txt")
    frame = pd.read_csv(path, sep="\t")
    frame["age_grp"] = 0
    frame.loc[frame["AGE"] > 40, "age_grp"] = 1
    frame.loc[frame["AGE"] > 60, "age_grp"] = 2
    return frame
@pytest.fixture(scope="module")
def base_model(diabetes_data):
    """A bare Model built on the diabetes data, shared across the module."""
    return Model(diabetes_data)
def test_term_init(diabetes_data):
    """Term construction from a numeric column initializes sane defaults."""
    # The previous version also built a Model(diabetes_data) that was never
    # used; that wasted work is removed.
    term = Term("BMI", diabetes_data["BMI"])
    # Test that all defaults are properly initialized
    assert term.name == "BMI"
    assert not term.categorical  # idiomatic truthiness check instead of `== False`
    assert not term.random
    assert term.levels is not None
    assert term.data.shape == (442, 1)
def test_distribute_random_effect_over(diabetes_data):
    """Random-effect terms are expanded over the levels of the grouping factor."""
    # Random slopes
    model = Model(diabetes_data)
    model.add("BP ~ 1")
    model.add(random="C(age_grp)|BMI")
    model.build(backend="pymc")
    assert model.terms["C(age_grp)[T.1]|BMI"].data.shape == (442, 163)
    # Nested or crossed random intercepts
    model.reset()
    model.add("BP ~ 1")
    model.add(random="0+C(age_grp)|BMI")
    model.build(backend="pymc")
    assert model.terms["C(age_grp)[0]|BMI"].data.shape == (442, 163)
    # 163 unique levels of BMI in diabetes_data
def test_model_init_from_filename():
    """Model can be constructed directly from the path of a data file."""
    # dirname/join are already imported at module level; the previous
    # in-function re-import needlessly shadowed them.
    data_dir = join(dirname(__file__), "data")
    filename = join(data_dir, "diabetes.txt")
    model = Model(filename)
    assert isinstance(model.data, pd.DataFrame)
    assert model.data.shape == (442, 11)
    assert "BMI" in model.data.columns
def test_model_term_names_property(diabetes_data):
    """term_names lists the intercept plus all added terms, in insertion order."""
    model = Model(diabetes_data)
    model.add("BMI ~ age_grp")
    model.add("BP")
    model.add("S1")
    model.build(backend="pymc")
    assert model.term_names == ["Intercept", "age_grp", "BP", "S1"]
def test_add_to_model(diabetes_data):
    """add() creates Term objects and successive add() calls extend the model."""
    model = Model(diabetes_data)
    model.add("BP ~ BMI")
    model.build(backend="pymc")
    assert isinstance(model.terms["BMI"], Term)
    model.add("age_grp")
    model.build(backend="pymc")
    assert set(model.terms.keys()) == {"Intercept", "BMI", "age_grp"}
    # Test that arguments are passed appropriately onto Term initializer
    model.add(random="C(age_grp)|BP")
    model.build(backend="pymc")
    assert isinstance(model.terms["C(age_grp)[T.1]|BP"], Term)
    assert "BP[108.0]" in model.terms["C(age_grp)[T.1]|BP"].levels
def test_one_shot_formula_fit(diabetes_data):
    """fit() with a formula and run=False prepares the model without sampling."""
    model = Model(diabetes_data)
    model.fit("S3 ~ S1 + S2", samples=50, run=False)
    # NOTE(review): other tests in this module build with backend="pymc";
    # confirm whether "pymc3" here is intentional or a leftover backend name.
    model.build(backend="pymc3")
    nv = model.backend.model.named_vars
    targets = ["S3", "S1", "Intercept"]
    assert len(set(nv.keys()) & set(targets)) == 3
def test_invalid_chars_in_random_effect(diabetes_data):
    """Random-effect specifications with invalid characters raise ValueError."""
    model = Model(diabetes_data)
    with pytest.raises(ValueError):
        model.fit(random=["1+BP|age_grp"])
def test_add_formula_append(diabetes_data):
    """add() appends terms by default; append=False resets the model first."""
    model = Model(diabetes_data)
    model.add("S3 ~ 0")
    model.add("S1")
    model.build(backend="pymc")
    assert hasattr(model, "y") and model.y is not None and model.y.name == "S3"
    assert "S1" in model.terms
    # append=False should drop everything added so far, including the response.
    model.add("S2", append=False)
    assert model.y is None
    model.add("S3 ~ 0")
    model.build(backend="pymc")
    assert "S2" in model.terms
    assert "S1" not in model.terms
def test_derived_term_search(diabetes_data):
    """_match_derived_terms finds expanded random-effect terms, else None."""
    model = Model(diabetes_data)
    model.add("BMI ~ 1", random="age_grp|BP", categorical=["age_grp"])
    model.build(backend="pymc")
    terms = model._match_derived_terms("age_grp|BP")
    names = set([t.name for t in terms])
    assert names == {"1|BP", "age_grp[T.1]|BP", "age_grp[T.2]|BP"}
    term = model._match_derived_terms("1|BP")[0]
    assert term.name == "1|BP"
    # All of these should find nothing
    assert model._match_derived_terms("1|ZZZ") is None
    assert model._match_derived_terms("ZZZ|BP") is None
    # The previous version asserted this line twice; the duplicate is removed.
    assert model._match_derived_terms("BP") is None
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import os
from pprint import pprint
from loaddata import load_xlsx
import pyrotein as pr
import numpy as np
# [[[ OBTAIN THE CONSENSUS SEQUENCE ]]]
# Read the sequence alignment result...
# [WARNING] !!!sequence alignment is not trustworthy
fl_aln = 'step3.msa.fasta'
seq_dict = pr.fasta.read(fl_aln)

# Obtain the consensus sequence (super seq)...
tally_dict = pr.fasta.tally_resn_in_seqs(seq_dict)
super_seq = pr.fasta.infer_super_seq(tally_dict)

# [[[ FIND SIZE OF DISTANCE MATRIX ]]]
# Get the sequence index (alignment) on the n-term side...
nseqi = pr.fasta.get_lseqi(super_seq)
cseqi = pr.fasta.get_rseqi(super_seq)
## cseqi = 290

# Define atoms used for distance matrix analysis...
backbone = ["N", "CA", "C", "O"]
len_res = len(backbone)
len_seq = (cseqi - nseqi + 1) * len_res

# [[[ RMSD ANALYSIS ]]]
drc = "pdb"
pdb, chain = "5dgy", "C"
fl_pdb = f"{pdb}.pdb"
pdb_path = os.path.join(drc, fl_pdb)
atoms_pdb = pr.atom.read(pdb_path)
atom_dict = pr.atom.create_lookup_table(atoms_pdb)
chain_dict = atom_dict[chain]

# Obtain seq string for the current chain...
tar_seq = seq_dict[f"{pdb.lower()}_{chain}"]

# Obtain xyzs
entry = f"{pdb}_{chain}"
print(entry)
## xyzs = pr.atom.extract_xyz_by_seq(backbone, chain_dict, tar_seq, nseqi, cseqi)

if True:
    # Extract resn to resi mapping (like the sequence viewer on PyMol)...
    # Non amino acid residue (like ligand) are bypassed
    resn_to_resi_dict = pr.atom.resn_to_resi(chain_dict)

    # Select the bound sequence by nseqi and cseqi...
    tar_seq_bound = tar_seq[nseqi : cseqi + 1]
    tar_seq_bound_continous = tar_seq[nseqi : cseqi + 1].replace('-', '')

    # Obtain the original sequence from PDB...
    # May have overhead in the beginning or the end
    # NOTE(review): assumes the dict's values are one-letter residue codes —
    # confirm against pr.atom.resn_to_resi's return layout.
    seq_orig = ''.join([ v for v in resn_to_resi_dict.values() ])

    # Obtain the starting index by string match...
    lb_term = seq_orig.find(tar_seq_bound_continous)

    # Obtain the ending index by the length of the continuous (no '-') sequence...
    ub_term = lb_term + len(tar_seq_bound_continous)

    # Obtain list of resi bound by nseqi and cseqi...
    resi_list = [ v for v in resn_to_resi_dict.keys() ]
    resi_bound_list = resi_list[lb_term : ub_term]

    # Initialize mapping...
    seqi_to_resi_dict = { k : None for k in range(nseqi, cseqi + 1) }

    # Counter to go through the bound sequence by nseqi and cseqi...
    res_counter = 0

    # Pair each non-gap alignment position with the next PDB residue id.
    for i, seqi in enumerate(range(nseqi, cseqi + 1)):
        # Skip the '-' residue...
        if tar_seq_bound[i] == '-': continue

        # Access the resi...
        resi = resi_bound_list[res_counter]

        # Record the mapping...
        seqi_to_resi_dict[seqi] = resi

        # Increment the residue counter...
        res_counter += 1
|
# HSV limit finder
import cv2
import numpy as np
def empty(a):  # argument required
    """No-op trackbar callback; cv2.createTrackbar requires a callable."""
    pass
def stackImages(scale,imgArray):
    """Stack a flat list (one row) or list-of-lists (grid) of images into one canvas.

    Images are rescaled by *scale*; images whose size differs from the
    top-left reference image are resized to match it, and grayscale images
    are promoted to 3-channel BGR so they can be concatenated with colour
    images.
    """
    rows = len(imgArray)
    cols = len(imgArray[0])
    # A list of lists means a 2-D grid; a flat list is a single row.
    rowsAvailable = isinstance(imgArray[0], list)
    width = imgArray[0][0].shape[1]
    height = imgArray[0][0].shape[0]
    if rowsAvailable:
        for x in range ( 0, rows):
            for y in range(0, cols):
                # NOTE(review): the else-branch passes an explicit dsize, so
                # *scale* is ignored for images that differ from the reference
                # size — confirm that is intended.
                if imgArray[x][y].shape[:2] == imgArray[0][0].shape [:2]:
                    imgArray[x][y] = cv2.resize(imgArray[x][y], (0, 0), None, scale, scale)
                else:
                    imgArray[x][y] = cv2.resize(imgArray[x][y], (imgArray[0][0].shape[1], imgArray[0][0].shape[0]), None, scale, scale)
                # Promote grayscale to BGR so hstack/vstack channel counts agree.
                if len(imgArray[x][y].shape) == 2: imgArray[x][y]= cv2.cvtColor(imgArray[x][y], cv2.COLOR_GRAY2BGR)
        imageBlank = np.zeros((height, width, 3), np.uint8)
        hor = [imageBlank]*rows
        hor_con = [imageBlank]*rows
        for x in range(0, rows):
            hor[x] = np.hstack(imgArray[x])
        ver = np.vstack(hor)
    else:
        for x in range(0, rows):
            if imgArray[x].shape[:2] == imgArray[0].shape[:2]:
                imgArray[x] = cv2.resize(imgArray[x], (0, 0), None, scale, scale)
            else:
                imgArray[x] = cv2.resize(imgArray[x], (imgArray[0].shape[1], imgArray[0].shape[0]), None,scale, scale)
            if len(imgArray[x].shape) == 2: imgArray[x] = cv2.cvtColor(imgArray[x], cv2.COLOR_GRAY2BGR)
        hor= np.hstack(imgArray)
        ver = hor
    return ver
# cap = cv2.VideoCapture(0) # 0 - default webcam
# cap.set(3, 640) # width
# cap.set(4, 480) # height
# cap.set(10, 100) # brightness
cv2.namedWindow('Trackbars')  # window with sliders used to tune the HSV mask
cv2.resizeWindow('Trackbars', 640, 240)
# Hue in OpenCV spans 0-179; saturation and value span 0-255.
cv2.createTrackbar('H minimum', 'Trackbars', 0, 179, empty)  # FIX: max was 255, beyond the valid hue range
cv2.createTrackbar('H maximum', 'Trackbars', 179, 179, empty)
cv2.createTrackbar('S minimum', 'Trackbars', 0, 255, empty)
cv2.createTrackbar('S maximum', 'Trackbars', 255, 255, empty)
cv2.createTrackbar('V minimum', 'Trackbars', 0, 255, empty)
cv2.createTrackbar('V maximum', 'Trackbars', 255, 255, empty)
while True:
    # success, img = cap.read() # <successful execution (boolean)>, <image variable>
    img = cv2.imread('doge.png')
    imgHSV = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)  # conversion to HSV from BGR
    hMin = cv2.getTrackbarPos('H minimum', 'Trackbars')
    hMax = cv2.getTrackbarPos('H maximum', 'Trackbars')
    # BUGFIX: sMin previously read the 'S maximum' trackbar, so the
    # saturation lower bound could never be adjusted independently.
    sMin = cv2.getTrackbarPos('S minimum', 'Trackbars')
    sMax = cv2.getTrackbarPos('S maximum', 'Trackbars')
    vMin = cv2.getTrackbarPos('V minimum', 'Trackbars')
    vMax = cv2.getTrackbarPos('V maximum', 'Trackbars')
    # print(hMin, hMax, sMin, sMax, vMin, vMax)
    lower = np.array([hMin, sMin, vMin])  # minimum range array
    upper = np.array([hMax, sMax, vMax])  # maximum range array
    mask = cv2.inRange(imgHSV, lower, upper)  # filtering out colours from HSV image
    # keeps original colours where the mask is non-black
    imgResult = cv2.bitwise_and(img, img, mask=mask)
    imgStacked = stackImages(0.5, ([img, mask, imgResult]))
    cv2.imshow('Test window', imgStacked)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break
print()
print('Required values : ')
print('hMin, sMin, vMin, hMax, sMax, vMax = ', hMin, ',', sMin, ',', vMin, ',', hMax, ',', sMax, ',', vMax)
|
# Repo: Cladett/imitation — file: tests/test_tabular.py
"""Test tabular environments and tabular MCE IRL."""
import gym
import jax.experimental.optimizers as jaxopt
import numpy as np
import pytest
from imitation.algorithms.tabular_irl import (
LinearRewardModel,
MLPRewardModel,
mce_irl,
mce_occupancy_measures,
mce_partition_fh,
)
from imitation.envs.examples.model_envs import RandomMDP
from imitation.envs.resettable_env import TabularModelEnv
def rollouts(env, n=10, seed=None):
    """Collect *n* random-action trajectories from *env*.

    When *seed* is given, the environment and its action space are re-seeded
    before every episode, so all trajectories should come out identical.
    Each trajectory is ``[initial_obs, (obs, rew), (obs, rew), ...]``.
    """
    trajectories = []
    for _ in range(n):
        if seed is not None:
            # if a seed is given, then we use the same seed each time (should
            # give same trajectory each time)
            env.seed(seed)
            env.action_space.seed(seed)
        obs = env.reset()
        episode = [obs]
        done = False
        while not done:
            action = env.action_space.sample()
            obs, rew, done, info = env.step(action)
            episode.append((obs, rew))
        trajectories.append(episode)
    return trajectories
def test_random_mdp():
    """RandomMDP instances have consistent shapes/distributions, and seeded
    rollouts repeat exactly while unseeded ones vary."""
    for i in range(3):
        n_states = 4 * (i + 3)
        n_actions = i + 2
        branch_factor = i + 1
        if branch_factor == 1:
            # make sure we have enough actions to get reasonable trajectories
            n_actions = min(n_states, max(n_actions, 4))
        horizon = 5 * (i + 1)
        random_obs = (i % 2) == 0
        obs_dim = (i * 3 + 4) ** 2 + i
        mdp = RandomMDP(
            n_states=n_states,
            n_actions=n_actions,
            branch_factor=branch_factor,
            horizon=horizon,
            random_obs=random_obs,
            obs_dim=obs_dim if random_obs else None,
            generator_seed=i,
        )
        # sanity checks on sizes of things
        assert mdp.transition_matrix.shape == (n_states, n_actions, n_states)
        # each (state, action) row must be a valid probability distribution
        assert np.allclose(1, np.sum(mdp.transition_matrix, axis=-1))
        assert np.all(mdp.transition_matrix >= 0)
        assert (
            mdp.observation_matrix.shape[0] == n_states
            and mdp.observation_matrix.ndim == 2
        )
        assert mdp.reward_matrix.shape == (n_states,)
        assert mdp.horizon == horizon
        assert np.all(mdp.initial_state_dist >= 0)
        assert np.allclose(1, np.sum(mdp.initial_state_dist))
        assert np.sum(mdp.initial_state_dist > 0) == branch_factor
        # make sure trajectories aren't all the same if we don't specify same
        # seed each time
        trajectories = rollouts(mdp, 100)
        assert len(set(map(str, trajectories))) > 1
        trajectories = rollouts(mdp, 100, seed=42)
        # make sure trajectories ARE all the same if we do specify the same
        # seed each time
        assert len(set(map(str, trajectories))) == 1
def test_policy_om_random_mdp():
    """Test that optimal policy occupancy measure ("om") for a random MDP is sane."""
    mdp = gym.make("imitation/Random-v0")
    V, Q, pi = mce_partition_fh(mdp)
    assert np.all(np.isfinite(V))
    assert np.all(np.isfinite(Q))
    assert np.all(np.isfinite(pi))
    # Check it is a probability distribution along the last axis
    assert np.all(pi >= 0)
    assert np.allclose(np.sum(pi, axis=-1), 1)
    Dt, D = mce_occupancy_measures(mdp, pi=pi)
    assert np.all(np.isfinite(D))
    assert np.any(D > 0)
    # expected number of state visits (over all states) should be equal to the
    # horizon
    assert np.allclose(np.sum(D), mdp.horizon)
class ReasonableMDP(TabularModelEnv):
    """Small hand-crafted 5-state, 3-action MDP with a predictable optimal policy.

    State layout: 0 is a start state; 1 and 2 are symmetric "okay" states;
    3 is a heavily penalised trap; 4 is a well-rewarded state whose
    transitions all lead back to state 0.
    """

    # one 8-dimensional observation vector per state (rows = states 0..4)
    observation_matrix = np.array(
        [
            # state 0 (start)
            [3, -5, -1, -1, -4, 5, 3, 0],
            # state 1 (top)
            [4, -4, 2, 2, -4, -1, -2, -2],
            # state 2 (bottom, equiv to top)
            [3, -1, 5, -1, 0, 2, -5, 2],
            # state 3 (middle, very low reward and so dominated by others)
            [-5, -1, 4, 1, 4, 1, 5, 3],
            # state 4 (final, good reward; all its transitions return to state 0)
            [2, -5, 1, -5, 1, 4, 4, -3],
        ]
    )
    # transition_matrix[s, a, s'] = P(s' | s, a); rows below sum to 1
    transition_matrix = np.array(
        [
            # transitions out of state 0
            [
                # action 0: goes to state 1 (sometimes 2)
                [0, 0.9, 0.1, 0, 0],
                # action 1: goes to state 3 deterministically
                [0, 0, 0, 1, 0],
                # action 2: goes to state 2 (sometimes 1)
                [0, 0.1, 0.9, 0, 0],
            ],
            # transitions out of state 1
            [
                # action 0: goes to state 3 or 4 (sub-optimal)
                [0, 0, 0, 0.05, 0.95],
                # action 1: goes to state 3 (bad)
                [0, 0, 0, 1, 0],
                # action 2: goes to state 4 (good!)
                [0, 0, 0, 0, 1],
            ],
            # transitions out of state 2 (basically the same)
            [
                # action 0: goes to state 3 or 4 (sub-optimal)
                [0, 0, 0, 0.05, 0.95],
                # action 1: goes to state 3 (bad)
                [0, 0, 0, 1, 0],
                # action 2: goes to state 4 (good!)
                [0, 0, 0, 0, 1],
            ],
            # transitions out of state 3 (all go to state 4)
            [
                # action 0
                [0, 0, 0, 0, 1],
                # action 1
                [0, 0, 0, 0, 1],
                # action 2
                [0, 0, 0, 0, 1],
            ],
            # transitions out of state 4 (all go back to state 0)
            [
                # action 0
                [1, 0, 0, 0, 0],
                # action 1
                [1, 0, 0, 0, 0],
                # action 2
                [1, 0, 0, 0, 0],
            ],
        ]
    )
    # state-based rewards (one scalar per state)
    reward_matrix = np.array(
        [
            # state 0 (okay reward, but we can't go back so it doesn't matter)
            1,
            # states 1 & 2 have same (okay) reward
            2,
            2,
            # state 3 has very negative reward (so avoid it!)
            -20,
            # state 4 has pretty good reward (good enough that we should move out
            # of 1 & 2)
            3,
        ]
    )
    # always start in s0 or s4
    initial_state_dist = [0.5, 0, 0, 0, 0.5]
    # fixed episode length
    horizon = 20
def test_policy_om_reasonable_mdp():
    """Check policy and occupancy-measure structure on ReasonableMDP."""
    # MDP described above
    mdp = ReasonableMDP()
    # get policy etc. for our MDP
    V, Q, pi = mce_partition_fh(mdp)
    Dt, D = mce_occupancy_measures(mdp, pi=pi)
    for quantity in (V, Q, pi, Dt, D):
        assert np.all(np.isfinite(quantity))
    # actions 0 & 2 from state 0 lead to the symmetric states 1 & 2, so
    # their probabilities should roughly match
    assert np.allclose(pi[:19, 0, 0], pi[:19, 0, 2])
    # both should be far preferred over action 1, which leads to the
    # poor-reward state 3
    assert np.all(pi[:19, 0, 0] > 2 * pi[:19, 0, 1])
    # states 3 & 4 offer no meaningful choice, so the policy there should be
    # roughly uniform over the three actions
    uniform_part = pi[:5, 3:5]
    assert np.allclose(uniform_part, np.ones_like(uniform_part) / 3.0)
    # the symmetric states 1 & 2 should get near-identical policies
    assert np.allclose(pi[:19, 1, :], pi[:19, 2, :])
    # from state 1: action 2 (certain transition to state 4) beats action 0
    # (which reaches it only probabilistically), and action 0 in turn beats
    # action 1 (which always lands in the bad state)
    assert np.all(pi[:19, 1, 2] > pi[:19, 1, 0])
    assert np.all(pi[:19, 1, 0] > pi[:19, 1, 1])
    # occupancy at t=0 must reproduce the initial state distribution
    assert np.allclose(Dt[0], mdp.initial_state_dist)
@pytest.mark.expensive
@pytest.mark.parametrize(
    "model_class,model_kwargs",
    [(LinearRewardModel, dict()), (MLPRewardModel, dict(hiddens=[32, 32]))],
)
def test_mce_irl_reasonable_mdp(model_class, model_kwargs):
    """MCE IRL should recover the demonstrator's occupancy measure."""
    mdp = ReasonableMDP()
    # compute the "demonstration" occupancy measure from the optimal policy
    V, Q, pi = mce_partition_fh(mdp)
    Dt, D = mce_occupancy_measures(mdp, pi=pi)
    reward_model = model_class(mdp.obs_dim, seed=13, **model_kwargs)
    optimiser = jaxopt.adam(1e-2)
    final_weights, final_counts = mce_irl(
        mdp, optimiser, reward_model, D, linf_eps=1e-3
    )
    # the learned occupancy measure should closely match the demonstrations
    assert np.allclose(final_counts, D, atol=1e-3, rtol=1e-3)
    # make sure weights have non-insane norm
    assert np.linalg.norm(final_weights) < 1000
|
from requests import Response
from test import TestAPI
from test.test_api.constants import ACTION, Actions, NAME, NEW_NAME, SEG_NAME, Types, TYPE, ErrorMsg
class Rename(TestAPI):
    """API tests for the test-data "rename" action."""

    def setUp(self):
        # request payload and expected success response for a rename
        self.rename_body = {
            ACTION: Actions.RENAME,
            NAME: TestAPI.NAME,
            NEW_NAME: str(),
        }
        self.rename_response = {
            "id": str(),
            NAME: str(),
            SEG_NAME: TestAPI.SEG_NAME,
            TYPE: Types.INPUT_HEADER,
        }
        # create the record that will be renamed, remembering its id
        response = self.post(
            "/api/test_data",
            json={ACTION: Actions.CREATE, NAME: TestAPI.NAME, SEG_NAME: TestAPI.SEG_NAME},
        )
        self.rename_response["id"] = response.json()["id"]
        self.cleanup = [TestAPI.NAME]

    def _rename_test_data(self, new_name: str) -> Response:
        # track the new name so tearDown can delete it
        self.cleanup.append(new_name)
        self.rename_body[NEW_NAME] = new_name
        self.rename_response[NAME] = new_name
        return self.post("/api/test_data", json=self.rename_body)

    def tearDown(self):
        for pending_name in self.cleanup:
            self.delete("/api/test_data", params={NAME: pending_name})

    def test_rename(self):
        response = self._rename_test_data(f"{TestAPI.NAME} - 2")
        self.assertEqual(200, response.status_code)
        self.assertDictEqual(self.rename_response, response.json())

    def test_basic_errors(self):
        # missing name and new_name
        expected = {NAME: ErrorMsg.NOT_EMPTY, NEW_NAME: ErrorMsg.NOT_EMPTY}
        response = self.post("/api/test_data", json={ACTION: Actions.RENAME})
        self.assertEqual(400, response.status_code)
        self.assertDictEqual(expected, response.json())
        # blank name and new_name
        expected = {NAME: ErrorMsg.NOT_EMPTY, NEW_NAME: ErrorMsg.NOT_EMPTY}
        self.rename_body[NAME] = " "
        self.rename_body[NEW_NAME] = " "
        response = self.post("/api/test_data", json=self.rename_body)
        self.assertEqual(400, response.status_code)
        self.assertDictEqual(expected, response.json())
        # unknown name combined with a new_name that already exists
        expected = {NAME: ErrorMsg.NOT_FOUND, NEW_NAME: ErrorMsg.UNIQUE}
        self.rename_body[NAME] = "some invalid name"
        self.rename_body[NEW_NAME] = TestAPI.NAME
        response = self.post("/api/test_data", json=self.rename_body)
        self.assertEqual(400, response.status_code)
        self.assertDictEqual(expected, response.json())

    def test_long_names(self):
        # a 101-character new_name must be rejected
        expected = {NEW_NAME: ErrorMsg.LESS_100}
        self.rename_body[NEW_NAME] = self.NAME_101
        response = self.post("/api/test_data", json=self.rename_body)
        self.assertEqual(400, response.status_code)
        self.assertDictEqual(expected, response.json())
        # a 100-character new_name is the longest accepted
        response = self._rename_test_data(self.NAME_100)
        self.assertEqual(200, response.status_code)
        self.assertDictEqual(self.rename_response, response.json())
class Copy(TestAPI):
    """API tests for the test-data "copy" action."""

    def setUp(self):
        # request payload and expected success response (a one-element list)
        self.copy_body = {
            ACTION: Actions.COPY,
            NAME: TestAPI.NAME,
            NEW_NAME: str(),
        }
        self.copy_response = [{
            "id": str(),
            NAME: str(),
            SEG_NAME: TestAPI.SEG_NAME,
            TYPE: Types.INPUT_HEADER,
        }]
        self.cleanup = [TestAPI.NAME]
        # create the record that will be copied
        self.post(
            "/api/test_data",
            json={ACTION: Actions.CREATE, NAME: TestAPI.NAME, SEG_NAME: TestAPI.SEG_NAME},
        )

    def _copy_test_data(self, new_name: str) -> Response:
        # track the new name so tearDown can delete it
        self.cleanup.append(new_name)
        self.copy_body[NEW_NAME] = new_name
        response = self.post("/api/test_data", json=self.copy_body)
        self.copy_response[0][NAME] = new_name
        self.copy_response[0]["id"] = response.json()[0]["id"]
        return response

    def tearDown(self):
        for pending_name in self.cleanup:
            self.delete("/api/test_data", params={NAME: pending_name})

    def test_copy(self):
        response = self._copy_test_data(f"{TestAPI.NAME} - 2")
        self.assertEqual(200, response.status_code)
        self.assertListEqual(self.copy_response, response.json())

    def test_basic_errors(self):
        # missing name and new_name
        expected = {NAME: ErrorMsg.NOT_EMPTY, NEW_NAME: ErrorMsg.NOT_EMPTY}
        response = self.post("/api/test_data", json={ACTION: Actions.COPY})
        self.assertEqual(400, response.status_code)
        self.assertDictEqual(expected, response.json())
        # blank name and new_name
        expected = {NAME: ErrorMsg.NOT_EMPTY, NEW_NAME: ErrorMsg.NOT_EMPTY}
        self.copy_body[NAME] = " "
        self.copy_body[NEW_NAME] = " "
        response = self.post("/api/test_data", json=self.copy_body)
        self.assertEqual(400, response.status_code)
        self.assertDictEqual(expected, response.json())
        # unknown name combined with a new_name that already exists
        expected = {NAME: ErrorMsg.NOT_FOUND, NEW_NAME: ErrorMsg.UNIQUE}
        self.copy_body[NAME] = "some invalid name"
        self.copy_body[NEW_NAME] = TestAPI.NAME
        response = self.post("/api/test_data", json=self.copy_body)
        self.assertEqual(400, response.status_code)
        self.assertDictEqual(expected, response.json())

    def test_long_names(self):
        # a 101-character new_name must be rejected
        expected = {NEW_NAME: ErrorMsg.LESS_100}
        self.copy_body[NEW_NAME] = self.NAME_101
        response = self.post("/api/test_data", json=self.copy_body)
        self.assertEqual(400, response.status_code)
        self.assertDictEqual(expected, response.json())
        # a 100-character new_name is the longest accepted
        response = self._copy_test_data(self.NAME_100)
        self.assertEqual(200, response.status_code)
        self.assertListEqual(self.copy_response, response.json())
|
import math
from algorithm import heft_time_reservation,heft,greedy,lbck,greedy_time_reservation,get_algorithm_timelist,set_paramerters,greedy_nlook_back,greedy_time_reservation_nlook_back,heft_n_look_back,NSGA_n_look_back
from Dataset import get_connect_task_graph,generate_randomtimeline,get_connect_multiple_task_graph,default_timewindow,lookahead_window_size
import copy
# 'Global View Greedy-Reservation','Global View HEFT','Global View Greedy',"NSGA",
# "Partial View Greedy-Reservation-Improved",'Partial View HEFT-Improved','Partial View Greedy-Improved'
# Display names of the algorithms under test. All result dicts in this file
# are keyed by these strings, in this order; the order must match the output
# order of get_algorithm_timelist() and get_run_time().
ALGOR_NAME_LIST = [
    "Partial View Greedy-Reservation",'Partial View HEFT','Partial View Greedy',"Partial View NSGA"
]
#
#--------------Configure-------------
# NOTE: the misspelled "defalut_*" names are module-wide defaults referenced
# throughout this file; the spelling is kept as-is to avoid breaking callers.
# DNN FLOP:0 Google :1
defalut_tasktype = [0,0,0]  # per-class task type (0 = DNN FLOP, 1 = Google; see line above)
defalut_inter_num = 1  # repetitions per experiment (results averaged by result_timecal)
defalut_edge_nums = 4  # default edge-server count for fixed-edge experiments
defalut_max_edge_nums = 10  # sweep upper bound for the edge-count experiments
defalut_delta = 0.01  # default extra delay fed into the random_time matrices
defalut_avaratio = 0.75  # default ava_ratio for generate_randomtimeline
defalut_sigma = 0.05  # default ratio_sigma for generate_randomtimeline
defalut_start_ratio = 0.2  # default start_ratio for generate_randomtimeline
defalut_start_sigma = 0.05  # default start_sigma for generate_randomtimeline
defalut_max_cpu_capbility = 305  # default per-edge CPU capability
defalut_request_number = [200,200,200]  # default request count per task-graph class
# --------------Metrics---------------
# get the total completion time
def get_max_time(anslist):
    """Return the overall completion time extracted from ``anslist``.

    ``anslist`` is a list of per-algorithm-run schedules, each a list of
    (at least) 3-tuples. For every non-empty schedule the entry with the
    largest value at index 2 is located and its value at index 1 is folded
    into the running maximum.
    NOTE(review): selecting by index 2 but reading index 1 mirrors the
    original code exactly — confirm the tuple layout if this looks off
    (the throughput helper reads index 2 from the same entry).

    ``anslist`` may also be the error sentinel 2 or 3 propagated from the
    scheduling algorithms, in which case -1 is returned.
    """
    max_time = -1
    if anslist == 2 or anslist == 3:
        return max_time
    for schedule in anslist:
        if schedule:
            # max(key=...) replaces the original in-place descending sort:
            # it picks the same (first) entry with the largest index-2
            # value, runs in O(n) instead of O(n log n), and no longer
            # mutates the caller's lists as a side effect.
            top = max(schedule, key=lambda entry: entry[2])
            max_time = max(max_time, top[1])
    return max_time
# get the throughput
def get_throught_ratio(anslist,deadline):
    """Return 1 if every schedule finishes strictly before ``deadline``, else 0.

    The latest finish time is the maximum index-2 value over all entries of
    all schedules in ``anslist``. The error sentinels 2 and 3 (propagated
    from the scheduling algorithms) yield 0.
    """
    if anslist == 3 or anslist == 2:
        return 0
    max_time = 0
    for schedule in anslist:
        if schedule:
            # take the largest finish time directly instead of sorting the
            # caller's list in place (the original sort was a side effect)
            max_time = max(max_time, max(entry[2] for entry in schedule))
    # deadline met only if it is strictly later than the last finish time
    return 1 if deadline > max_time else 0
# get algorithm running time
def get_run_time():
    """Time one scheduling pass of each look-ahead algorithm.

    Returns the wall-clock durations in the same order as ALGOR_NAME_LIST:
    [greedy-reservation, HEFT, greedy, NSGA] (all n-look-back variants).
    Each algorithm reads its inputs from the module state installed by
    set_paramerters(), so only the look-ahead window size is passed here.

    Bug fix: the original used ``time.clock()``, which was deprecated in
    Python 3.3 and removed in Python 3.8; ``time.perf_counter()`` is the
    documented replacement for wall-clock interval measurement.
    """
    import time

    durations = []
    for algorithm in (
        greedy_time_reservation_nlook_back,
        heft_n_look_back,
        greedy_nlook_back,
        NSGA_n_look_back,
    ):
        started = time.perf_counter()
        algorithm(lookahead_window_size)
        durations.append(time.perf_counter() - started)
    return durations
def result_ratiocal(result_dict_list,inter_num):
    """Average per-index ratio values across repeated runs.

    Each element of ``result_dict_list`` maps an algorithm name to a list
    of single-element containers; the [0] entries are summed per index and
    divided by ``inter_num``.
    """
    n_points = len(result_dict_list[0][ALGOR_NAME_LIST[0]])
    totals = {alg: [0 for _ in range(n_points)] for alg in ALGOR_NAME_LIST}
    # accumulate the leading value of every (index, algorithm) cell
    for run in result_dict_list:
        for alg, sums in totals.items():
            for j in range(n_points):
                sums[j] += run[alg][j][0]
    # normalise by the number of repetitions
    return {alg: [total / inter_num for total in sums] for alg, sums in totals.items()}
def result_timecal(result_dict_list,inter_num):
    """Average per-index times across runs, skipping -1 (failure) entries.

    For each algorithm and index, only values different from -1 are kept;
    the average is taken over the number of kept values. An index with no
    valid value at all yields -1. Note: ``inter_num`` is accepted for
    interface parity with result_ratiocal but is not used in the average.
    """
    n_points = len(result_dict_list[0][ALGOR_NAME_LIST[0]])
    # collect the valid (non -1) samples per algorithm and index
    collected = {alg: [[] for _ in range(n_points)] for alg in ALGOR_NAME_LIST}
    for run in result_dict_list:
        for alg in ALGOR_NAME_LIST:
            for j, entry in enumerate(run[alg]):
                if entry[0] != -1:
                    collected[alg][j].append(entry[0])
    avg_dict = {}
    for alg in ALGOR_NAME_LIST:
        avg_dict[alg] = [
            sum(samples) / len(samples) if samples else -1
            for samples in collected[alg]
        ]
    return avg_dict
# --------------Metrics---------------
def taskgraph_exp(data_prefix, taskgraph,**kwargs):
    """Run one completion-time experiment and return a per-algorithm dict.

    Builds the edge-server environment (availability timelines, bandwidth,
    CPU capability, resource bounds), installs it via set_paramerters(),
    runs every algorithm through get_algorithm_timelist(), and extracts
    each run's completion time with get_max_time().

    data_prefix: run identifier supplied by the caller (not referenced in
        the body — presumably kept for logging/CSV naming; confirm).
    taskgraph: task-graph type selector (not referenced directly — the
        flattened graph normally arrives via kwargs['task_info']).
    kwargs: optional overrides — ava_ratio, max_edge_num, change_edge_num,
        max_cpu_capbility, decision_time_list, delta, mu, ratio_sigma,
        request_number, start_ratio, start_sigma, window_size, task_info,
        avatimelist.

    Returns a dict keyed by ALGOR_NAME_LIST. When change_edge_num is true
    each value is a list of single-element lists (one per edge count);
    otherwise it is a single-element list of bare scalars — the callers'
    bookkeeping relies on this nesting difference.
    """
    # from code02 import set_paramerters,get_time_list
    import pandas as pd
    avatimelist = []
    avatime_ratio = defalut_avaratio
    edge_computer_cability = []
    resouce_upbound = []
    # one result series per algorithm, in ALGOR_NAME_LIST order
    time_list = [[] for i in range(len(ALGOR_NAME_LIST))]
    ratio_list = [[] for i in range(len(ALGOR_NAME_LIST))]
    request_number = defalut_request_number
    # n_look = 30
    # file_prefix = 'exp1_edge_num_change'
    # module-level defaults, individually overridable through kwargs below
    max_edge_num = defalut_max_edge_nums
    edge_nums = defalut_edge_nums
    max_cpu_capbility = defalut_max_cpu_capbility
    delta = defalut_delta
    mu = defalut_avaratio
    ratio_sigma = defalut_sigma
    window_size = default_timewindow
    start_ratio = defalut_start_ratio
    start_sigma = defalut_start_sigma
    change_edge_num = True
    if "ava_ratio" in kwargs:
        avatime_ratio = kwargs['ava_ratio']
    if 'max_edge_num' in kwargs:
        max_edge_num = kwargs['max_edge_num']
    if 'change_edge_num' in kwargs:
        change_edge_num = kwargs['change_edge_num']
    if 'max_cpu_capbility' in kwargs:
        max_cpu_capbility = kwargs['max_cpu_capbility']
    if 'decision_time_list' in kwargs:
        decision_time_list = kwargs['decision_time_list']
    if 'delta' in kwargs:
        delta = kwargs['delta']
    if 'mu' in kwargs:
        mu = kwargs['mu']
    if 'ratio_sigma' in kwargs:
        ratio_sigma = kwargs['ratio_sigma']
    if 'request_number' in kwargs:
        request_number = kwargs['request_number']
    if 'start_ratio' in kwargs:
        start_ratio = kwargs['start_ratio']
    if 'start_sigma' in kwargs:
        start_sigma = kwargs['start_sigma']
    if 'window_size' in kwargs:
        window_size = kwargs['window_size']
    task_info = None
    if "task_info" in kwargs:
        task_info = kwargs['task_info']
        # flattened task graph plus the order/index mappings the schedulers use
        pre,succ,workload,datasize,taskindex2order_map,order2taskindex_map,order2subtaskindex_map = task_info
    # set_paramerters()
    if change_edge_num:
        # sweep the edge-server count from 3 to max_edge_num - 1; any
        # timelines passed via kwargs are discarded and regenerated here
        # edge_num = 3
        decision_time_list = []
        avatimelist = []
        new_decision_time_list,new_avatimelist = generate_randomtimeline(num_edges=max_edge_num,
                            start_ratio=start_ratio,start_sigma=start_sigma,ava_ratio=avatime_ratio,ratio_sigma=ratio_sigma)
        for edge_num in range(3, max_edge_num):
            edge_num_time_list = []
            # reset ava_time_list (deep copies so one sweep step cannot
            # mutate the shared timelines of later steps)
            decision_time_list = copy.deepcopy(new_decision_time_list[:edge_num])
            avatimelist = copy.deepcopy(new_avatimelist[:edge_num])
            # reset random time
            random_time = [[delta for i in range(len(workload))] for i in range(edge_num)]
            # reset W (uniform 12.5 bandwidth between every pair of edges)
            W = [[12.5 for i in range(edge_num)] for i in range(edge_num)]
            # reset edge_computer_capblity
            edge_computer_cability = [max_cpu_capbility for i in range(edge_num)]
            # reset resouce upbound: total available time per device
            resouce_upbound = []
            for tmpava_bydevice in avatimelist:
                tmpsum = 0
                for tmpinterval in tmpava_bydevice:
                    tmplen = tmpinterval[1] - tmpinterval[0]
                    tmpsum = tmpsum + tmplen
                resouce_upbound.append(tmpsum)
            set_paramerters(workload=workload, datasize=datasize, pre=pre, succ=succ, num_edges=edge_num, ava_time_list=avatimelist, random_time=random_time, bandwidth_edge=W,
                            taskindex2order_map=taskindex2order_map,order2taskindex_map=order2taskindex_map,order2subtaskindex_map=order2subtaskindex_map,window_size=window_size,
                            edge_computer_capability=edge_computer_cability, resouce_upbound=resouce_upbound,decision_time_list=decision_time_list)
            edge_num_time_list += get_algorithm_timelist()
            for i in range(len(edge_num_time_list)):
                # ratio_list[i].append([get_throught_ratio(edge_num_time_list[i],deadline=defalut_deadline)])
                # single-element list wrapper matches result_timecal's
                # run[alg][j][0] indexing used by the edge-sweep callers
                time_list[i].append([get_max_time(edge_num_time_list[i])])
    else:
        edge_num = edge_nums
        edge_num_time_list = []
        # reset ava_time_list
        if 'avatimelist' in kwargs:
            avatimelist = kwargs['avatimelist']
        if 'decision_time_list' in kwargs:
            decision_time_list = kwargs['decision_time_list']
        # NOTE(review): if 'decision_time_list' is absent from kwargs this
        # branch raises NameError at set_paramerters below — confirm the
        # fixed-edge callers always pass it (they currently do).
        # else:
        #     avatimelist= [generate_ava_time_and_unava_time(avatime_radio, 20, 300) for i in range(edge_num)]
        # reset random time
        random_time = [[delta for i in range(len(workload))] for i in range(edge_num)]
        # reset W
        W = [[12.5 for i in range(edge_num)] for i in range(edge_num)]
        # reset edge_computer_capblity
        edge_computer_cability = [max_cpu_capbility for i in range(edge_num)]
        # reset resouce upbound
        resouce_upbound = []
        for tmpava_bydevice in avatimelist:
            tmpsum = 0
            for tmpinterval in tmpava_bydevice:
                tmplen = tmpinterval[1] - tmpinterval[0]
                tmpsum = tmpsum + tmplen
            resouce_upbound.append(tmpsum)
        set_paramerters(workload=workload, datasize=datasize, pre=pre, succ=succ, num_edges=edge_num,window_size=window_size,
                        ava_time_list=avatimelist, random_time=random_time, bandwidth_edge=W,decision_time_list=decision_time_list,
                        taskindex2order_map=taskindex2order_map,order2taskindex_map=order2taskindex_map,order2subtaskindex_map=order2subtaskindex_map,
                        edge_computer_capability=edge_computer_cability, resouce_upbound=resouce_upbound)
        # tmptimelist = get_time_list()
        edge_num_time_list += get_algorithm_timelist()
        for i in range(len(edge_num_time_list)):
            # ratio_list[i].append(get_throught_ratio(edge_num_time_list[i],deadline=defalut_deadline))
            # bare scalar here (no list wrapper): fixed-edge callers append
            # one whole series per call, which restores the [j][0] nesting
            time_list[i].append(get_max_time(edge_num_time_list[i]))
    # key the collected series by algorithm display name
    time_dict = {}
    for i in range(len(time_list)):
        time_dict[ALGOR_NAME_LIST[i]] = time_list[i]
    # ratio_dict = {}
    # for i in range(len(ratio_list)):
    #     ratio_dict[ALGOR_NAME_LIST[i]] = ratio_list[i]
    return time_dict
def taskgraph_exp_runtime(data_prefix, taskgraph,**kwargs):
    '''
    Measure algorithm running time (not schedule quality) while sweeping
    the edge-server count from 3 to max_edge_num - 1.

    Reset per sweep step:
    * edge_num
    * ava_time_list
    * random_time
    * W
    * edge_computer_capbility
    * resource_upbound

    data_prefix: run identifier supplied by the caller (not referenced in
        the body — presumably kept for logging/CSV naming; confirm).
    kwargs: optional overrides — max_edge_num, change_edge_num,
        max_cpu_capbility, decision_time_list, delta, sigma, window_size,
        task_info.

    :return: dict keyed by ALGOR_NAME_LIST mapping each algorithm to a list
        of single-element lists of wall-clock durations from get_run_time().
    '''
    # from code02 import set_paramerters,get_time_list
    import pandas as pd
    avatimelist = []
    avatime_ratio = defalut_avaratio
    sigma = defalut_sigma
    edge_computer_cability = []
    resouce_upbound = []
    # one runtime series per algorithm, in ALGOR_NAME_LIST order
    runtime_list = [[] for i in range(len(ALGOR_NAME_LIST))]
    max_edge_num = defalut_max_edge_nums
    edge_nums = defalut_edge_nums
    max_cpu_capbility = defalut_max_cpu_capbility
    delta = defalut_delta
    window_size = default_timewindow
    start_ratio = defalut_start_ratio
    start_sigma= defalut_start_sigma
    request_number = defalut_request_number
    change_edge_num = True
    # set big task graph paramerters
    # NOTE(review): this graph build is wasted work whenever kwargs supplies
    # 'task_info' (the unpacked values are overwritten below) — confirm
    # before hoisting it behind the kwargs check.
    pre,succ,workload,datasize,taskindex2order_map,order2taskindex_map,order2subtaskindex_map = get_connect_multiple_task_graph(request_number,taskgraph,tasktype=defalut_tasktype)
    # if 'n_look' in kwargs:
    #     n_look = kwargs['n_look']
    if 'max_edge_num' in kwargs:
        max_edge_num = kwargs['max_edge_num']
    if 'change_edge_num' in kwargs:
        change_edge_num = kwargs['change_edge_num']
    if 'max_cpu_capbility' in kwargs:
        max_cpu_capbility = kwargs['max_cpu_capbility']
    if 'decision_time_list' in kwargs:
        decision_time_list = kwargs['decision_time_list']
    if 'delta' in kwargs:
        delta = kwargs['delta']
    if 'sigma' in kwargs:
        sigma = kwargs['sigma']
    if 'window_size' in kwargs:
        window_size = kwargs['window_size']
    task_info = None
    if "task_info" in kwargs:
        task_info = kwargs['task_info']
        # flattened task graph plus the order/index mappings the schedulers use
        pre,succ,workload,datasize,taskindex2order_map,order2taskindex_map,order2subtaskindex_map = task_info
    # set_paramerters()
    # edge_num = 3
    # the sweep always regenerates its own timelines (kwargs-provided
    # decision_time_list is discarded here)
    decision_time_list = []
    avatimelist = []
    new_decision_time_list,new_avatimelist = generate_randomtimeline(num_edges=max_edge_num,
                        start_ratio=start_ratio,start_sigma=start_sigma,ava_ratio=avatime_ratio,ratio_sigma=sigma)
    for edge_num in range(3, max_edge_num):
        edge_num_time_list = []
        # reset ava_time_list (deep copies so one sweep step cannot mutate
        # the shared timelines of later steps)
        decision_time_list = copy.deepcopy(new_decision_time_list[:edge_num])
        avatimelist = copy.deepcopy(new_avatimelist[:edge_num])
        # reset random time
        random_time = [[delta for i in range(len(workload))] for i in range(edge_num)]
        # reset W (note: bandwidth 100 here vs 12.5 in taskgraph_exp)
        W = [[100 for i in range(edge_num)] for i in range(edge_num)]
        # reset edge_computer_capblity
        edge_computer_cability = [max_cpu_capbility for i in range(edge_num)]
        # reset resouce upbound: total available time per device
        resouce_upbound = []
        for tmpava_bydevice in avatimelist:
            tmpsum = 0
            for tmpinterval in tmpava_bydevice:
                tmplen = tmpinterval[1] - tmpinterval[0]
                tmpsum = tmpsum + tmplen
            resouce_upbound.append(tmpsum)
        set_paramerters(workload=workload, datasize=datasize, pre=pre, succ=succ, num_edges=edge_num,window_size=window_size,
                        ava_time_list=avatimelist, random_time=random_time, bandwidth_edge=W,decision_time_list=decision_time_list,
                        taskindex2order_map=taskindex2order_map,order2taskindex_map=order2taskindex_map,order2subtaskindex_map=order2subtaskindex_map,
                        edge_computer_capability=edge_computer_cability, resouce_upbound=resouce_upbound)
        # tmptimelist = get_time_list()
        edge_num_time_list += get_run_time()
        # single-element list wrapper matches result_timecal's [j][0] indexing
        for i in range(len(edge_num_time_list)):
            runtime_list[i].append([edge_num_time_list[i]])
    # key the collected series by algorithm display name
    runtime_dict = {}
    for i in range(len(runtime_list)):
        runtime_dict[ALGOR_NAME_LIST[i]] = runtime_list[i]
    return runtime_dict
# with the processing capacity of the processors
def exp_2_graph(taskgraphtype, expprefix):
    """Sweep the edge CPU capability (300..570, step 30) on a fixed edge
    count and write the averaged completion times per algorithm to CSV."""
    import pandas as pd
    from tqdm import tqdm
    inter_num = defalut_inter_num
    cpu_lower_bound = 300
    # build the large composite task graph once; reused for every repetition
    task_info = get_connect_multiple_task_graph(
        defalut_request_number, taskgraphtype, tasktype=defalut_tasktype)
    time_dict_list = []
    for run_idx in tqdm(range(inter_num)):
        per_run_times = {alg: [] for alg in ALGOR_NAME_LIST}
        # one random availability timeline per repetition, shared by every
        # CPU-capability setting in the inner sweep
        decision_time_list, avatimelist = generate_randomtimeline(
            num_edges=defalut_edge_nums,
            start_ratio=defalut_start_ratio, start_sigma=defalut_start_sigma,
            ava_ratio=defalut_avaratio, ratio_sigma=defalut_sigma)
        for cpu_capability in range(cpu_lower_bound, 600, 30):
            time_dict = taskgraph_exp(
                f"graph_iteration_{run_idx + 1}_{expprefix}", taskgraphtype,
                task_info=task_info,
                max_edge_num=defalut_edge_nums,
                avatimelist=avatimelist,
                decision_time_list=decision_time_list,
                max_cpu_capbility=cpu_capability,
                change_edge_num=False)
            for alg in ALGOR_NAME_LIST:
                per_run_times[alg].append(time_dict[alg])
        time_dict_list.append(per_run_times)
    avg_time_dict = result_timecal(time_dict_list, inter_num=inter_num)
    pd.DataFrame(data=avg_time_dict).to_csv(
        f"graph_iteration_{inter_num}_max_time_{expprefix}.csv", index=True)
# with the extra delay
def exp_3_graph(taskgraphtype, expprefix):
    """Sweep the extra scheduling delay (1x..10x the default delta) on a
    fixed edge count and write the averaged completion times to CSV."""
    import pandas as pd
    from tqdm import tqdm
    inter_num = defalut_inter_num
    base_delta = defalut_delta
    # build the large composite task graph once; reused for every repetition
    task_info = get_connect_multiple_task_graph(
        defalut_request_number, taskgraphtype, tasktype=defalut_tasktype)
    time_dict_list = []
    for run_idx in tqdm(range(inter_num)):
        per_run_times = {alg: [] for alg in ALGOR_NAME_LIST}
        # one random availability timeline per repetition, shared across the
        # delta sweep below
        decision_time_list, avatimelist = generate_randomtimeline(
            num_edges=defalut_edge_nums,
            start_ratio=defalut_start_ratio, start_sigma=defalut_start_sigma,
            ava_ratio=defalut_avaratio, ratio_sigma=defalut_sigma)
        for step in range(0, 10):
            # delta grows linearly: base, 2*base, ..., 10*base
            extra_delay = base_delta + base_delta * step
            time_dict = taskgraph_exp(
                f"graph_iteration_{run_idx + 1}_{expprefix}", taskgraphtype,
                task_info=task_info,
                max_edge_num=defalut_edge_nums,
                avatimelist=avatimelist,
                decision_time_list=decision_time_list,
                delta=extra_delay,
                change_edge_num=False)
            for alg in ALGOR_NAME_LIST:
                per_run_times[alg].append(time_dict[alg])
        time_dict_list.append(per_run_times)
    avg_time_dict = result_timecal(time_dict_list, inter_num=inter_num)
    pd.DataFrame(data=avg_time_dict).to_csv(
        f"graph_iteration_{inter_num}_max_time_{expprefix}.csv", index=True)
# with the number of edge servers
def exp_1_graph(taskgraphtype, expprefix):
    """Sweep the number of edge servers (handled inside taskgraph_exp via
    change_edge_num=True) and write averaged completion times to CSV."""
    import pandas as pd
    from tqdm import tqdm
    inter_num = defalut_inter_num
    # build the large composite task graph once; reused for every repetition
    task_info = get_connect_multiple_task_graph(
        defalut_request_number, taskgraphtype, tasktype=defalut_tasktype)
    time_dict_list = []
    for run_idx in tqdm(range(inter_num)):
        time_dict = taskgraph_exp(
            f"graph_iteration_{run_idx + 1}_{expprefix}", taskgraphtype,
            task_info=task_info, change_edge_num=True)
        # shallow copy of each algorithm's series for this repetition
        time_dict_list.append(
            {alg: list(time_dict[alg]) for alg in ALGOR_NAME_LIST})
    avg_time_dict = result_timecal(time_dict_list, inter_num=inter_num)
    pd.DataFrame(data=avg_time_dict).to_csv(
        f"graph_iteration_{inter_num}_max_time_{expprefix}.csv", index=True)
# with the number of edge servers for running time
def exp_7_graph(taskgraphtype, expprefix):
    """Sweep the number of edge servers and write the averaged algorithm
    running times (not completion times) to CSV."""
    import pandas as pd
    from tqdm import tqdm
    inter_num = defalut_inter_num
    # build the large composite task graph once; reused for every repetition
    task_info = get_connect_multiple_task_graph(
        defalut_request_number, taskgraphtype, tasktype=defalut_tasktype)
    runtime_dict_list = []
    for run_idx in tqdm(range(inter_num)):
        runtime_dict = taskgraph_exp_runtime(
            f"graph_iteration_{run_idx + 1}_{expprefix}", taskgraphtype,
            task_info=task_info, change_edge_num=True)
        # shallow copy of each algorithm's series for this repetition
        runtime_dict_list.append(
            {alg: list(runtime_dict[alg]) for alg in ALGOR_NAME_LIST})
    avg_runtime_dict = result_timecal(runtime_dict_list, inter_num=inter_num)
    pd.DataFrame(data=avg_runtime_dict).to_csv(
        f"graph_iteration_{inter_num}_run_time_{expprefix}.csv", index=True)
# with ratio and number of inference task
def exp_41_graph(taskgraphtype, expprefix):
    """Edge-count sweep with the request mix weighted toward the first
    task-graph class ([20, 10, 10] requests)."""
    import pandas as pd
    from tqdm import tqdm
    inter_num = defalut_inter_num
    ratio_dict_list = []  # unused; kept in step with the commented-out ratio bookkeeping
    request_number = [20,10,10]
    task_info = get_connect_multiple_task_graph(
        request_number, taskgraphtype, tasktype=defalut_tasktype)
    time_dict_list = []
    for run_idx in tqdm(range(inter_num)):
        time_dict = taskgraph_exp(
            f"graph_iteration_{run_idx + 1}_{expprefix}", taskgraphtype,
            task_info=task_info, change_edge_num=True)
        # shallow copy of each algorithm's series for this repetition
        time_dict_list.append(
            {alg: list(time_dict[alg]) for alg in ALGOR_NAME_LIST})
    avg_time_dict = result_timecal(time_dict_list, inter_num=inter_num)
    pd.DataFrame(data=avg_time_dict).to_csv(
        f"graph_iteration_{inter_num}_max_time_{expprefix}.csv", index=True)
def exp_42_graph(taskgraphtype, expprefix):
    """Edge-count sweep with the request mix weighted toward the second
    task-graph class ([10, 20, 10] requests)."""
    import pandas as pd
    from tqdm import tqdm
    inter_num = defalut_inter_num
    ratio_dict_list = []  # unused; kept in step with the commented-out ratio bookkeeping
    request_number = [10,20,10]
    task_info = get_connect_multiple_task_graph(
        request_number, taskgraphtype, tasktype=defalut_tasktype)
    time_dict_list = []
    for run_idx in tqdm(range(inter_num)):
        time_dict = taskgraph_exp(
            f"graph_iteration_{run_idx + 1}_{expprefix}", taskgraphtype,
            task_info=task_info, change_edge_num=True)
        # shallow copy of each algorithm's series for this repetition
        time_dict_list.append(
            {alg: list(time_dict[alg]) for alg in ALGOR_NAME_LIST})
    avg_time_dict = result_timecal(time_dict_list, inter_num=inter_num)
    pd.DataFrame(data=avg_time_dict).to_csv(
        f"graph_iteration_{inter_num}_max_time_{expprefix}.csv", index=True)
def exp_43_graph(taskgraphtype, expprefix):
    """Experiment 4.3: completion time vs. edge count for request mix [10, 10, 20].

    Identical to exp_42_graph except for the request mix; results are averaged
    over `inter_num` iterations and written to
    'graph_iteration_<inter_num>_max_time_<expprefix>.csv'.
    """
    import pandas as pd
    from tqdm import tqdm
    time_dict_list = []
    inter_num = defalut_inter_num  # module-level iteration count ('defalut' sic)
    request_number = [10, 10, 20]
    task_info = get_connect_multiple_task_graph(request_number, taskgraphtype,
                                                tasktype=defalut_tasktype)
    for i in tqdm(range(inter_num)):
        time_dict = taskgraph_exp("graph_iteration_{0}_{1}".format(i + 1, expprefix),
                                  taskgraphtype, task_info=task_info,
                                  change_edge_num=True)
        # Copy result lists to avoid aliasing across iterations.
        time_dict_list.append({name: list(time_dict[name]) for name in ALGOR_NAME_LIST})
    avg_time_dict = result_timecal(time_dict_list, inter_num=inter_num)
    df = pd.DataFrame(data=avg_time_dict)
    df.to_csv("{0}_{1}_{2}_{3}.csv".format('graph_iteration', str(inter_num),
                                           'max_time', expprefix), index=True)
# with the ratio of available time
def exp_5_graph(taskgraphtype, expprefix):
    """Experiment 5: sweep the available-time ratio (0.70 .. 0.74, step 0.01).

    For each iteration a fresh random timeline is generated per ratio; the
    per-algorithm max times are appended, averaged over all iterations, and
    written to 'graph_iteration_<inter_num>_max_time_<expprefix>.csv'.
    """
    import pandas as pd
    from tqdm import tqdm
    time_dict_list = []
    inter_num = defalut_inter_num  # module-level iteration count ('defalut' sic)
    ratio_lo_pct = 70   # sweep bounds, in percent
    ratio_hi_pct = 75
    task_info = get_connect_multiple_task_graph(defalut_request_number, taskgraphtype,
                                                tasktype=defalut_tasktype)
    for i in tqdm(range(inter_num)):
        tmptimedict = {name: [] for name in ALGOR_NAME_LIST}
        for ratio_pct in range(ratio_lo_pct, ratio_hi_pct, 1):
            avatime_ratio = ratio_pct / 100
            decision_time_list, avatimelist = generate_randomtimeline(
                num_edges=defalut_edge_nums,
                start_ratio=defalut_start_ratio, start_sigma=defalut_start_sigma,
                ava_ratio=avatime_ratio, ratio_sigma=defalut_sigma)
            time_dict = taskgraph_exp("graph_iteration_{0}_{1}".format(i + 1, expprefix),
                                      taskgraphtype, task_info=task_info,
                                      max_edge_num=defalut_edge_nums,
                                      avatimelist=avatimelist,
                                      decision_time_list=decision_time_list,
                                      change_edge_num=False)
            for name in ALGOR_NAME_LIST:
                tmptimedict[name].append(time_dict[name])
        time_dict_list.append(tmptimedict)
    avg_time_dict = result_timecal(time_dict_list, inter_num=inter_num)
    df = pd.DataFrame(data=avg_time_dict)
    df.to_csv("{0}_{1}_{2}_{3}.csv".format('graph_iteration', str(inter_num),
                                           'max_time', expprefix), index=True)
# with distribution of unavailable interval for the algorithm stable
def exp_61_graph(taskgraphtype, expprefix):
    """Experiment 6.1: unavailable-interval distribution, start ratio 0.01 / sigma 0.1.

    Runs `taskgraph_exp` with a varying edge count, averages per-algorithm
    times over `inter_num` iterations, and writes the CSV result.
    """
    import pandas as pd
    from tqdm import tqdm
    time_dict_list = []
    inter_num = defalut_inter_num  # module-level iteration count ('defalut' sic)
    start_time_sigma = 0.1
    start_time_ratio = 0.01
    task_info = get_connect_multiple_task_graph(defalut_request_number, taskgraphtype,
                                                tasktype=defalut_tasktype)
    for i in tqdm(range(inter_num)):
        time_dict = taskgraph_exp("graph_iteration_{0}_{1}".format(i + 1, expprefix),
                                  taskgraphtype, start_ratio=start_time_ratio,
                                  start_sigma=start_time_sigma, task_info=task_info,
                                  change_edge_num=True)
        # Copy result lists to avoid aliasing across iterations.
        time_dict_list.append({name: list(time_dict[name]) for name in ALGOR_NAME_LIST})
    avg_time_dict = result_timecal(time_dict_list, inter_num=inter_num)
    df = pd.DataFrame(data=avg_time_dict)
    df.to_csv("{0}_{1}_{2}_{3}.csv".format('graph_iteration', str(inter_num),
                                           'max_time', expprefix), index=True)
def exp_62_graph(taskgraphtype, expprefix):
    """Experiment 6.2: unavailable-interval distribution, start ratio 0.1 / sigma 0.1.

    Same shape as exp_61_graph with a different start-time configuration.
    """
    import pandas as pd
    from tqdm import tqdm
    time_dict_list = []
    inter_num = defalut_inter_num  # module-level iteration count ('defalut' sic)
    start_time_sigma = 0.1
    start_time_ratio = 0.1
    task_info = get_connect_multiple_task_graph(defalut_request_number, taskgraphtype,
                                                tasktype=defalut_tasktype)
    for i in tqdm(range(inter_num)):
        time_dict = taskgraph_exp("graph_iteration_{0}_{1}".format(i + 1, expprefix),
                                  taskgraphtype, start_ratio=start_time_ratio,
                                  start_sigma=start_time_sigma, task_info=task_info,
                                  change_edge_num=True)
        # Copy result lists to avoid aliasing across iterations.
        time_dict_list.append({name: list(time_dict[name]) for name in ALGOR_NAME_LIST})
    avg_time_dict = result_timecal(time_dict_list, inter_num=inter_num)
    df = pd.DataFrame(data=avg_time_dict)
    df.to_csv("{0}_{1}_{2}_{3}.csv".format('graph_iteration', str(inter_num),
                                           'max_time', expprefix), index=True)
def exp_63_graph(taskgraphtype, expprefix):
    """Experiment 6.3: unavailable-interval distribution, start ratio 0.1 / sigma 0.2.

    Same shape as exp_61_graph with a different start-time configuration.
    """
    import pandas as pd
    from tqdm import tqdm
    time_dict_list = []
    inter_num = defalut_inter_num  # module-level iteration count ('defalut' sic)
    start_time_sigma = 0.2
    start_time_ratio = 0.1
    task_info = get_connect_multiple_task_graph(defalut_request_number, taskgraphtype,
                                                tasktype=defalut_tasktype)
    for i in tqdm(range(inter_num)):
        time_dict = taskgraph_exp("graph_iteration_{0}_{1}".format(i + 1, expprefix),
                                  taskgraphtype, start_ratio=start_time_ratio,
                                  start_sigma=start_time_sigma, task_info=task_info,
                                  change_edge_num=True)
        # Copy result lists to avoid aliasing across iterations.
        time_dict_list.append({name: list(time_dict[name]) for name in ALGOR_NAME_LIST})
    avg_time_dict = result_timecal(time_dict_list, inter_num=inter_num)
    df = pd.DataFrame(data=avg_time_dict)
    df.to_csv("{0}_{1}_{2}_{3}.csv".format('graph_iteration', str(inter_num),
                                           'max_time', expprefix), index=True)
# look_ahead window size 0.1-0.46
def exp_8_graph(taskgraphtype, expprefix):
    """Experiment 8: sweep the look-ahead window size (radio 100 .. 460, step 40).

    One random timeline is generated per iteration and reused across the
    window sweep; per-algorithm times are averaged over `inter_num`
    iterations and written to the CSV result.
    """
    import pandas as pd
    from tqdm import tqdm
    time_dict_list = []
    inter_num = defalut_inter_num  # module-level iteration count ('defalut' sic)
    window_radio_lo = 100   # per-mille of the time window
    window_radio_hi = 500
    task_info = get_connect_multiple_task_graph(defalut_request_number, taskgraphtype,
                                                tasktype=defalut_tasktype)
    for i in tqdm(range(inter_num)):
        tmptimedict = {name: [] for name in ALGOR_NAME_LIST}
        # Timeline is fixed per iteration so only the window size varies below.
        decision_time_list, avatimelist = generate_randomtimeline(
            num_edges=defalut_edge_nums,
            start_ratio=defalut_start_ratio, start_sigma=defalut_start_sigma,
            ava_ratio=defalut_avaratio, ratio_sigma=defalut_sigma)
        for window_radio in range(window_radio_lo, window_radio_hi, 40):
            # NOTE(review): 'default_timewindow' is spelled unlike the other
            # 'defalut_*' globals -- confirm this name exists at module level.
            window_size = window_radio / 1000 * default_timewindow
            time_dict = taskgraph_exp("graph_iteration_{0}_{1}".format(i + 1, expprefix),
                                      taskgraphtype, task_info=task_info,
                                      max_edge_num=defalut_edge_nums,
                                      avatimelist=avatimelist,
                                      decision_time_list=decision_time_list,
                                      window_size=window_size,
                                      change_edge_num=False)
            for name in ALGOR_NAME_LIST:
                tmptimedict[name].append(time_dict[name])
        time_dict_list.append(tmptimedict)
    avg_time_dict = result_timecal(time_dict_list, inter_num=inter_num)
    df = pd.DataFrame(data=avg_time_dict)
    df.to_csv("{0}_{1}_{2}_{3}.csv".format('graph_iteration', str(inter_num),
                                           'max_time', expprefix), index=True)
# throughout with the increase of Te 25-30
def exp_9_graph(taskgraphtype, expprefix):
    """Experiment 9: throughput as the time window Te grows (10 .. 30, step 10).

    A new random timeline is generated for each window value; per-algorithm
    times are averaged over `inter_num` iterations and written to the CSV
    result.
    """
    import pandas as pd
    from tqdm import tqdm
    time_dict_list = []
    inter_num = defalut_inter_num  # module-level iteration count ('defalut' sic)
    timewindow_lo = 10
    timewindow_hi = 40
    task_info = get_connect_multiple_task_graph(defalut_request_number, taskgraphtype,
                                                tasktype=defalut_tasktype)
    for i in tqdm(range(inter_num)):
        tmptimedict = {name: [] for name in ALGOR_NAME_LIST}
        for time_window in range(timewindow_lo, timewindow_hi, 10):
            decision_time_list, avatimelist = generate_randomtimeline(
                num_edges=defalut_edge_nums,
                timewindow=time_window, timenumber=10,
                start_ratio=defalut_start_ratio, start_sigma=defalut_start_sigma,
                ava_ratio=defalut_avaratio, ratio_sigma=defalut_sigma)
            time_dict = taskgraph_exp("graph_iteration_{0}_{1}".format(i + 1, expprefix),
                                      taskgraphtype, task_info=task_info,
                                      max_edge_num=defalut_edge_nums,
                                      avatimelist=avatimelist,
                                      decision_time_list=decision_time_list,
                                      change_edge_num=False)
            for name in ALGOR_NAME_LIST:
                tmptimedict[name].append(time_dict[name])
        time_dict_list.append(tmptimedict)
    avg_time_dict = result_timecal(time_dict_list, inter_num=inter_num)
    df = pd.DataFrame(data=avg_time_dict)
    df.to_csv("{0}_{1}_{2}_{3}.csv".format('graph_iteration', str(inter_num),
                                           'max_time', expprefix), index=True)
def exp2():
    """Run experiment 2 on the connected ResNet18/Vgg16/AlexNet task graph."""
    exp_2_graph(['ResNet18', 'Vgg16', 'AlexNet'], 'exp2_graph_Connect')
def exp1():
    """Run experiment 1 on the connected ResNet18/Vgg16/AlexNet task graph."""
    exp_1_graph(['ResNet18', 'Vgg16', 'AlexNet'], 'exp1_graph_Connect')
def exp3():
    """Run experiment 3 on the connected ResNet18/Vgg16/AlexNet task graph."""
    exp_3_graph(['ResNet18', 'Vgg16', 'AlexNet'], 'exp3_graph_Connect')
def exp4():
    """Run all three request-mix variants of experiment 4 (4.1 - 4.3)."""
    models = ['ResNet18', 'Vgg16', 'AlexNet']
    exp_41_graph(models, 'exp41_graph_Connect')
    exp_42_graph(models, 'exp42_graph_Connect')
    exp_43_graph(models, 'exp43_graph_Connect')
def exp5():
    """Run experiment 5 on the connected ResNet18/Vgg16/AlexNet task graph."""
    exp_5_graph(['ResNet18', 'Vgg16', 'AlexNet'], 'exp5_graph_Connect')
def exp6():
    """Run all three start-time variants of experiment 6 (6.1 - 6.3)."""
    models = ['ResNet18', 'Vgg16', 'AlexNet']
    exp_61_graph(models, 'exp61_graph_Connect')
    exp_62_graph(models, 'exp62_graph_Connect')
    exp_63_graph(models, 'exp63_graph_Connect')
def exp7():
    """Run experiment 7 on the connected ResNet18/Vgg16/AlexNet task graph."""
    exp_7_graph(['ResNet18', 'Vgg16', 'AlexNet'], 'exp7_graph_Connect')
def exp8():
    """Run experiment 8 on the connected ResNet18/Vgg16/AlexNet task graph."""
    exp_8_graph(['ResNet18', 'Vgg16', 'AlexNet'], 'exp8_graph_Connect')
def exp9():
    """Run experiment 9 on the connected ResNet18/Vgg16/AlexNet task graph."""
    exp_9_graph(['ResNet18', 'Vgg16', 'AlexNet'], 'exp9_graph_Connect')
if __name__ == "__main__":
    # Only experiment 8 is currently enabled; swap in exp1()..exp9() as needed.
    # (A stray '|' token after this block -- extraction residue -- was removed;
    # it was a syntax error.)
    exp8()
from django.contrib.gis.db.backends.base.adapter import WKTAdapter
from django.contrib.gis.db.backends.base.operations import \
BaseSpatialOperations
from django.contrib.gis.db.backends.utils import SpatialOperator
from django.contrib.gis.db.models import GeometryField, aggregates
from django.db.backends.mysql.operations import DatabaseOperations
from django.utils.functional import cached_property
class MySQLOperations(BaseSpatialOperations, DatabaseOperations):
    """GIS database operations for the MySQL spatial backend.

    Feature availability is gated on the connected server's version, since
    MySQL renamed and added spatial functions across the 5.5 -> 5.7 releases.
    """
    mysql = True
    name = 'mysql'
    # Geometries are sent to MySQL as WKT text rather than binary.
    Adapter = WKTAdapter
    @cached_property
    def geom_func_prefix(self):
        # MySQL >= 5.6.1 namespaces spatial functions with an 'ST_' prefix.
        return '' if self.is_mysql_5_5 else 'ST_'
    @cached_property
    def is_mysql_5_5(self):
        # True when the server predates 5.6.1 (the un-prefixed function era).
        return self.connection.mysql_version < (5, 6, 1)
    @cached_property
    def is_mysql_5_6(self):
        # True when the server predates 5.7.6 (before MBREqual was renamed).
        return self.connection.mysql_version < (5, 7, 6)
    @cached_property
    def uses_invalid_empty_geometry_collection(self):
        # MySQL >= 5.7.5 supports empty geometry collections, but emits them
        # as 'GEOMETRYCOLLECTION()', which is not valid WKT (see converter below).
        return self.connection.mysql_version >= (5, 7, 5)
    @cached_property
    def select(self):
        # SQL template used to fetch geometry columns as WKT.
        return self.geom_func_prefix + 'AsText(%s)'
    @cached_property
    def from_wkb(self):
        return self.geom_func_prefix + 'GeomFromWKB'
    @cached_property
    def from_text(self):
        return self.geom_func_prefix + 'GeomFromText'
    @cached_property
    def gis_operators(self):
        # MBREqual was renamed to MBREquals in MySQL 5.7.6.
        MBREquals = 'MBREqual' if self.is_mysql_5_6 else 'MBREquals'
        return {
            'bbcontains': SpatialOperator(func='MBRContains'),  # For consistency w/PostGIS API
            'bboverlaps': SpatialOperator(func='MBROverlaps'),  # ...
            'contained': SpatialOperator(func='MBRWithin'),  # ...
            'contains': SpatialOperator(func='MBRContains'),
            'disjoint': SpatialOperator(func='MBRDisjoint'),
            'equals': SpatialOperator(func=MBREquals),
            'exact': SpatialOperator(func=MBREquals),
            'intersects': SpatialOperator(func='MBRIntersects'),
            'overlaps': SpatialOperator(func='MBROverlaps'),
            'same_as': SpatialOperator(func=MBREquals),
            'touches': SpatialOperator(func='MBRTouches'),
            'within': SpatialOperator(func='MBRWithin'),
        }
    @cached_property
    def function_names(self):
        # Pre-5.6.1 servers call the length function 'GLength'.
        return {'Length': 'GLength'} if self.is_mysql_5_5 else {}
    # Aggregates MySQL has no spatial support for.
    disallowed_aggregates = (
        aggregates.Collect, aggregates.Extent, aggregates.Extent3D,
        aggregates.MakeLine, aggregates.Union,
    )
    @cached_property
    def unsupported_functions(self):
        """Return the set of GIS function names this server cannot execute."""
        unsupported = {
            'AsGML', 'AsKML', 'AsSVG', 'Azimuth', 'BoundingCircle', 'ForceRHR',
            'LineLocatePoint', 'MakeValid', 'MemSize', 'Perimeter',
            'PointOnSurface', 'Reverse', 'Scale', 'SnapToGrid', 'Transform',
            'Translate',
        }
        if self.connection.mysql_version < (5, 7, 5):
            unsupported.update({'AsGeoJSON', 'GeoHash', 'IsValid'})
        if self.is_mysql_5_5:
            unsupported.update({'Difference', 'Distance', 'Intersection', 'SymDifference', 'Union'})
        return unsupported
    def geo_db_type(self, f):
        # MySQL column types match the geometry type names directly.
        return f.geom_type
    def get_db_converters(self, expression):
        """Append the empty-collection fixup converter on affected servers."""
        converters = super().get_db_converters(expression)
        if isinstance(expression.output_field, GeometryField) and self.uses_invalid_empty_geometry_collection:
            converters.append(self.convert_invalid_empty_geometry_collection)
        return converters
    # https://dev.mysql.com/doc/refman/en/spatial-function-argument-handling.html
    # MySQL 5.7.5 adds support for the empty geometry collections, but they are
    # represented with invalid WKT; rewrite to the standard spelling.
    def convert_invalid_empty_geometry_collection(self, value, expression, connection, context):
        if value == b'GEOMETRYCOLLECTION()':
            return b'GEOMETRYCOLLECTION EMPTY'
        return value
import os
import pandas as pd
import numpy as np
import argparse
def load_datasets(data_dir, tasks):
    """Load the raw splits for each task from *data_dir*.

    GLUE-style tasks are returned as lists of raw TSV lines per split
    (MNLI has two dev sets); other tasks are returned as header-less
    pandas DataFrames for their train/test CSV files.

    Args:
        data_dir: root directory containing one sub-directory per task.
        tasks: iterable of task names.

    Returns:
        dict mapping task name -> dict mapping split name -> lines/DataFrame.
    """
    # TODO: implement all GLUE tasks
    glue_tasks = {"QNLI", "CoLA", "QQP", "SST-2", "RTE", "MRPC", "SNLI", "MNLI", "STS-B"}
    datasets = {}
    for task in tasks:
        dataset = {}
        dirname = os.path.join(data_dir, task)
        if task in glue_tasks:
            # MNLI ships matched and mismatched dev sets.
            if task == "MNLI":
                splits = ["train", "dev_matched", "dev_mismatched"]
            else:
                splits = ["train", "dev"]
            for split in splits:
                filename = os.path.join(dirname, f"{split}.tsv")
                # GLUE files are UTF-8; be explicit instead of platform default.
                with open(filename, 'r', encoding='utf-8') as f:
                    dataset[split] = f.readlines()
        else:
            for split in ["train", "test"]:
                filename = os.path.join(dirname, f"{split}.csv")
                dataset[split] = pd.read_csv(filename, header=None)
        datasets[task] = dataset
    return datasets
def get_label(task, line):
    """Extract the class label from one raw example of *task*.

    For GLUE-style tasks *line* is a raw TSV line; for other tasks it is a
    row (sequence) whose first element is the label.

    Returns the label as found in the file (a string), except STS-B which is
    binarized to 0/1 at the 2.5 threshold.
    """
    glue_tasks = {"QNLI", "CoLA", "SST-2", "QQP", "RTE", "MRPC", "SNLI", "MNLI", "STS-B"}
    if task not in glue_tasks:
        # CSV row: label is the first column.
        return line[0]
    fields = line.strip().split('\t')
    if task == 'CoLA':
        return fields[1]
    if task == 'MRPC':
        return fields[0]
    if task == 'STS-B':
        # Binarize the regression target at 2.5.
        return 0 if float(fields[-1]) < 2.5 else 1
    # QNLI, SST-2, QQP, RTE, SNLI, MNLI all keep the label in the last column.
    return fields[-1]
def main():
    """Sample k-shot train/dev subsets (plus an unlabeled pool) per task and seed.

    For every seed and task: shuffle the training examples, group them by
    label, keep k per label for train, the next k per label for dev, and the
    remainder as unlabeled data. GLUE-style tasks are written as TSV; other
    tasks as CSV. The original dev/test splits are copied through unchanged
    (GLUE dev files are renamed to test in the output).
    """
    parser = argparse.ArgumentParser()
    parser.add_argument("--k", type=int, default=16, help="Training examples for each class.")
    parser.add_argument("--task", type=str, nargs="+",
        default=['SST-2', 'sst-5', 'mr', 'cr', 'mpqa', 'subj', 'trec', 'CoLA', 'MRPC', 'QQP', 'STS-B', 'MNLI', 'SNLI', 'QNLI', 'RTE'],
        help="Task names")
    parser.add_argument("--seed", type=int, nargs="+",
        default=[13, 21, 42, 87, 100],
        help="Random seeds")
    parser.add_argument("--data_dir", type=str, default="data/original_data/glue", help="Path to original data")
    parser.add_argument("--output_dir", type=str, default="data/training_data", help="Output path")
    args = parser.parse_args()
    args.output_dir = os.path.join(args.output_dir, 'k_shot')
    k = args.k
    print(f"K = {k}")
    datasets = load_datasets(args.data_dir, args.task)
    for seed in args.seed:
        print(f"Seed = {seed}")
        for task, dataset in datasets.items():
            # Re-seed per task so each task's subsample is reproducible
            # independently of iteration order.
            np.random.seed(seed=seed)
            print(f"Task = {task}")
            if task in ["MNLI", "MRPC", "QNLI", "QQP", "RTE", "SNLI", "SST-2", "STS-B", "WNLI", "CoLA"]:
                # CoLA has no header row; the others keep their first line as header.
                if task in ["CoLA"]:
                    train_header, train_lines = [], dataset['train']
                else:
                    train_header, train_lines = dataset['train'][0:1], dataset['train'][1:]
                np.random.shuffle(train_lines)
            else:
                train_lines = dataset['train'].values.tolist()
                np.random.shuffle(train_lines)
            task_dir = os.path.join(args.output_dir, task)
            setting_dir = os.path.join(task_dir, f"{k}-{seed}")
            os.makedirs(setting_dir, exist_ok=True)
            # Copy the evaluation splits through unchanged (dev -> test for GLUE).
            if task in ["MNLI", "MRPC", "QNLI", "QQP", "RTE", "SNLI", "SST-2", "STS-B", "WNLI", "CoLA"]:
                for split, lines in dataset.items():
                    if split.startswith("train"):
                        continue
                    split = split.replace('dev', 'test')
                    with open(os.path.join(setting_dir, f"{split}.tsv"), "w") as f:
                        for line in lines:
                            f.write(line)
            else:
                dataset['test'].to_csv(os.path.join(setting_dir, 'test.csv'), header=False, index=False)
            # Group the shuffled training examples by label.
            label_list = {}
            for line in train_lines:
                label = get_label(task, line)
                if label not in label_list:
                    label_list[label] = [line]
                else:
                    label_list[label].append(line)
            if task in ["QNLI", "CoLA", "QQP", "SST-2", "RTE", "MRPC", "SNLI", "MNLI", "STS-B"]:
                # TSV output: first k per label -> train.
                with open(os.path.join(setting_dir, "train.tsv"), "w") as f:
                    for line in train_header:
                        f.write(line)
                    for label in label_list:
                        for line in label_list[label][:k]:
                            f.write(line)
                name = "dev.tsv"
                if task == 'MNLI':
                    name = "dev_matched.tsv"
                # dev_rate = 11 if '10x' in args.mode else 2
                # Next k per label -> dev.
                with open(os.path.join(setting_dir, name), "w") as f:
                    for line in train_header:
                        f.write(line)
                    for label in label_list:
                        for line in label_list[label][k:k*2]:
                            f.write(line)
                # The rest of the train dataset becomes the unlabeled pool.
                with open(os.path.join(setting_dir, 'unlabeled.tsv'), 'w') as f:
                    for line in train_header:
                        f.write(line)
                    for label in label_list:
                        for line in label_list[label][k*2:]:
                            f.write(line)
            else:
                # CSV output: same k / k / remainder partitioning per label.
                new_train = []
                for label in label_list:
                    for line in label_list[label][:k]:
                        new_train.append(line)
                new_train = pd.DataFrame(new_train)
                new_train.to_csv(os.path.join(setting_dir, 'train.csv'), header=False, index=False)
                new_dev = []
                for label in label_list:
                    dev_rate = 2
                    for line in label_list[label][k:k*dev_rate]:
                        new_dev.append(line)
                new_dev = pd.DataFrame(new_dev)
                new_dev.to_csv(os.path.join(setting_dir, 'dev.csv'), header=False, index=False)
                unlabeled = []
                for label in label_list:
                    for line in label_list[label][k*2:]:
                        unlabeled.append(line)
                unlabeled = pd.DataFrame(unlabeled)
                unlabeled.to_csv(os.path.join(setting_dir, 'unlabeled.csv'), header=False, index=False)
if __name__ == '__main__':
    # Stray '|' token after main() (extraction residue) removed -- it was a
    # syntax error.
    main()
from app import app
from flask import request, jsonify, session, make_response
import hashlib, uuid
import os
import binascii
from functools import wraps
import sys
sys.path.insert(0, 'app/services')
from dbservice import *
def jres(s, type='message'):
    """Wrap *s* in a single-key JSON response; the key defaults to 'message'."""
    payload = {type: s}
    return jsonify(payload)
def get_user_from_session(sessionid):
    """Resolve a session id to its user record.

    Raises:
        ValueError: if the session id is unknown/expired, or the session's
            user no longer exists.
    """
    stored_session = load_session(sessionid)
    if stored_session is None:
        # Previously this crashed with a TypeError on subscripting None
        # when given an unknown or expired session id.
        raise ValueError('session not found')
    user = load_user(stored_session['username'])
    if user is None:
        raise ValueError('user not found')
    return user
def insert_non_empty_in_dict(d, l):
    """Copy (key, value) pairs from *l* into *d*, skipping None and empty values."""
    d.update({key: value for key, value in l
              if value is not None and len(value) > 0})
def _utype_check(required_utype):
    """Decorator factory: allow the wrapped view only for *required_utype* users.

    Responds 403 with the caller's actual user type otherwise. Apply after
    @auth_check so the 'sessionid' cookie is known to be valid.
    """
    def decorator(fun):
        @wraps(fun)
        def wrapped(*args, **kwargs):
            user = get_user_from_session(request.cookies['sessionid'])
            if user['utype'] == required_utype:
                return fun(*args, **kwargs)
            return jres('Permission denied: ' + str(user['utype']), type='error'), 403
        return wrapped
    return decorator
# Role gates (previously three copy-pasted decorators with identical bodies).
admin_check = _utype_check('admin')
parent_check = _utype_check('parent')
teacher_check = _utype_check('teacher')
def auth_check(fun):
    """Decorator: require a valid 'sessionid' cookie before running the view.

    Responds with a message when the cookie is missing, and 403 when the
    session is not found in the store.
    """
    @wraps(fun)
    def wrapper(*args, **kwargs):
        try:
            sessionid = request.cookies['sessionid']
        except KeyError:  # narrowed from a bare except
            return jres('authentication required')  # fixed 'authetication' typo
        stored_session = load_session(sessionid)
        if stored_session is not None and stored_session['sessionid'] == sessionid:
            # (removed unused `username` local)
            return fun(*args, **kwargs)
        return jres('Invalid session', type='error'), 403
    return wrapper
@app.route('/')
def index():
    """Health-check endpoint: reports that the API is up."""
    return jres('working', type='status')
@app.route('/login', methods=['POST'])
def authenticate():
    """Verify form credentials and start a session via an httponly cookie.

    NOTE(review): the stored-hash and salt field names below were redacted
    ('<PASSWORD>') in the original source; 'pwdhash'/'salt' are assumed --
    confirm against the dbservice schema. The original comparison line was
    not even syntactically valid Python.
    """
    username = request.form["username"]
    password = request.form["password"]
    user = load_user(username)
    if user is not None:
        pwdhash = user['pwdhash']
        salt = user['salt']
        # NOTE(review): a single salted SHA-256 round is weak for password
        # storage; prefer a KDF (bcrypt/scrypt/argon2 or hashlib.pbkdf2_hmac).
        hashed_password = hashlib.sha256(password.encode('utf-8') + salt.encode('utf-8')).hexdigest()
        if hashed_password == pwdhash:
            sessionid = str(binascii.hexlify(os.urandom(24)).decode())
            store_session(username, sessionid)
            response = make_response(jres('login successful. Welcome ' + username))
            response.set_cookie('sessionid', value=sessionid, httponly=True)  # add secure=True behind HTTPS
            return response
    return jres('Invalid username or password!', type='error')
@app.route('/logout', methods=['GET'])
@auth_check
def logout():
    """Invalidate the current session in both the store and the Flask session."""
    sid = request.cookies['sessionid']
    remove_session(sid)
    session.pop(sid, None)
    return jres('log out successful')
# @app.route('/home', methods=['GET'])
# @auth_check
# def home():
# sessionid = request.cookies['sessionid']
# stored_session = load_session(sessionid)
# username = stored_session['username']
# links = [("/profile", "Profile"), ("/appointments", "Appointments"), ("/notifications", "Notifications"), ("/logout", "Log out")]
# return jsonify(links)
#All
@app.route('/profile', methods=['GET'])
@auth_check
def get_profile():
    """Return the logged-in user's own profile."""
    current = get_user_from_session(request.cookies['sessionid'])
    return jsonify(current['profile'])
# @app.route('/profile/<username>', methods=['GET'])
# @auth_check
# def get_public_profile(username):
# user = load_user(username)
# return jsonify(user['profile'])
@app.route('/edit/profile/', methods=['PUT'])
@auth_check
def put_edit_profile():
    """Update the logged-in user's own profile from form data.

    Admins may additionally change name/surname/birthdate; everyone may
    change address/email/phone. Empty fields are ignored.
    """
    user = get_user_from_session(request.cookies['sessionid'])
    username = user['username']
    is_admin = user['utype'] == 'admin'
    try:
        data = {}
        if is_admin:
            insert_non_empty_in_dict(data, [(x, request.form[x]) for x in ['name', 'surname', 'birthdate']])
        insert_non_empty_in_dict(data, [(x, request.form[x]) for x in ['address', 'email', 'phone']])
        update_user_profile(username, data)
    except Exception:
        # Previously a bare except returning jsonify('profile_edit.html',
        # profile=..., ...) -- a template-rendering leftover that itself
        # raises (jsonify rejects mixed args/kwargs). Return a JSON error.
        return jres('Input error', type='error')
    return jres('profile updated')
@app.route('/edit/profile/<username>', methods=['PUT'])
@auth_check
@admin_check  # was `is_admin = True` with no check: any logged-in user could edit anyone
def put_edit_user_profile(username):
    """Admin-only: update any user's profile from form data; empty fields ignored."""
    try:
        data = {}
        insert_non_empty_in_dict(data, [(x, request.form[x]) for x in ['name', 'surname', 'birthdate']])
        insert_non_empty_in_dict(data, [(x, request.form[x]) for x in ['address', 'email', 'phone']])
        update_user_profile(username, data)
    except Exception:
        # The old handler referenced an undefined `user` here (NameError) and
        # called jsonify with mixed args/kwargs; return a JSON error instead.
        return jres('Input error', type='error')
    return jres('profile updated')
@app.route('/appointments', methods=['GET'])
@auth_check
def get_appointments():
    """List all appointments involving the current user's email address."""
    current = get_user_from_session(request.cookies['sessionid'])
    found = load_appointments(current['profile']['email'])
    if found is None:
        return jres('no appointments found'), 404
    return jsonify(list(found))
@app.route('/appointment', methods=['POST'])
@auth_check
def post_appointment():
    """Create a new appointment sent by the logged-in user."""
    form = request.form
    current = get_user_from_session(request.cookies['sessionid'])
    store_appointment(sender=current['profile']['email'],
                      receiver=form['receiver'],
                      date=form['date'], topic=form['topic'], time=form['time'])
    return jres('appointment posted correctly')
@app.route('/appointment/<id>', methods=['GET'])
@auth_check
def get_appointment(id):
    """Return one appointment by number, scoped to the current user's email."""
    user = get_user_from_session(request.cookies['sessionid'])
    appointment = load_appointment(number=int(id), email=user['profile']['email'])
    if appointment is None:
        # Bug fix: 404 was passed as jres's `type` kwarg, producing a
        # {404: ...} payload with HTTP status 200. Return a real 404.
        return jres('appointment not found', type='error'), 404
    return jsonify(appointment)
@app.route('/edit/appointment/<id>', methods=['PUT'])
@auth_check
def put_appointment(id):
    """Update an existing appointment the current user participates in."""
    current = get_user_from_session(request.cookies['sessionid'])
    form = request.form
    email = current['profile']['email']
    if load_appointment(number=int(id), email=email) is None:
        return jres('you cannot edit this appointment', type='error')
    edit_appointment(number=int(id), sender=email, receiver=form['receiver'],
                     date=form['date'], topic=form['topic'], time=form['time'])
    return jres('appointment updated')
@app.route('/appointment/<id>', methods=['DELETE'])
@auth_check
def delete_appointment(id):
    """Remove an appointment; only its original sender may delete it."""
    current = get_user_from_session(request.cookies['sessionid'])
    email = current['profile']['email']
    appt = load_appointment(number=int(id), email=email)
    if appt is None or appt['sender'] != email:
        return jres('you cannot remove this appointment', type='error')
    remove_appointment(number=int(id))
    return jres('appointment removed')
@app.route('/notifications', methods=['GET'])
@auth_check
def get_notifications():
    """List all notifications."""
    items = load_notifications()
    if items is None:
        return jres('no notifications found'), 404
    return jsonify(list(items))
@app.route('/notification/<id>', methods=['GET'])
@auth_check
def get_notification(id):
    """Return a single notification by id, or a 404 error response."""
    notification = load_notification(id)
    if notification is None:
        # BUG FIX: corrected the misspelled user-facing message 'notificatin'.
        return jres('notification not found', type='error'), 404
    return jsonify(notification)
##############################################################################
#Parents
##############################################################################
@app.route('/children', methods=['GET'])
@auth_check
@parent_check
def get_children():
    """List the logged-in parent's children, stripping internal DB ids."""
    parent = get_user_from_session(request.cookies['sessionid'])
    kids = load_children(parent)
    if kids is None:
        return jres('No children found'), 404
    for kid in kids:
        kid.pop("_id", None)
    return jsonify(kids)
@app.route('/child/<id>/profile', methods=['GET'])
@auth_check
@parent_check
def get_child_profile(id):
    """Return the profile of one of the caller's children."""
    parent = get_user_from_session(request.cookies['sessionid'])
    child = load_child(parent, id)
    if child is None:
        return jres('No child found'), 404
    return jsonify(child['profile'])
@app.route('/child/<id>/profile', methods=['PUT'])
@auth_check
@parent_check
def put_child_profile(id):
    """Update address/email/phone on a child's profile (parent only)."""
    user = get_user_from_session(request.cookies['sessionid'])
    child = load_child(user, id)
    if child is None:
        # BUG FIX: a missing child previously fell into the except handler via
        # TypeError on child['username'] and was misreported as 'Input error'.
        return jres('No child found'), 404
    try:
        data = {}
        # Only non-empty form values are copied into the update payload.
        insert_non_empty_in_dict(data, [(x, request.form[x]) for x in ['address', 'email', 'phone']])
        update_user_profile(child['username'], data)
    except Exception:
        # Narrowed from a bare `except:` so SystemExit/KeyboardInterrupt
        # are no longer swallowed.
        return jres('Input error', type='error')
    return jres('profile updated')
@app.route('/child/<name>/grades', methods=['GET'])
@auth_check
@parent_check
def get_child_grades(name):
    """Return a child's grades, only if the caller is one of its parents."""
    user = get_user_from_session(request.cookies['sessionid'])
    child = load_user(name)
    # BUG FIX: failing the parent check (or an unknown child, which crashed
    # with TypeError) previously returned None implicitly, making Flask 500.
    if child is None or int(user['number']) not in child['parents']:
        return jres('you cannot view these grades', type='error'), 404
    grades = load_child_grades(child['username'], None)
    if grades is None:
        return jres('No grades found'), 404
    for g in grades:
        g.pop('_id', None)
    return jsonify(grades)
@app.route('/child/<name>/classes', methods=['GET'])
@auth_check
@parent_check
def get_child_classes(name):
    """Return a child's classes, only if the caller is one of its parents."""
    user = get_user_from_session(request.cookies['sessionid'])
    child = load_user(name)
    # BUG FIX: failing the parent check (or an unknown child) previously
    # returned None implicitly, which makes Flask raise a 500.
    if child is None or int(user['number']) not in child['parents']:
        return jres('you cannot view these classes', type='error'), 404
    classes = load_child_classes(child['username'])
    if classes is None:
        return jres('No classes found'), 404
    for c in classes:
        c.pop('_id', None)
    return jsonify(classes)
###################################################################
###################################################################
###################################################################
@app.route('/payments', methods=['GET'])
@auth_check
@parent_check
def get_payments_all():
    """Aggregate every payment (any status) across the caller's children."""
    parent = get_user_from_session(request.cookies['sessionid'])
    collected = []
    for child in parent.get('children', []):
        child_payments = load_payments(child, None)
        if child_payments:
            collected.extend(child_payments)
    if not collected:
        return jres('No payments found'), 404
    for payment in collected:
        payment.pop('_id', None)
    return jsonify(collected)
@app.route('/payments/history', methods=['GET'])
@auth_check
@parent_check
def get_payments_history():
    """Aggregate completed payments across the caller's children."""
    parent = get_user_from_session(request.cookies['sessionid'])
    collected = []
    for child in parent.get('children', []):
        child_payments = load_payments(child, status='completed')
        if child_payments:
            collected.extend(child_payments)
    if not collected:
        return jres('No payments found'), 404
    for payment in collected:
        payment.pop('_id', None)
    return jsonify(collected)
@app.route('/payments/due', methods=['GET'])
@auth_check
@parent_check
def get_payments_due():
    """Aggregate still-due payments across the caller's children."""
    parent = get_user_from_session(request.cookies['sessionid'])
    collected = []
    for child in parent.get('children', []):
        child_payments = load_payments(child, status='due')
        if child_payments:
            collected.extend(child_payments)
    if not collected:
        return jres('No payments found'), 404
    for payment in collected:
        payment.pop('_id', None)
    return jsonify(collected)
@app.route('/child/<childid>/payment/<paymentid>', methods=['GET'])
@auth_check
@parent_check
def get_payment(childid, paymentid):
    """Return one payment belonging to one of the caller's children."""
    user = get_user_from_session(request.cookies['sessionid'])
    # BUG FIX: a child that does not belong to the caller previously fell off
    # the end of the function (implicit None -> Flask 500); mirror the
    # explicit response the POST handler uses.
    if 'children' not in user or int(childid) not in user['children']:
        return jres('Child not found. Are they yours?', 404)
    payment = load_user_payment(childid, paymentid)
    if payment is None:
        return jres('No payment found'), 404
    payment.pop('_id', None)
    return jsonify(payment)
@app.route('/child/<childid>/payment/<paymentid>', methods=['POST'])
@auth_check
@parent_check
def post_payment(childid, paymentid):
    """Pay a due payment for one of the logged-in parent's children."""
    user = get_user_from_session(request.cookies['sessionid'])
    payment = {}
    if 'children' in user and int(childid) in user['children']:
        payment = load_user_payment(childid, paymentid)
    else:
        return jres('Child not found. Are they yours?', 404)
    if payment is None:
        return jres('No payment found'), 404
    try:
        payment = pay_payment(childid, paymentid)
        if payment is None:
            return jres('Payment missing', 404)
    except Exception as e:
        # BUG FIX: the error response was built but never returned, so a
        # failed payment still reported success to the caller.
        return jres('Payment was not finalized ' + str(e), 405)
    return jres('payment was successful ' + str(payment), 200)
#Teachers
@app.route('/classes', methods=['GET'])
@auth_check
@teacher_check
def get_classes():
    """List the classes taught by the logged-in teacher."""
    teacher = get_user_from_session(request.cookies['sessionid'])
    classes = find_classes(teacher['username'])
    if classes is None:
        return jres('No class found'), 404
    for entry in classes:
        entry.pop('_id', None)
    return jsonify(classes)
@app.route('/class/<id>', methods=['GET'])
@auth_check
@teacher_check
def get_class(id):
    """Return one of the logged-in teacher's classes by id."""
    teacher = get_user_from_session(request.cookies['sessionid'])
    the_class = find_class(teacher['username'], id)
    if the_class is None:
        return jres('No class found'), 404
    the_class.pop('_id', None)
    return jsonify(the_class)
@app.route('/class/<id>/grades', methods=['GET'])
@auth_check
@teacher_check
def get_class_grades(id):
    """List all grades recorded in one of the teacher's classes."""
    teacher = get_user_from_session(request.cookies['sessionid'])
    the_class = find_class(teacher['username'], id)
    if the_class is None:
        return jres('Wrong class chosen'), 404
    grades = get_grades_in_class(the_class['name'])
    if grades is None:
        return jres('No grades found'), 404
    for grade in grades:
        grade.pop('_id', None)
    return jsonify(grades)
@app.route('/class/<id>/grade', methods=['POST'])
@auth_check
@teacher_check
def post_class_grade(id):
    """Store a grade for a student in one of the teacher's classes."""
    user = get_user_from_session(request.cookies['sessionid'])
    _class = find_class(user['username'], id)
    if _class is None:
        return jres('No class found'), 404
    try:
        store_grade(_class['name'], request.form['grade'], request.form['student'])
    except Exception:
        # Narrowed from a bare `except:` clause.
        return jres('Error storing the grade', type='error')
    return jres('Grade stored successfully', 200)
@app.route('/class/<class_id>/grade/<grade_id>', methods=['PUT'])
@auth_check
@teacher_check
def put_class_grade(class_id, grade_id):
    """Update a grade in one of the teacher's classes."""
    user = get_user_from_session(request.cookies['sessionid'])
    # BUG FIX: every other handler passes user['username'] to find_class();
    # passing the whole user dict meant the lookup could never match here.
    _class = find_class(user['username'], class_id)
    if _class is None:
        return jres('No class found'), 404
    try:
        update_grade(user, class_id, grade_id, request.form['mark'], request.form['childid'], request.form['description'])
    except Exception:
        # Narrowed from a bare `except:` clause.
        return jres('Error storing the grade', type='error')
    return jsonify(_class['grades'])
@app.route('/class/<class_id>/grade/<grade_id>', methods=['DELETE'])
@auth_check
@teacher_check
def delete_grade_grade(class_id, grade_id):
    """Delete a grade from a class and return the remaining grades."""
    user = get_user_from_session(request.cookies['sessionid'])
    _class = find_class(user['username'], class_id)
    if _class is None:
        return jres('No class found'), 404
    try:
        # Only delete when the grade number actually exists in this class.
        for g in get_grades_in_class(_class['name']):
            if int(g["number"]) == int(grade_id):
                delete_grade(_class['name'], grade_id)
    except Exception:
        # Narrowed from a bare `except:` clause.
        return jres('Error deleting the grade', type='error')
    grades = get_grades_in_class(_class['name'])
    for g in grades:
        g.pop('_id', None)
    return jsonify(grades)
#########################################################################
#########################################################################
#########################################################################
#Admins
@app.route('/users', methods=['GET'])
@auth_check
@admin_check
def get_users():
    """Return every user account (admin only)."""
    all_users = load_all_users()
    if all_users is None:
        return jres('No user found', type='error')
    return jsonify(all_users)
@app.route('/user', methods=['POST'])
@auth_check
@admin_check
def post_user():
    """Create a user account with a salted SHA-256 password hash (admin only)."""
    try:
        username = request.form["username"]
        user = load_user(username)
        if user is not None:
            return jres('username already exists', type='error')
        password = request.form["password"]
        utype = request.form["utype"]  # renamed: avoid shadowing builtin type()
        # Fresh random salt per account; hash = sha256(password + salt).
        salt = str(binascii.hexlify(os.urandom(10)).decode())
        pwdhash = hashlib.sha256(password.encode('utf-8') + salt.encode('utf-8')).hexdigest()
        insert_user(username, {'pwdhash': pwdhash, 'salt': salt, 'utype': utype})
    except Exception as e:
        return jres('Error creating user: ' + str(e.args), type='error')
    # BUG FIX: corrected misspelled 'successfuly'.
    return jres('User created successfully')
@app.route('/user/<id>', methods=['GET'])
@auth_check
@admin_check
def get_user(id):
    """Fetch a single user account by id (admin only)."""
    found = load_user_by_id(id)
    if found is None:
        return jres('User not found', type='error')
    return jsonify(found)
@app.route('/user/<id>', methods=['PUT'])
@auth_check
@admin_check
def put_user(id):
    """Replace a user's credentials and account type (admin only)."""
    user = load_user_by_id(id)
    if user is None:
        return jres('No user found', type='error')
    try:
        username = request.form["username"]
        password = request.form["password"]
        utype = request.form["utype"]  # renamed: avoid shadowing builtin type()
        salt = str(binascii.hexlify(os.urandom(10)).decode())
        pwdhash = hashlib.sha256(password.encode('utf-8') + salt.encode('utf-8')).hexdigest()
        # NOTE(review): this writes a record keyed by the *form's* username
        # rather than updating the user looked up by <id> — confirm that
        # insert_user performs an upsert on username.
        insert_user(username, {'pwdhash': pwdhash, 'salt': salt, 'utype': utype})
    except Exception as e:
        # BUG FIX: the error message said 'creating' in the update handler.
        return jres('Error updating user: ' + str(e.args), type='error')
    # BUG FIX: corrected misspelled 'successfuly'.
    return jres('User updated successfully')
@app.route('/user/<id>/profile', methods=['PUT'])
@auth_check
@admin_check
def put_user_profile(id):
    """Update an arbitrary user's profile fields (admin only)."""
    user = load_user_by_id(id)
    if user is None:
        return jres('No user found', type='error')
    try:
        update_user_profile(user['username'], request.form)
    except Exception as e:
        return jres('Error updating profile: ' + str(e.args), type='error')
    # BUG FIX: the success message wrongly claimed a user was created.
    return jres('Profile updated successfully')
@app.route('/user/<id>', methods=['DELETE'])
@auth_check
@admin_check
def delete_user(id):
    """Delete a user account by id (admin only)."""
    users = load_user_by_id(id)
    if users is None:
        return jres('No user found', type='error')
    if remove_user(id):
        # BUG FIX: corrected misspelled 'succes'.
        return jres('success', type='message')
    else:
        return jres('failed', type='error')
@app.route('/admin/classes', methods=['GET'])
@auth_check
@admin_check
def get_all_classes():
    """List every class in the school (admin only)."""
    classes = load_all_classes()
    if classes is None:
        return jres('No class found'), 404
    for entry in classes:
        entry.pop('_id', None)
    return jsonify(classes)
@app.route('/admin/class', methods=['POST'])
@auth_check
@admin_check
def post_class():
    """Create a class from a JSON body (teacher, students, days, hours, class)."""
    try:
        req_data = request.get_json()
        teacher = req_data['teacher']
        days = req_data['days']
        classname = req_data['class']
        students = req_data['students']
        hours = req_data['hours']
        insert_class(teacher, students, days, hours, classname)
    except Exception as e:
        # BUG FIX: the old handler interpolated `students` and `days` into the
        # error message; when the exception was a missing key those names were
        # unbound, so the handler itself raised NameError (HTTP 500). It also
        # leaked request internals to the client.
        return jres('Error: ' + str(e), type='error'), 404
    return jres('success')
@app.route('/admin/class/<name>', methods=['GET'])
@auth_check
@admin_check
def get_class_admin(name):
    """Look up any class by its name (admin only)."""
    found = get_class_by_name(name)
    if found is None:
        return jres('No class found'), 404
    found.pop('_id', None)
    return jsonify(found)
@app.route('/class/<id>', methods=['PUT'])
@auth_check
@admin_check
def put_class(id):
    # TODO: not implemented — returns a placeholder template name instead of
    # updating the class.
    return jsonify('class.html')

@app.route('/class/<id>', methods=['DELETE'])
@auth_check
@admin_check
def delete_class(id):
    # TODO: not implemented — returns a placeholder template name instead of
    # deleting the class.
    return jsonify('class.html')
@app.route('/admin/teachers', methods=['GET'])
@auth_check
@admin_check
def get_teachers():
    """List every teacher account (admin only)."""
    users = load_users_by_type('teacher')
    if users is None:
        return jres('No teacher was found')
    for entry in users:
        entry.pop('_id', None)
    return jsonify(users)

@app.route('/admin/students', methods=['GET'])
@auth_check
@admin_check
def get_students():
    """List every student ('child') account (admin only)."""
    users = load_users_by_type('child')
    if users is None:
        return jres('No child was found')
    for entry in users:
        entry.pop('_id', None)
    return jsonify(users)

@app.route('/admin/admins', methods=['GET'])
@auth_check
@admin_check
def get_admins():
    """List every admin account (admin only)."""
    users = load_users_by_type('admin')
    if users is None:
        return jres('No admin was found')
    for entry in users:
        entry.pop('_id', None)
    return jsonify(users)

@app.route('/admin/parents', methods=['GET'])
@auth_check
@admin_check
def get_parents():
    """List every parent account (admin only)."""
    users = load_users_by_type('parent')
    if users is None:
        return jres('No parent was found')
    for entry in users:
        entry.pop('_id', None)
    return jsonify(users)
@app.route('/parent/<parent_id>/children', methods=['GET'])
@auth_check
@admin_check
def get_parent_children(parent_id):
    """List the children of an arbitrary parent (admin only)."""
    parent = load_user_by_id(parent_id)
    if parent is None:
        return jres('No parent was found', 404)
    children = load_children(parent)
    if children is None:
        return jres('No child was found', 404)
    for child in children:
        child.pop('_id', None)
    return jsonify(children)
@app.route('/parent/<parent_id>/child', methods=['POST'])
@auth_check
@admin_check
def post_parent_child(parent_id):
    """Create a child account linked to an existing parent (admin only)."""
    req_data = request.get_json()
    username = req_data['username']
    data = {}
    profile = {
        "birthdate": req_data['birthdate'],
        "name": req_data['name'],
        "surname": req_data['surname']
    }
    password = req_data['password']
    # BUG FIX: the salt was a hard-coded placeholder string and the stored
    # hash referenced an undefined name (a syntax error); generate a random
    # salt and store the computed hash, matching post_user above.
    salt = str(binascii.hexlify(os.urandom(10)).decode())
    hashed_password = hashlib.sha256(password.encode('utf-8') + salt.encode('utf-8')).hexdigest()
    parent = load_user_by_id(parent_id)
    if parent is None:
        return jres('No parent was found', 404)
    data['profile'] = profile
    data['pwdhash'] = hashed_password
    data['pwdsalt'] = salt
    data['parents'] = [parent_id]
    try:
        insert_child_with_parent(username, data, parent)
    except Exception as e:
        return jres('Error: ' + str(parent) + " === " + str(data) + " ::: " + str(e), type='error'), 404
    return jres('Student ' + str(username) + ' added correctly')
@app.route('/student/<name>/classes', methods=['GET'])
@auth_check
@admin_check
def get_student_class(name):
    """List the classes a student is enrolled in (admin only)."""
    user = get_user_from_session(request.cookies['sessionid'])
    child = load_user(name)
    # BUG FIX: an unknown username crashed with TypeError on child['utype'],
    # and a non-student fell off the end (implicit None -> Flask 500).
    if child is None or child['utype'] != 'child':
        return jres('No such student'), 404
    classes = load_child_classes(child['username'])
    if classes is None:
        return jres('No classes found'), 404
    for c in classes:
        c.pop('_id', None)
    return jsonify(classes)
@app.route('/class/<class_id>/student/<student_id>', methods=['PUT'])
@auth_check
@admin_check
def put_student_class(class_id, student_id):
    """Enrol a student into a class (admin only)."""
    child = load_user_by_id(student_id)
    _class = find_class_by_id(class_id)
    if child is None or _class is None:
        # BUG FIX: corrected misspelled 'Paramenters'.
        return jres('Parameters not found', 404)
    if child['utype'] == 'child' and _class['students']:
        c = add_student_to_class(class_id, child['username'])
    else:
        # NOTE(review): dumping full child/class records leaks internal data
        # in the response — consider a terser message.
        v = child['username'] in _class['students']
        return jres('Wrong parameters: child' + str(child) + ' class: ' + str(_class) + ' bool: ' + str(v), 404)
    if c is not None:
        c.pop('_id', None)
    return jsonify(c)
@app.route('/student/<student_id>/payments', methods=['GET'])
@auth_check
@admin_check
def get_student_payments(student_id):
    """List every payment recorded for a student (admin only)."""
    payments = load_payments(student_id, None)
    if payments is None:
        return jres('No payment was found')
    for entry in payments:
        entry.pop('_id', None)
    return jsonify(payments)
@app.route('/student/<student_id>/payment', methods=['POST'])
@auth_check
@admin_check
def post_student_payment(student_id):
    """Create a new payment for a student from a JSON body (admin only)."""
    body = request.get_json()
    due_date = body['dueDate']
    amount = body['amount']
    try:
        create_payment_for_student(student_id, due_date, amount)
    except Exception as exc:
        return jres('Error storing the payment ' + str(exc), type='error')
    return jres('Payment saved correctly', 200)
@app.route('/student/<student_id>/payment/<id>', methods=['GET'])
@auth_check
@admin_check
def get_student_payment(student_id, id):
    """Fetch one payment of a student (admin only)."""
    payment = load_user_payment(student_id, id)
    # BUG FIX: the None-check referenced the undefined name `payments`, so a
    # missing payment raised NameError instead of returning this message.
    if payment is None:
        return jres('No such payment exists')
    payment.pop('_id', None)
    return jsonify(payment)
@app.route('/student/<student_id>/payment/<id>', methods=['PUT'])
@auth_check
@admin_check
def put_student_payment(student_id, id):
    # TODO: not implemented — returns a placeholder template name only.
    return jsonify('student.html')
@app.route('/student/<student_id>/payment/<id>', methods=['DELETE'])
@auth_check
@admin_check
def delete_student_payment(student_id, id):
    """Delete one of a student's payments (admin only)."""
    try:
        delete_payment_for_user(int(id), int(student_id))
    except Exception as exc:
        return jres('Error deleting the payment ' + str(exc), type='error')
    return jres('Payment deleted correctly', 200)
@app.route('/parent/<parent_id>/child/<id>', methods=['GET'])
@auth_check
@admin_check
def get_parent_child(parent_id, id):
    # TODO: not implemented — placeholder response only.
    return jsonify('parent.html')

@app.route('/parent/<parent_id>/child/<id>', methods=['PUT'])
@auth_check
@admin_check
def put_parent_child(parent_id, id):
    # TODO: not implemented — placeholder response only.
    return jsonify('parent.html')

@app.route('/parent/<parent_id>/child/<id>', methods=['DELETE'])
@auth_check
@admin_check
def delete_parent_child(parent_id, id):
    # TODO: not implemented — placeholder response only.
    return jsonify('parent.html')

@app.route('/teacher/<teacher_id>/class', methods=['POST'])
@auth_check
@admin_check
def post_teacher_class(teacher_id):
    # TODO: not implemented — placeholder response only.
    return jsonify('teacher.html')

@app.route('/notification', methods=['POST'])
@auth_check
@admin_check
def post_notification():
    # TODO: not implemented — placeholder response only.
    return jsonify('notification.html')

@app.route('/notification/<user_id>', methods=['POST'])
@auth_check
@admin_check
def post_notification_to_user(user_id):
    # TODO: not implemented — placeholder response only.
    return jsonify('notification.html')
|
<filename>run_all_benchmark_functions.py
import sys
sys.path.insert(0,'..')
sys.path.insert(0,'../..')
from bayes_opt import BayesOpt,BayesOpt_KnownOptimumValue
import numpy as np
#from bayes_opt import auxiliary_functions
from bayes_opt import functions
from bayes_opt import utilities
import warnings
#from bayes_opt import acquisition_maximization
import sys
import itertools
import matplotlib.pyplot as plt
# Fix the global RNG seed so repeated benchmark runs are comparable.
np.random.seed(6789)
warnings.filterwarnings("ignore")
counter = 0
# Benchmark objective functions; uncomment a line to include that benchmark.
myfunction_list=[]
#myfunction_list.append(functions.sincos())
#myfunction_list.append(functions.branin())
#myfunction_list.append(functions.hartman_3d())
#myfunction_list.append(functions.ackley(input_dim=5))
myfunction_list.append(functions.alpine1(input_dim=5))
#myfunction_list.append(functions.hartman_6d())
#myfunction_list.append(functions.gSobol(a=np.array([1,1,1,1,1])))
#myfunction_list.append(functions.gSobol(a=np.array([1,1,1,1,1,1,1,1,1,1])))
# Acquisition strategies; each entry pairs a name with an IsTGP flag that
# selects the transformed-GP surrogate. Only appended entries are run.
acq_type_list=[]
temp={}
temp['name']='erm' # expected regret minimization
temp['IsTGP']=0 # recommended to use tgp for ERM
acq_type_list.append(temp)
temp={}
temp['name']='cbm' # confidence bound minimization
temp['IsTGP']=1 # recommended to use tgp for CBM
#acq_type_list.append(temp)
#temp={}
#temp['name']='kov_mes' # MES+f*
#temp['IsTGP']=0 # we can try 'tgp'
#acq_type_list.append(temp)
temp={}
temp['name']='kov_ei' # this is EI + f*
temp['IsTGP']=0 # we can try 'tgp' by setting it =1
#acq_type_list.append(temp)
temp={}
temp['name']='ucb' # vanilla UCB
temp['IsTGP']=0 # we can try 'tgp' by setting it =1
#acq_type_list.append(temp)
temp={}
temp['name']='ei' # vanilla EI
temp['IsTGP']=0 # we can try 'tgp' by setting it =1
#acq_type_list.append(temp)
temp={}
temp['name']='random' # vanilla EI
temp['IsTGP']=0 # we can try 'tgp' by setting it =1
#acq_type_list.append(temp)
# One shared figure collects the regret curve of every (function, acq) pair.
fig=plt.figure()
color_list=['r','b','k','m','c','g','o']
marker_list=['s','x','o','v','^','>','<']
# Run every (benchmark function, acquisition strategy) pair nRepeat times,
# then plot the mean simple regret with standard-error bars.
for idx, (myfunction, acq_type,) in enumerate(itertools.product(myfunction_list, acq_type_list)):
    print("=====================func:", myfunction.name)
    print("==================acquisition type", acq_type)
    IsTGP = acq_type['IsTGP']
    acq_name = acq_type['name']
    nRepeat = 10
    ybest = [0]*nRepeat
    MyTime = [0]*nRepeat
    MyOptTime = [0]*nRepeat
    marker = [0]*nRepeat
    bo = [0]*nRepeat
    # BUG FIX: removed a stray `[0]*nRepeat` expression statement that built a
    # list and discarded it (dead code).
    for ii in range(nRepeat):
        # Strategies exploiting the known optimum f* (kov_*, erm, cbm) use the
        # BayesOpt_KnownOptimumValue driver; the rest use vanilla BayesOpt.
        if 'kov' in acq_name or acq_name == 'erm' or acq_name == 'cbm':
            bo[ii] = BayesOpt_KnownOptimumValue(myfunction.func, myfunction.bounds, myfunction.fstar,
                                                acq_name, IsTGP, verbose=1)
        else:
            bo[ii] = BayesOpt(myfunction.func, myfunction.bounds, acq_name, verbose=1)
        ybest[ii], MyTime[ii] = utilities.run_experiment(bo[ii], n_init=3*myfunction.input_dim,
                                                         NN=10*myfunction.input_dim, runid=ii)
        MyOptTime[ii] = bo[ii].time_opt
        print("ii={} BFV={:.3f}".format(ii, myfunction.ismax*np.max(ybest[ii])))
    Score = {}
    Score["ybest"] = ybest
    Score["MyTime"] = MyTime
    Score["MyOptTime"] = MyOptTime
    utilities.print_result_sequential(bo, myfunction, Score, acq_type)
    ## plot the result
    # process the result: simple regret of the best-so-far value per iteration
    y_best_sofar = [0]*len(bo)
    for uu, mybo in enumerate(bo):
        y_best_sofar[uu] = [(myfunction.fstar - np.max(mybo.Y_ori[:ii+1])) for ii in range(len(mybo.Y_ori))]
        y_best_sofar[uu] = y_best_sofar[uu][3*myfunction.input_dim:]  # remove the random phase for plotting purpose
    y_best_sofar = np.asarray(y_best_sofar)
    myxaxis = range(y_best_sofar.shape[1])
    plt.errorbar(myxaxis, np.mean(y_best_sofar, axis=0), np.std(y_best_sofar, axis=0)/np.sqrt(nRepeat),
                 label=acq_type['name'], color=color_list[idx], marker=marker_list[idx])
plt.ylabel("Simple Regret", fontsize=14)
plt.xlabel("Iterations", fontsize=14)
plt.legend(prop={'size': 14})
strTitle = "{:s} D={:d}".format(myfunction.name, myfunction.input_dim)
plt.title(strTitle, fontsize=18)
|
<filename>vendor/packages/translate-toolkit/translate/convert/test_po2csv.py
#!/usr/bin/env python
from translate.convert import po2csv
from translate.convert import csv2po
from translate.convert import test_convert
from translate.misc import wStringIO
from translate.storage import po
from translate.storage import csvl10n
from translate.storage.test_base import headerless_len, first_translatable
class TestPO2CSV:
    """Round-trip conversion tests between PO and CSV stores, all in memory."""

    def po2csv(self, posource):
        """helper that converts po source to csv source without requiring files"""
        inputfile = wStringIO.StringIO(posource)
        inputpo = po.pofile(inputfile)
        convertor = po2csv.po2csv()
        outputcsv = convertor.convertstore(inputpo)
        return outputcsv

    def csv2po(self, csvsource, template=None):
        """helper that converts csv source to po source without requiring files"""
        inputfile = wStringIO.StringIO(csvsource)
        inputcsv = csvl10n.csvfile(inputfile)
        if template:
            templatefile = wStringIO.StringIO(template)
            inputpot = po.pofile(templatefile)
        else:
            inputpot = None
        convertor = csv2po.csv2po(templatepo=inputpot)
        outputpo = convertor.convertstore(inputcsv)
        return outputpo

    def singleelement(self, storage):
        """checks that the pofile contains a single non-header element, and returns it"""
        assert headerless_len(storage.units) == 1
        return first_translatable(storage)

    def test_simpleentity(self):
        """checks that a simple csv entry definition converts properly to a po entry"""
        minipo = r'''#: term.cpp
msgid "Term"
msgstr "asdf"'''
        csvfile = self.po2csv(minipo)
        unit = self.singleelement(csvfile)
        assert unit.location == "term.cpp"
        assert unit.source == "Term"
        assert unit.target == "asdf"

    def test_multiline(self):
        """tests multiline po entries"""
        minipo = r'''msgid "First part "
"and extra"
msgstr "Eerste deel "
"en ekstra"'''
        csvfile = self.po2csv(minipo)
        unit = self.singleelement(csvfile)
        assert unit.source == "First part and extra"
        assert unit.target == "Eerste deel en ekstra"

    def test_escapednewlines(self):
        """Test the escaping of newlines"""
        minipo = r'''msgid "First line\nSecond line"
msgstr "Eerste lyn\nTweede lyn"
'''
        csvfile = self.po2csv(minipo)
        unit = self.singleelement(csvfile)
        assert unit.source == "First line\nSecond line"
        assert unit.target == "Eerste lyn\nTweede lyn"
        pofile = self.csv2po(str(csvfile))
        unit = self.singleelement(pofile)
        assert unit.source == "First line\nSecond line"
        assert unit.target == "Eerste lyn\nTweede lyn"

    def test_escapedtabs(self):
        """Test the escaping of tabs"""
        minipo = r'''msgid "First column\tSecond column"
msgstr "Eerste kolom\tTweede kolom"
'''
        csvfile = self.po2csv(minipo)
        unit = self.singleelement(csvfile)
        assert unit.source == "First column\tSecond column"
        assert unit.target == "Eerste kolom\tTweede kolom"
        assert csvfile.findunit("First column\tSecond column").target == "Eerste kolom\tTweede kolom"

    def test_escapedquotes(self):
        """Test the escaping of quotes (and slash)"""
        minipo = r'''msgid "Hello \"Everyone\""
msgstr "Good day \"All\""

msgid "Use \\\"."
msgstr "Gebruik \\\"."
'''
        csvfile = self.po2csv(minipo)
        assert csvfile.findunit('Hello "Everyone"').target == 'Good day "All"'
        assert csvfile.findunit('Use \\".').target == 'Gebruik \\".'

    def test_escapedescape(self):
        """Test the escaping of pure escapes is unaffected"""
        minipo = r'''msgid "Find\\Options"
msgstr "Vind\\Opsies"
'''
        csvfile = self.po2csv(minipo)
        # BUG FIX: `print x` statements are Python-2-only syntax; the
        # parenthesized form behaves identically on both 2 and 3.
        print(minipo)
        print(csvfile)
        assert csvfile.findunit(r'Find\Options').target == r'Vind\Opsies'

    def test_singlequotes(self):
        """Tests that single quotes are preserved correctly"""
        minipo = '''msgid "source 'source'"\nmsgstr "target 'target'"\n'''
        csvfile = self.po2csv(minipo)
        print(str(csvfile))
        assert csvfile.findunit("source 'source'").target == "target 'target'"
        # Make sure we don't mess with start quotes until writing
        minipo = '''msgid "'source'"\nmsgstr "'target'"\n'''
        csvfile = self.po2csv(minipo)
        print(str(csvfile))
        assert csvfile.findunit(r"'source'").target == r"'target'"
        # TODO check that we escape on writing not in the internal representation

    def test_empties(self):
        """Tests that things keep working with empty entries"""
        minipo = 'msgid "Source"\nmsgstr ""\n\nmsgid ""\nmsgstr ""'
        csvfile = self.po2csv(minipo)
        assert csvfile.findunit("Source") is not None
        assert csvfile.findunit("Source").target == ""
        assert headerless_len(csvfile.units) == 1

    def test_kdecomments(self):
        """test that we don't carry KDE comments to CSV"""
        minipo = '#: simple.c\nmsgid "_: KDE comment\\n"\n"Same"\nmsgstr "Same"\n'
        csvfile = self.po2csv(minipo)
        unit = self.singleelement(csvfile)
        assert unit.source == "Same"
        assert unit.target == "Same"
class TestPO2CSVCommand(test_convert.TestConvertCommand, TestPO2CSV):
    """Tests running actual po2csv commands on files"""
    # Module under test for the generic TestConvertCommand machinery.
    convertmodule = po2csv

    def test_help(self):
        """tests getting help"""
        options = test_convert.TestConvertCommand.test_help(self)
        # po2csv adds exactly one converter-specific option, listed last.
        options = self.help_check(options, "--columnorder=COLUMNORDER", last=True)
|
from uuid import getnode as get_mac
import threading
import signal
import requests
import json
import hashlib
import sys
import os
def to_error_message(errs):
    """
    Formats an array of error dicts into an error message string. Returns an error message.
    """
    parts = [f"{err['title']}: {err['detail']}" for err in errs]
    return ', '.join(parts)
def validate_license_key_with_fingerprint(license_key, machine_fingerprint):
    """
    Validates a license key scoped to a machine fingerprint. Returns a validation code and the license's ID.
    """
    # POST the key plus a fingerprint scope to Keygen's validate-key action.
    validation = requests.post(
        f"https://api.keygen.sh/v1/accounts/{os.environ['KEYGEN_ACCOUNT_ID']}/licenses/actions/validate-key",
        headers={
            'Content-Type': 'application/vnd.api+json',
            'Accept': 'application/vnd.api+json'
        },
        data=json.dumps({
            'meta': {
                'scope': { 'fingerprint': machine_fingerprint },
                'key': license_key
            }
        })
    ).json()
    # Pull the license id out of the response document when present.
    license_id = None
    if 'data' in validation:
        data = validation['data']
        if data != None:
            license_id = data['id']
    # API-level errors: log them and report no validation code, but still
    # return whatever license id was resolved.
    if 'errors' in validation:
        errs = validation['errors']
        print(f'[keygen.validate_license_key_with_fingerprint] license_id={license_id} machine_fingerprint={machine_fingerprint} errors={to_error_message(errs)}',
              file=sys.stderr)
        return None, license_id
    # The meta constant is the validation verdict consumed by the caller
    # (the top-level script branches on values like 'NOT_FOUND', 'NO_MACHINE',
    # 'NO_MACHINES', 'FINGERPRINT_SCOPE_MISMATCH').
    validation_code = validation['meta']['constant']
    print(f'[keygen.validate_license_key_with_fingerprint] validation_code={validation_code} license_id={license_id} machine_fingerprint={machine_fingerprint}')
    return validation_code, license_id
def activate_machine_for_license(license_id, machine_fingerprint):
    """
    Activates a machine for a license. Returns the activated machine's ID.
    """
    # Create a machine record tied to the license via a JSON:API relationship
    # payload, authenticated with the activation token from the environment.
    activation = requests.post(
        f"https://api.keygen.sh/v1/accounts/{os.environ['KEYGEN_ACCOUNT_ID']}/machines",
        headers={
            'Authorization': f"Bearer {os.environ['KEYGEN_ACTIVATION_TOKEN']}",
            'Content-Type': 'application/vnd.api+json',
            'Accept': 'application/vnd.api+json'
        },
        data=json.dumps({
            'data': {
                'type': 'machines',
                'attributes': {
                    'fingerprint': machine_fingerprint
                },
                'relationships': {
                    'license': {
                        'data': { 'type': 'licenses', 'id': license_id }
                    }
                }
            }
        })
    ).json()
    # On failure, log the API errors to stderr and signal failure with None.
    if 'errors' in activation:
        errs = activation['errors']
        print(f'[keygen.activate_machine_for_license] license_id={license_id} machine_fingerprint={machine_fingerprint} errors={to_error_message(errs)}',
              file=sys.stderr)
        return None
    machine_id = activation['data']['id']
    print(f'[keygen.activate_machine_for_license] license_id={license_id} machine_id={machine_id} machine_fingerprint={machine_fingerprint}')
    return machine_id
def deactivate_machine(machine_id):
    """
    Deactivates a machine. Returns a boolean indicating success or failure.
    """
    deactivation = requests.delete(
        f"https://api.keygen.sh/v1/accounts/{os.environ['KEYGEN_ACCOUNT_ID']}/machines/{machine_id}",
        headers={
            'Authorization': f"Bearer {os.environ['KEYGEN_ACTIVATION_TOKEN']}",
            'Accept': 'application/vnd.api+json'
        }
    )
    # Anything other than 204 No Content is treated as failure; the response
    # body then carries a JSON:API error document.
    if deactivation.status_code != 204:
        data = deactivation.json()
        errs = data['errors']
        print(f'[keygen.deactivate_machine] machine_id={machine_id} errors={to_error_message(errs)}',
              file=sys.stderr)
        return False
    print(f'[keygen.deactivate_machine] machine_id={machine_id}')
    return True
def deactivate_machine_on_exit(machine_id):
    """
    Deactivates a machine on exit signal. Exits program with exit code indicating deactivation success or failure.
    """
    # BUG FIX: the function ignored its machine_id parameter and deactivated
    # the module-global machine_fingerprint instead.
    ok = deactivate_machine(machine_id)
    if ok:
        sys.exit(0)
    else:
        sys.exit(1)
def ping_heartbeat_for_machine(machine_id):
    """
    Performs a heartbeat ping for a machine. Returns a boolean indicating success or failure.
    """
    ping = requests.post(
        f"https://api.keygen.sh/v1/accounts/{os.environ['KEYGEN_ACCOUNT_ID']}/machines/{machine_id}/actions/ping-heartbeat",
        headers={
            'Authorization': f"Bearer {os.environ['KEYGEN_ACTIVATION_TOKEN']}",
            'Accept': 'application/vnd.api+json'
        }
    ).json()
    # Failures come back as a JSON:API error document; log and report False.
    if 'errors' in ping:
        errs = ping['errors']
        print(f'[keygen.ping_heartbeat_for_machine] machine_id={machine_id} errors={to_error_message(errs)}',
              file=sys.stderr)
        return False
    print(f'[keygen.ping_heartbeat_for_machine] machine_id={machine_id}')
    return True
def maintain_hearbeat_for_machine(machine_id):
    """
    Performs minutely heartbeat pings for a machine on a loop.
    """
    # Schedule the next ping up-front, then perform the current one; the timer
    # is only started if the current ping succeeded.
    timer = threading.Timer(60.0, lambda: maintain_hearbeat_for_machine(machine_id))
    ok = ping_heartbeat_for_machine(machine_id)
    if not ok:
        # NOTE(review): once this runs inside a Timer thread, sys.exit() only
        # raises SystemExit in that thread and does not stop the process.
        # The timer is also non-daemon, which keeps the process alive —
        # confirm both behaviors are intended.
        sys.exit(1)
    timer.start()
# Fingerprint the current device and get the license key
machine_fingerprint = hashlib.sha256(str(get_mac()).encode('utf-8')).hexdigest()
license_key = sys.argv[1]
# Validate the license key scoped to the current machine fingerprint
validation_code, license_id = validate_license_key_with_fingerprint(license_key, machine_fingerprint)
if validation_code == 'NOT_FOUND':
    sys.exit(1)
# Attempt to activate the machine if it's not already activated
activation_is_required = validation_code == 'NO_MACHINE' or \
    validation_code == 'NO_MACHINES' or \
    validation_code == 'FINGERPRINT_SCOPE_MISMATCH'
if activation_is_required:
    machine_id = activate_machine_for_license(license_id, machine_fingerprint)
    if machine_id == None:
        sys.exit(1)
# Attempt to deactivate machine on process exit
# NOTE(review): both calls below pass machine_fingerprint where the callees'
# parameters are named machine_id, and the machine_id obtained above is never
# used (it is also undefined when activation was not required) — confirm the
# deactivation/heartbeat endpoints accept fingerprints, or pass machine_id.
signal.signal(signal.SIGINT, lambda _s, _f: deactivate_machine_on_exit(machine_fingerprint))
# Start a heartbeat ping loop
maintain_hearbeat_for_machine(machine_fingerprint)
|
# Copyright 2019-2020 The Kale Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import dill
import logging
from .resource_load import resource_load
from .resource_save import resource_save
from .resource_load import resource_all as fallback_load
from .resource_save import resource_all as fallback_save
log = logging.getLogger(__name__)
def _get_obj_name(s):
return s.split('/')[-1]
@resource_load.register(r'.*\.pyfn')
def resource_function_load(uri, **kwargs):
    """Load a Python function."""
    log.info("Loading function: %s", _get_obj_name(uri))
    # BUG FIX: the file handle was opened inline and never closed; use a
    # context manager so it is released deterministically.
    with open(uri, "rb") as f:
        return dill.load(f)
@resource_save.register(r'function')
def resource_function_save(obj, path, **kwargs):
    """Serialize a Python function to ``<path>.pyfn`` using dill."""
    log.info("Saving function: %s", _get_obj_name(path))
    target = path + ".pyfn"
    with open(target, "wb") as out:
        dill.dump(obj, out)
@resource_load.register(r'.*\.npy')  # match anything ending in .npy
def resource_numpy_load(uri, **kwargs):
    """Load a ``.npy`` numpy resource.

    Falls back to the generic loader only when numpy itself is not
    installed. The ``try`` is narrowed to the import so that an
    ImportError raised incidentally inside ``np.load`` no longer
    silently reroutes to the fallback.
    """
    try:
        import numpy as np
    except ImportError:
        return fallback_load(uri, **kwargs)
    log.info("Loading numpy obj: %s", _get_obj_name(uri))
    return np.load(uri)
@resource_save.register(r'numpy\..*')
def resource_numpy_save(obj, path, **kwargs):
    """Save a numpy object to ``<path>.npy``.

    Falls back to the generic serializer only when numpy itself is not
    installed (the ``try`` is narrowed to the import so unrelated
    ImportErrors are no longer swallowed).
    """
    try:
        import numpy as np
    except ImportError:
        fallback_save(obj, path, **kwargs)
        return
    log.info("Saving numpy obj: %s", _get_obj_name(path))
    np.save(path + ".npy", obj)
@resource_load.register(r'.*\.pdpkl')
def resource_pandas_load(uri, **kwargs):
    """Load a pandas resource pickled with ``to_pickle``.

    Falls back to the generic loader only when pandas itself is not
    installed (the ``try`` is narrowed to the import so unrelated
    ImportErrors are no longer swallowed).
    """
    try:
        import pandas as pd
    except ImportError:
        return fallback_load(uri, **kwargs)
    log.info("Loading pandas obj: %s", _get_obj_name(uri))
    return pd.read_pickle(uri)
@resource_save.register(r'pandas\..*(DataFrame|Series)')
def resource_pandas_save(obj, path, **kwargs):
    """Save a pandas DataFrame or Series to ``<path>.pdpkl``.

    Falls back to the generic serializer only when pandas itself is not
    installed (the ``try`` is narrowed to the import so unrelated
    ImportErrors are no longer swallowed).
    """
    try:
        import pandas as pd  # noqa: F401
    except ImportError:
        fallback_save(obj, path, **kwargs)
        return
    log.info("Saving pandas obj: %s", _get_obj_name(path))
    obj.to_pickle(path + '.pdpkl')
@resource_load.register(r'.*\.pt')
def resource_torch_load(uri, **kwargs):
    """Load a torch ``.pt`` resource.

    Falls back to the generic loader only when torch itself is not
    installed. Loaded ``nn.Module`` instances are switched to eval mode.
    """
    try:
        import torch
    except ImportError:
        return fallback_load(uri, **kwargs)
    log.info("Loading PyTorch model: %s", _get_obj_name(uri))
    obj_torch = torch.load(uri, pickle_module=dill)
    # isinstance also matches user subclasses (e.g. `class Net(nn.Module)`)
    # whose type string does not literally contain "nn.Module" — the old
    # substring check missed those and skipped eval().
    if isinstance(obj_torch, torch.nn.Module):
        # if the object is a Module we need to run eval
        obj_torch.eval()
    return obj_torch
@resource_save.register(r'torch.*')
def resource_torch_save(obj, path, **kwargs):
    """Save a torch resource to ``<path>.pt``.

    Falls back to the generic serializer only when torch itself is not
    installed (the ``try`` is narrowed to the import so unrelated
    ImportErrors are no longer swallowed).
    """
    try:
        import torch
    except ImportError:
        fallback_save(obj, path, **kwargs)
        return
    log.info("Saving PyTorch model: %s", _get_obj_name(path))
    torch.save(obj, path + ".pt", pickle_module=dill)
@resource_load.register(r'.*\.keras')
def resource_keras_load(uri, **kwargs):
    """Load a Keras model from a ``.keras`` file.

    Falls back to the generic loader only when keras itself is not
    installed (the ``try`` is narrowed to the import so unrelated
    ImportErrors are no longer swallowed).
    """
    try:
        from keras.models import load_model
    except ImportError:
        return fallback_load(uri, **kwargs)
    log.info("Loading Keras model: %s", _get_obj_name(uri))
    return load_model(uri)
@resource_save.register(r'keras\..*')
def resource_keras_save(obj, path, **kwargs):
    """Save a Keras model to ``<path>.keras`` via the model's own ``save``."""
    try:
        log.info("Saving Keras model: %s", _get_obj_name(path))
        obj.save(path + ".keras")
    except ImportError:
        # NOTE(review): unlike the sibling savers, nothing in this try
        # block performs an import, so this branch looks unreachable
        # unless obj.save itself raises ImportError internally — confirm
        # whether the fallback is ever exercised.
        fallback_save(obj, path, **kwargs)
@resource_load.register(r'.*\.tfkeras')
def resource_tf_load(uri, **kwargs):
    """Load a tf.Keras model from a ``.tfkeras`` directory/file.

    Falls back to the generic loader only when tensorflow itself is not
    installed (the ``try`` is narrowed to the import so unrelated
    ImportErrors are no longer swallowed).
    """
    try:
        from tensorflow.keras.models import load_model
    except ImportError:
        return fallback_load(uri, **kwargs)
    # Lazy %-style logging for consistency with the other loaders
    # (was an eagerly-formatted f-string).
    log.info("Loading tf.Keras model: %s", uri)
    try:
        obj_tfkeras = load_model(uri, compile=False)
    except OSError:
        # XXX: try to load a model that was saved within a versioned
        # folder (for tensorflow serve)
        obj_tfkeras = load_model(uri + "/1", compile=False)
    return obj_tfkeras
@resource_save.register(r'tensorflow.python.keras.*')
def resource_tf_save(obj, path, **kwargs):
    """Save a tf.Keras model under ``<path>.tfkeras/1`` via the model's ``save``."""
    try:
        log.info("Saving TF Keras model: %s", _get_obj_name(path))
        # XXX: Adding `/1` since tensorflow serve expects the model's models
        # to be saved under a versioned folder
        obj.save(path + ".tfkeras/1")
    except ImportError:
        # NOTE(review): no import occurs inside this try block, so the
        # fallback branch looks unreachable unless obj.save raises
        # ImportError internally — confirm.
        fallback_save(obj, path, **kwargs)
|
# coding=utf-8
"""
Most serialisation tests are coupled with the type tests (test_type.py)
"""
from __future__ import absolute_import
import datetime
import uuid
from hypothesis import given
from hypothesis.strategies import dictionaries as dictionary, characters, integers
from pathlib import Path
from eodatasets import serialise, compat, type as ptype
from tests import TestCase, slow
strings_without_trailing_underscore = characters(blacklist_characters='_')
class TestSerialise(TestCase):
    """Tests for serialise.as_flat_key_value: flattening nested dicts and
    SimpleObject trees into dotted (or custom-separator) key/value pairs."""
    def test_as_key_value(self):
        """Nested dicts flatten with '.'-joined keys; scalars pass through."""
        self.assert_values_equal(
            serialise.as_flat_key_value({
                'a': 1,
                'b': compat.long_int(2),
                'c': 2.3,
                'd': {
                    'd_inner': {
                        'a': 42
                    }
                }
            }),
            [
                ('a', 1),
                ('b', compat.long_int(2)),
                ('c', 2.3),
                ('d.d_inner.a', 42)
            ]
        )
    @slow
    @given(dictionary(strings_without_trailing_underscore, integers()))
    def test_flat_dict_flattens_identically(self, dict_):
        """A flat dict (keys without underscores) must flatten to itself."""
        # NOTE(review): leftover debug print — consider removing.
        print(dict_)
        self.assert_items_equal(
            dict_.items(),
            serialise.as_flat_key_value(dict_)
        )
    @slow
    @given(dictionary(characters(), integers()))
    def test_flat_dict_flattens_without_underscore_suffix(self, dict_):
        # A (single) trailing underscore should be stripped from key names if present, as these are added
        # for python name conflicts.
        # If we append an underscore to every key, the result should be identical.
        self.assert_items_equal(
            dict_.items(),
            serialise.as_flat_key_value({k + '_': v for k, v in dict_.items()})
        )
    def test_key_value_simple_obj(self):
        """SimpleObject attributes flatten like nested dict keys."""
        class Test1(ptype.SimpleObject):
            def __init__(self, a, b, c, d=None):
                self.a = a
                self.b = b
                self.c = c
                self.d = d
        self.assert_values_equal(
            serialise.as_flat_key_value(
                Test1(
                    a=1,
                    b=compat.long_int(2),
                    c=2.3,
                    d=Test1(
                        a=1,
                        b=2,
                        c={'a': 42}
                    )
                )
            ),
            [
                ('a', 1),
                ('b', compat.long_int(2)),
                ('c', 2.3),
                ('d.a', 1),
                ('d.b', 2),
                ('d.c.a', 42)
            ]
        )
    def test_key_value_types(self):
        """Lists index by position; dates/UUIDs/Paths stringify; paths are
        made relative to `relative_to`; a custom key_separator is honoured."""
        class Test1(ptype.SimpleObject):
            def __init__(self, a, b, c, d=None):
                self.a = a
                self.b = b
                self.c = c
                self.d = d
        uuid_ = uuid.uuid1()
        date_ = datetime.datetime.utcnow()
        self.assert_values_equal(
            [
                ('a:0', 'a'),
                ('a:1', 'b'),
                ('a:2:a', 1),
                ('a:2:b', 2),
                ('b', compat.long_int(2)),
                ('c', date_.isoformat()),
                ('d:a', str(uuid_)),
                ('d:b', 'something/testpath.txt'),
                ('d:c:a', 42)
            ],
            serialise.as_flat_key_value(
                Test1(
                    a=['a', 'b', Test1(1, 2, None)],
                    b=compat.long_int(2),
                    c=date_,
                    d=Test1(
                        a=uuid_,
                        b=Path('/tmp/something/testpath.txt'),
                        c={'a': 42}
                    )
                ),
                relative_to=Path('/tmp'),
                key_separator=':'
            )
        )
    def test_fails_on_unknown(self):
        """Unserialisable value types must raise ValueError."""
        class UnknownClass(object):
            pass
        # `context` is captured but unused; kept for parity with the original.
        with self.assertRaises(ValueError) as context:
            # It returns a generator, so we have to wrap it in a list to force evaluation.
            list(serialise.as_flat_key_value({'a': 1, 'b': UnknownClass()}))
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Run Stochastic Gradient Langevin Dynamics (SGLD) and Bayesian Dark Knowledge (BDK)"""
from __future__ import print_function
import argparse
import time
import numpy
import matplotlib.pyplot as plt
import mxnet as mx
import mxnet.ndarray as nd
from algos import HMC, SGD, SGLD, DistilledSGLD
from data_loader import load_mnist, load_toy, load_synthetic
from utils import BiasXavier, SGLDScheduler
class CrossEntropySoftmax(mx.operator.NumpyOp):
    """Calculate CrossEntropy softmax function"""
    def __init__(self):
        # False -> need_top_grad=False: acts as a loss-style terminal op,
        # so no gradient flows in from above. TODO confirm against the
        # mx.operator.NumpyOp constructor signature.
        super(CrossEntropySoftmax, self).__init__(False)
    def list_arguments(self):
        # Two inputs: the raw scores and the labels (same shape as data,
        # per infer_shape below — presumably one-hot / probability rows).
        return ['data', 'label']
    def list_outputs(self):
        return ['output']
    def infer_shape(self, in_shape):
        # data, label and output all share the (batch, classes) shape of data.
        data_shape = in_shape[0]
        label_shape = in_shape[0]
        output_shape = in_shape[0]
        return [data_shape, label_shape], [output_shape]
    def forward(self, in_data, out_data):
        x = in_data[0]
        y = out_data[0]
        # Numerically stable softmax: subtract the per-row max before exp,
        # then normalise each row to sum to 1.
        y[:] = numpy.exp(x - x.max(axis=1).reshape((x.shape[0], 1))).astype('float32')
        y /= y.sum(axis=1).reshape((x.shape[0], 1))
    def backward(self, out_grad, in_data, out_data, in_grad):
        l = in_data[1]
        y = out_data[0]
        dx = in_grad[0]
        # Gradient of cross-entropy w.r.t. the logits: softmax output minus labels.
        dx[:] = (y - l)
class LogSoftmax(mx.operator.NumpyOp):
    """Generate helper functions to evaluate softmax loss function"""
    def __init__(self):
        # False -> need_top_grad=False: loss-style terminal op, no upstream
        # gradient expected. TODO confirm against mx.operator.NumpyOp.
        super(LogSoftmax, self).__init__(False)
    def list_arguments(self):
        # Inputs: raw scores and labels (same shape as data per infer_shape).
        return ['data', 'label']
    def list_outputs(self):
        return ['output']
    def infer_shape(self, in_shape):
        # data, label and output all share the (batch, classes) shape of data.
        data_shape = in_shape[0]
        label_shape = in_shape[0]
        output_shape = in_shape[0]
        return [data_shape, label_shape], [output_shape]
    def forward(self, in_data, out_data):
        x = in_data[0]
        y = out_data[0]
        # Stable log-softmax: shift by the row max, then subtract log-sum-exp.
        y[:] = (x - x.max(axis=1, keepdims=True)).astype('float32')
        y -= numpy.log(numpy.exp(y).sum(axis=1, keepdims=True)).astype('float32')
        # y[:] = numpy.exp(x - x.max(axis=1).reshape((x.shape[0], 1)))
        # y /= y.sum(axis=1).reshape((x.shape[0], 1))
    def backward(self, out_grad, in_data, out_data, in_grad):
        l = in_data[1]
        y = out_data[0]
        dx = in_grad[0]
        # exp(log-softmax) recovers the softmax probabilities; gradient is
        # softmax minus labels, as in CrossEntropySoftmax above.
        dx[:] = (numpy.exp(y) - l).astype('float32')
def classification_student_grad(student_outputs, teacher_pred):
    """Distillation gradient for classification: the gradient w.r.t. the
    student's (single) output is simply student prediction minus teacher
    prediction, returned as a one-element list."""
    diff = student_outputs[0] - teacher_pred
    return [diff]
def regression_student_grad(student_outputs, teacher_pred, teacher_noise_precision):
    """Distillation gradients for regression.

    Args:
        student_outputs: [mean, log-variance] NDArrays from the student
            (the exp(-var) below treats the second head as a log variance).
        teacher_pred: teacher's predicted mean.
        teacher_noise_precision: precision of the teacher's noise model.

    Returns:
        [grad_mean, grad_var] matching the two student outputs.
    """
    student_mean = student_outputs[0]
    student_var = student_outputs[1]
    # exp(-log_var) = 1/variance; compute once (the original evaluated
    # nd.exp(-student_var) twice).
    inv_var = nd.exp(-student_var)
    grad_mean = inv_var * (student_mean - teacher_pred)
    grad_var = (1 - inv_var * (nd.square(student_mean - teacher_pred)
                               + 1.0 / teacher_noise_precision)) / 2
    return [grad_mean, grad_var]
def get_mnist_sym(output_op=None, num_hidden=400):
    """Build the MNIST MLP symbol: two ReLU hidden layers of `num_hidden`
    units, a 10-way output layer, and either the default SoftmaxOutput or
    a caller-supplied output op (both named 'softmax')."""
    sym = mx.symbol.Variable('data')
    for layer_idx in (1, 2):
        sym = mx.symbol.FullyConnected(data=sym, name='mnist_fc%d' % layer_idx,
                                       num_hidden=num_hidden)
        sym = mx.symbol.Activation(data=sym, name='mnist_relu%d' % layer_idx,
                                   act_type="relu")
    sym = mx.symbol.FullyConnected(data=sym, name='mnist_fc3', num_hidden=10)
    if output_op is None:
        return mx.symbol.SoftmaxOutput(data=sym, name='softmax')
    return output_op(data=sym, name='softmax')
def synthetic_grad(X, theta, sigma1, sigma2, sigmax, rescale_grad=1.0, grad=None):
"""Get synthetic gradient value"""
if grad is None:
grad = nd.empty(theta.shape, theta.context)
theta1 = theta.asnumpy()[0]
theta2 = theta.asnumpy()[1]
v1 = sigma1 ** 2
v2 = sigma2 ** 2
vx = sigmax ** 2
denominator = numpy.exp(-(X - theta1) ** 2 / (2 * vx)) + numpy.exp(
-(X - theta1 - theta2) ** 2 / (2 * vx))
grad_npy = numpy.zeros(theta.shape)
grad_npy[0] = -rescale_grad * ((numpy.exp(-(X - theta1) ** 2 / (2 * vx)) * (X - theta1) / vx
+ numpy.exp(-(X - theta1 - theta2) ** 2 / (2 * vx)) *
(X - theta1 - theta2) / vx) / denominator).sum() + theta1 / v1
grad_npy[1] = -rescale_grad * ((numpy.exp(-(X - theta1 - theta2) ** 2 / (2 * vx)) *
(X - theta1 - theta2) / vx) / denominator).sum() + theta2 / v2
grad[:] = grad_npy
return grad
def get_toy_sym(teacher=True, teacher_noise_precision=None):
    """Get toy symbol.

    teacher=True builds a one-hidden-layer regressor whose regression
    gradient is scaled by `teacher_noise_precision` (required in that
    case). teacher=False builds the student net with two output heads
    (mean and variance) grouped into one symbol.
    """
    if teacher:
        net = mx.symbol.Variable('data')
        net = mx.symbol.FullyConnected(data=net, name='teacher_fc1', num_hidden=100)
        net = mx.symbol.Activation(data=net, name='teacher_relu1', act_type="relu")
        net = mx.symbol.FullyConnected(data=net, name='teacher_fc2', num_hidden=1)
        # grad_scale folds the Gaussian noise precision into the loss gradient.
        net = mx.symbol.LinearRegressionOutput(data=net, name='teacher_output',
                                               grad_scale=teacher_noise_precision)
    else:
        net = mx.symbol.Variable('data')
        net = mx.symbol.FullyConnected(data=net, name='student_fc1', num_hidden=100)
        net = mx.symbol.Activation(data=net, name='student_relu1', act_type="relu")
        # Two heads: predicted mean and predicted (log-)variance — see
        # regression_student_grad, which applies exp(-var) to the second.
        student_mean = mx.symbol.FullyConnected(data=net, name='student_mean', num_hidden=1)
        student_var = mx.symbol.FullyConnected(data=net, name='student_var', num_hidden=1)
        net = mx.symbol.Group([student_mean, student_var])
    return net
def dev(gpu_id=None):
    """Return the mxnet context: GPU `gpu_id` when given, else CPU.

    Uses an explicit `is not None` test so that gpu_id=0 — a valid GPU
    ordinal, and exactly what `--gpu 0` passes in — selects GPU 0. The
    original truthiness test (`if gpu_id`) silently mapped 0 to mx.cpu().
    """
    return mx.gpu(gpu_id) if gpu_id is not None else mx.cpu()
def run_mnist_SGD(num_training=50000, gpu_id=None):
    """Train the MNIST MLP with plain SGD (MAP baseline)."""
    X, Y, X_test, Y_test = load_mnist(num_training)
    minibatch_size = 100
    net = get_mnist_sym()
    data_shape = (minibatch_size,) + X.shape[1::]
    data_inputs = {'data': nd.zeros(data_shape, ctx=dev(gpu_id)),
                   'softmax_label': nd.zeros((minibatch_size,), ctx=dev(gpu_id))}
    initializer = mx.init.Xavier(factor_type="in", magnitude=2.34)
    # Pass the local minibatch_size rather than a second hard-coded 100,
    # so the batch size is defined in exactly one place.
    exe, exe_params, _ = SGD(sym=net, dev=dev(gpu_id), data_inputs=data_inputs, X=X, Y=Y,
                             X_test=X_test, Y_test=Y_test,
                             total_iter_num=1000000,
                             initializer=initializer,
                             lr=5E-6, prior_precision=1.0,
                             minibatch_size=minibatch_size)
def run_mnist_SGLD(num_training=50000, gpu_id=None):
    """Train the MNIST MLP with SGLD, keeping a thinned posterior sample pool."""
    X, Y, X_test, Y_test = load_mnist(num_training)
    minibatch_size = 100
    net = get_mnist_sym()
    data_shape = (minibatch_size,) + X.shape[1::]
    data_inputs = {'data': nd.zeros(data_shape, ctx=dev(gpu_id)),
                   'softmax_label': nd.zeros((minibatch_size,), ctx=dev(gpu_id))}
    initializer = mx.init.Xavier(factor_type="in", magnitude=2.34)
    # Pass the local minibatch_size rather than a second hard-coded 100,
    # so the batch size is defined in exactly one place.
    exe, sample_pool = SGLD(sym=net, dev=dev(gpu_id), data_inputs=data_inputs, X=X, Y=Y,
                            X_test=X_test, Y_test=Y_test,
                            total_iter_num=1000000,
                            initializer=initializer,
                            learning_rate=4E-6, prior_precision=1.0,
                            minibatch_size=minibatch_size,
                            thin_interval=100, burn_in_iter_num=1000)
def run_mnist_DistilledSGLD(num_training=50000, gpu_id=None):
    """Run DistilledSGLD on mnist dataset.

    Hyper-parameters switch between a large configuration (>= 10k
    training points) and a small/fast one used for tiny subsets.
    """
    X, Y, X_test, Y_test = load_mnist(num_training)
    minibatch_size = 100
    if num_training >= 10000:
        num_hidden = 800
        total_iter_num = 1000000
        teacher_learning_rate = 1E-6
        student_learning_rate = 0.0001
        teacher_prior = 1
        student_prior = 0.1
        perturb_deviation = 0.1
    else:
        num_hidden = 400
        total_iter_num = 20000
        teacher_learning_rate = 4E-5
        student_learning_rate = 0.0001
        teacher_prior = 1
        student_prior = 0.1
        perturb_deviation = 0.001
    teacher_net = get_mnist_sym(num_hidden=num_hidden)
    # The student is trained against the teacher's soft targets, so it
    # uses the custom LogSoftmax output op.
    logsoftmax = LogSoftmax()
    student_net = get_mnist_sym(output_op=logsoftmax, num_hidden=num_hidden)
    data_shape = (minibatch_size,) + X.shape[1::]
    teacher_data_inputs = {'data': nd.zeros(data_shape, ctx=dev(gpu_id)),
                           'softmax_label': nd.zeros((minibatch_size,), ctx=dev(gpu_id))}
    # Student labels are the teacher's 10-way probability vectors.
    student_data_inputs = {'data': nd.zeros(data_shape, ctx=dev(gpu_id)),
                           'softmax_label': nd.zeros((minibatch_size, 10), ctx=dev(gpu_id))}
    teacher_initializer = BiasXavier(factor_type="in", magnitude=1)
    student_initializer = BiasXavier(factor_type="in", magnitude=1)
    # Pass the local minibatch_size rather than a second hard-coded 100,
    # so the batch size is defined in exactly one place.
    student_exe, student_params, _ = \
        DistilledSGLD(teacher_sym=teacher_net, student_sym=student_net,
                      teacher_data_inputs=teacher_data_inputs,
                      student_data_inputs=student_data_inputs,
                      X=X, Y=Y, X_test=X_test, Y_test=Y_test, total_iter_num=total_iter_num,
                      student_initializer=student_initializer,
                      teacher_initializer=teacher_initializer,
                      student_optimizing_algorithm="adam",
                      teacher_learning_rate=teacher_learning_rate,
                      student_learning_rate=student_learning_rate,
                      teacher_prior_precision=teacher_prior, student_prior_precision=student_prior,
                      perturb_deviation=perturb_deviation,
                      minibatch_size=minibatch_size, dev=dev(gpu_id))
def run_toy_SGLD(gpu_id=None):
    """Run SGLD on toy dataset"""
    X, Y, X_test, Y_test = load_toy()
    minibatch_size = 1
    # Precision of the Gaussian observation noise (1/9 -> std-dev 3).
    teacher_noise_precision = 1.0 / 9.0
    net = get_toy_sym(True, teacher_noise_precision)
    data_shape = (minibatch_size,) + X.shape[1::]
    data_inputs = {'data': nd.zeros(data_shape, ctx=dev(gpu_id)),
                   'teacher_output_label': nd.zeros((minibatch_size, 1), ctx=dev(gpu_id))}
    initializer = mx.init.Uniform(0.07)
    # Regression task: samples after burn-in are kept every `thin_interval`
    # iterations.
    exe, params, _ = SGLD(sym=net,
                          data_inputs=data_inputs,
                          X=X,
                          Y=Y,
                          X_test=X_test,
                          Y_test=Y_test,
                          total_iter_num=50000,
                          initializer=initializer,
                          learning_rate=1E-4,
                          # lr_scheduler=mx.lr_scheduler.FactorScheduler(100000, 0.5),
                          prior_precision=0.1,
                          burn_in_iter_num=1000,
                          thin_interval=10,
                          task='regression',
                          minibatch_size=minibatch_size,
                          dev=dev(gpu_id))  # disable=unbalanced-tuple-unpacking
def run_toy_DistilledSGLD(gpu_id=None):
    """Run DistilledSGLD on toy dataset.

    `gpu_id` now defaults to None (CPU) for consistency with every other
    run_* entry point in this file; existing positional callers are
    unaffected.
    """
    X, Y, X_test, Y_test = load_toy()
    minibatch_size = 1
    teacher_noise_precision = 1.0
    teacher_net = get_toy_sym(True, teacher_noise_precision)
    student_net = get_toy_sym(False)
    data_shape = (minibatch_size,) + X.shape[1::]
    teacher_data_inputs = {'data': nd.zeros(data_shape, ctx=dev(gpu_id)),
                           'teacher_output_label': nd.zeros((minibatch_size, 1), ctx=dev(gpu_id))}
    student_data_inputs = {'data': nd.zeros(data_shape, ctx=dev(gpu_id))}
    teacher_initializer = mx.init.Uniform(0.07)
    student_initializer = mx.init.Uniform(0.07)

    def student_grad_f(student_outputs, teacher_pred):
        # Bind the fixed noise precision into the two-argument callback
        # interface expected by DistilledSGLD (was a lambda assignment).
        return regression_student_grad(student_outputs, teacher_pred,
                                       teacher_noise_precision)

    student_exe, student_params, _ = \
        DistilledSGLD(teacher_sym=teacher_net, student_sym=student_net,
                      teacher_data_inputs=teacher_data_inputs,
                      student_data_inputs=student_data_inputs,
                      X=X, Y=Y, X_test=X_test, Y_test=Y_test, total_iter_num=80000,
                      teacher_initializer=teacher_initializer,
                      student_initializer=student_initializer,
                      teacher_learning_rate=1E-4, student_learning_rate=0.01,
                      # teacher_lr_scheduler=mx.lr_scheduler.FactorScheduler(100000, 0.5),
                      student_lr_scheduler=mx.lr_scheduler.FactorScheduler(8000, 0.8),
                      student_grad_f=student_grad_f,
                      teacher_prior_precision=0.1, student_prior_precision=0.001,
                      perturb_deviation=0.1, minibatch_size=minibatch_size, task='regression',
                      dev=dev(gpu_id))
def run_toy_HMC(gpu_id=None):
    """Run HMC on toy dataset"""
    X, Y, X_test, Y_test = load_toy()
    # Full-batch HMC: the "minibatch" is the entire dataset.
    minibatch_size = Y.shape[0]
    # Gaussian observation-noise precision (1/9 -> std-dev 3).
    noise_precision = 1 / 9.0
    net = get_toy_sym(True, noise_precision)
    data_shape = (minibatch_size,) + X.shape[1::]
    data_inputs = {'data': nd.zeros(data_shape, ctx=dev(gpu_id)),
                   'teacher_output_label': nd.zeros((minibatch_size, 1), ctx=dev(gpu_id))}
    initializer = mx.init.Uniform(0.07)
    # L is the number of leapfrog steps per HMC proposal.
    sample_pool = HMC(net, data_inputs=data_inputs, X=X, Y=Y, X_test=X_test, Y_test=Y_test,
                      sample_num=300000, initializer=initializer, prior_precision=1.0,
                      learning_rate=1E-3, L=10, dev=dev(gpu_id))
def run_synthetic_SGLD():
    """Run synthetic SGLD"""
    # Ground-truth parameters and noise scales of the two-mode synthetic
    # posterior from the SGLD paper.
    theta1 = 0
    theta2 = 1
    sigma1 = numpy.sqrt(10)
    sigma2 = 1
    sigmax = numpy.sqrt(2)
    X = load_synthetic(theta1=theta1, theta2=theta2, sigmax=sigmax, num=100)
    minibatch_size = 1
    total_iter_num = 1000000
    # Polynomially decaying step size (factor 0.55 as in the paper).
    lr_scheduler = SGLDScheduler(begin_rate=0.01, end_rate=0.0001, total_iter_num=total_iter_num,
                                 factor=0.55)
    # learning_rate=None: the rate is driven entirely by the scheduler —
    # presumably the 'sgld' optimizer accepts this; confirm.
    optimizer = mx.optimizer.create('sgld',
                                    learning_rate=None,
                                    rescale_grad=1.0,
                                    lr_scheduler=lr_scheduler,
                                    wd=0)
    updater = mx.optimizer.get_updater(optimizer)
    theta = mx.random.normal(0, 1, (2,), mx.cpu())
    grad = nd.empty((2,), mx.cpu())
    samples = numpy.zeros((2, total_iter_num))
    start = time.time()
    for i in range(total_iter_num):
        if (i + 1) % 100000 == 0:
            end = time.time()
            print("Iter:%d, Time spent: %f" % (i + 1, end - start))
            start = time.time()
        # One random observation per step (minibatch_size == 1); the
        # gradient is rescaled by N/minibatch to stay unbiased.
        ind = numpy.random.randint(0, X.shape[0])
        synthetic_grad(X[ind], theta, sigma1, sigma2, sigmax,
                       rescale_grad=X.shape[0] / float(minibatch_size), grad=grad)
        updater('theta', grad, theta)
        samples[:, i] = theta.asnumpy()
    # Visualise the sampled posterior as a 2-D histogram.
    plt.hist2d(samples[0, :], samples[1, :], (200, 200), cmap=plt.cm.jet)
    plt.colorbar()
    plt.show()
if __name__ == '__main__':
    # Fixed seeds so example runs are reproducible.
    numpy.random.seed(100)
    mx.random.seed(100)
    parser = argparse.ArgumentParser(
        description="Examples in the paper [NIPS2015]Bayesian Dark Knowledge and "
                    "[ICML2011]Bayesian Learning via Stochastic Gradient Langevin Dynamics")
    parser.add_argument("-d", "--dataset", type=int, default=1,
                        help="Dataset to use. 0 --> TOY, 1 --> MNIST, 2 --> Synthetic Data in "
                             "the SGLD paper")
    parser.add_argument("-l", "--algorithm", type=int, default=2,
                        help="Type of algorithm to use. 0 --> SGD, 1 --> SGLD, other-->DistilledSGLD")
    parser.add_argument("-t", "--training", type=int, default=50000,
                        help="Number of training samples")
    parser.add_argument("--gpu", type=int, help="0 to use GPU, not set to use CPU")
    args = parser.parse_args()
    training_num = args.training
    # Dispatch on dataset first, then algorithm.
    if args.dataset == 1:
        if args.algorithm == 0:
            run_mnist_SGD(training_num, gpu_id=args.gpu)
        elif args.algorithm == 1:
            run_mnist_SGLD(training_num, gpu_id=args.gpu)
        else:
            run_mnist_DistilledSGLD(training_num, gpu_id=args.gpu)
    elif args.dataset == 0:
        # NOTE(review): for the toy dataset, algorithm 0 (SGD) and any
        # value other than 1/2/3 fall through and run nothing, silently —
        # confirm whether that is intended.
        if args.algorithm == 1:
            run_toy_SGLD(gpu_id=args.gpu)
        elif args.algorithm == 2:
            run_toy_DistilledSGLD(gpu_id=args.gpu)
        elif args.algorithm == 3:
            run_toy_HMC(gpu_id=args.gpu)
    else:
        # Any other dataset id runs the synthetic SGLD demo (CPU only).
        run_synthetic_SGLD()
|
<reponame>alvarolopez/cloud-bdii-provider
import argparse
import mock
import xml.etree.ElementTree
from cloud_info import exceptions
from cloud_info.providers import opennebula
from cloud_info.tests import base
from cloud_info.tests import data
FAKES = data.ONE_FAKES
class OpenNebulaBaseProviderOptionsTest(base.TestCase):
    """Option parsing and validation tests for the base OpenNebula provider.

    Subclasses override `self.provider` to re-run these tests against the
    concrete provider classes.
    """
    def setUp(self):
        super(OpenNebulaBaseProviderOptionsTest, self).setUp()
        self.provider = opennebula.OpenNebulaBaseProvider
    def test_populate_parser(self):
        """populate_parser must register the auth, endpoint and image flags."""
        parser = argparse.ArgumentParser()
        self.provider.populate_parser(parser)
        opts = parser.parse_args(['--on-auth', 'foo',
                                  '--on-rpcxml-endpoint', 'bar',
                                  '--vmcatcher-images'])
        self.assertEqual(opts.on_auth, 'foo')
        self.assertEqual(opts.on_rpcxml_endpoint, 'bar')
        # The --vmcatcher-images flag populates the cloudkeeper_images dest.
        self.assertTrue(opts.cloudkeeper_images)
    def test_options(self):
        """Constructing the provider with a required option unset must raise."""
        class Opts(object):
            on_auth = 'foo'
            on_rpcxml_endpoint = 'bar'
            cloudkeeper_images = False
        # Check that the required opts are there
        for opt in ('on_auth', 'on_rpcxml_endpoint'):
            o = Opts()
            setattr(o, opt, None)
            self.assertRaises(exceptions.OpenNebulaProviderException,
                              self.provider, o)
class OpenNebulaProviderOptionsTest(OpenNebulaBaseProviderOptionsTest):
    """Re-runs the base option tests against OpenNebulaProvider."""
    def setUp(self):
        super(OpenNebulaProviderOptionsTest, self).setUp()
        self.provider = opennebula.OpenNebulaProvider
class OpenNebulaROCCIProviderOptionsTest(OpenNebulaBaseProviderOptionsTest):
    """Option tests for the rOCCI flavour, which adds template-dir options."""
    def setUp(self):
        super(OpenNebulaROCCIProviderOptionsTest, self).setUp()
        self.provider = opennebula.OpenNebulaROCCIProvider
    def test_populate_parser(self):
        """populate_parser must also register --rocci-template-dir."""
        parser = argparse.ArgumentParser()
        self.provider.populate_parser(parser)
        opts = parser.parse_args(['--on-auth', 'foo',
                                  '--on-rpcxml-endpoint', 'bar',
                                  '--rocci-template-dir', 'test',
                                  '--vmcatcher-images'])
        self.assertEqual(opts.on_auth, 'foo')
        self.assertEqual(opts.on_rpcxml_endpoint, 'bar')
        self.assertEqual(opts.rocci_template_dir, 'test')
        self.assertTrue(opts.cloudkeeper_images)
    def test_options(self):
        """rocci_template_dir is required in addition to the base options."""
        class Opts(object):
            on_auth = 'foo'
            on_rpcxml_endpoint = 'bar'
            rocci_template_dir = 'test'
            rocci_remote_templates = False
            cloudkeeper_images = False
        # Check that the required opts are there
        for opt in ('on_auth', 'on_rpcxml_endpoint', 'rocci_template_dir'):
            o = Opts()
            setattr(o, opt, None)
            self.assertRaises(exceptions.OpenNebulaProviderException,
                              self.provider, o)
class IndigoONProviderOptionsTest(OpenNebulaBaseProviderOptionsTest):
    """Re-runs the base option tests against IndigoONProvider."""
    def setUp(self):
        super(IndigoONProviderOptionsTest, self).setUp()
        self.provider = opennebula.IndigoONProvider
class OpenNebulaBaseProviderTest(base.TestCase):
    """Behaviour tests for the base provider, driven by a FakeProvider that
    stubs the XML-RPC proxy with canned fixture responses."""
    def __init__(self, *args, **kwargs):
        super(OpenNebulaBaseProviderTest, self).__init__(*args, **kwargs)
        # Subclasses swap in the concrete provider class and its fixtures.
        self.provider_class = opennebula.OpenNebulaBaseProvider
        self.maxDiff = None
        self.expected_images = FAKES.opennebula_base_provider_expected_images
    def setUp(self):
        super(OpenNebulaBaseProviderTest, self).setUp()
        class FakeProvider(self.provider_class):
            # Bypasses the real __init__ (no network/auth); wires mocks
            # directly onto the attributes the provider methods read.
            def __init__(self, opts):
                self.opts = opts
                self.on_auth = opts.on_auth
                self.on_rpcxml_endpoint = opts.on_rpcxml_endpoint
                self.cloudkeeper_images = opts.cloudkeeper_images
                self.static = mock.Mock()
                self.static.get_image_defaults.return_value = {}
                self.xml_parser = xml.etree.ElementTree
                self.server_proxy = mock.Mock()
                # Canned templatepool response: (status, xml payload).
                self.server_proxy.one.templatepool.info.return_value = (
                    'OK', FAKES.templatepool)
        class Opts(object):
            on_auth = 'oneadmin:opennebula'
            on_rpcxml_endpoint = 'http://localhost:2633/RPC2'
            cloudkeeper_images = False
        self.provider = FakeProvider(Opts())
    def test_get_images(self):
        """get_images must parse the fixture pool into the expected dict."""
        self.assertDictEqual(
            self.expected_images, self.provider.get_images())
    def test_get_templates(self):
        """The base provider exposes no templates."""
        self.assertDictEqual({}, self.provider.get_templates())
class OpenNebulaProviderTest(OpenNebulaBaseProviderTest):
    """Re-runs the base behaviour tests against OpenNebulaProvider."""
    def __init__(self, *args, **kwargs):
        super(OpenNebulaProviderTest, self).__init__(*args, **kwargs)
        self.provider_class = opennebula.OpenNebulaProvider
class OpenNebulaROCCIProviderTest(OpenNebulaBaseProviderTest):
    """Behaviour tests for the rOCCI flavour, covering both local
    template-dir and remote (document-pool) template sources."""
    def __init__(self, *args, **kwargs):
        super(OpenNebulaROCCIProviderTest, self).__init__(*args, **kwargs)
        self.provider_class = opennebula.OpenNebulaROCCIProvider
        self.expected_images = FAKES.opennebula_rocci_provider_expected_images
        self.expected_templates = \
            FAKES.opennebula_rocci_provider_expected_templates
        self.expected_templates_remote = \
            FAKES.opennebula_rocci_provider_expected_templates_remote
    def setUp(self):
        super(OpenNebulaROCCIProviderTest, self).setUp()
        class FakeProvider(self.provider_class):
            # Bypasses the real __init__; stubs every pool the rOCCI
            # provider queries (template, image and document pools).
            def __init__(self, opts):
                self.opts = opts
                self.on_auth = opts.on_auth
                self.on_rpcxml_endpoint = opts.on_rpcxml_endpoint
                self.rocci_template_dir = opts.rocci_template_dir
                self.rocci_remote_templates = opts.rocci_remote_templates
                self.cloudkeeper_images = opts.cloudkeeper_images
                self.xml_parser = xml.etree.ElementTree
                self.static = mock.Mock()
                self.static.get_image_defaults.return_value = {}
                self.static.get_template_defaults.return_value = {}
                self.server_proxy = mock.Mock()
                self.server_proxy.one.templatepool.info.return_value = (
                    'OK', FAKES.templatepool)
                self.server_proxy.one.imagepool.info.return_value = (
                    'OK', FAKES.imagepool)
                self.server_proxy.one.documentpool.info.return_value = (
                    'OK', FAKES.documentpool)
        class Opts(object):
            # Local templates read from the fixture directory.
            on_auth = 'foo'
            on_rpcxml_endpoint = 'bar'
            rocci_template_dir = FAKES.rocci_dir
            rocci_remote_templates = False
            cloudkeeper_images = False
        class OptsRemote(object):
            # Remote templates come from the document pool instead.
            on_auth = 'foo'
            on_rpcxml_endpoint = 'bar'
            rocci_template_dir = ''
            rocci_remote_templates = True
            cloudkeeper_images = False
        self.provider = FakeProvider(Opts())
        self.provider_remote = FakeProvider(OptsRemote())
    def test_get_templates(self):
        """Local template-dir path must yield the expected templates."""
        self.assertDictEqual(
            self.expected_templates,
            self.provider.get_templates())
    def test_get_templates_remote(self):
        """Remote (document pool) path must yield its own expected set."""
        self.assertDictEqual(
            self.expected_templates_remote,
            self.provider_remote.get_templates())
class IndigoONProviderTest(OpenNebulaBaseProviderTest):
    """Behaviour tests for IndigoONProvider, which exposes templates
    derived from the template and image pools."""
    def __init__(self, *args, **kwargs):
        super(IndigoONProviderTest, self).__init__(*args, **kwargs)
        self.provider_class = opennebula.IndigoONProvider
        self.expected_images = FAKES.indigo_on_provider_expected_images
        self.expected_templates = FAKES.indigo_on_provider_expected_templates
    def setUp(self):
        super(IndigoONProviderTest, self).setUp()
        class FakeProvider(self.provider_class):
            # Bypasses the real __init__; stubs the template and image pools.
            def __init__(self, opts):
                self.opts = opts
                self.on_auth = opts.on_auth
                self.on_rpcxml_endpoint = opts.on_rpcxml_endpoint
                self.cloudkeeper_images = opts.cloudkeeper_images
                self.xml_parser = xml.etree.ElementTree
                self.static = mock.Mock()
                self.static.get_image_defaults.return_value = {}
                self.static.get_template_defaults.return_value = {}
                self.server_proxy = mock.Mock()
                self.server_proxy.one.templatepool.info.return_value = (
                    'OK', FAKES.templatepool)
                self.server_proxy.one.imagepool.info.return_value = (
                    'OK', FAKES.imagepool)
        class Opts(object):
            on_auth = 'foo'
            on_rpcxml_endpoint = 'bar'
            cloudkeeper_images = False
        self.provider = FakeProvider(Opts())
    def test_get_templates(self):
        """get_templates must parse the fixture pools into the expected dict."""
        self.assertDictEqual(
            self.expected_templates, self.provider.get_templates())
|
<reponame>tzhanl/azure-sdk-for-python<filename>sdk/applicationinsights/azure-mgmt-applicationinsights/azure/mgmt/applicationinsights/models/application_insights_component_feature_capabilities_py3.py
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class ApplicationInsightsComponentFeatureCapabilities(Model):
    """An Application Insights component feature capabilities.
    Variables are only populated by the server, and will be ignored when
    sending a request.
    :ivar support_export_data: Whether allow to use continuous export feature.
    :vartype support_export_data: bool
    :ivar burst_throttle_policy: Reserved, not used now.
    :vartype burst_throttle_policy: str
    :ivar metadata_class: Reserved, not used now.
    :vartype metadata_class: str
    :ivar live_stream_metrics: Reserved, not used now.
    :vartype live_stream_metrics: bool
    :ivar application_map: Reserved, not used now.
    :vartype application_map: bool
    :ivar work_item_integration: Whether allow to use work item integration
     feature.
    :vartype work_item_integration: bool
    :ivar power_bi_integration: Reserved, not used now.
    :vartype power_bi_integration: bool
    :ivar open_schema: Reserved, not used now.
    :vartype open_schema: bool
    :ivar proactive_detection: Reserved, not used now.
    :vartype proactive_detection: bool
    :ivar analytics_integration: Reserved, not used now.
    :vartype analytics_integration: bool
    :ivar multiple_step_web_test: Whether allow to use multiple steps web test
     feature.
    :vartype multiple_step_web_test: bool
    :ivar api_access_level: Reserved, not used now.
    :vartype api_access_level: str
    :ivar tracking_type: The application insights component used tracking
     type.
    :vartype tracking_type: str
    :ivar daily_cap: Daily data volume cap in GB.
    :vartype daily_cap: float
    :ivar daily_cap_reset_time: Daily data volume cap UTC reset hour.
    :vartype daily_cap_reset_time: float
    :ivar throttle_rate: Reserved, not used now.
    :vartype throttle_rate: float
    """
    # AutoRest-generated model (see file header): every capability is a
    # server-populated, read-only value — do not hand-edit beyond comments.
    _validation = {
        'support_export_data': {'readonly': True},
        'burst_throttle_policy': {'readonly': True},
        'metadata_class': {'readonly': True},
        'live_stream_metrics': {'readonly': True},
        'application_map': {'readonly': True},
        'work_item_integration': {'readonly': True},
        'power_bi_integration': {'readonly': True},
        'open_schema': {'readonly': True},
        'proactive_detection': {'readonly': True},
        'analytics_integration': {'readonly': True},
        'multiple_step_web_test': {'readonly': True},
        'api_access_level': {'readonly': True},
        'tracking_type': {'readonly': True},
        'daily_cap': {'readonly': True},
        'daily_cap_reset_time': {'readonly': True},
        'throttle_rate': {'readonly': True},
    }
    # Maps python attribute names to the PascalCase keys used on the wire.
    _attribute_map = {
        'support_export_data': {'key': 'SupportExportData', 'type': 'bool'},
        'burst_throttle_policy': {'key': 'BurstThrottlePolicy', 'type': 'str'},
        'metadata_class': {'key': 'MetadataClass', 'type': 'str'},
        'live_stream_metrics': {'key': 'LiveStreamMetrics', 'type': 'bool'},
        'application_map': {'key': 'ApplicationMap', 'type': 'bool'},
        'work_item_integration': {'key': 'WorkItemIntegration', 'type': 'bool'},
        'power_bi_integration': {'key': 'PowerBIIntegration', 'type': 'bool'},
        'open_schema': {'key': 'OpenSchema', 'type': 'bool'},
        'proactive_detection': {'key': 'ProactiveDetection', 'type': 'bool'},
        'analytics_integration': {'key': 'AnalyticsIntegration', 'type': 'bool'},
        'multiple_step_web_test': {'key': 'MultipleStepWebTest', 'type': 'bool'},
        'api_access_level': {'key': 'ApiAccessLevel', 'type': 'str'},
        'tracking_type': {'key': 'TrackingType', 'type': 'str'},
        'daily_cap': {'key': 'DailyCap', 'type': 'float'},
        'daily_cap_reset_time': {'key': 'DailyCapResetTime', 'type': 'float'},
        'throttle_rate': {'key': 'ThrottleRate', 'type': 'float'},
    }
    def __init__(self, **kwargs) -> None:
        super(ApplicationInsightsComponentFeatureCapabilities, self).__init__(**kwargs)
        # All attributes start as None and are filled in from server responses.
        self.support_export_data = None
        self.burst_throttle_policy = None
        self.metadata_class = None
        self.live_stream_metrics = None
        self.application_map = None
        self.work_item_integration = None
        self.power_bi_integration = None
        self.open_schema = None
        self.proactive_detection = None
        self.analytics_integration = None
        self.multiple_step_web_test = None
        self.api_access_level = None
        self.tracking_type = None
        self.daily_cap = None
        self.daily_cap_reset_time = None
        self.throttle_rate = None
|
<reponame>sdjespersen/pyzinc
import io
from typing import Iterable, Iterator, IO, List
from . import tokens
from .tokens import NumberToken, Token, TokenType
EOF = 'EOF'
class ZincTokenizerException(Exception):
    """An exception indicating that the string could not be tokenized."""
    # Carries no extra state; the message alone describes the failure.
    pass
def _is_letter(c: str) -> bool:
    """True when *c* falls in the ASCII letter ranges; EOF is never a letter."""
    if c == EOF:
        return False
    return ('a' <= c <= 'z') or ('A' <= c <= 'Z')
def _is_digit(c: str) -> bool:
return '0' <= c <= '9'
def _is_id_start(c: str) -> bool:
    """Return True if *c* may start a Zinc identifier (lowercase ASCII)."""
    if c == EOF:
        return False
    return 'a' <= c <= 'z'
def _is_id_part(c: str) -> bool:
    """Return True if *c* may appear inside an identifier."""
    if c == '_':
        return True
    return _is_letter(c) or _is_digit(c)
def tokenize(s: str) -> Iterable[Token]:
    """Tokenize a Zinc string by wrapping it in an in-memory text buffer."""
    buf = io.StringIO(s)
    return tokenize_buf(buf)
def tokenize_buf(buf: IO) -> Iterable[Token]:
    """Yield tokens from a Zinc buffer; the final yielded token is EOF."""
    tokenizer = ZincTokenizer(buf)
    tok = next(tokenizer)
    while tok is not tokens.EOF:
        yield tok
        tok = next(tokenizer)
    # The EOF token itself is part of the stream, matching the original
    # yield-then-break behavior.
    yield tok
class ZincTokenizer(Iterator[Token]):
    """Tokenizer for the Zinc format.

    Adapted from the Java reference implementation by <NAME>.
    FMI: https://project-haystack.org/doc/Zinc
    """

    def __init__(self, buf: IO) -> None:
        self._buf: IO = buf
        self._cur: str = ''   # current character, or the EOF sentinel
        self._peek: str = ''  # one character of lookahead
        self.line: int = 0    # number of newlines consumed so far
        # Prime the two-character window: after these two calls _cur and
        # _peek hold the first two characters of the buffer.
        self._consume()
        self._consume()

    def __next__(self) -> Token:
        """Return the next token; returns ``tokens.EOF`` at end of input."""
        # skip non-meaningful whitespace
        while True:
            if self._cur in (' ', '\t', '\xa0'):
                self._consume()
                continue
            # TODO: skip comments?
            break
        if self._cur in ('\n', '\r'):
            # Treat '\r\n' as a single newline.
            if self._cur == '\r' and self._peek == '\n':
                self._consume('\r')
            self._consume()
            self.line += 1
            return tokens.NEWLINE
        # handle various starting chars
        if self._cur == EOF:
            return tokens.EOF
        if _is_id_start(self._cur):
            return self._tokenize_id()
        if self._cur == 'C' and self._peek == '(':
            return self._tokenize_coord()
        if self._cur.isupper():
            return self._tokenize_reserved()
        if self._cur == '"':
            return self._tokenize_str()
        if self._cur == '@':
            return self._tokenize_ref()
        if self._cur == '`':
            return self._tokenize_uri()
        if (_is_digit(self._cur) or
                (self._cur == '-' and _is_digit(self._peek))):
            return self._tokenize_num()
        # otherwise, symbol
        return self._tokenize_symbol()

    def _tokenize_id(self) -> Token:
        """Tokenize an identifier: id-start followed by id-part chars."""
        s = []
        while self._cur != EOF and _is_id_part(self._cur):
            s.append(self._cur)
            self._consume()
        return Token(TokenType.ID, ''.join(s))

    def _tokenize_coord(self) -> Token:
        """Tokenize a coordinate literal of the form C(lat,lng)."""
        s = ['C', '(']
        self._consume('C')
        self._consume('(')
        # lat
        s.append(self._consume_decimal_unscientific())
        s.append(',')
        self._consume(',')
        # allow one optional space
        if self._cur == ' ':
            self._consume()
        # lng
        s.append(self._consume_decimal_unscientific())
        s.append(')')
        self._consume(')')
        return Token(TokenType.COORD, ''.join(s))

    def _consume_decimal_unscientific(self) -> str:
        """Consume a plain (non-scientific) decimal and return its text."""
        s = []
        if self._cur == '-':
            s.append(self._cur)
            self._consume()
        dots = 0
        while _is_digit(self._cur) or self._cur == '.':
            if self._cur == '.':
                dots += 1
            s.append(self._cur)
            self._consume()
        v = ''.join(s)
        # More than one decimal point cannot be a valid number.
        if dots > 1:
            raise ZincTokenizerException(f"Invalid float {v}")
        return v

    def _tokenize_reserved(self) -> Token:
        """Tokenize an uppercase-initial reserved word (N, M, NA, NaN, ...)."""
        s = []
        while self._cur != EOF and _is_letter(self._cur):
            s.append(self._cur)
            self._consume()
        v = ''.join(s)
        if v == 'N':
            return tokens.NULL
        if v == 'M':
            return tokens.MARKER
        if v == 'R':
            return tokens.REMOVE
        if v == 'NA':
            return tokens.NA
        if v == 'NaN':
            return tokens.NAN
        if v == 'T':
            return tokens.TRUE
        if v == 'F':
            return tokens.FALSE
        if v == 'INF':
            return tokens.POS_INF
        raise ZincTokenizerException(f"Invalid token {v}")

    def _tokenize_num(self) -> Token:
        """Tokenize a number, date, time, or datetime literal.

        Dashes/colons are counted while scanning to decide which of the
        four token kinds the text actually is.
        """
        def _is_unit(c: str):
            # '%', '$', '/' and any non-ASCII char may be part of a unit.
            return c != EOF and (c in ('%', '$', '/') or ord(c) > 128)
        if self._cur == '0' and self._peek == 'x':
            # BUG FIX: the hex token was computed but discarded, after which
            # the scan below re-read (and mis-tokenized) the same input.
            return self._tokenize_hex()
        # consume all things that might be part of this number token
        s: List[str] = []
        colons = 0
        dashes = 0
        unit_index = 0
        exp = False
        while True:
            if self._cur is EOF:
                break
            if not _is_digit(self._cur):
                if exp and (self._cur in ('+', '-')):
                    # this is exponent notation
                    pass
                elif self._cur == '-':
                    dashes += 1
                elif self._cur == ':' and _is_digit(self._peek):
                    colons += 1
                elif ((exp or colons >= 1) and self._cur == '+'):
                    pass
                elif self._cur == '.':
                    if not _is_digit(self._peek):
                        break
                elif (self._cur in ('e', 'E') and
                        (self._peek in ('-', '+') or _is_digit(self._peek))):
                    exp = True
                elif _is_letter(self._cur) or _is_unit(self._cur):
                    if unit_index == 0:
                        unit_index = len(s)
                elif self._cur == '_':
                    # '_' between digits is a readability separator: drop it.
                    if unit_index == 0 and _is_digit(self._peek):
                        self._consume()
                        continue
                    elif unit_index == 0:
                        unit_index = len(s)
                else:
                    # done with the number
                    break
            s.append(self._cur)
            self._consume()
        if dashes == 2 and colons == 0:
            return Token(TokenType.DATE, ''.join(s))
        if dashes == 0 and colons >= 1:
            return self._tokenize_as_time(''.join(s), 1)
        if dashes >= 2:
            return self._tokenize_as_datetime(''.join(s))
        return NumberToken(''.join(s), unit_index)

    def _tokenize_hex(self) -> Token:
        """Tokenize a hexadecimal literal such as 0xdead_beef."""
        self._consume('0')
        self._consume('x')
        s = []
        while True:
            if self._cur in '0123456789abcdefABCDEF':
                s.append(self._cur)
                self._consume()
                continue
            if self._cur == '_':
                # BUG FIX: the separator must be consumed; the original
                # `continue`d without advancing and spun forever on '_'.
                self._consume()
                continue
            break
        return Token(TokenType.HEX, ''.join(s))

    def _tokenize_as_time(self, s: str, colons: int) -> Token:
        """Reinterpret a scanned numeric string as a TIME token."""
        # Zero-pad single-digit hours, e.g. '1:23' -> '01:23'.
        # (len check added: s[1] would raise IndexError on a 1-char string.)
        if len(s) > 1 and s[1] == ':':
            s = '0' + s
        if self._peek is not EOF and self._peek.isupper():
            tz = self._consume_timezone()
            return Token(TokenType.TIME, s + tz)
        raise ZincTokenizerException(f"Invalid time token {s}")

    def _tokenize_as_datetime(self, s: str) -> Token:
        """Reinterpret a scanned numeric string as a DATETIME token."""
        if self._peek is not EOF and self._peek.isupper():
            tz = self._consume_timezone()
            return Token(TokenType.DATETIME, s + tz)
        return Token(TokenType.DATETIME, s)

    def _consume_timezone(self) -> str:
        """Consume ' TzName' (optionally with a GMT+/-offset) and return it."""
        tz = []
        if self._cur != ' ' or not self._peek.isupper():
            raise ZincTokenizerException("Expecting timezone!")
        self._consume()
        tz.append(' ')
        while _is_id_part(self._cur):
            tz.append(self._cur)
            self._consume()
        # e.g. 'GMT+5' / 'GMT-8'
        if self._cur in '+-' and ''.join(tz).endswith('GMT'):
            tz.append(self._cur)
            self._consume()
            while _is_digit(self._cur):
                tz.append(self._cur)
                self._consume()
        return ''.join(tz)

    def _tokenize_str(self) -> Token:
        """Tokenize a double-quoted string, handling escape sequences."""
        self._consume('"')
        s = []
        while True:
            if self._cur == EOF:
                raise ZincTokenizerException("Unexpected end of str")
            if self._cur == '"':
                self._consume('"')
                break
            if self._cur == '\\':
                s.append(self._escape())
                continue
            s.append(self._cur)
            self._consume()
        return Token(TokenType.STRING, ''.join(s))

    def _tokenize_ref(self) -> Token:
        """Tokenize an @ref, optionally followed by a quoted display name."""
        def _is_ref_char(c: str) -> bool:
            return _is_letter(c) or _is_digit(c) or c in '_:-.~'
        self._consume('@')
        s = []
        while True:
            if _is_ref_char(self._cur):
                s.append(self._cur)
                self._consume()
            elif self._cur == ' ' and self._peek == '"':
                # upcoming quote is the display name for the ref
                s.append(self._cur)
                self._consume()
                s += '"' + self._tokenize_str().val + '"'
            else:
                break
        return Token(TokenType.REF, ''.join(s))

    def _tokenize_uri(self) -> Token:
        """Tokenize a backtick-delimited URI."""
        self._consume('`')
        s = []
        while True:
            if self._cur == '`':
                self._consume('`')
                break
            if self._cur == EOF or self._cur == '\n':
                raise ZincTokenizerException("Unexpected end of URI")
            if self._cur == '\\':
                # URI-reserved characters keep their backslash verbatim;
                # anything else goes through normal escape handling.
                if self._peek in ':/?#[]@\\&=;':
                    s.append(self._cur)
                    self._consume()
                    s.append(self._cur)
                    self._consume()
                else:
                    s.append(self._escape())
            else:
                s.append(self._cur)
                self._consume()
        return Token(TokenType.URI, ''.join(s))

    def _escape(self) -> str:
        """Consume a backslash escape and return its replacement text."""
        self._consume('\\')
        if self._cur in 'bfnrt"$\'`\\':
            s = '\\' + self._cur  # type: str
            self._consume()
            return s
        # check for uxxxx
        if self._cur == 'u':
            coll = []  # type: List[str]
            self._consume('u')
            for _ in range(4):
                coll.append(self._cur)
                self._consume()
            s = ''.join(coll)
            try:
                return chr(int(s, base=16))
            except ValueError:
                raise ZincTokenizerException(
                    f"Invalid unicode sequence: {s}")
        raise ZincTokenizerException(f"Invalid escape sequence: {self._cur}")

    def _tokenize_symbol(self) -> Token:
        """Tokenize a punctuation/operator symbol (with 2-char lookahead)."""
        c = self._cur
        self._consume()
        if c == ',':
            return tokens.COMMA
        elif c == ':':
            return tokens.COLON
        elif c == ';':
            return tokens.SEMICOLON
        elif c == '[':
            return tokens.LBRACKET
        elif c == ']':
            return tokens.RBRACKET
        elif c == '{':
            return tokens.LBRACE
        elif c == '}':
            return tokens.RBRACE
        elif c == '(':
            return tokens.LPAREN
        elif c == ')':
            return tokens.RPAREN
        elif c == '<':
            if self._cur == '<':
                self._consume('<')
                return tokens.DOUBLELT
            if self._cur == '=':
                self._consume('=')
                return tokens.LTEQ
            return tokens.LT
        elif c == '>':
            if self._cur == '>':
                self._consume('>')
                return tokens.DOUBLEGT
            if self._cur == '=':
                self._consume('=')
                return tokens.GTEQ
            return tokens.GT
        elif c == '-':
            if self._cur == '>':
                self._consume('>')
                return tokens.ARROW
            return tokens.MINUS
        elif c == '=':
            if self._cur == '=':
                self._consume('=')
                return tokens.EQUALS
            return tokens.ASSIGN
        elif c == '!':
            if self._cur == '=':
                self._consume('=')
                return tokens.NOTEQUALS
            return tokens.BANG
        elif c == '/':
            return tokens.SLASH
        raise ZincTokenizerException(f"Unexpected symbol: '{c}'")

    def _consume(self, expected=None) -> None:
        """Advance the window by one char, optionally asserting the current one.

        :raises ZincTokenizerException: if *expected* is given and does not
            match the current character.
        """
        if expected is not None and self._cur != expected:
            raise ZincTokenizerException(
                f"Expected {expected} but found {self._cur}")
        try:
            self._cur = self._peek
            self._peek = self._buf.read(1) or EOF
        except Exception:
            self._cur = EOF
            self._peek = EOF
|
<filename>Image/main.py
import torch
from torchvision import transforms
from torchvision.datasets.folder import ImageFolder
from torch.utils.data import DataLoader
import numpy as np
import os
import visdom
import matplotlib.pyplot as plt
import SegmentVGG16
import train_extractor
import MutualInformation
import decoder
import train_decoder
# Fix the RNG seed for reproducibility and restrict which GPUs are visible.
torch.manual_seed(0)
os.environ["CUDA_VISIBLE_DEVICES"] = "2, 3"
# Visdom dashboard used by the training helpers for live plots.
vis = visdom.Visdom(env="vgg16")
transform = transforms.Compose([
    transforms.ToTensor()
])
# Shared training hyper-parameters for all training entry points below.
total_epoch = 10
batch_size = 128
lr = 0.0001
# Primary dataset (CelebA "tag2" split) for training the feature extractor.
# NOTE(review): paths are machine-specific — confirm before running elsewhere.
data_train = ImageFolder('/root/DATA/CelebA/tag2/train', transform=transform)
data_test = ImageFolder('/root/DATA/CelebA/tag2/val', transform=transform)
data_train_loader = DataLoader(data_train, batch_size=batch_size, shuffle=True, num_workers=32)
data_test_loader = DataLoader(data_test, batch_size=batch_size, shuffle=True, num_workers=32)
# Secondary dataset (CelebA "tag5" split) for the downstream classifier.
# Its test loader uses batch_size=1 and no shuffle so show_result can
# address individual images deterministically.
data2_train = ImageFolder('/root/DATA/CelebA/tag5/train', transform=transform)
data2_test = ImageFolder('/root/DATA/CelebA/tag5/val', transform=transform)
data2_train_loader = DataLoader(data2_train, batch_size=batch_size, shuffle=True, num_workers=32)
data2_test_loader = DataLoader(data2_test, batch_size=1, shuffle=False, num_workers=32)
def adjust_learning_rate(epoch, init_lr=0.0001):
    """Step-decay schedule for extractor training: lr is divided by 10
    once the epoch reaches each milestone (only milestone: 12)."""
    milestones = [12]
    passed = sum(1 for m in milestones if epoch >= m)
    return init_lr * (0.1 ** passed)
def adjust_learning_rate_classifier(epoch, init_lr=0.0001):
    """Step-decay schedule for classifier training: lr /= 10 at epoch 12."""
    cur_lr = init_lr
    if epoch >= 12:  # single decay milestone
        cur_lr = cur_lr * 0.1
    return cur_lr
def adjust_learning_rate_decoder(epoch, init_lr=0.0001):
    """Step-decay schedule for decoder training: lr /= 10 at epoch 10."""
    return init_lr * 0.1 if epoch >= 10 else init_lr
def get_FE():
    """Train the feature extractor with the mutual-information constraint.

    Loads pre-trained extractor (FE) and classifier (CF) checkpoints, trains
    them together with a MutualInformation estimator on data_train_loader,
    then saves all three under Models/mix/extractor/. Returns the trained FE.
    """
    FE = torch.load("Models/mix/pre_train/FE.pth")
    CF = torch.load("Models/mix/pre_train/CF.pth")
    MI = MutualInformation.MutlInfo()
    if torch.cuda.is_available():
        # Wrap in DataParallel before moving to GPU when several are visible.
        if torch.cuda.device_count() > 1:
            FE = torch.nn.DataParallel(FE)
            CF = torch.nn.DataParallel(CF)
            MI = torch.nn.DataParallel(MI)
        FE = FE.cuda()
        CF = CF.cuda()
        MI = MI.cuda()
    try:
        for epoch in range(total_epoch):
            print("epoch %d" % epoch)
            current_lr = adjust_learning_rate(epoch, lr)
            FE, CF, MI = train_extractor.train(FE, CF, MI, data_train_loader, current_lr, vis)
            train_extractor.test_classifier(FE, CF, data_test_loader)
    except KeyboardInterrupt:
        # Allow Ctrl-C to stop training early; the partial models are still saved.
        pass
    # Save the unwrapped .module when DataParallel was used, so the
    # checkpoints load the same way on single-GPU machines.
    if torch.cuda.device_count() > 1:
        torch.save(FE.module, "Models/mix/extractor/FE.pth")
        torch.save(CF.module, "Models/mix/extractor/FE_CF.pth")
        torch.save(MI.module, "Models/mix/extractor/FE_MI.pth")
    else:
        torch.save(FE, "Models/mix/extractor/FE.pth")
        torch.save(CF, "Models/mix/extractor/FE_CF.pth")
        torch.save(MI, "Models/mix/extractor/FE_MI.pth")
    return FE
def get_ZFE():
    """Train the feature extractor WITHOUT the mutual-information term.

    Same pipeline as get_FE but uses train_extractor.train_Z (no MI
    estimator). Saves to the same Models/mix/extractor/ paths — running both
    get_FE and get_ZFE overwrites each other's checkpoints.
    """
    FE = torch.load("Models/mix/pre_train/FE.pth")
    CF = torch.load("Models/mix/pre_train/CF.pth")
    if torch.cuda.is_available():
        # Wrap in DataParallel before moving to GPU when several are visible.
        if torch.cuda.device_count() > 1:
            FE = torch.nn.DataParallel(FE)
            CF = torch.nn.DataParallel(CF)
        FE = FE.cuda()
        CF = CF.cuda()
    try:
        for epoch in range(total_epoch):
            print("epoch %d" % epoch)
            current_lr = adjust_learning_rate(epoch, lr)
            FE, CF = train_extractor.train_Z(FE, CF, data_train_loader, current_lr, vis)
            train_extractor.test_classifier(FE, CF, data_test_loader)
    except KeyboardInterrupt:
        # Allow Ctrl-C to stop training early; the partial models are still saved.
        pass
    # Save the unwrapped .module when DataParallel was used.
    if torch.cuda.device_count() > 1:
        torch.save(FE.module, "Models/mix/extractor/FE.pth")
        torch.save(CF.module, "Models/mix/extractor/FE_CF.pth")
    else:
        torch.save(FE, "Models/mix/extractor/FE.pth")
        torch.save(CF, "Models/mix/extractor/FE_CF.pth")
    return FE
def get_zdecoder(FE_path):
    """Train a decoder that reconstructs images from frozen FE features.

    :param FE_path: checkpoint path of the (frozen) feature extractor.
    :returns: the trained decoder (possibly DataParallel-wrapped).
    """
    FE = torch.load(FE_path)
    # Freeze the extractor — only the decoder is optimized.
    for p in FE.parameters():
        p.requires_grad = False
    DC = decoder.ZDecoder()
    if torch.cuda.is_available():
        if torch.cuda.device_count() > 1:
            FE = torch.nn.DataParallel(FE)
            DC = torch.nn.DataParallel(DC)
        FE = FE.cuda()
        DC = DC.cuda()
    try:
        for epoch in range(total_epoch):
            print("epoch %d" % epoch)
            cur_lr = adjust_learning_rate_decoder(epoch, lr)
            DC = train_decoder.train_zdecoder(FE, DC, data_train_loader, cur_lr, vis)
    except KeyboardInterrupt:
        # Allow Ctrl-C to stop training early; the partial decoder is still saved.
        pass
    # NOTE(review): saves under Models/gender/ while the extractor functions
    # save under Models/mix/ — confirm this split of directories is intended.
    if torch.cuda.device_count() > 1:
        torch.save(DC.module, "Models/gender/decoder/decoder.pth")
    else:
        torch.save(DC, "Models/gender/decoder/decoder.pth")
    return DC
def get_classifier(FE_path):
    """Train a downstream classifier on top of a frozen feature extractor.

    Uses the secondary ("tag5") dataset loaders defined at module level.

    :param FE_path: checkpoint path of the (frozen) feature extractor.
    :returns: the trained classifier (possibly DataParallel-wrapped).
    """
    FE = torch.load(FE_path)
    # Freeze the extractor — only the classifier head is optimized.
    for p in FE.parameters():
        p.requires_grad = False
    CF = SegmentVGG16.Classifier()
    if torch.cuda.is_available():
        if torch.cuda.device_count() > 1:
            FE = torch.nn.DataParallel(FE)
            CF = torch.nn.DataParallel(CF)
        FE = FE.cuda()
        CF = CF.cuda()
    try:
        for epoch in range(total_epoch):
            print("epoch %d" % epoch)
            current_lr = adjust_learning_rate_classifier(epoch, lr)
            CF = train_extractor.train_classifier(FE, CF, data2_train_loader, current_lr, vis)
            train_extractor.test_classifier(FE, CF, data2_test_loader)
    except KeyboardInterrupt:
        # Allow Ctrl-C to stop training early; the partial model is still saved.
        pass
    # Save the unwrapped .module when DataParallel was used.
    if torch.cuda.device_count() > 1:
        torch.save(CF.module, "Models/mix/smiling/Classifier.pth")
    else:
        torch.save(CF, "Models/mix/smiling/Classifier.pth")
    return CF
def show_result(FE_path, DC_path, save_path, data_test_loader, withU=False, image_counter=2):
    """Save one raw image and its reconstruction through FE + decoder.

    :param FE_path: feature-extractor checkpoint path.
    :param DC_path: decoder checkpoint path.
    :param save_path: where to write the reconstructed image (EPS).
    :param data_test_loader: loader to draw the sample batch from.
    :param withU: if True, also feed a one-hot label tensor to the decoder.
    :param image_counter: index of the batch to visualize (only its first
        image is used; with the batch_size=1 test loader this is an image index).
    """
    FE = torch.load(FE_path)
    DC = torch.load(DC_path)
    if torch.cuda.is_available():
        if torch.cuda.device_count() > 1:
            FE = torch.nn.DataParallel(FE)
            DC = torch.nn.DataParallel(DC)
        FE = FE.cuda()
        DC = DC.cuda()
    # Inference only: disable dropout/batch-norm updates.
    FE.eval()
    DC.eval()
    for i, (images, labels) in enumerate(data_test_loader):
        # Skip batches until the requested one.
        if i != image_counter:
            continue
        # Save the raw input image: CHW tensor -> HWC array for matplotlib.
        img = images[0]
        img = img.numpy()
        img = np.transpose(img, (1, 2, 0))
        # plt.imshow(img)
        plt.imsave('images/%d/raw.eps' % image_counter, img, format='eps')
        if withU:
            # Build one-hot labels (2 classes) to condition the decoder on.
            new_labels = torch.zeros((len(labels), 2))
            for counter in range(len(labels)):
                if labels[counter] == 0:
                    new_labels[counter][0] = 1
                else:
                    new_labels[counter][1] = 1
            if torch.cuda.is_available():
                images, new_labels = images.cuda(), new_labels.cuda()
            reconstruct_imgs = DC(FE(images), new_labels)
        else:
            if torch.cuda.is_available():
                images = images.cuda()
            reconstruct_imgs = DC(FE(images))
        # Clamp the first reconstruction into [0, 1] before saving.
        reconstruct_img = reconstruct_imgs[0]
        reconstruct_img[reconstruct_img < 0] = 0
        reconstruct_img[reconstruct_img > 1] = 1
        reconstruct_img = reconstruct_img.cpu().detach().numpy()
        reconstruct_img = np.transpose(reconstruct_img, (1, 2, 0))
        # plt.imshow(reconstruct_img)
        plt.imsave(save_path, reconstruct_img, format='eps')
        break
    plt.show()
    # plt.savefig('Result/img/loss_with_info_constrain_or_not.eps', format='eps')
if __name__ == '__main__':
    # run get_FE or get_ZFE to get a feature extractor constrained by DIM info
    FE = get_FE()
    # NOTE(review): get_FE saves under Models/mix/extractor/, but the decoder
    # below loads from Models/gender/extractor/ — confirm this path is intended.
    ZD = get_zdecoder("Models/gender/extractor/FE.pth")
    CF = get_classifier("Models/mix/extractor/FE.pth")
|
<reponame>AlexHopsworks/airflow-chef<filename>files/default/hopsworks_plugin/sensors/hopsworks_sensor.py
# -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from airflow.sensors.base_sensor_operator import BaseSensorOperator
from airflow.exceptions import AirflowException
from airflow.utils.decorators import apply_defaults
from hopsworks_plugin.hooks.hopsworks_hook import HopsworksHook
# Hopsworks job states that count as a successful terminal state.
JOB_SUCCESS_FINAL_STATES = {'FINISHED'}
# Terminal states indicating the job did not complete successfully.
JOB_FAILED_FINAL_STATES = {'FAILED', 'KILLED', 'FRAMEWORK_FAILURE',
                           'APP_MASTER_START_FAILED', 'INITIALIZATION_FAILED'}
# Any terminal state, successful or not.
JOB_FINAL_STATES = JOB_FAILED_FINAL_STATES.union(JOB_SUCCESS_FINAL_STATES)
class HopsworksJobFinishSensor(BaseSensorOperator):
    """
    Sensor to wait for a job to finish regardless of the final state
    :param job_name: Name of the job in Hopsworks
    :type job_name: str
    :param project_id: Hopsworks Project ID the job is associated with
    :type project_id: int
    :param project_name: Hopsworks Project name this job is associated with
    :type project_name: str
    :param response_check: Custom function to check the return state
    :type response_check: function
    """
    @apply_defaults
    def __init__(
            self,
            hopsworks_conn_id = 'hopsworks_default',
            job_name = None,
            project_id = None,
            project_name = None,
            response_check = None,
            *args,
            **kwargs):
        super(HopsworksJobFinishSensor, self).__init__(*args, **kwargs)
        self.hopsworks_conn_id = hopsworks_conn_id
        self.job_name = job_name
        self.project_id = project_id
        self.project_name = project_name
        self.response_check = response_check
    def _get_hook(self):
        # Build a fresh hook per poke; self.owner comes from BaseOperator.
        return HopsworksHook(self.hopsworks_conn_id, self.project_id, self.project_name, self.owner)
    def poke(self, context):
        """Return True once the job has reached any terminal state
        (or whatever the user-supplied response_check decides)."""
        hook = self._get_hook()
        state = hook.get_job_state(self.job_name)
        if self.response_check:
            return self.response_check(state)
        # If no check was defined, assume that any FINAL state is success
        return state.upper() in JOB_FINAL_STATES
class HopsworksJobSuccessSensor(BaseSensorOperator):
    """
    Sensor to wait for a successful completion of a job
    If the job fails, the sensor will fail
    :param job_name: Name of the job in Hopsworks
    :type job_name: str
    :param project_id: Hopsworks Project ID the job is associated with
    :type project_id: int
    :param project_name: Hopsworks Project name this job is associated with
    :type project_name: str
    :param poke_interval: Seconds to wait between successive state checks
    :type poke_interval: int
    :param timeout: Seconds before the sensor times out
    :type timeout: int
    """
    @apply_defaults
    def __init__(
            self,
            hopsworks_conn_id = 'hopsworks_default',
            job_name = None,
            project_id = None,
            project_name = None,
            poke_interval = 10,
            timeout = 3600,
            *args,
            **kwargs):
        # BUG FIX: poke_interval and timeout were accepted here but never
        # forwarded to BaseSensorOperator, so they were silently dropped and
        # Airflow's defaults applied instead. Forward them explicitly.
        super(HopsworksJobSuccessSensor, self).__init__(
            poke_interval=poke_interval, timeout=timeout, *args, **kwargs)
        self.hopsworks_conn_id = hopsworks_conn_id
        self.job_name = job_name
        self.project_id = project_id
        self.project_name = project_name
    def _get_hook(self):
        # Build a fresh hook per poke; self.owner comes from BaseOperator.
        return HopsworksHook(self.hopsworks_conn_id, self.project_id, self.project_name, self.owner)
    def poke(self, context):
        """Return True on success; raise immediately if the job failed."""
        hook = self._get_hook()
        state = hook.get_job_state(self.job_name)
        if state.upper() in JOB_FAILED_FINAL_STATES:
            raise AirflowException("Hopsworks job failed")
        return state.upper() in JOB_SUCCESS_FINAL_STATES
|
<reponame>salfamusic/DisentangledFaceGAN<filename>preprocess/preprocess_utils.py<gh_stars>0
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
import numpy as np
import os
from scipy.io import loadmat,savemat
from PIL import Image,ImageOps
from array import array
from mtcnn import MTCNN
from keras.utils import get_file
import cv2
import dlib
import bz2
# Download URL for dlib's pre-trained 68-point facial landmark predictor.
LANDMARKS_MODEL_URL = 'http://dlib.net/files/shape_predictor_68_face_landmarks.dat.bz2'
class LandmarksDetector:
    """Thin wrapper around dlib's face detector + 68-point shape predictor."""
    def __init__(self, predictor_model_path):
        """
        :param predictor_model_path: path to shape_predictor_68_face_landmarks.dat file
        """
        self.detector = dlib.get_frontal_face_detector() # cnn_face_detection_model_v1 also can be used
        self.shape_predictor = dlib.shape_predictor(predictor_model_path)
    def get_landmarks(self, image):
        """Return a list of (68, 2) landmark arrays, one per detected face.

        :param image: path of the image file to load and analyze.
        """
        img = dlib.load_rgb_image(image)
        # Second argument 1 = number of times to upsample before detecting.
        dets = self.detector(img, 1)
        lms = []
        for detection in dets:
            face_landmarks = np.array([[item.x, item.y] for item in self.shape_predictor(img, detection).parts()])
            lms.append(face_landmarks)
        return lms
def unpack_bz2(src_path):
    """Decompress a ``.bz2`` file next to itself and return the new path.

    :param src_path: path of the compressed file; must end in ``.bz2``
        (the destination path is the source path minus its last 4 chars).
    :returns: path of the decompressed file.
    """
    dst_path = src_path[:-4]  # strip the trailing '.bz2'
    # Use context managers so both handles are closed promptly
    # (the original leaked the BZ2File handle).
    with bz2.BZ2File(src_path) as src, open(dst_path, 'wb') as fp:
        fp.write(src.read())
    return dst_path
def get_landmarks_mtcnn(image):
    """Detect faces with MTCNN and return 5-point landmarks per face.

    :param image: path of the image file (read with OpenCV, converted to RGB).
    :returns: list of (5, 2) arrays ordered left eye, right eye, nose,
        mouth left, mouth right (the final reindex [1,2,0,3,4] performs
        this reordering, matching get_landmarks_dlib below).
    """
    img = cv2.cvtColor(cv2.imread(image), cv2.COLOR_BGR2RGB)
    # NOTE(review): the detector is rebuilt on every call — hoist it if this
    # function is used in a loop.
    detector = MTCNN()
    faces = detector.detect_faces(img)
    face_arrs = []
    for face in faces:
        nose = face['keypoints']['nose']
        left_eye = face['keypoints']['left_eye']
        right_eye = face['keypoints']['right_eye']
        mouth_left = face['keypoints']['mouth_left']
        mouth_right = face['keypoints']['mouth_right']
        # Convert keypoint tuples to plain [x, y] lists before stacking.
        nose = [nose[0], nose[1]]
        left_eye = [left_eye[0], left_eye[1]]
        right_eye = [right_eye[0], right_eye[1]]
        mouth_left = [mouth_left[0], mouth_left[1]]
        mouth_right = [mouth_right[0], mouth_right[1]]
        res = np.stack([nose, left_eye, right_eye, mouth_left, mouth_right], axis=0)
        # Reorder rows: eyes first, then nose, then mouth corners.
        res = res[[1,2,0,3,4],:]
        face_arrs.append(res)
    return face_arrs
def get_landmarks_dlib(image):
    """Detect faces with dlib and return 5-point landmarks per face.

    :param image: path of the image file to analyze.
    :returns: list of (5, 2) arrays ordered left eye, right eye, nose,
        mouth left, mouth right (matching get_landmarks_mtcnn).
    """
    landmarks_model_path = unpack_bz2(get_file('shape_predictor_68_face_landmarks.dat.bz2',
                                               LANDMARKS_MODEL_URL, cache_subdir='temp'))
    landmarks_detector = LandmarksDetector(landmarks_model_path)
    faces = landmarks_detector.get_landmarks(image)
    face_arrs = []
    # calculate 5 facial landmarks using 68 landmarks
    # 0-based indices of nose tip, eye corners, and mouth corners in the
    # 68-point annotation (same index table as load_lm3d below).
    lm_idx = np.array([31,37,40,43,46,49,55]) - 1
    for face in faces:
        # nose, left eye center, right eye center, left mouth, right mouth.
        # BUG FIX: the original slices lm_idx[1:2] / lm_idx[3:4] selected a
        # single eye corner each, so the "eye centers" were just one corner
        # point; average BOTH corners, consistent with load_lm3d.
        face_5 = np.stack([face[lm_idx[0], :],
                           np.mean(face[lm_idx[1:3], :], 0),
                           np.mean(face[lm_idx[3:5], :], 0),
                           face[lm_idx[5], :],
                           face[lm_idx[6], :]], axis=0)
        face_5 = face_5[[1,2,0,3,4],:]
        face_arrs.append(face_5)
    return face_arrs
# Load expression basis provided by Guo et al.,
# https://github.com/Juyong/3DFace.
def LoadExpBasis():
n_vertex = 53215
Expbin = open('./renderer/BFM face model/Exp_Pca.bin','rb')
exp_dim = array('i')
exp_dim.fromfile(Expbin,1)
expMU = array('f')
expPC = array('f')
expMU.fromfile(Expbin,3*n_vertex)
expPC.fromfile(Expbin,3*exp_dim[0]*n_vertex)
expPC = np.array(expPC)
expPC = np.reshape(expPC,[exp_dim[0],-1])
expPC = np.transpose(expPC)
expEV = np.loadtxt('./renderer/BFM face model/std_exp.txt')
return expPC,expEV
# Load BFM09 face model and transfer it to our face model
def transferBFM09():
    """Convert the original BFM09 model into this project's cropped model.

    Reads 01_MorphableModel.mat plus the Guo et al. expression basis,
    truncates the bases (80 id / 64 expression / 80 texture components),
    re-indexes everything to the 35709-vertex cropped topology, and saves
    the result as BFM_model_front_gan.mat.
    """
    original_BFM = loadmat('./renderer/BFM face model/01_MorphableModel.mat')
    shapePC = original_BFM['shapePC'] # shape basis
    shapeEV = original_BFM['shapeEV'] # corresponding eigen value
    shapeMU = original_BFM['shapeMU'] # mean face
    texPC = original_BFM['texPC'] # texture basis
    texEV = original_BFM['texEV'] # eigen value
    texMU = original_BFM['texMU'] # mean texture
    expPC,expEV = LoadExpBasis() # expression basis and eigen value
    # Scale each basis column by its eigenvalue so coefficients are unitless.
    idBase = shapePC*np.reshape(shapeEV,[-1,199])
    idBase = idBase/1e5 # unify the scale to decimeter
    idBase = idBase[:,:80] # use only first 80 basis
    exBase = expPC*np.reshape(expEV,[-1,79])
    exBase = exBase/1e5 # unify the scale to decimeter
    exBase = exBase[:,:64] # use only first 64 basis
    texBase = texPC*np.reshape(texEV,[-1,199])
    texBase = texBase[:,:80] # use only first 80 basis
    # Our face model is cropped along face landmarks which contains only 35709 vertex.
    # original BFM09 contains 53490 vertex, and expression basis provided by Guo et al. contains 53215 vertex.
    # thus we select corresponding vertex to get our face model.
    index_exp = loadmat('./renderer/BFM face model/BFM_front_idx.mat')
    index_exp = index_exp['idx'].astype(np.int32) - 1 #starts from 0 (to 53215)
    index_shape = loadmat('./renderer/BFM face model/BFM_exp_idx.mat')
    index_shape = index_shape['trimIndex'].astype(np.int32) - 1 #starts from 0 (to 53490)
    index_shape = index_shape[index_exp]
    # Re-index each basis per-vertex: reshape to (n_vertex, 3, dim), select
    # the cropped vertices, then flatten back to (3*n_cropped, dim).
    idBase = np.reshape(idBase,[-1,3,80])
    idBase = idBase[index_shape,:,:]
    idBase = np.reshape(idBase,[-1,80])
    texBase = np.reshape(texBase,[-1,3,80])
    texBase = texBase[index_shape,:,:]
    texBase = np.reshape(texBase,[-1,80])
    exBase = np.reshape(exBase,[-1,3,64])
    exBase = exBase[index_exp,:,:]
    exBase = np.reshape(exBase,[-1,64])
    meanshape = np.reshape(shapeMU,[-1,3])/1e5
    meanshape = meanshape[index_shape,:]
    meanshape = np.reshape(meanshape,[1,-1])
    meantex = np.reshape(texMU,[-1,3])
    meantex = meantex[index_shape,:]
    meantex = np.reshape(meantex,[1,-1])
    # region used for image rendering, and 68 landmarks index etc.
    gan_tl = loadmat('./renderer/BFM face model/gan_tl.mat')
    gan_tl = gan_tl['f']
    gan_mask = loadmat('./renderer/BFM face model/gan_mask.mat')
    gan_mask = gan_mask['idx']
    other_info = loadmat('./renderer/BFM face model/facemodel_info.mat')
    keypoints = other_info['keypoints']
    point_buf = other_info['point_buf']
    tri = other_info['tri']
    # save our face model
    savemat('./renderer/BFM face model/BFM_model_front_gan.mat',{'meanshape':meanshape,'meantex':meantex,'idBase':idBase,'exBase':exBase,'texBase':texBase,\
        'tri':tri,'point_buf':point_buf,'keypoints':keypoints,'gan_mask':gan_mask,'gan_tl':gan_tl})
#calculating least squares problem
def POS(xp, x):
    """Estimate weak-perspective translation and scale by least squares.

    Solves xp ~ s*R*x + t for the stacked unknowns k = [s*R1, s*tx, s*R2, s*ty].

    :param xp: (2, n) projected 2-D landmark positions.
    :param x: (3, n) corresponding 3-D reference points.
    :returns: (t, s) — t is the (2, 1) translation, s the scalar scale.
    """
    npts = xp.shape[1]
    # Build the 2n x 8 design matrix: even rows are the x-equations,
    # odd rows the y-equations, each of the form [x^T, 1] . unknowns.
    A = np.zeros([2*npts,8])
    A[0:2*npts-1:2,0:3] = x.transpose()
    A[0:2*npts-1:2,3] = 1
    A[1:2*npts:2,4:7] = x.transpose()
    A[1:2*npts:2,7] = 1
    b = np.reshape(xp.transpose(),[2*npts,1])
    # rcond=None: use the modern default cutoff and silence the
    # FutureWarning raised by the legacy no-argument call.
    k,_,_,_ = np.linalg.lstsq(A,b,rcond=None)
    R1 = k[0:3]
    R2 = k[4:7]
    sTx = k[3]
    sTy = k[7]
    # Scale = average norm of the two scaled rotation rows.
    s = (np.linalg.norm(R1) + np.linalg.norm(R2))/2
    t = np.stack([sTx,sTy],axis = 0)
    return t,s
# align image for 3D face reconstruction
def process_img(img,lm,t,s,target_size = 512.):
    """Rescale and crop *img* around the face; transform landmarks to match.

    :param img: PIL image.
    :param lm: (n, 2) landmark array in image coordinates.
    :param t: (2,) translation from POS; s: scale from POS.
    :param target_size: side length of the output crop.
    :returns: (cropped PIL image, transformed landmark array).

    NOTE(review): w0/s*102 relies on s being a numpy scalar — a plain Python
    float has no .astype. The constant 102 appears to be the reference face
    size in pixels at scale 1 — confirm against the 3D model setup.
    """
    w0,h0 = img.size
    w = (w0/s*102).astype(np.int32)
    h = (h0/s*102).astype(np.int32)
    img = img.resize((w,h),resample = Image.BICUBIC)
    # Crop window centered on the (rescaled) face translation t.
    left = (w/2 - target_size/2 + float((t[0] - w0/2)*102/s)).astype(np.int32)
    right = left + target_size
    up = (h/2 - target_size/2 + float((h0/2 - t[1])*102/s)).astype(np.int32)
    below = up + target_size
    img = img.crop((left,up,right,below))
    # Apply the same translation/scale/crop offset to the landmarks.
    lm = np.stack([lm[:,0] - t[0] + w0/2,lm[:,1] - t[1] + h0/2],axis = 1)/s*102
    lm = lm - np.reshape(np.array([(w/2 - target_size/2),(h/2-target_size/2)]),[1,2])
    return img,lm
def Preprocess(img,lm,lm3D,target_size = 512.):
    """Align an input face image to the standard 3D landmark template.

    :param img: PIL image.
    :param lm: (5, 2) detected landmarks in image coordinates.
    :param lm3D: (5, 3) standard 3D landmarks (see load_lm3d).
    :param target_size: output crop size in pixels.
    :returns: (aligned image, aligned landmarks, trans_params) where
        trans_params = [w0, h0, 102/s, tx, ty] records the transform.
    """
    w0,h0 = img.size
    # change from image plane coordinates to 3D sapce coordinates(X-Y plane)
    lm = np.stack([lm[:,0],h0 - 1 - lm[:,1]], axis = 1)
    # calculate translation and scale factors using 5 facial landmarks and standard landmarks
    t,s = POS(lm.transpose(),lm3D.transpose())
    # Rescale so the 224-based reference scale matches the target crop size.
    s = s*224./target_size
    # processing the image
    img_new,lm_new = process_img(img,lm,t,s,target_size = target_size)
    # Flip landmark y back to image coordinates (origin at top-left).
    lm_new = np.stack([lm_new[:,0],target_size - lm_new[:,1]], axis = 1)
    trans_params = np.array([w0,h0,102.0/s,t[0],t[1]])
    return img_new,lm_new,trans_params
def load_lm3d():
    """Load the standard 3D landmarks and reduce them to the 5-point set.

    Returns a (5, 3) array ordered: left eye, right eye, nose,
    mouth left, mouth right.
    """
    mat = loadmat('preprocess/similarity_Lm3D_all.mat')
    Lm3D = mat['lm']
    # 0-based indices of nose tip, eye corners and mouth corners within the
    # 68-point annotation.
    lm_idx = np.array([31, 37, 40, 43, 46, 49, 55]) - 1
    nose = Lm3D[lm_idx[0], :]
    left_eye = np.mean(Lm3D[lm_idx[[1, 2]], :], 0)
    right_eye = np.mean(Lm3D[lm_idx[[3, 4]], :], 0)
    mouth_left = Lm3D[lm_idx[5], :]
    mouth_right = Lm3D[lm_idx[6], :]
    stacked = np.stack([nose, left_eye, right_eye, mouth_left, mouth_right], axis=0)
    # Reorder rows to: eyes first, then nose, then mouth corners.
    return stacked[[1, 2, 0, 3, 4], :]
# load input images and corresponding 5 landmarks
def load_img(img_path, lm_path):
    """Open the image at *img_path* and read its landmark text file."""
    return Image.open(img_path), np.loadtxt(lm_path)
# Crop and rescale face region for GAN training
def crop_n_rescale_face_region(image,coeff):
    """Center the face (undo 3D translation) and crop a 256x256 region.

    :param image: HxWx3 uint8 array.
    :param coeff: reconstruction coefficient row; indices 254-256 hold the
        3D translation (tx, ty, tz) — assumed layout, confirm against the
        coefficient definition used by the reconstruction network.
    :returns: 256x256x3 uint8 array.
    """
    tx = coeff[0,254]
    ty = coeff[0,255]
    tz = coeff[0,256]
    # Camera focal length / distance and base scale for the 512-px frame.
    f = 1015.*512/224
    cam_pos = 10.
    scale = 1.22*224/512
    # cancel translation and rescale face size
    M = np.float32([[1,0,-f*tx/(cam_pos - tz)],[0,1,f*ty/(cam_pos - tz)]])
    (rows, cols) = image.shape[:2]
    img_shift = cv2.warpAffine(image,M,(cols,rows))
    # crop image to 256*256
    scale_ = scale*(cam_pos - tz)/cam_pos
    w = int(cols*scale_)
    h = int(rows*scale_)
    res = cv2.resize(img_shift,(w,h))
    res = Image.fromarray(res.astype(np.uint8),'RGB')
    # 10-px black border guards against crops that extend past the image.
    res = ImageOps.expand(res,border=10,fill = 'black')
    res = res.crop((round(w/2)-128+10,round(h/2)-128+10,round(w/2)+128+10,round(h/2)+128+10))
    res = np.array(res)
    res = res.astype(np.uint8)
    return res
def crop_n_rescale_no_translation_face_region(image,coeff):
    """Crop a 256x256 face region WITHOUT canceling the 3D translation.

    Same rescale-and-crop as crop_n_rescale_face_region, minus the
    warpAffine shift step.

    :param image: HxWx3 uint8 array.
    :param coeff: reconstruction coefficient row; index 256 holds tz
        (assumed layout — same as crop_n_rescale_face_region).
    :returns: 256x256x3 uint8 array.
    """
    tz = coeff[0,256]
    # (removed unused local f = 1015.*512/224 — only the translation-
    # canceling variant needs the focal length)
    cam_pos = 10.
    scale = 1.22*224/512
    # crop image to 256*256
    (rows, cols) = image.shape[:2]
    scale_ = scale*(cam_pos - tz)/cam_pos
    w = int(cols*scale_)
    h = int(rows*scale_)
    res = cv2.resize(image,(w,h))
    res = Image.fromarray(res.astype(np.uint8),'RGB')
    # 10-px black border guards against crops that extend past the image.
    res = ImageOps.expand(res,border=10,fill = 'black')
    res = res.crop((round(w/2)-128+10,round(h/2)-128+10,round(w/2)+128+10,round(h/2)+128+10))
    res = np.array(res)
    res = res.astype(np.uint8)
    return res
|
<gh_stars>1-10
# knotClassifier.py
# Author = <NAME>
# This script trains a convolutional neural network on a dataset of images
# The final weights are then saved within the same directory as 'first_try.h5'
# Script Usage: python knotClassifier.py
# For example: When this script is in the same directory as a dataset folder named 'dataResized',
# where all training data is in 'dataResized/train',
# and all validation data is in 'dataResized/validation', simply enter:
# python knotClassifier.py to train default the medium convolutional neural network with no data augmentation.
# To train with data augmentation simply add the -a flag like so:
# python knotClassifier.py -a
# To train either the small or large convolutional neural network instead of the default medium convolutional neural network
# simply add the corresponding -s flag for the small neural network or the -l flag for the large neural network like so:
# python knotClassifier.py -a -s or python knotClassifier.py -a -l
# -----------------------------------------------------------
# PARSE ARGUMENTS
import argparse
# Command-line flags: -s / -l pick the small or large architecture
# (mutually exclusive; default is the medium model), -a enables data
# augmentation during training.
parser = argparse.ArgumentParser(description='Train a convolutional neural network to classify knots.')
group = parser.add_mutually_exclusive_group()
group.add_argument('-s', '--small', action='store_true', help='Use small model architecture')
group.add_argument('-l', '--large', action='store_true', help='Use large model architecture')
group2 = parser.add_mutually_exclusive_group()
group2.add_argument('-a', '--augmentation', action='store_true', help='Use data augmentation')
args = parser.parse_args()
# -----------------------------------------------------------
# IMPORT STATEMENTS
from keras.preprocessing.image import ImageDataGenerator
from keras.models import Sequential
from keras.layers import Conv2D, MaxPooling2D
from keras.layers import Activation, Dropout, Flatten, Dense, BatchNormalization
from keras.layers.noise import GaussianNoise
from keras.utils import np_utils
from keras import backend as K
import numpy as np
import matplotlib.pyplot as plt
from sklearn.decomposition import PCA
from sklearn.manifold import TSNE
from sklearn.metrics import confusion_matrix
import itertools
# -----------------------------------------------------------
# GLOBAL VARIABLES
# Target dimensions for the images
# (150, 150) has been shown to be an optimal image size for training
# Input images are resized to 150x150; this size has been shown to work
# well for training on this dataset.
img_width, img_height = 150, 150

# Dataset locations and sizes (directory-layout specific).
train_data_dir = 'dataResized/train'
validation_data_dir = 'dataResized/validation'
nb_train_samples = 1170
nb_validation_samples = 274

# Training hyper-parameters.
epochs = 100
batch_size = 32

# Keras backends differ on whether the channel axis comes first or last.
input_shape = ((3, img_width, img_height)
               if K.image_data_format() == 'channels_first'
               else (img_width, img_height, 3))
# -----------------------------------------------------------
# CNN MODELS
# SMALL CNN MODEL
# Build the convolutional feature extractor for the selected architecture.
# All three variants share the same classification tail (Dropout -> Dense(10)
# softmax), the same compile settings, and the summary printout, so that
# tail is hoisted below the if/elif/else instead of being repeated three
# times.  Single-argument print(...) is used so the script also runs under
# Python 3.
if args.small:
    # SMALL CNN: four (conv-conv-pool) stages, narrow 32-unit dense head.
    print('You are using the small CNN model architecture')
    model = Sequential()
    model.add(Conv2D(32, (3, 3), input_shape=input_shape))
    model.add(Activation('relu'))
    model.add(Conv2D(32, (3, 3)))
    model.add(Activation('relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Conv2D(32, (3, 3)))
    model.add(Activation('relu'))
    model.add(Conv2D(32, (3, 3)))
    model.add(Activation('relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Conv2D(64, (3, 3)))
    model.add(Activation('relu'))
    model.add(Conv2D(64, (3, 3)))
    model.add(Activation('relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Conv2D(64, (3, 3)))
    model.add(Activation('relu'))
    model.add(Conv2D(64, (3, 3)))
    model.add(Activation('relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Dropout(0.5))
    model.add(Flatten())
    model.add(Dense(32))
    model.add(Activation('relu'))
elif args.large:
    # LARGE CNN: two (conv-conv-pool) stages, wide 128/64-unit dense head.
    print('You are using the large CNN model architecture')
    model = Sequential()
    model.add(Conv2D(32, (3, 3), input_shape=input_shape))
    model.add(Activation('relu'))
    model.add(Conv2D(32, (3, 3)))
    model.add(Activation('relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Conv2D(64, (3, 3)))
    model.add(Activation('relu'))
    model.add(Conv2D(64, (3, 3)))
    model.add(Activation('relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Dropout(0.5))
    model.add(Flatten())
    model.add(Dense(128))
    model.add(Activation('relu'))
    model.add(Dense(64))
    model.add(Activation('relu'))
else:
    # MEDIUM CNN (default): three single-conv pool stages, 64-unit head.
    print('You are using the medium CNN model architecture')
    model = Sequential()
    model.add(Conv2D(32, (3, 3), input_shape=input_shape))
    model.add(Activation('relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Conv2D(32, (3, 3)))
    model.add(Activation('relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Conv2D(64, (3, 3)))
    model.add(Activation('relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Dropout(0.5))
    model.add(Flatten())
    model.add(Dense(64))
    model.add(Activation('relu'))
# Shared classification tail: 10 knot classes with a softmax output.
model.add(Dropout(0.5))
model.add(Dense(10))
model.add(Activation('softmax'))
model.compile(loss='categorical_crossentropy',
              optimizer='adam',
              metrics=['accuracy'])
print(model.summary())
# -----------------------------------------------------------
# DATA AUGMENTATION
# Build the training and validation image generators.  Only the training
# generator is ever augmented; validation must see real (merely rescaled)
# images so the reported metrics are meaningful.
if args.augmentation:
    print('You are using data augmentation')
    # Aggressive training-set augmentation: full-circle rotation, shear,
    # zoom, horizontal/vertical shifts, both flips, plus 1/255 rescaling.
    train_datagen = ImageDataGenerator(
        rotation_range=360,
        rescale=1. / 255,
        shear_range=0.2,
        zoom_range=0.3,
        width_shift_range=0.2,
        height_shift_range=0.2,
        horizontal_flip=True,
        vertical_flip=True)
    # Validation: rescale only, no augmentation.
    test_datagen = ImageDataGenerator(rescale=1. / 255)
else:
    print('You are not using data augmentation')
    # Without augmentation both generators just rescale pixel values.
    train_datagen = ImageDataGenerator(rescale=1. / 255)
    test_datagen = ImageDataGenerator(rescale=1. / 255)
# -----------------------------------------------------------
# TRAINING GENERATOR
# TRAINING GENERATOR
train_generator = train_datagen.flow_from_directory(
    train_data_dir,
    target_size=(img_width, img_height),
    batch_size=batch_size,
    class_mode='categorical')
# VALIDATION GENERATOR
# shuffle=False keeps the generator's output order aligned with
# validation_generator.classes, which the later t-SNE and confusion-matrix
# code compares predictions against; with the default shuffle=True those
# labels would not correspond to the predicted samples.
validation_generator = test_datagen.flow_from_directory(
    validation_data_dir,
    target_size=(img_width, img_height),
    batch_size=batch_size,
    class_mode='categorical',
    shuffle=False)
# MODEL FITTING
fit = model.fit_generator(
    train_generator,
    steps_per_epoch=nb_train_samples // batch_size,
    epochs=epochs,
    validation_data=validation_generator,
    validation_steps=nb_validation_samples // batch_size,
    shuffle=True)
# SAVE MODEL (INCLUDING WEIGHTS)
model.save('first_try.h5')
# -----------------------------------------------------------
# TSNE VISUALISATION
# TSNE VISUALISATION
# predict_generator's `steps` argument counts *batches*, not samples: the
# original steps=nb_validation_samples made the generator wrap around and
# yield batch_size times too many (repeated) predictions.  Use just enough
# batches to cover the validation set once, then trim to the exact count so
# predictions line up with validation_generator.classes.
prediction_steps = int(np.ceil(float(nb_validation_samples) / batch_size))
predictions = model.predict_generator(validation_generator,
                                      steps=prediction_steps)
predictions = predictions[:nb_validation_samples]
# First, reduce to 10 dimensions with PCA
pca = PCA(n_components=10)
pca_results = pca.fit_transform(predictions)
print('Variance PCA: {}'.format(np.sum(pca.explained_variance_ratio_)))
# Next, run t-SNE on the PCA results to obtain a 2D plot
tsne = TSNE(n_components=2, perplexity=30, learning_rate=250, verbose=1)
tsne_results = tsne.fit_transform(pca_results[:5000])
# Convert integer class labels to a binary (one-hot) class matrix
categoricalClasses = np_utils.to_categorical(validation_generator.classes[:5000], num_classes=10)
# Create a figure where each class has a unique colour
colour_map = np.argmax(categoricalClasses, axis=1)
tsneFigure = plt.figure(figsize=(10, 10))
for colour in range(10):
    indices = np.where(colour_map == colour)[0]
    plt.scatter(tsne_results[indices, 0],
                tsne_results[indices, 1],
                label=colour)
plt.legend()
plt.title('t-SNE Visualisation')
tsneFigure.savefig('tsneVisualisation.jpg')
plt.close()
# -----------------------------------------------------------
# PLOT TRAINING HISTORY
# PLOT TRAINING HISTORY
# Two side-by-side panels: loss on the left, accuracy on the right.
historyFigure, (left, right) = plt.subplots(ncols=2, figsize=(20, 10))

def _plot_history_panel(axis, fit, keys, labels, title, ylabel):
    # Draw one panel: two history curves plus titles/labels/legend.
    for key, label in zip(keys, labels):
        axis.plot(fit.history[key], label=label)
    axis.set_title(title)
    axis.set_xlabel('Epoch')
    axis.set_ylabel(ylabel)
    axis.legend(loc='upper left')

# Plot the training history loss
def plotHistoryLoss(fit):
    _plot_history_panel(left, fit, ('loss', 'val_loss'),
                        ("Training Loss", "Validation Loss"),
                        'Model Loss', 'Loss')

# Plot the training history accuracy
def plotHistoryAccuracy(fit):
    _plot_history_panel(right, fit, ('acc', 'val_acc'),
                        ("Training Accuracy", "Validation Accuracy"),
                        'Model Accuracy', 'Accuracy')

plotHistoryLoss(fit)
plotHistoryAccuracy(fit)
historyFigure.savefig('trainingHistory.jpg')
plt.close()
# -----------------------------------------------------------
# PLOT CONFUSION MATRIX
# PLOT CONFUSION MATRIX
# Predict over the whole validation generator and reduce the softmax output
# to hard class indices for comparison with the true labels.
matrix_predictions = model.predict_generator(validation_generator)
matrix_predictions = np.argmax(matrix_predictions, axis=-1)
print(validation_generator.classes)
print(matrix_predictions)
confusionMatrix = confusion_matrix(validation_generator.classes, matrix_predictions)
class_names = ['Alpine Butterfly Knot', 'Bowline Knot', 'Clove Hitch', 'Figure-8 Knot', 'Figure-8 Loop',
               'Fisherman Knot', 'Flemish Bend', 'Overhand Knot', 'Reef Knot', 'Slip Knot']

# plot_confusion_matrix function obtained from sklearn documentation
# Confusion Matrix Examples
# http://scikit-learn.org/stable/modules/generated/sklearn.metrics.confusion_matrix.html
def plot_confusion_matrix(cm, classes,
                          normalize=False,
                          title='Confusion matrix',
                          cmap=plt.cm.Blues):
    """
    This function prints and plots the confusion matrix.
    Normalization can be applied by setting `normalize=True`.
    """
    if normalize:
        # Row-normalise so each row sums to 1 (per-true-class proportions).
        cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
        print("Normalized confusion matrix")
    else:
        print('Confusion matrix, without normalization')
    print(cm)
    plt.imshow(cm, interpolation='nearest', cmap=cmap)
    plt.title(title)
    plt.colorbar()
    tick_marks = np.arange(len(classes))
    plt.xticks(tick_marks, classes, rotation=45)
    plt.yticks(tick_marks, classes)
    fmt = '.2f' if normalize else 'd'
    thresh = cm.max() / 2.
    # Annotate every cell; use white text on dark cells for contrast.
    for row in range(cm.shape[0]):
        for col in range(cm.shape[1]):
            plt.text(col, row, format(cm[row, col], fmt),
                     horizontalalignment="center",
                     color="white" if cm[row, col] > thresh else "black")
    plt.tight_layout()
    plt.ylabel('True label')
    plt.xlabel('Predicted label')

np.set_printoptions(precision=2)
# Plot Confusion Matrix
final_confusion_matrix = plt.figure(figsize=(20, 10))
plot_confusion_matrix(confusionMatrix, classes=class_names,
                      title='Confusion Matrix')
final_confusion_matrix.savefig('confusionMatrix.jpg')
# Plot Normalised Confusion Matrix
final_normalised_confusion_matrix = plt.figure(figsize=(20, 10))
plot_confusion_matrix(confusionMatrix, classes=class_names, normalize=True,
                      title='Normalised Confusion Matrix')
final_normalised_confusion_matrix.savefig('normalisedConfusionMatrix.jpg')
plt.close()
|
<gh_stars>1-10
# Copyright 2021 <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from rdflib.paths import MulPath
from rdflib.term import Variable, URIRef, Literal
from typing import Union
from .classes import Operator, Triple
import rdflib
def op_to_query(op: Operator):
    """Serialise an Operator tree node into its SPARQL string form.

    Dispatches on ``op.type`` to the matching serialiser; raises
    NotImplementedError for operator types without a handler.
    """
    handlers = {
        'SelectQuery': select_op_to_query,
        'Project': project_op_to_query,
        'LeftJoin': left_join_op_to_query,
        'Join': join_op_to_query,
        'Filter': filter_op_to_query,
        'BGP': bgp_op_to_query,
        'ToMultiSet': to_multiset_to_query,
        'Distinct': distinct_to_query,
    }
    handler = handlers.get(op.type)
    if handler is None:
        raise NotImplementedError(op.type)
    return handler(op)
def project_op_to_query(op: Operator):
    """Render a Project node: its variable list plus a braced WHERE body."""
    var_list = ' '.join('?' + str(v) for v in op.project_vars)
    body = ''.join(op_to_query(child) for child in op.children)
    return var_list + ' WHERE {\n' + body + '}'
def select_op_to_query(op: Operator):
    """Render a SelectQuery node: SELECT followed by its serialised children."""
    return 'SELECT ' + ''.join(op_to_query(child) for child in op.children)
def left_join_op_to_query(op: Operator):
    """Render a LeftJoin: first child plain, second wrapped in OPTIONAL."""
    required, optional = op.children[0], op.children[1]
    return (op_to_query(required)
            + 'OPTIONAL {\n'
            + op_to_query(optional)
            + '}\n')
def join_op_to_query(op: Operator):
    """Render a Join: first child plain, second wrapped in a group block."""
    first, second = op.children[0], op.children[1]
    return (op_to_query(first)
            + '{\n'
            + op_to_query(second)
            + '}\n')
def bgp_op_to_query(op: Operator):
    """Render a basic graph pattern as a run of dot-terminated triples.

    BGP nodes with children are not supported (same as the original).
    """
    if len(op.children) > 0:
        raise NotImplementedError
    return ''.join(triple_string(t) for t in op.triples)
def filter_op_to_query(op: Operator):
    """Render a Filter node: serialised children followed by a FILTER clause.

    Bug fix: expressions are joined with ' && ' (spaces on both sides).
    The original joined with '&& ', gluing the operator onto the previous
    expression (e.g. '?x > 5&& ?y < 3').  Also collapses the duplicated
    len(op.expressions) check into a single branch.
    """
    query = ''.join(op_to_query(child) for child in op.children)
    if len(op.expressions) > 0:
        rendered = (term_string(e.lhs) + ' ' + e.op + ' ' + term_string(e.rhs)
                    for e in op.expressions)
        query += 'FILTER(' + ' && '.join(rendered) + ')\n'
    return query
def mulpath_op_to_query(op: Operator):
    """Render a MulPath node by concatenating its serialised children."""
    return ''.join(op_to_query(child) for child in op.children)
def distinct_to_query(op: Operator):
    """Render a Distinct node: SELECT DISTINCT plus its serialised children."""
    return 'SELECT DISTINCT ' + ''.join(op_to_query(child) for child in op.children)
def to_multiset_to_query(op: Operator):
    """Render a ToMultiSet node by concatenating its serialised children."""
    return ''.join(op_to_query(child) for child in op.children)
def triple_string(triple: Triple):
    """Render one triple as 'subject verb object .' with a trailing newline."""
    parts = (triple.subject, triple.verb, triple.object)
    return ' '.join(term_string(p) for p in parts) + ' .\n'
def term_string(term: Union[Variable, URIRef, Literal, MulPath]):
    """Serialise a single RDF term into its SPARQL surface syntax.

    Exact type() comparisons are kept from the original (note for review:
    isinstance would also match subclasses, which may not be intended here).
    Raises NotImplementedError for unsupported terms or literal datatypes.
    """
    kind = type(term)
    if kind == Variable:
        return '?' + str(term)
    if kind == URIRef:
        return '<' + str(term) + '>'
    if kind == MulPath:
        return '<' + str(term.path) + '>' + str(term.mod)
    if kind == Literal:
        if term.datatype is None:
            return '"' + str(term) + '"'
        supported = {rdflib.XSD.dateTime, rdflib.XSD.boolean,
                     rdflib.XSD.float, rdflib.XSD.int}
        if term.datatype in supported:
            return term.n3()
        raise NotImplementedError(term.datatype)
    raise NotImplementedError(term)
|
import collections
import json
import os
import time
import torch
import yaml
import torch.nn as nn
from abc import abstractmethod
class Runner:
    """Base class for training runners.

    Wires together dataset/model/optimizer/scheduler construction (the
    abstract ``load_*`` / ``initialize_model`` / ``run`` hooks are supplied
    by subclasses), handles checkpoint save/restore, optional multi-GPU
    DataParallel wrapping, simple wall-clock timing, and timestamped
    logging to stdout and ``<save_dir>/log.txt``.
    """

    def __init__(self, args):
        # Register instance state up front so every attribute exists even
        # before the subclass hooks run.
        self.model_path = None         # checkpoint directory (set in check_args)
        self.tensorboard_path = None   # tensorboard directory (set in check_args)
        self.dataset = {'train': None, 'test': None}
        self.model = None
        self.optimizer = None
        self.scheduler = None
        self.cur_time = 0              # last timestamp taken by record_time()
        self.epoch = 0                 # next epoch to run (updated on restore)
        # Validate/normalise arguments and create output directories.
        self.args = self.check_args(args)
        # Log the full configuration for reproducibility.
        args_json = json.dumps(vars(self.args), sort_keys=True, indent=2)
        self.print_log(args_json, print_time=False)
        # Select the output device: with a device list, the first entry
        # receives gathered outputs; without CUDA everything runs on CPU.
        if self.args.use_cuda:
            if type(args.device) is list:
                self.output_dev = args.device[0]
            else:
                self.output_dev = args.device
        else:
            self.output_dev = 'cpu'
        # Subclass hooks build the dataset, model, optimizer and scheduler.
        self.load_dataset()
        self.load_model()
        self.load_optimizer()
        self.load_scheduler()
        self.initialize_model()
        # Wrap in DataParallel only for a multi-GPU device list.
        assert self.model is not None
        if type(self.args.device) is list and \
                len(self.args.device) > 1 and self.args.use_cuda:
            self.model = nn.DataParallel(
                self.model, device_ids=args.device,
                output_device=self.output_dev
            )

    def check_args(self, args):
        """Normalise arguments, create output dirs, and persist the config.

        Clamps all count-like arguments to at least 1, disables CUDA when
        unavailable, and writes the final configuration to
        ``<save_dir>/config.yaml``.  Returns the (mutated) args namespace.
        """
        self.model_path = os.path.join(args.save_dir, 'models')
        if not os.path.exists(self.model_path):
            os.makedirs(self.model_path)
        self.tensorboard_path = os.path.join(args.save_dir, 'tensorboard')
        if not os.path.exists(self.tensorboard_path):
            os.makedirs(self.tensorboard_path)
        args.use_cuda = args.use_cuda and torch.cuda.is_available()
        args.num_points = max(1, args.num_points)
        args.knn = max(1, args.knn)
        args.save_interval = max(1, args.save_interval)
        args.eval_interval = max(1, args.eval_interval)
        args.log_interval = max(1, args.log_interval)
        args.train_batch_size = max(1, args.train_batch_size)
        args.test_batch_size = max(1, args.test_batch_size)
        args.num_epochs = max(1, args.num_epochs)
        # Save the configuration file for later inspection/reproduction.
        config_file = os.path.join(args.save_dir, 'config.yaml')
        args_dict = vars(args)
        with open(config_file, 'w') as f:
            yaml.dump(args_dict, f)
        return args

    @abstractmethod
    def load_dataset(self):
        """Populate self.dataset (implemented by subclasses)."""
        pass

    @abstractmethod
    def load_model(self):
        """Construct self.model (implemented by subclasses)."""
        pass

    @abstractmethod
    def initialize_model(self):
        """Initialise/restore model weights (implemented by subclasses)."""
        pass

    def load_optimizer(self):
        """Build self.optimizer (SGD or Adam) from self.args.optimizer.

        Raises ValueError for unsupported optimizer names; a ValueError
        raised by the torch constructor itself is logged, not re-raised
        (best-effort behaviour kept from the original).
        """
        if 'sgd' in self.args.optimizer.lower():
            paras = self.model.parameters()
            try:
                self.optimizer = torch.optim.SGD(
                    paras,
                    lr=self.args.lr,
                    momentum=self.args.momentum,
                    weight_decay=1e-4
                )
            except ValueError as e:
                self.print_log(str(e))
        elif 'adam' in self.args.optimizer.lower():
            paras = self.model.parameters()
            try:
                self.optimizer = torch.optim.Adam(
                    paras,
                    lr=self.args.lr,
                    weight_decay=1e-4
                )
            except ValueError as e:
                self.print_log(str(e))
        else:
            raise ValueError('Unsupported optimizer.')

    def load_scheduler(self):
        """Attach a cosine-annealing LR scheduler to the optimizer.

        No-op when the optimizer is missing.
        """
        # TODO: to support more schedulers
        if self.optimizer is None:
            return
        self.scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(
            self.optimizer, self.args.num_epochs, eta_min=self.args.lr / 100.0
        )

    @abstractmethod
    def run(self):
        """Execute the training/evaluation loop (implemented by subclasses)."""
        pass

    def load_model_weights(self, model, weights_file, ignore):
        """Restore model weights from a checkpoint file.

        Strips any 'module.' DataParallel prefix, moves tensors to the
        output device, drops the keys listed in ``ignore``, and sets
        self.epoch to the checkpoint epoch + 1.
        """
        self.print_log(f'Loading model weights from {weights_file}...')
        check_points = torch.load(
            weights_file, map_location=lambda storage, loc: storage
        )
        self.epoch = check_points['epoch'] + 1
        # Normalise key names and move tensors to the output device.
        model_weights = collections.OrderedDict([
            (k.split('module.')[-1], v.to(self.output_dev))
            for k, v in check_points['model'].items()
        ])
        for w in ignore:
            if model_weights.pop(w, None) is not None:
                self.print_log('Successfully remove weights: {}.'.format(w))
            else:
                self.print_log('Can not remove weights: {}.'.format(w))
        self._try_load_weights(model, model_weights)

    def load_optimizer_weights(self, optimizer, weights_file):
        """Restore the optimizer state from a checkpoint file."""
        self.print_log(f'Loading optimizer weights from {weights_file}...')
        check_points = torch.load(
            weights_file, map_location=lambda storage, loc: storage
        )
        optim_weights = check_points['optimizer']
        self._try_load_weights(optimizer, optim_weights)

    def load_scheduler_weights(self, scheduler, weights_file):
        """Restore the scheduler state from a checkpoint file."""
        self.print_log(f'Loading scheduler weights from {weights_file}...')
        check_points = torch.load(
            weights_file, map_location=lambda storage, loc: storage
        )
        sched_weights = check_points['scheduler']
        self._try_load_weights(scheduler, sched_weights)

    def _try_load_weights(self, model, weights):
        """Load a state dict, falling back to a partial update on failure.

        On a strict-load failure the missing keys are logged and the
        checkpoint weights are merged into the current state dict instead.
        """
        try:
            model.load_state_dict(weights)
        except Exception:
            # Bug fix: the original bare `except:` also swallowed
            # KeyboardInterrupt/SystemExit; only genuine load errors
            # should trigger the partial-update fallback.
            state = model.state_dict()
            diff = list(set(state.keys()).difference(set(weights.keys())))
            self.print_log('Can not find these weights:')
            for d in diff:
                self.print_log(d)
            state.update(weights)
            model.load_state_dict(state)

    def save_weights(self, epoch, model, optimizer, scheduler, save_path):
        """Write a checkpoint (epoch + model/optimizer/scheduler state).

        Model tensors are moved to CPU and DataParallel 'module.' prefixes
        stripped so the checkpoint loads on any device configuration.
        """
        model_weights = collections.OrderedDict([
            (k.split('module.')[-1], v.cpu())
            for k, v in model.state_dict().items()
        ])
        optim_weights = optimizer.state_dict()
        sched_weights = scheduler.state_dict()
        save_dict = {
            'epoch': epoch,
            'model': model_weights,
            'optimizer': optim_weights,
            'scheduler': sched_weights
        }
        torch.save(save_dict, save_path)
        self.print_log('Model ' + save_path + ' saved.')

    def record_time(self):
        """Store and return the current wall-clock time."""
        self.cur_time = time.time()
        return self.cur_time

    def tick(self):
        """Return seconds elapsed since the last mark and re-mark now."""
        split_time = time.time() - self.cur_time
        self.record_time()
        return split_time

    def print_log(self, msg, print_time=True):
        """Print a message, optionally timestamped, and append it to log.txt
        when args.print_log is set."""
        if print_time:
            localtime = time.strftime("%Y/%m/%d %H:%M:%S", time.localtime())
            msg = "[" + localtime + '] ' + msg
        print(msg)
        if self.args.print_log:
            with open(os.path.join(self.args.save_dir, 'log.txt'), 'a') as f:
                print(msg, file=f)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.