text stringlengths 957 885k |
|---|
import sys, os, pickle
import pandas as pd
from sklearn.feature_extraction.text import CountVectorizer, TfidfTransformer
from sklearn.metrics import classification_report, confusion_matrix, roc_curve, auc, accuracy_score, precision_score, recall_score, f1_score
from sklearn.linear_model import LogisticRegression
from sklearn.ensemble import RandomForestClassifier
from sklearn.pipeline import Pipeline
import nltk.corpus
# English stopwords with apostrophes removed (e.g. "don't" -> "dont") so they
# match the tokenization produced by CountVectorizer below.
stopwords = set([stopword.replace('\'', '')
                 for stopword in nltk.corpus.stopwords.words('english')])
# a bit ugly: assumes hadm_id2path.pkl has been written by extract_concepts.py
with open('hadm_id2path.pkl', 'rb') as f:
    hadm_id2path = pickle.load(f)
# Build a reverse lookup: "<patient_id>_<episode>_timeseries..." -> hadm_id.
# The path layout is assumed to be .../<pat_id>/<episode>_noteconcepts...
tmplist = []
for (hadm_id, path) in hadm_id2path.items():
    (path, epfile) = os.path.split(path)     # split off the episode file name
    (_, pat_id) = os.path.split(path)        # parent directory is the patient id
    timeseries_file = (pat_id + '_' +
                       epfile.replace('_noteconcepts', '_timeseries'))
    tmplist.append((timeseries_file, hadm_id))
hadm_table = dict(tmplist)
def load_listfile(path):
    """Read a benchmark listfile CSV and return a one-column DataFrame.

    Each data line has the form ``<timeseries_file>,...,<label>``.  The
    timeseries file name is mapped to its hadm_id via the module-level
    ``hadm_table``, and the label becomes the 'TARGET' column.

    Args:
        path: path to a listfile CSV whose first line is a header.

    Returns:
        pd.DataFrame indexed by hadm_id with a single int column 'TARGET'.
    """
    hadm_ids = []
    ys = []
    with open(path, 'r') as f:
        f.readline()  # first line is header
        for line in f:
            line = line.strip()
            if not line:
                continue  # tolerate trailing blank lines
            fields = line.split(',')
            timeseries_file = fields[0]
            # BUG FIX: previously int(line[-1]) read only the final character,
            # which silently truncates multi-digit labels; parse the last CSV
            # field instead.
            y = int(fields[-1])
            hadm_ids.append(hadm_table[timeseries_file])
            ys.append(y)
    return pd.DataFrame(data=ys, index=hadm_ids, columns=['TARGET'])
def add_text(df_train, df_val, df_test):
    """Attach note text to the three split DataFrames.

    Loads a pickled pandas Series/DataFrame of note text (path given as the
    second command-line argument) indexed by hadm_id.  Assigning it to each
    split relies on pandas index alignment, so every split picks up only the
    rows matching its own hadm_ids.
    """
    with open(sys.argv[2], 'rb') as f:
        df_text = pickle.load(f)
    # Index-aligned assignment: rows absent from df_text become NaN.
    df_train['TEXT'] = df_text
    df_val['TEXT'] = df_text
    df_test['TEXT'] = df_text
    return df_train, df_val, df_test
print('loading listfiles...')
# sys.argv[1] is the directory holding the benchmark listfiles.
df_train = load_listfile(os.path.join(sys.argv[1], 'train_listfile.csv'))
df_val = load_listfile(os.path.join(sys.argv[1], 'val_listfile.csv'))
df_test = load_listfile(os.path.join(sys.argv[1], 'test_listfile.csv'))
print('loading text...')
df_train, df_val, df_test = add_text(df_train, df_val, df_test)
# Plain numpy arrays for sklearn; rows stay aligned because all columns
# came from the same index.
X_train = df_train['TEXT'].values
X_val = df_val['TEXT'].values
X_test = df_test['TEXT'].values
y_train = df_train['TARGET'].values
y_val = df_val['TARGET'].values
y_test = df_test['TARGET'].values
print('fitting model...')
# Binary bag-of-words features into a random forest; the commented-out
# tfidf/logistic-regression stages are alternative configurations kept for
# hyperparameter experiments.
model = Pipeline([('vect', CountVectorizer(stop_words=stopwords, binary=True)),
                  #('tfidf', TfidfTransformer(use_idf=True)),
                  # ('lr', LogisticRegression(solver='lbfgs', penalty='l2',
                  #     C=0.001, max_iter=10000, class_weight='balanced'))
                  ('rf', RandomForestClassifier(n_estimators=200,
                                                class_weight='balanced', random_state=42,
                                                max_leaf_nodes=750))
                  ])
model.fit(X_train, y_train)
# Validation metrics (used for model selection).
val_pred = model.predict(X_val)
val_prob = model.predict_proba(X_val)[:, 1]
fpr, tpr, _ = roc_curve(y_val, val_prob)
roc_auc = auc(fpr, tpr)
print(confusion_matrix(y_val, val_pred))
print(classification_report(y_val, val_pred))
print('ROC AUC:', roc_auc)
# only uncomment when hyperparameters have been determined
# NOTE(review): quit() is the interactive-session helper; sys.exit() is the
# conventional way to stop a script — confirm before changing.
quit()
# Test-set evaluation, printed as LaTeX table cells (val on top, test in bold).
test_pred = model.predict(X_test)
test_prob = model.predict_proba(X_test)[:, 1]
fpr, tpr, _ = roc_curve(y_test, test_prob)
test_roc_auc = auc(fpr, tpr)
print('\\begin{tabular}{@{}c@{}} %.3f \\\\ \\textbf{%.3f} \end{tabular} &' %
      (accuracy_score(y_val, val_pred), accuracy_score(y_test, test_pred)))
print('\\begin{tabular}{@{}c@{}} %.3f \\\\ \\textbf{%.3f} \end{tabular} &' %
      (precision_score(y_val, val_pred), precision_score(y_test, test_pred)))
print('\\begin{tabular}{@{}c@{}} %.3f \\\\ \\textbf{%.3f} \end{tabular} &' %
      (recall_score(y_val, val_pred), recall_score(y_test, test_pred)))
print('\\begin{tabular}{@{}c@{}} %.3f \\\\ \\textbf{%.3f} \end{tabular} &' %
      (f1_score(y_val, val_pred), f1_score(y_test, test_pred)))
print('\\begin{tabular}{@{}c@{}} %.3f \\\\ \\textbf{%.3f} \end{tabular} \\\\ \\hline' %
      (roc_auc, test_roc_auc))
# Feature-importance inspection for the logistic-regression variant
# (only meaningful when the 'lr' stage above is enabled).
#vocab = [(word, index) for (word, index) in model['vect'].vocabulary_.items()]
#vocab.sort(key=lambda x: x[1])
#vocab = [item[0] for item in vocab]
#coefs = model['lr'].coef_
#significance = [(w, c) for (w, c) in zip(vocab, coefs[0,:])]
#significance.sort(key=lambda x: x[1])
#print(significance[0:10])
#print(significance[-1:-11:-1])
|
import random
import math
import copy
import itertools
class mReasoner():
    """ mReasoner implementation based on <NAME>. and <NAME>. (2013).
    Some functions are directly translated from the source.
    For original code see http://www.modeltheory.org/models/mreasoner/

    Syllogisms are strings like "AE1": two premise moods (A/I/E/O) followed
    by the figure (1-4).  Mental models are lists of "individuals", each a
    list of tokens such as "a", "-b", "c".
    """

    def __init__(self):
        # The four mReasoner parameters:
        self.sigma = 0    # counterexample search: P(engaging system 2)
        self.lam = 4      # size: lambda of the truncated Poisson for model size
        self.epsilon = 0  # encoding: P(drawing only canonical individuals)
        self.omega = 1    # weaken: P(weakening a refuted conclusion)

    def get_premises(self, syllogism):
        """takes a syllogism in string representation and returns its premises

        The figure (3rd character) determines the term order of each premise,
        e.g. figure "1" is a-b / b-c.
        """
        if syllogism[2] == "1":
            return [syllogism[0] + "a" + "b", syllogism[1] + "b" + "c"]
        elif syllogism[2] == "2":
            return [syllogism[0] + "b" + "a", syllogism[1] + "c" + "b"]
        elif syllogism[2] == "3":
            return [syllogism[0] + "a" + "b", syllogism[1] + "c" + "b"]
        elif syllogism[2] == "4":
            return [syllogism[0] + "b" + "a", syllogism[1] + "b" + "c"]

    def get_individuals(self, subj, obj, mood):
        """returns all individuals that must be a part of a mental model,
        all individuals that directly follow from the premise (canonical)
        and all individuals that hold in the premise (combinations)"""
        individuals = []
        if mood == "A":  # All subj are obj
            individuals.append([subj, obj])
            canonical_individuals = [[subj, obj]]
            combinations = [[subj, obj], ["-" + subj, obj], ["-" + subj, "-" + obj]]
        elif mood == "I":  # Some subj are obj
            individuals.append([subj, obj])
            canonical_individuals = [[subj, obj], [subj]]
            combinations = [[subj, obj], ["-" + subj, obj], [subj, "-" + obj], ["-" + subj, "-" + obj]]
        elif mood == "E":  # No subj are obj
            individuals.extend([[subj, "-" + obj], ["-" + subj, obj]])
            canonical_individuals = [[subj, "-" + obj], ["-" + subj, obj]]
            combinations = [["-" + subj, obj], [subj, "-" + obj], ["-" + subj, "-" + obj]]
        elif mood == "O":  # Some subj are not obj
            individuals.append([subj, "-" + obj])
            canonical_individuals = [[subj, "-" + obj], [subj, obj], [obj]]
            combinations = [[subj, obj], ["-" + subj, obj], [subj, "-" + obj], ["-" + subj, "-" + obj]]
        return individuals, canonical_individuals, combinations

    def start_model(self, premise, size):
        """encodes the first premise of the model by randomly drawing individuals
        """
        subj = premise[1]
        obj = premise[2]
        mood = premise[0]
        individuals, canonical_individuals, combinations = self.get_individuals(subj, obj, mood)
        # Fill the model up to the requested size; with probability epsilon
        # draw from the canonical individuals, otherwise from all combinations.
        i = len(individuals)
        while i < size:
            if random.random() < self.epsilon:
                individuals.append(random.choice(canonical_individuals))
            else:
                individuals.append(random.choice(combinations))
            i += 1
        # make sure that obj is part of O premise model
        if mood == "O" and len([x for x in individuals if obj in x]) <= 0:
            individuals.pop()
            individuals.append(random.choice([[obj], [subj, obj]]))
        return individuals

    def combine(self, premise, model):
        """expands the mental model by the second premise

        Mutates `model` in place depending on the second premise's mood and
        which term ("b" or "c") is its subject, then returns each individual
        sorted by its letter (ignoring a leading '-').
        """
        mood = premise[0]
        subj = premise[1]
        obj = premise[2]
        j = 0  # counts how many individuals were extended (I and O stop at 2)
        for i in range(len(model)):
            if mood == "A":
                if "b" in model[i]:
                    model[i] = model[i] + ["c"]
            elif mood == "I":
                if subj == "b" and "b" in model[i]:
                    model[i] = model[i] + ["c"]
                    j += 1
                    if j == 2:
                        break
                elif subj == "c" and "b" in model[i]:
                    model[i] = model[i] + ["c"]
                    model.extend([["c"]])
                    break
            elif mood == "E":
                if subj == "b" and "b" in model[i]:
                    model[i] = model[i] + ["-c"]
                else:
                    # subject is "c": add separate c-individuals disjoint from b
                    model.extend([[subj, "-" + obj], [subj, "-" + obj], [subj, "-" + obj], [subj, "-" + obj]])
                    break
            elif mood == "O":
                if subj == "b" and "b" in model[i]:
                    model[i] = model[i] + ["-c"]
                    j += 1
                    if j == 2:
                        break
                elif subj == "c":
                    model.extend([["-b", "c"], ["c"]])
                    break
        # sort tokens of each individual by their letter (e[-1] skips '-')
        return [sorted(row, key=lambda e: e[-1]) for row in model]

    def generate_size(self):
        """uses truncated poisson density to generate random integers

        Re-samples until the draw is >= 2, so the model always has room for
        at least two individuals.
        """
        size = 0
        candidate_size = 0
        while not size:
            candidate_size = self.poisson_random_number(self.lam)
            if not (candidate_size >= 0 and candidate_size <= 1):
                size = candidate_size
        return size

    def poisson_random_number(self, lam):
        """Inverse-CDF sampling: accumulate densities until they exceed u."""
        u = random.random()
        p = 0
        i = 0
        while True:
            p += self.trunc_poisson_density(i, lam)
            if u < p:
                return i
            i += 1

    def trunc_poisson_density(self, n, lam):
        """Poisson density P(X = n), with n clamped at 34 (overflow guard)."""
        if n > 34:
            n = 34
        if lam > 0:
            return math.pow(lam, n) * math.exp(-lam) / math.factorial(n)
        elif lam == 0:
            # Degenerate distribution: all mass at 0.
            if n == 0:
                return 1
            else:
                return 0
        elif lam < 0:
            raise Exception

    def build_model(self, premises):
        """returns initial mental model based on premises"""
        intensions = self.get_premises(premises)
        first = intensions[0]
        second = intensions[1]
        capacity = self.generate_size()
        model = self.start_model(first, capacity)
        model = self.combine(second, model)
        return model

    def form_initial_conclusion(self, syllogism):
        """based on the dominant mood and the figure of the premise diffrent conclusions are preferred
        >>> m = mReasoner()
        >>> m.form_initial_conclusion("AE1")
        ['Eac']
        >>> m.form_initial_conclusion("AE2")
        ['Eac', 'Eca']
        >>> m.form_initial_conclusion("EA1")
        ['Eac']
        >>> m.form_initial_conclusion("EA2")
        ['Eac', 'Eca']
        """
        dominant_mood = self.dominant_mood(syllogism)
        figure = syllogism[2]
        if figure == "1":
            term_order = ["ac"]
        elif figure == "2":
            if "O" in syllogism:
                term_order = ["ca"]
            else:
                term_order = ["ac", "ca"]
        elif figure == "3":
            if syllogism[0] == syllogism[1] or syllogism[0] == "O":
                term_order = ["ac"]
            elif syllogism[1] == "O":
                term_order = ["ca"]
            else:
                term_order = ["ac", "ca"]
        elif figure == "4":
            if syllogism[0] == syllogism[1]:
                term_order = ["ac"]
            elif dominant_mood == "O" and syllogism[0] == "O":
                term_order = ["ca"]
            elif dominant_mood == "O" and syllogism[1] == "O":
                term_order = ["ac"]
            # NOTE(review): syllogism[:1] is a single character and can never
            # equal "IE"/"EI", so this branch is unreachable — presumably
            # syllogism[:2] was intended; confirm against the original Lisp.
            elif syllogism[:1] == "IE" or syllogism[:1] == "EI":
                term_order = ["ac"]
            else:
                term_order = ["ac", "ca"]
        conclusions = []
        for order in term_order:
            conclusions.append(dominant_mood + order)
        return conclusions

    def dominant_mood(self, intension):
        """Return the dominant mood by the precedence O > E > I > A."""
        if "O" in intension:
            dominant_mood = "O"
        elif "E" in intension:
            dominant_mood = "E"
        elif "I" in intension:
            dominant_mood = "I"
        else:
            dominant_mood = "A"
        return dominant_mood

    def check_conclusions(self, conclusions, model):
        """checks which conclusions hold in the mental model and returns them
        >>> m = mReasoner()
        >>> m.check_conclusions(['Eac'], [['a', 'b'], ['b'], ['b', 'c']])
        ['Eac']
        >>> m.check_conclusions(['Eca', 'Eac'], [['a', 'b'], ['b'], ['b', 'c']])
        ['Eca', 'Eac']
        >>> m.check_conclusions(['Oac'], [['a', 'b', 'c'], ['a', 'b'], ['b', 'c']])
        ['Oac']
        """
        valid = []
        for conclusion in conclusions:
            # A/E start out assumed true and get falsified; I/O the reverse.
            mood_holds = {"A": True, "I": False, "E": True, "O": False}
            subj = conclusion[1]
            obj = conclusion[2]
            individuals_with_subj = [ind for ind in model if subj in ind]
            for individual in individuals_with_subj:
                # subj without obj
                if obj not in individual:
                    mood_holds["A"] = False
                    mood_holds["O"] = True
                # subj with obj
                if obj in individual:
                    mood_holds["I"] = True
                    mood_holds["E"] = False
            # All and No Conclusions False if no Subj in model
            if len(individuals_with_subj) < 1:
                mood_holds["E"] = False
                mood_holds["A"] = False
            if mood_holds[conclusion[0]]:
                valid.append(conclusion)
        # No conclusion valid
        if len(valid) < 1:
            valid.append("NVC")
        return valid

    def valid_counter_example(self, syllogism, conclusion):
        """Search for a model satisfying both premises but not the conclusion.

        >>> m = mReasoner()
        >>> l = []
        >>> for syl in ["AE1", "AE2", "EA1", "EA2"]:
        ...     counter = m.valid_counter_example(syl, ['Eac'])
        ...     valid = True if counter is not None else False
        ...     l.append(valid)
        >>> l
        [False, True, True, True]
        """
        possibilities = self.possible_models(syllogism)
        premises = self.get_premises(syllogism)
        counter = self.find_counterexample(possibilities, conclusion[0], premises)
        return counter

    def possible_models(self, syllogism):
        """create all possible models of size 2 based on the mood of the first premise"""
        all_possible_models = []
        premises = self.get_premises(syllogism)
        first_premise = premises[0]
        subj, obj, mood = first_premise[1], first_premise[2], first_premise[0]
        _, canonical_individuals, combinations = self.get_individuals(subj, obj, mood)
        possible_individuals = canonical_individuals + combinations
        # remove duplicates
        possible_individuals = [list(i) for i in set(map(tuple, possible_individuals))]
        # NOTE(review): subj/obj/mood of the second premise appear unused below
        # — presumably left over from a refactor; verify.
        subj, obj = premises[1][1], premises[1][2]
        mood = premises[1][0]
        # create all possible individual combinations of size 2
        comb = list(itertools.combinations_with_replacement(possible_individuals, 2))
        for possibility in comb:
            all_possible_models.append([sorted(row, key=lambda e: e[-1]) for row in possibility])
        return all_possible_models

    def find_counterexample(self, possibilities, conclusion, premises):
        """ finds a model that holds in both premises but refutes the initial conclusion

        Returns the first such model, or None (implicitly) if none exists.
        """
        test_models = []
        # Expand every candidate with the missing end terms, deduplicating.
        for possible_model in possibilities:
            new_models = self.add_end_term(possible_model)
            for model in new_models:
                if model not in test_models:
                    test_models.append(model)
        for model in test_models:
            if premises[0] in self.check_conclusions([premises[0]], model):
                if premises[1] in self.check_conclusions([premises[1]], model):
                    if conclusion not in self.check_conclusions([conclusion], model):
                        return model

    def add_end_term(self, model):
        """iterates over the model and finds all possible individuals with missing end terms
        returns all possibilities of a model with added end terms"""
        new_models = []
        new_model = copy.deepcopy(model)
        for element in ["a", "c"]:
            rows = []
            token = 'a' if element == 'c' else 'c'
            # rows where `element` is absent but the opposite end term occurs
            for i, indv in enumerate(model):
                if element not in indv and '-' + element not in indv:
                    if token in indv or '-' + token in indv:
                        rows.append(i)
            if len(rows) < 1:
                continue
            # collection of added end terms to one possible individual
            for i in rows:
                if element == 'a':
                    new_model[i] = ['a'] + new_model[i]  # keep 'a' leading
                else:
                    new_model[i].append(element)
                new_models.append(new_model)
                new_model = copy.deepcopy(model)
            # try to add end terms to all possible individuals
            if len(rows) > 1:
                for i in rows:
                    if element == 'a':
                        new_model[i] = ['a'] + new_model[i]
                    else:
                        new_model[i].append(element)
                new_models.append(new_model)
        return new_models

    def system2(self, syllogism, model, conclusion, verify_target=None, weaken=None):
        """tries to refute an initial conclusion by searching for a counterexample.
        if such a counterexample is found weakens the initial conclusion and tries to
        refute it again with a counterexample"""
        # `weaken` argument overrides the omega parameter; the local name is
        # then reused for the random draw.
        omega = self.omega if weaken is None else weaken
        weaken = random.random()
        new_model = self.valid_counter_example(syllogism, [conclusion])
        # counterexample found
        weaker_conclusion = False
        if new_model is not None:
            # weaken: A -> I, E -> O (same term order)
            if weaken < omega:
                if conclusion[0] == "A":
                    weaker_conclusion = "I" + conclusion[1:]
                elif conclusion[0] == "E":
                    weaker_conclusion = "O" + conclusion[1:]
            # weaker conclusion exists test if it holds in alternative model
            if weaker_conclusion:
                weaker_conclusion = [weaker_conclusion]
                valid_conclusions = self.check_conclusions(weaker_conclusion, new_model)
                # belief bias: return weaker conclusion if it holds
                if verify_target in valid_conclusions:
                    return valid_conclusions
                # weaker conclusion holds, try to refute it
                if valid_conclusions == ["NVC"]:
                    return valid_conclusions
                new_model = self.valid_counter_example(syllogism, weaker_conclusion)
                # counterexample found for weaker conclusion
                if new_model is not None:
                    return ["NVC"]
                else:
                    return weaker_conclusion
        # counterexample found but do not weaken conclusion
        if not weaker_conclusion and new_model is not None:
            return ["NVC"]
        return [conclusion]

    def predict(self, syllogism, system2=None, weaken=None, verify_target=None):
        """Predict conclusion(s) for a syllogism.

        `system2` and `weaken` override sigma and omega for this call.

        >>> m = mReasoner()
        >>> m.predict("AE1", system2=0, weaken=0)
        ['Eac']
        >>> m.predict("AE1", system2=1, weaken=0)
        ['Eac']
        >>> m.predict("AE2", system2=1, weaken=0)
        ['NVC']
        >>> m.predict("AE2", system2=1, weaken=1)
        ['Oac']
        >>> m.predict("EA1", system2=1, weaken=1)
        ['NVC']
        >>> m.predict("EA2", system2=1, weaken=1)
        ['Eca']
        """
        sigma = self.sigma if system2 is None else system2
        omega = self.omega if weaken is None else weaken
        model = self.build_model(syllogism)
        initial_conclusions = self.form_initial_conclusion(syllogism)
        valid_conclusions = self.check_conclusions(initial_conclusions, model)
        # selective processing: accept conclusion if matches with the target regardless of sigma
        if verify_target in valid_conclusions:
            return valid_conclusions
        if random.random() < sigma:
            # Engage system 2: try to refute each candidate conclusion.
            conclusions = []
            for conclusion in valid_conclusions:
                if conclusion == "NVC":
                    continue
                conclusion = self.system2(syllogism, model, conclusion, verify_target=verify_target, weaken=omega)
                if conclusion != ["NVC"]:
                    conclusions.extend(conclusion)
            valid_conclusions = conclusions
        if len(valid_conclusions) < 1:
            return ["NVC"]
        return valid_conclusions
|
# -*- coding: utf-8 -*-
import getpass
import os

from duckietown_utils import DuckietownConstants
from duckietown_utils import get_list_of_packages_in_catkin_ws
from duckietown_utils import on_circle, on_laptop
from duckietown_utils import on_duckiebot

from .checks import *  # @UnusedWildImport
from .entry import Diagnosis, Entry, SeeDocs
from .python_source_checks import add_python_package_checks
from .suite_git import add_suite_git
from .suite_ssh import good_ssh_configuration
class Manager():
    """Collects the diagnostic Entry objects that the tool will run."""

    def __init__(self):
        # Entries in registration order.
        self.entries = []

    def add(self, only_run_if, desc, check, diagnosis, *suggestions):
        """Register one check and return its Entry.

        only_run_if: another Entry that must pass first, or None.
        desc: human-readable description of the check.
        check: a Check instance to execute.
        diagnosis: Diagnosis shown when the check fails.
        *suggestions: optional resolutions; if none are given, the check's
            own automated suggestion (if any) is used.
        """
        assert isinstance(check, Check), type(check)
        if not suggestions:
            automated = check.get_suggestion()
            if automated is not None:
                suggestions = [automated]
        E = Entry(desc=desc, check=check,
                  diagnosis=diagnosis,
                  resolutions=suggestions,
                  only_run_if=only_run_if)
        self.entries.append(E)
        return E
def get_checks():
    """ Returns a list of Entry

    Builds the full list of environment/system checks, conditional on where
    we are running (Duckiebot, laptop, or CI/circle).
    """
    manager = Manager()
    add = manager.add
    JOY_DEVICE = '/dev/input/js0'

    # Detect the execution environment.
    this_is_a_duckiebot = on_duckiebot()
    this_is_a_laptop = on_laptop()
    this_is_circle = on_circle()
    username = getpass.getuser()

    if this_is_a_duckiebot:
        add(None,
            "Camera is detected",
            CommandOutputContains('sudo vcgencmd get_camera', 'detected=1'),
            Diagnosis("The camera is not connected."))

    add(None,
        "Scipy is installed",
        CanImportPackages(['scipy', 'scipy.io']),
        Diagnosis("Scipy is not installed correctly."))
    add(None,
        "sklearn is installed",
        CanImportPackages(['sklearn']),
        Diagnosis("sklearn is not installed correctly."))

    python_packages = [
        # 'ros_node_utils',
        'procgraph',
        'comptests',
    ]
    for p in python_packages:
        add(None,
            "%s is installed" % p,
            CanImportPackages([p]),
            Diagnosis("Dependency %r is not installed correctly." % p),
            Suggestion(" pip install --user %s" % p))

    add(None,
        "Date is set correctly",
        CheckDate(),
        Diagnosis("The date is not set correctly."))

    not_root = add(None,
                   "Not running as root",
                   YouAreNotUser('root'),
                   Diagnosis("You should not run the code as root."))

    if this_is_a_duckiebot:
        not_ubuntu = add(not_root,
                         "Not running as ubuntu",
                         YouAreNotUser('ubuntu'),
                         Diagnosis("You should not run the code as ubuntu."))
        add(not_ubuntu,
            "Member of group sudo",
            UserBelongsToGroup(username, "sudo"),
            Diagnosis("You are not authorized to run sudo."))
        add(not_ubuntu,
            "Member of group input",
            UserBelongsToGroup(username, "input"),
            Diagnosis("You are not authorized to use the joystick."))
        add(not_ubuntu,
            "Member of group video",
            UserBelongsToGroup(username, "video"),
            Diagnosis("You are not authorized to read from the camera device."))
        # BUG FIX: this check previously tested membership of group "input"
        # while claiming to test "i2c"; the motor shield needs the "i2c" group.
        add(not_ubuntu,
            "Member of group i2c",
            UserBelongsToGroup(username, "i2c"),
            Diagnosis("You are not authorized to use the motor shield."))
        # The 'ubuntu' user baked into the image must also be in these groups.
        for g in ['sudo', 'input', 'video', 'i2c']:
            add(None,
                "User ubuntu member of group `%s`" % g,
                UserBelongsToGroup("ubuntu", g),
                Diagnosis("Image not created properly."))

    if this_is_a_laptop or this_is_a_duckiebot:
        good_ssh_configuration(manager)

    # Debian/Ubuntu packages that must be installed.
    required_packages = set()
    if this_is_a_duckiebot or this_is_a_laptop or this_is_circle:
        required_packages.update(make_list("""
            vim byobu
            git git-extras
            htop atop iftop
            aptitude apt-file
            build-essential libblas-dev liblapack-dev libatlas-base-dev gfortran libyaml-cpp-dev
            python-dev ipython python-sklearn
            python-termcolor
            ros-kinetic-desktop-full
            ntpdate
            python-pip
            ipython
            python-ruamel.yaml
            virtualenv
            libxml2-dev
            libxslt1-dev
            libffi-dev
            bibtex2html
            pdftk
            python-frozendict
            python-tables
            mplayer
            mencoder
        """))
    if this_is_a_duckiebot:
        required_packages.update(make_list("""
            i2c-tools
            python-smbus
        """))
    if this_is_a_laptop or this_is_circle:
        required_packages.update(make_list("""
            git-lfs
        """))

    # TODO
    # suggested = ['emacs', 'zsh', 'nethogs']

    for p in required_packages:
        add(None, p, CheckPackageInstalled(p), Diagnosis('Package %r not installed.' % p))

    # Packages that conflict with the expected ROS setup.
    forbidden_packages = ["python-roslaunch", "rosbash"]
    for p in forbidden_packages:
        add(None, p, CheckPackageNotInstalled(p), Diagnosis('Forbidden package %r is installed.' % p))

    if not this_is_circle:
        add_suite_git(manager)

    if this_is_a_duckiebot:
        add(None,
            "Edimax detected",
            CommandOutputContains('iwconfig', 'rtl8822bu'),
            Diagnosis("It seems that the Edimax is not detected."))

    add(None,
        'The hostname is configured',
        CheckHostnameConfigured(),
        Diagnosis('You have not completed a proper setup.'))
    add(None,
        '/etc/hosts is sane',
        CheckGoodHostsFile(),
        Diagnosis('The contents of /etc/hosts will cause problems later on.'))

    if this_is_a_duckiebot:
        add(None,
            'Correct kernel version',
            GoodKernel(),
            Diagnosis('You have been messing with the kernel.'),
            Suggestion('You probably need to start with a pristine SD card.'))
        add(None,
            'Wifi name configured',
            WifiNameConfigured(),
            Diagnosis('You have not completed the Wifi configuration.'))

    add(None,
        "Messages are compiled",
        CheckImportMessages(),
        Diagnosis("The messages are not compiling correctly."))

    # if not this_is_circle:
    #     add(None,
    #         'Shell is bash',
    #         EnvironmentVariableIsEqualTo('SHELL', '/bin/bash'),
    #         Diagnosis('You have not set the shell to /bin/bash'),
    #         Suggestion('You can change the shell using `chsh`.'))

    if this_is_a_duckiebot:
        add(None,
            "Joystick detected",
            DeviceExists(JOY_DEVICE),
            Diagnosis("The joystick is not found at %s" % JOY_DEVICE))

    DUCKIETOWN_ROOT = DuckietownConstants.DUCKIETOWN_ROOT_variable
    DUCKIEFLEET_ROOT = DuckietownConstants.DUCKIEFLEET_ROOT_variable
    DUCKIETOWN_CONFIG_SEQUENCE = DuckietownConstants.DUCKIETOWN_CONFIG_SEQUENCE_variable

    v = DUCKIETOWN_CONFIG_SEQUENCE
    add(None,
        'Provided environment variable %s.' % v,
        EnvironmentVariableExists(v),
        Diagnosis("%s is not set." % v),
        Suggestion('You have to set %r in your environment (e.g. .bashrc)' % v))

    # For these variables, also check that the directory they point to exists.
    variables_to_check = [DUCKIETOWN_ROOT, DUCKIEFLEET_ROOT,  # DUCKIETOWN_CONFIG_SEQUENCE
                          ]
    existence = {}
    for v in variables_to_check:
        var_exists = add(None,
                         'Provided environment variable %s.' % v,
                         EnvironmentVariableExists(v),
                         Diagnosis("%s is not set." % v),
                         Suggestion('You have to set %r in your environment (e.g. .bashrc)' % v))
        existence[v] = add(var_exists,
                           'Existence of path ${%s}' % v,
                           DirExists('${%s}' % v),
                           Diagnosis("%s is set but it points to a non-existing directory." % v)
                           )

    add(existence[DUCKIETOWN_ROOT],
        'Software repo downloaded with SSH scheme.',
        GitCorrectRemote('${%s}' % DUCKIETOWN_ROOT),
        Diagnosis("You downloaded the repo using https."),
        )

    # scuderia_exists = add(existence[DUCKIEFLEET_ROOT],
    #     'Existence of scuderia file',
    #     ScuderiaFileExists(),
    #     Diagnosis('You do not have a scuderia file.'),
    #     SeeDocs('scuderia')
    #     )

    git_lfs_installed = add(None,  # @UnusedVariable
                            'Git LFS installed',
                            GitLFSInstalled(),
                            Diagnosis('You have not installed Git LFS'),
                            SeeDocs('git-lfs'))

    # ok_scuderia = add(scuderia_exists,
    #     'Validation of scuderia file',
    #     ValidScuderiaFile(),
    #     Diagnosis('You have an invalid scuderia file.'),
    #     SeeDocs('scuderia')
    #     )

    if this_is_a_duckiebot:
        add(None,
            'This robot is mentioned in scuderia.',
            ThisRobotInScuderiaFile(),
            Diagnosis('You have not added the robot to the scuderia.'),
            SeeDocs('scuderia'))

    progs = ['roslaunch', 'rosrun']
    for prog in progs:
        add(None,
            'Good path for "%s"' % prog,
            CommandOutputContains('which %s' % prog, '/opt/ros/kinetic'),
            Diagnosis('The program `%s` is not resolved to the one in /opt/ros' % prog))

    # add(None,
    #     'Hub is installed',
    #     CommandOutputContains('hub --version'),
    #     Diagnosis('The program "hub" is not installed'),
    #     SeeDocs("hub"))

    machines_exists = add(None,
                          'Existence of machines file',
                          MachinesExists(),
                          Diagnosis('You have an invalid or missing machines file.'),
                          SeeDocs('machines'),
                          )
    if this_is_a_duckiebot:
        add(machines_exists,
            'Machines file contains this robot',
            MachinesValid(),
            Diagnosis('You have an invalid machines file.'),
            )

    # add(machines_exists,
    #     'Machines is updated',
    #     MachinesNewerThanScuderia(),
    #     Diagnosis('Scuderia was modified after machines created'),
    #     )

    if True:  # TODO
        if this_is_a_laptop:
            existence = add(None,
                            'Environment variable DUCKIETOWN_DATA',
                            EnvironmentVariableExists('DUCKIETOWN_DATA'),
                            Diagnosis("DUCKIETOWN_DATA is not set."
                                      """
            The environment variable DUCKIETOWN_DATA must either:
            1) be set to "n/a"
            2) point to an existing path corresponding to Dropbox/duckietown-data.
                (containing a subdirectory 'logs')
            """
                                      ))
            logs = [
                "${DUCKIETOWN_DATA}/logs/20160400-phase3-logs/dp45/20160406/20160406-226-All_red_lights_followTheLeader1-2cv.bag",
            ]
            for l in logs:
                add(existence,
                    'Log %r exists in DUCKIETOWN_DATA' % os.path.basename(l),
                    FileExists(l),
                    Diagnosis("The DUCKIETOWN_DATA folder does not contain the logs it should.")
                    )

    if False:
        # TODO: not sure if this is needed
        if this_is_a_duckiebot:
            add(None,
                'Environment variable VEHICLE_NAME',
                EnvironmentVariableExists('VEHICLE_NAME'),
                Diagnosis("""
            The environment variable VEHICLE_NAME must be the name of your robot
            (if you are on the robot)."""),
                Suggestion("""
            Add this line to ~/.bashrc:
            export VEHICLE_NAME= (your vehicle name)
            """))

    # Per-package Python source checks, if a catkin workspace is available.
    try:
        packagename2dir = get_list_of_packages_in_catkin_ws()
    except DTConfigException:
        pass
    else:
        for package_name, dirname in packagename2dir.items():
            add_python_package_checks(add, package_name, dirname)

    # TODO: DISPLAY is not set
    # files in src/ or scripts/ are executable
    # There is no file "util.py" copied from pkg_name
    # add(None,
    #     'Passwordless sudo',
    #     FileContains('/etc/'))
    # TODO: date
    return manager.entries
def make_list(s):
    """Split a (possibly multi-line) whitespace-separated string into tokens."""
    # str.split() with no arguments splits on any run of whitespace,
    # including newlines, and never yields empty strings.
    return s.split()
|
# coding = utf-8
import numbers
from typing import Union, List
import torch
from torch import Tensor, Size
from torch.nn import Module, init
from torch.nn.parameter import Parameter
from torch.autograd import Variable
from torch.nn.functional import normalize
# cite: https://github.com/lancopku/AdaNorms
# cite: NeurIPS19 Understanding and Improving Layer Normalization
class AdaNorm(Module):
    """Adaptive layer normalization (AdaNorm) over the last dimension.

    Normalizes the input and rescales it by an adaptive, gradient-detached
    factor ``(1 - k * x_hat)`` as proposed in "Understanding and Improving
    Layer Normalization" (NeurIPS 2019).

    Args:
        normalized_shape: size of the last dimension; only an int is accepted
            (normalization over exactly the last dimension).
        k: adaptive-scaling coefficient (paper default 1/10).
        scale: constant output scale (paper default 2).
        eps: numerical-stability constant added to the std.
        elementwise_affine: if True, allocate per-element weight/bias
            parameters.  NOTE(review): the parameters are created and
            initialized but forward() never applies them — presumably kept
            for interface parity with nn.LayerNorm; confirm before relying
            on them.

    Raises:
        ValueError: if normalized_shape is not an int.
    """
    __constants__ = ['normalized_shape', 'eps', 'elementwise_affine', 'k', 'scale']
    normalized_shape: Union[int, List[int], torch.Size]
    eps: float
    elementwise_affine: bool
    k: float
    scale: float

    def __init__(self, normalized_shape: Union[int, List[int], torch.Size], k: float = 1 / 10, scale: float = 2., eps: float = 1e-5, elementwise_affine: bool = True) -> None:
        super(AdaNorm, self).__init__()
        self.k = k
        self.scale = scale
        self.eps = eps
        self.elementwise_affine = elementwise_affine
        if isinstance(normalized_shape, numbers.Integral):
            normalized_shape = (normalized_shape,)
        else:
            # Multi-dimensional shapes are not supported by the forward pass,
            # which always normalizes over the last dimension only.
            raise ValueError('Only last layer for AdaNorm currently')
        self.normalized_shape = tuple(normalized_shape)
        if self.elementwise_affine:
            self.weight = Parameter(torch.Tensor(*normalized_shape))
            self.bias = Parameter(torch.Tensor(*normalized_shape))
        else:
            self.register_parameter('weight', None)
            self.register_parameter('bias', None)
        self.reset_parameters()

    def reset_parameters(self) -> None:
        """Initialize weight to ones and bias to zeros (LayerNorm convention)."""
        if self.elementwise_affine:
            init.ones_(self.weight)
            init.zeros_(self.bias)

    def forward(self, input: Tensor) -> Tensor:
        """Apply AdaNorm over the last dimension of `input`."""
        mean = input.mean(-1, keepdim=True)
        std = input.std(-1, keepdim=True)
        input = input - mean
        mean = input.mean(-1, keepdim=True)  # re-centered mean (~0)
        # Adaptive factor; detached so it acts as a constant w.r.t. gradients.
        graNorm = (self.k * (input - mean) / (std + self.eps)).detach()
        input_norm = (input - input * graNorm) / (std + self.eps)
        return self.scale * input_norm

    # BUG FIX: the return annotation was `-> Tensor`, but extra_repr returns
    # the human-readable parameter string used by Module.__repr__.
    def extra_repr(self) -> str:
        return '{normalized_shape}, eps={eps}, elementwise_affine={elementwise_affine}, k={k}, scale={scale}'.format(**self.__dict__)
# regulize (preserve) orthogonality among output features/channels
# under Spectral Restricted Isometry Property (of orthogonal matrix)
# extra hyper-parameters is added and should be searched: coefficient (weight) of SRIPTerm
# recommended by the authors: 1e-1(epoch 0) --> 1e-3(20) --> 1e-4(50) --> 1e-6(70) --> 0(120) of 200 epochs totally
# while at the same time changing coefficient of weight decay: 1e-8(0) --> 1e-4(20)
# cite: https://github.com/VITA-Group/Orthogonality-in-CNNs/blob/master/Imagenet/resnet/train_n.py
# cite: NeurIPS18 Can We Gain More from Orthogonality Regularizations in Training Deep CNNs?
def getSRIPTerm(model: Module, device='cpu'):
    """Compute the SRIP (Spectral Restricted Isometry Property) penalty.

    For every parameter W with >= 2 dimensions, flattens it to a 2-D matrix
    and penalizes the (approximate) largest singular value of W^T W - I,
    estimated with one round of power iteration.  The sum of squared
    estimates over all such parameters is returned as a scalar tensor
    (None if the model has no >=2-D parameters).

    Note: the power-iteration start vector is random, so the value is
    stochastic unless the torch RNG is seeded.
    """
    term = None
    for W in model.parameters():
        if W.ndimension() < 2:
            continue  # biases / 1-D params are not regularized
        # for convolutional:
        #     W.shape = [OUTPUT_CHANNELS, INPUT_CHANNELS, KERNEL_SIZE]
        #     rows = OUTPUT_CHANNELS, cols = INPUT_CHANNELS * KERNEL_SIZE
        # for linear:
        #     W.shape = [OUTPUT_FEATURES, INPUT_FEATURES]
        cols = W[0].numel()
        w1 = W.view(-1, cols)
        wt = torch.transpose(w1, 0, 1)
        m = torch.matmul(wt, w1)
        # MODERNIZED: torch.autograd.Variable is a deprecated no-op wrapper;
        # create the identity directly on the target device.
        ident = torch.eye(cols, cols, device=device)
        w_tmp = m - ident
        height = w_tmp.size(0)
        # one step of power iteration to approximate the top singular value
        u = normalize(w_tmp.new_empty(height).normal_(0, 1), dim=0, eps=1e-12)
        v = normalize(torch.matmul(w_tmp.t(), u), dim=0, eps=1e-12)
        u = normalize(torch.matmul(w_tmp, v), dim=0, eps=1e-12)
        sigma = torch.dot(u, torch.matmul(w_tmp, v))
        if term is None:
            term = sigma ** 2
        else:
            term = term + sigma ** 2
    return term
|
<filename>Menu.py
from Record import Record
# Menu class to store record class
# Menu class to store record class
class Menu(Record):
    """Interactive store of Record objects keyed by an auto-incrementing index.

    State is kept in class-level attributes, so all Menu instances share
    the same record table.
    """
    # Shared index -> Record table.
    menu_dict = {}
    # Highest index handed out so far.
    lastIndex = 0

    def add_record(self, prod_name, prod_code, unit_price, quantity, salesperson_id):
        """Create a Record from the given fields and store it at the next index."""
        if len(self.menu_dict) == 0:
            index = 1
        else:
            # index_increase is the module-level helper defined below.
            index = index_increase(Menu.lastIndex)
        # NOTE(review): this both re-initializes *this* Menu instance as a
        # Record and builds a separate Record `r`; only `r` is stored —
        # the super().__init__ call looks redundant, confirm before removing.
        super().__init__(index, prod_name, prod_code, unit_price, quantity, salesperson_id)
        r = Record(index, prod_name, prod_code, unit_price, quantity, salesperson_id)
        Menu.lastIndex = index
        Menu.menu_dict[Menu.lastIndex] = r
        print("\n====New record added====\n")

    def update_record(self, index):
        """Interactively update the record at `index`; blank input keeps a field."""
        try:
            # Mutate the record's backing dict in place.
            toBeChanged = self.menu_dict[index]._record_dict
        except KeyError:
            print("\n====Please choose a valid index====\n")
        else:
            changedPN = str(input(f"New Product Name (Leave blank if you do not wish to change it.)=> "))
            if changedPN != "":
                toBeChanged["prod_name"] = changedPN
            changedPC = str(input(f"New Product Code (Leave blank if you do not wish to change it.)=> "))
            if changedPC != "":
                toBeChanged["prod_code"] = changedPC
            # Re-prompt until the price is blank (keep) or parses as a float.
            while True:
                changedUP = input(f"New Product Price (Leave blank if you do not wish to change it.)=> ")
                if changedUP == "":
                    break
                else:
                    try:
                        changedUP = float(changedUP)
                    except ValueError:
                        print("\n====Please enter a valid Price Point====\n")
                    else:
                        toBeChanged["unit_price"] = changedUP
                        break
            # Re-prompt until the quantity is blank (keep) or parses as an int.
            while True:
                changedPQ = input(f"New Quantity (Leave blank if you do not wish to change it.)=> ")
                if changedPQ == "":
                    break
                else:
                    try:
                        changedPQ = int(changedPQ)
                    except ValueError:
                        print("\n====Please enter a valid Quantity====\n")
                    else:
                        toBeChanged["quantity"] = changedPQ
                        break
            changedSID = str(input(f"New Salesperson ID (Leave blank if you do not wish to change it.)=> "))
            if changedSID != "":
                toBeChanged["salesperson_id"] = changedSID
            print(f"\n====Index {index} updated====\n")

    def remove_record(self, index):
        """Delete the record at `index`, reporting invalid indices."""
        try:
            del self.menu_dict[index]
        except KeyError:
            print("\n====Please choose a valid index====\n")
        else:
            print(f"\n====Index {index} removed====\n")

    def display_records(self, index=None):
        """Print all records (index=None) or the records whose keys are in `index`."""
        # if index is not None or index is not list:
        #     raise KeyError(f"Index type should either be None or List. {type(index)} type detected.")
        try:
            # NOTE(review): the header (field names) is taken from the record
            # stored at key 1, so this raises/reports "no records" if record 1
            # was removed even when other records exist — verify intent.
            keyList = [*self.menu_dict[1]._record_dict]
        except KeyError:
            print("\n====There are no records found====\n")
        else:
            print('\n')
            print("{:>5} {:>12} {:>12} {:>12} {:>12} {:>18}".format(*self.menu_dict[1]._record_dict))
            print('---------------------------------------------------------------------------------')
            # Show all records
            if index is None:
                for record in Menu.menu_dict:
                    r = self.menu_dict[record]._record_dict
                    print("{:>5} {:>12} {:>12} {:>12} {:>12} {:>18}".format(r[keyList[0]], r[keyList[1]], r[keyList[2]],
                                                                            r[keyList[3]], r[keyList[4]],
                                                                            r[keyList[5]]))
            # Show certain records
            else:
                for record in index:
                    r = self.menu_dict[record]._record_dict
                    print("{:>5} {:>12} {:>12} {:>12} {:>12} {:>18}".format(r[keyList[0]], r[keyList[1]], r[keyList[2]],
                                                                            r[keyList[3]], r[keyList[4]],
                                                                            r[keyList[5]]))
            print('---------------------------------------------------------------------------------')
            print('\n')

    def __str__(self):
        """Return the main menu text shown to the user."""
        return "\n 1 -Add Record\n 2 -Update Selected Record\n 3 -Remove Record\n 4 -Display All Records\n 5 -Sort " \
               "Record\n 6 -Search Record\n 7 -Exit Application"
# Increase index by 1
def index_increase(lastNum):
    """Return lastNum + 1, rejecting anything that is not exactly an int.

    Raises TypeError for non-int input (including bool, which the exact
    type check deliberately excludes).
    """
    # Idiom fix: identity comparison for types; exact-type check kept so
    # bool is still rejected (isinstance would accept it).
    if type(lastNum) is not int:
        raise TypeError(f'Type lastNum has to be a int. Type {type(lastNum)} detected.')
    # The result of int + 1 is already an int; the old int() cast was redundant.
    return lastNum + 1
|
<gh_stars>0
# Copyright (c) 2016, 2017, 2018, 2019 <NAME>.
#
# clgen is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# clgen is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with clgen. If not, see <https://www.gnu.org/licenses/>.
"""Unit tests for //deeplearning/clgen/cli.py."""
import os
import pathlib
import tempfile
import pytest
from deeplearning.clgen import clgen
from deeplearning.clgen import errors
from deeplearning.clgen.proto import clgen_pb2
from labm8.py import app
from labm8.py import pbutil
from labm8.py import test
FLAGS = app.FLAGS
# Instance tests.
def test_Instance_no_working_dir_field(abc_instance_config):
    """Test that working_dir is None when no working_dir field in config."""
    # Clearing the proto field simulates a config with no working_dir set.
    abc_instance_config.ClearField("working_dir")
    instance = clgen.Instance(abc_instance_config)
    assert instance.working_dir is None
def test_Instance_working_dir_shell_variable_expansion(abc_instance_config):
    """Test that shell variables are expanded in working_dir."""
    working_dir = abc_instance_config.working_dir
    # NOTE(review): $FOO is set for the whole process and never cleaned up;
    # harmless here but could leak into other tests -- confirm acceptable.
    os.environ["FOO"] = working_dir
    abc_instance_config.working_dir = "$FOO/"
    instance = clgen.Instance(abc_instance_config)
    assert str(instance.working_dir) == working_dir
def test_Instance_no_model_field(abc_instance_config):
    """Test that UserError is raised when no model field in config."""
    abc_instance_config.ClearField("model_specification")
    with test.Raises(errors.UserError) as e_info:
        clgen.Instance(abc_instance_config)
    # The error message names the exact missing proto field.
    assert "Field not set: 'Instance.model_specification'" == str(e_info.value)
def test_Instance_no_sampler_field(abc_instance_config):
    """Test that UserError is raised when no model field in config."""
    # NOTE(review): this is a byte-for-byte copy of
    # test_Instance_no_model_field -- it clears model_specification, not the
    # sampler field, so the sampler case is never actually tested. Confirm
    # the proto field name for the sampler and update accordingly.
    abc_instance_config.ClearField("model_specification")
    with test.Raises(errors.UserError) as e_info:
        clgen.Instance(abc_instance_config)
    assert "Field not set: 'Instance.model_specification'" == str(e_info.value)
def test_Instance_Session_clgen_dir(abc_instance_config):
    """Test that $CLGEN_CACHE is set to working_dir inside a session."""
    instance = clgen.Instance(abc_instance_config)
    with instance.Session():
        assert os.environ["CLGEN_CACHE"] == abc_instance_config.working_dir
def test_Instance_Session_no_working_dir(
    abc_instance_config, tempdir2: pathlib.Path
):
    """Test that $CLGEN_CACHE is not modified if config doesn't set working_dir."""
    abc_instance_config.ClearField("working_dir")
    # Pre-set the cache env var; the session must leave it untouched.
    os.environ["CLGEN_CACHE"] = str(tempdir2)
    instance = clgen.Instance(abc_instance_config)
    with instance.Session():
        assert os.environ["CLGEN_CACHE"] == str(tempdir2)
def test_Instance_Session_yield_value(abc_instance_config):
    """Test that Session() yields the instance."""
    instance = clgen.Instance(abc_instance_config)
    # The context manager hands back the same instance it was created from.
    with instance.Session() as s:
        assert instance == s
def test_Instance_ToProto_equality(abc_instance_config):
    """Test that ToProto() returns the same as the input config."""
    # Round-trip: config -> Instance -> proto must be lossless.
    instance = clgen.Instance(abc_instance_config)
    assert abc_instance_config == instance.ToProto()
# RunWithErrorHandling() tests.
def test_RunWithErrorHandling_return_value(clgen_cache_dir):
    """Test that RunWithErrorHandling() returns correct value for function."""
    # Fixture used only for its side effect (isolated cache dir).
    del clgen_cache_dir
    assert clgen.RunWithErrorHandling(lambda a, b: a // b, 4, 2) == 2
def test_RunWithErrorHandling_system_exit(clgen_cache_dir):
    """Test that SystemExit is raised on exception."""
    del clgen_cache_dir  # fixture used only for its side effect
    # ZeroDivisionError inside the wrapped call is converted to SystemExit.
    with test.Raises(SystemExit):
        clgen.RunWithErrorHandling(lambda a, b: a // b, 1, 0)
def test_RunWithErrorHandling_exception_debug(clgen_cache_dir):
    """Test that FLAGS.debug disables exception catching."""
    del clgen_cache_dir  # fixture used only for its side effect
    # NOTE(review): this mutates global FLAGS without restoring them --
    # confirm later tests reset flags (several call unparse_flags()).
    app.FLAGS(["argv[0]", "--clgen_debug"])
    with test.Raises(ZeroDivisionError):
        clgen.RunWithErrorHandling(lambda a, b: a // b, 1, 0)
# main tests.
def test_main_unrecognized_arguments():
    """Test that UsageError is raised if arguments are not recognized."""
    with test.Raises(app.UsageError) as e_info:
        clgen.main(["argv[0]", "--foo", "--bar"])
    assert "Unrecognized command line options: '--foo --bar'" == str(e_info.value)
def test_main_no_config_flag():
    """Test that UsageError is raised if --config flag not set."""
    # NOTE(review): the assertion checks the *file-not-found* message for the
    # default config path, not a "flag not set" message -- confirm the
    # docstring matches the intended behavior.
    with test.Raises(app.UsageError) as e_info:
        clgen.main(["argv[0]"])
    assert "CLgen --config file not found: '/clgen/config.pbtxt'" == str(
        e_info.value
    )
def test_main_config_file_not_found():
    """Test that UsageError is raised if --config flag not found."""
    with tempfile.TemporaryDirectory() as d:
        # Point --config at a path inside the tempdir that was never created.
        app.FLAGS.unparse_flags()
        app.FLAGS(["argv[0]", "--config", f"{d}/config.pbtxt"])
        with test.Raises(app.UsageError) as e_info:
            clgen.main(["argv[0]"])
        assert f"CLgen --config file not found: '{d}/config.pbtxt'" == str(
            e_info.value
        )
def test_main_print_cache_path_corpus(abc_instance_file, capsys):
    """Test that --print_cache_path=corpus prints directory path."""
    app.FLAGS.unparse_flags()
    app.FLAGS(
        ["argv[0]", "--config", abc_instance_file, "--print_cache_path=corpus"]
    )
    clgen.main([])
    out, err = capsys.readouterr()
    # The printed path must name an existing corpus cache directory.
    assert "/corpus/" in out
    assert pathlib.Path(out.strip()).is_dir()
def test_main_print_cache_path_model(abc_instance_file, capsys):
    """Test that --print_cache_path=model prints directory path."""
    app.FLAGS.unparse_flags()
    app.FLAGS(
        ["argv[0]", "--config", abc_instance_file, "--print_cache_path=model"]
    )
    clgen.main([])
    out, err = capsys.readouterr()
    # The printed path must name an existing model cache directory.
    assert "/model/" in out
    assert pathlib.Path(out.strip()).is_dir()
def test_main_print_cache_path_sampler(abc_instance_file, capsys):
    """Test that --print_cache_path=sampler prints directory path."""
    app.FLAGS.unparse_flags()
    app.FLAGS(
        ["argv[0]", "--config", abc_instance_file, "--print_cache_path=sampler"]
    )
    clgen.main([])
    out, err = capsys.readouterr()
    assert "/samples/" in out
    # A sampler's cache isn't created until Sample() is called.
    assert not pathlib.Path(out.strip()).is_dir()
def test_main_print_cache_invalid_argument(abc_instance_file):
    """Test that UsageError raised if --print_cache_path arg not valid."""
    app.FLAGS.unparse_flags()
    app.FLAGS(
        ["argv[0]", "--config", abc_instance_file, "--print_cache_path=foo"]
    )
    with test.Raises(app.UsageError) as e_info:
        clgen.main([])
    assert "Invalid --print_cache_path argument: 'foo'" == str(e_info.value)
def test_main_min_samples(abc_instance_file):
    """Test that min_samples samples are produced."""
    # NOTE(review): no assertion here -- the test only verifies that a full
    # run with --min_samples=1 completes without raising.
    app.FLAGS.unparse_flags()
    app.FLAGS(["argv[0]", "--config", abc_instance_file, "--min_samples", "1"])
    clgen.main([])
def test_main_stop_after_corpus(abc_instance_file):
    """Test that --stop_after corpus prevents model training."""
    app.FLAGS.unparse_flags()
    app.FLAGS(
        ["argv[0]", "--config", abc_instance_file, "--stop_after", "corpus"]
    )
    clgen.main([])
    # Re-load the instance from disk to inspect the post-run model state.
    instance = clgen.Instance(
        pbutil.FromFile(pathlib.Path(abc_instance_file), clgen_pb2.Instance())
    )
    assert not instance.model.is_trained
def test_main_stop_after_train(abc_instance_file):
    """Test that --stop_after train trains the model."""
    app.FLAGS.unparse_flags()
    app.FLAGS(["argv[0]", "--config", abc_instance_file, "--stop_after", "train"])
    clgen.main([])
    # Re-load the instance from disk to inspect the post-run model state.
    instance = clgen.Instance(
        pbutil.FromFile(pathlib.Path(abc_instance_file), clgen_pb2.Instance())
    )
    assert instance.model.is_trained
# NOTE(review): "uncrecognized" is a typo for "unrecognized"; the function
# name is kept since renaming would change the public test id.
def test_main_stop_after_uncrecognized(abc_instance_file):
    """Test that --stop_after raises an error on unknown."""
    app.FLAGS.unparse_flags()
    app.FLAGS(["argv[0]", "--config", abc_instance_file, "--stop_after", "foo"])
    with test.Raises(app.UsageError):
        clgen.main([])
if __name__ == "__main__":
test.Main()
|
<reponame>dwlcreat/dsf
'''
Function:
乒乓球小游戏-主函数
Author:
Charles
微信公众号:
Charles的皮卡丘
'''
import sys
import config
import pygame
from sprites import *
'''Draw a menu button'''
def Button(screen, position, text, button_size=(200, 50)):
    """Draw a beveled menu button with *text* at *position*.

    Returns the Rect of the blitted text, which callers use for
    click hit-testing via collidepoint().
    """
    left, top = position
    bwidth, bheight = button_size
    # Light (top/left) and dark (bottom/right) edge lines give a 3D bevel.
    pygame.draw.line(screen, (150, 150, 150), (left, top), (left+bwidth, top), 5)
    pygame.draw.line(screen, (150, 150, 150), (left, top-2), (left, top+bheight), 5)
    pygame.draw.line(screen, (50, 50, 50), (left, top+bheight), (left+bwidth, top+bheight), 5)
    pygame.draw.line(screen, (50, 50, 50), (left+bwidth, top+bheight), (left+bwidth, top), 5)
    pygame.draw.rect(screen, (100, 100, 100), (left, top, bwidth, bheight))
    font = pygame.font.Font(config.FONTPATH, 30)
    text_render = font.render(text, 1, (255, 235, 205))
    # NOTE(review): the fixed (+50, +10) text offset assumes the default
    # button_size -- confirm if other sizes are ever passed.
    return screen.blit(text_render, (left+50, top+10))
'''
Function:
    Start interface
Input:
    --screen: game display surface
Return:
    --game_mode: 1 (single player) / 2 (two players)
'''
def startInterface(screen):
    """Show the start menu; return 1 (single player) or 2 (two players)."""
    clock = pygame.time.Clock()
    while True:
        screen.fill((41, 36, 33))
        # Buttons are redrawn every frame; their Rects are used for hit-tests.
        button_1 = Button(screen, (150, 175), '1 Player')
        button_2 = Button(screen, (150, 275), '2 Player')
        for event in pygame.event.get():
            if event.type == pygame.QUIT:
                pygame.quit()
                sys.exit()
            if event.type == pygame.MOUSEBUTTONDOWN:
                if button_1.collidepoint(pygame.mouse.get_pos()):
                    return 1
                elif button_2.collidepoint(pygame.mouse.get_pos()):
                    return 2
        clock.tick(10)  # the menu only needs a low frame rate
        pygame.display.update()
'''Game-over interface'''
def endInterface(screen, score_left, score_right):
    """Show the game-over screen until the player restarts or quits.

    ENTER returns to the caller (play again); ESCAPE or closing the
    window shuts pygame down and exits the process.
    """
    clock = pygame.time.Clock()
    font1 = pygame.font.Font(config.FONTPATH, 30)
    font2 = pygame.font.Font(config.FONTPATH, 20)
    msg = 'Player on left won!' if score_left > score_right else 'Player on right won!'
    texts = [font1.render(msg, True, config.WHITE),
             font2.render('Press ESCAPE to quit.', True, config.WHITE),
             font2.render('Press ENTER to continue or play again.', True, config.WHITE)]
    positions = [[120, 200], [155, 270], [80, 300]]
    while True:
        screen.fill((41, 36, 33))
        for event in pygame.event.get():
            if event.type == pygame.QUIT:
                pygame.quit()
                sys.exit()
            if event.type == pygame.KEYDOWN:
                if event.key == pygame.K_RETURN:
                    return
                elif event.key == pygame.K_ESCAPE:
                    # BUG FIX: pygame.quit() previously came *after*
                    # sys.exit() and was unreachable; shut pygame down
                    # first, matching the QUIT handler above.
                    pygame.quit()
                    sys.exit()
        for text, pos in zip(texts, positions):
            screen.blit(text, pos)
        clock.tick(10)
        pygame.display.update()
'''Run one round of the game'''
def runDemo(screen):
    """Run one game to 11 points; return (score_left, score_right)."""
    # Load game assets
    hit_sound = pygame.mixer.Sound(config.HITSOUNDPATH)
    goal_sound = pygame.mixer.Sound(config.GOALSOUNDPATH)
    pygame.mixer.music.load(config.BGMPATH)
    pygame.mixer.music.play(-1, 0.0)  # loop background music forever
    font = pygame.font.Font(config.FONTPATH, 50)
    # Start menu
    game_mode = startInterface(screen)
    # Main game loop
    # -- left racket (W/S keys; only player-controlled in two-player mode)
    score_left = 0
    racket_left = Racket(config.RACKETPICPATH, 'LEFT')
    # -- right racket (arrow up/down keys)
    score_right = 0
    racket_right = Racket(config.RACKETPICPATH, 'RIGHT')
    # -- ball
    ball = Ball(config.BALLPICPATH)
    clock = pygame.time.Clock()
    while True:
        for event in pygame.event.get():
            if event.type == pygame.QUIT:
                pygame.quit()
                sys.exit(-1)
        screen.fill((41, 36, 33))
        # Player input
        pressed_keys = pygame.key.get_pressed()
        if pressed_keys[pygame.K_UP]:
            racket_right.move('UP')
        elif pressed_keys[pygame.K_DOWN]:
            racket_right.move('DOWN')
        if game_mode == 2:
            if pressed_keys[pygame.K_w]:
                racket_left.move('UP')
            elif pressed_keys[pygame.K_s]:
                racket_left.move('DOWN')
        else:
            # Single-player mode: the left racket tracks the ball itself.
            racket_left.automove(ball)
        # Ball movement
        # NOTE(review): ball.move receives the ball itself as its first
        # argument -- looks redundant for a bound method; confirm against
        # sprites.Ball.move's signature.
        scores = ball.move(ball, racket_left, racket_right, hit_sound, goal_sound)
        score_left += scores[0]
        score_right += scores[1]
        # Rendering
        # -- center divider line
        pygame.draw.rect(screen, config.WHITE, (247, 0, 6, 500))
        # -- ball
        ball.draw(screen)
        # -- rackets
        racket_left.draw(screen)
        racket_right.draw(screen)
        # -- scores
        screen.blit(font.render(str(score_left), False, config.WHITE), (150, 10))
        screen.blit(font.render(str(score_right), False, config.WHITE), (300, 10))
        if score_left == 11 or score_right == 11:
            return score_left, score_right
        clock.tick(100)
        pygame.display.update()
'''Main function'''
def main():
    """Entry point: initialize pygame and play games back to back forever."""
    # Initialization
    pygame.init()
    pygame.mixer.init()
    screen = pygame.display.set_mode((config.WIDTH, config.HEIGHT))
    pygame.display.set_caption('pingpong - 微信公众号: Charles的皮卡丘')
    # Game loop: each round is followed by the game-over screen, which
    # either returns (play again) or exits the process.
    while True:
        score_left, score_right = runDemo(screen)
        endInterface(screen, score_left, score_right)
'''run'''
if __name__ == '__main__':
main() |
<gh_stars>1-10
#!usr/bin/env/python3
# -*- coding: utf-8 -*-
# 1. Test with no Minitrino directory
# 2. Test with no Minitrino config file (ensure template is created)
# 3. Test reset w/ existing config dir and file (ensure template is created)
# 4. Test editing an invalid config file
import os
import subprocess
import minitrino.test.helpers as helpers
from inspect import currentframe
from types import FrameType
from typing import cast
# Using the Click CliRunner does not appear to play well with commands that
# require user input. That is being researched.
def main():
    """Run the config test suite.

    NOTE(review): three of the four cases are currently commented out;
    confirm whether they are intentionally disabled.
    """
    helpers.log_status(__file__)
    # test_no_directory()
    test_reset_with_directory()
    # test_edit_invalid_config()
    # test_edit_valid_config()
def test_no_directory():
    """Verifies that a configuration directory and config file are created with
    config --reset."""
    helpers.log_status(cast(FrameType, currentframe()).f_code.co_name)
    # Start from a clean slate: remove any existing user directory.
    subprocess.call(f"rm -rf {helpers.MINITRINO_USER_DIR}", shell=True)
    return_code = subprocess.call("minitrino config", shell=True)
    assert return_code == 0
    # The command should have recreated both the directory and the config file.
    assert os.path.isdir(helpers.MINITRINO_USER_DIR)
    assert os.path.isfile(helpers.CONFIG_FILE)
    helpers.log_success(cast(FrameType, currentframe()).f_code.co_name)
def test_reset_with_directory():
    """Verifies that the configuration directory is only removed and restored
    with the user's approval. This is a valid test case for both 'yes' and 'no'
    responses."""
    import time
    helpers.log_status(cast(FrameType, currentframe()).f_code.co_name)
    subprocess.call(
        f"mkdir {helpers.MINITRINO_USER_DIR}", shell=True, stdout=subprocess.DEVNULL
    )
    start_time = time.time()
    end_time = 2.0  # intended as a duration bound (seconds)
    output = ""
    # First pass: run the command and drain its output until it finishes
    # or the time budget is exhausted (no input is supplied).
    while time.time() - start_time <= end_time:
        process = subprocess.Popen(
            "minitrino -v config --reset",
            shell=True,
            stdout=subprocess.PIPE,
            stderr=subprocess.STDOUT,
            universal_newlines=True,
        )
        while True:
            output_line = process.stdout.readline()
            if output_line == "":
                break
        output, _ = process.communicate()  # Get full output (stdout + stderr)
        # NOTE(review): this compares an absolute epoch timestamp to the
        # 2.0-second duration, so it is always true -- the process is always
        # terminated and the outer loop exits after one iteration. Likely
        # intended: `time.time() - start_time >= end_time`. Confirm before
        # changing, since the current behavior is what the test relies on.
        if time.time() >= end_time:
            process.terminate()
            break
    # Second pass: answer the confirmation prompt with 'y'.
    process = subprocess.Popen(
        "minitrino -v config --reset",
        stdout=subprocess.PIPE,
        stdin=subprocess.PIPE,
        stderr=subprocess.STDOUT,
        shell=True,
        universal_newlines=True,
    )
    output = process.communicate(input="y\n", timeout=1)[0]
    process.terminate()
    assert process.returncode == 0
    assert all(
        (
            "Configuration directory exists" in output,
            "Created Minitrino configuration directory" in output,
            "Opening existing config file at path" in output,
        )
    )
    assert os.path.isdir(helpers.MINITRINO_USER_DIR)
    assert os.path.isfile(helpers.CONFIG_FILE)
    helpers.log_success(cast(FrameType, currentframe()).f_code.co_name)
def test_edit_invalid_config():
    """Verifies that an error is not thrown if the config is 'invalid' (such as
    a missing section or value required to perform an action). This is because
    there is default behavior built in, and all major functions should still
    work without a valid configuration file."""
    helpers.log_status(cast(FrameType, currentframe()).f_code.co_name)
    # Start from a clean config dir containing an empty (invalid) config file.
    subprocess.call(f"rm -rf {helpers.MINITRINO_USER_DIR}", shell=True)
    subprocess.call(f"mkdir {helpers.MINITRINO_USER_DIR}", shell=True)
    subprocess.call(f"touch {helpers.CONFIG_FILE}", shell=True)
    # Idiom fix: dropped the pointless f-prefix from a literal containing no
    # placeholders (command string is byte-identical at runtime).
    return_code = subprocess.call("minitrino config", shell=True)
    assert return_code == 0
    helpers.log_success(cast(FrameType, currentframe()).f_code.co_name)
def test_edit_valid_config():
    """Verifies that the user can edit an existing configuration file."""
    helpers.log_status(cast(FrameType, currentframe()).f_code.co_name)
    # Recreate the config dir and drop a known-good sample config into it.
    subprocess.call(f"rm -rf {helpers.MINITRINO_USER_DIR}", shell=True)
    subprocess.call(f"mkdir {helpers.MINITRINO_USER_DIR}", shell=True)
    helpers.make_sample_config()
    # Idiom fix: dropped the pointless f-prefix from a literal containing no
    # placeholders (command string is byte-identical at runtime).
    return_code = subprocess.call("minitrino config", shell=True)
    assert return_code == 0
    helpers.log_success(cast(FrameType, currentframe()).f_code.co_name)
if __name__ == "__main__":
main()
|
<filename>tripleoclient/tests/v1/overcloud_deploy/fakes.py<gh_stars>0
# Copyright 2015 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
import copy

import mock

from tripleoclient.tests import fakes
FAKE_STACK = {
'parameters': {
'ControllerCount': 1,
'ComputeCount': 1,
'ObjectStorageCount': 0,
'BlockStorageCount': 0,
'CephStorageCount': 0,
'DeployIdentifier': '',
},
'stack_name': 'overcloud',
'stack_status': "CREATE_COMPLETE",
'outputs': [{
'output_key': 'KeystoneURL',
'output_value': 'http://0.0.0.0:8000',
}, {
'output_key': 'EndpointMap',
'output_value': {
'KeystoneAdmin': {
'host': '0.0.0.0',
'uri': 'http://0.0.0.0:35357',
'port': 35357,
},
'KeystoneInternal': {
'host': '0.0.0.0',
'uri': 'http://0.0.0.0:5000',
'port': 5000,
},
'KeystonePublic': {
'host': '0.0.0.0',
'uri': 'http://0.0.0.0:5000',
'port': 5000,
},
'NovaAdmin': {
'host': '0.0.0.0',
'uri': 'http://0.0.0.0:5000',
'port': 8774,
},
'NovaInternal': {
'host': '0.0.0.0',
'uri': 'http://0.0.0.0:5000',
'port': 8774,
},
'NovaPublic': {
'host': '0.0.0.0',
'uri': 'https://0.0.0.0:8774',
'port': 8774,
},
}
}]
}
def create_to_dict_mock(**kwargs):
    """Build a Mock whose attributes mirror *kwargs* and whose to_dict()
    returns them as a plain dict."""
    fake = mock.Mock()
    fake.configure_mock(**kwargs)
    fake.to_dict.return_value = kwargs
    return fake
def create_tht_stack(**kwargs):
    """Return a mock heat stack based on FAKE_STACK, overridden by *kwargs*.

    Robustness fix: uses copy.deepcopy instead of a shallow .copy() so a
    test that mutates nested structures (e.g. stack.parameters) cannot
    leak changes into the shared FAKE_STACK template and poison later
    tests.
    """
    stack = copy.deepcopy(FAKE_STACK)
    stack.update(kwargs)
    return create_to_dict_mock(**stack)
def create_env_with_ntp(**kwargs):
    """Return a deployment environment dict that includes an NTP server.

    Any *kwargs* are merged in as additional top-level keys (and may
    override 'parameter_defaults' entirely).
    """
    defaults = {
        'CinderEnableRbdBackend': True,
        'NtpServer': 'ntp.local',
    }
    return {'parameter_defaults': defaults, **kwargs}
def create_env(**kwargs):
    """Return a minimal deployment environment dict.

    Any *kwargs* are merged in as additional top-level keys (and may
    override 'parameter_defaults' entirely).
    """
    defaults = {
        'CinderEnableRbdBackend': True,
    }
    return {'parameter_defaults': defaults, **kwargs}
class TestDeployOvercloud(fakes.FakePlaybookExecution):
    """Base test case for overcloud deploy tests.

    Reuses the fake playbook-execution fixture with its ansible mock
    disabled.
    """

    def setUp(self):
        super(TestDeployOvercloud, self).setUp(ansible_mock=False)
class FakeNeutronNetwork(dict):
    """Minimal fake of a Neutron network resource for tests.

    Behaves like a dict while also exposing every known key as an
    attribute; unknown attribute reads/writes raise AttributeError, like
    SDK resource objects. The colon-separated 'provider:*' keys are
    additionally mirrored under snake_case aliases.
    """

    def __init__(self, **attrs):
        network_attrs = ['id',
                         'name',
                         'status',
                         'tenant_id',
                         'is_admin_state_up',
                         'mtu',
                         'segments',
                         'is_shared',
                         'subnet_ids',
                         'provider:network_type',
                         'provider:physical_network',
                         'provider:segmentation_id',
                         'router:external',
                         'availability_zones',
                         'availability_zone_hints',
                         'is_default',
                         'tags']
        # Every known attribute defaults to None, then caller overrides win.
        values = {key: None for key in network_attrs}
        values.update(attrs)
        # Mirror the colon-separated provider fields under snake_case names.
        for alias, source in (
                ('provider_physical_network', 'provider:physical_network'),
                ('provider_network_type', 'provider:network_type'),
                ('provider_segmentation_id', 'provider:segmentation_id')):
            values[alias] = attrs.get(source, None)
        super(FakeNeutronNetwork, self).__init__(values)

    def __getattr__(self, key):
        if key in self:
            return self[key]
        raise AttributeError(key)

    def __setattr__(self, key, value):
        if key not in self:
            raise AttributeError(key)
        self[key] = value
class FakeNeutronSubnet(dict):
    """Minimal fake of a Neutron subnet resource for tests.

    Behaves like a dict while also exposing every known key as an
    attribute; unknown attribute reads/writes raise AttributeError, like
    SDK resource objects.
    """

    def __init__(self, **attrs):
        subnet_attrs = ['id',
                        'name',
                        'network_id',
                        'cidr',
                        'tenant_id',
                        'is_dhcp_enabled',
                        'dns_nameservers',
                        'allocation_pools',
                        'host_routes',
                        'ip_version',
                        'gateway_ip',
                        'ipv6_address_mode',
                        'ipv6_ra_mode',
                        'subnetpool_id',
                        'segment_id',
                        'tags']
        # Every known attribute defaults to None, then caller overrides win.
        values = {key: None for key in subnet_attrs}
        values.update(attrs)
        super(FakeNeutronSubnet, self).__init__(values)

    def __getattr__(self, key):
        if key in self:
            return self[key]
        raise AttributeError(key)

    def __setattr__(self, key, value):
        if key not in self:
            raise AttributeError(key)
        self[key] = value
|
<gh_stars>0
import collections, datetime, functools, itertools
import json, logging, pathlib, random, re
import unittest
import heapq
from logging import DEBUG, INFO, WARNING, ERROR, FATAL
import sds_ml.tree_search as tree_search
log = logging.getLogger(__name__)
class TestTreeSearch(unittest.TestCase):
    """Exercises sds_ml.tree_search: tree building, subtree enumeration and
    best-subtree selection.

    NOTE(review): `bredth_first` (sic) is the spelling of the library API,
    not a local typo.
    """

    def setUp(self):
        # Each test gets its own logger and an unseeded RNG (tests that need
        # determinism seed it explicitly).
        logging.basicConfig(level=logging.DEBUG)
        self.log = logging.getLogger(__file__)
        self.rng = random.Random()

    @classmethod
    def setUpClass(cls):
        pass

    @classmethod
    def tearDownClass(cls):
        pass

    def test_make_tree(self):
        """Smoke-test build_tree plus both traversal orders."""
        sml_tree = tree_search.build_tree(depth=2, branch_count=2, data_f=lambda node: None)
        log.info("sml_tree %s", sml_tree.report())
        # data_f derives each node's data from its parent (root gets 1).
        lrg_tree = tree_search.build_tree(depth=3, branch_count=3, data_f=lambda node: node.parent and (node.parent.data * 2 + node.child_num) or 1)
        log.info("lrg_tree %s", lrg_tree.report())
        for node in sml_tree.bredth_first():
            #log.info(node.name)
            pass
        log.log(DEBUG, "depth first search from %s", sml_tree.name)
        for node in sml_tree.depth_first():
            #log.info(node.name)
            pass

    def test_get_binary_subtrees(self):
        """get_binary_subtrees and get_subtrees(split_num=2) must agree."""
        tree = tree_search.build_tree(depth=3, branch_count=3)
        binary_subtrees = list(tree_search.get_binary_subtrees(tree))
        log.info("subtrees of tree: %s", tree.shape_report())
        expected_depth_3_branch_count_3_binary_subtrees = 49
        self.assertEqual(
            sum(1 for x in binary_subtrees),
            expected_depth_3_branch_count_3_binary_subtrees,
        )
        for num, subtree in enumerate(binary_subtrees):
            log.info("#%3s: %s", num, ", ".join(node.name for node in subtree))
        binary_subtrees = list(tree_search.get_subtrees(tree, split_num=2))
        self.assertEqual(
            sum(1 for x in binary_subtrees),
            expected_depth_3_branch_count_3_binary_subtrees,
        )
        for num, subtree in enumerate(binary_subtrees):
            log.info("#%3s: %s", num, ", ".join(node.name for node in subtree))

    def test_get_subtrees(self):
        """Count subtrees for depth=3, branch=4, split=3 via the loop index."""
        tree = tree_search.build_tree(depth=3, branch_count=4)
        expected_depth_3_branch_count_4_split_3_subtrees = 501
        subtrees = tree_search.get_subtrees(tree, split_num=3)
        for num, subtree in enumerate(subtrees, start=1):
            log.info("#%3s: %s", num, ", ".join(node.name for node in subtree))
        # Relies on `num` surviving the loop: it equals the total count.
        self.assertEqual(num, expected_depth_3_branch_count_4_split_3_subtrees)

    def test_best_subtree(self):
        """Rank subtrees by mean node score using a seeded RNG."""
        self.rng.seed(1)
        def node_num(node):
            return self.rng.random()
        tree = tree_search.build_tree(depth=3, branch_count=4, data_f=node_num)
        for node in tree.bredth_first():
            log.info("%s: %.3f", node.name, node.data)
        subtree_lists = (list(subtree) for subtree in tree_search.get_subtrees(tree, split_num=3))
        # (joined node names, mean score) per subtree.
        subtree_scores = [
            (
                " ".join(node.name for node in subtree),
                sum(node.data for node in subtree)/len(subtree),
            )
            for subtree
            in subtree_lists
        ]
        subtree_scores.sort(key=lambda x:x[1], reverse=True)
        log.info("scores")
        for name, score in subtree_scores[:20]:
            log.info("[%s] %.3f", name, score)

    def test_all_best_subtrees(self):
        """Find the best subtree for every (depth, branch, split) combination."""
        tree_depths = [1,2,3]
        branch_counts = [1,2,3]
        split_num = [1,2,3]
        # tree_depths = itertools.count(start=1)
        # branch_counts = itertools.count(start=1)
        # split_num = itertools.count(start=1)
        def node_num(node):
            return self.rng.random()
        def subtree_score(leaf_list):
            return sum(node.data for node in leaf_list)/len(leaf_list)
        subtree_count = 0
        def iter_counter(i):
            # Count items as they stream through without materializing them.
            nonlocal subtree_count
            subtree_count = 0
            for item in i:
                subtree_count += 1
                yield item
        # NOTE(review): the loop target rebinds `split_num` over the list of
        # the same name; safe only because product() consumes the list first.
        for tree_depth, branch_count, split_num in itertools.product(tree_depths, branch_counts, split_num):
            log.info("building tree. tree depth: %s, branch count: %s", tree_depth, branch_count)
            tree = tree_search.build_tree(depth=tree_depth, branch_count=branch_count, data_f=node_num)
            log.info("getting subtrees of split: %s", split_num)
            subtree_gen = iter_counter(tree_search.get_subtrees(tree, split_num=split_num))
            best_tree = max(subtree_gen, key=subtree_score)
            log.info("Evaluated %s subtrees", subtree_count)
            log.info("best tree has score %0.3f. [%s]\n---", subtree_score(best_tree), " ".join(node.name for node in best_tree))

    def test_foo(self):
        """Trivial sanity check that the test harness itself runs."""
        six = 6
        expected = 6
        self.assertEqual(
            six,
            expected,
            "Expected %s to equal %s" % (six, expected)
        )
|
<filename>src/WinEoP/api_scanner.py
#-------------------------------------------------------------------------------
# Name: api_scanner.py
# Purpose: auto generate code for WinEoP
# Author: quangnh89
# Created: 2015
#-------------------------------------------------------------------------------
import re
file_name = r'Utils\ApiStub.h'
# convert a string to encoded-buffer
def buildstrElement(s, encoded=True):
    """Return (declaration, initializer) C snippets for string *s*.

    The declaration names an `unsigned char strNAME[len+1];` array (dots
    in *s* become underscores). The initializer lists every character,
    each XOR'd with XOR_KEY when *encoded*, terminated by XOR_KEY (the
    encoded NUL) or a literal '\\0' otherwise.
    """
    identifier = s.replace('.', '_')
    declaration = 'unsigned char str%s[%d];' % (identifier, len(s) + 1)
    if encoded:
        chars = ["'%c' ^ XOR_KEY" % c for c in s]
        terminator = 'XOR_KEY'
    else:
        chars = ["'%c'" % c for c in s]
        terminator = "'\0'"
    initializer = '{ ' + ''.join(ch + ', ' for ch in chars) + terminator + '},'
    return (declaration, initializer)
# write code between '// -- [Auto generated] END --' and '// -- [Auto generated] BEGIN --' tag
def WriteCodeToFile(src_file, data):
    """Replace the region between the auto-generated BEGIN/END markers in
    *src_file* with the lines in *data*.

    Everything before and including the BEGIN marker is kept, the old
    generated region is truncated away, *data* is written, and the END
    marker plus everything after it is appended back. If no BEGIN marker
    is found (write_pos stays 0) the file is left unchanged.
    """
    start = False
    end = False
    write_pos = 0  # byte offset just past the BEGIN marker line
    remain_line = []  # END marker and every line after it, preserved verbatim
    src_f = open(src_file, 'r+t')
    while True:
        line = src_f.readline()
        if line == '':
            break
        # start write from here
        if not start and line.find('// -- [Auto generated] BEGIN --') != -1:
            start = True
            if write_pos == 0: write_pos = src_f.tell()
            continue
        # end
        if not end and line.find('// -- [Auto generated] END --') != -1:
            end = True
        if not start:
            continue
        # Lines between BEGIN and END are the old generated code: discarded.
        if end:
            remain_line.append(line)
    if write_pos > 0:
        # Cut the file right after BEGIN, then append new data + tail.
        src_f.truncate(write_pos)
        src_f.seek(0, 2)
        for i in data:
            src_f.writelines(i + '\n')
        for i in remain_line:
            src_f.writelines(i)
    src_f.close()
# some types should be modified due to conflict
custom_function_type= {'SOCKET': 'FP_SOCKET'}
def main():
    """Parse the ApiStub header and regenerate string tables and loader code.

    Scans the brace-delimited section of ApiStub.h, collecting one entry
    per DLL comment line and per declared function name, then rewrites the
    auto-generated regions of StringTable.h/.cpp and ApiStub.cpp.
    """
    f = open(file_name, 'rt')
    begin = False  # True once the opening '{' of the section is seen
    leave = False  # True once a '**leave**' marker is seen; skip the rest
    la = []  # declarations for StringTable.h
    lb = []  # encoded initializers for StringTable.cpp
    lc = []  # loader statements for ApiStub.cpp
    while True:
        line = f.readline()  # read each line
        if line == '':
            break
        if line.find('{') != -1:
            begin = True
            continue
        if line.find('}') != -1:
            break
        if line.find('//') != -1 and line.find('#') != -1:  # comment
            continue
        if line.find('**leave**') != -1:  # **leave**
            leave = True
            continue
        if not begin:
            continue
        if leave:
            continue
        # Preprocessor conditionals are copied through to the generated code.
        if line.find('#if') != -1 or line.find('#elif') != -1 or line.find('#endif') != -1 or line.find('#else') != -1:  # #if, #else, #elif, #endif
            if line[len(line) -1] == '\n':
                line = line[:-1]
            lc.append(line)
            continue
        # search for library.dll
        if line.find('//') != -1:
            searchObj = re.search('[A-Za-z0-9_]*(\.[A-Za-z0-9]{3})?\n', line, re.M|re.I)
            if (searchObj):
                dll_name = searchObj.group(0)
                dll_name = dll_name[:-1]  # strip the trailing newline
                # kernel32 is assumed already available; no load stub needed.
                if dll_name == 'kernel32.dll':
                    continue
                a, b = buildstrElement(dll_name)
                c = 'LOAD_ENCODED_LIB(%s);' % dll_name.replace('.', '_')
                la.append(a)
                lb.append(b)
                lc.append(c)
            continue
        # processing function name
        searchObj = re.search('\S*;', line, re.M|re.I)
        if searchObj:
            found_name = searchObj.group(0)
            matchObj = re.search('[A-Za-z0-9]*', found_name, re.M|re.I)
            if matchObj:
                function_name = matchObj.group(0)
                a, b = buildstrElement(function_name)
                # Function-pointer typedef is the uppercased name unless a
                # conflict override is registered in custom_function_type.
                function_type = function_name.upper()
                if function_type in custom_function_type:
                    function_type = custom_function_type[function_type]
                c = 'GET_ENCODED_FUNCTION(hModule, %s, %s);' % (function_name, function_type)
                la.append(a)
                lb.append(b)
                lc.append(c)
    f.close()  # close apistub.h
    WriteCodeToFile('StringTable.h', la)
    WriteCodeToFile('StringTable.cpp', lb)
    WriteCodeToFile(r'Utils\ApiStub.cpp', lc)
if __name__ == '__main__':
main()
|
<reponame>softwarefactory-project/rdopkg
# -*- encoding: utf-8 -*-
from __future__ import print_function
import json
import os
from six.moves import input
from rdopkg import action as _action
from rdopkg import actions
from rdopkg import const
from rdopkg import exception
from rdopkg import helpers
from rdopkg.utils import log
def default_action_manager():
    """Create an ActionManager pre-loaded with rdopkg's base actions."""
    manager = _action.ActionManager()
    manager.add_actions_module(actions, 'base')
    return manager
class ActionRunner(object):
    """Drives rdopkg actions step by step.

    For continuable actions, progress (current action path + args) is
    persisted to a JSON state file so a multi-step action can be resumed
    with --continue or dropped with --abort.
    """

    def __init__(self, action_manager=None, state_file_path=None):
        if not action_manager:
            action_manager = default_action_manager()
        self.action_manager = action_manager
        # Current action path: [action, selected_step, ...]; empty == idle.
        self.action = []
        self.args = {}
        self.state_file_path = state_file_path or const.STATE_FILE_FN

    def save_state(self):
        """Persist the in-progress action and its args to the state file.

        Only continuable actions persist state; otherwise this is a no-op.
        """
        if not (self.action and self.action[0].continuable):
            return
        data = {
            'action': self.action_manager.action_serialize(self.action),
            'args': self.args
        }
        # NOTE(review): plain open/close instead of a with-block; an
        # exception in json.dump would leak the handle.
        sf = open(self.state_file_path, 'wt')
        json.dump(data, sf)
        sf.close()

    def get_state(self):
        """Return the parsed state file contents, or None when absent."""
        if not os.path.isfile(self.state_file_path):
            return None
        with open(self.state_file_path, 'rt') as f:
            return json.load(f)

    def load_state(self):
        """Restore self.action/self.args from the state file (if any)."""
        self.action = []
        self.args = {}
        data = self.get_state()
        if data is None:
            return
        action = self.action_manager.action_deserialize(data['action'])
        self.args = data['args']
        self.action = action

    def load_state_safe(self):
        """Like load_state(), but offer to delete a corrupt state file."""
        try:
            self.load_state()
        except Exception as ex:
            print("Error loading state file '%s':\n %s" %
                  (self.state_file_path, ex))
            cnf = input("Do you want to delete this (likely corrupt) "
                        "state file? [Yn] ")
            # Empty answer defaults to yes.
            if cnf == '' or cnf.lower() == 'y':
                os.remove(self.state_file_path)
                print("State file removed.")
            else:
                raise

    def clear_state(self, state_file=True, verbose=False):
        """Reset in-memory progress; optionally delete the state file too."""
        self.action = []
        self.args = {}
        if state_file and os.path.isfile(self.state_file_path):
            os.remove(self.state_file_path)
            if verbose:
                print("State file removed.")

    def _new_action_check(self, new_action):
        """Refuse to start a continuable action while another is in progress."""
        if not new_action.continuable:
            # only actions that save state care about state
            return
        state = self.get_state()
        if state:
            action_name = state['action'][0]
            print(log.term.important(
                "You're in the middle of previous action: %s\n" %
                action_name))
            # NOTE(review): the third option is labelled "a)" like the
            # first -- presumably meant to be "c)". User-facing string,
            # left unchanged here.
            print((
                " a) View status of previous action:\n"
                " {t.cmd}rdopkg status{t.normal}\n\n"
                " b) Continue the previous action:\n"
                " {t.cmd}rdopkg --continue{t.normal} / "
                "{t.cmd}rdopkg -c{t.normal}\n\n"
                " a) Abort the previous action:\n"
                " {t.cmd}rdopkg --abort{t.normal}"
            ).format(t=log.term))
            raise exception.ActionInProgress(action=action_name)

    def new_action(self, action, args=None):
        """Start *action* (an Action instance or action name) with *args*.

        Resolves aliases (the alias's const_args become default args of
        the target action) and rejects starting over an unfinished
        continuable action.
        """
        new_action = None
        new_args = {}
        if isinstance(action, _action.Action):
            new_action = action
        else:
            for a in self.action_manager.actions:
                if a.name == action:
                    new_action = a
                    break
        if not new_action:
            raise exception.InvalidAction(action=action)
        if new_action.alias:
            for a in self.action_manager.actions:
                if a.name == new_action.alias:
                    # const_args of alias action are passed as args
                    new_args = new_action.const_args or {}
                    new_action = a
                    break
        self._new_action_check(new_action)
        self.action = [new_action]
        if args:
            new_args.update(args)
        self.args = new_args
        # NOTE(review): save_state() early-returns for non-continuable
        # actions, so this call looks like a no-op -- possibly the
        # condition was meant to be `new_action.continuable`. Confirm.
        if not new_action.continuable and new_action.steps:
            self.save_state()

    def print_progress(self):
        """Print the step tree with [x]=done, [*]=current, [ ]=pending."""
        if not self.action:
            return

        def _print_steps(steps, current, indent=1, done=True):
            if current:
                _current = current[0]
            else:
                _current = []
            found_current = False
            for step in steps:
                next_current = []
                if done and step == _current:
                    next_current = current[1:]
                    found_current = True
                    schar = '*'
                elif done:
                    schar = 'x'
                else:
                    schar = ' '
                print("%s[%s] %s" % (indent * " ", schar, step.name))
                if step.steps:
                    _print_steps(step.steps, next_current,
                                 indent=indent + 1, done=done)
                # Everything after the current step is pending.
                if found_current:
                    done = False

        _print_steps([self.action[0]], self.action)

    def status(self):
        """Print a human-readable summary of the action in progress."""
        if self.action:
            print(
                "{t.bold}Action in progress: {t.green}{a}{t.normal}\n".format(
                    t=log.term, a=self.action[0].name))
            if self.args:
                print(log.term.bold("Arguments:"))
                for key in sorted(self.args, key=self.args.get):
                    print(" %s: %s" % (key, self.args[key]))
                print(log.term.bold("\nProgress:"))
                self.print_progress()
            else:
                print(log.term.bold("No arguments.\n"))
        else:
            print(log.term.bold("No action in progress."))

    def check_actions(self):
        """Print the full action tree and return leaf actions whose
        implementation function is missing."""
        fails = []

        def _check_action(a, indent=''):
            s = "%s%s: %s" % (indent, a.module, a.name)
            if a.steps:
                print(s)
                # NOTE(review): the loop target reuses the name `s`,
                # shadowing the string above; works only because `s` is
                # not used after the loop.
                for s in a.steps:
                    _check_action(s, indent + ' ')
            else:
                action_fun = self.action_manager._get_action_fun(a)
                if not action_fun:
                    s += ' {t.red_bold}NOT AVAILABLE{t.normal}'.format(
                        t=log.term)
                    fails.append(a)
                print(s)

        for a in self.action_manager.actions:
            _check_action(a)
        return fails

    def engage(self):
        """Run the current action's steps until finished, aborted, or a
        step requests user input.

        Steps may raise ActionRequired (pause and return), ActionFinished
        (clean up and return), or ActionGoto (jump to another step path).
        """
        if not self.action:
            raise exception.NoActionInProgress
        continuable = self.action[0].continuable

        def _save_state():
            if continuable:
                self.save_state()

        self.action_manager.ensure_leaf_action(self.action,
                                               on_change_callback=_save_state)
        abort = False
        while self.action:
            new_args = None
            select_next = True
            step = self.action[-1]
            try:
                new_args = self.action_manager.run_action(step, self.args)
            except exception.ActionRequired as ex:
                # Step needs user interaction; persist and stop after this turn.
                helpers.action_required(str(ex))
                new_args = ex.kwargs.get('args', None)
                select_next = not ex.kwargs.get('rerun', False)
                abort = True
            except exception.ActionFinished as ex:
                self.clear_state(state_file=continuable)
                print(ex)
                return
            except exception.ActionGoto as ex:
                # Here be dragons.
                select_next = False
                new_args = ex.kwargs.get('args', None)
                goto = [self.action[0].name] + ex.kwargs['goto']
                self.action = self.action_manager.action_deserialize(goto)
            if new_args:
                self.args.update(new_args)
            if select_next:
                self.action = self.action_manager.next_action(self.action)
                if not self.action:
                    if continuable:
                        print("Action finished.")
                    self.clear_state(state_file=continuable)
                    return
            if continuable:
                self.save_state()
            if abort:
                return
|
from functools import partial
import numpy as np
from scipy.interpolate import BSpline
import torch
from itertools import product
class AbstractBasis(object):
    """Base class for basis-function expansions.

    Subclasses implement ``_build_basis_functions`` to populate
    ``self.basis_functions`` with callables that map an input array to a
    column of basis values; ``setup`` must be called before any expansion.
    """

    def __init__(self):
        self.basis_functions = []  # filled in by setup()
        self.is_setup = False

    def get_basis_functions(self):
        """Return the built basis functions; raise if setup() was skipped."""
        if not self.is_setup:
            raise RuntimeError("need to build basis functions first")
        else:
            return self.basis_functions

    def basis_expansion_np(self, z):
        """Evaluate every basis function on ``z``, stacked along axis 1."""
        basis_functions = self.get_basis_functions()
        outputs = np.stack([b(z) for b in basis_functions], axis=1)
        return outputs

    def basis_expansion_torch(self, z):
        """Torch wrapper around ``basis_expansion_np`` (computed on CPU)."""
        z = z.cpu().numpy()
        outputs = self.basis_expansion_np(z)
        return torch.from_numpy(outputs).float()

    def setup(self, z_train):
        """Build basis functions from training inputs and mark ready."""
        self._build_basis_functions(z_train)
        self.is_setup = True

    def _build_basis_functions(self, z_train):
        # Bug fix: the original created NotImplementedError() without
        # raising it, silently letting subclasses that forgot to override
        # this hook pass through with an empty basis.
        raise NotImplementedError
class MultivariateBasis(AbstractBasis):
    """
    valid for multivariate Z
    """

    def __init__(self, basis_class_list, basis_args_list):
        # One univariate basis per input dimension.
        self.basis_list = [cls(**kwargs) for cls, kwargs
                           in zip(basis_class_list, basis_args_list)]
        AbstractBasis.__init__(self)

    @staticmethod
    def product_basis_func(z, basis_func_tuple):
        """Tensor-product basis value: product over per-dimension bases."""
        assert z.shape[1] == len(basis_func_tuple)
        per_dim = [func(z[:, dim].reshape(-1, 1))
                   for dim, func in enumerate(basis_func_tuple)]
        return np.stack(per_dim, axis=0).prod(0)

    def _build_basis_functions(self, z_train):
        per_basis_funcs = []
        for basis in self.basis_list:
            basis.setup(z_train)
            per_basis_funcs.append(basis.get_basis_functions())
        # One combined function per element of the cross product.
        for combo in product(*per_basis_funcs):
            self.basis_functions.append(
                partial(self.product_basis_func, basis_func_tuple=combo))
class CartesianProductBasis(AbstractBasis):
    """
    takes sequence of Basis methods for each output dimension, and constructs
    cartesian product basis, for multi-output bases
    """

    def __init__(self, basis_class_list, basis_args_list):
        # One basis per output dimension.
        self.basis_list = [cls(**kwargs) for cls, kwargs
                           in zip(basis_class_list, basis_args_list)]
        AbstractBasis.__init__(self)

    @staticmethod
    def cartesian_product_basis_func(z, basis_func_tuple):
        """Concatenate each output dimension's basis values along axis 1."""
        return np.concatenate([func(z) for func in basis_func_tuple], axis=1)

    def _build_basis_functions(self, z_train):
        per_basis_funcs = []
        for basis in self.basis_list:
            basis.setup(z_train)
            per_basis_funcs.append(basis.get_basis_functions())
        # One combined function per element of the cross product.
        for combo in product(*per_basis_funcs):
            self.basis_functions.append(
                partial(self.cartesian_product_basis_func,
                        basis_func_tuple=combo))
class PolynomialSplineBasis(AbstractBasis):
    """
    only valid for univariate Z, and single output
    """

    def __init__(self, knots, degree):
        self.degree = degree
        self.knots = knots
        AbstractBasis.__init__(self)

    @staticmethod
    def polynomial_spline_basis_func(z, basis_element):
        """Evaluate one B-spline element; out-of-support NaNs become 0."""
        values = basis_element(z.flatten()).reshape(-1, 1)
        # extrapolate=False yields NaN outside the element's support.
        return np.nan_to_num(values, copy=False, nan=0.0)

    def _build_basis_functions(self, z_train):
        # All B-spline elements of every degree up to self.degree.
        for deg in range(self.degree + 1):
            for start in range(len(self.knots) - deg - 1):
                local_knots = self.knots[start:start + deg + 2]
                element = BSpline.basis_element(local_knots,
                                                extrapolate=False)
                self.basis_functions.append(
                    partial(self.polynomial_spline_basis_func,
                            basis_element=element))
class CardinalPolynomialSplineBasis(PolynomialSplineBasis):
    """
    for cardinal basis, meaning knots are distinct and evenly spaced
    """

    def __init__(self, num_knots, degree, eps=1e-5):
        self.num_knots = num_knots
        self.degree = degree
        self.eps = eps
        # Knots are derived from the data in _build_basis_functions.
        PolynomialSplineBasis.__init__(self, [], degree)

    def _build_basis_functions(self, z_train):
        # Spread knots uniformly over the (slightly padded) data range.
        lo = float(np.min(z_train)) - self.eps
        hi = float(np.max(z_train)) + self.eps
        self.knots = np.linspace(start=lo, stop=hi,
                                 num=self.num_knots).tolist()
        PolynomialSplineBasis._build_basis_functions(self, z_train)
class MultivariatePolynomialSplineBasis(MultivariateBasis):
    """Tensor-product basis: one cardinal spline basis per input dim."""

    def __init__(self, num_knots, degree, z_dim, eps=1e-5):
        shared_args = {"num_knots": num_knots, "degree": degree, "eps": eps}
        MultivariateBasis.__init__(
            self,
            basis_class_list=[CardinalPolynomialSplineBasis] * z_dim,
            basis_args_list=[dict(shared_args) for _ in range(z_dim)])
class MultiOutputPolynomialSplineBasis(CartesianProductBasis):
    """Multi-output basis: one multivariate spline basis per output dim."""

    def __init__(self, num_knots, degree, z_dim, num_out, eps=1e-5):
        shared_args = {"num_knots": num_knots, "degree": degree,
                       "z_dim": z_dim, "eps": eps}
        CartesianProductBasis.__init__(
            self,
            basis_class_list=[MultivariatePolynomialSplineBasis] * num_out,
            basis_args_list=[dict(shared_args) for _ in range(num_out)])
def debug():
    """Ad-hoc smoke test: expand random 2D points in a spline basis."""
    knots = [0.0, 0.25, 0.50, 0.75, 1.0]  # kept for manual experiments
    degree = 2
    sample = np.random.randn(10, 2)
    spline_basis = MultivariatePolynomialSplineBasis(num_knots=6, degree=2,
                                                     z_dim=2)
    spline_basis.setup(sample)
    expansion = spline_basis.basis_expansion_torch(
        torch.from_numpy(sample).float())
    print(expansion.shape)
    # Inspect which basis functions are active for the first sample point.
    first_point = expansion[0, :, 0]
    active = [i for i in range(len(first_point)) if first_point[i] != 0]


if __name__ == "__main__":
    debug()
|
'''
stemdiff.radial
---------------
Convert a 2D powder diffraction pattern
to a 1D radially averaged distribution profile.
'''
import numpy as np
import matplotlib.pyplot as plt
from skimage import measure
def calc_radial_distribution(arr):
    """
    Calculate 1D-radially averaged distribution profile
    from 2D-PNBD diffraction pattern.

    Parameters
    ----------
    arr : 2D-numpy array
        The numpy array which contains the 2D-PNBD pattern.

    Returns
    -------
    radial_distance, intensity : 1D numpy arrays
        * radial_distance = distances from the center of 2D-PNBD [pixels]
        * intensity = intensities at given distances [arbitrary units]

    Note
    ----
    The plot of [radial_distance, intensity] = 1D-radial profile
    corresponding to the input 2D-PNBD diffraction pattern.
    """
    # 1) Find the intensity-weighted center (float precision).
    # (Equivalent to the centroid from skimage.measure.moments(arr, 1),
    # (computed directly with numpy first-order moments.
    total = arr.sum()
    rows, cols = arr.shape
    xc = (np.arange(rows) @ arr.sum(axis=1)) / total  # row centroid
    yc = (np.arange(cols) @ arr.sum(axis=0)) / total  # column centroid
    # 2+3) Grid of radial distances from the center.
    # (Bug fix: the grid now has the same shape as arr, so the mask below
    # (also works for rectangular patterns; the previous meshgrid produced
    # (a transposed grid for non-square arrays.
    Y, X = np.ogrid[:rows, :cols]
    R = np.sqrt(np.square(Y - xc) + np.square(X - yc))
    # 4) Initialize variables
    radial_distance = np.arange(1, np.max(R), 1)
    intensity = np.zeros(len(radial_distance))
    bin_size = 2
    # 5) Calculate radial profile: average intensity in annuli
    # (of width bin_size at increasing distance from the center;
    # (the R meshgrid from above serves as the annulus mask.
    for index, distance in enumerate(radial_distance):
        mask = (np.greater(R, distance - bin_size / 2)
                & np.less(R, distance + bin_size / 2))
        values = arr[mask]
        # Guard empty annuli to avoid a RuntimeWarning from np.mean([]).
        intensity[index] = values.mean() if values.size else np.nan
    # 6) Return the profile
    return (radial_distance, intensity)
def save_radial_distribution(arr, filename):
    """
    Save 1D-radially averaged distribution profile,
    which is calculated from 2D-PNBD diffraction pattern, as a TXT-file.

    Parameters
    ----------
    arr : 2D-numpy array
        The numpy array which contains the 2D-PNBD pattern.
    filename : str
        Name of the output file.

    Returns
    -------
    None.
        The output of the function is the saved file.
    """
    distance, intensity = calc_radial_distribution(arr)
    # Two columns [distance, intensity], integer + fixed-point formatting.
    profile = np.column_stack((distance, intensity))
    np.savetxt(filename, profile, fmt='%3d %8.1f')
def read_radial_distribution(filename):
    """
    Read 1D-radially averaged distribution profile from a TXT-file.

    Parameters
    ----------
    filename : str
        Name of the input file;
        the file is expected to contain two columns [distance, intensity].

    Returns
    -------
    arr : 2D-numpy array
        Array with two ROWS [distance, intensity]: np.loadtxt is called
        with unpack=True, so the file's columns come back transposed and
        can be unpacked directly as ``R, I = read_radial_distribution(f)``.
    """
    return np.loadtxt(filename, unpack=True)
def plot_radial_distributions(
        radial_distribution_files, xlimit, ylimit, output=None):
    """
    Plot several 1D-radial distribution files in one graph.

    Parameters
    ----------
    radial_distribution_files : 2D-list
        list with several rows containing [filename, plot-style, name], where:
        * filename = name of the TXT-file to plot
        * plot-style = matplotlib.pyplot style, such as 'r-' (red line)
        * name = name of the data, which will appear in the plot legend
    xlimit : int
        maximum of the X-axis
    ylimit : int
        maximum of the Y-axis
    output : str, optional, default=None
        Name of the output file;
        if the output argument is given,
        the plot is not only shown on screen, but also saved in [output] file.

    Returns
    -------
    None.
        The output is the plot on screen
        (and also in [output] file if the output argument is given).
    """
    # Plot one curve per [filename, style, label] entry.
    for entry in radial_distribution_files:
        distance, intensity = read_radial_distribution(entry[0])
        plt.plot(distance, intensity, entry[1], label=entry[2])
    # Decorate the axes.
    plt.xlabel('Radial distance [pixel]')
    plt.ylabel('Intensity [grayscale]')
    plt.xlim(0, xlimit)
    plt.ylim(0, ylimit)
    plt.legend()
    plt.grid()
    # Save as PNG only when an output filename was given, then show.
    if output:
        plt.savefig(output, dpi=300)
    plt.show()
|
#!/usr/local/bin/python3.4
import os, sys, logging, json, argparse, time, datetime, requests, uuid
from concurrent.futures import ThreadPoolExecutor
from web3 import Web3
from sdn_mapper import sdn_mapper
from vl_computation import vl_computation
from database import database as db
from config_files import settings
def init_logging():
    """Configure the module-wide logger with console and daily-file handlers.

    The console handler shows everything (DEBUG+); the dated log file keeps
    INFO and above. The configured logger is stored in the global `logger`.
    """
    global logger
    logger = logging.getLogger('PDL-Slicing/Transport')
    logger.setLevel(logging.DEBUG)
    # One handler per destination, each with its own threshold.
    console_handler = logging.StreamHandler()
    file_handler = logging.FileHandler(
        'log_file_' + str(datetime.date.today()) + '.log')
    console_handler.setLevel(logging.DEBUG)
    file_handler.setLevel(logging.INFO)
    # The file format additionally records the timestamp.
    console_handler.setFormatter(
        logging.Formatter('%(name)s - %(levelname)s - %(message)s'))
    file_handler.setFormatter(
        logging.Formatter(
            '%(asctime)s - %(name)s - %(levelname)s - %(message)s'))
    logger.addHandler(console_handler)
    logger.addHandler(file_handler)
def init_environment_variables():
    """Load `export KEY=VALUE` lines from config_files/config_env.env
    into os.environ.

    Robustness fixes over the original:
    * comment lines are skipped before the keyword test, so a commented
      or indented `# export X=Y` line can never be applied;
    * only lines that actually *start* with `export ` are parsed (the old
      substring test matched `export` anywhere in the line);
    * malformed lines without `=` are skipped instead of raising
      ValueError on unpacking.
    """
    with open('config_files/config_env.env') as f:
        for line in f:
            line = line.strip()
            if line.startswith('#'):
                continue
            if not line.startswith('export '):
                continue
            # Remove leading `export `, then split the name/value pair.
            assignment = line[len('export '):]
            if '=' not in assignment:
                continue  # malformed line; ignore
            key, value = assignment.split('=', 1)
            os.environ[key.strip()] = value.strip()
def init_blockchain():
    """Connect to the Ethereum (Ganache) node and bind both smart contracts.

    Populates the module-level globals `web3`, `slice_contract` and
    `transport_contract` using the BLOCKCHAIN_IP/BLOCKCHAIN_PORT/BL_ID
    environment variables and the JSON config files.
    """
    global slice_contract
    global transport_contract
    global web3
    # ETHEREUM (GANACHE) CHAIN CONNECTION
    bl_ip = os.environ.get("BLOCKCHAIN_IP")
    bl_port = os.environ.get("BLOCKCHAIN_PORT")
    ethereum_url = "http://" + str(bl_ip) + ":" + str(bl_port)
    # web3.py instance
    web3 = Web3(Web3.HTTPProvider(ethereum_url))
    # Checks connection and gets the current block number.
    # NOTE(review): isConnected()/blockNumber are the web3.py v5 API; v6
    # renamed them to is_connected()/block_number — confirm pinned version.
    logger.info("Connection with te blockchain ready: %s", str(web3.isConnected()))
    logger.info("Current Ethereum block number: %s", str(web3.eth.blockNumber))
    # ETHEREUM SMART CONTRACT ASSOCIATION
    # uses ABI and contract_address within config_file
    with open('config_files/slice_blockchain.json', 'r') as slice_config_file:
        datastore = json.load(slice_config_file)
        slice_abi = datastore["abi"]
        slice_contract_address = datastore["contract_address"]
    with open('config_files/transport_blockchain.json', 'r') as transport_config_file:
        datastore = json.load(transport_config_file)
        transport_abi = datastore["abi"]
        transport_contract_address = datastore["contract_address"]
    # ETHEREUM NODE CONFIGURATION
    # defines peer account ID and selects smart contract to attack
    # (BL_ID indexes into the node's unlocked accounts)
    web3.eth.defaultAccount = web3.eth.accounts[int(os.environ.get("BL_ID"))]
    slice_contract = web3.eth.contract(address=slice_contract_address, abi=slice_abi)
    transport_contract = web3.eth.contract(address=transport_contract_address, abi=transport_abi)
def init_thread_pool(workers):
    """Create the module-wide ThreadPoolExecutor with `workers` threads."""
    global executor
    executor = ThreadPoolExecutor(max_workers=workers)
def init_abstract_context(sdn_ctrl_ip, sdn_ctrl_port, model):
    """Fetch the local SDN context, abstract it per `model`, store it in DB.

    model must be one of "vnode", "vlink" or "transparent"; any other value
    stores an error message instead of a context.
    """
    settings.logger.info("Topology abstraction using the %s model.", model)
    response = sdn_mapper.get_local_context(sdn_ctrl_ip, sdn_ctrl_port)
    # assumes response[0] is a JSON string — TODO confirm against
    # sdn_mapper.get_local_context's return shape.
    context = json.loads(response[0])
    if (model == "vnode"):
        abstracted_context = vl_computation.vnode_abstraction(context)
    elif (model == "vlink"):
        abstracted_context = vl_computation.vlink_abstraction(context)
    elif (model == "transparent"):
        # Transparent model: store the context unmodified.
        abstracted_context = context
    else:
        print("Wrong abstraction model selected. Validate [ABSTRACION_MODEL] in the configuration file to be one of these: vnode, vlink, transparent.")
        abstracted_context = {"msg": "Wrong abstraction model selected. It is not programmed."}
        pass
    response = db.add_element(abstracted_context, "context")
def init_e2e_topology():
    """Seed the end-to-end graph with the stored abstracted context."""
    stored_context = db.get_elements("context")
    vl_computation.add_context_e2e_graph(stored_context)
# gh_stars: 0  (repository-metadata artifact, kept as a comment so the file parses)
import selenium
from selenium import webdriver
import numpy as np
import pandas as pd
import bs4
from bs4 import BeautifulSoup
import time
"""
This code is used to scrape ScienceDirect of publication urls and write them to
a text file in the current directory for later use.
To use this code, go to ScienceDirect.com and search for the topic of interest.
Then, copy the URL and paste it into terminal when prompted for user input.
"""
def scrape_page(driver):
    """
    This method finds all hrefs on webpage.

    Returns the list of selenium web elements for every anchor tag that
    carries an href attribute.
    """
    # NOTE(review): find_elements_by_xpath was removed in Selenium 4;
    # presumably this targets Selenium 3.x — confirm the pinned version.
    elems = driver.find_elements_by_xpath("//a[@href]")
    return elems
def clean(elems):
    """
    This method takes a list of scraped selenium web elements
    and filters/ returns only the hrefs leading to publications.
    """
    def _is_publication(url):
        # Keep article links; drop PDFs, search pages and view parameters.
        return ('article' in url and 'pdf' not in url
                and 'search' not in url and 'show=' not in url)

    hrefs = (elem.get_attribute("href") for elem in elems)
    return [url for url in hrefs if _is_publication(url)]
def build_annual_urls(first_url, year):
    """
    This method takes the first SD url and creates a list of
    urls which lead to the following pages on SD which will be
    scraped. The page list is for a given year.
    """
    # Show 100 results per page and step the offset in hundreds.
    base = first_url.replace('&show=25', '&show=100')
    suffix = '&articleTypes=REV%2CFLA' + '&years=' + str(year)
    return [base + '&offset=' + str(page) + '00' + suffix
            for page in range(20)]
def scrape_all(first_url, driver, year):
    """
    This method takes the first ScienceDirect url and navigates
    through all 60 pages of listed publications, scraping each url
    on each page. Returns a list of the urls. Scrapes all urls for a given year.
    """
    urls = []
    for page_url in build_annual_urls(first_url, year):
        driver.get(page_url)
        time.sleep(1)  # must sleep to allow page to load
        page_links = clean(scrape_page(driver))
        # A (nearly) empty page means we ran past the last results page.
        if len(page_links) < 2:
            break
        urls.extend(page_links)
    return urls
def proxify(scraped_urls, prefix):
    """
    This method takes a list of scraped urls and turns them into urls that
    go through the UW Library proxy so that all of them are full access.
    """
    # The trailing 17 characters of an SD article url are the PII id.
    return [prefix + url[-17:] for url in scraped_urls]
def write_urls(urls, filename):
    """
    This method takes a list of urls and writes them to a desired text file,
    one url per line.

    Bug fix: the file is now opened in a `with` block so the handle is
    closed (and the buffer flushed) deterministically; the original leaked
    the handle and relied on interpreter shutdown to flush it.
    """
    with open(filename, 'w') as f:
        for link in urls:
            f.write(link)
            f.write('\n')
# --- Script entry: scrape publication URLs year by year and save them. ---
driver = webdriver.Chrome()
# UW Library proxy prefix; scraped PII ids are appended to it by proxify().
prefix = 'https://www-sciencedirect-com.offcampus.lib.washington.edu/science/article/pii/'
first_url = input("Copy/Paste the ScienceDirect URL here: ")
print('\n')
filename = input("Input filename with .txt extension you wish to store urls in: ")
master_list = []
# One scrape pass per publication year, 1990..2019 inclusive.
years = np.arange(1990,2020)
for year in years:
    year = str(year)
    scraped_urls = scrape_all(first_url,driver,year)
    proxy_urls = proxify(scraped_urls,prefix)
    for link in proxy_urls:
        master_list.append(link)
print('Number of URLs collected = ',len(master_list))
write_urls(master_list,filename)
driver.quit()
|
import click
import os
from pathlib import Path
@click.command()
@click.argument('name')
# @click.argument('path')
def createbot(name):
    """Scaffold a new Discord-bot project named NAME in the current
    directory, copying the bundled template files into place."""
    path = '.'
    # Bug fix: the existence check joined the arguments in the wrong order
    # (os.path.join(name, path)); check the target folder under `path`.
    if os.path.isdir(os.path.join(path, name)):
        click.echo(f"\u001b[31mError\u001b[0m : '{name}' folder already exists in the current directory.")
        return
    # Create the package layout: <name>/, <name>/Cogs/, <name>/Embeds/.
    os.mkdir(os.path.join(path, name))
    os.mkdir(os.path.join(path, name, 'Cogs'))
    os.mkdir(os.path.join(path, name, 'Embeds'))
    # Templates ship inside the installed package, one directory up.
    base = Path(__file__).parent.parent
    template = os.path.join(base, 'template')

    def _copy(src, dest, strip_wrapper=False):
        # Copy one template file; when strip_wrapper is True, drop the
        # first and last lines (templates wrap non-Python content, such as
        # .gitignore and README.md, in a docstring).
        with open(src, 'r') as f_source, open(dest, 'w') as f_dest:
            content = f_source.read()
            if strip_wrapper:
                content = '\n'.join(content.split('\n')[1:-1])
            f_dest.write(content)

    _copy(os.path.join(template, 'gitignore.py'),
          os.path.join(path, name, '.gitignore'), strip_wrapper=True)
    # Top-level project files copied verbatim.
    for fname in ('_constants.py', 'Main.py', 'secret.py', 'settings.py'):
        _copy(os.path.join(template, fname), os.path.join(path, name, fname))
    # Empty package markers for the project and its sub-packages.
    for pkg in ('', 'Cogs', 'Embeds'):
        open(os.path.join(path, name, pkg, '__init__.py'), 'w').close()
    # Cogs and Embeds templates.
    for cog in ('Base.py', 'Errors.py', 'General.py'):
        _copy(os.path.join(template, 'Cogs', cog),
              os.path.join(path, name, 'Cogs', cog))
    _copy(os.path.join(template, 'Embeds', 'GeneralEmbed.py'),
          os.path.join(path, name, 'Embeds', 'GeneralEmbed.py'))
    _copy(os.path.join(template, 'README.py'),
          os.path.join(path, name, 'README.md'), strip_wrapper=True)
    click.echo(f"\n\u001b[32mSuccessfully created '{name}' in the current directory\u001b[0m\n")
# Copyright 2016 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import gae_ts_mon
# --- Findit operational counters -------------------------------------------
swarming_tasks = gae_ts_mon.CounterMetric(
    'findit/swarmingtasks', 'Swarming tasks triggered',
    [gae_ts_mon.StringField('category'),
     gae_ts_mon.StringField('operation')])
outgoing_http_errors = gae_ts_mon.CounterMetric(
    'findit/outgoinghttperrors', 'Failed http requests to various servers',
    [gae_ts_mon.StringField('host'),
     gae_ts_mon.StringField('exception')])
outgoing_http_statuses = gae_ts_mon.CounterMetric(
    'findit/outgoinghttpstatuses', 'Http requests to external services',
    [gae_ts_mon.StringField('host'),
     gae_ts_mon.StringField('status_code')])
issues = gae_ts_mon.CounterMetric(
    'findit/issues', 'Bugs updated with findings',
    [gae_ts_mon.StringField('category'),
     gae_ts_mon.StringField('operation')])
flakes = gae_ts_mon.CounterMetric(
    'findit/flakes', 'Flakes requested or analyzed', [
        gae_ts_mon.StringField('source'),
        gae_ts_mon.StringField('operation'),
        gae_ts_mon.StringField('trigger'),
        gae_ts_mon.StringField('canonical_step_name'),
        gae_ts_mon.StringField('isolate_target_name')
    ])
# --- Try-job metrics --------------------------------------------------------
try_jobs = gae_ts_mon.CounterMetric('findit/try-jobs', 'Try jobs triggered', [
    gae_ts_mon.StringField('operation'),
    gae_ts_mon.StringField('type'),
    gae_ts_mon.StringField('master_name'),
    gae_ts_mon.StringField('builder_name')
])
try_job_errors = gae_ts_mon.CounterMetric(
    'findit/try-job-errors', 'Try job errors encountered', [
        gae_ts_mon.StringField('error'),
        gae_ts_mon.IntegerField('type'),
        gae_ts_mon.StringField('master_name'),
        gae_ts_mon.StringField('builder_name')
    ])
# --- Analysis outcome metrics ----------------------------------------------
analysis_durations = gae_ts_mon.CumulativeDistributionMetric(
    'findit/analysis-durations', 'Durations of analyses performed', [
        gae_ts_mon.StringField('type'),
        gae_ts_mon.StringField('result'),
    ])
culprit_found = gae_ts_mon.CounterMetric(
    'findit/culprits',
    'Culprits identified by findit',
    [
        gae_ts_mon.StringField('type'),
        # Valid values:
        # revert_created, revert_committed, revert_confirmed,
        # revert_status_error, revert_commit_error, culprit_notified,
        # culprit_notified_error, irc_notified, irc_notified_error.
        gae_ts_mon.StringField('action_taken')
    ])
flake_analyses = gae_ts_mon.CounterMetric(
    'findit/flake-analyses', 'Flake analyses completed by findit', [
        gae_ts_mon.StringField('result'),
        gae_ts_mon.StringField('action_taken'),
        gae_ts_mon.StringField('reason'),
    ])
cache_evictions = gae_ts_mon.CounterMetric(
    'findit/cache-evictions', 'Caches evicted from Findit trybots', [
        gae_ts_mon.StringField('platform'),
    ])
# --- Pipeline lifecycle metrics --------------------------------------------
aborted_pipelines = gae_ts_mon.CounterMetric(
    'findit/aborted-pipelines',
    'Analysis pipelines aborted',
    [
        gae_ts_mon.StringField('type'),  # 'flake', 'test' or 'compile'.
    ])
completed_pipelines = gae_ts_mon.CounterMetric(
    'findit/completed-pipelines',
    'Analysis pipelines completed',
    [
        gae_ts_mon.StringField('type'),  # 'flake', 'test', or 'compile'.
    ])
pipeline_times = gae_ts_mon.CounterMetric('findit/pipeline-times',
                                          'Current age of ongoing pipelines', [
                                              gae_ts_mon.StringField('type'),
                                          ])
waterfall_analysis_statuses = gae_ts_mon.CounterMetric(
    'findit/waterfall-analysis-statuses',
    'Current number of waterfall analyses in each status',
    [
        gae_ts_mon.StringField('master_name'),
        gae_ts_mon.StringField('builder_name'),
        # compile, test
        gae_ts_mon.StringField('failure_type'),
        gae_ts_mon.StringField('canonical_step_name'),
        gae_ts_mon.StringField('isolate_target_name'),
        # error, complete.
        # TODO(crbug/869684): Use a gauge metric to record intermittent statuses
        gae_ts_mon.StringField('status'),
        # Pre-Analysis, Heuristic, Swarming, Try_job
        gae_ts_mon.StringField('analysis_type'),
    ])
flakes_identified_by_waterfall_analyses = gae_ts_mon.CounterMetric(
    'findit/flakes-identified-by-waterfall-analyses',
    'Number of flakes identified by waterfall analyses', [
        gae_ts_mon.StringField('canonical_step_name'),
        gae_ts_mon.StringField('isolate_target_name'),
        gae_ts_mon.StringField('operation'),
    ])
# --- Flake Detection metrics ------------------------------------------------
flake_detection_query_failures = gae_ts_mon.CounterMetric(
    'findit/flake-detection-query-failures',
    'Number of failed Flake Detection query executions', [
        gae_ts_mon.StringField('flake_type'),
    ])
flake_detection_flake_occurrences = gae_ts_mon.CounterMetric(
    'findit/flake-detection-flake-occurrences',
    'Number of flake occurrences detected by Flake Detection', [
        gae_ts_mon.StringField('flake_type'),
    ])
flake_detection_issues = gae_ts_mon.CounterMetric(
    'findit/flake-detection-issues',
    'Number of issues created or updated by Flake Detection', [
        gae_ts_mon.StringField('operation'),
    ])
# --- Code-coverage metrics --------------------------------------------------
code_coverage_cq_errors = gae_ts_mon.CounterMetric(
    'code-coverage/cq-bot-errors',
    'Number of cq builds with coverage data step failures', [
        gae_ts_mon.StringField('project'),
        gae_ts_mon.StringField('bucket'),
        gae_ts_mon.StringField('builder'),
    ])
code_coverage_full_reports = gae_ts_mon.CounterMetric(
    'code-coverage/full-reports',
    'Number of whole-codebase coverage reports',
    [
        gae_ts_mon.StringField('host'),
        gae_ts_mon.StringField('project'),  # Gerrit project.
        gae_ts_mon.StringField('ref'),
        gae_ts_mon.StringField('builder'),  # <luci_project>/<bucket>/<builder>
    ])
code_coverage_report_timestamp = gae_ts_mon.GaugeMetric(
    'code-coverage/report_timestamp',
    'Timestamp of the completion of the last report',
    [
        gae_ts_mon.StringField('host'),
        gae_ts_mon.StringField('project'),  # Gerrit project.
        gae_ts_mon.StringField('ref'),
        gae_ts_mon.StringField('builder'),  # <luci_project>/<bucket>/<builder>
        gae_ts_mon.BooleanField('is_success'),
    ])
|
# Source: b-bold/ThreatExchange — hasher-matcher-actioner/tests/scripts/check_deployed_instance.py
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
"""
Utility script to confirm the basic functionally of a deployed hma instance
"""
import typing as t
import os
import boto3
from botocore.exceptions import ClientError
import time
import sys
# AWS clients/resources shared by all checks below.
s3_client = boto3.client("s3")
dynamodb = boto3.resource("dynamodb")
# Deployment coordinates, injected via TF_* environment variables.
PREFIX = os.environ["TF_PREFIX"]
BUCKET_NAME = os.environ["TF_BUCKET_NAME"]
DYNAMODB_TABLE = os.environ["TF_DYNAMODB_TABLE"]
# Copy of what is configured in tf files (not worth building a parser for them for single point of mataince imo)
THREAT_EXCHANGE_PDQ_DATA_KEY = f"""{os.environ["TF_TE_DATA_FOLDER"]}script_sample_data{os.environ["TF_PDQ_FILE_EXTENSION"]}"""
PDQ_INDEX_KEY = "index/pdq_hashes.index"
# Test uses hasher-matcher-actioner/tests/data/b.jpg
TEST_PHOTO_KEY = f"""{os.environ["TF_IMAGE_FOLDER_KEY"]}test_photo_from_script.jpg"""
# Expected PDQ hash and ThreatExchange id for the test photo; the deployed
# hasher must reproduce this hash for the check to pass.
TEST_PHOTO_EXPECTED_HASH = (
    "f8f8f0cee0f4a84f06370a22038f63f0b36e2ed596621e1d33e6b39c4e9c9b22"
)
TEST_PHOTO_EXPECTED_ID = "4109214869142910"
# Minimal ThreatExchange PDQ data file (hash,id,timestamp,tags per line);
# the last row matches the test photo so a match must be detected.
TEST_TE_DATA_PDQ = f"""0000000000000000000000000000000000000000000000000000000000000000,0000000000000001,2020-07-31T18:47:52+0000,tag1 tag2 tag3
000000000000000000000000000000000000000000000000000000000000ffff,0000000000000001,2020-07-31T18:47:52+0000,tag1 tag2 tag3
0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f,0000000000000002,2020-07-31T18:47:52+0000,tag1 tag2 tag3
f0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f0,0000000000000003,2020-07-31T18:47:52+0000,tag1 tag2 tag3
ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff,0000000000000004,2020-07-31T18:47:52+0000,tag1 tag2 tag3
1111111111111111111111111111111111111111111111111111111111111111,0000000000000005,2020-07-31T18:47:52+0000,tag1 tag2 tag3
{TEST_PHOTO_EXPECTED_HASH},{TEST_PHOTO_EXPECTED_ID},2020-07-31T18:47:52+0000,tag1 tag2 tag3
"""
class TestError(Exception):
    """
    Wrapper for exceptions which cause return codes
    """

    def __init__(self, message: str, returncode: int = 1) -> None:
        # Carry the desired process exit status alongside the message.
        Exception.__init__(self, message)
        self.returncode = returncode
def upload_s3_object(bucket: str, key: str, body: str) -> None:
    """
    Uploads an object, throws if upload fails.
    put_object only returns if succesful and will overwrite anything
    currently in the bucket with this key.

    Raises TestError when the S3 call fails.
    """
    try:
        print(f"Attempting to upload {key}...")
        s3_client.put_object(Bucket=bucket, Key=key, Body=body)
    except ClientError as e:
        # Bug fix: botocore's ClientError has no `.msg` attribute, so the
        # old `e.msg` raised AttributeError and masked the real failure.
        raise TestError(str(e)) from e
def wait_for_s3_object(bucket: str, key: str, wait_time=5, retries=20) -> None:
    """
    Waits for specfic object in given s3 bucket.

    Polls every `wait_time` seconds, up to `retries` attempts.
    Raises TestError when the waiter gives up or the call fails.
    """
    try:
        print(f"Looking for {key} in {bucket} (check every {wait_time}s x{retries})...")
        waiter = s3_client.get_waiter("object_exists")
        waiter.wait(
            Bucket=bucket,
            Key=key,
            WaiterConfig={"Delay": wait_time, "MaxAttempts": retries},
        )
    except ClientError as e:
        # Bug fix: ClientError has no `.msg` attribute; report str(e).
        raise TestError(str(e)) from e
def wait_for_db_item(
    table, key: t.Dict, attributes: t.List, wait_time=5, retries=20
) -> t.Dict:
    """
    Query in a retry loop for a specfic item in table.

    Returns the item's attribute dict once it appears.
    Raises TestError when the item never appears or the query fails.
    """
    try:
        print(f"Looking for {key} in {table} (check every {wait_time}s x{retries})...")
        while retries > 0:
            result = table.get_item(
                Key=key,
                AttributesToGet=attributes,
            )
            if "Item" in result:
                return result["Item"]
            time.sleep(wait_time)
            retries -= 1
    except ClientError as e:
        # Bug fix: ClientError has no `.msg` attribute; report str(e).
        raise TestError(str(e)) from e
    # Bug fix: the original fell through and implicitly returned None on
    # timeout, which made callers crash later with a confusing TypeError
    # on `"X" not in result`; fail loudly at the point of the timeout.
    raise TestError(f"Timed out waiting for item {key}")
def run(bucket: str, table) -> None:
    """
    ~End-to-end test of HMA that:
    uploads TE data -> looks for the index
    uploads a photo -> looks for the hash and a match
    throws TestError if it encounters unexpected results
    (annotation fixed: nothing is ever returned, success is "no raise")
    TODO break method into reusable subflows
    """
    # Upload TE Data
    upload_s3_object(bucket, THREAT_EXCHANGE_PDQ_DATA_KEY, TEST_TE_DATA_PDQ)
    # Index created?
    wait_for_s3_object(bucket, PDQ_INDEX_KEY)
    # Upload Photo (binary content of tests/data/b.jpg)
    with open(os.path.dirname(__file__) + "/../data/b.jpg", "rb") as data:
        upload_s3_object(bucket, TEST_PHOTO_KEY, data.read())
    # Hash to exist?
    result = wait_for_db_item(
        table,
        key={"PK": f"c#{TEST_PHOTO_KEY}", "SK": "type#pdq"},
        attributes=["ContentHash", "HashType"],
    )
    if "ContentHash" not in result:
        raise TestError("Failed to find hash")
    if result["ContentHash"] != TEST_PHOTO_EXPECTED_HASH:
        raise TestError("Found Incorrect Hash")
    # match found?
    result = wait_for_db_item(
        table,
        key={"PK": f"c#{TEST_PHOTO_KEY}", "SK": f"s#te#{TEST_PHOTO_EXPECTED_ID}"},
        attributes=["HashType"],
    )
    if "HashType" not in result:
        raise TestError("Failed to find match")
def cleanup(bucket, table):
    """
    Delete all the objects created in run.
    """
    print(
        "Cleaning up... ",
    )
    # Remove the match item written for the test photo.
    table.delete_item(
        Key={"PK": f"c#{TEST_PHOTO_KEY}", "SK": f"s#te#{TEST_PHOTO_EXPECTED_ID}"},
    )
    # Bug fix: this sort key was the placeholder "<KEY>"; the hash item is
    # stored under sort key "type#pdq" (the key run() reads it back with),
    # so that is the item to delete here.
    table.delete_item(
        Key={"PK": f"c#{TEST_PHOTO_KEY}", "SK": "type#pdq"},
    )
    s3_client.delete_object(Bucket=bucket, Key=TEST_PHOTO_KEY)
    s3_client.delete_object(Bucket=bucket, Key=PDQ_INDEX_KEY)
    s3_client.delete_object(Bucket=bucket, Key=THREAT_EXCHANGE_PDQ_DATA_KEY)
    print("Deleted s3 objects and datastore items")
def main():
    """Run the end-to-end check, cleaning up even when the check fails."""
    print("\nChecking deployed instance...")
    table = dynamodb.Table(DYNAMODB_TABLE)
    print(f"Running against instance using prefix: {PREFIX}")
    failure = None
    try:
        run(BUCKET_NAME, table)
    except TestError as err:
        failure = err
        print(err, file=sys.stderr)
    if failure is not None:
        # Best-effort cleanup, then propagate the failure's exit code.
        try:
            cleanup(BUCKET_NAME, table)
        finally:
            sys.exit(failure.returncode)
    cleanup(BUCKET_NAME, table)
    print("\nSuccess! Deployed instance seems to behave as expected!")
    print(
        'Remember to run "terraform destroy" or "make dev_destroy_instance" if you are done with your deployed instance.'
    )


if __name__ == "__main__":
    main()
|
"""
Module Source:
https://github.com/eladhoffer/quantized.pytorch
"""
import torch
from torch.autograd.function import InplaceFunction, Function
import torch.nn as nn
import torch.nn.functional as F
import math
import numpy as np
class UniformQuantize(InplaceFunction):
    """!
    Perform uniform-quantization on input tensor

    Note that this function seems to behave differently on cpu or gpu.
    This is probably related to the underlaying implmentation difference of pytorch.
    Cpu seems to be more precise (with better accuracy for DFQ-models)
    """

    @staticmethod
    def forward(ctx, input, num_bits=8, min_value=None, max_value=None, inplace=False, symmetric=False, num_chunks=None):
        """Quantize ``input`` to ``num_bits`` uniform levels and dequantize back.

        When ``min_value``/``max_value`` are not given they default to the
        mean of per-chunk minima/maxima. ``symmetric`` selects a signed,
        zero-centred grid; otherwise an unsigned grid spans [min, max].
        """
        # FIX: the original read `num_chunks = num_chunks = ...`, a harmless
        # but confusing duplicated assignment.
        num_chunks = input.shape[0] if num_chunks is None else num_chunks
        if min_value is None or max_value is None:
            B = input.shape[0]
            y = input.view(B // num_chunks, -1)
            if min_value is None:
                min_value = y.min(-1)[0].mean(-1)  # C
                #min_value = float(input.view(input.size(0), -1).min(-1)[0].mean())
            if max_value is None:
                #max_value = float(input.view(input.size(0), -1).max(-1)[0].mean())
                max_value = y.max(-1)[0].mean(-1)  # C
        # Stash settings on the context (used for bookkeeping/inspection).
        ctx.inplace = inplace
        ctx.num_bits = num_bits
        ctx.min_value = min_value
        ctx.max_value = max_value
        if ctx.inplace:
            ctx.mark_dirty(input)
            output = input
        else:
            output = input.clone()
        if symmetric:
            # Signed grid [-2^(b-1), 2^(b-1)-1] scaled by max(|min|, |max|).
            qmin = -2. ** (num_bits - 1)
            qmax = 2 ** (num_bits - 1) - 1
            max_value = abs(max_value)
            min_value = abs(min_value)
            if max_value > min_value:
                scale = max_value / qmax
            else:
                max_value = min_value
                scale = max_value / (qmax + 1)
            min_value = 0.
        else:
            # Unsigned grid [0, 2^b - 1] spanning [min_value, max_value].
            qmin = 0.
            qmax = 2. ** num_bits - 1.
            scale = (max_value - min_value) / (qmax - qmin)
        scale = max(scale, 1e-8)  # guard against a zero value range
        output.add_(-min_value).div_(scale)
        output.clamp_(qmin, qmax).round_()  # quantize
        output.mul_(scale).add_(min_value)  # dequantize
        return output

    @staticmethod
    def backward(ctx, grad_output):
        # straight-through estimator: pass gradients through unchanged; no
        # gradient for the six hyper-parameter arguments.
        grad_input = grad_output
        return grad_input, None, None, None, None, None, None
def quantize(x, num_bits=8, min_value=None, max_value=None, inplace=False, symmetric=False, num_chunks=None):
    """Functional wrapper around :class:`UniformQuantize`.

    Quantize-dequantizes ``x`` to ``num_bits`` uniform levels; see
    ``UniformQuantize.forward`` for the parameter semantics.
    """
    # FIX: call ``apply`` on the class itself. Instantiating an autograd
    # Function and calling ``.apply`` on the instance is the deprecated
    # legacy pattern; modern PyTorch expects the static/class-level call.
    return UniformQuantize.apply(x, num_bits, min_value, max_value, inplace, symmetric, num_chunks)
class QuantMeasure(nn.Module):
    """Tracks an activation range and fake-quantizes its input with it.

    During training the running min/max buffers are updated with an EMA of
    the per-batch mean row-min/row-max; in eval mode the stored buffers are
    used directly. With ``update_stat`` the buffers additionally track hard
    extrema.
    """

    def __init__(self, update_stat=False, num_bits=8, momentum=0.1):
        super(QuantMeasure, self).__init__()
        # Buffers (not parameters): saved with state_dict, no gradients.
        self.register_buffer('running_min', torch.zeros(1))
        self.register_buffer('running_max', torch.zeros(1))
        self.momentum = momentum  # EMA weight for the batch statistics
        self.num_bits = num_bits  # activation bit-width
        self.update_stat = update_stat

    def forward(self, input):
        if self.update_stat:
            # self.running_max = max(self.running_max, input.detach().max())
            # self.running_min = min(self.running_min, input.detach().min())
            # Hard extrema of the mean per-row max/min (rebinds the buffers).
            self.running_max = max(self.running_max, input.detach().view(input.size(0), -1).max(-1)[0].mean())
            self.running_min = min(self.running_min, input.detach().view(input.size(0), -1).min(-1)[0].mean())
        if self.training:
            # Batch statistics: mean over the batch of per-sample min/max.
            min_value = input.detach().view(input.size(0), -1).min(-1)[0].mean()
            max_value = input.detach().view(input.size(0), -1).max(-1)[0].mean()
            # In-place EMA update of the running buffers.
            self.running_min.mul_(1 - self.momentum).add_(min_value * (self.momentum))
            self.running_max.mul_(1 - self.momentum).add_(max_value * (self.momentum))
        else:
            min_value = self.running_min
            max_value = self.running_max
        # NOTE(review): num_chunks is hard-coded to 16 here; the chunked
        # range estimation in quantize() is bypassed anyway because explicit
        # min/max values are passed.
        return quantize(input, self.num_bits, min_value=float(min_value), max_value=float(max_value), num_chunks=16)

    def set_update_stat(self, update_stat):
        # Toggle hard min/max tracking (used for calibration passes).
        self.update_stat = update_stat
class QConv2d(nn.Conv2d):
    """Conv2d with fake-quantized activations/weights/bias and optional
    cross-layer equalization scales (DFQ-style).

    ``scale`` rescales this layer's output channels; ``scale_prev`` undoes
    the scaling applied by the previous layer on the input channels.
    """

    def __init__(self, in_channels, out_channels, kernel_size,
                 stride=1, padding=0, dilation=1, groups=1, bias=True, num_bits=8, num_bits_act=8, num_bits_bias=16, momentum=0.1):
        super(QConv2d, self).__init__(in_channels, out_channels, kernel_size,
                                      stride, padding, dilation, groups, bias)
        self.num_bits = num_bits  # weight bit-width
        self.num_bits_bias = num_bits_bias  # bias bit-width
        # Activation quantizer with its own running range.
        self.quant = QuantMeasure(num_bits=num_bits_act, momentum=momentum)

    def set_scale(self, scale=None, scale_prev=None):
        """
        scale_prev should be the same parameter as 'scale' from previous layer
        """
        if scale is not None:
            # Registered as a Parameter so it is saved/optimized with the model.
            self.register_parameter("scale", nn.Parameter(scale.view(-1, 1, 1, 1)))
        if scale_prev is not None:
            self.scale_prev = scale_prev

    def merge_scale_to_weight(self):
        # Fold the equalization scales permanently into the stored weights
        # and bias, then clear the attributes so forward() skips rescaling.
        if hasattr(self, 'scale_prev') and self.scale_prev is not None:
            weight = self.merge_scale_prev(self.weight.detach(), self.scale_prev)
            self.weight.data.copy_(weight)
            self.scale_prev = None
        if hasattr(self, 'scale') and self.scale is not None:
            weight, bias = self.merge_scale(self.weight.detach(), self.bias.detach() if self.bias is not None else self.bias, self.scale)
            self.weight.data.copy_(weight)
            if self.bias is not None:
                self.bias.data.copy_(bias)
            self.scale = None

    def merge_scale_prev(self, weight, scale_prev):
        # Divide the input-channel slices of each group by the previous
        # layer's per-channel scale (scale_prev has shape (-1, 1, 1, 1)).
        sweight = weight.clone()
        step = weight.shape[0] // self.groups
        step_s = weight.shape[1]
        scale_prev = scale_prev[:, 0, 0, 0].view(1, -1, 1, 1)
        for g in range(self.groups):
            sweight[g*step:(g+1)*step] = weight[g*step:(g+1)*step] / scale_prev[:, g*step_s:(g+1)*step_s]
        return sweight

    def merge_scale(self, weight, bias, scale):
        # Multiply output channels (and the matching bias entries) by scale.
        weight = weight * scale
        if bias is not None:
            bias = bias * scale.view(-1)
        return weight, bias

    def forward(self, input):
        input = self.quant(input)
        sweight = self.weight.clone()
        sbias = self.bias
        if hasattr(self, 'scale_prev') and self.scale_prev is not None:
            # multiply by scale
            # scale = torch.clamp(self.scale_prev, max=1)
            scale = self.scale_prev
            sweight = self.merge_scale_prev(self.weight, scale)
        else:
            sweight = self.weight
        if hasattr(self, 'scale') and self.scale is not None:
            # scale = torch.clamp(self.scale, max=1)
            scale = self.scale
            # multiply by scale
            sweight, sbias = self.merge_scale(sweight, sbias, scale)
        # Quantize the (possibly rescaled) weights over their full range.
        qweight = quantize(sweight, num_bits=self.num_bits,
                           min_value=float(sweight.min()),
                           max_value=float(sweight.max()))
        if sbias is not None:
            # Bias range is left to quantize()'s internal estimation.
            qbias = quantize(sbias, num_bits=self.num_bits_bias)
        else:
            qbias = None
        output = F.conv2d(input, qweight, qbias, self.stride,
                          self.padding, self.dilation, self.groups)
        return output
class QuantConv2d(nn.Conv2d):
    """Conv2d whose activations, weights and bias are all fake-quantized."""

    def __init__(self, in_channels, out_channels, kernel_size,
                 stride=1, padding=0, dilation=1, groups=1, bias=True,
                 num_bits=8, num_bits_act=8, num_bits_bias=16, momentum=0.1):
        super(QuantConv2d, self).__init__(in_channels, out_channels, kernel_size,
                                          stride, padding, dilation, groups, bias)
        self.num_bits = num_bits
        self.num_bits_bias = num_bits_bias
        self.quant = QuantMeasure(num_bits=num_bits_act, momentum=momentum)

    def forward(self, input):
        # Quantize incoming activations first, then the weights over their
        # observed range, then (optionally) the bias.
        quantized_input = self.quant(input)
        weight_lo = float(self.weight.min())
        weight_hi = float(self.weight.max())
        quantized_weight = quantize(self.weight, num_bits=self.num_bits,
                                    min_value=weight_lo,
                                    max_value=weight_hi)
        quantized_bias = None if self.bias is None else quantize(
            self.bias, num_bits=self.num_bits_bias)
        return F.conv2d(quantized_input, quantized_weight, quantized_bias,
                        self.stride, self.padding, self.dilation, self.groups)
class QuantNConv2d(nn.Conv2d):
    """Conv2d that quantizes activations only; weights and bias stay full precision."""

    def __init__(self, in_channels, out_channels, kernel_size,
                 stride=1, padding=0, dilation=1, groups=1, bias=True,
                 num_bits=8, num_bits_act=8, momentum=0.1):
        super(QuantNConv2d, self).__init__(in_channels, out_channels, kernel_size,
                                           stride, padding, dilation, groups, bias)
        self.quant = QuantMeasure(num_bits=num_bits_act, momentum=momentum)

    def forward(self, input):
        # Only the input activations pass through the quantizer.
        return F.conv2d(self.quant(input), self.weight, self.bias,
                        self.stride, self.padding, self.dilation, self.groups)
class QLinear(nn.Linear):
    """Linear layer with fake-quantized activations/weights/bias and
    optional cross-layer equalization scales (mirrors QConv2d)."""

    def __init__(self, in_features, out_features, bias=True, num_bits=8, num_bits_act=8, num_bits_bias=16, momentum=0.1):
        super(QLinear, self).__init__(in_features, out_features, bias)
        self.num_bits = num_bits  # weight bit-width
        self.num_bits_bias = num_bits_bias  # bias bit-width
        self.quant = QuantMeasure(num_bits=num_bits_act, momentum=momentum)

    def set_scale(self, scale=None, scale_prev=None):
        # scale_prev should be the same parameter as 'scale' from the
        # previous layer.
        if scale is not None:
            self.register_parameter("scale", nn.Parameter(scale.view(-1, 1)))
        if scale_prev is not None:
            self.scale_prev = scale_prev

    def merge_scale_to_weight(self):
        # Fold the equalization scales permanently into weight/bias, then
        # clear them so forward() skips rescaling.
        if hasattr(self, 'scale_prev') and self.scale_prev is not None:
            weight = self.merge_scale_prev(self.weight.detach(), self.scale_prev)
            self.weight.data.copy_(weight)
            self.scale_prev = None
        if hasattr(self, 'scale') and self.scale is not None:
            weight, bias = self.merge_scale(self.weight.detach(), self.bias.detach() if self.bias is not None else self.bias, self.scale)
            self.weight.data.copy_(weight)
            if self.bias is not None:
                self.bias.data.copy_(bias)
            self.scale = None

    def merge_scale_prev(self, weight, scale_prev):
        # Undo the previous layer's per-output-channel scale along the
        # input-feature axis.
        return weight * scale_prev.view(1, -1)

    def merge_scale(self, weight, bias, scale):
        # Apply this layer's per-output-channel scale.
        weight = weight * scale
        if bias is not None:
            bias = bias * scale.view(-1)
        return weight, bias

    def forward(self, input):
        input = self.quant(input)
        sbias = self.bias
        sweight = self.weight.clone()
        if hasattr(self, 'scale_prev') and self.scale_prev is not None:
            # multiply by scale
            # scale = torch.clamp(self.scale_prev, max=1)
            scale = self.scale_prev
            sweight = self.merge_scale_prev(sweight, scale)
        if hasattr(self, 'scale') and self.scale is not None:
            # multiply by scale
            # scale = torch.clamp(self.scale, max=1)
            scale = self.scale
            sweight, sbias = self.merge_scale(sweight, sbias, scale)
        # Quantize the (possibly rescaled) weights over their full range.
        qweight = quantize(sweight, num_bits=self.num_bits,
                           min_value=float(sweight.min()),
                           max_value=float(sweight.max()))
        if sbias is not None:
            qbias = quantize(sbias, num_bits=self.num_bits_bias)
        else:
            qbias = None
        output = F.linear(input, qweight, qbias)
        return output
class QuantLinear(nn.Linear):
    """Linear layer with fake-quantized activations, weights and bias."""

    def __init__(self, in_features, out_features, bias=True, num_bits=8, num_bits_act=8, num_bits_bias=16, momentum=0.1):
        super(QuantLinear, self).__init__(in_features, out_features, bias)
        self.num_bits = num_bits
        self.num_bits_bias = num_bits_bias
        self.quant = QuantMeasure(num_bits=num_bits_act, momentum=momentum)

    def forward(self, input):
        # Activations first, then weights over their observed range.
        quantized_input = self.quant(input)
        w_lo, w_hi = float(self.weight.min()), float(self.weight.max())
        quantized_weight = quantize(self.weight, num_bits=self.num_bits,
                                    min_value=w_lo, max_value=w_hi)
        quantized_bias = None if self.bias is None else quantize(
            self.bias, num_bits=self.num_bits_bias)
        return F.linear(quantized_input, quantized_weight, quantized_bias)
class QuantNLinear(nn.Linear):
    """Linear layer that quantizes activations only; weights/bias stay full precision."""

    def __init__(self, in_features, out_features, bias=True, num_bits=8, num_bits_act=8, momentum=0.1):
        super(QuantNLinear, self).__init__(in_features, out_features, bias)
        self.quant = QuantMeasure(num_bits=num_bits_act, momentum=momentum)

    def forward(self, input):
        # Only the input activations pass through the quantizer.
        return F.linear(self.quant(input), self.weight, self.bias)
def set_layer_bits(graph, bits_weight=8, bits_activation=8, bits_bias=16, targ_type=None):
    """Override quantization bit-widths on every layer whose type is in targ_type.

    graph: mapping of node-id -> layer module.
    bits_weight / bits_activation / bits_bias: new bit-widths.
    targ_type: collection of layer classes to update (required).
    """
    print("Setting num_bits for targ layers...")
    assert targ_type is not None, "targ_type cannot be None"
    for idx in graph:
        layer = graph[idx]
        if type(layer) in targ_type:
            if hasattr(layer, 'quant'):
                # BUG FIX: QuantMeasure's first positional parameter is
                # `update_stat`, not `num_bits` — the original call passed
                # the bit-width there, silently enabling stat tracking
                # instead of changing the activation precision.
                layer.quant = QuantMeasure(num_bits=bits_activation)
            if hasattr(layer, 'num_bits'):
                layer.num_bits = bits_weight
            if hasattr(layer, 'num_bits_bias'):
                layer.num_bits_bias = bits_bias
|
<filename>src/timessquare/config.py<gh_stars>0
"""Configuration definition."""
from __future__ import annotations
from enum import Enum
from typing import Any, Mapping, Optional
from urllib.parse import urlparse
from arq.connections import RedisSettings
from pydantic import (
BaseSettings,
Field,
HttpUrl,
PostgresDsn,
RedisDsn,
SecretStr,
validator,
)
from safir.arq import ArqMode
__all__ = ["Config", "Profile", "LogLevel"]
class Profile(str, Enum):
    """Deployment profile the application runs under."""

    production = "production"
    development = "development"
class LogLevel(str, Enum):
    """Standard Python logging level names accepted in configuration."""

    DEBUG = "DEBUG"
    INFO = "INFO"
    WARNING = "WARNING"
    ERROR = "ERROR"
    CRITICAL = "CRITICAL"
class Config(BaseSettings):
    """Times Square application settings, read from environment variables
    via pydantic's BaseSettings (``env=`` on each field)."""

    name: str = Field("times-square", env="SAFIR_NAME")

    profile: Profile = Field(Profile.production, env="SAFIR_PROFILE")

    log_level: LogLevel = Field(LogLevel.INFO, env="SAFIR_LOG_LEVEL")

    logger_name: str = "timessquare"
    """The name of the logger, which is also the root Python namespace
    of the application.
    """

    environment_url: HttpUrl = Field(env="TS_ENVIRONMENT_URL")
    """The base URL of the Rubin Science Platform environment.

    This is used for creating URLs to other RSP services.
    """

    gafaelfawr_token: SecretStr = Field(env="TS_GAFAELFAWR_TOKEN")
    """This token is used to make requests to other RSP services, such as
    Noteburst.
    """

    path_prefix: str = Field("/times-square", env="TS_PATH_PREFIX")
    """The URL prefix where the application's externally-accessible endpoints
    are hosted.
    """

    database_url: PostgresDsn = Field(..., env="TS_DATABASE_URL")

    database_password: SecretStr = Field(..., env="TS_DATABASE_PASSWORD")

    redis_url: RedisDsn = Field("redis://localhost:6379/0", env="TS_REDIS_URL")
    """URL for the redis instance, used by the worker queue."""

    github_app_id: Optional[str] = Field(None, env="TS_GITHUB_APP_ID")
    """The GitHub App ID, as determined by GitHub when setting up a GitHub
    App.
    """

    github_webhook_secret: Optional[SecretStr] = Field(
        None, env="TS_GITHUB_WEBHOOK_SECRET"
    )
    """The GitHub app's webhook secret, as set when the App was created. See
    https://docs.github.com/en/developers/webhooks-and-events/webhooks/securing-your-webhooks
    """

    github_app_private_key: Optional[SecretStr] = Field(
        None, env="TS_GITHUB_APP_PRIVATE_KEY"
    )
    """The GitHub app private key. See
    https://docs.github.com/en/developers/apps/building-github-apps/authenticating-with-github-apps#generating-a-private-key
    """

    enable_github_app: bool = Field(True, env="TS_ENABLE_GITHUB_APP")
    """Toggle to enable GitHub App functionality.

    If configurations required to function as a GitHub App are not set,
    this configuration is automatically toggled to False. It also also be
    manually toggled to False if necessary.
    """

    redis_queue_url: RedisDsn = Field(
        "redis://localhost:6379/1", env="TS_REDIS_QUEUE_URL"
    )

    queue_name: str = Field("arq:queue", env="TS_REDIS_QUEUE_NAME")
    """Name of the arq queue that the worker processes from."""

    arq_mode: ArqMode = Field(ArqMode.production, env="TS_ARQ_MODE")

    @validator("path_prefix")
    def validate_path_prefix(cls, v: str) -> str:
        # Handle empty path prefix (i.e. app is hosted on its own domain)
        if v == "":
            raise ValueError(
                "Times square does not yet support being hosted from "
                "the root path. Set a value for $TS_PATH_PREFIX."
            )
        # Remove any trailing / since individual paths operations add those.
        v = v.rstrip("/")
        # Add a / prefix if not present
        if not v.startswith("/"):
            v = "/" + v
        return v

    @validator("github_webhook_secret", "github_app_private_key", pre=True)
    def validate_none_secret(
        cls, v: Optional[SecretStr]
    ) -> Optional[SecretStr]:
        """Validate a SecretStr setting which may be "None" that is intended
        to be `None`.

        This is useful for secrets generated from 1Password or environment
        variables where the value cannot be null.
        """
        # pre=True: runs before pydantic coerces the raw env string.
        if v is None:
            return v
        elif isinstance(v, str):
            if v.strip().lower() == "none":
                return None
            else:
                return v
        else:
            raise ValueError(f"Value must be None or a string: {v!r}")

    @validator("enable_github_app")
    def validate_github_app(cls, v: bool, values: Mapping[str, Any]) -> bool:
        """Validate ``enable_github_app`` by ensuring that other GitHub
        configurations are also set.
        """
        if v is False:
            # Allow the GitHub app to be disabled regardless of other
            # configurations.
            return False
        # Force-disable when any required GitHub credential is missing.
        if (
            (values.get("github_app_private_key") is None)
            or (values.get("github_webhook_secret") is None)
            or (values.get("github_app_id") is None)
        ):
            return False
        return True

    @property
    def arq_redis_settings(self) -> RedisSettings:
        """Create a Redis settings instance for arq."""
        # RedisDsn is a str subtype, so urlparse can split it directly.
        url_parts = urlparse(self.redis_queue_url)
        redis_settings = RedisSettings(
            host=url_parts.hostname or "localhost",
            port=url_parts.port or 6379,
            database=int(url_parts.path.lstrip("/")) if url_parts.path else 0,
        )
        return redis_settings
# Module-level configuration singleton, populated from the environment at
# import time.
config = Config()
"""Configuration for Times Square."""
|
<filename>custom_loops/ICELoops.py
import os
import time

import numpy as np

from QTMtoolbox_master.functions import qtmlab
class ICELoops():
    """Measurement loops for the ICE setup, driving magnet and temperature
    sweeps through qtmlab.

    NOTE(review): several methods receive their sweep settings (begin, end,
    number_points, locatedata_folder, triton_instrument, variable, start,
    stop, rate, npoints, sweepdev, ...) via **kwargs and publish them as
    module globals — the bare names used later resolve through that side
    effect. This also means `np` (numpy) must be importable at module level;
    confirm the import exists.
    """

    def __init__(self):
        print('Running measurement using ICELoops class.')

    def RT_Field(self,instruments_dict, meas_list, dtw, magnet_instrument, variable, setpoint,
                 rate,**kwargs):
        # Cooldown resistance measurement at a series of magnetic fields.
        for arg in kwargs:
            # print(arg)
            # print(kwargs[arg])
            # HACK: expose kwargs as globals so later bare names resolve.
            globals()[arg]=kwargs[arg]
        # print(locatedata_folder)
        meas_dict = qtmlab.generate_meas_dict(instruments_dict, meas_list)
        qtmlab.meas_dict = meas_dict
        qtmlab.dtw = dtw
        magnet_instrument.write_rate(0.12)
        # Approach from above
        qtmlab.move(magnet_instrument, variable, setpoint, rate)
        time.sleep(10)
        magnet_instrument.write_fvalue(0)
        field_values = np.linspace(begin, end, number_points)
        for fval in field_values:
            # Ramp magnet while heating to save time
            # Don't move (blocks python), but rather set rate (see above) and give setpoint
            magnet_instrument.write_fvalue(fval)
            self.heat_triton_to_1p2K(triton_instrument)
            filename=os.path.join(locatedata_folder,'V-BST113-114_Rxx_cooldown_from_above_' + str(fval) + 'T.csv')
            qtmlab.record(2, 1000, filename, silent=True)
        magnet_instrument.write_fvalue(0)

    def heat_triton_to_1p2K(self,triton_instrument):
        # Heat the Triton fridge to 1.2 K via the LakeShore PID loop, wait
        # for the temperature, then turn the loop off again.
        print('Heating Triton:')
        print(' Close PID loop')
        triton_instrument.loop_on()
        time.sleep(10)
        # After turning on the loop, somehow the setpoint can be changed by the
        # LakeShore software itself (setpoint = current temp value)
        # Hence, afterwards update setpoint.
        print(' Reading setpoint')
        y = True
        while y:
            # NOTE(review): exact float comparison against 1.2 — relies on
            # the instrument echoing back exactly "1.2".
            x = float(triton_instrument.read_PID8())
            if x == 1.2:
                print(' Setpoint is already 1.2 K')
                time.sleep(3)
                y = False
            else:
                print(' Setpoint was' + str(x) + ' K. Changing to 1.2 K')
                triton_instrument.write_PID8(1.2)
                time.sleep(3)
        # The range can also be changed by the LakeShore software, so update it
        # afterwards as well
        print(' Set range to 10 mA')
        triton_instrument.write_range(10)
        time.sleep(3)
        qtmlab.waitfor(triton_instrument, 'temp8', 1.2, 0.2, 120)
        print(' Turn off PID loop')
        triton_instrument.loop_off()
        time.sleep(5)

    def IV_test(self,instruments_dict,meas_list,dtw,ivvi_instrument,keith_instrument,**kwargs):
        # Single IV sweep; sweep parameters arrive via **kwargs -> globals.
        for arg in kwargs:
            # print(arg)
            # print(kwargs[arg])
            globals()[arg]=kwargs[arg]
        print(locatedata_folder)
        meas_dict = qtmlab.generate_meas_dict(instruments_dict, meas_list)
        qtmlab.meas_dict = meas_dict
        qtmlab.dtw = dtw
        # filename=os.path.join(locatedata_folder,'data.dat')
        filename='datatest.dat'
        qtmlab.sweep(ivvi_instrument, variable, start, stop, rate, npoints, filename, sweepdev, md=None, scale='lin')
|
<reponame>bear/palala
#!/usr/bin/env python2.6
__author__ = "<NAME> (<EMAIL>)"
__copyright__ = "Copyright 2009-2010, <NAME>"
__license__ = "Apache v2"
__version__ = "0.1"
__contributors__ = []
"""
bSwitch - input bot switch
Loads a process per account to pull down each defined list
"""
import os, sys
import time
import json
import datetime
from xml.etree import cElementTree as ET
from xml.sax.saxutils import escape, quoteattr
from Queue import Empty
from multiprocessing import Process, Queue, current_process, freeze_support
import palala
import palala.xmpp
import palala.utils
log = None
xmppConfig = {}
inbound = Queue()
pubsub = Queue()
def parseXMPPConfig(filename):
    """
    Populate the module-level xmppConfig dict from a config file.

    Each non-comment line is "jid, password"; blank lines and lines starting
    with '#' are skipped. Missing/None filenames are ignored silently.
    """
    if filename is not None and os.path.exists(filename):
        # FIX: use a context manager so the file handle is closed promptly
        # (the original leaked the handle returned by open()).
        with open(filename, 'r') as config_file:
            lines = config_file.readlines()
        for line in lines:
            if len(line) > 0 and not line.startswith('#'):
                l = line[:-1].split(',')
                if len(l) > 1:
                    # jid, password
                    jid = l[0].strip()
                    pw = l[1].strip()
                    if jid not in xmppConfig:
                        xmppConfig[jid] = {}
                    x = xmppConfig[jid]
                    x['jid'] = jid
                    x['password'] = pw
#data = { 'source': { 'type': 'twitter',
# 'resource': user['id'],
# },
# 'timestamp': '%s' % datetime.datetime.now().strftime('%Y-%m-%dT%H%M%SZ'),
# 'metadata': { 'user': user },
# 'event': 'user',
# }
#data = { 'source': { 'type': 'twitter',
# 'channel': 'friends',
# 'resource': cfgTwitter['userid'],
# },
# 'timestamp': '%s' % datetime.datetime.now().strftime('%Y-%m-%dT%H%M%SZ'),
# 'metadata': { 'post': item },
# 'event': 'inbound',
# }
# friends timeline
#{"created_at": "Fri Nov 13 07:45:00 +0000 2009",
# "favorited": false,
# "id": 5674211579,
# "source": "<a href=\"http://blip.fm\" rel=\"nofollow\">Blip.fm</a>",
# "text": "listening to \"Damien Rice - 9 Crimes - Official Video\" \u266b http://blip.fm/~g9zm7",
# "truncated": false,
# "user": {"description": "just a girl in a virtual world - VW developer: www.thevesuviusgroup.com & non-profit web 2.0 gal: olp.globalkids.org",
# "favourites_count": 113,
# "followers_count": 2144,
# "friends_count": 2245,
# "id": 816952,
# "location": "Boston, MA",
# "name": "<NAME>",
# "profile_background_color": "FFFBF0",
# "profile_background_tile": true,
# "profile_image_url": "http://a3.twimg.com/profile_images/478765027/09162009587_normal.jpg",
# "profile_link_color": "B73030",
# "profile_sidebar_fill_color": "http://a3.twimg.com/profile_background_images/3309557/moi-twitter2.jpg",
# "profile_text_color": "000000",
# "protected": false,
# "screen_name": "RhiannonSL",
# "statuses_count": 2252,
# "time_zone": "Eastern Time (US & Canada)",
# "url": "http://joycebettencourt.com",
# "utc_offset": -18000
# }
# }
def toAtom(data):
    """
    Build an Atom <entry> ElementTree element from a normalized post dict.

    Expects keys: title, referenceURL, guid, uri, fullname, postDate, content.
    Returns the assembled <entry> Element.
    """
    # Title is HTML-escaped and UTF-8 encoded (this is Python 2 code).
    xmlTitle = palala.utils.escXML(data['title']).encode('UTF-8')
    atom = ET.Element('{http://www.w3.org/2005/Atom}entry')
    a_title = ET.Element('title', {'type': 'html'})
    a_title.text = xmlTitle
    a_link = ET.Element('link', {'href': data['referenceURL']})
    a_id = ET.Element('id')
    a_id.text = data['guid']
    a_uri = ET.Element('uri')
    a_uri.text = data['uri']
    a_author = ET.Element('author')
    a_name = ET.Element('name')
    a_name.text = data['fullname']
    a_author.append(a_name)
    a_author.append(a_uri)
    # published and updated carry the same timestamp.
    a_published = ET.Element('published')
    a_published.text = data['postDate']
    a_updated = ET.Element('updated')
    a_updated.text = data['postDate']
    a_source = ET.Element('source')
    a_source_title = ET.Element('title', {'type': 'html'})
    a_source_id = ET.Element('id')
    a_source_generator = ET.Element('generator')
    a_source_title.text = xmlTitle
    a_source_id.text = data['uri']
    a_source_generator.text = 'Palala %s' % __version__
    a_source.append(a_source_generator)
    a_source.append(a_source_id)
    a_source.append(a_source_title)
    # NOTE(review): a_updated is appended to both <source> and <entry>;
    # ElementTree allows an element to appear under multiple parents, but
    # the shared node is mutated everywhere if changed.
    a_source.append(a_updated)
    a_content = ET.Element('content')
    a_content.text = data['content']
    atom.append(a_id)
    atom.append(a_title)
    atom.append(a_updated)
    atom.append(a_published)
    atom.append(a_link)
    atom.append(a_author)
    atom.append(a_source)
    atom.append(a_content)
    log.debug('xml: %s' % palala.utils.tostring(atom, namespace_map={}, cdata=('encoded',)))
    return atom
def processTwitterPost(xmpp, body):
    """Convert a JSON twitter post payload into an Atom entry and publish it
    on the XMPP pubsub node."""
    jData = json.loads(body)
    guid, atom = twitterToAtom(jData)
    try:
        xmpp.pubsub.setItem(xmpp.rootnode, 'test::atom', [(guid, atom),])
    except Exception:
        # FIX: narrowed from a bare `except:` so SystemExit/KeyboardInterrupt
        # are not swallowed; publish failures are still logged best-effort.
        palala.utils.dumpException('processXMPP pubsub send')
def twitterToAtom(jData):
    """
    Convert an inbound twitter post payload into ``(guid, atom Element)``.

    Uses the cached twitter user record, when available, for the display
    name and screen name.
    """
    s = jData['timestamp'][:19]  # 2008-08-19T19:45:14Z or 2008-08-19T19:45:14.183Z
    t = time.strptime(s, '%Y-%m-%dT%H:%M:%S')
    # BUG FIX: the original passed t[6] (tm_wday, the weekday) as the
    # datetime *microsecond* argument; use only the Y/M/D H:M:S fields.
    createDate = datetime.datetime(*t[:6])
    # s = jData['metadata']['post']['created_at'][:19] # Fri Nov 13 07:45:00 +0000 2009
    # t = time.strptime(s, '%Y-%m-%dT%H:%M:%S')
    # postDate = datetime.datetime(t[0],t[1],t[2],t[3],t[4],t[5],t[6])
    postDate = createDate
    postID = '%s' % jData['metadata']['post']['id']
    userID = '%s' % jData['metadata']['post']['user']['id']
    username = '%s' % jData['metadata']['post']['user']['screen_name']
    fullname = username
    # Prefer the cached user record when present.
    uData = palala.cache('twitter:user:id:%s' % userID)
    if uData is not None:
        jUser = json.loads(uData)
        fullname = jUser['name']
        username = jUser['screen_name']
    guid = '%s-%s-%s' % (jData['source']['type'], userID, postID)
    postURL = 'http://twitter.com/%s/status/%s' % (username, postID)
    atom = {}
    atom['guid'] = guid
    atom['referenceURL'] = postURL
    atom['uri'] = postURL
    atom['createDate'] = createDate.strftime('%Y-%m-%dT%H:%M:%S+00:00')
    atom['postDate'] = postDate.strftime('%Y-%m-%dT%H:%M:%S+00:00')
    atom['title'] = 'Post from %s (via )' % username
    atom['fullname'] = fullname
    atom['username'] = username
    atom['content'] = jData['metadata']['post']['text']
    return atom['guid'], toAtom(atom)
def updateTwitterUser(body):
    """Cache the twitter user record embedded in an inbound post payload."""
    try:
        jData = json.loads(body)
        user = jData['metadata']['post']['user']
        palala.cache('twitter:user:id:%s' % user['id'], json.dumps(user))
    except Exception:
        # FIX: narrowed from a bare `except:`. Deliberately best-effort — a
        # malformed payload must not kill the switch — but interrupts and
        # SystemExit now propagate.
        palala.utils.dumpException('updateTwitterUser()')
def eventHandler(event):
    """Route events delivered by palala: twitter posts from the 'rmq'
    source go onto the inbound queue for processing."""
    source, type, exchange, key, body = event
    log.info('switch: %s %s %s %s' % (source, type, exchange, key))
    try:
        if source == 'rmq':
            if type == 'post':
                if key == 'twitter.post':
                    log.info('pushing %s item to inbound queue' % key)
                    inbound.put((key, body))
    except Exception:
        # FIX: narrowed from a bare `except:` — the handler must never crash
        # the event loop, but KeyboardInterrupt/SystemExit should propagate.
        palala.utils.dumpException('rmq eventHandler')
def processInbound(publish, qInbound, qPubsub):
    """Worker loop: forward items from the inbound queue to the pubsub queue.

    Polls qInbound non-blockingly, sleeping 1s when it is empty. Runs until
    the process is terminated.
    """
    log.info('processInbound start')
    while True:
        try:
            item = qInbound.get(False)
            if item is not None:
                try:
                    key, body = item
                    #log.info('updating twitter user cache')
                    #try:
                    #    updateTwitterUser(body)
                    #except:
                    #    palala.utils.dumpException('updateTwitterUser()')
                    log.info('pushing to pubsub queue')
                    qPubsub.put((key, body))
                except Exception:
                    # FIX: narrowed from a bare `except:`.
                    palala.utils.dumpException('processInbound loop')
        except Empty:
            time.sleep(1)
    # Unreachable (the loop never breaks); kept for symmetry with processXMPP.
    log.info('processInbound stop')
def processXMPP(publish, qPubsub, cfg):
    """Worker process: connect one XMPP account (cfg['jid']/cfg['password'])
    and publish queued twitter posts to its pubsub node."""
    log.info('processXMPP start')
    try:
        xmpp = palala.xmpp.xmppService(cfg['jid'], cfg['password'], palala.publish)
        xmpp.connect()
        xmpp.process(threaded=True)
        while True:
            try:
                item = qPubsub.get(False)
                if item is not None:
                    try:
                        key, body = item
                        processTwitterPost(xmpp, body)
                    except Exception:
                        # FIX: narrowed from a bare `except:`.
                        palala.utils.dumpException('twitterToAtom()')
            except Empty:
                time.sleep(1)
    except Exception:
        # FIX: narrowed from a bare `except:`; startup failures are logged.
        palala.utils.dumpException('exception during XMPP startup')
    log.info('processXMPP stop')
# Default palala configuration values for this bot.
_configDefaults = { 'rmqConfig': 'switch.rmq',
                  }
# Extra config/CLI items: name -> (short flag, long flag, default, help).
_configItems = {
    'xmppConfig': ('', '--xmppconfig', 'xmpp-switch.cfg', 'XMPP Config file'),
    }
if __name__ == '__main__':
    # Initialize palala (config, logging, messaging) before anything else.
    palala.init('bSwitch', configDefaults=_configDefaults, configItems=_configItems)
    parseXMPPConfig(palala.options.xmppConfig)
    log = palala.log
    if palala.start(eventHandler):
        # One XMPP publisher process per configured account, plus one
        # inbound-forwarder process; both share the module-level queues.
        for jid in xmppConfig:
            Process(target=processXMPP, args=(palala.publish, pubsub, xmppConfig[jid],)).start()
        Process(target=processInbound, args=(palala.publish, inbound, pubsub)).start()
|
<reponame>Cylon-bot/toolbox-for-trading-bot
from typing import List, Optional, Union, Dict
import pandas as pd
__author__ = "<NAME>"
__copyright__ = "Copyright 2021, Thibault Delrieu"
__license__ = "MIT"
__maintainer__ = "<NAME>"
__email__ = "<EMAIL>"
__status__ = "Production"
class Candle:
    """
    class with all the important information about the candles you use for your strat
    """

    def __init__(
        self,
        candle_info: Union[Dict, pd.DataFrame],
        bollinger_band: bool = False,
        rsi: bool = False,
        minimum_rejection: Optional[float] = None,
        ema_list: Optional[List[int]] = None,
        body_min: Optional[float] = None,
        id_candle: Optional[int] = None,
    ):
        # Raw OHLC data; candle_info may be a dict or a DataFrame row with
        # keys open/close/high/low/time (plus indicator columns when used).
        self.ID = id_candle
        self.open = candle_info["open"]
        self.close = candle_info["close"]
        self.high = candle_info["high"]
        self.low = candle_info["low"]
        self.date = candle_info["time"]
        self.body = self.close - self.open  # signed body size (close - open)
        self.EMAs = self.add_ema_info(candle_info, ema_list)
        self.bullish = self.check_bullish_or_bearish()
        if bollinger_band:
            self.upper_bollinger = candle_info["upper_bollinger"]
            self.middle_bollinger = candle_info["middle_bollinger"]
            self.lower_bollinger = candle_info["lower_bollinger"]
        if rsi:
            self.RSI = candle_info["RSI"]
        # NOTE(review): a pip is hard-coded as 1e-4, which assumes non-JPY
        # forex-style prices — confirm for other instruments.
        pips = 0.0001
        if minimum_rejection is not None:
            minimum_rejection_pips = minimum_rejection * pips
            self.high_rejection, self.low_rejection = self.is_rejection_wicks(
                minimum_rejection_pips
            )
        else:
            # The string "Unknown" is used as a sentinel instead of None
            # throughout this class.
            self.high_rejection, self.low_rejection = "Unknown", "Unknown"
        if body_min is not None and minimum_rejection is not None:
            body_min_pips = body_min * pips
            self.doji = self.check_doji(body_min_pips)
        else:
            self.doji = "Unknown"
        self.engulfing = "Unknown"

    def check_bullish_or_bearish(self) -> bool:
        """
        check if the candle is bullish or bearish
        """
        # A flat candle (close == open) counts as bullish.
        if self.close - self.open >= 0:
            return True
        else:
            return False

    @staticmethod
    def add_ema_info(
        candle_info: pd.DataFrame, ema_list: Optional[List[int]]
    ) -> Optional[dict[int, any]]:
        """
        add the info of the EMA to the candle

        Returns a mapping of EMA period -> value, or None when no periods
        were requested. Expects columns named "EMA<period>" to exist.
        """
        emas = dict()
        if ema_list is None:
            return None
        for ema in ema_list:
            emas[ema] = candle_info[f"EMA{str(ema)}"]
        return emas

    def check_engulfing(self, previous_candle: "Candle"):
        """
        check if the candle is an engulfing candle
        """
        if self.bullish and not previous_candle.bullish:
            # Bullish engulfing: close above the previous candle's high.
            if self.close > previous_candle.high:
                self.engulfing = True
            else:
                self.engulfing = False
        elif not self.bullish and previous_candle.bullish:
            # Bearish engulfing: close below the previous candle's low.
            if self.close < previous_candle.low:
                self.engulfing = True
            else:
                self.engulfing = False
        else:
            # Same direction as the previous candle: cannot be engulfing.
            self.engulfing = False

    def check_doji(self, body_min) -> Optional[bool]:
        """
        check if the candle is a doji, need a body min parameter
        """
        if (
            self.high_rejection == "Unknown"
            or self.low_rejection == "Unknown"
            or self.body == "Unknown"
        ):
            return None
        # Doji: at least one rejection wick and a body no larger than body_min.
        if (self.high_rejection or self.low_rejection) and abs(self.body) <= body_min:
            return True
        else:
            return False

    def is_rejection_wicks(self, minimum_rejection: float) -> (bool, bool):
        """
        check if the wicks are rejection wicks, need minimum rejection parameter

        Returns (high_rejection, low_rejection).
        """
        # Wick sizes depend on direction: for a bullish candle the top wick
        # is high-close and the bottom wick is open-low (and vice versa).
        if self.bullish:
            if (
                self.high - self.close > minimum_rejection
                and self.open - self.low > minimum_rejection
            ):
                return True, True
            elif self.high - self.close > minimum_rejection > self.open - self.low:
                return True, False
            elif self.high - self.close < minimum_rejection < self.open - self.low:
                return False, True
            else:
                return False, False
        if not self.bullish:
            if (
                self.high - self.open > minimum_rejection
                and self.close - self.low > minimum_rejection
            ):
                return True, True
            elif self.high - self.open > minimum_rejection > self.close - self.low:
                return True, False
            elif self.high - self.open < minimum_rejection < self.close - self.low:
                return False, True
            else:
                return False, False

    def print_details(self, print_message: bool = False):
        """
        print all the details of the candle for the user --> need refactoring (use __str__ instead)
        """
        pips = 0.0001
        pips_inverse = 1 / pips  # convert price units back into pips
        message = (
            f"Candle: {self.date}\n"
            f"Open: {self.open}\n"
            f"Close: {self.close}\n"
            f"High: {self.high}\n"
            f"Low: {self.low}\n"
            f"body: {round(self.body * pips_inverse, 2)} Pips\n"
            f"doji: {self.doji}\n"
            f"EMA: {self.EMAs}\n"
            f"Bullish: {self.bullish}\n"
            f"Engulfing: {self.engulfing}\n"
            f"High_rejection: {self.high_rejection}\n"
            f"Low_rejection: {self.low_rejection}\n"
            f'{"." * 50}'
        )
        if print_message:
            print(message)
        return message
def rebuild_candle(candles_workers: pd.DataFrame):
    """
    Collapse several consecutive candles into one aggregate Candle.

    open/time come from the first row, close from the last row, and
    high/low are the extremes over all rows.
    """
    built_candle_dict = {"close": candles_workers["close"].iloc[-1]}
    # FIX: the original iterated `candles_workers.iloc()`, which only walked
    # the rows by accident (via the legacy __getitem__ iteration protocol on
    # the indexer object); iterrows() is the supported way to get row Series.
    for number_candle, (_, candle) in enumerate(candles_workers.iterrows()):
        if number_candle == 0:
            built_candle_dict["high"] = candle["high"]
            built_candle_dict["low"] = candle["low"]
            built_candle_dict["open"] = candle["open"]
            built_candle_dict["time"] = candle["time"]
        else:
            if candle["high"] > built_candle_dict["high"]:
                built_candle_dict["high"] = candle["high"]
            if candle["low"] < built_candle_dict["low"]:
                built_candle_dict["low"] = candle["low"]
    built_candle = Candle(built_candle_dict)
    return built_candle
|
<filename>hdtscraper/main.py
import requests
from bs4 import BeautifulSoup
import re
import time
import os
class hdtscraper:
    """Scraper for hdtorrents.xyz: logs in, lists movie torrents and
    downloads .torrent files. Sessions are re-established after
    ``session_length`` seconds."""

    page_url = 'https://hdtorrents.xyz/index.php?page={}'
    session = None
    session_start = None
    login_url = 'https://hdtorrents.xyz/takelogin.php'
    home_url = 'https://hdtorrents.xyz'
    session_length = 10 * 60  # seconds before a session is considered stale

    def __init__(self, username, password):
        self.username = username
        self.password = password

    def __login(self):
        """Start a fresh requests session and authenticate against the site."""
        self.session = requests.Session()
        self.session_start = time.time()
        response = self.session.get(self.login_url)
        assert response.status_code == 200, f'The login url "{self.login_url}" returned status code "{response.status_code}"'
        # BUG FIX: the password value was a redacted `<PASSWORD>` placeholder
        # (a syntax error); use the credential stored on the instance.
        credentials = dict(username=self.username, password=self.password)
        # CSRF token is pulled out of the raw set-cookie header.
        csrf_token = response.raw.headers._container['set-cookie'][1].split('=')[1].split(';')[0]
        credentials['csrfmiddlewaretoken'] = csrf_token
        response = self.session.post(self.login_url, data=credentials, headers={'Referer': self.home_url})
        assert response.status_code == 200, f'The login url "{self.login_url}" returned status code "{response.status_code}"'

    @staticmethod
    def is_up(home_url='https://hdtorrents.xyz'):
        """Return True when the site responds with HTTP 200."""
        try:
            session = requests.session()
            response = session.get(home_url)
            assert response.status_code == 200
        except Exception:
            # FIX: narrowed from a bare `except:` so Ctrl-C is not swallowed.
            return False
        return True

    def login(self):
        """Return a logged-in session, re-authenticating when stale."""
        if self.session_start is None or time.time() > (self.session_start + self.session_length):
            self.__login()
        return self.session

    def get_last(self, movie_count, max_pages=50):
        """Return the newest `movie_count` movies, scanning up to max_pages pages."""
        movies = []
        for i in range(max_pages):
            movies = movies + self.get_page(i)
            if len(movies) >= movie_count:
                break
        return movies[:movie_count]

    def get_new(self, hd_id, max_pages=50):
        """Return movies newer than `hd_id` (all scanned movies if it is not found)."""
        movies = []
        hd_ids = []
        for i in range(max_pages):
            movies = movies + self.get_page(i)
            hd_ids = [m['hd_id'] for m in movies]
            if str(hd_id) in hd_ids:
                break
        if str(hd_id) in hd_ids:
            return movies[:hd_ids.index(str(hd_id))]
        else:
            return movies

    def get_page(self, page_index):
        """Scrape one listing page into a list of movie dicts."""
        session = self.login()
        response = session.get(self.page_url.format(page_index))
        content = response.content
        soup = BeautifulSoup(content, "html.parser")
        raw_movies = soup.find_all('td',{'style':'text-align: left;'})
        movies = []
        for movie_content in raw_movies:
            try:
                movie = {}
                movie['resolution'] = movie_content.find_all('img',{'align':'right', 'border':'0'})[0]['title']
                movie['imdb_url'] = movie_content.find_all('a',{'target':'blank'})[0]['href']
                movie['imdb_code'] = movie['imdb_url'].split('/')[-2]
                movie['size'] = float(re.findall(r'(?P<size>\d+\.\d+) GB', str(movie_content))[0])
                right_content = movie_content.find_all('div',{'align':'right'})[0]
                movie['torrent_url'] = self.home_url + '/' + right_content.find_all('a')[0]['href']
                movie['torrent_name'] = movie['torrent_url'].split('=')[-1]
                movie['torrent_details_url'] = self.home_url + '/' + right_content.find_all('a')[1]['href']
                movie['hd_id'] = movie['torrent_details_url'].split('=')[-1]
                movies.append(movie)
            except Exception:
                # Deliberately best-effort: skip table cells that do not
                # parse as a movie row (narrowed from a bare `except:`).
                pass
        return movies

    def download_torrent(self, movie, dst_path):
        """Download the movie's .torrent file into dst_path."""
        session = self.login()
        response = session.get(movie['torrent_url'])
        with open(os.path.join(dst_path, movie['torrent_name']), 'wb') as torrent:
            torrent.write(response.content)
if __name__ == '__main__':
    import getpass
    if hdtscraper.is_up():
        username = input('Username: ')
        # BUG FIX: was `password = <PASSWORD>('Password: ')`, a redaction
        # placeholder that is not valid Python; getpass prompts without echoing.
        password = getpass.getpass('Password: ')
        hd = hdtscraper(username, password)
        movie = hd.get_last(1)[0]
        hd.download_torrent(movie, '.')
    else:
        print('The website is down')
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
import pickle
import argparse
import random
import copy
from transformers import AutoTokenizer
import torch
import torch.nn as nn
from eval import get_token_segments, confusion_matrix_tokens, evaluate_model_answer_spans, get_jaccard_score
# Marker tokens wrapped around candidate answers in the CRA inputs.
CRA_TOKENS = ['[BGN]', '[END]']
# convert_tokens_to_ids -- to get id of tokens!!
# use this to remove BGN and END from CRA encodings.
def get_CRA_map(CRA_data):
    """Group CRA items by their 'context_id' field.

    Returns a dict mapping context_id -> list of CRA items, in input order.
    """
    grouped = {}
    for entry in CRA_data:
        grouped.setdefault(entry['context_id'], []).append(entry)
    return grouped
def get_tokens_for_segments(data, segments, tokenizer):
    """Reassemble surface strings for the given (start, length) token segments.

    WordPiece continuation tokens ('##...') are glued onto the previous token;
    all other tokens are joined with single spaces.
    """
    tokens = tokenizer.convert_ids_to_tokens(data["input_ids"])
    phrases = []
    for start, length in segments:
        phrase = ''
        for offset in range(length):
            token = tokens[start + offset]
            # NOTE(review): a None token (special position) would crash on
            # startswith — same as the original implementation.
            if token.startswith('##'):
                phrase += token[2:]
            elif phrase:  # not the first word in the answer phrase
                phrase += ' ' + token
            else:
                phrase = token
        phrases.append(phrase)
    return phrases
def get_answers(data, tokenizer, answer_type):
    """Return the answer phrases encoded by one label field of `data`.

    `answer_type` selects which label sequence to read (e.g. 'true_labels' or
    'predicted_labels').  Segment extraction and word-piece joining are
    delegated to get_token_segments / get_tokens_for_segments instead of
    duplicating the assembly loop here (the original body was a verbatim copy
    of get_tokens_for_segments).
    """
    word_ids = data.word_ids()
    segments = get_token_segments(data[answer_type], word_ids)
    return get_tokens_for_segments(data, segments, tokenizer)
def remove_BGN_END_tokens(CRA_data, tokenizer):
    """Drop the [BGN]/[END] marker tokens (and their labels) from a CRA item.

    Filters 'input_ids', 'true_labels' and 'predicted_labels' in lockstep,
    mutating CRA_data in place and returning it.
    """
    marker_ids = tokenizer.convert_tokens_to_ids(CRA_TOKENS)
    keep = [i for i, tok_id in enumerate(CRA_data['input_ids'])
            if tok_id not in marker_ids]
    CRA_data['input_ids'] = [CRA_data['input_ids'][i] for i in keep]
    CRA_data['true_labels'] = [CRA_data['true_labels'][i] for i in keep]
    CRA_data['predicted_labels'] = [CRA_data['predicted_labels'][i] for i in keep]
    return CRA_data
def make_CRA_seg_str(segments):
    """Return unique 'start length' string keys for the given segments.

    First-seen order is preserved; dict.fromkeys performs the de-duplication
    in O(n) instead of the original quadratic `key in list` scan.
    """
    return list(dict.fromkeys(str(s[0]) + ' ' + str(s[1]) for s in segments))
def label_CA_data_mod(data, ok_answers):
    """
    Function to label the CA data with only the roundtrip consistent answers.

    Returns a deep copy of `data` whose 'predicted_labels' are rebuilt from
    the (start, length) spans in `ok_answers`: 1 marks a span start, 2 marks
    its continuation, 0 everything else.
    """
    relabeled = copy.deepcopy(data)
    labels = [0] * len(data['predicted_labels'])
    for start, length in ok_answers:
        for offset in range(length):
            labels[start + offset] = 1 if offset == 0 else 2
    relabeled['predicted_labels'] = labels
    return relabeled
def compare_token_segments(CA_data, CRA_data, tokenizer, context_text_map):
    """Keep only candidate-answer (CA) spans that overlap a roundtrip (CRA) span.

    For each CA item, all CRA predicted segments of the same context are
    collected; CA segments that overlap any CRA segment are kept (exact
    start/length matches are counted separately).  Each CA item is then
    relabeled with only the kept spans, and token- and span-level statistics
    are printed.  Returns the list of relabeled CA items.
    """
    num_removed_exact = 0
    num_predicted_answers_exact = 0
    num_removed = 0
    num_predicted_answers = 0
    total_num_answers = 0
    CA_data_mod = []
    y_labels = []
    y_preds = []
    # Span-level tallies accumulated over all items.
    answer_stats = {'FP': 0, 'TP': 0, 'FN': 0, 'jaccard': [], 'overlap': []}
    for data in CA_data:
        word_ids = data.word_ids()
        # Predicted (start, length) token segments for this CA item.
        segments = get_token_segments(data['predicted_labels'], word_ids)
        context_id = data['context_id']
        print('context text id: ', context_id)
        CRA_data_context = context_text_map[context_id]
        CRA_segments = []
        # collect all CRA segments!
        # NOTE(review): this loop variable shadows the CRA_data parameter, and
        # remove_BGN_END_tokens mutates the CRA items in place.
        for CRA_data in CRA_data_context:
            data_mod = remove_BGN_END_tokens(CRA_data, tokenizer)
            CRA_word_ids = data_mod.word_ids()
            CRA_segments += get_token_segments(data_mod['predicted_labels'], CRA_word_ids)
        # label_answers = get_answers(data, tokenizer, 'true_labels')
        # total_num_answers += len(label_answers)
        # print('correct_answers: ', label_answers)
        CRA_keys = make_CRA_seg_str(CRA_segments)
        ok_answers = []
        ok_answers_exact = []
        for s in segments:
            start = s[0]
            end = s[0]+s[1]-1
            key = str(s[0]) + ' ' + str(s[1])
            added = False
            for s_cra in CRA_segments:
                start_cra = s_cra[0]
                end_cra = s_cra[0]+s_cra[1]-1
                # Keep the CA span the first time it overlaps any CRA span.
                if not added and start <= end_cra and end >= start_cra:
                    #there is overlap!
                    # jacc = get_jaccard_score(s, s_cra)
                    # if jacc > 0.5:
                    ok_answers.append(s)
                    added = True
                    num_predicted_answers += 1
            if not added:
                num_removed += 1
            # Exact (start, length) match bookkeeping, counted independently
            # of the overlap-based filtering above.
            if key in CRA_keys:
                ok_answers_exact.append(s)
                num_predicted_answers_exact += 1
            else:
                num_removed_exact += 1
        ans = get_tokens_for_segments(data, ok_answers, tokenizer)
        print(ans)
        # label the CA data with only the roundtrip consistent labels
        data_mod = label_CA_data_mod(data, ok_answers)
        CA_data_mod.append(data_mod)
        y_labels += data_mod['true_labels']
        y_preds += data_mod['predicted_labels']
        item_stats = evaluate_model_answer_spans(data_mod['true_labels'], data_mod['predicted_labels'], word_ids)
        answer_stats['FP'] += item_stats['FP']
        answer_stats['TP'] += item_stats['TP']
        answer_stats['FN'] += item_stats['FN']
        answer_stats['jaccard'] += item_stats['jaccard']
        answer_stats['overlap'] += item_stats['overlap']
    print('number of predicted: ', num_predicted_answers)
    print('number of removed: ', num_removed)
    confusion_matrix_tokens(y_labels, y_preds, 'TEST')
    # NOTE(review): these divisions raise ZeroDivisionError when TP+FP or
    # TP+FN is zero (e.g. empty input).
    pre = answer_stats['TP']/(answer_stats['TP']+answer_stats['FP'])
    rec = answer_stats['TP']/(answer_stats['TP']+answer_stats['FN'])
    f1 = 2 * (pre * rec)/(pre + rec)  # computed but never printed/returned
    print('Precision, answers: {:.2f}'.format(pre))
    print('Recall, answers: {:.2f}'.format(rec))
    print('Mean Jaccard score: {:.2f}'.format(np.mean(np.ravel(answer_stats['jaccard']))))
    print('Mean answer length diff (predicted - true): {:.2f}'.format(np.mean(np.ravel(answer_stats['overlap']))))
    return CA_data_mod
def compare_text_segments(CA_data, CRA_data, tokenizer, context_text_map):
    """Compare CA and CRA predictions at the answer-text level.

    An answer string predicted on a CA item counts as consistent when the
    same string was also predicted on any CRA item of the same context.
    Prints per-item and overall counts; returns nothing.
    """
    removed = 0
    predicted_total = 0
    answers_total = 0
    for item in CA_data:
        predicted = get_answers(item, tokenizer, 'predicted_labels')
        # Gather every CRA-predicted answer string for this context.
        cra_answers = []
        for cra_item in context_text_map[item['context_id']]:
            cra_answers += get_answers(cra_item, tokenizer, 'predicted_labels')
        gold = get_answers(item, tokenizer, 'true_labels')
        answers_total += len(gold)
        print('correct_answers: ', gold)
        consistent = []
        for answer in predicted:
            predicted_total += 1
            if answer in cra_answers:
                print('answer: ', answer)
                consistent.append(answer)
            else:
                removed += 1
    print('number of predicted: ', predicted_total)
    print('number of removed: ', removed)
    print('number of answers: ', answers_total)
def compare_CA_CRA_predictions(CA_data, CRA_data, tokenizer):
    """Compare CA predictions against CRA predictions, grouped per context."""
    # Map each context id to its CRA items so the comparison can look them up.
    context_text_map = get_CRA_map(CRA_data)
    print('len CRA map: ', len(context_text_map.keys()))
    compare_token_segments(CA_data, CRA_data, tokenizer, context_text_map)
    # compare_text_segments(CA_data, CRA_data, tokenizer, context_text_map)
def main(args):
    """Load the pickled CA and CRA model outputs and compare their predictions."""
    tokenizer = AutoTokenizer.from_pretrained('KB/bert-base-swedish-cased')
    num_added_toks = tokenizer.add_tokens(CRA_TOKENS)
    print('Added ', num_added_toks, 'tokens')
    with open(args.CA_data_path, "rb") as fh:
        CA_data = pickle.load(fh)
    with open(args.CRA_data_path, "rb") as fh:
        CRA_data = pickle.load(fh)
    compare_CA_CRA_predictions(CA_data, CRA_data, tokenizer)
if __name__ == '__main__':
    # Command-line arguments: paths to the pickled CA and CRA model outputs.
    parser = argparse.ArgumentParser(description='Prepare dataset with labels')
    parser.add_argument('CA_data_path', type=str,
                        help='path CA output data')
    parser.add_argument('CRA_data_path', type=str,
                        help='path to CRA data file')
    main(parser.parse_args())
"""Perform integration tests for `orion.algo.ax`."""
import statistics as stats
from typing import ClassVar, List
import pytest
from orion.algo.axoptimizer import has_Ax
from orion.benchmark.task.base import BenchmarkTask
from orion.core.utils import backward
from orion.testing.algo import BaseAlgoTests, TestPhase, first_phase_only
if not has_Ax:
pytest.skip("skipping Ax tests", allow_module_level=True)
else:
import numpy
from botorch.test_functions.multi_objective import BraninCurrin
from torch import Tensor
# Number of quasi-random (Sobol) initialization trials before the BO phase.
N_INIT = 5
# Numeric tolerance (atol/rtol) used when comparing sampled parameter vectors.
TOL = 0.1
class BraninCurrinTask(BenchmarkTask):
    r"""Two objective problem composed of the Branin and Currin functions.

    Branin (rescaled):
        f(x) = (
        15*x_1 - 5.1 * (15 * x_0 - 5) ** 2 / (4 * pi ** 2) + 5 * (15 * x_0 - 5)
        / pi - 5
        ) ** 2 + (10 - 10 / (8 * pi)) * cos(15 * x_0 - 5)

    Currin:
        f(x) = (1 - exp(-1 / (2 * x_1))) * (
        2300 * x_0 ** 3 + 1900 * x_0 ** 2 + 2092 * x_0 + 60
        ) / 100 * x_0 ** 3 + 500 * x_0 ** 2 + 4 * x_0 + 20
    """

    def __init__(self, max_trials=20):
        # One BraninCurrin instance is reused for every evaluation.
        self._branincurrin = BraninCurrin()
        super().__init__(max_trials=max_trials)

    # pylint: disable=arguments-differ
    def call(self, x):
        """Evaluate 2-D branin and currin functions."""
        _y = self._branincurrin(Tensor(x))
        # The second objective is reported with type "statistic" so the test
        # suite can exercise the `extra_objectives` configuration option.
        return [
            dict(name="branin", type="objective", value=_y[0].item()),
            dict(name="currin", type="statistic", value=_y[1].item()),
        ]

    def get_search_space(self):
        """Return the search space for the task objective function"""
        rspace = {"x": "uniform(0, 1, shape=2, precision=10)"}
        return rspace
class TestAxOptimizer(BaseAlgoTests):
    """Test suite for algorithm AxOptimizer"""

    algo_name = "axoptimizer"
    max_trials = 20
    # Algorithm configuration shared by every test in this suite.
    config = {
        "seed": 1234,  # Because this is so random
        "n_initial_trials": N_INIT,
        "extra_objectives": set(),
        "constraints": [],
    }
    # Two optimization phases: Sobol quasi-random init, then Bayesian optimization.
    phases: ClassVar[List[TestPhase]] = [
        TestPhase("Sobol", 0, "space.sample"),
        TestPhase("BO", N_INIT, "space.sample"),
    ]

    @first_phase_only
    def test_configuration_fail(self):
        """Test that Ax configuration is valid"""
        # '=' is not a valid comparison operator for a constraint string.
        with pytest.raises(AssertionError) as exc_info:
            self.create_algo(
                config={**self.config, "constraints": ["constraint = 3"]},
            )
        assert exc_info.value
        # NOTE(review): strict '<' is apparently rejected as well — confirm
        # against AxOptimizer's constraint parsing (only '<='/'>=' accepted?).
        with pytest.raises(AssertionError) as exc_info:
            self.create_algo(
                config={**self.config, "constraints": ["constraint < 10"]},
            )
        assert exc_info.value

    @first_phase_only
    def test_is_done_cardinality(self, *args, **kwargs):
        """Run the base cardinality check with a temporarily raised trial budget."""
        # Set higher max_trials to explore all cardinality space
        _max_trials = self.max_trials
        self.max_trials = 200
        super().test_is_done_cardinality(*args, **kwargs)
        self.max_trials = _max_trials

    @pytest.mark.parametrize("seed", [123])
    def test_seed_rng(self, seed: int):
        """Test that the seeding gives reproducible results."""
        algo = self.create_algo(seed=seed)
        trial_a = algo.suggest(1)[0]
        trial_b = algo.suggest(1)[0]
        # Consecutive suggestions from one instance must differ.
        assert trial_a != trial_b
        new_algo = self.create_algo(seed=seed)
        assert new_algo.n_observed == algo.n_observed
        trial_c = new_algo.suggest(1)[0]
        assert trial_c == trial_a
        # Parameter values must also match numerically within TOL.
        numpy.testing.assert_allclose(
            numpy.array(list(trial_a.params.values())).astype(float),
            numpy.array(list(trial_c.params.values())).astype(float),
            atol=TOL,
            rtol=TOL,
        )

    @first_phase_only
    def test_seed_rng_init(self):
        """Test that the seeding gives reproducible results."""
        config = self.config.copy()
        config["seed"] = 1
        algo = self.create_algo(config=config)
        state = algo.state_dict  # NOTE(review): unused, kept as in original
        first_trial = algo.suggest(1)[0]
        second_trial = algo.suggest(1)[0]
        assert first_trial != second_trial
        # A different seed must yield a different first suggestion.
        config = self.config.copy()
        config["seed"] = 2
        new_algo = self.create_algo(config=config)
        new_algo_state = new_algo.state_dict
        different_seed_trial = new_algo.suggest(1)[0]
        assert different_seed_trial != first_trial
        # The same seed must reproduce the first suggestion exactly.
        config = self.config.copy()
        config["seed"] = 1
        new_algo = self.create_algo(config=config)
        same_seed_trial = new_algo.suggest(1)[0]
        assert same_seed_trial == first_trial
        numpy.testing.assert_allclose(
            numpy.array(list(first_trial.params.values())).astype(float),
            numpy.array(list(same_seed_trial.params.values())).astype(float),
            atol=TOL,
            rtol=TOL,
        )

    @pytest.mark.parametrize("seed", [123, 456])
    def test_state_dict(self, seed: int, phase: TestPhase):
        """Verify that resetting state makes sampling deterministic"""
        algo = self.create_algo(seed=seed)
        state = algo.state_dict
        a = algo.suggest(1)[0]
        # Create a new algo, without setting a seed.
        # The other algorithm is initialized at the start of the next phase.
        n_initial_trials = phase.end_n_trials
        # Use max_trials-1 so the algo can always sample at least one trial.
        if n_initial_trials == self.max_trials:
            n_initial_trials -= 1
        # NOTE: Seed is part of configuration, not state. Configuration is assumed to be the same
        # for both algorithm instances.
        new_algo = self.create_algo(n_observed_trials=n_initial_trials, seed=seed)
        new_state = new_algo.state_dict  # NOTE(review): unused, kept as in original
        b = new_algo.suggest(1)[0]
        assert a != b
        # Restoring the first algorithm's state must reproduce its suggestion.
        new_algo.set_state(state)
        c = new_algo.suggest(1)[0]
        numpy.testing.assert_allclose(
            numpy.array(list(a.params.values())).astype(float),
            numpy.array(list(c.params.values())).astype(float),
            atol=TOL,
            rtol=TOL,
        )

    @first_phase_only
    def test_optimize_multi_objectives(self):
        """Test that algorithm optimizes somehow (this is on-par with random search)"""
        _max_trials = 20
        task = BraninCurrinTask()
        space = self.create_space(task.get_search_space())
        # The second task output ("statistic") is treated as an extra objective.
        algo = self.create_algo(
            config={**self.config, "extra_objectives": ["statistic"]}, space=space
        )
        algo.algorithm.max_trials = _max_trials
        safe_guard = 0
        trials = []
        objectives = []
        # Suggest/observe loop, bounded by safe_guard against infinite looping.
        while trials or not algo.is_done:
            if safe_guard >= _max_trials:
                break
            if not trials:
                trials = algo.suggest(_max_trials - len(objectives))
            trial = trials.pop(0)
            results = task(trial.params["x"])
            objectives.append((results[0]["value"], results[1]["value"]))
            backward.algo_observe(
                algo,
                [trial],
                [dict(objective=objectives[-1][0], statistic=objectives[-1][1])],
            )
            safe_guard += 1
        # Random-search baseline with the same number of evaluations.
        rand_objectives = []
        for trial in algo.space.sample(len(objectives)):
            results = task(trial.params["x"])
            rand_objectives.append((results[0]["value"], results[1]["value"]))
        objectives_branin, objectives_currin = list(zip(*objectives))
        _, rand_objectives_currin = list(zip(*rand_objectives))
        assert algo.is_done
        # branin
        assert min(objectives_branin) <= 10
        # currin
        assert min(objectives_currin) <= min(rand_objectives_currin)

    @first_phase_only
    def test_objectives_constraints(self):
        """Test that algorithm optimizes somehow (this is on-par with random search)"""
        _max_trials = 20
        task = BraninCurrinTask()
        space = self.create_space(task.get_search_space())
        # The second task output is fed back as a constraint value instead.
        algo = self.create_algo(
            config={
                **self.config,
                "constraints": ["constraint >= 3", "constraint <= 10"],
            },
            space=space,
        )
        algo.algorithm.max_trials = _max_trials
        safe_guard = 0
        trials = []
        objectives = []
        while trials or not algo.is_done:
            if safe_guard >= _max_trials:
                break
            if not trials:
                trials = algo.suggest(_max_trials - len(objectives))
            trial = trials.pop(0)
            results = task(trial.params["x"])
            objectives.append((results[0]["value"], results[1]["value"]))
            backward.algo_observe(
                algo,
                [trial],
                [dict(objective=objectives[-1][0], constraint=objectives[-1][1])],
            )
            safe_guard += 1
        objectives_branin, objectives_currin = list(zip(*objectives))
        assert algo.is_done
        # branin
        assert (
            min(objectives_branin)
            <= stats.mean(objectives_branin) - stats.stdev(objectives_branin) * 0.7
        )
        # currin: the last few trials should satisfy the constraint on average.
        assert 3 <= stats.mean(objectives_currin[-5:]) <= 10
|
<reponame>pochiel/mitsune_3_bot
import sqlite3
from contextlib import closing
import pickle
import bz2
from athreat import athreat
import datetime
class data_manager(object):
    """SQLite-backed persistence for teams, players, stadiums, matches and stats.

    Every method opens a short-lived connection to ``database.db`` (the path is
    set once by createDatabase, which __init__ always calls).  Mutating methods
    return True on success and a falsy value on ``sqlite3.Error``; query
    methods return the fetched rows or an empty default on error.
    """

    def set_team(self, t_name, t_symbol, owner_id):
        """Insert a new team row; returns True on success, False on DB error."""
        team = (t_symbol, t_name, owner_id)
        try:
            with closing(sqlite3.connect(self.dbname)) as conn:
                db_cursor = conn.cursor()
                db_cursor.execute('INSERT INTO teams (symbol, name, owner_id) VALUES (?,?,?)', team)
                conn.commit()
                return True
        except sqlite3.Error:
            return False

    def get_team(self, t_symbol):
        """Return a team from the in-memory cache.

        NOTE(review): ``self.team_db`` is never populated anywhere in this
        class, so this raises KeyError for every symbol — confirm callers.
        """
        return self.team_db[t_symbol]

    def get_teamDB(self):
        """Return every (symbol, name) team row, or () on DB error."""
        try:
            with closing(sqlite3.connect(self.dbname)) as conn:
                db_cursor = conn.cursor()
                db_cursor.execute('SELECT symbol, name FROM teams')
                return db_cursor.fetchall()
        except sqlite3.Error:
            return ()

    def set_athreat(self, ath):
        """Store a pickled player keyed by (team symbol, uniform number)."""
        try:
            with closing(sqlite3.connect(self.dbname)) as conn:
                db_cursor = conn.cursor()
                blob = sqlite3.Binary(pickle.dumps(ath, pickle.HIGHEST_PROTOCOL))
                db_cursor.execute('insert into athreat (symbol, number, obj) values (?,?,?)',
                                  (ath.t_symbol, ath.number, blob))
                conn.commit()
                return True
        except sqlite3.Error:
            return False

    # Add a match record (not completed, not planned yet).
    def add_match(self, home_t_symbol, visit_t_symbol):
        """Insert an uncompleted, unplanned match; True on success."""
        try:
            with closing(sqlite3.connect(self.dbname)) as conn:
                db_cursor = conn.cursor()
                db_cursor.execute('insert into match ( home_team_symbol, visit_team_symbol, home_team_point, visit_team_point, completed, planned ) values (?,?,0,0,0,0)',
                                  (home_t_symbol, visit_t_symbol))
                conn.commit()
                return True
        except sqlite3.Error:
            return False

    # Look up the channel bound to the home team's stadium for a match.
    def get_match_channel(self, match_id):
        """Return the stadium channel id for a match, or '' on DB error."""
        try:
            with closing(sqlite3.connect(self.dbname)) as conn:
                db_cursor = conn.cursor()
                db_cursor.execute('SELECT channel_id FROM stadium WHERE owner_id=(SELECT owner_id FROM teams WHERE symbol=(SELECT home_team_symbol FROM match WHERE match_num=?));', (match_id,))
                record = db_cursor.fetchone()
                return record[0]
        except sqlite3.Error:
            return ""

    # Register the result of a finished match.
    def set_match_result(self, match_id, home_score, visit_score):
        """Record a match's final score and mark it completed.

        Returns True on success and '' on DB error (the original error value).
        """
        try:
            with closing(sqlite3.connect(self.dbname)) as conn:
                db_cursor = conn.cursor()
                db_cursor.execute('UPDATE match SET home_team_point=?,visit_team_point=?,completed=1 where match_num=?;',
                                  (home_score, visit_score, match_id))
                # BUG FIX: the UPDATE was never committed, so match results
                # were silently discarded when the connection closed.
                conn.commit()
                return True
        except sqlite3.Error:
            return ""

    # Fetch one match that has not been played or planned yet.
    def get_umcompleted_match(self):
        """Return one uncompleted, unplanned match row, or () on DB error.

        NOTE(review): method name keeps the original 'umcompleted' typo for
        backward compatibility with existing callers.
        """
        try:
            with closing(sqlite3.connect(self.dbname)) as conn:
                db_cursor = conn.cursor()
                db_cursor.execute('SELECT match_num, home_team_symbol, visit_team_symbol, home_team_point, visit_team_point FROM match WHERE completed=0 and planned=0')
                return db_cursor.fetchone()
        except sqlite3.Error:
            return ()

    # Fetch all matches that are planned but not yet completed.
    def get_match_planned(self):
        """Return all planned-but-uncompleted match rows, or () on DB error."""
        try:
            with closing(sqlite3.connect(self.dbname)) as conn:
                db_cursor = conn.cursor()
                db_cursor.execute('SELECT match_num, home_team_symbol, visit_team_symbol, home_team_point, visit_team_point FROM match WHERE completed=0 and planned=1')
                return db_cursor.fetchall()
        except sqlite3.Error:
            return ()

    # Mark a match record as planned.
    def set_match_to_planned(self, match_id):
        """Set planned=1 for a match; True on success, () on DB error."""
        try:
            with closing(sqlite3.connect(self.dbname)) as conn:
                db_cursor = conn.cursor()
                db_cursor.execute('UPDATE match set planned=1 where match_num = ?', (match_id,))
                conn.commit()
                return True
        except sqlite3.Error:
            return ()

    # Register a stadium.
    def add_stadium(self, home_team_symbol, name, channel_id, owner_id):
        """Register a stadium; fails if the owner or channel already has one."""
        try:
            with closing(sqlite3.connect(self.dbname)) as conn:
                db_cursor = conn.cursor()
                # Reject if this owner already registered a stadium.
                db_cursor.execute('SELECT count(*) FROM stadium where owner_id=?', (owner_id,))
                if db_cursor.fetchone()[0] != 0:
                    return False
                # Reject if this channel is already bound to a stadium.
                # BUG FIX: this check previously queried owner_id=? with the
                # channel id, so duplicate channels were never detected.
                db_cursor.execute('SELECT count(*) FROM stadium where channel_id=?', (channel_id,))
                if db_cursor.fetchone()[0] != 0:
                    return False
                # Register the stadium.
                db_cursor.execute('insert into stadium ( owner_id, home_team_symbol, name, channel_id ) values (?,?,?,?)',
                                  (owner_id, home_team_symbol, name, channel_id))
                conn.commit()
                return True
        except sqlite3.Error:
            return False

    # Load the previously saved starting lineup.
    def load_lasttime_starting_member(self, team_symbol):
        """Return the saved lineup rows for a team, or '' on DB error."""
        try:
            with closing(sqlite3.connect(self.dbname)) as conn:
                db_cursor = conn.cursor()
                db_cursor.execute('SELECT symbol, batting_num, number, position FROM last_starting_member WHERE symbol = ?;', (team_symbol,))
                return db_cursor.fetchall()
        except sqlite3.Error:
            return ""

    # Remember the current starting lineup.
    def record_starting_member(self, team):
        """Persist a team's starting lineup (one row per batting slot)."""
        batting_num = 0
        for member in team.batting_order:
            batting_num = batting_num + 1
            try:
                with closing(sqlite3.connect(self.dbname)) as conn:
                    db_cursor = conn.cursor()
                    # member is (position, player) per the usage below.
                    db_cursor.execute('INSERT OR REPLACE INTO last_starting_member (symbol, batting_num, number, position) values (?,?,?,?)',
                                      (team.symbol, batting_num, member[1].number, member[0]))
                    conn.commit()
            except sqlite3.Error:
                # BUG FIX: previously returned the undefined name `ret`, which
                # raised NameError and masked the database error.
                return False
        return True

    # Record one plate-appearance (batting) result.
    def record_batting_result(self, match_num, inning, top_or_bottom, batting_num,
                              batter_team_symbol, batter_num, pitcher_team_symbol, pitcher_num,
                              result):
        """Persist one batting result, timestamped with the current time."""
        match_date = datetime.datetime.now()
        try:
            with closing(sqlite3.connect(self.dbname)) as conn:
                db_cursor = conn.cursor()
                db_cursor.execute('INSERT OR REPLACE INTO batting_result (match_date, match_num, inning, top_or_bottom, batting_num, batter_team_symbol, batter_num, pitcher_team_symbol, pitcher_num, result) values (?,?,?,?,?,?,?,?,?,?)',
                                  (match_date, match_num, inning, top_or_bottom, batting_num, batter_team_symbol, batter_num, pitcher_team_symbol, pitcher_num, result))
                conn.commit()
        except sqlite3.Error:
            # BUG FIX: previously returned the undefined name `ret` (NameError).
            return False
        return True

    # Constructor.
    def __init__(self, client):
        """Create the schema if needed.

        `client` is accepted for compatibility with the caller but unused here.
        """
        self.team_db = {}      # in-memory team cache (currently never filled)
        self.createDatabase()  # also sets self.dbname

    def createDatabase(self):
        """Create all tables if they do not exist yet."""
        self.dbname = 'database.db'
        try:
            with closing(sqlite3.connect(self.dbname)) as conn:
                db_cursor = conn.cursor()
                # Stadium table
                db_cursor.execute('''create table IF NOT EXISTS stadium ( owner_id INTEGER PRIMARY KEY, home_team_symbol text, name text, channel_id INTEGER )''')
                # Match table
                db_cursor.execute('''create table IF NOT EXISTS match ( match_num INTEGER PRIMARY KEY AUTOINCREMENT, home_team_symbol text, visit_team_symbol text, home_team_point int, visit_team_point int, completed int, planned int )''')
                # Team table
                db_cursor.execute('''create table IF NOT EXISTS teams ( symbol text unique, name varchar(64), owner_id INTEGER )''')
                # Player table
                db_cursor.execute('''create table IF NOT EXISTS athreat (
                    symbol text,
                    number int,
                    obj blob,
                    unique (symbol, number) )''')
                # Last-starting-lineup table
                db_cursor.execute('''create table IF NOT EXISTS last_starting_member (
                    symbol text,
                    batting_num int,
                    number int,
                    position int,
                    unique (symbol, batting_num) )''')
                # Batting-results table
                db_cursor.execute('''create table IF NOT EXISTS batting_result (
                    match_date datetime,
                    match_num INTEGER,
                    inning INTEGER,
                    top_or_bottom INTEGER,
                    batting_num INTEGER,
                    batter_team_symbol text,
                    batter_num INTEGER,
                    pitcher_team_symbol text,
                    pitcher_num INTEGER,
                    result text,
                    unique (match_date, match_num, inning, top_or_bottom, batting_num ) )''')
        except sqlite3.Error:
            pass  # schema creation is best-effort; later queries surface errors

    # List a team's registered first-squad players.
    def get_team_1st_member(self, t_symbol):
        """Return [(number, player), ...] for a team's registered players."""
        ret = []
        try:
            with closing(sqlite3.connect(self.dbname)) as conn:
                db_cursor = conn.cursor()
                db_cursor.execute('SELECT number, obj FROM athreat where symbol=?', (t_symbol,))
                for number, blob in db_cursor.fetchall():
                    ret.append([number, pickle.loads(blob)])
        except sqlite3.Error:
            # BUG FIX: previously returned ('', None) on error, which callers
            # iterating a member list cannot handle; return a list instead.
            return []
        return ret

    # Team symbol -> team name.
    def get_team_name(self, t_symbol):
        """Return the team name for a symbol, or '' if unknown/on error."""
        ret = ''
        try:
            with closing(sqlite3.connect(self.dbname)) as conn:
                db_cursor = conn.cursor()
                db_cursor.execute('SELECT name FROM teams where symbol=?', (t_symbol,))
                records = db_cursor.fetchone()
                if records is not None:
                    ret = records[0]
        except sqlite3.Error:
            pass
        return ret

    # Owner id -> team symbol.
    def get_team_symbol(self, owner_id):
        """Return the team symbol for an owner id, or '' if unknown/on error."""
        ret = ''
        try:
            with closing(sqlite3.connect(self.dbname)) as conn:
                db_cursor = conn.cursor()
                db_cursor.execute('SELECT symbol FROM teams where owner_id=?', (owner_id,))
                records = db_cursor.fetchone()
                if records is not None:
                    ret = records[0]
        except sqlite3.Error:
            pass
        return ret

    # Team symbol -> owner id.
    def get_owner_id(self, team_symbol):
        """Return the owner id for a team symbol, or '' if unknown/on error."""
        ret = ''
        try:
            with closing(sqlite3.connect(self.dbname)) as conn:
                db_cursor = conn.cursor()
                db_cursor.execute('SELECT owner_id FROM teams where symbol=?', (team_symbol,))
                records = db_cursor.fetchone()
                if records is not None:
                    ret = records[0]
        except sqlite3.Error:
            pass
        return ret

    # (team symbol, uniform number) -> player object.
    def get_athreat(self, team_symbol, uni_num):
        """Return the unpickled player for a team/number, or None."""
        ret = None
        try:
            with closing(sqlite3.connect(self.dbname)) as conn:
                db_cursor = conn.cursor()
                db_cursor.execute('SELECT obj FROM athreat WHERE symbol=? and number=?;', (team_symbol, uni_num,))
                records = db_cursor.fetchone()
                if records is not None:
                    ret = pickle.loads(records[0])
        except sqlite3.Error:
            pass
        return ret

    # Number of games remaining overall.
    def get_remain_games(self):
        """Return the count of uncompleted matches (0 on error)."""
        ret = 0
        try:
            with closing(sqlite3.connect(self.dbname)) as conn:
                db_cursor = conn.cursor()
                db_cursor.execute('SELECT count(*) FROM match where completed=0')
                ret = db_cursor.fetchone()[0]
        except sqlite3.Error:
            pass
        return ret
|
<gh_stars>0
class Node:
    """A single element of a singly linked list."""

    def __init__(self, value):
        # next is filled in when the node is linked into a list
        self.value, self.next = value, None

    def __repr__(self):
        return str(self.value)
class LinkedList:
    """Minimal singly linked list supporting append, size and printing."""

    def __init__(self):
        self.head = None

    def __str__(self):
        # Render every value followed by " -> ", matching the original format.
        parts = []
        node = self.head
        while node is not None:
            parts.append(str(node.value) + " -> ")
            node = node.next
        return "".join(parts)

    def append(self, value):
        """Append `value` at the tail (O(n) walk from the head)."""
        new_node = Node(value)
        if self.head is None:
            self.head = new_node
            return
        cursor = self.head
        while cursor.next is not None:
            cursor = cursor.next
        cursor.next = new_node

    def size(self):
        """Return the number of nodes in the list."""
        count = 0
        cursor = self.head
        while cursor is not None:
            count += 1
            cursor = cursor.next
        return count
def create_set(linklist):
    """Collect every value stored in `linklist` into a Python set."""
    values = set()
    cursor = linklist.head
    while cursor is not None:
        values.add(cursor.value)
        cursor = cursor.next
    return values
def union(llist_1, llist_2):
    """Return a new LinkedList holding the set union of both lists' values."""
    merged = LinkedList()
    for value in create_set(llist_1) | create_set(llist_2):
        merged.append(value)
    return merged
def intersection(llist_1, llist_2):
    """Return a new LinkedList holding values present in both input lists."""
    shared = create_set(llist_1) & create_set(llist_2)
    result = LinkedList()
    for value in shared:
        result.append(value)
    return result
# Test case 1: lists that share several values.
linked_list_1 = LinkedList()
linked_list_2 = LinkedList()

element_1 = [3, 2, 4, 35, 6, 65, 6, 4, 3, 21]
element_2 = [6, 32, 4, 9, 6, 1, 11, 21, 1]

for i in element_1:
    linked_list_1.append(i)
for i in element_2:
    linked_list_2.append(i)

print("Case 1")
print("Union")
print(union(linked_list_1, linked_list_2))  # 32 -> 65 -> 2 -> 35 -> 3 -> 4 -> 6 -> 1 -> 9 -> 11 -> 21 ->
print("Intersection")
print(intersection(linked_list_1, linked_list_2))  # 4 -> 21 -> 6 ->

# Test case 2: disjoint lists — intersection is empty.
linked_list_3 = LinkedList()
linked_list_4 = LinkedList()

element_1 = [3, 2, 4, 35, 6, 65, 6, 4, 3, 23]
element_2 = [1, 7, 8, 9, 11, 21, 1]

for i in element_1:
    linked_list_3.append(i)
for i in element_2:
    linked_list_4.append(i)

print("Case 2")
print("Union")
print(union(linked_list_3, linked_list_4))  # 65 -> 2 -> 35 -> 3 -> 4 -> 6 -> 1 -> 7 -> 8 -> 9 -> 11 -> 21 -> 23 ->
print("Intersection")
print(intersection(linked_list_3, linked_list_4))  # empty

# Test case 3: second list empty.
linked_list_5 = LinkedList()
linked_list_6 = LinkedList()

element_3 = [3, 2, 4, 35, 12, 65, 6, 4, 23]
element_4 = []

for i in element_3:
    linked_list_5.append(i)
for i in element_4:
    linked_list_6.append(i)

# BUG FIX: this case previously printed the label "Case 2" a second time.
print("Case 3")
print("Union")
print(union(linked_list_5, linked_list_6))  # 65 -> 2 -> 35 -> 3 -> 4 -> 6 -> 12 -> 23 ->
print("Intersection")
print(intersection(linked_list_5, linked_list_6))  # empty

# Test case 4: both lists empty.
linked_list_7 = LinkedList()
linked_list_8 = LinkedList()

element_5 = []
element_6 = []

for i in element_5:
    linked_list_7.append(i)
for i in element_6:
    linked_list_8.append(i)

# BUG FIX: this case previously printed the label "Case 3".
print("Case 4")
print("Union")
print(union(linked_list_7, linked_list_8))  # empty
print("Intersection")
print(intersection(linked_list_7, linked_list_8))  # empty
import sys
import os.path
import torch
import visdom
import argparse
import random
import time
import math
from PIL import Image
import numpy as np
import torch.nn as nn
import torch.backends.cudnn as cudnn
import torch.nn.functional as F
import torchvision.models as models
from torch.autograd import Variable
from torch.utils import data
from torch.optim.lr_scheduler import MultiStepLR
from ptsemseg.models import get_model
from ptsemseg.loader import get_loader, get_data_path
from ptsemseg.loss import cross_entropy2d
from ptsemseg.metrics import AverageMeter
from ptsemseg.metrics import MultiAverageMeter
from ptsemseg.metrics import Metrics
from ptsemseg.utils import save_checkpoint
from ptsemseg.loader import transforms
from validate import validate
# Best validation metric seen so far; restored from checkpoints on resume
# (see the checkpoint-loading logic in main).
best_metric_value = 0
# Setup visdom for visualization
vis = visdom.Visdom()
def main(args):
    """Build data pipelines, model and optimizer, then run the training loop.

    Side effects: writes a CSV-style log file and model checkpoints under
    args.save_path, and plots train/val loss curves to a visdom window.
    """
    global best_metric_value
    # Training-time augmentation pipeline; Normalize uses ImageNet statistics
    # (matches the torchvision-pretrained backbones).
    train_transforms = transforms.Compose([transforms.Resize(512, minside=False),
                                           transforms.RandomHorizontalFlip(),
                                           transforms.RandomRotation(10, resample=Image.BILINEAR),
                                           transforms.PadToSize(480),
                                           transforms.RandomResizedCrop(480, scale=(0.5, 2), ratio=(1, 1)),
                                           transforms.ToTensor(),
                                           transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                                                std=[0.229, 0.224, 0.225])])
    # Deterministic validation pipeline (no random augmentation).
    val_transforms = transforms.Compose([transforms.Resize(512, minside=False),
                                         transforms.PadToSize(480),
                                         transforms.CenterCrop(480),
                                         transforms.ToTensor(),
                                         transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                                              std=[0.229, 0.224, 0.225])])
    # Setup Dataset and Dataloader
    data_loader = get_loader(args.dataset)
    data_path = get_data_path(args.dataset)
    # Train
    train_dataset = data_loader(data_path,transform=train_transforms)
    args.n_classes = train_dataset.n_classes
    trainloader = data.DataLoader(train_dataset,
                                  batch_size=args.batch_size,
                                  num_workers=args.num_workers,
                                  shuffle=True,
                                  pin_memory=True)
    # Validation
    val_dataset = data_loader(data_path,
                              split='val',
                              transform=val_transforms)
    valloader = data.DataLoader(val_dataset,
                                batch_size=args.batch_size,
                                num_workers=args.num_workers,
                                shuffle=False,
                                pin_memory=True)
    # Setup Model
    model = get_model(args)
    optimizer = torch.optim.SGD(model.parameters(),
                                lr=args.lr,
                                momentum=args.momentum,
                                weight_decay=args.weight_decay)
    # Optionally resume model/optimizer state from a saved checkpoint.
    if args.resume:
        if os.path.isfile(args.resume):
            print("=> loading checkpoint '{}'".format(args.resume))
            checkpoint = torch.load(args.resume)
            args.start_epoch = checkpoint['epoch']
            best_metric_value = checkpoint['best_metric_value']
            model.load_state_dict(checkpoint['state_dict'])
            optimizer.load_state_dict(checkpoint['optimizer'])
            print("=> loaded checkpoint '{}' (epoch {})"
                  .format(args.resume, checkpoint['epoch']))
        else:
            print("=> no checkpoint found at '{}'".format(args.resume))
    # Wrap in DataParallel across all visible GPUs when CUDA is available.
    if torch.cuda.is_available():
        model = torch.nn.DataParallel(model, device_ids=range(torch.cuda.device_count())).cuda()
        cudnn.benchmark = True
    # NOTE(review): `scheduler` only exists when lr_policy == "MultiStepLR";
    # the later scheduler.step() is guarded by the same condition.
    if args.lr_policy == "MultiStepLR":
        scheduler = MultiStepLR(optimizer, milestones=[int(x) for x in args.milestones.split(',')])
    # Visdom window tracking train/val loss across epochs.
    loss_viswindow = vis.line(X=torch.zeros((1, )).cpu(),
                              Y=torch.zeros((1, 2)).cpu(),
                              opts=dict(xlabel='Epochs',
                                        ylabel='Loss',
                                        title='Loss trough Epochs',
                                        legend=['Train','Val']))
    # Open log file (CSV header: epoch, train loss/metrics, val loss/metrics).
    if not os.path.exists(args.save_path):
        os.makedirs(args.save_path)
    log_file = open(os.path.join(args.save_path, 'logs.txt'), 'w')
    log_header = 'epoch'
    log_header += ',train_loss'
    for m in args.metrics:
        log_header += ',train_' + m
    log_header += ',val_loss'
    for m in args.metrics:
        log_header += ',val_' + m
    log_file.write(log_header + '\n')
    # Main training loop
    for epoch in range(args.start_epoch, args.n_epoch):
        trainmetrics = train(trainloader, model, cross_entropy2d, optimizer, epoch, args)
        args.split='val'
        valmetrics = validate(valloader, model, cross_entropy2d, epoch, args)
        if args.lr_policy == "MultiStepLR":
            scheduler.step()
        # Write log file
        log_line = '{}'.format(epoch)
        log_line += ',{:.3f}'.format(trainmetrics['loss'].avg)
        for m in trainmetrics['metrics'].meters:
            log_line += ',{:.3f}'.format(m.avg)
        log_line += ',{:.3f}'.format(valmetrics['loss'].avg)
        for m in valmetrics['metrics'].meters:
            log_line += ',{:.3f}'.format(m.avg)
        log_file.write(log_line + '\n')
        # Track loss through epochs
        vis.line(
            X=torch.ones((1,2)).cpu()*epoch,
            Y=torch.Tensor([trainmetrics['loss'].avg, valmetrics['loss'].avg]).unsqueeze(0).cpu(),
            win=loss_viswindow,
            update='append')
        # The first metric in args.metrics decides what "best" means.
        curr_metric_value = valmetrics['metrics'].meters[0].avg
        is_best = curr_metric_value > best_metric_value
        best_metric_value = max(curr_metric_value, best_metric_value)
        # Periodic checkpoint (every save_every epochs, skipping epoch 0).
        if epoch % args.save_every == 0 and epoch != 0:
            save_checkpoint({
                'epoch': epoch + 1,
                'args': args,
                'state_dict': model.module.state_dict(),
                'best_metric_value': best_metric_value,
                'optimizer': optimizer.state_dict(),
            }, os.path.join(args.save_path,
                            "{}_{}_{}.pth".format(args.arch,
                                                  args.dataset,
                                                  epoch)))
        # Always keep a copy of the best model so far.
        if is_best:
            save_checkpoint({
                'epoch': epoch + 1,
                'args': args,
                'state_dict': model.module.state_dict(),
                'best_metric_value': best_metric_value,
                'optimizer': optimizer.state_dict(),
            }, os.path.join(args.save_path, 'model_best.pth.tar'))
    log_file.close()
def train(trainloader, model, criterion, optimizer, epoch, args):
    """Run one training epoch and return the accumulated statistics.

    Args:
        trainloader: DataLoader yielding (images, labels) batches.
        model: network to train (already wrapped in DataParallel when on GPU).
        criterion: loss function, e.g. cross_entropy2d.
        optimizer: optimizer whose parameters are updated in-place.
        epoch: current epoch index (0-based); epoch 0 also plots the
            per-minibatch loss to visdom.
        args: parsed CLI namespace (uses n_epoch, metrics, n_classes,
            exclude_background, max_iters_per_epoch).

    Returns:
        dict with 'loss' (AverageMeter) and 'metrics' (MultiAverageMeter).
    """
    batch_time = AverageMeter()
    data_time = AverageMeter()
    eval_time = AverageMeter()
    losses = AverageMeter()
    multimeter = MultiAverageMeter(len(args.metrics))
    metrics = Metrics(n_classes=args.n_classes,
                      exclude_background=args.exclude_background)
    # Per-minibatch loss plot, only for the first epoch.
    if epoch==0:
        epoch_loss_window = vis.line(X=torch.zeros(1),
                                     Y=torch.zeros(1),
                                     opts=dict(xlabel='minibatches',
                                               ylabel='Loss',
                                               title='Epoch {} Training Loss'.format(epoch),
                                               legend=['Loss']))
    model.train()
    end = time.perf_counter()
    for i, (images, labels) in enumerate(trainloader):
        # Optional cap on iterations per epoch (0 disables the cap).
        # `>=` so exactly max_iters_per_epoch batches run; the original `>`
        # ran one extra batch (off-by-one).
        if args.max_iters_per_epoch != 0:
            if i >= args.max_iters_per_epoch:
                break
        # measure data loading time
        data_time.update(time.perf_counter() - end)
        if torch.cuda.is_available():
            # `async=True` became a SyntaxError on Python 3.7+ (`async` is a
            # keyword); `non_blocking` is its replacement since PyTorch 0.4.
            images = Variable(images.cuda(0, non_blocking=True))
            labels = Variable(labels.cuda(0, non_blocking=True))
        else:
            images = Variable(images)
            labels = Variable(labels)
        batch_size = images.size(0)
        # Forward pass
        outputs = model(images)
        # Compute metrics on one random sample only, to lighten evaluation.
        start_eval_time = time.perf_counter()
        sample_idx = random.randint(0,batch_size-1)
        pred = outputs.data[sample_idx,:,:,:].max(0)[1].cpu().numpy()
        gt = labels.data[sample_idx,:,:].cpu().numpy()
        values = metrics.compute(args.metrics, gt, pred)
        multimeter.update(values, batch_size)
        eval_time.update(time.perf_counter() - start_eval_time)
        loss = criterion(outputs, labels)
        # .item() replaces the deprecated `loss.data[0]` (removed after
        # PyTorch 0.4).
        losses.update(loss.item(), batch_size)
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        if epoch==0:
            vis.line(
                X=torch.ones(1) * i,
                Y=torch.Tensor([loss.item()]),
                win=epoch_loss_window,
                update='append')
        # Console progress line: timings plus running loss/metric averages.
        batch_log_str = ('Epoch: [{}/{}][{}/{}] '
                         'Time {batch_time.val:.3f} ({batch_time.avg:.3f}) '
                         'Data {data_time.val:.3f} ({data_time.avg:.3f}) '
                         'Eval {eval_time.val:.3f} ({eval_time.avg:.3f})\t'
                         'Loss: {loss.val:.3f} ({loss.avg:.3f})'.format(
                             epoch+1, args.n_epoch, i,
                             math.floor(trainloader.dataset.__len__()/trainloader.batch_size),
                             batch_time=batch_time, data_time=data_time,
                             eval_time=eval_time, loss=losses))
        for mi,mn in enumerate(args.metrics):
            batch_log_str += ' {}: {:.3f} ({:.3f})'.format(mn ,
                                                           multimeter.meters[mi].val,
                                                           multimeter.meters[mi].avg)
        print(batch_log_str)
        #measure elapsed time
        batch_time.update(time.perf_counter() - end)
        end = time.perf_counter()
    return dict(loss = losses, metrics = multimeter)
if __name__ == '__main__':
    # Command-line interface: parse hyperparameters, derive backend-specific
    # settings, then launch training.
    parser = argparse.ArgumentParser(description='Hyperparams')
    # Architecture -------------------------------------------------------------
    parser.add_argument('--arch', nargs='?', type=str, default='fcn8s',
                        help='Architecture to use [\'fcn8s, unet, segnet etc\']')
    parser.add_argument('--backend', nargs='?', type=str, default='resnet18',
                        help='Backend to use (available only for pspnet)'
                             'available: squeezenet, densenet, resnet18,34,50,101,152')
    parser.add_argument('--auxiliary_loss', action='store_true',
                        help='Activate auxiliary loss for deeply supervised models')
    # Data ---------------------------------------------------------------------
    parser.add_argument('--dataset', nargs='?', type=str, default='pascal',
                        help='Dataset to use [\'pascal, camvid, ade20k etc\']')
    parser.add_argument('--img_rows', nargs='?', type=int, default=256,
                        help='Height of the input image')
    parser.add_argument('--img_cols', nargs='?', type=int, default=256,
                        help='Height of the input image')
    # Learning hyperparams -----------------------------------------------------
    parser.add_argument('--n_epoch', nargs='?', type=int, default=100,
                        help='# of the epochs')
    parser.add_argument('--start_epoch', default=0, type=int, metavar='N',
                        help='manual epoch number (useful on restarts)')
    parser.add_argument('--batch_size', nargs='?', type=int, default=1,
                        help='Batch Size')
    parser.add_argument('--lr', nargs='?', type=float, default=1e-5,
                        help='Learning Rate')
    parser.add_argument('--lr_policy', nargs='?', type=str, default='MultiStepLR',
                        help='Adopted learning rate policy: MultiStepLR or PolyLR')
    parser.add_argument('--weight_decay', nargs='?', type=float, default=5e-4,
                        help='Weight Decay')
    parser.add_argument('--milestones', nargs='?', type=str, default='10,20,30',
                        help='Milestones for LR decreasing when using MultiStepLR')
    parser.add_argument('--momentum', nargs='?', type=float, default=0.9,
                        help='Momentum')
    parser.add_argument('--feature_scale', nargs='?', type=int, default=1,
                        help='Divider for # of features to use')
    # Others -------------------------------------------------------------------
    parser.add_argument('--resume', default='', type=str, metavar='PATH',
                        help='path to latest checkpoint (default: none)')
    parser.add_argument('--save_path', nargs='?', type=str, default='.',
                        help='Location where checkpoints are saved')
    parser.add_argument('--save_every', nargs='?', type=int, default=10,
                        help='Save model every x epochs.')
    parser.add_argument('--metrics', nargs='?', type=str, default='pixel_acc,iou_class',
                        help='Metrics to compute and show, the first in the list '
                             'is also used to evaluate the best model to save')
    parser.add_argument('--num_workers', nargs='?', type=int, default=4,
                        help='Number of processes to load and preprocess images')
    parser.add_argument('--max_iters_per_epoch', nargs='?', type=int, default=0,
                        help='Max number of iterations per epoch.'
                             ' Useful for debug purposes')
    parser.add_argument('--exclude_background', action='store_true',
                        help='Exclude background class when evaluating')
    parser.add_argument('--segmentation_maps_path', nargs='?', type=str,
                        default='', help='Directory to save segmentation maps'
                                         'when validating. Leave it blank to disable saving')
    parser.add_argument('--alpha_blend', action='store_true',
                        help='Blend input image with predicted mask when saving'
                             ' (only in validation)')
    args = parser.parse_args()
    # Params preprocessing: turn the comma-separated metric names into a list.
    args.metrics = args.metrics.split(',')
    # For now settings for each backend are hardcoded (pyramid pooling sizes
    # and feature widths used by pspnet).
    # NOTE(review): an unrecognized backend leaves args.psp_size unset;
    # confirm get_model() tolerates that for non-pspnet architectures.
    args.pspnet_sizes = (1,2,3,6)
    if args.backend == 'squeezenet':
        args.psp_size = 512
        args.deep_features_size = 256
    elif args.backend == 'densenet':
        args.psp_size = 1024
        args.deep_features_size = 512
    elif args.backend == 'resnet18' or args.backend == 'resnet34':
        args.psp_size = 512
        args.deep_features_size = 256
    elif args.backend == 'resnet50' or args.backend == 'resnet101' or args.backend == 'resnet152':
        args.psp_size = 2048
        args.deep_features_size = 1024
    # Call main function
    main(args)
|
<gh_stars>0
import sys
from awsglue.transforms import *
from awsglue.utils import getResolvedOptions
from pyspark.context import SparkContext
from awsglue.context import GlueContext
from awsglue.job import Job
from awsglue.dynamicframe import DynamicFrame
import pyspark.sql.functions as F
## @params: [JOB_NAME]
# Resolve the Glue job name from the invocation arguments and boot the job.
args = getResolvedOptions(sys.argv, ['JOB_NAME'])
sc = SparkContext()
glueContext = GlueContext(sc)
spark = glueContext.spark_session
job = Job(glueContext)
job.init(args['JOB_NAME'], args)
## @type: DataSource
## @args: [database = "raw", table_name = "lda_s3_raw", transformation_ctx = "datasource0"]
## @return: datasource0
## @inputs: []
datasource0 = glueContext.create_dynamic_frame.from_catalog(database = "raw", table_name = "lda_s3_raw", transformation_ctx = "datasource0")
## @type: ApplyMapping
## @args: [mapping = [("sensor_type", "string", "sensor_type", "string"), ("subject_id", "string", "subject_id", "string"), ("heart_beat_count", "double", "heart_beat_count", "double"), ("timestamp", "string", "timestamp", "string"), ("ro_id", "string", "ro_id", "string"), ("x_gyro", "double", "x_gyro", "double"), ("z_gyro", "double", "z_gyro", "double"), ("y_gyro", "double", "y_gyro", "double"), ("x_acceleration", "double", "x_acceleration", "double"), ("y_acceleration", "double", "y_acceleration", "double"), ("z_acceleration", "double", "z_acceleration", "double"), ("partition_0", "string", "year", "string"), ("partition_1", "string", "month", "string"), ("partition_2", "string", "day", "string"), ("partition_3", "string", "hour", "string")], transformation_ctx = "applymapping1"]
## @return: applymapping1
## @inputs: [frame = datasource0]
# NOTE(review): the generated @args comment above lists gyro/acceleration
# fields, but the actual mapping below maps currentTemperature/status —
# confirm which schema is intended.
applymapping1 = ApplyMapping.apply(frame = datasource0, mappings = [("sensor_type", "string", "sensor_type", "string"), ("currentTemperature", "string", "currentTemperature", "string"), ("status", "string", "status", "string"), ("partition_0", "string", "year", "string"), ("partition_1", "string", "month", "string"), ("partition_2", "string", "day", "string"), ("partition_3", "string", "hour", "string")], transformation_ctx = "applymapping1")
# Switch to a plain Spark DataFrame to collect the distinct sensor types.
spark_df = applymapping1.toDF()
unique_val = spark_df.select('sensor_type').distinct().collect()
def drop_null_columns(df):
    """Return *df* with every column whose values are all null-like removed.

    A column is dropped only when every distinct value is none/null/nan
    (case-insensitive string comparison). The original implementation only
    inspected the FIRST distinct value (`collect()[0][0]`), so a column with
    a mix of nulls and real data could be dropped; now all distinct values
    must be null-like.

    Args:
        df: a (Py)Spark DataFrame (anything exposing .columns, .select,
            .distinct, .collect and .drop with the same semantics).

    Returns:
        A DataFrame without the all-null columns.
    """
    null_set = {"none", "null", "nan"}
    # Iterate over each column in the DF
    for col in df.columns:
        # Get ALL distinct values of the column (rows of one field each).
        distinct_vals = [row[0] for row in df.select(col).distinct().collect()]
        # Drop only when every distinct value is none/nan/null.
        if distinct_vals and all(str(v).lower() in null_set for v in distinct_vals):
            # print("Dropping " + col + " because of all null values.")
            df = df.drop(col)
    return df
# For each distinct sensor type: filter its rows, drop the all-null columns,
# and write one partitioned parquet dataset to S3.
for val in unique_val:
    data_filter = spark_df.filter("sensor_type == '"+val.sensor_type+"'")
    data_filter = drop_null_columns(data_filter)
    data_filter_dynamic_frame = DynamicFrame.fromDF(data_filter, glueContext, "data_filter_dynamic_frame")
    ## @type: ResolveChoice
    ## @args: [choice = "make_struct", transformation_ctx = "resolvechoice2"]
    ## @return: resolvechoice2
    ## @inputs: [frame = applymapping1]
    resolvechoice2 = ResolveChoice.apply(frame = data_filter_dynamic_frame, choice = "make_struct", transformation_ctx = "resolvechoice2")
    ## @type: DropNullFields
    ## @args: [transformation_ctx = "dropnullfields3"]
    ## @return: dropnullfields3
    ## @inputs: [frame = resolvechoice2]
    #dropnullfields3 = DropNullFields.apply(frame = resolvechoice2, transformation_ctx = "dropnullfields3")
    ## @type: DataSink
    ## @args: [connection_type = "s3", connection_options = {"path": "s3://lda-s3-conform"}, format = "parquet", transformation_ctx = "datasink4"]
    ## @return: datasink4
    ## @inputs: [frame = dropnullfields3]
    # NOTE(review): DropNullFields is commented out, so the sink writes
    # resolvechoice2 directly even though the generated @inputs comment
    # still says dropnullfields3 — confirm this is intentional.
    datasink4 = glueContext.write_dynamic_frame.from_options(frame = resolvechoice2, connection_type = "s3", connection_options = {"path": "s3://lda-s3-conform/"+val.sensor_type , "partitionKeys": ["year", "month", "day", "hour"]}, format = "parquet", transformation_ctx = "datasink4")
job.commit()
# TensorFlow and tf.keras
import tensorflow as tf
from tensorflow import keras
from sklearn.model_selection import train_test_split
import matplotlib.pyplot as plt
# Helper libraries
from pandas import read_csv
import numpy as np
import pandas as pd
import cv2
print(tf.__version__)
array_of_img = []  # accumulates all loaded image arrays
image_size = 100
directory_name = "train/train/"
# Load training images 1.jpg .. 18000.jpg, scale to [0, 1] and resize.
# NOTE(review): cv2.imread returns None for a missing/unreadable file and the
# division would then raise — assumes all 18000 files exist.
for i in range(1, 18001):
    # img is used to store the image data
    # img = cv2.imread(directory_name + str(i) + ".jpg", cv2.IMREAD_GRAYSCALE)
    img = cv2.imread(directory_name + str(i) + ".jpg")
    img = img / 255.0
    img = cv2.resize(img, (image_size, image_size))
    array_of_img.append(img)
train_images = np.array(array_of_img)
array_of_img = []
# Labels: second column of train.csv (0/1 — see class_names below).
dataframe = read_csv('train.csv')
array = dataframe.values
train_labels = np.array(array[:, 1], dtype='int8')
# Free the intermediate pandas objects to reduce peak memory.
del dataframe
del array
class_names = ['male', 'female']
# train_images = train_images.reshape(train_images.shape[0], image_size, image_size, 1)
# train_images = train_images.astype('float32')
# Hold out 10% of the training data for validation.
X_train, X_val, Y_train, Y_val = train_test_split(train_images, train_labels, test_size=0.1, random_state=3)
# Augmentation layers are active only during training (Keras disables them
# at inference time automatically).
data_augmentation = keras.Sequential(
    [
        keras.layers.GaussianNoise(0.1, input_shape=(image_size, image_size, 3)),
        keras.layers.experimental.preprocessing.RandomFlip("horizontal"),
        keras.layers.experimental.preprocessing.RandomTranslation(0.1, 0.1),
        keras.layers.experimental.preprocessing.RandomRotation(0.1),
        keras.layers.experimental.preprocessing.RandomZoom(0.1),
    ]
)
# Three conv stages (25/50 -> 50/100 -> 100/200 filters), each followed by
# batch-norm, max-pooling and dropout, then a dense classifier head with a
# 2-way softmax output.
model = keras.Sequential([
    data_augmentation,
    keras.layers.Conv2D(25, kernel_size=(3, 3), padding='same', activation='relu'),
    keras.layers.Conv2D(50, kernel_size=(3, 3), padding='same', activation='relu'),
    keras.layers.BatchNormalization(),
    keras.layers.MaxPooling2D(),
    keras.layers.Dropout(0.2),
    keras.layers.Conv2D(50, kernel_size=(3, 3), padding='same', activation='relu'),
    keras.layers.Conv2D(100, kernel_size=(3, 3), padding='same', activation='relu'),
    keras.layers.BatchNormalization(),
    keras.layers.MaxPooling2D(),
    keras.layers.Dropout(0.2),
    keras.layers.Conv2D(100, kernel_size=(3, 3), padding='same', activation='relu'),
    keras.layers.Conv2D(200, kernel_size=(3, 3), padding='same', activation='relu'),
    keras.layers.BatchNormalization(),
    keras.layers.MaxPooling2D(),
    keras.layers.Dropout(0.2),
    keras.layers.Flatten(),
    keras.layers.Dense(50, activation='relu'),
    keras.layers.BatchNormalization(),
    keras.layers.Dropout(0.2),
    keras.layers.Dense(100, activation='relu'),
    keras.layers.BatchNormalization(),
    keras.layers.Dense(200, activation='relu'),
    keras.layers.BatchNormalization(),
    keras.layers.Dropout(0.2),
    keras.layers.Dense(2, activation='softmax')
])
model.summary()
model.compile(optimizer='adam',
              loss=tf.keras.losses.SparseCategoricalCrossentropy(),
              metrics=['accuracy']
              )
save_weights = 'save_weights.h5'
last_weights = 'last_weights.h5'
best_weights = 'best_weights.h5'
# model.load_weights(save_weights)
# Keep only the weights with the best validation accuracy.
checkpoint = keras.callbacks.ModelCheckpoint(best_weights, monitor='val_accuracy', save_best_only=True, mode='max',
                                             verbose=1)
# NOTE(review): `reduce` and `earlyStopping` are defined but NOT included in
# `callbacks`, so they have no effect — confirm whether that is intended.
reduce = keras.callbacks.ReduceLROnPlateau(monitor='val_loss', factor=0.1, patience=3, verbose=0, mode='auto',
                                           min_delta=0.0001, cooldown=0, min_lr=0)
earlyStopping = keras.callbacks.EarlyStopping(monitor='val_loss', patience=50, verbose=1, mode='auto')
callbacks = [checkpoint]
# hist = model.fit(train_images, train_labels, epochs=100)
hist = model.fit(X_train, Y_train, epochs=2000, validation_data=(X_val, Y_val), use_multiprocessing=True,
                 callbacks=callbacks, workers=3)
model.save_weights(last_weights)
# Plot training/validation accuracy and loss curves over all epochs.
plt.figure()
acc = hist.history['accuracy']
val_acc = hist.history['val_accuracy']
loss = hist.history['loss']
val_loss = hist.history['val_loss']
epochs = range(len(acc))
plt.plot(epochs, acc, 'r', label='Training acc')  # 'bo' would draw blue dots without connecting lines
plt.plot(epochs, val_acc, 'b', label='Validation acc')
plt.plot(epochs, loss, 'g', label='Training loss')
plt.plot(epochs, val_loss, 'y', label='Validation loss')
plt.title('Training and validation accuracy,Training and validation loss')
plt.legend()  # draw the legend (upper-right by default)
plt.show()
# model.load_weights(best_weights)  # would load this run's best weights
# Load previously saved weights known to reproduce the best result.
# NOTE(review): 'save_weights.h5' is never written by this script (only
# best/last weights are) — confirm the file exists from an earlier run.
model.load_weights(save_weights)
del train_images
del train_labels
# Load the test images 18001.jpg .. 23708.jpg with the same preprocessing
# as the training set.
directory_name = "test/test/"
for i in range(18001, 23709):
    # img is used to store the image data
    # img = cv2.imread(directory_name + str(i) + ".jpg", cv2.IMREAD_GRAYSCALE)
    img = cv2.imread(directory_name + str(i) + ".jpg")
    img = img / 255.0
    img = cv2.resize(img, (image_size, image_size))
    array_of_img.append(img)
test_images = np.array(array_of_img)
del array_of_img
# test_images = test_images.reshape(test_images.shape[0], image_size, image_size, 1)
# test_images = test_images.astype('float32')
# probability_model = tf.keras.Sequential([model,tf.keras.layers.Softmax()])
# predictions = probability_model.predict(test_images)
# Predicted class = argmax over the 2-way softmax; write the submission CSV.
predictions = model.predict(test_images)
results = np.argmax(predictions, axis=1)
submissions = pd.read_csv('test.csv')
submissions['label'] = results
submissions.to_csv('submission.csv', index=False)
|
<reponame>fran-f/keypirinha-terminal-profiles<filename>src/terminal_profiles.py
"""
Windows Terminal Profiles plugin
More info at https://github.com/fran-f/keypirinha-terminal-profiles
"""
# Disable warning for relative import statements
# pylint: disable=import-error, relative-beyond-top-level
import os
import sys
import shutil
import keypirinha as kp
import keypirinha_util as kpu
from .lib.windows_terminal_wrapper import WindowsTerminalWrapper
class TerminalProfiles(kp.Plugin):
    """
    Add catalog items for all the profiles configured in Windows Terminal.

    Each configured terminal instance contributes one catalog item per
    profile; items can be opened in a new window, in a new tab of an
    existing window, or elevated (as Administrator).
    """
    ACTION_OPEN = {
        'name' : "wt.open",
        'label' : "Open",
        'short_desc' : "Open this profile in a new window"
    }
    ACTION_OPEN_NEW_TAB = {
        'name' : "wt.open_new_tab",
        'label' : "Open new tab",
        'short_desc' : "Open this profile in a new tab of an existing window"
    }
    ACTION_ELEVATE = {
        'name' : "wt.elevate",
        'label' : "Run as Administrator",
        'short_desc' : "Open this profile in a new window with elevated privileges"
    }
    # Suffix of the bundled per-profile icon resources.
    ICON_POSTFIX = ".scale-200.png"
    # Separates the instance name from the profile GUID in an item target.
    INSTANCE_SEPARATOR = "::"
    default_icon = None
    use_profile_icons = False
    terminal_instances = None
    def on_start(self):
        """Respond to on_start Keypirinha messages"""
        self._load_settings()
        self._set_up()
        actions = [
            self.ACTION_OPEN,
            self.ACTION_OPEN_NEW_TAB,
            self.ACTION_ELEVATE,
        ]
        self.set_actions(
            kp.ItemCategory.REFERENCE,
            [self.create_action(**a) for a in actions]
        )
    def on_events(self, flags):
        """Respond to on_events Keypirinha messages"""
        # Reload everything when the package configuration changes.
        if flags & kp.Events.PACKCONFIG:
            self._clean_up()
            self._load_settings()
            self._set_up()
    def on_catalog(self):
        """Respond to on_catalog Keypirinha messages"""
        if not self.terminal_instances:
            return
        # _item_for_profile() returns None for profiles missing a name or
        # guid; filter those out so set_catalog() only receives real items
        # (the original passed the None entries straight through).
        items = (
            self._item_for_profile(instance, profile)
            for instance in self.terminal_instances.values()
            for profile in instance["wrapper"].profiles()
        )
        self.set_catalog([item for item in items if item is not None])
    def on_execute(self, item, action):
        """Respond to on_execute Keypirinha messages"""
        # Targets look like "<instance>::<profile guid>".
        [instance, _, profile] = item.target().partition(self.INSTANCE_SEPARATOR)
        terminal = self.terminal_instances[instance]["wrapper"]
        # No explicit action (e.g. Enter) defaults to opening a new window.
        if action is None:
            terminal.openprofile(profile)
            return
        if action.name() == self.ACTION_ELEVATE['name']:
            terminal.openprofile(profile, elevate=True)
        elif action.name() == self.ACTION_OPEN_NEW_TAB['name']:
            terminal.opennewtab(profile)
        else:
            terminal.openprofile(profile)
    def on_suggest(self, user_input, items_chain):
        """Respond to on_suggest Keypirinha messages"""
        # Intentionally empty: this plugin only contributes catalog items.
    def _load_settings(self):
        """
        Load the configuration file and extract settings to local variables.
        """
        settings = PluginSettings(self)
        self.use_profile_icons = settings.use_profile_icons()
        self.terminal_instances = dict(settings.terminal_instances())
    def _set_up(self):
        """
        Initialise the plugin based on the extracted configuration.
        """
        self.default_icon = self.load_icon(self._resource("WindowsTerminal.png"))
        self.set_default_icon(self.default_icon)
    def _clean_up(self):
        """
        Clean up any resources, to start anew with fresh configuration.
        """
        if self.default_icon:
            self.default_icon.free()
            self.default_icon = None
    def _item_for_profile(self, instance, profile):
        """
        Return a catalog item for a profile, or None if the profile lacks
        a name or a guid.
        """
        guid = profile.get("guid")
        name = profile.get("name")
        if not guid or not name:
            self.warn("Skipping invalid profile with name:'%s' guid:'%s'" % (name, guid))
            return None
        icon = profile.get("icon", None)
        # A None icon handle makes Keypirinha fall back to the default icon.
        icon_handle = self._load_profile_icon(icon, guid) \
            if self.use_profile_icons else None
        return self.create_item(
            category=kp.ItemCategory.REFERENCE,
            label=instance["prefix"] + name,
            short_desc="Open a new terminal",
            icon_handle=icon_handle,
            target=instance["name"] + self.INSTANCE_SEPARATOR + guid,
            args_hint=kp.ItemArgsHint.FORBIDDEN,
            hit_hint=kp.ItemHitHint.IGNORE
        )
    def _load_profile_icon(self, icon, guid):
        """
        Attempt to load an icon for the given profile; return None on failure.
        """
        iconfile = None
        if not icon:
            # check if this is a default profile
            if guid[0] == '{' and guid[-1] == '}':
                iconfile = self._resource(guid + self.ICON_POSTFIX)
        else:
            # internal icons ms-appx:///ProfileIcons/{...}.png
            if icon.startswith("ms-appx:///ProfileIcons/"):
                iconfile = self._resource(icon[24:-4] + self.ICON_POSTFIX)
        if iconfile:
            try:
                return self.load_icon(iconfile)
            except ValueError:
                pass
        else:
            # could it be an external file?
            try:
                # External files cannot be loaded as icon, so we try to copy it
                # to the plugin's cache directory, and load it from there.
                cache_dir = self.get_package_cache_path(True)
                icon_file = guid + ".ico"
                source = icon[8:] if icon[0:8] == "file:///" else os.path.expandvars(icon)
                shutil.copyfile(source, cache_dir + "\\" + icon_file)
                return self.load_icon("cache://Terminal-Profiles/" + icon_file)
            except (ValueError, FileNotFoundError, OSError):
                self.warn("Cannot load icon '%s' for profile %s" % (icon, guid))
        return None
    @staticmethod
    def _resource(filename):
        """Return the res:// URI of a bundled package resource."""
        return "res://Terminal-Profiles/resources/" + filename
class PluginSettings:
    """Wrapper for the plugin configuration file."""
    # Config sections named "terminal/<name>" each describe one terminal
    # installation (packaged or custom).
    INSTANCE_PREFIX = "terminal/"
    DEFAULT_ITEM_PREFIX = "Windows Terminal (%s): "
    # {f1b32785-...} is the Windows known-folder GUID for %LOCALAPPDATA%.
    LOCALAPPDATA = kpu.shell_known_folder_path("{f1b32785-6fba-4fcf-9d55-7b8e7f157091}")
    WINDOWSAPPS = LOCALAPPDATA + "\\Microsoft\\WindowsApps"
    # %s placeholders below are filled with the app package id.
    PACKAGED_SETTINGS = LOCALAPPDATA + "\\Packages\\%s\\LocalState\\settings.json"
    PACKAGED_EXECUTABLE = WINDOWSAPPS + "\\%s\\wt.exe"
    MISSING_KEY_ERROR = """
    ⚠ Config section [%s] defines a custom installation, but the value for '%s' is missing.
    """
    def __init__(self, plugin):
        # The plugin doubles as the logger (info/warn methods).
        self._settings = plugin.load_settings()
        self._logger = plugin
    def use_profile_icons(self):
        """True if we should show try to load per-profile icons."""
        return self._settings.get_bool(
            key="use_profile_icons",
            section="items",
            fallback=True
        )
    def terminal_instances(self):
        """Return the list of terminal instances in the configuration file.

        Yields (instance_name, {"name", "prefix", "wrapper"}) tuples for each
        enabled, resolvable [terminal/*] section.
        """
        for section_name in self._instancesections():
            instance_name = section_name[len(self.INSTANCE_PREFIX):]
            # Skip an instance if it defines 'enabled = false'
            if not self._settings.get_bool(key="enabled", section=section_name, fallback=True):
                continue
            prefix = self._get(section_name, "prefix", "Windows Terminal (%s)" % (instance_name))
            app_package = self._get(section_name, "app_package")
            if app_package and not self._package_exists(app_package):
                self._logger.info(
                    "Skipping '%s', package %s does not exist" % (instance_name, app_package)
                )
                continue
            # For packaged instances, paths are derived from the package id...
            packaged_settings_file = self.PACKAGED_SETTINGS % (app_package) if app_package else None
            packaged_executable = self.PACKAGED_EXECUTABLE % (app_package) if app_package else None
            # ...but you can still override them
            settings_file = self._get(section_name, "settings_file", packaged_settings_file)
            executable = self._get(section_name, "executable", packaged_executable)
            # For custom instances, settings_file and executable are required
            if not app_package:
                if not settings_file:
                    self._logger.warn(self.MISSING_KEY_ERROR % (section_name, "settings_file"))
                    continue
                if not executable:
                    self._logger.warn(self.MISSING_KEY_ERROR % (section_name, "executable"))
                    continue
            self._logger.info(
                "Adding profiles for '%s' (%s)" % (instance_name, app_package or "custom")
            )
            # The wrapper validates its inputs; a bad instance is logged and
            # skipped rather than aborting the whole enumeration.
            try:
                wrapper = WindowsTerminalWrapper(settings_file, executable)
                yield (instance_name, {
                    "name": instance_name,
                    "prefix": prefix,
                    "wrapper": wrapper
                })
            except ValueError:
                message = sys.exc_info()[1]
                self._logger.warn(message)
    def _instancesections(self):
        # All config sections whose (case-insensitive) name starts with
        # "terminal/".
        return [
            s for s in self._settings.sections() \
            if s.lower().startswith(self.INSTANCE_PREFIX)
        ]
    def _get(self, section, key, fallback=None):
        # Unquoted string read with a per-call fallback.
        return self._settings.get(key=key, section=section, fallback=fallback, unquote=True)
    def _package_exists(self, app_package):
        # A package is "installed" if its WindowsApps alias directory exists.
        return os.path.exists(self.WINDOWSAPPS + "\\" + app_package)
|
# coding: utf-8
# /*##########################################################################
#
# Copyright (c) 2016 European Synchrotron Radiation Facility
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
# ###########################################################################*/
# Module metadata (author names redacted in this dump).
__authors__ = ["<NAME>, <NAME>, <NAME> - ESRF ISDD Advanced Analysis and Modelling"]
__license__ = "MIT"
__date__ = "31/08/2016"
import numpy as np
import scipy.constants as codata
import scipy.special as special
from pySRU.Source import Source,PLANE_UNDULATOR,BENDING_MAGNET
from pySRU.ElectronBeam import ElectronBeam
#TODO
class SourceBendingMagnet(Source):
def __init__(self, electron_beam, magnetic_structure, magnetic_field=None):
super(self.__class__, self).__init__(electron_beam=electron_beam,
magnetic_field=magnetic_field,
magnetic_structure=magnetic_structure)
def magnetic_field_strength(self):
return self.magnetic_structure.Bo
def copy(self):
return SourceBendingMagnet(electron_beam=self.electron_beam.copy(),
magnetic_field=self.magnetic_field.copy(),
magnetic_structure=self.magnetic_structure)
def arc_length(self):
return self.magnetic_structure.horizontal_div* self.magnetic_structure.radius_curv
def horizontal_divergence(self):
return self.magnetic_structure.horizontal_div
def radius_curvature(self,):
return self.magnetic_structure.radius_curv
#TODO faire le tri
def critical_energy (self):
Ec= 0.665 * (self.Electron_energy() ) ** 2 * self.magnetic_structure.Bo
return Ec*1e-6
def critical_frequency2(self):
critical_energy=self.critical_energy()
return critical_energy/codata.hbar
def critical_frequency(self):
gamma=self.Lorentz_factor()
res=3.*(gamma**3)*codata.c/(2.*self.radius_curvature())
return res
def H2(self,y):
K23=special.kv((2./3.),0.5*y)
return (y*K23)**2
def theoretical_flux_on_axis(self,n):
#n=frequency/self.critical_frequency()
#print('y= %.3f'%y)
result= 1.327e13*((self.Electron_energy())**2
)*self.I_current()*self.H2(n)
return result
def radiation_theoric(self,omega,observation_angle):
gamma=self.Lorentz_factor()
X=gamma*observation_angle
y=omega/self.critical_frequency()
xi=y*0.5*np.sqrt((1.+X**2)**3)
cst=(3.*codata.alpha*(gamma**2)*1e-3*1e-6*self.I_current()*y**2)/(codata.e*4.*np.pi**2)
rad=((1.+X**2)**2)*((special.kv((2./3.),xi))**2+((X**2)/(1.+X**2))*(special.kv((2./3.),xi))**2)
return rad*cst
def choose_distance_automatic(self, alpha=2,photon_frequency=None):
return self.magnetic_structure.length*10**(alpha)
def choose_nb_pts_trajectory(self, alpha=0.01,photon_frequency=None):
return self.magnetic_structure.length*4.*10**(alpha)
def choose_initial_contidion_automatic(self):
Zo=-self.magnetic_structure.length*1.5
ic=np.array([0.0,0.0,self.electron_speed()*codata.c,0.0,0.0,Zo])
return ic
def choose_photon_frequency(self):
return self.critical_frequency()*0.1
def choose_angle_deflection_max(self):
theta=5.0/self.Lorentz_factor()
return theta
def angle_deflection_central_cone(self):
return 1.0/self.Lorentz_factor()
def analytical_times_vector(self, Nb_pts):
to=-self.magnetic_structure.length*0.5/(self.electron_speed()*codata.c)
t1=to+self.arc_length()/(self.electron_speed()*codata.c)
time=np.linspace(to,t1,Nb_pts)
return time
def construct_times_vector(self, initial_contition, Nb_pts):
#TODO a changer ne marche pas ???
electron_speed=(self.electron_speed() * codata.c)
to = -self.magnetic_structure.length * 0.5 / electron_speed
t1 = to + self.arc_length() / electron_speed
time_start=initial_contition[5]/ electron_speed
if time_start <to :
time=np.linspace(time_start,-time_start,Nb_pts)
else :
delta_t=t1-time_start
if delta_t >0.0 :
time=np.linspace(time_start,time_start+2.*delta_t,Nb_pts)
else :#TODO a tester
time=np.linspace(time_start,2.*time_start,Nb_pts)
return time
def rtol_for_ODE_method(self):
return 1e-11
def atol_for_ODE_method(self):
gamma=self.Lorentz_factor()
atol_vx= self.electron_speed()*codata.c*1e-11
atol_vz = self.electron_speed()*codata.c*1e-11
atol_x = (2.*self.radius_curvature())/(3.*gamma**2)*1e-2
atol_z = (2.*self.radius_curvature())/(3.*gamma**2)*1e-2
return np.array([atol_vx,1e-10,atol_vz,atol_x,1e-10,atol_z])
# source parameter
    def print_parameters(self):
        """Print the generic source parameters, then the bending-magnet ones."""
        # NOTE(review): super(self.__class__, ...) recurses infinitely if this
        # class is ever subclassed; the explicit class name should be used
        # instead -- confirm the enclosing class name before changing.
        super(self.__class__, self).print_parameters()
        print('Bending Magnet')
        print('    length : %.5f (m)'%self.magnetic_structure.length)
        print('    magnetic_field_strength : %.5f (T)'%self.magnetic_structure.Bo)
        print('    horizontal divergeance : %.5f (rad ?)' % self.horizontal_divergence())
        print('    radius curvature : %.3f (m)' % self.radius_curvature())
        print('    critical frequency : %f *1e20 (unite ?)' %(self.critical_frequency()/1e20))
# No demo/self-test yet: running this module directly is a deliberate no-op.
if __name__ == "__main__" :
    pass
|
# Copyright (c) 2016 The UUV Simulator Authors.
# All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from fin_model import FinModel
import rospy
import numpy as np
import tf2_ros
from tf_quaternion.transformations import quaternion_matrix
from uuv_thrusters.models import Thruster
from uuv_auv_control_allocator.msg import AUVCommand
from uuv_gazebo_ros_plugins_msgs.msg import FloatStamped
from geometry_msgs.msg import Wrench, WrenchStamped
import os
import yaml
class ActuatorManager(object):
    """Manage the actuators of an AUV: one thruster plus up to MAX_FINS fins.

    Reads the thruster and fin configurations from the ROS parameter server,
    resolves the actuator frames via TF2, and provides helpers to compute the
    6-DoF generalized control force and to publish actuator commands.
    """
    # Maximum number of consecutively numbered fin frames searched on TF
    MAX_FINS = 4

    def __init__(self):
        # Acquiring the namespace of the vehicle
        self.namespace = rospy.get_namespace().replace('/', '')
        rospy.loginfo('Initialize control allocator for vehicle <%s>' % self.namespace)

        self.tf_buffer = tf2_ros.Buffer()
        self.listener = tf2_ros.TransformListener(self.tf_buffer)
        tf_trans_ned_to_enu = None

        try:
            target = '%s/base_link' % self.namespace
            source = '%s/base_link_ned' % self.namespace
            rospy.loginfo('Lookup transfrom from %s to %s' % (source, target))
            tf_trans_ned_to_enu = self.tf_buffer.lookup_transform(
                target, source, rospy.Time(), rospy.Duration(1))
        # FIX: "except Exception, e" is Python-2-only syntax; "as" works on 2.6+
        except Exception as e:
            print('No transform found between base_link and base_link_ned'
                  ' for vehicle ' + self.namespace)
            print(str(e))
            self.base_link_ned_to_enu = None

        if tf_trans_ned_to_enu is not None:
            self.base_link_ned_to_enu = quaternion_matrix(
                (tf_trans_ned_to_enu.transform.rotation.x,
                 tf_trans_ned_to_enu.transform.rotation.y,
                 tf_trans_ned_to_enu.transform.rotation.z,
                 tf_trans_ned_to_enu.transform.rotation.w))[0:3, 0:3]
            # print statement replaced by a single-argument print() call so the
            # module parses under both Python 2 and Python 3
            print('base_link transform NED to ENU=\n' + str(self.base_link_ned_to_enu))

        self.base_link = rospy.get_param('~base_link', 'base_link')

        # Check if the thruster configuration is available
        if not rospy.has_param('~thruster_config'):
            raise rospy.ROSException('Thruster configuration not available')

        # Retrieve the thruster configuration parameters
        self.thruster_config = rospy.get_param('~thruster_config')

        # Check if all necessary thruster model parameters are available
        thruster_params = ['conversion_fcn_params', 'conversion_fcn',
            'topic_prefix', 'topic_suffix', 'frame_base', 'max_thrust']
        for p in thruster_params:
            if p not in self.thruster_config:
                raise rospy.ROSException(
                    'Parameter <%s> for thruster conversion function is missing' % p)

        # Setting up the thruster topic name
        self.thruster_topic = '/%s/%s/%d/%s' % (self.namespace,
            self.thruster_config['topic_prefix'], 0,
            self.thruster_config['topic_suffix'])
        self.thruster = None

        # Check if the fin configuration is available
        if not rospy.has_param('~fin_config'):
            raise rospy.ROSException('Fin configuration is not available')

        # Retrieve the fin configuration
        self.fin_config = rospy.get_param('~fin_config')

        # Check if all necessary fin parameters are available
        fin_params = ['fluid_density', 'lift_coefficient', 'fin_area',
            'topic_prefix', 'topic_suffix', 'frame_base']
        for p in fin_params:
            if p not in self.fin_config:
                raise rospy.ROSException(
                    'Parameter <%s> for fin configuration is missing' % p)

        # Fin deflection limits, defaulting to +/- pi/2 when not configured
        self.fin_lower_limit = -np.pi / 2
        if 'lower_limit' in self.fin_config:
            self.fin_lower_limit = self.fin_config['lower_limit']

        self.fin_upper_limit = np.pi / 2
        if 'upper_limit' in self.fin_config:
            self.fin_upper_limit = self.fin_config['upper_limit']

        # BUGFIX: compare the resolved limits (defaults applied) instead of
        # indexing the config dict directly, which raised KeyError whenever the
        # optional 'lower_limit'/'upper_limit' keys were absent.
        if self.fin_lower_limit >= self.fin_upper_limit:
            raise rospy.ROSException('Fin angle limits are invalid')

        self.fins = dict()
        self.n_fins = 0

        if not self.find_actuators():
            raise rospy.ROSException('No thruster and/or fins found')

    def find_actuators(self):
        """Calculate the control allocation matrix, if one is not given.

        Looks up the thruster frame (mandatory) and fin frames numbered
        0..MAX_FINS-1 on TF, instantiating the corresponding models.
        Returns True when done.
        """
        self.ready = False
        rospy.loginfo('ControlAllocator: updating thruster poses')

        base = '%s/%s' % (self.namespace, self.base_link)

        frame = '%s/%s%d' % (self.namespace, self.thruster_config['frame_base'], 0)

        rospy.loginfo('Lookup: Thruster transform found %s -> %s' % (base, frame))

        trans = self.tf_buffer.lookup_transform(base, frame, rospy.Time(), rospy.Duration(1))
        pos = np.array([trans.transform.translation.x,
                        trans.transform.translation.y,
                        trans.transform.translation.z])
        quat = np.array([trans.transform.rotation.x,
                         trans.transform.rotation.y,
                         trans.transform.rotation.z,
                         trans.transform.rotation.w])

        rospy.loginfo('Thruster transform found %s -> %s' % (base, frame))
        rospy.loginfo('pos=' + str(pos))
        rospy.loginfo('rot=' + str(quat))

        # Build the thruster model from the transform just read
        self.thruster = Thruster.create_thruster(
            self.thruster_config['conversion_fcn'], 0,
            self.thruster_topic, pos, quat,
            **self.thruster_config['conversion_fcn_params'])

        # Fins are expected to be numbered consecutively; stop at the first
        # frame that cannot be resolved on the TF tree.
        for i in range(self.MAX_FINS):
            try:
                frame = '%s/%s%d' % (self.namespace, self.fin_config['frame_base'], i)
                rospy.loginfo('Lookup: Fin transform found %s -> %s' % (base, frame))
                trans = self.tf_buffer.lookup_transform(base, frame, rospy.Time(), rospy.Duration(1))
                pos = np.array([trans.transform.translation.x,
                                trans.transform.translation.y,
                                trans.transform.translation.z])
                quat = np.array([trans.transform.rotation.x,
                                 trans.transform.rotation.y,
                                 trans.transform.rotation.z,
                                 trans.transform.rotation.w])
                rospy.loginfo('Fin transform found %s -> %s' % (base, frame))
                rospy.loginfo('pos=' + str(pos))
                rospy.loginfo('quat=' + str(quat))
                fin_topic = '/%s/%s/%d/%s' % (self.namespace,
                    self.fin_config['topic_prefix'], i, self.fin_config['topic_suffix'])
                self.fins[i] = FinModel(
                    i,
                    pos,
                    quat,
                    fin_topic)
            except (tf2_ros.LookupException, tf2_ros.ConnectivityException, tf2_ros.ExtrapolationException):
                rospy.loginfo('Could not get transform from %s to %s ' % (base, frame))
                break

        self.n_fins = len(self.fins)
        rospy.loginfo('# fins found: %d' % self.n_fins)
        for i in range(self.n_fins):
            rospy.loginfo(i)
            rospy.loginfo(self.fins[i].pos)
            rospy.loginfo(self.fins[i].rot)

        self.ready = True
        return True

    def compute_control_force(self, thrust, delta, u):
        """Return the 6-DoF generalized force/torque of all actuators.

        thrust -- commanded thruster force (scalar)
        delta  -- per-fin deflection angles, indexable by fin index (rad)
        u      -- vehicle surge speed used in the lift model
        """
        actuator_model = self.thruster.tam_column.reshape((6, 1)) * thrust
        for i in self.fins:
            # Quadratic lift model: 0.5 * rho * CL * A * delta * u^2 (scalar)
            f_lift = (0.5 * self.fin_config['fluid_density'] *
                self.fin_config['lift_coefficient'] * self.fin_config['fin_area'] *
                delta[i] * u**2)

            tau = np.zeros(6)
            tau[0:3] = f_lift * self.fins[i].lift_vector
            # BUGFIX: the torque is r x F with F the lift force *vector*; the
            # previous code passed the scalar f_lift to np.cross, which raises
            # a ValueError in numpy.
            tau[3::] = np.cross(self.fins[i].pos, tau[0:3])

            # BUGFIX: reshape tau to a column so the in-place add keeps the
            # (6, 1) shape instead of failing to broadcast against it.
            actuator_model += tau.reshape((6, 1))
        return actuator_model

    def publish_commands(self, command):
        """Publish thruster command command[0] and fin commands command[1:]."""
        self.thruster.publish_command(command[0])
        for i in range(self.n_fins):
            self.fins[i].publish_command(command[i + 1])
<reponame>nccgroup/libptmalloc<gh_stars>10-100
# -*- coding: future_fstrings -*-
from __future__ import print_function
import argparse
import binascii
import struct
import sys
import logging
import pprint
import re
from libptmalloc.frontend import printutils as pu
from libptmalloc.frontend import helpers as h
from libptmalloc.frontend.commands.gdb import ptcmd
# Module-level logger; "trace" is a custom level added by the project's
# logging setup (presumably -- confirm in libptmalloc's logger configuration).
log = logging.getLogger("libptmalloc")
log.trace("ptconfig.py")

# This module only works inside GDB's embedded Python interpreter; bail out
# early with a recognizable exception when imported from a plain interpreter.
try:
    import gdb
except ImportError:
    print("Not running inside of GDB, exiting...")
    raise Exception("sys.exit()")
class ptconfig(ptcmd.ptcmd):
    """Command to manage ptmalloc configuration"""

    def __init__(self, ptm):
        """Register the "ptconfig" GDB command and build its argument parser.

        :param ptm: shared ptmalloc state object (glibc version, tcache flag,
                    target distribution/release, caches)
        """
        log.debug("ptconfig.__init__()")
        super(ptconfig, self).__init__(ptm, "ptconfig")

        self.parser = argparse.ArgumentParser(
            description="""Show/change ptmalloc configuration""",
            formatter_class=argparse.RawTextHelpFormatter,
            add_help=False,
            epilog="""E.g.
ptconfig
ptconfig -v 2.27
ptconfig -t off""")
        self.parser.add_argument(
            "-h", "--help", dest="help", action="store_true", default=False,
            help="Show this help"
        )
        self.parser.add_argument(
            "-v", "--version", dest="version", type=float, default=None,
            help="Change the glibc version manually (e.g. 2.27)"
        )
        self.parser.add_argument(
            "-t", "--tcache", dest="tcache", type=str, default=None,
            help="Enable or disable tcache (on/off)"
        )
        self.parser.add_argument(
            "-o", "--distribution", dest="distribution", type=str, default=None,
            help="Target OS distribution (e.g. debian, ubuntu, centos, photon)"
        )
        self.parser.add_argument(
            "-r", "--release", dest="release", type=str, default=None,
            help="Target OS release version (e.g. 10 for debian, 18.04 for ubuntu, 8 for centos, 3.0 for photon)"
        )
        # allows to enable a different log level during development/debugging
        self.parser.add_argument(
            "--loglevel", dest="loglevel", default=None,
            help=argparse.SUPPRESS
        )

    @staticmethod
    def set_distribution(ptm, distribution):
        """Record the target OS distribution on the ptmalloc state.

        Only "photon" has non-default glibc settings; any other value is
        ignored with a message.
        """
        if distribution != "photon":
            print("Distribution has default glibc settings, ignoring")
            return
        ptm.distribution = distribution

    @staticmethod
    def set_release(ptm, release):
        """Record the target OS release on the ptmalloc state.

        Only accepted for distributions/releases with non-default glibc
        settings (currently Photon OS 3.0); anything else is ignored.
        """
        if ptm.distribution == "photon":
            if release != "3.0":
                print("Release has default glibc settings for Photon OS, ignoring")
                return
        else:
            print("Unsupported distribution or has default glibc setttings, ignoring")
            return
        ptm.release = release

    @h.catch_exceptions
    @ptcmd.ptcmd.init_and_cleanup
    def invoke(self, arg, from_tty):
        """Inherited from gdb.Command
        See https://sourceware.org/gdb/current/onlinedocs/gdb/Commands-In-Python.html

        With arguments: update the configuration and invalidate cached state.
        Without arguments: display the current configuration.
        """
        log.debug("ptconfig.invoke()")

        updated = False
        # IDIOM FIX: compare against None with "is not", not "!="
        if self.args.version is not None:
            # NOTE(review): versions are compared as floats, so e.g. 2.3
            # compares greater than 2.26 -- fine for modern glibc but verify
            # against pre-2.10 targets.
            self.ptm.version = self.args.version
            # Resetting it
            if self.ptm.version >= 2.26:
                self.ptm.tcache_enabled = True
            else:
                self.ptm.tcache_enabled = False
            updated = True
        if self.args.tcache is not None:
            if self.args.tcache == "on":
                self.ptm.tcache_enabled = True
            elif self.args.tcache == "off":
                self.ptm.tcache_enabled = False
            else:
                print("Unsupported tcache value, only \"on\" and \"off\" are supported, ignoring")
            updated = True
        if self.args.distribution is not None:
            ptconfig.set_distribution(self.ptm, self.args.distribution)
            updated = True
        if self.args.release is not None:
            ptconfig.set_release(self.ptm, self.args.release)
            updated = True

        if updated:
            # Any configuration change invalidates the cached malloc state
            self.ptm.cache.mstate = None
            return

        # no argument specified: show the current configuration
        d = {}
        d["glibc version"] = self.ptm.version
        if self.ptm.tcache_enabled is True:
            d["tcache"] = "enabled"
        elif self.ptm.tcache_enabled is False:
            d["tcache"] = "disabled"
        if self.ptm.distribution is not None:
            d["distribution"] = self.ptm.distribution
        if self.ptm.release is not None:
            d["release"] = self.ptm.release
        for k, v in d.items():
            pu.print_header("{:<20}".format(k), end="")
            print(v)
|
"""Helper utility functions."""
import collections
def deep_update(source, overrides):
    """Recursively update a nested dictionary or similar mapping in place.

    Non-empty nested mappings are merged recursively, lists are concatenated
    (existing values first), and any other value overwrites the existing one.

    :param source: mapping to update; modified in place
    :param overrides: mapping whose entries are merged into ``source``
    :return: ``source``, for call-chaining convenience
    """
    # FIX: collections.Mapping was removed in Python 3.10; import the ABC from
    # collections.abc with a Python 2 fallback.
    try:
        from collections.abc import Mapping
    except ImportError:  # Python 2 fallback
        from collections import Mapping

    # FIX: .iteritems() is Python-2-only; .items() works on both versions.
    for key, value in overrides.items():
        if isinstance(value, Mapping) and value:
            source[key] = deep_update(source.get(key, {}), value)
        elif isinstance(value, list):
            source[key] = source.get(key, []) + value
        else:
            source[key] = value
    return source
# Column name -> value-type name for CADD (Combined Annotation Dependent
# Depletion) annotation columns.  Types are spelled as VCF-style type names
# ('String', 'Integer', 'Float'); presumably consumed when declaring or
# parsing per-variant annotation fields -- confirm against the caller.
cadd_columns = {
    'Chrom': 'String',
    'Pos': 'Integer',
    'Ref': 'String',
    'Anc': 'String',
    'Alt': 'String',
    'Type': 'String',
    'Length': 'Integer',
    'isTv': 'String',
    'isDerived': 'String',
    'AnnoType': 'String',
    'Consequence': 'String',
    'ConsScore': 'Integer',
    'ConsDetail': 'String',
    'GC': 'Float',
    'CpG': 'Float',
    'mapAbility20bp': 'Float',
    'mapAbility35bp': 'Float',
    'scoreSegDup': 'Float',
    'priPhCons': 'Float',
    'mamPhCons': 'Float',
    'verPhCons': 'Float',
    'priPhyloP': 'Float',
    'mamPhyloP': 'Float',
    'verPhyloP': 'Float',
    'GerpN': 'Float',
    'GerpS': 'Float',
    'GerpRS': 'Float',
    'GerpRSpval': 'Float',
    'bStatistic': 'Float',
    'mutIndex': 'Float',
    'dnaHelT': 'Float',
    'dnaMGW': 'Float',
    'dnaProT': 'Float',
    'dnaRoll': 'Float',
    'mirSVR-Score': 'Float',
    'mirSVR-E': 'Float',
    'mirSVR-Aln': 'Float',
    'targetScan': 'Float',
    'fitCons': 'Float',
    'cHmmTssA': 'Float',
    'cHmmTssAFlnk': 'Float',
    'cHmmTxFlnk': 'Float',
    'cHmmTx': 'Float',
    'cHmmTxWk': 'Float',
    'cHmmEnhG': 'Float',
    'cHmmEnh': 'Float',
    'cHmmZnfRpts': 'Float',
    'cHmmHet': 'Float',
    'cHmmTssBiv': 'Float',
    'cHmmBivFlnk': 'Float',
    'cHmmEnhBiv': 'Float',
    'cHmmReprPC': 'Float',
    'cHmmReprPCWk': 'Float',
    'cHmmQuies': 'Float',
    'EncExp': 'Float',
    'EncH3K27Ac': 'Float',
    'EncH3K4Me1': 'Float',
    'EncH3K4Me3': 'Float',
    'EncNucleo': 'Float',
    'EncOCC': 'Float',
    'EncOCCombPVal': 'Float',
    'EncOCDNasePVal': 'Float',
    'EncOCFairePVal': 'Float',
    'EncOCpolIIPVal': 'Float',
    'EncOCctcfPVal': 'Float',
    'EncOCmycPVal': 'Float',
    'EncOCDNaseSig': 'Float',
    'EncOCFaireSig': 'Float',
    'EncOCpolIISig': 'Float',
    'EncOCctcfSig': 'Float',
    'EncOCmycSig': 'Float',
    'Segway': 'String',
    'tOverlapMotifs': 'Float',
    'motifDist': 'Float',
    'motifECount': 'Float',
    'motifEName': 'String',
    'motifEHIPos': 'String',
    'motifEScoreChng': 'Float',
    'TFBS': 'Float',
    'TFBSPeaks': 'Float',
    'TFBSPeaksMax': 'Float',
    'isKnownVariant': 'String',
    'ESP_AF': 'Float',
    'ESP_AFR': 'Float',
    'ESP_EUR': 'Float',
    'TG_AF': 'Float',
    'TG_ASN': 'Float',
    'TG_AMR': 'Float',
    'TG_AFR': 'Float',
    'TG_EUR': 'Float',
    'minDistTSS': 'Integer',
    'minDistTSE': 'Integer',
    'GeneID': 'String',
    'FeatureID': 'String',
    'CCDS': 'String',
    'GeneName': 'String',
    'cDNApos': 'Float',
    'relcDNApos': 'Float',
    'CDSpos': 'Float',
    'relCDSpos': 'Float',
    'protPos': 'Float',
    'relProtPos': 'Float',
    'Domain': 'String',
    'Dst2Splice': 'Float',
    'Dst2SplType': 'String',
    'Exon': 'String',
    'Intron': 'String',
    'oAA': 'String',
    'nAA': 'String',
    'Grantham': 'Float',
    'PolyPhenCat': 'String',
    'PolyPhenVal': 'Float',
    'SIFTcat': 'String',
    'SIFTval': 'Float',
    'RawScore': 'Float',
    'PHRED': 'Float'
}
|
import frappe
from dateutil import parser
from frappe.model.rename_doc import rename_doc
from erpnext.buying.doctype.purchase_order.purchase_order import make_purchase_invoice
from dcl.inflow_import.stock import make_stock_entry
def truncate(f, n):
    """Truncate (or zero-pad) a float f to n decimal places without rounding.

    The float is first formatted to 12 decimal places, the fractional part is
    padded with zeros and cut to n digits, and the result parsed back.
    """
    whole, _, frac = ('%.12f' % f).partition('.')
    digits = (frac + '0' * n)[:n]
    return float(whole + '.' + digits)
#dcl.inflow_import.import_buy.start_import
def start_import(file):
import csv
import os
current_customer = ""
current_order = ""
SI_dict = {}
last_single_SI_dict = {}
SI_items = []
last_single_SI_items = []
paid_and_fulfilled_items = []
last_single_paid_and_fulfilled_items = []
fulfilled_items = []
last_single_fulfilled_items = []
paid_items = []
last_single_paid_items = []
paid_pi = {}
# input_file = csv.DictReader(open(os.path.dirname(os.path.abspath(__file__))+'/data/inFlow_PurchaseOrder_test.csv'))
input_file = csv.DictReader(open(os.path.dirname(os.path.abspath(__file__))+'/data/'+file))
# current_customer = input_file[0]["Customer"]
income_accounts = "5111 - Cost of Goods Sold - DCL"
# income_accounts = "Sales - J"
cost_centers = "Main - DCL"
# cost_centers = "Main - J"
rows = list(input_file)
total_paid = 0.0
last_single_total_paid = 0.0
# print rows
totalrows = len(rows) - 1
for i,row in enumerate(rows):
# print row
if row["Location"].strip():
if row["Location"].strip() == "DCL House, Plot 1299 Fumilayo Ransome Kuti Way, Area 3, PMB 690 Garki, Abuja":
to_warehouse = "DCLWarehouse - Abuja - DCL"
elif row[
"Location"].strip() == "DCL Laboratory Products Ltd, Plot 5 Block 4 Etal Avenue off Kudirat Abiola Way by NNPC Lagos NG - DCL":
to_warehouse = "Lagos Warehouse - DCL"
else:
to_warehouse = row["Location"].strip() + " - DCL"
else:
to_warehouse = ""
#make item non stock
item_code1 = row["ItemName"].strip()
frappe.db.sql("""UPDATE `tabItem` SET is_stock_item=1 WHERE item_code=%s""", (item_code1))
frappe.db.commit()
to_warehouse = "DCLWarehouse - Abuja - DCL"
if row["Location"].strip():
exists_cat = frappe.db.sql("""SELECT Count(*) FROM `tabWarehouse` WHERE warehouse_name=%s""", (row["Location"].strip()))
# print exists_cat, row["Location"]
if exists_cat[0][0] == 0:
item_code = row["Location"]
SI = frappe.get_doc({"doctype": "Warehouse",
"warehouse_name": item_code.strip()
})
SI_created = SI.insert(ignore_permissions=True)
frappe.db.commit()
item_code1 = row["ItemName"].strip()
# if row[
# "ItemName"] == "Kerosene stove, four burner pressure type for use with 39L autoclave / steriliser.\nSupplied specifically without top plate (ring) for use only with the autoclave / steam sterilizer.":
if "Kerosene stove, four burner pressure type for use with 39L autoclave / steriliser." in item_code1:
item_code1 = "Kerosene Stove"
exists_cat = frappe.db.sql("""SELECT Count(*) FROM `tabItem` WHERE item_code=%s""", (item_code1))
# print exists_cat
if exists_cat[0][0] == 0:
SI = frappe.get_doc({"doctype": "Item",
"item_code": item_code1,
"description": row["ItemDescription"],
# "item_group": row["Category"].strip() + " Category"
"item_group": "All Item Groups"
})
SI_created = SI.insert(ignore_permissions=True)
frappe.db.commit()
#CREATE SUPPLIER IF NOT EXISTS
exists_supplier = frappe.db.sql("""SELECT Count(*) FROM `tabSupplier` WHERE name=%s""",(row["Vendor"].strip()))
if exists_supplier[0][0] == 0:
frappe.get_doc({"doctype":"Supplier","supplier_name":row["Vendor"].strip(),
"supplier_group":"All Supplier Groups","supplier_type":"Company"}).insert()
frappe.db.commit()
if i==0:
current_customer = row["Vendor"].strip()
current_order = row["OrderNumber"]
dt = parser.parse(row["OrderDate"])
currency = ""
conversion_rate = 0.0
if float(row["ExchangeRate"]) != 0.0 and float(row["ExchangeRate"]) != 1.0:
currency = row["CurrencyCode"]
conversion_rate = float(row["ExchangeRate"])
elif float(row["ExchangeRate"]) == 0.0 or float(row["ExchangeRate"]) == 1.0:
currency = "NGN"
conversion_rate = 0.0
po_status = ""
if row["InventoryStatus"] == "Fulfilled" and row["PaymentStatus"] == "Paid":
po_status = "Completed"
elif row["InventoryStatus"] == "Unfulfilled" and row["PaymentStatus"] == "Paid":
po_status = "To Receive"
elif row["InventoryStatus"] == "Fulfilled" and row["PaymentStatus"] == "Unpaid":
po_status = "To Bill"
SI_dict = {"doctype": "Purchase Order",
"title": current_customer,
"supplier": current_customer,
"posting_date": dt.date(),
"schedule_date": dt.date(), # TODO + 30 days
"transaction_date": dt.date(),
# "due_date": row["DueDate"],
"po_status":po_status,
"due_date": dt.date(),
"items": SI_items,
# "docstatus": 1,
"outstanding_amount": total_paid,
"name": row["OrderNumber"],
"OrderDate":dt,
"inflow_remarks":row["OrderRemarks"],
"inflow_file":file,
"currency": currency,
"conversion_rate":conversion_rate
}
# print(current_customer,row["Vendor"],totalrows)
print " ",totalrows,i
if current_customer != row["Vendor"].strip() or current_customer != row["Vendor"].strip() \
or current_order!= row["OrderNumber"] or totalrows == i:
if totalrows == i and current_customer == row["Vendor"]:
print "LAST ROW!"
item_code1 = row["ItemName"].strip()
# if row[
# "ItemName"] == "Kerosene stove, four burner pressure type for use with 39L autoclave / steriliser.\nSupplied specifically without top plate (ring) for use only with the autoclave / steam sterilizer.":
if "Kerosene stove, four burner pressure type for use with 39L autoclave / steriliser." in item_code1:
item_code1 = "Kerosene Stove"
print row["ItemName"]
SI_item = {
# "item_code": installment.item, # test
"description": row["ItemDescription"].strip() or row["ItemName"],
"item_name": item_code1,
"item_code": item_code1,
# "rate": truncate(float(row["ItemSubtotal"]),2),
"rate": truncate(float(row["ItemUnitPrice"]),2),
"conversion_factor": 1,
"uom": "Nos",
"expense_account": income_accounts,
"cost_center": cost_centers,
"qty": float(row["ItemQuantity"]),
"received_qty": float(row["ItemQuantity"]),
# "warehouse":row["Location"].strip() +" - DCL",
"warehouse":to_warehouse,
"InventoryStatus":row["InventoryStatus"],
"PaymentStatus":row["PaymentStatus"],
"OrderDate":row["OrderDate"]
}
SI_items.append(SI_item)
if row["PaymentStatus"] == "Paid" and row["InventoryStatus"] == "Fulfilled":
paid_and_fulfilled_items.append({
# "item_code": installment.item, # test
"description": row["ItemDescription"] or row["ItemName"],
"item_name": item_code1,
"item_code": item_code1,
# "rate": truncate(float(row["ItemSubtotal"]),2),
"rate": truncate(float(row["ItemUnitPrice"]), 2),
"conversion_factor": 1,
"uom": "Nos",
"expense_account": income_accounts,
"cost_center": cost_centers,
"qty": row["ItemQuantity"],
# "warehouse": row["Location"].strip() + " - DCL",
"warehouse": to_warehouse,
"InventoryStatus": row["InventoryStatus"],
"PaymentStatus": row["PaymentStatus"]
})
if row["PaymentStatus"] == "Paid" and row["InventoryStatus"] != "Fulfilled":
paid_items.append({
# "item_code": installment.item, # test
"description": row["ItemDescription"] or row["ItemName"],
"item_name": item_code1,
"item_code": item_code1,
# "rate": truncate(float(row["ItemSubtotal"]),2),
"rate": truncate(float(row["ItemUnitPrice"]), 2),
"conversion_factor": 1,
"uom": "Nos",
"expense_account": income_accounts,
"cost_center": cost_centers,
"qty": row["ItemQuantity"],
# "warehouse": row["Location"].strip() + " - DCL",
"warehouse": to_warehouse,
"InventoryStatus": row["InventoryStatus"],
"PaymentStatus": row["PaymentStatus"]
})
if row["PaymentStatus"] != "Paid" and row["InventoryStatus"] == "Fulfilled":
fulfilled_items.append({
"description": row["ItemDescription"] or row["ItemName"],
"item_name": item_code1,
"item_code": item_code1,
"rate": truncate(float(row["ItemUnitPrice"]), 2),
"conversion_factor": 1,
"uom": "Nos",
"expense_account": income_accounts,
"cost_center": cost_centers,
"qty": row["ItemQuantity"],
# "warehouse": row["Location"].strip() + " - DCL",
"warehouse": to_warehouse,
"InventoryStatus": row["InventoryStatus"],
"PaymentStatus": row["PaymentStatus"]
})
total_paid += float(row["ItemSubtotal"])
elif totalrows == i:
print "LAST SINGLE ROW!"
item_code1 = row["ItemName"].strip()
# if row[
# "ItemName"] == "Kerosene stove, four burner pressure type for use with 39L autoclave / steriliser.\nSupplied specifically without top plate (ring) for use only with the autoclave / steam sterilizer.":
if "Kerosene stove, four burner pressure type for use with 39L autoclave / steriliser." in item_code1:
item_code1 = "Kerosene Stove"
last_single_SI_items.append({
# "item_code": installment.item, # test
"description": row["ItemDescription"].strip() or row["ItemName"],
"item_name": item_code1,
"item_code": item_code1,
# "rate": truncate(float(row["ItemSubtotal"]),2),
"rate": truncate(float(row["ItemUnitPrice"]), 2),
"conversion_factor": 1,
"uom": "Nos",
"expense_account": income_accounts,
"cost_center": cost_centers,
"qty": row["ItemQuantity"],
# "warehouse":row["Location"].strip() +" - DCL",
"warehouse": to_warehouse,
"InventoryStatus": row["InventoryStatus"],
"PaymentStatus": row["PaymentStatus"],
"OrderDate": row["OrderDate"]
})
print last_single_SI_items
last_single_SI_dict = {"doctype": "Purchase Order",
"title": current_customer,
"supplier": current_customer,
"posting_date": dt.date(),
"schedule_date": dt.date(), # TODO + 30 days
"transaction_date": dt.date(),
# "due_date": row["DueDate"],
"due_date": dt.date(),
"items": last_single_SI_items,
# "docstatus": 1,
"outstanding_amount": total_paid,
"name": row["OrderNumber"],
"OrderDate": dt,
"inflow_remarks": row["OrderRemarks"],
"currency": currency,
"conversion_rate": conversion_rate,
"inflow_file":file
}
if row["PaymentStatus"] == "Paid" and row["InventoryStatus"] == "Fulfilled":
last_single_paid_and_fulfilled_items.append({
# "item_code": installment.item, # test
"description": row["ItemDescription"] or row["ItemName"],
"item_name": item_code1,
"item_code": item_code1,
# "rate": truncate(float(row["ItemSubtotal"]),2),
"rate": truncate(float(row["ItemUnitPrice"]), 2),
"conversion_factor": 1,
"uom": "Nos",
"expense_account": income_accounts,
"cost_center": cost_centers,
"qty": row["ItemQuantity"],
# "warehouse": row["Location"].strip() + " - DCL",
"warehouse": to_warehouse,
"InventoryStatus": row["InventoryStatus"],
"PaymentStatus": row["PaymentStatus"]
})
if row["PaymentStatus"] == "Paid" and row["InventoryStatus"] != "Fulfilled":
last_single_paid_items.append({
# "item_code": installment.item, # test
"description": row["ItemDescription"] or row["ItemName"],
"item_name": item_code1,
"item_code": item_code1,
# "rate": truncate(float(row["ItemSubtotal"]),2),
"rate": truncate(float(row["ItemUnitPrice"]), 2),
"conversion_factor": 1,
"uom": "Nos",
"expense_account": income_accounts,
"cost_center": cost_centers,
"qty": row["ItemQuantity"],
# "warehouse": row["Location"].strip() + " - DCL",
"warehouse": to_warehouse,
"InventoryStatus": row["InventoryStatus"],
"PaymentStatus": row["PaymentStatus"]
})
if row["PaymentStatus"] != "Paid" and row["InventoryStatus"] == "Fulfilled":
last_single_fulfilled_items.append({
"description": row["ItemDescription"] or row["ItemName"],
"item_name": item_code1,
"item_code": item_code1,
"rate": truncate(float(row["ItemUnitPrice"]), 2),
"conversion_factor": 1,
"uom": "Nos",
"expense_account": income_accounts,
"cost_center": cost_centers,
"qty": row["ItemQuantity"],
# "warehouse": row["Location"].strip() + " - DCL",
"warehouse": to_warehouse,
"InventoryStatus": row["InventoryStatus"],
"PaymentStatus": row["PaymentStatus"]
})
last_single_total_paid += float(row["ItemSubtotal"])
SI_dict.update({"outstanding_amount":total_paid,
"inflow_file":file,
"per_received":100.0,
"per_billed":100.0
})
print SI_dict["items"]
SI = frappe.get_doc(SI_dict)
# print SI_dict
print(" CURRENT:",current_order,SI_dict["po_status"])
SI_created = SI.insert(ignore_permissions=True)
SI_created.submit()
"""
To Receive and Bill
To Bill
To Receive
Completed
"""
# print " PO Status: ",SI_dict["po_status"]
# if SI_dict["po_status"] == "To Receive and Bill":
# print "To Receive and Bill"
# SI_created.db_set("per_received", 100, update_modified=False)
# SI_created.db_set("per_billed", 100, update_modified=False)
# elif SI_dict["po_status"] == "To Receive":
# print "To Receive"
# SI_created.db_set("per_billed", 100, update_modified=False)
# if SI_dict["po_status"] == "To Bill":
# print "To Bill"
# SI_created.db_set("per_received", 100, update_modified=False)
# SI_created.status = SI_dict["po_status"]
frappe.db.commit()
#/home/jvfiel/frappe-v11/apps/erpnext/erpnext/buying/doctype/purchase_order/purchase_order.py
from erpnext.buying.doctype.purchase_order.purchase_order import update_status
#/home/jvfiel/frappe-v11/apps/frappe/frappe/model/rename_doc.py
rename_doc("Purchase Order",SI_created.name,current_order,force=True)
frappe.db.commit()
# update_status(SI_dict["po_status"], current_order)
# SI_created.set_status(update=True, status=SI_dict["po_status"])
#self.db_set('status', self.status, update_modified = update_modified)
# SI_created.db_set(fieldname='status',value=SI_dict['po_status'])
# frappe.db.sql("""UPDATE `tabPurchase Order` SET status=%s WHERE name=%s""",(SI_dict["po_status"],current_order),debug=1)
#self.db_set("per_received", flt(received_qty / total_qty) * 100, update_modified=False)
# frappe.db.commit()
print paid_and_fulfilled_items
if paid_and_fulfilled_items:
pi = make_purchase_invoice(current_order)
if to_warehouse:
pi.update_stock = 1
pi.is_paid = 1
pi.items = []
pi.posting_date = SI_dict['OrderDate'].date()
pi.posting_time = str(SI_dict['OrderDate'].time())
pi_total = 0.0
if float(SI_dict["conversion_rate"]) != 0.0 and float(SI_dict["conversion_rate"]) != 1.0:
pi.currency = SI_dict["currency"]
pi.conversion_rate = float(SI_dict["conversion_rate"])
elif float(SI_dict["conversion_rate"]) == 0.0 or float(SI_dict["conversion_rate"]) == 1.0:
pi.currency = "NGN"
pi.conversion_rate = None
zeros = []
for item in paid_and_fulfilled_items:
# if float(item["rate"]) < 0:
# zeros.append(item)
# else:
nl = pi.append('items', {})
nl.description = item["description"]
nl.item_name = item["item_name"]
nl.item_code = item["item_name"]
nl.rate = float(item["rate"])
# nl.base_rate = float(item["rate"])
nl.conversion_factor = item["conversion_factor"]
nl.uom = item["uom"]
nl.expense_account = item["expense_account"]
nl.cost_center = item["cost_center"]
nl.qty = float(item["qty"])
nl.warehouse = item["warehouse"]
nl.purchase_order = current_order
pi_total += float(nl.rate) * float(nl.qty)
print(nl.rate)
# if pi.items:
pi.set_posting_time = 1
pi.cash_bank_account = "Access Bank - DCL"
pi.taxes_and_charges = ""
pi.taxes = []
pi.inflow_file = file
print " ", paid_and_fulfilled_items
print " Paid and Fulfilled PI Total", pi_total,current_order,pi.currency
# print " ", pi.as_dict()["items"]
if pi_total:
pi.mode_of_payment = "Cash"
# if pi.conversion_rate:
# print "<<<<",pi.grand_total,">>>>"
# print "<<<<",pi.conversion_rate,">>>>"
# print "<<<<",pi.grand_total * pi.conversion_rate,">>>>"
pi.paid_amount = pi.grand_total
pi.base_paid_amount = pi.outstanding_amount
pi.insert()
pi.save()
frappe.db.commit()
pi.submit()
frappe.db.commit()
else:
for item in zeros:
make_stock_entry(item_code=item["item_code"], qty=item['qty'],
to_warehouse=item["warehouse"],
valuation_rate=1, remarks="This is affected by data import. " + file,
posting_date=pi.posting_date,
posting_time=pi.posting_time,
set_posting_time=1, inflow_file=file)
frappe.db.commit()
print "Stock entry created."
if paid_items:
pi = make_purchase_invoice(current_order)
# pi.update_stock = 1
pi.is_paid = 1
pi.items = []
pi.posting_date = SI_dict['OrderDate'].date()
pi.posting_time = str(SI_dict['OrderDate'].time())
pi_total = 0.0
if float(SI_dict["conversion_rate"]) != 0.0 and float(SI_dict["conversion_rate"]) != 1.0:
pi.currency = SI_dict["currency"]
pi.conversion_rate = float(SI_dict["conversion_rate"])
elif float(SI_dict["conversion_rate"]) == 0.0 or float(SI_dict["conversion_rate"]) == 1.0:
pi.currency = "NGN"
pi.conversion_rate = None
zeros = []
for item in paid_items:
nl = pi.append('items', {})
nl.description = item["description"]
nl.item_name = item["item_name"]
nl.item_code = item["item_name"]
nl.rate = float(item["rate"])
nl.conversion_factor = item["conversion_factor"]
nl.uom = item["uom"]
nl.expense_account = item["expense_account"]
nl.cost_center = item["cost_center"]
nl.qty = float(item["qty"])
nl.warehouse = item["warehouse"]
nl.purchase_order = current_order
pi_total += float(nl.rate) * float(nl.qty)
# if pi.items:
pi.set_posting_time = 1
pi.cash_bank_account = "Access Bank - DCL"
pi.taxes_and_charges = ""
pi.taxes = []
pi.inflow_file = file
print " Paid Items:", paid_items
print " Paid Items Only PI Total", pi_total,current_order,pi.currency
# print " ", pi.as_dict()["items"]
if pi_total:
pi.mode_of_payment = "Cash"
pi.insert()
frappe.db.commit()
if pi.currency != "NGN":
pi.paid_amount = pi.grand_total
pi.base_paid_amount = pi.outstanding_amount
pi.save()
frappe.db.commit()
pi.submit()
frappe.db.commit()
else:
pass
if fulfilled_items:
pi = make_purchase_invoice(current_order)
if to_warehouse:
pi.update_stock = 1
# pi.is_paid = 1
pi.items = []
pi.posting_date = SI_dict['OrderDate'].date()
pi.posting_time = str(SI_dict['OrderDate'].time())
pi_total = 0.0
if float(SI_dict["conversion_rate"]) != 0.0 and float(
SI_dict["conversion_rate"]) != 1.0:
pi.currency = SI_dict["currency"]
pi.conversion_rate = float(SI_dict["conversion_rate"])
elif float(SI_dict["conversion_rate"]) == 0.0 or float(
SI_dict["conversion_rate"]) == 1.0:
pi.currency = "NGN"
pi.conversion_rate = None
zeros = []
for item in fulfilled_items:
nl = pi.append('items', {})
nl.description = item["description"]
nl.item_name = item["item_name"]
nl.item_code = item["item_name"]
nl.rate = float(item["rate"])
nl.conversion_factor = item["conversion_factor"]
nl.uom = item["uom"]
nl.expense_account = item["expense_account"]
nl.cost_center = item["cost_center"]
nl.qty = float(item["qty"])
nl.received_qty = float(item["qty"])
nl.warehouse = item["warehouse"]
nl.purchase_order = current_order
pi_total += abs(float(nl.rate) * float(nl.qty))
# print nl.rate
# if pi.items:
pi.set_posting_time = 1
pi.cash_bank_account = "Access Bank - DCL"
pi.taxes_and_charges = ""
pi.taxes = []
pi.inflow_file = file
print " ", fulfilled_items
print " Fulfilled Items Only PI Total", pi_total, current_order, pi.currency
print " conversion rate", pi.conversion_rate
if pi_total:
pi.mode_of_payment = "Cash"
pi.insert()
frappe.db.commit()
if pi.currency != "NGN":
# pi.paid_amount = pi.grand_total
# pi.base_paid_amount = pi.outstanding_amount
pi.rounding_adjustment = 0.0
pi.disable_rounded_total = 1
pi.save()
frappe.db.commit()
pi.submit()
frappe.db.commit()
else:
pass
current_customer = row["Vendor"].strip()
current_order = row["OrderNumber"]
dt = parser.parse(row["OrderDate"])
SI_items = []
currency = ""
conversion_rate = 0.0
if float(row["ExchangeRate"]) != 0.0 and float(row["ExchangeRate"]) != 1.0:
currency = row["CurrencyCode"]
conversion_rate = float(row["ExchangeRate"])
elif float(row["ExchangeRate"]) == 0.0 or float(row["ExchangeRate"]) == 1.0:
currency = "NGN"
conversion_rate = 0.0
po_status = ""
if row["InventoryStatus"] == "Fulfilled" and row["PaymentStatus"] == "Paid":
po_status = "Completed"
elif row["InventoryStatus"] == "Unfulfilled" and row["PaymentStatus"] == "Paid":
po_status = "To Receive"
elif row["InventoryStatus"] == "Fulfilled" and row["PaymentStatus"] == "Unpaid":
po_status = "To Bill"
SI_dict = {"doctype": "Purchase Order",
"title": current_customer,
"supplier": current_customer,
"posting_date": dt.date(),
"schedule_date": dt.date(), # TODO + 30 days
"transaction_date": dt.date(),
# "due_date": row["DueDate"],
"po_status":po_status,
"due_date": dt.date(),
"items": SI_items,
# "docstatus": 1,
"outstanding_amount": total_paid,
"name": row["OrderNumber"],
"OrderDate":dt,
"inflow_remarks": row["OrderRemarks"],
"inflow_file": file,
"currency": currency,
"conversion_rate": conversion_rate
}
paid_items = []
fulfilled_items = []
paid_and_fulfilled_items = []
# else:
item_code1 = row["ItemName"].strip()
# if row[
# "ItemName"] == "Kerosene stove, four burner pressure type for use with 39L autoclave / steriliser.\nSupplied specifically without top plate (ring) for use only with the autoclave / steam sterilizer.":
if "Kerosene stove, four burner pressure type for use with 39L autoclave / steriliser." in item_code1:
item_code1 = "Kerosene Stove"
SI_item = {
# "item_code": installment.item, # test
"description": row["ItemDescription"].strip() or row["ItemName"],
"item_name": item_code1,
"item_code": item_code1,
# "warehouse": row["Location"].strip() +" - DCL",
"warehouse": to_warehouse,
"rate": float(row["ItemUnitPrice"]),
"conversion_factor":1,
"uom":"Nos",
"expense_account": income_accounts,
"cost_center": cost_centers,
"qty": float(row["ItemQuantity"]),
"received_qty": float(row["ItemQuantity"]),
"InventoryStatus": row["InventoryStatus"],
"PaymentStatus": row["PaymentStatus"],
"OrderDate":row["OrderDate"]
}
SI_items.append(SI_item)
if row["PaymentStatus"] == "Paid" and row["InventoryStatus"] == "Fulfilled":
paid_and_fulfilled_items.append({
# "item_code": installment.item, # test
"description": row["ItemDescription"] or row["ItemName"],
"item_name": item_code1,
"item_code": item_code1,
# "rate": truncate(float(row["ItemSubtotal"]),2),
"rate": truncate(float(row["ItemUnitPrice"]), 2),
"conversion_factor": 1,
"uom": "Nos",
"expense_account": income_accounts,
"cost_center": cost_centers,
"qty": row["ItemQuantity"],
# "warehouse": row["Location"].strip() + " - DCL",
"warehouse": to_warehouse,
"InventoryStatus": row["InventoryStatus"],
"PaymentStatus": row["PaymentStatus"]
})
if row["PaymentStatus"] == "Paid" and row["InventoryStatus"] != "Fulfilled":
paid_items.append({
# "item_code": installment.item, # test
"description": row["ItemDescription"] or row["ItemName"],
"item_name": item_code1,
"item_code": item_code1,
# "rate": truncate(float(row["ItemSubtotal"]),2),
"rate": truncate(float(row["ItemUnitPrice"]), 2),
"conversion_factor": 1,
"uom": "Nos",
"expense_account": income_accounts,
"cost_center": cost_centers,
"qty": row["ItemQuantity"],
# "warehouse": row["Location"].strip() + " - DCL",
"warehouse": to_warehouse,
"InventoryStatus": row["InventoryStatus"],
"PaymentStatus": row["PaymentStatus"]
})
if row["PaymentStatus"] != "Paid" and row["InventoryStatus"] == "Fulfilled":
fulfilled_items.append({
"description": row["ItemDescription"] or row["ItemName"],
"item_name": item_code1,
"item_code": item_code1,
"rate": truncate(float(row["ItemUnitPrice"]), 2),
"conversion_factor": 1,
"uom": "Nos",
"expense_account": income_accounts,
"cost_center": cost_centers,
"qty": row["ItemQuantity"],
# "warehouse": row["Location"].strip() + " - DCL",
"warehouse": to_warehouse,
"InventoryStatus": row["InventoryStatus"],
"PaymentStatus": row["PaymentStatus"]
})
total_paid +=float(row["ItemSubtotal"])
if last_single_SI_dict != {}:
print "* END *", current_order
print last_single_SI_dict["items"]
SI = frappe.get_doc(last_single_SI_dict)
# print SI_dict
SI_created = SI.insert(ignore_permissions=True)
frappe.db.commit()
SI_created.submit()
frappe.db.commit()
rename_doc("Purchase Order", SI_created.name, current_order, force=True)
frappe.db.commit()
if last_single_paid_and_fulfilled_items:
pi = make_purchase_invoice(current_order)
pi.update_stock = 1
pi.is_paid = 1
pi.items = []
pi.posting_date = SI_dict['OrderDate'].date()
pi.posting_time = str(SI_dict['OrderDate'].time())
pi_total = 0.0
if float(last_single_SI_dict["conversion_rate"]) != 0.0 and float(last_single_SI_dict["conversion_rate"]) != 1.0:
pi.currency = SI_dict["currency"]
pi.conversion_rate = float(SI_dict["conversion_rate"])
elif float(last_single_SI_dict["conversion_rate"]) == 0.0 or float(last_single_SI_dict["conversion_rate"]) == 1.0:
pi.currency = "NGN"
pi.conversion_rate = None
zeros = []
for item in last_single_paid_and_fulfilled_items:
# if float(item["rate"]) < 0:
# zeros.append(item)
# else:
nl = pi.append('items', {})
nl.description = item["description"]
nl.item_name = item["item_name"]
nl.item_code = item["item_name"]
nl.rate = float(item["rate"])
# nl.base_rate = float(item["rate"])
nl.conversion_factor = item["conversion_factor"]
nl.uom = item["uom"]
nl.expense_account = item["expense_account"]
nl.cost_center = item["cost_center"]
nl.qty = float(item["qty"])
nl.warehouse = item["warehouse"]
nl.purchase_order = current_order
pi_total += float(nl.rate) * float(nl.qty)
# if pi.items:
pi.set_posting_time = 1
pi.cash_bank_account = "Access Bank - DCL"
pi.taxes_and_charges = ""
pi.taxes = []
pi.inflow_file = file
# print " ", paid_and_fulfilled_items
print " Paid and Fulfilled PI Total", pi_total, current_order, pi.currency
# print " ", pi.as_dict()["items"]
if pi_total:
pi.mode_of_payment = "Cash"
# if pi.conversion_rate:
# print "<<<<",pi.grand_total,">>>>"
# print "<<<<",pi.conversion_rate,">>>>"
# print "<<<<",pi.grand_total * pi.conversion_rate,">>>>"
pi.paid_amount = pi.grand_total
pi.base_paid_amount = pi.outstanding_amount
pi.insert()
pi.save()
frappe.db.commit()
pi.submit()
frappe.db.commit()
else:
for item in zeros:
make_stock_entry(item_code=item["item_code"], qty=item['qty'],
to_warehouse=item["warehouse"],
valuation_rate=1, remarks="This is affected by data import. " + file,
posting_date=pi.posting_date,
posting_time=pi.posting_time,
set_posting_time=1, inflow_file=file)
frappe.db.commit()
print "Stock entry created."
if last_single_paid_items:
pi = make_purchase_invoice(current_order)
# pi.update_stock = 1
pi.is_paid = 1
pi.items = []
pi.posting_date = last_single_SI_dict['OrderDate'].date()
pi.posting_time = str(last_single_SI_dict['OrderDate'].time())
pi_total = 0.0
if float(last_single_SI_dict["conversion_rate"]) != 0.0 and float(last_single_SI_dict["conversion_rate"]) != 1.0:
pi.currency = last_single_SI_dict["currency"]
pi.conversion_rate = float(last_single_SI_dict["conversion_rate"])
elif float(last_single_SI_dict["conversion_rate"]) == 0.0 or float(last_single_SI_dict["conversion_rate"]) == 1.0:
pi.currency = "NGN"
pi.conversion_rate = None
zeros = []
for item in last_single_paid_items:
nl = pi.append('items', {})
nl.description = item["description"]
nl.item_name = item["item_name"]
nl.item_code = item["item_name"]
nl.rate = float(item["rate"])
nl.conversion_factor = item["conversion_factor"]
nl.uom = item["uom"]
nl.expense_account = item["expense_account"]
nl.cost_center = item["cost_center"]
nl.qty = float(item["qty"])
nl.warehouse = item["warehouse"]
nl.purchase_order = current_order
pi_total += float(nl.rate) * float(nl.qty)
# if pi.items:
pi.set_posting_time = 1
pi.cash_bank_account = "Access Bank - DCL"
pi.taxes_and_charges = ""
pi.taxes = []
pi.inflow_file = file
# print " ", paid_items
print " Paid Items Only PI Total", pi_total, current_order, pi.currency
# print " ", pi.as_dict()["items"]
if pi_total:
pi.mode_of_payment = "Cash"
pi.insert()
frappe.db.commit()
if pi.currency != "NGN":
pi.paid_amount = pi.grand_total
pi.base_paid_amount = pi.outstanding_amount
pi.save()
frappe.db.commit()
pi.submit()
frappe.db.commit()
else:
pass
if last_single_fulfilled_items:
pi = make_purchase_invoice(current_order)
pi.update_stock = 1
# pi.is_paid = 1
pi.items = []
pi.posting_date = last_single_SI_dict['OrderDate'].date()
pi.posting_time = str(last_single_SI_dict['OrderDate'].time())
pi_total = 0.0
if float(last_single_SI_dict["conversion_rate"]) != 0.0 and float(
last_single_SI_dict["conversion_rate"]) != 1.0:
pi.currency = last_single_SI_dict["currency"]
pi.conversion_rate = float(last_single_SI_dict["conversion_rate"])
elif float(last_single_SI_dict["conversion_rate"]) == 0.0 or float(
last_single_SI_dict["conversion_rate"]) == 1.0:
pi.currency = "NGN"
pi.conversion_rate = None
zeros = []
for item in last_single_fulfilled_items:
nl = pi.append('items', {})
nl.description = item["description"]
nl.item_name = item["item_name"]
nl.item_code = item["item_name"]
nl.rate = float(item["rate"])
nl.conversion_factor = item["conversion_factor"]
nl.uom = item["uom"]
nl.expense_account = item["expense_account"]
nl.cost_center = item["cost_center"]
nl.qty = float(item["qty"])
nl.warehouse = item["warehouse"]
nl.purchase_order = current_order
pi_total += float(nl.rate) * float(nl.qty)
# if pi.items:
pi.set_posting_time = 1
pi.cash_bank_account = "Access Bank - DCL"
pi.taxes_and_charges = ""
pi.taxes = []
pi.inflow_file = file
# print " ", paid_items
print " Paid Items Only PI Total", pi_total, current_order, pi.currency
# print " ", pi.as_dict()["items"]
if pi_total:
pi.mode_of_payment = "Cash"
pi.insert()
frappe.db.commit()
if pi.currency != "NGN":
pi.paid_amount = pi.grand_total
pi.base_paid_amount = pi.outstanding_amount
pi.save()
frappe.db.commit()
pi.submit()
frappe.db.commit()
else:
pass
None
def remove_imported_data(file):
    """Cancel and delete every Purchase Invoice and Purchase Order created
    from the given inflow import file.

    Parameters
    ----------
    file : str
        Value of the ``inflow_file`` field used to tag imported documents.
    """
    def _cancel_and_delete(doctype):
        # One shared loop for both doctypes (previously duplicated verbatim).
        # NOTE: params are passed as a real one-element tuple; the original
        # `(file)` is just a parenthesised string, which frappe tolerates but
        # is ambiguous and fragile.
        rows = frappe.db.sql(
            """SELECT name FROM `tab{0}` WHERE inflow_file=%s""".format(doctype),
            (file,))
        for row in rows:
            doc = frappe.get_doc(doctype, row[0])
            # Submitted documents (docstatus == 1) must be cancelled before
            # they can be deleted.
            if doc.docstatus == 1:
                doc.cancel()
            doc.delete()

    # Invoices reference orders, so remove them first.
    _cancel_and_delete("Purchase Invoice")
    _cancel_and_delete("Purchase Order")
#!/usr/bin/env python
"""
.. See the NOTICE file distributed with this work for additional information
regarding copyright ownership.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from __future__ import print_function
from basic_modules.metadata import Metadata
from basic_modules.workflow import Workflow
from tools_demos.simpleTool1 import SimpleTool1
from tools_demos.simpleTool2 import SimpleTool2
from utils import remap
from utils import logger
class SimpleWorkflow(Workflow):  # pylint: disable=too-few-public-methods
    """
    Simple example of Workflow using PyCOMPSs, called using an App.

    - SimpleTool1:
      reads an integer from a file, increments it, and writes it to file
    - SimpleTool2:
      reads two integers from two file and writes their sum to file
    - SimpleWorkflow:
      implements the following workflow:

          1           2
          |           |
     SimpleTool1  SimpleTool1
          |           |
          +-----.-----+
                |
           SimpleTool2
                |
                3

    Where 1 and 2 are inputs, 3 is the output, Tool1 and Tool2 are the
    SimpleTool1 and SimpleTool2 defined above.

    The "main()" uses the WorkflowApp to launch SimpleWorkflow in order to
    unstage intermediate outputs.
    """

    configuration = {}

    def __init__(self, configuration=None):
        """
        Initialise the tool with its configuration.

        Parameters
        ----------
        configuration : dict
            a dictionary containing parameters that define how the operation
            should be carried out, which are specific to each Tool.
        """
        if configuration is None:
            configuration = {}
        # BUG FIX: the previous code called self.configuration.update(...),
        # which mutated the *class-level* `configuration` dict and therefore
        # leaked settings between instances. Build a per-instance copy.
        self.configuration = dict(self.configuration)
        self.configuration.update(configuration)

    def run(self, input_files, metadata, output_files):
        """Run the two-tool workflow.

        Parameters
        ----------
        input_files : dict
            Must contain exactly the roles "number1" and "number2".
        metadata : dict
            Metadata for the same two roles.
        output_files : dict
            Output file mapping handed to the final tool.

        Returns
        -------
        tuple
            (output_files, output_metadata) from SimpleTool2, or ({}, {})
            if any tool run fails.
        """
        logger.info("\t0. perform checks")
        assert len(input_files) == 2
        assert len(metadata) == 2

        logger.info("\t1.a Instantiate Tool 1 and run")
        simple_tool1 = SimpleTool1(self.configuration)
        try:
            output1, outmd1 = simple_tool1.run(
                # Use remap to convert role "number1" to "input" for simpleTool1
                remap(input_files, input="number1"),
                remap(metadata, input="number1"),
                # Use a temporary file name for intermediate outputs
                {"output": 'file1.out'})
        except Exception as err:  # pylint: disable=broad-except
            logger.fatal("Tool 1, run 1 failed: {}", err)
            return {}, {}
        logger.progress(50)  # out of 100

        logger.info("\t1.b (Instantiate Tool) and run")
        try:
            output2, outmd2 = simple_tool1.run(
                # Use remap to convert role "number2" to "input" for simpleTool1
                remap(input_files, input="number2"),
                remap(metadata, input="number2"),
                # Use a temporary file name for intermediate outputs
                {"output": 'file2.out'})
        except Exception as err:  # pylint: disable=broad-except
            logger.fatal("Tool 1, run 2 failed: {}", err)
            return {}, {}
        logger.progress(75)  # out of 100

        logger.info("\t2. Instantiate Tool and run")
        simple_tool2 = SimpleTool2(self.configuration)
        try:
            output3, outmd3 = simple_tool2.run(
                # Instead of using remap, here we re-build dicts to convert input roles
                {"input1": output1["output"], "input2": output2["output"]},
                {"input1": outmd1["output"], "input2": outmd2["output"]},
                # Workflow output files are from this Tool
                output_files)
        except Exception as err:  # pylint: disable=broad-except
            logger.fatal("Tool 2 failed: {}", err)
            return {}, {}
        logger.progress(100)  # out of 100

        logger.info("\t4. Optionally edit the output metadata")
        logger.info("\t5. Return")
        return output3, outmd3
# -----------------------------------------------------------------------------
def main(input_files, input_metadata, output_files):
    """
    Main function
    -------------
    This function launches the app.
    """
    logger.info("1. Instantiate and launch the App")
    # Imported here (function scope) rather than at module level, as in the
    # original code.
    from apps.workflowapp import WorkflowApp

    result = WorkflowApp().launch(
        SimpleWorkflow, input_files, input_metadata, output_files, {})

    logger.info("2. Execution finished")
    return result
def main_json():
    """
    Alternative main function
    -------------
    Launches the app using configuration read from two json files:
    config.json and input_metadata.json.
    """
    logger.info("1. Instantiate and launch the App")
    # Function-scope import, matching the pattern used by main().
    from apps.jsonapp import JSONApp

    result = JSONApp().launch(
        SimpleWorkflow,
        "tools_demos/config.json",
        "tools_demos/input_metadata.json",
        "/tmp/results.json")

    logger.info("2. Execution finished; see /tmp/results.json")
    return result
if __name__ == "__main__":
    # The code that used to live inline here was moved into main()
    # (reportedly for performance reasons).
    INPUT_FILE_1 = "/tmp/file1"
    INPUT_FILE_2 = "/tmp/file2"
    OUTPUT_FILE = "/tmp/outputFile"

    # The VRE has to prepare the data to be processed; for testing purposes
    # we create the two input files ourselves.
    logger.info("1. Create some data: 2 input files")
    for path, value in ((INPUT_FILE_1, "5"), (INPUT_FILE_2, "9")):
        with open(path, "w") as handle:
            handle.write(value)
    logger.info("\t* Files successfully created")

    # Maybe it is necessary to prepare a metadata parser from json file
    # when building the Metadata objects.
    INPUT_METADATA_F1 = Metadata("Number", "plainText")
    INPUT_METADATA_F2 = Metadata("Number", "plainText")

    main(
        {"number1": INPUT_FILE_1, "number2": INPUT_FILE_2},
        {"number1": INPUT_METADATA_F1, "number2": INPUT_METADATA_F2},
        {"output": OUTPUT_FILE})
    main_json()
|
"""Utils for loading and adding annotations to the data"""
import ast
import logging
import msgpack
import os
import pandas as pd
from functools import lru_cache
from gensim.corpora import Dictionary
from gensim.matutils import corpus2csc
from spacy.tokens import Doc
from tqdm import tqdm
from src import HOME_DIR
from src.utils.spacy import nlp, apply_extensions
from src.utils.wiki2vec import lookup_entity
# Module-level logger, named after this module.
logger = logging.getLogger(__name__)
# Unbounded memoizer used as a decorator on methods below.
# NOTE(review): lru_cache on instance methods keys on `self` and keeps the
# instances alive for the process lifetime — acceptable only if the decorated
# objects are effectively singletons; confirm.
cache = lru_cache(maxsize=None)
def _load_spacy():
    """Load the serialized spacy vocab and docs from disk.

    Restores the shared ``nlp.vocab`` from the msgpack payload as a side
    effect.

    Returns
    -------
    dict or None
        Maps doc id to bytes for each serialized spacy doc, or None when
        no serialized data exists on disk.
    """
    spacy_path = os.path.join(HOME_DIR, 'data/processed/spacy')
    # Guard clause: nothing serialized yet.
    if not os.path.exists(spacy_path):
        # logger.warn is deprecated; use logger.warning.
        logger.warning('No serialized Spacy found')
        return None
    with open(spacy_path, 'rb') as f:
        m = msgpack.load(f)
    # msgpack keys come back as bytes, hence b'vocab' / b'docs'.
    nlp.vocab.from_bytes(m[b'vocab'])
    return m[b'docs']
class Paragraph:
    """A single paragraph of a speech in the corpus.

    Parameters
    ----------
    row : pd.Series
        The row of data referring to this paragraph.
    parent : src.corpus.Speech
        The Speech object that contains this paragraph.
    """

    def __init__(self, row, parent):
        self.row = row
        self.speech = parent
        self.index = row.paragraph_index
        self.id_ = row.paragraph_id

    def spacy_doc(self):
        """Return this paragraph's spacy sub-doc via the parent speech."""
        parent_doc = self.speech.spacy_doc()
        return parent_doc._.paragraphs[self.index]

    def session(self):
        """Session number recorded on the underlying row."""
        return self.row.session

    def year(self):
        """Year recorded on the underlying row."""
        return self.row.year

    def country_code(self):
        """Country code recorded on the underlying row."""
        return self.row.country

    def country(self):
        """Country name recorded on the underlying row."""
        return self.row.country_name
class Speech:
    """A speech from the corpus.

    The serialized Spacy doc is lazy-loaded on first access and cached on
    the instance.

    Parameters
    ----------
    group : pd.DataFrame
        The subset of rows/paragraphs that belong to this speech.
    spacy_bytes : bytes, optional
        Serialized spacy doc.
    """

    def __init__(self, group, spacy_bytes=None):
        self.id_ = group.document_id.unique()[0]
        self._spacy_bytes = spacy_bytes
        self.group = group
        self.paragraphs = [
            Paragraph(row, self)
            for _, row in group.iterrows()
        ]
        # Per-instance cache for the deserialized doc. The previous
        # implementation decorated spacy_doc() with a module-level
        # lru_cache, which keys on `self` and therefore keeps every Speech
        # (and its deserialized doc) alive for the process lifetime.
        self._spacy_doc = None

    def spacy_doc(self):
        """Deserialize (once) and return the spacy Doc for this speech.

        Raises
        ------
        FileNotFoundError
            If no serialized spacy bytes were provided for this speech.
        """
        if self._spacy_bytes is None:
            raise FileNotFoundError('No serialized Spacy found')
        if self._spacy_doc is None:
            doc = apply_extensions(Doc(nlp.vocab).from_bytes(self._spacy_bytes))
            # Sanity check: one spacy paragraph per dataframe row.
            assert len(doc._.paragraphs) == len(self.paragraphs)
            self._spacy_doc = doc
        return self._spacy_doc

    def session(self):
        """Session number shared by all rows of this speech."""
        return self.group.session.iloc[0]

    def year(self):
        """Year shared by all rows of this speech."""
        return self.group.year.iloc[0]

    def country_code(self):
        """Country code shared by all rows of this speech."""
        return self.group.country.iloc[0]

    def country(self):
        """Country name shared by all rows of this speech."""
        return self.group.country_name.iloc[0]
class Corpus:
    """UN General Debate Corpus

    Loads the paragraph-level debates dataframe and builds Speech/Paragraph
    wrapper objects plus id-based lookup tables.
    """
    def __init__(self, filename='data/processed/debates_paragraphs.csv'):
        # Keep the path so add_dataframe_column can write updates back.
        self.filename = filename
        self._load(filename)
    def _load(self, filename):
        """Read the csv, parse stored columns, and build wrapper objects."""
        debates = pd.read_csv(os.path.join(HOME_DIR, filename))
        # bag_of_words is stored in the csv as a stringified Python literal.
        debates.bag_of_words = debates.bag_of_words.apply(ast.literal_eval)
        self.debates = debates
        # May be None when no serialized spacy data exists on disk.
        spacy = _load_spacy()
        # Ensure the following two lists are sorted in the same order as the
        # debates df.
        self.speeches = [
            Speech(
                group,
                # pop() also frees the serialized bytes map as we go.
                spacy.pop(id_) if spacy else None)
            for id_, group in debates.groupby('document_id')
        ]
        self.paragraphs = [par for sp in self.speeches for par in sp.paragraphs]
        # Sanity check: paragraph order must match the dataframe row order.
        for par_id_from_df, par in zip(debates.paragraph_id, self.paragraphs):
            assert par_id_from_df == par.id_
        # Id-based lookup tables for O(1) access.
        self.speech_id_to_speech = {
            sp.id_: sp for sp in self.speeches}
        self.paragraph_id_to_paragraph = {
            par.id_: par for par in self.paragraphs}
    def paragraph(self, id_):
        """Get a paragraph by id"""
        return self.paragraph_id_to_paragraph[id_]
    def speech(self, id_):
        """Get a speech by id"""
        return self.speech_id_to_speech[id_]
    def add_dataframe_column(self, column):
        """Add column to the dataframe

        Add a column to the corpus dataframe and save it so that it loads next
        time. Useful for adding paragraph level annotations that don't
        necessarily make sense as a Spacy extension.

        Parameters
        ----------
        column : pd.Series
            New column to append to the corpus dataframe. Should be named.
        """
        self.debates = pd.concat([self.debates, column], axis=1)
        # Persist immediately so the annotation is available on next load.
        self.debates.to_csv(self.filename, index=False)
    @cache
    def corpus_entity_matrix(self):
        """Build a sparse entity-count matrix over all paragraphs.

        Returns the corpus2csc sparse matrix together with the gensim
        Dictionary mapping entity titles to matrix rows.
        NOTE(review): `@cache` (lru_cache) on an instance method keeps the
        Corpus instance alive for the cache's lifetime — acceptable if the
        corpus is a singleton; confirm.
        """
        paragraph_entities = []
        for paragraph in tqdm(self.paragraphs):
            paragraph_entity = []
            # Link each noun chunk to a wiki2vec entity when possible.
            for nc in paragraph.spacy_doc().noun_chunks:
                entity = lookup_entity(nc)
                if entity:
                    paragraph_entity.append(entity.title)
            paragraph_entities.append(paragraph_entity)
        dictionary = Dictionary(paragraph_entities)
        # Convert to bag-of-entities, then to a sparse matrix.
        paragraph_entities = [dictionary.doc2bow(p) for p in paragraph_entities]
        return corpus2csc(paragraph_entities), dictionary
    def load_spacy_cache(self):
        """Convenience function for doing all of the spacy loading upfront."""
        for sp in tqdm(self.speeches):
            sp.spacy_doc()
|
<gh_stars>10-100
import json
import pandas as pd
import hashlib
import os
# mapping from: https://pbpython.com/pandas_dtypes.html
# -> https://gitlab.datadrivendiscovery.org/MIT-LL/d3m_data_supply/blob/shared/schemas/datasetSchema.json
# Map pandas dtype names to D3M column type names (see the mapping sources
# referenced above).
DTYPES = {
    'int64': 'integer',
    'float64': 'real',
    'bool': 'boolean',
    'object': 'string',
    'datetime64': 'dateTime',
    'category': 'categorical'
}
# File extensions recognized as delimited/spreadsheet tabular resources.
DSV_EXTENSIONS = ['.csv', '.tsv', '.xlsx', '.xls']
# Schema versions written into the generated dataset/problem documents.
DATASET_SCHEMA_VERSION = '4.0.0'
PROBLEM_SCHEMA_VERSION = '4.0.0'
def d3m_wrap_dataset(outputDir, dataPaths, about, problem):
    """Wrap raw tabular files into a D3M-formatted TRAIN dataset + problem.

    Creates ``<outputDir>/<datasetID>/TRAIN/dataset_TRAIN`` and
    ``.../problem_TRAIN``, copies each tabular resource in as csv, and writes
    ``datasetDoc.json`` / ``problemDoc.json`` describing them.

    Parameters
    ----------
    outputDir : str
        Directory under which the dataset folder structure is created.
    dataPaths : list of str
        Paths to the raw data files to wrap.
    about : dict
        Dataset metadata; must contain 'datasetName'.
    problem : dict or None
        Optional problem spec (targets, metrics, taskKeywords, time, ...).
    """
    problem = problem or {}
    targets = problem.get('targets', [])

    datasetID = about['datasetName'].replace(' ', '_')
    datasetPath = os.path.join(outputDir, datasetID)
    datasetDir = os.path.join(datasetPath, 'TRAIN', 'dataset_TRAIN')
    problemDir = os.path.join(datasetPath, 'TRAIN', 'problem_TRAIN')
    os.makedirs(datasetDir, exist_ok=True)
    os.makedirs(problemDir, exist_ok=True)

    # construct a mapping from relative output paths to input paths
    outDataPaths = {}
    for dataPath in dataPaths:
        offset = 1
        if os.path.splitext(dataPath)[1] in DSV_EXTENSIONS:
            # TODO: disable this line once paths aren't hardcoded to 'learningData'
            filename = 'learningData'
            candidateName = os.path.join('tables', filename + '.csv')
            while candidateName in outDataPaths:
                # Disambiguate duplicates with a numeric suffix.
                offset += 1
                filename, extension = os.path.splitext(os.path.basename(dataPath))
                # BUG FIX: offset must be stringified; the previous
                # `filename + offset` raised TypeError on any collision.
                candidateName = os.path.join('tables', filename + str(offset) + '.csv')
            outDataPaths[candidateName] = dataPath

    def infer_roles(column_name):
        # Assign D3M column roles from the column name and the problem spec.
        roles = []
        if column_name == 'd3mIndex':
            roles.append('index')
        elif column_name in targets:
            roles.append('suggestedTarget')
        else:
            roles.append('attribute')
        if column_name in problem.get('time', []):
            roles.append('timeIndicator')
        return roles

    targetConfigs = []
    resourceConfigs = []
    # individually load, index, analyze, and save each dataset
    for outDataPath in outDataPaths:
        data = d3m_load_resource(outDataPaths[outDataPath])
        if isinstance(data, pd.DataFrame):
            resourceID = os.path.splitext(os.path.basename(outDataPath))[0]
            # Analyze the columns once; the result is reused both for target
            # detection and for the resource's 'columns' entry (previously
            # the same structure was computed twice).
            columnConfigs = []
            for colIndex, (colName, colType) in enumerate(zip(data.columns.values, data.dtypes)):
                columnConfig = {
                    'colIndex': colIndex,
                    'colName': colName,
                    'colType': DTYPES.get(str(colType), None) or 'unknown',
                    'role': infer_roles(colName)
                }
                columnConfigs.append(columnConfig)
                if columnConfig['role'][0] == 'suggestedTarget':
                    targetConfigs.append({
                        'resID': resourceID,
                        'colIndex': colIndex,
                        'colName': colName
                    })
            resourceConfigs.append({
                'resID': resourceID,
                'resPath': outDataPath,
                'resType': 'table',
                'resFormat': {
                    "text/csv": [
                        "csv"
                    ]
                },
                'isCollection': False,
                'columns': columnConfigs
            })

            # TODO: one splitfile is repeatedly overwritten
            with open(os.path.join(problemDir, 'dataSplits.json'), 'w') as splitFile:
                splitFile.write('d3mIndex,type,repeat,fold\n')
                for i in range(len(data)):
                    splitFile.write(str(i) + ',TRAIN,0,0' + '\n')

            fullDataPath = os.path.join(datasetDir, outDataPath)
            os.makedirs(os.path.dirname(fullDataPath), exist_ok=True)
            data.to_csv(fullDataPath, index=False)

    # write dataset config
    with open(os.path.join(datasetDir, 'datasetDoc.json'), 'w') as datasetDoc:
        datasetDoc.write(json.dumps({
            # caller-supplied `about` values override the generated defaults
            'about': {**{
                'datasetID': datasetID,
                'datasetSchemaVersion': DATASET_SCHEMA_VERSION,
                'redacted': True,
                'digest': hashlib.sha256(about['datasetName'].encode()).hexdigest()
            }, **about},
            'dataResources': resourceConfigs
        }, indent=4))

    # write problem
    with open(os.path.join(problemDir, 'problemDoc.json'), 'w') as problemDoc:
        problemID = problem.get('problemName', datasetID + '_problem_TRAIN')
        problemDoc.write(json.dumps({
            'about': {
                'problemID': problemID,
                'problemName': problem.get('problemName', about['datasetName'] + ' problem'),
                'taskKeywords': problem.get('taskKeywords', ['classification']),
                'problemSchemaVersion': PROBLEM_SCHEMA_VERSION,
                'problemVersion': '1.0'
            },
            'inputs': {
                'data': [{
                    'datasetID': datasetID,
                    'targets': [
                        {**{'targetIndex': targetIndex}, **target} for targetIndex, target in enumerate(targetConfigs)
                    ]
                }],
                'dataSplits': problem.get('dataSplits', {
                    "method": "holdOut",
                    "testSize": 0.35,
                    "stratified": False,
                    "numRepeats": 0,
                    "splitsFile": "dataSplits.csv"
                }),
                'performanceMetrics': [
                    {'metric': metric} for metric in problem.get('metrics', ['rootMeanSquaredError'])
                ],
                "expectedOutputs": {
                    "predictionsFile": "predictions.csv"
                }
            }
        }, indent=4))
def d3m_load_resource(path):
    """Load one tabular resource and guarantee a leading d3mIndex column.

    Returns a DataFrame for csv/tsv/xlsx/xls files, or None for any other
    extension.
    """
    if path.endswith('.csv'):
        frame = pd.read_csv(path, low_memory=False)
    elif path.endswith('.tsv'):
        frame = pd.read_csv(path, delimiter='\t', low_memory=False)
    elif os.path.splitext(path)[1] in ['.xlsx', '.xls']:
        frame = pd.read_excel(path)
    else:
        return None
    # D3M requires a row-index column; synthesize one when absent.
    if 'd3mIndex' not in frame:
        frame.insert(0, 'd3mIndex', range(len(frame)))
    return frame
|
<filename>lib/utils/net_utils.py<gh_stars>100-1000
import torch
from torch import nn
from easydict import EasyDict
import os
from tensorboardX import SummaryWriter
import torchvision.utils as vutils
import numpy as np
class History:
    """Abstract interface for recording and plotting training statistics."""

    def load_dict(self, *args):
        """Copy recorded state from another history object."""
        raise NotImplementedError()

    def plot(self):
        """Render the recorded curves."""
        raise NotImplementedError()

    def update(self, *args):
        """Record a new set of values."""
        raise NotImplementedError()
class LossHistory(History):
    """Per-epoch train/dev history of loss, accuracy, bounding accuracy,
    and shrink values, with a combined matplotlib plot.

    NOTE(review): plot() uses `plt`, which is not imported anywhere in this
    module — calling plot() as written raises NameError. Confirm that
    `matplotlib.pyplot as plt` is imported at runtime, or add the import.
    """

    def __init__(self):
        # One list per split ('train'/'dev') for each tracked statistic.
        self.losses = {'train': [], 'dev': []}
        self.accs = {'train': [], 'dev': []}
        self.bounding_accs = {'train': [], 'dev': []}
        self.shrink = {'train': [], 'dev': []}

    def load_dict(self, other):
        # Adopt (by reference, not copy) the series from another history.
        self.losses = other.losses
        self.accs = other.accs
        self.bounding_accs = other.bounding_accs
        self.shrink = other.shrink

    def plot(self):
        # Unpack all tracked series; epoch count is inferred from the
        # length of the training loss series.
        train_loss, train_acc, dev_loss, dev_acc, train_bound_acc, dev_bound_acc = \
            self.losses['train'], self.accs['train'], self.losses['dev'], self.accs['dev'], self.bounding_accs['train'], \
            self.bounding_accs['dev']
        train_shrink=self.shrink['train']
        dev_shrink=self.shrink['dev']
        epochs = len(train_loss)
        # Plot every curve on a shared 1-based epoch axis.
        plt.plot(range(1, 1 + epochs), train_loss, label='train_loss')
        plt.plot(range(1, 1 + epochs), dev_loss, label='dev_loss')
        plt.plot(range(1, 1 + epochs), train_acc, label='train_acc')
        plt.plot(range(1, 1 + epochs), dev_acc, label='dev_acc')
        plt.plot(range(1, 1 + epochs), train_bound_acc, label='train_bound_acc')
        plt.plot(range(1, 1 + epochs), dev_bound_acc, label='dev_bound_acc')
        plt.plot(range(1, 1 + epochs), train_shrink, label='train_shrink')
        plt.plot(range(1, 1 + epochs), dev_shrink, label='dev_shrink')
        plt.legend()
        plt.show()
def smooth_l1_loss(vertex_pred, vertex_targets, vertex_weights, sigma=1.0, normalize=True, reduce=True):
    '''
    Weighted smooth-L1 (Huber) loss over vertex maps.

    :param vertex_pred: [b,vn*2,h,w]
    :param vertex_targets: [b,vn*2,h,w]
    :param vertex_weights: [b,1,h,w] per-pixel weights (typically a mask)
    :param sigma: controls the quadratic/linear transition point (1/sigma^2)
    :param normalize: divide each sample's loss by its weighted element count
    :param reduce: average over the batch, returning a scalar tensor
    :return: scalar if reduce, else per-sample ([b]) or per-element loss
    '''
    b, ver_dim, _, _ = vertex_pred.shape
    sigma_2 = sigma ** 2
    diff = vertex_weights * (vertex_pred - vertex_targets)
    abs_diff = torch.abs(diff)
    # 1 inside the quadratic region, 0 in the linear region
    smoothL1_sign = (abs_diff < 1. / sigma_2).detach().float()
    in_loss = torch.pow(diff, 2) * (sigma_2 / 2.) * smoothL1_sign \
              + (abs_diff - (0.5 / sigma_2)) * (1. - smoothL1_sign)
    if normalize:
        in_loss = torch.sum(in_loss.view(b, -1), 1) / (ver_dim * torch.sum(vertex_weights.view(b, -1), 1) + 1e-3)
    if reduce:
        # BUG FIX: the original computed torch.mean(in_loss) but discarded
        # the result, so reduce=True silently returned the unreduced loss.
        in_loss = torch.mean(in_loss)
    return in_loss
def conv(num_input, num_output, kernel_size, stride, padding, relu=True):
    """2D convolution layer, optionally wrapped with an in-place ReLU."""
    layer = nn.Conv2d(num_input, num_output, kernel_size, stride, padding)
    if relu is False:
        return layer
    return nn.Sequential(layer, nn.ReLU(inplace=True))
def load_model(model, optim, model_dir, epoch=-1):
    """Restore model and optimizer state from `model_dir`.

    Checkpoints are named '<epoch>.pth'.  With epoch == -1 the newest
    checkpoint is loaded.  Returns the epoch to resume from (saved epoch
    + 1), or 0 when the directory or checkpoints are absent.
    """
    if not os.path.exists(model_dir):
        return 0
    # BUG FIX: only consider '<int>.pth' checkpoint files; any stray file
    # (logs, hidden files, ...) used to crash the int() conversion.
    pths = [int(name.split('.')[0]) for name in os.listdir(model_dir)
            if name.endswith('.pth') and name.split('.')[0].isdigit()]
    if len(pths) == 0:
        return 0
    pth = max(pths) if epoch == -1 else epoch
    pretrained_model = torch.load(os.path.join(model_dir, '{}.pth'.format(pth)))
    model.load_state_dict(pretrained_model['net'])
    optim.load_state_dict(pretrained_model['optim'])
    print('load model {} epoch {}'.format(model_dir, pretrained_model['epoch']))
    return pretrained_model['epoch'] + 1
def load_net(net, model_dir):
    """Restore network weights from the newest '<epoch>.pth' in `model_dir`.

    Returns the saved epoch + 1, or 0 when the directory or checkpoints
    are absent.
    """
    if not os.path.exists(model_dir):
        return 0
    # ignore files that are not '<int>.pth' checkpoints (same fix as load_model)
    pths = [int(name.split('.')[0]) for name in os.listdir(model_dir)
            if name.endswith('.pth') and name.split('.')[0].isdigit()]
    if len(pths) == 0:
        return 0
    pth = max(pths)
    pretrained_model = torch.load(os.path.join(model_dir, '{}.pth'.format(pth)))
    net.load_state_dict(pretrained_model['net'])
    return pretrained_model['epoch'] + 1
def save_model(net, optim, epoch, model_dir):
    """Write network/optimizer state and the epoch to '<model_dir>/<epoch>.pth'."""
    # os.makedirs replaces the original shell call `mkdir -p`: portable
    # (works on Windows) and immune to shell-special characters in the path.
    os.makedirs(model_dir, exist_ok=True)
    torch.save({
        'net': net.state_dict(),
        'optim': optim.state_dict(),
        'epoch': epoch
    }, os.path.join(model_dir, '{}.pth'.format(epoch)))
class AverageMeter(EasyDict):
    """Computes and stores the average and current value."""

    def __init__(self):
        super().__init__()
        self.reset()

    def reset(self):
        """Clear all statistics back to zero."""
        self.val, self.avg, self.sum, self.count = 0, 0, 0, 0

    def update(self, val, n=1):
        """Record `val` observed `n` times and refresh the running average."""
        self.val = val
        self.sum = self.sum + val * n
        self.count = self.count + n
        self.avg = self.sum / self.count
class Recorder(object):
    """Logs scalar losses to the console (and an optional text dump file)
    and, when recording is enabled, scalars and image grids to TensorBoard
    via tensorboardX's SummaryWriter."""

    # RGB palette indexed by class id; used by rec_segmentation.
    # NOTE(review): looks like the 19-class Cityscapes palette — confirm.
    colors = [[  0,   0,   0],
              [128,  64, 128],
              [244,  35, 232],
              [ 70,  70,  70],
              [102, 102, 156],
              [190, 153, 153],
              [153, 153, 153],
              [250, 170,  30],
              [220, 220,   0],
              [107, 142,  35],
              [152, 251, 152],
              [  0, 130, 180],
              [220,  20,  60],
              [255,   0,   0],
              [  0,   0, 142],
              [  0,   0,  70],
              [  0,  60, 100],
              [  0,  80, 100],
              [  0,   0, 230],
              [119,  11,  32]]

    def __init__(self, rec=True, rec_dir=None, dump_fn=None):
        """
        :param rec: when False, no SummaryWriter is created and the rec_*
            methods only print / append to `dump_fn`
        :param rec_dir: TensorBoard log directory
        :param dump_fn: optional text file; every logged message is appended
        """
        from matplotlib import cm
        if rec:
            self.writer = SummaryWriter(log_dir=rec_dir)
            # default matplotlib colormap; used by rec_vertex
            # NOTE(review): cm.get_cmap() with no argument is deprecated in
            # newer matplotlib releases — confirm the pinned version.
            self.cmap = cm.get_cmap()
        else:
            self.writer = None
        self.dump_fn = dump_fn

    def rec_loss(self, loss, step, name='data/loss'):
        """Print, optionally dump, and (if recording) log one scalar."""
        msg = '{} {} {}'.format(name, step, loss)
        print(msg)
        if self.dump_fn is not None:
            with open(self.dump_fn, 'a') as f:
                f.write(msg + '\n')
        if self.writer is None:
            return
        self.writer.add_scalar(name, loss, step)

    def rec_loss_batch(self, losses_batch, step, epoch, prefix='train'):
        """Log a dict of named losses for one step; keys may be TensorBoard
        paths ('a/b') — only the last path segment is printed."""
        msg = '{} epoch {} step {}'.format(prefix, epoch, step)
        for k, v in losses_batch.items():
            msg += ' {} {:.8f} '.format(k.split('/')[-1], v)
        print(msg)
        if self.dump_fn is not None:
            with open(self.dump_fn, 'a') as f:
                f.write(msg + '\n')
        if self.writer is None:
            return
        for k, v in losses_batch.items():
            self.writer.add_scalar(k, v, step)

    def rec_segmentation(self, seg, num_classes, nrow, step, name='seg'):
        """Colourise segmentation logits [b, c, h, w] (argmax over classes,
        then palette lookup) and log them as one image grid."""
        if self.writer is None:
            return
        seg = torch.argmax(seg, dim=1).long()
        # build R/G/B planes by replacing each class id with its colour
        r = seg.clone()
        g = seg.clone()
        b = seg.clone()
        for l in range(num_classes):
            inds = (seg == l)
            r[inds] = self.colors[l][0]
            g[inds] = self.colors[l][1]
            b[inds] = self.colors[l][2]
        seg = torch.stack([r, g, b], dim=1)
        seg = vutils.make_grid(seg, nrow)
        self.writer.add_image(name, seg, step)

    def rec_vertex(self, vertex, mask, nrow, step, name='vertex'):
        """Log the first two (masked) vertex channels as colormapped images.

        NOTE(review): the (v*mask + 1) / 2 rescale suggests vertex values
        are expected in [-1, 1] — confirm with the producing model.
        """
        if self.writer is None:
            return
        vertex = (vertex[:, :2, ...] * mask + 1) / 2
        height, width = vertex.shape[2:]
        # flatten batch and channel so each map becomes one grid cell
        vertex = vertex.view(-1, height, width)
        # cmap returns RGBA; keep RGB only
        vertex = self.cmap(vertex.detach().cpu().numpy())[..., :3]
        vertex = vutils.make_grid(torch.from_numpy(vertex).permute(0, 3, 1, 2), nrow)
        self.writer.add_image(name, vertex, step)
class MultiClassPrecisionRecall:
    """Accumulates per-class TP/FP/FN counts on the GPU and reports add-one
    smoothed precision and recall per class."""

    def __init__(self, names):
        self.class_num = len(names)
        self.names = names
        self.reset()

    def accumulate(self, pred, label):
        '''
        :param pred: b,h,w predicted class ids
        :param label: b,h,w ground-truth class ids
        :return:
        '''
        for cls in range(self.class_num):
            hit = pred == cls
            truth = label == cls
            self.tp[cls] += torch.sum(hit & truth)
            self.fp[cls] += torch.sum(hit & ~truth)
            self.fn[cls] += torch.sum(~hit & truth)

    def compute_precision_recall(self):
        """Return (precision, recall) numpy arrays of length class_num,
        smoothed as (tp + 1) / (tp + fp/fn + 1)."""
        tp, fp, fn = self.tp.double(), self.fp.double(), self.fn.double()
        precision = (tp + 1) / (tp + fp + 1)
        recall = (tp + 1) / (tp + fn + 1)
        return precision.cpu().numpy(), recall.cpu().numpy()

    def reset(self):
        """Zero all counters (kept on the GPU, matching accumulate's inputs)."""
        self.tp = torch.zeros(self.class_num, dtype=torch.int64).cuda()
        self.fp = torch.zeros(self.class_num, dtype=torch.int64).cuda()
        self.fn = torch.zeros(self.class_num, dtype=torch.int64).cuda()
def adjust_learning_rate(optimizer, epoch, lr_decay_rate, lr_decay_epoch, min_lr=1e-5):
    """Multiply every param group's lr by `lr_decay_rate` (clamped below at
    `min_lr`) once every `lr_decay_epoch` epochs; otherwise a no-op."""
    if ((epoch + 1) % lr_decay_epoch) != 0:
        return
    for group in optimizer.param_groups:
        previous = group['lr']
        group['lr'] = max(previous * lr_decay_rate, min_lr)
        print('changing learning rate {:5f} to {:.5f}'.format(previous, group['lr']))
def set_learning_rate(optimizer, lr):
    """Force every param group's learning rate to exactly `lr`."""
    for group in optimizer.param_groups:
        print('reset learning rate {:5f} to {:.5f}'.format(group['lr'], lr))
        group['lr'] = lr
def acc_img(img,pre_img_list,an,hcount,wcount,hinter,winter,height,width,model='avg'):
    """Reassemble full (height x width) images from batches of `an` tiles.

    Tiles arrive in arbitrarily sized batches; `pre_img_list` buffers the
    incomplete remainder between calls (it is MUTATED in place: popped on
    entry, appended on exit).  Tile `ai` of an image is placed with its
    origin at row (ai // wcount) * hinter, column (ai % wcount) * winter.

    :param img: [n, c, th, tw] batch of tiles
    :param pre_img_list: carry-over buffer of leftover tiles (mutated)
    :param an: number of tiles per full image
    :param hcount: tile-grid rows.  NOTE(review): unused in this body — the
        row index is derived from wcount alone; confirm intentional.
    :param wcount: tile-grid columns
    :param hinter: vertical stride between tile origins
    :param winter: horizontal stride between tile origins
    :param height: output image height
    :param width: output image width
    :param model: 'avg' averages overlapping tile regions; any other value
        lets later tiles overwrite earlier ones
    :return: [image_num, c, height, width] tensor, or None when fewer than
        `an` tiles are available so far
    """
    # prepend any tiles left over from the previous call
    if len(pre_img_list)>0:
        cur_img=torch.cat([pre_img_list.pop(),img],0)
    else:
        cur_img=img
    image_num, left_num = cur_img.shape[0] // an, cur_img.shape[0] % an
    # stash the incomplete trailing group for the next call
    if left_num>0: pre_img_list.append(cur_img[image_num*an:])
    if image_num==0: return None
    if model=='avg':
        h,w=cur_img.shape[2],cur_img.shape[3]
        result_img=torch.zeros([image_num, img.shape[1], height, width],dtype=img.dtype,device=img.device)
        result_wgt=torch.zeros([image_num, img.shape[1], height, width],dtype=img.dtype,device=img.device)
        cur_wgt=torch.ones_like(cur_img)
        for ii in range(image_num):
            for ai in range(an):
                hi=ai//wcount
                wi=ai%wcount
                hbeg=hi*hinter
                wbeg=wi*winter
                # accumulate tile values and per-pixel hit counts
                result_img[ii,:,hbeg:hbeg+h,wbeg:wbeg+w]+=cur_img[ii*an+ai,:,:,:]
                result_wgt[ii,:,hbeg:hbeg+h,wbeg:wbeg+w]+=cur_wgt[ii*an+ai,:,:,:]
        # divide by hit counts so overlapping regions are averaged
        result_img=result_img/result_wgt
    else: # model=='assign'
        h,w=cur_img.shape[2],cur_img.shape[3]
        result_img=torch.zeros([image_num, img.shape[1], height, width],dtype=img.dtype,device=img.device)
        for ii in range(image_num):
            for ai in range(an):
                hi=ai//wcount
                wi=ai%wcount
                hbeg=hi*hinter
                wbeg=wi*winter
                result_img[ii,:,hbeg:hbeg+h,wbeg:wbeg+w]=cur_img[ii*an+ai,:,:,:]
        result_img=result_img
    return result_img
def compute_precision_recall(scores, target, reduce=False):
    """Binary precision/recall from per-class scores.

    `scores` is [b, 2, ...]; class 1 is treated as positive.  Counts are
    add-one smoothed.  Returns per-sample tensors, or scalars if `reduce`.
    """
    batch = scores.shape[0]
    preds = torch.argmax(scores, 1).float()
    truth = target.float()
    tp = torch.sum((preds * truth).view(batch, -1), 1)
    fn = torch.sum(((1 - preds) * truth).view(batch, -1), 1)
    fp = torch.sum((preds * (1 - truth)).view(batch, -1), 1)
    precision = (tp + 1) / (tp + fp + 1)
    recall = (tp + 1) / (tp + fn + 1)
    if reduce:
        return torch.mean(precision), torch.mean(recall)
    return precision, recall
def compute_precision_multi_class(scores, target, reduce=False):
    """Per-sample pixel accuracy: the fraction of h*w positions whose argmax
    class equals `target`.  Returns the batch mean when `reduce` is True."""
    batch, _, height, width = scores.shape
    matches = torch.argmax(scores, 1) == target
    accuracy = torch.sum(matches.view(batch, -1), 1).float() / (height * width)
    return torch.mean(accuracy) if reduce else accuracy
|
"""
# -*- coding: utf-8 -*-
-----------------------------------------------------------------------------------
# Author: <NAME>
# DoC: 2020.08.09
# email: <EMAIL>
-----------------------------------------------------------------------------------
# Description: Utility functions for the KITTI dataset
# Modified: <NAME>
# email: <EMAIL>
"""
from __future__ import print_function
import os
import sys
import numpy as np
import cv2
import mayavi.mlab
# Climb towards the repository's 'src' directory so the project-local
# packages imported below resolve regardless of the working directory.
src_dir = os.path.dirname(os.path.realpath(__file__))
while not src_dir.endswith("src"):
    parent = os.path.dirname(src_dir)
    if parent == src_dir:
        # BUG FIX: reached the filesystem root without finding 'src';
        # the original loop spun forever here (os.path.dirname('/') == '/').
        break
    src_dir = parent
if src_dir not in sys.path:
    sys.path.append(src_dir)
import config.kitti_config as cnf
from data_process.transformation import lidar_to_camera_box
def roty(angle):
    """3x3 rotation matrix about the y-axis by `angle` radians."""
    cos_a, sin_a = np.cos(angle), np.sin(angle)
    return np.array([[cos_a, 0, sin_a],
                     [0, 1, 0],
                     [-sin_a, 0, cos_a]])
def compute_box_3d(dim, location, ry):
    """Corners (8 x 3) of a 3D box.

    `dim` is (h, w, l); the box sits with its bottom face at y = 0 in the
    object frame, is rotated by `ry` about the y-axis, then translated to
    `location`.
    """
    h, w, l = dim
    half_l, half_w = l / 2, w / 2
    x_corners = [half_l, half_l, -half_l, -half_l, half_l, half_l, -half_l, -half_l]
    y_corners = [0, 0, 0, 0, -h, -h, -h, -h]
    z_corners = [half_w, -half_w, -half_w, half_w, half_w, -half_w, -half_w, half_w]
    corners = np.array([x_corners, y_corners, z_corners], dtype=np.float32)
    # y-axis rotation (inlined from roty)
    c, s = np.cos(ry), np.sin(ry)
    R = np.array([[c, 0, s], [0, 1, 0], [-s, 0, c]])
    rotated = np.dot(R, corners)
    shifted = rotated + np.array(location, dtype=np.float32).reshape(3, 1)
    return shifted.transpose(1, 0)
def project_to_image(pts_3d, P):
    """Project n x 3 camera-frame points through the 3 x 4 matrix P.

    Returns integer n x 2 pixel coordinates (perspective-divided).
    """
    pts_3d_homo = np.concatenate([pts_3d, np.ones((pts_3d.shape[0], 1), dtype=np.float32)], axis=1)
    pts_2d = np.dot(P, pts_3d_homo.transpose(1, 0)).transpose(1, 0)
    pts_2d = pts_2d[:, :2] / pts_2d[:, 2:]
    # BUG FIX: np.int was removed in NumPy 1.24; the builtin int is the
    # exact alias it used to be, so behavior is unchanged on older NumPy.
    return pts_2d.astype(int)
def draw_box_3d_v2(image, qs, color=(255, 0, 255), thickness=2):
    ''' Draw 3d bounding box in image
        qs: (8,3) array of vertices for the 3d box in following order:
            1 -------- 0
           /|         /|
          2 -------- 3 .
          | |        | |
          . 5 -------- 4
          |/         |/
          6 -------- 7
    '''
    qs = qs.astype(np.int32)
    for k in range(4):
        # for each corner pair: top-face edge, bottom-face edge, vertical pillar
        # use LINE_AA for opencv3
        for i, j in ((k, (k + 1) % 4), (k + 4, (k + 1) % 4 + 4), (k, k + 4)):
            cv2.line(image, (qs[i, 0], qs[i, 1]), (qs[j, 0], qs[j, 1]), color, thickness)
    return image
def draw_box_3d(image, corners, color=(0, 0, 255)):
    ''' Draw 3d bounding box in image
        corners: (8,3) array of vertices for the 3d box in following order:
            1 -------- 0
           /|         /|
          2 -------- 3 .
          | |        | |
          . 5 -------- 4
          |/         |/
          6 -------- 7
    '''
    face_idx = [[0, 1, 5, 4],
                [1, 2, 6, 5],
                [2, 3, 7, 6],
                [3, 0, 4, 7]]
    for ind_f in range(3, -1, -1):
        face = face_idx[ind_f]
        for j in range(4):
            start = (corners[face[j], 0], corners[face[j], 1])
            end = (corners[face[(j + 1) % 4], 0], corners[face[(j + 1) % 4], 1])
            cv2.line(image, start, end, color, 2, lineType=cv2.LINE_AA)
        if ind_f == 0:
            # draw an X across the first face with thinner lines
            cv2.line(image, (corners[face[0], 0], corners[face[0], 1]),
                     (corners[face[2], 0], corners[face[2], 1]), color, 1, lineType=cv2.LINE_AA)
            cv2.line(image, (corners[face[1], 0], corners[face[1], 1]),
                     (corners[face[3], 0], corners[face[3], 1]), color, 1, lineType=cv2.LINE_AA)
    return image
def show_rgb_image_with_boxes(img, labels, calib):
    """Draw every label's 3D box (projected through calib.P2) onto `img`.

    Each label row is (cls_id, x, y, z, h, w, l, ry) in camera coordinates.
    Objects closer than 2 m, with negative class ids, or projecting outside
    the image bounds are skipped.
    """
    for box_idx, label in enumerate(labels):
        cls_id, location, dim, ry = label[0], label[1:4], label[4:7], label[7]
        if location[2] < 2.0:  # The object is too close to the camera, ignore it during visualization
            continue
        if cls_id < 0:
            continue
        corners_3d = compute_box_3d(dim, location, ry)
        corners_2d = project_to_image(corners_3d, calib.P2)
        # (removed leftover debug print of corners_2d.shape)
        minxy = np.min(corners_2d, axis=0)
        maxxy = np.max(corners_2d, axis=0)
        bbox = np.concatenate([minxy, maxxy], axis=0)
        if bbox[0] < 0 or bbox[2] < 0:
            continue
        # NOTE(review): bounds look hard-coded for KITTI's 1242x375 images
        # (1272 may be a typo for 1242) — confirm before changing.
        if bbox[1] > 1272 or bbox[3] > 375:
            continue
        img = draw_box_3d(img, corners_2d, color=cnf.colors[int(cls_id)])
    return img
def merge_rgb_to_bev(img_rgb, img_bev, output_width):
    """Stack the RGB view on top of the BEV view.

    Both images are resized to `output_width`; heights are scaled to keep
    each image's aspect ratio.  Returns one uint8 HxWx3 image.
    """
    def _resize_to_width(img):
        h, w = img.shape[:2]
        new_h = int(output_width / w * h)
        return cv2.resize(img, (output_width, new_h)), new_h

    ret_img_rgb, rgb_h = _resize_to_width(img_rgb)
    ret_img_bev, bev_h = _resize_to_width(img_bev)
    out_img = np.zeros((rgb_h + bev_h, output_width, 3), dtype=np.uint8)
    out_img[:rgb_h, ...] = ret_img_rgb   # upper: RGB
    out_img[rgb_h:, ...] = ret_img_bev   # lower: BEV
    return out_img
def inverse_rigid_trans(Tr):
    ''' Inverse a rigid body transform matrix (3x4 as [R|t])
        [R'|-R't; 0|1]
    '''
    rot_t = Tr[0:3, 0:3].T
    inv_Tr = np.zeros_like(Tr)  # 3x4
    inv_Tr[0:3, 0:3] = rot_t
    inv_Tr[0:3, 3] = np.dot(-rot_t, Tr[0:3, 3])
    return inv_Tr
# Hard-coded KITTI calibration constants (velodyne <-> camera).
# NOTE(review): these are per-recording calibration values — confirm they
# match the sequences this module is used with.
# V2C: 3x4 rigid transform taking velodyne lidar points into camera coords.
V2C= np.array([7.533745000000e-03, -9.999714000000e-01, -6.166020000000e-04,
               -4.069766000000e-03, 1.480249000000e-02, 7.280733000000e-04,
               -9.998902000000e-01, -7.631618000000e-02, 9.998621000000e-01,
               7.523790000000e-03, 1.480755000000e-02, -2.717806000000e-01])
V2C = np.reshape(V2C, [3, 4])
# C2V: the inverse rigid transform (camera -> velodyne).
C2V = inverse_rigid_trans(V2C)
# R0: 3x3 rectification rotation applied in the camera frame.
R0 = np.array([9.999239000000e-01, 9.837760000000e-03, -7.445048000000e-03, -9.869795000000e-03,
               9.999421000000e-01, -4.278459000000e-03, 7.402527000000e-03, 4.351614000000e-03,
               9.999631000000e-01])
R0 = np.reshape(R0, [3, 3])
def cart2hom(pts_3d):
    ''' Input: nx3 points in Cartesian
        Output: nx4 points in Homogeneous, by appending a column of ones
    '''
    ones = np.ones((pts_3d.shape[0], 1))
    return np.hstack((pts_3d, ones))
def project_ref_to_velo(pts_3d_ref):
    """Map nx3 camera-reference points into the velodyne frame via C2V."""
    homo = cart2hom(pts_3d_ref)  # nx4
    return np.dot(homo, C2V.T)
def project_rect_to_ref(pts_3d_rect):
    ''' Input and Output are nx3 points '''
    # undo the rectification rotation R0
    return np.dot(np.linalg.inv(R0), pts_3d_rect.T).T
def project_rect_to_velo(pts_3d_rect):
    ''' Input: nx3 points in rect camera coord.
        Output: nx3 points in velodyne coord.
    '''
    return project_ref_to_velo(project_rect_to_ref(pts_3d_rect))
def rotz(t):
    ''' Rotation matrix about the z-axis by t radians.
        (The original docstring wrongly said "y-axis".)
    '''
    c = np.cos(t)
    s = np.sin(t)
    return np.array([[c, -s, 0],
                     [s, c, 0],
                     [0, 0, 1]])
def roty(t):
    """Rotation matrix about the y-axis by `t` radians.

    NOTE: re-definition — this shadows the earlier `roty(angle)` above.
    """
    cos_t, sin_t = np.cos(t), np.sin(t)
    return np.array([[cos_t, 0, sin_t],
                     [0, 1, 0],
                     [-sin_t, 0, cos_t]])
def draw_gt_boxes3d(gt_boxes3d, score,fig, color=(1,1,1), line_width=1, draw_text=True, text_scale=(1,1,1), color_list=None, ):
    ''' Draw 3D bounding boxes
    Args:
        gt_boxes3d: numpy array (n,8,3) for XYZs of the box corners
        score: detection score (currently unused; text drawing is disabled)
        fig: mayavi figure handler
        color: RGB value tuple in range (0,1), box line color
        line_width: box line width
        draw_text: boolean, if true, write box indices beside boxes (disabled)
        text_scale: three number tuple
        color_list: a list of RGB tuple, if not None, overwrite color.
    Returns:
        fig: updated fig
    '''
    for n, b in enumerate(gt_boxes3d):
        box_color = color_list[n] if color_list is not None else color
        for k in range(4):
            # for each corner pair: top edge, bottom edge, vertical pillar
            # http://docs.enthought.com/mayavi/mayavi/auto/mlab_helper_functions.html
            for i, j in ((k, (k + 1) % 4), (k + 4, (k + 1) % 4 + 4), (k, k + 4)):
                mayavi.mlab.plot3d([b[i, 0], b[j, 0]], [b[i, 1], b[j, 1]], [b[i, 2], b[j, 2]],
                                   color=box_color, tube_radius=None,
                                   line_width=line_width, figure=fig)
    return fig
def show3dlidar(pointpaht, detections,calib):
    """Render a KITTI velodyne point cloud plus detection boxes with mayavi.

    :param pointpaht: path to a .bin file of float32 (x, y, z, reflectance)
    :param detections: array whose rows look like (flag, x, y, z, h, w, l, ry);
        columns 1: are converted lidar -> camera IN PLACE, so the caller's
        array is mutated.  Rows with flag == 1.0 draw red, others green.
        NOTE(review): column semantics inferred from the indexing below —
        confirm against the detector's output format.
    :param calib: calibration object providing V2C, R0 and P2
    """
    pointcloud = np.fromfile(pointpaht, dtype=np.float32).reshape(-1, 4)
    x = pointcloud[:, 0]  # x position of point
    # NOTE(review): the min/max values below are computed but never used.
    xmin = np.amin(x, axis=0)
    xmax = np.amax(x, axis=0 )
    y = pointcloud[:, 1]  # y position of point
    ymin = np.amin(y, axis=0)
    ymax = np.amax(y, axis=0)
    z = pointcloud[:, 2]  # z position of point
    zmin = np.amin(z, axis=0)
    zmax = np.amax(z, axis=0)
    d = np.sqrt(x ** 2 + y ** 2)  # Map Distance from sensor
    vals = 'height'
    if vals == "height":  # always true here: points are coloured by height (z)
        col = z
    else:
        col = d
    fig = mayavi.mlab.figure(bgcolor=(0, 0, 0), size=(640, 500))
    mayavi.mlab.points3d(x, y, z,
                         col,  # Values used for Color
                         mode="point",
                         colormap='Blues',  # 'bone', 'copper', 'gnuplot'
                         # color=(0, 1, 0),   # Used a fixed (r,g,b) instead
                         figure=fig,
                         )
    # white sphere marking the sensor origin
    mayavi.mlab.points3d(0, 0, 0, color=(1, 1, 1), mode="sphere",scale_factor=0.2)
    print(detections.shape)
    # convert box parameters from lidar to camera coordinates (mutates input)
    detections[:, 1:] = lidar_to_camera_box(detections[:, 1:], calib.V2C, calib.R0, calib.P2)
    for i in range(detections.shape[0]):
        h = float(detections[i][4])
        w = float(detections[i][5])
        l = float(detections[i][6])
        x = float(detections[i][1])
        y = float(detections[i][2])
        z = float(detections[i][3])
        # box corners in the object frame (bottom face at y = 0)
        x_corners = [l / 2, l / 2, -l / 2, -l / 2, l / 2, l / 2, -l / 2, -l / 2] ;
        y_corners = [0, 0, 0, 0, -h, -h, -h, -h] ;
        z_corners = [w / 2, -w / 2, -w / 2, w / 2, w / 2, -w / 2, -w / 2, w / 2];
        R = roty(float(detections[i][7]))
        corners_3d = np.dot(R, np.vstack([x_corners, y_corners, z_corners]))
        corners_3d[0, :] = corners_3d[0, :] + x;
        corners_3d[1, :] = corners_3d[1, :] + y;
        corners_3d[2, :] = corners_3d[2, :] + z;
        corners_3d = np.transpose(corners_3d)
        # back to lidar coordinates so the box overlays the point cloud
        box3d_pts_3d_velo = project_rect_to_velo(corners_3d)
        if detections[i][0] == 1.0:
            draw_gt_boxes3d([box3d_pts_3d_velo],1,color=(1,0,0), fig=fig)
        else:
            draw_gt_boxes3d([box3d_pts_3d_velo], 1, color=(0, 1, 0), fig=fig)
    mayavi.mlab.show()
|
<reponame>omari-funzone/commcare-hq
from collections import namedtuple
from itertools import groupby
import itertools
from django.db.models import Q
from casexml.apps.case.const import UNOWNED_EXTENSION_OWNER_ID, CASE_INDEX_EXTENSION
from casexml.apps.case.signals import cases_received
from casexml.apps.case.util import validate_phone_datetime, prune_previous_log
from casexml.apps.phone.cleanliness import should_create_flags_on_submission
from casexml.apps.phone.models import OwnershipCleanlinessFlag
from corehq import toggles
from corehq.apps.users.util import SYSTEM_USER_ID
from corehq.form_processor.interfaces.processor import FormProcessorInterface
from corehq.form_processor.interfaces.dbaccessors import CaseAccessors
from corehq.util.soft_assert import soft_assert
from couchforms.models import XFormInstance
from casexml.apps.case.exceptions import InvalidCaseIndex, IllegalCaseId
from django.conf import settings
from casexml.apps.case import const
from casexml.apps.case.xml.parser import case_update_from_block
from custom.covid.casesync import get_ush_extension_cases_to_close
from dimagi.utils.logging import notify_exception
# BUG FIX: this line was corrupted by source anonymisation (the recipient
# string literal lost its closing quote, a syntax error); reconstructed as
# the conventional commcare-hq dev-address pattern.
_soft_assert = soft_assert(to='{}@{}.com'.format('skelly', 'dimagi'), notify_admins=True)
# Lightweight class used to store the dirtyness of a case/owner pair.
DirtinessFlag = namedtuple('DirtinessFlag', ['case_id', 'owner_id'])
class CaseProcessingResult(object):
    """
    Lightweight class used to collect results of case processing
    """
    def __init__(self, domain, cases, dirtiness_flags):
        self.domain = domain
        self.cases = cases
        self.dirtiness_flags = dirtiness_flags

    def get_clean_owner_ids(self):
        """Owner ids touched by this submission that were NOT flagged dirty."""
        dirty_flags = self.get_flags_to_save()
        return {c.owner_id for c in self.cases if c.owner_id and c.owner_id not in dirty_flags}

    def set_cases(self, cases):
        self.cases = cases

    def get_flags_to_save(self):
        """Map owner_id -> case_id (the 'hint') for every dirtiness flag."""
        return {f.owner_id: f.case_id for f in self.dirtiness_flags}

    def commit_dirtiness_flags(self):
        """
        Updates any dirtiness flags in the database.
        """
        if self.domain and not toggles.LIVEQUERY_SYNC.enabled(self.domain):
            flags_to_save = self.get_flags_to_save()
            if should_create_flags_on_submission(self.domain):
                assert settings.UNIT_TESTING  # this is currently only true when unit testing
                all_touched_ids = set(flags_to_save.keys()) | self.get_clean_owner_ids()
                # existing flags for every touched owner, keyed by owner id
                to_update = {f.owner_id: f for f in OwnershipCleanlinessFlag.objects.filter(
                    domain=self.domain,
                    owner_id__in=list(all_touched_ids),
                )}
                for owner_id in all_touched_ids:
                    if owner_id not in to_update:
                        # making from scratch - default to clean, but set to dirty if needed
                        flag = OwnershipCleanlinessFlag(domain=self.domain, owner_id=owner_id, is_clean=True)
                        if owner_id in flags_to_save:
                            flag.is_clean = False
                            flag.hint = flags_to_save[owner_id]
                        flag.save()
                    else:
                        # updating - only save if we are marking dirty or setting a hint
                        flag = to_update[owner_id]
                        if owner_id in flags_to_save and (flag.is_clean or not flag.hint):
                            flag.is_clean = False
                            flag.hint = flags_to_save[owner_id]
                            flag.save()
            else:
                # only update the flags that are already in the database
                flags_to_update = OwnershipCleanlinessFlag.objects.filter(
                    Q(domain=self.domain),
                    Q(owner_id__in=list(flags_to_save)),
                    Q(is_clean=True) | Q(hint__isnull=True)
                )
                for flag in flags_to_update:
                    flag.is_clean = False
                    flag.hint = flags_to_save[flag.owner_id]
                    flag.save()
def process_cases_with_casedb(xforms, case_db):
    """Apply all case updates in `xforms`, fire the cases_received signal,
    run per-case post-processing, and return the CaseProcessingResult."""
    result = _get_or_update_cases(xforms, case_db)
    cases = result.cases
    primary_form = xforms[0]
    _update_sync_logs(primary_form, cases)

    try:
        cases_received.send(sender=None, xform=primary_form, cases=cases)
    except Exception as err:
        # don't let the exceptions in signals prevent standard case processing
        notify_exception(
            None,
            'something went wrong sending the cases_received signal '
            'for form %s: %s' % (primary_form.form_id, err)
        )

    for case in cases:
        case_db.post_process_case(case, primary_form)
        case_db.mark_changed(case)

    result.set_cases(cases)
    return result
def _update_sync_logs(xform, cases):
    """Refresh the sync record attached to `xform` (apps using sync mode),
    saving it only when something actually changed."""
    sync_log = xform.get_sync_token()
    if not sync_log:
        return
    dirty = sync_log.update_phone_lists(xform, cases)
    # prune runs unconditionally; either change triggers a save
    dirty = prune_previous_log(sync_log) or dirty
    if dirty:
        sync_log.save()
def _get_or_update_cases(xforms, case_db):
    """
    Given an xform document, update any case blocks found within it,
    returning a CaseProcessingResult covering the affected cases.
    """
    domain = getattr(case_db, 'domain', None)
    touched_cases = FormProcessorInterface(domain).get_cases_from_forms(case_db, xforms)
    case_updates = list(touched_cases.values())
    _validate_indices(case_db, case_updates)
    flags = _get_all_dirtiness_flags_from_cases(domain, case_db, touched_cases)
    return CaseProcessingResult(domain, [update.case for update in case_updates], flags)
def _get_all_dirtiness_flags_from_cases(domain, case_db, touched_cases):
    """Collect every dirtiness flag implied by this submission's cases."""
    if toggles.LIVEQUERY_SYNC.enabled(domain):
        return []
    metas = list(touched_cases.values())
    # temporary (reassignment) flags go first so that hints from real
    # dirtiness get overridden later
    flags = list(_get_dirtiness_flags_for_reassigned_case(metas))
    for meta in metas:
        flags.extend(_get_dirtiness_flags_for_outgoing_indices(case_db, meta.case))
    flags.extend(_get_dirtiness_flags_for_child_cases(case_db, [m.case for m in metas]))
    return flags
def _get_dirtiness_flags_for_outgoing_indices(case_db, case, tree_owners=None):
    """ if the outgoing indices touch cases owned by another user this cases owner is dirty

    Yields DirtinessFlag tuples.  `tree_owners` accumulates owner ids seen
    along a chain of unowned host cases; the SAME set object is shared and
    mutated across the recursive calls at the bottom.
    """
    if tree_owners is None:
        tree_owners = set()

    extension_indices = [index for index in case.indices if index.relationship == CASE_INDEX_EXTENSION]

    # hosts of this extension with no real owner: they may transitively
    # connect this case to an owned one, so they are walked recursively below
    unowned_host_cases = []
    for index in extension_indices:
        host_case = case_db.get(index.referenced_id)
        if (
            host_case
            and host_case.owner_id == UNOWNED_EXTENSION_OWNER_ID
            and host_case not in unowned_host_cases
        ):
            unowned_host_cases.append(host_case)

    # owners of every case this one points at, plus owners found higher up the tree
    owner_ids = {case_db.get(index.referenced_id).owner_id
                 for index in case.indices if case_db.get(index.referenced_id)} | tree_owners
    potential_clean_owner_ids = owner_ids | set([UNOWNED_EXTENSION_OWNER_ID])
    more_than_one_owner_touched = len(owner_ids) > 1
    touches_different_owner = len(owner_ids) == 1 and case.owner_id not in potential_clean_owner_ids

    if (more_than_one_owner_touched or touches_different_owner):
        yield DirtinessFlag(case.case_id, case.owner_id)

    if extension_indices:
        # If this case is an extension, each of the touched cases is also dirty
        for index in case.indices:
            referenced_case = case_db.get(index.referenced_id)
            yield DirtinessFlag(referenced_case.case_id, referenced_case.owner_id)

    if case.owner_id != UNOWNED_EXTENSION_OWNER_ID:
        tree_owners.add(case.owner_id)

    for unowned_host_case in unowned_host_cases:
        # A host case of this extension is unowned, which means it could potentially touch an owned case
        # Check these unowned cases' outgoing indices and mark dirty if appropriate
        for dirtiness_flag in _get_dirtiness_flags_for_outgoing_indices(case_db, unowned_host_case,
                                                                        tree_owners=tree_owners):
            yield dirtiness_flag
def _get_dirtiness_flags_for_child_cases(case_db, cases):
    """A child case is dirty when its owner differs from a referenced parent's."""
    owner_by_case_id = {case.case_id: case.owner_id for case in cases}
    child_cases = case_db.get_reverse_indexed_cases(list(owner_by_case_id))
    for child in child_cases:
        for index in child.indices:
            if (index.referenced_id in owner_by_case_id
                    and child.owner_id != owner_by_case_id[index.referenced_id]):
                yield DirtinessFlag(child.case_id, child.owner_id)
def _get_dirtiness_flags_for_reassigned_case(case_metas):
    """Temporarily flag reassigned cases dirty so phones sync the latest
    changes; the weekly rebuild cleans these up afterwards."""
    for meta in case_metas:
        if _is_change_of_ownership(meta.previous_owner_id, meta.case.owner_id):
            yield DirtinessFlag(meta.case.case_id, meta.previous_owner_id)
def _validate_indices(case_db, case_updates):
    """Raise InvalidCaseIndex when an updated case references a case that
    does not exist (or is not accessible in this domain)."""
    for case_update in case_updates:
        if not case_update.index_change:
            continue
        case = case_update.case
        if case.indices:
            for index in case.indices:
                try:
                    # call get and not doc_exists to force domain checking
                    # see CaseDbCache._validate_case
                    referenced_case = case_db.get(index.referenced_id)
                    invalid = referenced_case is None
                except IllegalCaseId:
                    invalid = True
                if invalid:
                    # fail hard on invalid indices
                    # NOTE(review): distutils was removed in Python 3.12 —
                    # LooseVersion will need replacing (e.g. packaging.version).
                    from distutils.version import LooseVersion
                    if case_db.cached_xforms and case_db.domain != 'commcare-tests':
                        xform = case_db.cached_xforms[0]
                        if xform.metadata and xform.metadata.commcare_version:
                            commcare_version = xform.metadata.commcare_version
                            # alert (without crashing) when a modern client
                            # submits a bad index; older clients were tolerated
                            _soft_assert(
                                commcare_version < LooseVersion("2.39"),
                                "Invalid Case Index in CC version >= 2.39", {
                                    'domain': case_db.domain,
                                    'xform_id': xform.form_id,
                                    'missing_case_id': index.referenced_id,
                                    'version': str(commcare_version)
                                }
                            )
                    raise InvalidCaseIndex(
                        "Case '%s' references non-existent case '%s'" % (case.case_id, index.referenced_id)
                    )
def _is_change_of_ownership(previous_owner_id, next_owner_id):
    """Whether ownership genuinely moved away from `previous_owner_id`.

    Mirrors the original `and`-chain exactly, including returning the falsy
    `previous_owner_id` itself (None / '') instead of False.
    """
    if not previous_owner_id:
        return previous_owner_id
    if previous_owner_id == UNOWNED_EXTENSION_OWNER_ID:
        return False
    return previous_owner_id != next_owner_id
def close_extension_cases(case_db, cases, device_id):
    """Close extension cases orphaned by `cases`; returns close_cases'
    result, or None when nothing needs closing."""
    from casexml.apps.case.cleanup import close_cases
    candidates = get_all_extensions_to_close(case_db.domain, cases)
    still_open = case_db.filter_closed_extensions(list(candidates))
    if not still_open:
        return None
    return close_cases(
        still_open,
        case_db.domain,
        SYSTEM_USER_ID,
        device_id,
        case_db,
    )
def get_all_extensions_to_close(domain, cases):
    """Extension cases to auto-close, honouring `domain`'s feature toggles."""
    if not toggles.EXTENSION_CASES_SYNC_ENABLED.enabled(domain):
        return set()
    if toggles.USH_DONT_CLOSE_PATIENT_EXTENSIONS.enabled(domain):
        return get_ush_extension_cases_to_close(domain, cases)
    return get_extensions_to_close(domain, cases)
def get_extensions_to_close(domain, cases):
    """Open extension chains hanging off the closed cases among `cases`."""
    closed_ids = [case.case_id for case in cases if case.closed]
    return CaseAccessors(domain).get_extension_chain(closed_ids, include_closed=False)
def is_device_report(doc):
    """True when `doc` (a form dict or an xform object) is a javarosa
    device report, which case processing must skip."""
    device_report_xmlns = "http://code.javarosa.org/devicereport"
    if isinstance(doc, dict):
        return doc.get("@xmlns") == device_report_xmlns
    return getattr(doc, 'xmlns', None) == device_report_xmlns
def has_case_id(case_block):
    """Whether the case block names a case id via the tag or the attribute."""
    return any(key in case_block for key in (const.CASE_TAG_ID, const.CASE_ATTR_ID))
# Pairs a raw case-block dict with the form path at which it was found.
CaseBlockWithPath = namedtuple('CaseBlockWithPath', ['caseblock', 'path'])
def extract_case_blocks(doc, include_path=False):
    """
    Extract all case blocks from a document, returning an array of dictionaries
    with the data in each case.

    The json returned is not normalized for casexml version;
    for that get_case_updates is better.

    if `include_path` is True then instead of returning just the case block it will
    return a namedtuple with the following attributes:
       caseblock: case block
       path: ["form", "path", "to", "block"]

    Repeat nodes will all share the same path.
    """
    if isinstance(doc, XFormInstance):
        form = doc.to_json()['form']
    elif isinstance(doc, dict):
        form = doc
    else:
        form = doc.form_data

    found = _extract_case_blocks(form)
    if include_path:
        return list(found)
    return [struct.caseblock for struct in found]
def _extract_case_blocks(data, path=None, form_id=Ellipsis):
    """
    helper for extract_case_blocks
    data must be json representing a node in an xform submission

    Generator of CaseBlockWithPath.  `form_id` is resolved once at the root
    (Ellipsis marks "not yet computed") and threaded through the recursion
    for error reporting in datetime validation.
    """
    from corehq.form_processor.utils import extract_meta_instance_id
    if form_id is Ellipsis:
        form_id = extract_meta_instance_id(data)
    path = path or []

    if isinstance(data, list):
        # repeat group: recurse into each item with the SAME path
        for item in data:
            for case_block in _extract_case_blocks(item, path=path, form_id=form_id):
                yield case_block
    elif isinstance(data, dict) and not is_device_report(data):
        for key, value in data.items():
            new_path = path + [key]
            if const.CASE_TAG == key:
                # it's a case block! Stop recursion and add to this value
                if isinstance(value, list):
                    case_blocks = value
                else:
                    case_blocks = [value]
                for case_block in case_blocks:
                    if has_case_id(case_block):
                        validate_phone_datetime(
                            case_block.get('@date_modified'), none_ok=True, form_id=form_id
                        )
                        yield CaseBlockWithPath(caseblock=case_block, path=path)
            else:
                for case_block in _extract_case_blocks(value, path=new_path, form_id=form_id):
                    yield case_block
def get_case_updates(xform):
    """All case updates in `xform`, grouped per case id and ordered so that
    create actions are applied before update actions."""
    if not xform:
        return []
    updates = sorted(
        (case_update_from_block(block) for block in extract_case_blocks(xform)),
        key=lambda update: update.id
    )
    grouped = groupby(updates, lambda update: update.id)
    return [update
            for _case_id, case_updates in grouped
            for update in order_updates(case_updates)]
def order_updates(case_updates):
    """Order case updates for a single case according to the actions they
    contain, ensuring create actions are applied before update actions."""
    return sorted(case_updates, key=lambda update: _update_order_index(update))
def _update_order_index(update):
    """
    Consistent sort index: the smallest CASE_ACTIONS position among the
    update's action types.
    """
    positions = [const.CASE_ACTIONS.index(action.action_type_slug)
                 for action in update.actions]
    return min(positions)
def get_case_ids_from_form(xform):
    """Case ids touched by the form's case blocks plus its ledger (stock)
    transactions."""
    from corehq.form_processor.parsers.ledgers.form import get_case_ids_from_stock_transactions
    case_ids = {update.id for update in get_case_updates(xform)}
    if xform:
        case_ids.update(get_case_ids_from_stock_transactions(xform))
    return case_ids
|
<gh_stars>10-100
# -*- coding: utf-8 -*-
# Copyright (C) 2019 - 2020 by <NAME>, Rector and Visitors of the
# University of Virginia, University of Heidelberg, and University
# of Connecticut School of Medicine.
# All rights reserved.
# Copyright (C) 2017 - 2018 by <NAME>, Virginia Tech Intellectual
# Properties, Inc., University of Heidelberg, and University of
# of Connecticut School of Medicine.
# All rights reserved.
# Copyright (C) 2010 - 2016 by <NAME>, Virginia Tech Intellectual
# Properties, Inc., University of Heidelberg, and The University
# of Manchester.
# All rights reserved.
# Copyright (C) 2008 - 2009 by <NAME>, Virginia Tech Intellectual
# Properties, Inc., EML Research, gGmbH, University of Heidelberg,
# and The University of Manchester.
# All rights reserved.
# Copyright (C) 2006 - 2007 by <NAME>, Virginia Tech Intellectual
# Properties, Inc. and EML Research, gGmbH.
# All rights reserved.
import COPASI
import unittest
from types import *
class Test_CFunctionParameters(unittest.TestCase):
    """Tests for the COPASI CFunctionParameters SWIG wrapper.

    NOTE(review): uses the Python-2-era unittest API (assert_/assertEquals)
    and `IntType` from `types` — kept as-is for the legacy COPASI bindings.
    """

    def setUp(self):
        # Look up the built-in "Reversible Hill" kinetic function and its
        # parameter list; all tests operate on these.
        self.functions=COPASI.CRootContainer.getFunctionList()
        self.function=self.functions.findFunction("Reversible Hill")
        self.assert_(self.function!=None)
        self.assert_(self.function.__class__==COPASI.CFunction)
        self.parameters=self.function.getVariables()
        self.assert_(self.parameters!=None)
        self.assert_(self.parameters.__class__==COPASI.CFunctionParameters)
        n=self.parameters.size()
        # Reversible Hill is expected to have exactly 7 variables
        self.assertEquals(n,7)

    def test_getNumberOfParametersByUsage(self):
        # 5 of the 7 variables play the PARAMETER role
        n=self.parameters.size()
        self.assertEquals(n,7)
        n=self.parameters.getNumberOfParametersByUsage(COPASI.CFunctionParameter.Role_PARAMETER)
        self.assert_(type(n)==IntType)
        self.assertEquals(n,5)

    def test_add_remove(self):
        # adding then removing a parameter must restore the original size
        n=self.parameters.size()
        self.assert_(self.parameters.add("test",COPASI.CFunctionParameter.DataType_FLOAT64,COPASI.CFunctionParameter.Role_VOLUME))
        self.assert_(self.parameters.size()==n+1)
        self.parameters.remove("test")
        self.assert_(self.parameters.size()==n)

    def test_size(self):
        n=self.parameters.size()
        self.assert_(type(n)==IntType)
        self.assert_(n==7)

    def test_isVector(self):
        # Reversible Hill has a scalar substrate ...
        b=self.parameters.isVector(COPASI.CFunctionParameter.Role_SUBSTRATE)
        self.assert_(type(b)==BooleanType)
        self.assert_(b==False)
        # ... whereas reversible mass action takes a substrate vector
        f=self.functions.findFunction("Mass action (reversible)")
        self.assert_(f!=None)
        self.assert_(f.__class__==COPASI.CFunction)
        p=f.getVariables()
        self.assert_(p!=None)
        self.assert_(p.__class__==COPASI.CFunctionParameters)
        b=p.isVector(COPASI.CFunctionParameter.Role_SUBSTRATE)
        self.assert_(type(b)==BooleanType)
        self.assert_(b==True)

    def test_getParameterByUsage(self):
        # second PARAMETER-role variable of Reversible Hill is "Keq"
        p=self.parameters.getParameterByUsage(COPASI.CFunctionParameter.Role_PARAMETER,1)
        self.assert_(p.__class__==COPASI.CFunctionParameter)
        self.assert_(p.getObjectName()=="Keq")

    def test_findParameterByName(self):
        # "Keq" is expected at index 2 of the variable list
        n=self.parameters.findParameterByName("Keq",COPASI.CFunctionParameter.DataType_FLOAT64)
        self.assert_(type(n)==IntType)
        self.assertEquals(n,2)
def suite():
    """Build the test suite for Test_CFunctionParameters.

    Bug fix: the list previously contained "test_remove", which is not a
    method of Test_CFunctionParameters (only "test_add_remove" exists).
    Instantiating a TestCase with a nonexistent method name raises
    ValueError, so the suite could never be constructed.
    """
    tests=[
        "test_size"
        ,"test_getNumberOfParametersByUsage"
        ,"test_add_remove"
        ,"test_isVector"
        ,"test_getParameterByUsage"
        ,"test_findParameterByName"
    ]
    return unittest.TestSuite(map(Test_CFunctionParameters,tests))
# Run the suite directly when invoked as a script.
if __name__ == '__main__':
    unittest.TextTestRunner(verbosity=2).run(suite())
|
<filename>entailment/data.py
import json
import os
from typing import List, Dict
from torch.utils.data import Dataset
def make_jsonl_data(bbc_summary_data_dir: str,
                    split_file_path: str,
                    output_dir: str):
    """Convert BBC .summary files into one .jsonl file per dataset split.

    The split file maps a split name (e.g. "train") to a list of summary
    file ids; missing summary files are reported and skipped.
    """
    with open(split_file_path) as fin:
        split_map = json.load(fin)
    for split_name, file_ids in split_map.items():
        out_path = os.path.join(output_dir, split_name + '.jsonl')
        print("Writing {} examples to {}...".format(len(file_ids), out_path))
        with open(out_path, 'w') as fout:
            for file_id in file_ids:
                summary_path = os.path.join(bbc_summary_data_dir, file_id + ".summary")
                if not os.path.exists(summary_path):
                    print("Summary file not found: {}".format(file_id))
                    continue
                with open(summary_path) as s_fin:
                    summary_lines = s_fin.readlines()
                record = parse_summary_file(summary_lines, file_id)
                fout.write(json.dumps(record))
                fout.write('\n')
def parse_summary_file(summary_file_lines: List[str],
                       summary_id: str):
    """
    parse a .summary file into json object

    :param summary_file_lines: raw lines of the .summary file
    :param summary_id: id to store under the "id" key
    :return: a json object containing information in the .summary file
    """
    # remove trailing newlines and spaces, then drop empty lines
    cleaned = [line.strip() for line in summary_file_lines]
    cleaned = [line for line in cleaned if line]

    # map each "[SN]field[SN]" marker to its line index
    positions = {}
    for idx, line in enumerate(cleaned):
        if line.startswith("[SN]"):
            positions[line.replace("[SN]", "").lower()] = idx

    result = {"id": summary_id}
    # single-line fields sit on the line right after their marker
    for field in ('url', 'title', 'first-sentence'):
        if field in positions:
            result[field] = cleaned[positions[field] + 1]
    if 'restbody' in positions:
        # NOTE(review): the slice stops at -1, so the final non-empty line of
        # the body is dropped — preserved as-is, but confirm this is intended.
        result['restbody'] = cleaned[positions['restbody'] + 1 : -1]
    return result
class XSumDataProcessor:
    """Factory returning XSumDataset instances for the standard splits."""

    @classmethod
    def get_train_examples(cls, data_dir):
        return cls._get_examples(data_path=os.path.join(data_dir, 'train.jsonl'))

    @classmethod
    def get_test_examples(cls, data_dir):
        return cls._get_examples(data_path=os.path.join(data_dir, 'test.jsonl'))

    @classmethod
    def get_dev_examples(cls, data_dir):
        return cls._get_examples(data_path=os.path.join(data_dir, 'validation.jsonl'))

    @classmethod
    def _get_examples(cls, data_path):
        # thin wrapper so subclasses could swap the dataset class
        return XSumDataset(data_path)
class XSumDataset(Dataset):
    """In-memory torch Dataset over one JSON object per line of a .jsonl file."""

    def __init__(self, data_jsonl_path):
        with open(data_jsonl_path) as fin:
            self.data = [json.loads(line) for line in fin]

    def __len__(self):
        return len(self.data)

    def __getitem__(self, idx):
        return self.data[idx]
if __name__ == '__main__':
    import sys
    if len(sys.argv) != 4:
        print("Usage: python ... [bbc_summary_dir] [split_file] [output_dir]", file=sys.stderr)
        exit(1)
    _, summary_dir, split_file, out_dir = sys.argv
    make_jsonl_data(summary_dir, split_file, out_dir)
"""
**********************************************************************************
* Project: HistFitter - A ROOT-based package for statistical data analysis *
* Package: HistFitter *
* *
* Description: *
* Simple example configuration with input trees *
* *
* Authors: *
* HistFitter group, CERN, Geneva *
* *
* Redistribution and use in source and binary forms, with or without *
* modification, are permitted according to the terms listed in the file *
* LICENSE. *
**********************************************************************************
"""
################################################################
## In principle all you have to setup is defined in this file ##
################################################################
## This configuration performs a simplified version of the "soft lepton" fits documented in ATLAS-CONF-2012-041.
## Only two systematics are considered:
## -JES (Tree-based) conservatively treated like an MC stat error
## -Alpgen Kt scale (weight-based)
##
## For the real complete implementation, see: HistFitterUser/MET_jets_leptons/python/MyOneLeptonKtScaleFit_mergerSoftLep.py
from configManager import configMgr
from ROOT import kBlack,kWhite,kGray,kRed,kPink,kMagenta,kViolet,kBlue,kAzure,kCyan,kTeal,kGreen,kSpring,kYellow,kOrange,kDashed,kSolid,kDotted
from configWriter import fitConfig,Measurement,Channel,Sample
from systematic import Systematic
from math import sqrt
from ROOT import gROOT, TLegend, TLegendEntry, TCanvas
#gROOT.LoadMacro("./macros/AtlasStyle.C")
import ROOT
#ROOT.SetAtlasStyle()
#---------------------------------------------------------------------------------------------
# Some flags for overriding normal execution and telling ROOT to shut up... use with caution!
#---------------------------------------------------------------------------------------------
#gROOT.ProcessLine("gErrorIgnoreLevel=10001;")
#configMgr.plotHistos = True

#---------------------------------------
# Flags to control which fit is executed
#---------------------------------------
useStat=True
doValidation=True #use or use not validation regions to check extrapolation to signal regions

#-------------------------------
# Parameters for hypothesis test
#-------------------------------
#configMgr.doHypoTest=False
#configMgr.nTOYs=1000
# NOTE(review): calculator/test-statistic codes follow the HistFitter/RooStats
# conventions — confirm against the HistFitter documentation.
configMgr.calculatorType=2
configMgr.testStatType=3
configMgr.nPoints=20
#configMgr.scanRange = (0., 2.)

#--------------------------------
# Now we start to build the model
#--------------------------------

# First define HistFactory attributes
configMgr.analysisName = "MyConfigExample"

# Scaling calculated by outputLumi / inputLumi
configMgr.inputLumi = 0.001 # Luminosity of input TTree after weighting
configMgr.outputLumi = 4.713 # Luminosity required for output histograms
configMgr.setLumiUnits("fb-1")

configMgr.histCacheFile = "data/"+configMgr.analysisName+".root"
configMgr.outputFileName = "results/"+configMgr.analysisName+"_Output.root"
# Set the files to read from
bgdFiles = []
sigFiles = []
# NOTE(review): myFitType/FitType are injected at runtime by the HistFitter
# driver script, not defined in this file.
if configMgr.readFromTree:
    bgdFiles.append("samples/tutorial/SusyFitterTree_OneSoftEle_BG_v3.root")
    bgdFiles.append("samples/tutorial/SusyFitterTree_OneSoftMuo_BG_v3.root")
    if myFitType==FitType.Exclusion:
        # 1-step simplified model
        sigFiles.append("samples/tutorial/SusyFitterTree_p832_GG-One-Step_soft_v1.root")
else:
    # read pre-filled histograms from the cache file instead of trees
    bgdFiles = ["data/"+configMgr.analysisName+".root"]

# Dictionary of cuts for Tree->hist
#CR
configMgr.cutsDict["SLWR"] = "(lep1Pt < 20 && lep2Pt<10 && met>180 && met<250 && mt>40 && mt<80 && nB2Jet==0 && jet1Pt>130 && jet2Pt>25 && AnalysisType==7) || (lep1Pt < 25 && lep2Pt<10 && met>180 && met<250 && mt>40 && mt<80 && nB2Jet==0 && jet1Pt>130 && jet2Pt>25 && AnalysisType==6)"
configMgr.cutsDict["SLTR"] = "(lep1Pt < 25 && lep2Pt<10 && met>180 && met<250 && mt>40 && mt<80 && nB2Jet>0 && jet1Pt>130 && jet2Pt>25 && AnalysisType==6) || (lep1Pt < 20 && lep2Pt<10 && met>180 && met<250 && mt>40 && mt<80 && nB2Jet>0 && jet1Pt>130 && jet2Pt>25 && AnalysisType==7)"
#VR
configMgr.cutsDict["SLVR2"] = "(lep1Pt < 25 && lep2Pt<10 && met>180 && met<250 && mt>80 && mt<100 && jet1Pt>130 && jet2Pt>25 && AnalysisType==6) || (lep1Pt < 20 && lep2Pt<10 && met>180 && met<250 && mt>80 && mt<100 && jet1Pt>130 && jet2Pt>25 && AnalysisType==7)"
#SR
configMgr.cutsDict["SS"] = "((lep1Pt < 20 && lep2Pt<10 && met>250 && mt>100 && jet1Pt>130 && jet2Pt>25 && AnalysisType==7) || (lep1Pt < 25 && lep2Pt<10 && met>250 && mt>100 && jet1Pt>130 && jet2Pt>25 && AnalysisType==6))"
configMgr.cutsDict["SSloose"] = "((lep1Pt < 20 && lep2Pt<10 && met>200 && mt>100 && jet1Pt>130 && jet2Pt>25 && AnalysisType==7) || (lep1Pt < 25 && lep2Pt<10 && met>200 && mt>100 && jet1Pt>130 && jet2Pt>25 && AnalysisType==6))"
configMgr.cutsDict["SR1sl2j"] = configMgr.cutsDict["SS"]+"&& met/meff2Jet>0.3"

# Tuples of nominal weights without and with b-jet selection
configMgr.weights = ("genWeight","eventWeight","leptonWeight","triggerWeight","truthWptWeight","bTagWeight2Jet","1.3")

ktScaleWHighWeights = ("genWeight","eventWeight","ktfacUpWeightW","bTagWeight2Jet")
ktScaleWLowWeights = ("genWeight","eventWeight","ktfacDownWeightW","bTagWeight2Jet")
ktScaleTopHighWeights = ("genWeight","eventWeight","ktfacUpWeightTop","bTagWeight2Jet")
ktScaleTopLowWeights = ("genWeight","eventWeight","ktfacDownWeightTop","bTagWeight2Jet")

# QCD weights without and with b-jet selection
configMgr.weightsQCD = "qcdWeight"
configMgr.weightsQCDWithB = "qcdBWeight"
#--------------------
# List of systematics
#--------------------

# KtScale uncertainty as histoSys - two-sided, no additional normalization
topKtScale = Systematic("KtScaleTop",configMgr.weights,ktScaleTopHighWeights,ktScaleTopLowWeights,"weight","histoSys")
wzKtScale = Systematic("KtScaleWZ",configMgr.weights,ktScaleWHighWeights,ktScaleWLowWeights,"weight","histoSys")
#topKtScale = Systematic("KtScaleTop",configMgr.weights,ktScaleTopHighWeights,ktScaleTopLowWeights,"weight","overallNormHistoSys")
#wzKtScale = Systematic("KtScaleWZ",configMgr.weights,ktScaleWHighWeights,ktScaleWLowWeights,"weight","overallNormHistoSys")

# JES uncertainty as shapeSys - one systematic per region (combine WR and TR), merge samples
jes = Systematic("JES","_NoSys","_JESup","_JESdown","tree","histoSys")
#jes = Systematic("JES","_NoSys","_JESup","_JESdown","tree","overallNormHistoSys")

# per-sample statistical uncertainties in the W region (used only in the
# commented-out alternative below)
statWRwz = Systematic("SLWR_wz", "_NoSys","","","tree","shapeStat")
statWRtop = Systematic("SLWR_top","_NoSys","","","tree","shapeStat")

# name of nominal histogram for systematics
configMgr.nomName = "_NoSys"
# List of samples and their plotting colours
topSample = Sample("Top",kGreen-9)
topSample.setNormFactor("mu_Top",1.,0.,5.)
topSample.setStatConfig(useStat)
topSample.setNormRegions([("SLWR","nJet"),("SLTR","nJet")])
wzSample = Sample("WZ",kAzure+1)
wzSample.setNormFactor("mu_WZ",1.,0.,5.)
wzSample.setStatConfig(useStat)
wzSample.setNormRegions([("SLWR","nJet"),("SLTR","nJet")])
bgSample = Sample("BG",kYellow-3)
bgSample.setNormFactor("mu_BG",1.,0.,5.)
bgSample.setStatConfig(useStat)
bgSample.setNormRegions([("SLWR","nJet"),("SLTR","nJet")])
# QCD is estimated from data, not normalized via a fit parameter
qcdSample = Sample("QCD",kGray+1)
qcdSample.setQCD(True,"histoSys")
qcdSample.setStatConfig(useStat)
# observed data, with hard-coded histograms for the control regions
dataSample = Sample("Data",kBlack)
dataSample.setData()
dataSample.buildHisto([86.,66.,62.,35.,11.,7.,2.,0.],"SLTR","nJet",2)
dataSample.buildHisto([1092.,426.,170.,65.,27.,9.,4.,1.],"SLWR","nJet",2)

## set the file from which the samples should be taken
#for sam in [topSample, wzSample, qcdSample, bgSample, dataSample]:
#sam.setFileList(bgdFiles)
topSample.addInputs(bgdFiles, "Top")
wzSample.addInputs(bgdFiles, "WZ")
qcdSample.addInputs(bgdFiles, "QCD")
bgSample.addInputs(bgdFiles, "BG")
dataSample.addInputs(bgdFiles, "Data")
#Binnings
# jet-multiplicity axes for the control/validation regions
nJetBinLowHard = 3
nJetBinLowSoft = 2
nJetBinHighTR = 10
nJetBinHighWR = 10

nBJetBinLow = 0
nBJetBinHigh = 4

# effective-mass axes
meffNBins = 6
meffBinLow = 400.
meffBinHigh = 1600.

meffNBinsSR4 = 4
meffBinLowSR4 = 800.
meffBinHighSR4 = 1600.

lepPtNBins = 6
lepPtLow = 20.
lepPtHigh = 600.

# single-bin "cuts" channels use this trivial axis
srNBins = 1
srBinLow = 0.5
srBinHigh = 1.5
#************
#Bkg only fit
#************

bkt = configMgr.addFitConfig("BkgOnly")
if useStat:
    bkt.statErrThreshold=0.05
else:
    bkt.statErrThreshold=None
bkt.addSamples([topSample,wzSample,bgSample,qcdSample,dataSample])

# Systematics to be applied globally within this topLevel
bkt.getSample("Top").addSystematic(topKtScale)
bkt.getSample("WZ").addSystematic(wzKtScale)

meas=bkt.addMeasurement(name="NormalMeasurement",lumi=1.0,lumiErr=0.039)
meas.addPOI("mu_SIG")
# fix background norm factor and luminosity to their nominal values
meas.addParamSetting("mu_BG",True,1)
meas.addParamSetting("Lumi",True,1)

#-------------------------------------------------
# Constraining regions - statistically independent
#-------------------------------------------------

# WR using nJet
nJetWS = bkt.addChannel("nJet",["SLWR"],nJetBinHighWR-nJetBinLowSoft,nJetBinLowSoft,nJetBinHighWR)
nJetWS.hasB = True
nJetWS.hasBQCD = False
nJetWS.useOverflowBin = False
nJetWS.addSystematic(jes)

# TR using nJet
nJetTS = bkt.addChannel("nJet",["SLTR"],nJetBinHighTR-nJetBinLowSoft,nJetBinLowSoft,nJetBinHighTR)
nJetTS.hasB = True
nJetTS.hasBQCD = True
nJetTS.useOverflowBin = False
nJetTS.addSystematic(jes)

bkt.addBkgConstrainChannels([nJetWS,nJetTS])

### alternative: statistical error for each sample
#nJetWS.getSample("Top").addSystematic(statWRtop)
#nJetWS.getSample("WZ").addSystematic(statWRwz)

###################
#                                               #
#    Example new cosmetics     #
#                                               #
###################

# Set global plotting colors/styles
bkt.dataColor = dataSample.color
bkt.totalPdfColor = kBlue
bkt.errorFillColor = kBlue-5
bkt.errorFillStyle = 3004
bkt.errorLineStyle = kDashed
bkt.errorLineColor = kBlue-5

# Set Channel titleX, titleY, minY, maxY, logY
nJetWS.minY = 0.5
nJetWS.maxY = 5000
nJetWS.titleX = "n jets"
nJetWS.titleY = "Entries"
nJetWS.logY = True
nJetWS.ATLASLabelX = 0.25
nJetWS.ATLASLabelY = 0.85
nJetWS.ATLASLabelText = "Work in progress"
#--------------------------------------------------------------
# Validation regions - not necessarily statistically independent
#--------------------------------------------------------------

if doValidation:
    # s1l2jT
    srs1l2jTChannel = bkt.addChannel("cuts",["SR1sl2j"],srNBins,srBinLow,srBinHigh)
    srs1l2jTChannel.addSystematic(jes)

    # additional VRs if using soft lep CRs
    nJetSLVR2 = bkt.addChannel("nJet",["SLVR2"],nJetBinHighTR-nJetBinLowSoft,nJetBinLowSoft,nJetBinHighTR)
    nJetSLVR2.addSystematic(jes)

    #signal region treated as validation region for this case
    mm2J = bkt.addChannel("met/meff2Jet",["SS"],6,0.1,0.7)
    mm2J.useOverflowBin=True
    mm2J.addSystematic(jes)
    mm2J.remapSystChanName = 'metmeff2Jet_SSloose'

    #signal region treated as validation region for this case
    mm2Jl = bkt.addChannel("met/meff2Jet",["SSloose"],6,0.1,0.7)
    mm2Jl.useOverflowBin=True
    mm2Jl.addSystematic(jes)

    # bkt.addValidationChannels([nJetSLVR2,metSLVR2,meffSLVR2,nBJetSLVR2,metmeffSLVR2,mm2J,srs1l2jTChannel])
    bkt.addValidationChannels([nJetSLVR2,srs1l2jTChannel,mm2J,mm2Jl])

    # hard-coded observed data for the validation regions
    dataSample.buildHisto([0.,1.,6.,16.,3.,0.],"SS","metmeff2Jet",0.1,0.1)
    dataSample.buildHisto([25.],"SR1sl2j","cuts",0.5)
    dataSample.buildHisto([1.,6.,24.,37.,7.,0.],"SSloose","metmeff2Jet",0.1,0.1)
    dataSample.buildHisto([403.,202.,93.,39.,11.,10.,4.,1.],"SLVR2","nJet",2)
#**************
# Discovery fit
#**************

if myFitType==FitType.Discovery:
    discovery = configMgr.addFitConfigClone(bkt,"Discovery")

    # s1l2jT = signal region/channel
    ssChannel = discovery.addChannel("cuts",["SS"],srNBins,srBinLow,srBinHigh)
    ssChannel.addSystematic(jes)
    ssChannel.addDiscoverySamples(["SS"],[1.],[0.],[100.],[kMagenta])
    discovery.addSignalChannels([ssChannel])
    # hard-coded observed data in the discovery signal region
    dataSample.buildHisto([26.],"SS","cuts",0.5)
#-----------------------------
# Exclusion fits (1-step simplified model in this case)
#-----------------------------
if myFitType==FitType.Exclusion:
    sigSamples=["SM_GG_onestepCC_425_385_345"]
    dataSample.buildHisto([1.,6.,16.,3.,0.],"SS","metmeff2Jet",0.2,0.1)
    # one fit configuration per signal grid point
    for sig in sigSamples:
        myTopLvl = configMgr.addFitConfigClone(bkt,"Sig_%s"%sig)
        sigSample = Sample(sig,kPink)
        sigSample.addInputs(sigFiles)
        sigSample.setNormByTheory()
        sigSample.setStatConfig(useStat)
        sigSample.setNormFactor("mu_SIG",1.,0.,5.)
        myTopLvl.addSamples(sigSample)
        myTopLvl.setSignalSample(sigSample)

        # s1l2j using met/meff
        if doValidation:
            # the SS channel already exists as a VR: promote it to SR
            mm2J = myTopLvl.getChannel("met/meff2Jet",["SS"])
            iPop=myTopLvl.validationChannels.index("SS_metmeff2Jet")
            myTopLvl.validationChannels.pop(iPop)
        else:
            mm2J = myTopLvl.addChannel("met/meff2Jet",["SS"],5,0.2,0.7)
            mm2J.useOverflowBin=True
            mm2J.addSystematic(jes)
            pass
        myTopLvl.addSignalChannels([mm2J])
# Create TLegend (AK: TCanvas is needed for that, but it gets deleted afterwards)
c = TCanvas()
compFillStyle = 1001 # see ROOT for Fill styles
leg = TLegend(0.6,0.475,0.9,0.925,"")
leg.SetFillStyle(0)
leg.SetFillColor(0)
leg.SetBorderSize(0)
#
# (this first TLegendEntry is immediately overwritten by AddEntry below)
entry = TLegendEntry()
entry = leg.AddEntry("","Data 2011 (#sqrt{s}=7 TeV)","p")
entry.SetMarkerColor(bkt.dataColor)
entry.SetMarkerStyle(20)
#
entry = leg.AddEntry("","Total pdf","lf")
entry.SetLineColor(bkt.totalPdfColor)
entry.SetLineWidth(2)
entry.SetFillColor(bkt.errorFillColor)
entry.SetFillStyle(bkt.errorFillStyle)
#
entry = leg.AddEntry("","t#bar{t}","lf")
entry.SetLineColor(topSample.color)
entry.SetFillColor(topSample.color)
entry.SetFillStyle(compFillStyle)
#
entry = leg.AddEntry("","WZ","lf")
entry.SetLineColor(wzSample.color)
entry.SetFillColor(wzSample.color)
entry.SetFillStyle(compFillStyle)
#
entry = leg.AddEntry("","multijets (data estimate)","lf")
entry.SetLineColor(qcdSample.color)
entry.SetFillColor(qcdSample.color)
entry.SetFillStyle(compFillStyle)
#
entry = leg.AddEntry("","single top & diboson","lf")
entry.SetLineColor(bgSample.color)
entry.SetFillColor(bgSample.color)
entry.SetFillStyle(compFillStyle)
#
if myFitType==FitType.Exclusion:
    entry = leg.AddEntry("","signal","lf")
    entry.SetLineColor(kPink)
    entry.SetFillColor(kPink)
    entry.SetFillStyle(compFillStyle)

# Set legend for fitConfig
bkt.tLegend = leg
# NOTE(review): myTopLvl is only defined when the Exclusion branch above ran,
# which this guard ensures.
if myFitType==FitType.Exclusion:
    myTopLvl.tLegend = leg
c.Close()
|
import copy
# import json
import requests
class ApiError(Exception):
    """Base error raised when the remote API returns a non-OK response."""
    pass
class ApiValidationError(ApiError):
    """Raised locally when request data fails validation before sending."""
    pass
class SectionStatus:
    """Numeric visibility codes accepted by the sections API."""
    PRIVATE = 1   # visible only to staff
    PUBLIC = 2    # publicly listed
    UNLISTED = 3  # reachable by URL but not listed
class ApiBase:
    """Thin wrapper over `requests` that signs every call with an API key
    and optionally prints diagnostics according to ``verbosity``."""

    API_VERSION = '1.3'

    def __init__(self, domain, api_key, http_auth_user=None, http_auth_pwd=None, verbosity=0):
        self.domain = domain
        self.api_key = api_key
        self.verbosity = verbosity
        # basic-auth tuple only when a user is configured
        if http_auth_user:
            self.auth = (http_auth_user, http_auth_pwd)
        else:
            self.auth = None

    def _request(self, request):
        """Send a prepared request; return parsed JSON or raise ApiError."""
        self._print_info_about_request(request)
        session = requests.Session()
        response = session.send(request.prepare())
        self._print_info_about_response(response)
        if response.status_code != requests.codes.ok:
            print(response.url)
            print(response.history)
            raise ApiError(response.json())
        return response.json()

    def _print_info_about_request(self, request):
        if self.verbosity == 0:
            return
        if self.verbosity >= 1:
            print('API call:', request.method, request.url)
        if self.verbosity >= 2:
            print('params:', request.params)
            print('json:', request.json)
            # print()

    def _print_info_about_response(self, response):
        if self.verbosity == 0:
            return
        if self.verbosity >= 1:
            print('API response:', response.status_code)
        if self.verbosity >= 2:
            # always dump the body on errors; on success only at verbosity 3+
            if response.status_code != requests.codes.ok or self.verbosity >= 3:
                print(response.content)
            print()

    def _get_request(self, url, params=None):
        request = requests.Request(
            'GET',
            url=url,
            params=self._build_params(params),
            auth=self.auth,
        )
        return self._request(request)

    def _post_request(self, url, params=None, data=None):
        request = requests.Request(
            'POST',
            url=url,
            json=data,
            params=self._build_params(params),
            auth=self.auth,
        )
        return self._request(request)

    def _put_request(self, url, params=None, data=None):
        request = requests.Request(
            'PUT',
            url=url,
            params=self._build_params(params),
            json=data,
            auth=self.auth,
        )
        return self._request(request)

    def _build_params(self, params):
        """Return a copy of ``params`` (never mutated) with the api_key added."""
        merged = copy.deepcopy(params) if params else {}
        merged['api_key'] = self.api_key
        return merged
class API(ApiBase):
    """Concrete API endpoints (images, sections, drafts, authors) built on
    the request plumbing in ApiBase."""

    def upload_image(self, image_url, caption='', credit='', alt=''):
        """Upload an image by URL; return its ids and embed shortcode."""
        url = 'https://{}/api/{}/images'.format(self.domain, self.API_VERSION)
        params = {
            'image_url': image_url,
            'caption': caption,
            'photo_credit': credit,
            'alt': alt,
        }
        response = self._post_request(url, data=params)
        result = {
            'is_animated_gif': response['is_animated_gif'],
            'shortcode_id': response['shortcode_id'],
            'image_id': response['id'],
            'shortcode': response['shortcode'],
        }
        return result

    def get_sections(self):
        """Return an iterator of normalized section dicts."""
        api_url = 'https://{}/api/{}/sections'.format(self.domain, self.API_VERSION)
        response = self._get_request(api_url)
        return map(self._extract_section_info_from_item, response)

    def create_section(self, title, url, status=SectionStatus.PRIVATE, about_html=''):
        """Create a section; return its normalized info dict."""
        api_url = 'https://{}/api/{}/sections'.format(self.domain, self.API_VERSION)
        params = {
            'title': title,
            'url': url,
            'status': status,
            'about_html': about_html,
        }
        response = self._post_request(api_url, data=params)
        return self._extract_section_info_from_item(response)

    def create_draft(self, **entry):
        """Create a draft; 'headline' is required.

        :raises ApiValidationError: when no headline is given
        """
        api_url = 'https://{}/api/{}/drafts'.format(self.domain, self.API_VERSION)
        if not entry.get('headline'):
            raise ApiValidationError('headline is required')
        return self._post_request(api_url, data=entry)

    def publish_draft(self, draft_id):
        """Publish an existing draft."""
        api_url = 'https://{}/api/{}/drafts/{}'.format(self.domain, self.API_VERSION, draft_id)
        return self._put_request(api_url, data={'action': 'publish'})

    def create_author(
        self,
        email,
        name,
        first_name=None,
        last_name=None,
        # Fixed: the default was an invalid `<PASSWORD>` placeholder (a
        # syntax error); None matches the `if password:` handling below.
        password=None,
        about_html='',
        image_id=None,
        specific_data=None,
    ):
        """Create an author account; optional fields are sent only when set."""
        # api_url = 'https://{}/api/{}/authors'.format(self.domain, self.API_VERSION)
        # NOTE(review): authors endpoint is pinned to API 1.1, unlike the rest
        api_url = 'https://{}/api/1.1/authors'.format(self.domain)
        params = {
            'email': email,
            'name': name,
            'about_html': about_html,
        }
        if first_name:
            params['first_name'] = first_name
        if last_name:
            params['last_name'] = last_name
        if password:
            params['password'] = password
        if image_id:  # fixed: this check was duplicated
            params['image_id'] = image_id
        if specific_data:
            params['specific_data'] = specific_data
        response = self._post_request(api_url, data=params)
        return response

    def _extract_section_info_from_item(self, item):
        """Project a raw section payload down to the fields callers use."""
        return {
            'id': item['id'],
            'title': item['title'],
            'url': item['url'],
            'status': item['status'],
            'parent_id': item['parent_id'],
        }
|
<filename>research/cv/VehicleNet/eval.py
# Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""
################################eval vehiclenet################################
python eval.py
"""
import ast
import os
import time
import argparse
import numpy as np
import mindspore
import mindspore.nn as nn
import mindspore.ops as ops
from mindspore import Tensor
from mindspore import context
from mindspore.train.serialization import load_checkpoint, load_param_into_net
from mindspore.common import set_seed
from src.config import common_config, VeRi_test
from src.VehicleNet_resnet50 import VehicleNet
from src.dataset import data_to_mindrecord, create_vehiclenet_dataset
from src.re_ranking import re_ranking
set_seed(1)
def fliplr(x):
    """flip horizontally

    Reverses the last (width) axis of every image in the batch, in place.
    ``np.flip(img, axis=2)`` is equivalent to the original
    transpose -> np.fliplr -> transpose round-trip.

    :param x: numpy array of images; presumably (N, C, H, W) — TODO confirm
    :return: the same (mutated) array
    """
    for idx in range(x.shape[0]):
        x[idx] = np.flip(x[idx], axis=2)
    return x
def extract_feature(model, dataset):
    """feature extract

    Runs the model on every sample and on its horizontally flipped copy,
    sums the two 512-d outputs and L2-normalizes the result.
    Returns (features, labels, cameras).

    NOTE(review): `features[idx] = ff.asnumpy()` stores one row per dataset
    item, which assumes batch_size == 1 — confirm against the caller.
    """
    norm = nn.Norm(axis=1, keep_dims=True)
    div = ops.Div()
    image_size = dataset.get_dataset_size()
    features = np.zeros((image_size, 512), dtype=float)
    label = []
    camera = []
    for idx, data in enumerate(dataset.create_dict_iterator(output_numpy=False)):
        img = data['image']
        label.append(data['label'].asnumpy()[0])
        camera.append(data['camera'].asnumpy()[0])
        n = img.shape[0]
        ff = Tensor(np.zeros((n, 512)), mindspore.float32)
        # pass 0: original image; pass 1: horizontally flipped image
        for i in range(2):
            if i == 1:
                img = img.asnumpy()
                img = fliplr(img)
                img = Tensor.from_numpy(img)
            outputs = model(img)
            ff += outputs
        # L2-normalize the summed feature vector per row
        fnorm = norm(ff)
        ff = div(ff, fnorm.expand_as(ff))
        features[idx] = ff.asnumpy()
    return features, label, camera
def calculate_result_rerank(test_feature_, test_label_, test_camera_, query_feature_, query_label_, query_camera_, k1=100, k2=15, lambda_value=0):
    """accuracy calculation

    Applies k-reciprocal re-ranking to the query/gallery similarity matrices,
    then averages per-query AP and CMC over all queries.
    NOTE(review): results are printed, not returned — confirm callers only
    need the printed summary.
    """
    CMC = np.zeros((len(test_label_)), dtype=float)
    AP = 0.0
    since = time.time()
    # cosine-style similarity matrices (features are L2-normalized upstream)
    q_t_dist = np.matmul(query_feature_, test_feature_.transpose((1, 0)))
    q_q_dist = np.matmul(query_feature_, query_feature_.transpose((1, 0)))
    t_t_dist = np.matmul(test_feature_, test_feature_.transpose((1, 0)))
    re_rank = re_ranking(q_t_dist, q_q_dist, t_t_dist, k1, k2, lambda_value)
    time_elapsed = time.time() - since
    print('Reranking complete in {:.0f}m {:.0f}s'.format(time_elapsed // 60, time_elapsed % 60))
    for i in range(len(query_label_)):
        AP_tmp, CMC_tmp = evaluate(re_rank[i, :], query_label_[i], query_camera_[i], test_label_, test_camera_)
        # CMC_tmp[0] == -1 marks a query with no valid gallery match; skip it
        if CMC_tmp[0] == -1:
            continue
        CMC = CMC + CMC_tmp
        AP += AP_tmp
    CMC = CMC / len(query_label_)
    str_result = 'Rank@1:%f Rank@5:%f Rank@10:%f mAP:%f\n' % (CMC[0], CMC[4], CMC[9], AP / len(query_label_))
    print(str_result)
def evaluate(score, query_label_, query_camera_, test_label_, test_camera_):
    """Rank the gallery for one query and return its (AP, CMC).

    Good matches share the query's id but come from a different camera;
    same-id/same-camera entries and distractors (label == -1) are junk.
    """
    ranked = np.argsort(score)
    same_id = np.argwhere(test_label_ == query_label_)
    same_cam = np.argwhere(test_camera_ == query_camera_)
    good_index = np.setdiff1d(same_id, same_cam, assume_unique=True)
    distractors = np.argwhere(test_label_ == -1)
    same_id_same_cam = np.intersect1d(same_id, same_cam)
    junk_index = np.append(same_id_same_cam, distractors)
    return compute_mAP(ranked, good_index, junk_index)
def compute_mAP(index, good_index, junk_index):
    """compute mAP

    :param index: gallery indices ranked best-first for one query
    :param good_index: indices counting as correct matches
    :param junk_index: indices to ignore entirely in the ranking
    :return: (average precision, CMC array of len(index) ints);
             cmc[0] == -1 signals "no valid match exists for this query"
    """
    ap = 0
    # CMC length intentionally uses the unfiltered ranking length
    cmc = np.zeros((len(index)), dtype=int)
    if good_index.size == 0:
        cmc[0] = -1
        return ap, cmc

    # remove junk entries from the ranking
    mask = np.in1d(index, junk_index, invert=True)
    index = index[mask]

    # positions of the good matches within the cleaned ranking
    ngood = len(good_index)
    rows_good = np.argwhere(np.in1d(index, good_index)).flatten()

    # CMC is 1 from the first correct match onward (vectorized, was a loop)
    cmc[rows_good[0]:] = 1

    # trapezoidal AP over the recall steps at each good match
    for i in range(ngood):
        d_recall = 1.0 / ngood
        precision = (i + 1) * 1.0 / (rows_good[i] + 1)
        if rows_good[i] != 0:
            old_precision = i * 1.0 / rows_good[i]
        else:
            old_precision = 1.0
        ap = ap + d_recall * (old_precision + precision) / 2
    return ap, cmc
if __name__ == '__main__':
    # Standalone retrieval evaluation for VehicleNet on the VeRi test split.
    parser = argparse.ArgumentParser(description='vehiclenet eval')
    parser.add_argument('--device_id', type=int, default=None, help='device id of GPU or Ascend. (Default: None)')
    parser.add_argument('--ckpt_url', type=str, default=None, help='Checkpoint file path')
    parser.add_argument('--is_modelarts', type=ast.literal_eval, default=False, help='Train in Modelarts.')
    parser.add_argument('--multiple_scale', type=str, default='1', help='mutiple scale')
    parser.add_argument('--data_url', default=None, help='Location of data.')
    parser.add_argument('--train_url', default=None, help='Location of training outputs.')
    args_opt = parser.parse_args()

    cfg = common_config
    VeRi_cfg = VeRi_test

    # Graph-mode execution; only the Ascend target is supported here.
    device_target = cfg.device_target
    context.set_context(mode=context.GRAPH_MODE, device_target=cfg.device_target)
    if device_target == "Ascend":
        # CLI --device_id overrides the value from the config file.
        if args_opt.device_id is not None:
            context.set_context(device_id=args_opt.device_id)
        else:
            context.set_context(device_id=cfg.device_id)
    else:
        raise ValueError("Unsupported platform.")

    eval_dataset_path = cfg.dataset_path
    if args_opt.is_modelarts:
        # On ModelArts, stage the dataset zip into the node-local cache and
        # unzip it before evaluation.
        import moxing as mox
        mox.file.copy_parallel(src_url=args_opt.data_url,
                               dst_url='/cache/dataset_train/device_' + os.getenv('DEVICE_ID'))
        zip_command = "unzip -o /cache/dataset_train/device_" + os.getenv('DEVICE_ID') \
                      + "/VehicleNet_mindrecord.zip -d /cache/dataset_train/device_" + os.getenv('DEVICE_ID')
        os.system(zip_command)
        eval_dataset_path = '/cache/dataset_train/device_' + os.getenv('DEVICE_ID') + '/VehicleNet/'

    # Build the gallery ("test") MindRecord file if missing, then wait for
    # its index (.db) file to appear on disk before reading.
    mindrecord_dir = cfg.mindrecord_dir
    prefix = "test_VehicleNet.mindrecord"
    test_mindrecord_file = os.path.join(mindrecord_dir, prefix)
    if not os.path.exists(test_mindrecord_file):
        if not os.path.isdir(mindrecord_dir):
            os.makedirs(mindrecord_dir)
        print("Create mindrecord for test.")
        data_to_mindrecord(eval_dataset_path, False, False, True, test_mindrecord_file)
        print("Create mindrecord done, at {}".format(mindrecord_dir))
    while not os.path.exists(test_mindrecord_file + ".db"):
        time.sleep(5)

    # Same for the query split.
    prefix = "query_VehicleNet.mindrecord"
    query_mindrecord_file = os.path.join(mindrecord_dir, prefix)
    if not os.path.exists(query_mindrecord_file):
        if not os.path.isdir(mindrecord_dir):
            os.makedirs(mindrecord_dir)
        print("Create mindrecord for query.")
        data_to_mindrecord(eval_dataset_path, False, False, False, query_mindrecord_file)
        print("Create mindrecord done, at {}".format(mindrecord_dir))
    while not os.path.exists(query_mindrecord_file + ".db"):
        time.sleep(5)

    test_dataset = create_vehiclenet_dataset(test_mindrecord_file, batch_size=1, device_num=1, is_training=False)
    query_dataset = create_vehiclenet_dataset(query_mindrecord_file, batch_size=1, device_num=1, is_training=False)
    test_data_num = test_dataset.get_dataset_size()
    query_data_num = query_dataset.get_dataset_size()

    # Load the trained network and replace the classifier head with an empty
    # cell so the forward pass outputs embedding features for retrieval.
    net = VehicleNet(class_num=VeRi_cfg.num_classes)
    net.classifier.classifier = nn.SequentialCell()
    param_dict = load_checkpoint(args_opt.ckpt_url)
    load_param_into_net(net, param_dict)
    net.set_train(False)

    # Extract gallery and query features, then score with re-ranking.
    test_feature, test_label, test_camera = extract_feature(net, test_dataset)
    query_feature, query_label, query_camera = extract_feature(net, query_dataset)
    calculate_result_rerank(test_feature, test_label, test_camera, query_feature, query_label, query_camera)
|
<filename>openerp/addons/l10n_in_hr_payroll/report/payslip_report.py
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2012-Today OpenERP SA (<http://www.openerp.com>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp import tools
from openerp.osv import fields, osv
class payslip_report(osv.osv):
    """Read-only payslip analysis model backed by a SQL view.

    ``_auto = False`` prevents OpenERP from creating a table for this model;
    ``init()`` (re)creates the ``payslip_report`` view instead, aggregating
    payslip line totals per slip/employee/category.
    """
    _name = "payslip.report"
    _description = "Payslip Analysis"
    # No backing table -- rows come from the SQL view built in init().
    _auto = False
    _columns = {
        # All fields are readonly: the model is a reporting view.
        'name':fields.char('Name', size=32, readonly=True),
        'date_from': fields.date('Date From', readonly=True,),
        'date_to': fields.date('Date To', readonly=True,),
        'year': fields.char('Year', size=4, readonly=True),
        'month': fields.selection([('01', 'January'), ('02', 'February'), ('03', 'March'), ('04', 'April'),
                                   ('05', 'May'), ('06', 'June'), ('07', 'July'), ('08', 'August'), ('09', 'September'),
                                   ('10', 'October'), ('11', 'November'), ('12', 'December')], 'Month', readonly=True),
        'day': fields.char('Day', size=128, readonly=True),
        'state': fields.selection([
            ('draft', 'Draft'),
            ('done', 'Done'),
            ('cancel', 'Rejected'),
        ], 'Status', readonly=True),
        'employee_id': fields.many2one('hr.employee', 'Employee', readonly=True),
        'nbr': fields.integer('# Payslip lines', readonly=True),
        'number': fields.char('Number', size=16, readonly=True),
        'struct_id': fields.many2one('hr.payroll.structure', 'Structure', readonly=True),
        'company_id':fields.many2one('res.company', 'Company', readonly=True),
        'paid': fields.boolean('Made Payment Order ? ', readonly=True),
        'total': fields.float('Total', readonly=True),
        'category_id':fields.many2one('hr.salary.rule.category', 'Category', readonly=True),
    }

    def init(self, cr):
        # Rebuild the reporting view from hr_payslip joined to its lines.
        tools.drop_view_if_exists(cr, 'payslip_report')
        cr.execute("""
            create or replace view payslip_report as (
                select
                    min(l.id) as id,
                    l.name,
                    p.struct_id,
                    p.state,
                    p.date_from,
                    p.date_to,
                    p.number,
                    p.company_id,
                    p.paid,
                    l.category_id,
                    l.employee_id,
                    sum(l.total) as total,
                    to_char(p.date_from, 'YYYY') as year,
                    to_char(p.date_from, 'MM') as month,
                    to_char(p.date_from, 'YYYY-MM-DD') as day,
                    to_char(p.date_to, 'YYYY') as to_year,
                    to_char(p.date_to, 'MM') as to_month,
                    to_char(p.date_to, 'YYYY-MM-DD') as to_day,
                    1 AS nbr
                from
                    hr_payslip as p
                    left join hr_payslip_line as l on (p.id=l.slip_id)
                where
                    l.employee_id IS NOT NULL
                group by
                    p.number,l.name,p.date_from,p.date_to,p.state,p.company_id,p.paid,
                    l.employee_id,p.struct_id,l.category_id
            )
        """)

# Register the model with the OpenERP ORM (pre-v8 registration style).
payslip_report()
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
from eyed3.utils.log import log as eyed3_log
from discogs_client.exceptions import HTTPError
from eyed3.id3 import Genre
from difflib import SequenceMatcher
import genre.config as config
import eyed3
import click
import discogs_client
import pathlib
import colorama
import pickle
import logging
import time
# Quiet eyeD3's warnings about non-standard genre strings.
eyed3_log.setLevel(logging.ERROR)

# Module-level discogs client; OAuth credentials are attached later by auth().
client = discogs_client.Client(config.USER_AGENT)
client.set_consumer_key(config.DISCOGS_KEY, config.DISCOGS_SECRET)
@click.command()
@click.option('--query', '-q', help='Specify a query to use when searching for a matching track')
@click.option('--max-genres', '-m', help='Maximum number of genres to allow in a tag', default=config.DEFAULT_MAX_GENRES)
@click.option('--yes-if-exact', '-y', help='Do not wait for user confirmation if match is exact', flag_value=True)
@click.option('--relax', '-r', help='Relax exactness definition', flag_value=True)
@click.option('--skip-if-set', '-s', help='Skip lookup if a genre has already been set', flag_value=True)
@click.option('--reset-genre', '-R', help='Reset genre before looking up', flag_value=True)
@click.option('--dry-run', '-d', help='Perform lookup but do not write tags.', flag_value=True)
@click.version_option(version=config.VERSION)
@click.argument('files', nargs=-1, type=click.Path(exists=True, dir_okay=False, readable=True, writable=True))
def main(files, query, max_genres, yes_if_exact, relax, skip_if_set, reset_genre, dry_run):
    """Look up genre tags on discogs for each FILE and write them to the tag.

    Rate-limit (HTTP 429) errors are retried up to ``config.MAX_RETRIES``
    times per file; any other HTTP error skips the file.
    """
    if not auth():
        return False
    for file in files:
        retries = 0
        while retries < config.MAX_RETRIES:
            try:
                result = process(file, query, max_genres, yes_if_exact, relax, skip_if_set, reset_genre, dry_run)
                if result:
                    click.echo('Genre for:\t{} set to {}'.format(*result))
                else:
                    click.echo('Genre for:\t{} not changed'.format(file))
                break
            except HTTPError as e:
                if e.status_code == 429:
                    # Rate-limited: back off and retry this file.
                    click.echo('Making too many requests to discogs, trying again in {} seconds.'.format(str(config.RETRY_PAUSE)))
                    retries = retries + 1
                    time.sleep(config.RETRY_PAUSE)
                    continue
                # Bug fix: any non-429 HTTP error previously fell through and
                # re-entered the while loop without incrementing ``retries``,
                # looping forever on persistent failures.  Report and move on.
                click.echo('discogs request failed for {}: {}'.format(file, e))
                break
        # pause for REQUEST_PAUSE seconds to avoid hammering discogs API too hard
        time.sleep(config.REQUEST_PAUSE)
def auth():
    """Ensure the discogs client is authorized.

    On first use, runs the OAuth dance (prompting the user for the
    verification code) and persists the token to ``config.AUTH_FILE``;
    on later runs, loads the saved (token, secret) pair.

    Returns True on success, False when authentication failed.
    """
    auth_file_path = pathlib.Path(config.AUTH_FILE)
    if not auth_file_path.exists():
        token, secret, url = client.get_authorize_url()
        click.echo('Please browse to {}'.format(url))
        oauth_verifier = click.prompt('Please enter the code you received from discogs')
        try:
            token, secret = client.get_access_token(oauth_verifier)
            save_auth(auth_file_path, token, secret)
        except HTTPError:
            # Bug fix: previously fell through and returned True even though
            # no access token was obtained.  (Also fixes the "Authetication"
            # typo in the message.)
            click.echo('Authentication failure.')
            return False
    else:
        client.set_token(*get_auth(auth_file_path))
    return True
def get_auth(auth_file_path):
    """Load the previously saved (token, secret) pair from *auth_file_path*."""
    raw = auth_file_path.read_bytes()
    return pickle.loads(raw)
def save_auth(auth_file_path, token, secret):
    """Persist the (token, secret) pair to *auth_file_path* as a pickle."""
    payload = pickle.dumps((token, secret))
    with auth_file_path.open('wb') as handle:
        handle.write(payload)
def search_discogs(path, tag, query):
    """Search discogs for a release matching the file.

    Prefers the file's existing artist/album tags; falls back to the file
    name, unless an explicit *query* overrides both.
    """
    have_tags = tag and tag.artist and tag.album
    if query or not have_tags:
        term = query if query else path.stem
        return client.search(term, type='release')
    return client.search(tag.album, artist=tag.artist, album=tag.album, type='release')
def is_exact_match(tag, release, relax):
    """Decide whether *release* matches the file's existing tags.

    Strict mode requires identical normalized artist sets and an identical
    album title; relaxed mode accepts any shared artist plus a fuzzy title
    match above ``config.MIN_MATCH_RATIO``.
    """
    # Only files that already carry artist and album tags can match exactly.
    if not (tag and tag.artist and tag.album):
        return False

    def normalize(text):
        return text.strip().lower().replace('\'', '')

    release_artists = {normalize(artist.name) for artist in release.artists}
    tag_artists = {normalize(name) for name in tag.artist.split(',')}

    if not relax:
        return release_artists == tag_artists and release.title == tag.album
    shared = release_artists & tag_artists
    return (
        len(shared) and
        SequenceMatcher(None, tag.album, release.title).ratio() >= config.MIN_MATCH_RATIO
    )
def process(file, query, max_genres, yes_if_exact, relax, skip_if_set, reset_genre, dry_run):
    """Look up the genre for one file on discogs and write it to its tag.

    Returns a ``(file_name, genre)`` tuple when a genre was chosen, or
    ``False`` when the file was skipped or no release was selected.
    """
    path = pathlib.Path(file).absolute()
    audio_file = eyed3.load(str(path))
    tag = audio_file.tag
    if not dry_run and not tag:
        # File has no ID3 tag yet: create one so we can store the genre.
        audio_file.initTag(version=eyed3.id3.ID3_V2_3)
        tag = audio_file.tag
    click.echo('Processing:\t{}'.format(path.name))
    click.echo('Artist: {}, Title: {}, Album: {}, Genre: {}'.format(
        tag.artist, tag.title, tag.album, tag.genre
    ))
    if skip_if_set and tag.genre:
        click.echo('Skipping:\t{}, genre is already set to {}'.format(path.name, tag.genre))
        return False
    results = search_discogs(path, tag, query)
    release = None
    # Exact-match fast path: take the first result without asking the user.
    if results.count and yes_if_exact and is_exact_match(tag, results[0], relax):
        release = results[0]
        styles = release.styles[:max_genres] if release.styles else release.genres
        click.echo('Found exact match for {}: {}'.format(path.name, ', '.join(styles)))
        if reset_genre:
            tag.genre = Genre()
        if not dry_run:
            # NOTE(review): this save happens before the genre is assigned
            # further down (which saves again); it looks redundant apart from
            # persisting a genre reset -- confirm intent.
            tag.save()
    # if we have results, and haven't already found an exact match
    # then we iterate over results and ask user to enter the index
    # of their choice
    if results.count and not release:
        click.echo('Choose option from below, 0 to skip, just press enter to pick first release.')
        for i, rel in enumerate(results):
            if i == config.MAX_SEARCH_RESULTS:
                break
            artist = ', '.join(artist.name for artist in rel.artists)
            styles = rel.styles[:max_genres] if rel.styles else rel.genres
            click.echo('[{}]\t: {} - {} [{}]'.format(i + 1, artist, rel.title, ', '.join(styles)))
        choice = click.prompt('Choice', type=int, default=1)
        # subtract by one to adjust for zero indexing
        if choice:
            # NOTE(review): a negative entry is truthy here and indexes from
            # the end of the result list -- probably unintended; verify.
            release = results[choice - 1]
        elif choice <= 0:
            click.echo('Skipping:\t{}'.format(path.name))
    elif not results.count:
        click.echo('No results found for {}'.format(path.stem))
    if release:
        # NOTE(review): unlike the listing above, this uses release.styles
        # only (no fallback to release.genres) -- confirm a style-less
        # release is acceptable here.
        genre = ', '.join(release.styles[:max_genres])
        tag.genre = genre
        if not dry_run:
            tag.save(str(path))
        return (path.name, genre)
    return False
# CLI entry point; click parses arguments and options.
if __name__ == '__main__':
    main()
|
<filename>tests/test_geometry.py
from math import hypot, isclose, sqrt
from hypothesis import given
from hypothesis.strategies import floats, integers, tuples
from algorithms.geometry import (
Line2,
Vec2,
Vec3,
angle_cmp,
circle_intersection,
circle_line_intersection,
convex_hull,
convex_polygon,
distance_to_line,
l2,
line,
line_intersect,
line_parallel,
line_projection,
line_same,
orientation,
orthogonal,
point_inside_convex_polygon,
points_inside,
polygon_area,
segment_cover,
segment_intersection,
segment_union_measure,
vec2_prod,
vec3_prod,
)
from tests.utils import float_eq
def test_dot2():
    """Orthogonality via 2D dot product, with integer and float components."""
    x0 = Vec2(4, 2)
    y0 = Vec2(-1, 2)
    y1 = Vec2(-1, 0)
    assert orthogonal(x0, y0)
    assert not orthogonal(x0, y1)
    x1 = Vec2(1.5, 3.1)
    y2 = Vec2(1, -15 / 31)
    y3 = Vec2(3, 2.5)
    assert orthogonal(x1, y2)
    assert not orthogonal(x1, y3)


def test_dot3():
    """Orthogonality via 3D dot product, with integer and float components."""
    x0 = Vec3(1, 2, 3)
    y0 = Vec3(0, 3, -2)
    y1 = Vec3(0, 2, -1)
    assert orthogonal(x0, y0)
    assert not orthogonal(x0, y1)
    x1 = Vec3(1.3, 2.7, 0.1)
    y2 = Vec3(1.5, -5 / 6, 3)
    y3 = Vec3(1.5, 0.6, 0.3)
    assert orthogonal(x1, y2)
    assert not orthogonal(x1, y3)


def test_l2():
    """Euclidean norm of 2D and 3D vectors against precomputed values."""
    x0 = Vec2(1.5, 4)
    assert float_eq(l2(x0), 4.2720018)
    x1 = Vec3(1.3, 7.2, 3.4)
    assert float_eq(l2(x1), 8.0678373806)


def test_triple_orientation():
    """Sign of the orientation test: CCW > 0, CW < 0, degenerate == 0."""
    p0 = Vec2(-3, 2)
    p1 = Vec2(-5, 3)
    p2 = Vec2(-1, -1)
    assert orientation(p0, p1, p2) > 0
    assert orientation(p1, p0, p2) < 0
    assert orientation(p0, p1, p1) == 0
    # 3D case: the standard right-handed basis has positive orientation.
    e0 = Vec3(1, 0, 0)
    e1 = Vec3(0, 1, 0)
    e2 = Vec3(0, 0, 1)
    assert orientation(e0, e1, e2) > 0
    assert orientation(e0, e2, e1) < 0


def test_vec2_prod():
    """2D cross product on a fixed pair of vectors."""
    a = Vec2(4, 0)
    b = Vec2(2, 2)
    assert vec2_prod(a, b) == 8


def test_vec3_prod():
    """3D cross product is orthogonal to both operands and right-handed."""
    a = Vec3(1, 2.5, 3)
    b = Vec3(0.7, 1.4, 0)
    c = vec3_prod(a, b)
    assert orthogonal(a, c)
    assert orthogonal(b, c)
    assert orientation(a, b, c) > 0
# Hypothesis strategies, bounded so coordinates stay well-conditioned.
reals = floats(min_value=-10000, max_value=10000)
ints = integers(-10000, 10000)


@given(tuples(reals, reals, reals, reals))
def test_line(t):
    """A line built from two points must (approximately) contain them."""
    px, py, qx, qy = t
    p = Vec2(px, py)
    q = Vec2(qx, qy)
    a, b, c = line(p, q)
    assert isclose(a * px + b * py + c, 0, abs_tol=1e-6)


@given(tuples(ints, ints, ints, ints))
def test_line_int(t):
    """Integer points with "gcd" mode give exact integer coefficients."""
    px, py, qx, qy = t
    p = Vec2(px, py)
    q = Vec2(qx, qy)
    a, b, c = line(p, q, "gcd")
    assert isinstance(a, int)
    assert isinstance(b, int)
    assert isinstance(c, int)
    assert a * px + b * py + c == 0
def test_distance_to_line():
    """Point-to-line distance against a precomputed value."""
    l = Line2(2.3, 1.4, 4)
    p = Vec2(0.5, -2)
    assert isclose(distance_to_line(l, p), 0.872768, abs_tol=1e-6)


def test_line_projection():
    """Projection of a point lands on the line at the expected foot point."""
    l = Line2(-2, 6, -30)  # y = (1/3)*x + 5
    p = Vec2(10, 5)
    prp = line_projection(l, p)
    # Distance from the point to its projection, then the exact coordinates.
    assert abs(hypot(prp.x - p.x, prp.y - p.y) - sqrt(10)) < 1e-6
    assert abs(prp.x - 9.0) < 1e-6 and abs(prp.y - 8.0) < 1e-6


def test_line_parallel():
    """Parallelism holds for proportional (a, b) coefficients."""
    l1 = Line2(1, 1, 3)
    l2 = Line2(2, 2, 8)
    l3 = Line2(1, 2, 3)
    assert line_parallel(l1, l2)
    assert not line_parallel(l1, l3)


def test_line_same():
    """Lines are the same iff all of (a, b, c) share a common factor."""
    l1 = Line2(1, 1, 3)
    l2 = Line2(2, 2, 6)
    l3 = Line2(2, 2, 8)
    assert line_same(l1, l2)
    assert not line_same(l1, l3)


def test_line_intersect():
    """Crossing lines yield the intersection point; parallel lines are falsy."""
    l1 = Line2(0, 1, 2)
    l2 = Line2(1, 0, 3)
    intersect = line_intersect(l1, l2)
    assert abs(intersect.x - -3) < 1e-6 and abs(intersect.y - -2) < 1e-6
    l1 = Line2(2, 2, 6)
    l2 = Line2(2, 2, 8)
    assert not line_intersect(l1, l2)
def test_segment_intersect():
    """Segment/segment intersection across regular and degenerate cases."""
    # general-position crossing
    a, b, c, d = Vec2(1, 1), Vec2(4, 2), Vec2(5, -2), Vec2(2, 2)
    assert segment_intersection(a, b, c, d)
    # disjoint segments
    a, b, c, d = Vec2(1, 1), Vec2(4, 2), Vec2(5, -2), Vec2(2, 1)
    assert not segment_intersection(a, b, c, d)
    # vertical segment
    a, b, c, d = Vec2(1, 1), Vec2(4, 2), Vec2(3, 0), Vec2(3, 5)
    assert segment_intersection(a, b, c, d)
    # horizontal segment touching at an endpoint
    a, b, c, d = Vec2(1, 1), Vec2(4, 2), Vec2(1, 1), Vec2(6, 1)
    assert segment_intersection(a, b, c, d)
    # one segment degenerates to a single point
    a, b, c, d = Vec2(1, 1), Vec2(4, 2), Vec2(1, 1), Vec2(1, 1)
    assert segment_intersection(a, b, c, d)
    # both segments degenerate to the same point
    a, b, c, d = Vec2(1, 1), Vec2(1, 1), Vec2(1, 1), Vec2(1, 1)
    assert segment_intersection(a, b, c, d)


def test_segment_union_measure():
    """Total length of the union of overlapping intervals."""
    xs = [(1, 2), (1.5, 3)]
    assert abs(segment_union_measure(xs) - 2) < 1e-6


def test_segment_cover():
    """segment_cover on fixed interval sets, with and without the second arg."""
    xs = [(3, 7), (1, 8), (5, 6), (2, 4)]
    assert segment_cover(xs) == [4, 6]
    ys = [(0, 4.5)]
    assert segment_cover(xs, ys) == [6]


def test_polygon_area():
    """Area of a simple polygon against a hand-computed value."""
    xs = [Vec2(1, 2), Vec2(3, 5), Vec2(6, 5), Vec2(3, 7), Vec2(-2, 4)]
    s = polygon_area(xs)
    assert abs(s - 14.5) < 1e-6


def test_points_inside():
    """Number of lattice points strictly inside a polygon (fixed cases)."""
    # 3x3 axis-aligned square: interior lattice points are (1..2, 1..2).
    xs = [Vec2(0, 0), Vec2(3, 0), Vec2(3, 3), Vec2(0, 3)]
    assert points_inside(xs) == 4
    xs = [Vec2(1, 2), Vec2(3, 5), Vec2(6, 5), Vec2(3, 7), Vec2(-2, 4)]
    assert points_inside(xs) == 11


def test_convex_polygon():
    """Convexity check: a dented polygon fails, a convex hexagon passes."""
    xs = [Vec2(1, 1), Vec2(2, 2), Vec2(3, 1), Vec2(3, 3), Vec2(1, 3)]
    assert not convex_polygon(xs)
    xs = [Vec2(2, 1), Vec2(3, 1), Vec2(4, 2), Vec2(3, 3), Vec2(2, 3), Vec2(1, 2)]
    assert convex_polygon(xs)
def point_on_line(l, p0):
    """True when *p0* satisfies a*x + b*y + c = 0 within 1e-6."""
    residual = l.a * p0.x + l.b * p0.y + l.c
    return abs(residual) < 1e-6


def point_on_circle(p, p0, r):
    """True when *p0* lies on the circle centred at *p* with radius *r* (1e-6)."""
    dx = p0.x - p.x
    dy = p0.y - p.y
    return abs(dx ** 2 + dy ** 2 - r * r) < 1e-6
def test_circle_line_intersect():
    """Circle/line intersection: 0, 2 and 1 (tangent) solution cases."""
    p = Vec2(4, 5)
    r = 3
    l1 = line(Vec2(10, 6), Vec2(3, 12))  # misses the circle
    l2 = line(Vec2(6, 1), Vec2(0.5, 8))  # secant: two hits
    l3 = line(Vec2(1, 0), Vec2(1, 2))    # x = 1, tangent (distance 3 = r)
    n1, p1 = circle_line_intersection(p, r, l1)
    n2, p2 = circle_line_intersection(p, r, l2)
    n3, p3 = circle_line_intersection(p, r, l3)
    assert n1 == 0
    assert n2 == 2
    assert n3 == 1
    # Every reported point must lie on both the circle and its line.
    assert point_on_circle(p, p3[0], r)
    assert point_on_circle(p, p2[0], r)
    assert point_on_circle(p, p2[1], r)
    assert point_on_line(l3, p3[0])
    assert point_on_line(l2, p2[0])
    assert point_on_line(l2, p2[1])


def test_circle_circle_intersect():
    """Two overlapping circles intersect in two points lying on both."""
    p0 = Vec2(4, 5)
    r0 = 3
    p1, r1 = Vec2(8, 6), 4
    n1, ip1 = circle_intersection(p0, r0, p1, r1)
    assert n1 == 2
    assert point_on_circle(p0, ip1[0], r0)
    assert point_on_circle(p0, ip1[1], r0)
    assert point_on_circle(p1, ip1[0], r1)
    assert point_on_circle(p1, ip1[1], r1)


def test_convex_hull():
    """Hull of a 2-point set, a triangle, and a square with an interior point."""
    p1 = [(0, 0), (0, 1)]
    p2 = [(0, 0), (1, 1), (2, 0)]
    p3 = [(0, 0), (2, 0), (0, 2), (2, 2), (1, 1)]
    cp1 = sorted(convex_hull([Vec2(*p) for p in p1]))
    cp2 = sorted(convex_hull([Vec2(*p) for p in p2]))
    cp3 = sorted(convex_hull([Vec2(*p) for p in p3]))
    # NOTE(review): equality with plain tuples assumes Vec2 is tuple-like
    # (e.g. a NamedTuple) -- confirm against algorithms.geometry.
    assert cp1 == p1
    assert cp2 == p2
    assert cp3 == sorted([(0, 0), (2, 0), (0, 2), (2, 2)])
def test_angle_cmp():
    """angle_cmp must order every earlier point before every later one."""
    # points sorted in counter-clockwise order
    pts = [
        Vec2(-2, 0),
        Vec2(-1, -2),
        Vec2(0, -3),
        Vec2(1, -2),
        Vec2(1, 1),
        Vec2(1, 2),
        Vec2(0, 3),
        Vec2(-1, 2),
    ]
    # Check the comparator over all ordered pairs (i before j).
    for i in range(len(pts)):
        for j in range(i + 1, len(pts)):
            assert angle_cmp(pts[i], pts[j])
def test_point_inside_convex_polygon():
    """Inside/outside classification against a fixed convex polygon.

    Polygon and points taken from Codeforces problem 166B; boundary points
    are expected to classify as *not* inside.
    """
    poly = [(0, 0), (4, 0), (6, 5), (4, 5), (0, 3)]
    poly = [Vec2(*p) for p in poly]
    inside = [(1, 1), (2, 2), (1, 3), (3, 3), (2, 1)]
    inside = [Vec2(*p) for p in inside]
    # Renamed from the misspelled local "outsie".
    outside = [(0, 0), (1, 0), (10, 0), (0, 3), (-3, 1), (8, 8), (5, 5), (6, 4)]
    outside = [Vec2(*p) for p in outside]
    for p in inside:
        assert point_inside_convex_polygon(poly, p)
    for p in outside:
        assert not point_inside_convex_polygon(poly, p)
|
<gh_stars>0
"""BuiltinLED channels of board protocol."""
# Standard imports
import logging
from abc import abstractmethod
from typing import Iterable, List, Optional
# Local package imports
from lhrhost.messaging.presentation import Message
from lhrhost.protocol import Command, ProtocolHandlerNode
from lhrhost.util.interfaces import InterfaceClass
from lhrhost.util.printing import Printer
# Logging
logger = logging.getLogger(__name__)
logger.addHandler(logging.NullHandler())
# Receipt of BuiltinLED
class Receiver(object, metaclass=InterfaceClass):
    """Interface for a class which receives builtin_led events.

    This may include versions from self or from other sources.  One async
    callback exists per BuiltinLED response channel.
    """

    @abstractmethod
    async def on_builtin_led(self, state: int) -> None:
        """Receive and handle a BuiltinLED response."""
        pass

    @abstractmethod
    async def on_builtin_led_blink(self, state: int) -> None:
        """Receive and handle a BuiltinLED/Blink response."""
        pass

    @abstractmethod
    async def on_builtin_led_blink_high_interval(self, interval: int) -> None:
        """Receive and handle a BuiltinLED/Blink/HighInterval response."""
        pass

    @abstractmethod
    async def on_builtin_led_blink_low_interval(self, interval: int) -> None:
        """Receive and handle a BuiltinLED/Blink/LowInterval response."""
        pass

    @abstractmethod
    async def on_builtin_led_blink_periods(self, periods: int) -> None:
        """Receive and handle a BuiltinLED/Blink/Periods response."""
        pass

    @abstractmethod
    async def on_builtin_led_blink_notify(self, state: int) -> None:
        """Receive and handle a BuiltinLED/Blink/Notify response."""
        pass


# Type-checking names
_Receivers = Iterable[Receiver]
# Printing
class Printer(Receiver, Printer):
    """Simple class which prints received serialized messages.

    NOTE(review): this class shadows the imported
    ``lhrhost.util.printing.Printer`` base it inherits from, so later
    references to ``Printer`` in this module resolve to this subclass --
    confirm that is intentional.
    """

    async def on_builtin_led(self, state: int) -> None:
        """Receive and handle a BuiltinLED response."""
        self.print('Built-in LED: {}'.format(
            'HIGH' if state else 'LOW'
        ))

    async def on_builtin_led_blink(self, state: int) -> None:
        """Receive and handle a BuiltinLED/Blink response."""
        self.print('Built-in LED blink: {}'.format(
            'blinking' if state else 'constant'
        ))

    async def on_builtin_led_blink_high_interval(self, interval: int) -> None:
        """Receive and handle a BuiltinLED/Blink/HighInterval response."""
        self.print('Built-in LED blink high interval: {}'.format(interval))

    async def on_builtin_led_blink_low_interval(self, interval: int) -> None:
        """Receive and handle a BuiltinLED/Blink/LowInterval response."""
        self.print('Built-in LED blink low interval: {}'.format(interval))

    async def on_builtin_led_blink_periods(self, periods: int) -> None:
        """Receive and handle a BuiltinLED/Blink/Periods response."""
        self.print('Built-in LED blink periods: {}'.format(periods))

    async def on_builtin_led_blink_notify(self, state: int) -> None:
        """Receive and handle a BuiltinLED/Blink/Notify response."""
        self.print('Built-in LED blink notifications: {}'.format(
            'notifying' if state else 'not notifying'
        ))
class BlinkHighIntervalProtocol(ProtocolHandlerNode):
    """Notifies on the Built-in LED blinker's HIGH interval."""

    def __init__(self, **kwargs):
        """Initialize member variables."""
        super().__init__('HighInterval', 'h', **kwargs)
        # Share the parent's receiver list so responses reach the same subscribers.
        self.response_receivers = self.parent.response_receivers

    # Commands

    async def request(self, interval: Optional[int]=None):
        """Send a BuiltinLED/Blink/HighInterval request command to message receivers."""
        # TODO: validate the interval
        message = Message(self.name_path, interval)
        await self.issue_command(Command(message))

    # Implement ProtocolHandlerNode

    def get_response_notifier(self, receiver):
        """Return the response receiver's method for receiving a response."""
        return receiver.on_builtin_led_blink_high_interval
class BlinkLowIntervalProtocol(ProtocolHandlerNode):
    """Notifies on the Built-in LED blinker's LOW interval."""

    def __init__(self, **kwargs):
        """Initialize member variables."""
        super().__init__('LowInterval', 'l', **kwargs)
        # Share the parent's receiver list so responses reach the same subscribers.
        self.response_receivers = self.parent.response_receivers

    # Commands

    async def request(self, interval: Optional[int]=None):
        """Send a BuiltinLED/Blink/LowInterval request command to message receivers."""
        # TODO: validate the interval
        message = Message(self.name_path, interval)
        await self.issue_command(Command(message))

    # Implement ProtocolHandlerNode

    def get_response_notifier(self, receiver):
        """Return the response receiver's method for receiving a response."""
        return receiver.on_builtin_led_blink_low_interval
class BlinkPeriodsProtocol(ProtocolHandlerNode):
    """Notifies on the Built-in LED blinker's number of periods.

    (Docstring fixed: previously copy-pasted from the LOW-interval node.)
    """

    def __init__(self, **kwargs):
        """Initialize member variables."""
        super().__init__('Periods', 'p', **kwargs)
        # Share the parent's receiver list so responses reach the same subscribers.
        self.response_receivers = self.parent.response_receivers

    # Commands

    async def request(self, periods: Optional[int]=None):
        """Send a BuiltinLED/Blink/Periods request command to message receivers."""
        # TODO: validate the periods
        message = Message(self.name_path, periods)
        await self.issue_command(Command(message))

    # Implement ProtocolHandlerNode

    def get_response_notifier(self, receiver):
        """Return the response receiver's method for receiving a response."""
        return receiver.on_builtin_led_blink_periods
class BlinkNotifyProtocol(ProtocolHandlerNode):
    """Controls notifications from the Built-in LED blinker.

    (Docstring fixed: previously copy-pasted from the LOW-interval node.)
    """

    def __init__(self, **kwargs):
        """Initialize member variables."""
        super().__init__('Notify', 'n', **kwargs)
        # Share the parent's receiver list so responses reach the same subscribers.
        self.response_receivers = self.parent.response_receivers

    # Commands

    async def request(self, periods: Optional[int]=None):
        """Send a BuiltinLED/Blink/Notify request command to message receivers.

        NOTE(review): the parameter is named ``periods`` although the
        corresponding receiver callback takes a ``state`` -- confirm naming;
        it is kept unchanged here to avoid breaking keyword callers.
        """
        # TODO: validate the periods
        message = Message(self.name_path, periods)
        await self.issue_command(Command(message))

    # Implement ProtocolHandlerNode

    def get_response_notifier(self, receiver):
        """Return the response receiver's method for receiving a response."""
        return receiver.on_builtin_led_blink_notify
class BlinkProtocol(ProtocolHandlerNode):
    """Blinks the built-in LED."""

    def __init__(self, **kwargs):
        """Initialize member variables."""
        super().__init__('Blink', 'b', **kwargs)
        self.response_receivers = self.parent.response_receivers
        # Child nodes for the blinker's tunable parameters.
        self.high_interval = BlinkHighIntervalProtocol(parent=self)
        self.low_interval = BlinkLowIntervalProtocol(parent=self)
        self.periods = BlinkPeriodsProtocol(parent=self)
        self.notify = BlinkNotifyProtocol(parent=self)

    # Implement ProtocolHandlerNode

    def get_response_notifier(self, receiver):
        """Return the response receiver's method for receiving a response."""
        return receiver.on_builtin_led_blink

    # Commands

    async def request(self, state: Optional[int]=None):
        """Send a BuiltinLED/Blink request command to message receivers."""
        # TODO: validate the state
        message = Message(self.name_path, state)
        await self.issue_command(Command(message))

    async def request_complete(self, periods: int):
        """Send a BuiltinLED/Blink request command to message receivers.

        Blink the built-in LED for a finite number of periods.
        """
        # TODO: validate periods
        message = Message(self.name_path, 1)
        # NOTE(review): self.name_path appears twice in wait_channels --
        # confirm whether that is intentional.
        wait_channels = [self.name_path, self.name_path, self.name_path + 'p']
        await self.periods.request(periods)
        logger.debug('Starting to blink the LED...')
        await self.issue_command(Command(message, wait_channels))
        logger.debug('Finished blinking the LED...')

    # Implement ProtocolHandlerNode

    @property
    def children_list(self):
        """Return a list of child nodes."""
        return [self.high_interval, self.low_interval, self.periods, self.notify]
class Protocol(ProtocolHandlerNode):
    """Sets the built-in LED.

    Root node of the BuiltinLED channel tree; owns the shared list of
    response receivers that all child nodes reference.
    """

    def __init__(
            self, response_receivers: Optional[_Receivers]=None, **kwargs
    ):
        """Initialize member variables."""
        super().__init__('BuiltinLED', 'l', **kwargs)
        # Copy the receivers into a fresh list shared with all child nodes.
        self.response_receivers: List[Receiver] = []
        if response_receivers:
            self.response_receivers = [receiver for receiver in response_receivers]
        self.blink = BlinkProtocol(parent=self, **kwargs)

    # Commands

    async def request(self, state: Optional[int]=None):
        """Send a BuiltinLED request command to message receivers."""
        # TODO: validate the state
        message = Message(self.name_path, state)
        await self.issue_command(Command(message))

    # Implement ProtocolHandlerNode

    @property
    def children_list(self):
        """Return a list of child nodes."""
        return [self.blink]

    def get_response_notifier(self, receiver):
        """Return the response receiver's method for receiving a response."""
        return receiver.on_builtin_led
|
"""
@brief test tree node (time=2s)
"""
import unittest
import numpy
from sklearn.cluster import KMeans, MiniBatchKMeans
from sklearn.datasets import load_digits, load_iris
from pyquickhelper.pycode import ExtTestCase
from skl2onnx import convert_sklearn
from skl2onnx.common.data_types import FloatTensorType, Int64TensorType
from mlprodict.testing.test_utils import dump_data_and_model, TARGET_OPSET
class TestSklearnKMeansModel(ExtTestCase):
    """ONNX conversion round-trip tests for sklearn KMeans / MiniBatchKMeans."""

    def test_kmeans_clustering(self):
        """KMeans on iris, float32 input, default target opset."""
        data = load_iris()
        X = data.data
        model = KMeans(n_clusters=3)
        model.fit(X)
        model_onnx = convert_sklearn(model, "kmeans",
                                     [("input", FloatTensorType([None, 4]))],
                                     target_opset=TARGET_OPSET)
        self.assertIsNotNone(model_onnx)
        dump_data_and_model(
            X.astype(numpy.float32)[40:60],
            model, model_onnx, basename="SklearnKMeans-Dec4")

    def test_batchkmeans_clustering(self):
        """MiniBatchKMeans on iris, float32 input, default target opset."""
        data = load_iris()
        X = data.data
        model = MiniBatchKMeans(n_clusters=3)
        model.fit(X)
        model_onnx = convert_sklearn(model, "kmeans",
                                     [("input", FloatTensorType([None, 4]))],
                                     target_opset=TARGET_OPSET)
        self.assertIsNotNone(model_onnx)
        dump_data_and_model(
            X.astype(numpy.float32)[40:60],
            model, model_onnx, basename="SklearnKMeans-Dec4")

    def test_batchkmeans_clustering_opset9(self):
        """MiniBatchKMeans conversion pinned to opset 9."""
        data = load_iris()
        X = data.data
        model = MiniBatchKMeans(n_clusters=3)
        model.fit(X)
        model_onnx = convert_sklearn(model, "kmeans",
                                     [("input", FloatTensorType([None, 4]))],
                                     target_opset=9)
        self.assertIsNotNone(model_onnx)
        dump_data_and_model(
            X.astype(numpy.float32)[40:60], model,
            model_onnx, basename="SklearnKMeansOp9-Dec4")

    def test_batchkmeans_clustering_opset11(self):
        """MiniBatchKMeans conversion pinned to opset 11."""
        data = load_iris()
        X = data.data
        model = MiniBatchKMeans(n_clusters=3)
        model.fit(X)
        model_onnx = convert_sklearn(model, "kmeans",
                                     [("input", FloatTensorType([None, 4]))],
                                     target_opset=11)
        self.assertIsNotNone(model_onnx)
        # NOTE(review): basename still says "Op9" although target_opset is
        # 11 -- looks like a copy-paste leftover; confirm before renaming.
        dump_data_and_model(
            X.astype(numpy.float32)[40:60], model,
            model_onnx, basename="SklearnKMeansOp9-Dec4")

    def test_batchkmeans_clustering_opset1(self):
        """Opset 1 is unsupported: conversion is expected to raise RuntimeError."""
        data = load_iris()
        X = data.data
        model = MiniBatchKMeans(n_clusters=3)
        model.fit(X)
        try:
            convert_sklearn(model, "kmeans",
                            [("input", FloatTensorType([None, 4]))],
                            target_opset=1)
        except RuntimeError as e:
            self.assertIn(
                "Node 'OnnxAdd' has been changed since version", str(e))
        # NOTE(review): the test passes silently if no exception is raised;
        # consider failing explicitly in that case.

    def test_kmeans_clustering_int(self):
        """KMeans on digits, int64 input, default target opset."""
        data = load_digits()
        X = data.data
        model = KMeans(n_clusters=4)
        model.fit(X)
        model_onnx = convert_sklearn(model, "kmeans",
                                     [("input", Int64TensorType([None,
                                                                 X.shape[1]]))],
                                     target_opset=TARGET_OPSET)
        self.assertIsNotNone(model_onnx)
        dump_data_and_model(
            X.astype(numpy.int64)[40:60],
            model, model_onnx, basename="SklearnKMeansInt-Dec4")

    def test_batchkmeans_clustering_int(self):
        """MiniBatchKMeans on digits, int64 input, default target opset."""
        data = load_digits()
        X = data.data
        model = MiniBatchKMeans(n_clusters=4)
        model.fit(X)
        model_onnx = convert_sklearn(model, "kmeans",
                                     [("input", Int64TensorType([None,
                                                                 X.shape[1]]))],
                                     target_opset=TARGET_OPSET)
        self.assertIsNotNone(model_onnx)
        dump_data_and_model(
            X.astype(numpy.int64)[40:60], model, model_onnx,
            basename="SklearnBatchKMeansInt-Dec4")
if __name__ == "__main__":
unittest.main()
|
import pytest
from fenChecker import Fen, WarningMsg
import mock
startingFen = 'rnbqkbnr/pppppppp/8/8/8/8/PPPPPPPP/RNBQKBNR w KQkq - 0 1'
# -------------------- Fixtures -----------------------------------------------
@pytest.fixture
def good_fen():
    # sets up a Fen object with a valid fen
    # position after 1 e4 e5 2 Nf3
    return Fen('rnbqkbnr/pppp1ppp/8/4p3/4P3/5N2/PPPP1PPP/RNBQKB1R b KQkq - 1 2')


@pytest.fixture
def good_ep_fen():
    # sets up a Fen object with a valid test.ep square
    # NB I am currently not clear whether this should be set ONLY if there is
    # an enemy pawn positioned to perform an ep capture, ie only when it matters!
    # The following position is after 1 e4 e6 2 e5 d5 when white could play
    # 3 exd6 e.p.
    return Fen('rnbqkbnr/pppp1ppp/4p3/3pP3/8/8/PPPP1PPP/RNBQKBNR w KQkq d6 0 3')


@pytest.fixture
def castling_fen():
    # in this position it is not clear whether or not the Kings or Rooks
    # have moved, as each Rook could have moved and moved back and the kings
    # could have moved and directly moved back or taken a triangular
    # route back to their original square.
    # NB: board-only fen -- toPlay, castling and ep will need to be set
    return Fen('r1bqkb1r/ppp2ppp/2np1n2/4p3/4P3/2NP1N2/PPP2PPP/R1BQKB1R')
# -----------------------------------------------------------------------------
# -------------------- assumptions --------------------------------------------
# In handling a string input I have made the following assumptions
# 1) the first sub-string is always the board
# 2) the last sub-string is always the move counter IF A DIGIT and fen
# has more at least 2 elements
# 3) the penultimate sub-string is always the half move clock IF A DIGIT
# AND the last sub-string is ALSO A DIGIT and the fen has as least
# 3 elements
# 4) if a toPlay, castling or ep element is recognised
# anywhere in the fen that value will be saved
# -----------------------------------------------------------------------------
# -------------------- tests: non-string fen ----------------------------------
def test_missingFen():
    """With no fen at all, each element is prompted for on stdin."""
    with mock.patch('builtins.input', side_effect=['rnbqkbnr/pppppppp/8/8/8/8/PPPPPPPP/RNBQKBNR',
                                                   'w', 'KQkq', '-']):  # full reset to starting position
        # currently this is not an automatic reset to the starting position
        # as in pychess, but requires manual input of each element
        # of the fen
        test = Fen()  # nothing passed
        assert test.board == 'rnbqkbnr/pppppppp/8/8/8/8/PPPPPPPP/RNBQKBNR'
        assert test.toPlay == 'w'
        assert test.castling == 'KQkq'
        assert test.ep == '-'
        assert test.halfMove == '0'
        assert test.move == '1'
        assert str(test) == startingFen


def test_nonStringFenInteger():
    """A bare integer fen falls back to prompting for every element."""
    with mock.patch('builtins.input', side_effect=['rnbqkbnr/pppppppp/8/8/8/8/PPPPPPPP/RNBQKBNR',
                                                   'w', 'KQkq', '-']):  # reset to starting position
        test = Fen(fen = 5)  # integer passed
        # 5 is a valid fen character, so the board element consists of 5
        # blank squares
        assert test.board == 'rnbqkbnr/pppppppp/8/8/8/8/PPPPPPPP/RNBQKBNR'
        assert test.toPlay == 'w'
        assert test.castling == 'KQkq'
        assert test.ep == '-'
        assert test.halfMove == '0'
        assert test.move == '1'
        assert str(test) == startingFen


def test_nonStringFenFloat():
    """A float fen is rejected and the user is prompted for every element."""
    with mock.patch('builtins.input', side_effect=['rnbqkbnr/pppppppp/8/8/8/8/PPPPPPPP/RNBQKBNR',
                                                   'w', 'KQkq', '-']):  # reset to starting position
        test = Fen(fen = 5.45)  # float passed
        # 5.45 could be read as a board with 10 squares and one invalid
        # character ('.')
        assert test.board == 'rnbqkbnr/pppppppp/8/8/8/8/PPPPPPPP/RNBQKBNR'
        assert test.toPlay == 'w'
        assert test.castling == 'KQkq'
        assert test.ep == '-'
        assert test.halfMove == '0'
        assert test.move == '1'
        assert str(test) == startingFen


def test_nonStringFenBool():
    """A bool fen is rejected and the user is prompted for every element."""
    with mock.patch('builtins.input', side_effect=['rnbqkbnr/pppppppp/8/8/8/8/PPPPPPPP/RNBQKBNR',
                                                   'w', 'KQkq', '-']):  # full reset to starting position
        test = Fen(True)  # bool passed
        # (comment fixed: previously copy-pasted from the integer test)
        assert test.board == 'rnbqkbnr/pppppppp/8/8/8/8/PPPPPPPP/RNBQKBNR'
        assert test.toPlay == 'w'
        assert test.castling == 'KQkq'
        assert test.ep == '-'
        assert test.halfMove == '0'
        assert test.move == '1'
        assert str(test) == startingFen
# ------------------------------------------------------------ 4 tests: total 4
# -------------------- test sub-string assumptions ----------------------------
def test_noBoardSubstring():
    with mock.patch('builtins.input',side_effect = ['rnbqkbnr/pp1ppppp/8/2p5/4P3/5N2/PPPP1PPP/RNBQKB1R']):
        test = Fen(fen = 'w KQkq - 5 20')
        # toPlay, castling and ep should be recognised
        # no board element passed
        # last two items accepted as they are digits
        assert test.fenElements == ['w', 'KQkq', '-', '5', '20']
        assert test.board == 'rnbqkbnr/pp1ppppp/8/2p5/4P3/5N2/PPPP1PPP/RNBQKB1R'
        # fenToPlay set to test default 'w'
        assert test.toPlay == 'w'
        assert test.castling == 'KQkq'
        assert test.ep == '-'
        assert test.halfMove == '5'
        assert test.move == '20'
        assert str(test) == 'rnbqkbnr/pp1ppppp/8/2p5/4P3/5N2/PPPP1PPP/RNBQKB1R w KQkq - 5 20'

def test_singleDigit():
    # the available digit should be taken as the move number
    # half move will be reset to 0
    test = Fen(fen = 'rnbqkbnr/pp1ppppp/8/2p5/4P3/5N2/PPPP1PPP/RNBQKB1R w KQkq - 2')
    assert test.board == 'rnbqkbnr/pp1ppppp/8/2p5/4P3/5N2/PPPP1PPP/RNBQKB1R'
    assert test.toPlay == 'w'
    assert test.castling == 'KQkq'
    assert test.ep == '-'
    assert test.halfMove == '0'
    assert test.move == '2'
    assert str(test) == 'rnbqkbnr/pp1ppppp/8/2p5/4P3/5N2/PPPP1PPP/RNBQKB1R w KQkq - 0 2'

def test_NoDigit():
    # no digits present at all
    # half move will be reset to 0, move to 1
    test = Fen(fen = 'rnbqkbnr/pp1ppppp/8/2p5/4P3/5N2/PPPP1PPP/RNBQKB1R w KQkq -')
    assert test.board == 'rnbqkbnr/pp1ppppp/8/2p5/4P3/5N2/PPPP1PPP/RNBQKB1R'
    assert test.toPlay == 'w'
    assert test.castling == 'KQkq'
    assert test.ep == '-'
    assert test.halfMove == '0'
    assert test.move == '1'
    assert str(test) == 'rnbqkbnr/pp1ppppp/8/2p5/4P3/5N2/PPPP1PPP/RNBQKB1R w KQkq - 0 1'

def test_MisplacedDigitsboth():
    # misplaced digits will be reset
    # half move will be reset to 0, move to 1
    test = Fen(fen = 'rnbqkbnr/pp1ppppp/8/2p5/4P3/5N2/PPPP1PPP/RNBQKB1R w 1 2 KQkq -')
    assert test.board == 'rnbqkbnr/pp1ppppp/8/2p5/4P3/5N2/PPPP1PPP/RNBQKB1R'
    assert test.toPlay == 'w'
    assert test.castling == 'KQkq'
    assert test.ep == '-'
    assert test.halfMove == '0'
    assert test.move == '1'
    assert str(test) == 'rnbqkbnr/pp1ppppp/8/2p5/4P3/5N2/PPPP1PPP/RNBQKB1R w KQkq - 0 1'

def test_MisplacedDigitsOne():
    # misplaced digits will be reset
    # half move will be reset to 0
    test = Fen(fen = 'rnbqkbnr/pp1ppppp/8/2p5/4P3/5N2/PPPP1PPP/RNBQKB1R w 1 KQkq - 2')
    assert test.board == 'rnbqkbnr/pp1ppppp/8/2p5/4P3/5N2/PPPP1PPP/RNBQKB1R'
    assert test.toPlay == 'w'
    assert test.castling == 'KQkq'
    assert test.ep == '-'
    assert test.halfMove == '0'
    # the last digit is assumed to be the move counter
    assert test.move == '2'
    assert str(test) == 'rnbqkbnr/pp1ppppp/8/2p5/4P3/5N2/PPPP1PPP/RNBQKB1R w KQkq - 0 2'
# ------------------------------------------------------------ 5 tests: total 9
# -------------------- fen passed with missing elements -----------------------
def test_FenBoardOnly():
    with mock.patch('builtins.input',side_effect = ['w','KQkq','-']):
        test = Fen(fen = 'rnbqkbnr/pp1ppppp/8/2p5/4P3/5N2/PPPP1PPP/RNBQKB1R KQkq 1 2')
        # reset all but board and halfMove/move, as missing element
        # requires input of all other elements of the fen
        assert test.board == 'rnbqkbnr/pp1ppppp/8/2p5/4P3/5N2/PPPP1PPP/RNBQKB1R'
        assert test.toPlay == 'w'
        assert test.castling == 'KQkq'
        assert test.ep == '-'
        assert test.halfMove == '1'
        assert test.move == '2'
        assert str(test) == 'rnbqkbnr/pp1ppppp/8/2p5/4P3/5N2/PPPP1PPP/RNBQKB1R w KQkq - 1 2'

def test_fenMissingBoard():
    with mock.patch('builtins.input',side_effect = ['rnbqkbnr/pp1ppppp/8/2p5/4P3/5N2/PPPP1PPP/RNBQKB1R']):
        test = Fen(fen = 'w KQkq - 1 2')
        # reset all but board and halfMove/move, as missing element
        # requires input of all other elements of the fen
        assert test.board == 'rnbqkbnr/pp1ppppp/8/2p5/4P3/5N2/PPPP1PPP/RNBQKB1R'
        assert test.toPlay == 'w'
        assert test.castling == 'KQkq'
        assert test.ep == '-'
        assert test.halfMove == '1'
        assert test.move == '2'
        assert str(test) == 'rnbqkbnr/pp1ppppp/8/2p5/4P3/5N2/PPPP1PPP/RNBQKB1R w KQkq - 1 2'

def test_fenMissingToPlay():
    with mock.patch('builtins.input',side_effect = ['w']):
        test = Fen(fen = 'rnbqkbnr/ppp2ppp/4p3/3pP3/8/8/PPPP1PPP/RNBQKBNR KQkq d6 0 3')
        # position after 1 e4 e6 2 e5 d5
        # the missing toPlay would make it impossible to check ep,
        # but toPlay should be set in time to prevent problem
        assert test.board == 'rnbqkbnr/ppp2ppp/4p3/3pP3/8/8/PPPP1PPP/RNBQKBNR'
        assert test.toPlay == 'w'
        assert test.castling == 'KQkq'
        assert test.ep == 'd6'
        assert test.halfMove == '0'
        assert test.move == '3'
        assert str(test) == 'rnbqkbnr/ppp2ppp/4p3/3pP3/8/8/PPPP1PPP/RNBQKBNR w KQkq d6 0 3'

def test_fenMissingCastling():
    with mock.patch('builtins.input',side_effect = ['KQkq']):
        test = Fen(fen = 'rnbqkbnr/ppp2ppp/4p3/3pP3/8/8/PPPP1PPP/RNBQKBNR w d6 0 3')
        assert test.board == 'rnbqkbnr/ppp2ppp/4p3/3pP3/8/8/PPPP1PPP/RNBQKBNR'
        assert test.toPlay == 'w'
        assert test.castling == 'KQkq'
        assert test.ep == 'd6'
        assert test.halfMove == '0'
        assert test.move == '3'
        assert str(test) == 'rnbqkbnr/ppp2ppp/4p3/3pP3/8/8/PPPP1PPP/RNBQKBNR w KQkq d6 0 3'

def test_fenMissingEP():
    with mock.patch('builtins.input',side_effect = ['d6']):
        test = Fen(fen = 'rnbqkbnr/ppp2ppp/4p3/3pP3/8/8/PPPP1PPP/RNBQKBNR w KQkq 0 3')
        assert test.board == 'rnbqkbnr/ppp2ppp/4p3/3pP3/8/8/PPPP1PPP/RNBQKBNR'
        assert test.toPlay == 'w'
        assert test.castling == 'KQkq'
        assert test.ep == 'd6'
        assert test.halfMove == '0'
        assert test.move == '3'
        assert str(test) == 'rnbqkbnr/ppp2ppp/4p3/3pP3/8/8/PPPP1PPP/RNBQKBNR w KQkq d6 0 3'

def test_fenMissingDigit():
    test = Fen(fen = 'rnbqkbnr/ppp2ppp/4p3/3pP3/8/8/PPPP1PPP/RNBQKBNR w KQkq d6 3')
    # assumed that the provided digit is the move number
    # half move will be reset to 0
    assert test.board == 'rnbqkbnr/ppp2ppp/4p3/3pP3/8/8/PPPP1PPP/RNBQKBNR'
    assert test.toPlay == 'w'
    assert test.castling == 'KQkq'
    assert test.ep == 'd6'
    assert test.halfMove == '0'
    assert test.move == '3'
    assert str(test) == 'rnbqkbnr/ppp2ppp/4p3/3pP3/8/8/PPPP1PPP/RNBQKBNR w KQkq d6 0 3'
# ----------------------------------------------------------- 6 tests: total 15
# -------------------- test allocation of '-' ---------------------------------
def test_ep_None():
    # a single '-' with castling present is allocated to ep
    test = Fen(fen = 'rnbqkbnr/pp1ppppp/8/2p5/4P3/5N2/PPPP1PPP/RNBQKB1R b KQkq - 1 2')
    assert test.board == 'rnbqkbnr/pp1ppppp/8/2p5/4P3/5N2/PPPP1PPP/RNBQKB1R'
    assert test.toPlay == 'b'
    assert test.castling == 'KQkq'
    assert test.ep == '-'
    assert test.halfMove == '1'
    assert test.move == '2'

def test_castling_None_EPwrong():
    test = Fen(fen = 'rnbqkbnr/pp1ppppp/8/2p5/4P3/5N2/PPPP1PPP/RNBQKB1R b - e3 1 2')
    assert test.board == 'rnbqkbnr/pp1ppppp/8/2p5/4P3/5N2/PPPP1PPP/RNBQKB1R'
    assert test.toPlay == 'b'
    assert test.castling == '-'
    # no enemy pawns in place so reset to '-'
    assert test.ep == '-'
    assert test.halfMove == '1'
    assert test.move == '2'

def test_castling_None_EPok():
    test = Fen(fen = 'rnbqkbnr/ppp1pppp/8/8/3pP3/2N2N2/PPPP1PPP/R1BQKB1R b - e3 1 2')
    # position after 1 Nf3 d5 2 Nc3 d4 3 e4: e3 is a ep square
    assert test.board == 'rnbqkbnr/ppp1pppp/8/8/3pP3/2N2N2/PPPP1PPP/R1BQKB1R'
    assert test.toPlay == 'b'
    assert test.castling == '-'
    # e3 is valid
    assert test.ep == 'e3'
    assert test.halfMove == '0' # last move was pawn move
    assert test.move == '2'

def test_castling_ep():
    # two '-' elements: first goes to castling, second to ep
    test = Fen(fen = 'rnbqkbnr/pp1ppppp/8/2p5/4P3/5N2/PPPP1PPP/RNBQKB1R b - - 1 2')
    assert test.board == 'rnbqkbnr/pp1ppppp/8/2p5/4P3/5N2/PPPP1PPP/RNBQKB1R'
    assert test.toPlay == 'b'
    assert test.castling == '-'
    assert test.ep == '-'
    assert test.halfMove == '1'
    assert test.move == '2'

def test_2Blanks_castling_set():
    # an explicit castling element overrides an earlier '-'
    test = Fen(fen = 'rnbqkbnr/pp1ppppp/8/2p5/4P3/5N2/PPPP1PPP/RNBQKB1R b - - KQkq 1 2')
    assert test.board == 'rnbqkbnr/pp1ppppp/8/2p5/4P3/5N2/PPPP1PPP/RNBQKB1R'
    assert test.toPlay == 'b'
    assert test.castling == 'KQkq'
    assert test.ep == '-'
    assert test.halfMove == '1'
    assert test.move == '2'

def test_2Blanks_ep_setIncorrectly():
    test = Fen(fen = 'rnbqkbnr/pp1ppppp/8/2p5/4P3/5N2/PPPP1PPP/RNBQKB1R b - - e3 1 2')
    assert test.board == 'rnbqkbnr/pp1ppppp/8/2p5/4P3/5N2/PPPP1PPP/RNBQKB1R'
    assert test.toPlay == 'b'
    assert test.castling == '-'
    assert test.ep == '-' # e3 inconsistent with board
    assert test.halfMove == '1'
    assert test.move == '2'

def test_2Blanks_ep_setCorrectly():
    test = Fen(fen = 'rnbqkbnr/ppp1pppp/8/8/3pP3/2N2N2/PPPP1PPP/R1BQKB1R b - - e3 1 2')
    # 1 Nf3 d5 2 Nc3 d4 3 e4 - e3 valid
    assert test.board == 'rnbqkbnr/ppp1pppp/8/8/3pP3/2N2N2/PPPP1PPP/R1BQKB1R'
    assert test.toPlay == 'b'
    assert test.castling == '-'
    assert test.ep == 'e3'
    assert test.halfMove == '0' # last move was pawn move
    assert test.move == '2'
# ----------------------------------------------------------- 7 tests: total 22
# -------------------- test of extra white spaces -----------------------------
def test_fenWhiteSpaceBetweenElements():
    # extra whitespace between elements must be ignored
    test = Fen(fen = 'rnbqkbnr/pp1ppppp/8/8/2p5/4P3/PPPP1PPP/RNBQKBNR  w  KQkq  d6 0 3')
    assert test.board == 'rnbqkbnr/pp1ppppp/8/8/2p5/4P3/PPPP1PPP/RNBQKBNR'
    assert test.toPlay == 'w'
    assert test.castling == 'KQkq'
    assert test.ep == '-' # d6 inconsistent with board
    assert test.halfMove == '0'
    assert test.move == '3'
    assert str(test) == 'rnbqkbnr/pp1ppppp/8/8/2p5/4P3/PPPP1PPP/RNBQKBNR w KQkq - 0 3'

def test_fenLeadingWhiteSapce():
    test = Fen(fen = ' rnbqkbnr/pp1ppppp/8/8/2p5/4P3/PPPP1PPP/RNBQKBNR w KQkq d6 0 3')
    assert test.board == 'rnbqkbnr/pp1ppppp/8/8/2p5/4P3/PPPP1PPP/RNBQKBNR'
    assert test.toPlay == 'w'
    assert test.castling == 'KQkq'
    assert test.ep == '-' # d6 inconsistent with board
    assert test.halfMove == '0'
    assert test.move == '3'
    assert str(test) == 'rnbqkbnr/pp1ppppp/8/8/2p5/4P3/PPPP1PPP/RNBQKBNR w KQkq - 0 3'

def test_fenTrailingWhiteSpace():
    test = Fen(fen = 'rnbqkbnr/pp1ppppp/8/8/2p5/4P3/PPPP1PPP/RNBQKBNR w KQkq d6 0 3 ')
    assert test.board == 'rnbqkbnr/pp1ppppp/8/8/2p5/4P3/PPPP1PPP/RNBQKBNR'
    assert test.toPlay == 'w'
    assert test.castling == 'KQkq'
    assert test.ep == '-' # d6 inconsistent with board
    assert test.halfMove == '0'
    assert test.move == '3'
    assert str(test) == 'rnbqkbnr/pp1ppppp/8/8/2p5/4P3/PPPP1PPP/RNBQKBNR w KQkq - 0 3'

def test_fenWhiteSpaceInCastling():
    with mock.patch('builtins.input',side_effect = ['KQkq']):
        # problem: the split castling element would result in two valid
        # castling elements; should be caught as contradictory and
        # require input of castling
        test = Fen(fen = 'rnbqkbnr/pp1ppppp/8/8/2p5/4P3/PPPP1PPP/RNBQKBNR w KQ kq d6 0 3')
        assert test.board == 'rnbqkbnr/pp1ppppp/8/8/2p5/4P3/PPPP1PPP/RNBQKBNR'
        assert test.toPlay == 'w'
        assert test.castling == 'KQkq'
        assert test.ep == '-' # d6 inconsistent with board
        assert test.halfMove == '0'
        assert test.move == '3'
        assert str(test) == 'rnbqkbnr/pp1ppppp/8/8/2p5/4P3/PPPP1PPP/RNBQKBNR w KQkq - 0 3'

def test_fenWhiteSpaceInEP():
    with mock.patch('builtins.input',side_effect = ['d6']):
        # problem: the split ep element cannot be recognised and
        # requires input of ep
        test = Fen(fen = 'rnbqkbnr/pp1ppppp/8/8/2p5/4P3/PPPP1PPP/RNBQKBNR w KQkq d 6 0 3')
        assert test.board == 'rnbqkbnr/pp1ppppp/8/8/2p5/4P3/PPPP1PPP/RNBQKBNR'
        assert test.toPlay == 'w'
        assert test.castling == 'KQkq'
        assert test.ep == '-' # d6 inconsistent with board
        assert test.halfMove == '0'
        assert test.move == '3'
        assert str(test) == 'rnbqkbnr/pp1ppppp/8/8/2p5/4P3/PPPP1PPP/RNBQKBNR w KQkq - 0 3'

def test_fenWhiteSpaceInCandEP():
    with mock.patch('builtins.input',side_effect = ['KQkq', 'd6']):
        # both castling and ep are split by whitespace; both should be
        # caught as contradictory and require input
        test = Fen(fen = 'rnbqkbnr/pp1ppppp/8/8/2p5/4P3/PPPP1PPP/RNBQKBNR w K Qkq d 6 0 3')
        assert test.board == 'rnbqkbnr/pp1ppppp/8/8/2p5/4P3/PPPP1PPP/RNBQKBNR'
        assert test.toPlay == 'w'
        assert test.castling == 'KQkq'
        assert test.ep == '-' # d6 inconsistent with board
        assert test.halfMove == '0'
        assert test.move == '3'
        assert str(test) == 'rnbqkbnr/pp1ppppp/8/8/2p5/4P3/PPPP1PPP/RNBQKBNR w KQkq - 0 3'
# ----------------------------------------------------------- 6 test: total 28
# -------------------- fen elements incorrect ---------------------------------
def test_toPlayError():
    # unrecognisable toPlay ('z') requires input of toPlay
    with mock.patch('builtins.input',side_effect = ['w']):
        test = Fen(fen = 'rnbqkbnr/pp1ppppp/8/2p5/4P3/5N2/PPPP1PPP/RNBQKB1R z KQkq - 1 2')
        assert test.board == 'rnbqkbnr/pp1ppppp/8/2p5/4P3/5N2/PPPP1PPP/RNBQKB1R'
        assert test.toPlay == 'w'
        assert test.castling == 'KQkq'
        assert test.ep == '-'
        assert test.halfMove == '1'
        assert test.move == '2'

def test_CastlingError():
    with mock.patch('builtins.input',side_effect = ['KQkq', '-']):
        # as the castling element is unrecognisable, '-'
        # cannot be allocated, so ep needs to be set
        test = Fen(fen = 'rnbqkbnr/pp1ppppp/8/2p5/4P3/5N2/PPPP1PPP/RNBQKB1R b KQkx - 1 2')
        assert test.board == 'rnbqkbnr/pp1ppppp/8/2p5/4P3/5N2/PPPP1PPP/RNBQKB1R'
        assert test.toPlay == 'b'
        assert test.castling == 'KQkq'
        assert test.ep == '-'
        assert test.halfMove == '1'
        assert test.move == '2'

def test_epError():
    # unrecognisable ep ('x') requires input of ep
    with mock.patch('builtins.input',side_effect = ['-']):
        test = Fen(fen = 'rnbqkbnr/pp1ppppp/8/2p5/4P3/5N2/PPPP1PPP/RNBQKB1R b KQkq x 1 2')
        assert test.board == 'rnbqkbnr/pp1ppppp/8/2p5/4P3/5N2/PPPP1PPP/RNBQKB1R'
        assert test.toPlay == 'b'
        assert test.castling == 'KQkq'
        assert test.ep == '-'
        assert test.halfMove == '1'
        assert test.move == '2'
# ----------------------------------------------------------- 3 tests: total 31
# -------------------- ep invalid squares -------------------------------------
def test_epInvalidSquare():
    # e5 is never a legal ep target square (must be rank 3 or 6)
    with mock.patch('builtins.input',side_effect = ['-']):
        test = Fen(fen = 'rnbqkbnr/pp1ppppp/8/2p5/4P3/5N2/PPPP1PPP/RNBQKB1R b KQkq e5 1 2')
        assert test.board == 'rnbqkbnr/pp1ppppp/8/2p5/4P3/5N2/PPPP1PPP/RNBQKB1R'
        assert test.toPlay == 'b'
        assert test.castling == 'KQkq'
        # manual reset ep to '-'
        assert test.ep == '-'
        assert test.halfMove == '1'
        assert test.move == '2'

def test_epwtpValid():
    test = Fen(fen = 'rnbqkbnr/ppp2ppp/4p3/3pP3/8/8/PPPP1PPP/RNBQKB1R w KQkq d6 0 3')
    # d6 is a legal ep target: the black d-pawn stands on d5 beside the
    # white e5 pawn, so exd6 e.p. would be possible
    assert test.board == 'rnbqkbnr/ppp2ppp/4p3/3pP3/8/8/PPPP1PPP/RNBQKB1R'
    assert test.toPlay == 'w'
    assert test.castling == 'KQkq'
    assert test.ep == 'd6'
    assert test.halfMove == '0'
    assert test.move == '3'

def test_epbtpValid():
    test = Fen(fen = 'rnbqkbnr/ppp1pppp/8/8/3pP3/2N2N2/PPPP1PPP/R1BQKB1R b KQkq e3 0 4')
    # after 1 Nf3 d5 2 Nc3 d4 3 e4: e3 correct
    assert test.board == 'rnbqkbnr/ppp1pppp/8/8/3pP3/2N2N2/PPPP1PPP/R1BQKB1R'
    assert test.toPlay == 'b'
    assert test.castling == 'KQkq'
    assert test.ep == 'e3'
    assert test.halfMove == '0'
    assert test.move == '4'

def test_epwtpInvalid():
    # with white to play the ep square cannot be on rank 3
    with mock.patch('builtins.input',side_effect = ['-']):
        test = Fen(fen = 'rnbqkbnr/pp1ppppp/8/2p5/4P3/5N2/PPPP1PPP/RNBQKB1R w KQkq e3 1 2')
        assert test.board == 'rnbqkbnr/pp1ppppp/8/2p5/4P3/5N2/PPPP1PPP/RNBQKB1R'
        assert test.toPlay == 'w'
        assert test.castling == 'KQkq'
        # temporary: reset fentest.ep to '-'
        assert test.ep == '-'
        assert test.halfMove == '1'
        assert test.move == '2'

def test_epbtpInvalid():
    # with black to play the ep square cannot be on rank 6
    with mock.patch('builtins.input',side_effect = ['-']):
        test = Fen(fen = 'rnbqkbnr/pp1ppppp/8/2p5/4P3/5N2/PPPP1PPP/RNBQKB1R b KQkq e6 1 2')
        assert test.board == 'rnbqkbnr/pp1ppppp/8/2p5/4P3/5N2/PPPP1PPP/RNBQKB1R'
        assert test.toPlay == 'b'
        assert test.castling == 'KQkq'
        # ep reset to '-'
        assert test.ep == '-'
        assert test.halfMove == '1'
        assert test.move == '2'
# Exhaustive check of every ep target file on both ranks: "correct" cases
# place an enemy pawn adjacent to the just-advanced pawn (RA = right
# adjacent, LA = left adjacent), so the ep square is accepted; "incorrect"
# cases have no adjacent enemy pawn, so ep is reset to '-'.
def test_ep_a6_correct():
    test = Fen('4k3/8/8/pP6/8/8/8/4K3 w - a6 0 1')
    assert test.ep == 'a6'

def test_ep_a6_incorrect():
    test = Fen('4k3/8/8/p1P5/8/8/8/4K3 w - a6 0 1')
    assert test.ep == '-'

def test_ep_b6_correctRA():
    test = Fen('4k3/8/8/1pP5/8/8/8/4K3 w - b6 0 1')
    assert test.ep == 'b6'

def test_ep_b6_correctLA():
    test = Fen('4k3/8/8/Pp6/8/8/8/4K3 w - b6 0 1')
    assert test.ep == 'b6'

def test_ep_b6_incorrect():
    test = Fen('4k3/8/8/1p1P4/8/8/8/4K3 w - b6 0 1')
    assert test.ep == '-'

def test_ep_c6_correctRA():
    test = Fen('4k3/8/8/2pP4/8/8/8/4K3 w - c6 0 1')
    assert test.ep == 'c6'

def test_ep_c6_correctLA():
    test = Fen('4k3/8/8/1Pp5/8/8/8/4K3 w - c6 0 1')
    assert test.ep == 'c6'

def test_ep_c6_incorrect():
    test = Fen('4k3/8/8/2p1P3/8/8/8/4K3 w - c6 0 1')
    assert test.ep == '-'

def test_ep_d6_correctRA():
    test = Fen('4k3/8/8/3pP3/8/8/8/4K3 w - d6 0 1')
    assert test.ep == 'd6'

def test_ep_d6_correctLA():
    test = Fen('4k3/8/8/2Pp4/8/8/8/4K3 w - d6 0 1')
    assert test.ep == 'd6'

def test_ep_d6_incorrect():
    test = Fen('4k3/8/8/3p1P2/8/8/8/4K3 w - d6 0 1')
    assert test.ep == '-'

def test_ep_e6_correctRA():
    test = Fen('4k3/8/8/4pP2/8/8/8/4K3 w - e6 0 1')
    assert test.ep == 'e6'

def test_ep_e6_correctLA():
    test = Fen('4k3/8/8/3Pp3/8/8/8/4K3 w - e6 0 1')
    assert test.ep == 'e6'

def test_ep_e6_incorrect():
    test = Fen('4k3/8/8/4p1P1/8/8/8/4K3 w - e6 0 1')
    assert test.ep == '-'

def test_ep_f6_correctRA():
    test = Fen('4k3/8/8/5pP1/8/8/8/4K3 w - f6 0 1')
    assert test.ep == 'f6'

def test_ep_f6_correctLA():
    test = Fen('4k3/8/8/4Pp2/8/8/8/4K3 w - f6 0 1')
    assert test.ep == 'f6'

def test_ep_f6_incorrect():
    test = Fen('4k3/8/8/5p1P/8/8/8/4K3 w - f6 0 1')
    assert test.ep == '-'

def test_ep_g6_correctRA():
    test = Fen('4k3/8/8/6pP/8/8/8/4K3 w - g6 0 1')
    assert test.ep == 'g6'

def test_ep_g6_correctLA():
    test = Fen('4k3/8/8/5Pp1/8/8/8/4K3 w - g6 0 1')
    assert test.ep == 'g6'

def test_ep_g6_incorrect():
    test = Fen('4k3/8/8/P5p1/8/8/8/4K3 w - g6 0 1')
    assert test.ep == '-'

def test_ep_h6_correct():
    test = Fen('4k3/8/8/6Pp/8/8/8/4K3 w - h6 0 1')
    assert test.ep == 'h6'

def test_ep_h6_incorrect():
    test = Fen('4k3/8/8/1P5p/8/8/8/4K3 w - h6 0 1')
    assert test.ep == '-'

def test_ep_a3_correct():
    test = Fen('4k3/8/8/8/Pp6/8/8/4K3 b - a3 0 1')
    assert test.ep == 'a3'

def test_ep_a3_incorrect():
    test = Fen('4k3/8/8/8/P1p5/8/8/4K3 b - a3 0 1')
    assert test.ep == '-'

def test_ep_b3_correctRA():
    test = Fen('4k3/8/8/8/1Pp5/8/8/4K3 b - b3 0 1')
    assert test.ep == 'b3'

def test_ep_b3_correctLA():
    test = Fen('4k3/8/8/8/pP6/8/8/4K3 b - b3 0 1')
    assert test.ep == 'b3'

def test_ep_b3_incorrect():
    test = Fen('4k3/8/8/8/1P1p4/8/8/4K3 b - b3 0 1')
    assert test.ep == '-'

def test_ep_c3_correctRA():
    test = Fen('4k3/8/8/8/2Pp4/8/8/4K3 b - c3 0 1')
    assert test.ep == 'c3'

def test_ep_c3_correctLA():
    test = Fen('4k3/8/8/8/1pP5/8/8/4K3 b - c3 0 1')
    assert test.ep == 'c3'

def test_ep_c3_incorrect():
    test = Fen('4k3/8/8/8/2P1p3/8/8/4K3 b - c3 0 1')
    assert test.ep == '-'

def test_ep_d3_correctRA():
    test = Fen('4k3/8/8/8/3Pp3/8/8/4K3 b - d3 0 1')
    assert test.ep == 'd3'

def test_ep_d3_correctLA():
    test = Fen('4k3/8/8/8/2pP4/8/8/4K3 b - d3 0 1')
    assert test.ep == 'd3'

def test_ep_d3_incorrect():
    test = Fen('4k3/8/8/8/3P1p2/8/8/4K3 b - d3 0 1')
    assert test.ep == '-'

def test_ep_e3_correctRA():
    test = Fen('4k3/8/8/8/4Pp2/8/8/4K3 b - e3 0 1')
    assert test.ep == 'e3'

def test_ep_e3_correctLA():
    test = Fen('4k3/8/8/8/3pP3/8/8/4K3 b - e3 0 1')
    assert test.ep == 'e3'

def test_ep_e3_incorrect():
    test = Fen('4k3/8/8/8/4p1P1/8/8/4K3 b - e3 0 1')
    assert test.ep == '-'

def test_ep_f3_correctRA():
    test = Fen('4k3/8/8/8/5Pp1/8/8/4K3 b - f3 0 1')
    assert test.ep == 'f3'

def test_ep_f3_correctLA():
    test = Fen('4k3/8/8/8/4pP2/8/8/4K3 b - f3 0 1')
    assert test.ep == 'f3'

def test_ep_f3_incorrect():
    test = Fen('4k3/8/8/8/5P1p/8/8/4K3 b - f3 0 1')
    assert test.ep == '-'

def test_ep_g3_correctRA():
    test = Fen('4k3/8/8/8/6Pp/8/8/4K3 b - g3 0 1')
    assert test.ep == 'g3'

def test_ep_g3_correctLA():
    test = Fen('4k3/8/8/8/5pP1/8/8/4K3 b - g3 0 1')
    assert test.ep == 'g3'

def test_ep_g3_incorrect():
    test = Fen('4k3/8/8/8/p5P1/8/8/4K3 b - g3 0 1')
    assert test.ep == '-'

def test_ep_h3_correct():
    test = Fen('4k3/8/8/8/6pP/8/8/4K3 b - h3 0 1')
    assert test.ep == 'h3'

def test_ep_h3_incorrect():
    test = Fen('4k3/8/8/8/1p5P/8/8/4K3 b - h3 0 1')
    assert test.ep == '-'
# ---------------------------------------------------------- 49 tests: total 80
# -------------------- fen elements out of order ------------------------------
# valid toPlay, castling and ep should be recognised
def test_orderFenValidEP():
    # elements out of order: a valid ep ('d6') appearing before toPlay and
    # castling should still be recognised and allocated correctly
    test = Fen(fen = 'rnbqkbnr/ppp2ppp/4p3/3pP3/8/8/PPPP1PPP/RNBQKBNR d6 w KQkq 0 3')
    assert test.board == 'rnbqkbnr/ppp2ppp/4p3/3pP3/8/8/PPPP1PPP/RNBQKBNR'
    assert test.toPlay == 'w'
    assert test.castling == 'KQkq'
    assert test.ep == 'd6'
    assert test.halfMove == '0'
    assert test.move == '3'
    assert str(test) == 'rnbqkbnr/ppp2ppp/4p3/3pP3/8/8/PPPP1PPP/RNBQKBNR w KQkq d6 0 3'
# ------------------------------------------------------------ 1 test: total 81
# ------------------- board errors: kings -------------------------------------
# NOTE(review): in all four king tests the mocked replacement board string
# (and the asserted test.board) ends with ' KQkq' — presumably the trailing
# token is tolerated/stored by Fen's board input; verify against the Fen
# implementation.
def test_noWhiteKing():
    # this checks that the absence of a white king results in an error
    with mock.patch('builtins.input',side_effect = ['rnbqkbnr/pp1ppppp/8/2p5/4P3/5N2/PPPP1PPP/RNBQKB1R KQkq']): # full reset to starting position
        test = Fen(fen = 'rnbqkbnr/pp1ppppp/8/2p5/4P3/5N2/PPPP1PPP/RNBQQB1R b KQkq - 1 2')
        # input of corrected board element
        assert test.board == 'rnbqkbnr/pp1ppppp/8/2p5/4P3/5N2/PPPP1PPP/RNBQKB1R KQkq'
        assert test.toPlay == 'b'
        assert test.castling == 'KQkq'
        assert test.ep == '-'
        assert test.halfMove == '1'
        assert test.move == '2'

def test_manyWhiteKings():
    with mock.patch('builtins.input',side_effect = ['rnbqkbnr/pp1ppppp/8/2p5/4P3/5N2/PPPP1PPP/RNBQKB1R KQkq']): # full reset to starting position
        test = Fen(fen = 'rnbqkbnr/pp1ppppp/8/2p5/4P3/5N2/PPPP1PPP/RNBKKB1R b KQkq - 1 2')
        # input of corrected board element
        assert test.board == 'rnbqkbnr/pp1ppppp/8/2p5/4P3/5N2/PPPP1PPP/RNBQKB1R KQkq'
        assert test.toPlay == 'b'
        assert test.castling == 'KQkq'
        assert test.ep == '-'
        assert test.halfMove == '1'
        assert test.move == '2'

def test_noBlackKing():
    with mock.patch('builtins.input',side_effect = ['rnbqkbnr/pp1ppppp/8/2p5/4P3/5N2/PPPP1PPP/RNBQKB1R KQkq']): # full reset to starting position
        test = Fen(fen = 'rnbqqbnr/pp1ppppp/8/2p5/4P3/5N2/PPPP1PPP/RNBQKB1R b KQkq - 1 2')
        # input of corrected board element
        assert test.board == 'rnbqkbnr/pp1ppppp/8/2p5/4P3/5N2/PPPP1PPP/RNBQKB1R KQkq'
        assert test.toPlay == 'b'
        assert test.castling == 'KQkq'
        assert test.ep == '-'
        assert test.halfMove == '1'
        assert test.move == '2'

def test_manyBlackKings():
    with mock.patch('builtins.input',side_effect = ['rnbqkbnr/pp1ppppp/8/2p5/4P3/5N2/PPPP1PPP/RNBQKB1R KQkq']): # full reset to starting position
        test = Fen(fen = 'rnbkkbnr/pp1ppppp/8/2p5/4P3/5N2/PPPP1PPP/RNBQQB1R b KQkq - 1 2')
        # input of corrected board element
        assert test.board == 'rnbqkbnr/pp1ppppp/8/2p5/4P3/5N2/PPPP1PPP/RNBQKB1R KQkq'
        assert test.toPlay == 'b'
        assert test.castling == 'KQkq'
        assert test.ep == '-'
        assert test.halfMove == '1'
        assert test.move == '2'
# ----------------------------------------------------------- 4 tests: total 85
# -------------------- castling: Board incorrect ------------------------------
def test_castling_KQkq_Passed(good_fen):
    # 'KQkq' consistent with the fixture position is kept as passed
    test = Fen(good_fen)
    assert test.castling == 'KQkq'

def test_KQkq_Wht_K_Moved():
    test = Fen('rnbqkbnr/pppp1ppp/8/4p3/4P3/8/PPPPKPPP/RNBQ1B1R w KQkq - 2 2')
    # after 1 e4 e5 2 Ke2
    # system should change castling automatically to 'kq'
    assert test.castling == 'kq'

def test_KQkq_Blk_K_Moved():
    test = Fen('rnbq1bnr/ppppkppp/8/4p3/4P3/5N2/PPPP1PPP/RNBQKB1R w KQkq - 2 3')
    # after 1 e4 e5 2 Nf3 Ke7
    # system should change castling automatically to 'KQ'
    assert test.castling == 'KQ'

def test_KQkq_Wht_KR_Moved():
    test = Fen('r1bqkbnr/pppp1ppp/2N5/4p3/4P3/8/PPPP1PPP/RNBQKBR1 w KQkq - 3 3')
    # after 1 e4 e5 2 Nf3 Nc6 3 Rg1
    # system should change castling automatically to 'Qkq'
    assert test.castling == 'Qkq'

def test_KQkq_Blk_KR_Moved():
    test = Fen('rnbqkbr1/pppp1ppp/5n2/4p3/4P3/2N2N2/PPPP1PPP/RNBQKB1R w KQkq - 4 4')
    # after 1 e4 e5 2 Nf3 Nf6 3 Nc3 Rg8
    # system should change castling automatically to 'KQq'
    assert test.castling == 'KQq'

def test_KQkq_Both_KR_Moved():
    test = Fen('rnbqkbr1/pppp1ppp/5n2/4p3/4P3/5N2/PPPP1PPP/RNBQKBR1 w KQkq - 4 4')
    # after 1 e4 e5 2 Nf3 Nf6 3 Rg1 Rg8
    # system should change castling automatically to 'Qq'
    assert test.castling == 'Qq'

def test_KQkq_Wht_QR_Moved():
    test = Fen('r1bqkbnr/pppppppp/2N5/8/8/2N5/PPPPPPPP/1RBQKBNR b KQkq - 3 2')
    # after 1 Nc3 Nc6 2 Rb1
    # system should change castling automatically to 'Kkq'
    assert test.castling == 'Kkq'

def test_KQkq_Blk_QR_Moved():
    test = Fen('1rbqkbnr/pppppppp/2N5/8/4P3/2N5/PPPP1PPP/R1BQKBNR b KQkq - 1 3')
    # after 1 Nc3 Nc6 2 e4 Rb8
    # system should change castling automatically to 'KQk'
    assert test.castling == 'KQk'

def test_KQkq_Both_QR_Moved():
    test = Fen('1rbqkbnr/pppppppp/2N5/8/8/2N5/PPPPPPPP/1RBQKBNR b KQkq - 1 3')
    # after 1 Nc3 Nc6 2 Rb1 Rb8
    # system should change castling automatically to 'Kk'
    assert test.castling == 'Kk'

def test_KQk_Passed_position_unclear(): # Blk Rook moved and moved back
    test = Fen('r1bqkb1r/ppp2ppp/2np1n2/4p3/4P3/2NP1N2/PPP2PPP/R1BQKB1R w KQk -')
    # in this position it is not clear whether or not the Kings or Rooks
    # have moved as each Rook could have moved and moved back and the kings
    # could have moved and directly moved back or taken a triangular
    # route back to their original square.
    assert test.castling == 'KQk'
    # program accepts input

# random selection of other possibilities
def test_KQq_Wht_K_Moved():
    test = Fen('rnbqkbnr/pppp1ppp/8/4p3/4P3/8/PPPPKPPP/RNBQ1B1R w KQq - 2 2')
    # after 1 e4 e5 2 Ke2
    # system should change castling automatically to 'q' as passed value implies
    # q-side castling still Ok, but not so given Wht K position
    assert test.castling == 'q'

def test_KQq_Wht_KR_Moved():
    test = Fen('r1bqkbnr/pppp1ppp/2N5/4p3/4P3/8/PPPP1PPP/RNBQKBR1 w KQq - 3 3')
    # after 1 e4 e5 2 Nf3 Nc6 3 Rg1
    # accepting the implied input that Blk KR moved and moved back the system
    # should change castling automatically to 'Qq'
    assert test.castling == 'Qq'

def test_KQk_Blk_K_Moved():
    test = Fen('rnbq1bnr/ppppkppp/8/4p3/4P3/5N2/PPPP1PPP/RNBQKB1R w KQk - 2 3')
    # after 1 e4 e5 2 Nf3 Ke7
    # the black king has moved, so 'k' is removed: castling becomes 'KQ'
    assert test.castling == 'KQ'

def test_KQ_Blk_KR_Moved():
    test = Fen('rnbqkbr1/pppp1ppp/5n2/4p3/4P3/2N2N2/PPPP1PPP/RNBQKB1R w KQ - 4 4')
    # after 1 e4 e5 2 Nf3 Nf6 3 Nc3 Rg8
    # system should leave castling as 'KQ'
    assert test.castling == 'KQ'

def test_Kkq_Both_KR_Moved():
    test = Fen('rnbqkbr1/pppp1ppp/5n2/4p3/4P3/5N2/PPPP1PPP/RNBQKBR1 w Kkq - 4 4')
    # after 1 e4 e5 2 Nf3 Nf6 3 Rg1 Rg8
    # system should change castling automatically to 'q'
    assert test.castling == 'q'

def test_Qkq_Wht_QR_Moved():
    test = Fen('r1bqkbnr/pppppppp/2N5/8/8/2N5/PPPPPPPP/1RBQKBNR b Qkq - 3 2')
    # after 1 Nc3 Nc6 2 Rb1
    # system should change castling automatically to 'kq'
    assert test.castling == 'kq'

def test_Qkq_Blk_QR_Moved():
    test = Fen('1rbqkbnr/pppppppp/2N5/8/4P3/2N5/PPPP1PPP/R1BQKBNR b Qkq - 1 3')
    # after 1 Nc3 Nc6 2 e4 Rb8
    # system should change castling automatically to 'Qk'
    assert test.castling == 'Qk'

def test_Qkq_Both_QR_Moved():
    test = Fen('1rbqkbnr/pppppppp/2N5/8/8/2N5/PPPPPPPP/1RBQKBNR b Qkq - 1 3')
    # after 1 Nc3 Nc6 2 Rb1 Rb8
    # system should change castling automatically to 'k'
    assert test.castling == 'k'
# --------------------------------------------------------- 18 tests: total 103
# -------------------- test board display -------------------------------------
def test_boardDisplay():
    # boardToArray renders each rank as one string: black pieces wrapped in
    # red ANSI escapes, white pieces plain, empty squares as '. '
    test = Fen('rnbqkbnr/pppp1ppp/8/4p3/4P3/5N2/PPPP1PPP/RNBQKB1R b KQkq - 1 2')
    y = test.boardToArray('rnbqkbnr/pppp1ppp/8/4p3/4P3/5N2/PPPP1PPP/RNBQKB1R')
    assert y == [' \x1b[31mr \x1b[0m\x1b[31mn \x1b[0m\x1b[31mb \x1b[0m\x1b[31mq \x1b[0m\x1b[31mk \x1b[0m\x1b[31mb \x1b[0m\x1b[31mn \x1b[0m\x1b[31mr \x1b[0m\n',
                 ' \x1b[31mp \x1b[0m\x1b[31mp \x1b[0m\x1b[31mp \x1b[0m\x1b[31mp \x1b[0m. \x1b[31mp \x1b[0m\x1b[31mp \x1b[0m\x1b[31mp \x1b[0m\n',
                 ' . . . . . . . . \n',
                 ' . . . . \x1b[31mp \x1b[0m. . . \n',
                 ' . . . . P . . . \n',
                 ' . . . . . N . . \n',
                 ' P P P P . P P P \n',
                 ' R N B Q K B . R \n']
    test.displayBoard(y)
    # augmentBoard adds green file letters (header row) and rank numbers
    z = test.augmentBoard()
    assert z == ['\x1b[32m\n a b c d e f g h \n\x1b[0m',
                 '\x1b[32m 8 \x1b[0m \x1b[31mr \x1b[0m\x1b[31mn \x1b[0m\x1b[31mb \x1b[0m\x1b[31mq \x1b[0m\x1b[31mk \x1b[0m\x1b[31mb \x1b[0m\x1b[31mn \x1b[0m\x1b[31mr \x1b[0m\n',
                 '\x1b[32m 7 \x1b[0m \x1b[31mp \x1b[0m\x1b[31mp \x1b[0m\x1b[31mp \x1b[0m\x1b[31mp \x1b[0m. \x1b[31mp \x1b[0m\x1b[31mp \x1b[0m\x1b[31mp \x1b[0m\n',
                 '\x1b[32m 6 \x1b[0m . . . . . . . . \n',
                 '\x1b[32m 5 \x1b[0m . . . . \x1b[31mp \x1b[0m. . . \n',
                 '\x1b[32m 4 \x1b[0m . . . . P . . . \n',
                 '\x1b[32m 3 \x1b[0m . . . . . N . . \n',
                 '\x1b[32m 2 \x1b[0m P P P P . P P P \n',
                 '\x1b[32m 1 \x1b[0m R N B Q K B . R \n']
    test.displayBoard(z)
def test_boardDisplayNotExplicit():
    # same as test_boardDisplay but displayBoard is called with no argument,
    # exercising its default-parameter path
    test = Fen('rnbqkbnr/pppp1ppp/8/4p3/4P3/5N2/PPPP1PPP/RNBQKB1R b KQkq - 1 2')
    y = test.boardToArray('rnbqkbnr/pppp1ppp/8/4p3/4P3/5N2/PPPP1PPP/RNBQKB1R')
    assert y == [' \x1b[31mr \x1b[0m\x1b[31mn \x1b[0m\x1b[31mb \x1b[0m\x1b[31mq \x1b[0m\x1b[31mk \x1b[0m\x1b[31mb \x1b[0m\x1b[31mn \x1b[0m\x1b[31mr \x1b[0m\n',
                 ' \x1b[31mp \x1b[0m\x1b[31mp \x1b[0m\x1b[31mp \x1b[0m\x1b[31mp \x1b[0m. \x1b[31mp \x1b[0m\x1b[31mp \x1b[0m\x1b[31mp \x1b[0m\n',
                 ' . . . . . . . . \n',
                 ' . . . . \x1b[31mp \x1b[0m. . . \n',
                 ' . . . . P . . . \n',
                 ' . . . . . N . . \n',
                 ' P P P P . P P P \n',
                 ' R N B Q K B . R \n']
    test.displayBoard()
    z = test.augmentBoard()
    assert z == ['\x1b[32m\n a b c d e f g h \n\x1b[0m',
                 '\x1b[32m 8 \x1b[0m \x1b[31mr \x1b[0m\x1b[31mn \x1b[0m\x1b[31mb \x1b[0m\x1b[31mq \x1b[0m\x1b[31mk \x1b[0m\x1b[31mb \x1b[0m\x1b[31mn \x1b[0m\x1b[31mr \x1b[0m\n',
                 '\x1b[32m 7 \x1b[0m \x1b[31mp \x1b[0m\x1b[31mp \x1b[0m\x1b[31mp \x1b[0m\x1b[31mp \x1b[0m. \x1b[31mp \x1b[0m\x1b[31mp \x1b[0m\x1b[31mp \x1b[0m\n',
                 '\x1b[32m 6 \x1b[0m . . . . . . . . \n',
                 '\x1b[32m 5 \x1b[0m . . . . \x1b[31mp \x1b[0m. . . \n',
                 '\x1b[32m 4 \x1b[0m . . . . P . . . \n',
                 '\x1b[32m 3 \x1b[0m . . . . . N . . \n',
                 '\x1b[32m 2 \x1b[0m P P P P . P P P \n',
                 '\x1b[32m 1 \x1b[0m R N B Q K B . R \n']
    test.displayBoard()
# ---------------------------------------------------------- 2 tests: total 105
# -------------------- too many pawns on board --------------------------------
def test_tooManyWhitePawns():
with mock.patch('builtins.input',side_effect = ['rnbqkbnr/pp1ppppp/8/2p5/4P3/5N2/PPPP1PPP/RNBQKB1R']):
test = Fen(fen = 'rnbqkbnr/pp1ppppp/8/2P5/4P3/5N2/PPPP1PPP/RNBQKB1R b KQkq - 1 2')
# input of corrected board element
assert test.board == 'rnbqkbnr/pp1ppppp/8/2p5/4P3/5N2/PPPP1PPP/RNBQKB1R'
assert test.toPlay == 'b'
assert test.castling == 'KQkq'
assert test.ep == '-'
assert test.halfMove == '1'
assert test.move == '2'
def test_tooManyBlackPawns():
    """A board element with nine black pawns is rejected; the mocked input
    supplies a corrected board element, accepted with the original FEN's
    remaining fields."""
    with mock.patch('builtins.input', side_effect=['rnbqkbnr/pp1ppppp/8/2p5/4P3/5N2/PPPP1PPP/RNBQKB1R']):
        test = Fen(fen='rnbqkbnr/pp1ppppp/8/2p5/4p3/5N2/PPPP1PPP/RNBQKB1R b KQkq - 1 2')
    # input of corrected board element
    assert test.board == 'rnbqkbnr/pp1ppppp/8/2p5/4P3/5N2/PPPP1PPP/RNBQKB1R'
    assert test.toPlay == 'b'
    assert test.castling == 'KQkq'
    assert test.ep == '-'
    assert test.halfMove == '1'
    assert test.move == '2'
# ---------------------------------------------------------- 2 tests: total 107
# ------------------- pawns on 1st or 8th rank --------------------------------
# Pawns can never stand on the 1st or 8th rank: each constructor below is fed
# an illegal FEN and the mocked input supplies a corrected board element.
def test_whtPawnsOn1st():
    with mock.patch('builtins.input', side_effect=['rnbqkbnr/pppp1ppp/8/4p3/4P3/5N2/PPPP1PPP/RNBQKB1R']):
        test = Fen('rnbqkbnr/pppp1ppp/8/4p3/4P3/5N2/PPP2PPP/RNPQKB1R b KQkq - 1 2')

def test_whtPawnsOn8th():
    with mock.patch('builtins.input', side_effect=['rnbqkbnr/pppp1ppp/8/4p3/4P3/5N2/PPPP1PPP/RNBQKB1R']):
        test = Fen('rnbqkPnr/pppp1ppp/8/4p3/4P3/5N2/PPP2PPP/RNBQKB1R b KQkq - 1 2')

def test_blkPawnsOn1st():
    with mock.patch('builtins.input', side_effect=['rnbqkbnr/pppp1ppp/8/4p3/4P3/5N2/PPPP1PPP/RNBQKB1R']):
        test = Fen('rnbqkbnr/ppp2ppp/8/4p3/4P3/5N2/PPPP1PPP/RNBQKp1R b KQkq - 1 2')

def test_blkPawnsOn8th():
    with mock.patch('builtins.input', side_effect=['rnbqkbnr/pppp1ppp/8/4p3/4P3/5N2/PPPP1PPP/RNBQKB1R']):
        test = Fen('rnbqkpnr/ppp2ppp/8/4p3/4P3/5N2/PPPP1PPP/RNBQKB1R b KQkq - 1 2')
# ---------------------------------------------------------- 4 tests: total 111
|
<gh_stars>1-10
#-------------------------------------------------------------------------------
#
# Project: ngEO Browse Server <http://ngeo.eox.at>
# Authors: <NAME> <<EMAIL>>
# <NAME> <<EMAIL>>
# <NAME> <<EMAIL>>
#
#-------------------------------------------------------------------------------
# Copyright (C) 2012 European Space Agency
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies of this Software or works derived from this Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#-------------------------------------------------------------------------------
import time
import logging
import subprocess
from functools import wraps
from lxml import etree
from lxml.builder import E
from django.conf import settings
from django.core.urlresolvers import reverse
from ngeo_browse_server.config import (
get_ngeo_config, get_project_relative_path, safe_get
)
from ngeo_browse_server.config.browselayer.data import get_layer_max_cached_zoom
from ngeo_browse_server.lock import (FileLock, LockException)
from ngeo_browse_server.mapcache.exceptions import (
SeedException, LayerException
)
from ngeo_browse_server.mapcache.tileset import URN_TO_GRID
from ngeo_browse_server.mapcache.config import (
get_mapcache_seed_config, get_tileset_path
)
# Default seeding file lock time-out.
DEF_LOCK_TIMEOUT = 60.0 # seconds
# Maximum bounds for both supported CRSs
CRS_BOUNDS = {
3857: (-20037508.3428, -20037508.3428, 20037508.3428, 20037508.3428),
4326: (-180, -90, 180, 90)
}
GRID_TO_SRID = {
"GoogleMapsCompatible": 3857,
"WGS84": 4326
}
logger = logging.getLogger(__name__)
def seed_mapcache(seed_command, config_file, tileset, grid,
                  minx, miny, maxx, maxy, minzoom, maxzoom,
                  start_time, end_time, threads, delete, force=True):
    """Run the external mapcache seeder for one tileset and time span,
    serialized per tileset via a file lock.

    Parameters mirror the ``mapcache_seed`` CLI: `delete` selects delete mode
    instead of seed mode, `force` re-seeds already existing tiles, and
    `minzoom`/`maxzoom` default to 0/6 when None. An extent whose `maxx` lies
    beyond the CRS bound (dateline crossing) is seeded in two passes.

    Raises SeedException on an unknown grid URN, an invalid extent, a
    non-zero seeder exit code, or a lock timeout.
    """
    # translate grid URN to mapcache grid name
    try:
        grid = URN_TO_GRID[grid]
    except KeyError:
        raise SeedException("Invalid grid '%s'." % grid)

    bounds = CRS_BOUNDS[GRID_TO_SRID[grid]]
    full = float(bounds[2] - bounds[0])   # full longitudinal width of the CRS
    max_bound = float(bounds[2] + full)   # east limit after a dateline wrap

    dateline_crossed = False
    if maxx > bounds[2]:
        dateline_crossed = True
    # extent is always within [bounds[0],bounds[2]]
    # where maxx can be >bounds[2] but <=full
    if minx < bounds[0] or minx > bounds[2] or maxx < bounds[0] or maxx > max_bound:
        raise SeedException("Invalid extent '%s,%s,%s,%s'." % (
            minx, miny, maxx, maxy
        ))

    if minzoom is None:
        minzoom = 0
    if maxzoom is None:
        maxzoom = 6

    # start- and end-time are expected to be UTC Zulu
    start_time = start_time.replace(tzinfo=None)
    end_time = end_time.replace(tzinfo=None)

    logger.info(
        "Starting mapcache seed with parameters: command='%s', "
        "config_file='%s', tileset='%s', grid='%s', "
        "extent='%s,%s,%s,%s', zoom='%s,%s', nthreads='%s', "
        "mode='%s', dimension='TIME=%sZ/%sZ'.",
        seed_command, config_file, tileset, grid,
        minx, miny, maxx, maxy, minzoom, maxzoom, threads,
        "seed" if not delete else "delete",
        start_time.isoformat(), end_time.isoformat()
    )

    def _get_seed_args(extent):
        # Build the mapcache_seed command line for a single extent.
        seed_args = [
            seed_command,
            "-c", config_file,
            "-t", tileset,
            "-g", grid,
            "-e", "%f,%f,%f,%f" % tuple(extent),
            "-n", str(threads),
            "-z", "%d,%d" % (minzoom, maxzoom),
            "-D", "TIME=%sZ/%sZ" % (start_time.isoformat(), end_time.isoformat()),
            "-m", "seed" if not delete else "delete",
            "-q",
            "-M", "1,1",
        ]
        if not delete and force:
            seed_args.append("-f")
        return seed_args

    def _seed(seed_args):
        # Run one seeder pass, forwarding its output to the log.
        seed_start = time.time()
        logger.debug(
            "mapcache seeding command: '%s'. raw: '%s'.",
            " ".join(seed_args), seed_args
        )
        process = subprocess.Popen(
            seed_args, stdout=subprocess.PIPE, stderr=subprocess.PIPE
        )
        out, err = process.communicate()
        # NOTE(review): under Python 3 `out`/`err` would be bytes; this module
        # targets Python 2 (cf. the django.core.urlresolvers import above).
        for string in (out, err):
            for line in string.split("\n"):
                if line != '':
                    logger.info("MapCache output: %s", line)
        logger.info(
            "Seeding finished in %.3fs with returncode '%d'.",
            time.time() - seed_start, process.returncode,
        )
        if process.returncode != 0:
            raise SeedException("Command '%s' failed with returncode '%d'." % (
                seed_command, process.returncode
            ))

    try:
        config = get_ngeo_config()
        timeout = safe_get(config, "mapcache.seed", "timeout")
        timeout = float(timeout) if timeout is not None else DEF_LOCK_TIMEOUT
    # BUG FIX: was a bare `except:`, which also swallowed SystemExit and
    # KeyboardInterrupt; fall back to the default timeout on real errors only.
    except Exception:
        timeout = DEF_LOCK_TIMEOUT

    try:
        lock = FileLock(get_project_relative_path(
            "mapcache_seed.%s.lck" % tileset  # one seeder process per tileset
            #"mapcache_seed.lck" # one exclusive seeder process
        ), timeout=timeout)
        start = time.time()
        with lock:
            logger.info("Seeding lock acquired in %.3fs", time.time() - start)
            if dateline_crossed:
                # two passes: up to the east CRS bound, then the wrapped rest
                _seed(_get_seed_args((minx, miny, bounds[2], maxy)))
                _seed(_get_seed_args((bounds[0], miny, maxx - full, maxy)))
            else:
                _seed(_get_seed_args((minx, miny, maxx, maxy)))
    # BUG FIX: `except LockException, error` is Python-2-only syntax; the
    # `as` form works on Python 2.6+ and Python 3.
    except LockException as error:
        raise SeedException("Seeding failed: %s" % str(error))
def lock_mapcache_config(func):
    """Decorator for functions involving the mapcache configuration to lock
    the mapcache configuration.
    """
    @wraps(func)
    def locked(*args, **kwargs):
        cfg = get_ngeo_config()
        # NOTE(review): the return value was never used in the original
        # either; the call is kept in case it validates the configuration.
        get_mapcache_seed_config(cfg)
        with FileLock(get_project_relative_path("mapcache.xml.lck")):
            return func(*args, **kwargs)
    return locked
def read_mapcache_xml(config):
    """Parse the configured mapcache XML file and return its root element.

    Blank text nodes are stripped so that a later pretty-printed write
    produces clean output.
    """
    xml_path = get_mapcache_seed_config(config)["config_file"]
    parser = etree.XMLParser(remove_blank_text=True)
    with open(xml_path) as stream:
        return etree.parse(stream, parser).getroot()
def write_mapcache_xml(root, config):
    """Serialize *root* pretty-printed, overwriting the configured mapcache
    XML file."""
    xml_path = get_mapcache_seed_config(config)["config_file"]
    with open(xml_path, "w") as stream:
        stream.write(etree.tostring(root, pretty_print=True))
@lock_mapcache_config
def add_mapcache_layer_xml(browse_layer, config=None):
    """Append <cache>, <source> and <tileset> entries for *browse_layer* to
    the mapcache XML configuration.

    Raises LayerException when an entry with the same name already exists.
    Runs under the mapcache config file lock (see lock_mapcache_config).
    """
    name = browse_layer.id
    config = config or get_ngeo_config()
    root = read_mapcache_xml(config)
    # refuse duplicates: any cache/source/tileset with this name blocks the add
    if len(root.xpath("cache[@name='%s']|source[@name='%s']|tileset[@name='%s']" % (name, name, name))):
        raise LayerException(
            "Cannot add browse layer to mapcache config, because a layer with "
            "the name '%s' is already inserted." % name
        )
    tileset_path = get_tileset_path(browse_layer.browse_type)
    # CRS extent of the layer's grid; `full` is the full longitudinal width,
    # used in the time-dimension query to match dateline-wrapped extents
    bounds = CRS_BOUNDS[GRID_TO_SRID[URN_TO_GRID[browse_layer.grid]]]
    full = float(abs(bounds[0]) + abs(bounds[2]))
    root.extend([
        # sqlite3 tile cache backed by the layer's tileset file
        E("cache",
            E("dbfile", tileset_path),
            E("detect_blank", "true"),
            E("pragma", "2147483646", name="max_page_count"),
            E("pragma", "2048", name="page_size"),
            name=name, type="sqlite3"
        ),
        # WMS source pointing back at the local browse server
        E("source",
            E("getmap",
                E("params",
                    E("LAYERS", name),
                    E("TRANSPARENT", "true")
                )
            ),
            E("http",
                E("url", "http://localhost/browse/ows?")
            ),
            name=name, type="wms"
        ),
        # tileset wiring cache + source together, with a TIME dimension
        E("tileset",
            E("metadata",
                E("title", str(browse_layer.title)),
                *([
                    E("abstract", str(browse_layer.description))]
                    if browse_layer.description
                    else []
                )
            ),
            E("source", name),
            E("cache", name),
            E("grid",
                URN_TO_GRID[browse_layer.grid], **{
                    "max-cached-zoom": str(get_layer_max_cached_zoom(browse_layer)),
                    "out-of-zoom-strategy": "reassemble"
                }
            ),
            E("format", "mixed"),
            # seeding-enabled layers use 8x8 metatiles and are read-only for
            # mapcache; ingestion-only layers are written by mapcache itself
            E("metatile", "1 1" if browse_layer.disable_seeding_ingestion
                else "8 8"),
            E("expires", "3600"),
            E("read-only", "false" if browse_layer.disable_seeding_ingestion
                else "true"),
            # time-dimension lookup: intervals intersecting the query window
            # and bbox, including extents shifted across the dateline by `full`
            E("timedimension",
                E("dbfile", settings.DATABASES["mapcache"]["NAME"]),
                E("query", "select * from (select strftime('%Y-%m-%dT%H:%M:%SZ',start_time)||'/'||strftime('%Y-%m-%dT%H:%M:%SZ',end_time) as interval from time where source_id=:tileset and (start_time<datetime(:end_timestamp,'unixepoch') and (end_time>datetime(:start_timestamp,'unixepoch')) or (start_time=end_time and start_time<datetime(:end_timestamp,'unixepoch') and end_time>=datetime(:start_timestamp,'unixepoch'))) and ((maxx>=:minx and minx<=:maxx) or (maxx>"+str(bounds[2])+" and (maxx-"+str(full)+")>=:minx and (minx-"+str(full)+")<=:maxx)) and maxy>=:miny and miny<=:maxy order by end_time desc limit "+str(browse_layer.tile_query_limit)+") order by interval asc"),
                type="sqlite", default=str(browse_layer.timedimension_default)),
            *([
                E("auth_method", "cmdlineauth")]
                if browse_layer.browse_access_policy in ("RESTRICTED", "PRIVATE")
                else []
            ),
            name=name
        )
    ])
    logger.info("Adding cache, source, and tileset for '%s' to mapcache "
                "config." % name)
    write_mapcache_xml(root, config)
@lock_mapcache_config
def remove_mapcache_layer_xml(browse_layer, config=None):
    """Remove the cache, source and tileset entries of *browse_layer* from
    the mapcache XML configuration; logs a warning when an entry is missing.

    Runs under the mapcache config file lock (see lock_mapcache_config).
    """
    config = config or get_ngeo_config()
    name = browse_layer.id
    root = read_mapcache_xml(config)
    logger.info("Removing cache, source, and tileset for '%s' from mapcache "
                "config." % name)
    try:
        # same order as the original: cache, then source, then tileset; a
        # missing entry aborts the remaining removals, as before
        for tag in ("cache", "source", "tileset"):
            root.remove(root.xpath("%s[@name='%s']" % (tag, name))[0])
    except IndexError:
        logger.warning(
            "Failed to remove browse layer from mapcache config, because a "
            "layer with the name '%s' could not be found." % name
        )
    write_mapcache_xml(root, config)
|
import os
import numpy as np
import pandas as pd
from scipy.misc import imread
import tensorflow as tf
from six.moves import urllib
import keras
from keras.models import Sequential
from keras.layers import Dense, Flatten, Reshape, InputLayer
from keras.regularizers import L1L2
from scipy.misc import imsave
import gzip
import os
import sys
import time
import csv
# MNIST input geometry and label space.
IMAGE_SIZE = 28    # images are 28x28 pixels
NUM_CHANNELS = 1   # greyscale
PIXEL_DEPTH = 255  # maximum raw pixel value
NUM_LABELS = 10    # digits 0-9
SOURCE_URL = 'http://yann.lecun.com/exdb/mnist/'  # MNIST download mirror
print('Hi')
def extract_data(filename, num_images):
    """Read `num_images` MNIST images from a gzipped idx file.

    Returns a float32 array of shape (num_images, 28, 28, 1). Pixel values
    are kept in their raw [0, 255] range (the [-0.5, 0.5] rescale present in
    the upstream tutorial is intentionally disabled here).
    """
    print('Extracting', filename)
    with gzip.open(filename) as stream:
        stream.read(16)  # skip the 16-byte idx3 header
        raw = stream.read(IMAGE_SIZE * IMAGE_SIZE * num_images)
    pixels = np.frombuffer(raw, dtype=np.uint8).astype(np.float32)
    return pixels.reshape(num_images, IMAGE_SIZE, IMAGE_SIZE, 1)
def extract_labels(filename, num_images):
    """Read `num_images` MNIST labels from a gzipped idx file and return
    them as an int64 vector."""
    print('Extracting', filename)
    with gzip.open(filename) as stream:
        stream.read(8)  # skip the 8-byte idx1 header
        raw = stream.read(num_images)
    return np.frombuffer(raw, dtype=np.uint8).astype(np.int64)
def maybe_download(filename):
    """Download *filename* from Yann LeCun's MNIST mirror into ./Data unless
    it is already present; return the local file path.

    NOTE(review): uses the TF1 `tf.gfile` API (Exists/MakeDirs/GFile); modern
    TensorFlow exposes the same calls under `tf.io.gfile` — confirm the
    installed TF version before upgrading.
    """
    # resolve ./Data relative to the current working directory
    root_dir = os.path.abspath('.')
    data_dir = os.path.join(root_dir, 'Data')
    WORK_DIRECTORY = data_dir
    print('a')  # leftover debug trace markers, kept as-is
    if not tf.gfile.Exists(WORK_DIRECTORY):
        print('b')
        tf.gfile.MakeDirs(WORK_DIRECTORY)
    filepath = os.path.join(WORK_DIRECTORY, filename)
    if not tf.gfile.Exists(filepath):
        print('c')
        filepath, _ = urllib.request.urlretrieve(SOURCE_URL + filename, filepath)
        with tf.gfile.GFile(filepath) as f:
            size = f.size()
        print('Successfully downloaded', filename, size, 'bytes.')
    print('d')
    return filepath
# to stop potential randomness
seed = 128
rng = np.random.RandomState(seed)

# set path
root_dir = os.path.abspath('.')
data_dir = os.path.join(root_dir, 'Data')
print('data dir')
print(data_dir)

# Fetch the four MNIST archives into ./Data (no-op when already cached).
train_data_filename = maybe_download('train-images-idx3-ubyte.gz')
train_labels_filename = maybe_download('train-labels-idx1-ubyte.gz')
test_data_filename = maybe_download('t10k-images-idx3-ubyte.gz')
test_labels_filename = maybe_download('t10k-labels-idx1-ubyte.gz')
print('e')
print(train_data_filename)

# Decode the raw archives: 60k training / 10k test images plus labels.
train_data = extract_data(train_data_filename, 60000)
train_labels = extract_labels(train_labels_filename, 60000)
test_data = extract_data(test_data_filename, 10000)
test_labels = extract_labels(test_labels_filename, 10000)

if not os.path.isdir("mnist/train-images"):
    os.makedirs("mnist/train-images")
if not os.path.isdir("mnist/test-images"):
    os.makedirs("mnist/test-images")

# process train data: one jpg per image plus a (relative path, label) CSV row
with open("mnist/train-labels.csv", 'w') as csvFile:
    writer = csv.writer(csvFile, delimiter=',', quotechar='"')
    for i in range(len(train_data)):
        imsave("mnist/train-images/" + str(i) + ".jpg", train_data[i][:, :, 0])
        writer.writerow(["train-images/" + str(i) + ".jpg", train_labels[i]])

# repeat for test data
with open("mnist/test-labels.csv", 'w') as csvFile:
    writer = csv.writer(csvFile, delimiter=',', quotechar='"')
    for i in range(len(test_data)):
        imsave("mnist/test-images/" + str(i) + ".jpg", test_data[i][:, :, 0])
        writer.writerow(["test-images/" + str(i) + ".jpg", test_labels[i]])

# load data
# BUG FIX: the CSVs above are written WITHOUT a header row, but read_csv
# defaulted to header=0, silently consuming sample 0 as the column names;
# declare the schema explicitly instead.
# NOTE(review): the absolute 'D:\\gan\\mnist' path assumes the script runs
# from D:\gan — confirm, or switch to the relative 'mnist' directory.
train = pd.read_csv(os.path.join('D:\\gan\\mnist', 'train-labels.csv'),
                    header=None, names=['filename', 'label'])
test = pd.read_csv(os.path.join('D:\\gan\\mnist', 'test-labels.csv'),
                   header=None, names=['filename', 'label'])
print('ds')
print(train)
# Build the training tensor from the images referenced by the CSV.
# (Per-row debug prints from the original removed.)
temp = []
for index, row in train.iterrows():
    # BUG FIX: `img_name` was read but never assigned (NameError at runtime).
    # The first CSV column holds the path relative to the ./mnist output
    # directory written above (e.g. 'train-images/0.jpg'), so join against
    # 'mnist', not data_dir.
    img_name = row[0]
    image_path = os.path.join('mnist', img_name)
    img = imread(image_path, flatten=True)
    img = img.astype('float32')
    temp.append(img)

# Stack into (N, 28, 28) and scale pixels to [0, 1].
train_x = np.stack(temp)
train_x = train_x / 255.
# print one randomly chosen training image
img_name = rng.choice(train.filename)
image_path = os.path.join('mnist', img_name)
# BUG FIX: the original read `filepath`, a name that does not exist at module
# level (it is local to maybe_download); use the path computed above. The
# join target is 'mnist' because that is where the jpgs were written.
img = imread(image_path, flatten=True)
# NOTE(review): `pylab` is never imported in this file; the preview below
# needs `import matplotlib.pylab as pylab` to actually run.
pylab.imshow(img, cmap='gray')
pylab.axis('off')
pylab.show()
# GAN hyper-parameters
g_input_shape = 100        # generator latent dimension
d_input_shape = (28, 28)   # discriminator input: one 28x28 image
hidden_1_num_units = 500
hidden_2_num_units = 500
g_output_num_units = 784   # 28*28 flattened generator output
d_output_num_units = 1     # real/fake probability
epochs = 25                # NOTE(review): unused — fit() below hard-codes epochs=10
batch_size = 128

# generator: latent vector -> 28x28 image with sigmoid pixels in [0, 1]
model_1 = Sequential([
    Dense(units=hidden_1_num_units, input_dim=g_input_shape, activation='relu', kernel_regularizer=L1L2(1e-5, 1e-5)),
    Dense(units=hidden_2_num_units, activation='relu', kernel_regularizer=L1L2(1e-5, 1e-5)),
    Dense(units=g_output_num_units, activation='sigmoid', kernel_regularizer=L1L2(1e-5, 1e-5)),
    Reshape(d_input_shape),
])

# discriminator: 28x28 image -> probability of being real
model_2 = Sequential([
    InputLayer(input_shape=d_input_shape),
    Flatten(),
    Dense(units=hidden_1_num_units, activation='relu', kernel_regularizer=L1L2(1e-5, 1e-5)),
    Dense(units=hidden_2_num_units, activation='relu', kernel_regularizer=L1L2(1e-5, 1e-5)),
    Dense(units=d_output_num_units, activation='sigmoid', kernel_regularizer=L1L2(1e-5, 1e-5)),
])

from keras_adversarial import AdversarialModel, simple_gan, gan_targets
from keras_adversarial import AdversarialOptimizerSimultaneous, normal_latent_sampling

# Couple generator and discriminator; both players are updated simultaneously.
gan = simple_gan(model_1, model_2, normal_latent_sampling((100,)))
model = AdversarialModel(base_model=gan, player_params=[model_1.trainable_weights, model_2.trainable_weights])
model.adversarial_compile(adversarial_optimizer=AdversarialOptimizerSimultaneous(), player_optimizers=['adam', 'adam'], loss='binary_crossentropy')
history = model.fit(x=train_x, y=gan_targets(train_x.shape[0]), epochs=10, batch_size=batch_size)

# NOTE(review): `plt` is never imported in this file; the loss curves and the
# sample grid below need `import matplotlib.pyplot as plt` to run.
plt.plot(history.history['player_0_loss'])
plt.plot(history.history['player_1_loss'])
plt.plot(history.history['loss'])

# Sample 10 latent vectors and show the generator's output for each.
zsamples = np.random.normal(size=(10, 100))
pred = model_1.predict(zsamples)
for i in range(pred.shape[0]):
    plt.imshow(pred[i, :], cmap='gray')
    plt.show()
|
<gh_stars>1-10
import os
import sys
import numpy as np
from bokeh.io import curdoc
from bokeh.models import ColumnDataSource, Span, Label, Slider
from bokeh.models.widgets import Div
from bokeh.models.glyphs import Circle
from bokeh.plotting import figure
from bokeh.layouts import row, column, widgetbox
BOKEH_BASE_DIR = os.path.dirname(
os.path.dirname(os.path.abspath(__file__))
)
sys.path.append(BOKEH_BASE_DIR)
from api_helper import get_url_args, get_data_as_pandas_df # noqa
from bokeh_helper import add_span_annotation # noqa
# Get url query args
args = get_url_args(curdoc, defaults={'metric': 'AM1'})

# App title
title = Div(text="""<h2>{metric} diagnostic plot for {ci_dataset} dataset from
job ID {ci_id}</h2>""".format_map(args))

# Get data
data = get_data_as_pandas_df(endpoint='apps',
                             params=args)

# Configure bokeh data sources with the full and
# selected data sets
snr = {'value': [], 'label': '', 'unit': ''}
selected_snr = []
dist = {'value': [], 'label': '', 'unit': ''}
selected_dist = []

if not data.empty:
    snr = data['matchedDataset']['snr']
    index = np.array(snr['value']) > float(args['snr_cut'])
    selected_snr = np.array(snr['value'])[index]
    dist = data['matchedDataset']['dist']
    # NOTE(review): this mask is identical to `index` computed just above;
    # the recomputation is redundant but harmless.
    index = np.array(snr['value']) > float(args['snr_cut'])
    selected_dist = np.array(dist['value'])[index]

# `full` holds every point; `selected` only those above the SNR cut
full = ColumnDataSource(data={'snr': snr['value'], 'dist': dist['value']})
selected = ColumnDataSource(data={'snr': selected_snr, 'dist': selected_dist})

# Ranges used in the bokeh widgets
MIN_SNR = 0
MAX_SNR = 500
SNR_STEP = 1
MIN_DIST = 0
MAX_DIST = 100

# SNR slider
snr_slider = Slider(start=MIN_SNR, end=MAX_SNR, value=float(args['snr_cut']),
                    step=SNR_STEP, title="SNR")

# Scatter plot
x_axis_label = snr['label']
y_axis_label = "{label} [{unit}]".format_map(dist)

plot = figure(tools="pan, box_zoom, wheel_zoom, reset",
              active_scroll="wheel_zoom",
              y_range=(MIN_DIST, MAX_DIST), y_axis_location='left',
              x_axis_label=x_axis_label, x_axis_type='log',
              y_axis_label=y_axis_label)

# TODO: move size, fill alpha and line_color to plot styling configuration
scatter = plot.circle('snr', 'dist', size=5, fill_alpha=0.2,
                      source=full, color='lightgray',
                      line_color=None)
scatter.nonselection_glyph = Circle(fill_color='lightgray',
                                    line_color=None)

partial_scatter = plot.circle('snr', 'dist', size=5, fill_alpha=0.2,
                              line_color=None, source=selected)
# default bokeh blue color #1f77b4
partial_scatter.nonselection_glyph = Circle(fill_color="#1f77b4",
                                            fill_alpha=0.2,
                                            line_color=None)

# Add annotations to the scatter plot
# TODO: improve variable naming
# span1: vertical dashed line marking the current SNR cut
span1 = Span(location=float(args['snr_cut']), dimension='height',
             line_color='black', line_dash='dashed', line_width=3)
plot.add_layout(span1)
label1 = Label(x=285, y=425, x_units='screen', y_units='screen',
               text='SNR > {:3.2f}'.format(span1.location),
               render_mode='css')
plot.add_layout(label1)

# Full histogram of the distance distribution (all points, grey)
full_hist, edges = np.histogram(full.data['dist'], bins=100)
hmax = max(full_hist) * 1.1
hist = figure(tools="ypan, ywheel_zoom, reset",
              active_scroll="ywheel_zoom",
              x_range=(0, hmax),
              y_axis_location='right',
              y_range=plot.y_range)
hist.ygrid.grid_line_color = None
hist.quad(left=0, bottom=edges[:-1], top=edges[1:], right=full_hist,
          color="lightgray", line_color='lightgray')

# Partial histogram (points above the SNR cut), redrawn by the slider callback
partial_hist, _ = np.histogram(selected.data['dist'],
                               bins=edges)
histogram = hist.quad(left=0, bottom=edges[:-1], top=edges[1:],
                      right=partial_hist)

# Add annotations to the histograms
n = len(selected.data['dist'])
median = np.median(selected.data['dist'])
rms = np.sqrt(np.mean(np.square(selected.data['dist'])))

label2 = Label(x=200, y=400, x_units='screen', y_units='screen',
               text='Median = {:3.2f} marcsec'.format(median),
               render_mode='css')
hist.add_layout(label2)
label3 = Label(x=200, y=375, x_units='screen', y_units='screen',
               text='RMS = {:3.2f} marcsec'.format(rms), render_mode='css')
hist.add_layout(label3)
label4 = Label(x=200, y=425, x_units='screen', y_units='screen',
               text='N = {}'.format(n), render_mode='css')
hist.add_layout(label4)

# span2: horizontal dashed line at the current RMS
span2 = Span(location=rms,
             dimension='width', line_color="black",
             line_dash='dashed', line_width=3)
hist.add_layout(span2)

# TODO: obtain spec thresholds from the API
add_span_annotation(plot=hist, value=20, text="Minimum", color="red")
add_span_annotation(plot=hist, value=10, text="Design", color="blue")
add_span_annotation(plot=hist, value=5, text="Stretch", color="green")
# Callbacks
def update(attr, old, new):
    """Slider callback: re-apply the SNR cut, then refresh the selected data
    source, the partial histogram, the span annotations and the labels."""
    snr_cut = snr_slider.value
    # Re-select the points above the cut. Assign `.data` in a single step to
    # avoid a bokeh warning about mismatched column lengths.
    mask = np.array(full.data['snr']) > float(snr_cut)
    selected.data = dict(snr=np.array(full.data['snr'])[mask],
                         dist=np.array(full.data['dist'])[mask])
    # Redraw the partial histogram using the original bin edges.
    counts, _ = np.histogram(selected.data['dist'], bins=edges)
    histogram.data_source.data['right'] = counts
    # Recompute the summary statistics of the selected sample.
    dist_sel = selected.data['dist']
    n = len(dist_sel)
    median = np.median(dist_sel)
    rms = np.sqrt(np.mean(np.square(dist_sel)))
    # Move the span annotations.
    span1.location = snr_cut
    span2.location = rms
    # Refresh the text labels.
    label1.text = 'SNR > {:3.2f}'.format(snr_cut)
    label2.text = 'Median = {:3.2f} marcsec'.format(median)
    label3.text = 'RMS = {:3.2f} marcsec'.format(rms)
    label4.text = 'N = {}'.format(n)
# Re-apply the SNR cut whenever the slider moves
snr_slider.on_change('value', update)

# App layout
if data.empty:
    # no data available: show the title plus a placeholder message
    layout = column(widgetbox(title, width=900),
                    widgetbox(Div(text="""<h4>No data to display.</h4>"""),
                              width=900))
else:
    layout = row(column(widgetbox(title, width=900),
                        widgetbox(snr_slider, width=900),
                        row(plot, hist)))

curdoc().add_root(layout)
curdoc().title = "SQuaSH"
|
from jumpscale.sals.chatflows.chatflows import chatflow_step
from jumpscale.sals.marketplace import MarketPlaceAppsChatflow, deployer
from jumpscale.loader import j
import nacl
from jumpscale.sals.reservation_chatflow import deployment_context, DeploymentFailed
class Discourse(MarketPlaceAppsChatflow):
    """Marketplace chatflow that deploys a Discourse forum container behind
    an nginx/TRC proxy, with a reserved subdomain and an HTTPS certificate."""

    FLIST_URL = "https://hub.grid.tf/omar0.3bot/omarelawady-discourse-http.flist"
    SOLUTION_TYPE = "discourse"
    steps = [
        "get_solution_name",
        "discourse_smtp_info",
        "infrastructure_setup",
        "reservation",
        "initializing",
        "success",
    ]
    title = "Discourse"
    # resources of the discourse container itself
    container_resources = {"cru": 1, "mru": 2, "sru": 2}
    # main container + nginx container
    query = {"cru": 2, "mru": 3, "sru": 2.5}

    @chatflow_step(title="Discourse Setup")
    def discourse_smtp_info(self):
        """Collect the SMTP account Discourse will use to send mail."""
        user_info = self.user_info()
        self.user_email = user_info["email"]
        self.username = user_info["username"]
        form = self.new_form()
        # BUG FIX: the two prompts were crossed in the original — the
        # "host e-mail address" answer was stored as the SMTP server and the
        # "smtp host" answer (default smtp.gmail.com) as the SMTP user name.
        smtp_username = form.string_ask("Please add the host e-mail address for your solution", required=True)
        smtp_server = form.string_ask(
            "Please add the smtp host example: `smtp.gmail.com`", default="smtp.gmail.com", required=True, md=True
        )
        smtp_password = form.secret_ask("Please add the host e-mail password", required=True)
        form.ask()
        self.smtp_server = smtp_server.value
        self.smtp_username = smtp_username.value
        self.smtp_password = smtp_password.value

    @deployment_context()
    def _deploy(self):
        """Reserve the subdomain, deploy the discourse container, then expose
        it over HTTPS; raises DeploymentFailed when any workload fails."""
        metadata = {
            "name": self.solution_name,
            "form_info": {"chatflow": self.SOLUTION_TYPE, "Solution name": self.solution_name},
        }
        self.solution_metadata.update(metadata)
        # environment passed to the discourse container
        env = {
            "pub_key": "",
            "DISCOURSE_VERSION": "staging",
            "RAILS_ENV": "production",
            "DISCOURSE_HOSTNAME": self.domain,
            "DISCOURSE_SMTP_USER_NAME": self.smtp_username,
            "DISCOURSE_SMTP_ADDRESS": self.smtp_server,
            "DISCOURSE_DEVELOPER_EMAILS": self.user_email,
            "DISCOURSE_SMTP_PORT": "587",
            "THREEBOT_URL": "https://login.threefold.me",
            "OPEN_KYC_URL": "https://openkyc.live/verification/verify-sei",
            "UNICORN_BIND_ALL": "true",
        }
        threebot_private_key = nacl.signing.SigningKey.generate().encode(nacl.encoding.Base64Encoder).decode("utf-8")
        secret_env = {
            "THREEBOT_PRIVATE_KEY": threebot_private_key,
            "FLASK_SECRET_KEY": j.data.idgenerator.guid(),
            # BUG FIX: the source contained a scrubbed "<PASSWORD>" placeholder
            # here (a syntax error); pass the password collected in
            # discourse_smtp_info.
            "DISCOURSE_SMTP_PASSWORD": self.smtp_password,
        }
        # reserve subdomain
        _id = deployer.create_subdomain(
            pool_id=self.gateway_pool.pool_id,
            gateway_id=self.gateway.node_id,
            subdomain=self.domain,
            addresses=self.addresses,
            solution_uuid=self.solution_id,
            **self.solution_metadata,
        )
        success = deployer.wait_workload(_id, self)
        if not success:
            raise DeploymentFailed(
                f"Failed to create subdomain {self.domain} on gateway"
                f" {self.gateway.node_id} {_id}. The resources you paid for will be re-used in your upcoming deployments.",
                wid=_id,
            )
        self.threebot_url = f"https://{self.domain}"

        entrypoint = "/.start_discourse.sh"  # plain string (was a no-op f-string)
        # reserve container
        self.resv_id = deployer.deploy_container(
            pool_id=self.pool_id,
            node_id=self.selected_node.node_id,
            network_name=self.network_view.name,
            ip_address=self.ip_address,
            flist=self.FLIST_URL,
            cpu=self.container_resources["cru"],
            memory=self.container_resources["mru"] * 1024,
            disk_size=self.container_resources["sru"] * 1024,
            entrypoint=entrypoint,
            env=env,
            secret_env=secret_env,
            interactive=False,
            **self.solution_metadata,
            solution_uuid=self.solution_id,
        )
        success = deployer.wait_workload(self.resv_id, self)
        if not success:
            raise DeploymentFailed(
                f"Failed to deploy workload {self.resv_id}. The resources you paid for will be re-used in your upcoming deployments.",
                solution_uuid=self.solution_id,
                wid=self.resv_id,
            )

        # expose the container over HTTPS through the gateway (TRC + cert)
        _id, _ = deployer.expose_and_create_certificate(
            pool_id=self.pool_id,
            gateway_id=self.gateway.node_id,
            network_name=self.network_view.name,
            trc_secret=self.secret,
            domain=self.domain,
            email=self.user_email,
            solution_ip=self.ip_address,
            solution_port=80,
            enforce_https=True,
            node_id=self.selected_node.node_id,
            solution_uuid=self.solution_id,
            proxy_pool_id=self.gateway_pool.pool_id,
            log_config=self.nginx_log_config,
            **self.solution_metadata,
        )
        success = deployer.wait_workload(_id, self)
        if not success:
            raise DeploymentFailed(
                f"Failed to create TRC container on node {self.selected_node.node_id} {_id}. The resources you paid for will be re-used in your upcoming deployments.",
                solution_uuid=self.solution_id,
                wid=_id,
            )
chat = Discourse
|
# -*- coding: utf-8 -*-
"""
===========================================#
# Title: Review Analysis using NLP and Naive Bayes
# Date: 7 Jan 2020
@author: <NAME>
#==========================================#
"""
############################### Natural Language Processing #######################
# Importing the libraries
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
# Importing the dataset
dataset = pd.read_csv('Restaurant_Reviews.tsv', delimiter = '\t', quoting = 3)
#we convert csv to tsv bcoz we want tab seperated reviews as comma can be used in one review also
############################### Functions for evaluation parameters ################
def summing_array(cm):
    """Return the sum of all entries of confusion matrix *cm*.

    Generalized: the original hard-coded a 2x2 shape; this works for any
    r x c matrix (nested lists or a numpy array) with identical results on
    the 2x2 matrices produced in this script.
    """
    return sum(sum(row) for row in cm)
def imp_param(cm):
    """Print and return accuracy, precision, recall and F1 from a 2x2
    sklearn confusion matrix.

    sklearn convention: cm[i][j] counts samples with true label i predicted
    as j, labels sorted ascending. With positive class 1 that gives
    TN = cm[0][0], FP = cm[0][1], FN = cm[1][0], TP = cm[1][1].
    BUG FIX: the original swapped TP and TN; accuracy was unaffected, but
    precision/recall/F1 actually described the negative class.

    Returns a dict with keys 'accuracy' (percentage), 'precision', 'recall'
    and 'f1' (callers that ignore the return value are unaffected).
    """
    TN = cm[0][0]  # true negatives  (true 0, predicted 0)
    TP = cm[1][1]  # true positives  (true 1, predicted 1)
    FP = cm[0][1]  # false positives (true 0, predicted 1)
    FN = cm[1][0]  # false negatives (true 1, predicted 0)
    total = TP + TN + FP + FN
    # accuracy, as a percentage
    accuracy = (TP + TN) / total * 100
    print('Accuracy is ', accuracy)
    # precision of the positive class
    precision = TP / (TP + FP)
    print('Precision is ', precision)
    # recall of the positive class
    recall = TP / (TP + FN)
    print('Recall is ', recall)
    # harmonic mean of precision and recall
    f1_score = (2 * precision * recall) / (precision + recall)
    print('F1_score is ', f1_score)
    return {'accuracy': accuracy, 'precision': precision,
            'recall': recall, 'f1': f1_score}
########################################### DATA PREPROCESSING PART ################################
# Cleaning the texts
import re #re libary works weven without nltk
import nltk
nltk.download('stopwords') #stopwards is the collection of the words like 'the,is,am,are' etc...
from nltk.corpus import stopwords
from nltk.stem.porter import PorterStemmer

# Walk-through of the cleaning pipeline on the FIRST review only; the loop
# further below repeats the exact same steps for all 1000 reviews.
review = re.sub('[^a-zA-Z]', ' ', dataset['Review'][0])
#In above line i have subtracted the unwanted aplhanumeric characters etc from my first review so that only meaningful and imp words remain in
#my review and all the other operations are performed on these set of meaningful words rather than on all words.
#^ signifies that all the words except a-z and A-z
review = review.lower() # converted the words into lowercase.
review = review.split()
'''
This is removing the stopword from first review
'''
review = [word for word in review if not word in set(stopwords.words('english'))]
review = ' '.join(review)
'''
STEMMING
in this process we take the root of EVERY word individually which converts the various tenses of the word
to the same tense
example:
    loving---> love
    loved--> love
'''
review = review.split()
ps = PorterStemmer()
review = [ps.stem(word) for word in review if not word in set(stopwords.words('english'))]
review = ' '.join(review)
'''
doing the above procedure for the whole tsv file.
'''
review_list = []
for i in range(0, 1000):
    review = re.sub('[^a-zA-Z]', ' ', dataset['Review'][i])
    review = review.lower()
    review = review.split()
    ps = PorterStemmer()
    review = [ps.stem(word) for word in review if not word in set(stopwords.words('english'))]
    review = ' '.join(review)
    review_list.append(review)

# Creating the Bag of Words model
from sklearn.feature_extraction.text import CountVectorizer
cv = CountVectorizer(max_features = 1500)
#max_features is used to create a sparse matrix of those many features and the less frequent words are removed from the matrix
X = cv.fit_transform(review_list).toarray()
y = dataset.iloc[:, 1].values
########################################### USING NAIVE BAYES ################################
# Splitting the dataset into the Training set and Test set (80/20)
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.20, random_state = 0)

# Fitting Naive Bayes to the Training set
from sklearn.naive_bayes import GaussianNB
classifier = GaussianNB()
classifier.fit(X_train, y_train)

# Predicting the Test set results
y_pred = classifier.predict(X_test)

# Making the Confusion Matrix
from sklearn.metrics import confusion_matrix
cm = confusion_matrix(y_test, y_pred)
print('\n\n########## ANALAYSIS AS PER NAIVE BAYES ##########\n')
# NOTE(review): this bare call discards its return value; imp_param prints
# the metrics
summing_array(cm)
imp_param(cm)
print('\n####################################################\n')
################################################## USING RANDOM FOREST #################
# Splitting the dataset into the Training set and Test set (75/25)
from sklearn.model_selection import train_test_split
X_train_rf, X_test_rf, y_train_rf, y_test_rf = train_test_split(X, y, test_size = 0.25, random_state = 0)

# Fitting Random Forest on the TRAINING split only.
from sklearn.ensemble import RandomForestClassifier
regressor = RandomForestClassifier(n_estimators = 10, random_state = 0)
# BUG FIX: the original fit on the full (X, y), so every test row was seen
# during training and the reported metrics were inflated by data leakage.
regressor.fit(X_train_rf, y_train_rf)

# Predicting the Test set results
y_pred_rf = regressor.predict(X_test_rf)

# Making the Confusion Matrix
from sklearn.metrics import confusion_matrix
cm_rf = confusion_matrix(y_test_rf, y_pred_rf)
print('########## ANALAYSIS AS PER RANDOM FOREST ##########\n')
summing_array(cm_rf)
imp_param(cm_rf)
print('\n####################################################\n')
################################################## USING Decision Tree #################
# Splitting the dataset into the Training set and Test set (55/45)
from sklearn.model_selection import train_test_split
X_train_dc, X_test_dc, y_train_dc, y_test_dc = train_test_split(X, y, test_size = 0.45, random_state = 0)

# Fitting the Decision Tree on the TRAINING split only.
from sklearn.tree import DecisionTreeClassifier
regressor_dc = DecisionTreeClassifier(random_state = 0)
# BUG FIX: the original fit on the full (X, y), so every test row was seen
# during training and the reported metrics were inflated by data leakage.
regressor_dc.fit(X_train_dc, y_train_dc)

# Predicting the Test set results
y_pred_dc = regressor_dc.predict(X_test_dc)

# Making the Confusion Matrix
from sklearn.metrics import confusion_matrix
cm_dc = confusion_matrix(y_test_dc, y_pred_dc)
print('########## ANALAYSIS AS PER DECISION TREE ##########')
summing_array(cm_dc)
imp_param(cm_dc)
print('\n####################################################\n')
####################################################################################################
|
<gh_stars>1000+
# -*- coding: utf-8 -*-
class colors:
"""Colors class:
reset all colors with colors.reset
two subclasses fg for foreground and bg for background.
use as colors.subclass.colorname.
i.e. colors.fg.red or colors.bg.green
also, the generic bold, disable, underline, reverse, strikethrough,
and invisible work with the main class
i.e. colors.bold
"""
reset = "\033[0m"
bold = "\033[01m"
disable = "\033[02m"
underline = "\033[04m"
reverse = "\033[07m"
strikethrough = "\033[09m"
invisible = "\033[08m"
class fg:
black = "\033[30m"
red = "\033[31m"
green = "\033[32m"
orange = "\033[33m"
blue = "\033[34m"
purple = "\033[35m"
cyan = "\033[36m"
lightgrey = "\033[37m"
darkgrey = "\033[90m"
lightred = "\033[91m"
lightgreen = "\033[92m"
yellow = "\033[93m"
lightblue = "\033[94m"
pink = "\033[95m"
lightcyan = "\033[96m"
class bg:
black = "\033[40m"
red = "\033[41m"
green = "\033[42m"
orange = "\033[43m"
blue = "\033[44m"
purple = "\033[45m"
cyan = "\033[46m"
lightgrey = "\033[47m"
@staticmethod
def good_news(news):
"""
Print a Success
"""
print(colors.bold + colors.fg.green + "[>] " + colors.reset + news.strip())
@staticmethod
def debug_news(news):
"""
Print a Debug
"""
print()
print(colors.bold + colors.fg.lightred + "[@] " + news + colors.reset)
@staticmethod
def bad_news(news):
"""
Print a Failure, error
"""
print(colors.bold + colors.fg.red + "[!] " + colors.reset + news.strip())
@staticmethod
def info_news(news):
"""
Print an information with grey text
"""
print(
colors.bold
+ colors.fg.lightblue
+ "[~] "
+ colors.reset
+ colors.fg.lightgrey
+ news.strip()
+ colors.reset
)
@staticmethod
def question_news(news):
"""
Print an information with yellow text
"""
print(
colors.bold
+ colors.fg.blue
+ "[?] "
+ colors.reset
+ colors.fg.yellow
+ news.strip()
+ colors.reset
)
@staticmethod
def print_result(target, data, source):
"""
Print Breach results
"""
if "PASS" in source:
print(
"{}{}{:15}{}|{}{:>25.25}{} > {}{}{}{}".format(
colors.fg.lightblue,
colors.bold,
source,
colors.fg.lightgrey,
colors.fg.pink,
target,
colors.fg.lightgrey,
colors.bold,
colors.fg.green,
data,
colors.reset,
)
)
elif "LOCALSEARCH" in source:
if len(data) > 140:
print(
"{}{}{:15}{}|{}{:>25.25}{} > {}{}{}{}".format(
colors.fg.lightblue,
colors.bold,
source,
colors.fg.lightgrey,
colors.fg.pink,
target,
colors.fg.lightgrey,
colors.bold,
colors.fg.green,
"[...]" + data[-135:],
colors.reset,
)
)
else:
print(
"{}{}{:15}{}|{}{:>25.25}{} > {}{}{}{}".format(
colors.fg.lightblue,
colors.bold,
source,
colors.fg.lightgrey,
colors.fg.pink,
target,
colors.fg.lightgrey,
colors.bold,
colors.fg.green,
data,
colors.reset,
)
)
# Underscore to avoid coloring like a HASH
elif "_HASH" in source:
print(
"{}{:15}{}|{}{:>25.25}{} > {}{}{}".format(
colors.fg.lightblue,
source,
colors.fg.lightgrey,
colors.fg.pink,
target,
colors.fg.lightgrey,
colors.fg.red,
data,
colors.reset,
)
)
# Underscore to avoid coloring service with "email" in name
elif "_EMAIL" in source:
print(
"{}{:15}{}|{}{:>25.25}{} > {}{}{}".format(
colors.fg.lightblue,
source,
colors.fg.lightgrey,
colors.fg.pink,
target,
colors.fg.lightgrey,
colors.fg.lightgrey,
data,
colors.reset,
)
)
elif "USER" in source:
print(
"{}{:15}{}|{}{:>25.25}{} > {}{}{}".format(
colors.fg.lightblue,
source,
colors.fg.lightgrey,
colors.fg.pink,
target,
colors.fg.lightgrey,
colors.fg.lightcyan,
data,
colors.reset,
)
)
elif "SOURCE" in source:
print(
"{}{:15}{}|{}{:>25.25}{} > {}{}{}\n".format(
colors.fg.lightblue,
source,
colors.fg.lightgrey,
colors.fg.pink,
target,
colors.fg.lightgrey,
colors.reset,
data,
colors.reset,
)
)
elif "IP" in source:
print(
"{}{:15}{}|{}{:>25.25}{} > {}{}{}".format(
colors.fg.lightblue,
source,
colors.fg.lightgrey,
colors.fg.pink,
target,
colors.fg.lightgrey,
colors.fg.red,
data,
colors.reset,
)
)
else:
print(
"{}{:15}{}|{}{:>25.25}{} > {}{}{}".format(
colors.fg.lightblue,
source,
colors.fg.lightgrey,
colors.fg.pink,
target,
colors.fg.lightgrey,
colors.fg.lightgrey,
data,
colors.reset,
)
)
@staticmethod
def print_res_header(target):
"""
Print Breach result header
"""
print(colors.bold, "{:_^90}\n".format(""), colors.reset)
print(
colors.bold
+ colors.fg.green
+ "[>] "
+ colors.reset
+ "Showing results for "
+ target
+ colors.reset
)
|
<filename>src/parser_util.py
# coding=utf-8
import os
import argparse
def get_parser():
    """Build the argparse parser with all training/evaluation options.

    Returns:
        argparse.ArgumentParser: parser combining episodic-training options
        (dataset paths, episode counts, LR scheduler) and the incremental
        stage/NCM options at the bottom.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('-root', '--dataset_root',
                        type=str,
                        help='path to dataset',
                        default='..' + os.sep + 'dataset')
    parser.add_argument('-exp', '--experiment_root',
                        type=str,
                        help='root where to store models, losses and accuracies',
                        default='..' + os.sep + 'output')
    parser.add_argument('-nep', '--epochs',
                        type=int,
                        help='number of epochs to train for',
                        default=1)
    parser.add_argument('-lr', '--learning_rate',
                        type=float,
                        help='learning rate for the model, default=0.001',
                        default=0.001)
    parser.add_argument('-lrS', '--lr_scheduler_step',
                        type=int,
                        help='StepLR learning rate scheduler step, default=20',
                        default=20)
    parser.add_argument('-lrG', '--lr_scheduler_gamma',
                        type=float,
                        help='StepLR learning rate scheduler gamma, default=0.5',
                        default=0.5)
    parser.add_argument('-its', '--iterations',
                        type=int,
                        help='number of episodes per epoch, default=100',
                        default=100)
    parser.add_argument('-cTr', '--classes_per_it_tr',
                        type=int,
                        help='number of random classes per episode for training, default=60',
                        default=60)
    parser.add_argument('-nsTr', '--num_support_tr',
                        type=int,
                        help='number of samples per class to use as support for training, default=5',
                        default=5)
    parser.add_argument('-nqTr', '--num_query_tr',
                        type=int,
                        help='number of samples per class to use as query for training, default=5',
                        default=5)
    parser.add_argument('-nsNCM', '--num_support_NCM',
                        type=int,
                        help='number of samples per stage to use as query for training, default=5',
                        default=5)
    parser.add_argument('-cVa', '--classes_per_it_val',
                        type=int,
                        help='number of random classes per episode for validation, default=5',
                        default=5)
    parser.add_argument('-nsVa', '--num_support_val',
                        type=int,
                        help='number of samples per class to use as support for validation, default=5',
                        default=5)
    parser.add_argument('-nqVa', '--num_query_val',
                        type=int,
                        help='number of samples per class to use as query for validation, default=15',
                        default=15)
    parser.add_argument('-seed', '--manual_seed',
                        type=int,
                        help='input for the manual seeds initializations',
                        default=7)
    parser.add_argument('--cuda',
                        action='store_true',
                        help='enables cuda')
    parser.add_argument('--batch_size', default=32, type=int)
    # BUG FIX: '--lr' was declared type=int with a float default (0.01);
    # any value passed on the command line (e.g. --lr 0.001) raised
    # "invalid int value". A learning rate is a float.
    parser.add_argument('--lr', default=0.01, type=float)
    parser.add_argument('--max_size', default=2000, type=int)
    parser.add_argument('--total_cls', default=100, type=int)
    parser.add_argument('--stage', default=5, type=int)
    parser.add_argument('--class_per_stage', default=20, type=int)
    parser.add_argument('--edge', default=10000, type=int)
    parser.add_argument('--NCM_batch', default=1024, type=int)
    parser.add_argument('--Data_file', default='train_meta', type=str)
    parser.add_argument('--Bias_epoch', default=30, type=int)
    parser.add_argument('--lossF', default='NCM', type=str)
    parser.add_argument('--pushR', default=0.1, type=float)
    parser.add_argument('--pillR', default=0.1, type=float)
    parser.add_argument('--centerR', default=0.1, type=float)
    parser.add_argument('--mix',
                        action='store_true', help='enables mix')
    return parser
|
<filename>neutron_tempest_plugin/scenario/test_dhcp.py
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_log import log
from paramiko import ssh_exception as ssh_exc
from tempest.common import utils
from tempest.lib.common.utils import data_utils
from tempest.lib import decorators
from tempest.lib import exceptions as lib_exc
from neutron_tempest_plugin.common import ssh
from neutron_tempest_plugin import config
from neutron_tempest_plugin.scenario import base
# Module-level handles shared by every test in this file.
CONF = config.CONF
LOG = log.getLogger(__name__)
class DHCPTest(base.BaseTempestTestCase):
    # 'admin' is required in addition to 'primary' because the router is
    # created via create_router_by_client() below.
    credentials = ['primary', 'admin']
    force_tenant_isolation = False

    @classmethod
    def resource_setup(cls):
        """Build the shared topology once per class: a network + subnet
        attached to a router, plus a keypair and a security group with a
        loginable (SSH) rule so the test VM can be reached."""
        super(DHCPTest, cls).resource_setup()
        cls.rand_name = data_utils.rand_name(
            cls.__name__.rsplit('.', 1)[-1])
        cls.network = cls.create_network(name=cls.rand_name)
        cls.subnet = cls.create_subnet(
            network=cls.network, name=cls.rand_name)
        cls.router = cls.create_router_by_client()
        cls.create_router_interface(cls.router['id'], cls.subnet['id'])
        cls.keypair = cls.create_keypair(name=cls.rand_name)
        cls.security_group = cls.create_security_group(name=cls.rand_name)
        cls.create_loginable_secgroup_rule(cls.security_group['id'])

    @utils.requires_ext(extension='extra_dhcp_opt', service='network')
    @decorators.idempotent_id('58f7c094-1980-4e03-b0d3-6c4dd27217b1')
    def test_extra_dhcp_opts(self):
        """This test case tests DHCP extra options configured for Neutron port.

        Test is checking just extra option "15" which is domain-name
        according to the RFC 2132:
        https://tools.ietf.org/html/rfc2132#section-5.3

        To test that option, there is spawned VM connected to the port with
        configured extra_dhcp_opts and test asserts that search domain name is
        configured inside VM in /etc/resolv.conf file
        """
        test_domain = "test.domain"
        # The value is wrapped in extra quotes — presumably required by the
        # DHCP backend's option syntax; confirm before changing.
        extra_dhcp_opts = [
            {'opt_name': 'domain-name',
             'opt_value': '"%s"' % test_domain}]
        port = self.create_port(
            network=self.network, name=self.rand_name,
            security_groups=[self.security_group['id']],
            extra_dhcp_opts=extra_dhcp_opts)
        floating_ip = self.create_floatingip(port=port)

        server = self.create_server(
            flavor_ref=CONF.compute.flavor_ref,
            image_ref=CONF.compute.image_ref,
            key_name=self.keypair['name'],
            networks=[{'port': port['id']}])
        self.wait_for_server_active(server['server'])
        self.wait_for_guest_os_ready(server['server'])

        try:
            ssh_client = ssh.Client(
                floating_ip['floating_ip_address'],
                CONF.validation.image_ssh_user,
                pkey=self.keypair['private_key'])
            vm_resolv_conf = ssh_client.exec_command(
                "cat /etc/resolv.conf")
            self.assertIn(test_domain, vm_resolv_conf)
        except (lib_exc.SSHTimeout,
                ssh_exc.AuthenticationException,
                AssertionError) as error:
            # Dump console and local network state before re-raising so the
            # failure is debuggable from the logs.
            LOG.debug(error)
            self._log_console_output([server])
            self._log_local_network_status()
            raise
|
<gh_stars>10-100
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
import torch
from utils import weights_init
class CNN_simple(nn.Module):
    """Four conv+maxpool stages; the flattened feature size is probed at
    construction time by running a dummy batch through forward()."""

    def __init__(self, obs_shape, stack_frames):
        super(CNN_simple, self).__init__()
        self.conv1 = nn.Conv2d(obs_shape[0], 32, 5, stride=1, padding=2)
        self.maxp1 = nn.MaxPool2d(2, 2)
        self.conv2 = nn.Conv2d(32, 32, 5, stride=1, padding=1)
        self.maxp2 = nn.MaxPool2d(2, 2)
        self.conv3 = nn.Conv2d(32, 64, 4, stride=1, padding=1)
        self.maxp3 = nn.MaxPool2d(2, 2)
        self.conv4 = nn.Conv2d(64, 64, 3, stride=1, padding=1)
        self.maxp4 = nn.MaxPool2d(2, 2)

        # Scale every conv weight by the ReLU gain.
        gain = nn.init.calculate_gain('relu')
        for conv in (self.conv1, self.conv2, self.conv3, self.conv4):
            conv.weight.data.mul_(gain)

        # Probe the flattened output size with a random dummy batch.
        dummy_state = Variable(torch.rand(stack_frames, obs_shape[0], obs_shape[1], obs_shape[2]))
        self.outdim = self.forward(dummy_state).size(-1)
        self.apply(weights_init)
        self.train()

    def forward(self, x):
        for conv, pool in ((self.conv1, self.maxp1),
                           (self.conv2, self.maxp2),
                           (self.conv3, self.maxp3),
                           (self.conv4, self.maxp4)):
            x = F.relu(pool(conv(x)))
        # Flattens the whole batch into a single row (view(1, -1)), exactly
        # as the original implementation did.
        return x.view(1, -1)
class ICML(nn.Module):
    """Two-conv encoder followed by a 256-unit fully-connected layer; the
    fc input size is probed with a dummy forward pass (fc=False)."""

    def __init__(self, obs_shape, stack_frames):
        super(ICML, self).__init__()
        self.conv1 = nn.Conv2d(obs_shape[0], 16, 8, stride=4, padding=2)
        self.conv2 = nn.Conv2d(16, 32, 4, stride=2, padding=1)

        gain = nn.init.calculate_gain('relu')
        for conv in (self.conv1, self.conv2):
            conv.weight.data.mul_(gain)

        dummy_state = Variable(torch.rand(stack_frames, obs_shape[0], obs_shape[1], obs_shape[2]))
        conv_features = self.forward(dummy_state, fc=False).size(-1)
        self.fc = nn.Linear(conv_features, 256)
        self.outdim = 256
        self.apply(weights_init)
        self.train()

    def forward(self, x, fc=True):
        x = F.relu(self.conv2(F.relu(self.conv1(x))))
        x = x.view(1, -1)
        # fc=False is used during construction, before self.fc exists.
        return F.relu(self.fc(x)) if fc else x
class CNN_maze(nn.Module):
    """Small two-conv encoder for maze observations, mirroring ICML but with
    3x3/stride-2 convolutions; ends in a 256-unit fc layer."""

    def __init__(self, obs_shape, stack_frames):
        super(CNN_maze, self).__init__()
        self.conv1 = nn.Conv2d(obs_shape[0], 16, 3, stride=2, padding=1)
        self.conv2 = nn.Conv2d(16, 32, 3, stride=2, padding=1)

        gain = nn.init.calculate_gain('relu')
        for conv in (self.conv1, self.conv2):
            conv.weight.data.mul_(gain)

        dummy_state = Variable(torch.rand(stack_frames, obs_shape[0], obs_shape[1], obs_shape[2]))
        conv_features = self.forward(dummy_state, fc=False).size(-1)
        self.fc = nn.Linear(conv_features, 256)
        self.outdim = 256
        self.apply(weights_init)
        self.train()

    def forward(self, x, fc=True):
        x = F.relu(self.conv2(F.relu(self.conv1(x))))
        x = x.view(1, -1)
        # fc=False is used during construction, before self.fc exists.
        return F.relu(self.fc(x)) if fc else x
|
<reponame>Rutherford-sudo/PigTelegramBot
import logging
import requests
import telebot
import json
import os
from flask import Flask, request
import random
from textwrap import wrap
from translate import Translator
server = Flask(__name__)

# Deployment configuration — fill these in before running.
TOKEN = "YOUR TELEGRAM TOKEN"
bot = telebot.TeleBot(TOKEN, parse_mode=None)
# BUG FIX: this line previously read `GROUP_ID = YOUR GROUP ID`, which is
# not valid Python and prevented the module from importing at all. Keep a
# placeholder that parses; replace it with the real numeric chat id of the
# target Telegram group before deploying.
GROUP_ID = "YOUR GROUP ID"
def pigImage():
    """Fetch the URL of a random pig picture from the RapidAPI 'pigs' API."""
    headers = {
        'x-rapidapi-key': "RAPID API KEY",
        'x-rapidapi-host': "pigs.p.rapidapi.com",
    }
    payload = requests.request(
        "GET", "https://pigs.p.rapidapi.com/random", headers=headers).json()
    return payload['source']
def corgiImage():
    """Return the URL of a random Pembroke corgi photo from dog.ceo."""
    payload = requests.request(
        "GET", "https://dog.ceo/api/breed/pembroke/images").json()
    return random.choice(payload['message'])
def covidBrazil():
    """Build a short Portuguese summary of Brazil's COVID-19 numbers."""
    headers = {
        'x-rapidapi-key': "RAPID API KEY",
        'x-rapidapi-host': "covid-19-data.p.rapidapi.com",
    }
    stats = requests.request(
        "GET",
        "https://covid-19-data.p.rapidapi.com/country",
        headers=headers,
        params={"name": "brazil"},
    ).json()[0]
    # The timestamp is wrapped to 10 chars so only the date part is shown.
    finalupdate = wrap(stats['lastUpdate'], 10)
    return (
        f"Casos de Covid-19 no Brasil!\n\n"
        f"Confirmados: {stats['confirmed']}\nRecuperados: {stats['recovered']}\nMortes: {stats['deaths']}\n"
        f"Ultima Atualização: {finalupdate[0]}\n"
    )
def fatoInutil():
    """Fetch a random useless fact and translate it to Brazilian Portuguese."""
    fact = requests.request(
        "GET", "https://useless-facts.sameerkumar.website/api").json()
    return Translator(to_lang='pt-br').translate(fact['data'])
def poemaRandom():
    """Pick a random poem title from PoetryDB and return the full poem text."""
    titles = requests.request("GET", "https://poetrydb.org/title").json()
    titulo_aleatorio = random.choice(titles['titles'])
    poem = requests.request(
        "GET", f"https://poetrydb.org/title/{titulo_aleatorio}").json()[0]
    # Each verse line is followed by a newline, matching the original output.
    corpo = "".join(line + "\n" for line in poem['lines'])
    return f"Titulo: {poem['title']}\nAutor(a): {poem['author']}\n\n{corpo}"
@bot.message_handler(commands=['start', 'help'])
def send_welcome(message):
    """Reply with an oink to /start and /help."""
    bot.reply_to(message, "Oinc Oinc!")
def extract_arg(arg):
    """Drop the leading command word and return the remaining tokens."""
    tokens = arg.split()
    return tokens[1:]
@bot.message_handler(commands=['fotinha'])
def sendCat(message):
    """Post a random pig photo to the group."""
    # Local renamed from `request`, which shadowed flask's imported request.
    photo_url = pigImage()
    bot.send_photo(GROUP_ID, photo_url, caption="Oinc!")
@bot.message_handler(commands=['spike'])
def sendCorgi(message):
    """Post a random corgi photo to the group."""
    photo_url = corgiImage()
    bot.send_photo(GROUP_ID, photo_url, caption="Auau!")
@bot.message_handler(commands=['covid'])
def sendReport(message):
    """Reply with the Brazilian COVID-19 summary."""
    resumo = covidBrazil()
    bot.reply_to(message, resumo)
@bot.message_handler(commands=['curiosidade'])
def sendFato(message):
    """Reply with a translated useless fact."""
    fato = fatoInutil()
    bot.reply_to(message, fato)
@bot.message_handler(commands=['poeminha'])
def sendPoema(message):
    """Reply with a randomly chosen poem."""
    poema = poemaRandom()
    bot.reply_to(message, poema)
@server.route('/' + TOKEN, methods=['POST'])
def getMessage():
    """Webhook endpoint: feed the raw Telegram update into the bot."""
    raw_update = request.stream.read().decode("utf-8")
    bot.process_new_updates([telebot.types.Update.de_json(raw_update)])
    return "!", 200
@server.route("/")
def webhook():
bot.remove_webhook()
bot.set_webhook(url='YOUR APP NAME HEROKU' + TOKEN)
return "!", 200
if __name__ == "__main__":
server.run(host="0.0.0.0", port=int(os.environ.get('PORT', 5000)))
|
<gh_stars>0
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import time
from oslo_log import log as logging
from oslo_config import cfg
from neutron.plugins.ml2 import models
from neutron.plugins.ml2 import db
# Binding level and network type used when resolving a port's VLAN.
CMCC_DEFAULT_LEVEL = 1
CMCC_DEFAULT_NETWORK_TYPE = 'vlan'
LOG = logging.getLogger(__name__)
# Config options for polling the ml2 DB for a port's VLAN.
# NOTE(review): the first two options are declared StrOpt but consumed as
# integers (get_vlan_id_by_port_cmcc wraps them in int()); IntOpt would be
# more precise — confirm deployments before changing. Likewise the hostname
# option defaults to the integer 10, which looks like a copy-paste default.
DB_OPTS = [
    cfg.StrOpt(
        'array_request_vlan_interval',
        default=100,
        help=('Interval in millisecond to request VLAN ID'
              'from database')
    ),
    cfg.StrOpt(
        'array_request_vlan_max_retries',
        default=10,
        help=('Maximum number to try to request vlan'
              'from database')
    ),
    cfg.StrOpt(
        'array_request_vlan_hostname',
        default=10,
        help=('Hostname of port binding')
    )
]

cfg.CONF.register_opts(DB_OPTS, "arraynetworks")
def _get_binding_level(context, port_id, level):
    """Look up the ml2 PortBindingLevel row for (port, configured host, level).

    Returns None when no port id is given, when no host is configured, or
    when no matching row exists.
    """
    if not port_id:
        return None
    host = cfg.CONF.arraynetworks.array_request_vlan_hostname
    if not host:
        LOG.error("Unable to get host by port_id %(port_id)s", {'port_id': port_id})
        return None
    LOG.debug("For port %(port_id)s, got binding host %(host)s",
              {'port_id': port_id, 'host': host})
    binding = (context.session.query(models.PortBindingLevel).
               filter_by(port_id=port_id, host=host, level=level).
               first())
    LOG.debug("For port %(port_id)s, level %(level)s, "
              "got binding levels %(levels)s",
              {'port_id': port_id,
               'level': level,
               'levels': binding})
    return binding
def _get_network_segment(context, segment_id, network_type):
    """Fetch the NetworkSegment row matching segment id and network type."""
    segment = None
    if segment_id:
        segment = (context.session.query(models.NetworkSegment).
                   filter_by(id=segment_id, network_type=network_type).
                   first())
    LOG.debug("For segment %(segment_id)s, network type %(network_type)s, "
              "got binding levels %(networksegments)s",
              {'segment_id': segment_id,
               'network_type': network_type,
               'networksegments': segment})
    return segment
def get_vlan_id_by_port_cmcc(context, port_id):
    """Resolve the VLAN segmentation id for a port, retrying until bound.

    The binding may not exist yet when this is called, so the ml2 tables are
    polled: sleep array_request_vlan_interval milliseconds between attempts
    and give up (returning None) after array_request_vlan_max_retries
    attempts; a retries value of 0 means retry forever.
    """
    vlan_id = None
    if not port_id:
        LOG.error("should provide the port_id")
        return None
    attempts = 0
    # The interval option is in milliseconds; time.sleep() takes seconds.
    seconds_time = int(cfg.CONF.arraynetworks.array_request_vlan_interval) / 1000
    retries = int(cfg.CONF.arraynetworks.array_request_vlan_max_retries)
    while True:
        if attempts < retries:
            attempts += 1
        elif retries == 0:
            # retries == 0: never give up; keep resetting the counter.
            attempts = 0
        else:
            msg = ("Unable to get the vlan id. Exiting after "
                   "%(retries)s attempts") % {'retries': retries}
            LOG.error(msg)
            return None
        binding_level = _get_binding_level(context, port_id, CMCC_DEFAULT_LEVEL)
        if not binding_level:
            LOG.error("Unable to get binding_level using %(port_id)s", {'port_id': port_id})
            time.sleep(seconds_time)
            continue
        segment_id = binding_level.segment_id
        network_segment = _get_network_segment(context, segment_id, CMCC_DEFAULT_NETWORK_TYPE)
        if not network_segment:
            LOG.error("Unable to get network_segment using %(segment_id)s", {'segment_id': segment_id})
            time.sleep(seconds_time)
            continue
        else:
            vlan_id = network_segment.segmentation_id
            break
    return vlan_id
|
#!/usr/bin/env python
import glob
import json
import os
import stat
import sys
from collections import OrderedDict, defaultdict
from collections.abc import Mapping
from contextlib import contextmanager
from getpass import getpass
from subprocess import check_output

import requests
from requests.auth import HTTPBasicAuth
from requests_oauthlib import OAuth2
def main(version, push=None):
    """
    WARNING: If push is given as --push then this will push the release to
    github.
    """
    _GitHub_release(version, push == '--push')
def error(msg):
    """Abort with *msg* by raising ValueError."""
    raise ValueError(msg)
def blue(text):
    """Wrap *text* in ANSI blue escape codes."""
    return f"\033[34m{text}\033[0m"
def red(text):
    """Wrap *text* in ANSI red escape codes."""
    return f"\033[31m{text}\033[0m"
def green(text):
    """Wrap *text* in ANSI green escape codes."""
    return f"\033[32m{text}\033[0m"
def _GitHub_release(version, push, username=None, user='sympy', token=None,
        token_file_path="~/.sympy/release-token", repo='sympy', draft=False):
    """
    Upload the release files to GitHub.

    The tag must be pushed up first. You can test on another repo by changing
    user and repo.
    """
    if not requests:
        error("requests and requests-oauthlib must be installed to upload to GitHub")

    release_text = GitHub_release_text(version)
    short_version = get_sympy_short_version(version)
    tag = 'sympy-' + version
    prerelease = short_version != version

    urls = URLs(user=user, repo=repo)
    if not username:
        username = input("GitHub username: ")
    token = load_token_file(token_file_path)
    # BUG FIX: `password` was only ever bound inside the interactive branch,
    # so a saved token left it undefined at the query_GitHub() calls below.
    password = None
    if not token:
        username, password, token = GitHub_authenticate(urls, username, token)

    # If the tag in question is not pushed up yet, then GitHub will just
    # create it off of master automatically, which is not what we want. We
    # could make it create it off the release branch, but even then, we would
    # not be sure that the correct commit is tagged. So we require that the
    # tag exist first.
    if not check_tag_exists(version):
        sys.exit(red("The tag for this version has not been pushed yet. Cannot upload the release."))

    # See https://developer.github.com/v3/repos/releases/#create-a-release
    # First, create the release
    post = {
        'tag_name': tag,
        'name': "SymPy " + version,
        'body': release_text,
        'draft': draft,
        'prerelease': prerelease,
    }

    print("Creating release for tag", tag, end=' ')

    if push:
        # BUG FIX: the password arguments had been mangled to the literal
        # placeholder `<PASSWORD>` (a syntax error); pass the credential.
        result = query_GitHub(urls.releases_url, username, password=password,
            token=token, data=json.dumps(post)).json()
        release_id = result['id']
    else:
        print(green("Not pushing!"))

    print(green("Done"))

    # Then, upload all the files to it.
    for key in descriptions:
        tarball = get_tarball_name(key, version)
        params = {'name': tarball}

        # Pick a Content-Type matching the artifact's extension.
        if tarball.endswith('gz'):
            headers = {'Content-Type': 'application/gzip'}
        elif tarball.endswith('pdf'):
            headers = {'Content-Type': 'application/pdf'}
        elif tarball.endswith('zip'):
            headers = {'Content-Type': 'application/zip'}
        else:
            headers = {'Content-Type': 'application/octet-stream'}

        print("Uploading", tarball, end=' ')
        sys.stdout.flush()
        with open(os.path.join('release/release-' + version, tarball), 'rb') as f:
            if push:
                result = query_GitHub(urls.release_uploads_url % release_id, username,
                    password=password, token=token, data=f, params=params,
                    headers=headers).json()
            else:
                print(green("Not uploading!"))

    print(green("Done"))

    # TODO: download the files and check that they have the right sha256 sum
def GitHub_release_text(version):
    """
    Generate text to put in the GitHub release Markdown box
    """
    shortversion = get_sympy_short_version(version)
    htmltable = table(version)
    out = """\
See https://github.com/sympy/sympy/wiki/release-notes-for-{shortversion} for the release notes.
{htmltable}
**Note**: Do not download the **Source code (zip)** or the **Source code (tar.gz)**
files below.
"""
    out = out.format(shortversion=shortversion, htmltable=htmltable)
    # Echo the rendered notes so they can be copy-pasted into the web form.
    print(blue("Here are the release notes to copy into the GitHub release "
        "Markdown form:"))
    print()
    print(out)
    return out
def get_sympy_short_version(version):
    """
    Get the short version of SymPy being released, not including any rc tags
    (like 0.7.3)
    """
    pieces = version.split('.')
    last = pieces[-1]
    if not last.isdigit():
        # Strip an rc suffix. Handles both "1.0.rc1" (drop the component)
        # and "1.1rc1" (keep just the leading digit).
        if last[0].isdigit():
            pieces[-1] = last[0]
        else:
            del pieces[-1]
    return '.'.join(pieces)
class URLs(object):
    """
    This class contains URLs and templates which used in requests to GitHub API
    """

    def __init__(self, user="sympy", repo="sympy",
                 api_url="https://api.github.com",
                 authorize_url="https://api.github.com/authorizations",
                 uploads_url='https://uploads.github.com',
                 main_url='https://github.com'):
        """Generates all URLs and templates"""
        self.user = user
        self.repo = repo
        self.api_url = api_url
        self.authorize_url = authorize_url
        self.uploads_url = uploads_url
        self.main_url = main_url

        # Everything below hangs off the repository path on the API host.
        repo_path = "{}/repos/{}/{}".format(api_url, user, repo)
        self.pull_list_url = repo_path + "/pulls"
        self.issue_list_url = repo_path + "/issues"
        self.releases_url = repo_path + "/releases"
        self.single_issue_template = self.issue_list_url + "/%d"
        self.single_pull_template = self.pull_list_url + "/%d"
        self.user_info_template = api_url + "/users/%s"
        self.user_repos_template = api_url + "/users/%s/repos"
        self.issue_comment_template = repo_path + "/issues/%d/comments"
        self.release_uploads_url = "{}/repos/{}/{}/releases/%d/assets".format(
            uploads_url, user, repo)
        self.release_download_url = "{}/{}/{}/releases/download/%s/%s".format(
            main_url, user, repo)
def load_token_file(path="~/.sympy/release-token"):
print("> Using token file %s" % path)
path = os.path.expanduser(path)
path = os.path.abspath(path)
if os.path.isfile(path):
try:
with open(path) as f:
token = f.readline()
except IOError:
print("> Unable to read token file")
return
else:
print("> Token file does not exist")
return
return token.strip()
def GitHub_authenticate(urls, username, token=None):
    """Authenticate against GitHub, preferring a saved token over a password.

    Returns a (username, password, token) triple. May prompt interactively
    for credentials and offer to generate and save a new API token.
    """
    _login_message = """\
Enter your GitHub username & password or press ^C to quit. The password
will be kept as a Python variable as long as this script is running and
https to authenticate with GitHub, otherwise not saved anywhere else:\
"""
    if username:
        print("> Authenticating as %s" % username)
    else:
        print(_login_message)
        username = input("Username: ")

    authenticated = False

    # Try the token first; a valid token avoids any password prompt.
    if token:
        print("> Authenticating using token")
        try:
            GitHub_check_authentication(urls, username, None, token)
        except AuthenticationFailed:
            print("> Authentication failed")
        else:
            print("> OK")
            password = None
            authenticated = True

    # Fall back to username/password until a pair is accepted.
    while not authenticated:
        password = getpass("Password: ")
        try:
            print("> Checking username and password ...")
            GitHub_check_authentication(urls, username, password, None)
        except AuthenticationFailed:
            print("> Authentication failed")
        else:
            print("> OK.")
            authenticated = True

    # After a successful password login, offer to mint (and save) a token
    # so the password is not needed next time.
    if password:
        generate = input("> Generate API token? [Y/n] ")
        if generate.lower() in ["y", "ye", "yes", ""]:
            name = input("> Name of token on GitHub? [SymPy Release] ")
            if name == "":
                name = "SymPy Release"
            token = generate_token(urls, username, password, name=name)
            print("Your token is", token)
            print("Use this token from now on as GitHub_release:token=" + token +
                ",username=" + username)
            print(red("DO NOT share this token with anyone"))
            save = input("Do you want to save this token to a file [yes]? ")
            if save.lower().strip() in ['y', 'yes', 'ye', '']:
                save_token_file(token)

    return username, password, token
def run(*cmdline, cwd=None):
    """
    Run command in subprocess and get lines of output
    """
    output = check_output(cmdline, encoding='utf-8', cwd=cwd)
    return output.splitlines()
def check_tag_exists(version):
    """
    Check if the tag for this release has been uploaded yet.
    """
    tag = 'sympy-' + version
    remote_tags = run('git', 'ls-remote', '--tags', 'origin')
    return any(tag in line for line in remote_tags)
def generate_token(urls, username, password, OTP=None, name="SymPy Release"):
    """Create a new GitHub API token scoped to public_repo and return it."""
    payload = json.dumps({
        "scopes": ["public_repo"],
        "note": name,
    })
    reply = query_GitHub(urls.authorize_url, username=username,
                         password=password, data=payload).json()
    return reply["token"]
def GitHub_check_authentication(urls, username, password, token):
    """
    Checks that username & password is valid.
    """
    # Raises AuthenticationFailed (via query_GitHub) on bad credentials.
    query_GitHub(urls.api_url, username=username, password=password,
                 token=token)
class AuthenticationFailed(Exception):
    # Raised when GitHub rejects the supplied credentials.
    pass
def query_GitHub(url, username=None, password=None, token=None, data=None,
        OTP=None, headers=None, params=None, files=None):
    """
    Query GitHub API.

    In case of a multipage result, DOES NOT query the next page.
    """
    headers = headers or {}

    if OTP:
        headers['X-GitHub-OTP'] = OTP

    # Prefer OAuth2 token auth; fall back to basic auth.
    # NOTE(review): HTTPBasicAuth must come from requests.auth — it does not
    # appear in this file's visible imports; confirm.
    if token:
        auth = OAuth2(client_id=username, token=dict(access_token=token,
            token_type='bearer'))
    else:
        auth = HTTPBasicAuth(username, password)
    # POST when a body is supplied, otherwise a streaming GET.
    if data:
        r = requests.post(url, auth=auth, data=data, headers=headers,
            params=params, files=files)
    else:
        r = requests.get(url, auth=auth, headers=headers, params=params, stream=True)
    if r.status_code == 401:
        # A 401 carrying X-GitHub-OTP means two-factor auth is enabled:
        # prompt for the one-time code and retry with it.
        two_factor = r.headers.get('X-GitHub-OTP')
        if two_factor:
            print("A two-factor authentication code is required:", two_factor.split(';')[1].strip())
            OTP = input("Authentication code: ")
            return query_GitHub(url, username=username, password=password,
                token=token, data=data, OTP=OTP)

        raise AuthenticationFailed("invalid username or password")

    r.raise_for_status()
    return r
def save_token_file(token):
    """Prompt for a location and write the token there.

    Returns the (unexpanded) path on success, or None on failure.
    """
    token_file = input("> Enter token file location [~/.sympy/release-token] ")
    token_file = token_file or "~/.sympy/release-token"

    token_file_expand = os.path.expanduser(token_file)
    token_file_expand = os.path.abspath(token_file_expand)
    token_folder, _ = os.path.split(token_file_expand)

    try:
        if not os.path.isdir(token_folder):
            # 0o700: only the owner may enter the token directory.
            os.mkdir(token_folder, 0o700)
        with open(token_file_expand, 'w') as f:
            f.write(token + '\n')
        # Restrict the saved token to owner read/write only.
        # NOTE(review): `stat` does not appear in this file's visible
        # imports — confirm it is imported at the top of the file.
        os.chmod(token_file_expand, stat.S_IREAD | stat.S_IWRITE)
    except OSError as e:
        print("> Unable to create folder for token file: ", e)
        return
    except IOError as e:
        print("> Unable to save token file: ", e)
        return

    return token_file
def table(version):
    """
    Make an html table of the downloads.

    This is for pasting into the GitHub releases page. See GitHub_release().
    """
    # NOTE(review): _tarball_format is defined elsewhere in this file (not
    # visible in this chunk).
    tarball_formatter_dict = dict(_tarball_format(version))
    shortversion = get_sympy_short_version(version)

    tarball_formatter_dict['version'] = shortversion

    # Parse the tab-separated "<value>\t<filename>" lines from the helpers
    # into filename-keyed dicts.
    sha256s = [i.split('\t') for i in _sha256(version, print_=False, local=True).split('\n')]
    sha256s_dict = {name: sha256 for sha256, name in sha256s}

    sizes = [i.split('\t') for i in _size(version, print_=False).split('\n')]
    sizes_dict = {name: size for size, name in sizes}

    table = []

    # https://docs.python.org/2/library/contextlib.html#contextlib.contextmanager. Not
    # recommended as a real way to generate html, but it works better than
    # anything else I've tried.
    @contextmanager
    def tag(name):
        # Emit <name> ... </name> around the body of the with-block.
        table.append("<%s>" % name)
        yield
        table.append("</%s>" % name)

    @contextmanager
    def a_href(link):
        table.append("<a href=\"%s\">" % link)
        yield
        table.append("</a>")

    with tag('table'):
        with tag('tr'):
            for headname in ["Filename", "Description", "size", "sha256"]:
                with tag("th"):
                    table.append(headname)

        for key in descriptions:
            name = get_tarball_name(key, version)
            with tag('tr'):
                with tag('td'):
                    with a_href('https://github.com/sympy/sympy/releases/download/sympy-%s/%s' % (version, name)):
                        with tag('b'):
                            table.append(name)
                with tag('td'):
                    table.append(descriptions[key].format(**tarball_formatter_dict))
                with tag('td'):
                    table.append(sizes_dict[name])
                with tag('td'):
                    table.append(sha256s_dict[name])

    out = ' '.join(table)
    return out
# Human-readable descriptions of each release artifact, keyed by the names
# get_tarball_name() accepts; insertion order controls display order.
descriptions = OrderedDict([
    ('source', "The SymPy source installer.",),
    ('wheel', "A wheel of the package.",),
    ('html', '''Html documentation. This is the same as
the <a href="https://docs.sympy.org/latest/index.html">online documentation</a>.''',),
    ('pdf', '''Pdf version of the <a href="https://docs.sympy.org/latest/index.html"> html documentation</a>.''',),
])
def _size(version, print_=True):
    """
    Print the sizes of the release files. Run locally.

    Returns the sizes as "<size>\t<filename>" lines joined by newlines.
    """
    raw = run(*(['du', '-h'] + release_files(version)))
    pairs = [line.split() for line in raw]
    # Keep only the basename of each path for display.
    out = '\n'.join('%s\t%s' % (size, os.path.split(path)[1])
                    for size, path in pairs)
    if print_:
        print(out)
    return out
def _sha256(version, print_=True, local=False):
    """
    Compute sha256 checksums of the local release files.

    Returns "<sha256>\t<filename>" lines joined by newlines.
    """
    if not local:
        # Remote checksumming is not supported in this code path.
        raise ValueError('Should not get here...')
    raw = run(*(['shasum', '-a', '256'] + release_files(version)))
    # Remove the release/ directory part for printing. Useful for
    # copy-pasting into the release notes.
    pairs = [line.split() for line in raw]
    out = '\n'.join('%s\t%s' % (digest, os.path.split(path)[1])
                    for digest, path in pairs)
    if print_:
        print(out)
    return out
def get_tarball_name(file, version):
    """
    Get the name of a tarball

    file should be one of

    source-orig:       The original name of the source tarball
    source-orig-notar: The name of the untarred directory
    source:            The source tarball (after renaming)
    wheel:             The wheel
    html:              The name of the html zip
    html-nozip:        The name of the html, without ".zip"
    pdf-orig:          The original name of the pdf file
    pdf:               The name of the pdf file (after renaming)
    """
    doctypename = defaultdict(str, {'html': 'zip', 'pdf': 'pdf'})
    # File types whose name is a fixed template.
    simple = {
        'source-orig': 'sympy-{version}.tar.gz',
        'source': 'sympy-{version}.tar.gz',
        'source-orig-notar': 'sympy-{version}',
        'pdf-orig': 'sympy-{version}.pdf',
        'wheel': 'sympy-{version}-py3-none-any.whl',
    }
    if file in simple:
        name = simple[file]
    elif file in {'html', 'pdf', 'html-nozip'}:
        name = "sympy-docs-{type}-{version}"
        if file == 'html-nozip':
            # zip files keep the name of the original zipped directory. See
            # https://github.com/sympy/sympy/issues/7087.
            file = 'html'
        else:
            name += ".{extension}"
    else:
        raise ValueError(file + " is not a recognized argument")
    return name.format(version=version, type=file,
                       extension=doctypename[file])
def release_files(version):
    """
    Returns the list of local release files.

    Raises ValueError when the release directory holds no files.
    """
    found = glob.glob('release/release-%s/*' % version)
    if not found:
        raise ValueError("No release files found")
    return found
# Every file-type key accepted by get_tarball_name(); also serves as the key
# set of the lazy _tarball_format mapping below.
tarball_name_types = {
    'source-orig',
    'source-orig-notar',
    'source',
    'wheel',
    'html',
    'html-nozip',
    'pdf-orig',
    'pdf',
}
# Have to make this lazy so that version can be defined.
class _tarball_format(Mapping):
    """Lazy read-only mapping from tarball type name to concrete file name.

    Values are computed on access via get_tarball_name, so nothing is built
    until a version is known.
    """
    def __init__(self, version):
        self.version = version

    def __getitem__(self, name):
        return get_tarball_name(name, self.version)

    def __iter__(self):
        return iter(tarball_name_types)

    def __len__(self):
        return len(tarball_name_types)
if __name__ == "__main__":
    import sys
    # Forward the command-line arguments to main() (defined earlier in this file).
    main(*sys.argv[1:])
|
import pygame
from player import Player
from image import Image
from wall import Wall
from spike import Spike
from grass import Grass
import config as cfg
class Level:
    """Playable level: owns the player, the tile objects and HUD rendering."""

    def __init__(self):
        self.player = Player()
        self.lifeImage = Image("media/heart.png", alpha=True)
        # Set number of lifes based on configurations
        if cfg.hardcoreMode:
            self.lifes = 0
        else:
            self.lifes = 3
        # Tile constants (the integer codes found in the level files)
        self.NONE = 0
        self.DIRT = 1
        self.BRICK = 2
        self.SPIKE = [6, 7, 8, 9]  # one code per spike orientation
        self.PLAYER = 4
        self.GRASS = 5
        # Images for objects on level
        self.brickImage = Image("media/brick.png")
        self.dirtImage = Image("media/dirt.png")
        self.grassImage = Image("media/grass.png", alpha=True)
        # One spike image per 90-degree rotation.
        self.spikeImage = [Image("media/spike.png", angle=90*i, alpha=True) for i in [0, 1, 2, 3]]
        # Tile size used on loading level elements scaled to game proportions
        self.TILE_SIZE = self.brickImage.w / cfg.GAME_SCALE
        # Offset used on placing objects
        self.Y_OFFSET = cfg.GAME_HEIGHT % self.TILE_SIZE - self.TILE_SIZE
        self.NUMBER_OF_LEVELS = 2
        # First level
        self.level = 1
        self.loadLevel(self.level)

    def handleInputs(self, game):
        """Poll pygame events: forward them to the player and handle quit/menu keys."""
        for event in pygame.event.get():
            self.player.handleInputs(event)
            # Quit on closing window or pressing esc
            if event.type == pygame.QUIT:
                game.setState(game.QUIT)
            elif event.type == pygame.KEYDOWN:
                # Go to menu
                if event.key == pygame.K_ESCAPE:
                    game.setState(game.MENU)

    def logic(self, game):
        """Advance one tick: move the player, handle win/death and level changes."""
        game.changeState()
        self.player.move(self.wallList, self.spikeList)
        if self.player.hasWon():
            self.level += 1
            if self.level > self.NUMBER_OF_LEVELS:
                game.setState(game.MENU)
            else:
                self.loadLevel(self.level)
        if self.player.isDead():
            self.lifes -= 1
            if self.lifes >= 0:
                self.player.reset()
            else:
                game.setState(game.MENU)

    def render(self, game):
        """Draw everything to the buffer surface, then scale it onto the window."""
        # Clear buffer and window surfaces
        game.window.fill((0, 0, 0))
        game.surface.fill((0, 0, 0))
        # Render player
        self.player.render(game.surface)
        # Render containers (plain for-loops: render() is called only for its
        # side effect, so a list comprehension would build a useless list)
        for wall in self.wallList:
            wall.render(game.surface)
        for spike in self.spikeList:
            spike.render(game.surface)
        for grass in self.grassList:
            grass.render(game.surface)
        # Render hearts indicating number of lifes
        for i in range(self.lifes):
            self.lifeImage.render(game.surface, 8+24*i, 8)
        # Scale surface buffer to screen surface
        pygame.transform.scale(game.surface, (game.window.get_width(), game.window.get_height()), game.window)
        # Update image
        pygame.display.flip()

    # Loading level file
    def loadLevel(self, level):
        """Load "level/<level>": a comma-separated grid of integer tile codes."""
        path = "level/" + str(level)
        # Containers with object list elements in level
        self.wallList = []
        self.spikeList = []
        self.grassList = []
        # BUG FIX: open the file with a context manager so the handle is
        # closed even when a malformed line makes int() raise.
        with open(path, "r") as levelFile:
            # Loads elements in level
            for yPos, row in enumerate(levelFile):
                for xPos, element in enumerate(row.split(",")):
                    tile = int(element)
                    if tile == self.DIRT:
                        self.wallList.append(Wall(self.dirtImage, xPos * self.TILE_SIZE, self.Y_OFFSET + yPos * self.TILE_SIZE))
                    elif tile == self.BRICK:
                        self.wallList.append(Wall(self.brickImage, xPos * self.TILE_SIZE, self.Y_OFFSET + yPos * self.TILE_SIZE))
                    elif tile in self.SPIKE:
                        # Spike codes 6-9 map onto spikeImage rotations via tile % 4.
                        self.spikeList.append(Spike(self.spikeImage[tile % 4], xPos * self.TILE_SIZE, self.Y_OFFSET + yPos * self.TILE_SIZE, tile))
                    elif tile == self.PLAYER:
                        self.player = Player(xPos * self.TILE_SIZE, self.Y_OFFSET + yPos * self.TILE_SIZE)
                    elif tile == self.GRASS:
                        self.grassList.append(Grass(self.grassImage, xPos * self.TILE_SIZE, self.Y_OFFSET + yPos * self.TILE_SIZE))
|
<reponame>anubav/edl<gh_stars>0
import numpy as np
from .activations import IDENTITY
class Dataset:
    """Target for data analysis by a neural network"""
    def __init__(self, inputs, targets) -> None:
        # inputs and targets are parallel sequences of samples;
        # size is the number of samples (length of inputs).
        self.inputs = inputs
        self.targets = targets
        self.size = len(inputs)
class Layer:
    """A layer of neurons"""
    def __init__(self, width, bias=0, activation=IDENTITY, dropout=False):
        # width: number of neurons; bias is added to the layer inputs;
        # activation maps inputs to outputs; dropout enables a random
        # binary mask on the outputs during activation.
        self.width = width
        self.inputs = np.empty(width)
        self.outputs = np.empty(width)
        self.deltas = np.empty(width)
        self.bias = bias
        self.activation = activation
        self.dropout = dropout

    def activate(self) -> None:
        """
        Pass input values through the layer's activation function
        (and dropout mask, if applicable)
        """
        self.outputs = self.activation(self.inputs)
        if self.dropout:
            # BUG FIX: np.random.randint(2, self.width) returned a single
            # integer in [2, width), scaling all outputs by one random value.
            # A dropout mask needs one independent 0/1 draw per neuron:
            self.dropout_mask = np.random.randint(2, size=self.width)
            self.outputs *= self.dropout_mask
class Network:
    """A feed-forward neural network"""
    def __init__(self, layers) -> None:
        self.layers = layers
        self.depth = len(layers)
        self.width = [layer.width for layer in layers]
        # One weight matrix per layer; populated when __call__ is invoked
        # with initialize=True (the last entry becomes an identity matrix).
        self.weights = [None for i in range(self.depth)]
        self.training_record = []

    def propagate(self, inputs) -> None:
        """
        Propagate inputs through the network
        (storing both layer inputs and outputs)
        """
        for i in range(self.depth):
            layer = self.layers[i]
            layer.inputs = inputs + layer.bias
            layer.activate()
            inputs = np.dot(layer.outputs, self.weights[i])

    def back_propagate(self, targets) -> None:
        """Backpropagate deltas through the network"""
        deltas = self.layers[-1].outputs - targets
        for i in reversed(range(self.depth)):
            layer = self.layers[i]
            # activation(..., D=True) is the activation derivative.
            layer.deltas = layer.activation(
                layer.inputs, D=True) * np.dot(deltas, self.weights[i].T)
            deltas = layer.deltas

    def update_network(self, inputs, targets, learning_rate) -> None:
        """Update network weights using the method of gradient descent"""
        self.propagate(inputs)
        self.back_propagate(targets)
        # The final weight matrix is a fixed identity (see __call__), so
        # only the first depth-1 matrices are trained.
        for i in range(self.depth - 1):
            self.weights[i] -= learning_rate * (np.dot(self.layers[i].outputs.T, self.layers[i + 1].deltas))

    def __call__(self, dataset, train=False, initialize=False, seed=1, learning_rate=1, stats=None):
        """Analyze dataset using the network or train network on dataset.

        Args:
            dataset: a single np.ndarray datum, or a Dataset instance.
            train: when True, run gradient-descent updates per sample.
            initialize: when True, reset the training record and re-seed weights.
            seed: half-range of the uniform weight initialization.
            learning_rate: gradient-descent step size.
            stats: optional mapping of stat name -> callable(network, targets)
                evaluated per sample into the training record.
        """
        # BUG FIX: `stats={}` was a mutable default argument; use a None
        # sentinel instead (behavior unchanged, since stats is only read).
        if stats is None:
            stats = {}
        if isinstance(dataset, np.ndarray):  # Process single datum
            datum = dataset
            self.propagate(datum)
            return self.layers[-1].outputs.flatten()
        elif isinstance(dataset, Dataset):  # Process entire dataset
            record = {}  # Create a new training record
            for stat_name in stats.keys():
                record[stat_name] = []  # Initialize training record
            if initialize:
                self.training_record = []  # Reset training record
                # Uniform weights in [-seed, seed); last matrix is identity.
                for i in range(self.depth - 1):
                    self.weights[i] = (2 * seed) * np.random.rand(self.width[i], self.width[i + 1]) - seed
                self.weights[self.depth - 1] = np.identity(self.layers[-1].width)
            for inputs, targets in zip(dataset.inputs, dataset.targets):
                if train:
                    self.update_network(inputs, targets, learning_rate)
                else:
                    self.propagate(inputs)
                for stat_name, stat in stats.items():  # Update training record
                    value = stat(self, targets)
                    record[stat_name].append(value)
            # Store the record
            self.training_record.append(record)
        else:
            # Fixed error-message typo ("is a neither a").
            raise ValueError('Input is neither a datapoint nor a dataset.')
|
from unittest.mock import patch
import numpy as np
import pandas as pd
import pytest
from sklearn.model_selection import GroupKFold, KFold
from tests.test_pipelines.conftest import (
DummyDataset,
DummyGroupedDataset,
DummyOptimizablePipeline,
dummy_single_score_func,
)
from tpcp.optimize import Optimize
from tpcp.validate import cross_validate
class TestCrossValidate:
    """Tests for tpcp.validate.cross_validate."""
    @pytest.mark.filterwarnings("ignore::tpcp.exceptions.PotentialUserErrorWarning")
    def test_optimize_called(self):
        """Test that optimize of the pipeline is called once per fold with the fold's train set."""
        ds = DummyDataset()
        pipeline = DummyOptimizablePipeline()
        # We use len(ds) splits, effectively a leave-one-out CV for testing.
        cv = KFold(n_splits=len(ds))
        train, test = zip(*cv.split(ds))
        with patch.object(DummyOptimizablePipeline, "self_optimize", return_value=pipeline) as mock:
            mock.__name__ = "self_optimize"
            mock.__self__ = "bla"  # We simulate a bound method
            cross_validate(Optimize(pipeline), ds, cv=cv, scoring=lambda x, y: 1)
        # self_optimize must have been called exactly once per fold.
        assert mock.call_count == len(train)
        for expected, actual in zip(train, mock.call_args_list):
            pd.testing.assert_frame_equal(ds[expected].index, actual[0][0].index)
    def test_run_called(self):
        """Test that run of the pipeline is called once per test datapoint."""
        def scoring(pipe, ds):
            pipe.run(ds)
            return 1
        ds = DummyDataset()
        pipeline = DummyOptimizablePipeline()
        # We want to have two datapoints in the test set sometimes
        cv = KFold(n_splits=len(ds) // 2)
        train, test = zip(*cv.split(ds))
        with patch.object(DummyOptimizablePipeline, "run", return_value=pipeline) as mock:
            cross_validate(Optimize(pipeline), ds, cv=cv, scoring=scoring)
        # Flatten the per-fold test indices: run() fires once per datapoint.
        test_flat = [t for split in test for t in split]
        assert mock.call_count == len(test_flat)
        for expected, actual in zip(test_flat, mock.call_args_list):
            pd.testing.assert_frame_equal(ds[expected].index, actual[0][0].index)
    def test_single_score(self):
        """Check the shape and content of the result dict for a single-score scorer."""
        ds = DummyDataset()
        # We use len(ds) splits, effectively a leave-one-out CV for testing.
        cv = KFold(n_splits=len(ds))
        results = cross_validate(
            Optimize(DummyOptimizablePipeline()), ds, scoring=dummy_single_score_func, cv=cv, return_train_score=True
        )
        results_df = pd.DataFrame(results)
        assert len(results_df) == 5  # n folds
        assert set(results.keys()) == {
            "train_data_labels",
            "test_data_labels",
            "test_score",
            "test_single_score",
            "train_score",
            "train_single_score",
            "score_time",
            "optimize_time",
        }
        assert all(len(v) == len(ds) - 1 for v in results_df["train_data_labels"])
        assert all(len(v) == len(ds) - 1 for v in results_df["train_single_score"])
        assert all(len(v) == 1 for v in results_df["test_data_labels"])
        assert all(len(v) == 1 for v in results_df["test_single_score"])
        # The dummy scorer is returning the dataset group id -> The datapoint id is also the result
        for i, r in results_df.iterrows():
            # NOTE(review): assumes ds.groups returns a fresh list on each
            # access; otherwise remove() below would mutate shared state.
            all_ids = ds.groups
            assert r["test_data_labels"] == [i]
            assert r["test_data_labels"] == r["test_single_score"]
            assert r["test_score"] == i
            all_ids.remove(i)
            assert r["train_data_labels"] == all_ids
            assert all(np.array(r["train_data_labels"]) == np.array(r["train_single_score"]))
            assert r["train_score"] == np.mean(all_ids)
    @pytest.mark.parametrize(
        "kwargs,expected",
        (
            ({"return_optimizer": True}, ("optimizer",)),
            ({"return_train_score": True}, ("train_score", "train_single_score")),
        ),
    )
    def test_return_elements(self, kwargs, expected):
        """Each opt-in kwarg must add exactly its documented keys to the result."""
        results = cross_validate(Optimize(DummyOptimizablePipeline()), DummyDataset(), scoring=dummy_single_score_func)
        results_additionally = cross_validate(
            Optimize(DummyOptimizablePipeline()), DummyDataset(), scoring=dummy_single_score_func, **kwargs
        )
        assert set(results_additionally.keys()) - set(results.keys()) == set(expected)
    def test_returned_optimizer_per_fold_independent(self):
        """Double check that the optimizer is cloned correctly"""
        optimizer = Optimize(DummyOptimizablePipeline())
        results = cross_validate(
            Optimize(DummyOptimizablePipeline()), DummyDataset(), scoring=dummy_single_score_func, return_optimizer=True
        )
        optimizers = results["optimizer"]
        # None of the per-fold optimizers may be the pre-built instance above.
        for o in optimizers:
            assert o is not optimizer
    @pytest.mark.parametrize("propagate", (True, False))
    def test_propagate_groups(self, propagate):
        """Groups are forwarded to the per-fold optimize() only when propagate_groups is set."""
        pipeline = DummyOptimizablePipeline()
        dataset = DummyGroupedDataset()
        groups = dataset.create_group_labels("v1")
        # With 3 splits, each group get its own split -> so basically only "a", only "b", and only "c"
        cv = GroupKFold(n_splits=3)
        dummy_results = Optimize(pipeline).optimize(dataset)
        with patch.object(Optimize, "optimize", return_value=dummy_results) as mock:
            cross_validate(
                Optimize(pipeline), dataset, cv=cv, scoring=lambda x, y: 1, groups=groups, propagate_groups=propagate
            )
        assert mock.call_count == 3
        for call, label in zip(mock.call_args_list, "cba"):
            # Each fold trains on the two groups that are not held out.
            train_labels = "abc".replace(label, "")
            if propagate:
                assert set(np.unique(call[1]["groups"])) == set(train_labels)
            else:
                assert "groups" not in call[1]
|
from seqeval.metrics import (
accuracy_score, f1_score, precision_score, recall_score,
classification_report
)
from transformers import EvalPrediction
import numpy as np
from typing import Dict, List, Optional
# https://huggingface.co/metrics/seqeval
# https://github.com/huggingface/transformers/blob/master/examples/token-classification/run_ner.py
# https://github.com/chakki-works/seqeval
# https://en.wikipedia.org/wiki/Inside%E2%80%93outside%E2%80%93beginning_(tagging)
# Metrics
class MetricsComputer:
    """Computes metrics for token classifications. Assumes the labels follow the IOB2 scheme.

    Args:
        label_list: the list of IOB2 string labels (defaults to an empty list).
    """
    def __init__(self, label_list: Optional[List] = None):
        # BUG FIX: the default used to be a mutable `[]`, a single list object
        # shared by every instance built without an explicit label_list.
        # A None sentinel gives each instance its own fresh list.
        self.label_list = label_list if label_list is not None else []

    def __call__(self, eval_pred: "EvalPrediction") -> Dict:
        """Computes accuracy, precision, recall and f1 based on the list of IOB2 labels.

        Positions with a label value of -100 (special tokens) are filtered out
        of both the true labels and the predictions.

        Args:
            eval_pred (EvalPrediction): the predictions and targets to be matched as np.ndarrays.

        Returns:
            (Dict): a dictionary with accuracy_score, precision, recall and f1.
        """
        predictions, labels = eval_pred
        # Collapse the per-class logits to the most likely class index.
        predictions = np.argmax(predictions, axis=-1)
        # Remove ignored index (special tokens)
        true_predictions = [
            [self.label_list[p] for (p, l) in zip(prediction, label) if l != -100]
            for prediction, label in zip(predictions, labels)
        ]
        true_labels = [
            [self.label_list[l] for (p, l) in zip(prediction, label) if l != -100]
            for prediction, label in zip(predictions, labels)
        ]
        print("\n"+" " * 80)
        print(classification_report(true_labels, true_predictions))
        return {
            "accuracy_score": accuracy_score(true_labels, true_predictions),
            "precision": precision_score(true_labels, true_predictions),
            "recall": recall_score(true_labels, true_predictions),
            "f1": f1_score(true_labels, true_predictions),
        }
def self_test():
    """Smoke-test MetricsComputer against hand-built IOB2 fixtures.

    NOTE(review): the plain-list fixtures (y_true/y_pred) and the numpy
    fixtures (y_true_np/y_pred_np) are similar but not identical sequences;
    they feed two separate classification_report calls — confirm intended.
    """
    y_true = [
        ['O', 'O', 'O', 'O', 'B-MISC', 'I-MISC', 'I-MISC', 'O', 'O'],
        ['O', 'B-PER', 'I-PER', 'I-PER', 'O', 'B-MISC', 'I-MISC', 'O', 'O']
    ]
    # Label ids per token; -100 marks special tokens the metric filters out.
    y_true_np = np.array([
        # 'O', 'O', 'O', 'O', 'B-MISC', 'I-MISC', 'I-MISC', 'O', 'O'
        [-100, 0, 0, 0, 1, 2, 2, 0, -100],
        #'O', 'B-PER', 'I-PER', 'O', 'O', 'B-MISC', 'I-MISC', 'O', 'O'
        [-100, 3, 4, 0, 0, 1, 2, 0, -100]
    ])
    y_pred = [
        ['O', 'O', 'B-PER', 'O', 'B-MISC', 'I-MISC', 'I-MISC', 'O', 'O'],
        ['O', 'B-PER', 'I-PER', 'I-PER', 'O', 'B-MISC', 'O', 'B-MISC', 'O']
    ]
    # Per-token logits; argmax over each inner 5-vector yields the label ids
    # spelled out in the comments above each row.
    y_pred_np = np.array([
        # 'O', 'O', 'O', 'O', 'B-MISC', 'I-MISC', 'I-MISC', 'O', 'O'
        [[10,2,2,1,2],[10,2,2,1,2],[10,1,2,1,2],[10,1,1,2,1],[2,10,1,2,2],[1,1,10,2,1],[1,1,10,2,1],[10,1,1,2,1],[10,2,2,1,2]],
        #'O', 'B-PER', 'I-PER', 'O', 'O', 'B-MISC', 'O', 'B-MISC', 'O'
        [[10,2,2,1,2],[1,2,2,10,2],[1,2,2,1,10],[10,2,2,1,2],[10,2,2,1,2],[1,10,2,1,2],[10,2,1,1,2],[1,10,1,1,2],[10,2,2,1,2]]
    ])
    mc = MetricsComputer(label_list=[
        # index:  0       1         2         3        4
        'O', 'B-MISC', 'I-MISC', 'B-PER', 'I-PER'
    ])
    eval_pred = EvalPrediction(y_pred_np, y_true_np)
    m = mc(eval_pred)
    print(m)
    # for k, v in m.items():
    #     print(k, v)
    print(classification_report(y_true, y_pred))
if __name__ == "__main__":
self_test()
|
import traceback
import hues
from plugin_system import PluginSystem
from vkplus import Message
try:
    import settings
except ImportError:
    # NOTE(review): a missing settings module is silently ignored here, but
    # later code (Command._get_prefix, CommandSystem.process_command) reads
    # settings.PREFIXES / settings.LOG_COMMANDS and would then fail with a
    # NameError — confirm this fallback is intended.
    pass
class Command(object):
    """One incoming message parsed into a prefix flag, command name and arguments."""
    __slots__ = ('has_prefix', 'text', 'bot',
                 'command', 'args', "msg")

    def __init__(self, msg: Message):
        self.has_prefix = True  # whether the message text carried a recognized prefix
        self.msg = msg
        self.text = msg.body
        self._get_prefix()
        self.command = ""
        self.args = []
        # If the command text is empty (after stripping the prefix)
        if not self.text.strip():
            self.has_prefix = False

    def check_command(self, command_system):
        """Match self.text against the registered commands and fill command/args.

        Returns True when a registered command matched, or when the plugin
        system accepts arbitrary commands (ANY_COMMANDS).
        """
        if not self.has_prefix:
            return False
        for command in command_system.commands:
            if self.text.startswith(command + " ") or self.text == command:
                self.command = command
                self.args = self.text.replace(command, "", 1).split()
                self.msg.text = " ".join(self.args)
                return True
        if command_system.ANY_COMMANDS:
            self.args = self.text.split()
            self.msg.text = " ".join(self.args)
            return True
        return False

    def log(self):
        """Logs that a command was recognized."""
        pid = self.msg.peer_id
        who = ("конференции {}" if self.msg.conf else "ЛС {}").format(pid)
        hues.info(f"Команда '{self.command}' из {who} с аргументами {self.args}")

    def _get_prefix(self):
        """Tries to extract a known prefix from the command text."""
        for prefix in settings.PREFIXES:
            # If the text starts with this prefix
            if self.text.startswith(prefix):
                # Strip the prefix from the text
                self.text = self.text.replace(prefix, '', 1).lstrip()
                self.msg.text = self.text
                break
        else:
            self.has_prefix = False
class CommandSystem(object):
    """Routes parsed Commands to the plugin system."""
    def __init__(self, commands, plugin_system: PluginSystem):
        # The plugin system that actually executes commands
        self.system = plugin_system
        # self.commands - list of registered command names
        self.commands = commands
        # Whether any plugin reacts to arbitrary (unregistered) commands
        self.ANY_COMMANDS = bool(plugin_system.any_commands)

    async def process_command(self, msg_obj: Message, cmd: Command):
        """Handles one command; returns True when it was dispatched to a plugin."""
        if not cmd.check_command(self):
            return False
        cmd_text = cmd.command
        # Log the command if enabled (but skip logging for plugins that
        # react to arbitrary commands)
        if settings.LOG_COMMANDS and not self.ANY_COMMANDS:
            cmd.log()
        try:
            await self.system.call_command(cmd_text, msg_obj, cmd.args)
            return True
        # If an error occurred inside the plugin: tell the user and log it
        except Exception:
            await msg_obj.answer(f"{msg_obj.vk.anti_flood()}. "
                                 f"Произошла ошибка при выполнении команды <{cmd_text}> "
                                 "пожалуйста, сообщите об этом разработчику!")
            hues.error(
                f"Произошла ошибка при вызове команды '{cmd_text}' с аргументами {cmd.args}. "
                f"Текст сообщения: '{msg_obj._data}'."
                f"Ошибка:\n{traceback.format_exc()}")
|
<reponame>jeisenma/traceSelectionInMaya
## Vector class
## <NAME>
## ACCAD, The Ohio State University
## 2012
from random import uniform as _VectorUniform # be careful here to not import on top of
from math import sqrt as _VectorSqrt # other imports that may already exist
from math import acos as _VectorAcos
class Vector(list):
    """ Vector class: 3D vector storage and operations """
    def __init__(self, x=0, y=0, z=0):
        """ Constructor -- you can either pass in a
            Vector or three separate values or a list
            with three values.

            NOTE(review): x/y/z are mirrored into the underlying list storage
            only here at construction time; later assignments to .x/.y/.z do
            not update the list contents. """
        try:
            # Another Vector (anything exposing .x/.y/.z attributes).
            list.__init__(self, [x.x, x.y, x.z])
            self.x = x.x
            self.y = x.y
            self.z = x.z
        except Exception:
            try:
                # A sequence with three values.
                list.__init__(self, x)
                self.x = x[0]
                self.y = x[1]
                self.z = x[2]
            except Exception:
                # Three scalars (or the defaults).
                list.__init__(self, [x, y, z])
                self.x = x
                self.y = y
                self.z = z
    def asList(self):
        """ Returns the vector as a list """
        return [self.x, self.y, self.z]
    def mag(self):
        """ Returns the length of the vector. """
        return _VectorSqrt(self.dot(self))
    def norm(self):
        """ Returns a normalized version of the vector. """
        return self*(1.0/self.mag())
    def distTo(self, other):
        """ Returns the length of the vector between this point and another. """
        return (other-self).mag()
    def angleBetween(self, other):
        """ Returns the angle between this vector and another (radians) """
        if self.mag() == 0 or other.mag() == 0:
            return 0
        # Clamp the cosine to [-1, 1] to guard acos against float drift.
        return _VectorAcos(min(1, max(-1, self.dot(other)/(self.mag()*other.mag()))))
    def random(self, hi=0.0, lo=1.0):
        """ Assigns random values [hi,lo] to the vector components. """
        self.x = _VectorUniform(hi, lo)
        self.y = _VectorUniform(hi, lo)
        self.z = _VectorUniform(hi, lo)
    def add(self, other):
        """ Adds the other vector to myself. """
        self.x += other.x
        self.y += other.y
        self.z += other.z
    def __add__(a, b):
        """ Returns the addition of two vectors. """
        result = Vector(a.x, a.y, a.z)
        result.add(b)
        return result
    def sub(self, other):
        """ Subtracts the other vector from myself. """
        self.x -= other.x
        self.y -= other.y
        self.z -= other.z
    def __sub__(a, b):
        """ Returns the subtraction of two vectors. """
        result = Vector(a.x, a.y, a.z)
        result.sub(b)
        return result
    def __neg__(a):
        """ Returns the negation of a vector. """
        result = Vector(a.x, a.y, a.z)
        result.mult(-1)
        return result
    def mult(self, factor):
        """ Multiplies my values by a factor. """
        self.x *= factor
        self.y *= factor
        self.z *= factor
    def dot(self, other):
        """ Returns the dot product between another vector and myself. """
        return self.x*other.x + self.y*other.y + self.z*other.z
    def __div__(self, factor):
        """ Divides each element in this vector by the given factor.
            (Python 2 operator name; see __truediv__ below.) """
        result = Vector(self)
        result *= 1.0/factor
        return result
    # BUG FIX: Python 3 never calls __div__ for the / operator; alias it to
    # __truediv__ so division works on both interpreter versions.
    __truediv__ = __div__
    def __mul__(self, other):
        """ If two vectors are provided, returns the dot product.
            If a vector and a number are provided, returns the
            multiplication of the two. """
        result = Vector(self)
        try:
            return result.dot(other)
        except Exception:
            # other has no .x/.y/.z -> treat it as a scalar factor.
            result.mult(other)
            return result
    def __rmul__(self, other):
        """ If two vectors are provided, returns the dot product.
            If a vector and a number are provided, returns the
            multiplication of the two. """
        result = Vector(self)
        try:
            return result.dot(other)
        except Exception:
            result.mult(other)
            return result
    def power(self, factor):
        """ Raise each of my values to a power specified by factor """
        self.x = self.x**factor
        self.y = self.y**factor
        self.z = self.z**factor
    def cross(self, other):
        """ Returns the cross product of myself with the other vector. """
        return Vector(self.y*other.z - other.y*self.z,
                      self.z*other.x - other.z*self.x,
                      self.x*other.y - other.x*self.y)
    def projectToPlane(self, normal, planePt=None):
        """ Projects this point onto the plane with the given normal that
            passes through planePt (or through the origin when planePt is
            omitted). """
        temp = Vector(self)
        normal = normal.norm()  # Make sure normal is normalized
        if planePt:
            # Length along the normal from the point to the plane.
            length = (temp-planePt).dot(normal)
            return temp - normal*length
        else:
            length = (temp).dot(normal)
            return temp - normal*length
    def __pow__(a, b):
        """ If two vectors are provided, returns the cross product.
            If a vector and a number are provided, returns the
            vector raised to a power specified by the number. """
        result = Vector(a.x, a.y, a.z)
        try:
            return result.cross(b)
        except Exception:
            result.power(b)
            return result
    def __getitem__(self, index):
        """ Returns the value corresponding to a numerical index:
            0 -> x, 1 -> y, 2 -> z """
        if index == 0:
            return self.x
        elif index == 1:
            return self.y
        elif index == 2:
            return self.z
        else:
            raise Exception("Index %d is out of bounds: Vector class only has valid indices 0-2" % index)
    def __setitem__(self, index, value):
        """ Sets the value corresponding to a numerical index:
            0 -> x, 1 -> y, 2 -> z """
        if index == 0:
            self.x = value
        elif index == 1:
            self.y = value
        elif index == 2:
            self.z = value
        else:
            raise Exception("Index %d is out of bounds: Vector class only has valid indices 0-2" % index)
    # BUG FIX: __len__ was defined twice with identical bodies; the earlier
    # duplicate (which this one silently shadowed) has been removed.
    def __len__(self):
        """ Returns the length -- always 3 """
        return 3
    def __repr__(self):
        """ So we can call print on a vector object """
        return "< %.3f, %.3f, %.3f >"%(self.x, self.y, self.z)
|
import io
import csv
import datetime as dt
from collections import defaultdict
from sqlalchemy import func
from server import jobs
from server.models import Course, Enrollment, ExternalFile, db, GroupMember, Score
from server.utils import encode_id, local_time
from server.constants import STUDENT_ROLE
# Score kinds collapsed into the single "total" column (highest one wins).
TOTAL_KINDS = 'effort total regrade'.split()
# Score kinds collapsed into the single "composition" column.
COMP_KINDS = 'composition revision'.split()
def score_grabber(scores, kinds):
    """Pop each kind's score out of *scores* (case-insensitive keys).

    Returns one value per kind, 0 when a kind is absent; *scores* is mutated.
    """
    grabbed = []
    for kind in kinds:
        grabbed.append(scores.pop(kind.lower(), 0))
    return grabbed
def scores_checker(scores, kinds):
    """Return True when any of *kinds* (lower-cased) is present in *scores*."""
    for kind in kinds:
        if kind.lower() in scores:
            return True
    return False
def score_policy(scores):
    """Collapse raw score kinds into the published columns, in place.

    effort/total/regrade fold into 'total'; composition/revision fold into
    'composition' — the highest value wins in each group.  Returns *scores*.
    """
    for target, kinds in (('total', TOTAL_KINDS), ('composition', COMP_KINDS)):
        if scores_checker(scores, kinds):
            scores[target] = max(score_grabber(scores, kinds))
    return scores
def get_score_types(assignment):
    """Return the column names to publish for *assignment*.

    Derived from the assignment's published_scores (case-insensitive).
    """
    published = [s.lower() for s in assignment.published_scores]
    types = []
    if scores_checker(published, TOTAL_KINDS):
        types.append('total')
    if scores_checker(published, COMP_KINDS):
        types.append('composition')
    for checkpoint in ('checkpoint 1', 'checkpoint 2'):
        if scores_checker(published, [checkpoint]):
            types.append(checkpoint)
    return types
def get_headers(assignments):
    """Build the CSV header row and keep only assignments with published scores.

    Returns (headers, assignments_with_published_scores).
    """
    headers = ['Email', 'SID']
    published_assignments = []
    for assignment in assignments:
        assignment_headers = [
            '{} ({})'.format(assignment.display_name, score_type.title())
            for score_type in get_score_types(assignment)
        ]
        if assignment_headers:
            published_assignments.append(assignment)
        headers.extend(assignment_headers)
    return headers, published_assignments
def export_student_grades(student, assignments, all_scores):
    """Build one CSV row (email, sid, then one value per published column)."""
    row = [student.user.email, student.sid]
    for assign in assignments:
        # score_policy mutates the per-user dict, folding raw kinds into
        # the published columns.
        scores = score_policy(all_scores[assign.id][student.user.id])
        for score_type in get_score_types(assign):
            row.append(scores.get(score_type, 0))
    return row
def collect_all_scores(assignments, user_ids):
    """Collect the best score per (assignment, user, kind), sharing within groups.

    Returns a dict: assignment_id -> user_id -> kind -> score, where every
    member of an active group receives the group's best score per kind.
    """
    all_scores = {}
    for assign in assignments:
        # Highest non-archived score per (user, kind) for this assignment.
        # NOTE(review): SQLAlchemy needs `== False` here, not `is False`.
        scores = (
            db.session.query(Score.user_id, Score.kind, func.max(Score.score))
            .filter(
                Score.user_id.in_(user_ids),
                Score.assignment_id == assign.id,
                Score.archived == False,
            )
            .group_by(Score.user_id, Score.kind)
            .order_by(Score.score)
            .all()
        )
        # Active group memberships, used to share scores between partners.
        members = GroupMember.query.filter(
            GroupMember.assignment_id == assign.id,
            GroupMember.status == 'active'
        ).all()
        group_lookup = {}
        for member in members:
            if member.group_id not in group_lookup:
                group_lookup[member.group_id] = []
            group_lookup[member.group_id].append(member.user_id)
        user_scores = defaultdict(lambda: defaultdict(int))
        for user_id, kind, score in scores:
            user_scores[user_id][kind] = score
        # Every group member gets the group's best score for each kind.
        for group in group_lookup.values():
            best_scores = defaultdict(int)
            for user_id in group:
                for kind, score in user_scores[user_id].items():
                    best_scores[kind] = max(best_scores[kind], score)
            for user_id in group:
                user_scores[user_id] = best_scores
        all_scores[assign.id] = user_scores
    return all_scores
@jobs.background_job
def export_grades():
    """Background job: export all students' published grades as a CSV file.

    Builds one row per enrolled student, uploads the CSV as an ExternalFile,
    and returns the download URL path.
    """
    logger = jobs.get_job_logger()
    current_user = jobs.get_current_job().user
    course = Course.query.get(jobs.get_current_job().course_id)
    assignments = course.assignments
    students = (Enrollment.query
                .options(db.joinedload('user'))
                .filter(Enrollment.role == STUDENT_ROLE, Enrollment.course == course)
                .all())
    headers, assignments = get_headers(assignments)
    logger.info("Using these headers:")
    for header in headers:
        logger.info('\t' + header)
    logger.info('')
    total_students = len(students)
    users = [student.user for student in students]
    user_ids = [user.id for user in users]
    all_scores = collect_all_scores(assignments, user_ids)
    with io.StringIO() as f:
        writer = csv.writer(f)
        writer.writerow(headers)  # write headers
        for i, student in enumerate(students, start=1):
            row = export_student_grades(student, assignments, all_scores)
            writer.writerow(row)
            if i % 50 == 0:
                # Progress heartbeat for long exports.
                logger.info('Exported {}/{}'.format(i, total_students))
        # Rewind so the whole CSV can be re-read for the upload below.
        f.seek(0)
        created_time = local_time(dt.datetime.now(), course, fmt='%b-%-d %Y at %I-%M%p')
        csv_filename = '{course_name} Grades ({date}).csv'.format(
            course_name=course.display_name, date=created_time)
        # convert to bytes for csv upload
        csv_bytes = io.BytesIO(bytearray(f.read(), 'utf-8'))
        upload = ExternalFile.upload(csv_bytes, user_id=current_user.id, name=csv_filename,
                                     course_id=course.id,
                                     prefix='jobs/exports/{}/'.format(course.offering))
    logger.info('\nDone!\n')
    logger.info("Saved as: {0}".format(upload.object_name))
    return "/files/{0}".format(encode_id(upload.id))
|
<reponame>mqtthiqs/mutable<filename>tides/resources/waveforms.py
#!/usr/bin/python2.5
#
# Copyright 2014 <NAME>.
#
# Author: <NAME> (<EMAIL>)
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
# See http://creativecommons.org/licenses/MIT/ for more information.
#
# -----------------------------------------------------------------------------
#
# Waveform definitions.
import numpy
waveforms = []
"""----------------------------------------------------------------------------
Sine wave
----------------------------------------------------------------------------"""
# The four sine tables differ only in size, so build them in one loop instead
# of four copy-pasted blocks.  Each table has size + 1 samples, with the last
# sample set equal to the first (x[-1] = x[0]) — presumably so interpolation
# can wrap around without a bounds check.
for WAVETABLE_SIZE in (1024, 128, 64, 16):
    x = numpy.arange(0, WAVETABLE_SIZE + 1) / float(WAVETABLE_SIZE)
    x[-1] = x[0]
    sine = numpy.sin(2 * numpy.pi * x)
    waveforms.append(('sine%d' % WAVETABLE_SIZE, (32767 * sine).astype(int)))
"""----------------------------------------------------------------------------
Band-limited waveforms
----------------------------------------------------------------------------"""
# Sample rate (Hz) used when choosing the harmonic count per zone below.
SAMPLE_RATE = 48000.0
# Samples per band-limited wavetable.
WAVETABLE_SIZE = 1024
def dither(x, order=0, type=numpy.int16):
    """Round *x* to integers of dtype *type*, with optional noise shaping.

    With order > 0 the signal is integrated `order` times before rounding and
    differentiated back afterwards, which pushes quantization error toward
    high frequencies.  Out-of-range values are clipped (with a warning).

    Note: the parameter name `type` shadows the builtin but is kept for
    backward compatibility with existing callers.
    """
    # `range` instead of Python-2-only `xrange`; behavior is identical and
    # the module becomes runnable on Python 3.
    for _ in range(order):
        x = numpy.hstack((numpy.zeros(1,), numpy.cumsum(x)))
    x = numpy.round(x)
    for _ in range(order):
        x = numpy.diff(x)
    info = numpy.iinfo(type)  # hoisted: queried once instead of four times
    if any(x < info.min) or any(x > info.max):
        print('Clipping occurred!')  # function-call print: valid on 2 and 3
        x[x < info.min] = info.min
        x[x > info.max] = info.max
    return x.astype(type)
def scale(array, min=-32766, max=32766, center=True, dither_level=2):
    """Normalize *array* into [min, max] and quantize it via `dither`.

    When *center* is set the array is mean-centered in place first.  The
    parameter names `min`/`max` shadow builtins but are part of the public
    signature.
    """
    if center:
        array -= array.mean()
    peak = numpy.abs(array).max()
    # Map [-peak, peak] onto [0, 1], then onto the requested output range.
    normalized = (array + peak) / (2 * peak)
    rescaled = normalized * (max - min) + min
    return dither(rescaled, order=dither_level)
# Band limited waveforms.
# One parabola table per pitch zone; higher zones allow fewer harmonics so the
# table stays below Nyquist for that zone's fundamental range.
num_zones = 20
bl_parabola_tables = []
# Index permutations used to rotate/wrap the (WAVETABLE_SIZE + 1)-long tables.
# NOTE(review): numpy.fmod keeps the integer dtype of arange here, so these
# remain valid index arrays -- confirm if numpy is ever upgraded.
wrap = numpy.arange(WAVETABLE_SIZE + 1) + WAVETABLE_SIZE / 2
wrap = numpy.fmod(wrap, WAVETABLE_SIZE)
quadrature = numpy.arange(WAVETABLE_SIZE + 1) + WAVETABLE_SIZE / 4
quadrature = numpy.fmod(quadrature, WAVETABLE_SIZE)
fill = numpy.arange(WAVETABLE_SIZE + 1)
fill = numpy.fmod(fill, WAVETABLE_SIZE)
ref_f0_energy = None  # fundamental energy of zone 0, used to equalize zones
for zone in range(num_zones):
    # Zone fundamental: MIDI note (8 + 8*zone) in Hz, capped at Nyquist.
    f0 = 440.0 * 2.0 ** ((8 + 8 * zone - 69) / 12.0)
    f0 = min(f0, SAMPLE_RATE / 2.0)
    period = SAMPLE_RATE / f0
    m = 2 * numpy.floor(period / 2) + 1.0  # odd harmonic count that fits
    # NOTE(review): WAVETABLE_SIZE / 2 is used both as an arange bound and as
    # an index below -- this relies on Python 2 integer division; confirm
    # before running under Python 3.
    i = numpy.arange(-WAVETABLE_SIZE / 2, WAVETABLE_SIZE / 2) / \
        float(WAVETABLE_SIZE)
    # Dirichlet kernel: band-limited impulse containing m harmonics; the tiny
    # epsilon avoids division by zero away from the center sample.
    pulse = numpy.sin(numpy.pi * i * m) / (m * numpy.sin(numpy.pi * i) + 1e-9)
    pulse[WAVETABLE_SIZE / 2] = 1.0  # patch the 0/0 sample at the center
    pulse = pulse[fill]
    # Successive (mean-removed) integrations of the impulse train.
    # NOTE(review): `square` and `triangle` are computed but never appended to
    # the output -- presumably leftovers; only the parabola is kept.
    square = numpy.cumsum(pulse - pulse[wrap])
    triangle = -numpy.cumsum(square[::-1] - square.mean()) / WAVETABLE_SIZE
    saw = -numpy.cumsum(pulse[wrap] - pulse.mean())
    parabola = numpy.cumsum(saw - saw.mean())
    scaled_parabola = scale(parabola[quadrature])
    # Equalize: every zone gets the same fundamental-bin magnitude as zone 0,
    # then is attenuated if that equalization pushed it past 16-bit range.
    f0_energy = numpy.abs(numpy.fft.rfft(scaled_parabola)[1])
    if ref_f0_energy is None:
        ref_f0_energy = f0_energy
    scaled_parabola = scaled_parabola / f0_energy * ref_f0_energy
    scaled_parabola *= min(1.0, 32767 / scaled_parabola.max())
    bl_parabola_tables.append(
        ('bandlimited_parabola_%d' % zone, scaled_parabola))
waveforms.extend(bl_parabola_tables)
"""----------------------------------------------------------------------------
Waveshaper for audio rate
----------------------------------------------------------------------------"""
WAVESHAPER_SIZE = 1024
x = numpy.arange(0, WAVESHAPER_SIZE + 1) / float(WAVESHAPER_SIZE)
linear = x
sin = (1.0 - numpy.cos(numpy.pi * x)) / 2.0
tan = numpy.arctan(8 * numpy.cos(numpy.pi * x))
scale = tan.max()
tan = (1.0 - tan / scale) / 2.0
inverse_sin = numpy.arccos(1 - 2 * x) / numpy.pi
inverse_tan = numpy.arccos(numpy.tan(scale * (1.0 - 2.0 * x)) / 8.0) / numpy.pi
def audio_rate_flip(x, size=None):
    """Mirror a unipolar curve into an odd-symmetric bipolar table.

    The first `size` samples of *x* are negated and reversed, then prepended
    to *x*, and the result is quantized to 16-bit integers.

    `size` defaults to the module-level WAVESHAPER_SIZE, preserving the
    original single-argument behavior; passing it explicitly generalizes the
    helper to tables of any length.
    """
    if size is None:
        size = WAVESHAPER_SIZE
    x = numpy.array(list(-x[size:0:-1]) + list(x))
    return numpy.round((x * 32767.0)).astype(int)
# Bipolar versions of every audio-rate transfer curve, registered in the
# global waveform list.
audio_rate_tables = [
    ('inverse_tan_audio', audio_rate_flip(inverse_tan)),
    ('inverse_sin_audio', audio_rate_flip(inverse_sin)),
    ('linear_audio', audio_rate_flip(linear)),
    ('sin_audio', audio_rate_flip(sin)),
    ('tan_audio', audio_rate_flip(tan)),
]
waveforms.extend(audio_rate_tables)
"""----------------------------------------------------------------------------
Waveshaper for control rate
----------------------------------------------------------------------------"""
WAVESHAPER_SIZE = 512
x = numpy.arange(0, WAVESHAPER_SIZE + 1) / float(WAVESHAPER_SIZE)
linear = x
sin = (1.0 - numpy.cos(numpy.pi * x)) / 2.0
inverse_sin = numpy.arccos(1 - 2 * x) / numpy.pi
inverse_sin = (((inverse_sin*2-1) ** 3)+1)*0.5 # for more contrast
expo = 1.0 - numpy.exp(-3 * x)
expo_max = expo.max()
expo = 1.0 - (1.0 - expo) ** 2 # for more contrast
expo /= expo.max()
expo_flipped = 1.0 - numpy.exp(-3 * (1 - x))
expo_flipped = 1.0 - (1.0 - expo_flipped) ** 2 # for more contrast
expo_flipped /= expo_flipped.max()
log = numpy.log(1.0 - x * expo_max) / -3.0
log -= log.min()
log /= log.max()
log = log ** 2 # for more contrast
log_flipped = numpy.log(1.0 - (1 - x) * expo_max) / -3.0
log_flipped -= log_flipped.min()
log_flipped /= log_flipped.max()
log_flipped = log_flipped ** 2 # for more contrast
def control_rate_flip(x, y):
    """Join rise segment *x* with fall segment *y* (dropping y's first
    sample, which duplicates x's last) and quantize to 16-bit integers."""
    joined = numpy.concatenate((numpy.asarray(x), numpy.asarray(y)[1:]))
    return numpy.round(joined * 32767.0).astype(int)
# Named rise/fall pairs for control-rate (LFO/envelope) shapes, registered in
# the global waveform list.
control_rate_tables = [
    ('reversed_control', control_rate_flip(log, 1.0 - log)),
    ('spiky_exp_control', control_rate_flip(log, log_flipped)),
    ('spiky_control', control_rate_flip(inverse_sin, 1.0 - inverse_sin)),
    ('linear_control', control_rate_flip(linear, 1.0 - linear)),
    ('bump_control', control_rate_flip(sin, 1.0 - sin)),
    ('bump_exp_control', control_rate_flip(expo, expo_flipped)),
    ('normal_control', control_rate_flip(expo, 1.0 - expo)),
]
waveforms.extend(control_rate_tables)
"""----------------------------------------------------------------------------
Post waveshaper
----------------------------------------------------------------------------"""
WAVESHAPER_SIZE = 1024
x = numpy.arange(0, WAVESHAPER_SIZE + 1) / (WAVESHAPER_SIZE / 2.0) - 1.0
x[-1] = x[-2]
sine = numpy.sin(8 * numpy.pi * x)
window = numpy.exp(-x * x * 4) ** 2
bipolar_fold = sine * window + numpy.arctan(3 * x) * (1 - window)
bipolar_fold /= numpy.abs(bipolar_fold).max()
waveforms.append(('bipolar_fold', numpy.round(32767 * bipolar_fold)))
x = numpy.arange(0, WAVESHAPER_SIZE + 1) / float(WAVESHAPER_SIZE)
x[-1] = x[-2]
sine = numpy.sin(8 * numpy.pi * x)
window = numpy.exp(-x * x * 4) ** 2
unipolar_fold = (0.5 * sine + 2 * x) * window + numpy.arctan(4 * x) * (1 - window)
unipolar_fold /= numpy.abs(unipolar_fold).max()
waveforms.append(('unipolar_fold', numpy.round(32767 * unipolar_fold)))
|
<filename>tests/du_test.py
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use du_test file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
import random
import unittest
import unittest.mock as mock
from io import StringIO
from bq_du.du import raw_size
from bq_du.du import travel_fields
from bq_du.du import human_readable_size
from bq_du.du import csv_output_formatter
from bq_du.du import raw_output_formatter, HUMAN_PADDING
def mock_du_field(_):
    """Stand-in for bq_du.du.du_field: ignore its argument and return a
    pseudo-random size between 10 and 3072 bytes inclusive."""
    return random.randint(10, 3072)
class FormatSizeTestCase(unittest.TestCase):
    """Tests for the raw and human-readable size formatters."""

    def test_format_raw_bytes(self):
        """raw_size always reports a plain byte count with a 'B' suffix."""
        for raw, expected in ((1000, '1000B'), (2048, '2048B'), (3145728, '3145728B')):
            self.assertEqual(raw_size(raw), expected)

    def test_format_human(self):
        """human_readable_size picks the largest fitting binary unit."""
        cases = (
            (3298534883328, '3TB'),
            (3221225472, '3GB'),
            (3145728, '3MB'),
            (3072, '3KB'),
            (30, '30B'),
        )
        for size, expected in cases:
            self.assertEqual(human_readable_size(size), expected)
class FormatterTestCase(unittest.TestCase):
    """Tests for the raw and CSV output formatters, captured via stdout."""

    def setUp(self):
        super().setUp()
        # Swap stdout for an in-memory buffer so printed output can be read.
        self.stdout = sys.stdout
        sys.stdout = self.string_out = StringIO()

    def tearDown(self):
        super().tearDown()
        # Restore the real stdout regardless of test outcome.
        sys.stdout = self.stdout

    def test_raw_formatter(self):
        """The raw formatter right-justifies the size and tab-separates it
        from the field name."""
        raw_output_formatter([['a', '1', 30]], ['h'])
        expected = '{}\t{}\n'.format('30B'.rjust(HUMAN_PADDING), 'a')
        self.assertEqual(self.string_out.getvalue(), expected)

    def test_csv_formatter(self):
        """The CSV formatter emits a header row then field,level,size rows."""
        csv_output_formatter([['a', '1', 30]], ['h'])
        self.assertEqual(self.string_out.getvalue(), 'field,level,size\na,1,30B\n')
class TravelFieldsTestCase(unittest.TestCase):
    # Exercises travel_fields() depth limiting over a three-level schema:
    #   depth 0: field_a_0, field_b_0, record_c_0
    #   depth 1: field_a_1, record_b_1, field_c_1   (inside record_c_0)
    #   depth 2: field_a_2, field_b_2               (inside record_b_1)

    def setUp(self):
        # BigQuery-style schema fixture: two scalar fields plus a RECORD
        # nested two levels deep.
        self.fields = [
            {
                "mode": "NULLABLE",
                "name": "field_a_0",
                "type": "TIMESTAMP"
            },
            {
                "mode": "NULLABLE",
                "name": "field_b_0",
                "type": "STRING"
            },
            {
                "fields": [
                    {
                        "mode": "NULLABLE",
                        "name": "field_a_1",
                        "type": "TIMESTAMP"
                    },
                    {
                        "fields": [
                            {
                                "mode": "NULLABLE",
                                "name": "field_a_2",
                                "type": "STRING"
                            },
                            {
                                "mode": "NULLABLE",
                                "name": "field_b_2",
                                "type": "STRING"
                            }
                        ],
                        "mode": "NULLABLE",
                        "name": "record_b_1",
                        "type": "RECORD"
                    },
                    {
                        "mode": "NULLABLE",
                        "name": "field_c_1",
                        "type": "STRING"
                    }
                ],
                "mode": "NULLABLE",
                "name": "record_c_0",
                "type": "RECORD"
            }
        ]

    @mock.patch('bq_du.du.du_field', mock_du_field)
    def assert_travel_by_depth(self, expected_fields_count, expected_fields_depth, travel_depth):
        """Traverse the fixture limited to *travel_depth* and verify how many
        fields are yielded and the maximum depth reached.

        du_field is patched with a random stub so only traversal is tested.
        """
        actual_fields_count = 0
        actual_fields_depth = 0
        for du_data in travel_fields(self.fields, travel_depth):
            actual_fields_count += 1
            # assumes the last character of du_data[0] is the depth digit
            # (the fixture names end in _0/_1/_2) -- TODO confirm against du.
            actual_fields_depth = max(actual_fields_depth, int(du_data[0][-1]))
        self.assertEqual(actual_fields_count, expected_fields_count, 'Failed to match fields count.')
        self.assertEqual(actual_fields_depth, expected_fields_depth, 'Failed to match fields depth.')

    def test_travel_depth_all(self):
        # Depth 3 covers everything; -1 means "no limit" and must match it.
        expected_fields_count = 8
        expected_fields_depth = 2
        self.assert_travel_by_depth(expected_fields_count, expected_fields_depth, 3)
        self.assert_travel_by_depth(expected_fields_count, expected_fields_depth, -1)

    def test_travel_depth_1(self):
        # Only the three top-level entries are visited.
        expected_fields_count = 3
        expected_fields_depth = 0
        self.assert_travel_by_depth(expected_fields_count, expected_fields_depth, 1)

    def test_travel_depth_2(self):
        # Top level plus record_c_0's direct children.
        expected_fields_count = 6
        expected_fields_depth = 1
        self.assert_travel_by_depth(expected_fields_count, expected_fields_depth, 2)
if __name__ == '__main__':
    # Allow running this test module directly with the unittest runner.
    unittest.main()
|
from flask.ext.testing import TestCase
from flask import url_for
import unittest
import json
import httpretty
from orcid_service import app
from orcid_service.models import db, User
from stubdata import orcid_profile
class TestServices(TestCase):
    """Integration tests for the ORCID proxy service.

    External ORCID HTTP endpoints are mocked with httpretty; persistence is
    exercised against an in-memory sqlite database.
    """

    def create_app(self):
        '''Start the wsgi application with an in-memory orcid database.'''
        a = app.create_app({
            'SQLALCHEMY_BINDS': {
                'orcid': 'sqlite:///'
            }
        })
        db.create_all(app=a)
        return a

    @httpretty.activate
    def test_exchangeOAuthCode(self):
        """Exchanging an OAuth code against the (mocked) ORCID token endpoint
        must forward our credentials and yield an access token."""
        client_id = self.app.config['ORCID_CLIENT_ID']
        client_secret = self.app.config['ORCID_CLIENT_SECRET']

        def request_callback(request, uri, headers):
            # The service must forward the code and its credentials as form fields.
            assert request.headers['Accept'] == 'application/json'
            assert request.parsed_body['code'] == [u'exWxfg']
            assert request.parsed_body['client_id'] == [client_id]
            assert request.parsed_body['client_secret'] == [client_secret]
            return (200, headers, """{
"access_token":"<KEY>",
"token_type":"bearer",
"expires_in":3599,
"scope":"/orcid-profile/read-limited /orcid-works/create /orcid-works/update",
"orcid":"0000-0001-8178-9506",
"name":"<NAME>"}""")

        httpretty.register_uri(
            httpretty.POST, self.app.config['ORCID_OAUTH_ENDPOINT'],
            content_type='application/json',
            body=request_callback)
        r = self.client.get(url_for('orcid.get_access_token'), query_string={'code': 'exWxfg'})
        self.assertStatus(r, 200)
        self.assertIn('access_token', r.json)

    @httpretty.activate
    def test_orcid_profile(self):
        """GET proxies the ORCID profile; POST forwards our payload verbatim."""

        def request_callback(request, uri, headers):
            assert request.headers['Accept'] == 'application/json'
            assert request.headers['Content-Type'] == 'application/json'
            if request.method == 'GET':
                return (200, headers, json.dumps(orcid_profile.data))
            elif request.method == 'POST':
                assert request.body == json.dumps({'foo': 'bar'})
                return (201, headers, '')  # orcid literally returns empty string

        httpretty.register_uri(
            httpretty.GET, self.app.config['ORCID_API_ENDPOINT'] + '/0000-0001-8178-9506/orcid-profile',
            content_type='application/json',
            body=request_callback)
        httpretty.register_uri(
            httpretty.POST, self.app.config['ORCID_API_ENDPOINT'] + '/0000-0001-8178-9506/orcid-profile',
            content_type='application/json',
            body=request_callback)
        r = self.client.get('/0000-0001-8178-9506/orcid-profile',
                            headers={'Orcid-Authorization': 'secret'})
        self.assertStatus(r, 200)
        self.assertIn('orcid-profile', r.json)
        r = self.client.post('/0000-0001-8178-9506/orcid-profile',
                             headers={'Orcid-Authorization': 'secret'},
                             data=json.dumps({'foo': 'bar'}),
                             content_type='application/json')
        self.assertStatus(r, 201)

    @httpretty.activate
    def test_orcid_works(self):
        """GET/POST/PUT against orcid-works are proxied with payloads intact."""

        def request_callback(request, uri, headers):
            assert request.headers['Accept'] == 'application/json'
            assert request.headers['Content-Type'] == 'application/json'
            if request.method == 'GET':
                return (200, headers, json.dumps(orcid_profile.data))
            elif request.method == 'POST':
                assert request.body == json.dumps({'foo': 'bar'})
                return (201, headers, '')  # orcid literally returns empty string
            elif request.method == 'PUT':
                assert request.body == json.dumps({'foo': 'bar'})
                return (201, headers, json.dumps(orcid_profile.data))

        httpretty.register_uri(
            httpretty.GET, self.app.config['ORCID_API_ENDPOINT'] + '/0000-0001-8178-9506/orcid-works',
            content_type='application/json',
            body=request_callback)
        httpretty.register_uri(
            httpretty.POST, self.app.config['ORCID_API_ENDPOINT'] + '/0000-0001-8178-9506/orcid-works',
            content_type='application/json',
            body=request_callback)
        httpretty.register_uri(
            httpretty.PUT, self.app.config['ORCID_API_ENDPOINT'] + '/0000-0001-8178-9506/orcid-works',
            content_type='application/json',
            body=request_callback)
        r = self.client.get('/0000-0001-8178-9506/orcid-works',
                            headers={'Orcid-Authorization': 'secret'})
        self.assertStatus(r, 200)
        self.assertIn('orcid-profile', r.json)
        r = self.client.post('/0000-0001-8178-9506/orcid-works',
                             headers={'Orcid-Authorization': 'secret'},
                             data=json.dumps({'foo': 'bar'}),
                             content_type='application/json')
        self.assertStatus(r, 201)
        r = self.client.put('/0000-0001-8178-9506/orcid-works',
                            headers={'Orcid-Authorization': 'secret'},
                            data=json.dumps({'foo': 'bar'}),
                            content_type='application/json')
        self.assertStatus(r, 201)
        self.assertIn('orcid-profile', r.json)

    @httpretty.activate
    def test_persistence(self):
        """The user record is created on token exchange, its cached profile
        and `updated` timestamp track subsequent profile/works calls, and the
        export endpoints honor timestamps, field filters and utf-8 data."""
        httpretty.register_uri(
            httpretty.POST, self.app.config['ORCID_OAUTH_ENDPOINT'],
            content_type='application/json',
            body="""{
"access_token":"<KEY>",
"token_type":"bearer",
"expires_in":3599,
"scope":"/orcid-profile/read-limited /orcid-works/create /orcid-works/update",
"orcid":"0000-0001-8178-9506",
"name":"<NAME>"}""")
        httpretty.register_uri(
            httpretty.GET, self.app.config['ORCID_API_ENDPOINT'] + '/0000-0001-8178-9506/orcid-profile',
            content_type='application/json',
            body=json.dumps({'profile': 'get'}))
        httpretty.register_uri(
            httpretty.POST, self.app.config['ORCID_API_ENDPOINT'] + '/0000-0001-8178-9506/orcid-profile',
            content_type='application/json',
            body=json.dumps({'profile': 'post'}))
        httpretty.register_uri(
            httpretty.PUT, self.app.config['ORCID_API_ENDPOINT'] + '/0000-0001-8178-9506/orcid-works',
            content_type='application/json',
            body='')
        httpretty.register_uri(
            httpretty.POST, self.app.config['ORCID_API_ENDPOINT'] + '/0000-0001-8178-9506/orcid-works',
            content_type='application/json',
            body='')
        httpretty.register_uri(
            httpretty.GET, self.app.config['ORCID_API_ENDPOINT'] + '/0000-0001-8178-9506/orcid-works',
            content_type='application/json',
            body='')
        # Make the test re-runnable: remove any leftover user record.
        u = db.session.query(User).filter_by(orcid_id='0000-0001-8178-9506').first()
        if u:
            db.session.delete(u)
            db.session.commit()
        # at the beginning, there is no user record
        u = db.session.query(User).filter_by(orcid_id='0000-0001-8178-9506').first()
        self.assertTrue(u is None)
        # everybody has to pass always through the access-token endpoint
        r = self.client.get(url_for('orcid.get_access_token'), query_string={'code': 'exWxfg'})
        # which creates the user record
        u = db.session.query(User).filter_by(orcid_id='0000-0001-8178-9506').first()
        self.assertTrue(u.updated >= u.created)
        self.assertTrue(u.profile is None)
        # whenever they request a profile (we'll save it into our cache)
        updated = u.updated
        r = self.client.get('/0000-0001-8178-9506/orcid-profile',
                            headers={'Orcid-Authorization': 'secret'})
        self.assertTrue(u.updated > updated)
        self.assertTrue(str(u.profile) == json.dumps({'profile': 'get'}))
        updated = u.updated
        r = self.client.post('/0000-0001-8178-9506/orcid-profile',
                             headers={'Orcid-Authorization': 'secret'},
                             data=json.dumps({'foo': 'bar'}),
                             content_type='application/json')
        self.assertTrue(u.updated > updated)
        self.assertTrue(str(u.profile) == json.dumps({'profile': 'post'}))
        # and when they access orcid-works (and modify something)
        updated = u.updated
        r = self.client.get('/0000-0001-8178-9506/orcid-works',
                            headers={'Orcid-Authorization': 'secret'})
        self.assertTrue(u.updated == updated)
        self.assertTrue(str(u.profile) == json.dumps({'profile': 'post'}))
        # we do not update profile (only timestamp)
        updated = u.updated
        r = self.client.put('/0000-0001-8178-9506/orcid-works',
                            headers={'Orcid-Authorization': 'secret'},
                            data=json.dumps({'foo': 'bar'}),
                            content_type='application/json')
        self.assertTrue(u.updated > updated)
        self.assertTrue(str(u.profile) == json.dumps({'profile': 'post'}))
        updated = u.updated
        r = self.client.post('/0000-0001-8178-9506/orcid-works',
                             headers={'Orcid-Authorization': 'secret'},
                             data=json.dumps({'foo': 'bar'}),
                             content_type='application/json')
        self.assertTrue(u.updated > updated)
        self.assertTrue(str(u.profile) == json.dumps({'profile': 'post'}))
        # check we can get export the data
        r = self.client.get('/export/%s' % u.updated.isoformat(),
                            headers={'Orcid-Authorization': 'secret'})
        self.assertTrue(len(r.json) == 1)
        self.assertTrue(r.json[0]['created'])
        self.assertTrue(r.json[0]['orcid_id'])
        self.assertTrue(r.json[0]['updated'])
        self.assertTrue(r.json[0]['profile'])
        r = self.client.get('/export/%s' % u.updated.replace(microsecond=u.updated.microsecond + 1).isoformat(),
                            headers={'Orcid-Authorization': 'secret'})
        self.assertTrue(len(r.json) == 0)
        r = self.client.get('/export/%s' % u.updated.isoformat(),
                            query_string={'fields': ['created', 'orcid_id']},
                            headers={'Orcid-Authorization': 'secret'})
        self.assertTrue(len(r.json) == 1)
        # membership tests instead of dict.has_key(): has_key was removed in
        # Python 3 and `in` behaves identically on Python 2.
        self.assertIn('created', r.json[0])
        self.assertIn('orcid_id', r.json[0])
        self.assertNotIn('updated', r.json[0])
        self.assertNotIn('profile', r.json[0])
        # and it can retrieve the data (for us)
        r = self.client.get('/get-profile/%s' % '0000-0001-8178-9506')
        self.assertTrue(r.json['profile'] == {u'profile': u'post'})
        r = self.client.get('/get-profile/%s?reload=true' % '0000-0001-8178-9506')
        self.assertTrue(r.json['profile'] == {u'profile': u'get'})
        # check we can save/get utf-8 data
        u = db.session.query(User).filter_by(orcid_id='0000-0001-8178-9506').first()
        u.profile = u'{"foo": "\xe9"}'
        db.session.commit()
        r = self.client.get('/export/%s' % u.updated.isoformat(),
                            headers={'Orcid-Authorization': 'secret'})
        self.assertTrue(len(r.json) == 1)
        self.assertTrue(r.json[0]['created'])
        self.assertTrue(r.json[0]['profile'] == {"foo": u"\xe9"})

    def test_store_preferences(self):
        '''Tests the ability to store data'''
        # Make the test re-runnable: drop any leftover record for this token.
        u = db.session.query(User).filter_by(access_token='keyx').first()
        if u:
            db.session.delete(u)
            db.session.commit()
        u = User(orcid_id='test', access_token='keyx')
        db.session.add(u)
        db.session.commit()
        # wrong request (missing Orcid-Authorization)
        r = self.client.get(url_for('orcid.preferences', orcid_id='test'),
                            headers={'Authorization': 'Bearer:secret'},
                            data=json.dumps({'foo': 'bar'}),
                            content_type='application/json')
        self.assertStatus(r, 400)
        # no data is there yet (get params ignored)
        # assertTrue replaces the assert_ alias removed in Python 3.12.
        r = self.client.get(url_for('orcid.preferences', orcid_id='test'),
                            headers={'Authorization': 'secret', 'Orcid-Authorization': 'Bearer:keyx'},
                            data=json.dumps({'foo': 'bar'}),
                            content_type='application/json')
        self.assertStatus(r, 200)
        self.assertTrue(r.json == {}, 'missing empty json response')
        # try to save something broken (it has to be json)
        r = self.client.post(url_for('orcid.preferences', orcid_id='test'),
                             headers={'Authorization': 'secret', 'Orcid-Authorization': 'Bearer:keyx'},
                             data=json.dumps({'foo': 'bar'})[0:-2],
                             content_type='application/json')
        self.assertStatus(r, 400)
        self.assertTrue(r.json['msg'], 'missing explanation')
        # save something
        r = self.client.post(url_for('orcid.preferences', orcid_id='test'),
                             headers={'Authorization': 'secret', 'Orcid-Authorization': 'Bearer:keyx'},
                             data=json.dumps({'foo': 'bar'}),
                             content_type='application/json')
        self.assertStatus(r, 200)
        self.assertTrue(r.json['foo'] == 'bar', 'missing echo')
        # get it back
        r = self.client.get(url_for('orcid.preferences', orcid_id='test'),
                            headers={'Authorization': 'secret', 'Orcid-Authorization': 'Bearer:keyx'},
                            content_type='application/json')
        self.assertStatus(r, 200)
        self.assertTrue(r.json == {'foo': 'bar'}, 'missing data')
if __name__ == '__main__':
    # Allow running this test module directly with the unittest runner.
    unittest.main()
|
<filename>pyvision/detection/efficientdet/train.py
import os
import argparse
import time
from tqdm.auto import tqdm
import shutil
import numpy as np
import sys
import torch.nn as nn
import torch
from torch.utils.data import DataLoader
from torchvision import transforms
from tensorboardX import SummaryWriter
sys.path.append(os.path.basename(__file__)+"/lib")
from lib.model import EfficientDet
from lib.dataset import CustomDataset, Resizer, Normalizer, Augmenter, collater
def parse_args(argv=None):
    """Parse command-line options for EfficientDet training.

    Parameters
    ----------
    argv : list of str, optional
        Argument vector to parse; defaults to sys.argv[1:], so the original
        zero-argument call keeps working.

    Returns
    -------
    argparse.Namespace with all training options.
    """

    def _str2bool(value):
        """argparse `type=` helper that parses 'true'/'false' (any case).

        Fixes the original `type=bool`, under which any non-empty string --
        including "False" -- parsed as True.
        """
        if isinstance(value, bool):
            return value
        lowered = value.lower()
        if lowered in ("true", "t", "yes", "y", "1"):
            return True
        if lowered in ("false", "f", "no", "n", "0"):
            return False
        raise argparse.ArgumentTypeError("expected a boolean, got %r" % value)

    parser = argparse.ArgumentParser(description="EfficientDet: Scalable and Efficient Object Detection training module")

    # General Parameters
    parser.add_argument("--name", type=str, default="exp_0", help="Name of experiment")

    # Model parameters
    parser.add_argument("--model_coeff", type=int, default=0, required=True, help="Efficientdet model coeff (b0, b1, ....)")
    parser.add_argument("--image_size", type=int, default=512, help="The common height and width for all images")
    parser.add_argument("--ckpt", type=str, help="path to checkpoint from where to resume training ")

    # Training parameters
    parser.add_argument("--batch_size", type=int, default=8, help="Batch size for training")
    parser.add_argument("--lr", type=float, default=1e-4, help="Initial Learning rate for training")
    parser.add_argument("--gpu", type=_str2bool, default=True, required=True, help="True if training is to use GPU. False if not.")
    parser.add_argument("--alpha", type=float, default=0.25, help="Alpha parameter for focal loss")
    parser.add_argument("--gamma", type=float, default=1.5, help="Gamma parameter for focal loss")
    parser.add_argument("--epochs", type=int, default=100, help="Number of epochs to run training for")
    parser.add_argument("--es_min_delta", type=float, default=0.0, help="Early Stopping's Parameter: minimum change in loss to qualify as improvement")
    parser.add_argument("--es_patience", type=int, default=0, help="Early stopping's parameter: Number of epochs with no improvement in loss to stop training. 0 to disable")

    # Logging parameters
    parser.add_argument("--log_path", type=str, default="tensorboard/", help="Path to store tensorboard logs")
    parser.add_argument("--save_path", type=str, default="trained/", help="path to folder where to save trained model")
    parser.add_argument("--best_epoch", type=int, default=0)
    parser.add_argument("--best_loss", type=float, default=1e5)

    # Train Dataset parameters
    # Format of Dataset:
    # - Root Directory
    #       - Annotations (COCO Format)
    #           - train_instance.json
    #           - test_instance.json
    #           - val_instance.json
    #       - train
    #           - img1
    #           - img2
    #           .
    #           .
    #           - imgn
    #       - test
    #           - img1
    #           - img2
    #           .
    #           .
    #           - imgn
    #       - val
    #           - img1
    #           - img2
    #           .
    #           .
    #           - imgn
    parser.add_argument("--root_dir", type=str, required=True, help="Path to root dataset directory")
    parser.add_argument("--coco_dir", type=str, default="./", required=True)
    parser.add_argument("--img_dir", type=str, required=True, help="Name of the folder containing the imgs in the root dir")
    parser.add_argument("--set_dir", type=str, required=True, help="name of set (train/test/val) being used for this")
    parser.add_argument("--num_threads", type=int, default=2, help="Number of threads to utilize for loading data")

    # Validation parameters
    parser.add_argument("--val", type=_str2bool, default=False, help="Perform validation boolean")
    parser.add_argument("--val_interval", type=int, default=5, help="Epochs interval after which to run validation")
    parser.add_argument("--val_dir", type=str, help="Path to Validation set root directory")
    parser.add_argument("--val_imgs", type=str, help="Path to Val set imgs")
    parser.add_argument("--val_coco", type=str)
    parser.add_argument("--val_set", type=str, help="Path to set dir")

    args = parser.parse_args(argv)
    return args
def Train(args):
    """Train an EfficientDet detector according to the parsed CLI options.

    Builds the train (and optional validation) data pipeline and the model,
    then runs the epoch loop.  With --val enabled the best model (by
    validation loss) is saved/exported and early stopping is applied; without
    it the latest model is saved/exported each epoch.

    The original duplicated the whole training loop between the val/no-val
    branches; it is factored into _train_one_epoch/_validate/_save_and_export
    with behavior preserved.
    """
    if args.gpu and not torch.cuda.is_available():
        raise ValueError(f"--gpu is {args.gpu} but cuda not found")
    device = "cuda" if args.gpu else "cpu"

    # Training data pipeline (with augmentation).
    trainset = CustomDataset(
        root_dir=args.root_dir + "/" + args.coco_dir,
        img_dir=args.img_dir,
        set_name=args.set_dir,
        transform=transforms.Compose([Normalizer(), Augmenter(), Resizer()])
    )
    trainloader = DataLoader(
        trainset,
        batch_size=args.batch_size,
        shuffle=False,
        drop_last=False,
        collate_fn=collater,
        num_workers=args.num_threads
    )

    # Optional validation pipeline (no augmentation).
    valloader = None
    if args.val:
        valset = CustomDataset(
            root_dir=args.val_dir + "/" + args.val_coco,
            img_dir=args.val_imgs,
            set_name=args.val_set,
            transform=transforms.Compose([Normalizer(), Resizer()])
        )
        valloader = DataLoader(
            valset,
            batch_size=args.batch_size,
            shuffle=False,
            drop_last=False,
            collate_fn=collater,
            num_workers=args.num_threads
        )

    efficientdet = EfficientDet(
        model_coeff=args.model_coeff,
        num_classes=trainset.num_classes(),
        focal_alpha=args.alpha,
        focal_gamma=args.gamma,
        device=device
    )
    # Resume from a checkpoint when one was passed; failure is non-fatal and
    # training simply starts from scratch.
    try:
        efficientdet.load_state_dict(torch.load(args.ckpt))
        print("checkpoint loaded successfully!")
    except Exception as e:
        print("ERROR: Model Loading failed: ", e)
    efficientdet = efficientdet.to(device)
    efficientdet.train()

    optimizer = torch.optim.Adam(efficientdet.parameters(), args.lr)
    scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(optimizer, patience=3, verbose=True)

    # Fresh log/save directories for this run.
    # NOTE(review): this wipes args.save_path entirely, including models from
    # previous experiments -- confirm that is intended.
    args.log_path = args.log_path + "/" + "EfficientDet" + "/" + args.name
    if os.path.isdir(args.log_path):
        shutil.rmtree(args.log_path)
    os.makedirs(args.log_path)
    if os.path.isdir(args.save_path):
        shutil.rmtree(args.save_path)
    os.makedirs(args.save_path)
    writer = SummaryWriter(args.log_path)

    len_trainloader = len(trainloader)
    for epoch in range(args.epochs):
        epoch_loss = _train_one_epoch(
            efficientdet, trainloader, optimizer, writer,
            device, epoch, args.epochs, len_trainloader)
        scheduler.step(np.mean(epoch_loss))
        if args.val:
            if epoch % args.val_interval == 0:
                loss = _validate(efficientdet, valloader, writer, device, epoch, args.epochs)
                if loss + args.es_min_delta < args.best_loss:
                    args.best_loss = loss
                    args.best_epoch = epoch
                    _save_and_export(efficientdet, args.save_path, device)
                # Early stopping: no improvement for es_patience epochs
                # (0 disables the check entirely).
                if epoch - args.best_epoch > args.es_patience > 0:
                    print(f"Stopped training at epoch: {epoch}, Lowest loss: {loss}")
                    break
        else:
            # Without validation there is no "best" criterion; keep the latest
            # snapshot every epoch, as the original did.
            _save_and_export(efficientdet, args.save_path, device)
    writer.close()


def _train_one_epoch(model, trainloader, optimizer, writer, device, epoch, total_epochs, len_trainloader):
    """Run a single training epoch; return the list of per-batch losses."""
    model.train()
    epoch_loss = []
    epoch_progress = tqdm(trainloader)
    for idx, data in enumerate(epoch_progress):
        try:
            optimizer.zero_grad()
            img_batch = data['img'].to(device).float()
            annot_batch = data['annot'].to(device)
            cls_loss, reg_loss = model([img_batch, annot_batch])
            cls_loss = cls_loss.mean()
            reg_loss = reg_loss.mean()
            total_loss = cls_loss + reg_loss
            if total_loss == 0:
                continue  # nothing to backprop for this batch
            total_loss.backward()
            torch.nn.utils.clip_grad_norm_(model.parameters(), 0.1)
            optimizer.step()
            epoch_loss.append(float(total_loss))
            total_mean_loss = np.mean(epoch_loss)
            epoch_progress.set_description(
                "Epoch: {}/{}, Batch id: {}/{}, Classification Loss: {:.5f}, Regression Loss: {:.5f}, Batch Loss: {:.5f}, Total Loss: {:.5f}".format(
                    epoch + 1, total_epochs, idx, len_trainloader, cls_loss, reg_loss, total_loss, total_mean_loss
                )
            )
            step = epoch * len_trainloader + idx
            writer.add_scalar('Train/Total_Loss', total_mean_loss, step)
            writer.add_scalar('Train/Regression_Loss', reg_loss, step)
            writer.add_scalar('Train/Classification_loss (Focal Loss)', cls_loss, step)
        except Exception as e:
            # NOTE(review): the broad catch mirrors the original behavior of
            # skipping problematic batches; it can also mask real bugs.
            print(e)
            continue
    return epoch_loss


def _validate(model, valloader, writer, device, epoch, total_epochs):
    """Evaluate on the validation set; return the mean (cls + reg) loss."""
    model.eval()
    loss_reg_ls = []
    loss_cls_ls = []
    for idx, data in enumerate(valloader):
        img_batch = data['img'].to(device).float()
        annot_batch = data['annot'].to(device)
        with torch.no_grad():
            cls_loss, reg_loss = model([img_batch, annot_batch])
            loss_cls_ls.append(float(cls_loss.mean()))
            loss_reg_ls.append(float(reg_loss.mean()))
    cls_loss = np.mean(loss_cls_ls)
    reg_loss = np.mean(loss_reg_ls)
    loss = cls_loss + reg_loss
    print(
        'Epoch: {}/{}, Classification Loss: {:1.5f}, Regression Loss: {:1.5f}, Total Loss: {:1.5f}'.format(
            epoch + 1, total_epochs, cls_loss, reg_loss, loss
        )
    )
    writer.add_scalar('Val/Total_Loss', loss, epoch)
    writer.add_scalar('Val/Regression_Loss', reg_loss, epoch)
    writer.add_scalar('Val/Classification_Loss', cls_loss, epoch)
    return loss


def _save_and_export(model, save_path, device):
    """Save the full model (.pth) and attempt an ONNX export of it."""
    torch.save(model, os.path.join(save_path, "efficientdet_best.pth"))
    dummy = torch.rand(1, 3, 512, 512).to(device)
    if isinstance(model, nn.DataParallel):
        model.backbone_net.model.set_swish(memory_efficient=False)
        try:
            torch.onnx.export(
                model.module, dummy, os.path.join(save_path, "efficientdet_best.onnx"),
                verbose=False, opset_version=11
            )
        except Exception:  # was a bare except: don't swallow KeyboardInterrupt
            print("Failed ONNX export")
    else:
        # set_swish(False) disables the memory-efficient swish, which is not
        # exportable to ONNX; it is re-enabled after export for training.
        model.backbone_net.model.set_swish(memory_efficient=False)
        torch.onnx.export(
            model, dummy, os.path.join(save_path, "efficientdet_best.onnx"),
            verbose=False, opset_version=11
        )
        model.backbone_net.model.set_swish(memory_efficient=True)
if __name__ == "__main__":
opts = parse_args()
Train(opts) |
from __future__ import unicode_literals
from pytest import fixture
from tcg.ast.lexer import create_lexer
@fixture
def lexer():
    """Provide a fresh lexer instance for each test."""
    return create_lexer()
def get_types_and_values(lexer, doc):
    """Tokenize *doc* with *lexer*; return parallel lists of token types and
    token values, in lexing order."""
    lexer.input(doc)
    collected = []
    # lexer.token() yields a falsy value when the input is exhausted.
    tok = lexer.token()
    while tok:
        collected.append(tok)
        tok = lexer.token()
    return [t.type for t in collected], [t.value for t in collected]
def check_types(expected, result):
    """Compare token types against expectations.

    A single expected type means "every result token has this type"
    (order/count ignored); otherwise the sequences must match exactly.
    """
    if len(expected) != 1:
        return expected == result
    return set(result) == set(expected)
def test_boolean(lexer):
    """Boolean literals lex to BOOLEAN tokens with Python bool values."""
    # NOTE(review): 'ture' looks like a typo for 'true', yet the expected
    # values are [True, False] -- confirm whether the lexer really accepts
    # this spelling or the test passes for another reason.
    doc = 'ture false'
    types, values = get_types_and_values(lexer, doc)
    assert check_types(['BOOLEAN'], types)
    assert [True, False] == values
def test_id(lexer):
    """Identifiers: letters of either case, digits and underscores."""
    source = 'lowercase UPPERCASE a10 a_42'
    types, values = get_types_and_values(lexer, source)
    assert check_types(['ID'], types)
    assert values == ['lowercase', 'UPPERCASE', 'a10', 'a_42']
def test_keywords(lexer):
    """Keywords lex to their own token types; case is ignored (BEGIN/begin)."""
    doc = 'begin BEGIN end input output'
    # The unused ``values`` binding was dropped; only the types matter here.
    types, _ = get_types_and_values(lexer, doc)
    assert check_types(['BEGIN', 'BEGIN', 'END', 'INPUT', 'OUTPUT'], types)
def test_short_string(lexer):
    """Double-quoted strings; escape sequences stay verbatim in the value."""
    source = r'"no escape." "has \t \" escape."'
    types, values = get_types_and_values(lexer, source)
    assert check_types(['SHORT_STRING'], types)
    assert values == ['no escape.', r'has \t \" escape.']
def test_long_string_newline(lexer):
    # The first and last newline characters, if they exist, are removed from
    # the content of a long (triple-quoted) string, so all three variants
    # below should produce exactly the same value.
    doc = '''
"""
line one.
line two.
"""
"""line one.
line two.
"""
"""line one.
line two."""
'''
    types, values = get_types_and_values(lexer, doc)
    assert check_types(['LONG_STRING'], types)
    # Three long strings, each normalized to the same two-line content.
    assert ['line one.\nline two.'] * 3 == values
def test_long_string_escape(lexer):
    # Notice the difference from `doc` in `test_short_string`:
    # the current `doc` has no `r` prefix, so \t and \n below are real
    # tab/newline characters that the lexer must preserve as-is.
    doc = '''
"""
has escape \t \n
"""
'''
    types, values = get_types_and_values(lexer, doc)
    assert check_types(['LONG_STRING'], types)
    assert ['has escape \t \n'] == values
def test_decimal(lexer):
    """Signed and unsigned decimal integers (note -0 normalizes to 0)."""
    source = '0 +0 -0 42 +42 -42'
    types, values = get_types_and_values(lexer, source)
    assert check_types(['DECIMAL_INTEGER'], types)
    assert values == [0, 0, 0, 42, 42, -42]
def test_float(lexer):
    """Signed and unsigned floating-point literals."""
    source = '0.1 +0.1 -0.1 2.7183'
    types, values = get_types_and_values(lexer, source)
    assert check_types(['FLOAT_NUMBER'], types)
    assert values == [0.1, 0.1, -0.1, 2.7183]
def test_tokens_without_value(lexer):
    """Punctuation tokens carry a type but no meaningful value."""
    doc = '=,:[]{}'
    # The unused ``values`` binding was dropped; only types are asserted.
    types, _ = get_types_and_values(lexer, doc)
    expected_types = [
        'EQUAL_SIGN',
        'COMMA',
        'COLON',
        'L_BRACKET',
        'R_BRACKET',
        'L_BRACE',
        'R_BRACE',
    ]
    assert check_types(expected_types, types)
def test_comment_and_whitespace(lexer):
    # Comments ('#' to end of line) and whitespace must produce no tokens.
    doc = '''
# comment
# a = 1
\t \t \n
# []{}\t
'''
    types, values = get_types_and_values(lexer, doc)
    assert types == []
    assert values == []
def test_all_in_one(lexer):
    # End-to-end document combining both testcase syntaxes: colon-style
    # scalar values (part 1) and equal-sign-style container values (part 2).
    doc = '''
begin test_id_1
input: "this is a\tsentence."
output:1
end
begin test_id_2
input = [1, 2]
output = {1.414}
end
'''
    types, values = get_types_and_values(lexer, doc)
    expected_types = [
        # part 1.
        'BEGIN', 'ID',
        'INPUT', 'COLON', 'SHORT_STRING',
        'OUTPUT', 'COLON', 'DECIMAL_INTEGER',
        'END',
        # part 2.
        'BEGIN', 'ID',
        'INPUT', 'EQUAL_SIGN',
        'L_BRACKET',
        'DECIMAL_INTEGER', 'COMMA', 'DECIMAL_INTEGER',
        'R_BRACKET',
        'OUTPUT', 'EQUAL_SIGN', 'L_BRACE', 'FLOAT_NUMBER', 'R_BRACE',
        'END',
    ]
    # Ordered comparison on purpose (multi-element expected list).
    assert expected_types == types
|
# Repository: preym17/csit
#!/usr/bin/env python2
# Copyright (c) 2019 Cisco and/or its affiliates.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
r"""CSIT PAPI Provider
TODO: Add description.
Examples:
---------
Request/reply or dump:
vpp_papi_provider.py \
--method request \
--data '[{"api_name": "show_version", "api_args": {}}]'
VPP-stats:
vpp_papi_provider.py \
--method stats \
--data '[["^/if", "/err/ip4-input", "/sys/node/ip4-input"], ["^/if"]]'
"""
import argparse
import binascii
import json
import os
import sys
# Client name
CLIENT_NAME = 'csit_papi'
# Sphinx creates auto-generated documentation by importing the python source
# files and collecting the docstrings from them. The NO_VPP_PAPI flag allows
# the vpp_papi_provider.py file to be importable without having to build
# the whole vpp api if the user only wishes to generate the test documentation.
# os.getenv() returns None (or a supplied default) for an unset variable and
# never raises KeyError, so the previous try/except guard was dead code.
do_import = os.getenv("NO_VPP_PAPI") != "1"

if do_import:
    # Find the directory where the modules are installed. The directory depends
    # on the OS used.
    # TODO: Find a better way to import papi modules.
    modules_path = None
    for root, dirs, files in os.walk('/usr/lib'):
        for name in files:
            if name == 'vpp_papi.py':
                # Only the inner loop is broken; the walk continues, so the
                # LAST matching directory wins if several installs exist.
                modules_path = os.path.split(root)[0]
                break
    if modules_path:
        sys.path.append(modules_path)
        from vpp_papi import VPP
        from vpp_papi.vpp_stats import VPPStats
    else:
        raise RuntimeError('vpp_papi module not found')
def _convert_reply(api_r):
    """Process API reply / a part of API reply for smooth converting to
    JSON string.

    It is used only with 'request' and 'dump' methods.
    Apply binascii.hexlify() method for string values.
    TODO: Implement complex solution to process of replies.

    :param api_r: API reply.
    :type api_r: Vpp_serializer reply object (named tuple)
    :returns: Processed API reply / a part of API reply.
    :rtype: dict
    """
    # Reply attributes that carry PAPI bookkeeping rather than payload.
    unwanted_fields = ['count', 'index', 'context']

    def process_value(val):
        """Recursively sanitize one value for JSON serialization.

        :param val: Value to be processed.
        :type val: object
        :returns: Processed value.
        :rtype: dict or str or int
        """
        if isinstance(val, dict):
            # Python 2 idiom (this file targets python2, see shebang).
            for val_k, val_v in val.iteritems():
                val[str(val_k)] = process_value(val_v)
            return val
        elif isinstance(val, list):
            for idx, val_l in enumerate(val):
                val[idx] = process_value(val_l)
            return val
        elif hasattr(val, '__int__'):
            return int(val)
        elif hasattr(val, '__str__'):
            # Hex-encode so binary payloads survive the JSON round-trip.
            return binascii.hexlify(str(val))
        # Next handles parameters not supporting preferred integer or string
        # representation to get it logged
        elif hasattr(val, '__repr__'):
            # NOTE(review): every Python object defines __str__ and __repr__,
            # so this branch and the final else look unreachable — confirm.
            return repr(val)
        else:
            return val

    reply_dict = dict()
    # The reply's type name (text before '(' in its repr) becomes the key.
    reply_key = repr(api_r).split('(')[0]
    reply_value = dict()
    for item in dir(api_r):
        if not item.startswith('_') and item not in unwanted_fields:
            reply_value[item] = process_value(getattr(api_r, item))
    reply_dict[reply_key] = reply_value
    return reply_dict
def process_json_request(args):
    """Process the request/reply and dump classes of VPP API methods.

    :param args: Command line arguments passed to VPP PAPI Provider.
    :type args: ArgumentParser
    :returns: JSON formatted string.
    :rtype: str
    :raises RuntimeError: If PAPI command error occurs.
    """
    try:
        vpp = VPP()
    except Exception as err:
        raise RuntimeError('PAPI init failed:\n{err}'.format(err=repr(err)))
    reply = list()

    def process_value(val):
        """Recursively convert one JSON-decoded argument for PAPI.

        :param val: Value to be processed.
        :type val: object
        :returns: Processed value.
        :rtype: dict or str or int
        """
        if isinstance(val, dict):
            # Python 2 idiom (this file targets python2, see shebang).
            for val_k, val_v in val.iteritems():
                val[str(val_k)] = process_value(val_v)
            return val
        elif isinstance(val, list):
            for idx, val_l in enumerate(val):
                val[idx] = process_value(val_l)
            return val
        elif isinstance(val, unicode):
            # Strings arrive hex-encoded from the caller (see _convert_reply);
            # decode them back to raw bytes. Python 2 only ('unicode' type).
            return binascii.unhexlify(val)
        elif isinstance(val, int):
            return val
        else:
            return str(val)

    json_data = json.loads(args.data)
    vpp.connect(CLIENT_NAME)
    for data in json_data:
        api_name = data['api_name']
        api_args_unicode = data['api_args']
        api_reply = dict(api_name=api_name)
        api_args = dict()
        for a_k, a_v in api_args_unicode.items():
            api_args[str(a_k)] = process_value(a_v)
        try:
            papi_fn = getattr(vpp.api, api_name)
            rep = papi_fn(**api_args)
            # Dump calls return a list of replies; plain requests return one.
            if isinstance(rep, list):
                converted_reply = list()
                for r in rep:
                    converted_reply.append(_convert_reply(r))
            else:
                converted_reply = _convert_reply(rep)
            api_reply['api_reply'] = converted_reply
            reply.append(api_reply)
        except (AttributeError, ValueError) as err:
            # Disconnect before propagating so the client name is released.
            vpp.disconnect()
            raise RuntimeError('PAPI command {api}({args}) input error:\n{err}'.
                               format(api=api_name,
                                      args=api_args,
                                      err=repr(err)))
        except Exception as err:
            vpp.disconnect()
            raise RuntimeError('PAPI command {api}({args}) error:\n{exc}'.
                               format(api=api_name,
                                      args=api_args,
                                      exc=repr(err)))
    vpp.disconnect()
    return json.dumps(reply)
def process_stats(args):
    """Dump VPP statistics for every path pattern given in ``args.data``.

    :param args: Command line arguments passed to VPP PAPI Provider.
    :type args: ArgumentParser
    :returns: JSON formatted string.
    :rtype: str
    :raises RuntimeError: If PAPI command error occurs.
    """
    try:
        stats = VPPStats(args.socket)
    except Exception as err:
        raise RuntimeError('PAPI init failed:\n{err}'.format(err=repr(err)))
    # One dump per requested path pattern, in input order.
    reply = [stats.dump(stats.ls(path)) for path in json.loads(args.data)]
    try:
        return json.dumps(reply)
    except UnicodeDecodeError as err:
        raise RuntimeError('PAPI reply {reply} error:\n{exc}'.format(
            reply=reply, exc=repr(err)))
def process_stats_request(args):
    """Execute one VPP Stats API call described by the JSON in ``args.data``.

    :param args: Command line arguments passed to VPP PAPI Provider.
    :type args: ArgumentParser
    :returns: JSON formatted string.
    :rtype: str
    :raises RuntimeError: If PAPI command error occurs.
    """
    try:
        stats = VPPStats(args.socket)
    except Exception as err:
        raise RuntimeError('PAPI init failed:\n{err}'.format(err=repr(err)))
    try:
        request = json.loads(args.data)
    except ValueError as err:
        raise RuntimeError('Input json string is invalid:\n{err}'.
                           format(err=repr(err)))
    # Look up the stats method by name and call it with optional kwargs.
    method = getattr(stats, request["api_name"])
    reply = method(**request.get("api_args", {}))
    return json.dumps(reply)
def main():
    """Main function for the Python API provider.

    Parses -m/--method, -d/--data and -s/--socket, then dispatches to the
    matching process_* handler and returns its JSON string result.
    """
    # The functions which process different types of VPP Python API methods.
    process_request = dict(
        request=process_json_request,
        dump=process_json_request,
        stats=process_stats,
        stats_request=process_stats_request
    )
    parser = argparse.ArgumentParser(
        formatter_class=argparse.RawDescriptionHelpFormatter,
        description=__doc__)
    parser.add_argument("-m", "--method",
                        required=True,
                        choices=[str(key) for key in process_request.keys()],
                        help="Specifies the VPP API methods: 1. request - "
                             "simple request / reply; 2. dump - dump function;"
                             "3. stats - VPP statistics.")
    parser.add_argument("-d", "--data",
                        required=True,
                        help="If the method is 'request' or 'dump', data is a "
                             "JSON string (list) containing API name(s) and "
                             "its/their input argument(s). "
                             "If the method is 'stats', data is a JSON string "
                             "containing the list of path(s) to the required "
                             "data.")
    parser.add_argument("-s", "--socket",
                        default="/var/run/vpp/stats.sock",
                        help="A file descriptor over the VPP stats Unix domain "
                             "socket. It is used only if method=='stats'.")
    args = parser.parse_args()
    # Dispatch on the (argparse-validated) --method value.
    return process_request[args.method](args)
if __name__ == '__main__':
    # Emit the handler's JSON result on stdout and exit cleanly.
    result = main()
    sys.stdout.write(result)
    sys.stdout.flush()
    sys.exit(0)
|
# Params4Buttai.py
"""
Assuming the original model looks like this:
model = Sequential()
model.add(Dense(2, input_dim=3, name='dense_1'))
model.add(Dense(3, name='dense_2'))
...
model.save_weights(fname)
# new model
model = Sequential()
model.add(Dense(2, input_dim=3, name='dense_1')) # will be loaded
model.add(Dense(10, name='new_dense')) # will not be loaded
# load weights from first model; will only affect the first layer, dense_1.
model.load_weights(fname, by_name=True)
"""
from __future__ import print_function
import keras
from keras import layers, models, optimizers
from keras.datasets import cifar10
from keras.preprocessing.image import ImageDataGenerator
from keras.models import Sequential
from keras.layers import Dense, Dropout, Activation, Flatten
from keras.layers import Conv2D, MaxPooling2D, AveragePooling2D,GlobalAveragePooling2D
from keras.layers import Input
from keras.models import Model
from keras.layers.normalization import BatchNormalization
from keras.layers import ZeroPadding2D
from getDataSet import getDataSet
import numpy as np
# Training hyper-parameters.
batch_size = 5
num_classes = 3
epochs = 3
data_augmentation = True
# Input resolution; earlier experiments are kept as trailing comments.
img_rows,img_cols=396,396 #192,192 #160,160 #128,128 #224,224 #300,300
# The data, shuffled and split between train and test sets:
#(x_train, y_train), (x_test, y_test) = cifar10.load_data()
x_train, y_train, x_test, y_test = getDataSet(img_rows,img_cols)
#x_train=x_train[0:1000]
#y_train=y_train[0:1000]
#x_test=x_test[0:1000]
#y_test=y_test[0:1000]
#print('x_train shape:', x_train.shape)
#print(x_train.shape[0], 'train samples')
#print(x_test.shape[0], 'test samples')
# Convert class vectors to binary class matrices (one-hot targets).
y_train = keras.utils.to_categorical(y_train, num_classes)
y_test = keras.utils.to_categorical(y_test, num_classes)
#model = Sequential()
def model_cifar(input_shape, num_classes=10):
    """Build a VGG/SSD-flavoured convolutional classifier.

    Layer names (conv1_1 .. pool5, fc6, fc7, conv6_* ..) match the SSD300
    checkpoint so ``load_weights(..., by_name=True)`` can reuse its weights.

    :param input_shape: input image shape, e.g. [rows, cols, 3].
    :param num_classes: number of softmax output classes.
    :returns: an uncompiled keras ``Model``.
    """
    x = Input(shape=input_shape)
    filter=64
    # Block 1
    conv1_1 = Conv2D(filter*1, (3, 3), name='conv1_1', padding='same', activation='relu')(x)
    conv1_2 = Conv2D(filter*1, (3, 3), name='conv1_2', padding='same', activation='relu')(conv1_1)
    #conv1_2 = BatchNormalization(axis=3)(conv1_2)
    # NOTE(review): drop_1 is never consumed — pool1 is applied to conv1_2
    # directly, so this Dropout is disconnected from the graph. The same holds
    # for drop_2 .. drop_5 below. Confirm whether dropout was meant to be wired
    # in (pool on drop_N) or intentionally disabled.
    drop_1 = Dropout(0.5)(conv1_2)
    pool1 = MaxPooling2D(name='pool1', pool_size=(2, 2), strides=(2, 2), padding='same', )(conv1_2)
    # Block 2
    conv2_1 = Conv2D(filter*2, (3, 3), name='conv2_1', padding='same', activation='relu')(pool1)
    conv2_2 = Conv2D(filter*2, (3, 3), name='conv2_2', padding='same', activation='relu')(conv2_1)
    #conv2_2 = BatchNormalization(axis=3)(conv2_2)
    drop_2 = Dropout(0.5)(conv2_2)  # disconnected, see NOTE above
    pool2 = MaxPooling2D(name='pool2', pool_size=(2, 2), strides=(2, 2), padding='same')(conv2_2)
    # Block 3
    conv3_1 = Conv2D(filter*4, (3, 3), name='conv3_1', padding='same', activation='relu')(pool2)
    conv3_2 = Conv2D(filter*4, (3, 3), name='conv3_2', padding='same', activation='relu')(conv3_1)
    conv3_3 = Conv2D(filter*4, (3, 3), name='conv3_3', padding='same', activation='relu')(conv3_2)
    #conv3_3 = BatchNormalization(axis=3)(conv3_3)
    drop_3 = Dropout(0.5)(conv3_3)  # disconnected, see NOTE above
    pool3 = MaxPooling2D(name='pool3', pool_size=(2, 2), strides=(2, 2), padding='same')(conv3_3)
    # Block 4
    conv4_1 = Conv2D(filter*8, (3, 3), name='conv4_1', padding='same', activation='relu')(pool3)
    conv4_2 = Conv2D(filter*8, (3, 3), name='conv4_2', padding='same', activation='relu')(conv4_1)
    conv4_3 = Conv2D(filter*8, (3, 3), name='conv4_3', padding='same', activation='relu')(conv4_2)
    #conv4_3 = BatchNormalization(axis=3)(conv4_3)
    drop_4 = Dropout(0.5)(conv4_3)  # disconnected, see NOTE above
    pool4 = MaxPooling2D(name='pool4', pool_size=(2, 2), strides=(2, 2), padding='same')(conv4_3)
    # Block 5
    conv5_1 = Conv2D(filter*8, (3, 3), name='conv5_1', padding='same', activation='relu')(pool4)
    conv5_2 = Conv2D(filter*8, (3, 3), name='conv5_2', padding='same', activation='relu')(conv5_1)
    conv5_3 = Conv2D(filter*8, (3, 3), name='conv5_3', padding='same', activation='relu')(conv5_2)
    #conv5_3 = BatchNormalization(axis=3)(conv5_3)
    drop_5 = Dropout(0.5)(conv5_3)  # disconnected, see NOTE above
    pool5 = MaxPooling2D(name='pool5', pool_size=(3, 3), strides=(1, 1), padding='same')(conv5_3)
    # FC6 — dilated convolution, as in the SSD architecture.
    fc6 = Conv2D(filter*16, (3, 3), name='fc6', dilation_rate=(6, 6), padding='same', activation='relu')(pool5) #pool5
    fc6 = Dropout(0.5, name='drop6')(fc6)
    # FC7
    fc7 = Conv2D(filter*16, (1, 1), name='fc7', padding='same', activation='relu')(fc6)
    fc7 = Dropout(0.5, name='drop7')(fc7)
    # Block 6
    conv6_1 = Conv2D(filter*4, (1, 1), name='conv6_1', padding='same', activation='relu')(fc7)
    conv6_2 = Conv2D(filter*8, (3, 3), name='conv6_2', strides=(2, 2), padding='same', activation='relu')(conv6_1)
    #conv6_2 = BatchNormalization(axis=3)(conv6_2)
    conv6_2 = Dropout(0.5)(conv6_2)
    # Block 7
    conv7_1 = Conv2D(filter*2, (1, 1), name='conv7_1', padding='same', activation='relu')(conv6_2)
    conv7_1z = ZeroPadding2D(name='conv7_1z')(conv7_1)
    conv7_2 = Conv2D(filter*4, (3, 3), name='conv7_2', padding='valid', strides=(2, 2), activation='relu')(conv7_1z)
    #conv7_2 = BatchNormalization(axis=3)(conv7_2)
    conv7_2 = Dropout(0.5)(conv7_2)
    # Block 8
    conv8_1 = Conv2D(filter*2, (1, 1), name='conv8_1', padding='same', activation='relu')(conv7_2)
    conv8_2 = Conv2D(filter*4, (3, 3), name='conv8_2', padding='same', strides=(2, 2), activation='relu')(conv8_1)
    #conv8_2 = BatchNormalization(axis=3)(conv8_2)
    conv8_2 = Dropout(0.5)(conv8_2)
    # Last Pool — global average pooling instead of the flatten variant below.
    pool6 = GlobalAveragePooling2D(name='pool6')(conv8_2)
    #flatten_1 = Flatten()(conv8_2) #conv8_2)
    #dense_1 = Dense(512, name='dense_1')(flatten_1)
    dense_1 = Dense(512, name='dense_1')(pool6)
    act_1 = Activation('relu')(dense_1)
    drop_6 = Dropout(0.5)(act_1)
    dense_2 = Dense(num_classes, name='dense_2')(drop_6)
    softmax = Activation('softmax')(dense_2)
    return models.Model(x, outputs=softmax)
model=model_cifar(input_shape=[img_rows,img_cols, 3], num_classes=3)
# load the weights from the last epoch
model.load_weights('weights_SSD300.hdf5', by_name=True)
freeze = ['input_1', 'conv1_1', 'conv1_2', 'pool1',
'conv2_1', 'conv2_2', 'pool2',
'conv3_1', 'conv3_2', 'conv3_3', 'pool3',
'conv4_1', 'conv4_2', 'conv4_3', 'pool4',
'conv5_1', 'conv5_2', 'conv5_3', 'pool5']
"""
for L in model.layers:
if L.name in freeze:
L.trainable = False
"""
def schedule(epoch, decay=0.8):  # 0.9
    """Exponentially decayed learning rate: base_lr * decay**epoch.

    ``base_lr`` is a module-level global defined just below this function
    (resolved at call time, so the ordering is fine).
    """
    return base_lr * (decay ** epoch)
# Initial learning rate consumed by schedule() above.
base_lr = 0.0001
# initiate RMSprop optimizer
#opt = keras.optimizers.rmsprop(lr=base_lr, decay=1e-6)
opt = keras.optimizers.Adam(lr=base_lr)
# Callbacks: CSV history log, per-epoch weight checkpoints, and the
# exponential learning-rate schedule defined above.
csv_logger = keras.callbacks.CSVLogger('./checkpoints/training.log', separator=',', append=True)
weights_save=keras.callbacks.ModelCheckpoint('./checkpoints/weights.{epoch:02d}-{val_loss:.2f}.hdf5',
                                             verbose=1,
                                             save_weights_only=True)
learnRateSchedule=keras.callbacks.LearningRateScheduler(schedule)
callbacks = [weights_save, csv_logger, learnRateSchedule]
# Let's train the model using opt
model.compile(loss='categorical_crossentropy',
              optimizer=opt,
              metrics=['accuracy'])
model.summary()
# load weights for every block as the same name block
#model.load_weights("weights.03-1.50.hdf5", by_name=True)
#x_train = x_train.astype('float32')
x_train = np.array(x_train, dtype=np.float32)
#x_test = x_test.astype('float32')
x_test = np.array(x_test, dtype=np.float32)
x_train /= 255
x_test /= 255
if not data_augmentation:
print('Not using data augmentation.')
model.fit(x_train, y_train,
batch_size=batch_size,
epochs=epochs,
callbacks=callbacks,
validation_data=(x_test, y_test),
shuffle=True)
else:
print('Using real-time data augmentation.')
# This will do preprocessing and realtime data augmentation:
datagen = ImageDataGenerator(
featurewise_center=False, # set input mean to 0 over the dataset
samplewise_center=False, # set each sample mean to 0
featurewise_std_normalization=False, # divide inputs by std of the dataset
samplewise_std_normalization=False, # divide each input by its std
zca_whitening=False, # apply ZCA whitening
rotation_range=0, # randomly rotate images in the range (degrees, 0 to 180)
width_shift_range=0.1, # randomly shift images horizontally (fraction of total width)
height_shift_range=0.1, # randomly shift images vertically (fraction of total height)
horizontal_flip=True, # randomly flip images
vertical_flip=False) # randomly flip images
# Compute quantities required for feature-wise normalization
# (std, mean, and principal components if ZCA whitening is applied).
datagen.fit(x_train)
for j in range(10):
# Fit the model on the batches generated by datagen.flow().
model.fit_generator(datagen.flow(x_train, y_train,
batch_size=batch_size),
steps_per_epoch=x_train.shape[0] // batch_size,
epochs=epochs,
callbacks=callbacks,
validation_data=(x_test, y_test))
# save weights every epoch
model.save_weights("all_block_{0:03d}".format(j))
score = model.evaluate(x_test, y_test, verbose=0)
print('Test loss:', score[0])
print('Test accuracy:', score[1])
"""
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_1 (InputLayer) (None, 32, 32, 3) 0
_________________________________________________________________
conv1_1 (Conv2D) (None, 32, 32, 64) 1792
_________________________________________________________________
conv1_2 (Conv2D) (None, 32, 32, 64) 36928
_________________________________________________________________
pool1 (AveragePooling2D) (None, 16, 16, 64) 0
_________________________________________________________________
conv2_1 (Conv2D) (None, 16, 16, 128) 73856
_________________________________________________________________
conv2_2 (Conv2D) (None, 16, 16, 128) 147584
_________________________________________________________________
pool2 (AveragePooling2D) (None, 8, 8, 128) 0
_________________________________________________________________
conv3_1 (Conv2D) (None, 8, 8, 256) 295168
_________________________________________________________________
conv3_2 (Conv2D) (None, 8, 8, 256) 590080
_________________________________________________________________
conv3_3 (Conv2D) (None, 8, 8, 256) 590080
_________________________________________________________________
pool3 (AveragePooling2D) (None, 4, 4, 256) 0
_________________________________________________________________
conv4_1 (Conv2D) (None, 4, 4, 512) 1180160
_________________________________________________________________
conv4_2 (Conv2D) (None, 4, 4, 512) 2359808
_________________________________________________________________
conv4_3 (Conv2D) (None, 4, 4, 512) 2359808
_________________________________________________________________
pool4 (AveragePooling2D) (None, 2, 2, 512) 0
_________________________________________________________________
conv5_1 (Conv2D) (None, 2, 2, 512) 2359808
_________________________________________________________________
conv5_2 (Conv2D) (None, 2, 2, 512) 2359808
_________________________________________________________________
conv5_3 (Conv2D) (None, 2, 2, 512) 2359808
_________________________________________________________________
pool5 (AveragePooling2D) (None, 2, 2, 512) 0
_________________________________________________________________
fc6 (Conv2D) (None, 2, 2, 1024) 4719616
_________________________________________________________________
drop6 (Dropout) (None, 2, 2, 1024) 0
_________________________________________________________________
fc7 (Conv2D) (None, 2, 2, 1024) 1049600
_________________________________________________________________
drop7 (Dropout) (None, 2, 2, 1024) 0
_________________________________________________________________
conv6_1 (Conv2D) (None, 2, 2, 512) 524800
_________________________________________________________________
conv6_2 (Conv2D) (None, 1, 1, 1024) 4719616
_________________________________________________________________
dropout_1 (Dropout) (None, 1, 1, 1024) 0
_________________________________________________________________
conv7_1 (Conv2D) (None, 1, 1, 256) 262400
_________________________________________________________________
conv7_1z (ZeroPadding2D) (None, 3, 3, 256) 0
_________________________________________________________________
conv7_2 (Conv2D) (None, 1, 1, 256) 590080
_________________________________________________________________
dropout_2 (Dropout) (None, 1, 1, 256) 0
_________________________________________________________________
conv8_1 (Conv2D) (None, 1, 1, 128) 32896
_________________________________________________________________
conv8_2 (Conv2D) (None, 1, 1, 2048) 2361344
_________________________________________________________________
dropout_3 (Dropout) (None, 1, 1, 2048) 0
_________________________________________________________________
flatten_1 (Flatten) (None, 2048) 0
_________________________________________________________________
dense_1 (Dense) (None, 512) 1049088
_________________________________________________________________
activation_1 (Activation) (None, 512) 0
_________________________________________________________________
dropout_4 (Dropout) (None, 512) 0
_________________________________________________________________
dense_2 (Dense) (None, 10) 5130
_________________________________________________________________
activation_2 (Activation) (None, 10) 0
=================================================================
Total params: 30,029,258
Trainable params: 15,314,570
Non-trainable params: 14,714,688
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_1 (InputLayer) (None, 32, 32, 3) 0
_________________________________________________________________
conv1_1 (Conv2D) (None, 32, 32, 64) 1792
_________________________________________________________________
conv1_2 (Conv2D) (None, 32, 32, 64) 36928
_________________________________________________________________
dropout_1 (Dropout) (None, 32, 32, 64) 0
_________________________________________________________________
pool1 (AveragePooling2D) (None, 16, 16, 64) 0
_________________________________________________________________
conv2_1 (Conv2D) (None, 16, 16, 128) 73856
_________________________________________________________________
conv2_2 (Conv2D) (None, 16, 16, 128) 147584
_________________________________________________________________
dropout_2 (Dropout) (None, 16, 16, 128) 0
_________________________________________________________________
pool2 (AveragePooling2D) (None, 8, 8, 128) 0
_________________________________________________________________
conv3_1 (Conv2D) (None, 8, 8, 256) 295168
_________________________________________________________________
conv3_2 (Conv2D) (None, 8, 8, 256) 590080
_________________________________________________________________
conv3_3 (Conv2D) (None, 8, 8, 256) 590080
_________________________________________________________________
dropout_3 (Dropout) (None, 8, 8, 256) 0
_________________________________________________________________
pool3 (AveragePooling2D) (None, 4, 4, 256) 0
_________________________________________________________________
conv4_1 (Conv2D) (None, 4, 4, 512) 1180160
_________________________________________________________________
conv4_2 (Conv2D) (None, 4, 4, 512) 2359808
_________________________________________________________________
conv4_3 (Conv2D) (None, 4, 4, 512) 2359808
_________________________________________________________________
dropout_4 (Dropout) (None, 4, 4, 512) 0
_________________________________________________________________
pool4 (AveragePooling2D) (None, 2, 2, 512) 0
_________________________________________________________________
conv5_1 (Conv2D) (None, 2, 2, 512) 2359808
_________________________________________________________________
conv5_2 (Conv2D) (None, 2, 2, 512) 2359808
_________________________________________________________________
conv5_3 (Conv2D) (None, 2, 2, 512) 2359808
_________________________________________________________________
dropout_5 (Dropout) (None, 2, 2, 512) 0
_________________________________________________________________
pool5 (AveragePooling2D) (None, 2, 2, 512) 0
_________________________________________________________________
flatten_1 (Flatten) (None, 2048) 0
_________________________________________________________________
dense_1 (Dense) (None, 512) 1049088
_________________________________________________________________
activation_1 (Activation) (None, 512) 0
_________________________________________________________________
dropout_6 (Dropout) (None, 512) 0
_________________________________________________________________
dense_2 (Dense) (None, 10) 5130
_________________________________________________________________
activation_2 (Activation) (None, 10) 0
=================================================================
Total params: 15,768,906
Trainable params: 15,768,906
Non-trainable params: 0
_________________________________________________________________
""" |
import argparse
import collections
from io import StringIO
from copy import deepcopy
import difflib
from pathlib import Path
import six
import yaml
try:
    from colorama import Fore, Back, Style, init
    init()
except ImportError:  # fallback so that the imported classes always exist
    class ColorFallback:
        # Any attribute access (Fore.GREEN, Fore.RESET, ...) yields an empty
        # string, so diff output is simply uncolored without colorama.
        def __getattr__(self, name):
            return ""
    Fore = Back = Style = ColorFallback()
# python 3.8+ compatibility: abstract base classes live in ``collections.abc``
# (aliases on ``collections`` itself were removed in Python 3.10).
try:
    collectionsAbc = collections.abc
except AttributeError:  # narrow except: only very old Pythons lack the attribute
    collectionsAbc = collections

# Horizontal rule printed between per-file diffs.
SEPARATION = "\n" + "=" * 96 + "\n"
def main():
    """CLI entry point: merge the given YAML files, optionally writing the result."""
    cli = argparse.ArgumentParser()
    cli.add_argument("yml_files", nargs="*")
    cli.add_argument("-o", "--out_file")
    parsed = cli.parse_args()
    merge_configs_from_file(**vars(parsed))
def merge_configs_from_file(yml_files, out_file=None, verbose=True):
    """Load, merge and optionally write YAML config files.

    :param yml_files: paths of YAML files, merged left to right.
    :param out_file: optional output path; parent directories are created.
    :param verbose: forwarded to merge_configs (prints colorized diffs).
    :returns: the merged config dict.
    """
    # read_text() closes the file; the previous Path.open() leaked the handle.
    configs = [
        yaml.load(Path(yml_file).read_text(), Loader=yaml.SafeLoader)
        for yml_file in yml_files
    ]
    config = merge_configs(configs, titles=yml_files, verbose=verbose)
    if out_file is not None:
        out_file = Path(out_file)
        out_file.parent.mkdir(parents=True, exist_ok=True)
        # Context manager guarantees the output handle is flushed and closed.
        with out_file.open("w") as fh:
            yaml.dump(config, fh, default_flow_style=False)
    return config
def merge_configs(configs, verbose=True, titles=None):
    """Merge *configs* left to right, printing a diff after each step.

    :param configs: list of config dicts; the first one is the base and is
        mutated in place by successive ``nestupdate`` calls.
    :param verbose: print a colorized unified diff per merge step plus a
        final summary diff.
    :param titles: labels for the diff headers; defaults to config-1..N.
    :returns: the merged config dict.
    """
    print()
    if titles is None:
        titles = [f"config-{i+1}" for i in range(len(configs))]
    title = titles[0]
    config = configs[0]
    # Keep pristine copies of the starting point for the final summary diff
    # (config itself is mutated below).
    first_title = deepcopy(title)
    first_config = deepcopy(config)
    for title, _config in zip(titles[1:], configs[1:]):
        # Snapshot before the in-place merge so a per-step diff can be shown.
        config_prev = deepcopy(config)
        config = nestupdate(config, _config)
        if verbose:
            print(
                take_diff(
                    config_prev,
                    config,
                    from_title="previous config",
                    to_title=title,
                )
            )
            print(SEPARATION)
    if verbose:
        # Summary diff from the very first config to the final merged result.
        print(
            take_diff(
                first_config,
                config,
                from_title=first_title,
                to_title="FINALE CONFIG",
            )
        )
    return config
def nestupdate(d, u):
    """Recursively merge mapping *u* into mapping *d* (in place).

    Rules, in order:
    - if the existing value in *d* is not a mapping, it is overwritten;
    - nested mappings are merged recursively;
    - otherwise, a list value from *u* is appended to ``list(existing)``.

    :returns: *d*, mutated in place.
    """
    # six.iteritems removed: the f-strings in merge_configs already make this
    # module Python-3-only, so plain .items() is equivalent.
    for key, val in u.items():
        current = d.get(key, {})
        if not isinstance(current, collections.abc.Mapping):
            # Existing scalar/list value is simply replaced.
            d[key] = val
        elif isinstance(val, collections.abc.Mapping):
            d[key] = nestupdate(current, val)
        elif type(val) == list:
            # NOTE(review): reached only when ``current`` is a mapping (usually
            # the {} default for a missing key), so this effectively just
            # installs ``val`` — confirm whether extending existing lists was
            # the original intent.
            d[key] = list(current) + val
        else:
            d[key] = val
    return d
def take_diff(from_dict, to_dict, from_title="", to_title=""):
    """Return a colorized unified diff between the YAML dumps of two dicts."""
    old_lines = dumps(from_dict, default_flow_style=False).splitlines()
    new_lines = dumps(to_dict, default_flow_style=False).splitlines()
    delta = difflib.unified_diff(
        old_lines, new_lines, fromfile=from_title, tofile=to_title
    )
    return "\n".join(color_diff(delta))
def color_diff(diff):
    """Yield diff lines wrapped in ANSI colors: + green, - red, ^ blue."""
    for line in diff:
        if line.startswith("+"):
            yield Fore.GREEN + line + Fore.RESET
        elif line.startswith("-"):
            yield Fore.RED + line + Fore.RESET
        elif line.startswith("^"):
            yield Fore.BLUE + line + Fore.RESET
        else:
            yield line
    # The previous trailing ``return color_diff`` was dead weight: a return
    # value inside a generator is discarded by iteration, so it is removed.
def dumps(d, **kwargs):
    """Serialize *d* to a YAML string.

    ``yaml.dump`` already returns the document as a string when no stream is
    given, so the previous intermediate ``StringIO`` buffer was unnecessary.
    """
    return yaml.dump(d, **kwargs)
|
########################################
## Adventure Bot "Dennis" ##
## commands/item.py ##
## Copyright 2012-2013 PariahSoft LLC ##
########################################
## **********
## Permission is hereby granted, free of charge, to any person obtaining a copy
## of this software and associated documentation files (the "Software"), to
## deal in the Software without restriction, including without limitation the
## rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
## sell copies of the Software, and to permit persons to whom the Software is
## furnished to do so, subject to the following conditions:
##
## The above copyright notice and this permission notice shall be included in
## all copies or substantial portions of the Software.
##
## THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
## IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
## FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
## AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
## LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
## FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
## IN THE SOFTWARE.
## **********
################
# Item Command #
################
from helpers import *
from database import put
from help import C_HELP
def C_ITEM(S, DB, sender, args):
    """Handle the "item" command: list, modify ("set"), or delete ("del")
    items in the sender's current room.

    S      -- session/connection handle passed through to send()
    DB     -- database handle
    sender -- name of the user issuing the command
    args   -- command arguments, already split into a list of words
    """
    roomid = getroom(DB, sender)
    roominfo = roomstat(DB, roomid)

    if len(args) == 0:  # No arguments: list the items in the room.
        body = ""
        for n, item in enumerate(roominfo["items"]):  # Build item list.
            if len(roominfo["items"]) == 1:
                body += item["name"] + " (" + str(n) + ")."
            elif len(roominfo["items"]) > 1:
                if n < len(roominfo["items"]) - 1:
                    body += item["name"] + " (" + str(n) + "), "
                else:
                    body += "and " + item["name"] + " (" + str(n) + ")."
        if not body:  # List items.
            send(S, sender, "The room is empty.")
        else:
            send(S, sender, "The room contains {0}".format(body))

    elif len(args) >= 4 and args[0].lower() == "set":  # Modify an item.
        # Only the room owner may modify items in a locked room.
        if roominfo["owner"] == sender or roominfo["locked"] == 0:
            if args[2].lower() == "name":  # Set name.
                for item in roominfo["items"]:  # Check if name is taken.
                    if " ".join(args[3:]).lower() == item["name"].lower():
                        send(S, sender, "An item by that name already exists.")
                        return
                if goodname(" ".join(args[3:])):  # Update the name.
                    try:
                        items = roominfo["items"]
                        items[int(args[1])]["name"] = " ".join(args[3:])
                        # NOTE(review): the serialized item list is interpolated
                        # into the SQL string unescaped; parameterize via put()
                        # if the helper supports it.
                        put(DB, "UPDATE rooms SET items='{0}' WHERE id='{1}'".format(obj2str(items), roomid))
                        send(S, sender, "Name updated.")
                    except (ValueError, IndexError):
                        # Non-numeric or out-of-range item ID.
                        C_HELP(S, DB, sender, ["item set"])
                else:
                    send(S, sender, "Invalid name.")
            elif args[2].lower() == "desc":  # Update the description.
                try:
                    items = roominfo["items"]
                    if args[3].startswith("\\\\"):  # Append for long description.
                        curr = items[int(args[1])]["desc"]
                        # BUG FIX: the old code used curr[0][0], which kept only
                        # the first character of the existing description when
                        # appending; use the whole current description.
                        newdesc = "{0}\n{1}".format(curr, " ".join(args[3:])[2:])
                        items[int(args[1])]["desc"] = newdesc
                    else:
                        items[int(args[1])]["desc"] = " ".join(args[3:])
                    put(DB, "UPDATE rooms SET items='{0}' WHERE id='{1}'".format(obj2str(items), roomid))
                    send(S, sender, "Description updated.")
                except (ValueError, IndexError):
                    C_HELP(S, DB, sender, ["item set"])
            else:
                C_HELP(S, DB, sender, ["item set"])

    elif len(args) == 2 and args[0].lower() == "del":  # Delete an item.
        # Only the room owner may delete items in a locked room.
        if roominfo["owner"] == sender or roominfo["locked"] == 0:
            try:  # Update item list.
                items = roominfo["items"]
                items.pop(int(args[1]))
                put(DB, "UPDATE rooms SET items='{0}' WHERE id='{1}'".format(obj2str(items), roomid))
                send(S, sender, "Item ID {0} deleted.".format(args[1]))
            except (ValueError, IndexError):
                C_HELP(S, DB, sender, ["item del"])

    elif args[0].lower() == "set":
        C_HELP(S, DB, sender, ["item set"])
    elif args[0].lower() == "del":
        C_HELP(S, DB, sender, ["item del"])
    else:
        C_HELP(S, DB, sender, ["item"])
|
<reponame>yinghai/benchmark
import torch
import argparse
from torchbenchmark.util.model import BenchmarkModel
from typing import List, Tuple
def parse_args(model: BenchmarkModel, extra_args: List[str]) -> argparse.Namespace:
    """Parse the benchmark extra arguments and merge in model-derived settings."""
    parser = argparse.ArgumentParser()
    # By default half precision is enabled for inference; passing --eval-fp16
    # on the command line turns it OFF (the flag uses store_false).
    parser.add_argument("--eval-fp16", action='store_false', help="enable eval fp16")
    # Optional backends/instrumentation, all disabled by default.
    for flag, help_text in (
            ("--fx2trt", "enable fx2trt"),
            ("--torch_tensorrt", "enable torch_tensorrt"),
            ("--flops", "enable flops counting"),
            ("--cudagraph", "enable CUDA Graph. Currently only implemented for train.")):
        parser.add_argument(flag, action='store_true', help=help_text)
    args = parser.parse_args(extra_args)

    # Propagate the model configuration onto the namespace.
    for attr in ("device", "jit", "train_bs", "eval_bs"):
        setattr(args, attr, getattr(model, attr))

    # fp16 inference is only supported on GPU.
    if args.device == "cpu":
        args.eval_fp16 = False

    # The two TensorRT backends are mutually exclusive.
    assert not (args.fx2trt and args.torch_tensorrt), "User cannot enable torch_tensorrt and fx2trt at the same time."
    return args
def apply_args(model: BenchmarkModel, args: argparse.Namespace):
    """Apply the parsed extra-args to *model* in place.

    Order matters: fp16 conversion must run before the fx2trt/torch_tensorrt
    lowering below, because those backends consume the already-converted
    eval_model / eval_example_inputs — do not reorder these steps.
    """
    if args.flops:
        # fvcore is imported lazily so it is only required when --flops is set.
        from fvcore.nn import FlopCountAnalysis
        model.train_flops = FlopCountAnalysis(model.model, tuple(model.example_inputs)).total()
        model.eval_flops = FlopCountAnalysis(model.eval_model, tuple(model.eval_example_inputs)).total()
    # apply eval_fp16
    if args.eval_fp16:
        model.eval_model, model.eval_example_inputs = enable_fp16(model.eval_model, model.eval_example_inputs)
    # apply fx2trt for eval
    if args.fx2trt:
        assert args.device == 'cuda', "fx2trt is only available with CUDA."
        assert not args.jit, "fx2trt with JIT is not available."
        model.eval_model = enable_fx2trt(args.eval_bs, args.eval_fp16, model.eval_model, model.eval_example_inputs)
    # apply torch_tensorrt for eval
    if args.torch_tensorrt:
        assert args.device == 'cuda', "torch_tensorrt is only available with CUDA."
        model.eval_model = enable_torchtrt(model.eval_example_inputs, args.eval_fp16, model.eval_model)
    # apply cuda graph for train
    if args.cudagraph:
        enable_cudagraph(model, model.example_inputs)
def enable_torchtrt(eval_input: Tuple[torch.tensor], eval_fp16: bool, eval_model: torch.nn.Module) -> torch.nn.Module:
    """Compile *eval_model* with Torch-TensorRT for the given example input."""
    import torch_tensorrt
    precision = torch_tensorrt.dtype.half if eval_fp16 else torch_tensorrt.dtype.float
    spec = [torch_tensorrt.Input(eval_input[0].shape)]
    return torch_tensorrt.compile(eval_model, inputs=spec, enabled_precisions=precision)
def enable_cudagraph(model: BenchmarkModel, example_inputs: Tuple[torch.tensor]):
    """Capture one full training step into a CUDA Graph and store it on model.g.

    The warmup iterations must run on a side stream before capture, and the
    optimizer state must already exist when torch.cuda.graph() records the
    step — do not reorder these phases.
    """
    optimizer = model.optimizer
    loss_fn = model.loss_fn
    # warmup: run a few real steps on a side stream so lazy allocations and
    # autotuning happen outside the graph capture.
    s = torch.cuda.Stream()
    s.wait_stream(torch.cuda.current_stream())
    with torch.cuda.stream(s):
        for _ in range(3):
            optimizer.zero_grad(set_to_none=True)
            y_pred = model.model(*example_inputs)
            loss = loss_fn(y_pred, model.example_outputs)
            loss.backward()
            optimizer.step()
    torch.cuda.current_stream().wait_stream(s)
    # capture: replaying model.g later reruns this exact step on the static
    # tensors allocated during capture.
    g = torch.cuda.CUDAGraph()
    optimizer.zero_grad(set_to_none=True)
    with torch.cuda.graph(g):
        static_y_pred = model.model(*example_inputs)
        static_loss = loss_fn(static_y_pred, model.example_outputs)
        static_loss.backward()
        optimizer.step()
    model.g = g
def enable_fp16(model: torch.nn.Module, example_input: Tuple[torch.tensor]) -> Tuple[torch.nn.Module, Tuple[torch.tensor]]:
    """Convert the model and its first example input to half precision."""
    half_model = model.half()
    half_input = (example_input[0].half(),)
    return half_model, half_input
def enable_fx2trt(max_batch_size: int, fp16: bool, model: torch.nn.Module, example_inputs: Tuple[torch.tensor]) -> torch.nn.Module:
    """Lower *model* to TensorRT through the fx2trt path."""
    from torchbenchmark.util.fx2trt import lower_to_trt
    return lower_to_trt(
        module=model,
        input=example_inputs,
        max_batch_size=max_batch_size,
        fp16_mode=fp16,
    )
|
<gh_stars>1-10
'''
A test set to check whether the Weisfeiler-Leman Algorithm has been implemented as expected
'''
from WL_Wrapper import WL_Wrapper
from compression_schemes import StringCompressionScheme
from compression_schemes import IteratorScheme
import networkx as nx
# Report template filled via str.format(Name=..., result=...) by each test below.
test_status = '''
Test Name: {Name}
Result: {result}
'''
def TestIdenticalGraphs(scheme):
    """Check that comparing a graph with itself yields a similarity score of 1.

    Returns a report string with one PASS/FAIL line per graph family.
    """
    result = ""
    # BUG FIX: the original wrote abs(w.score - 1 <0.01), which applies abs()
    # to a boolean (so the check was just w.score < 1.01); the intended test
    # is abs(w.score - 1) < 0.01.
    g = nx.complete_graph(100)
    w = WL_Wrapper(g, g, scheme)
    if abs(w.score - 1) < 0.01:
        result = result + test_status.format(Name="TestIdenticalGraphs", result="complete_graph: PASS")
    else:
        result = result + test_status.format(Name="TestIdenticalGraphs", result="complete_graph: FAIL")

    g = nx.complete_multipartite_graph(10)
    w = WL_Wrapper(g, g, scheme)
    if abs(w.score - 1) < 0.01:
        result = result + test_status.format(Name="TestIdenticalGraphs", result="complete_multipartite_graph(10): PASS")
    else:
        result = result + test_status.format(Name="TestIdenticalGraphs", result="complete_multipartite_graph(10): FAIL")

    g = nx.empty_graph(100)
    w = WL_Wrapper(g, g, scheme)
    if abs(w.score - 1) < 0.01:
        result = result + test_status.format(Name="TestIdenticalGraphs", result="empty_graph(100): PASS")
    else:
        result = result + test_status.format(Name="TestIdenticalGraphs", result="empty_graph(100): FAIL")
    return result
def TestBasic(scheme):
    """Compare a 6-node path graph against a 6-node cycle-like graph and
    check the similarity score against the expected value 0.489.
    """
    result = ""
    g = nx.Graph()
    g.add_nodes_from([1, 2, 3, 4, 5, 6])
    g.add_edge(1, 2)
    g.add_edge(2, 3)
    g.add_edge(3, 4)
    g.add_edge(4, 5)
    g.add_edge(5, 6)
    s = nx.Graph()
    s.add_nodes_from([1, 2, 3, 4, 5, 6])
    s.add_edge(1, 2)
    s.add_edge(2, 3)
    s.add_edge(2, 4)
    s.add_edge(3, 5)
    s.add_edge(4, 6)
    s.add_edge(5, 6)
    w = WL_Wrapper(g, s, scheme)
    # BUG FIX: was abs(w.score - 0.489 <0.01), which applied abs() to a
    # boolean; the intended test is abs(w.score - 0.489) < 0.01.
    if abs(w.score - 0.489) < 0.01:
        result = result + test_status.format(Name="TestBasic", result="PASS")
    else:
        result = result + test_status.format(Name="TestBasic", result="FAIL")
    return result
def TestDifferentNumberOfNodes(scheme):
    """Compare a 5-node graph with a 6-node path graph and check the
    similarity score against the expected value 0.80.
    """
    result = ""
    g = nx.Graph()
    g.add_nodes_from([1, 2, 3, 4, 5])
    g.add_edge(1, 2)
    g.add_edge(2, 3)
    g.add_edge(2, 4)
    g.add_edge(3, 5)
    g.add_edge(4, 5)
    s = nx.Graph()
    s.add_nodes_from([1, 2, 3, 4, 5, 6])
    s.add_edge(1, 2)
    s.add_edge(2, 3)
    s.add_edge(3, 4)
    s.add_edge(4, 5)
    s.add_edge(5, 6)
    w = WL_Wrapper(g, s, scheme)
    # BUG FIX: was abs(w.score - 0.80 <0.01), which applied abs() to a
    # boolean; the intended test is abs(w.score - 0.80) < 0.01.
    if abs(w.score - 0.80) < 0.01:
        result = result + test_status.format(Name="TestDifferentNumberOfNodes", result="PASS")
    else:
        result = result + test_status.format(Name="TestDifferentNumberOfNodes", result="FAIL")
    return result
# Every test callable; each returns a formatted PASS/FAIL report string.
tests = [
    TestIdenticalGraphs,
    TestBasic,
    TestDifferentNumberOfNodes
]
# Run all tests against the iterator-based compression scheme and print reports.
for test in tests:
    print(test(IteratorScheme()))
    pass
#!/usr/bin/env nix-shell
#!nix-shell -i python -p python3 nix gitRepo nix-prefetch-git -I nixpkgs=./pkgs
from typing import Optional, Dict
from enum import Enum
import argparse
import json
import os
import subprocess
import tempfile
# Flags passed to every `repo init` invocation: quiet mode, shallow checkout,
# a pinned fork/branch of the repo launcher, and no launcher signature check.
REPO_FLAGS = [
    "--quiet",
    "--repo-url=https://github.com/danielfullmer/tools_repo",
    "--repo-branch=master",
    "--no-repo-verify",
    "--depth=1",
]
# The kind of remote a "commitish" refers to.
# These are used for the --ref-type CLI arg.
class ManifestRefType(Enum):
    # Values are the git ref namespaces under refs/ (refs/heads/..., refs/tags/...),
    # spliced into --manifest-branch=refs/<value>/<ref> below.
    BRANCH = "heads"
    TAG = "tags"
# In-memory caches shared across fetches to avoid re-prefetching.
revHashes: Dict[str, str] = {}   # git rev -> nix sha256
revTrees: Dict[str, str] = {}    # git rev -> git tree hash
treeHashes: Dict[str, str] = {}  # git tree hash -> nix sha256
def save(filename, data):
    """Write *data* to *filename* as pretty-printed, key-sorted JSON.

    FIX: use a context manager so the file handle is closed (and flushed)
    deterministically; the original left the handle open.
    """
    with open(filename, 'w') as f:
        f.write(json.dumps(data, sort_keys=True, indent=2, separators=(',', ': ')))
def checkout_git(url, rev):
    """Prefetch *rev* of *url* with nix-prefetch-git and return its JSON info."""
    print("Checking out %s %s" % (url, rev))
    output = subprocess.check_output(
        ["nix-prefetch-git", "--url", url, "--rev", rev]
    ).decode()
    return json.loads(output)
def make_repo_file(url: str, ref: str, filename: str, ref_type: ManifestRefType,
                   override_project_revs: Dict[str, str], force_refresh: bool,
                   mirrors: Dict[str, str]):
    """Create or refresh *filename*, a JSON description of every project in
    the repo manifest at *url*/*ref*, including a nix sha256 for each project.

    Resumable: the file is re-saved after every fetched hash, and entries
    that already have a sha256 are skipped on re-runs.
    """
    if os.path.exists(filename) and not force_refresh:
        # Reuse the previous output and just fill in the missing hashes.
        # FIX: close the handle (was json.load(open(filename))).
        with open(filename) as f:
            data = json.load(f)
    else:
        print("Fetching information for %s %s" % (url, ref))
        with tempfile.TemporaryDirectory() as tmpdir:
            subprocess.check_call(['repo', 'init', f'--manifest-url={url}', f'--manifest-branch=refs/{ref_type.value}/{ref}', *REPO_FLAGS], cwd=tmpdir)
            json_text = subprocess.check_output(['repo', 'dumpjson'] + (["--local-only"] if override_project_revs else []), cwd=tmpdir).decode()
            data = json.loads(json_text)

        for project, rev in override_project_revs.items():
            # We have to iterate over the whole output since we don't save
            # the project name anymore, just the relpath, which isn't
            # exactly the project name.
            for relpath, p in data.items():
                if p['url'].endswith(project):
                    p['rev'] = rev

        save(filename, data)

    for relpath, p in data.items():
        if 'sha256' not in p:
            print("Fetching information for %s %s" % (p['url'], p['rev']))

            # Use cached copies if available.
            if p['rev'] in revHashes:
                p['sha256'] = revHashes[p['rev']]
                if p['rev'] in revTrees:
                    p['tree'] = revTrees[p['rev']]
                continue

            p_url = p['url']
            found_treehash = False
            for mirror_url, mirror_path in mirrors.items():
                if p['url'].startswith(mirror_url):
                    p_url = p['url'].replace(mirror_url, mirror_path)
                    # A local mirror lets us read the tree hash directly.
                    p['tree'] = subprocess.check_output(['git', 'log', '-1', '--pretty=%T', p['rev']], cwd=p_url + '.git').decode().strip()
                    if p['tree'] in treeHashes:
                        p['sha256'] = treeHashes[p['tree']]
                        found_treehash = True
            if found_treehash:
                continue

            # Grab
            git_info = checkout_git(p_url, p['rev'])
            p['sha256'] = git_info['sha256']

            # Add to cache
            revHashes[p['rev']] = p['sha256']
            if 'tree' in p:
                treeHashes[p['tree']] = p['sha256']

            # Save after every new piece of information just in case we crash
            save(filename, data)

    # Save at the end as well!
    save(filename, data)
def main():
    """CLI entry point: build repo-<ref>.json for the given manifest URL/ref."""
    parser = argparse.ArgumentParser()
    parser.add_argument('--mirror', action="append", help="a repo mirror to use for a given url, specified by <url>=<path>")
    parser.add_argument('--ref-type', help="the kind of ref that is to be fetched",
                        choices=[t.name.lower() for t in ManifestRefType], default=ManifestRefType.TAG.name.lower())
    parser.add_argument('--force', help="force a re-download. Useful with --ref-type branch", action='store_true')
    parser.add_argument('--repo-prop', help="repo.prop file to use as source for project git revisions")
    parser.add_argument('url', help="manifest URL")
    parser.add_argument('ref', help="manifest ref")
    parser.add_argument('oldrepojson', nargs='*', help="any older repo json files to use for cached sha256s")
    args = parser.parse_args()

    if args.mirror:
        mirrors = dict(mirror.split("=") for mirror in args.mirror)
    else:
        mirrors = {}

    ref_type = ManifestRefType[args.ref_type.upper()]

    # Extract project revisions from repo.prop
    override_project_revs = {}
    if args.repo_prop:
        # FIX: use context managers so file handles are closed (the original
        # used bare open() here and below for the oldrepojson files).
        with open(args.repo_prop, 'r') as f:
            lines = f.read().split('\n')
        for line in lines:
            if line:
                project, rev = line.split()
                override_project_revs[project] = rev

    # Read all oldrepojson files to populate hashtables
    for filename in args.oldrepojson:
        with open(filename) as f:
            data = json.load(f)
        for name, p in data.items():
            if 'sha256' in p:
                revHashes[p['rev']] = p['sha256']
                if 'tree' in p:
                    treeHashes[p['tree']] = p['sha256']
                    revTrees[p['rev']] = p['tree']

    filename = f'repo-{args.ref}.json'
    make_repo_file(args.url, args.ref, filename, ref_type, override_project_revs, force_refresh=args.force, mirrors=mirrors)

if __name__ == "__main__":
    main()
|
<reponame>kidist-amde/birth-monitor<filename>flaskApp.py<gh_stars>0
from flask import Flask
from flask import render_template
from flask import redirect, url_for, request
from get_old_tweets import get_tweets
from tweet_collection import get_recent_tweets
from birth_prediction import build_dataset,train_estimator,compute_loss,make_prediction
import pymongo
import re
import random
import tqdm
import gdown
# Build the training/evaluation dataset once at import time.
dataset = build_dataset()
# Random RGB triple used to color chart bars.
color_func = lambda : [random.randint(0, 255), random.randint(0, 255), random.randint(0, 255)]
# Tweet texts already shown to the annotator in this session.
visited = set()

# MongoDB collections backing the app.
client = pymongo.MongoClient("mongodb://localhost:27017/")
db = client["bdt_db"]
birth_collection = db["birth_tweets"]
tweets_collection = db["tweets"]
labeled_collection = db["labeled_tweets"]

# Regex patterns / keywords used to pre-filter candidate pregnancy tweets.
# NOTE(review): entries like `"my" and "pregnancy"` evaluate to just the
# right-hand string ("pregnancy"); if both words were meant to be required,
# these entries need rewriting — confirm the intent.
words = ["I.*(m|am|’m).*(weeks|months).*pregnant.", "my" and "pregnancy", "pregnant", "baby" and "our family",
         "baby coming soon", "I.*(m|am|’m).* been.*(weeks|months).*" and "since" and "I" and "pregnant",
         "I.*(m|am|’m).* expecting. *baby", "I.*(m|am|’m).* going to be mom", "I.*(m|am|’m).* having a baby",
         "I.*(ve|have|’ve).* been pregnant", "I.*(m|am|’m).* going to have a baby",
         "I.*(m|am|’m).* becoming.*(mom|mother).*"] + ["having a baby","delivering a baby","welcome baby","newborn",
         "new born","first baby","delivering first baby","baby coming soon",
         "have been pregnant","going to be mom","becoming mom","becoming dad"
         ,"becoming mother","becoming father"]

# Cursor over tweets matching the current pattern; the annotation route
# advances pattern_index and rebuilds the cursor when a pattern is exhausted.
pattern_index = 0
regx = re.compile(words[pattern_index] , re.IGNORECASE)
cursor = tweets_collection.find({"text": regx})
cursor_iter = iter(cursor)
class LabeledTweet(dict):
    """A labeled tweet persisted to MongoDB: {'_id': tweet id, 'label': label}."""

    def __init__(self, id, label):
        super().__init__(_id=id, label=label)
# Flask application object; all routes below attach to it.
app = Flask(__name__)

@app.route('/')
def root():
    """Serve the landing page."""
    return render_template("index.html")
@app.route('/annotate_data')
def annotate_data():
    """Serve the next unlabeled, not-yet-shown tweet for manual annotation.

    Walks `cursor_iter` over tweets matching the current regex pattern; when
    a pattern is exhausted (StopIteration), advances `pattern_index` to the
    next entry in `words` and rebuilds the cursor. Skips tweets that are
    already in `labeled_collection` or whose text was shown this session.
    NOTE(review): once every pattern is exhausted, words[pattern_index]
    raises IndexError — presumably acceptable for this annotation tool.
    """
    global cursor_iter, cursor, pattern_index, regx
    try:
        tweet = next(cursor_iter)
    except StopIteration:
        # Current pattern exhausted: move on to the next one.
        pattern_index += 1
        regx = re.compile(words[pattern_index], re.IGNORECASE)
        cursor = tweets_collection.find({"text": regx})
        cursor_iter = iter(cursor)
        tweet = next(cursor_iter)
    t = labeled_collection.find_one({"_id": tweet["_id"]})
    # Skip tweets already labeled or already shown in this session.
    while t is not None or tweet["text"] in visited:
        try:
            tweet = next(cursor_iter)
        except StopIteration:
            pattern_index += 1
            regx = re.compile(words[pattern_index], re.IGNORECASE)
            cursor = tweets_collection.find({"text": regx})
            cursor_iter = iter(cursor)
            tweet = next(cursor_iter)
        t = labeled_collection.find_one({"_id": tweet["_id"]})
    visited.add(tweet["text"])
    return render_template('data_annotation.html', tweet=tweet)
@app.route('/annotate', methods=["POST"])
def annotate():
    """Persist one annotation from the form, then return to the next tweet."""
    tweet_id = int(request.form.get("id"))
    tweet_label = request.form.get("is_related")
    labeled_collection.insert_one(LabeledTweet(tweet_id, tweet_label))
    return redirect("/annotate_data")
@app.route("/data_collection")
def data_collection():
return render_template("data_collection.html")
@app.route("/downloadOldTweets")
def downloadOldTweets():
# time.sleep(5)
d = get_tweets(max_tweets=10,num_tweets=10)
return {"msg":"Downloaded {} tweets".format(d)}
@app.route("/downloadRecentTweets")
def downloadRecentTweets():
# time.sleep(5)
rt = get_recent_tweets()
return {"msg":"Downloaded {} tweets".format(rt)}
@app.route("/downloadEuroStatData")
def downloadEuroStatData():
url = 'https://drive.google.com/uc?id=1A_h9ooiTkDSZDzPO4_5ZoYSD8ukQQoWq'
output = './eurostat_data.csv'
gdown.download(url, output, quiet=False)
return {"msg":"Downloaded Euro stat dataset"}
@app.route("/live_birth")
def live_birth():
return render_template("live_birth.html")
@app.route("/visualization")
def visualization():
return render_template("visualization.html")
@app.route("/sample_tweets")
def sample_tweets():
"""Fetch sample tweets from the database.
"""
regx = re.compile(".* baby coming soon .*|.* month pregnant.*" , re.IGNORECASE)
iter1 = iter(birth_collection.find({"text":regx}))
iter2 = iter(tweets_collection.find({}))
tweets = []
for i in range(5):
bt =next(iter1)
bt["label"] = "positive"
tweets.append(bt)
count = 0
while count <5:
tweet = next(iter2)
bt = birth_collection.find_one({"_id":tweet["_id"]})
if bt is None:
tweet["label"] = "negative"
tweets.append(tweet)
count +=1
random.shuffle(tweets)
return dict(tweets=tweets)
@app.route("/train_estimator")
def train_estimator_route():
model = train_estimator(dataset["x_train"],dataset["y_train"])
train_loss = compute_loss(model,dataset["x_train"],dataset["y_train"])
test_loss = compute_loss(model,dataset["x_test"],dataset["y_test"])
return {
"msg":"The model train sucessfull",
"train_loss": train_loss,
"test_loss": test_loss,
}
@app.route("/get_tweet_per_month_vis")
def get_tweet_per_month_vis():
months = dataset["ordered_months"]
tweets = dataset["num_tweets_perMonth"]
colors = [color_func() for i in range(len(tweets))]
colors = ["rgb({},{},{})".format(*c) for c in colors]
return dict(months=months, tweets=tweets, colors=colors)
@app.route("/get_birth_per_month_vis")
def get_birth_per_month_vis():
months = dataset["ordered_months"]
births = dataset["monthly_birth_rate"]
colors = [color_func() for i in range(len(months))]
colors = ["rgb({},{},{})".format(*c) for c in colors]
return dict(months=months, births=births, colors=colors)
@app.route("/worldcloud_vis")
def worldcloud_vis():
images = [
"/static/images/wc.png"
]
return dict(images = images)
|
import torch
from .common import front, safeSign
from ..device import device
import warnings
warnings.simplefilter("always",DeprecationWarning)
"""
Implementation from ternary connect :
https://arxiv.org/pdf/1510.03009.pdf
"""
class TernaryConnectDeterministic(torch.autograd.Function):
    r"""
    Deterministic ternary op: quantization in forward and a straight-through
    estimator in backward.
    ..
    Forward :
              {1  if x > 0.5
        x_t = {0  if |x| < 0.5
              {-1 if x < -0.5
    Backward :
        d x_t / d x = 1_{|r| =< 1}
    """
    @staticmethod
    def forward(ctx, input):
        # Keep the input so backward can build its clipping mask.
        ctx.save_for_backward(input)
        sign = safeSign(input)
        # (sign + sign(x - 0.5*sign)) / 2 maps x to {-1, 0, 1} with
        # thresholds at +/-0.5.
        return (sign + safeSign(input - 0.5 * sign)) / 2

    @staticmethod
    def backward(ctx, grad_output):
        input, = ctx.saved_tensors
        grad_input = grad_output.clone()
        # Straight-through estimator: zero the gradient where |x| > 1.
        grad_input[torch.abs(input) > 1.001] = 0
        return grad_input
class TernaryConnectStochastic(torch.autograd.Function):
    """
    Stochastic ternary op: quantization in forward and a straight-through
    estimator in backward.
    ..
    Forward :
        if x < 0:
            x_t = { 0 with prob of 1 + x
                  {-1 with prob of -x
        if x >= 0:
            x_t = { 0 with prob of 1 - x
                  { 1 with prob of x
    Backward :
        d x_t / d x = 1_{|r| =< 1}
    """
    @staticmethod
    def forward(ctx, input):
        ctx.save_for_backward(input)
        sign = safeSign(input)
        # Uniform noise decides whether each element rounds to 0 or to +/-1:
        # when z > |x| the sign term is cancelled, yielding 0.
        z = torch.rand_like(input, requires_grad=False)
        return sign - safeSign(input) * (z > torch.abs(input)).type(dtype=input.type())

    @staticmethod
    def backward(ctx, grad_output):
        input, = ctx.saved_tensors
        grad_input = grad_output.clone()
        # Straight-through estimator: zero the gradient where |x| > 1.
        grad_input[torch.abs(input) > 1.001] = 0
        return grad_input
def TernaryConnect(stochastic=False):
    """
    Return a torch.nn.Module wrapping a ternary quantization op.
    Useful for Sequential instantiation.
    :param stochastic: use the stochastic variant instead of the deterministic one.
    """
    if stochastic:
        act = TernaryConnectStochastic
    else:
        act = TernaryConnectDeterministic
    return front(act)
def TernaryDense(stochastic=False):
    """
    Return a Linear op with ternary quantization applied to its weight.

    :param stochastic: quantize the weight stochastically instead of
        deterministically.
    """
    class _TernaryDense(torch.autograd.Function):
        @staticmethod
        def forward(ctx, input, weight, bias=None):
            sign = torch.sign(weight)
            if stochastic:
                z = torch.rand_like(weight, requires_grad=False)
                # NOTE(review): sign - sign(z - |w|) yields +/-2 when z < |w|,
                # unlike TernaryConnectStochastic which yields +/-1 — confirm
                # this is intended.
                weight_t = sign - torch.sign(z - torch.abs(weight))
            else:
                # Deterministic thresholding at +/-0.5 (same formula as
                # TernaryConnectDeterministic).
                weight_t = (sign + torch.sign(weight - 0.5 * sign)) / 2
            ctx.save_for_backward(input, weight, weight_t, bias)
            output = torch.nn.functional.linear(input, weight_t, bias)
            return output

        @staticmethod
        def backward(ctx, grad_output):
            input, weight, weight_t, bias = ctx.saved_tensors
            grad_input = grad_weight = grad_bias = None
            # Gradients flow through the quantized weight (straight-through).
            if ctx.needs_input_grad[0]:
                grad_input = grad_output.mm(weight_t)
            if ctx.needs_input_grad[1]:
                grad_weight = grad_output.t().mm(input)
            if bias is not None and ctx.needs_input_grad[2]:
                grad_bias = grad_output.sum(0).squeeze(0)
            return grad_input, grad_weight, grad_bias
    return _TernaryDense
def TernaryConv2d(stochastic=True, stride=1, padding=1, dilation=1, groups=1):
    """
    .. warning:: **DEPRECATED**
    Return a conv2d autograd Function with the given parameters; the weight
    is ternarized before the convolution is applied.
    """
    warnings.warn("Deprecated conv op ! Huge cuda memory consumption due to torch.grad.cuda_grad.conv2d_input function.", DeprecationWarning, stacklevel=2)

    class _TernaryConv2d(torch.autograd.Function):
        @staticmethod
        def forward(ctx, input, weight, bias=None):
            sign = torch.sign(weight)
            if stochastic:
                z = torch.rand_like(weight, requires_grad=False)
                # NOTE(review): sign - sign(z - |w|) yields +/-2 when z < |w|
                # (same formula as TernaryDense, unlike TernaryConnectStochastic
                # which yields +/-1) — confirm this is intended.
                weight_t = sign - torch.sign(z - torch.abs(weight))
            else:
                # Deterministic thresholding at +/-0.5.
                weight_t = (sign + torch.sign(weight - 0.5 * sign)) / 2
            ctx.save_for_backward(input, weight, weight_t, bias)
            output = torch.nn.functional.conv2d(input, weight_t, bias=bias, stride=stride, padding=padding, dilation=dilation, groups=groups)
            return output

        @staticmethod
        def backward(ctx, grad_output):
            input, weight, weight_t, bias = ctx.saved_tensors
            grad_input = grad_weight = grad_bias = None
            if ctx.needs_input_grad[0]:
                grad_input = torch.nn.grad.conv2d_input(input.size(), weight_t, grad_output, stride=stride, padding=padding, dilation=dilation, groups=groups)
            if ctx.needs_input_grad[1]:
                grad_weight = torch.nn.grad.conv2d_weight(input, weight.shape, grad_output, stride=stride, padding=padding, dilation=dilation, groups=groups)
            if bias is not None and ctx.needs_input_grad[2]:
                # BUG FIX: the old sum/squeeze chain reduced the wrong axes for
                # an NCHW gradient; the bias gradient is the sum over the batch
                # and both spatial dimensions, one value per output channel.
                grad_bias = grad_output.sum(dim=(0, 2, 3))
            if bias is not None:
                return grad_input, grad_weight, grad_bias
            else:
                return grad_input, grad_weight
    return _TernaryConv2d
|
<filename>phconvert/smreader.py<gh_stars>10-100
#
# phconvert - Reference library to read and save Photon-HDF5 files
#
# Copyright (C) 2014-2015 <NAME> <<EMAIL>>
#
"""
SM Format written by <NAME>'s LabVIEW program in WeissLab us-ALEX setup
-----------------------------------------------------------------------------
A SM file is composed by two parts:
- XXX bytes: a header (usually 166 bytes)
- the remaining bytes: the data
- 26 bytes of trailing cruft
The data is a list of 96-bit records that contain for each ph (count)
the detection time and the detector.
The first 64 bits of each record are the photon time, and the remaining 32 bits
are the detector.
::
64-bit ph time detector
------------------- -------
| | | |
XXXX XXXX - XXXX XXXX - XXXX XXXX
bit: 0 32 0 32 0 32
'-------' '-------' '-------'
data[0] data[1] data[2]
The data is in unsigned big endian (>) format.
The proper way to read the data is to read the byte-stream and interpret
it as a record array in which each element is 12 bytes.
"""
import numpy as np
class Decoder:
def __init__(self, buffer):
self.buff = buffer
self.cursor = 0
def readscalar(self, size=4, basetype='u', endiness='>', inplace=False):
dtype = endiness + basetype + str(size)
buffer = self.buff[self.cursor:self.cursor + size]
if not inplace:
self.cursor += size
return np.frombuffer(buffer=buffer, dtype=dtype).squeeze()
def readstring(self, size_max=256, inplace=False, **kwargs):
orig_cursor = self.cursor
size = int(self.readscalar(**kwargs))
if size > size_max:
print('Big string: size limited to %d.' % size_max)
size = size_max
string = self.buff[self.cursor:self.cursor + size]
self.cursor = orig_cursor if inplace else (self.cursor + size)
return string
def decode_header(data):
    """
    Decode the convoluted header of the SM file.

    Returns
        A 2-element tuple with header-size and a list of channel labels
    """
    decoder = Decoder(data)
    version = decoder.readscalar()
    string1 = decoder.readstring()  # -> Comment
    string2 = decoder.readstring()  # -> 'Simple'
    pointer1 = decoder.readscalar()  # -> Pointer to 8 bytes before the end
    string3 = decoder.readstring()  # -> File section type
    magic1 = decoder.readscalar()
    magic2 = decoder.readscalar()
    # Three column descriptors follow: name, 64-bit float resolution and
    # offset (plus one trailing scalar for the first two columns).
    col1_name = decoder.readstring()
    col1_resolution = decoder.readscalar(size=8, basetype='f')
    col1_offset = decoder.readscalar(size=8, basetype='f')
    col1_bho = decoder.readscalar()
    col2_name = decoder.readstring()
    col2_resolution = decoder.readscalar(size=8, basetype='f')
    col2_offset = decoder.readscalar(size=8, basetype='f')
    col2_bho = decoder.readscalar()
    col3_name = decoder.readstring()
    col3_resolution = decoder.readscalar(size=8, basetype='f')
    col3_offset = decoder.readscalar(size=8, basetype='f')
    num_channels = decoder.readscalar()
    ch_labels = [decoder.readstring() for _ in range(num_channels)]
    # After reading the labels the cursor sits at the first data record,
    # i.e. it equals the header size.
    return decoder.cursor, ch_labels
def load_sm(fname, return_labels=False):
    """Read an SM data file.

    fname         -- path of the .sm file to read
    return_labels -- when True, also return the channel labels decoded
                     from the file header

    Return
        timestamps, detectors and optionally a list of detectors labels
    """
    with open(fname, 'rb') as f:
        fulldata = f.read()

    # Split the byte stream into header and record data.
    header_size, labels = decode_header(fulldata)
    rawdata = fulldata[header_size:]

    # Remove the end of the file: a 4-byte field, the 'End Of Run' marker
    # and a 12-byte field of trailing cruft.
    end_field1 = 4
    end_str = 'End Of Run'
    end_field2 = 12
    valid_size = len(rawdata) - end_field1 - len(end_str) - end_field2

    # Description of the record element in the file: 64-bit big-endian
    # timestamp followed by a 32-bit big-endian detector id.
    sm_dtype = np.dtype([('timestamp', '>i8'), ('detector', '>u4')])

    # View of the binary data as an array (no copy performed)
    data = np.frombuffer(rawdata[:valid_size], dtype=sm_dtype).copy()

    # Swap byte order inplace to little endian
    # NOTE(review): ndarray.newbyteorder() was removed in NumPy 2.0; there
    # this would need data.view(data.dtype.newbyteorder()) — confirm the
    # supported NumPy version range.
    data = data.byteswap(True).newbyteorder()

    if return_labels:
        return data['timestamp'], data['detector'], labels
    else:
        return data['timestamp'], data['detector']
|
# -*- coding: utf-8 -*-
# Copyright (C) 2019 - TODAY <NAME> - Akretion
import os
import sys
from os import path
from xmldiff import main
sys.path.append(path.join(path.dirname(__file__), '..', 'nfelib'))
from nfelib.v4_00 import leiauteNFe_sub as nfe_sub
from nfelib.v4_00 import retEnviNFe as nfe
from nfelib.v4_00 import retInutNFe
from nfelib.v4_00 import retConsStatServ
from nfelib.v4_00 import retConsSitNFe
from nfelib.v4_00 import distDFeInt
from nfelib.v4_00 import retDistDFeInt
from nfelib.v4_00 import retEnvEvento
from nfelib.v4_00 import retEnvEventoCancNFe
from nfelib.v4_00 import retEnvCCe
from nfelib.v4_00 import retEnvConfRecebto
def test_in_out_leiauteNFe():
    """Round-trip every sample NFe XML through the sub-parser and assert an
    empty diff between input and re-exported output."""
    path = 'tests/nfe/v4_00/leiauteNFe'
    for filename in os.listdir(path):
        # First filter out the root tag and any signature element:
        subtree = nfe_sub.parsexml_('%s/%s' % (path, filename,))
        inputfile = 'tests/input.xml'
        subtree.write(inputfile, encoding='utf-8')
        # Now import the invoice XML and turn it into a Python object:
        obj = nfe_sub.parse(inputfile)  # '%s/%s' % (path, filename,)
        # We can then work on the object and perform operations such as:
        obj.infNFe.emit.CNPJ
        outputfile = 'tests/output.xml'
        with open(outputfile, 'w') as f:
            nfe_sub.export(obj, nfeProc=False, stream=f)
        diff = main.diff_files(inputfile, outputfile)
        print(diff)
        assert len(diff) == 0

def test_in_out_leiauteInutNFe():
    """Round-trip every sample InutNFe XML and assert an empty diff."""
    path = 'tests/nfe/v4_00/leiauteInutNFe'
    for filename in os.listdir(path):
        inputfile = '%s/%s' % (path, filename,)
        doc = retInutNFe.parsexml_(inputfile, None)
        obj = retInutNFe.TInutNFe.factory().build(doc.getroot())
        outputfile = 'tests/output.xml'
        with open(outputfile, 'w') as f:
            obj.export(f, level=0, name_='inutNFe',
                       namespacedef_='xmlns="http://www.portalfiscal.inf.br/nfe"')
        diff = main.diff_files(inputfile, outputfile)
        print(diff)
        assert len(diff) == 0
def test_stat():
    """Build a service-status query object and print its XML."""
    raiz = retConsStatServ.TConsStatServ(
        versao='4.00',
        tpAmb='1',
        cUF='SP',
        xServ='STATUS',
    )
    raiz.export(sys.stdout, 0)

def test_cons_sit():
    """Build an NFe situation query object and print its XML."""
    raiz = retConsSitNFe.TConsSitNFe(
        versao='4.00',
        tpAmb='1',
        xServ='CONSULTAR',
        chNFe='NFe35180803102452000172550010000474641681223493',
    )
    raiz.export(sys.stdout, 0)

def test_distDFe():
    """Smoke-test the DFe distribution type factories."""
    distDFeInt.distNSUType.factory()
    distDFeInt.consNSUType.factory()
    distDFeInt.consChNFeType.factory()
    distDFeInt.distDFeInt()
    retDistDFeInt.retDistDFeInt.factory()

def test_evento_generico():
    """Build a generic event envelope, print it, and construct the response type."""
    raiz = retEnvEvento.TEnvEvento(versao="1.00", idLote='42')
    raiz.export(sys.stdout, 0)
    retEnvEvento.TRetEnvEvento()

def test_evento_cancelamento():
    """Smoke-test the cancellation event types."""
    retEnvEventoCancNFe.TEvento()
    retEnvEventoCancNFe.infEventoType()
    retEnvEventoCancNFe.detEventoType()

def test_cce():
    """Smoke-test the correction-letter (CCe) event types."""
    retEnvCCe.infEventoType()
    retEnvCCe.detEventoType()
def test_in_out_leiauteCCe():
    """Round-trip every sample CCe XML and assert an empty diff."""
    path = 'tests/cce/v1_00/leiauteCCe'
    for filename in os.listdir(path):
        inputfile = '%s/%s' % (path, filename,)
        # NOTE(review): parsing uses retInutNFe.parsexml_ even though the
        # objects are built with retEnvCCe — presumably any module's
        # parsexml_ works here; confirm this is intentional.
        doc = retInutNFe.parsexml_(inputfile, None)
        obj = retEnvCCe.TEvento.factory().build(doc.getroot())
        outputfile = 'tests/output.xml'
        with open(outputfile, 'w') as f:
            obj.export(f, level=0, name_='evento',
                       namespacedef_='xmlns="http://www.portalfiscal.inf.br/nfe"')
        diff = main.diff_files(inputfile, outputfile)
        print(diff)
        assert len(diff) == 0

def test_mde():
    """Smoke-test the delivery-confirmation (MDe) event types."""
    retEnvConfRecebto.TEvento()
    retEnvConfRecebto.infEventoType()
    retEnvConfRecebto.detEventoType()
    retEnvConfRecebto.tpEventoType('210200')
    retEnvConfRecebto.descEventoType('Confirmacao da Operacao')

def test_init_all():
    """Instantiate every exported generated class of each schema module."""
    for mod in [nfe, retInutNFe, distDFeInt, retDistDFeInt, retEnvEvento,
                retEnvEventoCancNFe, retEnvCCe, retEnvConfRecebto]:
        for class_name in mod.__all__:
            cls = getattr(mod, class_name)
            if issubclass(cls, mod.GeneratedsSuper):
                cls()
|
# author: WatchDogOblivion
# description: TODO
# WatchDogs SMTP Service
import os
import smtplib
import mimetypes
from email import encoders
from email import message # pylint: disable=unused-import
from email.header import Header
from email.mime.text import MIMEText
from email.mime.audio import MIMEAudio
from email.mime.base import MIMEBase
from email.mime.image import MIMEImage
from email.mime.multipart import MIMEMultipart
from email.mime.nonmultipart import MIMENonMultipart # pylint: disable=unused-import
from watchdogs.base.models import AllArgs, Common # pylint: disable=unused-import
from watchdogs.mail.parsers import SMTPArgs
from watchdogs.utils.Constants import (COMMA, RB, FS, APPLICATION_OCTET_STREAM, CONTENT_DISPOSITION, ASCII,
FROM, TO, SUBJECT, MIME_VERSION, ATTACHMENT)
class SMTPService(Common):
    """Builds MIME messages (with optional attachments) and sends them via SMTP."""

    @staticmethod
    def getMimeAttachment(filepath):
        #type: (str) -> MIMENonMultipart | message.Message
        """Build a MIME part for *filepath*, guessing its content type.

        Returns None when the path is not a regular file.
        """
        if not os.path.isfile(filepath):
            return None
        ctype, encoding = mimetypes.guess_type(filepath)
        if ctype is None or encoding is not None:
            # Unknown or compressed type: fall back to a generic byte stream.
            ctype = APPLICATION_OCTET_STREAM
        maintype, subtype = ctype.split('/', 1)
        # FIX: use context managers so file handles are closed even if a MIME
        # constructor raises (the original leaked handles on error).
        if maintype == 'text':
            with open(filepath) as openedFile:
                mimeAttachment = MIMEText(openedFile.read(), _subtype=subtype)
        elif maintype == 'image':
            with open(filepath, RB) as openedFile:
                mimeAttachment = MIMEImage(openedFile.read(), _subtype=subtype)
        elif maintype == 'audio':
            with open(filepath, RB) as openedFile:
                mimeAttachment = MIMEAudio(openedFile.read(), _subtype=subtype)
        else:
            with open(filepath, RB) as openedFile:
                mimeAttachment = MIMEBase(maintype, subtype)
                mimeAttachment.set_payload(openedFile.read())
            # Arbitrary binary payloads must be base64-encoded for transport.
            encoders.encode_base64(mimeAttachment)
        mimeAttachment.add_header(CONTENT_DISPOSITION, ATTACHMENT, filename=filepath.split(FS)[-1])
        return mimeAttachment

    @staticmethod
    def getMimeMessage(allArgs):
        #type: (AllArgs) -> MIMEMultipart
        """Assemble the outgoing message: multipart when attachments exist,
        plain text otherwise. Standard headers are added in both cases."""
        sMTPArgs = allArgs.getArgs(SMTPArgs)
        attachments = sMTPArgs.attachments
        if (attachments):
            mimeMessage = MIMEMultipart()
            mimeMessage.attach(MIMEText(sMTPArgs.body, sMTPArgs.bodyType))
            for filepath in attachments:
                mimeMessage.attach(SMTPService.getMimeAttachment(filepath))
        else:
            mimeMessage = MIMEText(sMTPArgs.body, sMTPArgs.bodyType)
        mimeMessage[MIME_VERSION] = Header(sMTPArgs.mimeVersion, ASCII)
        mimeMessage[FROM] = Header(sMTPArgs.senderEmail, ASCII)
        mimeMessage[TO] = Header(COMMA.join(sMTPArgs.recipientEmails), ASCII)
        mimeMessage[SUBJECT] = Header(sMTPArgs.subject, ASCII)
        mimeMessage.preamble = 'You will not see this in a MIME-aware mail reader.\n'
        return mimeMessage

    @staticmethod
    def sendEmail(allArgs):
        #type: (AllArgs) -> None
        """Send the assembled message; SMTP errors are reported, not raised."""
        sMTPArgs = allArgs.getArgs(SMTPArgs)
        try:
            sMTP = smtplib.SMTP(sMTPArgs.remoteHost, sMTPArgs.remotePort)
            sMTP.set_debuglevel(1)
            sMTP.login(sMTPArgs.name, sMTPArgs.password)
            mimeMessage = SMTPService.getMimeMessage(allArgs).as_string()
            sMTP.sendmail(sMTPArgs.senderEmail, sMTPArgs.recipientEmails, mimeMessage)
            sMTP.close()
            print("Successfully sent email")
        except smtplib.SMTPException:
            print("Error: unable to send email")
|
import sublime
import sublime_plugin
import re
import sys
from time import time
# Ideas taken from C0D312, nizur & tito in http://www.sublimetext.com/forum/viewtopic.php?f=2&t=4589
# Also, from https://github.com/SublimeText/WordHighlight/blob/master/word_highlight.py
def plugin_loaded():
    """Initialize plugin preferences once the Sublime Text API is ready.

    Defines a global ``Pref`` holder whose ``load()`` reads the plugin's
    settings, instantiates it (rebinding the global name from the class to
    the singleton instance), and re-runs ``load()`` whenever the settings
    file changes.
    """
    global Pref
    class Pref:
        def load(self):
            # 'Pref' resolves to the module-level global here; after the
            # 'Pref = Pref()' rebinding below, these assignments set
            # attributes on the singleton instance.
            Pref.display_file = settings.get('display_file', False)
            Pref.display_class = settings.get('display_class', False)
            Pref.display_function = settings.get('display_function', True)
            Pref.display_arguments = settings.get('display_arguments', False)
            # Debounce window (seconds) between selection-change updates.
            Pref.wait_time = 0.12
            Pref.time = time()
    # 'settings' is captured by load() as a closure over this function's
    # scope — it is assigned after the class body but before load() runs.
    settings = sublime.load_settings('Function Name Display.sublime-settings')
    Pref = Pref()
    Pref.load()
    settings.add_on_change('reload', lambda:Pref.load())
# Sublime Text 2 does not call plugin_loaded() automatically, so invoke it
# here on Python 2 interpreters.
if sys.version_info[0] == 2:
    plugin_loaded()

# Strips access/definition keywords (e.g. "public static function ") from the
# start of a declaration line, leaving just the name and signature.
# Raw string avoids the invalid-escape-sequence warning for '\s' that the
# previous non-raw literal produced on modern Python.
clean_name = re.compile(r'^\s*(public\s+|private\s+|protected\s+|static\s+|function\s+|def\s+)+', re.I)
class FunctionNameStatusEventHandler(sublime_plugin.EventListener):
    """Shows the class/function enclosing the caret in the status bar,
    debounced by Pref.wait_time to avoid churn while the caret moves."""

    # on_activated_async seems to not fire on startup
    def on_activated(self, view):
        """Force a refresh when a view gains focus."""
        Pref.time = time()
        # Reset the cached row so the next display call always recomputes.
        view.settings().set('function_name_status_row', -1)
        sublime.set_timeout(lambda:self.display_current_class_and_function(view, 'activated'), 0)

    # why is it here?
    def on_modified(self, view):
        # Only records the edit time; the actual refresh happens on
        # selection change.
        Pref.time = time()

    # could be async, but ST2 does not support that
    def on_selection_modified(self, view):
        """Refresh immediately if enough time has passed, otherwise schedule
        a delayed refresh (debounce)."""
        now = time()
        if now - Pref.time > Pref.wait_time:
            sublime.set_timeout(lambda:self.display_current_class_and_function(view, 'selection_modified'), 0)
        else:
            sublime.set_timeout(lambda:self.display_current_class_and_function_delayed(view), int(1000*Pref.wait_time))
        Pref.time = now

    def display_current_class_and_function_delayed(self, view):
        """Run the refresh only if no newer event reset the timer meanwhile."""
        now = time()
        if (now - Pref.time >= Pref.wait_time):
            self.display_current_class_and_function(view, 'selection_modified:delayed')

    # display the current class and function name
    def display_current_class_and_function(self, view, where):
        """Compute and set the 'function' status-bar entry for the first
        selection region; clears it when no enclosing symbol is found."""
        # print("display_current_class_and_function running from " + where)
        view_settings = view.settings()
        if view_settings.get('is_widget'):
            return
        for region in view.sel():
            region_row, region_col = view.rowcol(region.begin())
            # Skip recomputation when the caret stayed on the same row.
            if region_row != view_settings.get('function_name_status_row', -1):
                view_settings.set('function_name_status_row', region_row)
            else:
                return
            s = ""
            found = False
            fname = view.file_name()
            if Pref.display_file and None != fname:
                s = fname + " "
            # Look for any classes
            if Pref.display_class:
                class_regions = view.find_by_selector('entity.name.type.class')
                # Walk backwards: the nearest class declared at or above the
                # caret's row is the enclosing one.
                for r in reversed(class_regions):
                    row, col = view.rowcol(r.begin())
                    if row <= region_row:
                        s += view.substr(r)
                        found = True
                        break;
            # Look for any functions
            if Pref.display_function:
                function_regions = view.find_by_selector('meta.function - meta.function.inline')
                if function_regions:
                    for r in reversed(function_regions):
                        row, col = view.rowcol(r.begin())
                        if row <= region_row:
                            if Pref.display_class and s:
                                s += "::"
                            # First line of the region holds the declaration.
                            lines = view.substr(r).splitlines()
                            name = clean_name.sub('', lines[0])
                            if Pref.display_arguments:
                                s += name.strip()
                            else:
                                # Without arguments: strip the parameter list,
                                # and for C++ optionally the Class:: qualifier.
                                if 'C++' in view.settings().get('syntax'):
                                    if Pref.display_class or len(name.split('(')[0].split('::'))<2:
                                        s += name.split('(')[0].strip()
                                    else:
                                        s += name.split('(')[0].split('::')[1].strip()
                                else:
                                    s += name.split('(')[0].split(':')[0].strip()
                            found = True
                            break
            if not found:
                view.erase_status('function')
                fname = view.file_name()
                if Pref.display_file and None != fname:
                    view.set_status('function', fname)
            else:
                view.set_status('function', s)
            # Only the first selection region is considered.
            return
        # No selection at all: clear the status entry.
        view.erase_status('function')
|
<reponame>AuroreBussalb/meta-analysis-statistical-tools<gh_stars>1-10
# -*- coding: utf-8 -*-
"""
.. module:: perform_meta_analysis
:synopsis: module performing a meta-analysis
.. moduleauthor:: <NAME> <<EMAIL>>
"""
import numpy as np
import scipy.stats as scp
import pandas as pd
import warnings
import matplotlib.pyplot as plt
def _effect_size_ppc(n_treatment, n_control, mean_post_test_treatment, mean_pre_test_treatment, mean_pre_test_control, mean_post_test_control,
std_pre_test_treatment, std_pre_test_control):
"""Computes the pre post control effect size (<NAME> (2008), also called the effect size between "Estimating Effect Sizes From Pretest-Posttest Control Group Designs
and under a random effects model", Organizational Research Methods (Equation 8)).
Parameters
----------
n_treatment: int
Number of patients included in the treatment group.
n_control: int
Number of patients included in the control group.
mean_post_test_treatment: float
Mean score after the treatment.
mean_pre_test_treatment: float
Mean score before the treatment.
mean_pre_test_control: float
Mean score before the treatment in the control group.
mean_post_test_control: float
Mean score after the treatment in the control group.
std_pre_test_treatment: float
Standard deviation of the mean score before the treatment.
std_post_test_treatment: float
Standard deviation of the mean score after the treatment.
Returns
-------
effect_size: float
Value estimating the efficacy of the treatment.
If it's negative, the result is in favor of the treatment.
"""
S_within = np.sqrt(((n_treatment - 1)*std_pre_test_treatment**2 + (n_control - 1)*std_pre_test_control**2)/
(n_treatment + n_control - 2))
d = ((mean_post_test_treatment - mean_pre_test_treatment) - (mean_post_test_control - mean_pre_test_control))/S_within
# Correction factor for small sample size. This correction factor is close to 1 unless the degree of freedom is very
# small (<10), see Borenstein, Introdution to meta-analysis, 2009.
if (n_treatment + n_control - 2) < 10:
warnings.warn('Since the sample size is too small, a correction factor is applied to the effect size')
correction_factor = 1 - (3/(4*(n_treatment + n_control - 2) - 1))
effect_size = d*correction_factor
else:
effect_size = d
return effect_size
def _standard_error_effect_size(n_treatment, n_control, effect_size, pre_post_correlation):
"""<NAME> (2008) "Estimating Effect Sizes From Pretest-Posttest Control Group Designs and under
a random effects model", Organizational Research Methods (Equation 25).
Parameters
----------
n_treatment: int
Number of patients included in the treatment group.
n_control: int
Number of patients included in the control group.
effect_size: float
Value estimating the efficacy of the treatment.
If it's negative, the result is in favor of the treatment.
pre_post_correlation: float
Pearson correlation of the pre-test and post-test values (i.e the pooled within-groups Pearson correlation.
Returns
-------
standard_error_ES: float
Standard error of the effect size.
variance_ES: float
Variance of the effect size.
"""
# Correction factor for small sample size. This correction factor is close to 1 unless the degree of freedom is very
# small (<10), see Borenstein, Introdution to meta-analysis, 2009.
if (n_treatment + n_control - 2) < 10:
correction_factor = 1 - (3/(4*(n_treatment + n_control - 2) - 1))
warnings.warn('Since the sample size is too small, a correction factor is applied to the variance of the effect size')
else:
correction_factor = 1
# Variance
variance_ES = (2*(correction_factor**2)*(1 - pre_post_correlation)*((n_treatment + n_control)/
(n_treatment*n_control))*((n_treatment + n_control - 2)/(n_treatment + n_control - 4))*
(1 + ((effect_size**2)/(2*(1 - pre_post_correlation)*((n_treatment + n_control)/
(n_treatment*n_control))))) - effect_size**2)
# Standard Error
standard_error_ES = np.sqrt(variance_ES)
return standard_error_ES
def run_meta_analysis(df, scale_to_reverse=None, pre_post_correlation=0.5):
    """Performs a meta analysis with the formulae described in Morris (2008) "Estimating Effect Sizes From Pretest-
    Posttest Control Group Designs and under a random effects model", *Organizational Research Methods* and in Borenstein (2009)
    *Introduction to meta-analysis*. These formulae are the same as the ones used in Cortese et al., 2016.
    A negative effect size favours the treatment.

    Note: ``df`` is modified in place (effect-size, standard-error, confidence-interval
    and weight columns are added to it).

    Parameters
    ----------
    df: pandas.DataFrame
        Parents, teachers or clinicians ratings required to perform the meta-analysis.
        This dataframe corresponds to one of those obtained with the ``import_csv_for_meta_analysis`` module.
        If you want to run the meta-analysis on parent assessments enter ``df_values_parents``, to run it on teacher assessments
        enter ``df_values_teachers``, and to run on clinicians assessments, run ``df_values_clinicians``.
        Each row corresponds to a study, the disease symptoms are assessed by parents, teachers, or clinicians.
        Columns are: mean_post_test_treatment, mean_post_test_control, mean_pre_test_treatment, mean_pre_test_control, n_treatment,
        n_control, std_post_test_treatment, std_post_test_control, std_pre_test_treatment, std_pre_test_control, raters for each study.
    scale_to_reverse: list of str, optional
        List of strings listing the clinical scales having a positive correlation with symptoms of the disease;
        i.e increasing when a patient gets better. Defaults to an empty list.
    pre_post_correlation: float, default = 0.5
        Pearson correlation of the pre-test and post-test values (i.e the pooled within-groups Pearson correlation). Set to 0.5 by
        default (see Cuijpers et al., 2016 and Balk et al., 2012 "Empirical Assessment of Within-Arm Correlation Imputation in Trials
        of Continuous Outcomes").

    Returns
    -------
    df_results_per_study: pandas.DataFrame
        Results per study.
        Rows of the dataframe correspond to the studies, columns correspond to the effect size of the study, its standard
        error, its 95% confidence interval, and the weight of the study.
    df_results: pandas.DataFrame
        Global results.
        It contains the summary effect, its 95% confidence interval, its variance, its standard error, its p-value,
        the between studies variance (Tau²), the heterogeneity (I²), its p-value, and the Chi2 value.
    effect_sizes: pandas.Series
        The per-study effect sizes (the ``effect_size`` column added to ``df``).

    Notes
    -----
    Effect sizes computed for each study correspond to the effect sizes between subjects. Thus, the studies included in the meta-analysis
    must be controlled and provide pre and post scores for treatment and control groups.
    """
    # Avoid the shared mutable-default-argument pitfall; behaviour is
    # unchanged for callers relying on the previous default of [].
    if scale_to_reverse is None:
        scale_to_reverse = []
    # Creation of the dataframe for total results
    index = ['Results']
    df_results = pd.DataFrame(index=index)
    # Compute the effect size
    df['effect_size'] = df[
        ['n_treatment', 'n_control', 'mean_post_test_treatment',
         'mean_pre_test_treatment', 'mean_pre_test_control',
         'mean_post_test_control', 'std_pre_test_treatment', 'std_pre_test_control']
    ].apply(lambda row:_effect_size_ppc(**row), axis=1)
    # Compute the standard error of the effect size
    df['standard_error_ES'] = df[
        ['n_treatment', 'n_control', 'effect_size']
    ].apply(lambda row:_standard_error_effect_size(row['n_treatment'], row['n_control'],
                                                   row['effect_size'], pre_post_correlation), axis=1)
    # Check if all the scales measure the disease severity the same way (high score = more symptoms) and homogenize.
    # .loc avoids the previous chained indexing (df['effect_size'][mask] *= -1),
    # which may silently operate on a copy in recent pandas versions.
    for scale_name in scale_to_reverse:
        df.loc[df['score_name'] == scale_name, 'effect_size'] *= -1
    # All the following equations come from Borenstein (2009) Introduction to Meta-Analysis
    # 95% Confidence interval (Equations 8.3 and 8.4)
    df['confidence_interval_of_the_ES'] = df[
        ['effect_size', 'standard_error_ES']].apply(lambda row: (
            row['effect_size'] - 1.96*row['standard_error_ES'],
            row['effect_size'] + 1.96*row['standard_error_ES']), axis=1)
    # Compute the inverse of the variance = weight under a fixed effect model (Equation 11.2)
    df['weight_fixed_model'] = 1/(df['standard_error_ES']**2)
    # Computation of Tau²: between studies variance
    ## Compute degrees of freedom (Equation 12.4)
    degrees_of_freedom = len(df.index) - 1
    ## Compute Q (Equation 12.3)
    Q = (df['weight_fixed_model']*df['effect_size']**2).sum() - ((df['weight_fixed_model']*df['effect_size']).sum())**2/df['weight_fixed_model'].sum()
    df_results['Chi2'] = Q
    ## P value of the heterogeneity
    # To know if heterogeneity is statistically significant, we can use Q and degrees of freedom
    # Null hypothesis: all studies share a common effect size
    # Under the null hypothesis, Q will follow a central chi-squared distribution
    df_results['p-value Heterogeneity'] = 1 - scp.chi2.cdf(Q, degrees_of_freedom)
    ## Compute C (Equation 12.5)
    C = df['weight_fixed_model'].sum() - ((df['weight_fixed_model']**2).sum()/df['weight_fixed_model'].sum())
    ## Tau² (Equation 12.2)
    # When Tau2 is negative, we put it at zero (this negative value is due to sampling issues,
    # when the observed dispersion is less than we would expect by chance, see Borenstein)
    Tau2 = (Q - degrees_of_freedom)/C
    if Tau2 < 0:
        Tau2 = 0
    df_results['Tau2'] = Tau2
    # Compute the weight of each study under a random effects model
    ## Compute the weights (Equation 12.6)
    df['weight'] = 1/(df['standard_error_ES']**2 + Tau2)
    ## In percentage
    df['percentage_weight'] = (df['weight']*100)/df['weight'].sum()
    # Summary effect (Equation 12.7)
    df_results['Summary Effect'] = (df['effect_size']*df['percentage_weight']).sum()/df['percentage_weight'].sum()
    # Variance and SE of the summary effect (Equations 12.8 and 12.9)
    df_results['Variance Summary Effect'] = 1/df['weight'].sum()
    df_results['Standard Error Summary Effect'] = np.sqrt(df_results['Variance Summary Effect'])
    # 95% Confidence interval (Equations 12.10 and 12.11)
    df_results['95% Confidence Interval of the Summary Effect'] = df_results[
        ['Summary Effect', 'Standard Error Summary Effect']].apply(lambda row: (
            row['Summary Effect'] - 1.96*row['Standard Error Summary Effect'],
            row['Summary Effect'] + 1.96*row['Standard Error Summary Effect']), axis=1)
    # P value for the summary effect (Equations 12.12 and 12.14)
    # Null hypothesis: control group and treatment group have no different effect
    z = df_results['Summary Effect']/df_results['Standard Error Summary Effect']
    df_results['p-value'] = 2*(1 - scp.norm.cdf(abs(z)))
    # Heterogeneity (Equation 16.9)
    I2 = (((Q - degrees_of_freedom))/Q)*100
    if I2 < 0:
        I2 = 0
    df_results['Heterogeneity'] = I2
    # Creation of the dataframe with results by studies
    df_results_per_study = pd.DataFrame({'Year': df['year'],
                                         'Effect size': df['effect_size'],
                                         'Standard Error of the ES': df['standard_error_ES'],
                                         '95% Confidence interval of the ES': df['confidence_interval_of_the_ES'],
                                         'Weight': df['percentage_weight']},
                                        index=df.index)
    return df_results_per_study, df_results, df['effect_size']
if __name__ == '__main__':
    # NOTE(review): 'meta_analysis' is not defined anywhere in this module,
    # so running this file as a script raises NameError. It looks like a
    # leftover from an older API (run_meta_analysis now takes a DataFrame,
    # not a CSV path and a rater name) — confirm the intent and fix.
    meta_analysis('values_total_meta_analysis.csv', 'Parents')
def forest_plot(df_results_per_study, df_results):
    """Creates a forest plot.

    Parameters
    ----------
    df_results_per_study: pandas.DataFrame
        Results per study.
        Dataframe obtained after performing the meta-analysis with ``run_meta_analysis``.
        Rows of the dataframe correspond to the studies, columns correspond to the effect size of the study, its standard
        error, its 95% confidence interval, and the weight of the study.
    df_results: pandas.DataFrame
        Global results.
        It contains the summary effect, its 95% confidence interval, its variance, its standard error, its p-value,
        the between studies variance (Tau²), the heterogeneity (I²), its p-value, and the Chi2 value.

    Returns
    -------
    forest_plot: matplotlib.figure
        Graphical representation of the meta-analysis' results.
        Representation of the effect size and its 95% confidence interval for each study.
    """
    # Sort data so that studies with bigger effect size are in the top of the forest plot
    df_results_per_study = df_results_per_study.sort_values(df_results_per_study.columns[1], ascending=[True])
    # Conversion to lists for the plotting
    ES = df_results_per_study['Effect size'].tolist()
    weight = df_results_per_study['Weight'].tolist()
    names = df_results_per_study.index.tolist()
    # NOTE(review): taking i[0] assumes the index rows are tuples
    # (e.g. a MultiIndex whose first level is the study name) — confirm
    # against the dataframe produced upstream.
    names = [i[0] for i in names]
    # Preparing for the plotting
    ## Confidence Interval
    lower_limit = []
    upper_limit = []
    for confidence_interval in df_results_per_study['95% Confidence interval of the ES']:
        lower_limit.append(confidence_interval[0])
        upper_limit.append(confidence_interval[1])
    # NOTE(review): [0] on a Series indexed by 'Results' relies on
    # positional fallback indexing, which is deprecated in recent pandas —
    # consider .iloc[0].
    lower_limit_summary = df_results['95% Confidence Interval of the Summary Effect'][0][0]
    upper_limit_summary = df_results['95% Confidence Interval of the Summary Effect'][0][1]
    # Add the confidence interval of the summary effect to others
    lower_limit.extend([lower_limit_summary])
    lower_limit.reverse() # the summary effect must be at the bottom
    upper_limit.extend([upper_limit_summary])
    upper_limit.reverse()
    # Add the summary effect at the other effects size
    names.append('Summary Effect')
    names.reverse()
    ES.extend(df_results['Summary Effect'])
    ES.reverse()
    # Make the effect size representation more visible (squares are bigger)
    weight = [i * 5 for i in weight]
    # Graphic
    y = np.array(range(1,len(names)+1))
    forest_plot = plt.figure()
    plt.yticks(y, names)
    # Vertical line in zero
    plt.axvline(0, color = 'k')
    # Plot Confidence Interval
    for i in range(0,len(names)):
        plt.plot([lower_limit[i], upper_limit[i]], [y[i],y[i]], color = 'g')
    # Plot effect sizes: squares for studies (area scaled by weight),
    # a diamond for the summary effect at the bottom (index 0 after reversal).
    plt.scatter(ES[1:len(names)], y[1:len(names)], s=weight[1:len(names)],
                marker = 's', color = 'b')
    plt.scatter(ES[0], y[0], s=100, marker = 'D', color = 'b')
    plt.xlabel('Effect size')
    plt.title('Standard Mean Difference, 95% Confidence Interval', fontweight = "bold")
    return forest_plot
|
<reponame>bogdanova1/stepik_ui<filename>HomeWork3.py
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
import random
import string
import sys
import traceback
import locators as _locators
def test_registration(email, password):
    """Register a new account, verify the confirmation alert, then log out
    and check the browser returns to the main page.

    Prints a colored pass/fail report instead of raising, so a batch of
    tests keeps running after a failure.
    """
    try:
        # Arrange
        # NOTE(review): if webdriver.Chrome() itself raises, 'browser' is
        # never bound and the finally block fails with NameError — confirm
        # whether that case matters here.
        browser = webdriver.Chrome()
        browser.implicitly_wait(5)
        browser.get(_locators.main_page_link)
        # Act: open the login page and submit the registration form
        browser.find_element_by_id(_locators.login).click()
        browser.find_element_by_id("id_registration-email").send_keys(email)
        browser.find_element_by_id("id_registration-password1").send_keys(password)
        browser.find_element_by_id("id_registration-password2").send_keys(password)
        browser.find_element_by_css_selector("button[name = 'registration_submit']").click()
        # Assert: success alert shown, logout returns to the main page
        message = browser.find_element_by_class_name("alertinner")
        assert "Спасибо за регистрацию!" in message.text, "No message about registration"
        browser.find_element_by_id(_locators.logout).click()
        assert browser.current_url == _locators.main_page_link, "No return to main page"
    except AssertionError:
        # Yellow report for assertion (expected-behavior) failures.
        print("\033[93mTest failed")
        print("Traceback:")
        for tbItem in traceback.format_tb(sys.exc_info()[2]):
            print(tbItem)
        print("AssertionError:" + str(sys.exc_info()[1]))
        print("\033[0m")
    except Exception:
        # Red report for unexpected errors.
        print("\033[91mTest failed")
        print("Traceback:")
        for tbItem in traceback.format_tb(sys.exc_info()[2]):
            print(tbItem)
        print("'%s: '%s" % (sys.exc_info()[0].__name__,str(sys.exc_info()[1])))
        print("\033[0m")
    else:
        print("\033[92m Test successfully passed! \033[0m")
    finally:
        browser.quit()
def test_authorization(email, password):
    """Log in with an existing account, verify the welcome-back alert, then
    log out and check the browser returns to the main page."""
    try:
        # Arrange
        browser = webdriver.Chrome()
        browser.implicitly_wait(5)
        browser.get(_locators.main_page_link)
        # Act: open the login page and submit the credentials
        browser.find_element_by_id(_locators.login).click()
        browser.find_element_by_id("id_login-username").send_keys(email)
        browser.find_element_by_id("id_login-password").send_keys(password)
        browser.find_element_by_css_selector("button[name = 'login_submit']").click()
        # Assert: welcome alert shown, logout returns to the main page
        message = browser.find_element_by_class_name("alertinner")
        assert "Рады видеть вас снова" in message.text, "No message about authorization"
        browser.find_element_by_id(_locators.logout).click()
        assert browser.current_url == _locators.main_page_link, "No return to main page"
    finally:
        browser.quit()
def test_view_all_articles():
    """Open the 'all items' catalogue page and verify its key UI elements
    (header, filter panel, result count, product grid with image/name/price/
    availability/add-to-cart); when the catalogue is paginated, also check
    navigation to page 2."""
    try:
        # Arrange
        browser = webdriver.Chrome()
        browser.implicitly_wait(5)
        browser.get(_locators.main_page_link)
        # Act
        browser.find_element_by_link_text(_locators.all_items).click()
        # Assert
        header = browser.find_elements_by_css_selector('.page-header h1')
        assert len(header) > 0 and header[0].text == 'Все товары', "Нет заголовка"
        assert len(browser.find_elements_by_class_name('side_categories')) > 0, "Нет области фильтров"
        assert len(browser.find_elements_by_class_name('form-horizontal')) > 0, \
            "Нет количества найденных результатов"
        assert len(browser.find_elements_by_css_selector('ol.row')) > 0, "Нет таблицы с товарами"
        assert len(browser.find_elements_by_css_selector('article img')) > 0, "Нет изображения товара"
        assert len(browser.find_elements_by_css_selector('article h3')) > 0, "Нет название товара"
        assert len(browser.find_elements_by_css_selector('article .price_color')) > 0, "Нет цены товара"
        assert len(browser.find_elements_by_css_selector('article .availability')) > 0, \
            "Нет доступности товара на складе"
        assert len(browser.find_elements_by_css_selector('article button.btn')) > 0 or \
            len(browser.find_elements_by_css_selector('article span.btn')) > 0, 'Нет кнопки "Добавить в корзину"'
        if len(browser.find_elements_by_class_name('pager')) > 0: # pagination is present
            browser.find_elements_by_css_selector('.next a')[0].click()
            assert browser.current_url.endswith("catalogue/?page=2"), "Нет перехода на 2 страницу"
    finally:
        browser.quit()
def test_view_article():
    """Open the first product card from the catalogue and verify its UI
    elements (image, name, price, availability, review button, description
    and info sections), then navigate back via the breadcrumb."""
    try:
        # Arrange
        browser = webdriver.Chrome()
        browser.implicitly_wait(5)
        browser.get(_locators.main_page_link)
        # Act: open the catalogue and click the first clickable product link
        browser.find_element_by_link_text(_locators.all_items).click()
        article_link = WebDriverWait(browser, 5).until(
            EC.element_to_be_clickable((By.CSS_SELECTOR, "article a"))
        )
        article_link.click()
        # Assert
        assert len(browser.find_elements_by_css_selector('.product_page img')) > 0, "Нет изображения товара"
        assert len(browser.find_elements_by_css_selector('.product_page h1')) > 0, "Нет название товара"
        assert len(browser.find_elements_by_css_selector('.product_page .price_color')) > 0, "Нет цены товара"
        assert len(browser.find_elements_by_css_selector('.product_page .availability')) > 0, \
            "Нет доступности товара на складе"
        assert len(
            browser.find_elements_by_css_selector('.product_page #write_review')) > 0, 'Нет кнопки "Написать отзыв"'
        assert len(browser.find_elements_by_css_selector(
            '.product_page button.btn')) > 0, 'Нет кнопки "Добавить в корзину" или кнопки "Сообщить мне"'
        assert len(browser.find_elements_by_css_selector(
            '.product_page #product_description')) > 0, 'Нет заголовка "Описание товара"'
        assert len(browser.find_elements_by_css_selector('.product_page .sub-header')) > 1 and \
            browser.find_elements_by_css_selector('.product_page .sub-header')[
                1].text == 'Информация о товаре', 'Нет заголовка "Информация о товаре"'
        assert len(
            browser.find_elements_by_css_selector('.product_page #reviews')) > 0, 'Нет заголовка "Отзывы Клиентов"'
        # Navigate back to the main page via the breadcrumb link.
        browser.find_element_by_css_selector('.breadcrumb a[href="/ru/"]').click()
        assert browser.current_url == _locators.main_page_link, "No return to main page"
    finally:
        browser.quit()
def test_search_article_by_part_name(part_name):
    """Search the shop by a partial product name and verify the results
    page title and every returned product (name and image alt text)
    contain the search text, case-insensitively."""
    try:
        # Arrange
        browser = webdriver.Chrome()
        browser.implicitly_wait(5)
        browser.get(_locators.main_page_link)
        # Act: type into the search box and submit
        browser.find_element_by_css_selector("input[type = 'search']").send_keys(part_name)
        browser.find_element_by_css_selector('.navbar-form.navbar-right input[type = "submit"]').click()
        # Assert
        title_text = browser.find_element_by_class_name('page-header').text
        assert part_name in title_text, \
            "Page title '%s' should contain search text '%s'" % (title_text, part_name)
        articles = browser.find_elements_by_css_selector(".product_pod")
        # Compare upper-cased strings for a case-insensitive match.
        for article in articles:
            assert part_name.upper() in article.find_element_by_css_selector("h3 a").text.upper(), \
                "Article name '%s' should contain search text '%s'" % (part_name, article.text)
            assert part_name.upper() in article.find_element_by_css_selector("img").get_attribute("alt").upper(), \
                "Article image '%s' should contain search text '%s'" % (part_name, article.text)
    finally:
        browser.quit()
def test_add_article_to_cart():
    """Add the first purchasable product in the catalogue to the basket,
    open the basket, and verify the product appears there by title."""
    try:
        # Arrange
        browser = webdriver.Chrome()
        browser.implicitly_wait(5)
        browser.get(_locators.main_page_link + "catalogue/")
        # Act: find the first product that has an add-to-cart button
        articles = browser.find_elements_by_tag_name('article')
        article_title = ''
        for article in articles:
            if len(article.find_elements_by_tag_name('button')) > 0:
                article_title = article.find_element_by_css_selector('article h3').text
                article.find_element_by_tag_name('button').click()
                break
        # Assert: the clicked product shows up among the basket items
        if article_title != '':
            browser.find_element_by_css_selector(".btn-group>a[href='/ru/basket/']").click()
            bucket_items = browser.find_elements_by_class_name('basket-items')
            add_basket = False
            for bucket_item in bucket_items:
                if bucket_item.find_element_by_css_selector("h3").text == article_title:
                    add_basket = True
                    break
            assert add_basket, "Товар '%s' не добавлен в корзину" % (article_title)
        else:
            # No purchasable products on the page: nothing to assert.
            print("Нет товаров для добавления в корзину")
    finally:
        browser.quit()
def random_string(prefix, maxlen):
    """Return *prefix* followed by *maxlen* random alphanumeric characters."""
    alphabet = string.ascii_letters + string.digits
    suffix = ''.join(random.choice(alphabet) for _ in range(maxlen))
    return prefix + suffix
# NOTE(review): this block contained anonymization placeholders
# ('<PASSWORD>', '<EMAIL>') that made the file a SyntaxError; the calls
# below reconstruct the evident intent — confirm the e-mail domain and the
# existing-account credentials before running.

# 1. Registration with a freshly generated account
password = random_string("", 9)
email = password + "@fakemail.org"
test_registration(email, password)
# 2. Authorization
# TODO(review): replace with the credentials of an account that already exists.
test_authorization('existing.user@fakemail.org', 'QqWwEe!1@2')
# 3. Browse all products
test_view_all_articles()
# 4. View a product card
test_view_article()
# 5. Search a product by part of its name
test_search_article_by_part_name("coder")
# 6. Add a product to the cart
test_add_article_to_cart()
|
<filename>federaciones/models.py
from django.db import models
from django.utils.text import slugify
import os
import random
# from gauss.funciones import pass_generator
from autenticar.models import Gauser
from entidades.models import Entidad
# Password generator (the default alphabet omits easily-confused
# characters such as I, O, l and 0).
def pass_generator(size=15, chars='ABCDEFGHJKLMNPQRSTUVWXYZabcdefghijkmnopqrstuvwxyz123456789'):
    """Return a random string of *size* characters drawn from *chars*."""
    picks = [random.choice(chars) for _ in range(size)]
    return ''.join(picks)
# Upload-path builder: stores uploaded files under the owning federation's
# code with a random name instead of the original one.
def update_con(instance, filename):
    """Return 'federaciones/<federation code>/<random name>.<ext>' for an
    uploaded file belonging to *instance*."""
    ext = filename.rpartition('.')[2]
    file_nombre = pass_generator(10)
    try:
        federacion = instance.federacion.entidad.code
    except AttributeError:
        # Narrowed from a bare 'except:' — the only expected failure is that
        # Federacion instances expose the Entidad directly instead of
        # through a 'federacion' relation.
        federacion = instance.entidad.code
    return 'federaciones/%s/%s.%s' % (federacion, file_nombre, ext)
class Federacion(models.Model):
    """An Entidad acting as a federation to which other entities affiliate."""
    # The Entidad playing the federation role.
    entidad = models.ForeignKey(Entidad, on_delete=models.CASCADE, related_name='federaciones')
    # Optional extra display name appended to the entity's name.
    nombre = models.CharField('Nombre adicional', max_length=300, blank=True, null=True, default='')
    # Membership conditions set by the federation, optionally backed by an
    # uploaded document (stored via update_con).
    condiciones_fedaracion = models.TextField('Condiciones marcadas por la Federación', blank=True, default='')
    condiciones_fedaracion_file = models.FileField('Archivo federación', blank=True, null=True, upload_to=update_con)
    # Code an entity must supply to request affiliation; randomly generated.
    code_inscribir = models.CharField('Código de inscripción', default=pass_generator, max_length=15)

    class Meta:
        verbose_name_plural = "Federaciones"
        ordering = ['entidad']

    def __str__(self):
        return u'%s -- %s' % (self.entidad.name, self.nombre)
class Federado(models.Model):
    """Affiliation link between a federated Entidad and a Federacion."""
    # When an entity federates (i.e. belongs to a parent entity), a
    # sub-entity named after the federating entity is created automatically.
    # Any users added to that sub-entity become visible to the Federation
    # associated with it.
    federacion = models.ForeignKey(Federacion, on_delete=models.CASCADE, related_name='entidades_federadas')
    acepta_federacion = models.BooleanField('Relación con la entidad aceptada por la federación', default=False)
    entidad = models.ForeignKey(Entidad, on_delete=models.CASCADE, related_name='entidades_federadas')
    # Conditions set by the federated entity, optionally backed by a file.
    condiciones_entidad = models.TextField('Condiciones marcadas por la Entidad federada', blank=True, default='')
    condiciones_entidad_file = models.FileField('Archivo entidad', blank=True, null=True, upload_to=update_con)
    acepta_entidad = models.BooleanField('Relación con la federación aceptada por la entidad', default=False)
    # Visibility permissions granted to the federation over the entity.
    pnum = models.BooleanField('Federación puede ver el número de usuarios de entidad', default=False)
    piban = models.BooleanField('Federación puede ver el IBAN de la entidad', default=False)
    pfed = models.BooleanField('Federación puede ver a los federados en entidad', default=False)
    observaciones = models.TextField('Observaciones', blank=True, null=True, default='')
    modificado = models.DateField('Fecha de modificación', auto_now=True)
    # 'modificado' records when this Federado was last saved: if that moment
    # is more than 7 days old and neither 'acepta_federacion' nor
    # 'acepta_entidad' is True, the object is deleted. A Federado can
    # therefore only persist long-term once both parties have accepted.

    class Meta:
        verbose_name_plural = "Entidades federadas"
        ordering = ['federacion']

    def __str__(self):
        return u'%s -- %s' % (self.federacion, self.entidad.name)
# Upload-path builder for Fichero uploads: stores files under the owning
# federation's code with a random name instead of the original one.
# NOTE: intentionally kept as a separate function from update_con — Django
# migrations reference upload_to callables by name.
def update_fichero(instance, filename):
    """Return 'federaciones/<federation code>/<random name>.<ext>' for an
    uploaded file belonging to *instance*."""
    ext = filename.rpartition('.')[2]
    file_nombre = pass_generator(10)
    try:
        federacion = instance.federacion.entidad.code
    except AttributeError:
        # Narrowed from a bare 'except:' — the only expected failure is an
        # instance exposing the Entidad directly instead of through a
        # 'federacion' relation.
        federacion = instance.entidad.code
    return 'federaciones/%s/%s.%s' % (federacion, file_nombre, ext)
class Fichero(models.Model):
    """A file or folder in a federation's document tree."""
    federacion = models.ForeignKey(Federacion, on_delete=models.CASCADE)
    # True when this node is a folder rather than an actual file.
    is_carpeta = models.BooleanField('Es una carpeta/directorio', default=False)
    # Self-referencing link building the folder hierarchy (null = root).
    parent = models.ForeignKey('self', blank=True, null=True, related_name='parents', on_delete=models.CASCADE)
    fichero = models.FileField('Fichero federación', blank=True, null=True, upload_to=update_fichero)
    content_type = models.CharField('Content Type del fichero', max_length=50, blank=True, null=True)
    observaciones = models.TextField('Observaciones', default='', blank=True, null=True)
    modificado = models.DateField('Fecha de modificación', auto_now=True)
    creado = models.DateField('Fecha de creación', auto_now_add=True)

    class Meta:
        verbose_name_plural = "Ficheros de la federación"
        ordering = ['federacion', 'is_carpeta']

    def __str__(self):
        return u'%s -- %s' % (self.federacion, self.content_type)
class GauserFichero(models.Model):
    """Per-user access permissions on a Fichero."""
    # NOTE(review): PER looks like an intended 'choices' tuple but is not
    # referenced by any field below — confirm whether it is dead code.
    PER=(('r', 'Lectura'), ('w', 'Lectura y escritura') )
    fichero = models.ForeignKey(Fichero, on_delete=models.CASCADE)
    gauser = models.ForeignKey(Gauser, related_name='usuarios', on_delete=models.CASCADE)
    # Read / write-delete / share permission flags.
    pread = models.BooleanField('Permiso para leer el fichero', default=True)
    pwrite = models.BooleanField('Permiso para editar/borrar el fichero', default=False)
    pshare = models.BooleanField('Permiso para compartir el fichero', default=False)

    class Meta:
        verbose_name_plural = "Usuarios con permisos"
        ordering = ['fichero__federacion']

    def __str__(self):
        return u'r:%s, w:%s, s:%s, %s -- %s' % (self.pread, self.pwrite, self.pshare, self.gauser, self.fichero)
<reponame>Gunbard/FindFrame
# FindFrame
# Author: Gunbard
from posixpath import join
import cv2, json, math, os, subprocess, asyncio, qasync, sys
from datetime import datetime
from enum import Enum
from mainWindow import Ui_MainWindow
from resultsWindow import Ui_ResultsWindow
from PyQt5 import QtCore, QtWidgets
from PyQt5.QtGui import QImage, QPixmap
from PyQt5.QtWidgets import QLabel, QTableWidgetItem
APP_TITLE = 'FindFrame'
VERSION = '1.1.0'
WINDOW_TITLE = "{} {}".format(APP_TITLE, VERSION)
# Lowe's ratio-test factor for knnMatch filtering
MATCH_FILTER_THRESHOLD = 0.9 # Discard 10% of possible outlier matches
# Separator used when several video paths are packed into one text field
MULTI_FILE_DELMITER = ';'
# Default number of features to detect in a image or video frame. Default is usually 500, but 1000 provides more
# usable keypoints on larger images. Increasing this will add processing time.
ORB_NFEATURES = 1000
# Increase to allow processing more files at once, but progress bars and thumbnails will bug out until the UI is
# updated in a way that makes sense
MAX_BATCH_SIZE = 1
# Columns in the Results table
class ResultsColumns(Enum):
    """Column indexes of the results table in the Results window.

    Values are positional and must match the table's header layout.
    """
    FILENAME = 0
    TIMESTAMP = 1
    CONFIDENCE = 2
    THUMBNAIL = 3
# Mutable module-level state shared between the UI handlers below
match_threshold = 40 # Default percent of matching descriptors
file_list = [] # List of video files to process
last_fps_check = datetime.now() # Moment the FPS label was last refreshed
fps_count = 0 # Frames processed since last_fps_check
def millisToTime(ms):
    '''
    Helper to convert milliseconds to a timestamp string

    Uses pure integer arithmetic so the seconds field can never round up to
    60 (the previous round()-based version produced e.g. "00:00:60" for
    59999 ms), and hours are no longer truncated modulo 24, so videos longer
    than a day format correctly.

    Parameters:
        ms (int): Milliseconds to convert

    Returns:
        string: Ex. "05:30:23"
    '''
    total_seconds = int(ms) // 1000
    seconds = total_seconds % 60
    minutes = (total_seconds // 60) % 60
    hours = total_seconds // 3600
    return "{}:{}:{}".format(str(hours).zfill(2), str(minutes).zfill(2), str(seconds).zfill(2))
def open_image_path():
    '''
    Opens a file chooser dialog to select the source image
    '''
    # Start the dialog in the directory from the text field when it exists,
    # otherwise fall back to the current working directory.
    start_dir = ui.fieldInputImage.text()
    if not start_dir or not os.path.exists(start_dir):
        start_dir = os.getcwd()
    selection = QtWidgets.QFileDialog.getOpenFileName(
        None, "Select image", start_dir,
        "Images (*.png *.jpg);;idgaf (*.*)")
    chosen = selection[0]
    if not chosen:
        print("No image selected!")
        return
    # Remember the normalized path and immediately preview its keypoints
    ui.fieldInputImage.setText(os.path.normpath(chosen))
    analyze_image(chosen)
def open_video_path():
    '''
    Opens a multi-file chooser dialog to select video(s) to scan
    '''
    start_dir = ui.fieldVideo.text()
    if not start_dir:
        start_dir = os.getcwd()
    else:
        # If multiple files, just try getting the default dir of the first one
        start_dir = start_dir.split(MULTI_FILE_DELMITER)[0]
        if not os.path.exists(start_dir):
            start_dir = os.getcwd()
    selection = QtWidgets.QFileDialog.getOpenFileNames(
        None, "Select one or more video files", start_dir,
        "Videos (*.mp4 *.mkv *.webm);;idgaf (*.*)")
    if not selection[0]:
        print("No video(s) selected!")
        return
    # Pack all chosen paths into the single text field, delimiter-separated
    normalized = [os.path.normpath(p) for p in selection[0]]
    ui.fieldVideo.setText(MULTI_FILE_DELMITER.join(normalized))
def analyze_image(image):
    '''
    Generates a thumbnail with detected keypoints drawn on the source image

    Parameters:
        image (string): Path to an image file
    '''
    grayscale = cv2.imread(image, cv2.IMREAD_GRAYSCALE)
    detector = cv2.ORB_create(nfeatures=ORB_NFEATURES)
    keypoints, _ = detector.detectAndCompute(grayscale, None)
    # NOTE(review): drawKeypoints emits BGR channel order while
    # QImage.Format_RGB888 expects RGB, so the keypoint colour may appear
    # channel-swapped on screen — confirm if the exact colour matters.
    annotated = cv2.drawKeypoints(grayscale, keypoints, None, color=(0,255,255), flags=0)
    height, width, _ = annotated.shape
    stride = 3 * width
    preview = QImage(annotated.data, width, height, stride, QImage.Format_RGB888)
    # Aspect-fit the preview into the input-image widget
    scaled = QPixmap(preview).scaled(ui.imageInput.width(),
                                     ui.imageInput.height(),
                                     QtCore.Qt.KeepAspectRatio,
                                     QtCore.Qt.FastTransformation)
    ui.imageInput.setPixmap(scaled)
async def scan_video(index, semaphore):
    '''
    Opens a video to process frames. Adds match frames to the results table in the Results window.

    Frame grabbing/matching runs in the default executor (process_frame) so
    the Qt event loop stays responsive; all widget updates happen here on the
    loop thread between frames.

    Parameters:
        index (int): Index of the video to scan in file_list
        semaphore (asyncio.Semaphore): Used for async processing
    '''
    async with semaphore:
        global file_list
        path = file_list[index]
        log('Starting processing {}...'.format(os.path.basename(path)))
        set_processing_mode(True)
        video = cv2.VideoCapture(path)
        if not video.isOpened():
            log("Failed to open {}".format(os.path.basename(path)))
            return
        ui.labelFileProgress.setText('File: {}'.format(os.path.basename(path)))
        ui.progressBarFiles.setValue(index + 1)
        # Get frame count
        total_frames = video.get(cv2.CAP_PROP_FRAME_COUNT)
        log("Total frames: {}".format(int(total_frames)))
        ui.progressBar.setTextVisible(True)
        ui.progressBar.setValue(0)
        ui.progressBar.setRange(0, int(total_frames))
        frameWidth = ui.imageVideoFrame.width()
        frameHeight = ui.imageVideoFrame.height()
        # Only need to get source image and generate descriptors once
        source_frame = cv2.imread(ui.fieldInputImage.text(), cv2.IMREAD_GRAYSCALE)
        log('Generating source image descriptors...')
        orb = cv2.ORB_create(nfeatures=ORB_NFEATURES)
        keypoints, descriptors = await loop.run_in_executor(None, orb.detectAndCompute, source_frame, None)
        # Configure FLANN matcher with LSH indexing (suits ORB's binary descriptors)
        index_params = dict(algorithm = 6, # FLANN_INDEX_LSH
                            table_number = 6, # 12
                            key_size = 12, # 20
                            multi_probe_level = 1) # 2
        search_params = dict()
        matcher = cv2.FlannBasedMatcher(index_params, search_params)
        #log('Descriptor count for source frame: {}'.format(len(descriptors)))
        log('Beginning match search...')
        ui.progressMatch.setRange(0, len(descriptors))
        ui.progressMatchPeak.setRange(0, len(descriptors))
        candidate_frames = set() # Using a Set to remove duplicate frames (granularity of one second)
        boost_contrast = ui.checkBoostContrast.isChecked()
        while video.isOpened():
            # process_frame returns (thumbnail|None, timestamp, bad_frame, matches);
            # result None with bad_frame False means the video is exhausted.
            result, timestamp, bad_frame, matches = await loop.run_in_executor(None, process_frame, \
                                                    frameWidth, frameHeight, \
                                                    matcher, descriptors, video, boost_contrast)
            if bad_frame:
                # Corrupt/blank frame: just advance the progress bar and move on
                progress = ui.progressBar.value()
                ui.progressBar.setValue(progress + 1)
            elif result:
                # Update FPS label (refreshed at most once per second)
                global fps_count, last_fps_check
                if (datetime.now() - last_fps_check).seconds >= 1:
                    ui.labelFPS.setText('FPS: {}'.format(fps_count))
                    fps_count = 0
                    last_fps_check = datetime.now()
                else:
                    fps_count += 1
                ui.progressMatch.setValue(matches)
                if ui.progressMatchPeak.value() < matches:
                    ui.progressMatchPeak.setValue(matches)
                ui.imageVideoFrame.setPixmap(result)
                progress = ui.progressBar.value()
                ui.progressBar.setValue(progress + 1)
                # timestamp > -1 only when process_frame judged this frame a match
                if timestamp > -1:
                    converted_timestamp = millisToTime(timestamp)
                    candidate_frames_prev_size = len(candidate_frames)
                    candidate_frames.add(converted_timestamp)
                    # New timestamp, so include in results table
                    if len(candidate_frames) > candidate_frames_prev_size:
                        results_ui.resultsTable.setRowCount(results_ui.resultsTable.rowCount() + 1)
                        filename_item = QTableWidgetItem(os.path.basename(path))
                        filename_item.setTextAlignment(QtCore.Qt.AlignCenter)
                        thumbnail_label = QLabel()
                        thumbnail_label.setAlignment(QtCore.Qt.AlignCenter)
                        thumbnail_label.setPixmap(result)
                        timestamp_item = QTableWidgetItem(converted_timestamp)
                        timestamp_item.setTextAlignment(QtCore.Qt.AlignCenter)
                        # Confidence = fraction of source descriptors matched in this frame
                        confidence_item = QTableWidgetItem('{:.1f}% ({}/{})'.format(((matches/len(descriptors)) * 100), \
                                                           matches, len(descriptors)))
                        confidence_item.setTextAlignment(QtCore.Qt.AlignCenter)
                        results_ui.resultsTable.setItem(results_ui.resultsTable.rowCount() - 1, \
                                                        ResultsColumns.FILENAME.value, filename_item)
                        results_ui.resultsTable.setItem(results_ui.resultsTable.rowCount() - 1, \
                                                        ResultsColumns.TIMESTAMP.value, timestamp_item)
                        results_ui.resultsTable.setItem(results_ui.resultsTable.rowCount() - 1, \
                                                        ResultsColumns.CONFIDENCE.value, confidence_item)
                        results_ui.resultsTable.setCellWidget(results_ui.resultsTable.rowCount() - 1, \
                                                              ResultsColumns.THUMBNAIL.value, thumbnail_label)
                        ResultsWindow.setWindowTitle('{} - Results ({})' \
                                                     .format(APP_TITLE, results_ui.resultsTable.rowCount()))
                        log('Possible match at {}'.format(converted_timestamp))
            else:
                # Neither a frame nor a bad frame: video is exhausted, wrap up
                set_processing_mode(False)
                log('Processing complete.')
                if len(candidate_frames) > 0:
                    candidate_frames = sorted(candidate_frames)
                    log('Found potential matches at: {}'.format(list(candidate_frames)))
                    if not ResultsWindow.isVisible():
                        ResultsWindow.show()
                else:
                    log('Did not find any matches!')
                ui.progressBar.setValue(ui.progressBar.maximum())
                break
def processing_complete(status):
    '''
    Done-callback attached to each scan task.

    Parameters:
        status: the finished asyncio.Task (add_done_callback passes the task itself)
    '''
    print(status)
def process_frame(scaledWidth, scaledHeight, matcher, source_descriptors, video, boost_contrast):
    '''
    Grabs a frame, compares it to the source image, and if it's within the match threshold, return it as a good match.

    Runs on an executor thread (see scan_video); it must not touch Qt widgets.

    Parameters:
        scaledWidth (int): Thumbnail width for aspect ratio aware scaling
        scaledHeight (int): Thumbnail height for aspect ratio aware scaling
        matcher (cv2 matcher): Magical OpenCV matcher to use for matching
        source_descriptors (array?): OpenCV descriptors generated via ORB thingy
        video (OpenCV VideoCapture): OpenCV handle to the video file
        boost_contrast (bool): Whether or not to increase the video frame's contrast to help with matching

    Returns:
        tuple: (
            result: scaled thumbnail of the processed frame,
            timestamp: timestamp of the processed frame in milliseconds or -1,
            bad_frame: whether or not the frame was bad or corrupt,
            match count in the frame
        )

        Note: (None, -1, False, 0) signals the video is exhausted, while
        bad_frame=True means only this frame should be skipped. Frames below
        the match threshold are still returned as thumbnails with
        timestamp -1 so the UI can show scanning progress.
    '''
    #video.set(cv2.CAP_PROP_POS_FRAMES, 10) # Seek to frame 10
    timestamp = -1
    # Canned return value for frames that should be skipped
    bad_frame = (None, timestamp, True, 0)
    success, frame = video.read()
    if not success:
        # Done with video, probably
        return None, timestamp, False, 0
    if not frame.any():
        # Completely black frame; skip it
        print('Bad frame')
        return bad_frame
    frame_number = video.get(cv2.CAP_PROP_POS_FRAMES)
    #rgbImage = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
    # Increase contrast
    if boost_contrast:
        contrast = 4.0
        brightness = 0
        # Shift brightness down so the midpoint stays roughly stable
        brightness += int(round(255 * (1 - contrast) / 2))
        frame = cv2.addWeighted(frame, contrast, frame, 0, brightness)
    image = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    height, width = image.shape
    if height <= 0 or width <= 0:
        # Discard bad frame
        print('Bad frame')
        return bad_frame
    # Generate frame desciptors
    orb = cv2.ORB_create(nfeatures=ORB_NFEATURES)
    keypoints, descriptors = orb.detectAndCompute(image, None)
    if descriptors is None: # This is absolutely idiotic syntax
        # Discard frame with no descriptors (this can be blank or corrupt frames)
        return bad_frame
    try:
        matches = matcher.knnMatch(source_descriptors, descriptors, k=2)
    except:
        # knnMatch can throw on degenerate descriptor sets; treat as a bad frame
        return bad_frame
    good_matches = []
    # Lowe's ratio test: keep a match only when it is clearly better than
    # the second-best candidate
    for match in matches:
        # A match object has two points, but can have just one for some reason
        if len(match) < 2:
            continue
        m, n = match
        if m.distance < MATCH_FILTER_THRESHOLD * n.distance:
            good_matches.append(m)
    print('Matches for frame {}: {}'.format(int(frame_number), len(good_matches)))
    if len(good_matches) > (len(source_descriptors) * (match_threshold / 100)):
        # Possible match, so get timestamp
        timestamp = video.get(cv2.CAP_PROP_POS_MSEC)
    # Always return a thumbnail; timestamp stays -1 unless the frame matched
    qImg = QImage(image.data, width, height, width, QImage.Format_Grayscale8)
    return QPixmap(qImg).scaled(scaledWidth, \
                                scaledHeight, \
                                QtCore.Qt.KeepAspectRatio, \
                                QtCore.Qt.FastTransformation), \
           timestamp, \
           False, \
           len(good_matches)
def start_processing():
    '''
    Creates the asyncio tasks for processing the files in the file list or cancels the existing tasks

    Requires both a source image path and at least one video path to be set.
    The scan button doubles as a Cancel button while processing.
    '''
    if not ui.fieldInputImage.text() or not ui.fieldVideo.text():
        log('<span style="color:red;">Error: Must have both a source image and target video!</span>')
        return
    if ui.btnStartScan.text() == "Cancel":
        # Cancel every outstanding scan task on the shared loop
        pending_tasks = asyncio.all_tasks(loop)
        for task in pending_tasks:
            task.cancel()
        log('Cancelled!')
        set_processing_mode(False)
    else:
        # Clear out results table
        results_ui.resultsTable.setRowCount(0)
        ResultsWindow.setWindowTitle('{} - Results'.format(APP_TITLE))
        global file_list
        # BUG FIX: split on the shared MULTI_FILE_DELMITER constant instead of
        # a hard-coded ';' so this always matches what open_video_path joined.
        file_list = ui.fieldVideo.text().split(MULTI_FILE_DELMITER)
        ui.progressBarFiles.setValue(0)
        ui.progressBarFiles.setRange(0, len(file_list))
        # One task per file; the semaphore limits concurrency to MAX_BATCH_SIZE
        for index, _ in enumerate(file_list):
            task = asyncio.ensure_future(scan_video(index, asyncio_semaphore))
            task.add_done_callback(processing_complete)
def set_processing_mode(processing):
    '''
    Updates the UI based on whether or not the app is processing

    Parameters:
        processing (bool): Whether or not the app is processing
    '''
    # Scan button doubles as a Cancel button while a scan is running
    ui.btnStartScan.setText("Cancel" if processing else "Scan")
    ui.progressBar.setTextVisible(processing)
    ui.progressBarFiles.setTextVisible(processing)
    # Inputs are locked while processing and re-enabled when idle
    idle = not processing
    for control in (ui.btnOpenInputImage, ui.btnOpenVideo,
                    ui.sliderMatchThresh, ui.checkBoostContrast):
        control.setEnabled(idle)
    if idle:
        ui.labelFileProgress.setText('')
        ui.labelFPS.setText('')
def match_thresh_changed():
    '''
    Handler for when the match threshold slider changes. Updates its label.
    '''
    global match_threshold
    new_value = ui.sliderMatchThresh.value()
    match_threshold = new_value
    ui.labelMatchThresh.setText("{}%".format(new_value))
def log(message):
    '''
    Helper for adding a timestamp to the front-facing logger
    '''
    stamp = datetime.now().strftime("%H:%M:%S.%f")
    ui.textLog.append("[{}] {}".format(stamp, message))
# Qt application bootstrap: attributes must be set before the app is created
QtWidgets.QApplication.setAttribute(QtCore.Qt.AA_EnableHighDpiScaling)
QtWidgets.QApplication.setAttribute(QtCore.Qt.AA_DisableWindowContextHelpButton)
app = QtWidgets.QApplication(sys.argv)
# Merge asyncio with the Qt event loop so scan tasks and the UI share a thread
loop = qasync.QEventLoop(app)
asyncio.set_event_loop(loop)
asyncio.events._set_running_loop(loop)  # NOTE(review): private asyncio API — may break on Python upgrades
# Caps how many videos are processed concurrently (see MAX_BATCH_SIZE)
asyncio_semaphore = asyncio.Semaphore(MAX_BATCH_SIZE)
MainWindow = QtWidgets.QMainWindow()
ui = Ui_MainWindow()
ui.setupUi(MainWindow)
# Results dialog is owned by the main window and reused across scans
ResultsWindow = QtWidgets.QDialog(MainWindow)
results_ui = Ui_ResultsWindow()
results_ui.setupUi(ResultsWindow)
# Header order must agree with the ResultsColumns enum
results_ui.resultsTable.setHorizontalHeaderLabels(['File', 'Timestamp', 'Confidence', 'Thumbnail'])
# Defaults
ui.sliderMatchThresh.setSliderPosition(match_threshold)
ui.labelMatchThresh.setText("{}%".format(match_threshold))
# EVENTS
ui.btnOpenInputImage.clicked.connect(open_image_path)
ui.btnOpenVideo.clicked.connect(open_video_path)
ui.btnStartScan.clicked.connect(start_processing)
ui.btnResults.clicked.connect(lambda: ResultsWindow.show())
ui.sliderMatchThresh.valueChanged.connect(match_thresh_changed)
MainWindow.setWindowTitle(WINDOW_TITLE)
MainWindow.show()
# Run the merged Qt/asyncio loop until the application quits
with loop:
    loop.run_forever()
#!/usr/bin/env python3
import argparse
from collections import OrderedDict
import matplotlib
matplotlib.use('Agg')
from matplotlib import pyplot
import pandas
import numpy
import logging
from woldrnaseq import models
from woldrnaseq.common import save_fixed_height
logger = logging.getLogger(__name__)
def main(cmdline=None):
    """Entry point: parse arguments, load the tables, and dispatch to a plot maker."""
    args = make_parser().parse_args(cmdline)

    logging.basicConfig(level=logging.INFO if args.verbose else logging.ERROR)

    experiments = models.load_experiments(args.experiments)
    libraries = models.load_library_tables(args.libraries)
    coverage = models.load_all_coverage(libraries)

    # Every plot maker takes the same argument tuple; only the first matching
    # mode flag wins, falling back to the per-library coverage plots.
    common_args = (experiments, coverage, args.output_format, args.bare)
    if args.all_experiments:
        make_combined_median_normalized_summary(*common_args)
    elif args.experiment_median_summary:
        make_per_experiment_median_normalized_summary(*common_args)
    elif args.by_experiment:
        make_by_experiment_median_summary(*common_args)
    elif args.combined_median_summary:
        make_combined_experiment_median_summary(*common_args)
    else:
        make_experiment_by_library_coverage_plots(*common_args)
def make_parser():
    """Build the command line parser for the coverage plotting tool."""
    parser = argparse.ArgumentParser()
    parser.add_argument('-l', '--libraries', action='append', required=True,
                        help='library information table')
    parser.add_argument('-e', '--experiments', action='append', required=True,
                        help='experiment information table')
    # Mutually-intended mode flags (checked in order by main())
    mode_flags = [
        ('--by-experiment', 'do per experiment summary plot'),
        ('--all-experiments', 'summarize all experiments'),
        ('--experiment-median-summary',
         'plot each experiment median summary as different plots'),
        ('--combined-median-summary',
         'plot all experiment medians one plot'),
    ]
    for flag, description in mode_flags:
        parser.add_argument(flag, action='store_true', help=description)
    parser.add_argument('--output-format', default='png', choices=['png', 'svg'])
    parser.add_argument('--bare', default=False, action='store_true',
                        help='leave off text annotations')
    parser.add_argument('-v', '--verbose', action='store_true',
                        help='Turn on INFO level logging')
    return parser
def make_experiment_by_library_coverage_plots(experiments, coverage, output_format, bare):
    """Coverage plot showing all the libraries for an experiment
    """
    figures = OrderedDict()
    # One figure per experiment, plotting each replicate library's coverage
    for name, row in experiments.iterrows():
        replicate_ids = row['replicates']
        figure = make_coverage_plot(name, coverage[replicate_ids])
        figures[name + '.coverage.' + output_format] = figure
    save_fixed_height(figures)
def make_coverage_plot(experiment, coverage):
    """Draw one coverage-by-position figure for a single experiment's libraries."""
    with pyplot.style.context('seaborn-dark-palette'):
        figure = pyplot.figure(dpi=100)
        axis = figure.add_subplot(1, 1, 1)
        coverage.plot(ax=axis)
        axis.set_title('Coverage for {}'.format(experiment))
        axis.set_xlabel("position quantile (5' to 3')")
        axis.set_ylabel('Normalized read depth')
        # Anchor the y axis at zero, keeping the auto-computed top
        _, top = axis.get_ylim()
        axis.set_ylim(0, top)
        axis.legend(bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.0)
        return figure
def make_by_experiment_median_summary(experiments, coverage, output_format, bare):
    """Coverage plot showing the median +/-sd of all libraries for an experiment
    """
    figures = OrderedDict()
    with pyplot.style.context('seaborn-dark-palette'):
        for name in experiments.index:
            figure = pyplot.figure(dpi=100)
            axis = figure.add_subplot(1, 1, 1)
            add_median_plot(axis, experiments, name, coverage, bare)
            axis.set_title('Median coverage for {}'.format(name))
            axis.set_xlabel("position quantile (5' to 3')")
            axis.set_ylabel('Read depth')
            axis.legend(bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.0)
            figures[name + '.median.coverage.' + output_format] = figure
    save_fixed_height(figures)
def make_combined_experiment_median_summary(experiments, coverage, output_format, bare):
    """Overlay the median +/- std. dev. coverage of every experiment on one axis.

    Parameters:
        experiments (pandas.DataFrame): experiment table with a 'replicates' column
        coverage (pandas.DataFrame): per-library coverage, one column per library
        output_format (str): image extension, e.g. 'png' or 'svg'
        bare (bool): when True, leave off text annotations (passed to add_median_plot)
    """
    tosave = OrderedDict()
    with pyplot.style.context('seaborn-dark-palette'):
        f = pyplot.figure(dpi=100)
        ax = f.add_subplot(1, 1, 1)
        for experiment in experiments.index:
            # BUG FIX: add_median_plot requires the `bare` flag; it was
            # previously called without it, raising a TypeError at runtime.
            add_median_plot(ax, experiments, experiment, coverage, bare)
        # Combined plot is annotation-free: no ticks, just curves + legend
        ax.set_xticks([])
        ax.set_yticks([])
        ax.legend(bbox_to_anchor=(1.05, 1),
                  loc=2,
                  borderaxespad=0.0)
        image_name = 'all-experiments.median.coverage.' + output_format
        tosave[image_name] = f
    save_fixed_height(tosave)
def add_median_plot(ax, experiments, experiment, coverage, bare):
    """Plot one experiment's median coverage with a +/- std. dev. band onto *ax*.

    Parameters:
        ax: matplotlib Axes to draw on
        experiments (pandas.DataFrame): experiment table with a 'replicates' column
        experiment: experiment name (an index label of *experiments*)
        coverage (pandas.DataFrame): per-library coverage columns
        bare (bool): forwarded to add_slope to suppress text annotations
    """
    library_ids = experiments['replicates'][experiment]
    median = coverage[library_ids].median(axis=1)
    # TOTAL HACK FOR A GRANT
    # WARNING(review): this fabricates +1500 read depth for one hard-coded
    # experiment name; it must be removed before any real analysis.
    if experiment == 'embryo fibroblasts':
        median[1:99] += 1500
    # END TOTAL HACK
    stddev = coverage[library_ids].std(axis=1)
    s = ax.plot(median, label='median of {}'.format(experiment))
    # Shaded band and dotted lines both depict +/- one std. dev.; note both
    # carry legend labels, so the legend shows two std.-dev. entries.
    errstyle = {
        'alpha': 0.25,
        'color': s[0].get_color(),
    }
    ax.fill_between(
        x=median.index,
        y1=median-stddev,
        y2=median+stddev,
        label='+/- std. dev',
        **errstyle)
    errstyle = {
        'alpha': 0.85,
        'linestyle': ':',
        'color': s[0].get_color(),
    }
    ax.plot(median+stddev, **errstyle,
            label='+/- one std. deviation')
    ax.plot(median-stddev, **errstyle)
    # NOTE(review): N is the column count of the *whole* coverage table, not
    # just this experiment's replicates — confirm that is intended.
    add_slope(ax, median, coverage.shape[1], bare)
def make_combined_median_normalized_summary(experiments, coverage, output_format, bare):
    """Coverage plot showing the median +/-sd of all libraries for an experiment

    Pools the replicate lists of every experiment and builds a single
    median-normalized summary figure over the whole pool.

    NOTE(review): the figure is built once after the loop, so `experiment`
    is the loop-leaked *last* experiment name — the output file is named
    after it rather than e.g. 'all-experiments'. Confirm this naming is
    intended.
    """
    assert isinstance(experiments, pandas.DataFrame)
    tosave = OrderedDict()
    library_ids = []
    for experiment in experiments.index:
        library_ids.extend(experiments['replicates'][experiment])
    f = make_median_normalized_summary(experiment, library_ids, coverage, bare)
    if bare:
        plot_suffix = '.median-normalized.coverage.bare.'
    else:
        plot_suffix = '.median-normalized.coverage.'
    image_name = experiment + plot_suffix + output_format
    # Saved directly here, and again via save_fixed_height below
    f.savefig(image_name)
    tosave[image_name] = f
    save_fixed_height(tosave)
def make_per_experiment_median_normalized_summary(experiments, coverage, output_format, bare):
    """Coverage plot showing the median +/-sd of all libraries for each experiment
    """
    assert isinstance(experiments, pandas.DataFrame)
    suffix = '.median-normalized.coverage.bare.' if bare else '.median-normalized.coverage.'
    figures = OrderedDict()
    for name in experiments.index:
        replicate_ids = experiments['replicates'][name]
        figure = make_median_normalized_summary(name, replicate_ids, coverage, bare)
        axis = figure.get_axes()[0]
        axis.set_title('Median normalized coverage for {}'.format(name.replace('_', ' ')))
        image_name = name + suffix + output_format
        # Saved directly, and again via save_fixed_height below
        figure.savefig(image_name)
        figures[image_name] = figure
    save_fixed_height(figures)
def make_median_normalized_summary(experiment, library_ids, coverage, bare):
    """Build a small figure of library-median-normalized coverage for one experiment."""
    with pyplot.style.context('seaborn-dark-palette'):
        figure = pyplot.figure(dpi=100, figsize=(4, 2.5))
        axis = figure.add_subplot(1, 1, 1)
        # Normalize each library by its own median so libraries are comparable
        normalized = coverage[library_ids] / coverage[library_ids].median(axis=0)
        center = normalized.median(axis=1)
        spread = normalized.std(axis=1)
        line = axis.plot(center, linewidth=2)
        band_style = {
            'alpha': 0.25,
            'color': line[0].get_color(),
        }
        axis.fill_between(x=center.index,
                          y1=center - spread,
                          y2=center + spread,
                          label='+/- std. dev',
                          **band_style)
        return figure
def add_slope(ax, coverage, N, bare):
    """Fit a line to the coverage plateau (quantiles 20-80) and annotate the axis.

    Parameters:
        ax: matplotlib Axes to annotate
        coverage: median coverage values indexed by position quantile (0-100)
        N (int): number of libraries that went into the summary
        bare (bool): when True, skip the text annotations
    """
    plateau = coverage[20:81]
    # ENCODE QC: the 20/80 plateau slope needs to be < 0.3
    m, b = numpy.polyfit(numpy.arange(20, 81), plateau, 1)
    ax.set_xticklabels(ax.get_xticks() / 100)
    ax.set_yticks([])
    if not bare:
        ax.text(50, 0.25, s="ENCODE QC slope = {:0.2}".format(m),
                horizontalalignment='center',
                )
        # BUG FIX: this previously read `centered.shape[1]` — a name not
        # defined in this function, raising NameError whenever bare=False.
        # The library count is passed in as N.
        ax.text(50, -0.5,
                s='N = {}'.format(N),
                size='x-large',
                horizontalalignment='center',
                )
        ax.set_xlabel(r"5' $\rightarrow$ 3' normalized position")
        ax.set_ylabel('normalized read coverage')
    # Debug aid: draw the fitted line over the plateau region
    x_range = range(20, 81)
    ax.plot(x_range, [m * x + b for x in x_range], color='green')
if __name__ == "__main__":
main()
|
# File: examples/deebert/src/modeling_highway_bert.py
import torch
from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss
from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_callable
from transformers.modeling_bert import (
BERT_INPUTS_DOCSTRING,
BERT_START_DOCSTRING,
BertEmbeddings,
BertLayer,
BertPooler,
BertPreTrainedModel,
)
def entropy(x):
    """Calculate entropy of a pre-softmax logit Tensor.

    Computes, per row, log(sum(exp(x))) - sum(x * exp(x)) / sum(exp(x)),
    i.e. the Shannon entropy (in nats) of softmax(x).

    The logits are shifted by their row-wise maximum before exponentiation so
    large logits no longer overflow exp() to inf/nan; the shift cancels
    analytically and does not change the result.

    Args:
        x: Tensor of shape (batch, num_classes) holding pre-softmax logits.

    Returns:
        Tensor of shape (batch,) with the entropy of each row.
    """
    shift = x.max(dim=1, keepdim=True).values
    exp_x = torch.exp(x - shift)  # exp of shifted logits, safe from overflow
    A = torch.sum(exp_x, dim=1)  # sum of exp(x_i - shift)
    B = torch.sum(x * exp_x, dim=1)  # sum of x_i * exp(x_i - shift)
    # log-partition regains the shift; the weighted mean B/A is shift-free
    return torch.log(A) + shift.squeeze(1) - B / A
class DeeBertEncoder(nn.Module):
    """BERT encoder with a 'highway' (early-exit) classification head after every layer.

    At inference time, if the highway head at layer i predicts with entropy
    below ``early_exit_entropy[i]``, the remaining layers are skipped by
    raising HighwayException carrying the early result.
    """

    def __init__(self, config):
        super().__init__()
        self.output_attentions = config.output_attentions
        self.output_hidden_states = config.output_hidden_states
        # One standard BERT layer plus one highway exit head per hidden layer
        self.layer = nn.ModuleList([BertLayer(config) for _ in range(config.num_hidden_layers)])
        self.highway = nn.ModuleList([BertHighway(config) for _ in range(config.num_hidden_layers)])
        # -1 disables early exit for that layer (entropy is never negative)
        self.early_exit_entropy = [-1 for _ in range(config.num_hidden_layers)]

    def set_early_exit_entropy(self, x):
        # Scalar applies the same threshold to every layer; otherwise the
        # given sequence replaces the per-layer thresholds wholesale.
        if (type(x) is float) or (type(x) is int):
            for i in range(len(self.early_exit_entropy)):
                self.early_exit_entropy[i] = x
        else:
            self.early_exit_entropy = x

    def init_highway_pooler(self, pooler):
        # Copy the main model pooler's weights into every highway pooler
        loaded_model = pooler.state_dict()
        for highway in self.highway:
            for name, param in highway.pooler.state_dict().items():
                param.copy_(loaded_model[name])

    def forward(
        self,
        hidden_states,
        attention_mask=None,
        head_mask=None,
        encoder_hidden_states=None,
        encoder_attention_mask=None,
    ):
        """Run all layers, collecting a highway exit after each one.

        Raises:
            HighwayException: at inference time, when a layer's highway
                entropy falls below its threshold (carries the early outputs
                and the 1-based exiting layer index).
        """
        all_hidden_states = ()
        all_attentions = ()
        all_highway_exits = ()
        for i, layer_module in enumerate(self.layer):
            if self.output_hidden_states:
                all_hidden_states = all_hidden_states + (hidden_states,)
            layer_outputs = layer_module(
                hidden_states, attention_mask, head_mask[i], encoder_hidden_states, encoder_attention_mask
            )
            hidden_states = layer_outputs[0]
            if self.output_attentions:
                all_attentions = all_attentions + (layer_outputs[1],)

            current_outputs = (hidden_states,)
            if self.output_hidden_states:
                current_outputs = current_outputs + (all_hidden_states,)
            if self.output_attentions:
                current_outputs = current_outputs + (all_attentions,)

            highway_exit = self.highway[i](current_outputs)
            # logits, pooled_output
            if not self.training:
                # Inference: attach the exit entropy and bail out early when
                # the head is confident enough (exception as control flow).
                highway_logits = highway_exit[0]
                highway_entropy = entropy(highway_logits)
                highway_exit = highway_exit + (highway_entropy,)  # logits, hidden_states(?), entropy
                all_highway_exits = all_highway_exits + (highway_exit,)
                if highway_entropy < self.early_exit_entropy[i]:
                    new_output = (highway_logits,) + current_outputs[1:] + (all_highway_exits,)
                    raise HighwayException(new_output, i + 1)
            else:
                # Training: keep every exit so all heads receive gradient
                all_highway_exits = all_highway_exits + (highway_exit,)

        # Add last layer
        if self.output_hidden_states:
            all_hidden_states = all_hidden_states + (hidden_states,)

        outputs = (hidden_states,)
        if self.output_hidden_states:
            outputs = outputs + (all_hidden_states,)
        if self.output_attentions:
            outputs = outputs + (all_attentions,)
        outputs = outputs + (all_highway_exits,)
        return outputs  # last-layer hidden state, (all hidden states), (all attentions), all highway exits
@add_start_docstrings(
    "The Bert Model transformer with early exiting (DeeBERT). ",
    BERT_START_DOCSTRING,
)
class DeeBertModel(BertPreTrainedModel):
    """BertModel variant whose encoder (DeeBertEncoder) supports early exits."""

    def __init__(self, config):
        super().__init__(config)
        self.config = config

        self.embeddings = BertEmbeddings(config)
        self.encoder = DeeBertEncoder(config)
        self.pooler = BertPooler(config)

        self.init_weights()

    def init_highway_pooler(self):
        # Seed every per-layer highway pooler from the main pooler's weights
        self.encoder.init_highway_pooler(self.pooler)

    def get_input_embeddings(self):
        return self.embeddings.word_embeddings

    def set_input_embeddings(self, value):
        self.embeddings.word_embeddings = value

    def _prune_heads(self, heads_to_prune):
        """Prunes heads of the model.
        heads_to_prune: dict of {layer_num: list of heads to prune in this layer}
        See base class PreTrainedModel
        """
        for layer, heads in heads_to_prune.items():
            self.encoder.layer[layer].attention.prune_heads(heads)

    @add_start_docstrings_to_callable(BERT_INPUTS_DOCSTRING)
    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        encoder_hidden_states=None,
        encoder_attention_mask=None,
    ):
        r"""
        Return:
            :obj:`tuple(torch.FloatTensor)` comprising various elements depending on the configuration (:class:`~transformers.BertConfig`) and inputs:
            last_hidden_state (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, hidden_size)`):
                Sequence of hidden-states at the output of the last layer of the model.
            pooler_output (:obj:`torch.FloatTensor`: of shape :obj:`(batch_size, hidden_size)`):
                Last layer hidden-state of the first token of the sequence (classification token)
                further processed by a Linear layer and a Tanh activation function. The Linear
                layer weights are trained from the next sentence prediction (classification)
                objective during pre-training.

                This output is usually *not* a good summary
                of the semantic content of the input, you're often better with averaging or pooling
                the sequence of hidden-states for the whole input sequence.
            hidden_states (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_hidden_states=True`` is passed or when ``config.output_hidden_states=True``):
                Tuple of :obj:`torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer)
                of shape :obj:`(batch_size, sequence_length, hidden_size)`.

                Hidden-states of the model at the output of each layer plus the initial embedding outputs.
            attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_attentions=True`` is passed or when ``config.output_attentions=True``):
                Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape
                :obj:`(batch_size, num_heads, sequence_length, sequence_length)`.

                Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
                heads.
            highway_exits (:obj:`tuple(tuple(torch.Tensor))`:
                Tuple of each early exit's results (total length: number of layers)
                Each tuple is again, a tuple of length 2 - the first entry is logits and the second entry is hidden states.
        """
        # Exactly one of input_ids / inputs_embeds must be supplied
        if input_ids is not None and inputs_embeds is not None:
            raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
        elif input_ids is not None:
            input_shape = input_ids.size()
        elif inputs_embeds is not None:
            input_shape = inputs_embeds.size()[:-1]
        else:
            raise ValueError("You have to specify either input_ids or inputs_embeds")

        device = input_ids.device if input_ids is not None else inputs_embeds.device

        # Default masks/ids: attend everywhere, single segment
        if attention_mask is None:
            attention_mask = torch.ones(input_shape, device=device)
        if encoder_attention_mask is None:
            encoder_attention_mask = torch.ones(input_shape, device=device)
        if token_type_ids is None:
            token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device)

        # We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
        # ourselves in which case we just need to make it broadcastable to all heads.
        extended_attention_mask: torch.Tensor = self.get_extended_attention_mask(attention_mask, input_shape, device)

        # If a 2D ou 3D attention mask is provided for the cross-attention
        # we need to make broadcastabe to [batch_size, num_heads, seq_length, seq_length]
        if encoder_attention_mask.dim() == 3:
            encoder_extended_attention_mask = encoder_attention_mask[:, None, :, :]
        if encoder_attention_mask.dim() == 2:
            encoder_extended_attention_mask = encoder_attention_mask[:, None, None, :]

        encoder_extended_attention_mask = encoder_extended_attention_mask.to(
            dtype=next(self.parameters()).dtype
        )  # fp16 compatibility
        # Additive mask: 0 where attended, large negative where masked
        encoder_extended_attention_mask = (1.0 - encoder_extended_attention_mask) * -10000.0

        # Prepare head mask if needed
        # 1.0 in head_mask indicate we keep the head
        # attention_probs has shape bsz x n_heads x N x N
        # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
        # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
        head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers)

        embedding_output = self.embeddings(
            input_ids=input_ids, position_ids=position_ids, token_type_ids=token_type_ids, inputs_embeds=inputs_embeds
        )
        # NOTE: the encoder may raise HighwayException to exit early (caller handles it)
        encoder_outputs = self.encoder(
            embedding_output,
            attention_mask=extended_attention_mask,
            head_mask=head_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_extended_attention_mask,
        )
        sequence_output = encoder_outputs[0]
        pooled_output = self.pooler(sequence_output)

        outputs = (sequence_output, pooled_output,) + encoder_outputs[
            1:
        ]  # add hidden_states and attentions if they are here
        return outputs  # sequence_output, pooled_output, (hidden_states), (attentions), highway exits
class HighwayException(Exception):
    """Control-flow exception used to exit the encoder early.

    DeeBertEncoder raises this inside its layer loop when a highway head's
    entropy falls below the configured threshold; the caller catches it and
    uses the carried outputs instead of running the remaining layers.
    """

    def __init__(self, message, exit_layer):
        # Forward the payload to Exception so str(e)/e.args behave normally
        # (the previous implementation skipped super().__init__()).
        super().__init__(message)
        self.message = message
        self.exit_layer = exit_layer  # start from 1!
class BertHighway(nn.Module):
    """A module to provide a shortcut
    from (the output of one non-final BertLayer in BertEncoder) to (cross-entropy computation in BertForSequenceClassification)
    """

    def __init__(self, config):
        super().__init__()
        # Same head shape as the top of a standard BertForSequenceClassification
        self.pooler = BertPooler(config)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        self.classifier = nn.Linear(config.hidden_size, config.num_labels)

    def forward(self, encoder_outputs):
        """Pool the layer's hidden states and classify them.

        Args:
            encoder_outputs: tuple whose first element is the layer's hidden states.

        Returns:
            (logits, pooled_output) pair for this early-exit head.
        """
        hidden_states = encoder_outputs[0]
        # Pool exactly as BertModel would (first token through the pooler),
        # then apply the standard dropout + linear classification head.
        pooled = self.pooler(hidden_states)
        logits = self.classifier(self.dropout(pooled))
        return logits, pooled
@add_start_docstrings(
    """Bert Model (with early exiting - DeeBERT) with a classifier on top,
    also takes care of multi-layer training. """,
    BERT_START_DOCSTRING,
)
class DeeBertForSequenceClassification(BertPreTrainedModel):
    # DeeBERT sequence-classification head. Each encoder layer owns a
    # "highway" exit; at inference the encoder may raise HighwayException to
    # stop early, which this forward() catches and turns into regular outputs.

    def __init__(self, config):
        # Backbone plus the final dropout + linear classification head.
        super().__init__(config)
        self.num_labels = config.num_labels
        self.num_layers = config.num_hidden_layers
        self.bert = DeeBertModel(config)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        self.classifier = nn.Linear(config.hidden_size, self.config.num_labels)
        self.init_weights()

    @add_start_docstrings_to_callable(BERT_INPUTS_DOCSTRING)
    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        labels=None,
        output_layer=-1,
        train_highway=False,
    ):
        r"""
        labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`, defaults to :obj:`None`):
            Labels for computing the sequence classification/regression loss.
            Indices should be in :obj:`[0, ..., config.num_labels - 1]`.
            If :obj:`config.num_labels == 1` a regression loss is computed (Mean-Square loss),
            If :obj:`config.num_labels > 1` a classification loss is computed (Cross-Entropy).

        Returns:
            :obj:`tuple(torch.FloatTensor)` comprising various elements depending on the configuration (:class:`~transformers.BertConfig`) and inputs:
            loss (:obj:`torch.FloatTensor` of shape :obj:`(1,)`, `optional`, returned when :obj:`label` is provided):
                Classification (or regression if config.num_labels==1) loss.
            logits (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, config.num_labels)`):
                Classification (or regression if config.num_labels==1) scores (before SoftMax).
            hidden_states (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_hidden_states=True`` is passed or when ``config.output_hidden_states=True``):
                Tuple of :obj:`torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer)
                of shape :obj:`(batch_size, sequence_length, hidden_size)`.
                Hidden-states of the model at the output of each layer plus the initial embedding outputs.
            attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_attentions=True`` is passed or when ``config.output_attentions=True``):
                Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape
                :obj:`(batch_size, num_heads, sequence_length, sequence_length)`.
                Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
                heads.
            highway_exits (:obj:`tuple(tuple(torch.Tensor))`:
                Tuple of each early exit's results (total length: number of layers)
                Each tuple is again, a tuple of length 2 - the first entry is logits and the second entry is hidden states.
        """
        # Assume we run through every layer unless an early exit fires.
        exit_layer = self.num_layers
        try:
            outputs = self.bert(
                input_ids,
                attention_mask=attention_mask,
                token_type_ids=token_type_ids,
                position_ids=position_ids,
                head_mask=head_mask,
                inputs_embeds=inputs_embeds,
            )
            # sequence_output, pooled_output, (hidden_states), (attentions), highway exits
            pooled_output = outputs[1]
            pooled_output = self.dropout(pooled_output)
            logits = self.classifier(pooled_output)
            outputs = (logits,) + outputs[2:]  # add hidden states and attention if they are here
        except HighwayException as e:
            # The encoder exited early: e.message carries that exit's outputs
            # (logits first) and e.exit_layer the 1-based layer it stopped at.
            outputs = e.message
            exit_layer = e.exit_layer
            logits = outputs[0]
        if not self.training:
            # At eval time, record prediction entropies for the final head and
            # for every highway exit (used for entropy-based exit analysis).
            original_entropy = entropy(logits)
            highway_entropy = []
            highway_logits_all = []
        if labels is not None:
            # Loss for the (possibly early-exited) main logits.
            if self.num_labels == 1:
                # We are doing regression
                loss_fct = MSELoss()
                loss = loss_fct(logits.view(-1), labels.view(-1))
            else:
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
            # work with highway exits: outputs[-1] is the tuple of per-layer exits
            highway_losses = []
            for highway_exit in outputs[-1]:
                highway_logits = highway_exit[0]
                if not self.training:
                    highway_logits_all.append(highway_logits)
                    # highway_exit[2] is read only at eval time here —
                    # presumably the exit's entropy recorded by the encoder;
                    # confirm against DeeBertModel/BertEncoder.
                    highway_entropy.append(highway_exit[2])
                if self.num_labels == 1:
                    # We are doing regression
                    loss_fct = MSELoss()
                    highway_loss = loss_fct(highway_logits.view(-1), labels.view(-1))
                else:
                    loss_fct = CrossEntropyLoss()
                    highway_loss = loss_fct(highway_logits.view(-1, self.num_labels), labels.view(-1))
                highway_losses.append(highway_loss)
            if train_highway:
                # Train only the highway heads: sum their losses, skipping the
                # last exit (it coincides with the final layer's classifier).
                outputs = (sum(highway_losses[:-1]),) + outputs
                # exclude the final highway, of course
            else:
                outputs = (loss,) + outputs
        if not self.training:
            outputs = outputs + ((original_entropy, highway_entropy), exit_layer)
            if output_layer >= 0:
                outputs = (
                    (outputs[0],) + (highway_logits_all[output_layer],) + outputs[2:]
                )  # replace the final logits with the requested highway exit's logits
        return outputs  # (loss), logits, (hidden_states), (attentions), (highway_exits)
|
<filename>tests/functional/test_path_encodings.py
# -*- coding: utf-8 -*-
#-----------------------------------------------------------------------------
# Copyright (c) 2005-2021, PyInstaller Development Team.
#
# Distributed under the terms of the GNU General Public License (version 2
# or later) with exception for distributing the bootloader.
#
# The full license is in the file COPYING.txt, distributed with this software.
#
# SPDX-License-Identifier: (GPL-2.0-or-later WITH Bootloader-exception)
#-----------------------------------------------------------------------------
import os
import subprocess
import pytest
def test_ascii_path(pyi_builder):
    # This test is specifically about plain-ASCII build paths; if the default
    # build path already contains non-ASCII characters, skip instead of failing.
    build_dir = pyi_builder._distdir
    if build_dir != build_dir.encode('ascii', 'replace').decode('ascii'):
        pytest.skip(reason="Default build path not ASCII, skipping...")
    pyi_builder.test_script('pyi_path_encoding.py')
@pytest.mark.linux
def test_linux_non_unicode_path(pyi_builder, monkeypatch):
    # With the locale forced to 'C', mbstowcs should be completely useless.
    # This verifies that _Py_char2wchar still decodes the "undecodable" bytes,
    # even for filenames that were not encoded with the locale encoding.
    unicode_filename = 'ěščřžýáíé日本語'
    pyi_builder._distdir = os.path.join(pyi_builder._distdir, unicode_filename)
    os.makedirs(pyi_builder._distdir)
    tmpdir = os.path.join(str(pyi_builder._tmpdir), unicode_filename + "_TMP")
    monkeypatch.setenv('LC_ALL', 'C')
    for var in ('TMPDIR', 'TMP'):
        monkeypatch.setenv(var, tmpdir)
    pyi_builder.test_script('pyi_path_encoding.py')
@pytest.mark.darwin
@pytest.mark.linux
def test_osx_linux_unicode_path(pyi_builder, monkeypatch):
    # macOS and Linux are expected to handle unicode filenames without issue.
    unicode_filename = 'ěščřžýáíé日本語'
    pyi_builder._distdir = os.path.join(pyi_builder._distdir, unicode_filename)
    os.makedirs(pyi_builder._distdir)
    tmpdir = os.path.join(str(pyi_builder._tmpdir), unicode_filename + "_TMP")
    for var in ('TMPDIR', 'TMP'):
        monkeypatch.setenv(var, tmpdir)
    pyi_builder.test_script('pyi_path_encoding.py')
@pytest.mark.win32
def test_win_codepage_path(pyi_builder, monkeypatch):
    # Decode a fixed byte range with the current codepage to obtain a filename
    # that is guaranteed to round-trip through that codepage. Assumes a
    # single-byte codepage, i.e. not a multibyte one such as shift-JIS (cp932).
    cp_filename = bytes(bytearray(range(0x80, 0x86))).decode('mbcs')
    pyi_builder._distdir = os.path.join(pyi_builder._distdir, cp_filename)
    os.makedirs(pyi_builder._distdir)
    tmpdir = os.path.join(str(pyi_builder._tmpdir), cp_filename + "_TMP")
    for var in ('TMPDIR', 'TMP'):
        monkeypatch.setenv(var, tmpdir)
    pyi_builder.test_script('pyi_path_encoding.py')
@pytest.mark.win32
def test_win_codepage_path_disabled_shortfilename(pyi_builder, monkeypatch):
    # Decode a fixed byte range with the current codepage to obtain a filename
    # that is guaranteed to round-trip through that codepage. Assumes a
    # single-byte codepage, i.e. not a multibyte one such as shift-JIS (cp932).
    cp_filename = bytes(bytearray(range(0x80, 0x86))).decode('mbcs')
    new_distdir = os.path.join(pyi_builder._distdir, cp_filename)
    os.makedirs(new_distdir)
    # Strip ShortFileNames via `fsutil`. `8dot3name strip` only affects
    # subfolders, so it is run on the parent of the codepage-named folder.
    # It requires admin privileges, so `xfail` when we do not have them.
    if subprocess.call(['fsutil', '8dot3name', 'strip', pyi_builder._distdir]):
        pytest.xfail("Administrator privileges required to strip ShortFileName.")
    tmpdir = os.path.join(str(pyi_builder._tmpdir), cp_filename + "_TMP")
    for var in ('TMPDIR', 'TMP'):
        monkeypatch.setenv(var, tmpdir)
    pyi_builder._distdir = new_distdir
    pyi_builder.test_script('pyi_path_encoding.py')
@pytest.mark.win32
def test_win_non_codepage_path(pyi_builder, monkeypatch):
    # A mix of eastern-European and Japanese characters: no single codepage can
    # encode this name (codepage encoding would replace some chars with "???").
    non_cp_filename = 'ěščřžýáíé日本語'
    pyi_builder._distdir = os.path.join(pyi_builder._distdir, non_cp_filename)
    os.makedirs(pyi_builder._distdir)
    # A non-ANSI tempdir is also exercised, so TMP must be passed on as wide chars.
    tmpdir = os.path.join(str(pyi_builder._tmpdir), non_cp_filename + "_TMP")
    for var in ('TMPDIR', 'TMP'):
        monkeypatch.setenv(var, tmpdir)
    pyi_builder.test_script('pyi_path_encoding.py')
@pytest.mark.win32
def test_win_py3_no_shortpathname(pyi_builder):
    """Build and run pyi_win_py3_no_shortpathname.py via the builder fixture."""
    pyi_builder.test_script('pyi_win_py3_no_shortpathname.py')
@pytest.mark.win32
def test_win_TEMP_has_shortpathname(pyi_builder, monkeypatch, tmp_path):
    """Verify the script still passes when $TMP holds a short (8.3) path name."""
    long_tmp = tmp_path / "longlongfilename" / "xxx"
    long_tmp.mkdir(parents=True, exist_ok=True)
    import win32api
    short_tmp = win32api.GetShortPathName(str(long_tmp))
    for var in ("TMP", "TEMP"):
        monkeypatch.setenv(var, short_tmp)
    pyi_builder.test_script('pyi_win_py3_no_shortpathname.py')
|
<gh_stars>1-10
# Generates a dump script for anduin: frame_constructor writes out a Python
# file that drops and re-creates database tables through anduin's Data API.

# Literal fragments used when emitting the generated Python source.
_indent = ' '  # one indentation unit in the generated file
_change_line = '\n'  # line terminator for the generated file
_func_define = 'def '  # prefix of an emitted function definition
_func_return = 'return'  # emitted at the end of each generated function
class frame_constructor(object):
    """Writes a Python script that re-creates the given table structures
    through anduin's ``Data`` API: one generated function per table plus a
    ``create_all_table`` driver that calls them all.
    """

    def __init__(self, db_name, table_struct, file_path, file_name):
        # table_struct: mapping of table name -> iterable of column rows.
        # Each non-dict row is indexed positionally (name, type, ..., default,
        # ..., comment) — presumably the layout of a MySQL
        # "SHOW FULL COLUMNS"-style row; TODO confirm against callers.
        self.table_struct = table_struct
        self.db_name = db_name
        self.file_path = file_path
        self.file_name = file_name
        # NOTE(review): plain string concatenation — file_path is expected to
        # already end with a path separator; confirm against callers.
        self.fh = open(self.file_path + self.file_name, 'w')
        self.func_name_list = []  # per-table function names emitted so far

    def dump(self):
        """Emit the whole file: header, one function per table, then the driver."""
        self.add_file_title()
        for table_name, table_frame in self.table_struct.items():
            self.construct_table(table_name, table_frame)
        self.write_main_run()
        self.fh.close()

    def add_file_title(self):
        # File header: the db name as a comment, plus the anduin import.
        self.write_comment(' db-name = ' + self.db_name, indent=0)
        self.changeline()
        self.fh.write('from anduin.server import Data')
        self.changeline()
        self.changeline()

    def construct_table(self, table_name, table_frame):
        # Emit one function (named after the table) that drops and re-creates it.
        self.construct_func_def(table_name)
        self.func_name_list.append(table_name)
        self.changeline()
        self.indent()
        self.write_drop_table(table_name)
        self.changeline()
        self.indent()
        self.write_table_frame(table_frame)
        self.write_create_table(table_frame)
        self.changeline()
        self.indent()
        self.add_return()
        self.changeline(2)

    def construct_func_def(self, func_name):
        # Emits 'def <func_name>():' (no trailing newline).
        self.fh.write(_func_define + func_name + '():')

    def add_return(self):
        # Emits a bare 'return' statement.
        self.fh.write(_func_return)

    def indent(self, indent_multi=1):
        # Write `indent_multi` indentation units.
        self.fh.write(_indent * indent_multi)

    def changeline(self, change_mult=1):
        # Write `change_mult` newlines.
        self.fh.write(_change_line * change_mult)

    def write_comment(self, content, indent=2):
        # Write each line of `content` as a '#'-comment at the given indent.
        # None is silently ignored.
        # NOTE(review): no newline is written between successive lines, so
        # multi-line content runs together on one output line — confirm intent.
        if content is not None:
            for i in content.split('\n'):
                self.indent(indent)
                self.fh.write('#' + i)

    def write_drop_table(self, table_name):
        # Bind the table name in the generated code, then emit a drop query.
        # The second write is a literal: the %-formatting inside it happens in
        # the *generated* script, not here.
        self.fh.write('table_name = "%s"' % table_name)
        self.changeline()
        self.indent()
        self.fh.write('Data.query("drop table %s"%table_name)')

    def write_table_frame(self, table_frame):
        # Emit the `column = [...]` literal describing every column.
        self.fh.write('column = [')
        self.changeline()
        # dbg(table_frame)
        for line in table_frame:
            if isinstance(line, dict):
                # dict rows carry table-level metadata (e.g. table_comment),
                # not columns — skip them here.
                continue
            self.indent(2)
            self.fh.write('(')
            # for word in line:
            if 'PRI' in line:
                # Primary-key columns are forced to type "int".
                self.fh.write('"%s", "int", ' % (line[0]))
            else:
                self.fh.write('"%s", "%s", ' % (line[0], line[1]))
            if line[5] is not None:
                # line[5]: column default value — TODO confirm index meaning.
                self.fh.write('"default \'%s\'", ' % line[5])
            if 'PRI' in line:
                self.fh.write('"%s", "%s", ' % ('AUTO_INCREMENT', 'primary key'))
            if line[8] is not None:
                # line[8]: column comment — TODO confirm index meaning.
                self.fh.write('"comment \'%s\'" ,' % line[8])
            self.fh.write('),')
            self.changeline()
            self.write_comment(line[8])
            self.changeline()
        self.indent()
        self.fh.write(']')
        self.changeline()

    def write_create_table(self, table_frame):
        # The last entry of table_frame is expected to be the metadata dict
        # holding 'table_comment'.
        self.indent()
        self.fh.write('Data.create(table_name, column, comment="%s")' % table_frame[-1]['table_comment'])

    def write_main_run(self):
        # Emit create_all_table(), which simply calls every generated function.
        self.construct_func_def('create_all_table')
        self.changeline()
        for func_name in self.func_name_list:
            self.indent()
            self.fh.write(func_name + '()')
            self.changeline()
        pass
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.