code stringlengths 22 1.05M | apis listlengths 1 3.31k | extract_api stringlengths 75 3.25M |
|---|---|---|
from collections import deque


def _advance(fish_ages: deque, days: int) -> None:
    """Advance the lanternfish age histogram *days* simulated days, in place.

    fish_ages holds counts of fish indexed by days-until-spawn (0..8).
    Extracted because the original script repeated this loop verbatim for
    Part 1 and Part 2.
    """
    for _ in range(days):
        spawning = fish_ages[0]
        fish_ages.rotate(-1)            # every timer decreases; old age-0 bucket wraps to the back
        fish_ages[6] += fish_ages[-1]   # spawning parents reset to a 6-day timer
        fish_ages[-1] = spawning        # newborns start with an 8-day timer


# Puzzle input: one comma-separated line of initial fish timers.
with open("day6.txt", "r") as f:
    fish = list(map(int, f.readline().split(",")))

# Bucket the fish by timer value instead of tracking each fish individually.
fish_ages = deque([0] * 9)
for age in fish:
    fish_ages[age] += 1

_advance(fish_ages, 80)
print(f"Part 1: {sum(fish_ages)}")

# Continue from day 80 up to day 256.
_advance(fish_ages, 256 - 80)
print(f"Part 2: {sum(fish_ages)}")
| [
"collections.deque"
] | [((129, 143), 'collections.deque', 'deque', (['([0] * 9)'], {}), '([0] * 9)\n', (134, 143), False, 'from collections import deque\n')] |
'''tzinfo timezone information for Africa/Mogadishu.'''
from pytz.tzinfo import DstTzInfo
from pytz.tzinfo import memorized_datetime as d
from pytz.tzinfo import memorized_ttinfo as i
class Mogadishu(DstTzInfo):
    '''Africa/Mogadishu timezone definition. See datetime.tzinfo for details'''
    # Auto-generated pytz zone data; do not edit the tables by hand.
    zone = 'Africa/Mogadishu'
    # UTC instants at which the offset rule changes (first entry = beginning of time).
    _utc_transition_times = [
        d(1,1,1,0,0,0),
        d(1930,12,31,21,0,0),
        d(1956,12,31,21,30,0),
        ]
    # (utcoffset seconds, dst seconds, tzname) matching each transition above.
    _transition_info = [
        i(10800,0,'EAT'),   # UTC+3:00, East Africa Time
        i(9000,0,'BEAT'),   # UTC+2:30
        i(10800,0,'EAT'),
        ]

# pytz convention: the module exports a ready-made instance, shadowing the class.
Mogadishu = Mogadishu()
| [
"pytz.tzinfo.memorized_ttinfo",
"pytz.tzinfo.memorized_datetime"
] | [((355, 374), 'pytz.tzinfo.memorized_datetime', 'd', (['(1)', '(1)', '(1)', '(0)', '(0)', '(0)'], {}), '(1, 1, 1, 0, 0, 0)\n', (356, 374), True, 'from pytz.tzinfo import memorized_datetime as d\n'), ((371, 396), 'pytz.tzinfo.memorized_datetime', 'd', (['(1930)', '(12)', '(31)', '(21)', '(0)', '(0)'], {}), '(1930, 12, 31, 21, 0, 0)\n', (372, 396), True, 'from pytz.tzinfo import memorized_datetime as d\n'), ((393, 419), 'pytz.tzinfo.memorized_datetime', 'd', (['(1956)', '(12)', '(31)', '(21)', '(30)', '(0)'], {}), '(1956, 12, 31, 21, 30, 0)\n', (394, 419), True, 'from pytz.tzinfo import memorized_datetime as d\n'), ((452, 470), 'pytz.tzinfo.memorized_ttinfo', 'i', (['(10800)', '(0)', '"""EAT"""'], {}), "(10800, 0, 'EAT')\n", (453, 470), True, 'from pytz.tzinfo import memorized_ttinfo as i\n'), ((470, 488), 'pytz.tzinfo.memorized_ttinfo', 'i', (['(9000)', '(0)', '"""BEAT"""'], {}), "(9000, 0, 'BEAT')\n", (471, 488), True, 'from pytz.tzinfo import memorized_ttinfo as i\n'), ((488, 506), 'pytz.tzinfo.memorized_ttinfo', 'i', (['(10800)', '(0)', '"""EAT"""'], {}), "(10800, 0, 'EAT')\n", (489, 506), True, 'from pytz.tzinfo import memorized_ttinfo as i\n')] |
from nltk.corpus import wordnet as wn
import nltk
import numpy as np
import pm4py
from pm4py.objects.log.log import EventLog
from nlp_label_quality.analysis import matrix_eval
from typing import Dict, List, Tuple, Union, Any
from nlp_label_quality.analysis.attribute_value import Attribute, AttributeValue
import time
import logging
logger = logging.getLogger(__name__)
def filter_log_on_given_level(log: EventLog,
                              attribute: str,
                              values: Any,
                              level: str = 'event',
                              retain: bool = False) -> EventLog:
    """
    Thin wrapper around pm4py's attribute-value filter.

    Parameters
    ----------
    log
        pm4py event log to filter
    attribute
        name of the attribute the filter applies to
    values
        attribute values the filter matches against
    level
        granularity of the filter, 'event' or 'case'
    retain
        True keeps the matching instances, False removes them

    Returns
    -------
    EventLog
        a new event log filtered according to the arguments above
    """
    return pm4py.filter_event_attribute_values(log, attribute, values, level, retain)
def generate_sim_matrices(bool_mulitple_options: bool,
                          attributes: List[Attribute],
                          name: str,
                          options: Union[List[str], List[List[str]]]) -> None:
    """
    Build the similarity matrices for every attribute and store them on the
    attribute instance under the key *name*.

    Parameters
    ----------
    bool_mulitple_options
        True -> build one matrix per option set per attribute,
        False -> build a single matrix per attribute
    attributes
        attribute instances to process
    name
        dict key under which each attribute stores its matrices
    options
        list of options [model, name, attribute to look for, function]
        (or a list of such lists when multiple option sets are used)
    """
    # Dispatch once on the flag instead of branching around two near-identical loops.
    builder_name = 'build_sim_matrices' if bool_mulitple_options else 'build_sim_matrix'
    for attribute in attributes:
        getattr(attribute, builder_name)(name, options)
def get_result_selection(bool_mulitple_options: bool,
                         all_sim_matrices: Dict[Attribute, Dict[str, List[np.ndarray]]],
                         options: Union[List[str], List[List[str]]],
                         thresholds: Union[float, List[float]],
                         treeview_headers: List[str],
                         antonym_library) -> Dict[int, Dict[str, Union[str, int, float]]]:
    """
    Evaluate all similarity matrices against the thresholds and collect the
    repair suggestions.

    Parameters
    ----------
    bool_mulitple_options
        decides whether the single- or multi-option analysis is run
    all_sim_matrices
        mapping Attribute -> {name: [similarity matrices]}
    options
        option configuration(s) used for the analysis
    thresholds
        similarity threshold(s) a pair must exceed to be reported
    treeview_headers
        headers used in the tkinter treeview, i.e. the keys of each record
    antonym_library
        set of antonyms by verbocean

    Returns
    -------
    repair_selection_dict
        dict of all repair candidates for the interactive front-end
    """
    # Pick the handler once; both share the same call signature.
    handler = (_get_result_selection_multiple_options
               if bool_mulitple_options
               else _get_result_selection_single_option)
    return handler(all_sim_matrices, options, thresholds, treeview_headers, antonym_library)
def _get_result_selection_single_option(all_sim_matrices,
                                        options: List[str],
                                        threshold: float,
                                        treeview_headers: List[str],
                                        antonym_library) -> Dict[int, Dict[str, Union[str, int, float]]]:
    """
    Build the repair-selection dict for a single option configuration.

    Parameters
    ----------
    all_sim_matrices
        mapping Attribute -> {name: [similarity matrices]}
    options
        [sim_model, model_name, attr_property, function] used for the analysis
    threshold
        minimum similarity score for a value pair to be reported
    treeview_headers
        keys used for each result record (tkinter treeview columns)
    antonym_library
        verbocean-based antonym dictionary

    Returns
    -------
    repair_selection_dict
        {repair_id: {header: value}} records for the interactive front-end
    """
    # Change vs. previous revision: removed the large block of dead commented-out
    # code that remained after the logic was extracted into
    # _get_repair_values_after_sorting(); behavior is unchanged.
    repair_selection_dict, repair_id = {}, 0
    for attribute, matrix_content in all_sim_matrices.items():
        for name, matrices in matrix_content.items():
            for i, matrix in enumerate(matrices):
                relevant_indices, relevant_values = matrix_eval.get_results_from_matrix(matrix, threshold)
                for index, sim_score in zip(relevant_indices, relevant_values):
                    # attribute value instances for the given pair of indices
                    value1, value2 = attribute.attr_values[index[0]], attribute.attr_values[index[1]]
                    # collect shared antonyms so the front-end can flag them
                    antonym_set = _check_antonymy(antonym_library, value1, value2)
                    result_values = _get_repair_values_after_sorting(
                        attribute, sim_score, value1, value2, options, threshold, antonym_set)
                    # None means the pair was skipped (equal frequencies)
                    if result_values:
                        # keys make retrieval of individual fields easier downstream
                        repair_selection_dict[repair_id] = _result_values_to_dict(treeview_headers, result_values)
                        repair_id += 1
    return repair_selection_dict
def _get_result_selection_multiple_options(all_sim_matrices,
                                           options: List[List[str]],
                                           threshold: float,
                                           treeview_headers: List[str],
                                           antonym_library) -> Dict[int, Dict[str, Union[str, int, float]]]:
    """
    Build the repair-selection dict when multiple option configurations are used.

    Parameters
    ----------
    all_sim_matrices
        mapping Attribute -> {name: [similarity matrices]}
    options
        list of option sets, each [sim_model, model_name, attr_property, function]
    threshold
        minimum similarity score for a value pair to be reported
    treeview_headers
        keys used for each result record (tkinter treeview columns)
    antonym_library
        verbocean-based antonym dictionary

    Returns
    -------
    repair_selection_dict
        {repair_id: {header: value}} records for the interactive front-end
    """
    # Change vs. previous revision: removed the large block of dead commented-out
    # code left over from before _get_repair_values_after_sorting() existed;
    # behavior is unchanged.
    # NOTE(review): this body is currently identical to the single-option
    # variant except for the type of *options* — candidates for merging.
    repair_selection_dict, repair_id = {}, 0
    for attribute, matrix_content in all_sim_matrices.items():
        for name, matrices in matrix_content.items():
            for i, matrix in enumerate(matrices):
                relevant_indices, relevant_values = matrix_eval.get_results_from_matrix(matrix, threshold)
                for index, sim_score in zip(relevant_indices, relevant_values):
                    # attribute value instances for the given pair of indices
                    value1, value2 = attribute.attr_values[index[0]], attribute.attr_values[index[1]]
                    # collect shared antonyms so the front-end can flag them
                    antonym_set = _check_antonymy(antonym_library, value1, value2)
                    result_values = _get_repair_values_after_sorting(
                        attribute, sim_score, value1, value2, options, threshold, antonym_set)
                    # None means the pair was skipped (equal frequencies)
                    if result_values:
                        # keys make retrieval of individual fields easier downstream
                        repair_selection_dict[repair_id] = _result_values_to_dict(treeview_headers, result_values)
                        repair_id += 1
    return repair_selection_dict
def _get_repair_values_after_sorting(attribute, sim_score, value1, value2, options, threshold, antonym_set) -> Union[None, List[Union[str, float]]]:
    """
    Decide which of the two similar values should be replaced and assemble the
    result record for the treeview.

    The value with the lower occurrence count is the one suggested for repair;
    pairs with equal counts carry no signal and are skipped (returns None).
    The returned list is ordered to match the treeview headers:
    [attribute, sim_score, threshold, antonyms, original_value, suggested_value,
     orig_anal_value, sugg_anal_value, original occurrence, suggested occurrence,
     sim_model, model_name, attr_property, function]
    """
    sim_model, model_name, attr_property, function = options
    # Equal frequencies give no basis for choosing a replacement direction.
    if value1.count == value2.count:
        return None
    original, suggestion = (value1, value2) if value1.count < value2.count else (value2, value1)
    # Retrieve the analysed content that was actually compared for both values.
    orig_anal, sugg_anal = _get_anal_content(original, suggestion, attr_property)
    return [attribute.attr, sim_score, threshold, antonym_set,
            original.orig_value, suggestion.orig_value,
            orig_anal, sugg_anal,
            original.count, suggestion.count,
            sim_model, model_name, attr_property, function]
def _get_anal_content(o_value: 'AttributeValue', s_value: 'AttributeValue', attr_property: str) -> Tuple[str, str]:
o_anal_value = getattr(o_value, attr_property)
s_anal_value = getattr(s_value, attr_property)
# print(attr_property, o_anal_value, s_anal_value)
return o_anal_value, s_anal_value
def _result_values_to_dict(treeview_headers: List[str],
result_values: List[Union[int, str, float]]) -> Dict[str, Union[int, str, float]]:
"""
Turn value list into a dict according to the keys based on treeview_headers
Parameters
----------
treeview_headers
headers that are used in tkinter treeview in order to make sure all needed values are present
result_values
result values that were just analysed
Returns
-------
result_dict_per_id
result values ordered to treeview_headers as key
"""
result_dict_per_id = {}
for key, value in zip(treeview_headers, result_values):
result_dict_per_id[key] = value
return result_dict_per_id
def _check_antonymy(antonym_library: Dict[str, List[str]],
                    attr_value1: 'AttributeValue',
                    attr_value2: 'AttributeValue') -> Dict[str, str]:
    """
    Collect all antonyms the two attribute values share, from both sources.

    Parameters
    ----------
    antonym_library
        antonym dictionary derived from the verbocean file
    attr_value1
        first AttributeValue instance of the pair
    attr_value2
        second AttributeValue instance of the pair

    Returns
    -------
    dict
        combined antonym hits from verbocean (spacy lemmas) and WordNet (synsets)
    """
    verbocean_hits = get_antonym_from_verbocean_local(
        antonym_library, attr_value1.spacy_lemmas, attr_value2.spacy_lemmas)
    wordnet_hits = get_antonyms_of_two_terms_from_wordnet(
        attr_value1.synsets_right_pos, attr_value2.synsets_right_pos)
    # WordNet entries win on key collisions, as in the previous update() order.
    return {**verbocean_hits, **wordnet_hits}
def get_antonyms_of_two_terms_from_wordnet(term1_synsets: Dict[str, List['Synset']],
                                           term2_synsets: Dict[str, List['Synset']]) -> Dict[str, List[str]]:
    """
    Return the antonyms both terms and their corresponding synsets share.

    Parameters
    ----------
    term1_synsets
        mapping word -> list of WordNet synsets for each value in the first term
    term2_synsets
        mapping word -> list of WordNet synsets for each value in the second term

    Returns
    -------
    antonym_set
        mapping where a key from one term points at the split words of the
        other term it is antonymous with
    """
    antonym_set = {}
    for key1, syn1 in term1_synsets.items():
        for key2, syn2 in term2_synsets.items():
            # lemmas / antonyms accumulated across all POS-matching synset pairs
            lemma1_list, lemma2_list = [], []
            ant1_list, ant2_list = [], []
            for synset1 in syn1:
                for synset2 in syn2:
                    # only compare synsets with the same part of speech
                    if synset1.pos() == synset2.pos():
                        lemma1_list.extend(set([lemma for lemma in synset1.lemmas()]))
                        lemma2_list.extend(set([lemma for lemma in synset2.lemmas()]))
                        # NOTE(review): the antonym lists are rebuilt from the
                        # *growing* lemma lists on every inner iteration, so they
                        # accumulate duplicates; harmless for the set
                        # intersections below but quadratic in work.
                        ant1_list.extend(set([ant for lemma in lemma1_list for ant in lemma.antonyms()]))
                        ant2_list.extend(set([ant for lemma in lemma2_list for ant in lemma.antonyms()]))
            # a lemma of term1 appearing among term2's antonyms marks the pair
            if set.intersection(set(lemma1_list), set(ant2_list)):
                lib_key, lib_value = key1, list(key2.split())
                if lib_key in antonym_set.keys():
                    antonym_set[lib_key].extend(lib_value)
                else:
                    antonym_set[lib_key] = lib_value
            # and symmetrically for term2 against term1's antonyms
            if set.intersection(set(lemma2_list), set(ant1_list)):
                lib_key, lib_value = key2, list(key1.split())
                if lib_key in antonym_set.keys():
                    antonym_set[lib_key].extend(lib_value)
                else:
                    antonym_set[lib_key] = lib_value
    return antonym_set
def _line_to_tuple(line: str) -> Tuple[str, str, str, str]:
"""
Turn line from verbocean into correct observation
Parameters
----------
line
line from verbocean that has to be separated and prepared
"""
start_br = line.find('[')
end_br = line.find(']')
conf_delim = line.find('::')
verb1 = line[:start_br].strip()
rel = line[start_br + 1: end_br].strip()
verb2 = line[end_br + 1: conf_delim].strip()
conf = line[conf_delim: len(line)].strip()
return verb1, rel, verb2, conf
def get_antonyms_from_verbocean() -> Dict[str, List[str]]:
    """
    Get antonym library based on verbocean.txt.

    Only the relation "opposite-of" is kept, since those pairs resemble
    antonyms for verbs.

    Returns
    -------
    antonym_library
        all antonyms from verbocean saved in a dictionary {verb1: [verb2, ...]}
    """
    input_file = 'data/verbocean.txt'
    rel_to_observation = ["opposite-of"]
    antonym_library = {}
    with open(input_file) as f:
        # Iterate the file directly instead of the former manual readline loop.
        for line in f:
            if line.startswith("#"):
                # verbocean header/comment lines
                continue
            verb1, rel, verb2, _conf = _line_to_tuple(line)
            if rel in rel_to_observation:
                # create the list on first sight of the key, then append
                antonym_library.setdefault(verb1, []).append(verb2)
    logger.info('Verbocean antonym information loaded ...')
    return antonym_library
def get_synonyms_from_verbocean() -> Dict[str, List[str]]:
    """
    Get synonym library based on verbocean.txt.

    Relations kept:
      * "stronger-than" - often still conveys the same meaning
      * "similar"       - gives synonyms

    Returns
    -------
    synonym_library
        all synonyms from verbocean saved in a dictionary {verb1: [verb2, ...]}
    """
    input_file = 'data/verbocean.txt'
    rel_to_observation = ["stronger-than", "similar"]
    synonym_library = {}
    with open(input_file) as f:
        # Iterate the file directly instead of the former manual readline loop.
        for line in f:
            if line.startswith("#"):
                # verbocean header/comment lines
                continue
            verb1, rel, verb2, _conf = _line_to_tuple(line)
            if rel in rel_to_observation:
                # create the list on first sight of the key, then append
                synonym_library.setdefault(verb1, []).append(verb2)
    return synonym_library
def get_antonym_from_verbocean_local(antonym_library: Dict[str, List[str]],
                                     lemmas1: List[str],
                                     lemmas2: List[str]) -> Dict[str, str]:
    """
    Return the antonym pairs the two lemma lists share according to the
    verbocean-based *antonym_library*.

    Returns
    -------
    antonym_set
        {lemma_from_lemmas1: antonymous_lemma_from_lemmas2}; if several
        lemmas from lemmas2 match, the last one in iteration order wins
    """
    return {
        lemma1: lemma2
        for lemma1 in lemmas1 if lemma1 in antonym_library
        for lemma2 in lemmas2 if lemma2 in antonym_library[lemma1]
    }
| [
"logging.getLogger",
"nlp_label_quality.analysis.matrix_eval.get_results_from_matrix",
"pm4py.filter_event_attribute_values"
] | [((348, 375), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (365, 375), False, 'import logging\n'), ((1124, 1198), 'pm4py.filter_event_attribute_values', 'pm4py.filter_event_attribute_values', (['log', 'attribute', 'values', 'level', 'retain'], {}), '(log, attribute, values, level, retain)\n', (1159, 1198), False, 'import pm4py\n'), ((4343, 4397), 'nlp_label_quality.analysis.matrix_eval.get_results_from_matrix', 'matrix_eval.get_results_from_matrix', (['matrix', 'threshold'], {}), '(matrix, threshold)\n', (4378, 4397), False, 'from nlp_label_quality.analysis import matrix_eval\n'), ((7128, 7182), 'nlp_label_quality.analysis.matrix_eval.get_results_from_matrix', 'matrix_eval.get_results_from_matrix', (['matrix', 'threshold'], {}), '(matrix, threshold)\n', (7163, 7182), False, 'from nlp_label_quality.analysis import matrix_eval\n')] |
import unittest
import numpy as np
import tensorflow as tf
import tensorflow.keras as K
from tensorflow.keras.layers import Dense, BatchNormalization, Dropout, Softmax
from sklearn.metrics import accuracy_score
from nncv.data_loader import *
from nncv.loss import *
class TestTFFunction(unittest.TestCase):
    # NOTE(review): this class exercises TensorFlow 1.x graph/session APIs
    # (tf.Session, make_one_shot_iterator, tf.global_variables_initializer);
    # it will not run under TF 2.x eager execution without tf.compat.v1.
    # Shared fixtures, built once at class-definition time.
    _xyz = np.ones([100, 3])          # dummy data: 100 samples, 3 features
    nfeature = _xyz.shape[1]
    _data = {'x': _xyz}
    data = tf.data.Dataset.from_tensor_slices(_data).batch(49)   # 49 -> last batch is smaller (size 2)
    iterator = data.make_one_shot_iterator()
    model = K.models.Sequential([Dense(2,
        input_shape=(nfeature,), activation=tf.nn.relu)])

    def test_place_holder(self):
        # Building a symbolic Keras input and running it through the model
        # must succeed without raising.
        ph_X = tf.keras.Input(shape=(self.nfeature, ))
        ph_Y = self.model(ph_X)

    def test_iteration(self):
        # Drain the one-shot iterator; OutOfRangeError signals normal exhaustion.
        nextx = self.iterator.get_next()
        with tf.Session() as sess:
            sess.run(tf.global_variables_initializer())
            try:
                idb = 0
                while True:
                    x = sess.run(nextx)
                    print("batch", idb, "batch shape", x['x'].shape)
                    idb += 1
            except tf.errors.OutOfRangeError:
                pass

    def test_session(self):
        ''' test how the session and iterator works together with feed dict'''
        ph_X = tf.keras.Input(shape=(self.nfeature, ))
        ph_Y = self.model(ph_X)
        nextx = self.iterator.get_next()
        with tf.Session() as sess:
            sess.run(tf.global_variables_initializer())
            try:
                idb = 0
                while True:
                    x = sess.run(nextx)
                    # feed the fetched numpy batch back in through the Keras input
                    Y = sess.run(ph_Y, feed_dict={ph_X: x['x']})
                    idb += 1
            except tf.errors.OutOfRangeError:
                pass

    def test_feeddict(self):
        ''' test whether the return value can be a dict for the session '''
        def combo(x):
            # build a dict of two tensors derived from the same placeholder
            a = x + 3
            b = x
            return {'a': a, 'b': b}
        ph_X = tf.keras.Input(shape=(self.nfeature, ))
        # ph_Y = self.model(ph_X)
        # ph_dict = {'a': ph_a, 'b': ph_b}
        ph_dict = combo(ph_X)
        nextx = self.iterator.get_next()
        with tf.Session() as sess:
            sess.run(tf.global_variables_initializer())
            try:
                idb = 0
                while True:
                    x = sess.run(nextx)
                    # sess.run accepts a dict of fetches and returns a matching dict
                    Y = sess.run(ph_dict, feed_dict={ph_X: x['x']})
                    idb += 1
            except tf.errors.OutOfRangeError:
                pass

    def test_optimization(self):
        # End-to-end TF1-style training loop on MNIST with an initializable iterator.
        mnist = tf.keras.datasets.mnist
        (x_train, y_train), (x_test, y_test) = mnist.load_data()
        x_train = np.float32(x_train)
        y_train = np.float32(y_train)
        x_test = np.float32(x_test)
        y_test = np.float32(y_test)
        x_train, x_test = x_train / 255.0, x_test / 255.0
        train_batch = tf.data.Dataset.from_tensor_slices({
            'x': x_train, 'y': y_train})
        train_batch = train_batch.batch(32)
        iterator = train_batch.make_initializable_iterator()
        nextx = iterator.get_next()
        model = tf.keras.models.Sequential([
            tf.keras.layers.Flatten(input_shape=(28, 28)),
            tf.keras.layers.Dense(512, activation=tf.nn.relu),
            tf.keras.layers.Dropout(0.2),
            tf.keras.layers.Dense(10, activation=tf.nn.softmax)
        ])
        opt = tf.train.AdamOptimizer(learning_rate=0.001,
            beta1=0.9, beta2=0.999, epsilon=1e-08,
            use_locking=False, name='Adam')
        def loss(model, data):
            # returns loss, gradients and predictions as graph tensors
            y_pred = model(data['x'])
            y_true = data['y']
            l = tf.keras.backend.sparse_categorical_crossentropy(
                y_true, y_pred)
            g = tf.gradients(l, model.trainable_variables)
            return l, g, y_pred, y_true
        ph_l, ph_g, ph_p, ph_y = loss(model, nextx)
        training_op = opt.minimize(ph_l)
        init = tf.global_variables_initializer()
        sess = tf.keras.backend.get_session()
        sess.run(init)
        for epoch in range(5):
            sess.run(iterator.initializer)   # rewind the dataset each epoch
            for batch in range(10):
                _, l, g, p, y = sess.run((training_op, ph_l, ph_g, ph_p, ph_y))
            # accuracy/loss reported from the last batch of the epoch
            p_cont = np.argmax(p, axis=-1)
            e = accuracy_score(p_cont, y)
            print("epoch {}, loss {:.4f}, accuracy{:.4f}".format(
                epoch, np.average(l), e))
| [
"sklearn.metrics.accuracy_score",
"numpy.ones",
"tensorflow.data.Dataset.from_tensor_slices",
"tensorflow.keras.backend.get_session",
"numpy.average",
"tensorflow.Session",
"tensorflow.keras.layers.Dropout",
"numpy.argmax",
"tensorflow.global_variables_initializer",
"tensorflow.gradients",
"tens... | [((322, 339), 'numpy.ones', 'np.ones', (['[100, 3]'], {}), '([100, 3])\n', (329, 339), True, 'import numpy as np\n'), ((684, 722), 'tensorflow.keras.Input', 'tf.keras.Input', ([], {'shape': '(self.nfeature,)'}), '(shape=(self.nfeature,))\n', (698, 722), True, 'import tensorflow as tf\n'), ((1319, 1357), 'tensorflow.keras.Input', 'tf.keras.Input', ([], {'shape': '(self.nfeature,)'}), '(shape=(self.nfeature,))\n', (1333, 1357), True, 'import tensorflow as tf\n'), ((2016, 2054), 'tensorflow.keras.Input', 'tf.keras.Input', ([], {'shape': '(self.nfeature,)'}), '(shape=(self.nfeature,))\n', (2030, 2054), True, 'import tensorflow as tf\n'), ((2729, 2748), 'numpy.float32', 'np.float32', (['x_train'], {}), '(x_train)\n', (2739, 2748), True, 'import numpy as np\n'), ((2767, 2786), 'numpy.float32', 'np.float32', (['y_train'], {}), '(y_train)\n', (2777, 2786), True, 'import numpy as np\n'), ((2804, 2822), 'numpy.float32', 'np.float32', (['x_test'], {}), '(x_test)\n', (2814, 2822), True, 'import numpy as np\n'), ((2840, 2858), 'numpy.float32', 'np.float32', (['y_test'], {}), '(y_test)\n', (2850, 2858), True, 'import numpy as np\n'), ((2940, 3004), 'tensorflow.data.Dataset.from_tensor_slices', 'tf.data.Dataset.from_tensor_slices', (["{'x': x_train, 'y': y_train}"], {}), "({'x': x_train, 'y': y_train})\n", (2974, 3004), True, 'import tensorflow as tf\n'), ((3459, 3578), 'tensorflow.train.AdamOptimizer', 'tf.train.AdamOptimizer', ([], {'learning_rate': '(0.001)', 'beta1': '(0.9)', 'beta2': '(0.999)', 'epsilon': '(1e-08)', 'use_locking': '(False)', 'name': '"""Adam"""'}), "(learning_rate=0.001, beta1=0.9, beta2=0.999, epsilon\n =1e-08, use_locking=False, name='Adam')\n", (3481, 3578), True, 'import tensorflow as tf\n'), ((4056, 4089), 'tensorflow.global_variables_initializer', 'tf.global_variables_initializer', ([], {}), '()\n', (4087, 4089), True, 'import tensorflow as tf\n'), ((4105, 4135), 'tensorflow.keras.backend.get_session', 'tf.keras.backend.get_session', ([], 
{}), '()\n', (4133, 4135), True, 'import tensorflow as tf\n'), ((404, 445), 'tensorflow.data.Dataset.from_tensor_slices', 'tf.data.Dataset.from_tensor_slices', (['_data'], {}), '(_data)\n', (438, 445), True, 'import tensorflow as tf\n'), ((536, 592), 'tensorflow.keras.layers.Dense', 'Dense', (['(2)'], {'input_shape': '(nfeature,)', 'activation': 'tf.nn.relu'}), '(2, input_shape=(nfeature,), activation=tf.nn.relu)\n', (541, 592), False, 'from tensorflow.keras.layers import Dense, BatchNormalization, Dropout, Softmax\n'), ((843, 855), 'tensorflow.Session', 'tf.Session', ([], {}), '()\n', (853, 855), True, 'import tensorflow as tf\n'), ((1447, 1459), 'tensorflow.Session', 'tf.Session', ([], {}), '()\n', (1457, 1459), True, 'import tensorflow as tf\n'), ((2219, 2231), 'tensorflow.Session', 'tf.Session', ([], {}), '()\n', (2229, 2231), True, 'import tensorflow as tf\n'), ((3765, 3829), 'tensorflow.keras.backend.sparse_categorical_crossentropy', 'tf.keras.backend.sparse_categorical_crossentropy', (['y_true', 'y_pred'], {}), '(y_true, y_pred)\n', (3813, 3829), True, 'import tensorflow as tf\n'), ((3863, 3905), 'tensorflow.gradients', 'tf.gradients', (['l', 'model.trainable_variables'], {}), '(l, model.trainable_variables)\n', (3875, 3905), True, 'import tensorflow as tf\n'), ((886, 919), 'tensorflow.global_variables_initializer', 'tf.global_variables_initializer', ([], {}), '()\n', (917, 919), True, 'import tensorflow as tf\n'), ((1490, 1523), 'tensorflow.global_variables_initializer', 'tf.global_variables_initializer', ([], {}), '()\n', (1521, 1523), True, 'import tensorflow as tf\n'), ((2262, 2295), 'tensorflow.global_variables_initializer', 'tf.global_variables_initializer', ([], {}), '()\n', (2293, 2295), True, 'import tensorflow as tf\n'), ((3217, 3262), 'tensorflow.keras.layers.Flatten', 'tf.keras.layers.Flatten', ([], {'input_shape': '(28, 28)'}), '(input_shape=(28, 28))\n', (3240, 3262), True, 'import tensorflow as tf\n'), ((3276, 3325), 
'tensorflow.keras.layers.Dense', 'tf.keras.layers.Dense', (['(512)'], {'activation': 'tf.nn.relu'}), '(512, activation=tf.nn.relu)\n', (3297, 3325), True, 'import tensorflow as tf\n'), ((3339, 3367), 'tensorflow.keras.layers.Dropout', 'tf.keras.layers.Dropout', (['(0.2)'], {}), '(0.2)\n', (3362, 3367), True, 'import tensorflow as tf\n'), ((3381, 3432), 'tensorflow.keras.layers.Dense', 'tf.keras.layers.Dense', (['(10)'], {'activation': 'tf.nn.softmax'}), '(10, activation=tf.nn.softmax)\n', (3402, 3432), True, 'import tensorflow as tf\n'), ((4375, 4396), 'numpy.argmax', 'np.argmax', (['p'], {'axis': '(-1)'}), '(p, axis=-1)\n', (4384, 4396), True, 'import numpy as np\n'), ((4417, 4442), 'sklearn.metrics.accuracy_score', 'accuracy_score', (['p_cont', 'y'], {}), '(p_cont, y)\n', (4431, 4442), False, 'from sklearn.metrics import accuracy_score\n'), ((4532, 4545), 'numpy.average', 'np.average', (['l'], {}), '(l)\n', (4542, 4545), True, 'import numpy as np\n')] |
from pyecharts import options as opts
from pyecharts.charts import Scatter
from pyecharts.commons.utils import JsCode
from pyecharts.faker import Faker
# pyecharts example: a scatter chart where each point carries an extra data
# dimension, rendered to a standalone HTML file. `render` returns the output
# path, so `c` ends up holding that string.
c = (
    Scatter()
    .add_xaxis(Faker.choose())
    .add_yaxis(
        "商家A",
        # each data point is [y-value, extra-dimension]; together with the x
        # value this yields params.value = [x, y, extra] in the JS callbacks
        [list(z) for z in zip(Faker.values(), Faker.choose())],
        label_opts=opts.LabelOpts(
            # raw JavaScript evaluated by ECharts in the browser
            formatter=JsCode(
                "function(params){return params.value[1] +' : '+ params.value[2];}"
            )
        ),
    )
    .set_global_opts(
        title_opts=opts.TitleOpts(title="Scatter-多维度数据"),
        tooltip_opts=opts.TooltipOpts(
            formatter=JsCode(
                "function (params) {return params.name + ' : ' + params.value[2];}"
            )
        ),
        # color the points by the second dimension (dimension=1)
        visualmap_opts=opts.VisualMapOpts(
            type_="color", max_=150, min_=20, dimension=1
        ),
    )
    .render("scatter_multi_dimension.html")
)
| [
"pyecharts.faker.Faker.values",
"pyecharts.options.TitleOpts",
"pyecharts.commons.utils.JsCode",
"pyecharts.charts.Scatter",
"pyecharts.options.VisualMapOpts",
"pyecharts.faker.Faker.choose"
] | [((520, 557), 'pyecharts.options.TitleOpts', 'opts.TitleOpts', ([], {'title': '"""Scatter-多维度数据"""'}), "(title='Scatter-多维度数据')\n", (534, 557), True, 'from pyecharts import options as opts\n'), ((760, 825), 'pyecharts.options.VisualMapOpts', 'opts.VisualMapOpts', ([], {'type_': '"""color"""', 'max_': '(150)', 'min_': '(20)', 'dimension': '(1)'}), "(type_='color', max_=150, min_=20, dimension=1)\n", (778, 825), True, 'from pyecharts import options as opts\n'), ((620, 695), 'pyecharts.commons.utils.JsCode', 'JsCode', (['"""function (params) {return params.name + \' : \' + params.value[2];}"""'], {}), '("function (params) {return params.name + \' : \' + params.value[2];}")\n', (626, 695), False, 'from pyecharts.commons.utils import JsCode\n'), ((188, 202), 'pyecharts.faker.Faker.choose', 'Faker.choose', ([], {}), '()\n', (200, 202), False, 'from pyecharts.faker import Faker\n'), ((163, 172), 'pyecharts.charts.Scatter', 'Scatter', ([], {}), '()\n', (170, 172), False, 'from pyecharts.charts import Scatter\n'), ((265, 279), 'pyecharts.faker.Faker.values', 'Faker.values', ([], {}), '()\n', (277, 279), False, 'from pyecharts.faker import Faker\n'), ((281, 295), 'pyecharts.faker.Faker.choose', 'Faker.choose', ([], {}), '()\n', (293, 295), False, 'from pyecharts.faker import Faker\n'), ((356, 431), 'pyecharts.commons.utils.JsCode', 'JsCode', (['"""function(params){return params.value[1] +\' : \'+ params.value[2];}"""'], {}), '("function(params){return params.value[1] +\' : \'+ params.value[2];}")\n', (362, 431), False, 'from pyecharts.commons.utils import JsCode\n')] |
'''
* @author <NAME>
* @email <EMAIL>
* @create date 01-01-2022 17:03:09
* @modify date 01-01-2022 17:03:09
* @desc This code generates a dataset containing data of chess games played on lichess.org
'''
import pandas as pd
import time
from pandas.core.frame import DataFrame
import constants as C
import json
import random
import os
import pickle
def generate_username_set(csv_file_path: str) -> tuple:
    """
    Read a CSV of lichess.org games, collect every username and keep only
    the relevant games.

    A game is kept only when all of the following hold:
      * at least 10 turns were played,
      * the base time control is longer than 10 minutes,
      * the rating gap between the players is at most 50 points,
      * the game was rated.

    Args:
        csv_file_path (String): path to a CSV file with lichess.org chess games
    Returns:
        A tuple containing:
        1- usernames (Set): every username in the CSV (collected *before* filtering)
        2- games (DataFrame): only the games satisfying the conditions above
    """
    games = pd.read_csv(csv_file_path)
    # Usernames come from all rows, including those filtered out below —
    # the set is used to scrape new games later.
    usernames = set(games['white_id']) | set(games['black_id'])
    # Vectorised filter instead of the former row-by-row drop() loop
    # (which did four scalar lookups per row and mutated while iterating).
    base_minutes = games['increment_code'].str.split('+').str[0].astype(int)
    keep = (
        (games['turns'] >= 10)
        & (base_minutes > 10)
        & ((games['white_rating'] - games['black_rating']).abs() <= 50)
        & (games['rated'].eq(True))
    )
    return (usernames, games[keep])
def scrape_games(players_set: set, output_path: str="Data/Games", game_num:int=3) -> set:
    """
    This function takes a set of lichess.org usernames, and scrape their games from lichess.org with Berserk API.
    The function also saves those games in JSON files
    Args:
        players_set (set): A set of lichess.org usernames
        output_path (str): The path of the directory where JSON files are doing to be saved at
        game_num (int): The maximum number of games scraped from lichess.org
    return:
        new_username_set (set): A new set of lichess usernames for future usages
    """
    new_username_set = set()
    for username in players_set:
        # Only scrape users we have fewer than 40 saved games for.
        if not os.path.isdir(output_path+"/"+username) or len(os.listdir(output_path+"/"+username)) < 40:
            if not os.path.isdir(output_path+"/"+username):
                os.mkdir(output_path+"/"+username)
            try:
                # C.CLIENT is the berserk lichess client configured in constants.
                for game in C.CLIENT.games.export_by_player(username=username, as_pgn=False, max=game_num, rated=True, perf_type="rapid", analysed=True, moves=True, evals=True, opening=True):
                    # only keep games that carry engine analysis
                    if "analysis" in game["players"]["white"].keys():
                        # NOTE(review): random filenames can (rarely) collide and
                        # overwrite a previously saved game — consider the game id.
                        with open("{}/{}/{}.json".format(output_path, username, random.randint(1, 9999999999)), 'w') as f:
                            json.dump(game, f, indent=4, sort_keys=False, default=str)
                        # remember both players for the next scraping round
                        new_username_set.add(game["players"]["white"]["user"]["name"])
                        new_username_set.add(game["players"]["black"]["user"]["name"])
                # throttle between users to stay within lichess API rate limits
                # NOTE(review): original indentation was lost; this sleep may have
                # been inside the per-game loop instead — confirm intended placement.
                time.sleep(0.5)
            except Exception as e:
                # deliberate best-effort: log the failure and continue with the next user
                print("An error has occurred:")
                print(e)
                time.sleep(1)
    return new_username_set
def write_usernames(username_set: set, output_path):
    """Persist *username_set* as a pickle file at ``<output_path>.pkl``.

    Args:
        username_set (set): a set of lichess.org usernames
        output_path: destination path, without the ``.pkl`` extension
    """
    pickle_path = f"{output_path}.pkl"
    with open(pickle_path, "wb") as handle:
        pickle.dump(username_set, handle)
def read_usernames(username_path: str, current_username_set: set = None) -> set:
    """Read a previously pickled set of lichess.org usernames.

    Any usernames supplied in *current_username_set* are merged into the
    loaded set before it is returned.

    Args:
        username_path (str): path to the pickle file
        current_username_set (set, optional): extra usernames to merge in.
            Defaults to None — this replaces the former mutable default
            argument ``set()`` (an anti-pattern); passing a set behaves
            exactly as before.

    Returns:
        set: a set of lichess.org usernames
    """
    with open(username_path, "rb") as f:
        username_set = pickle.load(f)
    if current_username_set:
        # bulk equivalent of the former element-wise add() loop
        username_set.update(current_username_set)
    return username_set
def generate_final_dataset(games_path: str = "Data/Games") -> DataFrame:
    """Generates a dataset of the games found in the directory games_path

    Only standard-variant games that contain server analysis and at least
    16 plies are included; one row is produced per qualifying game.
    Empty/corrupt JSON files are deleted as they are encountered.

    Args:
        games_path (str, optional): path to the saved chess games. Defaults to "Data/Games".

    Returns:
        DataFrame: A dataset that includes all game data
    """
    df = pd.DataFrame(columns=[
        "Game ID",
        "White Rating",
        "Black Rating",
        "Average Rating",
        "Opening ECO",
        "# of Opening Ply",
        "White avg Centi-pawn Loss",
        "White # of Inaccuracies",
        "White # of Mistakes",
        "White # of Blunders",
        "Black avg Centi-pawn Loss",
        "Black # of Inaccuracies",
        "Black # of Mistakes",
        "Black # of Blunders"])
    for username in os.listdir(games_path):
        print("Now working on username: {}".format(username))
        try:
            for filename in os.listdir(os.path.join(games_path, username)):
                file_path = os.path.join(games_path, username, filename)
                try:
                    with open(file_path) as f:
                        game = json.load(f)
                    white = game["players"]["white"]
                    black = game["players"]["black"]
                    # Keep only analysed, standard-variant games with >= 16 plies.
                    if "analysis" in white.keys() and game["variant"] == "standard" and len(game["moves"].split()) >= 16:
                        df.loc[len(df)] = [
                            game["id"],
                            white["rating"],
                            black["rating"],
                            int((white["rating"] + black["rating"]) / 2),
                            game["opening"]["eco"],
                            game["opening"]["ply"],
                            white["analysis"]["acpl"],
                            white["analysis"]["inaccuracy"],
                            white["analysis"]["mistake"],
                            white["analysis"]["blunder"],
                            black["analysis"]["acpl"],
                            black["analysis"]["inaccuracy"],
                            black["analysis"]["mistake"],
                            black["analysis"]["blunder"],
                        ]
                except json.decoder.JSONDecodeError:
                    # Bug fix: the removal path previously omitted games_path
                    # (os.remove(dir + "/" + file)), so the corrupt file was
                    # resolved relative to the CWD and the remove itself raised.
                    os.remove(file_path)
                    print("empty file has been removed")
                except Exception as e:
                    print(e)
        except NotADirectoryError as E:
            # Stray non-directory entries in games_path are reported and skipped.
            print(username, E)
    return df
| [
"os.listdir",
"pickle.dump",
"pandas.read_csv",
"constants.CLIENT.games.export_by_player",
"pickle.load",
"time.sleep",
"json.load",
"os.path.isdir",
"os.mkdir",
"pandas.DataFrame",
"random.randint",
"json.dump",
"os.remove"
] | [((1010, 1036), 'pandas.read_csv', 'pd.read_csv', (['csv_file_path'], {}), '(csv_file_path)\n', (1021, 1036), True, 'import pandas as pd\n'), ((4555, 4897), 'pandas.DataFrame', 'pd.DataFrame', ([], {'columns': "['Game ID', 'White Rating', 'Black Rating', 'Average Rating', 'Opening ECO',\n '# of Opening Ply', 'White avg Centi-pawn Loss',\n 'White # of Inaccuracies', 'White # of Mistakes', 'White # of Blunders',\n 'Black avg Centi-pawn Loss', 'Black # of Inaccuracies',\n 'Black # of Mistakes', 'Black # of Blunders']"}), "(columns=['Game ID', 'White Rating', 'Black Rating',\n 'Average Rating', 'Opening ECO', '# of Opening Ply',\n 'White avg Centi-pawn Loss', 'White # of Inaccuracies',\n 'White # of Mistakes', 'White # of Blunders',\n 'Black avg Centi-pawn Loss', 'Black # of Inaccuracies',\n 'Black # of Mistakes', 'Black # of Blunders'])\n", (4567, 4897), True, 'import pandas as pd\n'), ((5359, 5381), 'os.listdir', 'os.listdir', (['games_path'], {}), '(games_path)\n', (5369, 5381), False, 'import os\n'), ((3574, 3602), 'pickle.dump', 'pickle.dump', (['username_set', 'f'], {}), '(username_set, f)\n', (3585, 3602), False, 'import pickle\n'), ((4075, 4089), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (4086, 4089), False, 'import pickle\n'), ((5477, 5511), 'os.listdir', 'os.listdir', (["(games_path + '/' + dir)"], {}), "(games_path + '/' + dir)\n", (5487, 5511), False, 'import os\n'), ((2260, 2303), 'os.path.isdir', 'os.path.isdir', (["(output_path + '/' + username)"], {}), "(output_path + '/' + username)\n", (2273, 2303), False, 'import os\n'), ((2370, 2413), 'os.path.isdir', 'os.path.isdir', (["(output_path + '/' + username)"], {}), "(output_path + '/' + username)\n", (2383, 2413), False, 'import os\n'), ((2427, 2465), 'os.mkdir', 'os.mkdir', (["(output_path + '/' + username)"], {}), "(output_path + '/' + username)\n", (2435, 2465), False, 'import os\n'), ((2507, 2678), 'constants.CLIENT.games.export_by_player', 'C.CLIENT.games.export_by_player', ([], 
{'username': 'username', 'as_pgn': '(False)', 'max': 'game_num', 'rated': '(True)', 'perf_type': '"""rapid"""', 'analysed': '(True)', 'moves': '(True)', 'evals': '(True)', 'opening': '(True)'}), "(username=username, as_pgn=False, max=\n game_num, rated=True, perf_type='rapid', analysed=True, moves=True,\n evals=True, opening=True)\n", (2538, 2678), True, 'import constants as C\n'), ((3129, 3144), 'time.sleep', 'time.sleep', (['(0.5)'], {}), '(0.5)\n', (3139, 3144), False, 'import time\n'), ((2307, 2347), 'os.listdir', 'os.listdir', (["(output_path + '/' + username)"], {}), "(output_path + '/' + username)\n", (2317, 2347), False, 'import os\n'), ((3269, 3282), 'time.sleep', 'time.sleep', (['(1)'], {}), '(1)\n', (3279, 3282), False, 'import time\n'), ((5662, 5674), 'json.load', 'json.load', (['f'], {}), '(f)\n', (5671, 5674), False, 'import json\n'), ((7824, 7851), 'os.remove', 'os.remove', (["(dir + '/' + file)"], {}), "(dir + '/' + file)\n", (7833, 7851), False, 'import os\n'), ((2887, 2945), 'json.dump', 'json.dump', (['game', 'f'], {'indent': '(4)', 'sort_keys': '(False)', 'default': 'str'}), '(game, f, indent=4, sort_keys=False, default=str)\n', (2896, 2945), False, 'import json\n'), ((2817, 2846), 'random.randint', 'random.randint', (['(1)', '(9999999999)'], {}), '(1, 9999999999)\n', (2831, 2846), False, 'import random\n')] |
from typing import Any, Dict, Mapping, Type
from vial.errors import ErrorHandlingAPI, MethodNotAllowedError, NotFoundError
from vial.json import Json, NativeJson
from vial.loggers import LoggerFactory
from vial.middleware import CallChain, MiddlewareAPI, MiddlewareChain
from vial.parsers import ParserAPI
from vial.request import RequestContext
from vial.resources import Resource
from vial.routes import Route, RoutingAPI
from vial.types import HTTPMethod, LambdaContext, MultiDict, Request, Response
class RouteResolver:
    """Maps an incoming request onto the registered route for its resource and method."""

    def __call__(self, resources: Mapping[str, Mapping[HTTPMethod, Route]], request: Request) -> Route:
        """Return the route matching ``request``.

        Raises NotFoundError when the resource has no routes at all, and
        MethodNotAllowedError when the resource exists but not for this verb.
        """
        routes_for_resource = resources.get(request.resource)
        if not routes_for_resource:
            raise NotFoundError(request.resource)
        matched_route = routes_for_resource.get(request.method)
        if not matched_route:
            raise MethodNotAllowedError(request.method.name)
        return matched_route
class RouteInvoker:
    """Invokes a resolved route with parsed path parameters and normalizes its result."""

    def __call__(self, route: Route, request: Request) -> Response:
        """Call the route's handler and wrap whatever it returns in a Response."""
        parsed_args = self._build_args(route, request)
        raw_result = route.function(*parsed_args.values())
        return self._to_response(raw_result)

    @staticmethod
    def _to_response(result: Any) -> Response:
        # Pass ready-made responses through; spread tuples into the Response
        # constructor; wrap anything else (including None) as a bare body.
        if isinstance(result, Response):
            return result
        if isinstance(result, tuple):
            return Response(*result)
        return Response(result)

    @staticmethod
    def _build_args(route: Route, request: Request) -> Mapping[str, Any]:
        # Parse each declared path variable with its registered parser.
        if not route.variables:
            return {}
        path_params: Mapping[str, str] = request.event["pathParameters"]
        return {name: parser(path_params[name]) for name, parser in route.variables.items()}
class Vial(RoutingAPI, ParserAPI, MiddlewareAPI, ErrorHandlingAPI):
    """AWS Lambda micro-framework application.

    Combines routing, path-parameter parsing, middleware, and error handling,
    and acts as the Lambda handler itself via ``__call__``.
    """

    # Pluggable collaborator classes; override in subclasses to customize behavior.
    route_resolver_class = RouteResolver
    invoker_class = RouteInvoker
    logger_factory_class = LoggerFactory
    json_class: Type[Json] = NativeJson

    def __init__(self, name: str) -> None:
        """Create an application named ``name`` and instantiate its collaborators."""
        super().__init__()
        self.name = name
        self.route_resolver = self.route_resolver_class()
        self.invoker = self.invoker_class()
        self.json = self.json_class()
        self.logger = self.logger_factory_class.get(name)

    def register_resource(self, app: Resource) -> None:
        """Register a resource's parsers, routes, and middlewares with this application."""
        ParserAPI.register_parsers(self, app)
        RoutingAPI.register_routes(self, app)
        MiddlewareAPI.register_middlewares(self, app)

    def __call__(self, event: Dict[str, Any], context: LambdaContext) -> Mapping[str, Any]:
        """Lambda entry point: handle ``event`` and return a Lambda proxy response dict."""
        request = self._build_request(event, context)
        with RequestContext(request):
            response = self._handle_request(request)
        return self._to_lambda_response(response)

    def _handle_request(self, request: Request) -> Response:
        """Resolve and invoke the route, falling back to the default error handler."""
        try:
            route = self.route_resolver(self.routes, request)
            return self._build_invocation_chain(route)(request)
        except Exception as e:  # pylint: disable=broad-except
            self.logger.exception("Encountered uncaught exception")
            return self.default_error_handler(e)

    def _build_invocation_chain(self, route: Route) -> CallChain:
        """Wrap the route invocation in app-level plus resource-level middleware.

        The chain is built inside-out, so the first registered middleware ends
        up as the outermost link.
        """
        all_middleware = self.registered_middleware[self.name] + self.registered_middleware[route.resource]

        def route_invocation(event: Request) -> Response:
            return self.invoker(route, event)

        if not all_middleware:
            return route_invocation
        # Start from the last middleware wrapping the route invocation, then
        # wrap each earlier middleware around the accumulated handler.
        handler = MiddlewareChain(all_middleware[-1], route_invocation)
        for middleware in reversed(all_middleware[0:-1]):
            handler = MiddlewareChain(middleware, handler)
        return handler

    def _to_lambda_response(self, response: Response) -> Mapping[str, Any]:
        """Serialize a Response into the Lambda proxy-integration dict format."""
        if not isinstance(response.body, str):
            # Non-string bodies are JSON-encoded; a None body stays None.
            body = self.json.dumps(response.body) if response.body is not None else None
        else:
            body = response.body
        return {"headers": response.headers, "statusCode": response.status, "body": body}

    @staticmethod
    def _build_request(event: Dict[str, Any], context: LambdaContext) -> Request:
        """Translate a Lambda proxy event (API Gateway-style keys) into a Request."""
        return Request(
            event,
            context,
            HTTPMethod[event["httpMethod"]],
            event["resource"],
            event["path"],
            MultiDict(event["multiValueHeaders"]),
            MultiDict(event["multiValueQueryStringParameters"]),
            event["body"],
        )
| [
"vial.request.RequestContext",
"vial.types.Response",
"vial.types.MultiDict",
"vial.parsers.ParserAPI.register_parsers",
"vial.middleware.MiddlewareAPI.register_middlewares",
"vial.middleware.MiddlewareChain",
"vial.errors.NotFoundError",
"vial.errors.MethodNotAllowedError",
"vial.routes.RoutingAPI.... | [((1397, 1414), 'vial.types.Response', 'Response', (['*result'], {}), '(*result)\n', (1405, 1414), False, 'from vial.types import HTTPMethod, LambdaContext, MultiDict, Request, Response\n'), ((2381, 2418), 'vial.parsers.ParserAPI.register_parsers', 'ParserAPI.register_parsers', (['self', 'app'], {}), '(self, app)\n', (2407, 2418), False, 'from vial.parsers import ParserAPI\n'), ((2427, 2464), 'vial.routes.RoutingAPI.register_routes', 'RoutingAPI.register_routes', (['self', 'app'], {}), '(self, app)\n', (2453, 2464), False, 'from vial.routes import Route, RoutingAPI\n'), ((2473, 2518), 'vial.middleware.MiddlewareAPI.register_middlewares', 'MiddlewareAPI.register_middlewares', (['self', 'app'], {}), '(self, app)\n', (2507, 2518), False, 'from vial.middleware import CallChain, MiddlewareAPI, MiddlewareChain\n'), ((3559, 3612), 'vial.middleware.MiddlewareChain', 'MiddlewareChain', (['all_middleware[-1]', 'route_invocation'], {}), '(all_middleware[-1], route_invocation)\n', (3574, 3612), False, 'from vial.middleware import CallChain, MiddlewareAPI, MiddlewareChain\n'), ((737, 768), 'vial.errors.NotFoundError', 'NotFoundError', (['request.resource'], {}), '(request.resource)\n', (750, 768), False, 'from vial.errors import ErrorHandlingAPI, MethodNotAllowedError, NotFoundError\n'), ((861, 903), 'vial.errors.MethodNotAllowedError', 'MethodNotAllowedError', (['request.method.name'], {}), '(request.method.name)\n', (882, 903), False, 'from vial.errors import ErrorHandlingAPI, MethodNotAllowedError, NotFoundError\n'), ((1365, 1381), 'vial.types.Response', 'Response', (['result'], {}), '(result)\n', (1373, 1381), False, 'from vial.types import HTTPMethod, LambdaContext, MultiDict, Request, Response\n'), ((2679, 2702), 'vial.request.RequestContext', 'RequestContext', (['request'], {}), '(request)\n', (2693, 2702), False, 'from vial.request import RequestContext\n'), ((3693, 3729), 'vial.middleware.MiddlewareChain', 'MiddlewareChain', 
(['middleware', 'handler'], {}), '(middleware, handler)\n', (3708, 3729), False, 'from vial.middleware import CallChain, MiddlewareAPI, MiddlewareChain\n'), ((4383, 4420), 'vial.types.MultiDict', 'MultiDict', (["event['multiValueHeaders']"], {}), "(event['multiValueHeaders'])\n", (4392, 4420), False, 'from vial.types import HTTPMethod, LambdaContext, MultiDict, Request, Response\n'), ((4434, 4485), 'vial.types.MultiDict', 'MultiDict', (["event['multiValueQueryStringParameters']"], {}), "(event['multiValueQueryStringParameters'])\n", (4443, 4485), False, 'from vial.types import HTTPMethod, LambdaContext, MultiDict, Request, Response\n')] |
import torch
import torch.nn as nn
import torch.nn.functional as F
from baseline.pytorch.classify import ClassifierModelBase
from baseline.model import register_model
def ngrams(x, filtsz, mxlen):
    """Stack every contiguous window of length ``filtsz`` along the time axis.

    Args:
        x: Tensor shaped ``(batch, time, channels)`` — time is assumed to be ``mxlen``.
        filtsz: Window (n-gram) length.
        mxlen: Sequence length used to compute the number of windows.

    Returns:
        Tensor shaped ``(batch, mxlen - filtsz + 1, filtsz, channels)``.
    """
    windows = [x[:, start:start + filtsz, :] for start in range(mxlen - filtsz + 1)]
    return torch.stack(windows, 1)
@register_model(task='classify', name='rnf')
class RNFWordClassifier(ClassifierModelBase):
    """Recurrent Neural Filter classifier.

    Applies an LSTM to every word n-gram window of the input and max-pools
    the windows' final hidden states over time.
    """

    def __init__(self):
        super(RNFWordClassifier, self).__init__()

    def init_pool(self, dsz, **kwargs):
        """Configure the RNF pooling layers.

        Args:
            dsz: Input (embedding) feature dimension.
            **kwargs: ``filtsz`` (window size), ``mxlen`` (max sequence length,
                default 100), ``dropout`` (default 0.4), ``rnnsz`` (default 300),
                ``pool_dropout`` (default 0.0).

        Returns:
            The pooled output size (``rnnsz``).
        """
        self.filtsz = kwargs['filtsz']
        self.mxlen = kwargs.get('mxlen', 100)
        pdrop = kwargs.get('dropout', 0.4)
        self.dropout_pre = nn.Dropout(pdrop)
        rnnsz = kwargs.get('rnnsz', 300)
        self.rnf = nn.LSTM(dsz, rnnsz, batch_first=True)
        self.pool_dropout = nn.Dropout(kwargs.get('pool_dropout', 0.))
        return rnnsz

    def pool(self, btc, lengths):
        """Pool a batch of word representations with a recurrent neural filter.

        Args:
            btc: Tensor shaped ``(batch, time, channels)``.
            lengths: Sequence lengths (unused by this pooling).

        Returns:
            Tensor shaped ``(batch, rnnsz)``.
        """
        btc = self.dropout_pre(btc)
        btfc = ngrams(btc, self.filtsz, self.mxlen)
        # Renamed the window axis from ``F`` to ``W`` — the previous name
        # shadowed the module-level ``torch.nn.functional as F`` alias.
        B, T, W, C = btfc.shape
        # Run the LSTM over each n-gram window independently.
        btc = btfc.view(B * T, W, C)
        output, hidden = self.rnf(btc)
        # hidden[0] is the final hidden state; drop the leading layer axis.
        hidden = hidden[0].view(hidden[0].shape[1:])
        btc = hidden.view(B, T, -1)
        # Max-pool the per-window features over the time (window) axis.
        bc = btc.max(1)[0]
        return self.pool_dropout(bc)
| [
"torch.nn.Dropout",
"baseline.model.register_model",
"torch.stack",
"torch.nn.LSTM"
] | [((373, 416), 'baseline.model.register_model', 'register_model', ([], {'task': '"""classify"""', 'name': '"""rnf"""'}), "(task='classify', name='rnf')\n", (387, 416), False, 'from baseline.model import register_model\n'), ((329, 351), 'torch.stack', 'torch.stack', (['chunks', '(1)'], {}), '(chunks, 1)\n', (340, 351), False, 'import torch\n'), ((734, 751), 'torch.nn.Dropout', 'nn.Dropout', (['pdrop'], {}), '(pdrop)\n', (744, 751), True, 'import torch.nn as nn\n'), ((812, 849), 'torch.nn.LSTM', 'nn.LSTM', (['dsz', 'rnnsz'], {'batch_first': '(True)'}), '(dsz, rnnsz, batch_first=True)\n', (819, 849), True, 'import torch.nn as nn\n')] |
# -*- coding: utf-8 -*-
"""
IPLoc
Retrieve location information for an IP
"""
import sys
from iploc import IpLoc
from flask import Flask, jsonify, abort, make_response
app = Flask(__name__)
@app.route("/<ip_addr>/")
def ip_search(ip_addr=None):
if ip_addr is None:
abort(make_response("Error: Missing IP Address", 400))
if not is_valid_ipv4(ip_addr):
abort(make_response("Error: Invalid IP Address", 400))
loc_data = iploc.lookup(ip_addr)
return jsonify(loc_data)
@app.route("/<ip_addr>/<key>/")
def ip_search_by_key(ip_addr=None, key=None):
if ip_addr is None or key is None:
abort(make_response("Error: Invalid parameters provided", 400))
if not is_valid_ipv4(ip_addr):
abort(make_response("Error: Invalid IP Address", 400))
loc_data = iploc.lookup(ip_addr)
response_obj = {}
keys = key.split(',')
for k in keys:
if loc_data.get(str(k)) is not None:
response_obj[k] = loc_data.get(str(k))
return jsonify(response_obj)
def is_valid_ipv4(addr):
    """Return True if *addr* is a dotted-quad IPv4 address (four octets 0-255)."""
    octets = addr.split('.')
    if len(octets) != 4:
        return False
    try:
        for octet in octets:
            value = int(octet)
            if value < 0 or value > 255:
                return False
    except ValueError:
        # A non-numeric octet means the address is malformed.
        return False
    return True
# Default CSV data files used when no paths are supplied on the command line.
defaults = {
    "loc_data_file": 'data/Location.csv',
    "ips_data_file": 'data/Blocks.csv'
}
# Usage: script.py <location_csv> <blocks_csv>; fall back to the defaults
# when fewer than two arguments are given.
if len(sys.argv) < 3:
    loc_file = defaults['loc_data_file']
    ips_file = defaults['ips_data_file']
else:
    loc_file = sys.argv[1]
    ips_file = sys.argv[2]
# Module-level lookup service shared by the route handlers above.
iploc = IpLoc(loc_file, ips_file)
app.run(host='0.0.0.0') | [
"flask.jsonify",
"flask.make_response",
"iploc.IpLoc",
"flask.Flask"
] | [((184, 199), 'flask.Flask', 'Flask', (['__name__'], {}), '(__name__)\n', (189, 199), False, 'from flask import Flask, jsonify, abort, make_response\n'), ((1479, 1504), 'iploc.IpLoc', 'IpLoc', (['loc_file', 'ips_file'], {}), '(loc_file, ips_file)\n', (1484, 1504), False, 'from iploc import IpLoc\n'), ((489, 506), 'flask.jsonify', 'jsonify', (['loc_data'], {}), '(loc_data)\n', (496, 506), False, 'from flask import Flask, jsonify, abort, make_response\n'), ((1006, 1027), 'flask.jsonify', 'jsonify', (['response_obj'], {}), '(response_obj)\n', (1013, 1027), False, 'from flask import Flask, jsonify, abort, make_response\n'), ((294, 341), 'flask.make_response', 'make_response', (['"""Error: Missing IP Address"""', '(400)'], {}), "('Error: Missing IP Address', 400)\n", (307, 341), False, 'from flask import Flask, jsonify, abort, make_response\n'), ((392, 439), 'flask.make_response', 'make_response', (['"""Error: Invalid IP Address"""', '(400)'], {}), "('Error: Invalid IP Address', 400)\n", (405, 439), False, 'from flask import Flask, jsonify, abort, make_response\n'), ((639, 695), 'flask.make_response', 'make_response', (['"""Error: Invalid parameters provided"""', '(400)'], {}), "('Error: Invalid parameters provided', 400)\n", (652, 695), False, 'from flask import Flask, jsonify, abort, make_response\n'), ((746, 793), 'flask.make_response', 'make_response', (['"""Error: Invalid IP Address"""', '(400)'], {}), "('Error: Invalid IP Address', 400)\n", (759, 793), False, 'from flask import Flask, jsonify, abort, make_response\n')] |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Definition of the NSDE algorithm"""
import numpy as np
try:
from openmdao.utils.concurrent import concurrent_eval
except ModuleNotFoundError:
import warnings
warnings.warn("OpenMDAO is not installed. Concurrent evaluation is not available.")
from . import sorting, hv
from .strategies import EvolutionStrategy
def mpi_fobj_wrapper(fobj):
    """
    Wrap an objective function so each evaluation also returns its individual's index.

    This keeps track of which population member a result belongs to when
    evaluations are gathered out of order under MPI.

    Parameters
    ----------
    fobj : callable
        Original objective function

    Returns
    -------
    callable
        Function taking ``(x, ii)`` and returning ``(fobj(x), ii)``
    """
    def indexed_objective(x, ii):
        return fobj(x), ii

    return indexed_objective
class NSDE:
"""
Non-dominated Sorting Differential Evolution (NSDE) Algorithm.
Attributes
----------
fobj : callable
Objective function.
Should have a single argument of type array_like which corresponds to the design vector.
Should have either a single float or 1D array output corresponding to the objective function value(s),
or two array_like outputs, the first of which corresponds to the objective function value(s) and the second
to the constraint violations.
Constraints are assumed to be satisfied if constraint violations <= constraint tolerance.
lb, ub : array_like
Lower and upper bounds
range : array_like
Distances between the lower and upper bounds
f, cr : float
Mutation rate and crossover probabilities
adaptivity : int
Method of self-adaptivity.
- 0: No self-adaptivity. Specified mutation rate and crossover probability are used.
        - 1: Simple self-adaptability. Mutation rate and crossover probability are optimized Monte-Carlo style.
- 2: Complex self-adaptability. Mutation rate and crossover probability are mutated with specified strategy.
max_gen : int
Maximum number of generations
tolx, tolf : float
Tolerances on the design vectors' and objective function values' spreads
tolc : float
Constraint violation tolerance.
n_dim : int
Number of dimension of the problem
n_pop : int
Population size
rng : np.random.Generator
Random number generator
comm : MPI communicator or None
The MPI communicator that will be used objective evaluation for each generation
model_mpi : None or tuple
If the model in fobj is also parallel, then this will contain a tuple with the the
total number of population points to evaluate concurrently, and the color of the point
to evaluate on this rank
strategy : EvolutionStrategy
Evolution strategy to use for procreation
pop : np.array
List of the individuals' chromosomes making up the current population
fit : np.array
Fitness of the individuals in the population
con : np.array
Constraint violations of the individuals in the population
generation : int
Generation counter
"""
    def __init__(
        self,
        strategy=None,
        mut=0.85,
        crossp=1.0,
        adaptivity=0,
        max_gen=1000,
        tolx=1e-8,
        tolf=1e-8,
        tolc=1e-6,
        n_pop=None,
        seed=None,
        comm=None,
        model_mpi=None,
    ):
        """
        Store the algorithm settings; problem-specific state is filled in by `init`.

        Parameters
        ----------
        strategy : None, str, or EvolutionStrategy, optional
            Evolution strategy for procreation. None selects "rand-to-best/1/bin/random".
        mut, crossp : float, optional
            Mutation rate and crossover probability.
        adaptivity : int, optional
            Self-adaptivity mode: 0 (none), 1 (simple), or 2 (complex).
        max_gen : int, optional
            Maximum number of generations.
        tolx, tolf, tolc : float, optional
            Tolerances on design-vector spread, objective spread, and constraints.
        n_pop : int or None, optional
            Population size. None lets `init` choose one from the problem size.
        seed : int or None, optional
            Seed for the random number generator.
        comm : MPI communicator or None, optional
            Communicator used for parallel objective evaluation.
        model_mpi : tuple or None, optional
            (total concurrent points, color of this rank) if the model itself is parallel.

        Raises
        ------
        ValueError
            If `adaptivity` is not one of (0, 1, 2) or `strategy` has an unsupported type.
        """
        # Problem definition; populated by `init()`.
        self.fobj = None
        self.lb, self.ub = None, None
        self.range = 0
        self.f = mut
        self.cr = crossp
        self.max_gen = max_gen
        self.tolx = tolx
        self.tolf = tolf
        self.tolc = tolc
        self.n_dim = 0
        self.n_obj = 0
        self.n_con = 0
        self.n_pop = n_pop
        self.rng = np.random.default_rng(seed)
        if adaptivity not in [0, 1, 2]:
            raise ValueError("self_adaptivity must be one of (0, 1, 2).")
        self.adaptivity = adaptivity
        self.comm = comm
        self.model_mpi = model_mpi
        # Resolve the evolution strategy from its accepted spellings.
        if strategy is None:
            self.strategy = EvolutionStrategy("rand-to-best/1/bin/random")
        elif isinstance(strategy, EvolutionStrategy):
            self.strategy = strategy
        elif isinstance(strategy, str):
            self.strategy = EvolutionStrategy(strategy)
        else:
            raise ValueError(
                "Argument `strategy` should be None, a str, or an instance of EvolutionStrategy."
            )
        # Population state; populated once the first generation is evaluated.
        self.pop = None
        self.fit = None
        self.con = None
        self.fronts = None
        # Convergence measures: design spread, objective spread, hypervolume.
        self.dx, self.df, self.hv = np.inf, np.inf, np.inf
        # Running bounds of the Pareto front, used to normalize the hypervolume.
        self.pareto_lb = +np.inf
        self.pareto_ub = -np.inf
        self.generation = 0
        self._is_initialized = False
        self._running_under_mpi = comm is not None and hasattr(comm, "bcast")
def init(self, fobj, bounds, pop=None):
"""
Initialize the algorithm.
Parameters
----------
fobj : callable
Objective function
bounds : list of 2-tuples
List of (lower, upper) bounds
pop : None or array_like, optional
Initial population. If None, it will be created at random.
"""
# Set default values for the mutation and crossover parameters
if self.f is None or 0.0 > self.f > 1.0:
self.f = 0.85
if self.cr is None or 0.0 > self.cr > 1.0:
self.cr = 1.0
# Prepare the objective function and compute the bounds and variable range
self.fobj = fobj if self.comm is None else mpi_fobj_wrapper(fobj)
self.lb, self.ub = np.asarray(bounds).T
self.range = self.ub - self.lb
# Compute the number of dimensions
self.n_dim = len(bounds)
def create_f_cr(adaptivity, f, cr, n, rng):
# Create random mutation/crossover parameters if self-adaptivity is used
if adaptivity == 0:
f = f * np.ones(n)
cr = cr * np.ones(n)
elif adaptivity == 1:
f = rng.uniform(size=n) * 0.9 + 0.1
cr = rng.uniform(size=n)
elif adaptivity == 2:
f = rng.uniform(size=n) * 0.15 + 0.5
cr = rng.uniform(size=n) * 0.15 + 0.5
return f, cr
adjust_pop = False
if pop is not None:
self.n_pop = pop.shape[0]
self.pop = pop
self.f, self.cr = create_f_cr(
self.adaptivity, self.f, self.cr, self.n_pop, self.rng
)
else:
if self.n_pop is None or self.n_pop <= 0:
self.pop = self.rng.uniform(self.lb, self.ub, size=(1, self.n_dim))
adjust_pop = True
self.n_pop = 1
else:
self.pop = self.rng.uniform(
self.lb, self.ub, size=(self.n_pop, self.n_dim)
)
self.f, self.cr = create_f_cr(
self.adaptivity, self.f, self.cr, self.n_pop, self.rng
)
# Ensure all processors have the same population and mutation/crossover parameters
if self._running_under_mpi:
self.pop, self.f, self.cr = self.comm.bcast(
(self.pop, self.f, self.cr), root=0
)
self.fit, self.con = self(self.pop)
self.n_obj = self.fit.shape[1]
if self.con is not None:
self.n_con = self.con.shape[1]
if adjust_pop:
self.n_pop = 5 * self.n_dim * self.n_obj
# If we are running under MPI, expand population to fully exploit all processors
if self._running_under_mpi:
self.n_pop = int(np.ceil(self.n_pop / self.comm.size) * self.comm.size)
self.pop = np.concatenate(
(
self.pop,
self.rng.uniform(
self.lb, self.ub, size=(self.n_pop - 1, self.n_dim)
),
)
)
self.f, self.cr = create_f_cr(
self.adaptivity, self.f, self.cr, self.n_pop, self.rng
)
if self._running_under_mpi:
self.pop, self.f, self.cr = self.comm.bcast(
(self.pop, self.f, self.cr), root=0
)
self.fit, self.con = self(self.pop)
self.update()
# Set generation counter to 0
self.generation = 0
# Mark class as initialized
self._is_initialized = True
@property
def is_initialized(self):
"""bool: True if the algorithm has been initialized, False if not."""
return self._is_initialized
def __iter__(self):
"""
This class is an iterator itself.
Raises
------
RuntimeError
If this class is being used as an iterator before it has been initialized.
"""
if not self._is_initialized:
raise RuntimeError("NSDE is not yet initialized.")
return self
    def __next__(self):
        """
        Main iteration.

        Procreates, evaluates, and selects one new generation. Iteration stops
        (StopIteration) once the generation budget is exhausted or the design
        or objective spread falls below its tolerance.

        Returns
        -------
        NSDE
            The new state at the next generation.
        """
        if (
            self.generation < self.max_gen
            and self.dx > self.tolx
            and self.df > self.tolf
        ):
            # Create a new population and mutation/crossover parameters
            pop_new, f_new, cr_new = self.procreate()

            # Ensure all processors have the same updated population and mutation/crossover parameters
            if self._running_under_mpi:
                pop_new, f_new, cr_new = self.comm.bcast(
                    (pop_new, f_new, cr_new), root=0
                )

            # Evaluate the fitness of the new population
            fit_new, con_new = self(pop_new)

            # Update the class with the new data
            self.update(pop_new, fit_new, con_new, f_new, cr_new)

            # Compute spreads and update generation counter
            if self.n_obj == 1:
                # Single objective: spread between the best (first) and worst
                # (last) individuals of the sorted population.
                self.dx = np.linalg.norm(self.pop[0] - self.pop[-1])
                self.df = np.abs(self.fit[0] - self.fit[-1])
            else:
                # Multi-objective: track the running bounding box of the first
                # (non-dominated) front ...
                pareto = self.fit[self.fronts[0]]
                self.pareto_lb = np.minimum(self.pareto_lb, np.min(pareto, axis=0, keepdims=True))
                self.pareto_ub = np.maximum(self.pareto_ub, np.max(pareto, axis=0, keepdims=True))
                # ... so the normalized front lies in [1, 2] per objective, and
                # compute its hypervolume w.r.t. the reference point 2.1.
                pareto_norm = 1 + (pareto - self.pareto_lb) / (self.pareto_ub - self.pareto_lb)
                self.hv = hv.hv(pareto_norm, 2.1 * np.ones(self.n_obj))
            self.generation += 1

            # Return the new state
            return self
        else:
            raise StopIteration
    def __call__(self, pop):
        """
        Evaluate the fitness of the given population.

        Parameters
        ----------
        pop : array_like
            List of chromosomes of the individuals in the population

        Returns
        -------
        fit : np.array
            Fitness of the individuals in the given population
        con : np.array or None
            Constraint violations of the individuals in the given population if present. None otherwise.

        Notes
        -----
        If this class has an MPI communicator the individuals will be evaluated in parallel.
        Otherwise function evaluation will be serial.
        """
        if self.is_initialized:
            # NOTE(review): n_con is initialized to 0 (not None) in __init__, so
            # an unconstrained problem allocates a (n_pop, 0) array here rather
            # than None — harmless downstream, but confirm this is intended.
            fit = np.empty((self.n_pop, self.n_obj))
            con = None if self.n_con is None else np.empty((self.n_pop, self.n_con))
        else:
            # Sizes are unknown before the first evaluation; collect into a list.
            fit = pop.shape[0] * [None]
            con = None

        def handle_result(_v, _i, _fit, _con):
            # A tuple result means (fitness, constraint violations); the
            # constraint array is allocated lazily on first sight.
            if isinstance(_v, tuple):
                _fit[_i] = np.asarray(_v[0])
                c = np.asarray(_v[1])
                if _con is None:
                    _con = np.empty((pop.shape[0], c.size))
                _con[_i] = c
            else:
                _fit[_i] = _v
            return _fit, _con

        # Evaluate generation
        if self._running_under_mpi:
            # Construct run cases
            cases = [((item, ii), None) for ii, item in enumerate(pop)]

            # Pad the cases with some dummy cases to make the cases divisible amongst the procs.
            extra = len(cases) % self.comm.size
            if extra > 0:
                for j in range(self.comm.size - extra):
                    cases.append(cases[-1])

            # Compute the fitness of all individuals in parallel using MPI
            results = concurrent_eval(
                self.fobj, cases, self.comm, allgather=True, model_mpi=self.model_mpi
            )

            # Gather the results
            for result in results:
                retval, err = result
                if err is not None or retval is None:
                    raise Exception(err)
                else:
                    # retval is (value, index) thanks to mpi_fobj_wrapper, so
                    # padded duplicate cases simply overwrite the same slot.
                    fit, con = handle_result(*retval, fit, con)
        else:
            # Evaluate the population in serial
            for idx, ind in enumerate(pop):
                val = self.fobj(ind)
                fit, con = handle_result(val, idx, fit, con)

        # Turn all NaNs in the fitnesses into infs
        fit = np.reshape(np.where(np.isnan(fit), np.inf, fit), (pop.shape[0], -1))
        if con is not None:
            con = np.reshape(np.where(np.isnan(con), np.inf, con), (pop.shape[0], -1))

        return fit, con
def run(self):
for _ in self:
pass
    def procreate(self):
        """
        Generate a new population using the selected evolution strategy.

        Returns
        -------
        pop_new : np.array
            Chromosomes of the individuals in the next generation
        f_new : np.array
            New set of mutation rates
        cr_new : np.array
            New set of crossover probabilities
        """
        # Work in the unit hypercube; chromosomes are mapped back to [lb, ub] at the end.
        pop_old_norm = (np.copy(self.pop) - self.lb) / self.range
        pop_new_norm = np.empty_like(pop_old_norm)

        # If there are constraints, augment the fitness to penalize infeasible individuals while procreating.
        # This stops the best and rand-to-best strategies to keep the best infeasible individual alive indefinitely.
        # NOTE(review): this penalty branch is disabled via "and False" —
        # presumably deliberate; confirm before re-enabling or removing.
        if self.n_con and False:
            fit = np.where(
                np.any(self.con >= 1e-6, axis=1, keepdims=True),
                np.linalg.norm(self.con, axis=1, keepdims=True) + np.max(self.fit),
                self.fit,
            )
        else:
            fit = self.fit

        if self.adaptivity == 0 or self.adaptivity == 1:
            if self.adaptivity == 0:
                # No adaptivity. Use static f and cr.
                f_new = self.f
                cr_new = self.cr
            else:
                # Simple adaptivity. Use new f and cr.
                f_new = np.where(
                    self.rng.uniform(size=self.n_pop) < 0.9,
                    self.f,
                    self.rng.uniform(size=self.n_pop) * 0.9 + 0.1,
                )
                cr_new = np.where(
                    self.rng.uniform(size=self.n_pop) < 0.9,
                    self.cr,
                    self.rng.uniform(size=self.n_pop),
                )

            for idx in range(self.n_pop):
                pop_new_norm[idx], _, _ = self.strategy(
                    idx, pop_old_norm, fit, self.fronts, f_new, cr_new, self.rng, False
                )
        else:
            # Complex adaptivity. Mutate f and cr.
            f_new = np.copy(self.f)
            cr_new = np.copy(self.cr)
            for idx in range(self.n_pop):
                pop_new_norm[idx], f_new[idx], cr_new[idx] = self.strategy(
                    idx, pop_old_norm, fit, self.fronts, self.f, self.cr, self.rng, True
                )

        # Map the normalized offspring back to the original design space.
        pop_new = self.lb + self.range * np.asarray(pop_new_norm)
        return pop_new, f_new, cr_new
def update(self, pop_new=None, fit_new=None, con_new=None, f_new=None, cr_new=None):
"""
Update the population (and f/cr if self-adaptive).
Parameters
----------
pop_new : np.array or None, optional
Proposed new population resulting from procreation
fit_new : np.array or None, optional
Fitness of the individuals in the new population
con_new : np.array or None, optional
Constraint violations of the individuals in the new population
f_new : np.array or None, optional
New set of mutation rates
cr_new : np.array or None, optional
New set of crossover probabilities
Notes
-----
Individuals in the old population will only be replaced by the new ones if they have improved fitness.
Mutation rate and crossover probabilities will only be replaced if self-adaptivity is turned on and if their
corresponding individuals have improved fitness.
"""
if self.n_obj == 1:
self._update_single(pop_new, fit_new, con_new, f_new, cr_new)
else:
self._update_multi(pop_new, fit_new, con_new, f_new, cr_new)
    def _update_single(
        self, pop_new=None, fit_new=None, con_new=None, f_new=None, cr_new=None
    ):
        """Single-objective selection: offspring i competes with parent i.

        With constraints, offspring i replaces parent i when: both are feasible
        and the offspring's fitness is no worse; the offspring is feasible and
        the parent is not; or both are infeasible and the offspring's summed
        violation is no larger. Afterwards the population is sorted best-first,
        with infeasible individuals pushed back by a violation penalty.
        """
        # Summed constraint violation per parent (only counting violations beyond tolc).
        if self.n_con:
            cs = np.sum(
                np.where(np.greater(self.con, self.tolc), self.con, 0.0),
                axis=1
            )
        else:
            cs = 0

        if (
            pop_new is not None
            and fit_new is not None
            and f_new is not None
            and cr_new is not None
        ):
            if self.n_con:
                # Feasibility masks for offspring and parents.
                c_new = np.all(con_new <= self.tolc, axis=1)
                c_old = np.all(self.con <= self.tolc, axis=1)
                cs_new = np.sum(
                    np.where(np.greater(con_new, self.tolc), con_new, 0.0), axis=1
                )
                # Boolean "+" acts as logical OR of the three replacement rules.
                improved_indices = np.argwhere(
                    ((c_new & c_old) & (fit_new <= self.fit).flatten())
                    + (c_new & ~c_old)
                    + ((~c_new & ~c_old) & (cs_new <= cs))
                )
                self.con[improved_indices] = con_new[improved_indices]
                cs[improved_indices] = cs_new[improved_indices]
            else:
                improved_indices = np.argwhere((fit_new <= self.fit).flatten())

            self.pop[improved_indices] = pop_new[improved_indices]
            self.fit[improved_indices] = fit_new[improved_indices]
            if self.adaptivity != 0:
                # Successful mutation/crossover parameters follow their individuals.
                self.f[improved_indices] = f_new[improved_indices]
                self.cr[improved_indices] = cr_new[improved_indices]

        # Sort population so the best individual is always the first
        idx_sort = np.argsort(
            self.fit.flatten() + np.where(cs != 0.0, cs * np.max(self.fit), 0.0)
        )
        self.pop = self.pop[idx_sort]
        self.fit = self.fit[idx_sort]
        if self.n_con:
            self.con = self.con[idx_sort]
        if self.adaptivity != 0:
            self.f = self.f[idx_sort]
            self.cr = self.cr[idx_sort]
    def _update_multi(
        self, pop_new=None, fit_new=None, con_new=None, f_new=None, cr_new=None
    ):
        """Multi-objective update: (mu + lambda) selection using
        non-dominated sorting with crowding-distance truncation."""
        # Merge the old and the new generation; the sorting below trims the
        # combined pool back down to n_pop survivors.
        if (
            pop_new is not None
            and fit_new is not None
            and f_new is not None
            and cr_new is not None
        ):
            self.pop = np.concatenate((self.pop, pop_new))
            self.fit = np.concatenate((self.fit, fit_new))
            if self.n_con:
                self.con = np.concatenate((self.con, con_new))
            if self.adaptivity != 0:
                self.f = np.concatenate((self.f, f_new))
                self.cr = np.concatenate((self.cr, cr_new))
        # Rank individuals into non-dominated fronts (constraint-aware when
        # constraints are present).
        if self.n_con:
            fronts = sorting.nonDominatedSorting(self.fit, self.con, self.n_pop)
        else:
            fronts = sorting.nonDominatedSorting(self.fit, self.n_pop)
        # The last front usually overflows n_pop: keep only its members with
        # the best crowding distance until exactly n_pop survivors remain.
        fronts[-1] = np.asarray(fronts[-1])[
            sorting.crowdingDistanceSorting(self.fit[fronts[-1]])[
                : (self.n_pop - sum(len(f) for f in fronts[:-1]))
            ]
        ].tolist()
        # Flatten the fronts into one survivor index list and record, per
        # front, the positions its members will occupy after reindexing.
        new_idxs = []
        counter = 0
        self.fronts = []
        for front in fronts:
            new_idxs += front
            self.fronts += [list(range(counter, counter + len(front)))]
            counter += len(front)
        self.pop = self.pop[new_idxs]
        self.fit = self.fit[new_idxs]
        if self.n_con:
            self.con = self.con[new_idxs]
        if self.adaptivity != 0:
            self.f = self.f[new_idxs]
            self.cr = self.cr[new_idxs]
| [
"numpy.copy",
"numpy.abs",
"numpy.greater",
"numpy.ceil",
"numpy.random.default_rng",
"numpy.ones",
"numpy.asarray",
"numpy.min",
"numpy.any",
"numpy.max",
"numpy.empty_like",
"numpy.empty",
"numpy.concatenate",
"numpy.linalg.norm",
"warnings.warn",
"numpy.isnan",
"numpy.all",
"ope... | [((221, 309), 'warnings.warn', 'warnings.warn', (['"""OpenMDAO is not installed. Concurrent evaluation is not available."""'], {}), "(\n 'OpenMDAO is not installed. Concurrent evaluation is not available.')\n", (234, 309), False, 'import warnings\n'), ((3826, 3853), 'numpy.random.default_rng', 'np.random.default_rng', (['seed'], {}), '(seed)\n', (3847, 3853), True, 'import numpy as np\n'), ((14070, 14097), 'numpy.empty_like', 'np.empty_like', (['pop_old_norm'], {}), '(pop_old_norm)\n', (14083, 14097), True, 'import numpy as np\n'), ((5681, 5699), 'numpy.asarray', 'np.asarray', (['bounds'], {}), '(bounds)\n', (5691, 5699), True, 'import numpy as np\n'), ((11545, 11579), 'numpy.empty', 'np.empty', (['(self.n_pop, self.n_obj)'], {}), '((self.n_pop, self.n_obj))\n', (11553, 11579), True, 'import numpy as np\n'), ((12654, 12745), 'openmdao.utils.concurrent.concurrent_eval', 'concurrent_eval', (['self.fobj', 'cases', 'self.comm'], {'allgather': '(True)', 'model_mpi': 'self.model_mpi'}), '(self.fobj, cases, self.comm, allgather=True, model_mpi=self\n .model_mpi)\n', (12669, 12745), False, 'from openmdao.utils.concurrent import concurrent_eval\n'), ((15600, 15615), 'numpy.copy', 'np.copy', (['self.f'], {}), '(self.f)\n', (15607, 15615), True, 'import numpy as np\n'), ((15637, 15653), 'numpy.copy', 'np.copy', (['self.cr'], {}), '(self.cr)\n', (15644, 15653), True, 'import numpy as np\n'), ((19477, 19512), 'numpy.concatenate', 'np.concatenate', (['(self.pop, pop_new)'], {}), '((self.pop, pop_new))\n', (19491, 19512), True, 'import numpy as np\n'), ((19536, 19571), 'numpy.concatenate', 'np.concatenate', (['(self.fit, fit_new)'], {}), '((self.fit, fit_new))\n', (19550, 19571), True, 'import numpy as np\n'), ((10146, 10188), 'numpy.linalg.norm', 'np.linalg.norm', (['(self.pop[0] - self.pop[-1])'], {}), '(self.pop[0] - self.pop[-1])\n', (10160, 10188), True, 'import numpy as np\n'), ((10215, 10249), 'numpy.abs', 'np.abs', (['(self.fit[0] - self.fit[-1])'], {}), 
'(self.fit[0] - self.fit[-1])\n', (10221, 10249), True, 'import numpy as np\n'), ((11630, 11664), 'numpy.empty', 'np.empty', (['(self.n_pop, self.n_con)'], {}), '((self.n_pop, self.n_con))\n', (11638, 11664), True, 'import numpy as np\n'), ((11855, 11872), 'numpy.asarray', 'np.asarray', (['_v[0]'], {}), '(_v[0])\n', (11865, 11872), True, 'import numpy as np\n'), ((11893, 11910), 'numpy.asarray', 'np.asarray', (['_v[1]'], {}), '(_v[1])\n', (11903, 11910), True, 'import numpy as np\n'), ((13348, 13361), 'numpy.isnan', 'np.isnan', (['fit'], {}), '(fit)\n', (13356, 13361), True, 'import numpy as np\n'), ((14005, 14022), 'numpy.copy', 'np.copy', (['self.pop'], {}), '(self.pop)\n', (14012, 14022), True, 'import numpy as np\n'), ((14403, 14451), 'numpy.any', 'np.any', (['(self.con >= 1e-06)'], {'axis': '(1)', 'keepdims': '(True)'}), '(self.con >= 1e-06, axis=1, keepdims=True)\n', (14409, 14451), True, 'import numpy as np\n'), ((15922, 15946), 'numpy.asarray', 'np.asarray', (['pop_new_norm'], {}), '(pop_new_norm)\n', (15932, 15946), True, 'import numpy as np\n'), ((17723, 17759), 'numpy.all', 'np.all', (['(con_new <= self.tolc)'], {'axis': '(1)'}), '(con_new <= self.tolc, axis=1)\n', (17729, 17759), True, 'import numpy as np\n'), ((17784, 17821), 'numpy.all', 'np.all', (['(self.con <= self.tolc)'], {'axis': '(1)'}), '(self.con <= self.tolc, axis=1)\n', (17790, 17821), True, 'import numpy as np\n'), ((19626, 19661), 'numpy.concatenate', 'np.concatenate', (['(self.con, con_new)'], {}), '((self.con, con_new))\n', (19640, 19661), True, 'import numpy as np\n'), ((19725, 19756), 'numpy.concatenate', 'np.concatenate', (['(self.f, f_new)'], {}), '((self.f, f_new))\n', (19739, 19756), True, 'import numpy as np\n'), ((19783, 19816), 'numpy.concatenate', 'np.concatenate', (['(self.cr, cr_new)'], {}), '((self.cr, cr_new))\n', (19797, 19816), True, 'import numpy as np\n'), ((6012, 6022), 'numpy.ones', 'np.ones', (['n'], {}), '(n)\n', (6019, 6022), True, 'import numpy as np\n'), ((6049, 
6059), 'numpy.ones', 'np.ones', (['n'], {}), '(n)\n', (6056, 6059), True, 'import numpy as np\n'), ((10378, 10415), 'numpy.min', 'np.min', (['pareto'], {'axis': '(0)', 'keepdims': '(True)'}), '(pareto, axis=0, keepdims=True)\n', (10384, 10415), True, 'import numpy as np\n'), ((10477, 10514), 'numpy.max', 'np.max', (['pareto'], {'axis': '(0)', 'keepdims': '(True)'}), '(pareto, axis=0, keepdims=True)\n', (10483, 10514), True, 'import numpy as np\n'), ((11971, 12003), 'numpy.empty', 'np.empty', (['(pop.shape[0], c.size)'], {}), '((pop.shape[0], c.size))\n', (11979, 12003), True, 'import numpy as np\n'), ((13463, 13476), 'numpy.isnan', 'np.isnan', (['con'], {}), '(con)\n', (13471, 13476), True, 'import numpy as np\n'), ((14468, 14515), 'numpy.linalg.norm', 'np.linalg.norm', (['self.con'], {'axis': '(1)', 'keepdims': '(True)'}), '(self.con, axis=1, keepdims=True)\n', (14482, 14515), True, 'import numpy as np\n'), ((14518, 14534), 'numpy.max', 'np.max', (['self.fit'], {}), '(self.fit)\n', (14524, 14534), True, 'import numpy as np\n'), ((17391, 17422), 'numpy.greater', 'np.greater', (['self.con', 'self.tolc'], {}), '(self.con, self.tolc)\n', (17401, 17422), True, 'import numpy as np\n'), ((20028, 20050), 'numpy.asarray', 'np.asarray', (['fronts[-1]'], {}), '(fronts[-1])\n', (20038, 20050), True, 'import numpy as np\n'), ((7764, 7800), 'numpy.ceil', 'np.ceil', (['(self.n_pop / self.comm.size)'], {}), '(self.n_pop / self.comm.size)\n', (7771, 7800), True, 'import numpy as np\n'), ((10663, 10682), 'numpy.ones', 'np.ones', (['self.n_obj'], {}), '(self.n_obj)\n', (10670, 10682), True, 'import numpy as np\n'), ((17884, 17914), 'numpy.greater', 'np.greater', (['con_new', 'self.tolc'], {}), '(con_new, self.tolc)\n', (17894, 17914), True, 'import numpy as np\n'), ((18895, 18911), 'numpy.max', 'np.max', (['self.fit'], {}), '(self.fit)\n', (18901, 18911), True, 'import numpy as np\n')] |
#!/usr/bin/env python3
# Copyright 2021 Canonical Ltd.
# See LICENSE file for licensing details.
import logging
import os
import subprocess
from tempfile import NamedTemporaryFile
from ops.charm import CharmBase
from ops.framework import StoredState
from ops.main import main
from ops.model import ActiveStatus, BlockedStatus
from ops.pebble import ConnectionError
logger = logging.getLogger(__name__)
class ConcourseWorkerOperatorCharm(CharmBase):
    """Charm operating a Concourse CI worker node.

    The worker generates an SSH key pair, publishes its public half on the
    ``concourse-worker`` relation, receives the web node's TSA host and host
    key in return, and then starts the worker service through Pebble.
    """

    _stored = StoredState()

    def __init__(self, *args):
        super().__init__(*args)
        self.framework.observe(self.on.config_changed, self._on_config_changed)
        self._stored.set_default(
            concourse_web_host=None,
            concourse_tsa_host_key_pub=None,
        )
        self.framework.observe(self.on.concourse_worker_relation_changed, self._on_concourse_worker_relation_changed)

    def _on_concourse_worker_relation_changed(self, event):
        """Exchange SSH keys with the Concourse web node over the relation.

        Defers until the worker key exists locally and the web side has
        published its TSA host and host key.
        """
        if not os.path.exists("/concourse-keys/worker_key.pub"):
            logger.info("We don't have /concourse-keys/worker_key.pub to publish on the relation yet, deferring.")
            event.defer()
            return
        # Publish our public key on the relation.
        with open("/concourse-keys/worker_key.pub", "r") as worker_key_pub:
            logger.info("Publishing WORKER_KEY_PUB on concourse-worker relation.")
            event.relation.data[self.unit]["WORKER_KEY_PUB"] = worker_key_pub.read()
        container = self.unit.get_container("concourse-worker")
        tsa_host = event.relation.data[event.app].get("TSA_HOST")
        tsa_host_key_pub = event.relation.data[event.app].get("CONCOURSE_TSA_HOST_KEY_PUB")
        if not tsa_host or not tsa_host_key_pub:
            # The web side has not published its data yet; retry later.
            event.defer()
            return
        try:
            container.push("/concourse-keys/tsa_host_key.pub", tsa_host_key_pub, make_dirs=True)
            self._stored.concourse_web_host = tsa_host
        except ConnectionError:
            logger.info("Unable to push to the container, deferring.")
            event.defer()
            return
        # If we get this far, retrigger a config-changed, where we'll check if we
        # have all the info we need and try to run concourse-worker if we do.
        self.on.config_changed.emit()

    def _get_concourse_binary_path(self):
        """Copy the concourse binary out of the workload container.

        Returns the path of an executable temporary file on the charm side.
        NOTE(review): the temporary file is never deleted; confirm that is
        acceptable over the charm's lifetime.
        """
        container = self.unit.get_container("concourse-worker")
        with NamedTemporaryFile(delete=False) as temp:
            temp.write(container.pull("/usr/local/concourse/bin/concourse", encoding=None).read())
            temp.flush()
            logger.info("Wrote concourse binary to %s", temp.name)
            # Make it executable
            os.chmod(temp.name, 0o777)
            return temp.name

    def _on_config_changed(self, event):
        """Generate the worker key if needed and (re)start the worker service."""
        # Check whether we already have the worker key. If not, create it
        # using the concourse binary pulled from the workload container.
        if not os.path.exists("/concourse-keys/worker_key"):
            try:
                concourse_binary_path = self._get_concourse_binary_path()
            except ConnectionError:
                event.defer()
                return
            subprocess.run([concourse_binary_path, "generate-key", "-t", "ssh", "-f", "/concourse-keys/worker_key"])
        if not self._stored.concourse_web_host:
            self.unit.status = BlockedStatus("Relation required with Concourse Web.")
            return
        # Check we have other needed file from relation.
        if not os.path.exists(self._env_config["CONCOURSE_TSA_PUBLIC_KEY"]):
            # BUG FIX: this previously called self.unit.BlockedStatus(...),
            # which is not an attribute of Unit and raised AttributeError.
            self.unit.status = BlockedStatus("Waiting for CONCOURSE_TSA_PUBLIC_KEY")
            event.defer()
            return
        container = self.unit.get_container("concourse-worker")
        layer = self._concourse_layer()
        try:
            services = container.get_plan().to_dict().get("services", {})
        except ConnectionError:
            logger.info("Unable to connect to Pebble, deferring event")
            event.defer()
            return
        if services != layer["services"]:
            container.add_layer("concourse-worker", layer, combine=True)
            logger.info("Added updated layer to concourse")
            if container.get_service("concourse-worker").is_running():
                container.stop("concourse-worker")
            container.start("concourse-worker")
            logger.info("Restarted concourse-worker service")
        self.unit.status = ActiveStatus()

    def _concourse_layer(self):
        """Return the Pebble layer definition for the worker service."""
        return {
            "services": {
                "concourse-worker": {
                    "override": "replace",
                    "summary": "concourse worker node",
                    "command": "/usr/local/bin/entrypoint.sh worker",
                    "startup": "enabled",
                    "environment": self._env_config,
                }
            },
        }

    @property
    def _env_config(self):
        """Environment variables passed to the concourse worker process."""
        return {
            "CONCOURSE_BAGGAGECLAIM_DRIVER": "overlay",
            "CONCOURSE_TSA_HOST": "{}:2222".format(self._stored.concourse_web_host),  # comma-separated list.
            "CONCOURSE_TSA_PUBLIC_KEY": "/concourse-keys/tsa_host_key.pub",
            "CONCOURSE_TSA_WORKER_PRIVATE_KEY": "/concourse-keys/worker_key",
            "CONCOURSE_WORK_DIR": "/opt/concourse/worker",
        }
if __name__ == "__main__":
    # use_juju_for_storage keeps StoredState in controller-side storage.
    main(ConcourseWorkerOperatorCharm, use_juju_for_storage=True)
| [
"logging.getLogger",
"os.path.exists",
"ops.main.main",
"ops.model.ActiveStatus",
"ops.framework.StoredState",
"subprocess.run",
"os.chmod",
"ops.model.BlockedStatus",
"tempfile.NamedTemporaryFile"
] | [((379, 406), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (396, 406), False, 'import logging\n'), ((470, 483), 'ops.framework.StoredState', 'StoredState', ([], {}), '()\n', (481, 483), False, 'from ops.framework import StoredState\n'), ((5319, 5380), 'ops.main.main', 'main', (['ConcourseWorkerOperatorCharm'], {'use_juju_for_storage': '(True)'}), '(ConcourseWorkerOperatorCharm, use_juju_for_storage=True)\n', (5323, 5380), False, 'from ops.main import main\n'), ((4402, 4416), 'ops.model.ActiveStatus', 'ActiveStatus', ([], {}), '()\n', (4414, 4416), False, 'from ops.model import ActiveStatus, BlockedStatus\n'), ((950, 998), 'os.path.exists', 'os.path.exists', (['"""/concourse-keys/worker_key.pub"""'], {}), "('/concourse-keys/worker_key.pub')\n", (964, 998), False, 'import os\n'), ((2405, 2437), 'tempfile.NamedTemporaryFile', 'NamedTemporaryFile', ([], {'delete': '(False)'}), '(delete=False)\n', (2423, 2437), False, 'from tempfile import NamedTemporaryFile\n'), ((2684, 2708), 'os.chmod', 'os.chmod', (['temp.name', '(511)'], {}), '(temp.name, 511)\n', (2692, 2708), False, 'import os\n'), ((2874, 2918), 'os.path.exists', 'os.path.exists', (['"""/concourse-keys/worker_key"""'], {}), "('/concourse-keys/worker_key')\n", (2888, 2918), False, 'import os\n'), ((3112, 3220), 'subprocess.run', 'subprocess.run', (["[concourse_binary_path, 'generate-key', '-t', 'ssh', '-f',\n '/concourse-keys/worker_key']"], {}), "([concourse_binary_path, 'generate-key', '-t', 'ssh', '-f',\n '/concourse-keys/worker_key'])\n", (3126, 3220), False, 'import subprocess\n'), ((3297, 3351), 'ops.model.BlockedStatus', 'BlockedStatus', (['"""Relation required with Concourse Web."""'], {}), "('Relation required with Concourse Web.')\n", (3310, 3351), False, 'from ops.model import ActiveStatus, BlockedStatus\n'), ((3444, 3504), 'os.path.exists', 'os.path.exists', (["self._env_config['CONCOURSE_TSA_PUBLIC_KEY']"], {}), "(self._env_config['CONCOURSE_TSA_PUBLIC_KEY'])\n", 
(3458, 3504), False, 'import os\n')] |
'''
Another version of tree builder, converting list to tree.
where list is actually an array of tree.
@author: <NAME>
'''
from typing import List
from unittest import TestCase
class TreeNode(object):
    '''
    Definition for a binary tree node.
    '''
    def __init__(self, val=0, left=None, right=None):
        self.val = val
        self.left = left
        self.right = right

    def print(self):
        """Render the subtree as a compact '{val, l: ..., r: ...}' string."""
        buf = '{' + str(self.val)
        if self.left is not None:
            buf += ', l: ' + self.left.print()
        if self.right is not None:
            buf += ', r: ' + self.right.print()
        return buf + '}'


def list2tree(lst: List):
    '''
    Convert a level-order (breadth-first) array into a tree of TreeNode.

        0, 1, 2, 3, 4, 5, 6  =>
                 0
              1     2
            3   4  5  6

    Children of slot m live at 2m+1 and 2m+2; None marks an absent node,
    and children of absent nodes are ignored.

    Fixes over the previous version:
    - even-length inputs no longer drop the final (unpaired) child,
    - the caller's list is left unmodified,
    - a None parent slot no longer crashes when its child slots are in range.
    '''
    if not lst:
        return None
    # One node per non-None slot; built up front so linking is a pure
    # index computation and the input list is never mutated.
    nodes = [None if val is None else TreeNode(val) for val in lst]
    nodes[0] = TreeNode(int(lst[0]))  # root value coerced to int, as before
    for m, parent in enumerate(nodes):
        if parent is None:
            continue  # absent node: skip its (orphaned) children
        l, r = 2 * m + 1, 2 * m + 2
        if l < len(nodes):
            parent.left = nodes[l]
        if r < len(nodes):
            parent.right = nodes[r]
    return nodes[0]
if __name__ == '__main__':
    # Table-driven self-check: each case pairs an input array with the
    # expected print() rendering of the resulting tree.
    t = TestCase()
    cases = [
        ([0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10],
         '{0, l: {1, l: {3, l: {7}, r: {8}}, r: {4, l: {9}, r: {10}}}, r: {2, l: {5}, r: {6}}}'),
        ([3, 5, 1, 6, 2, 0, 8, None, None, 7, 4],
         '{3, l: {5, l: {6}, r: {2, l: {7}, r: {4}}}, r: {1, l: {0}, r: {8}}}'),
    ]
    for data, expected in cases:
        t.assertEqual(expected, list2tree(data).print())
    print('OK!')
print('OK!') | [
"unittest.TestCase"
] | [((1408, 1418), 'unittest.TestCase', 'TestCase', ([], {}), '()\n', (1416, 1418), False, 'from unittest import TestCase\n')] |
from __future__ import annotations
from typing import Literal
from prettyqt import constants, core, widgets
from prettyqt.qt import QtWidgets
from prettyqt.utils import InvalidParamError, bidict
# Two-way mapping between short policy names and Qt's GestureCancelPolicy enum.
GESTURE_CANCEL_POLICY = bidict(
    none=QtWidgets.QGesture.GestureCancelPolicy.CancelNone,
    all_in_context=QtWidgets.QGesture.GestureCancelPolicy.CancelAllInContext,
)
# Accepted string values for the cancel-policy setter below.
GestureCancelPolicyStr = Literal["none", "all_in_context"]
# Re-parent QGesture onto prettyqt's core.Object mixin (prettyqt convention).
QtWidgets.QGesture.__bases__ = (core.Object,)
class Gesture(QtWidgets.QGesture):
    """QGesture subclass exposing string-based accessors (prettyqt style)."""

    def get_state(self) -> constants.GestureStateStr:
        """Return the gesture's current state as a string constant.

        Returns:
            state
        """
        current = self.state()
        return constants.GESTURE_STATE.inverse[current]

    def get_gesture_type(self) -> constants.GestureTypeStr:
        """Return the gesture's type as a string constant.

        Returns:
            gesture type
        """
        current = self.gestureType()
        return constants.GESTURE_TYPE.inverse[current]

    def get_hot_spot(self) -> core.PointF:
        """Return the gesture hot spot as a core.PointF."""
        spot = self.hotSpot()
        return core.PointF(spot)

    def set_gesture_cancel_policy(self, policy: GestureCancelPolicyStr):
        """Set gesture cancel policy.

        Args:
            policy: gesture cancel policy to use

        Raises:
            InvalidParamError: gesture cancel policy does not exist
        """
        if policy not in GESTURE_CANCEL_POLICY:
            raise InvalidParamError(policy, GESTURE_CANCEL_POLICY)
        qt_policy = GESTURE_CANCEL_POLICY[policy]
        self.setGestureCancelPolicy(qt_policy)

    def get_gesture_cancel_policy(self) -> GestureCancelPolicyStr:
        """Return current gesture cancel policy.

        Returns:
            gesture cancel policy
        """
        current = self.gestureCancelPolicy()
        return GESTURE_CANCEL_POLICY.inverse[current]
if __name__ == "__main__":
    # Minimal smoke test: a QApplication must exist before creating gestures.
    app = widgets.app()
    gesture = Gesture()
| [
"prettyqt.utils.InvalidParamError",
"prettyqt.utils.bidict",
"prettyqt.widgets.app"
] | [((223, 363), 'prettyqt.utils.bidict', 'bidict', ([], {'none': 'QtWidgets.QGesture.GestureCancelPolicy.CancelNone', 'all_in_context': 'QtWidgets.QGesture.GestureCancelPolicy.CancelAllInContext'}), '(none=QtWidgets.QGesture.GestureCancelPolicy.CancelNone,\n all_in_context=QtWidgets.QGesture.GestureCancelPolicy.CancelAllInContext)\n', (229, 363), False, 'from prettyqt.utils import InvalidParamError, bidict\n'), ((1768, 1781), 'prettyqt.widgets.app', 'widgets.app', ([], {}), '()\n', (1779, 1781), False, 'from prettyqt import constants, core, widgets\n'), ((1359, 1407), 'prettyqt.utils.InvalidParamError', 'InvalidParamError', (['policy', 'GESTURE_CANCEL_POLICY'], {}), '(policy, GESTURE_CANCEL_POLICY)\n', (1376, 1407), False, 'from prettyqt.utils import InvalidParamError, bidict\n')] |
# -*- coding: utf-8 -*-
"""
Module implementing ProfileDialog.
"""
from PySide2.QtCore import Slot, Qt
from PySide2.QtWidgets import QDialog, QFileDialog, \
QMessageBox, QWidget, QApplication
import datetime
import os.path
from typing import Optional
from .ui_profile import Ui_Dialog
class ProfileDialog(QDialog, Ui_Dialog):
    """
    A dialog for creation or edition of a profile.
    """
    def __init__(self, parent: Optional[QWidget] = None,
                 profile=None, names=None):
        """
        Constructor.

        @param parent reference to the parent widget
        @param profile an optional profile to edit; its fields pre-fill the form
        @param names names already in use; valid() rejects them as duplicates
        """
        super().__init__(parent)
        self.setupUi(self)
        self.names = () if names is None else tuple(names)
        if profile is not None:
            self.name.setText(profile.name)
            self.mask_edit.setText(profile.mask)
            self.path.setText(profile.path)
            self.pattern.setText(profile.pattern)

    @Slot()
    def on_change_clicked(self):
        """
        Select the profile folder.
        """
        wd = QFileDialog.getExistingDirectory(
            self, directory=self.path.text(),
            options=QFileDialog.ShowDirsOnly | QFileDialog.DontUseNativeDialog)
        self.path.setText(wd)

    def get_name(self):
        """Return the profile name, stripped of surrounding whitespace."""
        return self.name.text().strip()

    def get_path(self):
        """Return the profile folder path, stripped."""
        return self.path.text().strip()

    def get_mask(self):
        """Return the image filename mask (glob pattern), stripped."""
        return self.mask_edit.text().strip()

    def get_pattern(self):
        """Return the strftime date pattern, stripped."""
        return self.pattern.text().strip()

    @Slot()
    def on_button_box_accepted(self):
        """Accept the dialog only when its content validates."""
        if self.valid():
            self.accept()

    def valid(self):
        """
        Validate the content of the dialog.

        Shows a warning and focuses the offending field on failure;
        returns True when every field is acceptable.
        """
        if self.get_name() in self.names:
            self.error(translate('profile', '"{}" is already used')
                       .format(self.get_name()),
                       Id.translate('profile', 'Name'))
            return False
        if self.get_name() == '':
            self.error(translate('profile', 'Name cannot be empty'),
                       Id.translate('profile', 'Name'))
            return False
        if not os.path.isdir(self.get_path()):
            # BUG FIX: the translation context was 'profiles' here while every
            # other message uses 'profile', breaking the translation lookup.
            self.error(translate('profile', '"{}" is not a valid folder')
                       .format(self.get_path()),
                       Id.translate('profile', 'Path'))
            return False
        if '*' not in self.get_mask() and '?' not in self.get_mask():
            self.error(translate('profile',
                                 '"{}" is not a valid image pattern')
                       .format(self.get_mask()),
                       Id.translate('profile', 'Mask'))
            return False
        now = datetime.datetime.now()
        try:
            # strftime may raise ValueError on malformed format strings
            # (platform dependent), which is what we use to validate here.
            now.strftime(self.get_pattern())
        except ValueError:
            self.error(translate('profile',
                                 '"{}" is not a valid date pattern')
                       .format(self.get_pattern()),
                       Id.translate('profile', 'Pattern'))
            return False
        return True

    def error(self, msg, field):
        """Show *msg* in a warning box and focus the widget for *field*."""
        QMessageBox.warning(self, translate('profile', field), msg)
        # The mask field's widget is named 'mask_edit'; the rest follow
        # the convention widget name == field.lower().
        attr = 'mask_edit' if field == 'Mask' else field.lower()
        getattr(self, attr).setFocus(Qt.OtherFocusReason)
def translate(ctx, txt):
    """Translate *txt* in context *ctx* via the running QApplication."""
    app = QApplication.instance()
    return app.translate(ctx, txt)
class Id:
    """Identity stand-in for Qt's translate().

    Lets lupdate harvest translatable strings while returning them
    unchanged at runtime.
    """

    @staticmethod
    def translate(_context, text):
        # Deliberately a no-op: actual translation happens elsewhere.
        return text
| [
"PySide2.QtCore.Slot",
"datetime.datetime.now",
"PySide2.QtWidgets.QApplication.instance"
] | [((1038, 1044), 'PySide2.QtCore.Slot', 'Slot', ([], {}), '()\n', (1042, 1044), False, 'from PySide2.QtCore import Slot, Qt\n'), ((1617, 1623), 'PySide2.QtCore.Slot', 'Slot', ([], {}), '()\n', (1621, 1623), False, 'from PySide2.QtCore import Slot, Qt\n'), ((2807, 2830), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (2828, 2830), False, 'import datetime\n'), ((3448, 3471), 'PySide2.QtWidgets.QApplication.instance', 'QApplication.instance', ([], {}), '()\n', (3469, 3471), False, 'from PySide2.QtWidgets import QDialog, QFileDialog, QMessageBox, QWidget, QApplication\n')] |
from PyQt5.QtCore import QThreadPool, QRunnable, QMetaType, pyqtSignal, QObject
from openspectra.image import BandDescriptor, GreyscaleImage, RGBImage, Image
from openspectra.openspecrtra_tools import OpenSpectraImageTools
from openspectra.openspectra_file import OpenSpectraFile
from openspectra.utils import Logger, LogHelper
class GreyscaleImageTask(QRunnable):
    """Background task rendering a single band as a greyscale image.

    The finished image is delivered through the supplied callback.
    NOTE(review): ``grey_image_created`` is declared but never emitted here,
    and QRunnable is not a QObject; confirm the signal is still needed.
    """

    __LOG:Logger = LogHelper.logger("GreyscaleImageTask")

    grey_image_created = pyqtSignal(GreyscaleImage)

    def __init__(self, image_tools:OpenSpectraImageTools, band:int,
                 band_descriptor:BandDescriptor, call_back):
        super().__init__()
        self.__tools = image_tools
        self.__band_index = band
        self.__descriptor = band_descriptor
        self.__on_done = call_back

    def run(self):
        self.__LOG.debug("Task creating image...")
        result = self.__tools.greyscale_image(self.__band_index, self.__descriptor)
        self.__LOG.debug("Task calling call back...")
        self.__on_done(result)
class RGBImageTask(QRunnable):
    """Background task rendering three bands as an RGB image.

    The finished image is delivered through the supplied callback.
    NOTE(review): ``rgb_image_created`` is declared but never emitted here,
    and QRunnable is not a QObject; confirm the signal is still needed.
    """

    rgb_image_created = pyqtSignal(RGBImage)

    def __init__(self, image_tools:OpenSpectraImageTools, red:int, green:int, blue:int,
                 red_descriptor:BandDescriptor, green_descriptor:BandDescriptor,
                 blue_descriptor:BandDescriptor, call_back):
        super().__init__()
        self.__tools = image_tools
        self.__bands = (red, green, blue)
        self.__descriptors = (red_descriptor, green_descriptor, blue_descriptor)
        self.__on_done = call_back

    def run(self):
        red, green, blue = self.__bands
        red_d, green_d, blue_d = self.__descriptors
        result = self.__tools.rgb_image(red, green, blue, red_d, green_d, blue_d)
        self.__on_done(result)
class ThreadedImageTools(QObject):
    """A wrapper for OpenSpectraImageTools that creates Images off the UI thread.

    Keeps a QT application responsive while large data sets (in terms of
    lines and samples) are turned into images; completed images are
    announced via the ``image_created`` signal.
    """

    image_created = pyqtSignal(Image)

    def __init__(self, file:OpenSpectraFile):
        super().__init__()
        self.__tools = OpenSpectraImageTools(file)
        self.__pool = QThreadPool.globalInstance()

    def greyscale_image(self, band:int, band_descriptor:BandDescriptor):
        """Render a single band asynchronously."""
        self.__submit(GreyscaleImageTask(
            self.__tools, band, band_descriptor, self.__on_image_done))

    def rgb_image(self, red:int, green:int, blue:int,
                  red_descriptor:BandDescriptor, green_descriptor:BandDescriptor,
                  blue_descriptor:BandDescriptor):
        """Render three bands as an RGB image asynchronously."""
        self.__submit(RGBImageTask(
            self.__tools, red, green, blue,
            red_descriptor, green_descriptor, blue_descriptor,
            self.__on_image_done))

    def __submit(self, task):
        # Pool-owned tasks delete themselves once run() returns.
        task.setAutoDelete(True)
        self.__pool.start(task)

    def __on_image_done(self, image:Image):
        self.image_created.emit(image)
| [
"PyQt5.QtCore.pyqtSignal",
"openspectra.utils.LogHelper.logger",
"PyQt5.QtCore.QThreadPool.globalInstance",
"openspectra.openspecrtra_tools.OpenSpectraImageTools"
] | [((388, 426), 'openspectra.utils.LogHelper.logger', 'LogHelper.logger', (['"""GreyscaleImageTask"""'], {}), "('GreyscaleImageTask')\n", (404, 426), False, 'from openspectra.utils import Logger, LogHelper\n'), ((453, 479), 'PyQt5.QtCore.pyqtSignal', 'pyqtSignal', (['GreyscaleImage'], {}), '(GreyscaleImage)\n', (463, 479), False, 'from PyQt5.QtCore import QThreadPool, QRunnable, QMetaType, pyqtSignal, QObject\n'), ((1117, 1137), 'PyQt5.QtCore.pyqtSignal', 'pyqtSignal', (['RGBImage'], {}), '(RGBImage)\n', (1127, 1137), False, 'from PyQt5.QtCore import QThreadPool, QRunnable, QMetaType, pyqtSignal, QObject\n'), ((2292, 2309), 'PyQt5.QtCore.pyqtSignal', 'pyqtSignal', (['Image'], {}), '(Image)\n', (2302, 2309), False, 'from PyQt5.QtCore import QThreadPool, QRunnable, QMetaType, pyqtSignal, QObject\n'), ((2413, 2440), 'openspectra.openspecrtra_tools.OpenSpectraImageTools', 'OpenSpectraImageTools', (['file'], {}), '(file)\n', (2434, 2440), False, 'from openspectra.openspecrtra_tools import OpenSpectraImageTools\n'), ((2470, 2498), 'PyQt5.QtCore.QThreadPool.globalInstance', 'QThreadPool.globalInstance', ([], {}), '()\n', (2496, 2498), False, 'from PyQt5.QtCore import QThreadPool, QRunnable, QMetaType, pyqtSignal, QObject\n')] |
# import json
import os
from ibm_watson import LanguageTranslatorV3
from ibm_cloud_sdk_core.authenticators import IAMAuthenticator
from dotenv import load_dotenv
# Load credentials from a local .env file into os.environ.
load_dotenv()
# IBM Cloud API key and service endpoint; a KeyError here means the
# .env file is missing or incomplete.
apikey = os.environ['apikey']
url = os.environ['url']
# Authenticate and build the Watson Language Translator client.
authenticator = IAMAuthenticator(apikey)
language_translator = LanguageTranslatorV3(
    version='2018-05-01',
    authenticator=authenticator
)
language_translator.set_service_url(url)
def englishToFrench(english_text):
    '''
    Translate English text to French.

    Returns the raw Watson translation response (the value of
    ``get_result()``), not a bare string.
    '''
    response = language_translator.translate(
        text=english_text, model_id='en-fr')
    return response.get_result()
def frenchToEnglish(french_text):
    '''
    Translate French text to English.

    Returns the raw Watson translation response (the value of
    ``get_result()``), not a bare string.
    '''
    response = language_translator.translate(
        text=french_text, model_id='fr-en')
    return response.get_result()
"ibm_watson.LanguageTranslatorV3",
"ibm_cloud_sdk_core.authenticators.IAMAuthenticator",
"dotenv.load_dotenv"
] | [((163, 176), 'dotenv.load_dotenv', 'load_dotenv', ([], {}), '()\n', (174, 176), False, 'from dotenv import load_dotenv\n'), ((249, 273), 'ibm_cloud_sdk_core.authenticators.IAMAuthenticator', 'IAMAuthenticator', (['apikey'], {}), '(apikey)\n', (265, 273), False, 'from ibm_cloud_sdk_core.authenticators import IAMAuthenticator\n'), ((296, 367), 'ibm_watson.LanguageTranslatorV3', 'LanguageTranslatorV3', ([], {'version': '"""2018-05-01"""', 'authenticator': 'authenticator'}), "(version='2018-05-01', authenticator=authenticator)\n", (316, 367), False, 'from ibm_watson import LanguageTranslatorV3\n')] |
import sys
import os
import numpy as np
import cv2
from PIL import Image
from skimage.morphology import binary_dilation
import time
def result_fusion(data_list, label_list=None, save_path=None,
                  palette_path='./result/pspnet/results/A151678.png'):
    """Fuse per-model segmentation masks by per-label majority vote.

    Args:
        data_list: directories of predicted masks, one per model; all must
            contain identically named files (the first directory drives
            iteration).
        label_list: label ids to vote on, applied in order (later labels
            overwrite earlier ones in the fused mask). Required despite the
            None default — None would raise here, as before.
        save_path: directory the fused palettized PNGs are written to.
        palette_path: image whose palette is copied onto the outputs
            (previously hard-coded inside the loop; the default preserves
            the old behaviour).
    """
    total = len(os.listdir(data_list[0]))
    # Loop-invariant work hoisted out of the per-image loop: the palette
    # and the majority threshold never change between iterations.
    palette = Image.open(palette_path).getpalette()
    threshold = len(data_list) / 2
    count = 0
    for item in os.scandir(data_list[0]):
        img_list = [item.path] + [os.path.join(case, item.name) for case in data_list[1:]]
        mask = np.array(Image.open(img_list[0]), dtype=np.uint8)
        for label in label_list:
            votes = np.zeros_like(mask, dtype=np.uint8)
            for img_path in img_list:
                votes += (np.array(Image.open(img_path)) == label).astype(np.uint8)
            binary_mask = (votes > threshold).astype(np.uint8)
            if label in (4, 5):
                # Labels 4 and 5 are dilated before merging; the rationale
                # is not documented upstream (presumably thin structures).
                binary_mask = binary_dilation(binary_mask)
            mask[binary_mask == 1] = label
        out = Image.fromarray(mask, mode='P')
        out.putpalette(palette)
        out.save(os.path.join(save_path, item.name))
        count += 1
        sys.stdout.write('\rCurrent %d/%d' % (count, total))
    sys.stdout.write('\n')
def result_fusion_v2(data_list, label_list=None, save_path=None, shape=(256, 256),
                     weight=None, palette_path='./result/pspnet/results/A151678.png'):
    """Fuse per-model segmentation masks by per-label weighted vote.

    Each model i adds weight[i] to every (label, pixel) it predicts; the
    fused label per pixel is the argmax over labels.

    Args:
        data_list: directories of predicted masks, one per model.
        label_list: label ids to fuse. NOTE: labels index the first axis of
            the score tensor, so label_list must effectively be range(n).
        save_path: directory the fused palettized PNGs are written to.
        shape: (height, width) of the prediction masks.
        weight: per-model vote weights, same length as data_list. Scores
            accumulate in uint8, so the weight total must stay below 256.
        palette_path: image whose palette is copied onto the outputs
            (previously hard-coded inside the loop; the default preserves
            the old behaviour).
    """
    total = len(os.listdir(data_list[0]))
    # The palette never changes between images: load it once, not per image.
    palette = Image.open(palette_path).getpalette()
    count = 0
    for item in os.scandir(data_list[0]):
        img_list = [item.path] + [os.path.join(case, item.name) for case in data_list[1:]]
        scores = np.zeros((len(label_list),) + shape, dtype=np.uint8)
        for i, img_path in enumerate(img_list):
            # Open each prediction once; the previous version re-opened the
            # same file for every label (len(label_list) redundant reads).
            pred = np.array(Image.open(img_path))
            onehot = np.zeros_like(scores, dtype=np.uint8)
            for label in label_list:
                onehot[label, ...] = (pred == label).astype(np.uint8)
            scores[onehot == 1] += weight[i]
        mask = Image.fromarray(np.argmax(scores, axis=0), mode='P')
        mask.putpalette(palette)
        mask.save(os.path.join(save_path, item.name))
        count += 1
        sys.stdout.write('\rCurrent %d/%d' % (count, total))
    sys.stdout.write('\n')
if __name__ == "__main__":
    start = time.time()
    # Prediction folders of the four models to fuse (presumably ranked by
    # quality, matching the descending weights below — confirm).
    result_list = ['./result/t4/results','./result/t3/results','./result/pspnet/results','./result/deeplab_rs/results']
    # Plain majority-vote fusion, kept for reference:
    # result_fusion(result_list,list(range(7)),'./result/results')
    # Weighted fusion over labels 0-6 with per-model weights.
    result_fusion_v2(result_list,list(range(7)),'./result/results',weight=[9,8,7,5])
    print('Run time: %.4f'%(time.time() - start))
"skimage.morphology.binary_dilation",
"PIL.Image.fromarray",
"os.listdir",
"PIL.Image.open",
"os.scandir",
"numpy.zeros_like",
"os.path.join",
"numpy.argmax",
"time.time",
"sys.stdout.write"
] | [((266, 290), 'os.scandir', 'os.scandir', (['data_list[0]'], {}), '(data_list[0])\n', (276, 290), False, 'import os\n'), ((1172, 1194), 'sys.stdout.write', 'sys.stdout.write', (['"""\n"""'], {}), "('\\n')\n", (1188, 1194), False, 'import sys\n'), ((1360, 1384), 'os.scandir', 'os.scandir', (['data_list[0]'], {}), '(data_list[0])\n', (1370, 1384), False, 'import os\n'), ((2171, 2193), 'sys.stdout.write', 'sys.stdout.write', (['"""\n"""'], {}), "('\\n')\n", (2187, 2193), False, 'import sys\n'), ((2239, 2250), 'time.time', 'time.time', ([], {}), '()\n', (2248, 2250), False, 'import time\n'), ((210, 234), 'os.listdir', 'os.listdir', (['data_list[0]'], {}), '(data_list[0])\n', (220, 234), False, 'import os\n'), ((973, 1004), 'PIL.Image.fromarray', 'Image.fromarray', (['mask'], {'mode': '"""P"""'}), "(mask, mode='P')\n", (988, 1004), False, 'from PIL import Image\n'), ((1117, 1168), 'sys.stdout.write', 'sys.stdout.write', (["('\\rCurrent %d/%d' % (count, len_))"], {}), "('\\rCurrent %d/%d' % (count, len_))\n", (1133, 1168), False, 'import sys\n'), ((1304, 1328), 'os.listdir', 'os.listdir', (['data_list[0]'], {}), '(data_list[0])\n', (1314, 1328), False, 'import os\n'), ((2116, 2167), 'sys.stdout.write', 'sys.stdout.write', (["('\\rCurrent %d/%d' % (count, len_))"], {}), "('\\rCurrent %d/%d' % (count, len_))\n", (2132, 2167), False, 'import sys\n'), ((488, 511), 'PIL.Image.open', 'Image.open', (['img_list[0]'], {}), '(img_list[0])\n', (498, 511), False, 'from PIL import Image\n'), ((584, 619), 'numpy.zeros_like', 'np.zeros_like', (['mask'], {'dtype': 'np.uint8'}), '(mask, dtype=np.uint8)\n', (597, 619), True, 'import numpy as np\n'), ((1055, 1089), 'os.path.join', 'os.path.join', (['save_path', 'item.name'], {}), '(save_path, item.name)\n', (1067, 1089), False, 'import os\n'), ((1696, 1731), 'numpy.zeros_like', 'np.zeros_like', (['mask'], {'dtype': 'np.uint8'}), '(mask, dtype=np.uint8)\n', (1709, 1731), True, 'import numpy as np\n'), ((1970, 1993), 'numpy.argmax', 
'np.argmax', (['mask'], {'axis': '(0)'}), '(mask, axis=0)\n', (1979, 1993), True, 'import numpy as np\n'), ((2054, 2088), 'os.path.join', 'os.path.join', (['save_path', 'item.name'], {}), '(save_path, item.name)\n', (2066, 2088), False, 'import os\n'), ((326, 355), 'os.path.join', 'os.path.join', (['case', 'item.name'], {}), '(case, item.name)\n', (338, 355), False, 'import os\n'), ((401, 450), 'PIL.Image.open', 'Image.open', (['"""./result/pspnet/results/A151678.png"""'], {}), "('./result/pspnet/results/A151678.png')\n", (411, 450), False, 'from PIL import Image\n'), ((886, 914), 'skimage.morphology.binary_dilation', 'binary_dilation', (['binary_mask'], {}), '(binary_mask)\n', (901, 914), False, 'from skimage.morphology import binary_dilation\n'), ((1420, 1449), 'os.path.join', 'os.path.join', (['case', 'item.name'], {}), '(case, item.name)\n', (1432, 1449), False, 'import os\n'), ((1495, 1544), 'PIL.Image.open', 'Image.open', (['"""./result/pspnet/results/A151678.png"""'], {}), "('./result/pspnet/results/A151678.png')\n", (1505, 1544), False, 'from PIL import Image\n'), ((2551, 2562), 'time.time', 'time.time', ([], {}), '()\n', (2560, 2562), False, 'import time\n'), ((695, 715), 'PIL.Image.open', 'Image.open', (['img_path'], {}), '(img_path)\n', (705, 715), False, 'from PIL import Image\n'), ((1801, 1821), 'PIL.Image.open', 'Image.open', (['img_path'], {}), '(img_path)\n', (1811, 1821), False, 'from PIL import Image\n')] |
from flask.ext.sqlalchemy import SQLAlchemy
from sqlalchemy.dialects.postgresql import HSTORE
from sqlalchemy.ext.mutable import MutableDict
db = SQLAlchemy()
PersonType = db.Enum('driver', 'team-owner', 'crew-chief', 'vehicle-owner', 'team-principal',
'technical-chief', 'race-engineer', name='person_types')
class Person(db.Model):
__tablename__ = 'people'
id = db.Column(db.Integer, primary_key=True, autoincrement=True)
name = db.Column(db.String(100), nullable=False)
country = db.Column(db.String(50), nullable=False)
class Series(db.Model):
__tablename__ = 'series'
id = db.Column(db.String(5), primary_key=True)
description = db.Column(db.String(50), nullable=True)
class Team(db.Model):
__tablename__ = 'teams'
id = db.Column(db.String(50), primary_key=True)
name = db.Column(db.String(50), nullable=False)
alias = db.Column(db.String(50), nullable=False)
owner_id = db.Column(db.Integer, db.ForeignKey('people.id'), nullable=False)
races = db.relationship('Race', secondary='race_results')
owner = db.relationship('Person', primaryjoin=owner_id == Person.id)
class Vehicle(db.Model):
__tablename__ = 'vehicles'
id = db.Column(db.Integer, primary_key=True, autoincrement=True)
number = db.Column(db.Integer, nullable=False)
owner_id = db.Column(db.Integer, db.ForeignKey('people.id'), nullable=True)
vehicle_metadata = db.Column(MutableDict.as_mutable(HSTORE), nullable=False)
races = db.relationship('Race', secondary='race_results')
owner = db.relationship('Person', primaryjoin=owner_id == Person.id)
class DriverStanding(db.Model):
__tablename__ = 'driver_standings'
id = db.Column(db.Integer, primary_key=True, autoincrement=True)
driver_id = db.Column(db.Integer, db.ForeignKey('people.id'), nullable=False)
vehicle_id = db.Column(db.Integer, db.ForeignKey('vehicles.id'), nullable=False)
series = db.Column(db.String(5), db.ForeignKey('series.id'), nullable=False)
season = db.Column(db.Integer, nullable=False)
position = db.Column(db.Integer, nullable=False)
points = db.Column(db.Integer, nullable=False)
poles = db.Column(db.Integer, nullable=False)
wins = db.Column(db.Integer, nullable=False)
starts = db.Column(db.Integer, nullable=False)
dnfs = db.Column(db.Integer, nullable=False)
top5 = db.Column(db.Integer, nullable=False)
top10 = db.Column(db.Integer, nullable=False)
driver = db.relationship('Person')
vehicle = db.relationship('Vehicle')
class TeamStanding(db.Model):
__tablename__ = 'team_standings'
id = db.Column(db.Integer, primary_key=True, autoincrement=True)
team_id = db.Column(db.String(50), db.ForeignKey('teams.id'), nullable=False)
vehicle_id = db.Column(db.Integer, db.ForeignKey('vehicles.id'), nullable=False)
series = db.Column(db.String(5), db.ForeignKey('series.id'), nullable=False)
season = db.Column(db.Integer, nullable=False)
position = db.Column(db.Integer, nullable=False)
points = db.Column(db.Integer, nullable=False)
poles = db.Column(db.Integer, nullable=False)
team = db.relationship('Team')
vehicle = db.relationship('Vehicle')
class OwnerStanding(db.Model):
__tablename__ = 'owner_standings'
id = db.Column(db.Integer, primary_key=True, autoincrement=True)
vehicle_id = db.Column(db.Integer, db.ForeignKey('vehicles.id'), nullable=False)
series = db.Column(db.String(5), db.ForeignKey('series.id'), nullable=False)
season = db.Column(db.Integer, nullable=False)
position = db.Column(db.Integer, nullable=False)
points = db.Column(db.Integer, nullable=False)
vehicle = db.relationship('Vehicle')
class RaceTrack(db.Model):
__tablename__ = 'race_tracks'
id = db.Column(db.Integer, primary_key=True, autoincrement=True)
site = db.Column(db.String(50), nullable=False)
circuit_name = db.Column(db.String(100), nullable=False)
city = db.Column(db.String(50), nullable=False)
state = db.Column(db.String(2), nullable=True)
country = db.Column(db.String(50), nullable=False)
class Race(db.Model):
__tablename__ = 'races'
id = db.Column(db.String(50), primary_key=True)
round = db.Column(db.Integer, nullable=False)
name = db.Column(db.String(100), nullable=False)
season = db.Column(db.Integer, nullable=False)
race_track_id = db.Column(db.Integer, db.ForeignKey('race_tracks.id'), nullable=False)
date = db.Column(db.DateTime, nullable=False)
laps = db.Column(db.Integer, nullable=False)
length = db.Column(db.Numeric(5, 3), nullable=False)
distance = db.Column(db.Numeric(5, 1), nullable=False)
series = db.Column(db.String(5), db.ForeignKey('series.id'), nullable=False)
race_types = db.relationship('RaceType', secondary='races_types')
race_track = db.relationship('RaceTrack')
class RaceType(db.Model):
__tablename__ = 'race_types'
id = db.Column(db.String(5), primary_key=True)
description = db.Column(db.String(50), nullable=True)
class RacesTypes(db.Model):
__tablename__ = 'races_types'
race_id = db.Column(db.String(50), db.ForeignKey('races.id'), primary_key=True)
race_type = db.Column(db.String(5), db.ForeignKey('race_types.id'), primary_key=True)
class RaceStanding(db.Model):
__tablename__ = 'race_standings'
id = db.Column(db.Integer, primary_key=True, autoincrement=True)
race_id = db.Column(db.String(50), db.ForeignKey('races.id'), nullable=False)
race_time = db.Column(db.Time, nullable=False)
caution_flags = db.Column(db.Integer, nullable=False)
caution_flag_laps = db.Column(db.Integer, nullable=False)
lead_changes = db.Column(db.Integer, nullable=False)
pole_speed = db.Column(db.Numeric(6, 3), nullable=False)
avg_speed = db.Column(db.Numeric(6, 3), nullable=False)
victory_margin = db.Column(db.Numeric(6, 3), nullable=False)
class RaceEntryType(db.Model):
__tablename__ = 'race_entry_types'
id = db.Column(db.Integer, primary_key=True, autoincrement=True)
entry_type = db.Column(db.String(50), nullable=False)
class RaceEntry(db.Model):
__tablename__ = 'race_entries'
id = db.Column(db.Integer, primary_key=True, autoincrement=True)
race_id = db.Column(db.String(50), db.ForeignKey('races.id'), nullable=False)
team_id = db.Column(db.String(50), db.ForeignKey('teams.id'), nullable=False)
vehicle_id = db.Column(db.Integer, db.ForeignKey('vehicles.id'), nullable=False)
entry_type_id = db.Column(db.Integer, db.ForeignKey('race_entry_types.id'), nullable=False)
race = db.relationship('Race')
team = db.relationship('Team')
vehicle = db.relationship('Vehicle')
entry_type = db.relationship('RaceEntryType')
people = db.relationship('RaceEntryPerson')
class RaceEntryPerson(db.Model):
__tablename__ = 'race_entries_people'
id = db.Column(db.Integer, primary_key=True, autoincrement=True)
race_entry_id = db.Column(db.Integer, db.ForeignKey('race_entries.id'), nullable=False)
person_id = db.Column(db.Integer, db.ForeignKey('people.id'), nullable=False)
type = db.Column(PersonType, nullable=False)
person = db.relationship('Person')
race_entry = db.relationship('RaceEntry')
class RaceResult(db.Model):
__tablename__ = 'race_results'
id = db.Column(db.Integer, primary_key=True, autoincrement=True)
race_id = db.Column(db.String(50), db.ForeignKey('races.id'), nullable=False)
team_id = db.Column(db.String(50), db.ForeignKey('teams.id'), nullable=False)
vehicle_id = db.Column(db.Integer, db.ForeignKey('vehicles.id'), nullable=False)
sponsor = db.Column(db.String(100), nullable=False)
grid = db.Column(db.Integer, nullable=False)
position = db.Column(db.Integer, nullable=False)
laps = db.Column(db.Integer, nullable=False)
status = db.Column(db.String(50), nullable=False)
laps_led = db.Column(db.Integer, nullable=False)
points = db.Column(db.Integer, nullable=False)
money = db.Column(db.Numeric(10, 2), nullable=False)
race = db.relationship('Race')
team = db.relationship('Team')
vehicle = db.relationship('Vehicle')
people = db.relationship('RaceResultPerson')
class RaceResultPerson(db.Model):
__tablename__ = 'race_results_people'
id = db.Column(db.Integer, primary_key=True, autoincrement=True)
race_result_id = db.Column(db.Integer, db.ForeignKey('race_results.id'), nullable=False)
person_id = db.Column(db.Integer, db.ForeignKey('people.id'), nullable=False)
type = db.Column(PersonType, nullable=False)
person = db.relationship('Person')
race_result = db.relationship('RaceResult')
class QualifyingResult(db.Model):
__tablename__ = 'qualifying_results'
id = db.Column(db.Integer, primary_key=True, autoincrement=True)
race_id = db.Column(db.String(50), db.ForeignKey('races.id'), nullable=False)
team_id = db.Column(db.String(50), db.ForeignKey('teams.id'), nullable=False)
vehicle_id = db.Column(db.Integer, db.ForeignKey('vehicles.id'), nullable=False)
session = db.Column(db.Integer, nullable=False)
position = db.Column(db.Integer, nullable=False)
lap_time = db.Column(db.Numeric(6, 3), nullable=False)
race = db.relationship('Race')
team = db.relationship('Team')
vehicle = db.relationship('Vehicle')
people = db.relationship('QualifyingResultPerson')
class QualifyingResultPerson(db.Model):
__tablename__ = 'qualifying_results_people'
id = db.Column(db.Integer, primary_key=True, autoincrement=True)
qualifying_result_id = db.Column(db.Integer, db.ForeignKey('qualifying_results.id'), nullable=False)
person_id = db.Column(db.Integer, db.ForeignKey('people.id'), nullable=False)
type = db.Column(PersonType, nullable=False)
person = db.relationship('Person')
qualifying_result = db.relationship('QualifyingResult')
class PracticeResult(db.Model):
___tablename__ = 'practice_results'
id = db.Column(db.Integer, primary_key=True, autoincrement=True)
race_id = db.Column(db.String(50), db.ForeignKey('races.id'), nullable=False)
team_id = db.Column(db.String(50), db.ForeignKey('teams.id'), nullable=False)
vehicle_id = db.Column(db.Integer, db.ForeignKey('vehicles.id'), nullable=False)
session = db.Column(db.Integer, nullable=False)
position = db.Column(db.Integer, nullable=False)
lap_time = db.Column(db.Numeric(6, 3), nullable=False)
race = db.relationship('Race')
team = db.relationship('Team')
vehicle = db.relationship('Vehicle')
people = db.relationship('PracticeResultPerson')
class PracticeResultPerson(db.Model):
__tablename__ = 'practice_results_people'
id = db.Column(db.Integer, primary_key=True, autoincrement=True)
practice_result_id = db.Column(db.Integer, db.ForeignKey(PracticeResult.id), nullable=False)
person_id = db.Column(db.Integer, db.ForeignKey('people.id'), nullable=False)
type = db.Column(PersonType, nullable=False)
person = db.relationship('Person')
practice_result = db.relationship('PracticeResult')
| [
"flask.ext.sqlalchemy.SQLAlchemy",
"sqlalchemy.ext.mutable.MutableDict.as_mutable"
] | [((147, 159), 'flask.ext.sqlalchemy.SQLAlchemy', 'SQLAlchemy', ([], {}), '()\n', (157, 159), False, 'from flask.ext.sqlalchemy import SQLAlchemy\n'), ((1455, 1485), 'sqlalchemy.ext.mutable.MutableDict.as_mutable', 'MutableDict.as_mutable', (['HSTORE'], {}), '(HSTORE)\n', (1477, 1485), False, 'from sqlalchemy.ext.mutable import MutableDict\n')] |
import numpy as np
import matplotlib.pyplot as plt
from .single_unit import PSTH
def shiftappend(arr, shift, end=None, direction='left'):
if isinstance(end, type(None)):
end = arr[-1]
if direction == 'left':
return np.hstack((arr[arr > shift]-shift, arr[arr < shift]+end-shift))
elif direction == 'right':
return np.hstack((arr[arr < shift]-end+shift, arr[arr < shift]+shift))
else:
raise ValueError('unknown direction: %s'%direction)
def crosscorrelogram(target, reference, ROI=(-0.5,0.5), binsize=.01, shift=None, skip_plot=False):
"""
Cross Correlation between two unit, optionally corrected by shift predictor.
arguments:
- target: the target spike train as 1d numpy.array
- reference: the reference spike train as 1d numpy.array
keyword arguments:
- shift: shift size, if None then skip the shift predictor correction [default: None]
- ROI: region of interest as tuple [default: (-0.5, 0.5)]
- binsize: the size of each bin [default: 0.01]
- skip_plot: if True then skip auto plot crosscorrelogram [default: False]
return:
- crosscorrelogram: as in 1d numpy.array
"""
_xcorr, _ = PSTH(target, reference, ROI, binsize, True)
if isinstance(shift, int) or isinstance(shift, float):
_shift_reference = shiftappend(reference, shift)
_xcorr_shift, _ = PSTH(target, _shift_reference, ROI, binsize, True)
_xcorr = _xcorr - _xcorr_shift
elif isinstance(shift, list) or isinstance(shift, np.ndarray):
_xcorr_shift = np.zeros_like(_xcorr)
for item in shift:
_shift_reference = shiftappend(reference, item)
_xcorr_shift_item, _ = PSTH(target, _shift_reference, ROI, binsize, True)
_xcorr_shift = _xcorr_shift + _xcorr_shift_item/np.size(shift)
_xcorr = _xcorr - _xcorr_shift
else:
_xcorr_shift = None
if not skip_plot:
plt.figure(figsize=(16,4))
plt.subplot(1,2,2)
_tspec = np.linspace(ROI[0], ROI[1]-1/int((ROI[1]-ROI[0])/binsize), int((ROI[1]-ROI[0])/binsize))
plt.bar(_tspec+binsize/2, _xcorr, width=binsize)
plt.vlines([0], 0, np.max(_xcorr)*1.05, linestyle='--', alpha=0.5)
plt.xlim((ROI[0], ROI[-1]))
plt.title('crosscorrelogram')
if not isinstance(_xcorr_shift, type(None)):
plt.subplot(1,2,1)
plt.bar(_tspec+binsize/2, _xcorr_shift, width=binsize)
plt.vlines([0], 0, np.max(_xcorr)*1.05, linestyle='--', alpha=0.5)
plt.xlim((ROI[0], ROI[-1]))
plt.title('shift predictor')
plt.show()
return _xcorr | [
"numpy.hstack",
"numpy.size",
"numpy.zeros_like",
"numpy.max",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.bar",
"matplotlib.pyplot.title",
"matplotlib.pyplot.xlim",
"matplotlib.pyplot.subplot",
"matplotlib.pyplot.show"
] | [((246, 315), 'numpy.hstack', 'np.hstack', (['(arr[arr > shift] - shift, arr[arr < shift] + end - shift)'], {}), '((arr[arr > shift] - shift, arr[arr < shift] + end - shift))\n', (255, 315), True, 'import numpy as np\n'), ((1975, 2002), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(16, 4)'}), '(figsize=(16, 4))\n', (1985, 2002), True, 'import matplotlib.pyplot as plt\n'), ((2010, 2030), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(1)', '(2)', '(2)'], {}), '(1, 2, 2)\n', (2021, 2030), True, 'import matplotlib.pyplot as plt\n'), ((2143, 2195), 'matplotlib.pyplot.bar', 'plt.bar', (['(_tspec + binsize / 2)', '_xcorr'], {'width': 'binsize'}), '(_tspec + binsize / 2, _xcorr, width=binsize)\n', (2150, 2195), True, 'import matplotlib.pyplot as plt\n'), ((2275, 2302), 'matplotlib.pyplot.xlim', 'plt.xlim', (['(ROI[0], ROI[-1])'], {}), '((ROI[0], ROI[-1]))\n', (2283, 2302), True, 'import matplotlib.pyplot as plt\n'), ((2311, 2340), 'matplotlib.pyplot.title', 'plt.title', (['"""crosscorrelogram"""'], {}), "('crosscorrelogram')\n", (2320, 2340), True, 'import matplotlib.pyplot as plt\n'), ((2678, 2688), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2686, 2688), True, 'import matplotlib.pyplot as plt\n'), ((356, 425), 'numpy.hstack', 'np.hstack', (['(arr[arr < shift] - end + shift, arr[arr < shift] + shift)'], {}), '((arr[arr < shift] - end + shift, arr[arr < shift] + shift))\n', (365, 425), True, 'import numpy as np\n'), ((1597, 1618), 'numpy.zeros_like', 'np.zeros_like', (['_xcorr'], {}), '(_xcorr)\n', (1610, 1618), True, 'import numpy as np\n'), ((2415, 2435), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(1)', '(2)', '(1)'], {}), '(1, 2, 1)\n', (2426, 2435), True, 'import matplotlib.pyplot as plt\n'), ((2446, 2504), 'matplotlib.pyplot.bar', 'plt.bar', (['(_tspec + binsize / 2)', '_xcorr_shift'], {'width': 'binsize'}), '(_tspec + binsize / 2, _xcorr_shift, width=binsize)\n', (2453, 2504), True, 'import matplotlib.pyplot as plt\n'), ((2592, 
2619), 'matplotlib.pyplot.xlim', 'plt.xlim', (['(ROI[0], ROI[-1])'], {}), '((ROI[0], ROI[-1]))\n', (2600, 2619), True, 'import matplotlib.pyplot as plt\n'), ((2632, 2660), 'matplotlib.pyplot.title', 'plt.title', (['"""shift predictor"""'], {}), "('shift predictor')\n", (2641, 2660), True, 'import matplotlib.pyplot as plt\n'), ((2219, 2233), 'numpy.max', 'np.max', (['_xcorr'], {}), '(_xcorr)\n', (2225, 2233), True, 'import numpy as np\n'), ((2532, 2546), 'numpy.max', 'np.max', (['_xcorr'], {}), '(_xcorr)\n', (2538, 2546), True, 'import numpy as np\n'), ((1852, 1866), 'numpy.size', 'np.size', (['shift'], {}), '(shift)\n', (1859, 1866), True, 'import numpy as np\n')] |
#Bot class & basic async events
import config
import log
import lottery
import server as server_
import discord
import sys
import re
class main:
def __init__(self, token, name, prefix): #Init bot variables
self.client = discord.Client()
self.token = token
self.name = name
self.prefix = prefix
def start(self): #Run the bot
self.client.run(self.token)
bot = main(config.token, config.name, config.prefix) #Get a main bot instance
@bot.client.event
async def on_ready(): #Log when bot starts
log.success("Started bot.")
log.info("Bot info:")
log.info(bot.client.user.name)
log.info(bot.client.user.id)
@bot.client.event
async def on_message(message): #Handle messages
if message.author == bot.client.user: #Do not reply to oneself
return
if message.author.id in config.owners and message.content == bot.prefix + "stop --instant": #High priority stop
await message.channel.send("High priority stop requested. Stopping...")
log.stop("Bot owner requested instant stop")
sys.exit(0)
if message.content.startswith(bot.prefix): #Handle command
log.info("Bot command called by " + message.author.name + ". Full command: " + message.content)
server = server_.find(message.guild)
rawCommand = message.content[len(bot.prefix):].lower() #Take prefix out
if (rawCommand == "lotto" or rawCommand == "lottery") and message.channel.id == server.lotteryChannel: #Play the lottery
await message.channel.send(embed=lottery.play(server.lottoParams, message.author, server))
elif rawCommand.startswith("set "): #Set variable command
if not message.author.permissions_in(message.channel).manage_guild: #User needs manage guild
await message.channel.send("ERROR: You need to be server admin.")
return
variable = rawCommand[4:] #Get variable to be changed
if variable.startswith("channel"): #Set current channel as the lottery channel
await message.channel.send(server.setChannel(message.channel.id))
elif variable.startswith("rate "): #Set the lottery rate limit
rate = variable[5:]
await message.channel.send(server.setRate(rate))
else: #???
await message.channel.send("Unknown variable.")
elif rawCommand.startswith("prize "): #Manage prizes
if not message.author.permissions_in(message.channel).manage_guild: #User needs manage guild
await message.channel.send("ERROR: You need to be server admin.")
return
action = rawCommand[6:] #Get action
if action.startswith("add "): #Add a new prize
params = action[4:]
prize = re.findall(r'".+?"', params) #Find prize name
weight, host = params[len(prize[0]) + 1:].split(' ') #Find weight and prize host
await message.channel.send(server.addPrize([prize[0][1:][:-1], host], weight))
if action.startswith("delete "): #Delete a prize
i = action[7:]
await message.channel.send(server.deletePrize(i))
if action.startswith("list"): #List prizes
await message.channel.send(server.listPrizes())
@bot.client.event
async def on_guild_join(guild): #Bot joined a new guild
#Generate server config
server_.find(guild)
| [
"log.success",
"log.info",
"log.stop",
"lottery.play",
"re.findall",
"sys.exit",
"discord.Client",
"server.find"
] | [((562, 589), 'log.success', 'log.success', (['"""Started bot."""'], {}), "('Started bot.')\n", (573, 589), False, 'import log\n'), ((594, 615), 'log.info', 'log.info', (['"""Bot info:"""'], {}), "('Bot info:')\n", (602, 615), False, 'import log\n'), ((620, 650), 'log.info', 'log.info', (['bot.client.user.name'], {}), '(bot.client.user.name)\n', (628, 650), False, 'import log\n'), ((655, 683), 'log.info', 'log.info', (['bot.client.user.id'], {}), '(bot.client.user.id)\n', (663, 683), False, 'import log\n'), ((3599, 3618), 'server.find', 'server_.find', (['guild'], {}), '(guild)\n', (3611, 3618), True, 'import server as server_\n'), ((238, 254), 'discord.Client', 'discord.Client', ([], {}), '()\n', (252, 254), False, 'import discord\n'), ((1041, 1085), 'log.stop', 'log.stop', (['"""Bot owner requested instant stop"""'], {}), "('Bot owner requested instant stop')\n", (1049, 1085), False, 'import log\n'), ((1094, 1105), 'sys.exit', 'sys.exit', (['(0)'], {}), '(0)\n', (1102, 1105), False, 'import sys\n'), ((1181, 1280), 'log.info', 'log.info', (["('Bot command called by ' + message.author.name + '. Full command: ' +\n message.content)"], {}), "('Bot command called by ' + message.author.name +\n '. Full command: ' + message.content)\n", (1189, 1280), False, 'import log\n'), ((1303, 1330), 'server.find', 'server_.find', (['message.guild'], {}), '(message.guild)\n', (1315, 1330), True, 'import server as server_\n'), ((1601, 1657), 'lottery.play', 'lottery.play', (['server.lottoParams', 'message.author', 'server'], {}), '(server.lottoParams, message.author, server)\n', (1613, 1657), False, 'import lottery\n'), ((2931, 2958), 're.findall', 're.findall', (['"""".+?\\""""', 'params'], {}), '(\'".+?"\', params)\n', (2941, 2958), False, 'import re\n')] |
import Bases.ObjectHandler
import pygame
class SceneBase:
def __init__(self, il):
self.next = self
self.IL = il
self.OH = Bases.ObjectHandler.ObjectHandler()
self.interactive_events = None
self.pressed_keys = []
self.environment_type = "DEFAULT"
def process_input(self, events, pressed_keys_n):
self.interactive_events = events
for event in self.interactive_events:
if event.type == pygame.MOUSEBUTTONDOWN:
self.OH.OEH.event(40, event.button, pygame.mouse.get_pos()[0], pygame.mouse.get_pos()[1])
elif event.type == pygame.MOUSEBUTTONUP:
self.OH.OEH.event(41, event.button, pygame.mouse.get_pos()[0], pygame.mouse.get_pos()[1])
elif event.type == pygame.MOUSEMOTION:
self.OH.OEH.event(42, event.buttons, pygame.mouse.get_pos()[0], pygame.mouse.get_pos()[1])
modifiers = []
if pygame.K_LALT in pressed_keys_n or pygame.K_RALT in pressed_keys_n:
modifiers.append('ALT')
if pygame.K_LSHIFT in pressed_keys_n or pygame.K_RSHIFT in pressed_keys_n:
modifiers.append('SHIFT')
if pygame.K_LCTRL in pressed_keys_n or pygame.K_RCTRL in pressed_keys_n:
modifiers.append('CONTROL')
for key in pressed_keys_n:
self.OH.OEH.event(43, key, modifiers)
for key in self.pressed_keys:
if key not in pressed_keys_n:
self.OH.OEH.event(44, key, modifiers)
self.pressed_keys = list(pressed_keys_n)
def update(self, screen):
self.OH.update_objects(screen, self.interactive_events, self.pressed_keys)
def render(self, screen):
self.OH.draw_objects(screen)
def switch_to_scene(self, next_scene):
self.next = next_scene
def terminate(self):
self.switch_to_scene(None)
| [
"pygame.mouse.get_pos"
] | [((546, 568), 'pygame.mouse.get_pos', 'pygame.mouse.get_pos', ([], {}), '()\n', (566, 568), False, 'import pygame\n'), ((573, 595), 'pygame.mouse.get_pos', 'pygame.mouse.get_pos', ([], {}), '()\n', (593, 595), False, 'import pygame\n'), ((705, 727), 'pygame.mouse.get_pos', 'pygame.mouse.get_pos', ([], {}), '()\n', (725, 727), False, 'import pygame\n'), ((732, 754), 'pygame.mouse.get_pos', 'pygame.mouse.get_pos', ([], {}), '()\n', (752, 754), False, 'import pygame\n'), ((863, 885), 'pygame.mouse.get_pos', 'pygame.mouse.get_pos', ([], {}), '()\n', (883, 885), False, 'import pygame\n'), ((890, 912), 'pygame.mouse.get_pos', 'pygame.mouse.get_pos', ([], {}), '()\n', (910, 912), False, 'import pygame\n')] |
# Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Functional tests for controllers.utils."""
import os
import appengine_config
from common import users
from controllers import utils
from tests.functional import actions
_TEMPLATES_DIR = os.path.join(
appengine_config.BUNDLE_ROOT, 'tests', 'functional', 'controllers_utils',
'templates')
class TestHandler(utils.LocalizedGlobalHandler):
def get(self):
template = self.get_template(
'test_template.html', additional_dirs=[_TEMPLATES_DIR])
self.response.out.write(template.render({}))
class LocalizedGlobalHandlersTest(actions.TestBase):
def getApp(self):
return users.AuthInterceptorWSGIApplication([('/', TestHandler)])
def test_get_accept_language(self):
self.assertEquals(
'accept_language',
utils.LocalizedGlobalHandler._get_accept_language(
{'Accept-Language': 'accept_language'}))
self.assertIsNone(utils.LocalizedGlobalHandler._get_accept_language({}))
def test_get_locale_defaults_if_no_header(self):
self.assertEquals(
utils.LocalizedGlobalHandler._DEFAULT_LOCALE,
utils.LocalizedGlobalHandler._get_locale(None))
self.assertEquals(
utils.LocalizedGlobalHandler._DEFAULT_LOCALE,
utils.LocalizedGlobalHandler._get_locale(''))
def test_template_renders_successfully_with_accept_language(self):
response = self.testapp.get('/', headers={'Accept-Language': 'fr'})
self.assertIn('Success!', response.body)
def test_template_renders_successfully_with_no_accept_language(self):
response = self.testapp.get('/')
self.assertIn('Success!', response.body)
| [
"controllers.utils.LocalizedGlobalHandler._get_accept_language",
"os.path.join",
"common.users.AuthInterceptorWSGIApplication",
"controllers.utils.LocalizedGlobalHandler._get_locale"
] | [((792, 895), 'os.path.join', 'os.path.join', (['appengine_config.BUNDLE_ROOT', '"""tests"""', '"""functional"""', '"""controllers_utils"""', '"""templates"""'], {}), "(appengine_config.BUNDLE_ROOT, 'tests', 'functional',\n 'controllers_utils', 'templates')\n", (804, 895), False, 'import os\n'), ((1224, 1282), 'common.users.AuthInterceptorWSGIApplication', 'users.AuthInterceptorWSGIApplication', (["[('/', TestHandler)]"], {}), "([('/', TestHandler)])\n", (1260, 1282), False, 'from common import users\n'), ((1394, 1487), 'controllers.utils.LocalizedGlobalHandler._get_accept_language', 'utils.LocalizedGlobalHandler._get_accept_language', (["{'Accept-Language': 'accept_language'}"], {}), "({'Accept-Language':\n 'accept_language'})\n", (1443, 1487), False, 'from controllers import utils\n'), ((1528, 1581), 'controllers.utils.LocalizedGlobalHandler._get_accept_language', 'utils.LocalizedGlobalHandler._get_accept_language', (['{}'], {}), '({})\n', (1577, 1581), False, 'from controllers import utils\n'), ((1734, 1780), 'controllers.utils.LocalizedGlobalHandler._get_locale', 'utils.LocalizedGlobalHandler._get_locale', (['None'], {}), '(None)\n', (1774, 1780), False, 'from controllers import utils\n'), ((1879, 1923), 'controllers.utils.LocalizedGlobalHandler._get_locale', 'utils.LocalizedGlobalHandler._get_locale', (['""""""'], {}), "('')\n", (1919, 1923), False, 'from controllers import utils\n')] |
# coding=utf-8
"""
Different utilities for TMDbie
"""
import logging
from .types import TVShow, Person, Movie
from .abstract import TMDbType
log = logging.getLogger(__name__)
log.setLevel(logging.INFO)
def dict_get_by_value(dict_: dict, value):
for k, v in dict_.items():
if v == value:
return k
class Singleton(type):
_instances = {}
def __call__(cls, *args, **kwargs):
if cls not in cls._instances:
cls._instances[cls] = super(Singleton, cls).__call__(*args, **kwargs)
return cls._instances[cls]
media_types = {
"tv": TVShow,
"movie": Movie,
"person": Person,
}
def get_media_type(data):
if isinstance(data, dict):
data = data.get("media_type")
elif isinstance(data, list):
data = data[0].get("media_type")
if not data:
log.error("Missing media_type")
log.debug(data)
real_type = media_types.get(data)
if not real_type:
raise TypeError("Not a valid media_type: {}".format(data))
return real_type
def instantiate_type(data):
if not data:
return None
type_ = get_media_type(data.get("media_type"))
# Includes subclasses
if not isinstance(type_, TMDbType):
raise TypeError("This shouldn't happen, please notify the developer!")
# noinspection PyCallingNonCallable
return type_(**data)
| [
"logging.getLogger"
] | [((148, 175), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (165, 175), False, 'import logging\n')] |
import copy
from mahjong.constants import EAST, FIVE_RED_MAN, FIVE_RED_PIN, FIVE_RED_SOU, TERMINAL_INDICES, CHUN
def is_aka_dora(tile, aka_enabled):
"""
:param tile: int 136 tiles format
:param aka_enabled: depends on table rules
:return: boolean
"""
if not aka_enabled:
return False
if tile in [FIVE_RED_MAN, FIVE_RED_PIN, FIVE_RED_SOU]:
return True
return False
def plus_dora(tile, dora_indicators):
"""
:param tile: int 136 tiles format
:param dora_indicators: array of 136 tiles format
:return: int count of dora
"""
tile_index = tile // 4
dora_count = 0
for dora in dora_indicators:
dora //= 4
# sou, pin, man
if tile_index < EAST:
# with indicator 9, dora will be 1
if dora == 8:
dora = -1
elif dora == 17:
dora = 8
elif dora == 26:
dora = 17
if tile_index == dora + 1:
dora_count += 1
else:
if dora < EAST:
continue
dora -= 9 * 3
tile_index_temp = tile_index - 9 * 3
# dora indicator is north
if dora == 3:
dora = -1
# dora indicator is hatsu
if dora == 6:
dora = 3
if tile_index_temp == dora + 1:
dora_count += 1
return dora_count
def is_chi(item):
"""
:param item: array of tile 34 indices
:return: boolean
"""
if len(item) != 3:
return False
return item[0] == item[1] - 1 == item[2] - 2
def is_pon(item):
"""
:param item: array of tile 34 indices
:return: boolean
"""
if len(item) != 3:
return False
return item[0] == item[1] == item[2]
def is_pair(item):
"""
:param item: array of tile 34 indices
:return: boolean
"""
return len(item) == 2
def is_man(tile):
"""
:param tile: 34 tile format
:return: boolean
"""
return tile <= 8
def is_pin(tile):
"""
:param tile: 34 tile format
:return: boolean
"""
return 8 < tile <= 17
def is_sou(tile):
"""
:param tile: 34 tile format
:return: boolean
"""
return 17 < tile <= 26
def is_honor(tile):
"""
:param tile: 34 tile format
:return: boolean
"""
return tile >= 27
def is_terminal(tile):
"""
:param tile: 34 tile format
:return: boolean
"""
return tile in TERMINAL_INDICES
def is_dora_indicator_for_terminal(tile):
"""
:param tile: 34 tile format
:return: boolean
"""
return tile == 7 or tile == 8 or tile == 16 or tile == 17 or tile == 25 or tile == 26
def contains_terminals(hand_set):
"""
:param hand_set: array of 34 tiles
:return: boolean
"""
return any([x in TERMINAL_INDICES for x in hand_set])
def simplify(tile):
"""
:param tile: 34 tile format
:return: tile: 0-8 presentation
"""
return tile - 9 * (tile // 9)
def find_isolated_tile_indices(hand_34):
"""
Tiles that don't have -1, 0 and +1 neighbors
:param hand_34: array of tiles in 34 tile format
:return: array of isolated tiles indices
"""
isolated_indices = []
for x in range(0, CHUN + 1):
# for honor tiles we don't need to check nearby tiles
if is_honor(x) and hand_34[x] == 0:
isolated_indices.append(x)
else:
simplified = simplify(x)
# 1 suit tile
if simplified == 0:
if hand_34[x] == 0 and hand_34[x + 1] == 0:
isolated_indices.append(x)
# 9 suit tile
elif simplified == 8:
if hand_34[x] == 0 and hand_34[x - 1] == 0:
isolated_indices.append(x)
# 2-8 tiles tiles
else:
if hand_34[x] == 0 and hand_34[x - 1] == 0 and hand_34[x + 1] == 0:
isolated_indices.append(x)
return isolated_indices
def is_tile_strictly_isolated(hand_34, tile_34):
"""
Tile is strictly isolated if it doesn't have -2, -1, 0, +1, +2 neighbors
:param hand_34: array of tiles in 34 tile format
:param tile_34: int
:return: bool
"""
hand_34 = copy.copy(hand_34)
# we don't need to count target tile in the hand
hand_34[tile_34] -= 1
if hand_34[tile_34] < 0:
hand_34[tile_34] = 0
indices = []
if is_honor(tile_34):
return hand_34[tile_34] == 0
else:
simplified = simplify(tile_34)
# 1 suit tile
if simplified == 0:
indices = [tile_34, tile_34 + 1, tile_34 + 2]
# 2 suit tile
elif simplified == 1:
indices = [tile_34 - 1, tile_34, tile_34 + 1, tile_34 + 2]
# 8 suit tile
elif simplified == 7:
indices = [tile_34 - 2, tile_34 - 1, tile_34, tile_34 + 1]
# 9 suit tile
elif simplified == 8:
indices = [tile_34 - 2, tile_34 - 1, tile_34]
# 3-7 tiles tiles
else:
indices = [tile_34 - 2, tile_34 - 1, tile_34, tile_34 + 1, tile_34 + 2]
return all([hand_34[x] == 0 for x in indices])
def count_tiles_by_suits(tiles_34):
"""
Separate tiles by suits and count them
:param tiles_34: array of tiles to count
:return: dict
"""
suits = [
{'count': 0, 'name': 'sou', 'function': is_sou},
{'count': 0, 'name': 'man', 'function': is_man},
{'count': 0, 'name': 'pin', 'function': is_pin},
{'count': 0, 'name': 'honor', 'function': is_honor}
]
for x in range(0, 34):
tile = tiles_34[x]
if not tile:
continue
for item in suits:
if item['function'](x):
item['count'] += tile
return suits
| [
"copy.copy"
] | [((4313, 4331), 'copy.copy', 'copy.copy', (['hand_34'], {}), '(hand_34)\n', (4322, 4331), False, 'import copy\n')] |
import time
from breezycreate2 import Robot
#robot = create.Create('/dev/ttyUSB0')
#robot = Robot('/dev/ttyUSB0', "57600")
robot = Robot('sim')
def write_sensors():
#robot.robot.get_packet(19)
#robot.robot.get_packet(20)
print('distance: ', robot.robot.sensor_state['distance'])
print('angle: ', robot.robot.sensor_state['angle'])
#print('RIGHT_VELOCITY: ', robot.getSensor('RIGHT_VELOCITY'))
#print('LEFT_VELOCITY: ', robot.getSensor('LEFT_VELOCITY'))
write_sensors()
robot.drive(100, 1500)
time.sleep(1)
write_sensors()
time.sleep(1)
write_sensors()
time.sleep(1)
write_sensors()
robot.drive(-200, 500)
time.sleep(1)
write_sensors()
#robot.shutdown()
| [
"breezycreate2.Robot",
"time.sleep"
] | [((132, 144), 'breezycreate2.Robot', 'Robot', (['"""sim"""'], {}), "('sim')\n", (137, 144), False, 'from breezycreate2 import Robot\n'), ((524, 537), 'time.sleep', 'time.sleep', (['(1)'], {}), '(1)\n', (534, 537), False, 'import time\n'), ((555, 568), 'time.sleep', 'time.sleep', (['(1)'], {}), '(1)\n', (565, 568), False, 'import time\n'), ((586, 599), 'time.sleep', 'time.sleep', (['(1)'], {}), '(1)\n', (596, 599), False, 'import time\n'), ((641, 654), 'time.sleep', 'time.sleep', (['(1)'], {}), '(1)\n', (651, 654), False, 'import time\n')] |
"""Base Class for a Solver. This class contains the different methods that
can be used to solve an environment/problem. There are methods for
mini-batch training, control, etc...
The idea is that this class will contain all the methods that the different
algorithms would need. Then we can simply call this class in the solver scripts
and use its methods.
I'm still torn between using a class or just using a script.
"""
from .evaluator import Evaluator
from .interrogator import Interrogator
import torch
class Solver(object):
"""This class makes absolute sense because there are many types of training
depending on the task. For this reason, in the future, this class can easily
include all instances of such training routines. Of course, transparent to
the user -which is the ultimate goal, complete transparency-.
"""
def __init__(self, slv_params):
print("Creating Solver")
self.env = slv_params['environment']
self.alg = slv_params['algorithm']
self.logger = slv_params['logger']
self.evaluator = Evaluator()
self.interrogator = Interrogator()
def forward(self):
self.interrogator.set_inference(self.alg.model, self.env)
def backward(self):
self.evaluator.evaluate(self.env, self.interrogator.inference)
feedback = (self.evaluator.score)
self.alg.step(feedback)
self.alg.print_state()
def save(self, path=''):
"""Only works with my algorithms, not with SGD."""
fn = path+"model_elite.pth"
torch.save(self.alg.model.state_dict(), fn)
def save_pool_weights(self, models, path):
for i, model in enumerate(models):
fn = path+"model_"+str(i)+".pth"
torch.save(model.state_dict(), fn)
def save_elite_weights(self, path, name=''):
if name == '':
name = "model_elite.pth"
else:
name = name+'.pth'
fn = path+name
torch.save(self.alg.model.state_dict(), fn)
def load(self, path, name="model_elite"):
"""Only works with my algorithms, not with SGD."""
fn = path+name+".pth"
print("Loading weights in: " + fn)
self.alg.model.load_state_dict(torch.load(fn))
self.alg.model.eval()
#
| [
"torch.load"
] | [((2223, 2237), 'torch.load', 'torch.load', (['fn'], {}), '(fn)\n', (2233, 2237), False, 'import torch\n')] |
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
from functools import partial
import torch
from torch import nn
from timm.models.layers import DropPath
from einops.layers.torch import Reduce
from .layers import DWConv, SPATIAL_FUNC, ChannelMLP, STEM_LAYER
from .misc import reshape2n
class MixingBlock(nn.Module):
def __init__(self, dim,
spatial_func=None, scaled=True, init_values=1e-4, shared_spatial_func=False,
norm_layer=partial(nn.LayerNorm, eps=1e-6), act_layer=nn.GELU, drop_path=0., cpe=True,
num_heads=None, qkv_bias=False, qk_scale=None, attn_drop=0., proj_drop=0., # attn
in_features=None, hidden_features=None, drop=0., # mlp
channel_ratio=2.0
):
super(MixingBlock, self).__init__()
spatial_kwargs = dict(act_layer=act_layer,
in_features=in_features, hidden_features=hidden_features, drop=drop, # mlp
dim=dim, num_heads=num_heads, qkv_bias=qkv_bias, qk_scale=qk_scale, attn_drop=attn_drop, proj_drop=proj_drop # attn
)
self.valid_spatial_func = True
if spatial_func is not None:
if shared_spatial_func:
self.spatial_func = spatial_func
else:
self.spatial_func = spatial_func(**spatial_kwargs)
self.norm1 = norm_layer(dim)
if scaled:
self.gamma_1 = nn.Parameter(init_values * torch.ones(1, 1, dim), requires_grad=True)
else:
self.gamma_1 = 1.
else:
self.valid_spatial_func = False
self.channel_func = ChannelMLP(in_features=dim, hidden_features=int(dim*channel_ratio), act_layer=act_layer,
drop=drop)
self.norm2 = norm_layer(dim)
self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity()
self.cpe = cpe
if cpe:
self.cpe_net = DWConv(dim)
def forward(self, x):
in_x = x
if self.valid_spatial_func:
x = x + self.drop_path(self.gamma_1 * self.spatial_func(self.norm1(in_x)))
if self.cpe:
x = x + self.cpe_net(in_x)
x = x + self.drop_path(self.channel_func(self.norm2(x)))
return x
def flops(self, input_shape):
_, N, C = input_shape
flops = 0
if self.valid_spatial_func:
flops += self.spatial_func.flops(input_shape)
flops += N * C * 2 # norm + skip
if self.cpe:
flops += self.cpe_net.flops(input_shape)
flops += self.channel_func.flops(input_shape)
flops += N * C * 2
return flops
class Spach(nn.Module):
def __init__(self,
num_classes=1000,
img_size=224,
in_chans=3,
hidden_dim=384,
patch_size=16,
net_arch=None,
act_layer=nn.GELU,
norm_layer=partial(nn.LayerNorm, eps=1e-6),
stem_type='conv1',
scaled=True, init_values=1e-4, drop_path_rate=0., cpe=True, shared_spatial_func=False, # mixing block
num_heads=12, qkv_bias=True, qk_scale=None, attn_drop=0., proj_drop=0., # attn
token_ratio=0.5, channel_ratio=2.0, drop_rate=0., # mlp
downstream=False,
**kwargs
):
super(Spach, self).__init__()
self.num_classes = num_classes
self.hidden_dim = hidden_dim
self.downstream = downstream
self.stem = STEM_LAYER[stem_type](
img_size=img_size, patch_size=patch_size, in_chans=in_chans, embed_dim=hidden_dim, downstream=downstream)
self.norm1 = norm_layer(hidden_dim)
block_kwargs = dict(dim=hidden_dim, scaled=scaled, init_values=init_values, cpe=cpe,
shared_spatial_func=shared_spatial_func, norm_layer=norm_layer, act_layer=act_layer,
num_heads=num_heads, qkv_bias=qkv_bias, qk_scale=qk_scale, attn_drop=attn_drop, proj_drop=proj_drop, # attn
in_features=self.stem.num_patches, hidden_features=int(self.stem.num_patches * token_ratio), channel_ratio=channel_ratio, drop=drop_rate) # mlp
self.blocks = self.make_blocks(net_arch, block_kwargs, drop_path_rate, shared_spatial_func)
self.norm2 = norm_layer(hidden_dim)
if not downstream:
self.pool = Reduce('b n c -> b c', reduction='mean')
self.head = nn.Linear(hidden_dim, self.num_classes)
self.init_weights()
def make_blocks(self, net_arch, block_kwargs, drop_path, shared_spatial_func):
if shared_spatial_func:
assert len(net_arch) == 1, '`shared_spatial_func` only support unitary spatial function'
assert net_arch[0][0] != 'pass', '`shared_spatial_func` do not support pass'
spatial_func = SPATIAL_FUNC[net_arch[0][0]](**block_kwargs)
else:
spatial_func = None
blocks = []
for func_type, depth in net_arch:
for i in range(depth):
blocks.append(MixingBlock(spatial_func=spatial_func or SPATIAL_FUNC[func_type], drop_path=drop_path,
**block_kwargs))
return nn.Sequential(*blocks)
def init_weights(self):
for n, m in self.named_modules():
_init_weights(m, n)
def forward_features(self, x):
x = self.stem(x)
x = reshape2n(x)
x = self.norm1(x)
x = self.blocks(x)
x = self.norm2(x)
return x
def forward(self, x):
x = self.forward_features(x)
x = self.pool(x)
x = self.head(x)
return x
def flops(self):
flops = 0
shape = (1, self.stem.num_patches, self.hidden_dim)
# stem
flops += self.stem.flops()
flops += sum(shape)
# blocks
flops += sum([i.flops(shape) for i in self.blocks])
flops += sum(shape)
# head
flops += self.hidden_dim * self.num_classes
return flops
def _init_weights(m, n: str):
if isinstance(m, nn.Linear):
if n.startswith('head'):
nn.init.zeros_(m.weight)
nn.init.zeros_(m.bias)
else:
nn.init.xavier_uniform_(m.weight)
if m.bias is not None:
if 'mlp' in n:
nn.init.normal_(m.bias, std=1e-6)
else:
nn.init.zeros_(m.bias)
elif isinstance(m, nn.Conv2d):
nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
if m.bias is not None:
nn.init.zeros_(m.bias)
elif isinstance(m, (nn.LayerNorm, nn.BatchNorm2d, nn.GroupNorm)):
nn.init.ones_(m.weight)
nn.init.zeros_(m.bias) | [
"timm.models.layers.DropPath",
"torch.nn.init.ones_",
"torch.nn.Sequential",
"torch.nn.init.xavier_uniform_",
"torch.nn.init.kaiming_normal_",
"torch.nn.init.zeros_",
"einops.layers.torch.Reduce",
"functools.partial",
"torch.nn.Linear",
"torch.nn.Identity",
"torch.nn.init.normal_",
"torch.ones... | [((509, 541), 'functools.partial', 'partial', (['nn.LayerNorm'], {'eps': '(1e-06)'}), '(nn.LayerNorm, eps=1e-06)\n', (516, 541), False, 'from functools import partial\n'), ((3175, 3207), 'functools.partial', 'partial', (['nn.LayerNorm'], {'eps': '(1e-06)'}), '(nn.LayerNorm, eps=1e-06)\n', (3182, 3207), False, 'from functools import partial\n'), ((5576, 5598), 'torch.nn.Sequential', 'nn.Sequential', (['*blocks'], {}), '(*blocks)\n', (5589, 5598), False, 'from torch import nn\n'), ((1979, 1998), 'timm.models.layers.DropPath', 'DropPath', (['drop_path'], {}), '(drop_path)\n', (1987, 1998), False, 'from timm.models.layers import DropPath\n'), ((2022, 2035), 'torch.nn.Identity', 'nn.Identity', ([], {}), '()\n', (2033, 2035), False, 'from torch import nn\n'), ((4713, 4753), 'einops.layers.torch.Reduce', 'Reduce', (['"""b n c -> b c"""'], {'reduction': '"""mean"""'}), "('b n c -> b c', reduction='mean')\n", (4719, 4753), False, 'from einops.layers.torch import Reduce\n'), ((4779, 4818), 'torch.nn.Linear', 'nn.Linear', (['hidden_dim', 'self.num_classes'], {}), '(hidden_dim, self.num_classes)\n', (4788, 4818), False, 'from torch import nn\n'), ((6537, 6561), 'torch.nn.init.zeros_', 'nn.init.zeros_', (['m.weight'], {}), '(m.weight)\n', (6551, 6561), False, 'from torch import nn\n'), ((6575, 6597), 'torch.nn.init.zeros_', 'nn.init.zeros_', (['m.bias'], {}), '(m.bias)\n', (6589, 6597), False, 'from torch import nn\n'), ((6626, 6659), 'torch.nn.init.xavier_uniform_', 'nn.init.xavier_uniform_', (['m.weight'], {}), '(m.weight)\n', (6649, 6659), False, 'from torch import nn\n'), ((6895, 6965), 'torch.nn.init.kaiming_normal_', 'nn.init.kaiming_normal_', (['m.weight'], {'mode': '"""fan_out"""', 'nonlinearity': '"""relu"""'}), "(m.weight, mode='fan_out', nonlinearity='relu')\n", (6918, 6965), False, 'from torch import nn\n'), ((7011, 7033), 'torch.nn.init.zeros_', 'nn.init.zeros_', (['m.bias'], {}), '(m.bias)\n', (7025, 7033), False, 'from torch import nn\n'), 
((7114, 7137), 'torch.nn.init.ones_', 'nn.init.ones_', (['m.weight'], {}), '(m.weight)\n', (7127, 7137), False, 'from torch import nn\n'), ((7147, 7169), 'torch.nn.init.zeros_', 'nn.init.zeros_', (['m.bias'], {}), '(m.bias)\n', (7161, 7169), False, 'from torch import nn\n'), ((6749, 6783), 'torch.nn.init.normal_', 'nn.init.normal_', (['m.bias'], {'std': '(1e-06)'}), '(m.bias, std=1e-06)\n', (6764, 6783), False, 'from torch import nn\n'), ((6827, 6849), 'torch.nn.init.zeros_', 'nn.init.zeros_', (['m.bias'], {}), '(m.bias)\n', (6841, 6849), False, 'from torch import nn\n'), ((1585, 1606), 'torch.ones', 'torch.ones', (['(1)', '(1)', 'dim'], {}), '(1, 1, dim)\n', (1595, 1606), False, 'import torch\n')] |
"""
Middleware for managing internal server errors,
and response with a apt error message
"""
import falcon
class InternalServerErrorManager(object):
"""Middleware for managing internal server errors"""
def process_response(self, request, resp, resource, req_succeeded):
"""
Manages response if the server encounters any internal server errors.
For 500 status
Args:
req (object): request object
resp (object): response object
resource (object): Target respource
req_succeeded (bool): Does this response succedded
Returns:
None
Raises:
falcon.HTTPInternalServerError: Raises Falcon internal server error.
"""
if resp.status == falcon.HTTP_500:
raise falcon.HTTPInternalServerError(
title="Internal Server Error",
description="Something went wrong on our side. Please try again later.",
)
else:
return
| [
"falcon.HTTPInternalServerError"
] | [((855, 994), 'falcon.HTTPInternalServerError', 'falcon.HTTPInternalServerError', ([], {'title': '"""Internal Server Error"""', 'description': '"""Something went wrong on our side. Please try again later."""'}), "(title='Internal Server Error', description=\n 'Something went wrong on our side. Please try again later.')\n", (885, 994), False, 'import falcon\n')] |
#!/usr/bin/env python3
import unittest
import os
import sys
import requests
import utils_test
from multiprocessing import Process
import time
sys.path.append(os.path.abspath('engram'))
import engram
class EngramTestCase(unittest.TestCase):
def setUp(self):
self.process = Process(target = engram.create, args = (':memory', True))
self.process.start()
print('running tests in four seconds...')
time.sleep(4)
def tearDown(self):
try:
self.process.terminate()
self.process.join()
except Exception as err:
print('failed to terminate process.')
print(err)
| [
"os.path.abspath",
"multiprocessing.Process",
"time.sleep"
] | [((164, 189), 'os.path.abspath', 'os.path.abspath', (['"""engram"""'], {}), "('engram')\n", (179, 189), False, 'import os\n'), ((294, 347), 'multiprocessing.Process', 'Process', ([], {'target': 'engram.create', 'args': "(':memory', True)"}), "(target=engram.create, args=(':memory', True))\n", (301, 347), False, 'from multiprocessing import Process\n'), ((422, 435), 'time.sleep', 'time.sleep', (['(4)'], {}), '(4)\n', (432, 435), False, 'import time\n')] |
#-*- coding:utf-8; mode:python; indent-tabs-mode: nil; c-basic-offset: 2; tab-width: 2 -*-
import os.path as path
from bes.common.check import check
from bes.common.string_util import string_util
from bes.fs.file_util import file_util
from bes.fs.temp_file import temp_file
from .git import git
from .git_address_util import git_address_util
from .git_repo import git_repo
from .git_clone_options import git_clone_options
class git_clone_manager(object):
'Manage a collection of repos under one root dir with conveniences.'
def __init__(self, root_dir):
self.root_dir = path.expanduser(root_dir)
def update(self, address, options = None):
'Update the repo.'
repo_path = self.path_for_address(address)
repo = git_repo(repo_path, address = address)
repo.clone_or_pull(options = options)
return repo
def path_for_address(self, address):
'Return path for local tarball.'
return path.join(self.root_dir, git_address_util.sanitize_for_local_path(address))
check.register_class(git_clone_manager, include_seq = False)
| [
"bes.common.check.check.register_class",
"os.path.expanduser"
] | [((1005, 1063), 'bes.common.check.check.register_class', 'check.register_class', (['git_clone_manager'], {'include_seq': '(False)'}), '(git_clone_manager, include_seq=False)\n', (1025, 1063), False, 'from bes.common.check import check\n'), ((582, 607), 'os.path.expanduser', 'path.expanduser', (['root_dir'], {}), '(root_dir)\n', (597, 607), True, 'import os.path as path\n')] |
#!/usr/bin/python3
"""
This script sends status of the lb service to SLS
"""
import argparse
import re
import sys
import time
import logging
import logging.config
import json
from datetime import datetime
import os
import requests
def get_arguments():
""" Parse command line arguments"""
parser = argparse.ArgumentParser(
description='Gather heavy users of lxplus.')
parser.add_argument('--debug', dest='debug', action='store_true',
help='write debug messages',
default=False)
args = parser.parse_args()
return args
def send(document):
""" send the document """
return requests.post('http://monit-metrics:10012/',
data=json.dumps(document),
headers={"Content-Type": "application/json; charset=UTF-8"})
def get_server_availability(logger, server_host):
""" Contacts a server, and returns if it is up or not"""
try:
info = requests.get("http://%s/load-balancing/heartbeat" % server_host)
logger.debug("Host contacted, and got %s", info.content.decode())
my_date = int(re.match(r'.*: (\d+) : I am alive', info.content.decode()).group(1))
logger.debug("The last execution was at %s", my_date)
now = time.mktime(datetime.now().timetuple())
latency = now - my_date
if latency < 1800:
logger.info("The server %s is up and running", server_host)
return 100
if latency < 7200:
logger.warning("The server %s has not run for two hours", server_host)
return 50
logger.error("The server is down for more than two hours")
except requests.exceptions.ConnectionError:
logger.error("Error getting the info from %s", server_host)
except AttributeError:
logger.error("Error extracting the timestamp from the response" % info.content)
return 0
def get_number_of_clusters(logger):
""" Checks how many aliases are defined in ermis"""
logger.info("Getting the number of aliases from kermis")
return os.popen('/usr/bin/kermis -j -o read -a all | /usr/bin/jq ".[] |.alias_name "').read().count('\n')
def get_data(logger, args):
""" Gets the KPI for the selected period"""
logger.info("Ready to get the data for %s", args)
availability = get_server_availability(logger, "lbmaster.cern.ch")
if availability == 0:
availability = get_server_availability(logger, "lbslave.cern.ch") / 2.
number_of_clusters = get_number_of_clusters(logger)
availabilitydesc = """<h3>DNS Load Balancing</h3><p>%s LB aliases defined</p>
<h4>Please follow the link below to see the LB Alias logs</h4><p>
<a href=\"https://aiermis.cern.ch/lbweb/logsform\">https://aiermis.cern.ch/lbweb/logsform</a></p>
""" % number_of_clusters
sls_state = 'unavailable'
if number_of_clusters == 0:
sls_state = 'degraded'
logger.error('Error getting the number of aliases from kermis')
if availability > 75:
sls_state = 'available'
elif availability > 40:
sls_state = 'degraded'
return {'producer': 'loadbalancer',
'type': 'availability',
'serviceid': 'DNSLOADBALANCING',
'service_status': sls_state,
'timestamp': int(1000*time.mktime(datetime.now().timetuple())),
'availabilityinfo': availabilitydesc,
'availabilitydesc': "The availability has been estimated to %s" % availability,
'contact': '<EMAIL>',
'webpage': 'http://information-technology.web.cern.ch/services/load-balancing-services',
}
def send_and_check(logger, document):
""" Sends a document to UMA"""
response = requests.post('http://monit-metrics:10012/', data=json.dumps(document),
headers={"Content-Type": "application/json; charset=UTF-8"})
logger.info("We got %s and %s ", response.status_code, response.text)
assert(response.status_code in [200]), \
'With document: {0}. Status code: {1}. Message: {2}'.format(document,
response.status_code,
response.text)
def main():
""" Let's do the the alarms"""
args = get_arguments()
logger = logging.getLogger(__name__)
logger.propagate = False
if args.debug:
logger.setLevel(logging.DEBUG)
formatter = logging.Formatter(
'%(asctime)s - %(name)s %(funcName)20s() - %(levelname)s - %(message)s')
else:
logger.setLevel(logging.INFO)
formatter = logging.Formatter(
'%(asctime)s - %(levelname)s - %(message)s')
chan = logging.StreamHandler()
chan.setFormatter(formatter)
logger.addHandler(chan)
document = get_data(logger, args)
logger.info("The document is %s", document)
send_and_check(logger, document)
logger.info("Done")
return 0
if __name__ == '__main__':
sys.exit(main())
| [
"logging.getLogger",
"logging.StreamHandler",
"argparse.ArgumentParser",
"logging.Formatter",
"json.dumps",
"requests.get",
"datetime.datetime.now",
"os.popen"
] | [((313, 381), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Gather heavy users of lxplus."""'}), "(description='Gather heavy users of lxplus.')\n", (336, 381), False, 'import argparse\n'), ((4368, 4395), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (4385, 4395), False, 'import logging\n'), ((4763, 4786), 'logging.StreamHandler', 'logging.StreamHandler', ([], {}), '()\n', (4784, 4786), False, 'import logging\n'), ((984, 1048), 'requests.get', 'requests.get', (["('http://%s/load-balancing/heartbeat' % server_host)"], {}), "('http://%s/load-balancing/heartbeat' % server_host)\n", (996, 1048), False, 'import requests\n'), ((4503, 4598), 'logging.Formatter', 'logging.Formatter', (['"""%(asctime)s - %(name)s %(funcName)20s() - %(levelname)s - %(message)s"""'], {}), "(\n '%(asctime)s - %(name)s %(funcName)20s() - %(levelname)s - %(message)s')\n", (4520, 4598), False, 'import logging\n'), ((4675, 4738), 'logging.Formatter', 'logging.Formatter', (['"""%(asctime)s - %(levelname)s - %(message)s"""'], {}), "('%(asctime)s - %(levelname)s - %(message)s')\n", (4692, 4738), False, 'import logging\n'), ((739, 759), 'json.dumps', 'json.dumps', (['document'], {}), '(document)\n', (749, 759), False, 'import json\n'), ((3788, 3808), 'json.dumps', 'json.dumps', (['document'], {}), '(document)\n', (3798, 3808), False, 'import json\n'), ((1303, 1317), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (1315, 1317), False, 'from datetime import datetime\n'), ((2096, 2175), 'os.popen', 'os.popen', (['"""/usr/bin/kermis -j -o read -a all | /usr/bin/jq ".[] |.alias_name \\""""'], {}), '(\'/usr/bin/kermis -j -o read -a all | /usr/bin/jq ".[] |.alias_name "\')\n', (2104, 2175), False, 'import os\n'), ((3329, 3343), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (3341, 3343), False, 'from datetime import datetime\n')] |
import os
import subprocess
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torch.nn.functional import interpolate
from loguru import logger
from tqdm import tqdm
import numpy as np
import wandb
from draw_concat import draw_concat
from generate_noise import generate_spatial_noise
from minecraft.level_utils import one_hot_to_blockdata_level, save_level_to_world, clear_empty_world
from minecraft.level_renderer import render_minecraft
from models import calc_gradient_penalty, save_networks
from utils import interpolate3D
def update_noise_amplitude(z_prev, real, opt):
""" Update the amplitude of the noise for the current scale according to the previous noise map. """
RMSE = torch.sqrt(F.mse_loss(real, z_prev))
return opt.noise_update * RMSE
def train_single_scale(D, G, reals, generators, noise_maps, input_from_prev_scale, noise_amplitudes, opt):
""" Train one scale. D and G are the current discriminator and generator, reals are the scaled versions of the
original level, generators and noise_maps contain information from previous scales and will receive information in
this scale, input_from_previous_scale holds the noise map and images from the previous scale, noise_amplitudes hold
the amplitudes for the noise in all the scales. opt is a namespace that holds all necessary parameters. """
current_scale = len(generators)
clear_empty_world(opt.output_dir, 'Curr_Empty_World') # reset tmp world
if opt.use_multiple_inputs:
real_group = []
nzx_group = []
nzy_group = []
nz_group = []
for scale_group in reals:
real_group.append(scale_group[current_scale])
nzx_group.append(scale_group[current_scale].shape[2])
nzy_group.append(scale_group[current_scale].shape[3])
nz_group.append((scale_group[current_scale].shape[2], scale_group[current_scale].shape[3]))
curr_noises = [0 for _ in range(len(real_group))]
curr_prevs = [0 for _ in range(len(real_group))]
curr_z_prevs = [0 for _ in range(len(real_group))]
else:
real = reals[current_scale]
nz = real.shape[2:]
padsize = int(1 * opt.num_layer) # As kernel size is always 3 currently, padsize goes up by one per layer
if not opt.pad_with_noise:
# pad_noise = nn.ConstantPad3d(padsize, 0)
# pad_image = nn.ConstantPad3d(padsize, 0)
pad_noise = nn.ReplicationPad3d(padsize)
pad_image = nn.ReplicationPad3d(padsize)
else:
pad_noise = nn.ReplicationPad3d(padsize)
pad_image = nn.ReplicationPad3d(padsize)
# setup optimizer
optimizerD = optim.Adam(D.parameters(), lr=opt.lr_d, betas=(opt.beta1, 0.999))
optimizerG = optim.Adam(G.parameters(), lr=opt.lr_g, betas=(opt.beta1, 0.999))
schedulerD = torch.optim.lr_scheduler.MultiStepLR(optimizer=optimizerD, milestones=[1600, 2500], gamma=opt.gamma)
schedulerG = torch.optim.lr_scheduler.MultiStepLR(optimizer=optimizerG, milestones=[1600, 2500], gamma=opt.gamma)
if current_scale == 0: # Generate new noise
if opt.use_multiple_inputs:
z_opt_group = []
for nzx, nzy in zip(nzx_group, nzy_group):
z_opt = generate_spatial_noise([1, opt.nc_current, nzx, nzy], device=opt.device)
z_opt = pad_noise(z_opt)
z_opt_group.append(z_opt)
else:
z_opt = generate_spatial_noise((1, opt.nc_current) + nz, device=opt.device)
z_opt = pad_noise(z_opt)
else: # Add noise to previous output
if opt.use_multiple_inputs:
z_opt_group = []
for nzx, nzy in zip(nzx_group, nzy_group):
z_opt = torch.zeros([1, opt.nc_current, nzx, nzy]).to(opt.device)
z_opt = pad_noise(z_opt)
z_opt_group.append(z_opt)
else:
z_opt = torch.zeros((1, opt.nc_current) + nz).to(opt.device)
z_opt = pad_noise(z_opt)
logger.info("Training at scale {}", current_scale)
grad_d_real = []
grad_d_fake = []
grad_g = []
for p in D.parameters():
grad_d_real.append(torch.zeros(p.shape).to(opt.device))
grad_d_fake.append(torch.zeros(p.shape).to(opt.device))
for p in G.parameters():
grad_g.append(torch.zeros(p.shape).to(opt.device))
for epoch in tqdm(range(opt.niter)):
step = current_scale * opt.niter + epoch
if opt.use_multiple_inputs:
group_steps = len(real_group)
noise_group = []
for nzx, nzy in zip(nzx_group, nzy_group):
noise_ = generate_spatial_noise([1, opt.nc_current, nzx, nzy], device=opt.device)
noise_ = pad_noise(noise_)
noise_group.append(noise_)
else:
group_steps = 1
noise_ = generate_spatial_noise((1, opt.nc_current) + nz, device=opt.device)
noise_ = pad_noise(noise_)
for curr_inp in range(group_steps):
if opt.use_multiple_inputs:
real = real_group[curr_inp]
nz = nz_group[curr_inp]
z_opt = z_opt_group[curr_inp]
noise_ = noise_group[curr_inp]
prev_scale_results = input_from_prev_scale[curr_inp]
opt.curr_inp = curr_inp
else:
prev_scale_results = input_from_prev_scale
############################
# (1) Update D network: maximize D(x) + D(G(z))
###########################
for j in range(opt.Dsteps):
# train with real
D.zero_grad()
output = D(real).to(opt.device)
errD_real = -output.mean()
errD_real.backward(retain_graph=True)
grads_after = []
cos_sim = []
for i, p in enumerate(D.parameters()):
grads_after.append(p.grad)
cos_sim.append(nn.CosineSimilarity(-1)(grad_d_real[i], p.grad).mean().item())
diff_d_real = np.mean(cos_sim)
grad_d_real = grads_after
# train with fake
if (j == 0) & (epoch == 0):
if current_scale == 0: # If we are in the lowest scale, noise is generated from scratch
prev = torch.zeros((1, opt.nc_current) + nz).to(opt.device)
prev_scale_results = prev
prev = pad_image(prev)
z_prev = torch.zeros((1, opt.nc_current) + nz).to(opt.device)
z_prev = pad_noise(z_prev)
opt.noise_amp = 1
else: # First step in NOT the lowest scale
# We need to adapt our inputs from the previous scale and add noise to it
prev = draw_concat(generators, noise_maps, reals, noise_amplitudes, prev_scale_results,
"rand", pad_noise, pad_image, opt)
prev = interpolate3D(prev, real.shape[-3:], mode="bilinear", align_corners=True)
prev = pad_image(prev)
z_prev = draw_concat(generators, noise_maps, reals, noise_amplitudes, prev_scale_results,
"rec", pad_noise, pad_image, opt)
z_prev = interpolate3D(z_prev, real.shape[-3:], mode="bilinear", align_corners=True)
opt.noise_amp = update_noise_amplitude(z_prev, real, opt)
z_prev = pad_image(z_prev)
else: # Any other step
if opt.use_multiple_inputs:
z_prev = curr_z_prevs[curr_inp]
prev = draw_concat(generators, noise_maps, reals, noise_amplitudes, prev_scale_results,
"rand", pad_noise, pad_image, opt)
prev = interpolate3D(prev, real.shape[-3:], mode="bilinear", align_corners=False)
prev = pad_image(prev)
# After creating our correct noise input, we feed it to the generator:
noise = opt.noise_amp * noise_ + prev
fake = G(noise.detach(), prev)
# Then run the result through the discriminator
output = D(fake.detach())
errD_fake = output.mean()
# Backpropagation
errD_fake.backward(retain_graph=False)
# Gradient Penalty
gradient_penalty = calc_gradient_penalty(D, real, fake, opt.lambda_grad, opt.device)
gradient_penalty.backward(retain_graph=False)
grads_after = []
cos_sim = []
for i, p in enumerate(D.parameters()):
grads_after.append(p.grad)
cos_sim.append(nn.CosineSimilarity(-1)(grad_d_fake[i], p.grad).mean().item())
diff_d_fake = np.mean(cos_sim)
grad_d_fake = grads_after
# Logging:
if step % 10 == 0:
wandb.log({f"D(G(z))@{current_scale}": errD_fake.item(),
f"D(x)@{current_scale}": -errD_real.item(),
f"gradient_penalty@{current_scale}": gradient_penalty.item(),
f"D_real_grad@{current_scale}": diff_d_real,
f"D_fake_grad@{current_scale}": diff_d_fake,
},
step=step, sync=False)
optimizerD.step()
if opt.use_multiple_inputs:
z_opt_group[curr_inp] = z_opt
input_from_prev_scale[curr_inp] = prev_scale_results
curr_noises[curr_inp] = noise
curr_prevs[curr_inp] = prev
curr_z_prevs[curr_inp] = z_prev
############################
# (2) Update G network: maximize D(G(z))
###########################
for j in range(opt.Gsteps):
G.zero_grad()
fake = G(noise.detach(), prev.detach(), temperature=1)
output = D(fake)
errG = -output.mean()
errG.backward(retain_graph=False)
grads_after = []
cos_sim = []
for i, p in enumerate(G.parameters()):
grads_after.append(p.grad)
cos_sim.append(nn.CosineSimilarity(-1)(grad_g[i], p.grad).mean().item())
diff_g = np.mean(cos_sim)
grad_g = grads_after
if opt.alpha != 0: # i. e. we are trying to find an exact recreation of our input in the lat space
Z_opt = opt.noise_amp * z_opt + z_prev
G_rec = G(Z_opt.detach(), z_prev, temperature=1)
rec_loss = opt.alpha * F.mse_loss(G_rec, real)
rec_loss.backward(retain_graph=False) # TODO: Check for unexpected argument retain_graph=True
rec_loss = rec_loss.detach()
else: # We are not trying to find an exact recreation
rec_loss = torch.zeros([])
Z_opt = z_opt
optimizerG.step()
# More Logging:
if step % 10 == 0:
wandb.log({f"noise_amplitude@{current_scale}": opt.noise_amp,
f"rec_loss@{current_scale}": rec_loss.item(),
f"G_grad@{current_scale}": diff_g},
step=step, sync=False, commit=True)
# Rendering and logging images of levels
if epoch % 500 == 0 or epoch == (opt.niter - 1):
token_list = opt.token_list
to_level = one_hot_to_blockdata_level
try:
subprocess.call(["wine", '--version'])
real_scaled = to_level(real.detach(), token_list, opt.block2repr, opt.repr_type)
# Minecraft World
worldname = 'Curr_Empty_World'
clear_empty_world(opt.output_dir, worldname) # reset tmp world
to_render = [real_scaled, to_level(fake.detach(), token_list, opt.block2repr, opt.repr_type),
to_level(G(Z_opt.detach(), z_prev), token_list, opt.block2repr, opt.repr_type)]
render_names = [f"real@{current_scale}", f"G(z)@{current_scale}", f"G(z_opt)@{current_scale}"]
obj_pth = os.path.join(opt.out_, f"objects/{current_scale}")
os.makedirs(obj_pth, exist_ok=True)
for n, level in enumerate(to_render):
pos = n * (level.shape[0] + 5)
save_level_to_world(opt.output_dir, worldname, (pos, 0, 0), level, token_list, opt.props)
curr_coords = [[pos, pos + real_scaled.shape[0]],
[0, real_scaled.shape[1]],
[0, real_scaled.shape[2]]]
render_pth = render_minecraft(worldname, curr_coords, obj_pth, render_names[n])
wandb.log({render_names[n]: wandb.Object3D(open(render_pth))}, commit=False)
except OSError:
pass
# Learning Rate scheduler step
schedulerD.step()
schedulerG.step()
# Save networks
if opt.use_multiple_inputs:
z_opt = z_opt_group
torch.save(z_opt, "%s/z_opt.pth" % opt.outf)
save_networks(G, D, z_opt, opt)
wandb.save(opt.outf)
return z_opt, input_from_prev_scale, G
| [
"torch.optim.lr_scheduler.MultiStepLR",
"models.save_networks",
"numpy.mean",
"generate_noise.generate_spatial_noise",
"minecraft.level_utils.clear_empty_world",
"torch.nn.ReplicationPad3d",
"minecraft.level_renderer.render_minecraft",
"subprocess.call",
"models.calc_gradient_penalty",
"draw_conca... | [((1431, 1484), 'minecraft.level_utils.clear_empty_world', 'clear_empty_world', (['opt.output_dir', '"""Curr_Empty_World"""'], {}), "(opt.output_dir, 'Curr_Empty_World')\n", (1448, 1484), False, 'from minecraft.level_utils import one_hot_to_blockdata_level, save_level_to_world, clear_empty_world\n'), ((2866, 2970), 'torch.optim.lr_scheduler.MultiStepLR', 'torch.optim.lr_scheduler.MultiStepLR', ([], {'optimizer': 'optimizerD', 'milestones': '[1600, 2500]', 'gamma': 'opt.gamma'}), '(optimizer=optimizerD, milestones=[1600,\n 2500], gamma=opt.gamma)\n', (2902, 2970), False, 'import torch\n'), ((2984, 3088), 'torch.optim.lr_scheduler.MultiStepLR', 'torch.optim.lr_scheduler.MultiStepLR', ([], {'optimizer': 'optimizerG', 'milestones': '[1600, 2500]', 'gamma': 'opt.gamma'}), '(optimizer=optimizerG, milestones=[1600,\n 2500], gamma=opt.gamma)\n', (3020, 3088), False, 'import torch\n'), ((4030, 4080), 'loguru.logger.info', 'logger.info', (['"""Training at scale {}"""', 'current_scale'], {}), "('Training at scale {}', current_scale)\n", (4041, 4080), False, 'from loguru import logger\n'), ((13579, 13623), 'torch.save', 'torch.save', (['z_opt', "('%s/z_opt.pth' % opt.outf)"], {}), "(z_opt, '%s/z_opt.pth' % opt.outf)\n", (13589, 13623), False, 'import torch\n'), ((13628, 13659), 'models.save_networks', 'save_networks', (['G', 'D', 'z_opt', 'opt'], {}), '(G, D, z_opt, opt)\n', (13641, 13659), False, 'from models import calc_gradient_penalty, save_networks\n'), ((13664, 13684), 'wandb.save', 'wandb.save', (['opt.outf'], {}), '(opt.outf)\n', (13674, 13684), False, 'import wandb\n'), ((754, 778), 'torch.nn.functional.mse_loss', 'F.mse_loss', (['real', 'z_prev'], {}), '(real, z_prev)\n', (764, 778), True, 'import torch.nn.functional as F\n'), ((2473, 2501), 'torch.nn.ReplicationPad3d', 'nn.ReplicationPad3d', (['padsize'], {}), '(padsize)\n', (2492, 2501), True, 'import torch.nn as nn\n'), ((2522, 2550), 'torch.nn.ReplicationPad3d', 'nn.ReplicationPad3d', 
(['padsize'], {}), '(padsize)\n', (2541, 2550), True, 'import torch.nn as nn\n'), ((2582, 2610), 'torch.nn.ReplicationPad3d', 'nn.ReplicationPad3d', (['padsize'], {}), '(padsize)\n', (2601, 2610), True, 'import torch.nn as nn\n'), ((2631, 2659), 'torch.nn.ReplicationPad3d', 'nn.ReplicationPad3d', (['padsize'], {}), '(padsize)\n', (2650, 2659), True, 'import torch.nn as nn\n'), ((3469, 3536), 'generate_noise.generate_spatial_noise', 'generate_spatial_noise', (['((1, opt.nc_current) + nz)'], {'device': 'opt.device'}), '((1, opt.nc_current) + nz, device=opt.device)\n', (3491, 3536), False, 'from generate_noise import generate_spatial_noise\n'), ((4885, 4952), 'generate_noise.generate_spatial_noise', 'generate_spatial_noise', (['((1, opt.nc_current) + nz)'], {'device': 'opt.device'}), '((1, opt.nc_current) + nz, device=opt.device)\n', (4907, 4952), False, 'from generate_noise import generate_spatial_noise\n'), ((3279, 3351), 'generate_noise.generate_spatial_noise', 'generate_spatial_noise', (['[1, opt.nc_current, nzx, nzy]'], {'device': 'opt.device'}), '([1, opt.nc_current, nzx, nzy], device=opt.device)\n', (3301, 3351), False, 'from generate_noise import generate_spatial_noise\n'), ((4663, 4735), 'generate_noise.generate_spatial_noise', 'generate_spatial_noise', (['[1, opt.nc_current, nzx, nzy]'], {'device': 'opt.device'}), '([1, opt.nc_current, nzx, nzy], device=opt.device)\n', (4685, 4735), False, 'from generate_noise import generate_spatial_noise\n'), ((6128, 6144), 'numpy.mean', 'np.mean', (['cos_sim'], {}), '(cos_sim)\n', (6135, 6144), True, 'import numpy as np\n'), ((8650, 8715), 'models.calc_gradient_penalty', 'calc_gradient_penalty', (['D', 'real', 'fake', 'opt.lambda_grad', 'opt.device'], {}), '(D, real, fake, opt.lambda_grad, opt.device)\n', (8671, 8715), False, 'from models import calc_gradient_penalty, save_networks\n'), ((9072, 9088), 'numpy.mean', 'np.mean', (['cos_sim'], {}), '(cos_sim)\n', (9079, 9088), True, 'import numpy as np\n'), ((10715, 10731), 
'numpy.mean', 'np.mean', (['cos_sim'], {}), '(cos_sim)\n', (10722, 10731), True, 'import numpy as np\n'), ((11977, 12015), 'subprocess.call', 'subprocess.call', (["['wine', '--version']"], {}), "(['wine', '--version'])\n", (11992, 12015), False, 'import subprocess\n'), ((12211, 12255), 'minecraft.level_utils.clear_empty_world', 'clear_empty_world', (['opt.output_dir', 'worldname'], {}), '(opt.output_dir, worldname)\n', (12228, 12255), False, 'from minecraft.level_utils import one_hot_to_blockdata_level, save_level_to_world, clear_empty_world\n'), ((12630, 12680), 'os.path.join', 'os.path.join', (['opt.out_', 'f"""objects/{current_scale}"""'], {}), "(opt.out_, f'objects/{current_scale}')\n", (12642, 12680), False, 'import os\n'), ((12697, 12732), 'os.makedirs', 'os.makedirs', (['obj_pth'], {'exist_ok': '(True)'}), '(obj_pth, exist_ok=True)\n', (12708, 12732), False, 'import os\n'), ((3935, 3972), 'torch.zeros', 'torch.zeros', (['((1, opt.nc_current) + nz)'], {}), '((1, opt.nc_current) + nz)\n', (3946, 3972), False, 'import torch\n'), ((4195, 4215), 'torch.zeros', 'torch.zeros', (['p.shape'], {}), '(p.shape)\n', (4206, 4215), False, 'import torch\n'), ((4259, 4279), 'torch.zeros', 'torch.zeros', (['p.shape'], {}), '(p.shape)\n', (4270, 4279), False, 'import torch\n'), ((4348, 4368), 'torch.zeros', 'torch.zeros', (['p.shape'], {}), '(p.shape)\n', (4359, 4368), False, 'import torch\n'), ((7849, 7968), 'draw_concat.draw_concat', 'draw_concat', (['generators', 'noise_maps', 'reals', 'noise_amplitudes', 'prev_scale_results', '"""rand"""', 'pad_noise', 'pad_image', 'opt'], {}), "(generators, noise_maps, reals, noise_amplitudes,\n prev_scale_results, 'rand', pad_noise, pad_image, opt)\n", (7860, 7968), False, 'from draw_concat import draw_concat\n'), ((8032, 8106), 'utils.interpolate3D', 'interpolate3D', (['prev', 'real.shape[-3:]'], {'mode': '"""bilinear"""', 'align_corners': '(False)'}), "(prev, real.shape[-3:], mode='bilinear', align_corners=False)\n", (8045, 8106), 
False, 'from utils import interpolate3D\n'), ((11348, 11363), 'torch.zeros', 'torch.zeros', (['[]'], {}), '([])\n', (11359, 11363), False, 'import torch\n'), ((12858, 12951), 'minecraft.level_utils.save_level_to_world', 'save_level_to_world', (['opt.output_dir', 'worldname', '(pos, 0, 0)', 'level', 'token_list', 'opt.props'], {}), '(opt.output_dir, worldname, (pos, 0, 0), level,\n token_list, opt.props)\n', (12877, 12951), False, 'from minecraft.level_utils import one_hot_to_blockdata_level, save_level_to_world, clear_empty_world\n'), ((13175, 13241), 'minecraft.level_renderer.render_minecraft', 'render_minecraft', (['worldname', 'curr_coords', 'obj_pth', 'render_names[n]'], {}), '(worldname, curr_coords, obj_pth, render_names[n])\n', (13191, 13241), False, 'from minecraft.level_renderer import render_minecraft\n'), ((3760, 3802), 'torch.zeros', 'torch.zeros', (['[1, opt.nc_current, nzx, nzy]'], {}), '([1, opt.nc_current, nzx, nzy])\n', (3771, 3802), False, 'import torch\n'), ((6929, 7048), 'draw_concat.draw_concat', 'draw_concat', (['generators', 'noise_maps', 'reals', 'noise_amplitudes', 'prev_scale_results', '"""rand"""', 'pad_noise', 'pad_image', 'opt'], {}), "(generators, noise_maps, reals, noise_amplitudes,\n prev_scale_results, 'rand', pad_noise, pad_image, opt)\n", (6940, 7048), False, 'from draw_concat import draw_concat\n'), ((7120, 7193), 'utils.interpolate3D', 'interpolate3D', (['prev', 'real.shape[-3:]'], {'mode': '"""bilinear"""', 'align_corners': '(True)'}), "(prev, real.shape[-3:], mode='bilinear', align_corners=True)\n", (7133, 7193), False, 'from utils import interpolate3D\n'), ((7275, 7393), 'draw_concat.draw_concat', 'draw_concat', (['generators', 'noise_maps', 'reals', 'noise_amplitudes', 'prev_scale_results', '"""rec"""', 'pad_noise', 'pad_image', 'opt'], {}), "(generators, noise_maps, reals, noise_amplitudes,\n prev_scale_results, 'rec', pad_noise, pad_image, opt)\n", (7286, 7393), False, 'from draw_concat import draw_concat\n'), ((7468, 
7543), 'utils.interpolate3D', 'interpolate3D', (['z_prev', 'real.shape[-3:]'], {'mode': '"""bilinear"""', 'align_corners': '(True)'}), "(z_prev, real.shape[-3:], mode='bilinear', align_corners=True)\n", (7481, 7543), False, 'from utils import interpolate3D\n'), ((11058, 11081), 'torch.nn.functional.mse_loss', 'F.mse_loss', (['G_rec', 'real'], {}), '(G_rec, real)\n', (11068, 11081), True, 'import torch.nn.functional as F\n'), ((6407, 6444), 'torch.zeros', 'torch.zeros', (['((1, opt.nc_current) + nz)'], {}), '((1, opt.nc_current) + nz)\n', (6418, 6444), False, 'import torch\n'), ((6590, 6627), 'torch.zeros', 'torch.zeros', (['((1, opt.nc_current) + nz)'], {}), '((1, opt.nc_current) + nz)\n', (6601, 6627), False, 'import torch\n'), ((6034, 6057), 'torch.nn.CosineSimilarity', 'nn.CosineSimilarity', (['(-1)'], {}), '(-1)\n', (6053, 6057), True, 'import torch.nn as nn\n'), ((8978, 9001), 'torch.nn.CosineSimilarity', 'nn.CosineSimilarity', (['(-1)'], {}), '(-1)\n', (8997, 9001), True, 'import torch.nn as nn\n'), ((10631, 10654), 'torch.nn.CosineSimilarity', 'nn.CosineSimilarity', (['(-1)'], {}), '(-1)\n', (10650, 10654), True, 'import torch.nn as nn\n')] |
# coding: utf-8
# Copyright (c) Materials Virtual Lab
# Distributed under the terms of the BSD License.
import joblib
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
from veidt.abstract import Model
class MultiLayerPerceptron(Model):
    """
    Basic fully-connected neural network model.

    Input objects are converted to descriptors by ``self.describer``,
    optionally scaled by ``self.preprocessor``, and fed into a Keras
    ``Sequential`` network with a single output unit.
    """

    def __init__(self, layer_sizes, describer, preprocessor=None,
                 activation="relu", loss="mse"):
        """
        Args:
            layer_sizes (list): Hidden layer sizes, e.g., [3, 3].
            describer (Describer): Describer to convert
                input objects to descriptors.
            preprocessor (BaseEstimator): Processor to use.
                Defaults to StandardScaler.
            activation (str): Activation function.
            loss (str): Loss function. Defaults to mse.
        """
        self.layer_sizes = layer_sizes
        self.describer = describer
        self.output_describer = None
        self.preprocessor = preprocessor
        self.activation = activation
        self.loss = loss
        self.model = None  # set by fit() or load()

    def fit(self, inputs, outputs, test_size=0.2, adam_lr=1e-2, **kwargs):
        """
        Fit the network on the given inputs/outputs.

        Args:
            inputs (list): List of inputs.
            outputs (list): List of outputs.
            test_size (float): Size of test set. Defaults to 0.2.
            adam_lr (float): Learning rate of the Adam optimizer.
            kwargs: Passthrough to fit function in keras.models.
        """
        # Imported lazily so the module can be imported without keras.
        from keras.optimizers import Adam
        from keras.models import Sequential
        from keras.layers import Dense
        descriptors = self.describer.transform(inputs)
        if self.preprocessor is None:
            # First fit: create and fit the default scaler.
            self.preprocessor = StandardScaler()
            scaled_descriptors = self.preprocessor.fit_transform(descriptors)
        else:
            # Reuse the already-fitted preprocessor.
            scaled_descriptors = self.preprocessor.transform(descriptors)
        adam = Adam(adam_lr)
        x_train, x_test, y_train, y_test = train_test_split(
            scaled_descriptors, outputs, test_size=test_size)
        model = Sequential()
        model.add(Dense(self.layer_sizes[0], input_dim=len(x_train[0]),
                        activation=self.activation))
        # "size" instead of the ambiguous single-letter name "l" (PEP 8 / E741).
        for size in self.layer_sizes[1:]:
            model.add(Dense(size, activation=self.activation))
        model.add(Dense(1))  # single regression output
        model.compile(loss=self.loss, optimizer=adam, metrics=[self.loss])
        model.fit(x_train, y_train, verbose=0, validation_data=(x_test, y_test),
                  **kwargs)
        self.model = model

    def predict(self, inputs):
        """
        Predict outputs with fitted model.

        Args:
            inputs (list): List of input testing objects.

        Returns:
            Prediction array from the underlying keras model.
        """
        descriptors = self.describer.transform(inputs)
        scaled_descriptors = self.preprocessor.transform(descriptors)
        outputs = self.model.predict(scaled_descriptors)
        return outputs

    def save(self, model_fname, scaler_fname):
        """
        Use keras model.save method to save model in *.h5 file.
        Use joblib to save the scaler (the *.save
        file is supposed to be much smaller than saved as
        pickle file).

        Args:
            model_fname (str): Filename to save model object.
            scaler_fname (str): Filename to save scaler object.
        """
        self.model.save(model_fname)
        joblib.dump(self.preprocessor, scaler_fname)

    def load(self, model_fname, scaler_fname):
        """
        Load model and scaler from corresponding files.

        Args:
            model_fname (str): Filename storing model.
            scaler_fname (str): Filename storing scaler.
        """
        from keras.models import load_model
        self.model = load_model(model_fname)
        self.preprocessor = joblib.load(scaler_fname)
| [
"keras.optimizers.Adam",
"keras.models.load_model",
"sklearn.model_selection.train_test_split",
"keras.models.Sequential",
"sklearn.preprocessing.StandardScaler",
"joblib.load",
"keras.layers.Dense",
"joblib.dump"
] | [((1948, 1961), 'keras.optimizers.Adam', 'Adam', (['adam_lr'], {}), '(adam_lr)\n', (1952, 1961), False, 'from keras.optimizers import Adam\n'), ((2005, 2071), 'sklearn.model_selection.train_test_split', 'train_test_split', (['scaled_descriptors', 'outputs'], {'test_size': 'test_size'}), '(scaled_descriptors, outputs, test_size=test_size)\n', (2021, 2071), False, 'from sklearn.model_selection import train_test_split\n'), ((2102, 2114), 'keras.models.Sequential', 'Sequential', ([], {}), '()\n', (2112, 2114), False, 'from keras.models import Sequential\n'), ((3418, 3462), 'joblib.dump', 'joblib.dump', (['self.preprocessor', 'scaler_fname'], {}), '(self.preprocessor, scaler_fname)\n', (3429, 3462), False, 'import joblib\n'), ((3783, 3806), 'keras.models.load_model', 'load_model', (['model_fname'], {}), '(model_fname)\n', (3793, 3806), False, 'from keras.models import load_model\n'), ((3835, 3860), 'joblib.load', 'joblib.load', (['scaler_fname'], {}), '(scaler_fname)\n', (3846, 3860), False, 'import joblib\n'), ((1750, 1766), 'sklearn.preprocessing.StandardScaler', 'StandardScaler', ([], {}), '()\n', (1764, 1766), False, 'from sklearn.preprocessing import StandardScaler\n'), ((2357, 2365), 'keras.layers.Dense', 'Dense', (['(1)'], {}), '(1)\n', (2362, 2365), False, 'from keras.layers import Dense\n'), ((2301, 2337), 'keras.layers.Dense', 'Dense', (['l'], {'activation': 'self.activation'}), '(l, activation=self.activation)\n', (2306, 2337), False, 'from keras.layers import Dense\n')] |
import numpy as np
import tkinter as tk
from PIL import ImageTk, Image
class Colors:
    # Hex color palette shared by the popup widgets below.
    YES = '#51b442'      # affirmative button (green)
    NO = '#a03939'       # negative button (red)
    NEUTRAL = '#cacaca'  # neutral element (grey)
    BLACK = '#2e2e2e'    # default text color
    ALARM = '#c30b0b'    # alarm/error highlight (bright red)
class Popup:
    """Modal Tk dialog showing a text message and a row of buttons.

    The name of the pressed button is stored in ``self.ret`` and
    returned by ``__call__``/``response`` once the window is closed.
    """

    def __init__(self,
                 title='',
                 text='',
                 text_color=Colors.BLACK,
                 # Tuples instead of lists: mutable default arguments are
                 # evaluated once and shared across calls (common pitfall).
                 button_names=('OK', 'Cancel'),
                 button_colors=(Colors.YES, Colors.NO),
                 ):
        def press_gen(name):
            # Build a callback that records the pressed button's name
            # and closes the dialog.
            def press():
                self.ret = name
                self.root.destroy()
            return press

        self.ret = None  # set to the pressed button's name on close
        self.root = tk.Tk()
        self.root.winfo_toplevel().title(title)
        self.root.grid_columnconfigure(0, weight=1)
        self.root.grid_rowconfigure(0, weight=1)
        # text
        text_frame = tk.Frame(self.root, pady=4, padx=6)
        text_frame.grid_columnconfigure(0, weight=1)
        text_frame.grid(row=0, sticky='NSEW')
        text_label = tk.Label(text_frame, text=text, fg=text_color,
                              font=('Helvetica','12','normal'))
        text_label.grid(sticky='NSEW')
        # buttons
        buttons_frame = tk.Frame(self.root)
        buttons_frame.grid(row=2, sticky='NSEW', pady=8, padx=8)
        for i, name in enumerate(button_names):
            buttons_frame.grid_columnconfigure(i, weight=1)
            button = tk.Button(buttons_frame, text=name, bg=button_colors[i],
                              font=('Helvetica','12','bold'),
                              width=8, command=press_gen(name))
            button.grid(row=0, column=i, sticky='NS', padx=10)

    def response(self):
        """Alias for calling the popup; blocks until a button is pressed."""
        return self()

    def __call__(self):
        """Run the dialog's main loop and return the pressed button name."""
        self.root.mainloop()
        return self.ret
class ImagePopup(Popup):
    """Popup that additionally shows a row of images.

    Each entry of ``images`` may be a file path (str) or a BGR image
    array (numpy.ndarray) as produced by OpenCV.
    """

    def __init__(self,
                 title='',
                 text='',
                 text_color=Colors.BLACK,
                 # Immutable defaults: mutable default arguments are
                 # evaluated once and shared across calls (common pitfall).
                 button_names=('OK', 'Cancel'),
                 button_colors=(Colors.YES, Colors.NO),
                 images=(),
                 image_shape=(200, 200),
                 ):
        super().__init__(title, text, text_color, button_names, button_colors)
        images_frame = tk.Frame(self.root)
        images_frame.grid(row=1, sticky='NESW', padx=6)
        H, W = image_shape
        for i, img_ in enumerate(images):
            if isinstance(img_, str):
                img = Image.open(img_)
            elif isinstance(img_, np.ndarray):
                # image coming from cv so it will be in BGR
                img = Image.fromarray(img_[:, :, ::-1])
            else:
                # Explicit message instead of a bare TypeError.
                raise TypeError(
                    'images must contain file paths (str) or numpy arrays, '
                    'got %r' % type(img_))
            img = img.resize((W, H), Image.ANTIALIAS)
            img = ImageTk.PhotoImage(img)
            images_frame.grid_columnconfigure(i, weight=1)
            img_canvas = tk.Canvas(images_frame, width=W, height=H, bg='#ffffff')
            # Keep a reference: Tkinter does not hold on to PhotoImages.
            img_canvas.image = img
            img_canvas.grid(row=0, column=i, sticky='NS', padx=2)
            img_canvas.create_image(1, 1, anchor='nw', image=img)
class VideoPopup(Popup):
    """Popup that shows a live video feed, refreshed every 100 ms.

    ``video_cap`` must provide a ``read()`` method returning a BGR frame
    (numpy array), e.g. an OpenCV-style capture wrapper.
    """

    def __init__(self,
                 video_cap,
                 title='',
                 text='',
                 text_color=Colors.BLACK,
                 # Immutable defaults: mutable default arguments are
                 # evaluated once and shared across calls (common pitfall).
                 button_names=('OK', 'Cancel'),
                 button_colors=(Colors.YES, Colors.NO),
                 image_shape=(375, 500),
                 ):
        super().__init__(title, text, text_color, button_names, button_colors)
        self.cap = video_cap
        self.image_shape = image_shape
        video_frame = tk.Frame(self.root)
        video_frame.grid(row=1, sticky='NESW', padx=6)
        H, W = image_shape
        img = self._grab_photo()
        video_frame.grid_columnconfigure(0, weight=1)
        self.img_canvas = tk.Canvas(video_frame, width=W, height=H, bg='#ffffff')
        # Keep a reference: Tkinter does not hold on to PhotoImages.
        self.img_canvas.image = img
        self.img_canvas.grid(row=0, column=0, sticky='NS', padx=2)
        self.img_on_canvas = self.img_canvas.create_image(1, 1, anchor='nw', image=img)
        self.root.after(100, self.update_image)

    def _grab_photo(self):
        """Read one frame, convert BGR->RGB, resize, wrap as PhotoImage.

        Extracted to remove the duplication between __init__ and
        update_image.
        """
        H, W = self.image_shape
        # image coming from cv so it will be in BGR
        img = Image.fromarray(self.cap.read()[:, :, ::-1])
        img = img.resize((W, H), Image.ANTIALIAS)
        return ImageTk.PhotoImage(img)

    def update_image(self):
        """Replace the canvas image with a fresh frame and re-schedule."""
        img = self._grab_photo()
        self.img_canvas.itemconfigure(self.img_on_canvas, image=img)
        self.img_canvas.image = img
        self.root.after(100, self.update_image)
| [
"PIL.Image.fromarray",
"PIL.Image.open",
"tkinter.Canvas",
"tkinter.Tk",
"tkinter.Label",
"tkinter.Frame",
"PIL.ImageTk.PhotoImage"
] | [((639, 646), 'tkinter.Tk', 'tk.Tk', ([], {}), '()\n', (644, 646), True, 'import tkinter as tk\n'), ((833, 868), 'tkinter.Frame', 'tk.Frame', (['self.root'], {'pady': '(4)', 'padx': '(6)'}), '(self.root, pady=4, padx=6)\n', (841, 868), True, 'import tkinter as tk\n'), ((989, 1075), 'tkinter.Label', 'tk.Label', (['text_frame'], {'text': 'text', 'fg': 'text_color', 'font': "('Helvetica', '12', 'normal')"}), "(text_frame, text=text, fg=text_color, font=('Helvetica', '12',\n 'normal'))\n", (997, 1075), True, 'import tkinter as tk\n'), ((1183, 1202), 'tkinter.Frame', 'tk.Frame', (['self.root'], {}), '(self.root)\n', (1191, 1202), True, 'import tkinter as tk\n'), ((2206, 2225), 'tkinter.Frame', 'tk.Frame', (['self.root'], {}), '(self.root)\n', (2214, 2225), True, 'import tkinter as tk\n'), ((3547, 3566), 'tkinter.Frame', 'tk.Frame', (['self.root'], {}), '(self.root)\n', (3555, 3566), True, 'import tkinter as tk\n'), ((3822, 3845), 'PIL.ImageTk.PhotoImage', 'ImageTk.PhotoImage', (['img'], {}), '(img)\n', (3840, 3845), False, 'from PIL import ImageTk, Image\n'), ((3926, 3981), 'tkinter.Canvas', 'tk.Canvas', (['video_frame'], {'width': 'W', 'height': 'H', 'bg': '"""#ffffff"""'}), "(video_frame, width=W, height=H, bg='#ffffff')\n", (3935, 3981), True, 'import tkinter as tk\n'), ((4402, 4425), 'PIL.ImageTk.PhotoImage', 'ImageTk.PhotoImage', (['img'], {}), '(img)\n', (4420, 4425), False, 'from PIL import ImageTk, Image\n'), ((2711, 2734), 'PIL.ImageTk.PhotoImage', 'ImageTk.PhotoImage', (['img'], {}), '(img)\n', (2729, 2734), False, 'from PIL import ImageTk, Image\n'), ((2819, 2875), 'tkinter.Canvas', 'tk.Canvas', (['images_frame'], {'width': 'W', 'height': 'H', 'bg': '"""#ffffff"""'}), "(images_frame, width=W, height=H, bg='#ffffff')\n", (2828, 2875), True, 'import tkinter as tk\n'), ((2411, 2427), 'PIL.Image.open', 'Image.open', (['img_'], {}), '(img_)\n', (2421, 2427), False, 'from PIL import ImageTk, Image\n'), ((2558, 2591), 'PIL.Image.fromarray', 'Image.fromarray', 
(['img_[:, :, ::-1]'], {}), '(img_[:, :, ::-1])\n', (2573, 2591), False, 'from PIL import ImageTk, Image\n')] |
import dash_core_components as dcc
import dash_html_components as html
from plotly import graph_objs as go
def render():
    """Build and return the demo page layout (heading, blurb, bar chart)."""
    sf_trace = go.Bar(
        x=[1, 2, 3],
        y=[4, 1, 2],
        name='SF',
    )
    montreal_trace = {
        'x': [1, 2, 3],
        'y': [2, 4, 5],
        'type': 'bar',
        'name': u'Montréal',
    }
    figure = go.Figure(
        data=[sf_trace, montreal_trace],
        layout=go.Layout(
            title='Dash Data Visualization'
        ),
    )
    return html.Div(children=[
        html.H1(children='{{cookiecutter.project_slug}}'),
        html.Div(children='Dash: A web application framework for Python.'),
        dcc.Graph(id='example-graph', figure=figure),
    ])
| [
"plotly.graph_objs.Layout",
"plotly.graph_objs.Bar",
"dash_html_components.Div",
"dash_html_components.H1"
] | [((163, 212), 'dash_html_components.H1', 'html.H1', ([], {'children': '"""{{cookiecutter.project_slug}}"""'}), "(children='{{cookiecutter.project_slug}}')\n", (170, 212), True, 'import dash_html_components as html\n'), ((223, 289), 'dash_html_components.Div', 'html.Div', ([], {'children': '"""Dash: A web application framework for Python."""'}), "(children='Dash: A web application framework for Python.')\n", (231, 289), True, 'import dash_html_components as html\n'), ((805, 847), 'plotly.graph_objs.Layout', 'go.Layout', ([], {'title': '"""Dash Data Visualization"""'}), "(title='Dash Data Visualization')\n", (814, 847), True, 'from plotly import graph_objs as go\n'), ((416, 459), 'plotly.graph_objs.Bar', 'go.Bar', ([], {'x': '[1, 2, 3]', 'y': '[4, 1, 2]', 'name': '"""SF"""'}), "(x=[1, 2, 3], y=[4, 1, 2], name='SF')\n", (422, 459), True, 'from plotly import graph_objs as go\n')] |
# uncompyle6 version 3.7.4
# Python bytecode 3.7 (3394)
# Decompiled from: Python 3.7.9 (tags/v3.7.9:13c94747c7, Aug 17 2020, 18:58:18) [MSC v.1900 64 bit (AMD64)]
# Embedded file name: T:\InGame\Gameplay\Scripts\Server\reservation\unreserve_object_element.py
# Compiled at: 2020-09-08 20:23:33
# Size of source mod 2**32: 2327 bytes
from element_utils import CleanupType
from interactions.utils.interaction_elements import XevtTriggeredElement
from interactions.utils.interaction_liabilities import RESERVATION_LIABILITY
from interactions.utils.line_utils import WaitingLineInteractionChainLiability
from sims4.tuning.tunable import TunableTuple, OptionalTunable, TunableSimMinute
class UnreserveObjectElement(XevtTriggeredElement):
    """XEVT-triggered element that releases an interaction's object
    reservations (the standard reservation liability and the waiting-line
    chain liability) at the very beginning of the interaction.

    Decompiled code (uncompyle6); timing is locked to AT_BEGINNING and
    cleanup is NotCritical per the locked_args below.
    """
    FACTORY_TUNABLES = {'timing': TunableTuple(description="\n            The behavior should occur at the very beginning of the\n            interaction. It will not be tightly synchronized visually with\n            animation. This isn't a very common use case and would most\n            likely be used in an immediate interaction or to change hidden\n            state that is used for bookkeeping rather than visual\n            appearance.\n            ",
     offset_time=OptionalTunable(description='\n            If enabled, the interaction will wait this amount of time\n            after the beginning before running the element.\n\n            Only use this if absolutely necessary. Better alternatives\n            include using xevts, time based conditional action with\n            loot ops, and using outcomes.\n            ',
      tunable=TunableSimMinute(description='The interaction will wait this amount of time after the beginning before running the element', default=2),
      enabled_by_default=True),
     locked_args={'timing':XevtTriggeredElement.AT_BEGINNING,
     'criticality':CleanupType.NotCritical, 'xevt_id':None, 'supports_failsafe':None})}

    def _do_behavior(self):
        # Drop both liabilities so the reserved object becomes available
        # to other Sims immediately; always reports success.
        self.interaction.remove_liability(RESERVATION_LIABILITY)
        self.interaction.remove_liability(WaitingLineInteractionChainLiability.LIABILITY_TOKEN)
        return True | [
"sims4.tuning.tunable.TunableSimMinute"
] | [((1641, 1786), 'sims4.tuning.tunable.TunableSimMinute', 'TunableSimMinute', ([], {'description': '"""The interaction will wait this amount of time after the beginning before running the element"""', 'default': '(2)'}), "(description=\n 'The interaction will wait this amount of time after the beginning before running the element'\n , default=2)\n", (1657, 1786), False, 'from sims4.tuning.tunable import TunableTuple, OptionalTunable, TunableSimMinute\n')] |
# coding=utf-8
# ------------------------------------------------------------------------
# Created by <NAME> on 2013-03-20
# Copyright (c) 2013 <NAME>. All rights reserved.
# ------------------------------------------------------------------------
from __future__ import absolute_import
import json
import logging
try:
from urllib.parse import urlparse
except ImportError: # py2
from urlparse import urlparse
from django.conf import settings
from django.contrib import admin
from django.contrib.admin.views.decorators import staff_member_required
try:
from django.urls import reverse
except ImportError: # dj 1.x
from django.core.urlresolvers import reverse
from django.db import models
from django.http import HttpResponse, HttpResponseNotFound
from django.template.loader import render_to_string
from django.utils.decorators import method_decorator
from django.utils.translation import ugettext_lazy as _
from feincms.admin.item_editor import FeinCMSInline
from feincms.module.medialibrary.models import MediaFile
from feincms.module.medialibrary.thumbnail import admin_thumbnail
from .admin import MediaGalleryAdminBase, MediaGalleryContentFilesAdminInlineBase
# ------------------------------------------------------------------------
# Django app label, reused as the logger name for this module.
app_label = 'mediagallery'
logger = logging.getLogger(app_label)
# ------------------------------------------------------------------------
class MediaGalleryContentAdminInline(FeinCMSInline):
    # FeinCMS item-editor inline for gallery content; the custom template
    # renders the content block compactly in the page editor.
    template = "admin/modest_content_inline.html"
class MediaGalleryContent(models.Model):
    """Abstract FeinCMS content type rendering a gallery of media files.

    Concrete wiring (layout choices, the through model holding the
    items, and the admin classes) happens in ``initialize_type``, which
    FeinCMS calls when the content type is registered.
    """

    class Meta:
        abstract = True
        verbose_name = _('media gallery')
        verbose_name_plural = _('media galleries')

    # Prefix for the template lookup performed in render().
    template_prefix = 'content/modest/'

    LAYOUT_CHOICES = (('', _('default list')), )

    # Ordering applied to the gallery items in items():
    # '' ascending, '-' descending, '?' random.
    ORDER_CHOICES = (('', _('ascending')),
                     ('-', _('descending')),
                     ('?', _('random')))

    CLICK_CHOICES = (('', _('do nothing')),
                     ('R', _('redirect to page')),
                     ('Z', _('zoom image')))

    # ----- django fields ----- #
    title = models.CharField(_('title'), max_length=80, blank=True)
    options = models.CharField(_('options'), max_length=80, blank=True)
    order = models.CharField(_('order'), max_length=1, blank=True,
                             choices=ORDER_CHOICES)
    limit = models.SmallIntegerField(_('limit'), blank=True, null=True,
                             help_text=_('show how many items, leave empty for no limit')
                             )
    click = models.CharField(_('on click'), max_length=1, blank=True,
                             choices=CLICK_CHOICES)

    # Implement admin_urlname templatetag protocol
    @property
    def app_label(self):
        return self._meta.app_label

    # Implement admin_urlname templatetag protocol
    @property
    def module_name(self):
        return self.__class__.__name__.lower()

    @classmethod
    def initialize_type(cls,
                        LAYOUT_CHOICES=None,
                        DROP_ACCEPTOR=None,
                        EXTRA_CONTEXT=None,
                        MEDIA_DEFS=None,
                        ITEM_CLASS=MediaFile):
        """FeinCMS hook: attach the layout field, the per-gallery item
        model and the admin classes to the concrete content type ``cls``."""
        if LAYOUT_CHOICES is None:
            LAYOUT_CHOICES = (('default', _('default gallery')), )
        cls.add_to_class('layout', models.CharField(_('layout'),
                         choices=LAYOUT_CHOICES, max_length=15,
                         default=LAYOUT_CHOICES[0][0],)
                         )
        cls.extra_context = EXTRA_CONTEXT
        cls.media_defs = MEDIA_DEFS
        cls.feincms_item_editor_inline = MediaGalleryContentAdminInline

        # Through model: one row per media file shown in this gallery.
        class MediaGalleryContentFiles(models.Model):
            class Meta:
                app_label = cls._meta.app_label
                unique_together = (('gallery', 'mediafile'), )
                verbose_name = _('media gallery content file')
                verbose_name_plural = _('media gallery content files')
                ordering = ('ordering',)
            gallery = models.ForeignKey(cls, related_name="item_set",
                                      on_delete=models.CASCADE)
            mediafile = models.ForeignKey(ITEM_CLASS, related_name="+",
                                      on_delete=models.CASCADE)
            related_page = models.ForeignKey(cls._feincms_content_class,
                                      verbose_name=_('related page'),
                                      blank=True, null=True,
                                      related_name="+", on_delete=models.SET_NULL)
            ordering = models.IntegerField(default=0)
            title = models.CharField(_('title'), blank=True, max_length=80)
            text = models.TextField(_('text'), blank=True)

            def __unicode__(self):
                return u'Media Gallery %s - %s' % (self.gallery, self.mediafile)

            def caption(self):
                # Per-item title wins over the media file's own caption.
                if self.title:
                    return self.title
                if self.mediafile and self.mediafile.translation:
                    return self.mediafile.translation.caption
                return ""

            def description(self):
                # Per-item text wins over the media file's own description.
                if self.text:
                    return self.text
                if self.mediafile and self.mediafile.translation:
                    return self.mediafile.translation.description
                return ""

            def copyright(self):
                return self.mediafile.copyright

            def file(self):
                return self.mediafile.file

        if DROP_ACCEPTOR is None:
            DROP_ACCEPTOR = MediaGalleryDropAcceptor()

        class MediaGalleryContentFilesAdminInline(MediaGalleryContentFilesAdminInlineBase):
            model = MediaGalleryContentFiles

        class MediaGalleryAdmin(MediaGalleryAdminBase):
            inlines = (MediaGalleryContentFilesAdminInline,)

        MediaGalleryAdmin.drop_acceptor = DROP_ACCEPTOR
        admin.site.register(cls, MediaGalleryAdmin)

    @property
    def media(self):
        # Per-layout form media; None when no entry for this layout.
        # NOTE(review): assumes media_defs was set to a mapping in
        # initialize_type -- a None MEDIA_DEFS would raise here; confirm.
        return self.media_defs.get(self.layout, None)

    def __unicode__(self):
        return self.title

    def items(self):
        """Return the gallery items, ordered and limited per this instance."""
        qs = self.item_set
        # Map the order flag to a queryset ordering; default ascending.
        m = {'-': '-ordering', '?': '?'}
        qs = qs.order_by(m.get(self.order, 'ordering'))
        if self.limit:
            qs = qs[:self.limit]
        return qs.all()

    def render(self, request, **kwargs):
        """Render the gallery using a layout-specific template, falling
        back to the generic content template."""
        ctx = {'have_icon_files': ('pdf', 'zip')}
        if self.extra_context is not None:
            if callable(self.extra_context):
                ctx = self.extra_context()
            else:
                # NOTE(review): this branch calls a non-callable
                # extra_context; possibly dict(self.extra_context) was
                # intended -- confirm.
                ctx = dict(self.extra_context())
        ctx.update({'feincms_page': self.parent, 'object': self, 'gallery': self})
        return render_to_string((
            '%scontent-%s.html' % (self.template_prefix, self.layout),
            '%scontent.html' % (self.template_prefix),
        ), ctx, request=request)

    # Accessor for admin_url templatetag
    def parent_opts(self):
        return self.parent._meta

    def opts(self):
        return self._meta
# ------------------------------------------------------------------------
class MediaGalleryDropAcceptor(object):
    """Handles AJAX drops onto the gallery admin.

    Resolves the dropped URL to a MediaFile and returns its metadata
    (id, type, caption, thumbnail URL) as a JSON response; status 404
    when nothing matched, 500 on unexpected errors.
    """

    def __init__(self, *args, **kwargs):
        self.mediaurl = urlparse(settings.MEDIA_URL)
        self.mediachange_url = None # deferred until init is done

    def reverse_url(self, request, url, ctx):
        """
        This takes what url the user dropped onto the drop zone and
        tries to intuit what she meant. Currently implements dropping
        a MediaLibrary item (see below, `mediafile_reverse_url`), but
        could be extended to handle Pages or Products from a catalogue.

        This method takes the url and fills out the context dictionary
        as it seems fit.

        Override to this to customize.
        """
        return self.mediafile_reverse_url(url, ctx)

    @method_decorator(staff_member_required)
    def __call__(self, request):
        # NOTE(review): request.is_ajax() was deprecated in Django 3.1
        # and removed in 4.0 -- confirm the targeted Django version.
        if not request.is_ajax():
            return HttpResponseNotFound()

        # Delayed init, url dict is not ready in __init__
        if self.mediachange_url is None:
            self.mediachange_url = reverse("admin:medialibrary_mediafile_change", args=(0,)).replace('0/', '')

        out = {'status': 404}
        # NOTE(review): request.REQUEST was removed in Django 1.9; on
        # newer Django use request.GET/POST explicitly.
        inurl = request.REQUEST.get('url')
        try:
            url = urlparse(inurl)
            # Security check: only allow urls that come from this site
            if self.is_valid_drop_url(request, url):
                self.reverse_url(request, url, out)
        except Exception as e:
            logger.exception("%s raised exception for url \"%s\": %s", self.__class__.__name__, inurl, e)
            out['status'] = 500

        return self.build_response(out)

    def build_response(self, ctx):
        # Serialize the context to JSON and mirror ctx['status'] in the
        # HTTP status code.
        r = HttpResponse(json.dumps(ctx), content_type='application/json')
        r.status_code = ctx['status']
        return r

    def is_valid_drop_url(self, request, url):
        # Same-origin check: the dropped URL's host must match this request's.
        return url.netloc == request.META.get('HTTP_HOST', None)

    def mediafile_reverse_url(self, url, ctx):
        """
        Tries to intuit what the user dropped onto us. This might be
        a link to a MediaFile in case she dragged the "Title" column
        over, or it might be a link to a file in the media library
        if she dragged the image itself.
        """
        mediafile = None
        try:
            if url.path.startswith(self.mediachange_url):
                # Dropped a MediaFile url
                rest = url.path[len(self.mediachange_url):-1]
                mediafile = MediaFile.objects.get(pk=int(rest))
            elif url.path.startswith(self.mediaurl.path):
                # Dropped an image url (from media library)
                file_path = url.path[len(self.mediaurl.path):]
                mediafile = MediaFile.objects.get(file=file_path)
        except MediaFile.DoesNotExist:
            pass
        else:
            if mediafile is not None:
                logger.debug("%s converted \"%s\" into %s(pk=%d)",
                    self.__class__.__name__, url.path, mediafile.__class__.__name__, mediafile.pk)
                image = admin_thumbnail(mediafile, dimensions="150x100")
                ctx['mediafile_id'] = mediafile.id
                ctx['mediafile_type'] = mediafile.type
                # NOTE(review): ``unicode`` exists only on Python 2; this
                # raises NameError under Python 3 -- confirm py2-only use.
                ctx['mediafile_caption'] = unicode(mediafile)
                ctx['mediafile_url'] = image
                ctx['status'] = 200
# ------------------------------------------------------------------------
| [
"logging.getLogger",
"django.utils.translation.ugettext_lazy",
"django.db.models.ForeignKey",
"django.db.models.IntegerField",
"feincms.module.medialibrary.thumbnail.admin_thumbnail",
"django.contrib.admin.site.register",
"json.dumps",
"django.utils.decorators.method_decorator",
"django.core.urlreso... | [((1303, 1331), 'logging.getLogger', 'logging.getLogger', (['app_label'], {}), '(app_label)\n', (1320, 1331), False, 'import logging\n'), ((7831, 7870), 'django.utils.decorators.method_decorator', 'method_decorator', (['staff_member_required'], {}), '(staff_member_required)\n', (7847, 7870), False, 'from django.utils.decorators import method_decorator\n'), ((1617, 1635), 'django.utils.translation.ugettext_lazy', '_', (['"""media gallery"""'], {}), "('media gallery')\n", (1618, 1635), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((1666, 1686), 'django.utils.translation.ugettext_lazy', '_', (['"""media galleries"""'], {}), "('media galleries')\n", (1667, 1686), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((2111, 2121), 'django.utils.translation.ugettext_lazy', '_', (['"""title"""'], {}), "('title')\n", (2112, 2121), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((2181, 2193), 'django.utils.translation.ugettext_lazy', '_', (['"""options"""'], {}), "('options')\n", (2182, 2193), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((2252, 2262), 'django.utils.translation.ugettext_lazy', '_', (['"""order"""'], {}), "('order')\n", (2253, 2262), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((2370, 2380), 'django.utils.translation.ugettext_lazy', '_', (['"""limit"""'], {}), "('limit')\n", (2371, 2380), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((2521, 2534), 'django.utils.translation.ugettext_lazy', '_', (['"""on click"""'], {}), "('on click')\n", (2522, 2534), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((5872, 5915), 'django.contrib.admin.site.register', 'admin.site.register', (['cls', 'MediaGalleryAdmin'], {}), '(cls, MediaGalleryAdmin)\n', (5891, 5915), False, 'from django.contrib import admin\n'), ((6680, 6824), 'django.template.loader.render_to_string', 'render_to_string', 
(["('%scontent-%s.html' % (self.template_prefix, self.layout), \n '%scontent.html' % self.template_prefix)", 'ctx'], {'request': 'request'}), "(('%scontent-%s.html' % (self.template_prefix, self.layout),\n '%scontent.html' % self.template_prefix), ctx, request=request)\n", (6696, 6824), False, 'from django.template.loader import render_to_string\n'), ((7189, 7217), 'urlparse.urlparse', 'urlparse', (['settings.MEDIA_URL'], {}), '(settings.MEDIA_URL)\n', (7197, 7217), False, 'from urlparse import urlparse\n'), ((1756, 1773), 'django.utils.translation.ugettext_lazy', '_', (['"""default list"""'], {}), "('default list')\n", (1757, 1773), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((1804, 1818), 'django.utils.translation.ugettext_lazy', '_', (['"""ascending"""'], {}), "('ascending')\n", (1805, 1818), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((1848, 1863), 'django.utils.translation.ugettext_lazy', '_', (['"""descending"""'], {}), "('descending')\n", (1849, 1863), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((1893, 1904), 'django.utils.translation.ugettext_lazy', '_', (['"""random"""'], {}), "('random')\n", (1894, 1904), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((1933, 1948), 'django.utils.translation.ugettext_lazy', '_', (['"""do nothing"""'], {}), "('do nothing')\n", (1934, 1948), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((1978, 1999), 'django.utils.translation.ugettext_lazy', '_', (['"""redirect to page"""'], {}), "('redirect to page')\n", (1979, 1999), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((2029, 2044), 'django.utils.translation.ugettext_lazy', '_', (['"""zoom image"""'], {}), "('zoom image')\n", (2030, 2044), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((2435, 2485), 'django.utils.translation.ugettext_lazy', '_', (['"""show how many items, leave empty for no limit"""'], {}), "('show 
how many items, leave empty for no limit')\n", (2436, 2485), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((3978, 4051), 'django.db.models.ForeignKey', 'models.ForeignKey', (['cls'], {'related_name': '"""item_set"""', 'on_delete': 'models.CASCADE'}), "(cls, related_name='item_set', on_delete=models.CASCADE)\n", (3995, 4051), False, 'from django.db import models\n'), ((4108, 4181), 'django.db.models.ForeignKey', 'models.ForeignKey', (['ITEM_CLASS'], {'related_name': '"""+"""', 'on_delete': 'models.CASCADE'}), "(ITEM_CLASS, related_name='+', on_delete=models.CASCADE)\n", (4125, 4181), False, 'from django.db import models\n'), ((4507, 4537), 'django.db.models.IntegerField', 'models.IntegerField', ([], {'default': '(0)'}), '(default=0)\n', (4526, 4537), False, 'from django.db import models\n'), ((7957, 7979), 'django.http.HttpResponseNotFound', 'HttpResponseNotFound', ([], {}), '()\n', (7977, 7979), False, 'from django.http import HttpResponse, HttpResponseNotFound\n'), ((8296, 8311), 'urlparse.urlparse', 'urlparse', (['inurl'], {}), '(inurl)\n', (8304, 8311), False, 'from urlparse import urlparse\n'), ((8759, 8774), 'json.dumps', 'json.dumps', (['ctx'], {}), '(ctx)\n', (8769, 8774), False, 'import json\n'), ((3295, 3306), 'django.utils.translation.ugettext_lazy', '_', (['"""layout"""'], {}), "('layout')\n", (3296, 3306), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((3811, 3842), 'django.utils.translation.ugettext_lazy', '_', (['"""media gallery content file"""'], {}), "('media gallery content file')\n", (3812, 3842), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((3881, 3913), 'django.utils.translation.ugettext_lazy', '_', (['"""media gallery content files"""'], {}), "('media gallery content files')\n", (3882, 3913), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((4575, 4585), 'django.utils.translation.ugettext_lazy', '_', (['"""title"""'], {}), "('title')\n", (4576, 4585), 
True, 'from django.utils.translation import ugettext_lazy as _\n'), ((4650, 4659), 'django.utils.translation.ugettext_lazy', '_', (['"""text"""'], {}), "('text')\n", (4651, 4659), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((10115, 10163), 'feincms.module.medialibrary.thumbnail.admin_thumbnail', 'admin_thumbnail', (['mediafile'], {'dimensions': '"""150x100"""'}), "(mediafile, dimensions='150x100')\n", (10130, 10163), False, 'from feincms.module.medialibrary.thumbnail import admin_thumbnail\n'), ((3217, 3237), 'django.utils.translation.ugettext_lazy', '_', (['"""default gallery"""'], {}), "('default gallery')\n", (3218, 3237), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((4332, 4349), 'django.utils.translation.ugettext_lazy', '_', (['"""related page"""'], {}), "('related page')\n", (4333, 4349), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((8115, 8172), 'django.core.urlresolvers.reverse', 'reverse', (['"""admin:medialibrary_mediafile_change"""'], {'args': '(0,)'}), "('admin:medialibrary_mediafile_change', args=(0,))\n", (8122, 8172), False, 'from django.core.urlresolvers import reverse\n'), ((9769, 9806), 'feincms.module.medialibrary.models.MediaFile.objects.get', 'MediaFile.objects.get', ([], {'file': 'file_path'}), '(file=file_path)\n', (9790, 9806), False, 'from feincms.module.medialibrary.models import MediaFile\n')] |
"""
Author: ~wy (https://github.com/wy)
Date: 25/01/2019
Description:
A simulator for the Three Dice game (Sic Bo) played in Macau casinos and elsewhere.
Important simplification is that all three dice are treated equally
which makes the maths a bit easier.
Key concepts:
Criteria - does a dice configuration match a bet type (e.g. smalls, or Total of 4)
Payoff - what is the pay off for a particular dice configuration
Probability - what is the probability a set of dice configurations will occur?
Odds - what is the expected outcome given the Probability and Payoffs
Gambler's ruin / paradise - a slight variant on the commonly referred to set-up
"""
import random
# small is 4-10 excluding triples
small = lambda x: (4 <= x[0] + x[1] + x[2] <= 10 and not (x[0] == x[1] and x[1] == x[2]))
# large is 11-17 excluding triples
large = lambda x: (11 <= x[0] + x[1] + x[2] <= 17 and not (x[0] == x[1] and x[1] == x[2]))
staticpayoffs = {
4: 50,
5: 30,
6: 17,
7: 12,
8: 8,
9: 6,
10: 6
}
def payoff410(x):
total = x[0] + x[1] + x[2]
return (staticpayoffs[total] - 6) / 7.0
def payoff48(x):
total = x[0] + x[1] + x[2]
return (staticpayoffs[total] - 4) / 5.0
def payoffsmall14(x):
total = x[0] + x[1] + x[2]
if total < 11:
return 0
else:
return 5.5
payoffs = {
"small": lambda _: 1,
"large": lambda _: 1,
"4": lambda _: 60,
"5": lambda _: 30,
"6": lambda _: 17,
"7": lambda _: 12,
"8": lambda _: 8,
"9": lambda _: 6,
"10": lambda _: 6,
"11": lambda _: 6,
"12": lambda _: 6,
"13": lambda _: 8,
"14": lambda _: 12,
"15": lambda _: 17,
"16": lambda _: 30,
"17": lambda _: 60,
"one": lambda x: len(list(filter(lambda y: y == 1, list(x)))),
"two": lambda x: len(list(filter(lambda y: y == 2, list(x)))),
"three": lambda x: len(list(filter(lambda y: y == 3, list(x)))),
"four": lambda x: len(list(filter(lambda y: y == 4, list(x)))),
"five": lambda x: len(list(filter(lambda y: y == 5, list(x)))),
"six": lambda x: len(list(filter(lambda y: y == 6, list(x)))),
"4-10": lambda x: payoff410(x),
"4-8": lambda x: payoff48(x),
"small14": lambda x: payoffsmall14(x),
}
bet_types = {
"small": small,
"large": large,
"4-10": lambda x: (4 <= x[0] + x[1] + x[2] <= 10),
"4-8": lambda x: (4 <= x[0] + x[1] + x[2] <= 8),
"small14": lambda x: ((x[0] + x[1] + x[2] == 14) or small(x)),
"4": lambda x: (x[0] + x[1] + x[2] == 4),
"5": lambda x: (x[0] + x[1] + x[2] == 5),
"6": lambda x: (x[0] + x[1] + x[2] == 6),
"7": lambda x: (x[0] + x[1] + x[2] == 7),
"8": lambda x: (x[0] + x[1] + x[2] == 8),
"9": lambda x: (x[0] + x[1] + x[2] == 9),
"10": lambda x: (x[0] + x[1] + x[2] == 10),
"11": lambda x: (x[0] + x[1] + x[2] == 11),
"12": lambda x: (x[0] + x[1] + x[2] == 12),
"13": lambda x: (x[0] + x[1] + x[2] == 13),
"14": lambda x: (x[0] + x[1] + x[2] == 14),
"15": lambda x: (x[0] + x[1] + x[2] == 15),
"16": lambda x: (x[0] + x[1] + x[2] == 16),
"17": lambda x: (x[0] + x[1] + x[2] == 17),
"one": lambda x: (x[0] == 1 or x[1] == 1 or x[2] == 1),
"two": lambda x: (x[0] == 2 or x[1] == 2 or x[2] == 2),
"three": lambda x: (x[0] == 3 or x[1] == 3 or x[2] == 3),
"four": lambda x: (x[0] == 4 or x[1] == 4 or x[2] == 4),
"five": lambda x: (x[0] == 5 or x[1] == 5 or x[2] == 5),
"six": lambda x: (x[0] == 6 or x[1] == 6 or x[2] == 6),
}
def rolldice():
# Rolls dice simulation
results = [random.randint(1, 6), random.randint(1, 6), random.randint(1, 6)]
results.sort()
return tuple(results)
def calculateodds(bet_type, iterations=100000):
criteria = bet_types[bet_type]
payoff = payoffs[bet_type]
sum_total = 0
for i in range(iterations):
r = rolldice()
if criteria(r):
sum_total += payoff(r)
else:
sum_total += -1
return sum_total / float(iterations)
def calculateprobabilities(bet_type, iterations=1000000):
criteria = bet_types[bet_type]
sum_total = 0
for i in range(iterations):
r = rolldice()
if criteria(r):
sum_total += 1
return sum_total / float(iterations)
def testOdds():
for k in bet_types:
print("{}: {}".format(k, calculateodds(k)))
def testProbabilities():
for k in bet_types:
print("{}: {}".format(k, calculateprobabilities(k)))
def gamblersruin(bet_type, startingcapital=1000, starting_bet_size=10,
win_above=1500, iterations=3000):
# make successive bets of fixed size until either bankrupt or hit win threshold
wins = 0
criteria = bet_types[bet_type]
payoff = payoffs[bet_type]
for i in range(iterations):
capital = startingcapital
bet_size = starting_bet_size
while 0 < capital < win_above:
r = rolldice()
if criteria(r):
capital += payoff(r) * bet_size
# print(capital, "win", bet_size)
bet_size = starting_bet_size
else:
capital -= bet_size
# print(capital, "loss", bet_size)
bet_size = min(2 * bet_size, capital)
if capital >= win_above:
wins += 1
# print(wins / float(i+1))
return wins / float(iterations)
def gamblersparadise(bet_type, startingcapital=1000):
for i in range(1000, 12000, 10):
wp = gamblersruin(bet_type, startingcapital=startingcapital, win_above=i)
er = (wp * (i - startingcapital)) - (startingcapital * (1 - wp))
print(i, wp, er)
# gamblersparadise("small")
# Example tests
testOdds()
k = "4-10"
print("{}: {}".format(k, calculateodds(k)))
print("{}: {}".format(k, calculateprobabilities(k)))
| [
"random.randint"
] | [((3553, 3573), 'random.randint', 'random.randint', (['(1)', '(6)'], {}), '(1, 6)\n', (3567, 3573), False, 'import random\n'), ((3575, 3595), 'random.randint', 'random.randint', (['(1)', '(6)'], {}), '(1, 6)\n', (3589, 3595), False, 'import random\n'), ((3597, 3617), 'random.randint', 'random.randint', (['(1)', '(6)'], {}), '(1, 6)\n', (3611, 3617), False, 'import random\n')] |
#!/usr/bin/python3
"""
Module for querying weather forecast data from OpenWeatherMap and storage in database
"""
import json
import requests
import datetime
# Set up logging
import logging
import logging_plus
logger = logging_plus.getLogger(__name__)
logger.addHandler(logging.NullHandler())
# Defaults
# Current / hourly forecast
cfc = {
"timestamp" : None,
"temperature" : None,
"humidity" : None,
"pressure" : None,
"clouds" : None,
"uvi" : None,
"visibility" : None,
"windspeed" : None,
"winddir" : None,
"rain" : None,
"snow" : None,
"description" : None,
"icon" : None,
"alerts" : 0
}
# Daily forecast
dfc = {
"date": None,
"sunrise": None,
"sunset": None,
"temperature_m": None,
"temperature_d": None,
"temperature_e": None,
"temperature_n": None,
"temperature_min": None,
"temperature_max": None,
"humidity" : None,
"pressure" : None,
"windspeed" : None,
"winddir" : None,
"clouds" : None,
"uvi" : None,
"pop": None,
"rain" : None,
"snow" : None,
"description" : None,
"icon" : None,
"alerts" : 0
}
def getForecast(url, payload):
"""
Get weather forecast data from openweb service
"""
fcr = requests.get(url, params=payload)
if fcr.status_code != requests.codes.ok:
fcr.raise_for_status()
try:
fcrj = fcr.json()
except Exception as e:
logger.error("Error parsing response: %s", e)
fcrj = None
logger.error("Request URL : %s", url)
logger.error("Request payload: %s", payload)
logger.error("Response : %s", fcr.text)
return fcrj
def mapForecast(fc, ts):
"""
Map forecast data to standard structure
"""
global cfc
# Map current forecast
curfc = cfc.copy()
curfc["timestamp"] = ts
curfc["temperature"] = fc["current"]["temp"]
curfc["humidity"] = fc["current"]["humidity"]
curfc["pressure"] = fc["current"]["pressure"]
curfc["clouds"] = fc["current"]["clouds"]
curfc["uvi"] = fc["current"]["uvi"]
curfc["visibility"] = fc["current"]["visibility"]
curfc["windspeed"] = fc["current"]["wind_speed"]
curfc["winddir"] = fc["current"]["wind_deg"]
if "rain" in fc["current"]:
curfc["rain"] = fc["current"]["rain"]["1h"]
if "snow" in fc["current"]:
curfc["snow"] = fc["current"]["snow"]["1h"]
if len(fc["current"]["weather"]) > 0:
w = fc["current"]["weather"][0]
curfc["description"] = w["description"]
curfc["icon"] = w["icon"]
curfc["alerts"] = getAlerts(fc, fc["current"]["dt"])
# Map hourly forecast
hourlyfc = list()
if len(fc["hourly"]) > 0:
for i in range(0, len(fc["hourly"])):
hourfc = cfc.copy()
hfc = fc["hourly"][i]
hourfc["timestamp"] = datetime.datetime.fromtimestamp(hfc["dt"]).strftime("%Y-%m-%d %H:%M:%S")
hourfc["temperature"] = hfc["temp"]
hourfc["humidity"] = hfc["humidity"]
hourfc["pressure"] = hfc["pressure"]
hourfc["clouds"] = hfc["clouds"]
hourfc["uvi"] = hfc["uvi"]
hourfc["visibility"] = hfc["visibility"]
hourfc["windspeed"] = hfc["wind_speed"]
hourfc["winddir"] = hfc["wind_deg"]
if "rain" in hfc:
hourfc["rain"] = hfc["rain"]["1h"]
if "snow" in hfc:
hourfc["snow"] = hfc["snow"]["1h"]
if len(hfc["weather"]) > 0:
w = hfc["weather"][0]
hourfc["description"] = w["description"]
hourfc["icon"] = w["icon"]
hourfc["alerts"] = getAlerts(fc, hfc["dt"])
hourlyfc.append(hourfc)
# Map daily forecast
dailyfc = list()
if len(fc["daily"]) > 0:
for i in range(0, len(fc["daily"])):
dayfc = dfc.copy()
dyfc = fc["daily"][i]
dayfc["date"] = datetime.datetime.fromtimestamp(dyfc["dt"]).strftime("%Y-%m-%d")
dayfc["sunrise"] = datetime.datetime.fromtimestamp(dyfc["sunrise"]).strftime("%H:%M:%S")
dayfc["sunset"] = datetime.datetime.fromtimestamp(dyfc["sunset"]).strftime("%H:%M:%S")
dyfct = dyfc["temp"]
dayfc["temperature_m"] = dyfct["morn"]
dayfc["temperature_d"] = dyfct["day"]
dayfc["temperature_e"] = dyfct["eve"]
dayfc["temperature_n"] = dyfct["night"]
dayfc["temperature_min"] = dyfct["min"]
dayfc["temperature_max"] = dyfct["max"]
dayfc["humidity"] = dyfc["humidity"]
dayfc["pressure"] = dyfc["pressure"]
dayfc["windspeed"] = dyfc["wind_speed"]
dayfc["winddir"] = dyfc["wind_deg"]
dayfc["clouds"] = dyfc["clouds"]
dayfc["uvi"] = dyfc["uvi"]
dayfc["pop"] = dyfc["pop"]
if "rain" in dyfc:
dayfc["rain"] = dyfc["rain"]
if "snow" in dyfc:
dayfc["snow"] = dyfc["snow"]
if len(dyfc["weather"]) > 0:
w = dyfc["weather"][0]
dayfc["description"] = w["description"]
dayfc["icon"] = w["icon"]
dayfc["alerts"] = getAlerts(fc, dyfc["dt"])
dailyfc.append(dayfc)
return [curfc, hourlyfc, dailyfc]
def getAlerts(fc, dt):
"""
Count the number of alerts for a given date/time (dt)
"""
res = 0
if "alerts" in fc:
if len(fc["alerts"]) > 0:
for i in range(0, len(fc["alerts"])):
alert = fc["alerts"][i]
if dt >= alert["start"] and dt <= alert["end"]:
res = res + 1
return res
def forecastToDb(fcData, cfg, curTs, curDate, dbCon, dbCur, servRun):
"""
Store forecast data in database
"""
#
# Store current and hourly forecast
#
tblHourly = cfg["forecast"]["forecastTables"]["hourlyForecast"]
# Clean up current / hourly forecast
# Retain forecast for the next fcRetainHours hours
fcRetainHours = cfg["forecast"]["forecastRetain"]
t_lastTs = getLatestForecast(tblHourly, dbCon, dbCur, servRun)
if t_lastTs:
t_lastTs = t_lastTs + datetime.timedelta(minutes=1)
t_curTs = datetime.datetime.strptime(curTs, "%Y-%m-%d %H:%M:%S")
t_limTs = t_curTs + datetime.timedelta(hours=fcRetainHours)
if t_lastTs < t_limTs:
t_limTs = t_lastTs
limTs = t_limTs.strftime("%Y-%m-%d %H:%M:%S")
if limTs < curTs:
limTs = curTs
else:
limTs = curTs
forecastToDbHourlyCleanup(tblHourly, limTs, dbCon, dbCur, servRun)
# Insert Current forecast
curfc = fcData[0]
forecastToDbCurrent(curfc, tblHourly, dbCon, dbCur, servRun)
# Insert hourly forecast
hourfc = fcData[1]
if len(hourfc) > 0:
for i in range(0, len(hourfc)):
curfc = hourfc[i]
if curfc["timestamp"] >= limTs:
forecastToDbHourly(curfc, tblHourly, dbCon, dbCur, servRun)
elif (curfc["timestamp"] >= curTs) and (curTs < limTs):
forecastToDbCurrent(curfc, tblHourly, dbCon, dbCur, servRun)
#
# Store daily forecast
#
tblDaily = cfg["forecast"]["forecastTables"]["dailyForecast"]
# Clean up daily forecast
forecastToDbDailyCleanup(tblDaily, curDate, dbCon, dbCur, servRun)
# Insert daily forecast
dayfc = fcData[2]
if len(dayfc) > 0:
for i in range(0, len(dayfc)):
curfc = dayfc[i]
if curfc["date"] >= curDate:
forecastToDbDaily(curfc, tblDaily, dbCon, dbCur, servRun)
def getLatestForecast(tbl, dbCon, dbCur, servRun):
"""
Return the timestamp for the latest forecast.
"""
# Prepare statement
stmt = "SELECT timestamp FROM " + tbl + " ORDER BY TIMESTAMP DESC LIMIT 0,1"
logger.debug(stmt)
dbCur.execute(stmt)
res = None
for (timestamp) in dbCur:
res = timestamp[0]
return res
def forecastToDbHourlyCleanup(tbl, ts, dbCon, dbCur, servRun):
"""
Remove entries for later timestamps.
This is necessary in order to allow later insertion of forecast entries
"""
# Prepare statement
stmt = "DELETE FROM " + tbl + " WHERE timestamp >= '" + ts + "'"
logger.debug(stmt)
dbCur.execute(stmt)
dbCon.commit()
def forecastToDbDailyCleanup(tbl, curDate, dbCon, dbCur, servRun):
"""
Remove entries for later timestamps.
This is necessary in order to allow later insertion of forecast entries
"""
# Prepare statement
stmt = "DELETE FROM " + tbl + " WHERE date >= '" + curDate + "'"
logger.debug(stmt)
dbCur.execute(stmt)
dbCon.commit()
def forecastToDbCurrent(fc, tbl, dbCon, dbCur, servRun):
"""
Store current forecast data in database
"""
global logger
# Prepare statement
ins1 = "INSERT INTO " + tbl + " (timestamp"
ins2 = "VALUES ('" + fc["timestamp"] + "'"
ins3 = " ON DUPLICATE KEY UPDATE "
if fc["temperature"] != None:
ins1 = ins1 + ", temperature"
ins2 = ins2 + ", " + "{:+.1f}".format(fc["temperature"])
ins3 = ins3 + "temperature="
ins3 = ins3 + "{:+.1f}".format(fc["temperature"])
if fc["humidity"] != None:
ins1 = ins1 + ", humidity"
ins2 = ins2 + ", " + "{:+.1f}".format(fc["humidity"])
ins3 = ins3 + ", humidity="
ins3 = ins3 + "{:+.1f}".format(fc["humidity"])
if fc["pressure"] != None:
ins1 = ins1 + ", pressure"
ins2 = ins2 + ", " + "{:+.1f}".format(fc["pressure"])
ins3 = ins3 + ", pressure="
ins3 = ins3 + "{:+.1f}".format(fc["pressure"])
if fc["clouds"] != None:
ins1 = ins1 + ", clouds"
ins2 = ins2 + ", " + "{:+.1f}".format(fc["clouds"])
ins3 = ins3 + ", clouds="
ins3 = ins3 + "{:+.1f}".format(fc["clouds"])
if fc["uvi"] != None:
ins1 = ins1 + ", uvi"
ins2 = ins2 + ", " + "{:+.2f}".format(fc["uvi"])
ins3 = ins3 + ", uvi="
ins3 = ins3 + "{:+.2f}".format(fc["uvi"])
if fc["visibility"] != None:
ins1 = ins1 + ", visibility"
ins2 = ins2 + ", " + "{:+.1f}".format(fc["visibility"])
ins3 = ins3 + ", visibility="
ins3 = ins3 + "{:+.1f}".format(fc["visibility"])
if fc["windspeed"] != None:
ins1 = ins1 + ", windspeed"
ins2 = ins2 + ", " + "{:+.1f}".format(fc["windspeed"])
ins3 = ins3 + ", windspeed="
ins3 = ins3 + "{:+.1f}".format(fc["windspeed"])
if fc["winddir"] != None:
ins1 = ins1 + ", winddir"
ins2 = ins2 + ", " + "{:+.1f}".format(fc["winddir"])
ins3 = ins3 + ", winddir="
ins3 = ins3 + "{:+.1f}".format(fc["winddir"])
if fc["rain"] != None:
ins1 = ins1 + ", rain"
ins2 = ins2 + ", " + "{:+.2f}".format(fc["rain"])
ins3 = ins3 + ", rain="
ins3 = ins3 + "{:+.2f}".format(fc["rain"])
if fc["snow"] != None:
ins1 = ins1 + ", snow"
ins2 = ins2 + ", " + "{:+.2f}".format(fc["snow"])
ins3 = ins3 + ", snow="
ins3 = ins3 + "{:+.2f}".format(fc["snow"])
if fc["description"] != None:
ins1 = ins1 + ", description"
ins2 = ins2 + ", '" + fc["description"] + "'"
ins3 = ins3 + ", description="
ins3 = ins3 + "'" + fc["description"] + "'"
if fc["icon"] != None:
ins1 = ins1 + ", icon"
ins2 = ins2 + ", '" + fc["icon"] + "'"
ins3 = ins3 + ", icon="
ins3 = ins3 + "'" + fc["icon"] + "'"
if fc["alerts"] != None:
ins1 = ins1 + ", alerts"
ins2 = ins2 + ", " + "{}".format(fc["alerts"])
ins3 = ins3 + ", alerts="
ins3 = ins3 + "{}".format(fc["alerts"])
tnow = datetime.datetime.now()
ins1 = ins1 + ", time_cre"
ins2 = ins2 + ", '" + tnow.strftime("%Y-%m-%d %H:%M:%S") + "'"
ins1 = ins1 + ", time_mod"
ins2 = ins2 + ", '" + tnow.strftime("%Y-%m-%d %H:%M:%S") + "'"
ins3 = ins3 + ", time_mod="
ins3 = ins3 + "'" + tnow.strftime("%Y-%m-%d %H:%M:%S") + "'"
# Insert Current forecast
ins = ins1 + ") " + ins2 + ")" + ins3
logger.debug(ins)
dbCur.execute(ins)
dbCon.commit()
def forecastToDbHourly(fc, tbl, dbCon, dbCur, servRun):
"""
Store forecast data in database
"""
# Prepare statement
ins1 = "INSERT INTO " + tbl + " (timestamp"
ins2 = "VALUES ('" + fc["timestamp"] + "'"
if fc["temperature"] != None:
ins1 = ins1 + ", temperature"
ins2 = ins2 + ", " + "{:+.1f}".format(fc["temperature"])
ins1 = ins1 + ", temperature_hist"
ins2 = ins2 + ", " + "{:+.1f}".format(fc["temperature"])
if fc["humidity"] != None:
ins1 = ins1 + ", humidity"
ins2 = ins2 + ", " + "{:+.1f}".format(fc["humidity"])
ins1 = ins1 + ", humidity_hist"
ins2 = ins2 + ", " + "{:+.1f}".format(fc["humidity"])
if fc["pressure"] != None:
ins1 = ins1 + ", pressure"
ins2 = ins2 + ", " + "{:+.1f}".format(fc["pressure"])
ins1 = ins1 + ", pressure_hist"
ins2 = ins2 + ", " + "{:+.1f}".format(fc["pressure"])
if fc["clouds"] != None:
ins1 = ins1 + ", clouds"
ins2 = ins2 + ", " + "{:+.1f}".format(fc["clouds"])
if fc["uvi"] != None:
ins1 = ins1 + ", uvi"
ins2 = ins2 + ", " + "{:+.2f}".format(fc["uvi"])
if fc["visibility"] != None:
ins1 = ins1 + ", visibility"
ins2 = ins2 + ", " + "{:+.1f}".format(fc["visibility"])
if fc["windspeed"] != None:
ins1 = ins1 + ", windspeed"
ins2 = ins2 + ", " + "{:+.1f}".format(fc["windspeed"])
if fc["winddir"] != None:
ins1 = ins1 + ", winddir"
ins2 = ins2 + ", " + "{:+.1f}".format(fc["winddir"])
if fc["rain"] != None:
ins1 = ins1 + ", rain"
ins2 = ins2 + ", " + "{:+.2f}".format(fc["rain"])
if fc["snow"] != None:
ins1 = ins1 + ", snow"
ins2 = ins2 + ", " + "{:+.2f}".format(fc["snow"])
if fc["description"] != None:
ins1 = ins1 + ", description"
ins2 = ins2 + ", '" + fc["description"] + "'"
if fc["icon"] != None:
ins1 = ins1 + ", icon"
ins2 = ins2 + ", '" + fc["icon"] + "'"
if fc["alerts"] != None:
ins1 = ins1 + ", alerts"
ins2 = ins2 + ", " + "{}".format(fc["alerts"])
tnow = datetime.datetime.now()
ins1 = ins1 + ", time_cre"
ins2 = ins2 + ", '" + tnow.strftime("%Y-%m-%d %H:%M:%S") + "'"
ins1 = ins1 + ", time_mod"
ins2 = ins2 + ", '" + tnow.strftime("%Y-%m-%d %H:%M:%S") + "'"
# Insert Current forecast
ins = ins1 + ") " + ins2 + ")"
logger.debug(ins)
dbCur.execute(ins)
dbCon.commit()
def forecastToDbDaily(fc, tbl, dbCon, dbCur, servRun):
"""
Store forecast data in database
"""
# Prepare statement
ins1 = "INSERT INTO " + tbl + " (date"
ins2 = "VALUES ('" + fc["date"] + "'"
if fc["sunrise"] != None:
ins1 = ins1 + ", sunrise"
ins2 = ins2 + ", '" + fc["sunrise"] + "'"
if fc["sunset"] != None:
ins1 = ins1 + ", sunset"
ins2 = ins2 + ", '" + fc["sunset"] + "'"
if fc["temperature_m"] != None:
ins1 = ins1 + ", temperature_m"
ins2 = ins2 + ", " + "{:+.1f}".format(fc["temperature_m"])
if fc["temperature_d"] != None:
ins1 = ins1 + ", temperature_d"
ins2 = ins2 + ", " + "{:+.1f}".format(fc["temperature_d"])
if fc["temperature_e"] != None:
ins1 = ins1 + ", temperature_e"
ins2 = ins2 + ", " + "{:+.1f}".format(fc["temperature_e"])
if fc["temperature_n"] != None:
ins1 = ins1 + ", temperature_n"
ins2 = ins2 + ", " + "{:+.1f}".format(fc["temperature_n"])
if fc["temperature_min"] != None:
ins1 = ins1 + ", temperature_min"
ins2 = ins2 + ", " + "{:+.1f}".format(fc["temperature_min"])
if fc["temperature_max"] != None:
ins1 = ins1 + ", temperature_max"
ins2 = ins2 + ", " + "{:+.1f}".format(fc["temperature_max"])
if fc["humidity"] != None:
ins1 = ins1 + ", humidity"
ins2 = ins2 + ", " + "{:+.1f}".format(fc["humidity"])
if fc["pressure"] != None:
ins1 = ins1 + ", pressure"
ins2 = ins2 + ", " + "{:+.1f}".format(fc["pressure"])
if fc["windspeed"] != None:
ins1 = ins1 + ", windspeed"
ins2 = ins2 + ", " + "{:+.1f}".format(fc["windspeed"])
if fc["winddir"] != None:
ins1 = ins1 + ", winddir"
ins2 = ins2 + ", " + "{:+.1f}".format(fc["winddir"])
if fc["clouds"] != None:
ins1 = ins1 + ", clouds"
ins2 = ins2 + ", " + "{:+.1f}".format(fc["clouds"])
if fc["uvi"] != None:
ins1 = ins1 + ", uvi"
ins2 = ins2 + ", " + "{:+.2f}".format(fc["uvi"])
if fc["pop"] != None:
ins1 = ins1 + ", pop"
ins2 = ins2 + ", " + "{:+.2f}".format(fc["pop"])
if fc["rain"] != None:
ins1 = ins1 + ", rain"
ins2 = ins2 + ", " + "{:+.2f}".format(fc["rain"])
if fc["snow"] != None:
ins1 = ins1 + ", snow"
ins2 = ins2 + ", " + "{:+.2f}".format(fc["snow"])
if fc["description"] != None:
ins1 = ins1 + ", description"
ins2 = ins2 + ", '" + fc["description"] + "'"
if fc["icon"] != None:
ins1 = ins1 + ", icon"
ins2 = ins2 + ", '" + fc["icon"] + "'"
if fc["alerts"] != None:
ins1 = ins1 + ", alerts"
ins2 = ins2 + ", " + "{}".format(fc["alerts"])
# Insert Current forecast
ins = ins1 + ") " + ins2 + ")"
logger.debug(ins)
dbCur.execute(ins)
dbCon.commit()
def alertsToDb(fc, cfg, dbCon, dbCur, servRun):
"""
Store alerts in database
"""
tbl = cfg["forecast"]["forecastTables"]["alertsForecast"]
if "alerts" in fc:
if len(fc["alerts"]) > 0:
for i in range(0, len(fc["alerts"])):
alert = fc["alerts"][i]
# Prepare statement
ins1 = "INSERT INTO " + tbl + " ("
ins2 = "VALUES ("
ins3 = " ON DUPLICATE KEY UPDATE "
ins1 = ins1 + "start"
ins2 = ins2 + "'" + datetime.datetime.fromtimestamp(alert["start"]).strftime("%Y-%m-%d %H:%M:%S") + "'"
ins1 = ins1 + ", end"
ins2 = ins2 + ", '" + datetime.datetime.fromtimestamp(alert["end"]).strftime("%Y-%m-%d %H:%M:%S") + "'"
ins1 = ins1 + ", event"
ins2 = ins2 + ", '" + alert["event"] + "'"
ins1 = ins1 + ", sender_name"
ins2 = ins2 + ", '" + alert["sender_name"] + "'"
ins1 = ins1 + ", description"
ins2 = ins2 + ", '" + alert["description"] + "'"
ins3 = ins3 + "description='" + alert["description"] + "'"
# Insert Current forecast
ins = ins1 + ") " + ins2 + ")" + ins3
logger.debug(ins)
dbCur.execute(ins)
dbCon.commit()
def forecastToFile(fc, cfg, curTs, fil, servRun):
"""
Store forecast data in database
"""
fil.write('{')
fil.write('"time": "' + curTs + '",')
fil.write('"data":')
fil.write(json.dumps(fc))
fil.write('}')
def handleForecast(cfg, curTs, curDate, curTime, dbCon, dbCur, fil, servRun):
"""
Handle forecast according to given configuration
Input:
- cfg : Configuration dictionary for weatherstation
- curTS : Measurement timestamp
- curDate: Measurement Date
- curTime: Measurement Time
- dbCon : Database connection
- dbCur : Database cursor
- fil : file handler for outpot file
- servRun: True for service run
"""
# Get the forecast
url = cfg["forecast"]["source"]["url"]
payload = cfg["forecast"]["source"]["payload"]
fc = getForecast(url, payload)
if fc:
# Output to file
if cfg["forecast"]["forecastFileOut"]:
forecastToFile(fc, cfg, curTs, fil, servRun)
# Map forecast
fcData = mapForecast(fc, curTs)
# Store in database
if cfg["forecast"]["forecastDbOut"]:
forecastToDb(fcData, cfg, curTs, curDate, dbCon, dbCur, servRun)
# Store alerts
if cfg["forecast"]["forecastDbOut"]:
alertsToDb(fc, cfg, dbCon, dbCur, servRun) | [
"logging.NullHandler",
"datetime.datetime.fromtimestamp",
"datetime.datetime.strptime",
"json.dumps",
"requests.get",
"datetime.datetime.now",
"logging_plus.getLogger",
"datetime.timedelta"
] | [((219, 251), 'logging_plus.getLogger', 'logging_plus.getLogger', (['__name__'], {}), '(__name__)\n', (241, 251), False, 'import logging_plus\n'), ((270, 291), 'logging.NullHandler', 'logging.NullHandler', ([], {}), '()\n', (289, 291), False, 'import logging\n'), ((1317, 1350), 'requests.get', 'requests.get', (['url'], {'params': 'payload'}), '(url, params=payload)\n', (1329, 1350), False, 'import requests\n'), ((11881, 11904), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (11902, 11904), False, 'import datetime\n'), ((14488, 14511), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (14509, 14511), False, 'import datetime\n'), ((6358, 6412), 'datetime.datetime.strptime', 'datetime.datetime.strptime', (['curTs', '"""%Y-%m-%d %H:%M:%S"""'], {}), "(curTs, '%Y-%m-%d %H:%M:%S')\n", (6384, 6412), False, 'import datetime\n'), ((19333, 19347), 'json.dumps', 'json.dumps', (['fc'], {}), '(fc)\n', (19343, 19347), False, 'import json\n'), ((6309, 6338), 'datetime.timedelta', 'datetime.timedelta', ([], {'minutes': '(1)'}), '(minutes=1)\n', (6327, 6338), False, 'import datetime\n'), ((6442, 6481), 'datetime.timedelta', 'datetime.timedelta', ([], {'hours': 'fcRetainHours'}), '(hours=fcRetainHours)\n', (6460, 6481), False, 'import datetime\n'), ((2923, 2965), 'datetime.datetime.fromtimestamp', 'datetime.datetime.fromtimestamp', (["hfc['dt']"], {}), "(hfc['dt'])\n", (2954, 2965), False, 'import datetime\n'), ((4034, 4077), 'datetime.datetime.fromtimestamp', 'datetime.datetime.fromtimestamp', (["dyfc['dt']"], {}), "(dyfc['dt'])\n", (4065, 4077), False, 'import datetime\n'), ((4130, 4178), 'datetime.datetime.fromtimestamp', 'datetime.datetime.fromtimestamp', (["dyfc['sunrise']"], {}), "(dyfc['sunrise'])\n", (4161, 4178), False, 'import datetime\n'), ((4230, 4277), 'datetime.datetime.fromtimestamp', 'datetime.datetime.fromtimestamp', (["dyfc['sunset']"], {}), "(dyfc['sunset'])\n", (4261, 4277), False, 'import datetime\n'), ((18290, 18337), 
'datetime.datetime.fromtimestamp', 'datetime.datetime.fromtimestamp', (["alert['start']"], {}), "(alert['start'])\n", (18321, 18337), False, 'import datetime\n'), ((18451, 18496), 'datetime.datetime.fromtimestamp', 'datetime.datetime.fromtimestamp', (["alert['end']"], {}), "(alert['end'])\n", (18482, 18496), False, 'import datetime\n')] |
"""
This is the setup module for the example project.
Based on:
- https://packaging.python.org/distributing/
- https://github.com/pypa/sampleproject/blob/master/setup.py
- https://blog.ionelmc.ro/2014/05/25/python-packaging/#the-structure
"""
# Standard Python Libraries
from glob import glob
from os.path import basename, splitext
# Third-Party Libraries
from setuptools import setup
# Author details
# Author details.
# Fixed: the original had a trailing comma, making `author` a 1-tuple
# ("Cyber and Infrastructure Security Agency",) instead of the plain string
# that setuptools expects for the author metadata field.
author = "Cyber and Infrastructure Security Agency"
author_email = "<EMAIL>"
def readme():
    """Return the full text of the project's README.md (decoded as UTF-8)."""
    with open("README.md", encoding="utf-8") as readme_file:
        contents = readme_file.read()
    return contents
def package_vars(version_file):
    """Execute version_file and return a dict of the variables it defines."""
    pkg_vars = {}
    with open(version_file) as version_fp:
        source = version_fp.read()
    # Executing trusted, repo-local version metadata only.
    exec(source, pkg_vars)  # nosec
    return pkg_vars
setup(
    name="pulse-check-logs-tools",
    # Versions should comply with PEP440
    version="1.0.0",
    description="Check logs for attempts at CVE-2019-11510",
    long_description=readme(),
    long_description_content_type="text/markdown",
    # HIRT "homepage"
    url="https://www.cisa.gov/cyber-incident-response",
    # The project's main homepage
    download_url="https://github.com/cisagov/check-your-pulse",
    # Author details
    author=author,
    author_email=author_email,
    license="License :: CC0 1.0 Universal (CC0 1.0) Public Domain Dedication",
    # See https://pypi.python.org/pypi?%3Aaction=list_classifiers
    classifiers=[
        # How mature is this project? Common values are
        #   3 - Alpha
        #   4 - Beta
        #   5 - Production/Stable
        # Fixed: "Development Status :: 4 - Production/Stable" is not a valid
        # trove classifier (4 is Beta, 5 is Production/Stable); PyPI rejects
        # uploads that declare unknown classifiers.
        "Development Status :: 5 - Production/Stable",
        # Indicate who your project is intended for
        "Intended Audience :: System Administrators",
        # Pick your license as you wish (should match "license" above)
        "License :: CC0 1.0 Universal (CC0 1.0) Public Domain Dedication",
        # Specify the Python versions you support here. In particular, ensure
        # that you indicate whether you support Python 2, Python 3 or both.
        "Programming Language :: Python :: 3",
        "Programming Language :: Python :: 3.6",
        "Programming Language :: Python :: 3.7",
        "Programming Language :: Python :: 3.8",
    ],
    # What does your project relate to?
    # NOTE(review): "skeleton" looks like a leftover from the project template;
    # consider replacing with meaningful keywords.
    keywords="skeleton",
    # Expose every src/*.py file as a top-level module.
    py_modules=[splitext(basename(path))[0] for path in glob("src/*.py")],
    include_package_data=True,
    install_requires=[],
    python_requires=">=3.6",
)
| [
"os.path.basename",
"glob.glob"
] | [((2448, 2464), 'glob.glob', 'glob', (['"""src/*.py"""'], {}), "('src/*.py')\n", (2452, 2464), False, 'from glob import glob\n'), ((2417, 2431), 'os.path.basename', 'basename', (['path'], {}), '(path)\n', (2425, 2431), False, 'from os.path import basename, splitext\n')] |
"""
Data freshness:
--------------
Calculate freshness for all datasets in HDX.
"""
import datetime
import logging
import re
from parser import ParserError
from urllib.parse import urlparse
from dateutil import parser
from hdx.data.dataset import Dataset
from hdx.data.hdxobject import HDXError
from hdx.data.resource import Resource
from hdx.hdx_configuration import Configuration
from hdx.utilities.dictandlist import dict_of_lists_add, list_distribute_contents
from sqlalchemy import and_, exists
from sqlalchemy.orm.exc import NoResultFound
from hdx.freshness.database.dbdataset import DBDataset
from hdx.freshness.database.dbinfodataset import DBInfoDataset
from hdx.freshness.database.dborganization import DBOrganization
from hdx.freshness.database.dbresource import DBResource
from hdx.freshness.database.dbrun import DBRun
from hdx.freshness.retrieval import retrieve
from hdx.freshness.testdata.serialize import (
serialize_datasets,
serialize_hashresults,
serialize_now,
serialize_results,
)
# Module-level logger for the freshness run.
logger = logging.getLogger(__name__)
# Fallback number of resource URLs to force-hash in a run when the previous
# run cannot supply a count (see DataFreshness.no_resources_force_hash).
default_no_urls_to_check = 1000
class DataFreshness:
    """Calculates freshness for all HDX datasets and resources for one run,
    persisting results to the freshness database via SQLAlchemy."""

    # Matches a bracketed value such as "(2020-01-31)" inside an
    # updated_by_script string; group(1) is parsed as the script update date.
    bracketed_date = re.compile(r"\((.*)\)")
    def __init__(
        self, session=None, testsession=None, datasets=None, now=None, do_touch=False
    ):
        """Set up one freshness run.

        Args:
            session: SQLAlchemy session for the freshness database.
            testsession: optional session used to serialize test fixtures.
            datasets: datasets to process; if None, all are fetched from HDX.
            now: run timestamp; if None, datetime.utcnow() is used.
            do_touch: whether to patch resource last_modified values on HDX.
        """
        self.session = session
        self.urls_to_check_count = 0
        self.never_update = 0
        self.live_update = 0
        self.adhoc_update = 0
        self.dataset_what_updated = dict()
        self.resource_what_updated = dict()
        self.resource_last_modified_count = 0
        self.do_touch = do_touch
        self.url_internal = "data.humdata.org"
        # Build aging thresholds from configuration: update frequency (days) ->
        # {status name: timedelta} used by calculate_aging.
        self.aging = dict()
        for key, value in Configuration.read()["aging"].items():
            period = int(key)
            aging_period = dict()
            for status in value:
                nodays = value[status]
                aging_period[status] = datetime.timedelta(days=nodays)
            self.aging[period] = aging_period
        # Human-readable labels keyed by the fresh value stored on DBDataset.
        self.aging_statuses = {
            0: "0: Fresh",
            1: "1: Due",
            2: "2: Overdue",
            3: "3: Delinquent",
            None: "Freshness Unavailable",
        }
        self.testsession = testsession
        if datasets is None:  # pragma: no cover
            Configuration.read().set_read_only(
                True
            )  # so that we only get public datasets
            logger.info("Retrieving all datasets from HDX")
            self.datasets = Dataset.get_all_datasets()
            Configuration.read().set_read_only(False)
            if self.testsession:
                serialize_datasets(self.testsession, self.datasets)
        else:
            self.datasets = datasets
        if now is None:  # pragma: no cover
            self.now = datetime.datetime.utcnow()
            if self.testsession:
                serialize_now(self.testsession, self.now)
        else:
            self.now = now
        # Determine the previous run number (if any) so this run can carry
        # forward state; run numbers increase monotonically.
        self.previous_run_number = (
            self.session.query(DBRun.run_number)
            .distinct()
            .order_by(DBRun.run_number.desc())
            .first()
        )
        if self.previous_run_number is not None:
            self.previous_run_number = self.previous_run_number[0]
            self.run_number = self.previous_run_number + 1
            # Prefer a hash quota derived from the previous run's resources;
            # fall back to the module default when unavailable.
            no_resources = self.no_resources_force_hash()
            if no_resources:
                self.no_urls_to_check = no_resources
            else:
                self.no_urls_to_check = default_no_urls_to_check
        else:
            self.previous_run_number = None
            self.run_number = 0
            self.no_urls_to_check = default_no_urls_to_check
        logger.info(f"Will force hash {self.no_urls_to_check} resources")
def no_resources_force_hash(self):
columns = [DBResource.id, DBDataset.updated_by_script]
filters = [
DBResource.dataset_id == DBDataset.id,
DBResource.run_number == self.previous_run_number,
DBDataset.run_number == self.previous_run_number,
DBResource.url.notlike(f"%{self.url_internal}%"),
]
query = self.session.query(*columns).filter(and_(*filters))
noscriptupdate = 0
noresources = 0
for result in query:
updated_by_script = result[1]
if updated_by_script is not None:
noscriptupdate += 1
continue
noresources += 1
if noscriptupdate == 0:
return None
return noresources
def spread_datasets(self):
self.datasets = list_distribute_contents(
self.datasets, lambda x: x["organization"]["name"]
)
def add_new_run(self):
dbrun = DBRun(run_number=self.run_number, run_date=self.now)
self.session.add(dbrun)
self.session.commit()
@staticmethod
def internal_what_updated(dbresource, url_substr):
what_updated = f"{url_substr}-{dbresource.what_updated}"
dbresource.what_updated = what_updated
    def process_resources(
        self,
        dataset_id,
        previous_dbdataset,
        resources,
        updated_by_script,
        hash_ids=None,
    ):
        """Create this run's DBResource rows for one dataset's resources.

        State (hash, modified dates, check time) is carried forward from the
        previous run's rows where they exist. Script-updated and internal
        (data.humdata.org) resources are tallied immediately; the rest are
        returned as candidates for URL checking.

        Returns:
            Tuple of (dataset_resources, last_resource_updated,
            last_resource_modified) where dataset_resources is a list of
            (url, resource_id, format, what_updated, should_hash) tuples.
        """
        last_resource_updated = None
        last_resource_modified = None
        dataset_resources = list()
        for resource in resources:
            resource_id = resource["id"]
            dict_of_lists_add(self.resource_what_updated, "total", resource_id)
            url = resource["url"]
            name = resource["name"]
            metadata_modified = parser.parse(
                resource["metadata_modified"], ignoretz=True
            )
            last_modified = parser.parse(resource["last_modified"], ignoretz=True)
            # Track the most recently modified resource of this dataset.
            if last_resource_modified:
                if last_modified > last_resource_modified:
                    last_resource_updated = resource_id
                    last_resource_modified = last_modified
            else:
                last_resource_updated = resource_id
                last_resource_modified = last_modified
            dbresource = DBResource(
                run_number=self.run_number,
                id=resource_id,
                name=name,
                dataset_id=dataset_id,
                url=url,
                last_modified=last_modified,
                metadata_modified=metadata_modified,
                latest_of_modifieds=last_modified,
                what_updated="firstrun",
            )
            if previous_dbdataset is not None:
                try:
                    previous_dbresource = (
                        self.session.query(DBResource)
                        .filter_by(
                            id=resource_id, run_number=previous_dbdataset.run_number
                        )
                        .one()
                    )
                    # A newer last_modified than the previous run implies a
                    # filestore upload; otherwise carry the old value forward.
                    if last_modified > previous_dbresource.last_modified:
                        dbresource.what_updated = "filestore"
                    else:
                        dbresource.last_modified = previous_dbresource.last_modified
                        dbresource.what_updated = "nothing"
                    if last_modified <= previous_dbresource.latest_of_modifieds:
                        dbresource.latest_of_modifieds = (
                            previous_dbresource.latest_of_modifieds
                        )
                    # Carry forward hash/check state from the previous run.
                    dbresource.http_last_modified = (
                        previous_dbresource.http_last_modified
                    )
                    dbresource.md5_hash = previous_dbresource.md5_hash
                    dbresource.hash_last_modified = (
                        previous_dbresource.hash_last_modified
                    )
                    dbresource.when_checked = previous_dbresource.when_checked
                except NoResultFound:
                    pass
            self.session.add(dbresource)
            if updated_by_script:
                # Script-updated datasets' resources are not URL-checked.
                dict_of_lists_add(
                    self.resource_what_updated, dbresource.what_updated, resource_id
                )
                continue
            if self.url_internal in url:
                # Resources hosted on HDX itself are not URL-checked.
                self.internal_what_updated(dbresource, "internal")
                dict_of_lists_add(
                    self.resource_what_updated, dbresource.what_updated, resource_id
                )
                continue
            if hash_ids:
                should_hash = resource_id in hash_ids
            else:
                # Force-hash up to no_urls_to_check resources, preferring those
                # never checked or not checked for over 30 days.
                should_hash = self.urls_to_check_count < self.no_urls_to_check and (
                    dbresource.when_checked is None
                    or self.now - dbresource.when_checked > datetime.timedelta(days=30)
                )
            resource_format = resource["format"].lower()
            dataset_resources.append(
                (
                    url,
                    resource_id,
                    resource_format,
                    dbresource.what_updated,
                    should_hash,
                )
            )
        return dataset_resources, last_resource_updated, last_resource_modified
    def process_datasets(self, hash_ids=None):
        """Write a DBDataset row (plus organization/info rows) for every
        non-requestable dataset and compute its initial freshness.

        Args:
            hash_ids: optional explicit set of resource ids to hash (used in
                tests); otherwise the quota-based selection applies.

        Returns:
            Tuple of (datasets_to_check, resources_to_check): datasets with at
            least one resource to hash mapped to their status string, and the
            (url, id, format, what_updated) tuples to pass to check_urls.
        """
        resources_to_check = list()
        datasets_to_check = dict()
        logger.info("Processing datasets")
        for dataset in self.datasets:
            resources = dataset.get_resources()
            if dataset.is_requestable():  # ignore requestable
                continue
            dataset_id = dataset["id"]
            dict_of_lists_add(self.dataset_what_updated, "total", dataset_id)
            # Upsert the organization row.
            organization_id = dataset["organization"]["id"]
            organization_name = dataset["organization"]["name"]
            organization_title = dataset["organization"]["title"]
            try:
                dborganization = (
                    self.session.query(DBOrganization)
                    .filter_by(id=organization_id)
                    .one()
                )
                dborganization.name = organization_name
                dborganization.title = organization_title
            except NoResultFound:
                dborganization = DBOrganization(
                    name=organization_name, id=organization_id, title=organization_title
                )
                self.session.add(dborganization)
            # Upsert the run-independent dataset info row.
            dataset_name = dataset["name"]
            dataset_title = dataset["title"]
            dataset_private = dataset["private"]
            dataset_maintainer = dataset["maintainer"]
            dataset_location = ",".join([x["name"] for x in dataset["groups"]])
            try:
                dbinfodataset = (
                    self.session.query(DBInfoDataset).filter_by(id=dataset_id).one()
                )
                dbinfodataset.name = dataset_name
                dbinfodataset.title = dataset_title
                dbinfodataset.private = dataset_private
                dbinfodataset.organization_id = organization_id
                dbinfodataset.maintainer = dataset_maintainer
                dbinfodataset.location = dataset_location
            except NoResultFound:
                dbinfodataset = DBInfoDataset(
                    name=dataset_name,
                    id=dataset_id,
                    title=dataset_title,
                    private=dataset_private,
                    organization_id=organization_id,
                    maintainer=dataset_maintainer,
                    location=dataset_location,
                )
                self.session.add(dbinfodataset)
            try:
                previous_dbdataset = (
                    self.session.query(DBDataset)
                    .filter_by(run_number=self.previous_run_number, id=dataset_id)
                    .one()
                )
            except NoResultFound:
                previous_dbdataset = None
            update_frequency = dataset.get("data_update_frequency")
            updated_by_script = None
            if update_frequency is not None:
                update_frequency = int(update_frequency)
                # updated_by_script holds a free-form marker; the bracketed
                # date inside it (if any) is the script update time.
                updated_by_script = dataset.get("updated_by_script")
                if updated_by_script:
                    if "freshness_ignore" in updated_by_script:
                        updated_by_script = None
                    else:
                        match = self.bracketed_date.search(updated_by_script)
                        if match is None:
                            updated_by_script = None
                        else:
                            try:
                                updated_by_script = parser.parse(
                                    match.group(1), ignoretz=True
                                )
                            except ParserError:
                                updated_by_script = None
            (
                dataset_resources,
                last_resource_updated,
                last_resource_modified,
            ) = self.process_resources(
                dataset_id,
                previous_dbdataset,
                resources,
                updated_by_script,
                hash_ids=hash_ids,
            )
            dataset_date = dataset.get("dataset_date")
            metadata_modified = parser.parse(
                dataset["metadata_modified"], ignoretz=True
            )
            if "last_modified" in dataset:
                last_modified = parser.parse(dataset["last_modified"], ignoretz=True)
            else:
                # Sentinel epoch date when the dataset has no last_modified.
                last_modified = datetime.datetime(1970, 1, 1, 0, 0)
            if len(resources) == 0 and last_resource_updated is None:
                last_resource_updated = "NO RESOURCES"
                last_resource_modified = datetime.datetime(1970, 1, 1, 0, 0)
                error = True
                what_updated = "no resources"
            else:
                error = False
                what_updated = "firstrun"
            # latest_of_modifieds = max(last_modified, review_date,
            # updated_by_script) - the date used for freshness.
            review_date = dataset.get("review_date")
            if review_date is None:
                latest_of_modifieds = last_modified
            else:
                review_date = parser.parse(review_date, ignoretz=True)
                if review_date > last_modified:
                    latest_of_modifieds = review_date
                else:
                    latest_of_modifieds = last_modified
            if updated_by_script and updated_by_script > latest_of_modifieds:
                latest_of_modifieds = updated_by_script
            fresh = None
            # Special frequencies: 0 = live, -1 = never, -2 = adhoc - always
            # considered fresh; positive values use the aging thresholds.
            if update_frequency is not None and not error:
                if update_frequency == 0:
                    fresh = 0
                    self.live_update += 1
                elif update_frequency == -1:
                    fresh = 0
                    self.never_update += 1
                elif update_frequency == -2:
                    fresh = 0
                    self.adhoc_update += 1
                else:
                    fresh = self.calculate_aging(latest_of_modifieds, update_frequency)
            dbdataset = DBDataset(
                run_number=self.run_number,
                id=dataset_id,
                dataset_date=dataset_date,
                update_frequency=update_frequency,
                review_date=review_date,
                last_modified=last_modified,
                metadata_modified=metadata_modified,
                updated_by_script=updated_by_script,
                latest_of_modifieds=latest_of_modifieds,
                what_updated=what_updated,
                last_resource_updated=last_resource_updated,
                last_resource_modified=last_resource_modified,
                fresh=fresh,
                error=error,
            )
            if previous_dbdataset is not None and not error:
                dbdataset.what_updated = self.add_what_updated(
                    dbdataset.what_updated, "nothing"
                )
                if (
                    last_modified > previous_dbdataset.last_modified
                ):  # filestore update would cause this
                    dbdataset.what_updated = self.add_what_updated(
                        dbdataset.what_updated, "filestore"
                    )
                else:
                    dbdataset.last_modified = previous_dbdataset.last_modified
                if previous_dbdataset.review_date is None:
                    if review_date is not None:
                        dbdataset.what_updated = self.add_what_updated(
                            dbdataset.what_updated, "review date"
                        )
                else:
                    if (
                        review_date is not None
                        and review_date > previous_dbdataset.review_date
                    ):  # someone clicked the review button
                        dbdataset.what_updated = self.add_what_updated(
                            dbdataset.what_updated, "review date"
                        )
                    else:
                        dbdataset.review_date = previous_dbdataset.review_date
                if updated_by_script and (
                    previous_dbdataset.updated_by_script is None
                    or updated_by_script > previous_dbdataset.updated_by_script
                ):  # new script update of datasets
                    dbdataset.what_updated = self.add_what_updated(
                        dbdataset.what_updated, "script update"
                    )
                else:
                    dbdataset.updated_by_script = previous_dbdataset.updated_by_script
                if last_resource_modified <= previous_dbdataset.last_resource_modified:
                    # we keep this so that although we don't normally use it,
                    # we retain the ability to run without touching CKAN
                    dbdataset.last_resource_updated = (
                        previous_dbdataset.last_resource_updated
                    )
                    dbdataset.last_resource_modified = (
                        previous_dbdataset.last_resource_modified
                    )
                if latest_of_modifieds < previous_dbdataset.latest_of_modifieds:
                    dbdataset.latest_of_modifieds = (
                        previous_dbdataset.latest_of_modifieds
                    )
                    if update_frequency is not None and update_frequency > 0:
                        fresh = self.calculate_aging(
                            previous_dbdataset.latest_of_modifieds, update_frequency
                        )
                        dbdataset.fresh = fresh
            self.session.add(dbdataset)
            update_string = (
                f"{self.aging_statuses[fresh]}, Updated {dbdataset.what_updated}"
            )
            # Decide which of this dataset's resources actually get checked.
            anyresourcestohash = False
            for (
                url,
                resource_id,
                resource_format,
                what_updated,
                should_hash,
            ) in dataset_resources:
                if not should_hash:
                    if (
                        fresh == 0 and update_frequency != 1
                    ) or update_frequency is None:
                        dict_of_lists_add(
                            self.resource_what_updated, what_updated, resource_id
                        )
                    continue
                resources_to_check.append(
                    (url, resource_id, resource_format, what_updated)
                )
                self.urls_to_check_count += 1
                anyresourcestohash = True
            if anyresourcestohash:
                datasets_to_check[dataset_id] = update_string
            else:
                dict_of_lists_add(self.dataset_what_updated, update_string, dataset_id)
        self.session.commit()
        return datasets_to_check, resources_to_check
    def check_urls(
        self, resources_to_check, user_agent, results=None, hash_results=None
    ):
        """Download and hash the given resources, then re-download those whose
        hash differs from the stored one (the second pass lets process_results
        distinguish a real update from a dynamically generated response).

        Args:
            resources_to_check: (url, resource_id, format, what_updated) tuples.
            user_agent: user agent string for the HTTP requests.
            results: precomputed first-pass results (tests); fetched if None.
            hash_results: precomputed second-pass results (tests).

        Returns:
            Tuple of (results, hash_results) keyed by resource id.
        """
        def get_domain(x):
            # Distribution key so requests to one host are spread out.
            return urlparse(x[0]).netloc
        if results is None:  # pragma: no cover
            resources_to_check = list_distribute_contents(
                resources_to_check, get_domain
            )
            results = retrieve(resources_to_check, user_agent)
            if self.testsession:
                serialize_results(self.testsession, results)
        hash_check = list()
        for resource_id in results:
            url, resource_format, err, http_last_modified, hash = results[resource_id]
            if hash:
                dbresource = (
                    self.session.query(DBResource)
                    .filter_by(id=resource_id, run_number=self.run_number)
                    .one()
                )
                if dbresource.md5_hash != hash:  # File changed
                    hash_check.append((url, resource_id, resource_format))
        if hash_results is None:  # pragma: no cover
            hash_check = list_distribute_contents(hash_check, get_domain)
            hash_results = retrieve(hash_check, user_agent)
            if self.testsession:
                serialize_hashresults(self.testsession, hash_results)
        return results, hash_results
    def process_results(self, results, hash_results, resourcecls=Resource):
        """Apply URL-check results to this run's DBResource rows.

        Classifies each checked resource's what_updated ("same hash",
        "first hash", "repeat hash", "hash", "api", "error") from the two
        download passes and, when do_touch is set, patches the resource's
        last_modified on HDX for genuinely changed, non-fresh resources.

        Args:
            results: first-pass download results keyed by resource id.
            hash_results: second-pass results for resources whose hash changed.
            resourcecls: Resource class (injectable for tests).

        Returns:
            dict: dataset id -> {resource id: (error, latest_of_modifieds,
            what_updated)} for update_dataset_latest_of_modifieds.
        """
        datasets_latest_of_modifieds = dict()
        for resource_id in sorted(results):
            # NOTE: "hash" here shadows the builtin (kept for byte-compat).
            url, _, err, http_last_modified, hash = results[resource_id]
            dbresource = (
                self.session.query(DBResource)
                .filter_by(id=resource_id, run_number=self.run_number)
                .one()
            )
            dataset_id = dbresource.dataset_id
            datasetinfo = datasets_latest_of_modifieds.get(dataset_id, dict())
            what_updated = dbresource.what_updated
            update_last_modified = False
            if http_last_modified:
                if (
                    dbresource.http_last_modified is None
                    or http_last_modified > dbresource.http_last_modified
                ):
                    dbresource.http_last_modified = http_last_modified
            if hash:
                dbresource.when_checked = self.now
                if dbresource.md5_hash == hash:  # File unchanged
                    what_updated = self.add_what_updated(what_updated, "same hash")
                else:  # File updated
                    hash_to_set = hash
                    (
                        hash_url,
                        _,
                        hash_err,
                        hash_http_last_modified,
                        hash_hash,
                    ) = hash_results[resource_id]
                    if hash_http_last_modified:
                        if (
                            dbresource.http_last_modified is None
                            or hash_http_last_modified > dbresource.http_last_modified
                        ):
                            dbresource.http_last_modified = hash_http_last_modified
                    if hash_hash:
                        # Two downloads gave the same hash: a stable file that
                        # really changed (unless seen before / first run).
                        if hash_hash == hash:
                            if (
                                dbresource.md5_hash is None
                            ):  # First occurrence of resource eg. first run - don't use hash
                                # for last modified field (and hence freshness calculation)
                                dbresource.what_updated = self.add_what_updated(
                                    what_updated, "first hash"
                                )
                                what_updated = dbresource.what_updated
                            else:
                                # Check if hash has occurred before
                                # select distinct md5_hash from dbresources where id = '714ef7b5-a303-4e4f-be2f-03b2ce2933c7' and md5_hash='2f3cd6a6fce5ad4d7001780846ad87a7';
                                if self.session.query(
                                    exists().where(
                                        and_(
                                            DBResource.id == resource_id,
                                            DBResource.md5_hash == hash,
                                        )
                                    )
                                ).scalar():
                                    dbresource.what_updated = self.add_what_updated(
                                        what_updated, "repeat hash"
                                    )
                                    what_updated = dbresource.what_updated
                                else:
                                    what_updated, _ = self.set_latest_of_modifieds(
                                        dbresource, self.now, "hash"
                                    )
                                    dbresource.hash_last_modified = self.now
                                    update_last_modified = True
                            dbresource.api = False
                        else:
                            # Hash differs between the two downloads: the URL
                            # generates content dynamically (an "api").
                            hash_to_set = hash_hash
                            what_updated = self.add_what_updated(what_updated, "api")
                            dbresource.api = True
                        if hash_err:
                            what_updated = self.add_what_updated(what_updated, "error")
                            dbresource.error = hash_err
                    dbresource.md5_hash = hash_to_set
            if err:
                dbresource.when_checked = self.now
                what_updated = self.add_what_updated(what_updated, "error")
                dbresource.error = err
            datasetinfo[resource_id] = (
                dbresource.error,
                dbresource.latest_of_modifieds,
                dbresource.what_updated,
            )
            datasets_latest_of_modifieds[dataset_id] = datasetinfo
            dict_of_lists_add(self.resource_what_updated, what_updated, resource_id)
            if update_last_modified and self.do_touch:
                try:
                    logger.info(f"Updating last modified for resource {resource_id}")
                    resource = resourcecls.read_from_hdx(resource_id)
                    if resource:
                        last_modified = parser.parse(resource["last_modified"])
                        dbdataset = (
                            self.session.query(DBDataset)
                            .filter_by(id=dataset_id, run_number=self.run_number)
                            .one()
                        )
                        update_frequency = dbdataset.update_frequency
                        # Only touch HDX when the dataset is not already fresh.
                        if update_frequency > 0:
                            if (
                                self.calculate_aging(last_modified, update_frequency)
                                == 0
                            ):
                                dotouch = False
                            else:
                                dotouch = True
                        else:
                            dotouch = True
                        if dotouch:
                            self.resource_last_modified_count += 1
                            logger.info(
                                f"Resource last modified count: {self.resource_last_modified_count}"
                            )
                            resource[
                                "last_modified"
                            ] = dbresource.latest_of_modifieds.isoformat()
                            resource.update_in_hdx(
                                operation="patch",
                                batch_mode="KEEP_OLD",
                                skip_validation=True,
                                ignore_check=True,
                            )
                        else:
                            logger.info(
                                f"Didn't update last modified for resource {resource_id} as it is fresh!"
                            )
                    else:
                        logger.error(
                            f"Last modified update failed for id {resource_id}! Resource does not exist."
                        )
                except HDXError:
                    logger.exception(
                        f"Last modified update failed for id {resource_id}!"
                    )
        self.session.commit()
        return datasets_latest_of_modifieds
    def update_dataset_latest_of_modifieds(
        self, datasets_to_check, datasets_latest_of_modifieds
    ):
        """Roll per-resource check results up into each DBDataset row,
        recomputing freshness and recording final statuses in
        self.dataset_what_updated.

        Args:
            datasets_to_check: dataset id -> status string from process_datasets.
            datasets_latest_of_modifieds: dataset id -> {resource id:
                (error, latest_of_modifieds, what_updated)} from process_results.
        """
        for dataset_id in datasets_latest_of_modifieds:
            dbdataset = (
                self.session.query(DBDataset)
                .filter_by(id=dataset_id, run_number=self.run_number)
                .one()
            )
            dataset = datasets_latest_of_modifieds[dataset_id]
            dataset_latest_of_modifieds = dbdataset.latest_of_modifieds
            dataset_what_updated = dbdataset.what_updated
            last_resource_modified = dbdataset.last_resource_modified
            last_resource_updated = dbdataset.last_resource_updated
            # The dataset is only marked in error if every resource errored.
            all_errors = True
            for resource_id in sorted(dataset):
                (
                    err,
                    new_last_resource_modified,
                    new_last_resource_what_updated,
                ) = dataset[resource_id]
                if not err:
                    all_errors = False
                if new_last_resource_modified:
                    if new_last_resource_modified > last_resource_modified:
                        last_resource_updated = resource_id
                        last_resource_modified = new_last_resource_modified
                    if new_last_resource_modified > dataset_latest_of_modifieds:
                        dataset_latest_of_modifieds = new_last_resource_modified
                        dataset_what_updated = new_last_resource_what_updated
            dbdataset.last_resource_updated = last_resource_updated
            dbdataset.last_resource_modified = last_resource_modified
            self.set_latest_of_modifieds(
                dbdataset, dataset_latest_of_modifieds, dataset_what_updated
            )
            update_frequency = dbdataset.update_frequency
            if update_frequency is not None and update_frequency > 0:
                dbdataset.fresh = self.calculate_aging(
                    dbdataset.latest_of_modifieds, update_frequency
                )
            dbdataset.error = all_errors
            status = f"{self.aging_statuses[dbdataset.fresh]}, Updated {dbdataset.what_updated}"
            if all_errors:
                status = f"{status},error"
            dict_of_lists_add(self.dataset_what_updated, status, dataset_id)
        self.session.commit()
        # Datasets that were queued for checking but returned no results still
        # get their precomputed status recorded.
        for dataset_id in datasets_to_check:
            if dataset_id in datasets_latest_of_modifieds:
                continue
            dict_of_lists_add(
                self.dataset_what_updated, datasets_to_check[dataset_id], dataset_id
            )
def output_counts(self):
def add_what_updated_str(hdxobject_what_updated):
nonlocal output_str
output_str += f'\n* total: {len(hdxobject_what_updated["total"])} *'
for countstr in sorted(hdxobject_what_updated):
if countstr != "total":
output_str += (
f",\n{countstr}: {len(hdxobject_what_updated[countstr])}"
)
output_str = "\n*** Resources ***"
add_what_updated_str(self.resource_what_updated)
output_str += "\n\n*** Datasets ***"
add_what_updated_str(self.dataset_what_updated)
output_str += f"\n\n{self.live_update} datasets have update frequency of Live"
output_str += f"\n{self.never_update} datasets have update frequency of Never"
output_str += f"\n{self.adhoc_update} datasets have update frequency of Adhoc"
logger.info(output_str)
return output_str
@staticmethod
def set_latest_of_modifieds(dbobject, modified_date, what_updated):
if modified_date > dbobject.latest_of_modifieds:
dbobject.latest_of_modifieds = modified_date
dbobject.what_updated = DataFreshness.add_what_updated(
dbobject.what_updated, what_updated
)
update = True
else:
update = False
return dbobject.what_updated, update
@staticmethod
def add_what_updated(prev_what_updated, what_updated):
if what_updated in prev_what_updated:
return prev_what_updated
if prev_what_updated != "nothing" and prev_what_updated != "firstrun":
if what_updated != "nothing":
return f"{prev_what_updated},{what_updated}"
return prev_what_updated
else:
return what_updated
def calculate_aging(self, last_modified, update_frequency):
delta = self.now - last_modified
if delta >= self.aging[update_frequency]["Delinquent"]:
return 3
elif delta >= self.aging[update_frequency]["Overdue"]:
return 2
elif delta >= self.aging[update_frequency]["Due"]:
return 1
return 0
| [
"logging.getLogger",
"hdx.freshness.testdata.serialize.serialize_hashresults",
"re.compile",
"sqlalchemy.exists",
"hdx.freshness.testdata.serialize.serialize_now",
"datetime.timedelta",
"sqlalchemy.and_",
"datetime.datetime",
"hdx.freshness.database.dbrun.DBRun.run_number.desc",
"hdx.freshness.dat... | [((1033, 1060), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (1050, 1060), False, 'import logging\n'), ((1138, 1162), 're.compile', 're.compile', (['"""\\\\((.*)\\\\)"""'], {}), "('\\\\((.*)\\\\)')\n", (1148, 1162), False, 'import re\n'), ((4610, 4686), 'hdx.utilities.dictandlist.list_distribute_contents', 'list_distribute_contents', (['self.datasets', "(lambda x: x['organization']['name'])"], {}), "(self.datasets, lambda x: x['organization']['name'])\n", (4634, 4686), False, 'from hdx.utilities.dictandlist import dict_of_lists_add, list_distribute_contents\n'), ((4753, 4805), 'hdx.freshness.database.dbrun.DBRun', 'DBRun', ([], {'run_number': 'self.run_number', 'run_date': 'self.now'}), '(run_number=self.run_number, run_date=self.now)\n', (4758, 4805), False, 'from hdx.freshness.database.dbrun import DBRun\n'), ((2503, 2529), 'hdx.data.dataset.Dataset.get_all_datasets', 'Dataset.get_all_datasets', ([], {}), '()\n', (2527, 2529), False, 'from hdx.data.dataset import Dataset\n'), ((2803, 2829), 'datetime.datetime.utcnow', 'datetime.datetime.utcnow', ([], {}), '()\n', (2827, 2829), False, 'import datetime\n'), ((4085, 4133), 'hdx.freshness.database.dbresource.DBResource.url.notlike', 'DBResource.url.notlike', (['f"""%{self.url_internal}%"""'], {}), "(f'%{self.url_internal}%')\n", (4107, 4133), False, 'from hdx.freshness.database.dbresource import DBResource\n'), ((4197, 4211), 'sqlalchemy.and_', 'and_', (['*filters'], {}), '(*filters)\n', (4201, 4211), False, 'from sqlalchemy import and_, exists\n'), ((5418, 5485), 'hdx.utilities.dictandlist.dict_of_lists_add', 'dict_of_lists_add', (['self.resource_what_updated', '"""total"""', 'resource_id'], {}), "(self.resource_what_updated, 'total', resource_id)\n", (5435, 5485), False, 'from hdx.utilities.dictandlist import dict_of_lists_add, list_distribute_contents\n'), ((5588, 5646), 'dateutil.parser.parse', 'parser.parse', (["resource['metadata_modified']"], 
{'ignoretz': '(True)'}), "(resource['metadata_modified'], ignoretz=True)\n", (5600, 5646), False, 'from dateutil import parser\n'), ((5705, 5759), 'dateutil.parser.parse', 'parser.parse', (["resource['last_modified']"], {'ignoretz': '(True)'}), "(resource['last_modified'], ignoretz=True)\n", (5717, 5759), False, 'from dateutil import parser\n'), ((6123, 6358), 'hdx.freshness.database.dbresource.DBResource', 'DBResource', ([], {'run_number': 'self.run_number', 'id': 'resource_id', 'name': 'name', 'dataset_id': 'dataset_id', 'url': 'url', 'last_modified': 'last_modified', 'metadata_modified': 'metadata_modified', 'latest_of_modifieds': 'last_modified', 'what_updated': '"""firstrun"""'}), "(run_number=self.run_number, id=resource_id, name=name,\n dataset_id=dataset_id, url=url, last_modified=last_modified,\n metadata_modified=metadata_modified, latest_of_modifieds=last_modified,\n what_updated='firstrun')\n", (6133, 6358), False, 'from hdx.freshness.database.dbresource import DBResource\n'), ((9541, 9606), 'hdx.utilities.dictandlist.dict_of_lists_add', 'dict_of_lists_add', (['self.dataset_what_updated', '"""total"""', 'dataset_id'], {}), "(self.dataset_what_updated, 'total', dataset_id)\n", (9558, 9606), False, 'from hdx.utilities.dictandlist import dict_of_lists_add, list_distribute_contents\n'), ((13280, 13337), 'dateutil.parser.parse', 'parser.parse', (["dataset['metadata_modified']"], {'ignoretz': '(True)'}), "(dataset['metadata_modified'], ignoretz=True)\n", (13292, 13337), False, 'from dateutil import parser\n'), ((15064, 15520), 'hdx.freshness.database.dbdataset.DBDataset', 'DBDataset', ([], {'run_number': 'self.run_number', 'id': 'dataset_id', 'dataset_date': 'dataset_date', 'update_frequency': 'update_frequency', 'review_date': 'review_date', 'last_modified': 'last_modified', 'metadata_modified': 'metadata_modified', 'updated_by_script': 'updated_by_script', 'latest_of_modifieds': 'latest_of_modifieds', 'what_updated': 'what_updated', 'last_resource_updated': 
'last_resource_updated', 'last_resource_modified': 'last_resource_modified', 'fresh': 'fresh', 'error': 'error'}), '(run_number=self.run_number, id=dataset_id, dataset_date=\n dataset_date, update_frequency=update_frequency, review_date=\n review_date, last_modified=last_modified, metadata_modified=\n metadata_modified, updated_by_script=updated_by_script,\n latest_of_modifieds=latest_of_modifieds, what_updated=what_updated,\n last_resource_updated=last_resource_updated, last_resource_modified=\n last_resource_modified, fresh=fresh, error=error)\n', (15073, 15520), False, 'from hdx.freshness.database.dbdataset import DBDataset\n'), ((20155, 20211), 'hdx.utilities.dictandlist.list_distribute_contents', 'list_distribute_contents', (['resources_to_check', 'get_domain'], {}), '(resources_to_check, get_domain)\n', (20179, 20211), False, 'from hdx.utilities.dictandlist import dict_of_lists_add, list_distribute_contents\n'), ((20264, 20304), 'hdx.freshness.retrieval.retrieve', 'retrieve', (['resources_to_check', 'user_agent'], {}), '(resources_to_check, user_agent)\n', (20272, 20304), False, 'from hdx.freshness.retrieval import retrieve\n'), ((20992, 21040), 'hdx.utilities.dictandlist.list_distribute_contents', 'list_distribute_contents', (['hash_check', 'get_domain'], {}), '(hash_check, get_domain)\n', (21016, 21040), False, 'from hdx.utilities.dictandlist import dict_of_lists_add, list_distribute_contents\n'), ((21068, 21100), 'hdx.freshness.retrieval.retrieve', 'retrieve', (['hash_check', 'user_agent'], {}), '(hash_check, user_agent)\n', (21076, 21100), False, 'from hdx.freshness.retrieval import retrieve\n'), ((25931, 26003), 'hdx.utilities.dictandlist.dict_of_lists_add', 'dict_of_lists_add', (['self.resource_what_updated', 'what_updated', 'resource_id'], {}), '(self.resource_what_updated, what_updated, resource_id)\n', (25948, 26003), False, 'from hdx.utilities.dictandlist import dict_of_lists_add, list_distribute_contents\n'), ((30741, 30805), 
'hdx.utilities.dictandlist.dict_of_lists_add', 'dict_of_lists_add', (['self.dataset_what_updated', 'status', 'dataset_id'], {}), '(self.dataset_what_updated, status, dataset_id)\n', (30758, 30805), False, 'from hdx.utilities.dictandlist import dict_of_lists_add, list_distribute_contents\n'), ((30977, 31068), 'hdx.utilities.dictandlist.dict_of_lists_add', 'dict_of_lists_add', (['self.dataset_what_updated', 'datasets_to_check[dataset_id]', 'dataset_id'], {}), '(self.dataset_what_updated, datasets_to_check[dataset_id],\n dataset_id)\n', (30994, 31068), False, 'from hdx.utilities.dictandlist import dict_of_lists_add, list_distribute_contents\n'), ((1929, 1960), 'datetime.timedelta', 'datetime.timedelta', ([], {'days': 'nodays'}), '(days=nodays)\n', (1947, 1960), False, 'import datetime\n'), ((2633, 2684), 'hdx.freshness.testdata.serialize.serialize_datasets', 'serialize_datasets', (['self.testsession', 'self.datasets'], {}), '(self.testsession, self.datasets)\n', (2651, 2684), False, 'from hdx.freshness.testdata.serialize import serialize_datasets, serialize_hashresults, serialize_now, serialize_results\n'), ((2879, 2920), 'hdx.freshness.testdata.serialize.serialize_now', 'serialize_now', (['self.testsession', 'self.now'], {}), '(self.testsession, self.now)\n', (2892, 2920), False, 'from hdx.freshness.testdata.serialize import serialize_datasets, serialize_hashresults, serialize_now, serialize_results\n'), ((7998, 8085), 'hdx.utilities.dictandlist.dict_of_lists_add', 'dict_of_lists_add', (['self.resource_what_updated', 'dbresource.what_updated', 'resource_id'], {}), '(self.resource_what_updated, dbresource.what_updated,\n resource_id)\n', (8015, 8085), False, 'from hdx.utilities.dictandlist import dict_of_lists_add, list_distribute_contents\n'), ((8269, 8356), 'hdx.utilities.dictandlist.dict_of_lists_add', 'dict_of_lists_add', (['self.resource_what_updated', 'dbresource.what_updated', 'resource_id'], {}), '(self.resource_what_updated, dbresource.what_updated,\n 
resource_id)\n', (8286, 8356), False, 'from hdx.utilities.dictandlist import dict_of_lists_add, list_distribute_contents\n'), ((13443, 13496), 'dateutil.parser.parse', 'parser.parse', (["dataset['last_modified']"], {'ignoretz': '(True)'}), "(dataset['last_modified'], ignoretz=True)\n", (13455, 13496), False, 'from dateutil import parser\n'), ((13547, 13582), 'datetime.datetime', 'datetime.datetime', (['(1970)', '(1)', '(1)', '(0)', '(0)'], {}), '(1970, 1, 1, 0, 0)\n', (13564, 13582), False, 'import datetime\n'), ((13749, 13784), 'datetime.datetime', 'datetime.datetime', (['(1970)', '(1)', '(1)', '(0)', '(0)'], {}), '(1970, 1, 1, 0, 0)\n', (13766, 13784), False, 'import datetime\n'), ((14139, 14179), 'dateutil.parser.parse', 'parser.parse', (['review_date'], {'ignoretz': '(True)'}), '(review_date, ignoretz=True)\n', (14151, 14179), False, 'from dateutil import parser\n'), ((19744, 19815), 'hdx.utilities.dictandlist.dict_of_lists_add', 'dict_of_lists_add', (['self.dataset_what_updated', 'update_string', 'dataset_id'], {}), '(self.dataset_what_updated, update_string, dataset_id)\n', (19761, 19815), False, 'from hdx.utilities.dictandlist import dict_of_lists_add, list_distribute_contents\n'), ((20051, 20065), 'urllib.parse.urlparse', 'urlparse', (['x[0]'], {}), '(x[0])\n', (20059, 20065), False, 'from urllib.parse import urlparse\n'), ((20354, 20398), 'hdx.freshness.testdata.serialize.serialize_results', 'serialize_results', (['self.testsession', 'results'], {}), '(self.testsession, results)\n', (20371, 20398), False, 'from hdx.freshness.testdata.serialize import serialize_datasets, serialize_hashresults, serialize_now, serialize_results\n'), ((21150, 21203), 'hdx.freshness.testdata.serialize.serialize_hashresults', 'serialize_hashresults', (['self.testsession', 'hash_results'], {}), '(self.testsession, hash_results)\n', (21171, 21203), False, 'from hdx.freshness.testdata.serialize import serialize_datasets, serialize_hashresults, serialize_now, serialize_results\n'), 
((1715, 1735), 'hdx.hdx_configuration.Configuration.read', 'Configuration.read', ([], {}), '()\n', (1733, 1735), False, 'from hdx.hdx_configuration import Configuration\n'), ((2305, 2325), 'hdx.hdx_configuration.Configuration.read', 'Configuration.read', ([], {}), '()\n', (2323, 2325), False, 'from hdx.hdx_configuration import Configuration\n'), ((2542, 2562), 'hdx.hdx_configuration.Configuration.read', 'Configuration.read', ([], {}), '()\n', (2560, 2562), False, 'from hdx.hdx_configuration import Configuration\n'), ((3094, 3117), 'hdx.freshness.database.dbrun.DBRun.run_number.desc', 'DBRun.run_number.desc', ([], {}), '()\n', (3115, 3117), False, 'from hdx.freshness.database.dbrun import DBRun\n'), ((10181, 10270), 'hdx.freshness.database.dborganization.DBOrganization', 'DBOrganization', ([], {'name': 'organization_name', 'id': 'organization_id', 'title': 'organization_title'}), '(name=organization_name, id=organization_id, title=\n organization_title)\n', (10195, 10270), False, 'from hdx.freshness.database.dborganization import DBOrganization\n'), ((11187, 11380), 'hdx.freshness.database.dbinfodataset.DBInfoDataset', 'DBInfoDataset', ([], {'name': 'dataset_name', 'id': 'dataset_id', 'title': 'dataset_title', 'private': 'dataset_private', 'organization_id': 'organization_id', 'maintainer': 'dataset_maintainer', 'location': 'dataset_location'}), '(name=dataset_name, id=dataset_id, title=dataset_title,\n private=dataset_private, organization_id=organization_id, maintainer=\n dataset_maintainer, location=dataset_location)\n', (11200, 11380), False, 'from hdx.freshness.database.dbinfodataset import DBInfoDataset\n'), ((19234, 19306), 'hdx.utilities.dictandlist.dict_of_lists_add', 'dict_of_lists_add', (['self.resource_what_updated', 'what_updated', 'resource_id'], {}), '(self.resource_what_updated, what_updated, resource_id)\n', (19251, 19306), False, 'from hdx.utilities.dictandlist import dict_of_lists_add, list_distribute_contents\n'), ((26309, 26348), 
'dateutil.parser.parse', 'parser.parse', (["resource['last_modified']"], {}), "(resource['last_modified'])\n", (26321, 26348), False, 'from dateutil import parser\n'), ((8710, 8737), 'datetime.timedelta', 'datetime.timedelta', ([], {'days': '(30)'}), '(days=30)\n', (8728, 8737), False, 'import datetime\n'), ((24083, 24146), 'sqlalchemy.and_', 'and_', (['(DBResource.id == resource_id)', '(DBResource.md5_hash == hash)'], {}), '(DBResource.id == resource_id, DBResource.md5_hash == hash)\n', (24087, 24146), False, 'from sqlalchemy import and_, exists\n'), ((24027, 24035), 'sqlalchemy.exists', 'exists', ([], {}), '()\n', (24033, 24035), False, 'from sqlalchemy import and_, exists\n')] |
# -*- coding: utf-8 -*-
#
# views.py
# wide-language-index-demo
#
"""
Public facing frontpage.
"""
import json
import random
from flask import (Blueprint, request, render_template, redirect)
from . import forms
from ..index import util
# Blueprint serving the public-facing pages; static assets live one level up.
blueprint = Blueprint('public', __name__, static_folder="../static")
@blueprint.route("/", methods=["GET", "POST"])
def home():
    """Landing page: show the search form, or dispatch on a ?language= query."""
    language = request.args.get('language')
    if language is None:
        # No query at all -- render the bare search form.
        return render_template("public/home.html",
                               form=forms.SearchForm(),
                               has_query=False)
    if not language:
        # Empty ?language= parameter: strip it from the URL.
        return redirect('/')
    return _render_language(language)
def _render_language(language):
    """Render the home page for one language, or redirect when 'random' is asked.

    Picks a random sample record for the language (when any exist) and passes
    it, pretty-printed as JSON, to the template along with validity info.
    """
    index = util.get_index()
    if language == 'random':
        # Pick an arbitrary known language and bounce to its canonical URL.
        chosen = random.choice(list(index.keys()))
        return redirect('/?language={0}'.format(chosen))
    records = index.get(language)
    inverted_name = util.get_languages().get(language)
    if records:
        record = random.choice(records)
        record_json = json.dumps(record, indent=2, sort_keys=True)
    else:
        record = record_json = None
    return render_template(
        "public/home.html",
        language=language,
        form=forms.SearchForm(request.args),
        record=record,
        record_json=record_json,
        inverted_name=inverted_name,
        has_query=bool(request.args),
        is_valid=inverted_name is not None,
    )
is_valid=bool(inverted_name is not None))
| [
"flask.render_template",
"flask.request.args.get",
"random.choice",
"json.dumps",
"flask.redirect",
"flask.Blueprint"
] | [((255, 311), 'flask.Blueprint', 'Blueprint', (['"""public"""', '__name__'], {'static_folder': '"""../static"""'}), "('public', __name__, static_folder='../static')\n", (264, 311), False, 'from flask import Blueprint, request, render_template, redirect\n'), ((388, 416), 'flask.request.args.get', 'request.args.get', (['"""language"""'], {}), "('language')\n", (404, 416), False, 'from flask import Blueprint, request, render_template, redirect\n'), ((596, 659), 'flask.render_template', 'render_template', (['"""public/home.html"""'], {'form': 'form', 'has_query': '(False)'}), "('public/home.html', form=form, has_query=False)\n", (611, 659), False, 'from flask import Blueprint, request, render_template, redirect\n'), ((478, 491), 'flask.redirect', 'redirect', (['"""/"""'], {}), "('/')\n", (486, 491), False, 'from flask import Blueprint, request, render_template, redirect\n'), ((1083, 1105), 'random.choice', 'random.choice', (['records'], {}), '(records)\n', (1096, 1105), False, 'import random\n'), ((1128, 1172), 'json.dumps', 'json.dumps', (['record'], {'indent': '(2)', 'sort_keys': '(True)'}), '(record, indent=2, sort_keys=True)\n', (1138, 1172), False, 'import json\n')] |
# ### Part 2 - The Northwind Database
# Using `sqlite3`, connect to the given `northwind_small.sqlite3` database.
import sqlite3
# Open the local Northwind SQLite database and grab a cursor for the
# queries below. NOTE(review): assumes the .sqlite3 file sits in the
# current working directory.
connection = sqlite3.connect('northwind_small.sqlite3')
cursor = connection.cursor()
print("CURSOR", cursor)
# 
# Above is an entity-relationship diagram - a picture summarizing the schema and
# relationships in the database. Note that it was generated using Microsoft
# Access, and some of the specific table/field names are different in the provided
# data. You can see all the tables available to SQLite as follows:
# ```python
# >> > cursor.execute("SELECT name FROM sqlite_master WHERE type='table' ORDER BY name").fetchall()
# [('Category',), ('Customer',), ('CustomerCustomerDemo',),
# ('CustomerDemographic',), ('Employee',), ('EmployeeTerritory',), ('Order',),
# ('OrderDetail',), ('Product',), ('Region',), ('Shipper',), ('Supplier',),
# ('Territory',)]
# ```
# *Warning*: unlike the diagram, the tables in SQLite are singular and not plural
# (do not end in `s`). And you can see the schema(`CREATE TABLE` statement)
# behind any given table with:
# ```python
# >> > cursor.execute('SELECT sql FROM sqlite_master WHERE name="Customer";').fetchall()
# [('CREATE TABLE "Customer" \n(\n "Id" VARCHAR(8000) PRIMARY KEY, \n
# "CompanyName" VARCHAR(8000) NULL, \n "ContactName" VARCHAR(8000) NULL, \n
# "ContactTitle" VARCHAR(8000) NULL, \n "Address" VARCHAR(8000) NULL, \n "City"
# VARCHAR(8000) NULL, \n "Region" VARCHAR(8000) NULL, \n "PostalCode"
# VARCHAR(8000) NULL, \n "Country" VARCHAR(8000) NULL, \n "Phone" VARCHAR(8000)
# NULL, \n "Fax" VARCHAR(8000) NULL \n)',)]
# ```
# In particular note that the * primary * key is `Id`, and not `CustomerId`. On
# other tables(where it is a * foreign * key) it will be `CustomerId`. Also note -
# the `Order` table conflicts with the `ORDER` keyword! We'll just avoid that
# particular table, but it's a good lesson in the danger of keyword conflicts.
# Answer the following questions(each is from a single table):
# - What are the ten most expensive items(per unit price) in the database?
# query = "SELECT COUNT (*) FROM charactercreator_character;"
# character_total = cursor.execute(query).fetchall()
# print("character_total", character_total)
# Ten most expensive products, ranked by unit price (highest first).
query = """
SELECT ProductName, UnitPrice FROM Product
ORDER BY UnitPrice DESC LIMIT 10;
"""
exp_items = cursor.execute(query).fetchall()
print("10_exp_items", exp_items)
# - What is the average age of an employee at the time of their hiring?
# (Hint: a lot of arithmetic works with dates.)
# Average age at hire. NOTE(review): HireDate/BirthDate are stored as text;
# SQLite's AVG coerces each string to its leading numeric prefix (the year),
# so this difference approximates "average hire year minus average birth
# year" -- confirm that precision is acceptable for the exercise.
query = """
SELECT AVG(HireDate) -AVG(BirthDate) as avg_hiring_age FROM Employee
"""
avg_age = cursor.execute(query).fetchall()
print("avg_age", avg_age)
# - (*Stretch*) How does the average age of employee at hire vary by city?
# Your code(to load and query the data) should be saved in `northwind.py`, and
# added to the repository. Do your best to answer in purely SQL, but if necessary
# use Python/other logic to help.
### Part 3 - Sailing the Northwind Seas
# You've answered some basic questions from the Northwind database, looking at
# individual tables - now it's time to put things together, and `JOIN`!
# Using `sqlite3` in `northwind.py`, answer the following:
# - What are the ten most expensive items(per unit price) in the database * and*
# their suppliers?
# Ten most expensive products together with their supplier rows
# (LEFT JOIN keeps products even if the supplier record is missing).
query1 = """
SELECT * FROM Product
LEFT JOIN Supplier ON Product.SupplierId = Supplier.Id
ORDER BY UnitPrice DESC LIMIT 10;
"""
exp_items_suppliers = cursor.execute(query1).fetchall()
print("10_exp_items_and_suppliers", exp_items_suppliers)
# - What is the largest category(by number of unique products in it)?
# Largest category by number of unique products in it.
# The original query only joined and sorted by name (the author marked it
# unfinished); counting distinct products per category and keeping the top
# row actually answers the question.
query1 = """
SELECT Category.CategoryName, COUNT(DISTINCT Product.Id) AS product_count
FROM Category
LEFT JOIN Product ON Category.Id = Product.CategoryId
GROUP BY Category.Id
ORDER BY product_count DESC
LIMIT 1;
"""
largest_category = cursor.execute(query1).fetchall()
print("largest_category", largest_category)
# - (*Stretch*) Who's the employee with the most territories? Use `TerritoryId`
# (not name, region, or other fields) as the unique identifier for territories.
| [
"sqlite3.connect"
] | [((143, 185), 'sqlite3.connect', 'sqlite3.connect', (['"""northwind_small.sqlite3"""'], {}), "('northwind_small.sqlite3')\n", (158, 185), False, 'import sqlite3\n')] |
# -*- coding: utf-8 -*-
# This code is part of Qiskit.
#
# (C) Copyright IBM 2018, 2019.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
"""Utilities related to Qobj."""
from typing import Dict, Any, Optional, Union, List
from qiskit.qobj import QobjHeader, QasmQobj, PulseQobj
def _serialize_noise_model(config: Dict[str, Any]) -> Dict[str, Any]:
"""Traverse the dictionary looking for ``noise_model`` keys and apply
a transformation so it can be serialized.
Args:
config: The dictionary to traverse.
Returns:
The transformed dictionary.
"""
for k, v in config.items():
if isinstance(config[k], dict):
_serialize_noise_model(config[k])
else:
if k == 'noise_model':
try:
config[k] = v.to_dict(serializable=True)
except AttributeError:
# if .to_dict() fails is probably because the noise_model
# has been already transformed elsewhere
pass
return config
def update_qobj_config(
        qobj: Union[QasmQobj, PulseQobj],
        backend_options: Optional[Dict] = None,
        noise_model: Any = None
) -> Union[QasmQobj, PulseQobj]:
    """Update a ``Qobj`` configuration from backend options and a noise model.

    Args:
        qobj: Description of the job.
        backend_options: Backend options to merge into the configuration.
        noise_model: Noise model; overrides any same-named backend option.

    Returns:
        The updated ``Qobj``.
    """
    config = qobj.config.to_dict()
    # Merge backend options into the configuration dictionary.
    if backend_options:
        config.update(backend_options)
    # The explicit noise model takes precedence over backend options.
    if noise_model:
        config['noise_model'] = noise_model
    # Make any embedded noise models JSON-serializable, then rebuild config.
    qobj.config = QobjHeader.from_dict(_serialize_noise_model(config))
    return qobj
def dict_to_qobj(qobj_dict: Dict) -> Union[QasmQobj, PulseQobj]:
    """Convert a Qobj in dictionary format to an instance.

    Args:
        qobj_dict: Qobj in dictionary format.

    Returns:
        The corresponding QasmQobj or PulseQobj instance.
    """
    if qobj_dict['type'] != 'PULSE':
        return QasmQobj.from_dict(qobj_dict)
    # Pulse Qobjs carry serialized complex values that must be decoded first.
    _decode_pulse_qobj(qobj_dict)
    return PulseQobj.from_dict(qobj_dict)
def _decode_pulse_qobj(pulse_qobj: Dict) -> None:
    """Decode the serialized complex values of a pulse Qobj, in place.

    Pulse-library samples, instruction ``val`` fields, and ``amp`` parameters
    arrive as ``[real, imag]`` pairs and are converted to ``complex``.

    Args:
        pulse_qobj: Qobj to be decoded.
    """
    for entry in pulse_qobj['config']['pulse_library']:
        entry['samples'] = [_to_complex(sample) for sample in entry['samples']]
    for experiment in pulse_qobj['experiments']:
        for instruction in experiment['instructions']:
            if 'val' in instruction:
                instruction['val'] = _to_complex(instruction['val'])
            if 'parameters' in instruction and 'amp' in instruction['parameters']:
                instruction['parameters']['amp'] = _to_complex(
                    instruction['parameters']['amp'])
def _to_complex(value: Union[List[float], complex]) -> complex:
"""Convert the input value to type ``complex``.
Args:
value: Value to be converted.
Returns:
Input value in ``complex``.
Raises:
TypeError: If the input value is not in the expected format.
"""
if isinstance(value, list) and len(value) == 2:
return complex(value[0], value[1])
elif isinstance(value, complex):
return value
raise TypeError("{} is not in a valid complex number format.".format(value))
| [
"qiskit.qobj.QobjHeader.from_dict",
"qiskit.qobj.PulseQobj.from_dict",
"qiskit.qobj.QasmQobj.from_dict"
] | [((2355, 2383), 'qiskit.qobj.QobjHeader.from_dict', 'QobjHeader.from_dict', (['config'], {}), '(config)\n', (2375, 2383), False, 'from qiskit.qobj import QobjHeader, QasmQobj, PulseQobj\n'), ((2825, 2854), 'qiskit.qobj.QasmQobj.from_dict', 'QasmQobj.from_dict', (['qobj_dict'], {}), '(qobj_dict)\n', (2843, 2854), False, 'from qiskit.qobj import QobjHeader, QasmQobj, PulseQobj\n'), ((2783, 2813), 'qiskit.qobj.PulseQobj.from_dict', 'PulseQobj.from_dict', (['qobj_dict'], {}), '(qobj_dict)\n', (2802, 2813), False, 'from qiskit.qobj import QobjHeader, QasmQobj, PulseQobj\n')] |
"""Main module."""
import warnings
import os
import sys
import re
from six.moves import input
from activelearner import dataset, models, strategies, labeler, utils
# Define active learning labeling modes
def interactive_mode(data, keywords, querier, oracle, save_every,
                     path, file_name, print_progress):
    '''Run the interactive labeling loop.

    Repeatedly queries the strategy for an example, asks the oracle (a human
    at the console) for its label, and updates the dataset. After every
    ``save_every`` labels the dataset is pickled to ``path`` and the user is
    asked whether to continue with another batch.
    '''
    continue_cycle = True
    while continue_cycle:
        for i in range(save_every):
            query_id = querier.make_query()
            label = oracle.label(data.view[query_id],
                                 keywords=keywords)
            data.update(query_id, label)
            # Report progress every 5 labels. The original code computed
            # ``progress % 5 == 0`` on a fraction in (0, 1], which never
            # fired, and called an undefined ``update_progress`` helper
            # (a NameError had it ever fired).
            if (i + 1) % 5 == 0:
                print('Progress: {0:.0%} of current batch'.format(
                    (i + 1) / save_every))
        # Print updated class information
        if print_progress:
            data.get_dataset_stats()
        # Save dataset object so labeling work is never lost
        fname = os.path.join(path, '', file_name)
        utils.save_object(obj=data, filename=fname)
        # Ask the user whether to label another batch
        banner = f'Would you like to continue labeling another {save_every} examples? [(Y)es/(N)o]: '
        valid_input = set(['Yes', 'Y', 'y', 'yes', 'No', 'N', 'n', 'no'])
        continue_options = set(['Yes', 'Y', 'y', 'yes'])
        user_choice = input(banner)
        while user_choice not in valid_input:
            print(f'Invalid choice. Must be one of {valid_input}')
            user_choice = input(banner)
        continue_cycle = user_choice in continue_options
def batch_mode(data, keywords, querier, oracle, save_every,
               path, file_name):
    '''Implements loop for batch labeling.

    Queries the strategy once for a whole batch of example ids, asks the
    oracle to label each one, and pickles the dataset to ``path`` every
    ``save_every`` labels so progress is not lost.
    '''
    # Get batch of ids to query
    query_ids = querier.make_query()
    # Query oracle for labels
    for _, query_id in enumerate(query_ids):
        print('Query {} / {} in batch.\n'.format(_ + 1, len(query_ids)))
        label = oracle.label(data.view[query_id], keywords=keywords)
        data.update(query_id, label)
        # save progress every `save_every` labels
        if (_ + 1) % save_every == 0:
            print('Saving progress...')
            fname = os.path.join(path, '', file_name)
            utils.save_object(obj=data, filename=fname)
# Main function for active_learner algorithm
def run_active_learner(mode, data, querier, feature_type, label_name,
                       save_every=20, print_progress=True, **kwargs):
    '''
    Runs main active learning algorithm loop, prompting Oracle for correct label and
    updating dataset. Currently only uses random sampling query strategy. Not yet implemented option for "active" updating via model-based query strategy.
    Parameters
    ----------
    mode: string
        Sets the labeling mode. Currently support 'batch' or 'interactive'.
    dataset: dataset object
        Must be activelearner dataset object containing features X and labels Y.
        Current version only supports TextDataset class.
    querier: query strategy object
        Must be activelearner query strategy object of type 'QueryByCommittee',
        'QUIRE', 'RandomSampling', 'UncertaintySampling', or 'BatchUncertaintySampling.
        `BatchUncertaintySampling` only works with `batch` mode and vice versa.
    feature_type: string
        Identifies the data structure of the feature. Must be either
        'text' or 'image'.
    label_name: list of strings
        Let the label space be from 0 to len(label_name)-1, this list
        corresponds to each label's name. If label_names are numeric,
        e.g. 0,1,...,N must be entered as strings '0','1',...
    save_every: int (default=20)
        Number of iterations after which algorithm should save updated
        dataset and print labeling progress.
    print_progress: bool (default=True)
        Logical indicating whether to print labeling progress upon save.
    feature_name (optional): string
        The name of the feature being labeled. If provided, will be
        displayed to Oracle as a reminder when querying them.
    keywords (optional): list
        If feature_type is 'text', can provide a list of
        keywords to highlight when displayed in the console.
    path (optional): string
        A character string specifying path location for saving dataset. Default
        is current working directory.
    file_name (optional): string
        A character string specifying filename for saved dataset object. Defaults to
        'dataset' if unspecified.
    seed (optional): int
        A random seed to instantiate np.random.RandomState with. Defaults
        to 1 if unspecified.
    '''
    # Extract optional arguments, falling back to documented defaults.
    path = kwargs.pop('path', os.getcwd())
    file_name = kwargs.pop('file_name', 'dataset.pkl')
    feature_name = kwargs.pop('feature_name', None)
    keywords = kwargs.pop('keywords', None)
    seed = kwargs.pop('seed', 1)
    # Argument checking
    # TODO implement type checking version of entire packed
    assert mode in ['batch', 'interactive'], "Mode must be one of ['batch', 'interactive']"
    if not isinstance(data, dataset.textdataset.TextDataset) and \
            not isinstance(data, dataset.imagedataset.ImageDataset):
        raise TypeError("data must be of class TextDataset or ImageDataset from dataset submodule.")
    # The querier's class is validated by matching its module path, so any
    # strategy defined under activelearner.strategies is accepted.
    strategy_class = re.compile("activelearner.strategies")
    assert re.search(strategy_class, str(type(querier))), "querier must be of class 'BatchUncertaintySampling', 'UncertaintySampling', 'QUIRE', 'QueryByCommittee' or 'RandomSampling'"
    assert isinstance(save_every, int), "save_every must be a positive integer."
    assert save_every > 0, "save_every must be a positive integer."
    assert isinstance(print_progress, bool), "print_progress must be a boolean."
    assert isinstance(path, str), "path must be a string."
    assert isinstance(seed, int), "seed must be a integer."
    assert isinstance(file_name, str), "file_name must be a string."
    if feature_name is not None:
        assert isinstance(feature_name, str), "feature_name must be a string."
    assert feature_type in ['text', 'image']
    # Keywords only make sense for text features; warn rather than fail.
    if feature_type != 'text' and keywords is not None:
        warnings.warn("feature_type is not 'text', keywords will be ignored.")
    if feature_type == 'text' and keywords is not None:
        for word in keywords:
            assert isinstance(word, str)
    for name in label_name:
        assert isinstance(name, str), "label_name must be a string or list of strings."
    # Instantiate oracle
    oracle = labeler.AskOracle(feature_type=feature_type,
                              label_name=label_name,
                              feature_name=feature_name)
    # Run interactive model
    if mode == 'interactive':
        interactive_mode(data=data, keywords=keywords, querier=querier,
                         oracle=oracle, save_every=save_every, path=path,
                         file_name=file_name, print_progress=print_progress)
    if mode == 'batch':
        batch_mode(data=data, keywords=keywords, querier=querier,
                   oracle=oracle, save_every=save_every, path=path,
                   file_name=file_name)
"activelearner.labeler.AskOracle",
"six.moves.input",
"re.compile",
"os.path.join",
"os.getcwd",
"warnings.warn",
"activelearner.utils.save_object"
] | [((5402, 5440), 're.compile', 're.compile', (['"""activelearner.strategies"""'], {}), "('activelearner.strategies')\n", (5412, 5440), False, 'import re\n'), ((6618, 6716), 'activelearner.labeler.AskOracle', 'labeler.AskOracle', ([], {'feature_type': 'feature_type', 'label_name': 'label_name', 'feature_name': 'feature_name'}), '(feature_type=feature_type, label_name=label_name,\n feature_name=feature_name)\n', (6635, 6716), False, 'from activelearner import dataset, models, strategies, labeler, utils\n'), ((1001, 1034), 'os.path.join', 'os.path.join', (['path', '""""""', 'file_name'], {}), "(path, '', file_name)\n", (1013, 1034), False, 'import os\n'), ((1043, 1086), 'activelearner.utils.save_object', 'utils.save_object', ([], {'obj': 'data', 'filename': 'fname'}), '(obj=data, filename=fname)\n', (1060, 1086), False, 'from activelearner import dataset, models, strategies, labeler, utils\n'), ((1387, 1400), 'six.moves.input', 'input', (['banner'], {}), '(banner)\n', (1392, 1400), False, 'from six.moves import input\n'), ((4770, 4781), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (4779, 4781), False, 'import os\n'), ((6264, 6334), 'warnings.warn', 'warnings.warn', (['"""feature_type is not \'text\', keywords will be ignored."""'], {}), '("feature_type is not \'text\', keywords will be ignored.")\n', (6277, 6334), False, 'import warnings\n'), ((1541, 1554), 'six.moves.input', 'input', (['banner'], {}), '(banner)\n', (1546, 1554), False, 'from six.moves import input\n'), ((2214, 2247), 'os.path.join', 'os.path.join', (['path', '""""""', 'file_name'], {}), "(path, '', file_name)\n", (2226, 2247), False, 'import os\n'), ((2260, 2303), 'activelearner.utils.save_object', 'utils.save_object', ([], {'obj': 'data', 'filename': 'fname'}), '(obj=data, filename=fname)\n', (2277, 2303), False, 'from activelearner import dataset, models, strategies, labeler, utils\n')] |
from PIL import Image
import numpy as np
from skimage import transform
# Target spatial size every image is resized to before being fed downstream.
IMG_HEIGHT = 100
IMG_WIDTH = 100
def load(filename):
    """Read an image file and return a (1, IMG_HEIGHT, IMG_WIDTH, 3) float array.

    Pixel values are scaled to [0, 1] as float32, the image is resized, and a
    leading batch dimension is added.
    """
    image = Image.open(filename)
    pixels = np.array(image).astype('float32') / 255
    resized = transform.resize(pixels, (IMG_HEIGHT, IMG_WIDTH, 3))
    return np.expand_dims(resized, axis=0)
| [
"numpy.array",
"numpy.expand_dims",
"PIL.Image.open",
"skimage.transform.resize"
] | [((142, 162), 'PIL.Image.open', 'Image.open', (['filename'], {}), '(filename)\n', (152, 162), False, 'from PIL import Image\n'), ((268, 322), 'skimage.transform.resize', 'transform.resize', (['np_image', '(IMG_HEIGHT, IMG_WIDTH, 3)'], {}), '(np_image, (IMG_HEIGHT, IMG_WIDTH, 3))\n', (284, 322), False, 'from skimage import transform\n'), ((338, 370), 'numpy.expand_dims', 'np.expand_dims', (['np_image'], {'axis': '(0)'}), '(np_image, axis=0)\n', (352, 370), True, 'import numpy as np\n'), ((178, 196), 'numpy.array', 'np.array', (['np_image'], {}), '(np_image)\n', (186, 196), True, 'import numpy as np\n')] |
from __future__ import print_function
import os.path
from googleapiclient.discovery import build
from google_auth_oauthlib.flow import InstalledAppFlow
from google.auth.transport.requests import Request
from google.oauth2.credentials import Credentials
import sys
import json
# OAuth scopes: full Drive access plus access to files created by this app.
SCOPES = ['https://www.googleapis.com/auth/drive','https://www.googleapis.com/auth/drive.file']
def delete_file(file_id):
    """Delete a file from Google Drive by its file id.

    Credentials are loaded from ``./creds/token.json`` (local run) or, as a
    fallback, ``/app/google-credentials.json`` (Heroku deployment). If neither
    exists the process exits, since ``upload.py`` must be run first to create
    the token.

    Returns 0 on success and 1 if the API call fails.
    """
    creds = None
    # The token file stores the user's access and refresh tokens, and is
    # created automatically when the authorization flow completes for the
    # first time.
    if os.path.exists('./creds/token.json'):
        creds = Credentials.from_authorized_user_file('./creds/token.json', SCOPES)
    # If this file isn't there, this may be a Heroku instance.
    elif os.path.exists('/app/google-credentials.json'):
        creds = Credentials.from_authorized_user_file('/app/google-credentials.json', SCOPES)
    else:
        print("Please run upload upload.py before using it!!!")
        sys.exit(1)
    service = build('drive', 'v3', credentials=creds)
    try:
        service.files().delete(fileId=file_id).execute()
        return 0
    except Exception:
        # The original bare ``except:`` also swallowed SystemExit and
        # KeyboardInterrupt; catch only genuine errors from the API call.
        print("ERROR, file didnt get deleted")
        return 1
"googleapiclient.discovery.build",
"google.oauth2.credentials.Credentials.from_authorized_user_file",
"sys.exit"
] | [((1111, 1150), 'googleapiclient.discovery.build', 'build', (['"""drive"""', '"""v3"""'], {'credentials': 'creds'}), "('drive', 'v3', credentials=creds)\n", (1116, 1150), False, 'from googleapiclient.discovery import build\n'), ((649, 716), 'google.oauth2.credentials.Credentials.from_authorized_user_file', 'Credentials.from_authorized_user_file', (['"""./creds/token.json"""', 'SCOPES'], {}), "('./creds/token.json', SCOPES)\n", (686, 716), False, 'from google.oauth2.credentials import Credentials\n'), ((924, 1001), 'google.oauth2.credentials.Credentials.from_authorized_user_file', 'Credentials.from_authorized_user_file', (['"""/app/google-credentials.json"""', 'SCOPES'], {}), "('/app/google-credentials.json', SCOPES)\n", (961, 1001), False, 'from google.oauth2.credentials import Credentials\n'), ((1084, 1095), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (1092, 1095), False, 'import sys\n')] |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 1999-2017 Alibaba Group Holding Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import time
import cProfile
from pstats import Stats
# switch on to run in pure Python mode
from odps import options
# options.force_py = True
from odps.compat import unittest, Decimal
from odps.tests.core import TestBase
from odps.models import Schema
from datetime import datetime
# Profiling switches -- remember to reset both to False before committing.
ENABLE_PROFILE = False  # wrap each test in cProfile and print hot spots
DUMP_PROFILE = False  # additionally dump raw stats to 'profile.out'
class Test(TestBase):
    """Throughput benchmarks for the ODPS table tunnel read/write paths."""
    # Benchmark parameters: compression and buffer size for the buffered
    # writer, number of records per run, and a fixed string payload.
    COMPRESS_DATA = True
    BUFFER_SIZE = 1024*1024
    DATA_AMOUNT = 100000
    STRING_LITERAL = "Soft kitty, warm kitty, little ball of fur; happy kitty, sleepy kitty, purr, purr"
    def setUp(self):
        """Optionally start the profiler and build the six-column test schema."""
        TestBase.setUp(self)
        if ENABLE_PROFILE:
            self.pr = cProfile.Profile()
            self.pr.enable()
        fields = ['a', 'b', 'c', 'd', 'e', 'f']
        types = ['bigint', 'double', 'datetime', 'boolean', 'string', 'decimal']
        self.SCHEMA = Schema.from_lists(fields, types)
    def tearDown(self):
        """Report profiler hot spots (if enabled), then run base teardown."""
        if ENABLE_PROFILE:
            if DUMP_PROFILE:
                self.pr.dump_stats('profile.out')
            p = Stats(self.pr)
            p.strip_dirs()
            p.sort_stats('time')
            p.print_stats(40)
            # Drill into the serialization hot spots of interest.
            p.print_callees('types.py:846\(validate_value', 20)
            p.print_callees('types.py:828\(_validate_primitive_value', 20)
            p.print_callees('tabletunnel.py:185\(write', 20)
        TestBase.teardown(self)
    def testWrite(self):
        """Benchmark the unbuffered record writer; prints MiB/s throughput."""
        table_name = 'pyodps_test_tunnel_write_performance'
        self.odps.create_table(table_name, schema=self.SCHEMA, if_not_exists=True)
        ss = self.tunnel.create_upload_session(table_name)
        r = ss.new_record()
        start = time.time()
        with ss.open_record_writer(0) as writer:
            # Reuse one record object; only the field values are rewritten.
            for i in range(self.DATA_AMOUNT):
                r[0] = 2**63-1
                r[1] = 0.0001
                r[2] = datetime(2015, 11, 11)
                r[3] = True
                r[4] = self.STRING_LITERAL
                r[5] = Decimal('3.15')
                writer.write(r)
            n_bytes = writer.n_bytes
        print(n_bytes, 'bytes', float(n_bytes) / 1024 / 1024 / (time.time() - start), 'MiB/s')
        ss.commit([0])
        self.odps.delete_table(table_name, if_exists=True)
    def testRead(self):
        """Benchmark the record reader after seeding the table with test data."""
        table_name = 'pyodps_test_tunnel_read_performance'
        self.odps.delete_table(table_name, if_exists=True)
        t = self.odps.create_table(table_name, schema=self.SCHEMA)
        def gen_data():
            # Lazily generate the rows to write, one record per iteration.
            for i in range(self.DATA_AMOUNT):
                r = t.new_record()
                r[0] = 2 ** 63 - 1
                r[1] = 0.0001
                r[2] = datetime(2015, 11, 11)
                r[3] = True
                r[4] = self.STRING_LITERAL
                r[5] = Decimal('3.15')
                yield r
        self.odps.write_table(t, gen_data())
        # Profile only the download path, not the seeding above.
        if ENABLE_PROFILE:
            self.pr = cProfile.Profile()
            self.pr.enable()
        ds = self.tunnel.create_download_session(table_name)
        start = time.time()
        cnt = 0
        with ds.open_record_reader(0, ds.count) as reader:
            for _ in reader:
                cnt += 1
            n_bytes = reader.n_bytes
        print(n_bytes, 'bytes', float(n_bytes) / 1024 / 1024 / (time.time() - start), 'MiB/s')
        self.assertEqual(self.DATA_AMOUNT, cnt)
        self.odps.delete_table(table_name, if_exists=True)
    def testBufferedWrite(self):
        """Benchmark the buffered writer with compression; prints MiB/s."""
        table_name = 'test_tunnel_bufferred_write'
        self.odps.create_table(table_name, schema=self.SCHEMA, if_not_exists=True)
        ss = self.tunnel.create_upload_session(table_name)
        r = ss.new_record()
        start = time.time()
        with ss.open_record_writer(buffer_size=self.BUFFER_SIZE, compress=self.COMPRESS_DATA) as writer:
            for i in range(self.DATA_AMOUNT):
                r[0] = 2**63-1
                r[1] = 0.0001
                r[2] = datetime(2015, 11, 11)
                r[3] = True
                r[4] = self.STRING_LITERAL
                r[5] = Decimal('3.15')
                writer.write(r)
            n_bytes = writer.n_bytes
        print(n_bytes, 'bytes', float(n_bytes) / 1024 / 1024 / (time.time() - start), 'MiB/s')
        ss.commit(writer.get_blocks_written())
        self.odps.delete_table(table_name, if_exists=True)
if __name__ == '__main__':
    # Allow running this performance suite directly as a script.
    unittest.main()
| [
"datetime.datetime",
"odps.models.Schema.from_lists",
"odps.compat.Decimal",
"odps.tests.core.TestBase.setUp",
"odps.compat.unittest.main",
"pstats.Stats",
"odps.tests.core.TestBase.teardown",
"cProfile.Profile",
"time.time"
] | [((5042, 5057), 'odps.compat.unittest.main', 'unittest.main', ([], {}), '()\n', (5055, 5057), False, 'from odps.compat import unittest, Decimal\n'), ((1295, 1315), 'odps.tests.core.TestBase.setUp', 'TestBase.setUp', (['self'], {}), '(self)\n', (1309, 1315), False, 'from odps.tests.core import TestBase\n'), ((1564, 1596), 'odps.models.Schema.from_lists', 'Schema.from_lists', (['fields', 'types'], {}), '(fields, types)\n', (1581, 1596), False, 'from odps.models import Schema\n'), ((2057, 2080), 'odps.tests.core.TestBase.teardown', 'TestBase.teardown', (['self'], {}), '(self)\n', (2074, 2080), False, 'from odps.tests.core import TestBase\n'), ((2354, 2365), 'time.time', 'time.time', ([], {}), '()\n', (2363, 2365), False, 'import time\n'), ((3708, 3719), 'time.time', 'time.time', ([], {}), '()\n', (3717, 3719), False, 'import time\n'), ((4360, 4371), 'time.time', 'time.time', ([], {}), '()\n', (4369, 4371), False, 'import time\n'), ((1365, 1383), 'cProfile.Profile', 'cProfile.Profile', ([], {}), '()\n', (1381, 1383), False, 'import cProfile\n'), ((1744, 1758), 'pstats.Stats', 'Stats', (['self.pr'], {}), '(self.pr)\n', (1749, 1758), False, 'from pstats import Stats\n'), ((3581, 3599), 'cProfile.Profile', 'cProfile.Profile', ([], {}), '()\n', (3597, 3599), False, 'import cProfile\n'), ((2545, 2567), 'datetime.datetime', 'datetime', (['(2015)', '(11)', '(11)'], {}), '(2015, 11, 11)\n', (2553, 2567), False, 'from datetime import datetime\n'), ((2662, 2677), 'odps.compat.Decimal', 'Decimal', (['"""3.15"""'], {}), "('3.15')\n", (2669, 2677), False, 'from odps.compat import unittest, Decimal\n'), ((3328, 3350), 'datetime.datetime', 'datetime', (['(2015)', '(11)', '(11)'], {}), '(2015, 11, 11)\n', (3336, 3350), False, 'from datetime import datetime\n'), ((3445, 3460), 'odps.compat.Decimal', 'Decimal', (['"""3.15"""'], {}), "('3.15')\n", (3452, 3460), False, 'from odps.compat import unittest, Decimal\n'), ((4607, 4629), 'datetime.datetime', 'datetime', (['(2015)', '(11)', 
'(11)'], {}), '(2015, 11, 11)\n', (4615, 4629), False, 'from datetime import datetime\n'), ((4724, 4739), 'odps.compat.Decimal', 'Decimal', (['"""3.15"""'], {}), "('3.15')\n", (4731, 4739), False, 'from odps.compat import unittest, Decimal\n'), ((2811, 2822), 'time.time', 'time.time', ([], {}), '()\n', (2820, 2822), False, 'import time\n'), ((3950, 3961), 'time.time', 'time.time', ([], {}), '()\n', (3959, 3961), False, 'import time\n'), ((4873, 4884), 'time.time', 'time.time', ([], {}), '()\n', (4882, 4884), False, 'import time\n')] |
"""
Functions in this file allow redirecting stdout used by Fortran and C
extensions to a different file or devnull or stderr.
Example usage (same should work with stdout_redirect_1()):
# No print to stdout:
with stdout_redirect_2():
call_fortran_or_c_code()
# OR:
with stdout_redirect_1():
call_fortran_or_c_code()
# Get access to what was meant to be printed:
import io
f = io.BytesIO()
with stdout_redirect_2(f):
call_fortran_or_c_code()
print('Got stdout: "{0}"'.format(f.getvalue().decode('utf-8')))
"""
import os
import sys
import ctypes
import io
import tempfile
from contextlib import contextmanager
# Code based on:
# https://stackoverflow.com/questions/5081657/how-do-i-prevent-a-c-shared-library-to-print-on-stdout-in-python/17954769#17954769
@contextmanager
def stdout_redirect_1(to=os.devnull):
    """Temporarily redirect the process-level stdout (fd 1) to a file.

    Parameter *to* is str type and indicates file name (default:
    ``os.devnull``).  Because the redirection happens at the file-descriptor
    level via ``os.dup2``, output written by C and Fortran extensions is
    captured as well, not just Python's ``sys.stdout`` writes.
    """
    fd = sys.stdout.fileno()
    # assert that Python and C stdio write using the same file descriptor
    # assert libc.fileno(ctypes.c_void_p.in_dll(libc, "stdout")) == fd == 1
    def _redirect_stdout(to):
        # Close (and implicitly flush) the current Python stdout, point the
        # underlying fd at *to*, then rebuild sys.stdout on top of that fd.
        sys.stdout.close() # + implicit flush()
        os.dup2(to.fileno(), fd) # fd writes to 'to' file
        sys.stdout = os.fdopen(fd, 'w') # Python writes to fd
    # Duplicate the original fd first so stdout can be restored afterwards.
    with os.fdopen(os.dup(fd), 'w') as old_stdout:
        with open(to, 'w') as file_:
            _redirect_stdout(to=file_)
        try:
            yield # allow code to be run with the redirected stdout
        finally:
            _redirect_stdout(to=old_stdout) # restore stdout.
                                            # buffering and flags such as CLOEXEC may be different
# Code based on:
# https://eli.thegreenplace.net/2015/redirecting-all-kinds-of-stdout-in-python/
# Handle to the C runtime and to its ``stdout`` FILE* symbol; used below by
# stdout_redirect_2 to flush the C-level buffer before redirecting.
libc = ctypes.CDLL(None)
c_stdout = ctypes.c_void_p.in_dll(libc, 'stdout')
@contextmanager
def stdout_redirect_2(to=None):
    """Temporarily capture fd-level stdout into a binary stream.

    Parameter *to* is a writable binary stream or similar type (default: a
    fresh ``io.BytesIO``).  While the context is active, all stdout output —
    including output from C extensions — is written to a temporary file; on
    normal exit the captured bytes are copied into *to*.
    """
    if to is None:
        to = io.BytesIO()
    # The original fd stdout points to. Usually 1 on POSIX systems.
    original_stdout_fd = sys.stdout.fileno()
    def _redirect_stdout(to):
        """Redirect stdout to the given file descriptor."""
        # Flush the C-level buffer stdout
        libc.fflush(c_stdout)
        # Flush and close sys.stdout - also closes the file descriptor (fd)
        sys.stdout.close()
        # Make original_stdout_fd point to the same file as to_fd
        os.dup2(to, original_stdout_fd)
        # Create a new sys.stdout that points to the redirected fd
        sys.stdout = io.TextIOWrapper(os.fdopen(original_stdout_fd, 'wb'))
    # Save a copy of the original stdout fd in saved_stdout_fd
    saved_stdout_fd = os.dup(original_stdout_fd)
    try:
        # Create a temporary file and redirect stdout to it
        tfile = tempfile.TemporaryFile(mode='w+b')
        _redirect_stdout(tfile.fileno())
        # Yield to caller, then redirect stdout back to the saved fd
        yield
        _redirect_stdout(saved_stdout_fd)
        # Copy contents of temporary file to the given stream
        tfile.flush()
        tfile.seek(0, io.SEEK_SET)
        to.write(tfile.read())
    finally:
        # NOTE(review): if the body of the `with` raises, stdout is NOT
        # restored before the saved fd is closed here, leaving stdout broken
        # for the rest of the process — confirm whether that is acceptable.
        tfile.close()
        os.close(saved_stdout_fd)
| [
"os.dup2",
"os.close",
"io.BytesIO",
"os.dup",
"sys.stdout.close",
"ctypes.CDLL",
"os.fdopen",
"tempfile.TemporaryFile",
"sys.stdout.fileno",
"ctypes.c_void_p.in_dll"
] | [((1741, 1758), 'ctypes.CDLL', 'ctypes.CDLL', (['None'], {}), '(None)\n', (1752, 1758), False, 'import ctypes\n'), ((1770, 1808), 'ctypes.c_void_p.in_dll', 'ctypes.c_void_p.in_dll', (['libc', '"""stdout"""'], {}), "(libc, 'stdout')\n", (1792, 1808), False, 'import ctypes\n'), ((906, 925), 'sys.stdout.fileno', 'sys.stdout.fileno', ([], {}), '()\n', (923, 925), False, 'import sys\n'), ((2057, 2076), 'sys.stdout.fileno', 'sys.stdout.fileno', ([], {}), '()\n', (2074, 2076), False, 'import sys\n'), ((2677, 2703), 'os.dup', 'os.dup', (['original_stdout_fd'], {}), '(original_stdout_fd)\n', (2683, 2703), False, 'import os\n'), ((1115, 1133), 'sys.stdout.close', 'sys.stdout.close', ([], {}), '()\n', (1131, 1133), False, 'import sys\n'), ((1236, 1254), 'os.fdopen', 'os.fdopen', (['fd', '"""w"""'], {}), "(fd, 'w')\n", (1245, 1254), False, 'import os\n'), ((1951, 1963), 'io.BytesIO', 'io.BytesIO', ([], {}), '()\n', (1961, 1963), False, 'import io\n'), ((2324, 2342), 'sys.stdout.close', 'sys.stdout.close', ([], {}), '()\n', (2340, 2342), False, 'import sys\n'), ((2417, 2448), 'os.dup2', 'os.dup2', (['to', 'original_stdout_fd'], {}), '(to, original_stdout_fd)\n', (2424, 2448), False, 'import os\n'), ((2789, 2823), 'tempfile.TemporaryFile', 'tempfile.TemporaryFile', ([], {'mode': '"""w+b"""'}), "(mode='w+b')\n", (2811, 2823), False, 'import tempfile\n'), ((3183, 3208), 'os.close', 'os.close', (['saved_stdout_fd'], {}), '(saved_stdout_fd)\n', (3191, 3208), False, 'import os\n'), ((1298, 1308), 'os.dup', 'os.dup', (['fd'], {}), '(fd)\n', (1304, 1308), False, 'import os\n'), ((2554, 2589), 'os.fdopen', 'os.fdopen', (['original_stdout_fd', '"""wb"""'], {}), "(original_stdout_fd, 'wb')\n", (2563, 2589), False, 'import os\n')] |
#!/usr/bin/env python
import csv, os
def writeOut(termdic, termcodes, fileout, type):
    """Write normalization results to *fileout* as a tab-separated file.

    Parameters
    ----------
    termdic : iterable or dict
        For ``type == 1``, an iterable of terms aligned with *termcodes*;
        otherwise a mapping of parameter names to values.
    termcodes : iterable
        Per-term candidates shaped ``[(code, score), ...]`` where ``code``
        is the string "NIL" or an iterable of code strings.
    fileout : str
        Destination path.
    type : int
        ``1`` writes term/code/score rows; any other value dumps
        parameter/value pairs with a header row.
    """
    # 'with' guarantees the handle is closed even if a write raises (the
    # original leaked it on error), and newline='' is the csv-module
    # recommended mode so the writer fully controls line endings.
    with open(fileout, "w", newline='') as handle:
        # Single writer instead of the duplicated construction per branch.
        wr = csv.writer(handle, dialect='excel', delimiter="\t")
        if type == 1:
            for term, code in zip(termdic, termcodes):
                score = code[0][1]
                codigo = code[0][0]
                if codigo == "NIL":
                    wr.writerow([term, "NIL", score])
                else:
                    wr.writerow([term, ', '.join(codigo), score])
        else:
            wr.writerow(["parameter", "value"])
            for key in termdic:
                wr.writerow([key, termdic[key]])
def saveAnn(annpath, outpath, termcodes):
    """Copy the annotation file at *annpath* to *outpath*, appending the
    normalized code of each row as an extra tab-separated column.

    *termcodes* is aligned row-by-row with the annotation file; each entry is
    shaped ``[(code, score), ...]`` where ``code`` is either a plain string
    or a list of code strings (multiple codes are joined with '+').
    """
    annotation_content = []
    # 'with' closes the input handle — the original leaked it via
    # csv.reader(open(annpath)).
    with open(annpath) as ann_file:
        reader = csv.reader(ann_file, dialect='excel', delimiter="\t")
        for linea, code in zip(reader, termcodes):
            value = code[0][0]
            if isinstance(value, list):
                # Single-element lists collapse to the bare code; longer
                # lists become a '+'-joined composite code.
                elemento = str(value[0]) if len(value) == 1 else "+".join(value)
            else:
                elemento = str(value)
            annotation_content.append(linea + [elemento])
    # Save to file (newline='' lets the csv writer control line endings).
    with open(outpath, "w", newline='') as out_file:
        wr = csv.writer(out_file, dialect='excel', delimiter="\t")
        wr.writerows(annotation_content)
def prepare_output_path(out_path, filename, is_single):
    """Normalize *out_path* to end with the OS path separator, create the
    directory if missing, and choose the output file name.

    When *is_single* is true the fixed name "output_norm" is used (to work
    with option -t "1"); otherwise *filename* is kept unchanged.
    """
    # NOTE(review): out_path[-1] raises IndexError on an empty string —
    # confirm callers never pass one.
    if out_path[-1] != os.path.sep:
        out_path = out_path+os.path.sep
    # If folder don't exist, create it
    # NOTE(review): exists-then-makedirs is racy; os.makedirs(out_path,
    # exist_ok=True) would be the atomic equivalent.
    if not os.path.exists(out_path):
        os.makedirs(out_path)
    # Specify this to work with option -t ="1"
    if is_single:
        file_name = "output_norm"
    else:
        file_name = filename
    return out_path, file_name | [
"os.path.exists",
"csv.writer",
"os.makedirs"
] | [((1320, 1366), 'csv.writer', 'csv.writer', (['w'], {'dialect': '"""excel"""', 'delimiter': '"""\t"""'}), "(w, dialect='excel', delimiter='\\t')\n", (1330, 1366), False, 'import csv, os\n'), ((140, 186), 'csv.writer', 'csv.writer', (['w'], {'dialect': '"""excel"""', 'delimiter': '"""\t"""'}), "(w, dialect='excel', delimiter='\\t')\n", (150, 186), False, 'import csv, os\n'), ((484, 530), 'csv.writer', 'csv.writer', (['w'], {'dialect': '"""excel"""', 'delimiter': '"""\t"""'}), "(w, dialect='excel', delimiter='\\t')\n", (494, 530), False, 'import csv, os\n'), ((1623, 1647), 'os.path.exists', 'os.path.exists', (['out_path'], {}), '(out_path)\n', (1637, 1647), False, 'import csv, os\n'), ((1657, 1678), 'os.makedirs', 'os.makedirs', (['out_path'], {}), '(out_path)\n', (1668, 1678), False, 'import csv, os\n')] |
#!/usr/bin/env python
import json
import os
import re
import numpy
from gem.cnvReport import BaseDocumentHtml,BaseStatistical
class BuildHtmlAssembly(BaseDocumentHtml):
    """HTML report builder for the assembly-masking step of the CNV pipeline.

    Each method appends raw HTML fragments to ``self.vContent`` (provided by
    ``BaseDocumentHtml``); the final document is whatever was appended, in
    call order.
    """

    # Ordered (anchor, label) pairs shared by the links table and the
    # section headings; the anchor id doubles as the sectionTitle() key.
    _SECTIONS = (
        ("originalReference", "Original Reference"),
        ("basicMaskingRegions", "Basic Masked Regions"),
        ("kmerMaskingRegions", "Kmer Masking Regions"),
        ("accumulativePlot", "Acummulative Distribution Plot"),
        ("histogramPlot", "Histogram Plot"),
        ("kmerMaskedReference", "Kmer Masked Reference"),
        ("kmerMaskedReferencePad36", "Kmer Masked Reference Pad 36"),
        ("regionMaskedPad36", "Regions Masked 36 bps around any gap/repeat"),
    )
    _SECTION_TITLES = dict(_SECTIONS)

    def titleDocument(self):
        '''Append the main title of the web document.'''
        self.vContent.append("  <H1 id=\"title\"> <U> ASSEMBLY MASKING FOR CNV PIPELINE REPORT </U> </H1>\n")

    def linkersTable(self):
        '''Append the table of in-page navigation links, one per section.'''
        self.vContent.append("<a id=\"linkers\"></a>\n")
        self.vContent.append("<table id=\"linksTable-b\" >\n")
        self.vContent.append("  <tbody>\n")
        # Generating the rows from _SECTIONS keeps every href in sync with
        # the anchors emitted by sectionTitle(); this also fixes the
        # histogram link, which was missing the leading '#' and therefore
        # navigated away from the page instead of jumping to the section.
        for anchor, label in self._SECTIONS:
            self.vContent.append("    <tr>\n")
            self.vContent.append("      <td> <a class=\"link\" href=\"#" + anchor + "\">" + label + "</a> </td>\n")
            self.vContent.append("    </tr>\n")
        self.vContent.append("  </tbody>\n")
        self.vContent.append("</table>\n")
        self.vContent.append(" <br> \n")

    def sectionTitle(self, typeSection):
        '''Append an anchor plus an <H1> heading for the given section key.

        Unknown keys produce an empty anchor and an empty title, matching
        the historical if/elif fall-through behaviour.
        '''
        linker = ""
        sectionName = ""
        if typeSection in self._SECTION_TITLES:
            linker = "id=\"" + typeSection + "\""
            sectionName = self._SECTION_TITLES[typeSection]
        self.vContent.append("<a "+ linker +"></a>\n")
        self.vContent.append("<H1 id=\"section\"> " + sectionName + " </H1>\n")
class FastaComposition(BaseStatistical):
    '''Nucleotide-composition statistics parsed from a galculator report.'''

    def buildListHeaders(self):
        '''Implementation of list of headers construction'''
        self.fieldsList.append("Name")
        self.fieldsList.append("Contigs")
        self.fieldsList.append("Total (base pairs)")
        self.fieldsList.append("A")
        self.fieldsList.append("C")
        self.fieldsList.append("G")
        self.fieldsList.append("T")
        self.fieldsList.append("N")

    def parseInput(self):
        '''Parse the galculator output referenced by ``self.source_file``.

        Only the summary line containing "|" is used; its whitespace-
        separated fields hold the contig count and per-nucleotide totals.
        '''
        self.fieldValue['Name'] = os.path.basename(self.source_file)
        with open(self.source_file, "r") as statsFile:
            for line in statsFile:
                if line.find("|") != -1:
                    vFields = re.split(r'\s+', line.rstrip('\n'))
                    self.fieldValue['contigs'] = vFields[0]
                    # int() replaces the Python-2-only long() builtin; in
                    # Python 3, int is arbitrary precision, so there is no
                    # overflow risk for large genomes.
                    self.fieldValue['total'] = int(vFields[1])
                    self.fieldValue['A'] = int(vFields[4])
                    self.fieldValue['C'] = int(vFields[6])
                    self.fieldValue['G'] = int(vFields[8])
                    self.fieldValue['T'] = int(vFields[10])
                    self.fieldValue['N'] = int(vFields[12])

    def buildListValues(self):
        '''Get a list of value fields (counts formatted with separators).'''
        self.valuesList.append(self.fieldValue['Name'])
        self.valuesList.append(self.fieldValue['contigs'])
        self.valuesList.append("{:,}".format(self.fieldValue['total']))
        self.valuesList.append("{:,}".format(self.fieldValue['A']))
        self.valuesList.append("{:,}".format(self.fieldValue['C']))
        self.valuesList.append("{:,}".format(self.fieldValue['G']))
        self.valuesList.append("{:,}".format(self.fieldValue['T']))
        self.valuesList.append("{:,}".format(self.fieldValue['N']))

    def getHeaderValues(self):
        '''Return a dictionary of fields and its values'''
        self.allValues ["FastaComposition_"+os.path.basename(self.source_file)] = self.fieldValue
        return self.allValues
class BedStats(BaseStatistical):
    ''' BED file Statistics: number of regions and total base pairs. '''

    def buildListHeaders(self):
        '''Implementation of list of headers construction'''
        self.fieldsList.append("")
        self.fieldsList.append("Regions")
        self.fieldsList.append("Base Pairs")

    def parseInput(self):
        '''Count the regions and total bases covered by the BED file.'''
        totalLines = 0
        totalBases = 0
        with open(self.source_file, "r") as bedData:
            for line in bedData:
                totalLines += 1
                vFields = line.rstrip('\n').split("\t")
                # int() replaces the Python-2-only long() builtin;
                # BED intervals are half-open, so end - start is the length.
                totalBases += int(vFields[2]) - int(vFields[1])
        self.fieldValue['name'] = os.path.basename(self.source_file)
        self.fieldValue['regions'] = totalLines
        self.fieldValue['bases'] = totalBases

    def buildListValues(self):
        '''Get a list of value fields'''
        self.valuesList.append(self.fieldValue["name"])
        self.valuesList.append("{:,}".format(self.fieldValue["regions"]))
        self.valuesList.append("{:,}".format(self.fieldValue["bases"]))

    def getHeaderValues(self):
        '''Return a dictionary of fields and its values'''
        self.allValues [os.path.basename(self.source_file)] = self.fieldValue
        return self.allValues
class AccumulativeDistributionPlot(BaseStatistical):
    '''Wraps a cumulative-distribution plot image for report rendering.'''

    def buildListHeaders(self):
        '''Register the single column header for this statistic.'''
        self.fieldsList += ["Accumulative distribution plot"]

    def buildListValues(self):
        '''Register the plot image path as the single value field.'''
        self.valuesList += [self.source_file]

    def getHeaderValues(self):
        '''Expose the plot path under the "CumulativeDistribution" key.'''
        self.allValues.update(CumulativeDistribution=self.source_file)
        return self.allValues
def create_report(html_file,json_file,galculator_original_fasta,list_regions_mask,kmer_regions_mask, accumulative_plot,
                  histogram_plot,galculator_kmer_mask_fasta, galculator_kmer_pad_fasta,regions_padded):
    """ Generate HTML Report from mapping stat, mrCanavar log, control regions distribution, control regions plot and cutoffs file
    Parameters
    ----------
    html_file: html file to store the document
    json_file: json file to store the document
    galculator_original_fasta: output file from galculator for the original reference
    list_regions_mask: list of bed files for basic masking
    kmer_regions_mask: kmer masked regions bed file
    accumulative_plot: accumulative plot png image
    histogram_plot: histogram plot png image
    galculator_kmer_mask_fasta: output file from galculator for the kmer masked reference
    galculator_kmer_pad_fasta: output file from galculator for the padded masked reference
    regions_padded: padded regions masked bed
    """
    #Original Fasta Reference Nucleotide Composition
    originalFasta = FastaComposition(source_file=galculator_original_fasta,is_json = False)
    #Basic Bed Regions
    basicRegions = []
    for bedFile in list_regions_mask:
        basicRegions.append(BedStats(source_file=bedFile,is_json = False))
    #Kmer Masking (regions,bases)
    kmerMasking = BedStats(source_file=kmer_regions_mask,is_json = False)
    #Accumulative Plot
    pngPlot = AccumulativeDistributionPlot(source_file=accumulative_plot,is_json = False)
    #Histogram Plot
    histogramPlot = AccumulativeDistributionPlot(source_file=histogram_plot,is_json = False)
    #Kmer Masking Compositon
    kmerMaskingComposition = FastaComposition(source_file=galculator_kmer_mask_fasta,is_json = False)
    #Kmer Masking Padded Composition
    kmerMaskingPadded = FastaComposition(source_file=galculator_kmer_pad_fasta,is_json = False)
    #Regions Kmer Masked Pad 36
    # NOTE(review): the regions_padded parameter is never used — this line
    # re-reads kmer_regions_mask, which looks like it should be
    # regions_padded instead. Confirm before changing.
    pad36Masked = BedStats(source_file=kmer_regions_mask,is_json = False)
    #1. HTML REPORT CONSTRUCTION
    #1.1 Header HTML
    vHtmlContent = []
    htmlManager = BuildHtmlAssembly(vHtmlContent)
    htmlManager.addHtmlReportHeader()
    #1.2 Section Original Fasta
    htmlManager.addHtmlNewSection(originalFasta,"originalReference","Original Reference Nucleotide Composition","blue")
    #1.3 Section Basic masked Regions
    htmlManager.addHtmlNewSection(basicRegions,"basicMaskingRegions","Basic Masked Regions","green",is_stack=True)
    #1.4 Section Kmer Masking Regions
    htmlManager.addHtmlNewSection(kmerMasking,"kmerMaskingRegions","Kmer Masking Regions","blue")
    #1.5 Section Accumulative plot
    htmlManager.addHtmlNewSection(pngPlot,"accumulativePlot","Accumulative Distribution","green",is_image=True)
    #1.6 Section Histogram plot
    htmlManager.addHtmlNewSection(histogramPlot,"histogramPlot","Histogram Plot","blue",is_image=True)
    #1.7 Section Kmer Masked composition
    htmlManager.addHtmlNewSection(kmerMaskingComposition,"kmerMaskedReference","Kmer Masked Reference Nucleotide Composition","green")
    #1.8 Section Kmer Masked Padded Composition
    htmlManager.addHtmlNewSection(kmerMaskingPadded,"kmerMaskedReferencePad36","Pad 36 Masked Reference Nucleotide Composition","blue")
    #1.9 Section Pad 36 Masked Regions
    htmlManager.addHtmlNewSection(pad36Masked,"regionMaskedPad36","Pad 36 Masked Regions","green")
    #1.10 Close Html Report
    htmlManager.closeHtmlReport()
    #2. SAVE HTML DOCUMENT
    htmlManager.saveDocument(html_file)
    #3. CREATE CASCADE STYLE SHEET
    vCSScontent = []
    cssManager = BuildHtmlAssembly(vCSScontent)
    cssManager.buildStyleSheet()
    cssManager.saveDocument(os.path.dirname(os.path.abspath(html_file)) + "/style.css")
    #4.CREATE JSON
    jsonDataDocument = {}
    jsonDataDocument.update(originalFasta.getHeaderValues())
    for basicRegion in basicRegions:
        jsonDataDocument.update(basicRegion.getHeaderValues())
    jsonDataDocument.update(kmerMasking.getHeaderValues())
    jsonDataDocument.update(pngPlot.getHeaderValues())
    jsonDataDocument.update(histogramPlot.getHeaderValues())
    jsonDataDocument.update(kmerMaskingComposition.getHeaderValues())
    jsonDataDocument.update(kmerMaskingPadded.getHeaderValues())
    jsonDataDocument.update(pad36Masked.getHeaderValues())
    with open(json_file, 'w') as of:
        json.dump(jsonDataDocument, of, indent=2)
| [
"os.path.abspath",
"os.path.basename",
"json.dump"
] | [((4762, 4796), 'os.path.basename', 'os.path.basename', (['self.source_file'], {}), '(self.source_file)\n', (4778, 4796), False, 'import os\n'), ((6982, 7016), 'os.path.basename', 'os.path.basename', (['self.source_file'], {}), '(self.source_file)\n', (6998, 7016), False, 'import os\n'), ((12653, 12694), 'json.dump', 'json.dump', (['jsonDataDocument', 'of'], {'indent': '(2)'}), '(jsonDataDocument, of, indent=2)\n', (12662, 12694), False, 'import json\n'), ((7510, 7544), 'os.path.basename', 'os.path.basename', (['self.source_file'], {}), '(self.source_file)\n', (7526, 7544), False, 'import os\n'), ((6172, 6206), 'os.path.basename', 'os.path.basename', (['self.source_file'], {}), '(self.source_file)\n', (6188, 6206), False, 'import os\n'), ((11966, 11992), 'os.path.abspath', 'os.path.abspath', (['html_file'], {}), '(html_file)\n', (11981, 11992), False, 'import os\n')] |
import argparse
import os
import importlib
import tensorflow as tf
from modules.compound_model import CompoundModel
from modules.regressor_trainer import train_regressor
from modules.compound_model_trainer import train_compound_model
from modules.training_helper import evaluate_regression_MSE, get_tensorflow_datasets
from modules.experiment_helper import parse_experiment_settings
def create_model_by_experiment_settings(experiment_settings, load_from=''):
    """Build the model described by *experiment_settings*.

    Returns a CompoundModel (generator + discriminator + regressor) when the
    settings contain a 'compound_model' section, otherwise a bare regressor.
    *load_from* is a model save path; when non-empty, previously saved
    weights are restored from it.
    """
    def create_model_instance(model_category, model_name):
        # Models live in model_library/<category>s/<name>.py as class `Model`.
        model_class = importlib.import_module('model_library.' + model_category + 's.' + model_name).Model
        return model_class()
    if 'compound_model' in experiment_settings:
        compound_model_setting = experiment_settings['compound_model']
        sub_models = {
            model_category: create_model_instance(
                model_category,
                compound_model_setting[model_category]
            ) for model_category in ['generator', 'discriminator', 'regressor']
        }
        reset_regressor = ''
        # Without an explicit load_from, optionally seed the weights from a
        # previous (sub-)experiment named in 'load_pretrain_weight'.
        if not load_from and 'load_pretrain_weight' in compound_model_setting:
            pretrain_weight_setting = compound_model_setting['load_pretrain_weight']
            from_experiment = pretrain_weight_setting.get('from_experiment', experiment_settings['experiment_name'])
            from_sub_exp = pretrain_weight_setting['from_sub_exp']
            load_from = prepare_model_save_path(from_experiment, from_sub_exp)
            reset_regressor = pretrain_weight_setting.get('reset_regressor', '')
        if load_from:
            for model_category, sub_model in sub_models.items():
                sub_model.load_weights(load_from + '/' + model_category)
        # Optionally discard the loaded regressor weights and start it fresh.
        if reset_regressor:
            sub_models['regressor'] = create_model_instance('regressor', compound_model_setting['regressor'])
        compound_model = CompoundModel(**sub_models)
        return compound_model
    if 'regressor' in experiment_settings:
        model_category = 'regressor'
        regressor = create_model_instance(model_category, experiment_settings[model_category])
        if load_from:
            regressor.load_weights(load_from + '/' + model_category)
        return regressor
# Convenience helper for building an already-trained model, e.g. from a
# Jupyter notebook, without going through the training entry point.
def create_model_by_experiment_path_and_stage(experiment_path, sub_exp_name):
    """Load the saved model of *sub_exp_name* defined in *experiment_path*."""
    settings = parse_experiment_settings(experiment_path, only_this_sub_exp=sub_exp_name)
    save_path = prepare_model_save_path(settings['experiment_name'], settings['sub_exp_name'])
    return create_model_by_experiment_settings(settings, load_from=save_path)
def prepare_model_save_path(experiment_name, sub_exp_name):
    """Return 'saved_models/<experiment_name>/<sub_exp_name>', creating the
    parent directories if they do not exist yet.

    os.makedirs(..., exist_ok=True) replaces the two isdir/mkdir pairs: it
    creates both levels in one call and is free of the check-then-create
    race the original had when several processes started at once.
    """
    saving_folder = 'saved_models/' + experiment_name
    os.makedirs(saving_folder, exist_ok=True)
    model_save_path = saving_folder + '/' + sub_exp_name
    return model_save_path
def execute_sub_exp(sub_exp_settings, action, run_anyway):
    """Run one sub-experiment: either train a model or evaluate a saved one.

    A sub-experiment is skipped during training when its log directory
    already exists, unless *run_anyway* is true.
    """
    experiment_name = sub_exp_settings['experiment_name']
    sub_exp_name = sub_exp_settings['sub_exp_name']
    log_path = 'logs/%s/%s' % (experiment_name, sub_exp_name)
    print('Executing sub-experiment: %s' % sub_exp_name)
    if not run_anyway and action == 'train' and os.path.isdir(log_path):
        print('Sub-experiment already done before, skipped ಠ_ಠ')
        return
    summary_writer = tf.summary.create_file_writer(log_path)
    model_save_path = prepare_model_save_path(experiment_name, sub_exp_name)
    datasets = get_tensorflow_datasets(**sub_exp_settings['data'])
    if action == 'train':
        model = create_model_by_experiment_settings(sub_exp_settings)
        # NOTE(review): if neither key is present, trainer_function and
        # training_settings are unbound and the call below raises
        # NameError — confirm the settings schema guarantees one of them.
        if 'train_compound_model' in sub_exp_settings:
            training_settings = sub_exp_settings['train_compound_model']
            trainer_function = train_compound_model
        elif 'train_regressor' in sub_exp_settings:
            training_settings = sub_exp_settings['train_regressor']
            trainer_function = train_regressor
        trainer_function(
            model,
            datasets,
            summary_writer,
            model_save_path,
            **training_settings
        )
    elif action == 'evaluate':
        # Evaluation reloads the weights saved by a previous training run.
        model = create_model_by_experiment_settings(sub_exp_settings, load_from=model_save_path)
        for phase in datasets:
            loss = evaluate_regression_MSE(model, datasets[phase])
            print('%s MSE loss: %lf, RMSE loss: %lf' % (phase, loss, loss**0.5))
def main(action, experiment_path, GPU_limit, run_anyway):
    """Configure TensorFlow logging/GPU memory, then run every
    sub-experiment defined in the YAML file at *experiment_path*.

    GPU_limit is the per-GPU memory cap in MB passed to TensorFlow's
    virtual device configuration.
    """
    # shut up tensorflow!
    tf.get_logger().setLevel('ERROR')
    # restrict the memory usage
    gpus = tf.config.experimental.list_physical_devices('GPU')
    for gpu in gpus:
        tf.config.experimental.set_memory_growth(gpu, True)
        tf.config.experimental.set_virtual_device_configuration(
            gpu,
            [tf.config.experimental.VirtualDeviceConfiguration(memory_limit=GPU_limit)]
        )
    # parse yaml to get experiment settings
    experiment_list = parse_experiment_settings(experiment_path)
    for sub_exp_settings in experiment_list:
        execute_sub_exp(sub_exp_settings, action, run_anyway)
if __name__ == '__main__':
    # CLI entry point: `python <script> <action> <experiment_path> [...]`.
    parser = argparse.ArgumentParser()
    parser.add_argument('action', help='(train/evaluate)')
    parser.add_argument('experiment_path', help='name of the experiment setting, should match one of them file name in experiments folder')
    parser.add_argument('--GPU_limit', type=int, default=3000)
    parser.add_argument('--omit_completed_sub_exp', action='store_true')
    args = parser.parse_args()
    # run_anyway is the inverse of --omit_completed_sub_exp.
    main(args.action, args.experiment_path, args.GPU_limit, (not args.omit_completed_sub_exp))
| [
"modules.experiment_helper.parse_experiment_settings",
"importlib.import_module",
"argparse.ArgumentParser",
"modules.compound_model.CompoundModel",
"tensorflow.config.experimental.set_memory_growth",
"tensorflow.summary.create_file_writer",
"os.path.isdir",
"os.mkdir",
"tensorflow.get_logger",
"t... | [((2466, 2540), 'modules.experiment_helper.parse_experiment_settings', 'parse_experiment_settings', (['experiment_path'], {'only_this_sub_exp': 'sub_exp_name'}), '(experiment_path, only_this_sub_exp=sub_exp_name)\n', (2491, 2540), False, 'from modules.experiment_helper import parse_experiment_settings\n'), ((3686, 3725), 'tensorflow.summary.create_file_writer', 'tf.summary.create_file_writer', (['log_path'], {}), '(log_path)\n', (3715, 3725), True, 'import tensorflow as tf\n'), ((3820, 3871), 'modules.training_helper.get_tensorflow_datasets', 'get_tensorflow_datasets', ([], {}), "(**sub_exp_settings['data'])\n", (3843, 3871), False, 'from modules.training_helper import evaluate_regression_MSE, get_tensorflow_datasets\n'), ((4992, 5043), 'tensorflow.config.experimental.list_physical_devices', 'tf.config.experimental.list_physical_devices', (['"""GPU"""'], {}), "('GPU')\n", (5036, 5043), True, 'import tensorflow as tf\n'), ((5381, 5423), 'modules.experiment_helper.parse_experiment_settings', 'parse_experiment_settings', (['experiment_path'], {}), '(experiment_path)\n', (5406, 5423), False, 'from modules.experiment_helper import parse_experiment_settings\n'), ((5581, 5606), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (5604, 5606), False, 'import argparse\n'), ((1927, 1954), 'modules.compound_model.CompoundModel', 'CompoundModel', ([], {}), '(**sub_models)\n', (1940, 1954), False, 'from modules.compound_model import CompoundModel\n'), ((2922, 2951), 'os.path.isdir', 'os.path.isdir', (['"""saved_models"""'], {}), "('saved_models')\n", (2935, 2951), False, 'import os\n'), ((2962, 2986), 'os.mkdir', 'os.mkdir', (['"""saved_models"""'], {}), "('saved_models')\n", (2970, 2986), False, 'import os\n'), ((3056, 3084), 'os.path.isdir', 'os.path.isdir', (['saving_folder'], {}), '(saving_folder)\n', (3069, 3084), False, 'import os\n'), ((3095, 3118), 'os.mkdir', 'os.mkdir', (['saving_folder'], {}), '(saving_folder)\n', (3103, 3118), False, 
'import os\n'), ((3555, 3578), 'os.path.isdir', 'os.path.isdir', (['log_path'], {}), '(log_path)\n', (3568, 3578), False, 'import os\n'), ((5075, 5126), 'tensorflow.config.experimental.set_memory_growth', 'tf.config.experimental.set_memory_growth', (['gpu', '(True)'], {}), '(gpu, True)\n', (5115, 5126), True, 'import tensorflow as tf\n'), ((559, 637), 'importlib.import_module', 'importlib.import_module', (["('model_library.' + model_category + 's.' + model_name)"], {}), "('model_library.' + model_category + 's.' + model_name)\n", (582, 637), False, 'import importlib\n'), ((4911, 4926), 'tensorflow.get_logger', 'tf.get_logger', ([], {}), '()\n', (4924, 4926), True, 'import tensorflow as tf\n'), ((4686, 4733), 'modules.training_helper.evaluate_regression_MSE', 'evaluate_regression_MSE', (['model', 'datasets[phase]'], {}), '(model, datasets[phase])\n', (4709, 4733), False, 'from modules.training_helper import evaluate_regression_MSE, get_tensorflow_datasets\n'), ((5225, 5298), 'tensorflow.config.experimental.VirtualDeviceConfiguration', 'tf.config.experimental.VirtualDeviceConfiguration', ([], {'memory_limit': 'GPU_limit'}), '(memory_limit=GPU_limit)\n', (5274, 5298), True, 'import tensorflow as tf\n')] |
from bs4 import element
from lark import Lark
from parsers import SPEED_GRAMMAR
parser = Lark(SPEED_GRAMMAR)
class ParseError(Exception):
    # Module-specific error type; presumably raised by callers when a wiki
    # table cell cannot be parsed (no raise site visible in this file).
    pass
class TableRowHelper:
    """A simplified interface around a set of table rows from bs4.
    This abstracts all the evil stuff like rowspan and colspan so that the data
    can be reasonably parsed.
    """
    def __init__(self):
        # Maps column index -> (rows_remaining, td). Entries with
        # rows_remaining > 1 survive into the next row(s), which is how
        # rowspan cells stay visible.
        self.td_cache = {}
    def set_tds(self, tds: [element.Tag]):
        """Load the <td> cells of the next table row into the cache."""
        # Nuke any existing cache entries that "expire" this round (rowspan)
        for k in list(self.td_cache.keys()):
            (remaining, value) = self.td_cache[k]
            if remaining == 1:
                del self.td_cache[k]
            else:
                self.td_cache[k] = (remaining - 1, value)
        # Add new data for this row
        col_idx = 0
        for td in tds:
            rowspan = int(td.get("rowspan", 1))
            while col_idx in self.td_cache:
                col_idx += 1  # Skip cols that are around from a prev iteration due to rowspan
            # A colspan=N cell occupies N consecutive column slots.
            for _ in range(int(td.get("colspan", 1))):
                self.td_cache[col_idx] = (rowspan, td)
                col_idx += 1
    def get_td(self, idx) -> element.Tag:
        """Return the cell visible at column *idx* of the current row."""
        return self.td_cache[idx][1]
def is_uninteresting(tag: element.Tag):
    """True for markup (sup/img) that should be stripped before extracting text."""
    return tag.name == "sup" or tag.name == "img"
def parse_road_types_table(table) -> dict:
    """Parse the wiki table mapping road types to OSM tag filters.

    Returns ``{road_type: {'filter': ..., 'fuzzy_filter': ...}}`` built from
    the first three columns of each data row of *table* (a bs4 table tag).
    """
    result = {}
    table_row_helper = TableRowHelper()
    # Remove links (footnotes etc), images, etc. that don't serialize well.
    for junk_tag in table.find_all(is_uninteresting):
        junk_tag.decompose()
    for row in table.find_all("tr"):
        # Loop through columns
        tds = row.find_all("td")
        table_row_helper.set_tds(tds)
        # Header-only rows have no <td> and are skipped.
        if tds:
            road_type = table_row_helper.get_td(0).get_text(strip=True)
            tags_filter = table_row_helper.get_td(1).get_text(" ", strip=True)
            fuzzy_tags_filter = table_row_helper.get_td(2).get_text(" ", strip=True)
            result[road_type] = {'filter': tags_filter, 'fuzzy_filter': fuzzy_tags_filter}
    return result
def parse_speed_table(table, road_types: dict, speed_parse_func) -> dict:
    """Parse the wiki speed-limit table into per-country road classes.

    *road_types* is the mapping produced by parse_road_types_table;
    *speed_parse_func* converts a raw cell string into maxspeed tags and may
    raise on unparseable input.  Returns ``{'speed_limits': {country: [road
    classes]}, 'warnings': [messages]}``.
    """
    column_names = []
    result = {}
    warnings = []
    table_row_helper = TableRowHelper()
    # Remove links (footnotes etc), images, etc. that don't serialize well.
    for junk_tag in table.find_all(is_uninteresting):
        junk_tag.decompose()
    for row in table.find_all("tr"):
        # Handle column names
        th_tags = row.find_all("th")
        if len(th_tags) > 0:
            if len(column_names) == 0:
                # First header row: expand colspans into one name per column.
                for th in th_tags:
                    th_text = th.get_text(strip=True)
                    for _ in range(int(th.get("colspan", 1))):
                        column_names.append(th_text)
            else:
                # Subsequent header rows override earlier names in place.
                for (i, th) in enumerate(th_tags):
                    th_text = th.get_text(strip=True)
                    if th_text:
                        for j in range(int(th.get("colspan", 1))):
                            column_names[i + j] = th_text
        # Loop through columns
        tds = row.find_all("td")
        table_row_helper.set_tds(tds)
        if tds:
            country_code = table_row_helper.get_td(0).get_text(strip=True)
            road_type = table_row_helper.get_td(1).get_text(strip=True)
            road_tags = {}
            for col_idx in range(2, len(column_names)):
                td = table_row_helper.get_td(col_idx)
                speeds = td.get_text(strip=True)
                if speeds:
                    vehicle_type = column_names[col_idx]
                    try:
                        parsed_speeds = speed_parse_func(speeds)
                    except Exception:
                        parsed_speeds = {}
                        warnings.append(f'{country_code}: Unable to parse \'{vehicle_type}\' for \'{road_type}\'')
                    # Non-default vehicle columns are namespaced, e.g.
                    # maxspeed: -> maxspeed:hgv:.
                    for maxspeed_key, maxspeed_value in parsed_speeds.items():
                        if vehicle_type != "(default)":
                            maxspeed_key = maxspeed_key.replace("maxspeed:", "maxspeed:" + vehicle_type + ":", 1)
                        road_tags[maxspeed_key] = maxspeed_value
            road_filters = road_types[road_type] if road_type in road_types else None
            if not road_type or road_filters:
                if country_code not in result:
                    result[country_code] = []
                road_class = {'tags': road_tags}
                if road_type:
                    road_class['name'] = road_type
                    if road_filters['filter']:
                        road_class['filter'] = road_filters['filter']
                    else:
                        warnings.append(f'{country_code}: There is only a fuzzy filter for \'{road_type}\'')
                    if road_filters['fuzzy_filter']:
                        road_class['fuzzy_filter'] = road_filters['fuzzy_filter']
                result[country_code].append(road_class)
            else:
                warnings.append(f'{country_code}: Unable to map \'{road_type}\'')
    return {'speed_limits': result, 'warnings': warnings}
| [
"lark.Lark"
] | [((91, 110), 'lark.Lark', 'Lark', (['SPEED_GRAMMAR'], {}), '(SPEED_GRAMMAR)\n', (95, 110), False, 'from lark import Lark\n')] |
import copy
import numpy as np
from PIL import Image
from torch.utils.data.dataset import Dataset
class CleanLabelDataset(Dataset):
    """Clean-label dataset.
    Args:
        dataset (Dataset): The dataset to be wrapped.
        adv_dataset_path (str): The adversarially perturbed dataset path.
        transform (callable): The backdoor transformations.
        poison_idx (np.array): An 0/1 (clean/poisoned) array with
            shape `(len(dataset), )`.
        target_label (int): The target label.
    """
    def __init__(self, dataset, adv_dataset_path, transform, poison_idx, target_label):
        super(CleanLabelDataset, self).__init__()
        # Deep copy so mutations here never leak into the caller's dataset.
        self.clean_dataset = copy.deepcopy(dataset)
        self.adv_data = np.load(adv_dataset_path)["data"]
        self.clean_data = self.clean_dataset.data
        self.train = self.clean_dataset.train
        if self.train:
            # Broadcast the 1-D mask to (N, 1, 1, 1) so np.where selects
            # per-image between adversarial and clean data; assumes images
            # are stored as 4-D (N, H, W, C) arrays — TODO confirm.
            self.data = np.where(
                (poison_idx == 1)[..., None, None, None],
                self.adv_data,
                self.clean_data,
            )
            self.targets = self.clean_dataset.targets
            self.poison_idx = poison_idx
        else:
            # Only fetch poison data when testing.
            self.data = self.clean_data[np.nonzero(poison_idx)[0]]
            self.targets = self.clean_dataset.targets[np.nonzero(poison_idx)[0]]
            self.poison_idx = poison_idx[poison_idx == 1]
        self.transform = self.clean_dataset.transform
        self.bd_transform = transform
        self.target_label = target_label
    def __getitem__(self, index):
        """Return {'img': transformed image, 'target': label}; poisoned
        samples additionally go through the backdoor transform."""
        img = self.data[index]
        target = self.targets[index]
        if self.poison_idx[index] == 1:
            img = self.augment(img, bd_transform=self.bd_transform)
            # If `self.train` is `True`, it will not modify `target` for poison data
            # only in the target class; If `self.train` is `False`, it will flip `target`
            # to `self.target_label` for testing purpose.
            target = self.target_label
        else:
            img = self.augment(img, bd_transform=None)
        item = {"img": img, "target": target}
        return item
    def __len__(self):
        return len(self.data)
    def augment(self, img, bd_transform=None):
        """Optionally apply the backdoor transform, then the dataset's own
        PIL-based transform pipeline."""
        if bd_transform is not None:
            img = bd_transform(img)
        img = Image.fromarray(img)
        img = self.transform(img)
        return img
| [
"PIL.Image.fromarray",
"numpy.where",
"numpy.nonzero",
"copy.deepcopy",
"numpy.load"
] | [((688, 710), 'copy.deepcopy', 'copy.deepcopy', (['dataset'], {}), '(dataset)\n', (701, 710), False, 'import copy\n'), ((2366, 2386), 'PIL.Image.fromarray', 'Image.fromarray', (['img'], {}), '(img)\n', (2381, 2386), False, 'from PIL import Image\n'), ((735, 760), 'numpy.load', 'np.load', (['adv_dataset_path'], {}), '(adv_dataset_path)\n', (742, 760), True, 'import numpy as np\n'), ((912, 999), 'numpy.where', 'np.where', (['(poison_idx == 1)[..., None, None, None]', 'self.adv_data', 'self.clean_data'], {}), '((poison_idx == 1)[..., None, None, None], self.adv_data, self.\n clean_data)\n', (920, 999), True, 'import numpy as np\n'), ((1258, 1280), 'numpy.nonzero', 'np.nonzero', (['poison_idx'], {}), '(poison_idx)\n', (1268, 1280), True, 'import numpy as np\n'), ((1339, 1361), 'numpy.nonzero', 'np.nonzero', (['poison_idx'], {}), '(poison_idx)\n', (1349, 1361), True, 'import numpy as np\n')] |
import os
import sys
import json
cfg = xmlsettings.XMLSettings(os.path.join(sys.path[0],'settings.xml'))
with open(os.path.join(sys.path[0],'config.json')) as data_file:
data = json.load(data_file)
| [
"json.load",
"os.path.join"
] | [((64, 105), 'os.path.join', 'os.path.join', (['sys.path[0]', '"""settings.xml"""'], {}), "(sys.path[0], 'settings.xml')\n", (76, 105), False, 'import os\n'), ((187, 207), 'json.load', 'json.load', (['data_file'], {}), '(data_file)\n', (196, 207), False, 'import json\n'), ((117, 157), 'os.path.join', 'os.path.join', (['sys.path[0]', '"""config.json"""'], {}), "(sys.path[0], 'config.json')\n", (129, 157), False, 'import os\n')] |
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers
"""This module demonstrates keras api and shows that the saved model should reproduce the same results as the
presaved model.
https://www.tensorflow.org/guide/keras/train_and_evaluate#training_evaluation_from_tfdata_datasets
"""
def get_uncompiled_model():
# Build a model using the functional API
inputs = keras.Input(shape=(784,), name="digits")
x = layers.Dense(64, activation="relu", name="dense_1")(inputs)
x = layers.Dense(64, activation="relu", name="dense_2")(x)
outputs = layers.Dense(10, activation="softmax", name="predictions")(x)
my_model = keras.Model(inputs=inputs, outputs=outputs)
return my_model
def get_compiled_model():
my_model = get_uncompiled_model()
my_model.compile(
optimizer=keras.optimizers.RMSprop(),
loss=keras.losses.SparseCategoricalCrossentropy(),
metrics=[keras.metrics.SparseCategoricalAccuracy()],
)
# Alternative way to compile a model.
# my_model = model.compile(
# optimizer="rmsprop",
# loss="sparse_categorical_crossentropy",
# metrics=["sparse_categorical_accuracy"],
# )
return my_model
def prepare_validation_set():
# Prepare the validation dataset
valid_set = tf.data.Dataset.from_tensor_slices((x_val, y_val))
valid_set = valid_set.batch(64)
return valid_set
# print the model graph
#keras.utils.plot_model(model, "functional_api_model.png")
#keras.utils.plot_model(model, "functional_api_model_with_shapes.png", show_shapes=True)
(x_train, y_train), (x_test, y_test) = keras.datasets.mnist.load_data()
# preprocess the data - convert the data type from uint8 to float32
x_train = x_train.reshape(60000, 784).astype("float32") / 255
x_test = x_test.reshape(10000, 784).astype("float32") / 255
y_train = y_train.astype("float32")
y_test = y_test.astype("float32")
# take the last 10,000 records as the validation set
x_val = x_train[-10000:]
y_val = y_train[-10000:]
# take the first 50K records as the training dataset
x_train = x_train[:-10000]
y_train = y_train[:-10000]
# Create the datasets from Numpy arrays
train_dataset = tf.data.Dataset.from_tensor_slices((x_train, y_train))
train_dataset = train_dataset.shuffle(buffer_size=1024).batch(64)
test_dataset = tf.data.Dataset.from_tensor_slices((x_test, y_test))
test_dataset = test_dataset.batch(64)
val_dataset = prepare_validation_set()
model = get_compiled_model()
print(model.summary)
print("Training the model")
# This will loop through the entire dataset for each epoch
# history = model.fit(x_train, y_train, epochs=2)
# This will loop through a set number of records for each epoch; therefore, the dataset is not reset at the end of
# an epoch. In other words you may not loop through the entire dataset if you set this number too low.
# Also, you should expect longer training time to get better accuracy because it will not loop through the entire
# dataset for each epoch.
# history = model.fit(x_train, y_train, epochs=2, steps_per_epoch=100)
# Pass a validation set during training.
history = model.fit(x_train, y_train, epochs=2, validation_data=val_dataset)
print("Model History")
print(history.history)
print("Evaluate the test data")
results = model.evaluate(x_test, y_test, batch_size=64)
print(f"test loss, test accuracy:{results}")
# predict on the model
print("Predictions")
predictions = model.predict(x_test[:3])
print(f"predictions shape: {predictions.shape}")
print(predictions)
print("Saving the model")
model_name = "mnist_model_dataset"
model.save(model_name)
print("***** Testing the Saved Model *****")
print("Loading the model")
model_restored = keras.models.load_model(model_name)
print("Restored Model Summary")
print(model_restored.summary())
print("Evaluate the test data")
results2 = model_restored.evaluate(x_test, y_test, batch_size=64)
print(f"test loss, test accuracy:{results2}")
# predict on the model
print("Predictions")
predictions2 = model_restored.predict(x_test[:3])
print(f"predictions shape: {predictions2.shape}")
print(predictions2)
| [
"tensorflow.data.Dataset.from_tensor_slices",
"tensorflow.keras.datasets.mnist.load_data",
"tensorflow.keras.losses.SparseCategoricalCrossentropy",
"tensorflow.keras.metrics.SparseCategoricalAccuracy",
"tensorflow.keras.layers.Dense",
"tensorflow.keras.models.load_model",
"tensorflow.keras.Input",
"te... | [((1638, 1670), 'tensorflow.keras.datasets.mnist.load_data', 'keras.datasets.mnist.load_data', ([], {}), '()\n', (1668, 1670), False, 'from tensorflow import keras\n'), ((2201, 2255), 'tensorflow.data.Dataset.from_tensor_slices', 'tf.data.Dataset.from_tensor_slices', (['(x_train, y_train)'], {}), '((x_train, y_train))\n', (2235, 2255), True, 'import tensorflow as tf\n'), ((2338, 2390), 'tensorflow.data.Dataset.from_tensor_slices', 'tf.data.Dataset.from_tensor_slices', (['(x_test, y_test)'], {}), '((x_test, y_test))\n', (2372, 2390), True, 'import tensorflow as tf\n'), ((3716, 3751), 'tensorflow.keras.models.load_model', 'keras.models.load_model', (['model_name'], {}), '(model_name)\n', (3739, 3751), False, 'from tensorflow import keras\n'), ((410, 450), 'tensorflow.keras.Input', 'keras.Input', ([], {'shape': '(784,)', 'name': '"""digits"""'}), "(shape=(784,), name='digits')\n", (421, 450), False, 'from tensorflow import keras\n'), ((673, 716), 'tensorflow.keras.Model', 'keras.Model', ([], {'inputs': 'inputs', 'outputs': 'outputs'}), '(inputs=inputs, outputs=outputs)\n', (684, 716), False, 'from tensorflow import keras\n'), ((1317, 1367), 'tensorflow.data.Dataset.from_tensor_slices', 'tf.data.Dataset.from_tensor_slices', (['(x_val, y_val)'], {}), '((x_val, y_val))\n', (1351, 1367), True, 'import tensorflow as tf\n'), ((459, 510), 'tensorflow.keras.layers.Dense', 'layers.Dense', (['(64)'], {'activation': '"""relu"""', 'name': '"""dense_1"""'}), "(64, activation='relu', name='dense_1')\n", (471, 510), False, 'from tensorflow.keras import layers\n'), ((527, 578), 'tensorflow.keras.layers.Dense', 'layers.Dense', (['(64)'], {'activation': '"""relu"""', 'name': '"""dense_2"""'}), "(64, activation='relu', name='dense_2')\n", (539, 578), False, 'from tensorflow.keras import layers\n'), ((596, 654), 'tensorflow.keras.layers.Dense', 'layers.Dense', (['(10)'], {'activation': '"""softmax"""', 'name': '"""predictions"""'}), "(10, activation='softmax', 
name='predictions')\n", (608, 654), False, 'from tensorflow.keras import layers\n'), ((843, 869), 'tensorflow.keras.optimizers.RMSprop', 'keras.optimizers.RMSprop', ([], {}), '()\n', (867, 869), False, 'from tensorflow import keras\n'), ((884, 928), 'tensorflow.keras.losses.SparseCategoricalCrossentropy', 'keras.losses.SparseCategoricalCrossentropy', ([], {}), '()\n', (926, 928), False, 'from tensorflow import keras\n'), ((947, 988), 'tensorflow.keras.metrics.SparseCategoricalAccuracy', 'keras.metrics.SparseCategoricalAccuracy', ([], {}), '()\n', (986, 988), False, 'from tensorflow import keras\n')] |
#!/usr/bin/python
import sqlite3
uId = 4
con = sqlite3.connect('ydb.db')
with con:
cur = con.cursor()
cur.execute("SELECT name, price FROM cars WHERE Id=:Id", {"Id": uId})
row = cur.fetchone()
print(f"{row[0]}, {row[1]}")
| [
"sqlite3.connect"
] | [((50, 75), 'sqlite3.connect', 'sqlite3.connect', (['"""ydb.db"""'], {}), "('ydb.db')\n", (65, 75), False, 'import sqlite3\n')] |
import numpy as np
from joblib import Memory
import typing
from typing import Union
from typing import Iterator
from typing import Tuple
from typing import TypeVar
try:
import typing_extensions # noqa
TYPING_EXTENSION_INSTALLED = True
except ImportError:
TYPING_EXTENSION_INSTALLED = False
if typing.TYPE_CHECKING or TYPING_EXTENSION_INSTALLED:
from typing_extensions import Protocol # noqa
class CVSplitter(Protocol):
def get_n_splits(self):
"""Get the number of splits."""
def split(self, X, y=None, groups=None):
"""Split data"""
else:
CVSplitter = TypeVar("CVSplitter") # typing: ignore
if typing.TYPE_CHECKING or TYPING_EXTENSION_INSTALLED:
from typing_extensions import Literal # noqa
else:
class _SimpleLiteral:
def __getitem__(self, values):
return typing.Any
Literal = _SimpleLiteral()
CVType = Union[int, CVSplitter, Iterator[Tuple[np.ndarray, np.ndarray]], None]
ArrayLike = TypeVar("ArrayLike")
EstimatorType = TypeVar("EstimatorType")
DType = TypeVar("DType")
KernelType = TypeVar("KernelType")
RandomStateType = Union[int, np.random.RandomState, None]
MemoryType = Union[str, Memory, None]
| [
"typing.TypeVar"
] | [((1001, 1021), 'typing.TypeVar', 'TypeVar', (['"""ArrayLike"""'], {}), "('ArrayLike')\n", (1008, 1021), False, 'from typing import TypeVar\n'), ((1038, 1062), 'typing.TypeVar', 'TypeVar', (['"""EstimatorType"""'], {}), "('EstimatorType')\n", (1045, 1062), False, 'from typing import TypeVar\n'), ((1071, 1087), 'typing.TypeVar', 'TypeVar', (['"""DType"""'], {}), "('DType')\n", (1078, 1087), False, 'from typing import TypeVar\n'), ((1101, 1122), 'typing.TypeVar', 'TypeVar', (['"""KernelType"""'], {}), "('KernelType')\n", (1108, 1122), False, 'from typing import TypeVar\n'), ((627, 648), 'typing.TypeVar', 'TypeVar', (['"""CVSplitter"""'], {}), "('CVSplitter')\n", (634, 648), False, 'from typing import TypeVar\n')] |
import urllib.parse
import cachetools
from common import database as db
from sqlalchemy.orm import joinedload
FEED_LOOKUP_CACHE = cachetools.LRUCache(maxsize=200)
def patch_blogspot(innetloc):
assert isinstance(innetloc, str), "Expected str, recieved %s" % type(innetloc)
# Blogspot domains are coerced to ".com" since they seem to localize their TLD,
# and somehow it all points to the same place in the end.
if ".blogspot." in innetloc and not innetloc.endswith(".blogspot.com"):
prefix = innetloc.split(".blogspot.")[0]
innetloc = prefix + ".blogspot.com"
return innetloc
def get_name_for_netloc_db(db_sess, netloc):
if netloc in FEED_LOOKUP_CACHE:
return FEED_LOOKUP_CACHE[netloc]
row = db_sess.query(db.RssFeedUrlMapper) \
.filter(db.RssFeedUrlMapper.feed_netloc == netloc) \
.options(joinedload('feed_entry')) \
.all()
if not row:
return False
if len(row) > 1:
print("ERROR: Multiple solutions for netloc %s?" % netloc)
feedname = row[0].feed_entry.feed_name
if feedname:
FEED_LOOKUP_CACHE[netloc] = feedname
return feedname
else:
return False
def getNiceName(session, srcurl, netloc=None, debug=False):
if netloc:
assert isinstance(netloc, str), "Expected str, recieved %s" % type(netloc)
srcnetloc = netloc
elif srcurl:
assert isinstance(srcurl, str), "Expected str, recieved %s" % type(srcurl)
srcnetloc = urllib.parse.urlparse(srcurl).netloc
else:
raise RuntimeError("You need to at least pass a srcurl or netloc!")
srcnetloc = patch_blogspot(srcnetloc)
val = get_name_for_netloc_db(session, srcnetloc)
return val
| [
"sqlalchemy.orm.joinedload",
"cachetools.LRUCache"
] | [((137, 169), 'cachetools.LRUCache', 'cachetools.LRUCache', ([], {'maxsize': '(200)'}), '(maxsize=200)\n', (156, 169), False, 'import cachetools\n'), ((820, 844), 'sqlalchemy.orm.joinedload', 'joinedload', (['"""feed_entry"""'], {}), "('feed_entry')\n", (830, 844), False, 'from sqlalchemy.orm import joinedload\n')] |
# -*- coding: utf-8 -*-
# pylint: disable=too-many-lines
import heapq
from binascii import hexlify
from collections import namedtuple
from raiden.encoding.signing import recover_publickey
from raiden.transfer.architecture import TransitionResult
from raiden.transfer.balance_proof import signing_data
from raiden.transfer.events import (
ContractSendChannelClose,
ContractSendChannelSettle,
ContractSendChannelUpdateTransfer,
ContractSendChannelWithdraw,
EventTransferReceivedInvalidDirectTransfer,
EventTransferReceivedSuccess,
EventTransferSentFailed,
SendDirectTransfer,
)
from raiden.transfer.mediated_transfer.state import LockedTransferUnsignedState
from raiden.transfer.mediated_transfer.events import (
refund_from_sendmediated,
SendBalanceProof,
SendMediatedTransfer,
)
from raiden.transfer.merkle_tree import (
LEAVES,
merkleroot,
compute_layers,
compute_merkleproof_for,
)
from raiden.transfer.state import (
CHANNEL_STATE_CLOSED,
CHANNEL_STATE_CLOSING,
CHANNEL_STATE_OPENED,
CHANNEL_STATE_SETTLED,
CHANNEL_STATE_SETTLING,
CHANNEL_STATES_PRIOR_TO_CLOSED,
CHANNEL_STATE_UNUSABLE,
EMPTY_MERKLE_ROOT,
EMPTY_MERKLE_TREE,
BalanceProofUnsignedState,
HashTimeLockState,
MerkleTreeState,
TransactionExecutionStatus,
UnlockPartialProofState,
UnlockProofState,
)
from raiden.transfer.state_change import (
ActionChannelClose,
ActionTransferDirect,
Block,
ContractReceiveChannelClosed,
ContractReceiveChannelNewBalance,
ContractReceiveChannelSettled,
ContractReceiveChannelWithdraw,
ReceiveTransferDirect,
)
from raiden.utils import publickey_to_address, typing
from raiden.settings import DEFAULT_NUMBER_OF_CONFIRMATIONS_BLOCK
# Ordering key for queued on-chain transactions: sorted by the block number
# at which they may be sent, carrying the transaction payload itself.
TransactionOrder = namedtuple('TransactionOrder', ['block_number', 'transaction'])
def is_known(end_state, hashlock):
    """True if the `hashlock` corresponds to a known lock."""
    known_pending = hashlock in end_state.hashlocks_to_pendinglocks
    known_unclaimed = hashlock in end_state.hashlocks_to_unclaimedlocks
    return known_pending or known_unclaimed
def is_deposit_confirmed(channel_state, block_number):
    """True once the oldest queued deposit transaction has enough confirmations."""
    queue = channel_state.deposit_transaction_queue
    if not queue:
        return False

    # The queue is a heap ordered by block number, so entry 0 is the oldest.
    oldest = queue[0]
    return is_transaction_confirmed(oldest.block_number, block_number)
def is_locked(end_state, hashlock):
    """True if the `hashlock` is known and the correspoding secret is not."""
    pending = end_state.hashlocks_to_pendinglocks
    return hashlock in pending
def is_secret_known(end_state, hashlock):
    """True if the `hashlock` is for a lock with a known secret."""
    unclaimed = end_state.hashlocks_to_unclaimedlocks
    return hashlock in unclaimed
def is_transaction_confirmed(transaction_block_number, blockchain_block_number):
    """True when enough blocks were mined on top of the transaction's block."""
    required_block = transaction_block_number + DEFAULT_NUMBER_OF_CONFIRMATIONS_BLOCK
    return blockchain_block_number > required_block
def is_valid_signature(balance_proof, sender_address):
    """Check that `balance_proof.signature` was produced by `sender_address`.

    Returns a (is_valid, error_message) tuple; `error_message` is None when
    the signature is valid.
    """
    message = signing_data(
        balance_proof.nonce,
        balance_proof.transferred_amount,
        balance_proof.channel_address,
        balance_proof.locksroot,
        balance_proof.message_hash,
    )

    try:
        # ValueError is raised if the PublicKey instantiation failed, let it
        # propagate because it's a memory pressure problem
        recovered_pubkey = recover_publickey(message, balance_proof.signature)
    except Exception:  # pylint: disable=broad-except
        # secp256k1 is using bare Exception classes
        # raised if the recovery failed
        return (False, 'Signature invalid, could not be recovered.')

    if publickey_to_address(recovered_pubkey) == sender_address:
        return (True, None)

    return (False, 'Signature was valid but the expected address does not match.')
def is_valid_directtransfer(direct_transfer, channel_state, sender_state, receiver_state):
    """Validate a received DirectTransfer against the current channel state.

    Returns a ``(is_valid, error_message)`` tuple; ``error_message`` is
    ``None`` exactly when the transfer is valid.
    """
    received_balance_proof = direct_transfer.balance_proof
    current_balance_proof = get_current_balanceproof(sender_state)

    # Values implied by the sender's latest known balance proof.
    current_locksroot, _, current_transferred_amount = current_balance_proof
    distributable = get_distributable(sender_state, receiver_state)
    expected_nonce = get_next_nonce(sender_state)
    # The token amount moved by this transfer is the increase of the
    # cumulative transferred_amount.
    amount = received_balance_proof.transferred_amount - current_transferred_amount

    is_valid, signature_msg = is_valid_signature(
        received_balance_proof,
        sender_state.address,
    )

    if get_status(channel_state) != CHANNEL_STATE_OPENED:
        msg = 'Invalid direct message. The channel is already closed.'
        result = (False, msg)
    elif not is_valid:
        # The signature must be valid, otherwise the balance proof cannot be
        # used onchain.
        msg = 'Invalid DirectTransfer message. {}'.format(signature_msg)
        result = (False, msg)
    elif received_balance_proof.nonce != expected_nonce:
        # The nonces must increase sequentially, otherwise there is a
        # synchronization problem.
        msg = (
            'Invalid DirectTransfer message. '
            'Nonce did not change sequentially, expected: {} got: {}.'
        ).format(
            expected_nonce,
            received_balance_proof.nonce,
        )
        result = (False, msg)
    elif received_balance_proof.locksroot != current_locksroot:
        # Direct transfers do not use hash time lock, so it cannot change the
        # locksroot, otherwise a lock could be removed.
        msg = (
            "Invalid DirectTransfer message. "
            "Balance proof's locksroot changed, expected: {} got: {}."
        ).format(
            hexlify(current_locksroot).decode(),
            hexlify(received_balance_proof.locksroot).decode(),
        )
        result = (False, msg)
    elif received_balance_proof.transferred_amount <= current_transferred_amount:
        # Direct transfers must increase the transferred_amount, otherwise the
        # sender is trying to play the protocol and steal token.
        msg = (
            "Invalid DirectTransfer message. "
            "Balance proof's transferred_amount decreased, expected larger than: {} got: {}."
        ).format(
            current_transferred_amount,
            received_balance_proof.transferred_amount,
        )
        result = (False, msg)
    elif received_balance_proof.channel_address != channel_state.identifier:
        # The balance proof must be tied to this channel, otherwise the
        # on-chain contract would be sucesstible to replay attacks across
        # channels.
        msg = (
            'Invalid DirectTransfer message. '
            'Balance proof is tied to the wrong channel, expected: {} got: {}'
        ).format(
            hexlify(channel_state.identifier).decode(),
            hexlify(received_balance_proof.channel_address).decode(),
        )
        result = (False, msg)
    elif amount > distributable:
        # Direct transfer are limited to the current available balance,
        # otherwise the sender is doing a trying to play the protocol and do a
        # double spend.
        msg = (
            'Invalid DirectTransfer message. '
            'Transfer amount larger than the available distributable, '
            'transfer amount: {} maximum distributable: {}'
        ).format(
            amount,
            distributable,
        )
        result = (False, msg)
    else:
        result = (True, None)
    return result
def is_valid_mediatedtransfer(mediated_transfer, channel_state, sender_state, receiver_state):
    """Validate a received MediatedTransfer against the current channel state.

    Returns a ``(is_valid, error_message, merkletree)`` triple. On success
    ``merkletree`` is the sender's merkle tree updated with the new lock,
    otherwise it is ``None`` and ``error_message`` describes the failure.
    """
    received_balance_proof = mediated_transfer.balance_proof
    current_balance_proof = get_current_balanceproof(sender_state)

    _, _, current_transferred_amount = current_balance_proof
    distributable = get_distributable(sender_state, receiver_state)
    expected_nonce = get_next_nonce(sender_state)

    lock = mediated_transfer.lock
    merkletree = compute_merkletree_with(sender_state.merkletree, lock.lockhash)

    if get_status(channel_state) != CHANNEL_STATE_OPENED:
        # Fixed copy-paste: the message previously said 'direct message'.
        msg = 'Invalid MediatedTransfer message. The channel is already closed.'
        result = (False, msg, None)
    # BUG FIX: this used to be a bare `if`, so a transfer passing every check
    # in the `else` block below would overwrite the closed-channel error above
    # and be accepted even though the channel is no longer open.
    elif merkletree is None:
        msg = 'Invalid MediatedTransfer message. Same lockhash handled twice.'
        result = (False, msg, None)
    else:
        locksroot_with_lock = merkleroot(merkletree)

        (is_valid, signature_msg) = is_valid_signature(
            received_balance_proof,
            sender_state.address,
        )

        if not is_valid:
            # The signature must be valid, otherwise the balance proof cannot be
            # used onchain
            msg = 'Invalid MediatedTransfer message. {}'.format(signature_msg)
            result = (False, msg, None)
        elif received_balance_proof.nonce != expected_nonce:
            # The nonces must increase sequentially, otherwise there is a
            # synchronization problem
            msg = (
                'Invalid MediatedTransfer message. '
                'Nonce did not change sequentially, expected: {} got: {}.'
            ).format(
                expected_nonce,
                received_balance_proof.nonce,
            )
            result = (False, msg, None)
        elif received_balance_proof.locksroot != locksroot_with_lock:
            # The locksroot must be updated to include the new lock
            msg = (
                "Invalid MediatedTransfer message. "
                "Balance proof's locksroot didn't match, expected: {} got: {}."
            ).format(
                hexlify(locksroot_with_lock).decode(),
                hexlify(received_balance_proof.locksroot).decode(),
            )
            result = (False, msg, None)
        elif received_balance_proof.transferred_amount != current_transferred_amount:
            # Mediated transfers must not change transferred_amount
            msg = (
                "Invalid MediatedTransfer message. "
                "Balance proof's transferred_amount changed, expected: {} got: {}."
            ).format(
                current_transferred_amount,
                received_balance_proof.transferred_amount,
            )
            result = (False, msg, None)
        elif received_balance_proof.channel_address != channel_state.identifier:
            # The balance proof must be tied to this channel, otherwise the
            # on-chain contract would be sucesstible to replay attacks across
            # channels.
            msg = (
                'Invalid MediatedTransfer message. '
                'Balance proof is tied to the wrong channel, expected: {} got: {}'
            ).format(
                hexlify(channel_state.identifier).decode(),
                hexlify(received_balance_proof.channel_address).decode(),
            )
            result = (False, msg, None)
        # the locked amount is limited to the current available balance, otherwise
        # the sender is doing a trying to play the protocol and do a double spend
        elif lock.amount > distributable:
            msg = (
                'Invalid MediatedTransfer message. '
                'Lock amount larger than the available distributable, '
                'lock amount: {} maximum distributable: {}'
            ).format(
                lock.amount,
                distributable,
            )
            result = (False, msg, None)
        else:
            result = (True, None, merkletree)

    return result
def is_valid_unlock(unlock, channel_state, sender_state):
    """Validate a received Secret/unlock message against the channel state.

    Returns a ``(is_valid, error_message, merkletree)`` triple; on success
    ``merkletree`` is the sender's merkle tree with the unlocked lock
    removed, otherwise it is ``None``.
    """
    received_balance_proof = unlock.balance_proof
    current_balance_proof = get_current_balanceproof(sender_state)

    lock = get_lock(sender_state, unlock.hashlock)

    _, _, current_transferred_amount = current_balance_proof
    expected_nonce = get_next_nonce(sender_state)

    if lock is not None:
        new_merkletree = compute_merkletree_without(sender_state.merkletree, lock.lockhash)
        locksroot_without_lock = merkleroot(new_merkletree)
        # BUG FIX: this was previously computed unconditionally, raising
        # AttributeError for an unknown lock (`lock is None`) instead of
        # falling through to the proper error tuple below.
        expected_transferred_amount = current_transferred_amount + lock.amount

    is_valid, signature_msg = is_valid_signature(
        received_balance_proof,
        sender_state.address,
    )

    # TODO: Accept unlock messages if the node has not yet sent a transaction
    # with the balance proof to the blockchain, this will save one call to
    # withdraw on-chain for the non-closing party.
    if get_status(channel_state) != CHANNEL_STATE_OPENED:
        msg = 'Invalid Unlock message for {}. The channel is already closed.'.format(
            hexlify(unlock.hashlock).decode(),
        )
        result = (False, msg, None)
    elif lock is None:
        msg = 'Invalid Secret message. There is no correspoding lock for {}'.format(
            hexlify(unlock.hashlock).decode(),
        )
        result = (False, msg, None)
    elif not is_valid:
        # The signature must be valid, otherwise the balance proof cannot be
        # used onchain.
        msg = 'Invalid Secret message. {}'.format(signature_msg)
        result = (False, msg, None)
    elif received_balance_proof.nonce != expected_nonce:
        # The nonces must increase sequentially, otherwise there is a
        # synchronization problem.
        msg = (
            'Invalid Secret message. '
            'Nonce did not change sequentially, expected: {} got: {}.'
        ).format(
            expected_nonce,
            received_balance_proof.nonce,
        )
        result = (False, msg, None)
    elif received_balance_proof.locksroot != locksroot_without_lock:
        # Secret messages remove a known lock, the new locksroot must have only
        # that lock removed, otherwise the sender may be trying to remove
        # additional locks.
        msg = (
            "Invalid Secret message. "
            "Balance proof's locksroot didn't match, expected: {} got: {}."
        ).format(
            hexlify(locksroot_without_lock).decode(),
            hexlify(received_balance_proof.locksroot).decode(),
        )
        result = (False, msg, None)
    elif received_balance_proof.transferred_amount != expected_transferred_amount:
        # Secret messages must increase the transferred_amount by lock amount,
        # otherwise the sender is trying to play the protocol and steal token.
        msg = (
            "Invalid Secret message. "
            "Balance proof's wrong transferred_amount, expected: {} got: {}."
        ).format(
            expected_transferred_amount,
            received_balance_proof.transferred_amount,
        )
        result = (False, msg, None)
    elif received_balance_proof.channel_address != channel_state.identifier:
        # The balance proof must be tied to this channel, otherwise the
        # on-chain contract would be sucesstible to replay attacks across
        # channels.
        msg = (
            'Invalid Secret message. '
            'Balance proof is tied to the wrong channel, expected: {} got: {}'
        ).format(
            channel_state.identifier,
            hexlify(received_balance_proof.channel_address).decode(),
        )
        result = (False, msg, None)
    else:
        result = (True, None, new_merkletree)
    return result
def get_amount_locked(end_state):
    """Sum of the token amounts of every pending and unclaimed lock."""
    pending_total = sum(
        pending_lock.amount
        for pending_lock in end_state.hashlocks_to_pendinglocks.values()
    )
    unclaimed_total = sum(
        partial_unlock.lock.amount
        for partial_unlock in end_state.hashlocks_to_unclaimedlocks.values()
    )
    return pending_total + unclaimed_total
def get_balance(sender, receiver):
    """Channel balance of `sender`: on-chain deposit minus the net amount
    transferred to the partner (their transfers to us count back in).
    """
    sent = sender.balance_proof.transferred_amount if sender.balance_proof else 0
    received = receiver.balance_proof.transferred_amount if receiver.balance_proof else 0
    return sender.contract_balance - sent + received
def get_current_balanceproof(end_state):
    """Return (locksroot, nonce, transferred_amount) of the latest balance proof.

    Falls back to the empty-channel values when no balance proof exists yet.
    """
    balance_proof = end_state.balance_proof

    if not balance_proof:
        return (EMPTY_MERKLE_ROOT, 0, 0)

    return (
        balance_proof.locksroot,
        balance_proof.nonce,
        balance_proof.transferred_amount,
    )
def get_distributable(sender, receiver):
    """Amount `sender` can still transfer: balance minus tokens locked in transit."""
    balance = get_balance(sender, receiver)
    locked = get_amount_locked(sender)
    return balance - locked
def get_known_unlocks(end_state):
    """Generate unlocking proofs for the known secrets."""
    unlock_proofs = []
    for partialproof in end_state.hashlocks_to_unclaimedlocks.values():
        proof = compute_proof_for_lock(
            end_state,
            partialproof.secret,
            partialproof.lock,
        )
        unlock_proofs.append(proof)
    return unlock_proofs
def get_lock(
        end_state: 'NettingChannelEndState',
        hashlock: typing.Keccak256,
) -> HashTimeLockState:
    """Return the lock correspoding to `hashlock` or None if the lock is
    unknown.
    """
    pending = end_state.hashlocks_to_pendinglocks
    unclaimed = end_state.hashlocks_to_unclaimedlocks

    lock = pending.get(hashlock)
    if not lock:
        # The secret may already be known, in which case only the partial
        # unlock proof holds the lock.
        partial_unlock = unclaimed.get(hashlock)
        if partial_unlock:
            lock = partial_unlock.lock

    assert isinstance(lock, HashTimeLockState) or lock is None
    return lock
def get_next_nonce(end_state):
    """Nonce to use for the next balance proof sent by this end."""
    balance_proof = end_state.balance_proof
    if balance_proof:
        return balance_proof.nonce + 1

    # 0 must not be used since in the netting contract it represents null.
    return 1
def get_status(channel_state):
    """Derive the channel lifecycle state from its close/settle transactions.

    A settle transaction takes precedence over a close transaction; with
    neither present the channel is considered open.
    """
    settle = channel_state.settle_transaction
    close = channel_state.close_transaction

    if settle:
        if settle.result == TransactionExecutionStatus.SUCCESS:
            return CHANNEL_STATE_SETTLED
        if settle.finished_block_number is None:
            # Transaction sent but not yet mined.
            return CHANNEL_STATE_SETTLING
        return CHANNEL_STATE_UNUSABLE

    if close:
        if close.result == TransactionExecutionStatus.SUCCESS:
            return CHANNEL_STATE_CLOSED
        if close.finished_block_number is None:
            # Transaction sent but not yet mined.
            return CHANNEL_STATE_CLOSING
        return CHANNEL_STATE_UNUSABLE

    return CHANNEL_STATE_OPENED
def del_lock(end_state, hashlock):
    """Remove every trace of `hashlock` from the end state.

    The lock must be known, either as pending or as unclaimed.
    """
    assert is_known(end_state, hashlock)

    end_state.hashlocks_to_pendinglocks.pop(hashlock, None)
    end_state.hashlocks_to_unclaimedlocks.pop(hashlock, None)
def set_closed(channel_state, block_number):
    """Record that the channel's close transaction succeeded at `block_number`."""
    close_transaction = channel_state.close_transaction

    if not close_transaction:
        channel_state.close_transaction = TransactionExecutionStatus(
            None,
            block_number,
            TransactionExecutionStatus.SUCCESS,
        )
    elif not close_transaction.finished_block_number:
        close_transaction.finished_block_number = block_number
        close_transaction.result = TransactionExecutionStatus.SUCCESS
def set_settled(channel_state, block_number):
    """Record that the channel's settle transaction succeeded at `block_number`."""
    settle_transaction = channel_state.settle_transaction

    if not settle_transaction:
        channel_state.settle_transaction = TransactionExecutionStatus(
            None,
            block_number,
            TransactionExecutionStatus.SUCCESS,
        )
    elif not settle_transaction.finished_block_number:
        settle_transaction.finished_block_number = block_number
        settle_transaction.result = TransactionExecutionStatus.SUCCESS
def update_contract_balance(end_state: 'NettingChannelEndState', contract_balance):
    """Raise the known on-chain deposit; on-chain balances never decrease."""
    end_state.contract_balance = max(end_state.contract_balance, contract_balance)
def compute_proof_for_lock(
        end_state: 'NettingChannelEndState',
        secret: typing.Secret,
        lock: HashTimeLockState
) -> UnlockProofState:
    """Build the unlock proof (merkle proof, encoded lock, secret) for `lock`."""
    # forcing bytes because ethereum.abi doesn't work with bytearray
    merkle_proof = compute_merkleproof_for(end_state.merkletree, lock.lockhash)
    return UnlockProofState(merkle_proof, lock.encoded, secret)
def compute_merkletree_with(
        merkletree: MerkleTreeState,
        lockhash: typing.Keccak256,
) -> typing.Optional[MerkleTreeState]:
    """Register the given lockhash with the existing merkle tree."""
    # Use None to inform the caller the lockshash is already known
    leaves = merkletree.layers[LEAVES]
    if lockhash in leaves:
        return None

    new_leaves = list(leaves)
    new_leaves.append(lockhash)
    return MerkleTreeState(compute_layers(new_leaves))
def compute_merkletree_without(merkletree, lockhash):
    """Unregister `lockhash` from the merkle tree.

    Returns None when the lockhash is unknown, EMPTY_MERKLE_TREE when it was
    the last leaf, otherwise a new tree without it.
    """
    leaves = merkletree.layers[LEAVES]
    if lockhash not in leaves:
        # Use None to inform the caller the lockshash is unknown
        return None

    remaining = list(leaves)
    remaining.remove(lockhash)

    if not remaining:
        return EMPTY_MERKLE_TREE
    return MerkleTreeState(compute_layers(remaining))
def create_senddirecttransfer(channel_state, amount, identifier):
    """Build a SendDirectTransfer event moving `amount` tokens to the partner."""
    our_balance_proof = channel_state.our_state.balance_proof

    if our_balance_proof:
        transferred_amount = amount + our_balance_proof.transferred_amount
        locksroot = our_balance_proof.locksroot
    else:
        # First transfer on this channel end.
        transferred_amount = amount
        locksroot = EMPTY_MERKLE_ROOT

    balance_proof = BalanceProofUnsignedState(
        get_next_nonce(channel_state.our_state),
        transferred_amount,
        locksroot,
        channel_state.identifier,
    )

    return SendDirectTransfer(
        identifier,
        balance_proof,
        channel_state.token_address,
        channel_state.partner_state.address,
    )
def create_sendmediatedtransfer(
        channel_state,
        initiator,
        target,
        amount,
        identifier,
        expiration,
        hashlock):
    """Build a SendMediatedTransfer event plus the merkle tree with its lock."""
    our_state = channel_state.our_state
    partner_state = channel_state.partner_state

    # The caller must check the capacity prior to the call
    msg = 'caller must make sure there is enough balance'
    assert amount <= get_distributable(our_state, partner_state), msg

    lock = HashTimeLockState(amount, expiration, hashlock)
    merkletree = compute_merkletree_with(our_state.merkletree, lock.lockhash)
    # The caller must ensure the same lock is not being used twice
    assert merkletree, 'lock is already registered'

    previous_proof = our_state.balance_proof
    transferred_amount = (
        previous_proof.transferred_amount if previous_proof else 0
    )

    balance_proof = BalanceProofUnsignedState(
        get_next_nonce(our_state),
        transferred_amount,
        merkleroot(merkletree),
        channel_state.identifier,
    )
    locked_transfer = LockedTransferUnsignedState(
        identifier,
        channel_state.token_address,
        balance_proof,
        lock,
        initiator,
        target,
    )
    mediatedtransfer = SendMediatedTransfer(
        locked_transfer,
        partner_state.address,
    )
    return mediatedtransfer, merkletree
def create_unlock(channel_state, identifier, secret, lock):
    """Build a SendBalanceProof unlocking *lock* plus the tree without it."""
    msg = 'caller must make sure the lock is known'
    assert is_known(channel_state.our_state, lock.hashlock), msg

    # Unlocking moves the lock's amount into the transferred total.
    previous_proof = channel_state.our_state.balance_proof
    transferred_amount = lock.amount
    if previous_proof:
        transferred_amount += previous_proof.transferred_amount

    merkletree = compute_merkletree_without(
        channel_state.our_state.merkletree,
        lock.lockhash,
    )

    balance_proof = BalanceProofUnsignedState(
        get_next_nonce(channel_state.our_state),
        transferred_amount,
        merkleroot(merkletree),
        channel_state.identifier,
    )
    unlock_lock = SendBalanceProof(
        identifier,
        channel_state.token_address,
        channel_state.partner_state.address,
        secret,
        balance_proof,
    )
    return unlock_lock, merkletree
def send_directtransfer(channel_state, amount, identifier):
    """Create a direct transfer and record its balance proof as our latest."""
    direct_transfer = create_senddirecttransfer(channel_state, amount, identifier)
    channel_state.our_state.balance_proof = direct_transfer.balance_proof
    return direct_transfer
def send_mediatedtransfer(
        channel_state,
        initiator,
        target,
        amount,
        identifier,
        expiration,
        hashlock):
    """Create a mediated transfer and register its lock as pending on our side."""
    send_event, merkletree = create_sendmediatedtransfer(
        channel_state,
        initiator,
        target,
        amount,
        identifier,
        expiration,
        hashlock,
    )

    our_state = channel_state.our_state
    transfer = send_event.transfer
    our_state.balance_proof = transfer.balance_proof
    our_state.merkletree = merkletree
    our_state.hashlocks_to_pendinglocks[transfer.lock.hashlock] = transfer.lock

    return send_event
def send_refundtransfer(
        channel_state,
        initiator,
        target,
        amount,
        identifier,
        expiration,
        hashlock):
    """Create a refund for a transfer previously received under *hashlock*.

    The refund behaves exactly like an outgoing mediated transfer (the lock
    is registered as pending and our balance proof is updated), so the work
    is delegated to ``send_mediatedtransfer`` instead of duplicating it.
    """
    # A refund only makes sense for a transfer we actually received and
    # that is still pending on the partner's side.
    msg = 'Refunds are only valid for *known and pending* transfers'
    assert hashlock in channel_state.partner_state.hashlocks_to_pendinglocks, msg

    send_mediated_transfer = send_mediatedtransfer(
        channel_state,
        initiator,
        target,
        amount,
        identifier,
        expiration,
        hashlock,
    )
    return refund_from_sendmediated(send_mediated_transfer)
def send_unlock(channel_state, identifier, secret, hashlock):
    """Unlock the pending lock for *hashlock* and update our channel end."""
    lock = get_lock(channel_state.our_state, hashlock)
    assert lock

    unlock_lock, merkletree = create_unlock(
        channel_state,
        identifier,
        secret,
        lock,
    )

    our_state = channel_state.our_state
    our_state.balance_proof = unlock_lock.balance_proof
    our_state.merkletree = merkletree
    del_lock(our_state, lock.hashlock)

    return unlock_lock
def events_for_close(channel_state, block_number):
    """Return a ContractSendChannelClose event if the channel can still close."""
    if get_status(channel_state) not in CHANNEL_STATES_PRIOR_TO_CLOSED:
        return []

    # Record that a close transaction was started at this block; the result
    # is unknown until the on-chain confirmation arrives.
    channel_state.close_transaction = TransactionExecutionStatus(
        block_number,
        None,
        None
    )
    return [
        ContractSendChannelClose(
            channel_state.identifier,
            channel_state.token_address,
            channel_state.partner_state.balance_proof,
        ),
    ]
def register_secret_endstate(end_state, secret, hashlock):
    """Move the lock for *hashlock* from pending to unclaimed, keeping the secret."""
    if not is_locked(end_state, hashlock):
        return

    pending_lock = end_state.hashlocks_to_pendinglocks.pop(hashlock)
    end_state.hashlocks_to_unclaimedlocks[hashlock] = UnlockPartialProofState(
        pending_lock,
        secret,
    )
def register_secret(channel_state, secret, hashlock):
    """Register the secret with both channel end states.

    Even though the lock becomes unlocked it is *not* yet claimed. The
    channel's distributable capacity will only increase once the next
    balance proof is received.
    """
    our_state = channel_state.our_state
    partner_state = channel_state.partner_state
    register_secret_endstate(our_state, secret, hashlock)
    register_secret_endstate(partner_state, secret, hashlock)
def handle_send_directtransfer(channel_state, state_change):
    """Validate an ActionTransferDirect and emit the transfer or a failure."""
    amount = state_change.amount
    identifier = state_change.identifier
    distributable_amount = get_distributable(
        channel_state.our_state,
        channel_state.partner_state,
    )

    is_open = get_status(channel_state) == CHANNEL_STATE_OPENED
    is_valid = amount > 0
    can_pay = amount <= distributable_amount

    if is_open and is_valid and can_pay:
        events = [send_directtransfer(channel_state, amount, identifier)]
    elif not is_open:
        events = [EventTransferSentFailed(identifier, 'Channel is not opened')]
    elif not is_valid:
        msg = 'Transfer amount is invalid. Transfer: {}'.format(amount)
        events = [EventTransferSentFailed(identifier, msg)]
    else:
        # is_open and is_valid hold here, so the capacity check failed.
        msg = (
            'Transfer amount exceeds the available capacity. '
            'Capacity: {}, Transfer: {}'
        ).format(
            distributable_amount,
            amount,
        )
        events = [EventTransferSentFailed(identifier, msg)]

    return TransitionResult(channel_state, events)
def handle_action_close(channel_state, close, block_number):
    """Handle a local request to close the channel."""
    msg = 'caller must make sure the ids match'
    assert channel_state.identifier == close.channel_identifier, msg

    return TransitionResult(
        channel_state,
        events_for_close(channel_state, block_number),
    )
def handle_receive_directtransfer(channel_state, direct_transfer):
    """Apply a received direct transfer, or report why it is invalid."""
    is_valid, msg = is_valid_directtransfer(
        direct_transfer,
        channel_state,
        channel_state.partner_state,
        channel_state.our_state,
    )

    if not is_valid:
        event = EventTransferReceivedInvalidDirectTransfer(
            direct_transfer.transfer_identifier,
            reason=msg,
        )
        return TransitionResult(channel_state, [event])

    # The amount of this transfer is the delta between the partner's new
    # cumulative total and the one previously known.
    _, _, previous_transferred_amount = get_current_balanceproof(
        channel_state.partner_state,
    )
    new_balance_proof = direct_transfer.balance_proof
    transfer_amount = (
        new_balance_proof.transferred_amount - previous_transferred_amount
    )

    channel_state.partner_state.balance_proof = new_balance_proof
    event = EventTransferReceivedSuccess(
        direct_transfer.transfer_identifier,
        transfer_amount,
        channel_state.partner_state.address,
    )
    return TransitionResult(channel_state, [event])
def handle_receive_mediatedtransfer(
        channel_state: 'NettingChannelState',
        mediated_transfer: 'LockedTransferSignedState'
):
    """Register the latest known transfer.

    The receiver needs to use this method to update the container with a
    _valid_ transfer, otherwise the locksroot will not contain the pending
    transfer. The receiver needs to ensure that the merkle root has the
    hashlock included, otherwise it won't be able to claim it.
    """
    is_valid, msg, merkletree = is_valid_mediatedtransfer(
        mediated_transfer,
        channel_state,
        channel_state.partner_state,
        channel_state.our_state,
    )

    if is_valid:
        partner_state = channel_state.partner_state
        lock = mediated_transfer.lock
        partner_state.balance_proof = mediated_transfer.balance_proof
        partner_state.merkletree = merkletree
        partner_state.hashlocks_to_pendinglocks[lock.hashlock] = lock

    return is_valid, msg
def handle_receive_refundtransfer(channel_state, refund_transfer):
    # A refund transfer carries the same data as a mediated transfer, so the
    # same validation and registration logic applies.
    return handle_receive_mediatedtransfer(channel_state, refund_transfer)
def handle_receive_secretreveal(channel_state, state_change):
    """Register the revealed secret with both channel end states."""
    register_secret(channel_state, state_change.secret, state_change.hashlock)
def handle_unlock(channel_state, unlock):
    """Apply a received balance proof that unlocks one of the partner's locks."""
    is_valid, msg, unlocked_merkletree = is_valid_unlock(
        unlock,
        channel_state,
        channel_state.partner_state,
    )

    if is_valid:
        partner_state = channel_state.partner_state
        partner_state.balance_proof = unlock.balance_proof
        partner_state.merkletree = unlocked_merkletree
        del_lock(partner_state, unlock.hashlock)

    return is_valid, msg
def handle_block(channel_state, state_change, block_number):
    """Advance the channel state on a new block.

    Emits a settle transaction once the settlement window has elapsed after
    a close, then applies every queued deposit that is confirmed at this
    block.
    """
    assert state_change.block_number == block_number
    events = list()
    if get_status(channel_state) == CHANNEL_STATE_CLOSED:
        closed_block_number = channel_state.close_transaction.finished_block_number
        settlement_end = closed_block_number + channel_state.settle_timeout
        # The settlement period is over; the channel can be settled on-chain.
        if state_change.block_number > settlement_end:
            channel_state.settle_transaction = TransactionExecutionStatus(
                state_change.block_number,
                None,
                None
            )
            event = ContractSendChannelSettle(channel_state.identifier)
            events.append(event)
    # Deposits are heap-ordered by block number; pop and apply every one
    # that has enough confirmations at the current block.
    while is_deposit_confirmed(channel_state, block_number):
        order_deposit_transaction = heapq.heappop(channel_state.deposit_transaction_queue)
        apply_channel_newbalance(
            channel_state,
            order_deposit_transaction.transaction,
        )
    return TransitionResult(channel_state, events)
def handle_channel_closed(channel_state, state_change):
    """React to an on-chain close, sending update/withdraw transactions if due."""
    events = list()

    closed_this_channel = (
        state_change.channel_identifier == channel_state.identifier and
        get_status(channel_state) in CHANNEL_STATES_PRIOR_TO_CLOSED
    )

    if closed_this_channel:
        set_closed(channel_state, state_change.closed_block_number)

        balance_proof = channel_state.partner_state.balance_proof
        closed_by_partner = (
            state_change.closing_address != channel_state.our_state.address
        )
        if closed_by_partner and balance_proof:
            # The channel was closed by our partner, if there is a balance
            # proof available update this node half of the state
            events.append(ContractSendChannelUpdateTransfer(
                channel_state.identifier,
                balance_proof,
            ))

        unlock_proofs = get_known_unlocks(channel_state.partner_state)
        if unlock_proofs:
            events.append(ContractSendChannelWithdraw(
                channel_state.identifier,
                unlock_proofs,
            ))

    return TransitionResult(channel_state, events)
def handle_channel_settled(channel_state, state_change):
    """Mark the channel settled if the on-chain event refers to it."""
    if state_change.channel_identifier == channel_state.identifier:
        set_settled(channel_state, state_change.settle_block_number)
    return TransitionResult(channel_state, [])
def handle_channel_newbalance(channel_state, state_change, block_number):
    """Apply a deposit now if confirmed, otherwise queue it until it is."""
    deposit_transaction = state_change.deposit_transaction

    if is_transaction_confirmed(deposit_transaction.deposit_block_number, block_number):
        apply_channel_newbalance(channel_state, deposit_transaction)
    else:
        # Not confirmed yet: keep it heap-ordered by block number so that
        # handle_block can apply it once enough confirmations accumulate.
        heapq.heappush(
            channel_state.deposit_transaction_queue,
            TransactionOrder(
                deposit_transaction.deposit_block_number,
                deposit_transaction,
            ),
        )

    return TransitionResult(channel_state, [])
def apply_channel_newbalance(channel_state, deposit_transaction):
    """Raise the matching participant's contract balance (it never decreases)."""
    participant_address = deposit_transaction.participant_address

    if participant_address == channel_state.our_state.address:
        end_state = channel_state.our_state
    elif participant_address == channel_state.partner_state.address:
        end_state = channel_state.partner_state
    else:
        # Deposit for an unknown participant: nothing to update.
        return

    end_state.contract_balance = max(
        end_state.contract_balance,
        deposit_transaction.contract_balance,
    )
def handle_channel_withdraw(channel_state, state_change):
    """React to an on-chain withdraw: drop the claimed lock, learn the secret,
    and withdraw any of our own locks that the revealed secret opens.
    """
    hashlock = state_change.hashlock
    secret = state_change.secret
    # We withdrew a lock that was pending on the partner's side.
    our_withdraw = (
        state_change.receiver == channel_state.our_state.address and
        is_locked(channel_state.partner_state, hashlock)
    )
    # FIXME: must not remove the lock, otherwise a new unlock proof cannot be
    # made
    if our_withdraw:
        del_lock(channel_state.partner_state, hashlock)
    # The partner withdrew a lock that was pending on our side.
    partner_withdraw = (
        state_change.receiver == channel_state.partner_state.address and
        is_locked(channel_state.our_state, hashlock)
    )
    if partner_withdraw:
        del_lock(channel_state.our_state, hashlock)
    # Withdraw is required if there was a refund in this channel, and the
    # secret is learned from the withdraw event.
    events = []
    if is_locked(channel_state.our_state, hashlock):
        lock = get_lock(channel_state.our_state, hashlock)
        proof = compute_proof_for_lock(channel_state.our_state, secret, lock)
        withdraw = ContractSendChannelWithdraw(channel_state.identifier, [proof])
        events.append(withdraw)
    register_secret(channel_state, secret, hashlock)
    return TransitionResult(channel_state, events)
def state_transition(channel_state, state_change, block_number):
    """Dispatch *state_change* to the matching channel handler."""
    # pylint: disable=too-many-branches,unidiomatic-typecheck
    # Handlers that need the current block number.
    handlers_with_block = {
        Block: handle_block,
        ActionChannelClose: handle_action_close,
        ContractReceiveChannelNewBalance: handle_channel_newbalance,
    }
    # Handlers that only need the state change itself.
    handlers_without_block = {
        ActionTransferDirect: handle_send_directtransfer,
        ContractReceiveChannelClosed: handle_channel_closed,
        ContractReceiveChannelSettled: handle_channel_settled,
        ContractReceiveChannelWithdraw: handle_channel_withdraw,
        ReceiveTransferDirect: handle_receive_directtransfer,
    }

    state_change_type = type(state_change)
    if state_change_type in handlers_with_block:
        return handlers_with_block[state_change_type](
            channel_state,
            state_change,
            block_number,
        )
    if state_change_type in handlers_without_block:
        return handlers_without_block[state_change_type](
            channel_state,
            state_change,
        )

    # Unknown state change: no events, state unchanged.
    return TransitionResult(channel_state, list())
| [
"raiden.transfer.state.UnlockPartialProofState",
"raiden.transfer.mediated_transfer.events.SendBalanceProof",
"raiden.transfer.mediated_transfer.events.SendMediatedTransfer",
"binascii.hexlify",
"raiden.transfer.merkle_tree.merkleroot",
"raiden.transfer.architecture.TransitionResult",
"raiden.transfer.s... | [((1802, 1865), 'collections.namedtuple', 'namedtuple', (['"""TransactionOrder"""', "('block_number', 'transaction')"], {}), "('TransactionOrder', ('block_number', 'transaction'))\n", (1812, 1865), False, 'from collections import namedtuple\n'), ((3033, 3193), 'raiden.transfer.balance_proof.signing_data', 'signing_data', (['balance_proof.nonce', 'balance_proof.transferred_amount', 'balance_proof.channel_address', 'balance_proof.locksroot', 'balance_proof.message_hash'], {}), '(balance_proof.nonce, balance_proof.transferred_amount,\n balance_proof.channel_address, balance_proof.locksroot, balance_proof.\n message_hash)\n', (3045, 3193), False, 'from raiden.transfer.balance_proof import signing_data\n'), ((20431, 20491), 'raiden.transfer.merkle_tree.compute_merkleproof_for', 'compute_merkleproof_for', (['end_state.merkletree', 'lock.lockhash'], {}), '(end_state.merkletree, lock.lockhash)\n', (20454, 20491), False, 'from raiden.transfer.merkle_tree import LEAVES, merkleroot, compute_layers, compute_merkleproof_for\n'), ((20504, 20556), 'raiden.transfer.state.UnlockProofState', 'UnlockProofState', (['merkle_proof', 'lock.encoded', 'secret'], {}), '(merkle_proof, lock.encoded, secret)\n', (20520, 20556), False, 'from raiden.transfer.state import CHANNEL_STATE_CLOSED, CHANNEL_STATE_CLOSING, CHANNEL_STATE_OPENED, CHANNEL_STATE_SETTLED, CHANNEL_STATE_SETTLING, CHANNEL_STATES_PRIOR_TO_CLOSED, CHANNEL_STATE_UNUSABLE, EMPTY_MERKLE_ROOT, EMPTY_MERKLE_TREE, BalanceProofUnsignedState, HashTimeLockState, MerkleTreeState, TransactionExecutionStatus, UnlockPartialProofState, UnlockProofState\n'), ((22041, 22134), 'raiden.transfer.state.BalanceProofUnsignedState', 'BalanceProofUnsignedState', (['nonce', 'transferred_amount', 'locksroot', 'channel_state.identifier'], {}), '(nonce, transferred_amount, locksroot,\n channel_state.identifier)\n', (22066, 22134), False, 'from raiden.transfer.state import CHANNEL_STATE_CLOSED, CHANNEL_STATE_CLOSING, 
CHANNEL_STATE_OPENED, CHANNEL_STATE_SETTLED, CHANNEL_STATE_SETTLING, CHANNEL_STATES_PRIOR_TO_CLOSED, CHANNEL_STATE_UNUSABLE, EMPTY_MERKLE_ROOT, EMPTY_MERKLE_TREE, BalanceProofUnsignedState, HashTimeLockState, MerkleTreeState, TransactionExecutionStatus, UnlockPartialProofState, UnlockProofState\n'), ((22193, 22256), 'raiden.transfer.events.SendDirectTransfer', 'SendDirectTransfer', (['identifier', 'balance_proof', 'token', 'recipient'], {}), '(identifier, balance_proof, token, recipient)\n', (22211, 22256), False, 'from raiden.transfer.events import ContractSendChannelClose, ContractSendChannelSettle, ContractSendChannelUpdateTransfer, ContractSendChannelWithdraw, EventTransferReceivedInvalidDirectTransfer, EventTransferReceivedSuccess, EventTransferSentFailed, SendDirectTransfer\n'), ((22829, 22876), 'raiden.transfer.state.HashTimeLockState', 'HashTimeLockState', (['amount', 'expiration', 'hashlock'], {}), '(amount, expiration, hashlock)\n', (22846, 22876), False, 'from raiden.transfer.state import CHANNEL_STATE_CLOSED, CHANNEL_STATE_CLOSING, CHANNEL_STATE_OPENED, CHANNEL_STATE_SETTLED, CHANNEL_STATE_SETTLING, CHANNEL_STATES_PRIOR_TO_CLOSED, CHANNEL_STATE_UNUSABLE, EMPTY_MERKLE_ROOT, EMPTY_MERKLE_TREE, BalanceProofUnsignedState, HashTimeLockState, MerkleTreeState, TransactionExecutionStatus, UnlockPartialProofState, UnlockProofState\n'), ((23160, 23182), 'raiden.transfer.merkle_tree.merkleroot', 'merkleroot', (['merkletree'], {}), '(merkletree)\n', (23170, 23182), False, 'from raiden.transfer.merkle_tree import LEAVES, merkleroot, compute_layers, compute_merkleproof_for\n'), ((23483, 23576), 'raiden.transfer.state.BalanceProofUnsignedState', 'BalanceProofUnsignedState', (['nonce', 'transferred_amount', 'locksroot', 'channel_state.identifier'], {}), '(nonce, transferred_amount, locksroot,\n channel_state.identifier)\n', (23508, 23576), False, 'from raiden.transfer.state import CHANNEL_STATE_CLOSED, CHANNEL_STATE_CLOSING, CHANNEL_STATE_OPENED, CHANNEL_STATE_SETTLED, 
CHANNEL_STATE_SETTLING, CHANNEL_STATES_PRIOR_TO_CLOSED, CHANNEL_STATE_UNUSABLE, EMPTY_MERKLE_ROOT, EMPTY_MERKLE_TREE, BalanceProofUnsignedState, HashTimeLockState, MerkleTreeState, TransactionExecutionStatus, UnlockPartialProofState, UnlockProofState\n'), ((23635, 23725), 'raiden.transfer.mediated_transfer.state.LockedTransferUnsignedState', 'LockedTransferUnsignedState', (['identifier', 'token', 'balance_proof', 'lock', 'initiator', 'target'], {}), '(identifier, token, balance_proof, lock,\n initiator, target)\n', (23662, 23725), False, 'from raiden.transfer.mediated_transfer.state import LockedTransferUnsignedState\n'), ((23801, 23849), 'raiden.transfer.mediated_transfer.events.SendMediatedTransfer', 'SendMediatedTransfer', (['locked_transfer', 'recipient'], {}), '(locked_transfer, recipient)\n', (23821, 23849), False, 'from raiden.transfer.mediated_transfer.events import refund_from_sendmediated, SendBalanceProof, SendMediatedTransfer\n'), ((24448, 24470), 'raiden.transfer.merkle_tree.merkleroot', 'merkleroot', (['merkletree'], {}), '(merkletree)\n', (24458, 24470), False, 'from raiden.transfer.merkle_tree import LEAVES, merkleroot, compute_layers, compute_merkleproof_for\n'), ((24637, 24730), 'raiden.transfer.state.BalanceProofUnsignedState', 'BalanceProofUnsignedState', (['nonce', 'transferred_amount', 'locksroot', 'channel_state.identifier'], {}), '(nonce, transferred_amount, locksroot,\n channel_state.identifier)\n', (24662, 24730), False, 'from raiden.transfer.state import CHANNEL_STATE_CLOSED, CHANNEL_STATE_CLOSING, CHANNEL_STATE_OPENED, CHANNEL_STATE_SETTLED, CHANNEL_STATE_SETTLING, CHANNEL_STATES_PRIOR_TO_CLOSED, CHANNEL_STATE_UNUSABLE, EMPTY_MERKLE_ROOT, EMPTY_MERKLE_TREE, BalanceProofUnsignedState, HashTimeLockState, MerkleTreeState, TransactionExecutionStatus, UnlockPartialProofState, UnlockProofState\n'), ((24785, 24854), 'raiden.transfer.mediated_transfer.events.SendBalanceProof', 'SendBalanceProof', (['identifier', 'token', 'recipient', 'secret', 
'balance_proof'], {}), '(identifier, token, recipient, secret, balance_proof)\n', (24801, 24854), False, 'from raiden.transfer.mediated_transfer.events import refund_from_sendmediated, SendBalanceProof, SendMediatedTransfer\n'), ((26694, 26742), 'raiden.transfer.mediated_transfer.events.refund_from_sendmediated', 'refund_from_sendmediated', (['send_mediated_transfer'], {}), '(send_mediated_transfer)\n', (26718, 26742), False, 'from raiden.transfer.mediated_transfer.events import refund_from_sendmediated, SendBalanceProof, SendMediatedTransfer\n'), ((30020, 30059), 'raiden.transfer.architecture.TransitionResult', 'TransitionResult', (['channel_state', 'events'], {}), '(channel_state, events)\n', (30036, 30059), False, 'from raiden.transfer.architecture import TransitionResult\n'), ((30311, 30350), 'raiden.transfer.architecture.TransitionResult', 'TransitionResult', (['channel_state', 'events'], {}), '(channel_state, events)\n', (30327, 30350), False, 'from raiden.transfer.architecture import TransitionResult\n'), ((31347, 31386), 'raiden.transfer.architecture.TransitionResult', 'TransitionResult', (['channel_state', 'events'], {}), '(channel_state, events)\n', (31363, 31386), False, 'from raiden.transfer.architecture import TransitionResult\n'), ((34106, 34145), 'raiden.transfer.architecture.TransitionResult', 'TransitionResult', (['channel_state', 'events'], {}), '(channel_state, events)\n', (34122, 34145), False, 'from raiden.transfer.architecture import TransitionResult\n'), ((35313, 35352), 'raiden.transfer.architecture.TransitionResult', 'TransitionResult', (['channel_state', 'events'], {}), '(channel_state, events)\n', (35329, 35352), False, 'from raiden.transfer.architecture import TransitionResult\n'), ((35582, 35621), 'raiden.transfer.architecture.TransitionResult', 'TransitionResult', (['channel_state', 'events'], {}), '(channel_state, events)\n', (35598, 35621), False, 'from raiden.transfer.architecture import TransitionResult\n'), ((36173, 36212), 
'raiden.transfer.architecture.TransitionResult', 'TransitionResult', (['channel_state', 'events'], {}), '(channel_state, events)\n', (36189, 36212), False, 'from raiden.transfer.architecture import TransitionResult\n'), ((38092, 38131), 'raiden.transfer.architecture.TransitionResult', 'TransitionResult', (['channel_state', 'events'], {}), '(channel_state, events)\n', (38108, 38131), False, 'from raiden.transfer.architecture import TransitionResult\n'), ((38298, 38337), 'raiden.transfer.architecture.TransitionResult', 'TransitionResult', (['channel_state', 'events'], {}), '(channel_state, events)\n', (38314, 38337), False, 'from raiden.transfer.architecture import TransitionResult\n'), ((3398, 3462), 'raiden.encoding.signing.recover_publickey', 'recover_publickey', (['data_that_was_signed', 'balance_proof.signature'], {}), '(data_that_was_signed, balance_proof.signature)\n', (3415, 3462), False, 'from raiden.encoding.signing import recover_publickey\n'), ((3774, 3805), 'raiden.utils.publickey_to_address', 'publickey_to_address', (['publickey'], {}), '(publickey)\n', (3794, 3805), False, 'from raiden.utils import publickey_to_address, typing\n'), ((8446, 8468), 'raiden.transfer.merkle_tree.merkleroot', 'merkleroot', (['merkletree'], {}), '(merkletree)\n', (8456, 8468), False, 'from raiden.transfer.merkle_tree import LEAVES, merkleroot, compute_layers, compute_merkleproof_for\n'), ((11957, 11983), 'raiden.transfer.merkle_tree.merkleroot', 'merkleroot', (['new_merkletree'], {}), '(new_merkletree)\n', (11967, 11983), False, 'from raiden.transfer.merkle_tree import LEAVES, merkleroot, compute_layers, compute_merkleproof_for\n'), ((19129, 19216), 'raiden.transfer.state.TransactionExecutionStatus', 'TransactionExecutionStatus', (['None', 'block_number', 'TransactionExecutionStatus.SUCCESS'], {}), '(None, block_number, TransactionExecutionStatus.\n SUCCESS)\n', (19155, 19216), False, 'from raiden.transfer.state import CHANNEL_STATE_CLOSED, CHANNEL_STATE_CLOSING, 
CHANNEL_STATE_OPENED, CHANNEL_STATE_SETTLED, CHANNEL_STATE_SETTLING, CHANNEL_STATES_PRIOR_TO_CLOSED, CHANNEL_STATE_UNUSABLE, EMPTY_MERKLE_ROOT, EMPTY_MERKLE_TREE, BalanceProofUnsignedState, HashTimeLockState, MerkleTreeState, TransactionExecutionStatus, UnlockPartialProofState, UnlockProofState\n'), ((19625, 19712), 'raiden.transfer.state.TransactionExecutionStatus', 'TransactionExecutionStatus', (['None', 'block_number', 'TransactionExecutionStatus.SUCCESS'], {}), '(None, block_number, TransactionExecutionStatus.\n SUCCESS)\n', (19651, 19712), False, 'from raiden.transfer.state import CHANNEL_STATE_CLOSED, CHANNEL_STATE_CLOSING, CHANNEL_STATE_OPENED, CHANNEL_STATE_SETTLED, CHANNEL_STATE_SETTLING, CHANNEL_STATES_PRIOR_TO_CLOSED, CHANNEL_STATE_UNUSABLE, EMPTY_MERKLE_ROOT, EMPTY_MERKLE_TREE, BalanceProofUnsignedState, HashTimeLockState, MerkleTreeState, TransactionExecutionStatus, UnlockPartialProofState, UnlockProofState\n'), ((27415, 27467), 'raiden.transfer.state.TransactionExecutionStatus', 'TransactionExecutionStatus', (['block_number', 'None', 'None'], {}), '(block_number, None, None)\n', (27441, 27467), False, 'from raiden.transfer.state import CHANNEL_STATE_CLOSED, CHANNEL_STATE_CLOSING, CHANNEL_STATE_OPENED, CHANNEL_STATE_SETTLED, CHANNEL_STATE_SETTLING, CHANNEL_STATES_PRIOR_TO_CLOSED, CHANNEL_STATE_UNUSABLE, EMPTY_MERKLE_ROOT, EMPTY_MERKLE_TREE, BalanceProofUnsignedState, HashTimeLockState, MerkleTreeState, TransactionExecutionStatus, UnlockPartialProofState, UnlockProofState\n'), ((27537, 27664), 'raiden.transfer.events.ContractSendChannelClose', 'ContractSendChannelClose', (['channel_state.identifier', 'channel_state.token_address', 'channel_state.partner_state.balance_proof'], {}), '(channel_state.identifier, channel_state.\n token_address, channel_state.partner_state.balance_proof)\n', (27561, 27664), False, 'from raiden.transfer.events import ContractSendChannelClose, ContractSendChannelSettle, ContractSendChannelUpdateTransfer, 
ContractSendChannelWithdraw, EventTransferReceivedInvalidDirectTransfer, EventTransferReceivedSuccess, EventTransferSentFailed, SendDirectTransfer\n'), ((28047, 28091), 'raiden.transfer.state.UnlockPartialProofState', 'UnlockPartialProofState', (['pendinglock', 'secret'], {}), '(pendinglock, secret)\n', (28070, 28091), False, 'from raiden.transfer.state import CHANNEL_STATE_CLOSED, CHANNEL_STATE_CLOSING, CHANNEL_STATE_OPENED, CHANNEL_STATE_SETTLED, CHANNEL_STATE_SETTLING, CHANNEL_STATES_PRIOR_TO_CLOSED, CHANNEL_STATE_UNUSABLE, EMPTY_MERKLE_ROOT, EMPTY_MERKLE_TREE, BalanceProofUnsignedState, HashTimeLockState, MerkleTreeState, TransactionExecutionStatus, UnlockPartialProofState, UnlockProofState\n'), ((30965, 31088), 'raiden.transfer.events.EventTransferReceivedSuccess', 'EventTransferReceivedSuccess', (['direct_transfer.transfer_identifier', 'transfer_amount', 'channel_state.partner_state.address'], {}), '(direct_transfer.transfer_identifier,\n transfer_amount, channel_state.partner_state.address)\n', (30993, 31088), False, 'from raiden.transfer.events import ContractSendChannelClose, ContractSendChannelSettle, ContractSendChannelUpdateTransfer, ContractSendChannelWithdraw, EventTransferReceivedInvalidDirectTransfer, EventTransferReceivedSuccess, EventTransferSentFailed, SendDirectTransfer\n'), ((31183, 31279), 'raiden.transfer.events.EventTransferReceivedInvalidDirectTransfer', 'EventTransferReceivedInvalidDirectTransfer', (['direct_transfer.transfer_identifier'], {'reason': 'msg'}), '(direct_transfer.\n transfer_identifier, reason=msg)\n', (31225, 31279), False, 'from raiden.transfer.events import ContractSendChannelClose, ContractSendChannelSettle, ContractSendChannelUpdateTransfer, ContractSendChannelWithdraw, EventTransferReceivedInvalidDirectTransfer, EventTransferReceivedSuccess, EventTransferSentFailed, SendDirectTransfer\n'), ((33917, 33971), 'heapq.heappop', 'heapq.heappop', (['channel_state.deposit_transaction_queue'], {}), 
'(channel_state.deposit_transaction_queue)\n', (33930, 33971), False, 'import heapq\n'), ((36078, 36140), 'heapq.heappush', 'heapq.heappush', (['channel_state.deposit_transaction_queue', 'order'], {}), '(channel_state.deposit_transaction_queue, order)\n', (36092, 36140), False, 'import heapq\n'), ((37931, 37993), 'raiden.transfer.events.ContractSendChannelWithdraw', 'ContractSendChannelWithdraw', (['channel_state.identifier', '[proof]'], {}), '(channel_state.identifier, [proof])\n', (37958, 37993), False, 'from raiden.transfer.events import ContractSendChannelClose, ContractSendChannelSettle, ContractSendChannelUpdateTransfer, ContractSendChannelWithdraw, EventTransferReceivedInvalidDirectTransfer, EventTransferReceivedSuccess, EventTransferSentFailed, SendDirectTransfer\n'), ((21051, 21073), 'raiden.transfer.merkle_tree.compute_layers', 'compute_layers', (['leaves'], {}), '(leaves)\n', (21065, 21073), False, 'from raiden.transfer.merkle_tree import LEAVES, merkleroot, compute_layers, compute_merkleproof_for\n'), ((29267, 29340), 'raiden.transfer.events.EventTransferSentFailed', 'EventTransferSentFailed', (['state_change.identifier', '"""Channel is not opened"""'], {}), "(state_change.identifier, 'Channel is not opened')\n", (29290, 29340), False, 'from raiden.transfer.events import ContractSendChannelClose, ContractSendChannelSettle, ContractSendChannelUpdateTransfer, ContractSendChannelWithdraw, EventTransferReceivedInvalidDirectTransfer, EventTransferReceivedSuccess, EventTransferSentFailed, SendDirectTransfer\n'), ((33586, 33651), 'raiden.transfer.state.TransactionExecutionStatus', 'TransactionExecutionStatus', (['state_change.block_number', 'None', 'None'], {}), '(state_change.block_number, None, None)\n', (33612, 33651), False, 'from raiden.transfer.state import CHANNEL_STATE_CLOSED, CHANNEL_STATE_CLOSING, CHANNEL_STATE_OPENED, CHANNEL_STATE_SETTLED, CHANNEL_STATE_SETTLING, CHANNEL_STATES_PRIOR_TO_CLOSED, CHANNEL_STATE_UNUSABLE, EMPTY_MERKLE_ROOT, 
EMPTY_MERKLE_TREE, BalanceProofUnsignedState, HashTimeLockState, MerkleTreeState, TransactionExecutionStatus, UnlockPartialProofState, UnlockProofState\n'), ((33734, 33785), 'raiden.transfer.events.ContractSendChannelSettle', 'ContractSendChannelSettle', (['channel_state.identifier'], {}), '(channel_state.identifier)\n', (33759, 33785), False, 'from raiden.transfer.events import ContractSendChannelClose, ContractSendChannelSettle, ContractSendChannelUpdateTransfer, ContractSendChannelWithdraw, EventTransferReceivedInvalidDirectTransfer, EventTransferReceivedSuccess, EventTransferSentFailed, SendDirectTransfer\n'), ((34872, 34946), 'raiden.transfer.events.ContractSendChannelUpdateTransfer', 'ContractSendChannelUpdateTransfer', (['channel_state.identifier', 'balance_proof'], {}), '(channel_state.identifier, balance_proof)\n', (34905, 34946), False, 'from raiden.transfer.events import ContractSendChannelClose, ContractSendChannelSettle, ContractSendChannelUpdateTransfer, ContractSendChannelWithdraw, EventTransferReceivedInvalidDirectTransfer, EventTransferReceivedSuccess, EventTransferSentFailed, SendDirectTransfer\n'), ((35149, 35217), 'raiden.transfer.events.ContractSendChannelWithdraw', 'ContractSendChannelWithdraw', (['channel_state.identifier', 'unlock_proofs'], {}), '(channel_state.identifier, unlock_proofs)\n', (35176, 35217), False, 'from raiden.transfer.events import ContractSendChannelClose, ContractSendChannelSettle, ContractSendChannelUpdateTransfer, ContractSendChannelWithdraw, EventTransferReceivedInvalidDirectTransfer, EventTransferReceivedSuccess, EventTransferSentFailed, SendDirectTransfer\n'), ((21415, 21437), 'raiden.transfer.merkle_tree.compute_layers', 'compute_layers', (['leaves'], {}), '(leaves)\n', (21429, 21437), False, 'from raiden.transfer.merkle_tree import LEAVES, merkleroot, compute_layers, compute_merkleproof_for\n'), ((29550, 29603), 'raiden.transfer.events.EventTransferSentFailed', 'EventTransferSentFailed', 
(['state_change.identifier', 'msg'], {}), '(state_change.identifier, msg)\n', (29573, 29603), False, 'from raiden.transfer.events import ContractSendChannelClose, ContractSendChannelSettle, ContractSendChannelUpdateTransfer, ContractSendChannelWithdraw, EventTransferReceivedInvalidDirectTransfer, EventTransferReceivedSuccess, EventTransferSentFailed, SendDirectTransfer\n'), ((12680, 12704), 'binascii.hexlify', 'hexlify', (['unlock.hashlock'], {}), '(unlock.hashlock)\n', (12687, 12704), False, 'from binascii import hexlify\n'), ((29919, 29972), 'raiden.transfer.events.EventTransferSentFailed', 'EventTransferSentFailed', (['state_change.identifier', 'msg'], {}), '(state_change.identifier, msg)\n', (29942, 29972), False, 'from raiden.transfer.events import ContractSendChannelClose, ContractSendChannelSettle, ContractSendChannelUpdateTransfer, ContractSendChannelWithdraw, EventTransferReceivedInvalidDirectTransfer, EventTransferReceivedSuccess, EventTransferSentFailed, SendDirectTransfer\n'), ((12883, 12907), 'binascii.hexlify', 'hexlify', (['unlock.hashlock'], {}), '(unlock.hashlock)\n', (12890, 12907), False, 'from binascii import hexlify\n'), ((5755, 5781), 'binascii.hexlify', 'hexlify', (['current_locksroot'], {}), '(current_locksroot)\n', (5762, 5781), False, 'from binascii import hexlify\n'), ((5804, 5845), 'binascii.hexlify', 'hexlify', (['received_balance_proof.locksroot'], {}), '(received_balance_proof.locksroot)\n', (5811, 5845), False, 'from binascii import hexlify\n'), ((9667, 9695), 'binascii.hexlify', 'hexlify', (['locksroot_with_lock'], {}), '(locksroot_with_lock)\n', (9674, 9695), False, 'from binascii import hexlify\n'), ((9722, 9763), 'binascii.hexlify', 'hexlify', (['received_balance_proof.locksroot'], {}), '(received_balance_proof.locksroot)\n', (9729, 9763), False, 'from binascii import hexlify\n'), ((14029, 14060), 'binascii.hexlify', 'hexlify', (['locksroot_without_lock'], {}), '(locksroot_without_lock)\n', (14036, 14060), False, 'from binascii 
import hexlify\n'), ((14083, 14124), 'binascii.hexlify', 'hexlify', (['received_balance_proof.locksroot'], {}), '(received_balance_proof.locksroot)\n', (14090, 14124), False, 'from binascii import hexlify\n'), ((6851, 6884), 'binascii.hexlify', 'hexlify', (['channel_state.identifier'], {}), '(channel_state.identifier)\n', (6858, 6884), False, 'from binascii import hexlify\n'), ((6907, 6954), 'binascii.hexlify', 'hexlify', (['received_balance_proof.channel_address'], {}), '(received_balance_proof.channel_address)\n', (6914, 6954), False, 'from binascii import hexlify\n'), ((10775, 10808), 'binascii.hexlify', 'hexlify', (['channel_state.identifier'], {}), '(channel_state.identifier)\n', (10782, 10808), False, 'from binascii import hexlify\n'), ((10835, 10882), 'binascii.hexlify', 'hexlify', (['received_balance_proof.channel_address'], {}), '(received_balance_proof.channel_address)\n', (10842, 10882), False, 'from binascii import hexlify\n'), ((15164, 15211), 'binascii.hexlify', 'hexlify', (['received_balance_proof.channel_address'], {}), '(received_balance_proof.channel_address)\n', (15171, 15211), False, 'from binascii import hexlify\n')] |
from pathlib import Path
from .build import DocBuilder
def finalize_builddir(repo_name):
'Bookkeeping on the docs build directory'
root = Path('_build') / repo_name
with open(root / '.nojekyll', 'w') as fh:
fh.write('')
def build_root(repo_name):
'''Build the top-level documentation.
See :py:mod:`.build` on building sub-projects.
'''
with DocBuilder(repo_name, '.') as builder:
builder.build()
| [
"pathlib.Path"
] | [((148, 162), 'pathlib.Path', 'Path', (['"""_build"""'], {}), "('_build')\n", (152, 162), False, 'from pathlib import Path\n')] |
from django.contrib import admin
from blog.models import *
# Register your models here.
admin.site.register(Blog)
class TagAdmin(admin.ModelAdmin):
readonly = ['slug']
admin.site.register(Tag, TagAdmin)
| [
"django.contrib.admin.site.register"
] | [((90, 115), 'django.contrib.admin.site.register', 'admin.site.register', (['Blog'], {}), '(Blog)\n', (109, 115), False, 'from django.contrib import admin\n'), ((175, 209), 'django.contrib.admin.site.register', 'admin.site.register', (['Tag', 'TagAdmin'], {}), '(Tag, TagAdmin)\n', (194, 209), False, 'from django.contrib import admin\n')] |
import random
def attack(attacker_i, defender_i, entities):
attacker = entities[attacker_i]
defender = entities[defender_i]
# Calculate values
hit_chance = attacker.accuracy * (1 - defender.evasion)
bypass_armor_chance = 1 - defender.armor
damage_normal = attacker.damage
damage_armor = attacker.damage * (1 - defender.resistance)
# Calculate result from values
kill = False
damage = None
hit = True if hit_chance > random.random() else False
if hit:
hit_armor = False if bypass_armor_chance > random.random() else True
if hit_armor:
damage = damage_armor
else:
damage = damage_normal
entities[defender_i].take_damage(damage)
if defender.hp < 1:
kill = True
message = construct_message(defender.name, hit, damage, kill)
if kill:
entities.pop(defender_i)
return message
def construct_message(defender_name, hit, damage, kill):
if hit:
message = "You hit the {} for {} damage.".format(defender_name, damage)
else:
message = "You missed the {}.".format(defender_name)
if kill:
message += " The {} died.".format(defender_name)
return message
# Should we have a class for combat results?
| [
"random.random"
] | [((465, 480), 'random.random', 'random.random', ([], {}), '()\n', (478, 480), False, 'import random\n'), ((555, 570), 'random.random', 'random.random', ([], {}), '()\n', (568, 570), False, 'import random\n')] |
# name,level,published,created_on,review_count,avg_rating
# Product 1,1,True,2019-07-10,10,4.3
from dbexport.config import Session
from dbexport.models import Review, Product
from sqlalchemy.sql import func
import json
session = Session()
review_statement = (
session.query(
Review.product_id,
func.count("*").label("review_count"),
func.avg(Review.rating).label("avg_rating"),
)
.group_by(Review.product_id)
.subquery()
)
products = []
for product, review_count, avg_rating in session.query(
Product, review_statement.c.review_count, review_statement.c.avg_rating
).outerjoin(review_statement, Product.id == review_statement.c.product_id):
products.append(
{
"name": product.name,
"level": product.level,
"published": product.published,
"created_on": str(product.created_on.date()),
"review_count": review_count or 0,
"avg_rating": round(float(avg_rating), 4) if avg_rating else 0,
}
)
with open("product_ratings.json", "w") as f:
json.dump(products, f)
| [
"sqlalchemy.sql.func.avg",
"sqlalchemy.sql.func.count",
"json.dump",
"dbexport.config.Session"
] | [((233, 242), 'dbexport.config.Session', 'Session', ([], {}), '()\n', (240, 242), False, 'from dbexport.config import Session\n'), ((1084, 1106), 'json.dump', 'json.dump', (['products', 'f'], {}), '(products, f)\n', (1093, 1106), False, 'import json\n'), ((319, 334), 'sqlalchemy.sql.func.count', 'func.count', (['"""*"""'], {}), "('*')\n", (329, 334), False, 'from sqlalchemy.sql import func\n'), ((366, 389), 'sqlalchemy.sql.func.avg', 'func.avg', (['Review.rating'], {}), '(Review.rating)\n', (374, 389), False, 'from sqlalchemy.sql import func\n')] |
from OpenDutchWordnet import Wn_grid_parser
from tqdm import tqdm
from typing import NamedTuple
class Lex(NamedTuple):
id: str
lemma: str
pos: str
sense_id: str
sense_nr: int
synset_id: str
Binary = tuple[str, str]
hypers: set[Binary] = set()
antos: set[Binary] = set()
synos: set[Binary] = set()
xsynos: set[Binary] = set()
lexes: set[Lex] = set()
_hyper, _anto, _syno, _xsyno = 'has_hyperonym', 'near_antonym', 'near_synonym', 'xpos_near_synonym'
parser = Wn_grid_parser(Wn_grid_parser.odwn)
parser.clean_remove_synsets_without_relations(parser.synsets_get_generator())
lex_gen = parser.les_get_generator()
for lex in tqdm(lex_gen):
lex_id = lex.get_id()
lemma = lex.get_lemma()
pos = lex.get_pos()[0]
sense_id = lex.get_sense_id()
sense_nr = lex.get_sense_number()
synset_id = lex.get_synset_id()
if synset_id is None:
continue
lexes.add(Lex(lex_id, lemma, pos, sense_id, int(sense_nr), synset_id))
used_synsets = set(map(lambda lex: lex.synset_id, lexes))
synset_gen = parser.synsets_get_generator()
for synset in tqdm(synset_gen):
source = synset.get_id()
if source not in used_synsets:
continue
for target in map(lambda hyper: hyper.get_target(), synset.get_relations(_hyper)):
if target not in used_synsets:
continue
hypers.add((source, target))
for target in map(lambda anto: anto.get_target(), synset.get_relations(_anto)):
if target not in used_synsets:
continue
antos.add((source, target))
antos.add((target, source))
for target in map(lambda syno: syno.get_target(), synset.get_relations(_syno)):
if target not in used_synsets:
continue
synos.add((source, target))
synos.add((target, source))
for target in (map(lambda xsyn: xsyn.get_target(), synset.get_relations(_xsyno))):
if target not in used_synsets:
continue
xsynos.add((source, target))
xsynos.add((target, source))
def escape(x: str) -> str:
return x.replace("'", "\'")
def print_lexes(path: str = './wn_s.pl'):
with open(path, 'a') as f:
for lex in lexes:
f.write(f's("{lex.synset_id}", _, "{lex.lemma}", "{lex.pos}", {lex.sense_nr}, _).\n')
def print_hypers(path: str = './wn_hyp.pl'):
with open(path, 'a') as f:
for hyp in hypers:
f.write(f'hyp("{hyp[0]}", "{hyp[1]}").\n')
def print_antos(path: str = './wn_ant.pl'):
with open(path, 'a') as f:
for anto in antos:
f.write(f'ant("{anto[0]}", _, "{anto[1]}", _).\n')
def print_synos(path: str = './wn_sim.pl'):
with open(path, 'a') as f:
for syno in synos:
f.write(f'sim("{syno[0]}", "{syno[1]}").\n')
def print_xsynos(path: str = './wn_der.pl'):
with open(path, 'a') as f:
for syno in synos:
f.write(f"sim('{escape(syno[0])}', _, '{escape(syno[1])}', _).\n") | [
"OpenDutchWordnet.Wn_grid_parser",
"tqdm.tqdm"
] | [((519, 554), 'OpenDutchWordnet.Wn_grid_parser', 'Wn_grid_parser', (['Wn_grid_parser.odwn'], {}), '(Wn_grid_parser.odwn)\n', (533, 554), False, 'from OpenDutchWordnet import Wn_grid_parser\n'), ((683, 696), 'tqdm.tqdm', 'tqdm', (['lex_gen'], {}), '(lex_gen)\n', (687, 696), False, 'from tqdm import tqdm\n'), ((1123, 1139), 'tqdm.tqdm', 'tqdm', (['synset_gen'], {}), '(synset_gen)\n', (1127, 1139), False, 'from tqdm import tqdm\n')] |
'''
CVPR 2020 submission, Paper ID 6791
Source code for 'Learning to Cartoonize Using White-Box Cartoon Representations'
'''
import tensorflow as tf
import numpy as np
import tensorflow.contrib.slim as slim
def adaptive_instance_norm(content, style, epsilon=1e-5):
c_mean, c_var = tf.nn.moments(content, axes=[1, 2], keep_dims=True)
s_mean, s_var = tf.nn.moments(style, axes=[1, 2], keep_dims=True)
c_std, s_std = tf.sqrt(c_var + epsilon), tf.sqrt(s_var + epsilon)
return s_std * (content - c_mean) / c_std + s_mean
def spectral_norm(w, iteration=1):
w_shape = w.shape.as_list()
w = tf.reshape(w, [-1, w_shape[-1]])
u = tf.get_variable("u", [1, w_shape[-1]],
initializer=tf.random_normal_initializer(), trainable=False)
u_hat = u
v_hat = None
for i in range(iteration):
"""
power iteration
Usually iteration = 1 will be enough
"""
v_ = tf.matmul(u_hat, tf.transpose(w))
v_hat = tf.nn.l2_normalize(v_)
u_ = tf.matmul(v_hat, w)
u_hat = tf.nn.l2_normalize(u_)
u_hat = tf.stop_gradient(u_hat)
v_hat = tf.stop_gradient(v_hat)
sigma = tf.matmul(tf.matmul(v_hat, w), tf.transpose(u_hat))
with tf.control_dependencies([u.assign(u_hat)]):
w_norm = w / sigma
w_norm = tf.reshape(w_norm, w_shape)
return w_norm
def conv_spectral_norm(x, channel, k_size, stride=1, name='conv_snorm'):
with tf.variable_scope(name):
w = tf.get_variable("kernel", shape=[k_size[0], k_size[1], x.get_shape()[-1], channel])
b = tf.get_variable("bias", [channel], initializer=tf.constant_initializer(0.0))
x = tf.nn.conv2d(input=x, filter=spectral_norm(w), strides=[1, stride, stride, 1], padding='SAME') + b
return x
def self_attention(inputs, name='attention', reuse=False):
with tf.variable_scope(name, reuse=reuse):
h, w = tf.shape(inputs)[1], tf.shape(inputs)[2]
bs, _, _, ch = inputs.get_shape().as_list()
f = slim.convolution2d(inputs, ch//8, [1, 1], activation_fn=None)
g = slim.convolution2d(inputs, ch//8, [1, 1], activation_fn=None)
s = slim.convolution2d(inputs, 1, [1, 1], activation_fn=None)
f_flatten = tf.reshape(f, shape=[f.shape[0], -1, f.shape[-1]])
g_flatten = tf.reshape(g, shape=[g.shape[0], -1, g.shape[-1]])
beta = tf.matmul(f_flatten, g_flatten, transpose_b=True)
beta = tf.nn.softmax(beta)
s_flatten = tf.reshape(s, shape=[s.shape[0], -1, s.shape[-1]])
att_map = tf.matmul(beta, s_flatten)
att_map = tf.reshape(att_map, shape=[bs, h, w, 1])
gamma = tf.get_variable("gamma", [1], initializer=tf.constant_initializer(0.0))
output = att_map * gamma + inputs
return att_map, output
if __name__ == '__main__':
pass
| [
"tensorflow.shape",
"tensorflow.variable_scope",
"tensorflow.transpose",
"tensorflow.nn.l2_normalize",
"tensorflow.nn.moments",
"tensorflow.random_normal_initializer",
"tensorflow.stop_gradient",
"tensorflow.sqrt",
"tensorflow.matmul",
"tensorflow.nn.softmax",
"tensorflow.reshape",
"tensorflow... | [((305, 356), 'tensorflow.nn.moments', 'tf.nn.moments', (['content'], {'axes': '[1, 2]', 'keep_dims': '(True)'}), '(content, axes=[1, 2], keep_dims=True)\n', (318, 356), True, 'import tensorflow as tf\n'), ((378, 427), 'tensorflow.nn.moments', 'tf.nn.moments', (['style'], {'axes': '[1, 2]', 'keep_dims': '(True)'}), '(style, axes=[1, 2], keep_dims=True)\n', (391, 427), True, 'import tensorflow as tf\n'), ((641, 673), 'tensorflow.reshape', 'tf.reshape', (['w', '[-1, w_shape[-1]]'], {}), '(w, [-1, w_shape[-1]])\n', (651, 673), True, 'import tensorflow as tf\n'), ((1138, 1161), 'tensorflow.stop_gradient', 'tf.stop_gradient', (['u_hat'], {}), '(u_hat)\n', (1154, 1161), True, 'import tensorflow as tf\n'), ((1175, 1198), 'tensorflow.stop_gradient', 'tf.stop_gradient', (['v_hat'], {}), '(v_hat)\n', (1191, 1198), True, 'import tensorflow as tf\n'), ((448, 472), 'tensorflow.sqrt', 'tf.sqrt', (['(c_var + epsilon)'], {}), '(c_var + epsilon)\n', (455, 472), True, 'import tensorflow as tf\n'), ((474, 498), 'tensorflow.sqrt', 'tf.sqrt', (['(s_var + epsilon)'], {}), '(s_var + epsilon)\n', (481, 498), True, 'import tensorflow as tf\n'), ((1024, 1046), 'tensorflow.nn.l2_normalize', 'tf.nn.l2_normalize', (['v_'], {}), '(v_)\n', (1042, 1046), True, 'import tensorflow as tf\n'), ((1063, 1082), 'tensorflow.matmul', 'tf.matmul', (['v_hat', 'w'], {}), '(v_hat, w)\n', (1072, 1082), True, 'import tensorflow as tf\n'), ((1100, 1122), 'tensorflow.nn.l2_normalize', 'tf.nn.l2_normalize', (['u_'], {}), '(u_)\n', (1118, 1122), True, 'import tensorflow as tf\n'), ((1224, 1243), 'tensorflow.matmul', 'tf.matmul', (['v_hat', 'w'], {}), '(v_hat, w)\n', (1233, 1243), True, 'import tensorflow as tf\n'), ((1245, 1264), 'tensorflow.transpose', 'tf.transpose', (['u_hat'], {}), '(u_hat)\n', (1257, 1264), True, 'import tensorflow as tf\n'), ((1368, 1395), 'tensorflow.reshape', 'tf.reshape', (['w_norm', 'w_shape'], {}), '(w_norm, w_shape)\n', (1378, 1395), True, 'import tensorflow as tf\n'), 
((1505, 1528), 'tensorflow.variable_scope', 'tf.variable_scope', (['name'], {}), '(name)\n', (1522, 1528), True, 'import tensorflow as tf\n'), ((1927, 1963), 'tensorflow.variable_scope', 'tf.variable_scope', (['name'], {'reuse': 'reuse'}), '(name, reuse=reuse)\n', (1944, 1963), True, 'import tensorflow as tf\n'), ((2088, 2151), 'tensorflow.contrib.slim.convolution2d', 'slim.convolution2d', (['inputs', '(ch // 8)', '[1, 1]'], {'activation_fn': 'None'}), '(inputs, ch // 8, [1, 1], activation_fn=None)\n', (2106, 2151), True, 'import tensorflow.contrib.slim as slim\n'), ((2163, 2226), 'tensorflow.contrib.slim.convolution2d', 'slim.convolution2d', (['inputs', '(ch // 8)', '[1, 1]'], {'activation_fn': 'None'}), '(inputs, ch // 8, [1, 1], activation_fn=None)\n', (2181, 2226), True, 'import tensorflow.contrib.slim as slim\n'), ((2238, 2295), 'tensorflow.contrib.slim.convolution2d', 'slim.convolution2d', (['inputs', '(1)', '[1, 1]'], {'activation_fn': 'None'}), '(inputs, 1, [1, 1], activation_fn=None)\n', (2256, 2295), True, 'import tensorflow.contrib.slim as slim\n'), ((2317, 2367), 'tensorflow.reshape', 'tf.reshape', (['f'], {'shape': '[f.shape[0], -1, f.shape[-1]]'}), '(f, shape=[f.shape[0], -1, f.shape[-1]])\n', (2327, 2367), True, 'import tensorflow as tf\n'), ((2389, 2439), 'tensorflow.reshape', 'tf.reshape', (['g'], {'shape': '[g.shape[0], -1, g.shape[-1]]'}), '(g, shape=[g.shape[0], -1, g.shape[-1]])\n', (2399, 2439), True, 'import tensorflow as tf\n'), ((2456, 2505), 'tensorflow.matmul', 'tf.matmul', (['f_flatten', 'g_flatten'], {'transpose_b': '(True)'}), '(f_flatten, g_flatten, transpose_b=True)\n', (2465, 2505), True, 'import tensorflow as tf\n'), ((2522, 2541), 'tensorflow.nn.softmax', 'tf.nn.softmax', (['beta'], {}), '(beta)\n', (2535, 2541), True, 'import tensorflow as tf\n'), ((2573, 2623), 'tensorflow.reshape', 'tf.reshape', (['s'], {'shape': '[s.shape[0], -1, s.shape[-1]]'}), '(s, shape=[s.shape[0], -1, s.shape[-1]])\n', (2583, 2623), True, 'import 
tensorflow as tf\n'), ((2643, 2669), 'tensorflow.matmul', 'tf.matmul', (['beta', 's_flatten'], {}), '(beta, s_flatten)\n', (2652, 2669), True, 'import tensorflow as tf\n'), ((2690, 2730), 'tensorflow.reshape', 'tf.reshape', (['att_map'], {'shape': '[bs, h, w, 1]'}), '(att_map, shape=[bs, h, w, 1])\n', (2700, 2730), True, 'import tensorflow as tf\n'), ((746, 776), 'tensorflow.random_normal_initializer', 'tf.random_normal_initializer', ([], {}), '()\n', (774, 776), True, 'import tensorflow as tf\n'), ((990, 1005), 'tensorflow.transpose', 'tf.transpose', (['w'], {}), '(w)\n', (1002, 1005), True, 'import tensorflow as tf\n'), ((1687, 1715), 'tensorflow.constant_initializer', 'tf.constant_initializer', (['(0.0)'], {}), '(0.0)\n', (1710, 1715), True, 'import tensorflow as tf\n'), ((1981, 1997), 'tensorflow.shape', 'tf.shape', (['inputs'], {}), '(inputs)\n', (1989, 1997), True, 'import tensorflow as tf\n'), ((2002, 2018), 'tensorflow.shape', 'tf.shape', (['inputs'], {}), '(inputs)\n', (2010, 2018), True, 'import tensorflow as tf\n'), ((2790, 2818), 'tensorflow.constant_initializer', 'tf.constant_initializer', (['(0.0)'], {}), '(0.0)\n', (2813, 2818), True, 'import tensorflow as tf\n')] |
# coding: utf-8
import logging
from behave import *
import foods.kebab # type: ignore
import foods.pizza # type: ignore
from foods.formula import Formula
logger = logging.getLogger(__name__)
use_step_matcher("parse")
foods_ = []
@given(
"Mister Patate's favorite foods (a {food} is represented by a {sauce} and {price})"
)
def step_impl(context, food, sauce, price):
module = f"foods.{food.lower()}"
food = getattr(eval(module), food.title())(sauce, int(price))
foods_.append(food)
@step("Mister Patate's favorite drink (Coca-Cola)")
def step_impl(context):
context.drink = "Coca-Cola"
@when("Mister Patate add food in his formula")
def step_impl(context):
context.formula = Formula(context.drink, foods_)
@then("he should have a list with all his favorite foods")
def step_impl(context):
assert context.formula.drink == "Coca-Cola"
assert len(context.formula.foods) == len(foods_)
| [
"logging.getLogger",
"foods.formula.Formula"
] | [((168, 195), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (185, 195), False, 'import logging\n'), ((711, 741), 'foods.formula.Formula', 'Formula', (['context.drink', 'foods_'], {}), '(context.drink, foods_)\n', (718, 741), False, 'from foods.formula import Formula\n')] |
from data_warehouse.db import connect
from data_warehouse.sql_queries import copy_table_queries, insert_table_queries
def load_staging_tables(cur, conn):
for query in copy_table_queries:
cur.execute(query)
conn.commit()
def insert_tables(cur, conn):
for query in insert_table_queries:
cur.execute(query)
conn.commit()
def main():
cur, conn = connect()
load_staging_tables(cur, conn)
insert_tables(cur, conn)
conn.close()
if __name__ == "__main__":
main()
| [
"data_warehouse.db.connect"
] | [((392, 401), 'data_warehouse.db.connect', 'connect', ([], {}), '()\n', (399, 401), False, 'from data_warehouse.db import connect\n')] |
# import necessary libraries
from flask import Flask, render_template, request
import pickle
# create instance of Flask app
app = Flask(__name__)
# create route that renders index.html template
@app.route("/")
def home():
return render_template("index.html")
@app.route("/purpose")
def purpose():
return render_template("purpose.html")
@app.route("/quiz")
def quiz():
return render_template("quiz.html")
@app.route("/model" , methods=["POST"])
def model():
scaler = pickle.load(open("ML/scaler.sav", 'rb'))
knn_model = pickle.load(open("ML/knn_model.sav", 'rb'))
country = int(request.form["country"])
employment = int(request.form["employment"])
history = int(request.form["history"])
remote = int(request.form["remote"])
tech = int(request.form["tech"])
age = int(request.form["age"])
gender_input = request.form["gender"]
if gender_input == "male":
male = 1
female = 0
others = 0
elif gender_input == "female":
male = 0
female = 1
others = 0
else:
male = 0
female = 0
others = 1
X = [[age, country, employment, history, remote, tech, female, male, others]]
print(country)
X_scaled = scaler.transform(X)
prediction = knn_model.predict(X_scaled)[0][0]
print(prediction)
if prediction == "Y":
prediction = "Yes"
else:
prediction = "No"
return render_template("quiz.html", prediction=prediction)
if __name__ == "__main__":
app.run(debug=True) | [
"flask.render_template",
"flask.Flask"
] | [((131, 146), 'flask.Flask', 'Flask', (['__name__'], {}), '(__name__)\n', (136, 146), False, 'from flask import Flask, render_template, request\n'), ((236, 265), 'flask.render_template', 'render_template', (['"""index.html"""'], {}), "('index.html')\n", (251, 265), False, 'from flask import Flask, render_template, request\n'), ((316, 347), 'flask.render_template', 'render_template', (['"""purpose.html"""'], {}), "('purpose.html')\n", (331, 347), False, 'from flask import Flask, render_template, request\n'), ((396, 424), 'flask.render_template', 'render_template', (['"""quiz.html"""'], {}), "('quiz.html')\n", (411, 424), False, 'from flask import Flask, render_template, request\n'), ((1443, 1494), 'flask.render_template', 'render_template', (['"""quiz.html"""'], {'prediction': 'prediction'}), "('quiz.html', prediction=prediction)\n", (1458, 1494), False, 'from flask import Flask, render_template, request\n')] |
from markov.chain import MarkovChain
def test_chain():
chain = MarkovChain([1, 2, 3, 1, 2])
assert len(chain.states) == 3
assert isinstance(chain.transition, dict)
assert isinstance(chain.transition[1], list)
assert chain.sample_next(1) == 2
assert len(chain.build_sequence(10, 1)) == 10
| [
"markov.chain.MarkovChain"
] | [((69, 97), 'markov.chain.MarkovChain', 'MarkovChain', (['[1, 2, 3, 1, 2]'], {}), '([1, 2, 3, 1, 2])\n', (80, 97), False, 'from markov.chain import MarkovChain\n')] |
# -*- coding: utf-8 -*-
# Generated by Django 1.11.3 on 2018-08-01 03:24
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('mridata', '0012_auto_20180731_1714'),
]
operations = [
migrations.RemoveField(
model_name='tempdata',
name='thumbnail_fftshift_along_z',
),
]
| [
"django.db.migrations.RemoveField"
] | [((292, 377), 'django.db.migrations.RemoveField', 'migrations.RemoveField', ([], {'model_name': '"""tempdata"""', 'name': '"""thumbnail_fftshift_along_z"""'}), "(model_name='tempdata', name='thumbnail_fftshift_along_z'\n )\n", (314, 377), False, 'from django.db import migrations\n')] |
#!/usr/bin/env python
from readVNADataSKRF import plot
# WARNING:
# Write file names ending in .vna
# Do not include .txt (do not use .vna.txt)
options = [
#['Redo_VNA', 'TP_1p4m_35_ChD0.vna'],
#['Redo_VNA', 'TP_1p4m_35_ChD1_redo_v1.vna'],
#['Redo_VNA', 'TP_1p4m_35_ChCMD_redo_v1.vna'],
#['Redo_VNA', 'TP_35cm_60_ChD0_redo.vna'],
#['Redo_VNA', '082820_TP_60_35cm_ChD1_KA.vna'],
#['Redo_VNA', 'TP_35cm_60_ChCMD_redo.vna'],
# using Cu calibration plate
#['data', '9_feb_2021_test1_1.vna'],
#['data', '9_feb_2021_test1_2.vna'],
#['data', '9_feb_2021_test1_3.vna'],
#['data', '9_feb_2021_test1_4.vna'],
#['data', '9_feb_2021_test1_5.vna'],
# cable 100
#['data', 'TP_35cm_100_test1_1.vna'],
# calibration
#['data', 'calibration_test.vna'],
#['data', 'small_SMA.vna'],
#['data', 'straight_SMA.vna'],
# cable 129
#['data', 'TP_0p35m_129_ChCMD.vna'],
#['data', 'TP_0p35m_129_ChD0.vna'],
#['data', 'TP_0p35m_129_ChD1.vna'],
#['data', 'TP_0p35m_129_ChD2.vna'],
#['data', 'TP_0p35m_129_ChD3.vna'],
# cable 100
# CMD_v1: CMD on 33pin conenctions: VNA 1 to CMD_P (12), VNA 2 to CMD_N (13)
# CMD_v2: CMD on 33pin conenctions: VNA 1 to CMD_D (13), VNA 2 to CMD_P (12)
#['data', 'TP_35cm_100_33pin_CMD_v2.vna'],
#['data', 'TP_35cm_100_33pin_D0.vna'],
#['data', 'TP_35cm_100_33pin_D1.vna'],
#['data', 'TP_35cm_100_33pin_D2.vna'],
#['data', 'TP_35cm_100_33pin_D3.vna'],
# John's cable number 1
# loopback cable, 45 pin
#['data', 'loopback_45pin_36tpi_27tpi.vna'],
#['data', 'loopback_45pin_18tpi_9tpi.vna'],
# John's cable number 2
# 33 pin to 45 pin: 0, 2, 4, 6, 8 twists per inch
#['data', 'TP_trial2_0tpi.vna'],
#['data', 'TP_trial2_2tpi.vna'],
#['data', 'TP_trial2_4tpi.vna'],
#['data', 'TP_trial2_6tpi.vna'],
#['data', 'TP_trial2_8tpi.vna'],
#['data', 'TP_JohnCable2_0tpi_run1.vna'],
#['data', 'TP_JohnCable2_0tpi_run2.vna'],
#['data', 'TP_JohnCable2_0tpi_run3.vna'],
#['data', 'TP_JohnCable2_2tpi_run1.vna'],
#['data', 'TP_JohnCable2_2tpi_run2.vna'],
#['data', 'TP_JohnCable2_2tpi_run3.vna'],
#['data', 'TP_JohnCable2_4tpi_run1.vna'],
#['data', 'TP_JohnCable2_4tpi_run2.vna'],
#['data', 'TP_JohnCable2_4tpi_run3.vna'],
#['data', 'TP_JohnCable2_6tpi_run1.vna'],
#['data', 'TP_JohnCable2_6tpi_run2.vna'],
#['data', 'TP_JohnCable2_6tpi_run3.vna'],
#['data', 'TP_JohnCable2_8tpi_run1.vna'],
#['data', 'TP_JohnCable2_8tpi_run2.vna'],
#['data', 'TP_JohnCable2_8tpi_run3.vna'],
# John's cable number 3
# Length: 2 m
# Channels: 4, 4*, 8, 8*, 16
#['data', 'johncable3_2m_4.vna'],
#['data', 'johncable3_2m_4star.vna'],
#['data', 'johncable3_2m_8.vna'],
#['data', 'johncable3_2m_8star.vna'],
#['data', 'johncable3_2m_16.vna'],
# John's cable number 4
# Length: 1 m
# Channels: 4J, 4B, 8, 16, 24
#['data', 'johncable4_Bill4tpi.vna'],
# Cable 120 (Before Lashing)
['TP_0.8m_120_M1CMD.vna', 'data/Cable_120_beforeLashing', 'plots/Cable_120_beforeLashing'],
['TP_0.8m_120_M1CMD_run2.vna', 'data/Cable_120_beforeLashing', 'plots/Cable_120_beforeLashing'],
['TP_0.8m_120_M1D0.vna', 'data/Cable_120_beforeLashing', 'plots/Cable_120_beforeLashing'],
['TP_0.8m_120_M1D1.vna', 'data/Cable_120_beforeLashing', 'plots/Cable_120_beforeLashing'],
['TP_0.8m_120_M2CMD.vna', 'data/Cable_120_beforeLashing', 'plots/Cable_120_beforeLashing'],
['TP_0.8m_120_M2D0.vna', 'data/Cable_120_beforeLashing', 'plots/Cable_120_beforeLashing'],
['TP_0.8m_120_M2D1.vna', 'data/Cable_120_beforeLashing', 'plots/Cable_120_beforeLashing'],
['TP_0.8m_120_M2D1_run2.vna', 'data/Cable_120_beforeLashing', 'plots/Cable_120_beforeLashing'],
['TP_0.8m_120_M3CMD.vna', 'data/Cable_120_beforeLashing', 'plots/Cable_120_beforeLashing'],
['TP_0.8m_120_M3D0.vna', 'data/Cable_120_beforeLashing', 'plots/Cable_120_beforeLashing'],
['TP_0.8m_120_M3D1.vna', 'data/Cable_120_beforeLashing', 'plots/Cable_120_beforeLashing'],
# Cable 120 (After Lashing)
['TP_0.8m_120_M1CMD.vna', 'data/Cable_120_afterLashing', 'plots/Cable_120_afterLashing'],
['TP_0.8m_120_M1D0.vna', 'data/Cable_120_afterLashing', 'plots/Cable_120_afterLashing'],
['TP_0.8m_120_M1D1.vna', 'data/Cable_120_afterLashing', 'plots/Cable_120_afterLashing'],
['TP_0.8m_120_M2CMD.vna', 'data/Cable_120_afterLashing', 'plots/Cable_120_afterLashing'],
['TP_0.8m_120_M2D0.vna', 'data/Cable_120_afterLashing', 'plots/Cable_120_afterLashing'],
['TP_0.8m_120_M2D1.vna', 'data/Cable_120_afterLashing', 'plots/Cable_120_afterLashing'],
['TP_0.8m_120_M3CMD.vna', 'data/Cable_120_afterLashing', 'plots/Cable_120_afterLashing'],
['TP_0.8m_120_M3CMD_run2.vna', 'data/Cable_120_afterLashing', 'plots/Cable_120_afterLashing'],
['TP_0.8m_120_M3D0.vna', 'data/Cable_120_afterLashing', 'plots/Cable_120_afterLashing'],
['TP_0.8m_120_M3D1.vna', 'data/Cable_120_afterLashing', 'plots/Cable_120_afterLashing'],
# Cable 158 (Before Lashing)
['TP_1p6m_158_CMD.vna', 'data/Cable_158_beforeLashing', 'plots/Cable_158_beforeLashing'],
['TP_1p6m_158_CMD_swap12.vna', 'data/Cable_158_beforeLashing', 'plots/Cable_158_beforeLashing'],
['TP_1p6m_158_CMD_1to2_3to4.vna', 'data/Cable_158_beforeLashing', 'plots/Cable_158_beforeLashing'],
['TP_1p6m_158_D0.vna', 'data/Cable_158_beforeLashing', 'plots/Cable_158_beforeLashing'],
['TP_1p6m_158_D0_swap12.vna', 'data/Cable_158_beforeLashing', 'plots/Cable_158_beforeLashing'],
['TP_1p6m_158_D0_1to2_3to4.vna', 'data/Cable_158_beforeLashing', 'plots/Cable_158_beforeLashing'],
['TP_1p6m_158_D1.vna', 'data/Cable_158_beforeLashing', 'plots/Cable_158_beforeLashing'],
['TP_1p6m_158_D1_run2.vna', 'data/Cable_158_beforeLashing', 'plots/Cable_158_beforeLashing'],
['TP_1p6m_158_D1_swap12.vna', 'data/Cable_158_beforeLashing', 'plots/Cable_158_beforeLashing'],
['TP_1p6m_158_D2.vna', 'data/Cable_158_beforeLashing', 'plots/Cable_158_beforeLashing'],
['TP_1p6m_158_D2_run2.vna', 'data/Cable_158_beforeLashing', 'plots/Cable_158_beforeLashing'],
['TP_1p6m_158_D2_swap12.vna', 'data/Cable_158_beforeLashing', 'plots/Cable_158_beforeLashing'],
['TP_1p6m_158_D3.vna', 'data/Cable_158_beforeLashing', 'plots/Cable_158_beforeLashing'],
['TP_1p6m_158_D3_swap12.vna', 'data/Cable_158_beforeLashing', 'plots/Cable_158_beforeLashing'],
# testing breaks (3, 4 disconnected; 1, 2, 3, 4 disconnected)
#['break_34.vna', 'data', 'plots'],
#['break_1234.vna', 'data', 'plots'],
]
for opt in options:
basename = opt[0]
data_directory = opt[1]
plot_directory = opt[2]
plot(basename, data_directory, plot_directory)
| [
"readVNADataSKRF.plot"
] | [((7989, 8035), 'readVNADataSKRF.plot', 'plot', (['basename', 'data_directory', 'plot_directory'], {}), '(basename, data_directory, plot_directory)\n', (7993, 8035), False, 'from readVNADataSKRF import plot\n')] |
# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT!
import grpc
import commons_pb2 as commons__pb2
class DiagnosisDBStub(object):
"""Missing associated documentation comment in .proto file"""
def __init__(self, channel):
"""Constructor.
Args:
channel: A grpc.Channel.
"""
self.AddReport = channel.unary_unary(
'/proto.DiagnosisDB/AddReport',
request_serializer=commons__pb2.Report.SerializeToString,
response_deserializer=commons__pb2.AddReportResponse.FromString,
)
self.GetDiagnosisKeys = channel.unary_stream(
'/proto.DiagnosisDB/GetDiagnosisKeys',
request_serializer=commons__pb2.GetKeyRequest.SerializeToString,
response_deserializer=commons__pb2.GetDiagnosisKeyResponse.FromString,
)
self.GetAuthorizationToken = channel.unary_unary(
'/proto.DiagnosisDB/GetAuthorizationToken',
request_serializer=commons__pb2.TokenRequest.SerializeToString,
response_deserializer=commons__pb2.TokenResponse.FromString,
)
class DiagnosisDBServicer(object):
"""Missing associated documentation comment in .proto file"""
def AddReport(self, request, context):
"""add an authorized report to the database
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def GetDiagnosisKeys(self, request, context):
"""query for all TEK+ENIN pairs matching the given filter. Predicates include:
- for a health authority
- between two timestamps
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def GetAuthorizationToken(self, request, context):
"""allows authorized healthcare professional to obtain a unique authorization
key to give to a patient
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def add_DiagnosisDBServicer_to_server(servicer, server):
rpc_method_handlers = {
'AddReport': grpc.unary_unary_rpc_method_handler(
servicer.AddReport,
request_deserializer=commons__pb2.Report.FromString,
response_serializer=commons__pb2.AddReportResponse.SerializeToString,
),
'GetDiagnosisKeys': grpc.unary_stream_rpc_method_handler(
servicer.GetDiagnosisKeys,
request_deserializer=commons__pb2.GetKeyRequest.FromString,
response_serializer=commons__pb2.GetDiagnosisKeyResponse.SerializeToString,
),
'GetAuthorizationToken': grpc.unary_unary_rpc_method_handler(
servicer.GetAuthorizationToken,
request_deserializer=commons__pb2.TokenRequest.FromString,
response_serializer=commons__pb2.TokenResponse.SerializeToString,
),
}
generic_handler = grpc.method_handlers_generic_handler(
'proto.DiagnosisDB', rpc_method_handlers)
server.add_generic_rpc_handlers((generic_handler,))
# This class is part of an EXPERIMENTAL API.
class DiagnosisDB(object):
"""Missing associated documentation comment in .proto file"""
@staticmethod
def AddReport(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/proto.DiagnosisDB/AddReport',
commons__pb2.Report.SerializeToString,
commons__pb2.AddReportResponse.FromString,
options, channel_credentials,
call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def GetDiagnosisKeys(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_stream(request, target, '/proto.DiagnosisDB/GetDiagnosisKeys',
commons__pb2.GetKeyRequest.SerializeToString,
commons__pb2.GetDiagnosisKeyResponse.FromString,
options, channel_credentials,
call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def GetAuthorizationToken(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/proto.DiagnosisDB/GetAuthorizationToken',
commons__pb2.TokenRequest.SerializeToString,
commons__pb2.TokenResponse.FromString,
options, channel_credentials,
call_credentials, compression, wait_for_ready, timeout, metadata)
| [
"grpc.unary_stream_rpc_method_handler",
"grpc.method_handlers_generic_handler",
"grpc.unary_unary_rpc_method_handler",
"grpc.experimental.unary_stream",
"grpc.experimental.unary_unary"
] | [((3333, 3411), 'grpc.method_handlers_generic_handler', 'grpc.method_handlers_generic_handler', (['"""proto.DiagnosisDB"""', 'rpc_method_handlers'], {}), "('proto.DiagnosisDB', rpc_method_handlers)\n", (3369, 3411), False, 'import grpc\n'), ((2436, 2622), 'grpc.unary_unary_rpc_method_handler', 'grpc.unary_unary_rpc_method_handler', (['servicer.AddReport'], {'request_deserializer': 'commons__pb2.Report.FromString', 'response_serializer': 'commons__pb2.AddReportResponse.SerializeToString'}), '(servicer.AddReport,\n request_deserializer=commons__pb2.Report.FromString,\n response_serializer=commons__pb2.AddReportResponse.SerializeToString)\n', (2471, 2622), False, 'import grpc\n'), ((2723, 2930), 'grpc.unary_stream_rpc_method_handler', 'grpc.unary_stream_rpc_method_handler', (['servicer.GetDiagnosisKeys'], {'request_deserializer': 'commons__pb2.GetKeyRequest.FromString', 'response_serializer': 'commons__pb2.GetDiagnosisKeyResponse.SerializeToString'}), '(servicer.GetDiagnosisKeys,\n request_deserializer=commons__pb2.GetKeyRequest.FromString,\n response_serializer=commons__pb2.GetDiagnosisKeyResponse.SerializeToString)\n', (2759, 2930), False, 'import grpc\n'), ((3036, 3236), 'grpc.unary_unary_rpc_method_handler', 'grpc.unary_unary_rpc_method_handler', (['servicer.GetAuthorizationToken'], {'request_deserializer': 'commons__pb2.TokenRequest.FromString', 'response_serializer': 'commons__pb2.TokenResponse.SerializeToString'}), '(servicer.GetAuthorizationToken,\n request_deserializer=commons__pb2.TokenRequest.FromString,\n response_serializer=commons__pb2.TokenResponse.SerializeToString)\n', (3071, 3236), False, 'import grpc\n'), ((3917, 4185), 'grpc.experimental.unary_unary', 'grpc.experimental.unary_unary', (['request', 'target', '"""/proto.DiagnosisDB/AddReport"""', 'commons__pb2.Report.SerializeToString', 'commons__pb2.AddReportResponse.FromString', 'options', 'channel_credentials', 'call_credentials', 'compression', 'wait_for_ready', 'timeout', 'metadata'], {}), 
"(request, target,\n '/proto.DiagnosisDB/AddReport', commons__pb2.Report.SerializeToString,\n commons__pb2.AddReportResponse.FromString, options, channel_credentials,\n call_credentials, compression, wait_for_ready, timeout, metadata)\n", (3946, 4185), False, 'import grpc\n'), ((4524, 4818), 'grpc.experimental.unary_stream', 'grpc.experimental.unary_stream', (['request', 'target', '"""/proto.DiagnosisDB/GetDiagnosisKeys"""', 'commons__pb2.GetKeyRequest.SerializeToString', 'commons__pb2.GetDiagnosisKeyResponse.FromString', 'options', 'channel_credentials', 'call_credentials', 'compression', 'wait_for_ready', 'timeout', 'metadata'], {}), "(request, target,\n '/proto.DiagnosisDB/GetDiagnosisKeys', commons__pb2.GetKeyRequest.\n SerializeToString, commons__pb2.GetDiagnosisKeyResponse.FromString,\n options, channel_credentials, call_credentials, compression,\n wait_for_ready, timeout, metadata)\n", (4554, 4818), False, 'import grpc\n'), ((5157, 5444), 'grpc.experimental.unary_unary', 'grpc.experimental.unary_unary', (['request', 'target', '"""/proto.DiagnosisDB/GetAuthorizationToken"""', 'commons__pb2.TokenRequest.SerializeToString', 'commons__pb2.TokenResponse.FromString', 'options', 'channel_credentials', 'call_credentials', 'compression', 'wait_for_ready', 'timeout', 'metadata'], {}), "(request, target,\n '/proto.DiagnosisDB/GetAuthorizationToken', commons__pb2.TokenRequest.\n SerializeToString, commons__pb2.TokenResponse.FromString, options,\n channel_credentials, call_credentials, compression, wait_for_ready,\n timeout, metadata)\n", (5186, 5444), False, 'import grpc\n')] |
# coding=utf-8
from __future__ import absolute_import, division, print_function, unicode_literals
from contextlib import contextmanager
from flask_sqlalchemy import SQLAlchemy
from webtest import TestApp
from scout_apm.compat import kwargs_only
from scout_apm.flask.sqlalchemy import instrument_sqlalchemy
from tests.integration.test_flask import app_with_scout as flask_app_with_scout
@contextmanager
@kwargs_only
def app_with_scout():
with flask_app_with_scout() as app:
app.config["SQLALCHEMY_DATABASE_URI"] = "sqlite:///:memory:"
app.config["SQLALCHEMY_TRACK_MODIFICATIONS"] = False
db = SQLAlchemy(app)
# Setup according to https://docs.scoutapm.com/#flask-sqlalchemy
instrument_sqlalchemy(db)
conn = db.engine.connect()
@app.route("/sqlalchemy/")
def sqlalchemy():
result = conn.execute("SELECT 'Hello from the DB!'")
return list(result)[0][0]
try:
yield app
finally:
conn.close()
def test_sqlalchemy(tracked_requests):
with app_with_scout() as app:
response = TestApp(app).get("/sqlalchemy/")
assert response.status_int == 200
assert response.text == "Hello from the DB!"
assert len(tracked_requests) == 1
tracked_request = tracked_requests[0]
spans = tracked_request.complete_spans
assert len(spans) == 2
assert [s.operation for s in spans] == [
"SQL/Query",
"Controller/tests.integration.test_flask_sqlalchemy.sqlalchemy",
]
assert spans[0].tags["db.statement"] == "SELECT 'Hello from the DB!'"
| [
"tests.integration.test_flask.app_with_scout",
"flask_sqlalchemy.SQLAlchemy",
"webtest.TestApp",
"scout_apm.flask.sqlalchemy.instrument_sqlalchemy"
] | [((451, 473), 'tests.integration.test_flask.app_with_scout', 'flask_app_with_scout', ([], {}), '()\n', (471, 473), True, 'from tests.integration.test_flask import app_with_scout as flask_app_with_scout\n'), ((625, 640), 'flask_sqlalchemy.SQLAlchemy', 'SQLAlchemy', (['app'], {}), '(app)\n', (635, 640), False, 'from flask_sqlalchemy import SQLAlchemy\n'), ((722, 747), 'scout_apm.flask.sqlalchemy.instrument_sqlalchemy', 'instrument_sqlalchemy', (['db'], {}), '(db)\n', (743, 747), False, 'from scout_apm.flask.sqlalchemy import instrument_sqlalchemy\n'), ((1120, 1132), 'webtest.TestApp', 'TestApp', (['app'], {}), '(app)\n', (1127, 1132), False, 'from webtest import TestApp\n')] |
# This file is mainly derived from https://github.com/openai/baselines.
from collections import deque
import os.path as osp
import time
import csv
import json
from gym.core import Wrapper
import numpy as np
from .vec_env import VecEnvWrapper
class Monitor(Wrapper):
EXT = "monitor.csv"
f = None
def __init__(self, env, filename, allow_early_resets=False,
reset_keywords=(), info_keywords=()):
Wrapper.__init__(self, env=env)
self.tstart = time.time()
if filename:
self.results_writer = ResultsWriter(
filename,
header={
't_start': time.time(),
'env_id': env.spec and env.spec.id},
extra_keys=reset_keywords + info_keywords)
else:
self.results_writer = None
self.reset_keywords = reset_keywords
self.info_keywords = info_keywords
self.allow_early_resets = allow_early_resets
self.rewards = None
self.needs_reset = True
self.episode_rewards = []
self.episode_lengths = []
self.episode_times = []
self.total_steps = 0
self.current_reset_info = {}
def reset(self, **kwargs):
self.reset_state()
for k in self.reset_keywords:
v = kwargs.get(k)
if v is None:
raise ValueError(f'Expected you to pass kwarg {k} into reset')
self.current_reset_info[k] = v
return self.env.reset(**kwargs)
def reset_state(self):
if not self.allow_early_resets and not self.needs_reset:
raise RuntimeError(
"Tried to reset an environment before done. "
"If you want to allow early resets, wrap your env with "
"Monitor(env, path, allow_early_resets=True)")
self.rewards = []
self.needs_reset = False
def step(self, action):
if self.needs_reset:
raise RuntimeError("Tried to step environment that needs reset")
ob, rew, done, info = self.env.step(action)
self.update(ob, rew, done, info)
return (ob, rew, done, info)
def update(self, ob, rew, done, info):
self.rewards.append(rew)
if done:
self.needs_reset = True
eprew = sum(self.rewards)
eplen = len(self.rewards)
epinfo = {
"r": round(eprew, 6),
"l": eplen,
"t": round(time.time() - self.tstart, 6)}
for k in self.info_keywords:
epinfo[k] = info[k]
self.episode_rewards.append(eprew)
self.episode_lengths.append(eplen)
self.episode_times.append(time.time() - self.tstart)
epinfo.update(self.current_reset_info)
if self.results_writer:
self.results_writer.write_row(epinfo)
assert isinstance(info, dict)
if isinstance(info, dict):
info['episode'] = epinfo
self.total_steps += 1
def close(self):
super(Monitor, self).close()
if self.f is not None:
self.f.close()
def get_total_steps(self):
return self.total_steps
def get_episode_rewards(self):
return self.episode_rewards
def get_episode_lengths(self):
return self.episode_lengths
def get_episode_times(self):
return self.episode_times
class ResultsWriter(object):
def __init__(self, filename, header='', extra_keys=()):
self.extra_keys = extra_keys
assert filename is not None
if not filename.endswith(Monitor.EXT):
if osp.isdir(filename):
filename = osp.join(filename, Monitor.EXT)
else:
filename = filename + "." + Monitor.EXT
self.f = open(filename, "wt")
if isinstance(header, dict):
header = '# {} \n'.format(json.dumps(header))
self.f.write(header)
self.logger = csv.DictWriter(
self.f, fieldnames=('r', 'l', 't') + tuple(extra_keys))
self.logger.writeheader()
self.f.flush()
def write_row(self, epinfo):
if self.logger:
self.logger.writerow(epinfo)
self.f.flush()
class VecMonitor(VecEnvWrapper):
def __init__(self, venv, filename=None, keep_buf=0, info_keywords=()):
VecEnvWrapper.__init__(self, venv)
self.eprets = None
self.eplens = None
self.epcount = 0
self.tstart = time.time()
if filename:
self.results_writer = ResultsWriter(
filename, header={'t_start': self.tstart},
extra_keys=info_keywords)
else:
self.results_writer = None
self.info_keywords = info_keywords
self.keep_buf = keep_buf
if self.keep_buf:
self.epret_buf = deque([], maxlen=keep_buf)
self.eplen_buf = deque([], maxlen=keep_buf)
def reset(self):
obs = self.venv.reset()
self.eprets = np.zeros(self.num_envs, 'f')
self.eplens = np.zeros(self.num_envs, 'i')
return obs
def step_wait(self):
obs, rews, dones, infos = self.venv.step_wait()
self.eprets += rews
self.eplens += 1
newinfos = list(infos[:])
for i in range(len(dones)):
if dones[i]:
info = infos[i].copy()
ret = self.eprets[i]
eplen = self.eplens[i]
epinfo = {
'r': ret,
'l': eplen,
't': round(time.time() - self.tstart, 6)}
for k in self.info_keywords:
epinfo[k] = info[k]
info['episode'] = epinfo
if self.keep_buf:
self.epret_buf.append(ret)
self.eplen_buf.append(eplen)
self.epcount += 1
self.eprets[i] = 0
self.eplens[i] = 0
if self.results_writer:
self.results_writer.write_row(epinfo)
newinfos[i] = info
return obs, rews, dones, newinfos
| [
"collections.deque",
"json.dumps",
"os.path.join",
"gym.core.Wrapper.__init__",
"numpy.zeros",
"os.path.isdir",
"time.time"
] | [((435, 466), 'gym.core.Wrapper.__init__', 'Wrapper.__init__', (['self'], {'env': 'env'}), '(self, env=env)\n', (451, 466), False, 'from gym.core import Wrapper\n'), ((490, 501), 'time.time', 'time.time', ([], {}), '()\n', (499, 501), False, 'import time\n'), ((4528, 4539), 'time.time', 'time.time', ([], {}), '()\n', (4537, 4539), False, 'import time\n'), ((5054, 5082), 'numpy.zeros', 'np.zeros', (['self.num_envs', '"""f"""'], {}), "(self.num_envs, 'f')\n", (5062, 5082), True, 'import numpy as np\n'), ((5105, 5133), 'numpy.zeros', 'np.zeros', (['self.num_envs', '"""i"""'], {}), "(self.num_envs, 'i')\n", (5113, 5133), True, 'import numpy as np\n'), ((3669, 3688), 'os.path.isdir', 'osp.isdir', (['filename'], {}), '(filename)\n', (3678, 3688), True, 'import os.path as osp\n'), ((4895, 4921), 'collections.deque', 'deque', (['[]'], {'maxlen': 'keep_buf'}), '([], maxlen=keep_buf)\n', (4900, 4921), False, 'from collections import deque\n'), ((4951, 4977), 'collections.deque', 'deque', (['[]'], {'maxlen': 'keep_buf'}), '([], maxlen=keep_buf)\n', (4956, 4977), False, 'from collections import deque\n'), ((3717, 3748), 'os.path.join', 'osp.join', (['filename', 'Monitor.EXT'], {}), '(filename, Monitor.EXT)\n', (3725, 3748), True, 'import os.path as osp\n'), ((3936, 3954), 'json.dumps', 'json.dumps', (['header'], {}), '(header)\n', (3946, 3954), False, 'import json\n'), ((2729, 2740), 'time.time', 'time.time', ([], {}), '()\n', (2738, 2740), False, 'import time\n'), ((654, 665), 'time.time', 'time.time', ([], {}), '()\n', (663, 665), False, 'import time\n'), ((2489, 2500), 'time.time', 'time.time', ([], {}), '()\n', (2498, 2500), False, 'import time\n'), ((5619, 5630), 'time.time', 'time.time', ([], {}), '()\n', (5628, 5630), False, 'import time\n')] |
# Generated by Django 3.0.8 on 2021-03-31 00:14
import datetime
from django.db import migrations, models
from django.utils.timezone import utc
class Migration(migrations.Migration):
dependencies = [
('modulector', '0031_auto_20210330_0044'),
]
operations = [
migrations.AlterField(
model_name='subscriptionitem',
name='gene',
field=models.CharField(max_length=100, null=True),
),
migrations.AlterField(
model_name='subscriptionitem',
name='record_date',
field=models.DateTimeField(default=datetime.datetime(2021, 3, 31, 0, 14, 25, 603476, tzinfo=utc)),
),
]
| [
"datetime.datetime",
"django.db.models.CharField"
] | [((401, 444), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(100)', 'null': '(True)'}), '(max_length=100, null=True)\n', (417, 444), False, 'from django.db import migrations, models\n'), ((610, 671), 'datetime.datetime', 'datetime.datetime', (['(2021)', '(3)', '(31)', '(0)', '(14)', '(25)', '(603476)'], {'tzinfo': 'utc'}), '(2021, 3, 31, 0, 14, 25, 603476, tzinfo=utc)\n', (627, 671), False, 'import datetime\n')] |
import datetime
from dateutil.relativedelta import relativedelta
from parlai.agents.programr.parser.template.nodes.base import TemplateNode
# from parlai.agents.programr.utils.logging.ylogger import YLogger
import parlai.utils.logging as logging
from parlai.agents.programr.utils.text.text import TextUtils
class TemplateIntervalNode(TemplateNode):
def __init__(self):
TemplateNode.__init__(self)
self._interval_format = None
self._style = None
self._interval_from = None
self._interval_to = None
@property
def interval_format(self):
return self._interval_format
@interval_format.setter
def interval_format(self, interval_format):
self._interval_format = interval_format
@property
def interval_from(self):
return self._interval_from
@interval_from.setter
def interval_from(self, interval_from):
self._interval_from = interval_from
@property
def interval_to(self):
return self._interval_to
@interval_to.setter
def interval_to(self, interval_to):
self._interval_to = interval_to
@property
def style(self):
return self._style
@style.setter
def style(self, style):
self._style = style
def resolve_to_string(self, brain):
format_str = self._interval_format.resolve(brain)
from_str = self.interval_from.resolve(brain)
from_time = datetime.datetime.strptime(from_str, format_str)
to_str = self.interval_to.resolve(brain)
to_time = datetime.datetime.strptime(to_str, format_str)
style = self._style.resolve(brain)
diff = to_time - from_time
difference = relativedelta(to_time, from_time)
if style == "years":
resolved = str(difference.years)
elif style == "months":
resolved = str(difference.months)
elif style == "weeks":
resolved = str(difference.weeks)
elif style == "days":
resolved = str(diff.days)
elif style == "hours":
resolved = str(difference.hours)
elif style == "minutes":
resolved = str(difference.minutes)
elif style == "seconds":
resolved = str(difference.seconds)
elif style == "microseconds":
resolved = str(difference.microseconds)
elif style == "ymd":
resolved = "%d years, %d months, %d days" % \
(difference.years, difference.months, difference.days)
elif style == "hms":
resolved = "%d hours, %d minutes, %d seconds" % \
(difference.hours, difference.minutes, difference.seconds)
elif style == "ymdhms":
resolved = "%d years, %d months, %d days, %d hours, %d minutes, %d seconds" % \
(difference.years, difference.months, difference.days,
difference.hours, difference.minutes, difference.seconds)
else:
# YLogger.error(brain, "Unknown interval style [%s]", style)
logging.error(f"Unknown interval style {style}")
resolved = ""
# YLogger.debug(brain, "[INTERVAL] resolved to [%s]", resolved)
logging.debug(f"[INTERVAL] resolved to [{resolved}]")
return resolved
def resolve(self, brain):
try:
return self.resolve_to_string(brain)
except Exception as excep:
# YLogger.exception(brain, "Failed to resolve", excep)
logging.error(f"Failed to resolve {excep}")
return ""
def to_string(self):
return "[INTERVAL]"
def to_xml(self, brain):
xml = '<interval'
xml += ' format="%s"' % self._interval_format.to_xml(brain)
xml += ' style="%s"' % self._style.to_xml(brain)
xml += '>'
xml += '<from>'
xml += self._interval_from.to_xml(brain)
xml += '</from>'
xml += '<to>'
xml += self._interval_to.to_xml(brain)
xml += '</to>'
xml += '</interval>'
return xml
#######################################################################################################
# INTERVAL_EXPRESSION ::== <interval>
# (DATE_ATTRIBUTE_TAGS)
# <style>(TEMPLATE_EXPRESSION)</style>
# <from>(TEMPLATE_EXPRESSION)</from>
# <to>(TEMPLATE_EXPRESSION)</to>
# </interval>
def parse_expression(self, graph, expression):
if 'format' in expression.attrib:
self.interval_format = graph.get_word_node(expression.attrib['format'])
head_text = self.get_text_from_element(expression)
self.parse_text(graph, head_text)
for child in expression:
tag_name = TextUtils.tag_from_text(child.tag)
if tag_name == 'format':
self.interval_format = graph.get_word_node(self.get_text_from_element(child))
elif tag_name == 'style':
node = graph.get_base_node()
node.parse_text(graph, self.get_text_from_element(child))
for sub_child in child:
graph.parse_tag_expression(sub_child, node)
node.parse_text(graph, self.get_text_from_element(child))
self.style = node
elif tag_name == 'from':
node = graph.get_base_node()
node.parse_text(graph, self.get_text_from_element(child))
for sub_child in child:
graph.parse_tag_expression(sub_child, node)
node.parse_text(graph, self.get_text_from_element(child))
self.interval_from = node
elif tag_name == 'to':
node = graph.get_base_node()
node.parse_text(graph, self.get_text_from_element(child))
for sub_child in child:
graph.parse_tag_expression(sub_child, node)
node.parse_text(graph, self.get_text_from_element(child))
self.interval_to = node
else:
graph.parse_tag_expression(child, self)
tail_text = self.get_tail_from_element(child)
self.parse_text(graph, tail_text)
if self.interval_format is None:
# YLogger.warning(self, "Interval node, format missing, defaulting to 'c%%'!")
logging.warning("Interval node, format missing, defaulting to 'c%%'!")
self.interval_format = "%c"
if self.style is None:
# YLogger.warning(self, "style node, format missing, defaulting to 'days'!")
logging.warning("style node, format missing, defaulting to 'days'!")
self.style = "days"
if self.interval_from is None:
# YLogger.warning(self, "interval_from node, format missing !")
logging.warning("interval_from node, format missing !")
if self.interval_to is None:
# YLogger.warning(self, "interval_to node, format missing !")
logging.warning("interval_to node, format missing !") | [
"dateutil.relativedelta.relativedelta",
"parlai.agents.programr.parser.template.nodes.base.TemplateNode.__init__",
"parlai.utils.logging.error",
"datetime.datetime.strptime",
"parlai.utils.logging.warning",
"parlai.utils.logging.debug",
"parlai.agents.programr.utils.text.text.TextUtils.tag_from_text"
] | [((386, 413), 'parlai.agents.programr.parser.template.nodes.base.TemplateNode.__init__', 'TemplateNode.__init__', (['self'], {}), '(self)\n', (407, 413), False, 'from parlai.agents.programr.parser.template.nodes.base import TemplateNode\n'), ((1439, 1487), 'datetime.datetime.strptime', 'datetime.datetime.strptime', (['from_str', 'format_str'], {}), '(from_str, format_str)\n', (1465, 1487), False, 'import datetime\n'), ((1556, 1602), 'datetime.datetime.strptime', 'datetime.datetime.strptime', (['to_str', 'format_str'], {}), '(to_str, format_str)\n', (1582, 1602), False, 'import datetime\n'), ((1704, 1737), 'dateutil.relativedelta.relativedelta', 'relativedelta', (['to_time', 'from_time'], {}), '(to_time, from_time)\n', (1717, 1737), False, 'from dateutil.relativedelta import relativedelta\n'), ((3238, 3291), 'parlai.utils.logging.debug', 'logging.debug', (['f"""[INTERVAL] resolved to [{resolved}]"""'], {}), "(f'[INTERVAL] resolved to [{resolved}]')\n", (3251, 3291), True, 'import parlai.utils.logging as logging\n'), ((4771, 4805), 'parlai.agents.programr.utils.text.text.TextUtils.tag_from_text', 'TextUtils.tag_from_text', (['child.tag'], {}), '(child.tag)\n', (4794, 4805), False, 'from parlai.agents.programr.utils.text.text import TextUtils\n'), ((6394, 6464), 'parlai.utils.logging.warning', 'logging.warning', (['"""Interval node, format missing, defaulting to \'c%%\'!"""'], {}), '("Interval node, format missing, defaulting to \'c%%\'!")\n', (6409, 6464), True, 'import parlai.utils.logging as logging\n'), ((6637, 6705), 'parlai.utils.logging.warning', 'logging.warning', (['"""style node, format missing, defaulting to \'days\'!"""'], {}), '("style node, format missing, defaulting to \'days\'!")\n', (6652, 6705), True, 'import parlai.utils.logging as logging\n'), ((6865, 6920), 'parlai.utils.logging.warning', 'logging.warning', (['"""interval_from node, format missing !"""'], {}), "('interval_from node, format missing !')\n", (6880, 6920), True, 'import 
parlai.utils.logging as logging\n'), ((7044, 7097), 'parlai.utils.logging.warning', 'logging.warning', (['"""interval_to node, format missing !"""'], {}), "('interval_to node, format missing !')\n", (7059, 7097), True, 'import parlai.utils.logging as logging\n'), ((3523, 3566), 'parlai.utils.logging.error', 'logging.error', (['f"""Failed to resolve {excep}"""'], {}), "(f'Failed to resolve {excep}')\n", (3536, 3566), True, 'import parlai.utils.logging as logging\n'), ((3082, 3130), 'parlai.utils.logging.error', 'logging.error', (['f"""Unknown interval style {style}"""'], {}), "(f'Unknown interval style {style}')\n", (3095, 3130), True, 'import parlai.utils.logging as logging\n')] |
import os
from PIL import Image
def resize(file_path="./.tmp/bg.jpg", size=(128, 128)):
try:
file_name = os.path.basename(file_path).split(".")[0]
outfile = "./.tmp/{}_{}x{}.jpg".format(file_name, size[0], size[1])
im = Image.open(file_path)
im.thumbnail(size, Image.ANTIALIAS)
im.save(outfile, "JPEG")
return outfile
except Exception:
return None
| [
"PIL.Image.open",
"os.path.basename"
] | [((250, 271), 'PIL.Image.open', 'Image.open', (['file_path'], {}), '(file_path)\n', (260, 271), False, 'from PIL import Image\n'), ((119, 146), 'os.path.basename', 'os.path.basename', (['file_path'], {}), '(file_path)\n', (135, 146), False, 'import os\n')] |
#!/usr/bin/env python
# coding: utf-8
# # Uttar Pradesh Crime Analysis
# #By- <NAME>
# #Dated: July 12,2021
# In[1]:
from IPython.display import Image
Image(url='https://www.gannett-cdn.com/-mm-/4a94aaab8e826ca8563bd0e434c9fe36f1dc920f/c=0-0-399-300&r=x404&c=534x401/local/-/media/2016/07/20/FortMyers/FortMyers/636046215443097272-CRIME-gen-NP.jpg')
# In[2]:
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
get_ipython().run_line_magic('matplotlib', 'inline')
sns.set_style('whitegrid')
sns.set()
# In[3]:
df=pd.read_csv(r'/home/aarush100616/Downloads/Projects/Uttar Pradesh Crime Analysis/MR data - Compiled Data Set.csv')
# In[4]:
df
# In[5]:
#droping title and text columns
x_full = df.drop(axis = 1, labels = ['Title','Text'])
# In[6]:
#Renaming big column names(cleaning)
x_full = x_full.rename(columns = {'Murder Happened with reason:\n1:Property/Land Disputes,\n2:Family Dispute\n3:Petty Quarrels,\n4:Money Disputes, \n5:Personal Vendetta, \n6:Love Affairs, \n7:Casteism\n8: Unknown/other':'Murder Reason', 'Crime Against Women ( Combined ):\n1. Murder with Rape \n2. Dowry Deaths(Sec. 3048)\n3. Suicide(sec 305/306)\n4. Kidnapping(All)\n5. Acid Attack(Sec. 326A IPC)\n6. Cruelty by Husband/in-laws((Sec.498 A IPC)\n7. Rape only(Sec. 376 or 511 IPC)\n8. Assault on Women with Intent to Outrage her Modesty (Sec. 354 IPC)\n9. Cyber Crimes against Women\n10. Protection of Children from Sexual Offences Act':'crime against women(Act)'})
x_full.columns
# In[7]:
#drop rows with all null values apart from 'City '( cleaning )
x_full = x_full.dropna(axis = 0, subset= ['City '], how = 'all')
# In[8]:
#checking Null Values
x_full.isnull().sum()
# In[9]:
#replacing NaN values in "number of" type and NaN values in crime agaisnt women column with 0
x_full = x_full.fillna(0)
# In[10]:
#seperating acts
crime_acts = x_full['crime against women(Act)'].astype('string').values
for i in range(1,11):
x_full.insert(8+i,"Act {}".format(i),0)
for j in range(len(crime_acts)):
splitted = crime_acts[j].split(',')
for x in splitted:
if x=='0':
continue
x_full.loc[int(j),'Act {}'.format(x.strip())]=1
x_full
# ## City-wise Crime Report
# In[11]:
sns.countplot(x = 'City ', data = x_full, palette = 'mako').set(title = 'city-wise crime count')
# ## Reasons For Murder
# In[12]:
plt.figure(figsize = (10,10))
reason = x_full['Murder Reason'].value_counts().to_dict()
reason.pop(0)
val = list(reason.values())
label = list(reason.keys())
plt.title('Distribution of Reasons for Murder',fontsize = 20)
plt.pie(x = val,labels = label)
plt.show()
# ## Love Affair victims Gender Distribution
# In[13]:
#love affairs victims
mv = x_full.loc[x_full['Murder Reason'] == 'Love Affairs', "Number of male victims(adult)"].sum()
fv = x_full.loc[x_full['Murder Reason'] == 'Love Affairs', "Number of female victims(adult)"].sum()
plt.figure(figsize = (10,10))
plt.title('Love Affair victims',fontsize = 20)
plt.pie(x = [mv,fv],labels = ['females','males'],colors = ['pink','blue'])
plt.show()
# ## What crimes are causing women's death?
# In[14]:
#what is the biggest cause of women's death
wmr = x_full.loc[x_full['Number of female victims(adult)']>0]
plt.figure(figsize = (15,7))
sns.countplot(x = 'Murder Reason', data = wmr)
# ## Cause of men's death
# In[15]:
wmr = x_full.loc[x_full['Number of male victims(adult)']>0]
plt.figure(figsize = (15,7))
sns.countplot(x = 'Murder Reason', data = wmr)
# ## Distribution of Crime Againt Women on basis of Acts
# In[16]:
s = x_full.iloc[:,9:(9+10)].sum()
plt.figure(figsize = (15,5))
plt.bar(s.index,s.values)
# ## Reasons:
# -Murder with Rape
# -Dowry Deaths(Sec. 3048)
# -Suicide(sec 305/306)
# -Kidnapping(All)
# -Acid Attack(Sec. 326A IPC)\n6. Cruelty by Husband/in-laws((Sec.498 A IPC)
# -Rape only(Sec. 376 or 511 IPC)
# -Assault on Women with Intent to Outrage her Modesty (Sec. 354 IPC)
# -Cyber Crimes against Women
# -Protection of Children from Sexual Offences Act
# ## Property Disputes
# In[17]:
mv = x_full.loc[x_full['Murder Reason'] == 'Property Disputes', "Number of male victims(adult)"].sum()
fv = x_full.loc[x_full['Murder Reason'] == 'Property Disputes', "Number of female victims(adult)"].sum()
plt.figure(figsize = (10,10))
plt.title('Property Disputes victims',fontsize = 20)
plt.pie(x = [mv,fv],labels = ['females','males'],colors = ['pink','blue'])
plt.show()
# ## Female vs Male involvement
# In[18]:
#females vs males crime
sns.set_palette("BrBG",1)
females = x_full['Number of female(adult)'].sum()
males = x_full['Number of male(adult) '].sum()
sns.barplot(x= ['femlaes','males'], y= [females, males]).set(title = 'Involved in Crime')
# ## Children vs Adulit Victims
# In[19]:
#Children vs Adults victims in crime city-wise
sns.set_palette("RdBu",)
females = x_full['Number of female victims(adult)'].sum()
males = x_full['Number of male victims(adult)'].sum()
child = x_full['Number of child victims'].sum()
sns.barplot(x= ['femlaes(adults)','males(adult)','children'], y= [females, males, child]).set(title = 'Victims of UP Crime')
# ## City-wise Child victims
# In[20]:
#child victims city-wise
sns.set_palette("YlOrBr", 1)
gbd = x_full.loc[x_full['City '] == 'Ghaziabad' , 'Number of child victims' ].sum()
lkw = x_full.loc[x_full['City '] == 'Lucknow' , 'Number of child victims' ].sum()
knp = x_full.loc[x_full['City '] == 'Kanpur' , 'Number of child victims' ].sum()
sns.barplot(y = [gbd,lkw,knp], x = ['Ghaziabad','Lucknow', 'Kanpur']).set(title = 'city-wise child victims')
# In[21]:
sns.set_palette("YlOrBr", 1)
gbd = x_full.loc[x_full['City '] == 'Ghaziabad' , 'Kidnnaping:\nNumber of child victims' ].sum()
lkw = x_full.loc[x_full['City '] == 'Lucknow' , 'Kidnnaping:\nNumber of child victims' ].sum()
knp = x_full.loc[x_full['City '] == 'Kanpur' , 'Kidnnaping:\nNumber of child victims' ].sum()
sns.barplot(y = [gbd,lkw,knp], x = ['Ghaziabad','Lucknow', 'Kanpur']).set(title = 'city-wise child kidnappings')
| [
"seaborn.set",
"seaborn.set_palette",
"pandas.read_csv",
"IPython.display.Image",
"matplotlib.pyplot.pie",
"seaborn.set_style",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.bar",
"seaborn.countplot",
"matplotlib.pyplot.title",
"seaborn.barplot",
"matplotlib.pyplot.show"
] | [((155, 363), 'IPython.display.Image', 'Image', ([], {'url': '"""https://www.gannett-cdn.com/-mm-/4a94aaab8e826ca8563bd0e434c9fe36f1dc920f/c=0-0-399-300&r=x404&c=534x401/local/-/media/2016/07/20/FortMyers/FortMyers/636046215443097272-CRIME-gen-NP.jpg"""'}), "(url=\n 'https://www.gannett-cdn.com/-mm-/4a94aaab8e826ca8563bd0e434c9fe36f1dc920f/c=0-0-399-300&r=x404&c=534x401/local/-/media/2016/07/20/FortMyers/FortMyers/636046215443097272-CRIME-gen-NP.jpg'\n )\n", (160, 363), False, 'from IPython.display import Image\n'), ((513, 539), 'seaborn.set_style', 'sns.set_style', (['"""whitegrid"""'], {}), "('whitegrid')\n", (526, 539), True, 'import seaborn as sns\n'), ((540, 549), 'seaborn.set', 'sns.set', ([], {}), '()\n', (547, 549), True, 'import seaborn as sns\n'), ((566, 689), 'pandas.read_csv', 'pd.read_csv', (['"""/home/aarush100616/Downloads/Projects/Uttar Pradesh Crime Analysis/MR data - Compiled Data Set.csv"""'], {}), "(\n '/home/aarush100616/Downloads/Projects/Uttar Pradesh Crime Analysis/MR data - Compiled Data Set.csv'\n )\n", (577, 689), True, 'import pandas as pd\n'), ((2405, 2433), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(10, 10)'}), '(figsize=(10, 10))\n', (2415, 2433), True, 'import matplotlib.pyplot as plt\n'), ((2563, 2623), 'matplotlib.pyplot.title', 'plt.title', (['"""Distribution of Reasons for Murder"""'], {'fontsize': '(20)'}), "('Distribution of Reasons for Murder', fontsize=20)\n", (2572, 2623), True, 'import matplotlib.pyplot as plt\n'), ((2625, 2653), 'matplotlib.pyplot.pie', 'plt.pie', ([], {'x': 'val', 'labels': 'label'}), '(x=val, labels=label)\n', (2632, 2653), True, 'import matplotlib.pyplot as plt\n'), ((2657, 2667), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2665, 2667), True, 'import matplotlib.pyplot as plt\n'), ((2948, 2976), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(10, 10)'}), '(figsize=(10, 10))\n', (2958, 2976), True, 'import matplotlib.pyplot as plt\n'), ((2978, 3023), 
'matplotlib.pyplot.title', 'plt.title', (['"""Love Affair victims"""'], {'fontsize': '(20)'}), "('Love Affair victims', fontsize=20)\n", (2987, 3023), True, 'import matplotlib.pyplot as plt\n'), ((3025, 3098), 'matplotlib.pyplot.pie', 'plt.pie', ([], {'x': '[mv, fv]', 'labels': "['females', 'males']", 'colors': "['pink', 'blue']"}), "(x=[mv, fv], labels=['females', 'males'], colors=['pink', 'blue'])\n", (3032, 3098), True, 'import matplotlib.pyplot as plt\n'), ((3100, 3110), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3108, 3110), True, 'import matplotlib.pyplot as plt\n'), ((3276, 3303), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(15, 7)'}), '(figsize=(15, 7))\n', (3286, 3303), True, 'import matplotlib.pyplot as plt\n'), ((3305, 3347), 'seaborn.countplot', 'sns.countplot', ([], {'x': '"""Murder Reason"""', 'data': 'wmr'}), "(x='Murder Reason', data=wmr)\n", (3318, 3347), True, 'import seaborn as sns\n'), ((3453, 3480), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(15, 7)'}), '(figsize=(15, 7))\n', (3463, 3480), True, 'import matplotlib.pyplot as plt\n'), ((3482, 3524), 'seaborn.countplot', 'sns.countplot', ([], {'x': '"""Murder Reason"""', 'data': 'wmr'}), "(x='Murder Reason', data=wmr)\n", (3495, 3524), True, 'import seaborn as sns\n'), ((3635, 3662), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(15, 5)'}), '(figsize=(15, 5))\n', (3645, 3662), True, 'import matplotlib.pyplot as plt\n'), ((3664, 3690), 'matplotlib.pyplot.bar', 'plt.bar', (['s.index', 's.values'], {}), '(s.index, s.values)\n', (3671, 3690), True, 'import matplotlib.pyplot as plt\n'), ((4303, 4331), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(10, 10)'}), '(figsize=(10, 10))\n', (4313, 4331), True, 'import matplotlib.pyplot as plt\n'), ((4333, 4384), 'matplotlib.pyplot.title', 'plt.title', (['"""Property Disputes victims"""'], {'fontsize': '(20)'}), "('Property Disputes victims', fontsize=20)\n", (4342, 4384), True, 'import 
matplotlib.pyplot as plt\n'), ((4386, 4459), 'matplotlib.pyplot.pie', 'plt.pie', ([], {'x': '[mv, fv]', 'labels': "['females', 'males']", 'colors': "['pink', 'blue']"}), "(x=[mv, fv], labels=['females', 'males'], colors=['pink', 'blue'])\n", (4393, 4459), True, 'import matplotlib.pyplot as plt\n'), ((4461, 4471), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (4469, 4471), True, 'import matplotlib.pyplot as plt\n'), ((4543, 4569), 'seaborn.set_palette', 'sns.set_palette', (['"""BrBG"""', '(1)'], {}), "('BrBG', 1)\n", (4558, 4569), True, 'import seaborn as sns\n'), ((4850, 4873), 'seaborn.set_palette', 'sns.set_palette', (['"""RdBu"""'], {}), "('RdBu')\n", (4865, 4873), True, 'import seaborn as sns\n'), ((5229, 5257), 'seaborn.set_palette', 'sns.set_palette', (['"""YlOrBr"""', '(1)'], {}), "('YlOrBr', 1)\n", (5244, 5257), True, 'import seaborn as sns\n'), ((5628, 5656), 'seaborn.set_palette', 'sns.set_palette', (['"""YlOrBr"""', '(1)'], {}), "('YlOrBr', 1)\n", (5643, 5656), True, 'import seaborn as sns\n'), ((2269, 2322), 'seaborn.countplot', 'sns.countplot', ([], {'x': '"""City """', 'data': 'x_full', 'palette': '"""mako"""'}), "(x='City ', data=x_full, palette='mako')\n", (2282, 2322), True, 'import seaborn as sns\n'), ((4666, 4721), 'seaborn.barplot', 'sns.barplot', ([], {'x': "['femlaes', 'males']", 'y': '[females, males]'}), "(x=['femlaes', 'males'], y=[females, males])\n", (4677, 4721), True, 'import seaborn as sns\n'), ((5035, 5128), 'seaborn.barplot', 'sns.barplot', ([], {'x': "['femlaes(adults)', 'males(adult)', 'children']", 'y': '[females, males, child]'}), "(x=['femlaes(adults)', 'males(adult)', 'children'], y=[females,\n males, child])\n", (5046, 5128), True, 'import seaborn as sns\n'), ((5505, 5573), 'seaborn.barplot', 'sns.barplot', ([], {'y': '[gbd, lkw, knp]', 'x': "['Ghaziabad', 'Lucknow', 'Kanpur']"}), "(y=[gbd, lkw, knp], x=['Ghaziabad', 'Lucknow', 'Kanpur'])\n", (5516, 5573), True, 'import seaborn as sns\n'), ((5943, 6011), 
'seaborn.barplot', 'sns.barplot', ([], {'y': '[gbd, lkw, knp]', 'x': "['Ghaziabad', 'Lucknow', 'Kanpur']"}), "(y=[gbd, lkw, knp], x=['Ghaziabad', 'Lucknow', 'Kanpur'])\n", (5954, 6011), True, 'import seaborn as sns\n')] |
# coding: utf-8
from zeit.cms.checkout.helper import checked_out
from zeit.cms.repository.unknown import PersistentUnknownResource
from zeit.content.rawxml.rawxml import RawXML
import jinja2
import lxml.etree
import mock
import pkg_resources
import six
import transaction
import zeit.cms.testcontenttype.testcontenttype
import zeit.content.cp.interfaces
import zeit.content.dynamicfolder.testing
class TestContainerMethodsRespectVirtualChildren(
zeit.content.dynamicfolder.testing.FunctionalTestCase):
"""Test folder methods like keys, values etc to return virtual children."""
def setUp(self):
super(TestContainerMethodsRespectVirtualChildren, self).setUp()
self.folder = self.repository['dynamicfolder']
def assert_xanten_has_basic_info_set(self, xanten):
self.assertEqual('xanten', xanten.__name__)
self.assertEqual('Xanten', xanten.title)
self.assertEqual(
'http://xml.zeit.de/dynamicfolder/xanten', xanten.uniqueId)
def test_folder_keys_contains_children_defined_in_xml_config(self):
self.assertEqual(
[u'art-déco', 'xaernten', 'xanten', 'xinjiang', u'überlingen'],
sorted(list(iter(self.folder))))
def test_folder_iter_contains_children_defined_in_xml_config(self):
self.assertEqual(
[u'art-déco', 'xaernten', 'xanten', 'xinjiang', u'überlingen'],
sorted(list(iter(self.folder))))
def test_folder_getitem_returns_child_with_basic_info_set(self):
child = self.folder['xanten']
self.assert_xanten_has_basic_info_set(child)
def test_folder_get_returns_child_with_basic_info_set(self):
child = self.folder.get('xanten')
self.assert_xanten_has_basic_info_set(child)
def test_folder_values_returns_childs_with_basic_info_set(self):
for child in self.folder.values():
if child.__name__ == 'xanten':
self.assert_xanten_has_basic_info_set(child)
break
else:
self.fail('Entry xanten not found.')
def test_folder_len_counts_children_defined_in_xml_config(self):
self.assertEqual(5, len(self.folder))
def test_folder_items_returns_childs_with_basic_info_set(self):
for key, value in self.folder.items():
if key == 'xanten':
self.assert_xanten_has_basic_info_set(value)
break
else:
self.fail('Entry xanten not found.')
def test_folder_contains_children_defined_in_xml_config(self):
self.assertIn('xanten', self.folder)
def test_setting_content_at_key_of_virtual_child_overwrites_it(self):
content = zeit.cms.testcontenttype.testcontenttype.ExampleContentType()
content.title = 'FOO'
self.folder['xanten'] = content
self.assertEqual('FOO', self.folder['xanten'].title)
def test_delete_materialized_content_goes_back_to_virtual(self):
content = zeit.cms.testcontenttype.testcontenttype.ExampleContentType()
self.folder['xanten'] = content
del self.folder['xanten']
self.assertIn('xanten', self.folder)
def test_delete_on_virtual_child_does_nothing(self):
del self.folder['xanten']
self.assertIn('xanten', self.folder)
class TestDynamicFolder(
zeit.content.dynamicfolder.testing.FunctionalTestCase):
"""Tests behaviour that exceeds basic container methods like keys, get etc.
"""
def setUp(self):
super(TestDynamicFolder, self).setUp()
self.folder = self.repository['dynamicfolder']
def test_checkin_virtual_content_materializes_it(self):
self.assertEqual('Xanten', self.folder['xanten'].title)
with checked_out(self.folder['xanten']) as co:
co.title = 'foo'
self.assertEqual('foo', self.folder['xanten'].title)
def test_unconfigured_folder_does_not_break_due_to_missing_config(self):
from ..folder import RepositoryDynamicFolder
self.repository['folder'] = RepositoryDynamicFolder()
self.assertEqual([], self.repository['folder'].items())
def test_getitem_for_key_with_no_virtual_child_raises_KeyError(self):
with self.assertRaises(KeyError):
self.folder['Buxtehude']
def test_folder_can_also_contain_normal_content(self):
self.folder['foo'] = (
zeit.cms.testcontenttype.testcontenttype.ExampleContentType())
self.assertIn('foo', self.folder)
self.assertIn('xanten', self.folder)
def test_deleting_manual_content_reveals_virtual_content_again(self):
content = zeit.cms.testcontenttype.testcontenttype.ExampleContentType()
content.title = 'FOO'
self.folder['xanten'] = content
self.assertEqual('FOO', self.folder['xanten'].title)
del self.folder['xanten']
self.assertEqual('Xanten', self.folder['xanten'].title)
def test_fills_in_template_placeholders_from_config_entries(self):
cp = self.folder['xanten']
self.assertTrue(zeit.content.cp.interfaces.ICenterPage.providedBy(cp))
self.assertTrue(zeit.content.cp.interfaces.ICP2015.providedBy(cp))
self.assertEqual('Xanten', cp.title)
def test_template_handles_umlauts_and_xml_special_chars(self):
cp = self.folder['xaernten']
self.assertEqual(u'Xärnten & mehr', cp.title)
def test_text_of_tags_can_be_used_in_template(self):
# Remove all virtual childs that have been cached
self.repository.uncontained_content = {}
with mock.patch('jinja2.Template.render') as render:
render.return_value = u''
self.folder['xinjiang'] # load files and renders template
self.assertEqual(1, render.call_count)
self.assertIn(
('text', 'Text Xinjiang'), render.call_args[1].items())
def test_parent_can_be_accessed_in_template(self):
with mock.patch(
'zeit.content.dynamicfolder.folder.'
'RepositoryDynamicFolder.content_template',
new_callable=mock.PropertyMock) as template:
template.return_value = jinja2.Template("""
<test>
<head />
<body>{{url_value}} {{__parent__.__name__}}</body>
</test>""")
self.assertIn(
'<body>xanten dynamicfolder</body>',
lxml.etree.tostring(
self.folder['xanten'].xml, encoding=six.text_type))
def test_works_with_raxml_template(self):
# These get an xml declaration in their serialization, so we must not
# process them as unicode, else lxml complains.
self.repository['data']['template.xml'] = RawXML(
pkg_resources.resource_stream(__name__, 'fixtures/template.xml'))
with self.assertNothingRaised():
self.folder['xanten']
def test_works_with_unknown_type_template(self):
# These don't get an xml declaration in their serialization, but
# luckily(?) lxml doesn't care if we use unicode or utf-8 in that case.
self.repository['data']['template.xml'] = PersistentUnknownResource(
data=pkg_resources.resource_string(
__name__, 'fixtures/template.xml').decode('latin-1'))
with self.assertNothingRaised():
self.folder['xanten']
def test_converts_xml_attribute_nodes_into_dav_properties(self):
self.assertEqual('Deutschland', self.folder['xanten'].ressort)
def test_does_not_copy_uuid_of_template_into_content(self):
self.assertNotEqual(
'{urn:uuid:6a5bcb2a-bd80-499b-ad79-72eb0a07e65e}',
zeit.cms.content.interfaces.IUUID(self.folder['xanten']).id)
def test_checkout_preserves_dav_properties_from_xml(self):
# We need a DAV property that is handled by a separate adapter to see
# the effect, since direct DAV properties are directly copied to XML,
# so for those it makes no difference if e.g. VirtualProperties were
# still used for checked-out IVirtualContent, which they should not be.
self.assertEqual('seo-title', zeit.seo.interfaces.ISEO(
self.folder['xanten']).html_title)
with checked_out(self.folder['xanten']) as co:
self.assertEqual(
'seo-title', zeit.seo.interfaces.ISEO(co).html_title)
zeit.seo.interfaces.ISEO(co).html_title = 'changed'
self.assertEqual('changed', zeit.seo.interfaces.ISEO(
self.folder['xanten']).html_title)
def assert_published(self, content):
info = zeit.cms.workflow.interfaces.IPublishInfo(content)
self.assertTrue(info.published, '%s not published' % content.uniqueId)
def assert_not_published(self, content):
info = zeit.cms.workflow.interfaces.IPublishInfo(content)
self.assertFalse(
info.published, '%s still published' % content.uniqueId)
def test_publishes_folder_with_config_and_template(self):
zeit.cms.workflow.interfaces.IPublish(
self.folder).publish(background=False)
self.assert_published(self.folder)
self.assert_published(self.folder.config_file)
self.assert_published(self.folder.content_template_file)
zeit.cms.workflow.interfaces.IPublish(
self.folder).retract(background=False)
self.assert_not_published(self.folder)
self.assert_not_published(self.folder.config_file)
self.assert_not_published(self.folder.content_template_file)
def test_does_not_break_on_erroneous_config(self):
from zeit.content.dynamicfolder.folder import RepositoryDynamicFolder
dynamic = RepositoryDynamicFolder()
dynamic.config_file = self.repository['data']['template.xml']
self.repository['brokenfolder'] = dynamic
transaction.commit()
with self.assertNothingRaised():
self.repository['brokenfolder'].values()
| [
"mock.patch",
"zeit.cms.checkout.helper.checked_out",
"zeit.content.dynamicfolder.folder.RepositoryDynamicFolder",
"jinja2.Template",
"pkg_resources.resource_string",
"transaction.commit",
"pkg_resources.resource_stream"
] | [((4016, 4041), 'zeit.content.dynamicfolder.folder.RepositoryDynamicFolder', 'RepositoryDynamicFolder', ([], {}), '()\n', (4039, 4041), False, 'from zeit.content.dynamicfolder.folder import RepositoryDynamicFolder\n'), ((9636, 9661), 'zeit.content.dynamicfolder.folder.RepositoryDynamicFolder', 'RepositoryDynamicFolder', ([], {}), '()\n', (9659, 9661), False, 'from zeit.content.dynamicfolder.folder import RepositoryDynamicFolder\n'), ((9790, 9810), 'transaction.commit', 'transaction.commit', ([], {}), '()\n', (9808, 9810), False, 'import transaction\n'), ((3717, 3751), 'zeit.cms.checkout.helper.checked_out', 'checked_out', (["self.folder['xanten']"], {}), "(self.folder['xanten'])\n", (3728, 3751), False, 'from zeit.cms.checkout.helper import checked_out\n'), ((5543, 5579), 'mock.patch', 'mock.patch', (['"""jinja2.Template.render"""'], {}), "('jinja2.Template.render')\n", (5553, 5579), False, 'import mock\n'), ((5919, 6049), 'mock.patch', 'mock.patch', (['"""zeit.content.dynamicfolder.folder.RepositoryDynamicFolder.content_template"""'], {'new_callable': 'mock.PropertyMock'}), "(\n 'zeit.content.dynamicfolder.folder.RepositoryDynamicFolder.content_template'\n , new_callable=mock.PropertyMock)\n", (5929, 6049), False, 'import mock\n'), ((6141, 6257), 'jinja2.Template', 'jinja2.Template', (['"""\n<test>\n <head />\n <body>{{url_value}} {{__parent__.__name__}}</body>\n</test>"""'], {}), '(\n """\n<test>\n <head />\n <body>{{url_value}} {{__parent__.__name__}}</body>\n</test>"""\n )\n', (6156, 6257), False, 'import jinja2\n'), ((6688, 6752), 'pkg_resources.resource_stream', 'pkg_resources.resource_stream', (['__name__', '"""fixtures/template.xml"""'], {}), "(__name__, 'fixtures/template.xml')\n", (6717, 6752), False, 'import pkg_resources\n'), ((8178, 8212), 'zeit.cms.checkout.helper.checked_out', 'checked_out', (["self.folder['xanten']"], {}), "(self.folder['xanten'])\n", (8189, 8212), False, 'from zeit.cms.checkout.helper import checked_out\n'), ((7130, 7194), 
'pkg_resources.resource_string', 'pkg_resources.resource_string', (['__name__', '"""fixtures/template.xml"""'], {}), "(__name__, 'fixtures/template.xml')\n", (7159, 7194), False, 'import pkg_resources\n')] |
""" Unittest """
import unittest
import time
from wlmodem.simulator import WlModemSimulator
class TestWlModemSimulator(unittest.TestCase):
def _make_one(self):
return WlModemSimulator()
def test_connect_with_response_is_success(self):
modem = self._make_one()
self.assertTrue(modem.connect())
def test_cmd_configure_works(self):
modem = self._make_one()
success = modem.cmd_configure("a", 4)
self.assertTrue(success)
# Link is down after reconfigure
diag = modem.cmd_get_diagnostic()
self.assertFalse(diag.get("link_up"))
def test_cmd_queue_length_works(self):
modem = self._make_one()
modem.connect()
# Add 1 packet to queue
modem.cmd_queue_packet(b"12345678")
_len = modem.cmd_get_queue_length()
self.assertEqual(_len, 1)
# Flush queue
success = modem.cmd_flush_queue()
self.assertTrue(success)
# Queue length should now be 0
_len = modem.cmd_get_queue_length()
self.assertEqual(_len, 0)
def test_cmd_diagnostic_works(self):
modem = self._make_one()
diag = modem.cmd_get_diagnostic()
expect = dict(link_up=True, pkt_cnt=0, pkt_loss_cnt=0, bit_error_rate=3.5)
self.assertDictEqual(diag, expect)
def test_cmd_version(self):
modem = self._make_one()
ver = modem.cmd_get_version()
self.assertListEqual(ver, [1, 0, 1])
def test_get_data(self):
modem = self._make_one()
modem.connect()
modem.cmd_queue_packet(b"12345678")
modem._next_packet_time = time.time() + 0.01 # Don't want to wait in the unit test
# The packet is not available yet
data = modem.get_data_packet(timeout=0.0)
self.assertEqual(data, None)
time.sleep(0.01)
# Now it should be
data = modem.get_data_packet(timeout=0.5)
self.assertEqual(data, b"12345678")
def test_invalid_request_is_detected(self):
modem = self._make_one()
result = modem.request(123)
self.assertEqual(result, None)
| [
"time.sleep",
"time.time",
"wlmodem.simulator.WlModemSimulator"
] | [((181, 199), 'wlmodem.simulator.WlModemSimulator', 'WlModemSimulator', ([], {}), '()\n', (197, 199), False, 'from wlmodem.simulator import WlModemSimulator\n'), ((1831, 1847), 'time.sleep', 'time.sleep', (['(0.01)'], {}), '(0.01)\n', (1841, 1847), False, 'import time\n'), ((1636, 1647), 'time.time', 'time.time', ([], {}), '()\n', (1645, 1647), False, 'import time\n')] |
try:
from mgrd import Skeleton as MGRDSkeleton
from mgrd import SkeletonNode as MGRDSkeletonNode
has_mgrd = True
except ImportError:
has_mgrd = False
pass
def convert_to_mgrd_skeleton(skeleton):
if not has_mgrd:
return None
def create_mgrd_node(mg_node, parent):
mgrd_node = MGRDSkeletonNode(mg_node.node_name, parent, mg_node.offset, mg_node.rotation)
mgrd_node.fixed = mg_node.fixed
return mgrd_node
def populate(skeleton, mgrd_node):
node = skeleton.nodes[mgrd_node.name]
for child in node.children:
child_node = create_mgrd_node(child, mgrd_node)
mgrd_node.add_child(child_node)
populate(skeleton, child_node)
root_node = create_mgrd_node(skeleton.nodes[skeleton.root], None)
populate(skeleton, root_node)
return MGRDSkeleton(root_node)
| [
"mgrd.SkeletonNode",
"mgrd.Skeleton"
] | [((848, 871), 'mgrd.Skeleton', 'MGRDSkeleton', (['root_node'], {}), '(root_node)\n', (860, 871), True, 'from mgrd import Skeleton as MGRDSkeleton\n'), ((321, 398), 'mgrd.SkeletonNode', 'MGRDSkeletonNode', (['mg_node.node_name', 'parent', 'mg_node.offset', 'mg_node.rotation'], {}), '(mg_node.node_name, parent, mg_node.offset, mg_node.rotation)\n', (337, 398), True, 'from mgrd import SkeletonNode as MGRDSkeletonNode\n')] |
# -*- coding: utf-8 -*-
"""
Created on Thu Feb 27 21:24:56 2020
@author: avtei
"""
import numpy as np
import lindemannindex as li
import matplotlib.pyplot as plt
#li.calc_xtc("")
import MDAnalysis
u = MDAnalysis.Universe("../gromacs/AA/AA_min.gro", "../gromacs/AA/AA_eq.xtc")
cd = li.calc_over_time(u, "AA_A")
print(cd)
a="""
def calcindex(positions, distances):
square_average = 0
for a in distances:
square_average += a**2 #<rij2>
square_average = square_average / positions.shape[0]
average = np.sum(distances)/distances.shape[0]
average_squared = average ** 2 #<rij>2
RMS = square_average - average_squared
RMS = np.sqrt(RMS)
right = RMS/average
left = 2 / (positions.shape[0] * (positions.shape[0] - 1))
Lindex = left * right
return Lindex
def CalculateXYZ(xyzfilepath):
pass
def CalculateOverTraj(xtcfilepath, verbose=False):
xtc = MDAnalysis.coordinates.XTC.XTCReader(xtcfilepath)
i=0
indexes = np.ndarray((xtc.n_frames, 1))
for ts in xtc:
atom1, atom2 = 0, 1
distances = np.ndarray((int(((ts.positions.shape[0]-1)*(ts.positions.shape[0]))/2),))
i=0
while atom1 < ts.positions.shape[0]-1:
while atom2 < ts.positions.shape[0]:
p1 = ts.positions[atom1]
p2 = ts.positions[atom2]
squared_dist = np.sum((p1-p2)**2, axis=0)
dist = np.sqrt(squared_dist)
distances[i] = dist
atom2 += 1
i+=1
atom1 += 1
atom2 = atom1 + 1
Lindex = calcindex(ts.positions, distances)
indexes[ts.frame] = Lindex
print(Lindex, indexes[ts.frame])
return indexes
#Lindexes = CalculateOverTraj("../gromacs/traj_comp.xtc")
#plt.plot([x/2 for x in range(0,200)], Lindexes)
#plt.xlabel("Temperature (K)")
#plt.ylabel("Lindemann Index")
#plt.title("887 Argon atoms undergoing heating")
""" | [
"lindemannindex.calc_over_time",
"MDAnalysis.Universe"
] | [((205, 279), 'MDAnalysis.Universe', 'MDAnalysis.Universe', (['"""../gromacs/AA/AA_min.gro"""', '"""../gromacs/AA/AA_eq.xtc"""'], {}), "('../gromacs/AA/AA_min.gro', '../gromacs/AA/AA_eq.xtc')\n", (224, 279), False, 'import MDAnalysis\n'), ((286, 314), 'lindemannindex.calc_over_time', 'li.calc_over_time', (['u', '"""AA_A"""'], {}), "(u, 'AA_A')\n", (303, 314), True, 'import lindemannindex as li\n')] |
import os
import tarfile
import glob
from mpi4py import MPI
def tarball_files_by_year(dir):
# create a tarball of the files in each year
files = os.listdir(dir)
pass
| [
"os.listdir"
] | [((155, 170), 'os.listdir', 'os.listdir', (['dir'], {}), '(dir)\n', (165, 170), False, 'import os\n')] |
import asyncio
from aiohttp import ClientSession
from pykoplenti import ApiClient
import sys
"""
Provides a simple example which reads two process data from the plenticore.
Must be called with host and password:
`python read_process_data.py 192.168.1.100 <PASSWORD>`
"""
async def async_main(host, passwd):
async with ClientSession() as session:
client = ApiClient(session, host)
await client.login(passwd)
data = await client.get_process_data_values('devices:local', ['Inverter:State', 'Home_P'])
device_local = data['devices:local']
inverter_state = device_local['Inverter:State']
home_p = device_local['Home_P']
print(f'Inverter-State: {inverter_state.value}\nHome-P: {home_p.value}\n')
if len(sys.argv) != 3:
print("Usage: <host> <password>")
sys.exit(1)
_, host, passwd = sys.argv
asyncio.run(async_main(host, passwd))
| [
"pykoplenti.ApiClient",
"aiohttp.ClientSession",
"sys.exit"
] | [((826, 837), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (834, 837), False, 'import sys\n'), ((328, 343), 'aiohttp.ClientSession', 'ClientSession', ([], {}), '()\n', (341, 343), False, 'from aiohttp import ClientSession\n'), ((373, 397), 'pykoplenti.ApiClient', 'ApiClient', (['session', 'host'], {}), '(session, host)\n', (382, 397), False, 'from pykoplenti import ApiClient\n')] |
import pytest
from abridger.dump_relations import main
from test.conftest import got_postgresql
@pytest.mark.skipif(not got_postgresql(), reason='Needs postgresql')
class TestDumpRelationsScriptForPostgresql(object):
def test_main(self, capsys, postgresql_database):
# This doesn't test the data itself, just the executable.
url = 'postgresql://%s@%s:%s/%s' % (
postgresql_database.user,
postgresql_database.host,
postgresql_database.port,
postgresql_database.dbname)
postgresql_database.disconnect()
main([url])
out, err = capsys.readouterr()
| [
"abridger.dump_relations.main",
"test.conftest.got_postgresql"
] | [((588, 599), 'abridger.dump_relations.main', 'main', (['[url]'], {}), '([url])\n', (592, 599), False, 'from abridger.dump_relations import main\n'), ((123, 139), 'test.conftest.got_postgresql', 'got_postgresql', ([], {}), '()\n', (137, 139), False, 'from test.conftest import got_postgresql\n')] |
from flaski.database import init_db
from flaski.database import db_session
from flaski.models import WikiContent
c1 = WikiContent("VisitorsBell", "VisitorsBell.gif")
db_session.add(c1)
db_session.commit()
| [
"flaski.database.db_session.commit",
"flaski.database.db_session.add",
"flaski.models.WikiContent"
] | [((119, 166), 'flaski.models.WikiContent', 'WikiContent', (['"""VisitorsBell"""', '"""VisitorsBell.gif"""'], {}), "('VisitorsBell', 'VisitorsBell.gif')\n", (130, 166), False, 'from flaski.models import WikiContent\n'), ((167, 185), 'flaski.database.db_session.add', 'db_session.add', (['c1'], {}), '(c1)\n', (181, 185), False, 'from flaski.database import db_session\n'), ((186, 205), 'flaski.database.db_session.commit', 'db_session.commit', ([], {}), '()\n', (203, 205), False, 'from flaski.database import db_session\n')] |
from flask import Flask, request, jsonify, render_template, send_from_directory, Blueprint
import os.path
from database.models import *
from app import app
from database import db
db.create_all()
@app.route("/")
def my_index():
return render_template("index.html")
@app.route('/<path:path>/')
def serve(path):
if path != "" and os.path.exists(app.static_folder + '/' + path):
return send_from_directory(app.static_folder, path)
else:
return render_template("index.html")
@app.route("/register/register", methods=["POST"])
def register():
"""
{"username" : "user1", "password": "<PASSWORD>", "type"=0}
"""
data = request.json
username = data["username"]
password = data["password"]
type = int(data["type"])
user = User(username=username, password=password,type=type)
db.session.add(user)
try:
db.session.commit()
except:
return jsonify("fail to add in db"), 500
return jsonify("success"), 200
@app.route("/login", methods=["POST"])
def login_user():
"""
{"username" : "user1", "password": "<PASSWORD>", "type":1}
"""
data = request.json
username = data["username"]
password = data["password"]
type = int(data["type"])
if not User.query.filter_by(username=username, type=type).first():
return jsonify({"result": 0})
elif not User.query.filter_by(username=username, password=password, type=type).first():
return jsonify({"result": 1})
else:
user = User.query.filter_by(username=username, password=password, type=type).first()
if user.account_state == 0:
return jsonify({"result": 2})
else:
return jsonify({"result": 3})
#
# @app.route("/send_post", method=["POST"])
# def send_post():
# """
# {"username" : "user1", "title": "some titles", \
# "description": "some description", "deadline": DATE(not sure), \
# "price: 300}
# """
# pass
#
#
# @app.route("accept_post", method=["POST"])
# def accept_post():
# pass
#
# @app.route("get_all_posts", method=["GET"])
# def get_all_posts():
# pass
#
# @app.route("edit_post", method=["PUT"])
# def edit_post():
# pass
#
# @app.route("get_self_post", method=["PUT"])
# def get_self_post():
# data = request.get_json()
# if data["type"] == 0:
# pass
# else:
# pass
if __name__ == "__main__":
app.run(host='0.0.0.0', debug=True, port=os.environ.get('PORT', 5000))
| [
"flask.render_template",
"flask.send_from_directory",
"database.db.session.add",
"database.db.session.commit",
"app.app.route",
"database.db.create_all",
"flask.jsonify"
] | [((181, 196), 'database.db.create_all', 'db.create_all', ([], {}), '()\n', (194, 196), False, 'from database import db\n'), ((199, 213), 'app.app.route', 'app.route', (['"""/"""'], {}), "('/')\n", (208, 213), False, 'from app import app\n'), ((273, 299), 'app.app.route', 'app.route', (['"""/<path:path>/"""'], {}), "('/<path:path>/')\n", (282, 299), False, 'from app import app\n'), ((504, 553), 'app.app.route', 'app.route', (['"""/register/register"""'], {'methods': "['POST']"}), "('/register/register', methods=['POST'])\n", (513, 553), False, 'from app import app\n'), ((990, 1027), 'app.app.route', 'app.route', (['"""/login"""'], {'methods': "['POST']"}), "('/login', methods=['POST'])\n", (999, 1027), False, 'from app import app\n'), ((241, 270), 'flask.render_template', 'render_template', (['"""index.html"""'], {}), "('index.html')\n", (256, 270), False, 'from flask import Flask, request, jsonify, render_template, send_from_directory, Blueprint\n'), ((834, 854), 'database.db.session.add', 'db.session.add', (['user'], {}), '(user)\n', (848, 854), False, 'from database import db\n'), ((402, 446), 'flask.send_from_directory', 'send_from_directory', (['app.static_folder', 'path'], {}), '(app.static_folder, path)\n', (421, 446), False, 'from flask import Flask, request, jsonify, render_template, send_from_directory, Blueprint\n'), ((472, 501), 'flask.render_template', 'render_template', (['"""index.html"""'], {}), "('index.html')\n", (487, 501), False, 'from flask import Flask, request, jsonify, render_template, send_from_directory, Blueprint\n'), ((872, 891), 'database.db.session.commit', 'db.session.commit', ([], {}), '()\n', (889, 891), False, 'from database import db\n'), ((964, 982), 'flask.jsonify', 'jsonify', (['"""success"""'], {}), "('success')\n", (971, 982), False, 'from flask import Flask, request, jsonify, render_template, send_from_directory, Blueprint\n'), ((1329, 1351), 'flask.jsonify', 'jsonify', (["{'result': 0}"], {}), "({'result': 0})\n", (1336, 
1351), False, 'from flask import Flask, request, jsonify, render_template, send_from_directory, Blueprint\n'), ((1459, 1481), 'flask.jsonify', 'jsonify', (["{'result': 1}"], {}), "({'result': 1})\n", (1466, 1481), False, 'from flask import Flask, request, jsonify, render_template, send_from_directory, Blueprint\n'), ((919, 947), 'flask.jsonify', 'jsonify', (['"""fail to add in db"""'], {}), "('fail to add in db')\n", (926, 947), False, 'from flask import Flask, request, jsonify, render_template, send_from_directory, Blueprint\n'), ((1640, 1662), 'flask.jsonify', 'jsonify', (["{'result': 2}"], {}), "({'result': 2})\n", (1647, 1662), False, 'from flask import Flask, request, jsonify, render_template, send_from_directory, Blueprint\n'), ((1696, 1718), 'flask.jsonify', 'jsonify', (["{'result': 3}"], {}), "({'result': 3})\n", (1703, 1718), False, 'from flask import Flask, request, jsonify, render_template, send_from_directory, Blueprint\n')] |
#!/usr/bin/env python
import numpy as np
p = np.pi
def DH_to_T(DH):
"""!
Computes the transformation matrices given the DH table of the serial link.
@param DH: devavitt-hartemberg parameters.
@return T: transformation matrices of a joint with respect to previous joint.
"""
# Get the number of rows, to know how many T matrices should create.
rows = len(DH)
T = []
for i in range(rows):
Tmp = np.array([[np.cos(DH[i,3]), -np.sin(DH[i,3]), 0, DH[i,1]],
[np.sin(DH[i,3])*np.cos(DH[i,0]), np.cos(DH[i,3])*np.cos(DH[i,0]), -np.sin(DH[i,0]), -DH[i,2]*np.sin(DH[i,0])],
[np.sin(DH[i,3])*np.sin(DH[i,0]), np.cos(DH[i,3])*np.sin(DH[i,0]), np.cos(DH[i,0]), DH[i,2]*np.cos(DH[i,0])],
[0, 0, 0, 1]])
T.append(Tmp)
return T
def transformations(T_rel_ini, q, info):
"""!
Computes tranformations given T_relatives, q's and the info.
@param T_rel_ini: the ones computed with DH_to_T.
@param q: current configuration of baxter's arm.
@param info: 1->revolute, 0->prismatic.
@return T: transformation matrices of a joint with respect to previous joint in
the new configuration.
"""
row_q = q.size
row_info = info.size
T = []
if row_q != row_info:
print("Warning. q and info must have same size.")
return
for i in range(row_q):
if info[i] == 1:
Tel = np.array([[np.cos(q[i]), -np.sin(q[i]), 0 , 0],
[np.sin(q[i]), np.cos(q[i]), 0 , 0],
[0, 0, 1, 0],
[0, 0, 0, 1]])
# else:
# Case in which there are prismatic joints.
## Tel = np.array([[1, 0, 0, 0],
## [0, 1, 0, 0],
## [0, 0, 0, q[i]]
## [0, 0, 0, 1]])
Tmp = np.dot(T_rel_ini[i], Tel)
T.append(Tmp)
# Last matrix is constant in time. T_7,e.e
T.append(T_rel_ini[row_q])
return T
def abs_trans(T_rel):
"""!
Computes trasformations matrices w.r.t. 0 frame.
@param T_rel: trasformation matrices of a joint with respect to previous one.
@return T: absolute transformation matrices.
"""
T = []
# First is the same.
T.append(T_rel[0])
for i in range(1, len(T_rel)):
Tmp = np.dot(T[i-1], T_rel[i])
T.append(Tmp)
return T
| [
"numpy.sin",
"numpy.dot",
"numpy.cos"
] | [((1758, 1783), 'numpy.dot', 'np.dot', (['T_rel_ini[i]', 'Tel'], {}), '(T_rel_ini[i], Tel)\n', (1764, 1783), True, 'import numpy as np\n'), ((2206, 2232), 'numpy.dot', 'np.dot', (['T[i - 1]', 'T_rel[i]'], {}), '(T[i - 1], T_rel[i])\n', (2212, 2232), True, 'import numpy as np\n'), ((431, 447), 'numpy.cos', 'np.cos', (['DH[i, 3]'], {}), '(DH[i, 3])\n', (437, 447), True, 'import numpy as np\n'), ((682, 698), 'numpy.cos', 'np.cos', (['DH[i, 0]'], {}), '(DH[i, 0])\n', (688, 698), True, 'import numpy as np\n'), ((449, 465), 'numpy.sin', 'np.sin', (['DH[i, 3]'], {}), '(DH[i, 3])\n', (455, 465), True, 'import numpy as np\n'), ((492, 508), 'numpy.sin', 'np.sin', (['DH[i, 3]'], {}), '(DH[i, 3])\n', (498, 508), True, 'import numpy as np\n'), ((508, 524), 'numpy.cos', 'np.cos', (['DH[i, 0]'], {}), '(DH[i, 0])\n', (514, 524), True, 'import numpy as np\n'), ((525, 541), 'numpy.cos', 'np.cos', (['DH[i, 3]'], {}), '(DH[i, 3])\n', (531, 541), True, 'import numpy as np\n'), ((541, 557), 'numpy.cos', 'np.cos', (['DH[i, 0]'], {}), '(DH[i, 0])\n', (547, 557), True, 'import numpy as np\n'), ((559, 575), 'numpy.sin', 'np.sin', (['DH[i, 0]'], {}), '(DH[i, 0])\n', (565, 575), True, 'import numpy as np\n'), ((585, 601), 'numpy.sin', 'np.sin', (['DH[i, 0]'], {}), '(DH[i, 0])\n', (591, 601), True, 'import numpy as np\n'), ((616, 632), 'numpy.sin', 'np.sin', (['DH[i, 3]'], {}), '(DH[i, 3])\n', (622, 632), True, 'import numpy as np\n'), ((632, 648), 'numpy.sin', 'np.sin', (['DH[i, 0]'], {}), '(DH[i, 0])\n', (638, 648), True, 'import numpy as np\n'), ((649, 665), 'numpy.cos', 'np.cos', (['DH[i, 3]'], {}), '(DH[i, 3])\n', (655, 665), True, 'import numpy as np\n'), ((665, 681), 'numpy.sin', 'np.sin', (['DH[i, 0]'], {}), '(DH[i, 0])\n', (671, 681), True, 'import numpy as np\n'), ((707, 723), 'numpy.cos', 'np.cos', (['DH[i, 0]'], {}), '(DH[i, 0])\n', (713, 723), True, 'import numpy as np\n'), ((1362, 1374), 'numpy.cos', 'np.cos', (['q[i]'], {}), '(q[i])\n', (1368, 1374), True, 'import numpy as 
np\n'), ((1422, 1434), 'numpy.sin', 'np.sin', (['q[i]'], {}), '(q[i])\n', (1428, 1434), True, 'import numpy as np\n'), ((1436, 1448), 'numpy.cos', 'np.cos', (['q[i]'], {}), '(q[i])\n', (1442, 1448), True, 'import numpy as np\n'), ((1377, 1389), 'numpy.sin', 'np.sin', (['q[i]'], {}), '(q[i])\n', (1383, 1389), True, 'import numpy as np\n')] |
# Python imports
# Third party imports
import boto3
# Self imports
class DjangoCloudWatchHandler:
def __init__(
self,
log_level: str,
log_group_name: str,
cloud_watch_aws_id: str,
cloud_watch_aws_key: str,
cloud_watch_aws_region: str,
) -> None:
self.log_level = log_level
self.log_group_name = log_group_name
self.cloud_watch_aws_id = cloud_watch_aws_id
self.cloud_watch_aws_key = cloud_watch_aws_key
self.cloud_watch_aws_region = cloud_watch_aws_region
def get_handler_data(self) -> dict:
boto3_logs_client = self._get_boto_client()
cloud_watch_handler = {
'level': self.log_level,
'class': 'watchtower.CloudWatchLogHandler',
'boto3_client': boto3_logs_client,
'log_group_name': self.log_group_name,
'formatter': 'aws',
}
return cloud_watch_handler
def _get_boto_client(self) -> object:
boto3_logs_client = boto3.client(
'logs',
aws_access_key_id=self.cloud_watch_aws_id,
aws_secret_access_key=self.cloud_watch_aws_key,
region_name=self.cloud_watch_aws_region
)
return boto3_logs_client | [
"boto3.client"
] | [((1030, 1191), 'boto3.client', 'boto3.client', (['"""logs"""'], {'aws_access_key_id': 'self.cloud_watch_aws_id', 'aws_secret_access_key': 'self.cloud_watch_aws_key', 'region_name': 'self.cloud_watch_aws_region'}), "('logs', aws_access_key_id=self.cloud_watch_aws_id,\n aws_secret_access_key=self.cloud_watch_aws_key, region_name=self.\n cloud_watch_aws_region)\n", (1042, 1191), False, 'import boto3\n')] |
import script
from script import *
import shlex
import edition
import layout
import query
import player
import test
import graph
import opendns
class Color(script.Script):
def __init__(self, console):
super(Color, self).__init__(console)
self.colors = {
"red" : [ 1.0, 0.0, 0.0, 1.0 ],
"green" : [ 0.0, 1.0, 0.0, 1.0 ],
"blue" : [ 0.0, 0.0, 1.0, 1.0 ],
"yellow" : [ 1.0, 1.0, 0.0, 1.0 ],
"cyan" : [ 0.0, 1.0, 1.0, 1.0 ],
"magenta" : [ 1.0, 0.0, 1.0, 1.0 ],
"white" : [ 1.0, 1.0, 1.0, 1.0 ],
"gray" : [ 0.5, 0.5, 0.5, 1.0 ],
"black" : [ 0.0, 0.0, 0.0, 1.0 ],
"orange" : [ 1.0, 0.4, 0.0, 1.0 ],
"purple" : [ 0.5, 0, 0.5, 1.0],
"pink" : [ 1.0, 0.75, 0.79, 1.0 ],
"brown" : [ 0.64, 0.16, 0.16, 1.0 ]
}
self.color_map = None
self.color_masks = {
"rgba" : [ True, True, True, True ],
"rgb" : [ True, True, True, False ],
"alpha" : [ False, False, False, True ]
}
def random_color(self):
return [ random.random(), random.random(), random.random(), 1.0 ]
def parse_color(self, s):
if s in self.colors:
return std.vec4_to_str(self.colors[s])
else:
return std.vec4_to_str(self.colors["black"])
def lambda_assign(self, element_type, element_id, color):
if element_type == "node":
og.set_node_attribute(element_id, "og:space:color", "vec4", color)
elif element_type == "edge":
og.set_edge_attribute(element_id, "og:space:color", "vec4", color)
def lambda_by(self, element_type, element_id, attr, color_map):
if element_type not in color_map:
color_map[element_type] = dict()
if element_type == "node":
value = og.get_node_attribute(element_id, attr)
elif element_type == "edge":
value = og.get_edge_attribute(element_id, attr)
if value is None:
color = std.vec4_to_str(self.colors["gray"])
else:
value = "{0}".format(value)
if value not in color_map[element_type]:
color_map[element_type][value] = self.random_color()
color = std.vec4_to_str(color_map[element_type][value])
if element_type == "node":
og.set_node_attribute(element_id, "og:space:color", "vec4", color)
elif element_type == "edge":
og.set_edge_attribute(element_id, "og:space:color", "vec4", color)
def lambda_op(self, element_type, element_id, op, color_mask, factor):
def calculate(op, v1, v2, mask):
if op == "add":
r = [ v1[i] + v2[i] for i in xrange(4) ]
elif op == "sub":
r = [ v1[i] - v2[i] for i in xrange(4) ]
elif op == "mul":
r = [ v1[i] * v2[i] for i in xrange(4) ]
elif op == "div":
r = [ v1[i] / v2[i] for i in xrange(4) ]
elif op == "set":
r = v2
else:
self.console.log("Error: '{0}': Unknown operator!")
return
for i in xrange(4):
if not mask[i]:
r[i] = v1[i]
return r
if element_type == "node":
color = og.get_node_attribute(element_id, "og:space:color")
og.set_node_attribute(element_id, "og:space:color", "vec4", std.vec4_to_str(calculate(op, color, factor, color_mask)))
elif element_type == "edge":
color = og.get_edge_attribute(element_id, "og:space:color1")
og.set_edge_attribute(element_id, "og:space:color1", "vec4", std.vec4_to_str(calculate(op, color, factor, color_mask)))
color = og.get_edge_attribute(element_id, "og:space:color2")
og.set_edge_attribute(element_id, "og:space:color2", "vec4", std.vec4_to_str(calculate(op, color, factor, color_mask)))
def run(self, args):
query = self.console.query
if query is None:
self.console.log("Error: Query is empty!")
return
if len(args) == 2:
color = self.parse_color(args[1])
if 'nodes' in query:
[ self.lambda_assign("node", nid, color) for nid in query['nodes'] ]
if 'edges' in query:
[ self.lambda_assign("edge", eid, color) for eid in query['edges'] ]
elif len(args) == 3 and args[1] == "by":
attr = args[2]
color_map = dict()
if 'nodes' in query:
[ self.lambda_by("node", nid, attr, color_map) for nid in query['nodes'] ]
if 'edges' in query:
[ self.lambda_by("edge", eid, attr, color_map) for eid in query['edges'] ]
elif len(args) >= 4 and args[1] in [ "mul", "div", "add", "sub", "set" ]:
if args[2] not in self.color_masks:
self.console.log("Error: '{0}': Unknown color mask!".format(args[2]))
return
array = [ float(i) for i in " ".join(args[3:]).split() ]
if len(array) == 1:
factor = [ array[0], array[0], array[0], array[0] ]
elif len(array) == 3:
factor = [ array[0], array[1], array[2], 1.0 ]
elif len(array) == 4:
factor = [ array[0], array[1], array[2], array[3] ]
else:
self.console.log("Error: Can't parse color factor!")
return
if 'nodes' in query:
[ self.lambda_op("node", nid, args[1], self.color_masks[args[2]], factor) for nid in query['nodes'] ]
if 'edges' in query:
[ self.lambda_op("edge", eid, args[1], self.color_masks[args[2]], factor) for eid in query['edges'] ]
class Help(script.Script):
def __init__(self, console):
super(Help, self).__init__(console)
def run(self, args):
self.console.log("Avalailable commands:")
self.console.log(", ".join(self.console.context['scripts'].keys()))
class Quit(script.Script):
def __init__(self, console):
super(Quit, self).__init__(console)
def run(self, args):
self.console.log("Terminating OpenGraphiti...")
og.quit()
class Native(script.Script):
def __init__(self, console):
super(Native, self).__init__(console)
def run(self, args):
exec(" ".join(args[1:]))
# ----- Callbacks -----
class OpenGraphiti(object):
def __init__(self):
self.ids = {
"node" : og.get_node_ids,
"edge" : og.get_edge_ids
}
self.setters = {
"graph" : og.set_attribute,
"node" : og.set_node_attribute,
"edge" : og.set_edge_attribute,
}
self.getters = {
"graph" : og.get_attribute,
"node" : og.get_node_attribute,
"edge" : og.get_edge_attribute,
}
def get_ids(self, entity_type):
if entity_type in self.ids:
return self.ids[entity_type]()
raise Exception("{0}: Unknown entity type!".format(entity_type))
def set_attribute(self, entity_type, entity_id, attr_name, attr_type, attr_value):
if entity_type in self.setters:
return self.setters[entity_type](entity_id, attr_name, attr_type, attr_value)
raise Exception("{0}: Unknown entity type!".format(entity_type))
def get_attribute(self, entity_type, entity_id, attr_name):
if entity_type in self.getters:
return self.getters[entity_type](entity_id, attr_name)
raise Exception("{0}: Unknown entity type!".format(entity_type))
class Console(object):
def __init__(self):
self.context = {
"scripts" : {
"info" : edition.Info(self),
"load" : edition.Load(self),
"save" : edition.Save(self),
"screenshot" : edition.Screenshot(self),
"set" : edition.Set(self),
"get" : edition.Get(self),
"remove" : edition.Remove(self),
"map" : edition.Map(self),
"clear" : edition.Clear(self),
"select" : query.Select(self),
"filter" : query.Filter(self),
"query" : query.Query(self),
"layout" : layout.Layout(self),
"play" : player.Play(self),
"stop" : player.Stop(self),
"topo" : graph.Topology(self),
"test" : test.Test(self),
"help" : Help(self),
"color" : Color(self),
"quit" : Quit(self),
"opendns" : opendns.OpenDNS(self),
"py" : Native(self)
}
}
self.query = dict()
self.api = OpenGraphiti()
def log(self, text):
og.console({ 'log' : text })
def print_query(self):
s = "Entities: "
key_count = 0
for key in self.query.keys():
if key_count > 0:
s += ", "
s += "#{0}={1}".format(key, len(self.query[key]))
key_count += 1
self.log(s)
def execute(self, command):
lex = shlex.shlex(command, posix=True)
lex.whitespace_split = True
args = list(lex)
if 'scripts' in self.context and args[0] in self.context['scripts']:
self.context['scripts'][args[0]].run(args)
else:
# TODO: og.console("{0}: Command not found!".format(args[0]))
self.log("{0}: Command not found!".format(args[0]))
| [
"query.Query",
"shlex.shlex",
"query.Select",
"layout.Layout",
"edition.Load",
"edition.Info",
"graph.Topology",
"edition.Remove",
"edition.Map",
"player.Stop",
"test.Test",
"player.Play",
"edition.Clear",
"query.Filter",
"edition.Save",
"edition.Get",
"opendns.OpenDNS",
"edition.S... | [((7686, 7718), 'shlex.shlex', 'shlex.shlex', (['command'], {'posix': '(True)'}), '(command, posix=True)\n', (7697, 7718), False, 'import shlex\n'), ((6604, 6622), 'edition.Info', 'edition.Info', (['self'], {}), '(self)\n', (6616, 6622), False, 'import edition\n'), ((6637, 6655), 'edition.Load', 'edition.Load', (['self'], {}), '(self)\n', (6649, 6655), False, 'import edition\n'), ((6670, 6688), 'edition.Save', 'edition.Save', (['self'], {}), '(self)\n', (6682, 6688), False, 'import edition\n'), ((6709, 6733), 'edition.Screenshot', 'edition.Screenshot', (['self'], {}), '(self)\n', (6727, 6733), False, 'import edition\n'), ((6747, 6764), 'edition.Set', 'edition.Set', (['self'], {}), '(self)\n', (6758, 6764), False, 'import edition\n'), ((6778, 6795), 'edition.Get', 'edition.Get', (['self'], {}), '(self)\n', (6789, 6795), False, 'import edition\n'), ((6812, 6832), 'edition.Remove', 'edition.Remove', (['self'], {}), '(self)\n', (6826, 6832), False, 'import edition\n'), ((6846, 6863), 'edition.Map', 'edition.Map', (['self'], {}), '(self)\n', (6857, 6863), False, 'import edition\n'), ((6879, 6898), 'edition.Clear', 'edition.Clear', (['self'], {}), '(self)\n', (6892, 6898), False, 'import edition\n'), ((6916, 6934), 'query.Select', 'query.Select', (['self'], {}), '(self)\n', (6928, 6934), False, 'import query\n'), ((6951, 6969), 'query.Filter', 'query.Filter', (['self'], {}), '(self)\n', (6963, 6969), False, 'import query\n'), ((6985, 7002), 'query.Query', 'query.Query', (['self'], {}), '(self)\n', (6996, 7002), False, 'import query\n'), ((7020, 7039), 'layout.Layout', 'layout.Layout', (['self'], {}), '(self)\n', (7033, 7039), False, 'import layout\n'), ((7054, 7071), 'player.Play', 'player.Play', (['self'], {}), '(self)\n', (7065, 7071), False, 'import player\n'), ((7086, 7103), 'player.Stop', 'player.Stop', (['self'], {}), '(self)\n', (7097, 7103), False, 'import player\n'), ((7119, 7139), 'graph.Topology', 'graph.Topology', (['self'], {}), '(self)\n', 
(7133, 7139), False, 'import graph\n'), ((7155, 7170), 'test.Test', 'test.Test', (['self'], {}), '(self)\n', (7164, 7170), False, 'import test\n'), ((7272, 7293), 'opendns.OpenDNS', 'opendns.OpenDNS', (['self'], {}), '(self)\n', (7287, 7293), False, 'import opendns\n')] |
from sklearn.base import BaseEstimator
import yake
from ._prep import TextPrep
class YakeTextPrep(TextPrep, BaseEstimator):
"""
Remove all text except meaningful key-phrases. Uses [yake](https://github.com/LIAAD/yake).
Arguments:
top_n: number of key-phrases to select
unique: only return unique keywords from the key-phrases
Usage:
```python
from tokenwiser.textprep import YakeTextPrep
text = ["Sources tell us that Google is acquiring Kaggle, a platform that hosts data science and machine learning"]
example = YakeTextPrep(top_n=3, unique=False).transform(text)
assert example[0] == 'hosts data science acquiring kaggle google is acquiring'
```
"""
def __init__(self, top_n: int = 5, unique: bool = False):
self.top_n = top_n
self.unique = unique
self.extractor = yake.KeywordExtractor(top=self.top_n)
def encode_single(self, text):
texts = " ".join([t[0] for t in self.extractor.extract_keywords(text)])
if not self.unique:
return texts
return " ".join(set(texts.split(" ")))
| [
"yake.KeywordExtractor"
] | [((865, 902), 'yake.KeywordExtractor', 'yake.KeywordExtractor', ([], {'top': 'self.top_n'}), '(top=self.top_n)\n', (886, 902), False, 'import yake\n')] |
import os
from typing import List
from D_PostProcessSOMResults.accuracy_calculator import compute_accuracy
from F_Experiments_Helper.db import get_all_runs, update_run_entry
from F_Experiments_Helper.run_instance import RunInstance
def main():
run_instances: List[RunInstance] = get_all_runs(criteria="WHERE preckar is NULL and end_time is not NULL")
for i in run_instances:
print(i)
preckar = compute_accuracy(pairs_file=os.path.join(i.input_image_path[:-4], "pairs.txt"),
dist_matrix_file=i.dist_matrix_file_path, features_file=i.features_file_path,
neurons_file=i.initial_neurons_file, timestamp_str=i.start_time)
i.preckar = preckar
update_run_entry(i)
print(len(run_instances))
if __name__ == '__main__':
main()
| [
"F_Experiments_Helper.db.update_run_entry",
"F_Experiments_Helper.db.get_all_runs",
"os.path.join"
] | [((286, 357), 'F_Experiments_Helper.db.get_all_runs', 'get_all_runs', ([], {'criteria': '"""WHERE preckar is NULL and end_time is not NULL"""'}), "(criteria='WHERE preckar is NULL and end_time is not NULL')\n", (298, 357), False, 'from F_Experiments_Helper.db import get_all_runs, update_run_entry\n'), ((750, 769), 'F_Experiments_Helper.db.update_run_entry', 'update_run_entry', (['i'], {}), '(i)\n', (766, 769), False, 'from F_Experiments_Helper.db import get_all_runs, update_run_entry\n'), ((449, 499), 'os.path.join', 'os.path.join', (['i.input_image_path[:-4]', '"""pairs.txt"""'], {}), "(i.input_image_path[:-4], 'pairs.txt')\n", (461, 499), False, 'import os\n')] |
import re
import read_export
import pywikibot
comments = {
"<!-- or {{wowapievent}}, {{luapi}}, {{widgethandler}}, {{widgetmethod}}, {{framexmlfunc}} -->",
"<!-- Describe the purpose of the function as concisely as possible. -->\n",
"<!-- Describe the purpose of the function, exhausting detail can be saved for a later section -->\n",
"<!-- Describe the purpose of the function, though exhausting detail can be saved for a later section -->\n",
"<!-- List return values and arguments as well as function name, follow Blizzard usage convention for args -->\n",
"<!-- List each argument, together with its type -->\n",
"<!-- List each argument, together with its type; Remove entire section if the function takes no arguments -->\n",
"<!-- List each return value, together with its type -->\n",
"<!-- List each return value, together with its type; Remove entire section if the function does not return anything -->\n",
"<!-- List API functions, events, HOWTO guides, etc, related to using this function. Remove the section if none are applicable. -->\n",
"<!-- If it helps, include an example here, though it's not required if the usage is self-explanatory -->\n",
"<!-- If it helps, include example results here, though they are not required-->\n",
"<!-- If it helps, include example results here, though they are not required. You're allowed to cheat liberally since WoW isn't a command line language. -->\n",
"<!-- Please read https://wow.gamepedia.com/Wowpedia:External_links_policy before adding new links. -->\n",
"<!-- Please read https://wowpedia.fandom.com/Wowpedia:External_links_policy before adding new links. -->\n",
"<!-- Details not appropriate for the main description can go here -->\n",
"<!-- Details not appropriate for the main description can go here. REMOVE the section if you're just going to restate the intro line! -->\n",
"<!-- Details not appropriate for the main description can go here. \n REMOVE the section if you're just going to restate the intro line! -->\n",
"<!-- Details not appropriate for the concise description can go here. \n REMOVE the section if you're just going to restate the intro line! -->\n",
}
def strip_comments(text):
for s in comments:
text = text.replace(s, "")
return text
def parse_wikitext(name: str, text: str):
if "<!--" in text:
new_text = strip_comments(text)
if text!=new_text:
return new_text
def main():
changes = read_export.main(parse_wikitext)
site = pywikibot.Site("en", "wowpedia")
for l in changes:
name, text = l
page = pywikibot.Page(site, name)
page.text = text
page.save("Strip comments")
print("done")
if __name__ == "__main__":
main()
| [
"pywikibot.Page",
"pywikibot.Site",
"read_export.main"
] | [((2423, 2455), 'read_export.main', 'read_export.main', (['parse_wikitext'], {}), '(parse_wikitext)\n', (2439, 2455), False, 'import read_export\n'), ((2464, 2496), 'pywikibot.Site', 'pywikibot.Site', (['"""en"""', '"""wowpedia"""'], {}), "('en', 'wowpedia')\n", (2478, 2496), False, 'import pywikibot\n'), ((2542, 2568), 'pywikibot.Page', 'pywikibot.Page', (['site', 'name'], {}), '(site, name)\n', (2556, 2568), False, 'import pywikibot\n')] |
from flask import Blueprint
from ctflorals.controllers import HomeController
from ctflorals.controllers import AboutController
from ctflorals.controllers import GalleryController
from ctflorals.controllers import TestimonialsController
ctflorals = Blueprint('ctflorals', __name__,
template_folder='views',
static_folder='../resources')
@ctflorals.route("/")
def home():
return HomeController().index()
@ctflorals.route("/about/")
def about():
return AboutController().index()
@ctflorals.route("/gallery/")
def gallery():
return GalleryController().index()
@ctflorals.route("/testimonials/")
def testimonials():
return TestimonialsController().index()
| [
"ctflorals.controllers.GalleryController",
"ctflorals.controllers.HomeController",
"ctflorals.controllers.TestimonialsController",
"ctflorals.controllers.AboutController",
"flask.Blueprint"
] | [((250, 342), 'flask.Blueprint', 'Blueprint', (['"""ctflorals"""', '__name__'], {'template_folder': '"""views"""', 'static_folder': '"""../resources"""'}), "('ctflorals', __name__, template_folder='views', static_folder=\n '../resources')\n", (259, 342), False, 'from flask import Blueprint\n'), ((429, 445), 'ctflorals.controllers.HomeController', 'HomeController', ([], {}), '()\n', (443, 445), False, 'from ctflorals.controllers import HomeController\n'), ((508, 525), 'ctflorals.controllers.AboutController', 'AboutController', ([], {}), '()\n', (523, 525), False, 'from ctflorals.controllers import AboutController\n'), ((592, 611), 'ctflorals.controllers.GalleryController', 'GalleryController', ([], {}), '()\n', (609, 611), False, 'from ctflorals.controllers import GalleryController\n'), ((688, 712), 'ctflorals.controllers.TestimonialsController', 'TestimonialsController', ([], {}), '()\n', (710, 712), False, 'from ctflorals.controllers import TestimonialsController\n')] |
import math, pygame
from pygame.locals import *
#############################################
## Standard colors (RGB)
BLACK = (20, 20, 40)
WHITE = (255, 255, 255)
BLUE = (0, 0, 255)
GREEN = (0, 255, 0)
RED = (255, 0, 0)
#############################################
## Customize plot here
def function_to_print(x):
"""Write function to plot here.
Must take a single number x and return a single number y."""
return -x * (x - 3)
# Range of window
X_MIN = 0.0
X_MAX = 10.0
Y_MIN = -10.0
Y_MAX = 10.0
# Tick interval on axes
X_TICK = 2.5
Y_TICK = 2.5
# Granularity of plotted functions, more points -> higher resolution plot
N_POINTS = 100
# Colors
background_color = BLACK
plot_color = GREEN
grid_color = WHITE
# Note, it is also possible to make a list of functions to print
# and respective colors:
# functions = [(f1, color1), (f2, color2), ...]
#############################################
## Let the program calculate the rest
WIDTH = 640
HEIGHT = 480
X_SIZE = X_MAX - X_MIN
Y_SIZE = Y_MAX - Y_MIN
def coordinate_to_position(c):
"""Converts a model coordinate (vector) into a graphic position (pixel)"""
gx = (c[0] - X_MIN) * WIDTH / X_SIZE
gy = HEIGHT - (c[1] - Y_MIN) * HEIGHT / Y_SIZE
return gx, gy
def curve_coordinates(f, x0, x1, points):
"""Returns list of coordinates
Creates linear splines for this function f, from x0 to x1
Length of returned list == points."""
coordinates = []
x = x0
delta = (x1 - x0) / (points - 1)
while x <= x1:
coordinates += [[x, f(x)]]
x += delta
return coordinates
def linspace(x0, x1, points):
"""Returns a list of numbers of `points` elements,
with constant intervals between `x0` and `x1`"""
delta = (x1 - x0) / (points - 1)
return map(lambda x: x0 + delta * x, range(points))
def curve_coordinates2(f, x0, x1, points):
"""(Alternative implementation):
This is more compact and functional-like."""
return [[x, f(x)] for x in linspace(x0, x1, points)]
def draw_ticks(screen, axis):
"""Draws appropriate ticks on the specified axis.
axis == 0 -> X-axis, otherwise Y-axis.
This implementation is not so readable, see alternative implementation
for a more readable one."""
if axis == 0:
min = X_MIN
max = X_MAX
tick = X_TICK
limit = HEIGHT
else:
axis = 1
min = Y_MIN
max = Y_MAX
tick = Y_TICK
limit = WIDTH
start = min + min % tick
end = max - max % tick
points = (end - start) / tick + 1
t = limit / 120
for x in linspace(start, end, int(points)):
c = [0, 0]
c[axis] = x
v = coordinate_to_position(c)
a = v[1 - axis] + t
if a > limit:
a = limit
b = v[1 - axis] - t
if b < 0:
b = 0
# Copying v
s = list(v)
s[1 - axis] = a
e = list(v)
e[1 - axis] = b
pygame.draw.line(screen, grid_color, s, e, 2)
def draw_x_ticks(screen):
"""(Alternative implementation):
Draws appropriate ticks on the X-axis."""
start = X_MIN + X_MIN % X_TICK
end = X_MAX - X_MAX % X_TICK
points = (end - start) / X_TICK + 1
# t == half length of the tick line
t = HEIGHT / 120
# one iteration per tick
for x in linspace(start, end, int(points)):
v = coordinate_to_position([x, 0])
a = v[1] + t
b = v[1] - t
if a > HEIGHT:
a = HEIGHT
if b < 0:
b = 0
pygame.draw.line(screen, grid_color, [v[0], a], [v[0], b], 2)
def draw_y_ticks(screen):
"""(Alternative implementation):
Draws appropriate ticks on the Y-axis.
This function mirrors draw_x_ticks(...)"""
start = Y_MIN + Y_MIN % Y_TICK
end = Y_MAX - Y_MAX % Y_TICK
points = (end - start) / Y_TICK + 1
t = WIDTH / 120
for y in linspace(start, end, int(points)):
v = coordinate_to_position([0, y])
# print v
a = v[0] + t
b = v[0] - t
if (a > WIDTH):
a = WIDTH
if (b < 0):
b = 0
pygame.draw.line(screen, grid_color, [a, v[1]], [b, v[1]], 2)
def draw(screen, pp, plot_color):
"""Plots the points `pp` on the specified screen with the specified color."""
# Function
pygame.draw.lines(screen, plot_color, False, pp, 3)
def draw_axis(screen):
"""Draws the axes and ticks of the coordinate system."""
## Alternative implementations:
# draw_x_ticks(screen)
# draw_y_ticks(screen)
draw_ticks(screen, 0)
draw_ticks(screen, 1)
x_points = list(map(coordinate_to_position, [[X_MIN, 0], [X_MAX, 0]]))
y_points = list(map(coordinate_to_position, [[0, Y_MIN], [0, Y_MAX]]))
# X-Axis
pygame.draw.lines(screen, grid_color, False, x_points, 2)
# Y-Axis
pygame.draw.lines(screen, grid_color, False, y_points, 2)
def main():
"""Graphics: draws graphs on window and await EXIT or ESCAPE."""
pygame.init()
screen = pygame.display.set_mode([WIDTH, HEIGHT])
pygame.display.set_caption('Plot 2d')
clock = pygame.time.Clock()
screen.fill(background_color)
cc = curve_coordinates(function_to_print, X_MIN, X_MAX, N_POINTS)
pp = list(map(coordinate_to_position, cc))
# This would typically be done inside the loop, but since it is never
# updated: might as well keep it outside
draw(screen, pp, plot_color)
draw_axis(screen)
done = False
while not done:
time = clock.tick(60)
pygame.display.update()
for e in pygame.event.get():
if e.type == QUIT or (e.type == KEYUP and e.key == K_ESCAPE):
done = True
break
pygame.quit()
# if Python says run...
if __name__ == '__main__':
main()
| [
"pygame.display.set_caption",
"pygame.init",
"pygame.draw.lines",
"pygame.quit",
"pygame.draw.line",
"pygame.display.set_mode",
"pygame.event.get",
"pygame.time.Clock",
"pygame.display.update"
] | [((4349, 4400), 'pygame.draw.lines', 'pygame.draw.lines', (['screen', 'plot_color', '(False)', 'pp', '(3)'], {}), '(screen, plot_color, False, pp, 3)\n', (4366, 4400), False, 'import math, pygame\n'), ((4799, 4856), 'pygame.draw.lines', 'pygame.draw.lines', (['screen', 'grid_color', '(False)', 'x_points', '(2)'], {}), '(screen, grid_color, False, x_points, 2)\n', (4816, 4856), False, 'import math, pygame\n'), ((4875, 4932), 'pygame.draw.lines', 'pygame.draw.lines', (['screen', 'grid_color', '(False)', 'y_points', '(2)'], {}), '(screen, grid_color, False, y_points, 2)\n', (4892, 4932), False, 'import math, pygame\n'), ((5020, 5033), 'pygame.init', 'pygame.init', ([], {}), '()\n', (5031, 5033), False, 'import math, pygame\n'), ((5047, 5087), 'pygame.display.set_mode', 'pygame.display.set_mode', (['[WIDTH, HEIGHT]'], {}), '([WIDTH, HEIGHT])\n', (5070, 5087), False, 'import math, pygame\n'), ((5092, 5129), 'pygame.display.set_caption', 'pygame.display.set_caption', (['"""Plot 2d"""'], {}), "('Plot 2d')\n", (5118, 5129), False, 'import math, pygame\n'), ((5143, 5162), 'pygame.time.Clock', 'pygame.time.Clock', ([], {}), '()\n', (5160, 5162), False, 'import math, pygame\n'), ((5757, 5770), 'pygame.quit', 'pygame.quit', ([], {}), '()\n', (5768, 5770), False, 'import math, pygame\n'), ((2976, 3021), 'pygame.draw.line', 'pygame.draw.line', (['screen', 'grid_color', 's', 'e', '(2)'], {}), '(screen, grid_color, s, e, 2)\n', (2992, 3021), False, 'import math, pygame\n'), ((3557, 3618), 'pygame.draw.line', 'pygame.draw.line', (['screen', 'grid_color', '[v[0], a]', '[v[0], b]', '(2)'], {}), '(screen, grid_color, [v[0], a], [v[0], b], 2)\n', (3573, 3618), False, 'import math, pygame\n'), ((4150, 4211), 'pygame.draw.line', 'pygame.draw.line', (['screen', 'grid_color', '[a, v[1]]', '[b, v[1]]', '(2)'], {}), '(screen, grid_color, [a, v[1]], [b, v[1]], 2)\n', (4166, 4211), False, 'import math, pygame\n'), ((5567, 5590), 'pygame.display.update', 'pygame.display.update', ([], {}), 
'()\n', (5588, 5590), False, 'import math, pygame\n'), ((5608, 5626), 'pygame.event.get', 'pygame.event.get', ([], {}), '()\n', (5624, 5626), False, 'import math, pygame\n')] |
""" Seeding the db """
import os, json, crud, server, model
os.system('dropdb radlist')
os.system('createdb radlist')
model.connect_to_db(server.app)
model.db.create_all()
# # Create users
# create_user(fname, lname, email, password)
alex = crud.create_user('alex', 'sanchez', '<EMAIL>', 'asanch')
pollo = crud.create_user('pollo', 'cat', '<EMAIL>', 'luvsnax')
bellina = crud.create_user('bellina', 'kitty', '<EMAIL>', 'luvsun')
# # Create playlists
# create_playlist(user, name)
# crud.create_playlist(alex, 'yacht rockers')
# crud.create_playlist(pollo, 'divas')
# crud.create_playlist(bellina, 'metal magic')
# # Create tracks
# create_track(title, artist)
# crud.create_track('sailin', '<NAME>')
# crud.create_track('paradise', 'sade')
# crud.create_track('one', 'metallica')
# #Associate tracks with playlists
# create_playlist_track(playlist_id, track_id)
# crud.create_playlist_track(1, 1)
# crud.create_playlist_track(2, 2)
# crud.create_playlist_track(3, 3)
# only running once to populate database
| [
"os.system",
"model.db.create_all",
"model.connect_to_db",
"crud.create_user"
] | [((63, 90), 'os.system', 'os.system', (['"""dropdb radlist"""'], {}), "('dropdb radlist')\n", (72, 90), False, 'import os, json, crud, server, model\n'), ((91, 120), 'os.system', 'os.system', (['"""createdb radlist"""'], {}), "('createdb radlist')\n", (100, 120), False, 'import os, json, crud, server, model\n'), ((122, 153), 'model.connect_to_db', 'model.connect_to_db', (['server.app'], {}), '(server.app)\n', (141, 153), False, 'import os, json, crud, server, model\n'), ((154, 175), 'model.db.create_all', 'model.db.create_all', ([], {}), '()\n', (173, 175), False, 'import os, json, crud, server, model\n'), ((246, 302), 'crud.create_user', 'crud.create_user', (['"""alex"""', '"""sanchez"""', '"""<EMAIL>"""', '"""asanch"""'], {}), "('alex', 'sanchez', '<EMAIL>', 'asanch')\n", (262, 302), False, 'import os, json, crud, server, model\n'), ((311, 365), 'crud.create_user', 'crud.create_user', (['"""pollo"""', '"""cat"""', '"""<EMAIL>"""', '"""luvsnax"""'], {}), "('pollo', 'cat', '<EMAIL>', 'luvsnax')\n", (327, 365), False, 'import os, json, crud, server, model\n'), ((376, 433), 'crud.create_user', 'crud.create_user', (['"""bellina"""', '"""kitty"""', '"""<EMAIL>"""', '"""luvsun"""'], {}), "('bellina', 'kitty', '<EMAIL>', 'luvsun')\n", (392, 433), False, 'import os, json, crud, server, model\n')] |
"""modify_tag_history_column
Revision ID: 96a7e3a61347
Revises: <PASSWORD>
Create Date: 2021-10-06 11:55:30.187627
"""
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import postgresql
# revision identifiers, used by Alembic.
revision = '<KEY>'
down_revision = '<PASSWORD>'
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.add_column('issue_tag_history', sa.Column('historys', sa.ARRAY(postgresql.JSONB(astext_type=sa.Text())), nullable=True))
op.drop_column('issue_tag_history', 'history')
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.add_column('issue_tag_history', sa.Column('history', postgresql.JSONB(astext_type=sa.Text()), autoincrement=False, nullable=True))
op.drop_column('issue_tag_history', 'historys')
# ### end Alembic commands ###
| [
"sqlalchemy.Text",
"alembic.op.drop_column"
] | [((555, 601), 'alembic.op.drop_column', 'op.drop_column', (['"""issue_tag_history"""', '"""history"""'], {}), "('issue_tag_history', 'history')\n", (569, 601), False, 'from alembic import op\n'), ((864, 911), 'alembic.op.drop_column', 'op.drop_column', (['"""issue_tag_history"""', '"""historys"""'], {}), "('issue_tag_history', 'historys')\n", (878, 911), False, 'from alembic import op\n'), ((811, 820), 'sqlalchemy.Text', 'sa.Text', ([], {}), '()\n', (818, 820), True, 'import sqlalchemy as sa\n'), ((522, 531), 'sqlalchemy.Text', 'sa.Text', ([], {}), '()\n', (529, 531), True, 'import sqlalchemy as sa\n')] |
from django.core.management.base import BaseCommand
from orcamentos.crm.models import Employee
class Command(BaseCommand):
help = ''' Cria um usuário admin. '''
def handle(self, *args, **kwargs):
'''
Cria um Employee.
Precisamos de Employee para fazer todas as transações no sistema.
'''
username = 'admin'
first_name = 'Admin'
last_name = 'Admin'
email = '<EMAIL>'
user = Employee.objects.create(
username=username,
first_name=first_name,
last_name=last_name,
email=email,
gender='I'
)
user.set_password('<PASSWORD>')
user.is_staff = True
user.is_superuser = True
user.is_active = True
user.save()
print('Usuário criado com sucesso.')
| [
"orcamentos.crm.models.Employee.objects.create"
] | [((456, 572), 'orcamentos.crm.models.Employee.objects.create', 'Employee.objects.create', ([], {'username': 'username', 'first_name': 'first_name', 'last_name': 'last_name', 'email': 'email', 'gender': '"""I"""'}), "(username=username, first_name=first_name, last_name\n =last_name, email=email, gender='I')\n", (479, 572), False, 'from orcamentos.crm.models import Employee\n')] |
# Copyright (c) 2020 <NAME> OpenDigitalStudio.net
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import List
from fastapi import APIRouter
from fastapi import Depends
from fastapi import HTTPException
from sqlalchemy.orm import Session
from burren.db import crud
from burren.db import database
from burren.db import schemas
router = APIRouter()
@router.get('/', response_model=List[schemas.Session])
async def list_sessions(skip: int = 0, limit: int = 100,
db: Session = Depends(database.get_db)):
sessions = crud.list_sessions(db, skip=skip, limit=limit)
return sessions
@router.post('/', response_model=schemas.Session)
async def create_session(new_session: schemas.SessionCreate,
db: Session = Depends(database.get_db)):
owner = crud.get_user(db, user_id=new_session.owner_id)
if owner is None:
raise HTTPException(status_code=404, detail="Owner not found")
db_session = crud.create_session(db, new_session)
return db_session
@router.get('/{session_id}', response_model=schemas.Session)
async def get_session(session_id: str,
db: Session = Depends(database.get_db)):
db_session = crud.get_session(db, session_id=session_id)
if db_session is None:
raise HTTPException(status_code=404, detail="Sesssion not found")
return db_session
| [
"burren.db.crud.list_sessions",
"fastapi.HTTPException",
"burren.db.crud.get_user",
"fastapi.APIRouter",
"burren.db.crud.create_session",
"burren.db.crud.get_session",
"fastapi.Depends"
] | [((848, 859), 'fastapi.APIRouter', 'APIRouter', ([], {}), '()\n', (857, 859), False, 'from fastapi import APIRouter\n'), ((1012, 1036), 'fastapi.Depends', 'Depends', (['database.get_db'], {}), '(database.get_db)\n', (1019, 1036), False, 'from fastapi import Depends\n'), ((1054, 1100), 'burren.db.crud.list_sessions', 'crud.list_sessions', (['db'], {'skip': 'skip', 'limit': 'limit'}), '(db, skip=skip, limit=limit)\n', (1072, 1100), False, 'from burren.db import crud\n'), ((1273, 1297), 'fastapi.Depends', 'Depends', (['database.get_db'], {}), '(database.get_db)\n', (1280, 1297), False, 'from fastapi import Depends\n'), ((1312, 1359), 'burren.db.crud.get_user', 'crud.get_user', (['db'], {'user_id': 'new_session.owner_id'}), '(db, user_id=new_session.owner_id)\n', (1325, 1359), False, 'from burren.db import crud\n'), ((1471, 1507), 'burren.db.crud.create_session', 'crud.create_session', (['db', 'new_session'], {}), '(db, new_session)\n', (1490, 1507), False, 'from burren.db import crud\n'), ((1668, 1692), 'fastapi.Depends', 'Depends', (['database.get_db'], {}), '(database.get_db)\n', (1675, 1692), False, 'from fastapi import Depends\n'), ((1712, 1755), 'burren.db.crud.get_session', 'crud.get_session', (['db'], {'session_id': 'session_id'}), '(db, session_id=session_id)\n', (1728, 1755), False, 'from burren.db import crud\n'), ((1396, 1452), 'fastapi.HTTPException', 'HTTPException', ([], {'status_code': '(404)', 'detail': '"""Owner not found"""'}), "(status_code=404, detail='Owner not found')\n", (1409, 1452), False, 'from fastapi import HTTPException\n'), ((1797, 1856), 'fastapi.HTTPException', 'HTTPException', ([], {'status_code': '(404)', 'detail': '"""Sesssion not found"""'}), "(status_code=404, detail='Sesssion not found')\n", (1810, 1856), False, 'from fastapi import HTTPException\n')] |