**Schema** (column · dtype):

| column | dtype |
|---|---|
| hexsha | string |
| size | int64 |
| ext | string |
| lang | string |
| max_stars_repo_path | string |
| max_stars_repo_name | string |
| max_stars_repo_head_hexsha | string |
| max_stars_repo_licenses | list |
| max_stars_count | int64 |
| max_stars_repo_stars_event_min_datetime | string |
| max_stars_repo_stars_event_max_datetime | string |
| max_issues_repo_path | string |
| max_issues_repo_name | string |
| max_issues_repo_head_hexsha | string |
| max_issues_repo_licenses | list |
| max_issues_count | int64 |
| max_issues_repo_issues_event_min_datetime | string |
| max_issues_repo_issues_event_max_datetime | string |
| max_forks_repo_path | string |
| max_forks_repo_name | string |
| max_forks_repo_head_hexsha | string |
| max_forks_repo_licenses | list |
| max_forks_count | int64 |
| max_forks_repo_forks_event_min_datetime | string |
| max_forks_repo_forks_event_max_datetime | string |
| content | string |
| avg_line_length | float64 |
| max_line_length | int64 |
| alphanum_fraction | float64 |
| qsc_code_num_words_quality_signal | int64 |
| qsc_code_num_chars_quality_signal | float64 |
| qsc_code_mean_word_length_quality_signal | float64 |
| qsc_code_frac_words_unique_quality_signal | float64 |
| qsc_code_frac_chars_top_2grams_quality_signal | float64 |
| qsc_code_frac_chars_top_3grams_quality_signal | float64 |
| qsc_code_frac_chars_top_4grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_5grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_6grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_7grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_8grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_9grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_10grams_quality_signal | float64 |
| qsc_code_frac_chars_replacement_symbols_quality_signal | float64 |
| qsc_code_frac_chars_digital_quality_signal | float64 |
| qsc_code_frac_chars_whitespace_quality_signal | float64 |
| qsc_code_size_file_byte_quality_signal | float64 |
| qsc_code_num_lines_quality_signal | float64 |
| qsc_code_num_chars_line_max_quality_signal | float64 |
| qsc_code_num_chars_line_mean_quality_signal | float64 |
| qsc_code_frac_chars_alphabet_quality_signal | float64 |
| qsc_code_frac_chars_comments_quality_signal | float64 |
| qsc_code_cate_xml_start_quality_signal | float64 |
| qsc_code_frac_lines_dupe_lines_quality_signal | float64 |
| qsc_code_cate_autogen_quality_signal | float64 |
| qsc_code_frac_lines_long_string_quality_signal | float64 |
| qsc_code_frac_chars_string_length_quality_signal | float64 |
| qsc_code_frac_chars_long_word_length_quality_signal | float64 |
| qsc_code_frac_lines_string_concat_quality_signal | float64 |
| qsc_code_cate_encoded_data_quality_signal | float64 |
| qsc_code_frac_chars_hex_words_quality_signal | float64 |
| qsc_code_frac_lines_prompt_comments_quality_signal | float64 |
| qsc_code_frac_lines_assert_quality_signal | float64 |
| qsc_codepython_cate_ast_quality_signal | float64 |
| qsc_codepython_frac_lines_func_ratio_quality_signal | float64 |
| qsc_codepython_cate_var_zero_quality_signal | bool |
| qsc_codepython_frac_lines_pass_quality_signal | float64 |
| qsc_codepython_frac_lines_import_quality_signal | float64 |
| qsc_codepython_frac_lines_simplefunc_quality_signal | float64 |
| qsc_codepython_score_lines_no_logic_quality_signal | float64 |
| qsc_codepython_frac_lines_print_quality_signal | float64 |
| qsc_code_num_words | int64 |
| qsc_code_num_chars | int64 |
| qsc_code_mean_word_length | int64 |
| qsc_code_frac_words_unique | null |
| qsc_code_frac_chars_top_2grams | int64 |
| qsc_code_frac_chars_top_3grams | int64 |
| qsc_code_frac_chars_top_4grams | int64 |
| qsc_code_frac_chars_dupe_5grams | int64 |
| qsc_code_frac_chars_dupe_6grams | int64 |
| qsc_code_frac_chars_dupe_7grams | int64 |
| qsc_code_frac_chars_dupe_8grams | int64 |
| qsc_code_frac_chars_dupe_9grams | int64 |
| qsc_code_frac_chars_dupe_10grams | int64 |
| qsc_code_frac_chars_replacement_symbols | int64 |
| qsc_code_frac_chars_digital | int64 |
| qsc_code_frac_chars_whitespace | int64 |
| qsc_code_size_file_byte | int64 |
| qsc_code_num_lines | int64 |
| qsc_code_num_chars_line_max | int64 |
| qsc_code_num_chars_line_mean | int64 |
| qsc_code_frac_chars_alphabet | int64 |
| qsc_code_frac_chars_comments | int64 |
| qsc_code_cate_xml_start | int64 |
| qsc_code_frac_lines_dupe_lines | int64 |
| qsc_code_cate_autogen | int64 |
| qsc_code_frac_lines_long_string | int64 |
| qsc_code_frac_chars_string_length | int64 |
| qsc_code_frac_chars_long_word_length | int64 |
| qsc_code_frac_lines_string_concat | null |
| qsc_code_cate_encoded_data | int64 |
| qsc_code_frac_chars_hex_words | int64 |
| qsc_code_frac_lines_prompt_comments | int64 |
| qsc_code_frac_lines_assert | int64 |
| qsc_codepython_cate_ast | int64 |
| qsc_codepython_frac_lines_func_ratio | int64 |
| qsc_codepython_cate_var_zero | int64 |
| qsc_codepython_frac_lines_pass | int64 |
| qsc_codepython_frac_lines_import | int64 |
| qsc_codepython_frac_lines_simplefunc | int64 |
| qsc_codepython_score_lines_no_logic | int64 |
| qsc_codepython_frac_lines_print | int64 |
| effective | string |
| hits | int64 |
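A sketch of how a shard with this schema might be inspected, assuming the records are stored as Parquet (the file name `data.parquet` is hypothetical; any shard with the columns above works). The filter thresholds are illustrative, not part of the dataset:

```python
import pandas as pd

# Hypothetical file name; substitute the actual shard path.
df = pd.read_parquet("data.parquet")

# Keep files that parse as Python (cate_ast == 1) and are not
# dominated by duplicated text or pathological line lengths.
mask = (
    (df["qsc_codepython_cate_ast_quality_signal"] == 1)
    & (df["qsc_code_frac_chars_dupe_10grams_quality_signal"] < 0.2)
    & (df["max_line_length"] < 1000)
)
print(df.loc[mask, ["max_stars_repo_name", "size", "alphanum_fraction"]])
```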
---

**hexsha:** `0acabac25e7f182a0cc9d197e74fb9a54f708fdd` · **size:** 629 · **ext:** py · **lang:** Python

|  | repo_path | repo_name | repo_head_hexsha | licenses | count | event_min_datetime | event_max_datetime |
|---|---|---|---|---|---|---|---|
| max_stars | day10/samematrix.py | nikhilsamninan/python-files | 15198459081097058a939b40b5e8ef754e578fe0 | ["Apache-2.0"] | null | null | null |
| max_issues | day10/samematrix.py | nikhilsamninan/python-files | 15198459081097058a939b40b5e8ef754e578fe0 | ["Apache-2.0"] | null | null | null |
| max_forks | day10/samematrix.py | nikhilsamninan/python-files | 15198459081097058a939b40b5e8ef754e578fe0 | ["Apache-2.0"] | null | null | null |

**content:**

```python
def matrix_form():
    r = int(input("Enter the no of rows"))
    c = int(input("Enter the no of columns"))
    matrix = []
    print("Enter the enteries")
    for i in range(r):
        a = []
        for j in range(c):
            a.append(int(input()))
        matrix.append(a)
    return(matrix)


def check_matrix(first_matrix, sec_matrix):
    if(first_matrix == sec_matrix):
        print("same")
    else:
        print("not same")


print("Enter the 1st matrix")
first_matrix = matrix_form()
print(first_matrix)
print("Enter the 2nd matrix")
sec_matrix = matrix_form()
print(sec_matrix)
check_matrix(first_matrix, sec_matrix)
```

**Line stats:** avg_line_length 22.464286 · max_line_length 45 · alphanum_fraction 0.63434

**`qsc_code_*_quality_signal`:** num_words 92 · num_chars 629 · mean_word_length 4.173913 · frac_words_unique 0.347826 · frac_chars_top_2grams 0.104167 · frac_chars_top_3grams 0.15625 · frac_chars_top_4grams 0.15625 · frac_chars_dupe_5grams 0.265625 · frac_chars_dupe_6grams 0.265625 · frac_chars_dupe_7grams 0 · frac_chars_dupe_8grams 0 · frac_chars_dupe_9grams 0 · frac_chars_dupe_10grams 0 · frac_chars_replacement_symbols 0 · frac_chars_digital 0.004141 · frac_chars_whitespace 0.232114 · size_file_byte 629 · num_lines 28 · num_chars_line_max 46 · num_chars_line_mean 22.464286 · frac_chars_alphabet 0.79089 · frac_chars_comments 0 · cate_xml_start 0 · frac_lines_dupe_lines 0 · cate_autogen 0 · frac_lines_long_string 0 · frac_chars_string_length 0.179365 · frac_chars_long_word_length 0 · frac_lines_string_concat 0 · cate_encoded_data 0 · frac_chars_hex_words 0 · frac_lines_prompt_comments 0 · frac_lines_assert 0

**`qsc_codepython_*_quality_signal`:** cate_ast 1 · frac_lines_func_ratio 0.086957 · cate_var_zero false · frac_lines_pass 0 · frac_lines_import 0 · frac_lines_simplefunc 0 · score_lines_no_logic 0.086957 · frac_lines_print 0.304348

**Raw signal columns** (without the `_quality_signal` suffix): all 0, except `qsc_code_frac_words_unique` and `qsc_code_frac_lines_string_concat` (null).

**effective:** 1 · **hits:** 0
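One detail worth noting in the record above: `first_matrix == sec_matrix` compares the nested lists element by element, so no explicit loop is needed for the equality check. A minimal sketch of the same comparison, with NumPy shown as a hedged alternative for numeric matrices:

```python
import numpy as np

a = [[1, 2], [3, 4]]
b = [[1, 2], [3, 4]]

# Plain lists: == recurses into the nested structure.
print(a == b)                                     # True

# NumPy: array_equal additionally checks that shapes match.
print(np.array_equal(np.array(a), np.array(b)))   # True
```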
---

**hexsha:** `0accac5244ae00b90c3dcaa313e0ad6674cf5f7f` · **size:** 5,284 · **ext:** py · **lang:** Python

|  | repo_path | repo_name | repo_head_hexsha | licenses | count | event_min_datetime | event_max_datetime |
|---|---|---|---|---|---|---|---|
| max_stars | kepler.py | mdbernard/astrodynamics | cf98df6cd17086e3675c1f7c2fce342d5322ee51 | ["MIT"] | null | null | null |
| max_issues | kepler.py | mdbernard/astrodynamics | cf98df6cd17086e3675c1f7c2fce342d5322ee51 | ["MIT"] | 14 | 2020-11-10T02:37:15.000Z | 2022-02-07T01:11:29.000Z |
| max_forks | kepler.py | mdbernard/astrodynamics | cf98df6cd17086e3675c1f7c2fce342d5322ee51 | ["MIT"] | null | null | null |

**content:**

```python
import numpy as np

from stumpff import C, S
from CelestialBody import BODIES
from numerical import newton, laguerre
from lagrange import calc_f, calc_fd, calc_g, calc_gd


def kepler_chi(chi, alpha, r0, vr0, mu, dt):
    ''' Kepler's Equation of the universal anomaly, modified
    for use in numerical solvers. '''
    z = alpha*chi**2
    return (r0*vr0/np.sqrt(mu))*chi**2*C(z) + \
           (1 - alpha*r0)*chi**3*S(z) + \
           r0*chi - np.sqrt(mu)*dt


def dkepler_dchi(chi, alpha, r0, vr0, mu, dt):
    ''' Derivative of Kepler's Equation of the universal anomaly,
    modified for use in numerical solvers. '''
    z = alpha*chi**2
    return (r0*vr0/np.sqrt(mu))*chi*(1 - alpha*chi**2*S(z)) + \
           (1 - alpha*r0)*chi**2*C(z) + r0


def d2kepler_dchi2(chi, alpha, r0, vr0, mu, dt):
    ''' Second derivative of Kepler's Equation of the universal
    anomaly, modified for use in numerical solvers. '''
    z = alpha*chi**2
    S_ = S(z)
    return (r0*vr0/np.sqrt(mu))*(1 - 3*z*S_ + z*(C(z) - 3*S_)) + \
           chi*(1 - z*S_)*(1 - alpha*r0)


def solve_kepler_chi(r_0, v_0, dt, body=BODIES['Earth'], method='laguerre', tol=1e-7, max_iters=100):
    ''' Solve Kepler's Equation of the universal anomaly chi using the specified
    numerical method. Applies Algorithm 3.4 from Orbital Mechanics for Engineering
    Students, 4 ed, Curtis.
    :param r_0: `iterable` (km) initial position 3-vector
    :param v_0: `iterable` (km/s) initial velocity 3-vector
    :param dt: `float` (s) time after initial state to solve for r, v as 3-vectors
    :param body: `CelestialBody` (--) the celestial body to use for orbital parameters
    :param method: `str` (--) which numerical method to use to solve Kepler's Equation
    :param tol: `float` (--) decimal tolerance for numerical method (default 1e-7 is IEEE 745 single precision)
    :param max_iters: `int` (--) maximum number of iterations in numerical method before breaking
    :return: (km) final position 3-vector, (km/s) final velocity 3-vector
    '''
    VALID_METHODS = ('laguerre', 'newton')

    mu = body.mu  # (km**3/s**2) gravitational parameter of the specified primary body
    r0 = np.linalg.norm(r_0)  # (km) initial position magnitude
    v0 = np.linalg.norm(v_0)  # (km/s) initial velocity magnitude
    vr0 = np.dot(v_0, r_0)/r0  # (km/s) initial radial velocity magnitude
    alpha = 2/r0 - v0**2/mu  # (1/km) inverse of semi-major axis

    chi0 = np.sqrt(mu)*np.abs(alpha)*dt

    if method not in VALID_METHODS:
        print(f'Method \'{method}\' is not valid, must be one of {VALID_METHODS}.\nDefaulting to laguerre method.')
        chi, _, _ = laguerre(chi0, kepler_chi, dkepler_dchi, d2kepler_dchi2, alpha, r0, vr0, mu, dt)
    elif method == 'newton':
        chi, _, _ = newton(chi0, kepler_chi, dkepler_dchi, alpha, r0, vr0, mu, dt)
    else:  # method == 'laguerre'
        chi, _, _ = laguerre(chi0, kepler_chi, dkepler_dchi, d2kepler_dchi2, alpha, r0, vr0, mu, dt)

    f = calc_f(chi, r0, alpha)
    g = calc_g(dt, mu, chi, alpha)
    r_1 = f*r_0 + g*v_0
    r1 = np.linalg.norm(r_1)
    fd = calc_fd(mu, r1, r0, alpha, chi)
    gd = calc_gd(chi, r1, alpha)
    v_1 = fd*r_0 + gd*v_0

    return r_1, v_1


def solve_kepler_E(e, Me, tol=1e-7, max_iters=100):
    ''' Solve Kepler's Equation in the form containing Eccentric Anomaly (E),
    eccentricity (e), and Mean Anomaly of Ellipse (Me). Uses Algorithm 3.1 from Orbital
    Mechanics for Engineering Students, 4 ed, Curtis. '''
    # TODO: have this function make use of one of the numerical methods in numerical.py
    def f(E, e, Me):
        return E - e*np.sin(E) - Me

    def fp(E, e):
        return 1 - e*np.cos(E)

    E = Me + e/2 if Me < np.pi else Me - e/2
    ratio = f(E, e, Me)/fp(E, e)
    iters = 0
    while abs(ratio) > tol and iters < max_iters:
        E -= ratio
        ratio = f(E, e, Me)/fp(E, e)
        iters += 1
    E -= ratio
    converged = np.abs(ratio) <= tol
    return E, iters, converged


def test():
    ''' Test the functionality of solve_kepler_chi
    and solve_kepler_laguerre using Problem 3.20 from
    Orbital Mechanics for Engineering Students, 4 ed, Curtis.
    '''
    # given starting information
    Earth = BODIES['Earth']  # `CelestialBody` (--) Earth and all the Earth things
    r_0 = np.array([20000, -105000, -19000])  # (km) initial position vector
    v_0 = np.array([0.9, -3.4, -1.5])  # (km/s) initial velocity vector
    dt = 2*60*60  # (s) time of interest after initial time

    # given correct answer from textbook
    correct_r_1 = np.array([26338, -128750, -29656])  # (km) final position vector
    correct_v_1 = np.array([0.86280, -3.2116, -1.4613])  # (km/s) final velocity vector

    # solve using above methods
    r_n, v_n = solve_kepler_chi(r_0, v_0, dt, Earth, method='newton')
    r_l, v_l = solve_kepler_chi(r_0, v_0, dt, Earth, method='laguerre')

    # check correctness
    # tolerance based on significant figures of given answers
    newton_valid = np.allclose(r_n, correct_r_1, atol=1) and np.allclose(v_n, correct_v_1, atol=1e-4)
    laguerre_valid = np.allclose(r_l, correct_r_1, atol=1) and np.allclose(v_l, correct_v_1, atol=1e-4)

    return all([newton_valid, laguerre_valid])


if __name__ == '__main__':
    print(test())
```

**Line stats:** avg_line_length 39.140741 · max_line_length 115 · alphanum_fraction 0.645912

**`qsc_code_*_quality_signal`:** num_words 867 · num_chars 5,284 · mean_word_length 3.817762 · frac_words_unique 0.224913 · frac_chars_top_2grams 0.019033 · frac_chars_top_3grams 0.018127 · frac_chars_top_4grams 0.021752 · frac_chars_dupe_5grams 0.306344 · frac_chars_dupe_6grams 0.287613 · frac_chars_dupe_7grams 0.256798 · frac_chars_dupe_8grams 0.250453 · frac_chars_dupe_9grams 0.244411 · frac_chars_dupe_10grams 0.169789 · frac_chars_replacement_symbols 0 · frac_chars_digital 0.046993 · frac_chars_whitespace 0.222748 · size_file_byte 5,284 · num_lines 134 · num_chars_line_max 116 · num_chars_line_mean 39.432836 · frac_chars_alphabet 0.758948 · frac_chars_comments 0.402914 · cate_xml_start 0 · frac_lines_dupe_lines 0.126761 · cate_autogen 0 · frac_lines_long_string 0 · frac_chars_string_length 0.048296 · frac_chars_long_word_length 0.009262 · frac_lines_string_concat 0 · cate_encoded_data 0 · frac_chars_hex_words 0 · frac_lines_prompt_comments 0.007463 · frac_lines_assert 0

**`qsc_codepython_*_quality_signal`:** cate_ast 1 · frac_lines_func_ratio 0.112676 · cate_var_zero false · frac_lines_pass 0 · frac_lines_import 0.070423 · frac_lines_simplefunc 0.028169 · score_lines_no_logic 0.295775 · frac_lines_print 0.028169

**Raw signal columns** (without the `_quality_signal` suffix): all 0, except `qsc_code_frac_words_unique` and `qsc_code_frac_lines_string_concat` (null).

**effective:** 1 · **hits:** 0
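kepler.py above imports `C` and `S` from a local `stumpff` module that is not part of this record. As an assumption about what that module contains, here is a minimal sketch of the standard Stumpff functions those names conventionally denote (the piecewise definitions used alongside the universal-anomaly formulation in Curtis):

```python
import numpy as np

def C(z):
    """Stumpff C(z) = (1 - cos(sqrt(z))) / z, extended to z <= 0."""
    if z > 0:
        return (1 - np.cos(np.sqrt(z))) / z
    if z < 0:
        return (np.cosh(np.sqrt(-z)) - 1) / (-z)
    return 1 / 2  # limit as z -> 0

def S(z):
    """Stumpff S(z) = (sqrt(z) - sin(sqrt(z))) / sqrt(z)**3, extended to z <= 0."""
    if z > 0:
        sz = np.sqrt(z)
        return (sz - np.sin(sz)) / sz**3
    if z < 0:
        sz = np.sqrt(-z)
        return (np.sinh(sz) - sz) / sz**3
    return 1 / 6  # limit as z -> 0

print(C(0.5), S(0.5))  # quick smoke test
```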
---

**hexsha:** `0acd26a6aeb9fbb21484a68cd667f26b74d856f7` · **size:** 952 · **ext:** py · **lang:** Python

|  | repo_path | repo_name | repo_head_hexsha | licenses | count | event_min_datetime | event_max_datetime |
|---|---|---|---|---|---|---|---|
| max_stars | nicos_demo/vpgaa/setups/pgai.py | jkrueger1/nicos | 5f4ce66c312dedd78995f9d91e8a6e3c891b262b | ["CC-BY-3.0", "Apache-2.0", "CC-BY-4.0"] | 12 | 2019-11-06T15:40:36.000Z | 2022-01-01T16:23:00.000Z |
| max_issues | nicos_demo/vpgaa/setups/pgai.py | jkrueger1/nicos | 5f4ce66c312dedd78995f9d91e8a6e3c891b262b | ["CC-BY-3.0", "Apache-2.0", "CC-BY-4.0"] | 91 | 2020-08-18T09:20:26.000Z | 2022-02-01T11:07:14.000Z |
| max_forks | nicos_demo/vpgaa/setups/pgai.py | jkrueger1/nicos | 5f4ce66c312dedd78995f9d91e8a6e3c891b262b | ["CC-BY-3.0", "Apache-2.0", "CC-BY-4.0"] | 6 | 2020-01-11T10:52:30.000Z | 2022-02-25T12:35:23.000Z |

**content:**

```python
description = 'PGAA setup with XYZOmega sample table'

group = 'basic'

sysconfig = dict(
    datasinks = ['mcasink', 'chnsink', 'csvsink', 'livesink']
)

includes = [
    'system',
    'reactor',
    'nl4b',
    'pressure',
    'sampletable',
    'pilz',
    'detector',
    'collimation',
]

devices = dict(
    mcasink = device('nicos_mlz.pgaa.devices.MCASink',
        settypes = {'point'},
        detectors = ['_60p', 'LEGe'],
    ),
    chnsink = device('nicos_mlz.pgaa.devices.CHNSink',
        settypes = {'point'},
        detectors = ['_60p', 'LEGe'],
    ),
    csvsink = device('nicos_mlz.pgaa.devices.CSVDataSink',
        settypes = {'point'},
    ),
)

startupcode = """
SetDetectors('_60p', 'LEGe')
SetEnvironment(chamber_pressure)
printinfo("============================================================")
printinfo("Welcome to the NICOS PGAI demo setup.")
printinfo("============================================================")
"""
```

**Line stats:** avg_line_length 23.219512 · max_line_length 73 · alphanum_fraction 0.522059

**`qsc_code_*_quality_signal`:** num_words 77 · num_chars 952 · mean_word_length 6.363636 · frac_words_unique 0.584416 · frac_chars_top_2grams 0.067347 · frac_chars_top_3grams 0.085714 · frac_chars_top_4grams 0.110204 · frac_chars_dupe_5grams 0.271429 · frac_chars_dupe_6grams 0 · frac_chars_dupe_7grams 0 · frac_chars_dupe_8grams 0 · frac_chars_dupe_9grams 0 · frac_chars_dupe_10grams 0 · frac_chars_replacement_symbols 0 · frac_chars_digital 0.00915 · frac_chars_whitespace 0.196429 · size_file_byte 952 · num_lines 40 · num_chars_line_max 74 · num_chars_line_mean 23.8 · frac_chars_alphabet 0.631373 · frac_chars_comments 0 · cate_xml_start 0 · frac_lines_dupe_lines 0.285714 · cate_autogen 0 · frac_lines_long_string 0 · frac_chars_string_length 0.543067 · frac_chars_long_word_length 0.285714 · frac_lines_string_concat 0 · cate_encoded_data 0 · frac_chars_hex_words 0 · frac_lines_prompt_comments 0 · frac_lines_assert 0

**`qsc_codepython_*_quality_signal`:** cate_ast 1 · frac_lines_func_ratio 0 · cate_var_zero false · frac_lines_pass 0 · frac_lines_import 0 · frac_lines_simplefunc 0 · score_lines_no_logic 0 · frac_lines_print 0.085714

**Raw signal columns** (without the `_quality_signal` suffix): all 0, except `qsc_code_frac_words_unique` and `qsc_code_frac_lines_string_concat` (null).

**effective:** 1 · **hits:** 0
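pgai.py above is declarative: it only assigns module-level names (`description`, `group`, `sysconfig`, `includes`, `devices`, `startupcode`) that the NICOS framework collects when it loads the setup. A hedged sketch of reading such a file outside NICOS — the `device` function here is a stand-in that merely records its arguments, not the real NICOS device factory:

```python
def device(cls, **params):
    # Stand-in for the NICOS device factory: keep the class path and params.
    return {"class": cls, **params}

namespace = {"device": device}
with open("pgai.py") as f:
    exec(f.read(), namespace)   # the setup file is plain Python assignments

print(namespace["description"])
print(sorted(namespace["devices"]))  # ['chnsink', 'csvsink', 'mcasink']
```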
---

**hexsha:** `0ace54f568ea92472966bb73d6fa4f6d624bebbf` · **size:** 6,859 · **ext:** py · **lang:** Python

|  | repo_path | repo_name | repo_head_hexsha | licenses | count | event_min_datetime | event_max_datetime |
|---|---|---|---|---|---|---|---|
| max_stars | official/nlp/transformer/utils/tokenizer_test.py | hjkim-haga/TF-OD-API | 22ac477ff4dfb93fe7a32c94b5f0b1e74330902b | ["Apache-2.0"] | 1 | 2021-05-22T12:50:50.000Z | 2021-05-22T12:50:50.000Z |
| max_issues | official/nlp/transformer/utils/tokenizer_test.py | DemonDamon/mask-detection-based-on-tf2odapi | 192ae544169c1230c21141c033800aa1bd94e9b6 | ["MIT"] | null | null | null |
| max_forks | official/nlp/transformer/utils/tokenizer_test.py | DemonDamon/mask-detection-based-on-tf2odapi | 192ae544169c1230c21141c033800aa1bd94e9b6 | ["MIT"] | null | null | null |

**content:**

```python
# Copyright 2021 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Test Subtokenizer and string helper methods."""

import collections
import tempfile

import tensorflow as tf

from official.nlp.transformer.utils import tokenizer


class SubtokenizerTest(tf.test.TestCase):

  def _init_subtokenizer(self, vocab_list):
    temp_file = tempfile.NamedTemporaryFile(delete=False)
    with tf.io.gfile.GFile(temp_file.name, "w") as w:
      for subtoken in vocab_list:
        w.write("'%s'" % subtoken)
        w.write("\n")
    return tokenizer.Subtokenizer(temp_file.name, reserved_tokens=[])

  def test_encode(self):
    vocab_list = ["123_", "test", "ing_"]
    subtokenizer = self._init_subtokenizer(vocab_list)
    s = "testing 123"
    encoded_list = subtokenizer.encode(s)
    self.assertEqual([1, 2, 0], encoded_list)

  def test_decode(self):
    vocab_list = ["123_", "test", "ing_"]
    subtokenizer = self._init_subtokenizer(vocab_list)
    encoded_list = [1, 2, 0]  # testing 123
    decoded_str = subtokenizer.decode(encoded_list)
    self.assertEqual("testing 123", decoded_str)

  def test_subtoken_ids_to_tokens(self):
    vocab_list = ["123_", "test", "ing_"]
    subtokenizer = self._init_subtokenizer(vocab_list)
    encoded_list = [1, 2, 0]  # testing 123
    token_list = subtokenizer._subtoken_ids_to_tokens(encoded_list)
    self.assertEqual([u"testing", u"123"], token_list)


class StringHelperTest(tf.test.TestCase):

  def test_split_string_to_tokens(self):
    text = "test? testing 123."
    tokens = tokenizer._split_string_to_tokens(text,
                                               tokenizer._ALPHANUMERIC_CHAR_SET)
    self.assertEqual(["test", "? ", "testing", "123", "."], tokens)

  def test_join_tokens_to_string(self):
    tokens = ["test", "? ", "testing", "123", "."]
    s = tokenizer._join_tokens_to_string(tokens,
                                         tokenizer._ALPHANUMERIC_CHAR_SET)
    self.assertEqual("test? testing 123.", s)

  def test_escape_token(self):
    token = u"abc_\\4"
    alphabet = set("abc_\\u;")
    escaped_token = tokenizer._escape_token(token, alphabet)
    self.assertEqual("abc\\u\\\\\\52;_", escaped_token)

  def test_unescape_token(self):
    escaped_token = u"Underline: \\u, Backslash: \\\\, Unicode: \\52;"
    unescaped_token = tokenizer._unescape_token(escaped_token)
    self.assertEqual("Underline: _, Backslash: \\, Unicode: 4", unescaped_token)

  def test_list_to_index_dict(self):
    lst = ["test", "strings"]
    d = tokenizer._list_to_index_dict(lst)
    self.assertDictEqual({"test": 0, "strings": 1}, d)

  def test_split_token_to_subtokens(self):
    token = "abc"
    subtoken_dict = {"a": 0, "b": 1, "c": 2, "ab": 3}
    max_subtoken_length = 2
    subtokens = tokenizer._split_token_to_subtokens(token, subtoken_dict,
                                                    max_subtoken_length)
    self.assertEqual(["ab", "c"], subtokens)

  def test_generate_alphabet_dict(self):
    s = ["testing", "123"]
    reserved_tokens = ["???"]
    alphabet = tokenizer._generate_alphabet_dict(s, reserved_tokens)
    self.assertIn("?", alphabet)
    self.assertIn("t", alphabet)
    self.assertIn("e", alphabet)
    self.assertIn("s", alphabet)
    self.assertIn("i", alphabet)
    self.assertIn("n", alphabet)
    self.assertIn("g", alphabet)
    self.assertIn("1", alphabet)
    self.assertIn("2", alphabet)
    self.assertIn("3", alphabet)

  def test_count_and_gen_subtokens(self):
    token_counts = {"abc": 5}
    alphabet = set("abc_")
    subtoken_dict = {"a": 0, "b": 1, "c": 2, "_": 3}
    max_subtoken_length = 2
    subtoken_counts = tokenizer._count_and_gen_subtokens(
        token_counts, alphabet, subtoken_dict, max_subtoken_length)
    self.assertIsInstance(subtoken_counts, collections.defaultdict)
    self.assertDictEqual(
        {
            "a": 5,
            "b": 5,
            "c": 5,
            "_": 5,
            "ab": 5,
            "bc": 5,
            "c_": 5,
            "abc": 5,
            "bc_": 5,
            "abc_": 5
        }, subtoken_counts)

  def test_filter_and_bucket_subtokens(self):
    subtoken_counts = collections.defaultdict(int, {
        "a": 2,
        "b": 4,
        "c": 1,
        "ab": 6,
        "ac": 3,
        "abbc": 5
    })
    min_count = 3
    subtoken_buckets = tokenizer._filter_and_bucket_subtokens(
        subtoken_counts, min_count)
    self.assertEqual(len(subtoken_buckets[0]), 0)
    self.assertEqual(set("b"), subtoken_buckets[1])
    self.assertEqual(set(["ab", "ac"]), subtoken_buckets[2])
    self.assertEqual(len(subtoken_buckets[3]), 0)
    self.assertEqual(set(["abbc"]), subtoken_buckets[4])

  def test_gen_new_subtoken_list(self):
    subtoken_counts = collections.defaultdict(int, {
        "translate": 10,
        "t": 40,
        "tr": 16,
        "tra": 12
    })
    min_count = 5
    alphabet = set("translate")
    reserved_tokens = ["reserved", "tokens"]
    subtoken_list, max_token_length = tokenizer._gen_new_subtoken_list(
        subtoken_counts, min_count, alphabet, reserved_tokens)
    # Check that "tra" isn"t in the list (its count should be decremented to 2,
    # so it should not be added to the canddiate list).
    self.assertNotIn("tra", subtoken_list)
    self.assertIn("tr", subtoken_list)
    self.assertIn("t", subtoken_list)
    self.assertEqual(len("translate"), max_token_length)

  def test_generate_subtokens(self):
    token_counts = {"ab": 1, "bc": 3, "abc": 5}
    alphabet = set("abc_")
    min_count = 100
    num_iterations = 1
    reserved_tokens = ["reserved", "tokens"]
    vocab_list = tokenizer._generate_subtokens(token_counts, alphabet,
                                               min_count, num_iterations,
                                               reserved_tokens)
    # Check that reserved tokens are at the front of the list
    self.assertEqual(vocab_list[:2], reserved_tokens)
    # Check that each character in alphabet is in the vocab list
    for c in alphabet:
      self.assertIn(c, vocab_list)


if __name__ == "__main__":
  tf.test.main()
```

**Line stats:** avg_line_length 33.458537 · max_line_length 81 · alphanum_fraction 0.626185

**`qsc_code_*_quality_signal`:** num_words 830 · num_chars 6,859 · mean_word_length 4.926506 · frac_words_unique 0.238554 · frac_chars_top_2grams 0.055026 · frac_chars_top_3grams 0.048912 · frac_chars_top_4grams 0.011739 · frac_chars_dupe_5grams 0.167523 · frac_chars_dupe_6grams 0.134018 · frac_chars_dupe_7grams 0.096845 · frac_chars_dupe_8grams 0.096845 · frac_chars_dupe_9grams 0.096845 · frac_chars_dupe_10grams 0.058694 · frac_chars_replacement_symbols 0 · frac_chars_digital 0.023392 · frac_chars_whitespace 0.252078 · size_file_byte 6,859 · num_lines 204 · num_chars_line_max 82 · num_chars_line_mean 33.622549 · frac_chars_alphabet 0.773684 · frac_chars_comments 0.129757 · cate_xml_start 0 · frac_lines_dupe_lines 0.13986 · cate_autogen 0 · frac_lines_long_string 0 · frac_chars_string_length 0.079254 · frac_chars_long_word_length 0 · frac_lines_string_concat 0 · cate_encoded_data 0 · frac_chars_hex_words 0 · frac_lines_prompt_comments 0 · frac_lines_assert 0.223776

**`qsc_codepython_*_quality_signal`:** cate_ast 1 · frac_lines_func_ratio 0.104895 · cate_var_zero false · frac_lines_pass 0 · frac_lines_import 0.027972 · frac_lines_simplefunc 0 · score_lines_no_logic 0.153846 · frac_lines_print 0

**Raw signal columns** (without the `_quality_signal` suffix): all 0, except `qsc_code_frac_words_unique` and `qsc_code_frac_lines_string_concat` (null).

**effective:** 1 · **hits:** 0
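The escaping scheme exercised by `test_escape_token` above can be read off its expected value: `_` becomes `\u`, `\` becomes `\\`, any character outside the alphabet becomes `\<ordinal>;`, and a literal `_` terminates the token. A sketch consistent with those expectations (`escape_token` is a hypothetical stand-in for the private helper in the `tokenizer` module):

```python
def escape_token(token, alphabet):
    # '\' and '_' are rewritten first so they can serve as escape characters;
    # anything outside the alphabet is encoded as '\<ordinal>;'.
    token = token.replace("\\", "\\\\").replace("_", "\\u")
    out = [c if c in alphabet and c != "\n" else r"\%d;" % ord(c) for c in token]
    return "".join(out) + "_"

alphabet = set("abc_\\u;")
print(escape_token("abc_\\4", alphabet))  # abc\u\\\52;_  (matches the test)
```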
---

**hexsha:** `0acf3366802d8714bb15485c54ab7f3de9aac778` · **size:** 2,776 · **ext:** py · **lang:** Python

|  | repo_path | repo_name | repo_head_hexsha | licenses | count | event_min_datetime | event_max_datetime |
|---|---|---|---|---|---|---|---|
| max_stars | Z - Tool Box/LaZagne/Windows/lazagne/softwares/windows/ppypykatz.py | dfirpaul/Active-Directory-Exploitation-Cheat-Sheet-1 | 1dcf54522e9d20711ff1114550dc2893ed3e9ed0 | ["MIT"] | 1,290 | 2020-05-28T21:24:43.000Z | 2022-03-31T16:38:43.000Z |
| max_issues | Z - Tool Box/LaZagne/Windows/lazagne/softwares/windows/ppypykatz.py | dfirpaul/Active-Directory-Exploitation-Cheat-Sheet-1 | 1dcf54522e9d20711ff1114550dc2893ed3e9ed0 | ["MIT"] | 1 | 2020-07-03T21:14:52.000Z | 2020-07-03T21:14:52.000Z |
| max_forks | Z - Tool Box/LaZagne/Windows/lazagne/softwares/windows/ppypykatz.py | dfirpaul/Active-Directory-Exploitation-Cheat-Sheet-1 | 1dcf54522e9d20711ff1114550dc2893ed3e9ed0 | ["MIT"] | 280 | 2020-05-29T17:28:38.000Z | 2022-03-31T13:54:15.000Z |

**content:**

```python
# -*- coding: utf-8 -*-
# Thanks to @skelsec for his awesome tool Pypykatz
# Checks his project here: https://github.com/skelsec/pypykatz
import codecs
import traceback

from lazagne.config.module_info import ModuleInfo
from lazagne.config.constant import constant
from pypykatz.pypykatz import pypykatz


class Pypykatz(ModuleInfo):
    """
    Pypykatz dumps all secrets from the lsass.exe memory
    It does not work if:
    - LSASS is running as a protected process
    - A security product blocks this access
    """

    def __init__(self):
        ModuleInfo.__init__(self, 'pypykatz', 'windows', system_module=True)

    def run(self):
        mimi = None
        try:
            mimi = pypykatz.go_live()
        except Exception:
            self.debug(traceback.format_exc())

        if mimi:
            results = {}
            logon_sessions = mimi.to_dict().get('logon_sessions', [])
            for logon_session in logon_sessions:
                # Right now kerberos_creds, dpapi_creds results are not used
                user = logon_sessions[logon_session]

                # Get cleartext password
                for i in ['credman_creds', 'ssp_creds', 'livessp_creds', 'tspkg_creds', 'wdigest_creds']:
                    for data in user.get(i, []):
                        if all((data['username'], data['password'])):
                            login = data['username']
                            if login not in results:
                                results[login] = {}
                            results[login]['Type'] = i
                            results[login]['Domain'] = data.get('domainname', 'N/A')
                            results[login]['Password'] = data['password']

                # msv_creds to get sha1 user hash
                for data in user.get('msv_creds', []):
                    if data['username']:
                        login = data['username']
                    else:
                        login = user['username']

                    if login not in results:
                        results[login] = {}

                    if data['SHAHash']:
                        results[login]['Shahash'] = codecs.encode(data['SHAHash'], 'hex')
                    if data['LMHash']:
                        results[login]['Lmhash'] = codecs.encode(data['LMHash'], 'hex')
                    if data['NThash']:
                        results[login]['Nthash'] = codecs.encode(data['NThash'], 'hex')

            constant.pypykatz_result = results
            pwd_found = []
            for user in results:
                results[user]['Login'] = user
                pwd_found.append(results[user])

            return pwd_found
```

**Line stats:** avg_line_length 36.526316 · max_line_length 106 · alphanum_fraction 0.501801

**`qsc_code_*_quality_signal`:** num_words 273 · num_chars 2,776 · mean_word_length 4.985348 · frac_words_unique 0.410256 · frac_chars_top_2grams 0.070536 · frac_chars_top_3grams 0.035268 · frac_chars_top_4grams 0.019104 · frac_chars_dupe_5grams 0.080823 · frac_chars_dupe_6grams 0.057311 · frac_chars_dupe_7grams 0.057311 · frac_chars_dupe_8grams 0.057311 · frac_chars_dupe_9grams 0 · frac_chars_dupe_10grams 0 · frac_chars_replacement_symbols 0 · frac_chars_digital 0.001189 · frac_chars_whitespace 0.394092 · size_file_byte 2,776 · num_lines 75 · num_chars_line_max 107 · num_chars_line_mean 37.013333 · frac_chars_alphabet 0.807967 · frac_chars_comments 0.144813 · cate_xml_start 0 · frac_lines_dupe_lines 0.12766 · cate_autogen 0 · frac_lines_long_string 0 · frac_chars_string_length 0.112533 · frac_chars_long_word_length 0 · frac_lines_string_concat 0 · cate_encoded_data 0 · frac_chars_hex_words 0 · frac_lines_prompt_comments 0 · frac_lines_assert 0

**`qsc_codepython_*_quality_signal`:** cate_ast 1 · frac_lines_func_ratio 0.042553 · cate_var_zero false · frac_lines_pass 0.042553 · frac_lines_import 0.106383 · frac_lines_simplefunc 0 · score_lines_no_logic 0.191489 · frac_lines_print 0

**Raw signal columns** (without the `_quality_signal` suffix): all 0, except `qsc_code_frac_words_unique` and `qsc_code_frac_lines_string_concat` (null).

**effective:** 1 · **hits:** 0
---

**hexsha:** `0acf54e8a20fd816eda3589c3b616626bb4f33fb` · **size:** 14,981 · **ext:** py · **lang:** Python

|  | repo_path | repo_name | repo_head_hexsha | licenses | count | event_min_datetime | event_max_datetime |
|---|---|---|---|---|---|---|---|
| max_stars | test/test_discogs.py | mglukhovsky/beets | 889e30c056a609cf71c8c8200259520230545222 | ["MIT"] | null | null | null |
| max_issues | test/test_discogs.py | mglukhovsky/beets | 889e30c056a609cf71c8c8200259520230545222 | ["MIT"] | null | null | null |
| max_forks | test/test_discogs.py | mglukhovsky/beets | 889e30c056a609cf71c8c8200259520230545222 | ["MIT"] | null | null | null |

**content:**

```python
# -*- coding: utf-8 -*-
# This file is part of beets.
# Copyright 2016, Adrian Sampson.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.

"""Tests for discogs plugin.
"""
from __future__ import division, absolute_import, print_function

import unittest
from test import _common
from test._common import Bag
from test.helper import capture_log

from beetsplug.discogs import DiscogsPlugin


class DGAlbumInfoTest(_common.TestCase):
    def _make_release(self, tracks=None):
        """Returns a Bag that mimics a discogs_client.Release. The list
        of elements on the returned Bag is incomplete, including just
        those required for the tests on this class."""
        data = {
            'id': 'ALBUM ID',
            'uri': 'ALBUM URI',
            'title': 'ALBUM TITLE',
            'year': '3001',
            'artists': [{
                'name': 'ARTIST NAME',
                'id': 'ARTIST ID',
                'join': ','
            }],
            'formats': [{
                'descriptions': ['FORMAT DESC 1', 'FORMAT DESC 2'],
                'name': 'FORMAT',
                'qty': 1
            }],
            'styles': [
                'STYLE1', 'STYLE2'
            ],
            'labels': [{
                'name': 'LABEL NAME',
                'catno': 'CATALOG NUMBER',
            }],
            'tracklist': []
        }

        if tracks:
            for recording in tracks:
                data['tracklist'].append(recording)

        return Bag(data=data,
                   # Make some fields available as properties, as they are
                   # accessed by DiscogsPlugin methods.
                   title=data['title'],
                   artists=[Bag(data=d) for d in data['artists']])

    def _make_track(self, title, position='', duration='', type_=None):
        track = {
            'title': title,
            'position': position,
            'duration': duration
        }
        if type_ is not None:
            # Test samples on discogs_client do not have a 'type_' field, but
            # the API seems to return it. Values: 'track' for regular tracks,
            # 'heading' for descriptive texts (ie. not real tracks - 12.13.2).
            track['type_'] = type_

        return track

    def _make_release_from_positions(self, positions):
        """Return a Bag that mimics a discogs_client.Release with a
        tracklist where tracks have the specified `positions`."""
        tracks = [self._make_track('TITLE%s' % i, position) for
                  (i, position) in enumerate(positions, start=1)]
        return self._make_release(tracks)

    def test_parse_media_for_tracks(self):
        tracks = [self._make_track('TITLE ONE', '1', '01:01'),
                  self._make_track('TITLE TWO', '2', '02:02')]
        release = self._make_release(tracks=tracks)

        d = DiscogsPlugin().get_album_info(release)
        t = d.tracks
        self.assertEqual(d.media, 'FORMAT')
        self.assertEqual(t[0].media, d.media)
        self.assertEqual(t[1].media, d.media)

    def test_parse_medium_numbers_single_medium(self):
        release = self._make_release_from_positions(['1', '2'])
        d = DiscogsPlugin().get_album_info(release)
        t = d.tracks

        self.assertEqual(d.mediums, 1)
        self.assertEqual(t[0].medium, 1)
        self.assertEqual(t[0].medium_total, 2)
        self.assertEqual(t[1].medium, 1)
        self.assertEqual(t[0].medium_total, 2)

    def test_parse_medium_numbers_two_mediums(self):
        release = self._make_release_from_positions(['1-1', '2-1'])
        d = DiscogsPlugin().get_album_info(release)
        t = d.tracks

        self.assertEqual(d.mediums, 2)
        self.assertEqual(t[0].medium, 1)
        self.assertEqual(t[0].medium_total, 1)
        self.assertEqual(t[1].medium, 2)
        self.assertEqual(t[1].medium_total, 1)

    def test_parse_medium_numbers_two_mediums_two_sided(self):
        release = self._make_release_from_positions(['A1', 'B1', 'C1'])
        d = DiscogsPlugin().get_album_info(release)
        t = d.tracks

        self.assertEqual(d.mediums, 2)
        self.assertEqual(t[0].medium, 1)
        self.assertEqual(t[0].medium_total, 2)
        self.assertEqual(t[0].medium_index, 1)
        self.assertEqual(t[1].medium, 1)
        self.assertEqual(t[1].medium_total, 2)
        self.assertEqual(t[1].medium_index, 2)
        self.assertEqual(t[2].medium, 2)
        self.assertEqual(t[2].medium_total, 1)
        self.assertEqual(t[2].medium_index, 1)

    def test_parse_track_indices(self):
        release = self._make_release_from_positions(['1', '2'])
        d = DiscogsPlugin().get_album_info(release)
        t = d.tracks

        self.assertEqual(t[0].medium_index, 1)
        self.assertEqual(t[0].index, 1)
        self.assertEqual(t[0].medium_total, 2)
        self.assertEqual(t[1].medium_index, 2)
        self.assertEqual(t[1].index, 2)
        self.assertEqual(t[1].medium_total, 2)

    def test_parse_track_indices_several_media(self):
        release = self._make_release_from_positions(['1-1', '1-2', '2-1',
                                                     '3-1'])
        d = DiscogsPlugin().get_album_info(release)
        t = d.tracks

        self.assertEqual(d.mediums, 3)
        self.assertEqual(t[0].medium_index, 1)
        self.assertEqual(t[0].index, 1)
        self.assertEqual(t[0].medium_total, 2)
        self.assertEqual(t[1].medium_index, 2)
        self.assertEqual(t[1].index, 2)
        self.assertEqual(t[1].medium_total, 2)
        self.assertEqual(t[2].medium_index, 1)
        self.assertEqual(t[2].index, 3)
        self.assertEqual(t[2].medium_total, 1)
        self.assertEqual(t[3].medium_index, 1)
        self.assertEqual(t[3].index, 4)
        self.assertEqual(t[3].medium_total, 1)

    def test_parse_position(self):
        """Test the conversion of discogs `position` to medium, medium_index
        and subtrack_index."""
        # List of tuples (discogs_position, (medium, medium_index, subindex)
        positions = [('1', (None, '1', None)),
                     ('A12', ('A', '12', None)),
                     ('12-34', ('12-', '34', None)),
                     ('CD1-1', ('CD1-', '1', None)),
                     ('1.12', (None, '1', '12')),
                     ('12.a', (None, '12', 'A')),
                     ('12.34', (None, '12', '34')),
                     ('1ab', (None, '1', 'AB')),
                     # Non-standard
                     ('IV', ('IV', None, None)),
                     ]

        d = DiscogsPlugin()
        for position, expected in positions:
            self.assertEqual(d.get_track_index(position), expected)

    def test_parse_tracklist_without_sides(self):
        """Test standard Discogs position 12.2.9#1: "without sides"."""
        release = self._make_release_from_positions(['1', '2', '3'])
        d = DiscogsPlugin().get_album_info(release)

        self.assertEqual(d.mediums, 1)
        self.assertEqual(len(d.tracks), 3)

    def test_parse_tracklist_with_sides(self):
        """Test standard Discogs position 12.2.9#2: "with sides"."""
        release = self._make_release_from_positions(['A1', 'A2', 'B1', 'B2'])
        d = DiscogsPlugin().get_album_info(release)

        self.assertEqual(d.mediums, 1)  # 2 sides = 1 LP
        self.assertEqual(len(d.tracks), 4)

    def test_parse_tracklist_multiple_lp(self):
        """Test standard Discogs position 12.2.9#3: "multiple LP"."""
        release = self._make_release_from_positions(['A1', 'A2', 'B1', 'C1'])
        d = DiscogsPlugin().get_album_info(release)

        self.assertEqual(d.mediums, 2)  # 3 sides = 1 LP + 1 LP
        self.assertEqual(len(d.tracks), 4)

    def test_parse_tracklist_multiple_cd(self):
        """Test standard Discogs position 12.2.9#4: "multiple CDs"."""
        release = self._make_release_from_positions(['1-1', '1-2', '2-1',
                                                     '3-1'])
        d = DiscogsPlugin().get_album_info(release)

        self.assertEqual(d.mediums, 3)
        self.assertEqual(len(d.tracks), 4)

    def test_parse_tracklist_non_standard(self):
        """Test non standard Discogs position."""
        release = self._make_release_from_positions(['I', 'II', 'III', 'IV'])
        d = DiscogsPlugin().get_album_info(release)

        self.assertEqual(d.mediums, 1)
        self.assertEqual(len(d.tracks), 4)

    def test_parse_tracklist_subtracks_dot(self):
        """Test standard Discogs position 12.2.9#5: "sub tracks, dots"."""
        release = self._make_release_from_positions(['1', '2.1', '2.2', '3'])
        d = DiscogsPlugin().get_album_info(release)

        self.assertEqual(d.mediums, 1)
        self.assertEqual(len(d.tracks), 3)

        release = self._make_release_from_positions(['A1', 'A2.1', 'A2.2',
                                                     'A3'])
        d = DiscogsPlugin().get_album_info(release)

        self.assertEqual(d.mediums, 1)
        self.assertEqual(len(d.tracks), 3)

    def test_parse_tracklist_subtracks_letter(self):
        """Test standard Discogs position 12.2.9#5: "sub tracks, letter"."""
        release = self._make_release_from_positions(['A1', 'A2a', 'A2b', 'A3'])
        d = DiscogsPlugin().get_album_info(release)

        self.assertEqual(d.mediums, 1)
        self.assertEqual(len(d.tracks), 3)

        release = self._make_release_from_positions(['A1', 'A2.a', 'A2.b',
                                                     'A3'])
        d = DiscogsPlugin().get_album_info(release)

        self.assertEqual(d.mediums, 1)
        self.assertEqual(len(d.tracks), 3)

    def test_parse_tracklist_subtracks_extra_material(self):
        """Test standard Discogs position 12.2.9#6: "extra material"."""
        release = self._make_release_from_positions(['1', '2', 'Video 1'])
        d = DiscogsPlugin().get_album_info(release)

        self.assertEqual(d.mediums, 2)
        self.assertEqual(len(d.tracks), 3)

    def test_parse_tracklist_subtracks_indices(self):
        """Test parsing of subtracks that include index tracks."""
        release = self._make_release_from_positions(['', '', '1.1', '1.2'])
        # Track 1: Index track with medium title
        release.data['tracklist'][0]['title'] = 'MEDIUM TITLE'
        # Track 2: Index track with track group title
        release.data['tracklist'][1]['title'] = 'TRACK GROUP TITLE'

        d = DiscogsPlugin().get_album_info(release)
        self.assertEqual(d.mediums, 1)
        self.assertEqual(d.tracks[0].disctitle, 'MEDIUM TITLE')
        self.assertEqual(len(d.tracks), 1)
        self.assertEqual(d.tracks[0].title, 'TRACK GROUP TITLE')

    def test_parse_tracklist_subtracks_nested_logical(self):
        """Test parsing of subtracks defined inside a index track that are
        logical subtracks (ie. should be grouped together into a single track).
        """
        release = self._make_release_from_positions(['1', '', '3'])
        # Track 2: Index track with track group title, and sub_tracks
        release.data['tracklist'][1]['title'] = 'TRACK GROUP TITLE'
        release.data['tracklist'][1]['sub_tracks'] = [
            self._make_track('TITLE ONE', '2.1', '01:01'),
            self._make_track('TITLE TWO', '2.2', '02:02')
        ]

        d = DiscogsPlugin().get_album_info(release)
        self.assertEqual(d.mediums, 1)
        self.assertEqual(len(d.tracks), 3)
        self.assertEqual(d.tracks[1].title, 'TRACK GROUP TITLE')

    def test_parse_tracklist_subtracks_nested_physical(self):
        """Test parsing of subtracks defined inside a index track that are
        physical subtracks (ie. should not be grouped together).
        """
        release = self._make_release_from_positions(['1', '', '4'])
        # Track 2: Index track with track group title, and sub_tracks
        release.data['tracklist'][1]['title'] = 'TRACK GROUP TITLE'
        release.data['tracklist'][1]['sub_tracks'] = [
            self._make_track('TITLE ONE', '2', '01:01'),
            self._make_track('TITLE TWO', '3', '02:02')
        ]

        d = DiscogsPlugin().get_album_info(release)
        self.assertEqual(d.mediums, 1)
        self.assertEqual(len(d.tracks), 4)
        self.assertEqual(d.tracks[1].title, 'TITLE ONE')
        self.assertEqual(d.tracks[2].title, 'TITLE TWO')

    def test_parse_tracklist_disctitles(self):
        """Test parsing of index tracks that act as disc titles."""
        release = self._make_release_from_positions(['', '1-1', '1-2', '',
                                                     '2-1'])
        # Track 1: Index track with medium title (Cd1)
        release.data['tracklist'][0]['title'] = 'MEDIUM TITLE CD1'
        # Track 4: Index track with medium title (Cd2)
        release.data['tracklist'][3]['title'] = 'MEDIUM TITLE CD2'

        d = DiscogsPlugin().get_album_info(release)
        self.assertEqual(d.mediums, 2)
        self.assertEqual(d.tracks[0].disctitle, 'MEDIUM TITLE CD1')
        self.assertEqual(d.tracks[1].disctitle, 'MEDIUM TITLE CD1')
        self.assertEqual(d.tracks[2].disctitle, 'MEDIUM TITLE CD2')
        self.assertEqual(len(d.tracks), 3)

    def test_parse_minimal_release(self):
        """Test parsing of a release with the minimal amount of information."""
        data = {'id': 123,
                'tracklist': [self._make_track('A', '1', '01:01')],
                'artists': [{'name': 'ARTIST NAME', 'id': 321, 'join': ''}],
                'title': 'TITLE'}
        release = Bag(data=data,
                      title=data['title'],
                      artists=[Bag(data=d) for d in data['artists']])
        d = DiscogsPlugin().get_album_info(release)
        self.assertEqual(d.artist, 'ARTIST NAME')
        self.assertEqual(d.album, 'TITLE')
        self.assertEqual(len(d.tracks), 1)

    def test_parse_release_without_required_fields(self):
        """Test parsing of a release that does not have the required fields."""
        release = Bag(data={}, refresh=lambda *args: None)
        with capture_log() as logs:
            d = DiscogsPlugin().get_album_info(release)

        self.assertEqual(d, None)
        self.assertIn('Release does not contain the required fields', logs[0])


def suite():
    return unittest.TestLoader().loadTestsFromName(__name__)

if __name__ == '__main__':
    unittest.main(defaultTest='suite')
```

**Line stats:** avg_line_length 41.269972 · max_line_length 79 · alphanum_fraction 0.59235

**`qsc_code_*_quality_signal`:** num_words 1,866 · num_chars 14,981 · mean_word_length 4.594855 · frac_words_unique 0.146302 · frac_chars_top_2grams 0.145206 · frac_chars_top_3grams 0.069046 · frac_chars_top_4grams 0.05645 · frac_chars_dupe_5grams 0.635059 · frac_chars_dupe_6grams 0.607534 · frac_chars_dupe_7grams 0.572662 · frac_chars_dupe_8grams 0.513646 · frac_chars_dupe_9grams 0.456146 · frac_chars_dupe_10grams 0.405412 · frac_chars_replacement_symbols 0 · frac_chars_digital 0.032152 · frac_chars_whitespace 0.271277 · size_file_byte 14,981 · num_lines 362 · num_chars_line_max 80 · num_chars_line_mean 41.383978 · frac_chars_alphabet 0.753229 · frac_chars_comments 0.177959 · cate_xml_start 0 · frac_lines_dupe_lines 0.432 · cate_autogen 0 · frac_lines_long_string 0 · frac_chars_string_length 0.08492 · frac_chars_long_word_length 0 · frac_lines_string_concat 0 · cate_encoded_data 0 · frac_chars_hex_words 0 · frac_lines_prompt_comments 0 · frac_lines_assert 0.336

**`qsc_codepython_*_quality_signal`:** cate_ast 1 · frac_lines_func_ratio 0.1 · cate_var_zero false · frac_lines_pass 0 · frac_lines_import 0.024 · frac_lines_simplefunc 0.004 · score_lines_no_logic 0.144 · frac_lines_print 0.004

**Raw signal columns** (without the `_quality_signal` suffix): all 0, except `qsc_code_frac_words_unique` and `qsc_code_frac_lines_string_concat` (null).

**effective:** 1 · **hits:** 0
---

**hexsha:** `0ad02fbe661ef723ec6b1d7108a2d41a85831a5b` · **size:** 17,018 · **ext:** py · **lang:** Python

|  | repo_path | repo_name | repo_head_hexsha | licenses | count | event_min_datetime | event_max_datetime |
|---|---|---|---|---|---|---|---|
| max_stars | darknet2ncnn.py | nihui/gen-ncnn-models | 18523f1920d9afc44ce3058087c07e09f28aa151 | ["BSD-2-Clause"] | 4 | 2019-12-24T15:16:18.000Z | 2021-05-14T08:12:17.000Z |
| max_issues | darknet2ncnn.py | nihui/gen-ncnn-models | 18523f1920d9afc44ce3058087c07e09f28aa151 | ["BSD-2-Clause"] | null | null | null |
| max_forks | darknet2ncnn.py | nihui/gen-ncnn-models | 18523f1920d9afc44ce3058087c07e09f28aa151 | ["BSD-2-Clause"] | null | null | null |

**content:**

```python
#! /usr/bin/env python
# coding: utf-8

import configparser
import numpy as np
import re, sys, os
from graph import MyGraph
from collections import OrderedDict


def unique_config_sections(config_file):
    """Convert all config sections to have unique names.
    Adds unique suffixes to config sections for compability with configparser.
    """
    from collections import defaultdict
    import io
    section_counters = defaultdict(int)
    output_stream = io.StringIO()
    with open(config_file) as fin:
        for line in fin:
            if line.startswith('['):
                section = line.strip().strip('[]')
                _section = section + '_' + str(section_counters[section])
                section_counters[section] += 1
                line = line.replace(section, _section)
            output_stream.write(line)
    output_stream.seek(0)
    return output_stream


def getFilters(mydict, name):
    #print('find filters for ', name)
    if hasattr(mydict[name], 'filters'):
        return mydict[name].filters
    else:
        assert len(mydict[name].input) >= 1
        return getFilters(mydict, mydict[name].input[0])


def readfile(f, len, msg):
    print(" %s read %d bytes" % (msg, len))
    return f.read(len)


def buildGraph(config_path, weights_path):
    unique_config_file = unique_config_sections(config_path)
    cfg_parser = configparser.ConfigParser()
    cfg_parser.read_file(unique_config_file)

    weights_file = open(weights_path, 'rb')

    # read out major, minor, revision, net.seen
    readfile(weights_file, (4*4), 'head')

    mydict = OrderedDict()
    # record the output of the original layer
    mylist = []

    count = 4
    import queue
    for _section in cfg_parser.sections():
        sec_q = queue.Queue(0)
        sec_q.put(cfg_parser[_section])

        while not sec_q.empty():
            sec = sec_q.get()
            section = sec.name
            print('Parsing section {}'.format(section))

            # this section will can be a subsection
            if section.startswith('activation') or section.endswith('activation'):
                activation = sec.get('activation', fallback = 'logistic')
                if activation == 'linear':
                    pass
                elif activation == 'linear' or activation == 'leaky' or activation == 'relu':
                    node = MyGraph.MyNode()
                    node.name = section
                    node.op = 'Leaky'
                    if activation == 'linear':
                        node.slope = 1
                    elif activation == 'leaky':
                        node.slope = 0.1
                    elif activation == 'relu':
                        node.slope = 0
                    node.input = [prev_output]
                    node.input_norm = node.input
                    #node.attr = []
                    mydict[node.name] = node
                    prev_output = node.name
                    # prev_layer_filters no change
                else:
                    raise ValueError(
                        'Unknown activation function `{}` in section {}'.format(
                            activation, section))
                if section.startswith('activation'):
                    mylist.append(section)
            elif re.match(r'^(convolutional|depthwise|groupwise)_\d+$', section):
                if section.startswith('convolutional'):
                    conv = 'conv'
                    filters = sec.getint('filters', fallback = 1)
                    groups = 1
                    op = 'Conv2D'
                elif section.startswith('depthwise'):
                    conv = 'dconv'
                    filters = prev_layer_filters
                    multiplier = sec.getint('multiplier', fallback = 1)
                    assert multiplier == 1
                    groups = filters
                    op = 'DepthwiseConv2dNative'
                elif section.startswith('groupwise'):
                    conv = 'gconv'
                    filters = sec.getint('filters', fallback=1)
                    groups = sec.getint('groups', fallback = 1)
                    op = 'DepthwiseConv2dNative'

                size = sec.getint('size', fallback = 1)
                stride = sec.getint('stride', fallback = 1)
                pad = sec.getint('pad', fallback = 0)
                padding = sec.getint('padding', fallback = 0)
                activation = sec.get('activation', fallback = 'logistic')
                batch_normalize = sec.getint('batch_normalize', 0)

                # padding='same' is equivalent to Darknet pad=1
                # padding = 'same' if pad == 1 else 'valid'
                if pad:
                    padding = size//2

                # Setting weights.
                # Darknet serializes convolutional weights as:
                # [bias/beta, [gamma, mean, variance], conv_weights]
                #prev_layer_shape = prev_layer.shape
                # TODO: This assumes channel last dim_ordering.
                if conv == 'conv':
                    weights_shape = (size, size, prev_layer_filters, filters)
                    idx_tf2darknet = [0, 1, 2, 3]
                elif conv == 'dconv':
                    weights_shape = (size, size, filters)
                    idx_tf2darknet = [0, 1, 2]
                elif conv == 'gconv':
                    weights_shape = (size, size, prev_layer_filters//groups, filters//groups, groups)
                    idx_tf2darknet = [0, 1, 2, 3, 4]

                idxmap = {x: i for i, x in enumerate(idx_tf2darknet)}
                idx_dartnet2tf = [idxmap[i] for i in range(len(idxmap))]
                weights_size = np.product(weights_shape)
                print(' ' + conv, 'bn' if batch_normalize else ' ', activation, weights_shape)

                conv_bias = np.ndarray(
                    shape=(filters, ),
                    dtype=np.float32,
                    buffer=readfile(weights_file, (filters * 4), section+'-bias'))
                count += filters

                if batch_normalize:
                    bn_weights = np.ndarray(
                        shape=(3, filters),
                        dtype=np.float32,
                        buffer=readfile(weights_file, (filters * 12), section+'-batchnorm'))
                    count += 3 * filters

                    # TODO: Keras BatchNormalization mistakenly refers to var
                    # as std.
                    bn_weight_list = [
                        bn_weights[0],  # scale gamma
                        conv_bias,  # shift beta
                        bn_weights[1],  # running mean
                        bn_weights[2]  # running var
                    ]

                conv_weights = np.ndarray(
                    shape=[weights_shape[i] for i in idx_tf2darknet],
                    dtype=np.float32,
                    buffer=readfile(weights_file, (weights_size * 4), section+'-weights'))
                count += weights_size

                # DarkNet conv_weights are serialized Caffe-style:
                # (out_dim, in_dim, height, width)
                # We would like to set these to Tensorflow order:
                # (height, width, in_dim, out_dim)
                # TODO: Add check for Theano dim ordering.
                #print("the darknet shape is ", conv_weights.shape)
                conv_weights = np.transpose(conv_weights, idx_dartnet2tf)
                #print("the tf shape is ", conv_weights.shape)
                conv_weights = [conv_weights] if batch_normalize else [
                    conv_weights, conv_bias
                ]

                # Create nodes
                #conv_layer = np.zeros([1, 1, filters], dtype = np.float32)
                node = MyGraph.MyNode()
                node.name = section
                node.op = op
                node.input = [prev_output]
                node.input_norm = node.input
                node.kernel = conv_weights[0]
                node.padding = padding
                node.strides = [1,stride,stride,1]
                node.groups = groups
                node.filters = filters
                mydict[node.name] = node
                prev_output = node.name
                prev_layer_filters = filters

                if batch_normalize:
                    node = MyGraph.MyNode()
                    node.name = section + '_batch_normalize'
                    node.op = 'FusedBatchNorm'
                    node.input = [prev_output]
                    node.input_norm = node.input
                    #node.attr = []
                    node.gamma = bn_weights[0]
                    node.beta = conv_bias
                    node.mean = bn_weights[1]
                    node.variance = bn_weights[2]
                    mydict[node.name] = node
                    prev_output = node.name
                    # prev_layer_filters no change
                else:
                    node = MyGraph.MyNode()
                    node.name = section + '_bias'
                    node.op = 'BiasAdd'
                    node.input = [prev_output]
                    node.input_norm = node.input
                    #node.attr = []
                    node.bias = conv_bias
                    mydict[node.name] = node
                    prev_output = node.name

                if activation == 'linear':
                    mylist.append(prev_output)
                else:
                    tmp_parser = configparser.ConfigParser()
                    name = section + '_activation'
                    tmp_parser.add_section(name)
                    tmp_parser.set(name, 'activation', activation)
                    sec_q.put(tmp_parser[name])
                    mylist.append(name)
            elif section.startswith('shuffle'):
                node = MyGraph.MyNode()
                node.name = section
                node.op = 'Shuffle'
                node.input = [prev_output]
                node.input_norm = node.input
                node.groups = int(cfg_parser[section]['groups'])
                mydict[node.name] = node
                prev_output = node.name
                mylist.append(section)
            elif re.match(r'^(pooling|maxpool|avgpool)_\d+$', section):
                node = MyGraph.MyNode()
                node.stride = sec.getint('stride', fallback = 1)
                node.size = sec.getint('size', node.stride)
                node.padding = sec.getint('padding', fallback = (node.size-1)//2)
                if section.startswith('pooling'):
                    node.mode = str(cfg_parser[section]['mode'])
                    node.global_pooling = 0
                elif section.startswith('maxpool'):
                    node.mode = 'max'
                    node.global_pooling = 0
                elif section.startswith('avgpool'):
                    node.mode = 'avg'
                    node.global_pooling = 1
                node.name = section
                node.op = 'Pooling'
                node.input = [prev_output]
                node.input_norm = node.input
                mydict[node.name] = node
                prev_output = node.name
                #print('pooling ', vars(node))
                mylist.append(section)
            elif section.startswith('route'):
                ids = [int(i) for i in cfg_parser[section]['layers'].split(',')]
                node = MyGraph.MyNode()
                node.name = section
                node.op = 'NCNNConcat'
                node.input = [mylist[i] for i in ids]
                #print('mylist is ', mylist, 'the ids is ', ids, 'node input is ', node.input)
                node.input_norm = node.input
                node.axis = 0
                node.filters = sum([getFilters(mydict, mylist[i]) for i in ids])
                mydict[node.name] = node
                prev_output = node.name
                mylist.append(section)
                prev_layer_filters = node.filters
            elif section.startswith('reorg'):
                node = MyGraph.MyNode()
                node.name = section
                node.op = 'DarknetReorg'
                node.input = [prev_output]
                node.stride = sec.getint('stride', fallback = 1)
                node.input_norm = node.input
                node.filters = getFilters(mydict, node.input[0]) * node.stride * node.stride
                mydict[node.name] = node
                prev_output = node.name
                mylist.append(section)
                prev_layer_filters = node.filters
            elif re.match(r'^(shortcut)_\d+$', section):
                activation = sec.get('activation', fallback = 'logistic')
                from_ = sec.getint('from')
                node = MyGraph.MyNode()
                node.name = section
                node.op = 'BinaryOp'
                node.op_type = 0
                node.input = [prev_output, mylist[from_]]
                #print('mylist is ', mylist, 'the from_ is ', from_, 'node input is ', node.input)
                node.input_norm = node.input
                mydict[node.name] = node
                prev_output = node.name
                if activation == 'linear':
                    mylist.append(prev_output)
                else:
                    tmp_parser = configparser.ConfigParser()
                    name = section + '_activation'
                    tmp_parser.add_section(name)
                    tmp_parser.set(name, 'activation', activation)
                    sec_q.put(tmp_parser[name])
                    # NOTE: this section has relative reference
                    mylist.append(name)
            elif section.startswith('connected'):
                activation = sec.get('activation', fallback='linear')
                filters = sec.getint('output', 2)

                bias_data = np.ndarray(
                    shape=[filters],
                    dtype=np.float32,
                    buffer=readfile(weights_file, (filters * 4), section+'-bias'))
                fc_data = np.ndarray(
                    shape=[prev_layer_filters, filters],
                    dtype=np.float32,
                    buffer=readfile(weights_file, (prev_layer_filters * filters * 4), section+'-weight'))

                node = MyGraph.MyNode()
                node.name = section
                node.op = 'MatMul'
                node.input = [prev_output]
                node.input_norm = node.input
                node.multiplier = fc_data
                mydict[node.name] = node
                prev_output = node.name
                prev_layer_filters = filters

                node = MyGraph.MyNode()
                node.name = section + '_bias'
                node.op = 'BiasAdd'
                node.input = [prev_output]
                node.input_norm = node.input
                # node.attr = []
                node.bias = bias_data
                mydict[node.name] = node
                prev_output = node.name

                if activation == 'linear':
                    mylist.append(prev_output)
                else:
                    tmp_parser = configparser.ConfigParser()
                    name = section + '_activation'
                    tmp_parser.add_section(name)
                    tmp_parser.set(name, 'activation', activation)
                    sec_q.put(tmp_parser[name])
                    mylist.append(name)
            elif section.startswith('net'):
                node = MyGraph.MyNode()
                node.name = section
                node.op = 'DarknetNet'
                node.input = []
                node.input_norm = []
                node.width = int(cfg_parser['net_0']['width'])
                node.height = int(cfg_parser['net_0']['height'])
                node.channels = int(cfg_parser['net_0']['channels'])
                node.filters = node.channels
                # print(vars(node))
                # node.attr = []
                mydict[node.name] = node
                # start here
                prev_output = node.name
                prev_layer_filters = node.channels
                mylist.append(section)
            elif section.startswith('region'):
                node = MyGraph.MyNode()
                node.name = section
                node.op = 'DarknetRegion'
                node.input = [prev_output]
                node.input_norm = node.input
                node.classes = int(cfg_parser[section]['classes'])
                node.num = int(cfg_parser[section]['num'])
                node.softmax = int(cfg_parser[section]['softmax'])
                node.anchors = [float(i) for i in re.split(r',', cfg_parser[section]['anchors'])]
                #print(vars(node))
                #node.attr = []
                mydict[node.name] = node
                prev_output = node.name
                mylist.append(section)
            elif section.startswith('softmax'):
                node = MyGraph.MyNode()
                node.name = section
                node.op = 'Softmax'
                node.input = [prev_output]
                node.input_norm = node.input
                mydict[node.name] = node
                prev_output = node.name
                mylist.append(section)
                pass
            elif section.startswith('cost'):
                pass  # Configs not currently handled during model definition.
            else:
                raise ValueError(
                    'Unsupported section header type: {}'.format(section))
        print(' out filters ', prev_layer_filters)

    print('loaded {} bytes in weights file'.format(count*4))

    mygraph = MyGraph(mydict)
    mygraph.type = 'darknet'
    return mygraph


if __name__ == '__main__':
    config_path = sys.argv[1]
    weights_path = sys.argv[2]
    mygraph = buildGraph(config_path, weights_path)

    # Define the output nodes, input nodes, and stop nodes needed for the subgraph
    outputNodes = ['region_0', 'softmax_0']
    stopNodes = []
    inputNodes = ['darknet_0']

    mygraph.extractSubGraph(inputNodes, outputNodes, stopNodes)
    mygraph.generateDot('YoloV2.dot')

    # Generate the code corresponding to the subgraph
    mygraph.generateSource('YoloV2', os.path.split(config_path)[1]+'.ncnn', os.path.split(weights_path)[1] + '.ncnn')
```

**Line stats:** avg_line_length 36.915401 · max_line_length 117 · alphanum_fraction 0.534317

**`qsc_code_*_quality_signal`:** num_words 1,767 · num_chars 17,018 · mean_word_length 5.011885 · frac_words_unique 0.166384 · frac_chars_top_2grams 0.046748 · frac_chars_top_3grams 0.039521 · frac_chars_top_4grams 0.033198 · frac_chars_dupe_5grams 0.448058 · frac_chars_dupe_6grams 0.41294 · frac_chars_dupe_7grams 0.359869 · frac_chars_dupe_8grams 0.314024 · frac_chars_dupe_9grams 0.25734 · frac_chars_dupe_10grams 0.24458 · frac_chars_replacement_symbols 0 · frac_chars_digital 0.009554 · frac_chars_whitespace 0.360324 · size_file_byte 17,018 · num_lines 460 · num_chars_line_max 118 · num_chars_line_mean 36.995652 · frac_chars_alphabet 0.803968 · frac_chars_comments 0.093078 · cate_xml_start 0 · frac_lines_dupe_lines 0.433908 · cate_autogen 0 · frac_lines_long_string 0 · frac_chars_string_length 0.070578 · frac_chars_long_word_length 0.007416 · frac_lines_string_concat 0 · cate_encoded_data 0 · frac_chars_hex_words 0 · frac_lines_prompt_comments 0.002174 · frac_lines_assert 0.005747

**`qsc_codepython_*_quality_signal`:** cate_ast 1 · frac_lines_func_ratio 0.011494 · cate_var_zero false · frac_lines_pass 0.008621 · frac_lines_import 0.022989 · frac_lines_simplefunc 0 · score_lines_no_logic 0.048851 · frac_lines_print 0.014368

**Raw signal columns** (without the `_quality_signal` suffix): all 0, except `qsc_code_frac_words_unique` and `qsc_code_frac_lines_string_concat` (null).

**effective:** 1 · **hits:** 0
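`unique_config_sections` in the record above exists because Darknet `.cfg` files repeat section names (many `[convolutional]` blocks), which `configparser` rejects as duplicates. A self-contained demonstration of the renaming on a two-layer config held in a string:

```python
import configparser
import io
from collections import defaultdict

cfg = "[convolutional]\nfilters=16\n[convolutional]\nfilters=32\n"

counters = defaultdict(int)
out = io.StringIO()
for line in io.StringIO(cfg):
    if line.startswith('['):
        section = line.strip().strip('[]')
        # Suffix each repeated section with a running counter.
        line = line.replace(section, section + '_' + str(counters[section]))
        counters[section] += 1
    out.write(line)
out.seek(0)

parser = configparser.ConfigParser()
parser.read_file(out)
print(parser.sections())  # ['convolutional_0', 'convolutional_1']
```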
---

**hexsha:** `0ad20a796d3e2e784e9676daf81a22cf86a1d3cb` · **size:** 8,474 · **ext:** py · **lang:** Python

|  | repo_path | repo_name | repo_head_hexsha | licenses | count | event_min_datetime | event_max_datetime |
|---|---|---|---|---|---|---|---|
| max_stars | liuetal2019/utils.py | wasiahmad/GATE | 1e48504a3641f00265a271a19eb6b6449fdc33bd | ["MIT"] | 24 | 2020-12-07T10:22:40.000Z | 2022-03-31T09:24:13.000Z |
| max_issues | liuetal2019/utils.py | wasiahmad/GATE | 1e48504a3641f00265a271a19eb6b6449fdc33bd | ["MIT"] | 15 | 2021-03-22T04:52:57.000Z | 2022-01-01T18:32:31.000Z |
| max_forks | liuetal2019/utils.py | wasiahmad/GATE | 1e48504a3641f00265a271a19eb6b6449fdc33bd | ["MIT"] | 8 | 2021-03-04T05:09:42.000Z | 2022-01-25T12:59:19.000Z |

**content:**

```python
import io
import logging
import json
import numpy
import torch
import numpy as np
from tqdm import tqdm
from clie.inputters import constant
from clie.objects import Sentence
from torch.utils.data import Dataset
from torch.utils.data.sampler import Sampler
logger = logging.getLogger(__name__)
def load_word_embeddings(file):
embeddings_index = {}
fin = io.open(file, 'r', encoding='utf-8', newline='\n', errors='ignore')
n, d = map(int, fin.readline().split())
for i, line in tqdm(enumerate(fin), total=n):
tokens = line.rstrip().split(' ')
v = numpy.array(tokens[1:], dtype=float)
embeddings_index[tokens[0]] = v
return embeddings_index
# ------------------------------------------------------------------------------
# Data loading
# ------------------------------------------------------------------------------
def load_data(filename, src_lang, tgt_lang, knn_file,
knn_size, max_examples=-1):
examples = []
wrong_subj_pos, wrong_obj_pos = 0, 0
with open(filename) as f:
data = json.load(f)
knn_dict = None
if knn_file:
with open(knn_file) as f:
knn_dict = json.load(f)
for idx, ex in enumerate(tqdm(data, total=len(data))):
sentence = Sentence(ex['id'])
sentence.language = src_lang
sentence.words = ex['token']
sentence.pos = ex['stanford_pos']
sentence.ner = ex['stanford_ner']
sentence.deprel = ex['stanford_deprel']
sentence.head = [int(x) for x in ex['stanford_head']]
sentence.subj_type = ex['subj_type']
sentence.obj_type = ex['obj_type']
sentence.relation = ex['relation']
if ex['subj_end'] - ex['subj_start'] < 0:
# we swap the start and end index
wrong_subj_pos += 1
sentence.subject = [ex['subj_end'], ex['subj_start']]
else:
sentence.subject = [ex['subj_start'], ex['subj_end']]
if ex['obj_end'] - ex['obj_start'] < 0:
# we swap the start and end index
wrong_obj_pos += 1
sentence.object = [ex['obj_end'], ex['obj_start']]
else:
sentence.object = [ex['obj_start'], ex['obj_end']]
# store KNN word info
if knn_dict:
sentence.tgt_lang = tgt_lang
knn_words = []
for w in ex['token']:
w = '!{}_{}'.format(src_lang, w)
if w in knn_dict:
assert len(knn_dict[w]) == knn_size
knn_words.append(knn_dict[w])
else:
knn_words.append([constant.UNK_WORD] * knn_size)
sentence.knn_words = knn_words
examples.append(sentence)
if max_examples != -1 and len(examples) > max_examples:
break
if wrong_subj_pos > 0 or wrong_obj_pos > 0:
logger.info('{} and {} wrong subject and object positions found!'.format(
wrong_subj_pos, wrong_obj_pos))
return examples
def vectorize(ex, model, iseval):
"""Torchify a single example."""
words = ['!{}_{}'.format(ex.language, w) for w in ex.words]
words = [model.word_dict[w] for w in words]
knn_word = None
if ex.knn_words:
knn_word = [[model.word_dict[w] for w in knn]
for knn in ex.knn_words]
knn_word = torch.LongTensor(knn_word)
word = torch.LongTensor(words)
pos = torch.LongTensor([model.pos_dict[p] for p in ex.pos])
ner = torch.LongTensor([model.ner_dict[n] for n in ex.ner])
deprel = torch.LongTensor([model.deprel_dict[d] for d in ex.deprel])
assert any([x == 0 for x in ex.head])
head = torch.LongTensor(ex.head)
subj_position = torch.LongTensor(ex.subj_position)
obj_position = torch.LongTensor(ex.obj_position)
type = [0] * len(ex.words)
ttype = model.type_dict[ex.subj_type]
start, end = ex.subject
type[start: end + 1] = [ttype] * (end - start + 1)
atype = model.type_dict[ex.obj_type]
start, end = ex.object
type[start: end + 1] = [atype] * (end - start + 1)
type = torch.LongTensor(type)
return {
'id': ex.id,
'language': ex.language,
'word': word,
'pos': pos,
'ner': ner,
'deprel': deprel,
'type': type,
'head': head,
'subject': ex.subj_text,
'object': ex.obj_text,
'subject_pos': subj_position,
'object_pos': obj_position,
'relation': model.label_dict[ex.relation],
'knn_word': knn_word
}
def batchify(batch):
"""Gather a batch of individual examples into one batch."""
# batch is a list of vectorized examples
batch_size = len(batch)
ids = [ex['id'] for ex in batch]
language = [ex['language'] for ex in batch]
use_knn = batch[0]['knn_word'] is not None
# NOTE. batch[0]['knn_word'] is a 2d list
knn_size = len(batch[0]['knn_word'][0]) if use_knn else 0
# --------- Prepare Code tensors ---------
max_len = max([ex['word'].size(0) for ex in batch])
# Batch Code Representations
len_rep = torch.LongTensor(batch_size).fill_(constant.PAD)
word_rep = torch.LongTensor(batch_size, max_len).fill_(constant.PAD)
head_rep = torch.LongTensor(batch_size, max_len).fill_(constant.PAD)
subject_pos_rep = torch.LongTensor(batch_size, max_len).fill_(constant.PAD)
object_pos_rep = torch.LongTensor(batch_size, max_len).fill_(constant.PAD)
pos_rep = torch.LongTensor(batch_size, max_len).fill_(constant.PAD)
ner_rep = torch.LongTensor(batch_size, max_len).fill_(constant.PAD)
deprel_rep = torch.LongTensor(batch_size, max_len).fill_(constant.PAD)
type_rep = torch.LongTensor(batch_size, max_len).fill_(constant.PAD)
labels = torch.LongTensor(batch_size)
subject = []
object = []
knn_rep = None
if use_knn:
knn_rep = torch.LongTensor(batch_size, max_len, knn_size).fill_(constant.PAD)
for i, ex in enumerate(batch):
len_rep[i] = ex['word'].size(0)
labels[i] = ex['relation']
word_rep[i, :len_rep[i]] = ex['word']
head_rep[i, :len_rep[i]] = ex['head']
subject_pos_rep[i, :len_rep[i]] = ex['subject_pos']
object_pos_rep[i, :len_rep[i]] = ex['object_pos']
pos_rep[i, :len_rep[i]] = ex['pos']
ner_rep[i, :len_rep[i]] = ex['ner']
deprel_rep[i, :len_rep[i]] = ex['deprel']
type_rep[i, :len_rep[i]] = ex['type']
subject.append(ex['subject'])
object.append(ex['object'])
if use_knn:
knn_rep[i, :len_rep[i]] = ex['knn_word']
return {
'ids': ids,
'language': language,
'batch_size': batch_size,
'len_rep': len_rep,
'word_rep': word_rep,
'knn_rep': knn_rep,
'head_rep': head_rep,
'subject': subject,
'object': object,
'subject_pos_rep': subject_pos_rep,
'object_pos_rep': object_pos_rep,
'labels': labels,
'pos_rep': pos_rep,
'ner_rep': ner_rep,
'deprel_rep': deprel_rep,
'type_rep': type_rep
}
class ACE05Dataset(Dataset):
def __init__(self, examples, model, evaluation=False):
self.model = model
self.examples = examples
self.evaluation = evaluation
def __len__(self):
return len(self.examples)
def __getitem__(self, index):
return vectorize(self.examples[index], self.model,
iseval=self.evaluation)
def lengths(self):
return [len(ex.words) for ex in self.examples]
class SortedBatchSampler(Sampler):
def __init__(self, lengths, batch_size, shuffle=True):
self.lengths = lengths
self.batch_size = batch_size
self.shuffle = shuffle
def __iter__(self):
lengths = np.array(
[(-l, np.random.random()) for l in self.lengths],
dtype=[('l1', np.int_), ('rand', np.float_)]
)
indices = np.argsort(lengths, order=('l1', 'rand'))
batches = [indices[i:i + self.batch_size]
for i in range(0, len(indices), self.batch_size)]
if self.shuffle:
np.random.shuffle(batches)
return iter([i for batch in batches for i in batch])
def __len__(self):
return len(self.lengths)
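# --- Illustrative usage (not part of the original file): a minimal sketch of
# wiring the pieces above into a PyTorch DataLoader. `train_examples` and
# `model` are assumed to come from the loading code above; the batch size is
# an arbitrary choice.
def make_loader(train_examples, model, batch_size=32, shuffle=True):
    from torch.utils.data import DataLoader
    dataset = ACE05Dataset(train_examples, model, evaluation=False)
    # yields indices grouped by (descending) length so batches need minimal padding
    sampler = SortedBatchSampler(dataset.lengths(), batch_size, shuffle=shuffle)
    # batchify pads every per-example tensor up to the longest sequence in the batch
    return DataLoader(dataset, batch_size=batch_size, sampler=sampler,
                      collate_fn=batchify)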
| 34.587755
| 85
| 0.576941
| 1,119
| 8,474
| 4.15639
| 0.151028
| 0.064502
| 0.047302
| 0.056762
| 0.211137
| 0.176736
| 0.125994
| 0.099979
| 0.099979
| 0.099979
| 0
| 0.005215
| 0.275903
| 8,474
| 244
| 86
| 34.729508
| 0.752771
| 0.056998
| 0
| 0.046154
| 0
| 0
| 0.075775
| 0
| 0
| 0
| 0
| 0
| 0.010256
| 1
| 0.05641
| false
| 0
| 0.05641
| 0.020513
| 0.169231
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
0ad2503d07ac5b15fee30f7480f83b4ea51f1515
| 914
|
py
|
Python
|
build.py
|
dnanexus/IndexTools
|
0392b3be92ff50b401290b59e9ca6c7767fa5a96
|
[
"MIT"
] | 15
|
2019-07-17T11:41:36.000Z
|
2021-03-02T09:36:34.000Z
|
build.py
|
dnanexus/IndexTools
|
0392b3be92ff50b401290b59e9ca6c7767fa5a96
|
[
"MIT"
] | 22
|
2019-05-15T20:08:12.000Z
|
2019-10-11T13:33:42.000Z
|
build.py
|
dnanexus/IndexTools
|
0392b3be92ff50b401290b59e9ca6c7767fa5a96
|
[
"MIT"
] | 3
|
2019-06-01T15:58:06.000Z
|
2022-01-21T21:10:01.000Z
|
from distutils.extension import Extension
cmdclass = {}
try:
# with Cython
from Cython.Build import build_ext
cmdclass["build_ext"] = build_ext
module_src = "cgranges/python/cgranges.pyx"
except ImportError: # without Cython
module_src = "cgranges/python/cgranges.c"
def build(setup_kwargs):
"""
This function is mandatory in order to build the extensions.
"""
setup_kwargs.update(
{
"ext_modules": [
Extension(
"cgranges",
sources=[module_src, "cgranges/cgranges.c"],
depends=[
"cgranges/cgranges.h",
"cgranges/khash.h",
"cgranges/python/cgranges.pyx"
],
include_dirs=["cgranges"]
)
],
"cmdclass": cmdclass
}
)
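# --- Illustrative usage (not part of the original file): a minimal sketch of
# how a PEP 517 build backend such as Poetry calls `build`. The setup_kwargs
# values below are hypothetical stand-ins for what the backend assembles from
# pyproject.toml before invoking setup().
if __name__ == "__main__":
    setup_kwargs = {"name": "indextools", "version": "0.0.0"}  # hypothetical values
    build(setup_kwargs)
    # setup_kwargs now also carries 'ext_modules' and 'cmdclass'
    print(sorted(setup_kwargs.keys()))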
| 25.388889
| 64
| 0.504376
| 79
| 914
| 5.708861
| 0.493671
| 0.053215
| 0.113082
| 0.101996
| 0.137472
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.399344
| 914
| 35
| 65
| 26.114286
| 0.821494
| 0.09628
| 0
| 0.076923
| 0
| 0
| 0.222497
| 0.10136
| 0
| 0
| 0
| 0
| 0
| 1
| 0.038462
| false
| 0
| 0.115385
| 0
| 0.153846
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
0ad2916f049d06f5df6ddbf5e08b57510f7c1b78
| 17,212
|
py
|
Python
|
gluoncv/data/kinetics400/classification.py
|
YvetteGuo/gluon-cv
|
123af8cf9f15a879c16a5c7d12f01ce1471d85b6
|
[
"Apache-2.0"
] | 1
|
2019-04-02T02:08:04.000Z
|
2019-04-02T02:08:04.000Z
|
gluoncv/data/kinetics400/classification.py
|
YvetteGuo/gluon-cv
|
123af8cf9f15a879c16a5c7d12f01ce1471d85b6
|
[
"Apache-2.0"
] | 1
|
2019-06-06T08:39:12.000Z
|
2019-06-06T08:39:12.000Z
|
gluoncv/data/kinetics400/classification.py
|
YvetteGuo/gluon-cv
|
123af8cf9f15a879c16a5c7d12f01ce1471d85b6
|
[
"Apache-2.0"
] | 1
|
2019-08-26T09:26:42.000Z
|
2019-08-26T09:26:42.000Z
|
# pylint: disable=line-too-long,too-many-lines,missing-docstring
"""Kinetics400 action classification dataset."""
import os
import random
import numpy as np
from mxnet import nd
from mxnet.gluon.data import dataset
__all__ = ['Kinetics400']
class Kinetics400(dataset.Dataset):
"""Load the Kinetics400 action recognition dataset.
Refer to :doc:`../build/examples_datasets/kinetics400` for the description of
this dataset and how to prepare it.
Parameters
----------
root : str, default '~/.mxnet/datasets/kinetics400'
        Path to the folder storing the dataset.
setting : str, required
Config file of the prepared dataset.
train : bool, default True
Whether to load the training or validation set.
test_mode : bool, default False
        Whether to perform evaluation on the test set.
name_pattern : str, default None
The naming pattern of the decoded video frames.
For example, img_00012.jpg
is_color : bool, default True
        Whether the loaded image is color or grayscale.
modality : str, default 'rgb'
        Input modalities; we support only rgb video frames for now.
Will add support for rgb difference image and optical flow image later.
num_segments : int, default 1
Number of segments to evenly divide the video into clips.
A useful technique to obtain global video-level information.
        Limin Wang et al., Temporal Segment Networks: Towards Good Practices for Deep Action Recognition, ECCV 2016.
new_length : int, default 1
The length of input video clip. Default is a single image, but it can be multiple video frames.
For example, new_length=16 means we will extract a video clip of consecutive 16 frames.
new_width : int, default 340
Scale the width of loaded image to 'new_width' for later multiscale cropping and resizing.
new_height : int, default 256
Scale the height of loaded image to 'new_height' for later multiscale cropping and resizing.
target_width : int, default 224
Scale the width of transformed image to the same 'target_width' for batch forwarding.
target_height : int, default 224
Scale the height of transformed image to the same 'target_height' for batch forwarding.
transform : function, default None
A function that takes data and label and transforms them.
"""
def __init__(self,
setting=os.path.expanduser('~/.mxnet/datasets/kinetics400/kinetics400_train_list_rawframes.txt'),
root=os.path.expanduser('~/.mxnet/datasets/kinetics400/rawframes_train'),
train=True,
test_mode=False,
name_pattern=None,
is_color=True,
modality='rgb',
num_segments=1,
new_length=1,
new_width=340,
new_height=256,
target_width=224,
target_height=224,
transform=None):
super(Kinetics400, self).__init__()
self.root = root
self.setting = setting
self.train = train
self.test_mode = test_mode
self.is_color = is_color
self.modality = modality
self.num_segments = num_segments
self.new_height = new_height
self.new_width = new_width
self.target_height = target_height
self.target_width = target_width
self.new_length = new_length
self.transform = transform
self.classes, self.class_to_idx = self._find_classes(root)
self.clips = self._make_dataset(root, setting)
if len(self.clips) == 0:
raise(RuntimeError("Found 0 video clips in subfolders of: " + root + "\n"
"Check your data directory (opt.data-dir)."))
if name_pattern:
self.name_pattern = name_pattern
else:
if self.modality == "rgb":
self.name_pattern = "img_%05d.jpg"
elif self.modality == "flow":
self.name_pattern = "flow_%s_%05d.jpg"
def __getitem__(self, index):
directory, duration, target = self.clips[index]
average_duration = int(duration / self.num_segments)
offsets = []
for seg_id in range(self.num_segments):
if self.train and not self.test_mode:
# training
if average_duration >= self.new_length:
offset = random.randint(0, average_duration - self.new_length)
                    # No +1 because randint(a, b) returns a random integer N such that a <= N <= b.
offsets.append(offset + seg_id * average_duration)
else:
offsets.append(0)
elif not self.train and not self.test_mode:
# validation
if average_duration >= self.new_length:
offsets.append(int((average_duration - self.new_length + 1)/2 + seg_id * average_duration))
else:
offsets.append(0)
else:
# test
if average_duration >= self.new_length:
offsets.append(int((average_duration - self.new_length + 1)/2 + seg_id * average_duration))
else:
offsets.append(0)
clip_input = self._TSN_RGB(directory, offsets, self.new_height, self.new_width, self.new_length, self.is_color, self.name_pattern)
if self.transform is not None:
clip_input = self.transform(clip_input)
if self.num_segments > 1 and not self.test_mode:
            # For TSN training, reshape the input to B x (3 * new_length) x H x W, where B = batch_size * num_segments
clip_input = clip_input.reshape((-1, 3 * self.new_length, self.target_height, self.target_width))
return clip_input, target
def __len__(self):
return len(self.clips)
def _find_classes(self, directory):
classes = [d for d in os.listdir(directory) if os.path.isdir(os.path.join(directory, d))]
classes.sort()
class_to_idx = {classes[i]: i for i in range(len(classes))}
return classes, class_to_idx
def _make_dataset(self, directory, setting):
if not os.path.exists(setting):
raise(RuntimeError("Setting file %s doesn't exist. Check opt.train-list and opt.val-list. " % (setting)))
clips = []
with open(setting) as split_f:
data = split_f.readlines()
for line in data:
line_info = line.split()
# line format: video_path, video_duration, video_label
if len(line_info) < 3:
                    print('Video input format is not correct, missing one or more elements: %s' % line)
continue
clip_path = os.path.join(directory, line_info[0])
duration = int(line_info[1])
target = int(line_info[2])
item = (clip_path, duration, target)
clips.append(item)
return clips
def _TSN_RGB(self, directory, offsets, new_height, new_width, new_length, is_color, name_pattern):
from ...utils.filesystem import try_import_cv2
cv2 = try_import_cv2()
if is_color:
cv_read_flag = cv2.IMREAD_COLOR
else:
cv_read_flag = cv2.IMREAD_GRAYSCALE
interpolation = cv2.INTER_LINEAR
sampled_list = []
for _, offset in enumerate(offsets):
for length_id in range(1, new_length+1):
frame_name = name_pattern % (length_id + offset)
frame_path = directory + "/" + frame_name
cv_img_origin = cv2.imread(frame_path, cv_read_flag)
if cv_img_origin is None:
raise(RuntimeError("Could not load file %s. Check data path." % (frame_path)))
if new_width > 0 and new_height > 0:
cv_img = cv2.resize(cv_img_origin, (new_width, new_height), interpolation)
else:
cv_img = cv_img_origin
cv_img = cv2.cvtColor(cv_img, cv2.COLOR_BGR2RGB)
sampled_list.append(cv_img)
# the shape of clip_input will be H x W x C, and C = num_segments * new_length * 3
clip_input = np.concatenate(sampled_list, axis=2)
return nd.array(clip_input)
class Kinetics400Attr(object):
def __init__(self):
self.num_class = 400
self.classes = ['abseiling', 'air_drumming', 'answering_questions', 'applauding', 'applying_cream', 'archery',
'arm_wrestling', 'arranging_flowers', 'assembling_computer', 'auctioning', 'baby_waking_up', 'baking_cookies',
'balloon_blowing', 'bandaging', 'barbequing', 'bartending', 'beatboxing', 'bee_keeping', 'belly_dancing',
'bench_pressing', 'bending_back', 'bending_metal', 'biking_through_snow', 'blasting_sand', 'blowing_glass',
'blowing_leaves', 'blowing_nose', 'blowing_out_candles', 'bobsledding', 'bookbinding', 'bouncing_on_trampoline',
'bowling', 'braiding_hair', 'breading_or_breadcrumbing', 'breakdancing', 'brush_painting', 'brushing_hair',
'brushing_teeth', 'building_cabinet', 'building_shed', 'bungee_jumping', 'busking', 'canoeing_or_kayaking',
'capoeira', 'carrying_baby', 'cartwheeling', 'carving_pumpkin', 'catching_fish', 'catching_or_throwing_baseball',
'catching_or_throwing_frisbee', 'catching_or_throwing_softball', 'celebrating', 'changing_oil', 'changing_wheel',
'checking_tires', 'cheerleading', 'chopping_wood', 'clapping', 'clay_pottery_making', 'clean_and_jerk',
'cleaning_floor', 'cleaning_gutters', 'cleaning_pool', 'cleaning_shoes', 'cleaning_toilet', 'cleaning_windows',
'climbing_a_rope', 'climbing_ladder', 'climbing_tree', 'contact_juggling', 'cooking_chicken', 'cooking_egg',
'cooking_on_campfire', 'cooking_sausages', 'counting_money', 'country_line_dancing', 'cracking_neck', 'crawling_baby',
'crossing_river', 'crying', 'curling_hair', 'cutting_nails', 'cutting_pineapple', 'cutting_watermelon',
'dancing_ballet', 'dancing_charleston', 'dancing_gangnam_style', 'dancing_macarena', 'deadlifting',
'decorating_the_christmas_tree', 'digging', 'dining', 'disc_golfing', 'diving_cliff', 'dodgeball', 'doing_aerobics',
'doing_laundry', 'doing_nails', 'drawing', 'dribbling_basketball', 'drinking', 'drinking_beer', 'drinking_shots',
'driving_car', 'driving_tractor', 'drop_kicking', 'drumming_fingers', 'dunking_basketball', 'dying_hair',
'eating_burger', 'eating_cake', 'eating_carrots', 'eating_chips', 'eating_doughnuts', 'eating_hotdog',
'eating_ice_cream', 'eating_spaghetti', 'eating_watermelon', 'egg_hunting', 'exercising_arm',
'exercising_with_an_exercise_ball', 'extinguishing_fire', 'faceplanting', 'feeding_birds', 'feeding_fish',
'feeding_goats', 'filling_eyebrows', 'finger_snapping', 'fixing_hair', 'flipping_pancake', 'flying_kite',
'folding_clothes', 'folding_napkins', 'folding_paper', 'front_raises', 'frying_vegetables', 'garbage_collecting',
'gargling', 'getting_a_haircut', 'getting_a_tattoo', 'giving_or_receiving_award', 'golf_chipping', 'golf_driving',
'golf_putting', 'grinding_meat', 'grooming_dog', 'grooming_horse', 'gymnastics_tumbling', 'hammer_throw',
'headbanging', 'headbutting', 'high_jump', 'high_kick', 'hitting_baseball', 'hockey_stop', 'holding_snake',
'hopscotch', 'hoverboarding', 'hugging', 'hula_hooping', 'hurdling', 'hurling_-sport-', 'ice_climbing', 'ice_fishing',
'ice_skating', 'ironing', 'javelin_throw', 'jetskiing', 'jogging', 'juggling_balls', 'juggling_fire',
'juggling_soccer_ball', 'jumping_into_pool', 'jumpstyle_dancing', 'kicking_field_goal', 'kicking_soccer_ball',
'kissing', 'kitesurfing', 'knitting', 'krumping', 'laughing', 'laying_bricks', 'long_jump', 'lunge', 'making_a_cake',
'making_a_sandwich', 'making_bed', 'making_jewelry', 'making_pizza', 'making_snowman', 'making_sushi', 'making_tea',
'marching', 'massaging_back', 'massaging_feet', 'massaging_legs', "massaging_person's_head", 'milking_cow',
'mopping_floor', 'motorcycling', 'moving_furniture', 'mowing_lawn', 'news_anchoring', 'opening_bottle',
'opening_present', 'paragliding', 'parasailing', 'parkour', 'passing_American_football_-in_game-',
'passing_American_football_-not_in_game-', 'peeling_apples', 'peeling_potatoes', 'petting_animal_-not_cat-',
'petting_cat', 'picking_fruit', 'planting_trees', 'plastering', 'playing_accordion', 'playing_badminton',
'playing_bagpipes', 'playing_basketball', 'playing_bass_guitar', 'playing_cards', 'playing_cello', 'playing_chess',
'playing_clarinet', 'playing_controller', 'playing_cricket', 'playing_cymbals', 'playing_didgeridoo', 'playing_drums',
'playing_flute', 'playing_guitar', 'playing_harmonica', 'playing_harp', 'playing_ice_hockey', 'playing_keyboard',
'playing_kickball', 'playing_monopoly', 'playing_organ', 'playing_paintball', 'playing_piano', 'playing_poker',
'playing_recorder', 'playing_saxophone', 'playing_squash_or_racquetball', 'playing_tennis', 'playing_trombone',
'playing_trumpet', 'playing_ukulele', 'playing_violin', 'playing_volleyball', 'playing_xylophone', 'pole_vault',
'presenting_weather_forecast', 'pull_ups', 'pumping_fist', 'pumping_gas', 'punching_bag', 'punching_person_-boxing-',
'push_up', 'pushing_car', 'pushing_cart', 'pushing_wheelchair', 'reading_book', 'reading_newspaper', 'recording_music',
'riding_a_bike', 'riding_camel', 'riding_elephant', 'riding_mechanical_bull', 'riding_mountain_bike', 'riding_mule',
'riding_or_walking_with_horse', 'riding_scooter', 'riding_unicycle', 'ripping_paper', 'robot_dancing', 'rock_climbing',
'rock_scissors_paper', 'roller_skating', 'running_on_treadmill', 'sailing', 'salsa_dancing', 'sanding_floor',
'scrambling_eggs', 'scuba_diving', 'setting_table', 'shaking_hands', 'shaking_head', 'sharpening_knives',
'sharpening_pencil', 'shaving_head', 'shaving_legs', 'shearing_sheep', 'shining_shoes', 'shooting_basketball',
'shooting_goal_-soccer-', 'shot_put', 'shoveling_snow', 'shredding_paper', 'shuffling_cards', 'side_kick',
'sign_language_interpreting', 'singing', 'situp', 'skateboarding', 'ski_jumping', 'skiing_-not_slalom_or_crosscountry-',
'skiing_crosscountry', 'skiing_slalom', 'skipping_rope', 'skydiving', 'slacklining', 'slapping', 'sled_dog_racing',
'smoking', 'smoking_hookah', 'snatch_weight_lifting', 'sneezing', 'sniffing', 'snorkeling', 'snowboarding', 'snowkiting',
'snowmobiling', 'somersaulting', 'spinning_poi', 'spray_painting', 'spraying', 'springboard_diving', 'squat',
'sticking_tongue_out', 'stomping_grapes', 'stretching_arm', 'stretching_leg', 'strumming_guitar', 'surfing_crowd',
'surfing_water', 'sweeping_floor', 'swimming_backstroke', 'swimming_breast_stroke', 'swimming_butterfly_stroke',
'swing_dancing', 'swinging_legs', 'swinging_on_something', 'sword_fighting', 'tai_chi', 'taking_a_shower', 'tango_dancing',
'tap_dancing', 'tapping_guitar', 'tapping_pen', 'tasting_beer', 'tasting_food', 'testifying', 'texting', 'throwing_axe',
'throwing_ball', 'throwing_discus', 'tickling', 'tobogganing', 'tossing_coin', 'tossing_salad', 'training_dog',
'trapezing', 'trimming_or_shaving_beard', 'trimming_trees', 'triple_jump', 'tying_bow_tie', 'tying_knot_-not_on_a_tie-',
'tying_tie', 'unboxing', 'unloading_truck', 'using_computer', 'using_remote_controller_-not_gaming-', 'using_segway',
'vault', 'waiting_in_line', 'walking_the_dog', 'washing_dishes', 'washing_feet', 'washing_hair', 'washing_hands',
'water_skiing', 'water_sliding', 'watering_plants', 'waxing_back', 'waxing_chest', 'waxing_eyebrows', 'waxing_legs',
'weaving_basket', 'welding', 'whistling', 'windsurfing', 'wrapping_present', 'wrestling', 'writing', 'yawning', 'yoga', 'zumba']
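# --- Illustrative usage (not part of the original file): a minimal sketch,
# assuming the raw frames and the train list file have been prepared as
# described in the class docstring. Paths mirror the constructor defaults.
if __name__ == '__main__':
    train_dataset = Kinetics400(
        setting=os.path.expanduser('~/.mxnet/datasets/kinetics400/kinetics400_train_list_rawframes.txt'),
        root=os.path.expanduser('~/.mxnet/datasets/kinetics400/rawframes_train'),
        train=True,
        num_segments=3)
    clip, label = train_dataset[0]  # clip shape: H x W x (num_segments * new_length * 3)
    print(len(train_dataset), clip.shape, label)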
| 65.444867
| 152
| 0.625552
| 1,887
| 17,212
| 5.384208
| 0.409115
| 0.014173
| 0.011516
| 0.012992
| 0.081496
| 0.056102
| 0.037992
| 0.026181
| 0.022441
| 0.022441
| 0
| 0.009244
| 0.264641
| 17,212
| 262
| 153
| 65.694656
| 0.793474
| 0.138799
| 0
| 0.078947
| 0
| 0.005263
| 0.407726
| 0.058695
| 0
| 0
| 0
| 0
| 0
| 1
| 0.036842
| false
| 0.010526
| 0.036842
| 0.005263
| 0.110526
| 0.005263
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
0ad331ec8ece0975704ec9214918b2580008a6a0
| 23,842
|
py
|
Python
|
watcher/api/controllers/v1/action_plan.py
|
ajaytikoo/watcher
|
6dbac1f6ae7f3e10dfdcef5721fa4af7af54e159
|
[
"Apache-2.0"
] | 64
|
2015-10-18T02:57:24.000Z
|
2022-01-13T11:27:51.000Z
|
watcher/api/controllers/v1/action_plan.py
|
ajaytikoo/watcher
|
6dbac1f6ae7f3e10dfdcef5721fa4af7af54e159
|
[
"Apache-2.0"
] | null | null | null |
watcher/api/controllers/v1/action_plan.py
|
ajaytikoo/watcher
|
6dbac1f6ae7f3e10dfdcef5721fa4af7af54e159
|
[
"Apache-2.0"
] | 35
|
2015-12-25T13:53:21.000Z
|
2021-07-19T15:50:16.000Z
|
# -*- encoding: utf-8 -*-
# Copyright 2013 Red Hat, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
An :ref:`Action Plan <action_plan_definition>` specifies a flow of
:ref:`Actions <action_definition>` that should be executed in order to satisfy
a given :ref:`Goal <goal_definition>`. It also contains an estimated
:ref:`global efficacy <efficacy_definition>` alongside a set of
:ref:`efficacy indicators <efficacy_indicator_definition>`.
An :ref:`Action Plan <action_plan_definition>` is generated by Watcher when an
:ref:`Audit <audit_definition>` is successful which implies that the
:ref:`Strategy <strategy_definition>`
which was used has found a :ref:`Solution <solution_definition>` to achieve the
:ref:`Goal <goal_definition>` of this :ref:`Audit <audit_definition>`.
In the default implementation of Watcher, an action plan is composed of
a list of successive :ref:`Actions <action_definition>` (i.e., a Workflow of
:ref:`Actions <action_definition>` belonging to a unique branch).
However, Watcher provides abstract interfaces for many of its components,
allowing other implementations to generate and handle more complex :ref:`Action
Plan(s) <action_plan_definition>` composed of two types of Action Item(s):
- simple :ref:`Actions <action_definition>`: atomic tasks, which means they
  cannot be split into smaller tasks or commands from an OpenStack point of
  view.
- composite Actions: which are composed of several simple
:ref:`Actions <action_definition>`
ordered in sequential and/or parallel flows.
An :ref:`Action Plan <action_plan_definition>` may be described using
standard workflow model description formats such as
`Business Process Model and Notation 2.0 (BPMN 2.0)
<http://www.omg.org/spec/BPMN/2.0/>`_ or `Unified Modeling Language (UML)
<http://www.uml.org/>`_.
To see the life-cycle and description of
:ref:`Action Plan <action_plan_definition>` states, visit :ref:`the Action Plan
state machine <action_plan_state_machine>`.
"""
import datetime
from http import HTTPStatus
from oslo_log import log
import pecan
from pecan import rest
import wsme
from wsme import types as wtypes
import wsmeext.pecan as wsme_pecan
from watcher._i18n import _
from watcher.api.controllers import base
from watcher.api.controllers import link
from watcher.api.controllers.v1 import collection
from watcher.api.controllers.v1 import efficacy_indicator as efficacyindicator
from watcher.api.controllers.v1 import types
from watcher.api.controllers.v1 import utils as api_utils
from watcher.applier import rpcapi
from watcher.common import exception
from watcher.common import policy
from watcher.common import utils
from watcher import objects
from watcher.objects import action_plan as ap_objects
LOG = log.getLogger(__name__)
def hide_fields_in_newer_versions(obj):
"""This method hides fields that were added in newer API versions.
Certain node fields were introduced at certain API versions.
These fields are only made available when the request's API version
matches or exceeds the versions when these fields were introduced.
"""
pass
class ActionPlanPatchType(types.JsonPatchType):
@staticmethod
def _validate_state(patch):
serialized_patch = {'path': patch.path, 'op': patch.op}
if patch.value is not wtypes.Unset:
serialized_patch['value'] = patch.value
# todo: use state machines to handle state transitions
state_value = patch.value
if state_value and not hasattr(ap_objects.State, state_value):
msg = _("Invalid state: %(state)s")
raise exception.PatchError(
patch=serialized_patch, reason=msg % dict(state=state_value))
@staticmethod
def validate(patch):
if patch.path == "/state":
ActionPlanPatchType._validate_state(patch)
return types.JsonPatchType.validate(patch)
@staticmethod
def internal_attrs():
return types.JsonPatchType.internal_attrs()
@staticmethod
def mandatory_attrs():
return ["audit_id", "state"]
class ActionPlan(base.APIBase):
"""API representation of a action plan.
This class enforces type checking and value constraints, and converts
between the internal object model and the API representation of an
action plan.
"""
_audit_uuid = None
_strategy_uuid = None
_strategy_name = None
_efficacy_indicators = None
def _get_audit_uuid(self):
return self._audit_uuid
def _set_audit_uuid(self, value):
if value == wtypes.Unset:
self._audit_uuid = wtypes.Unset
elif value and self._audit_uuid != value:
try:
audit = objects.Audit.get(pecan.request.context, value)
self._audit_uuid = audit.uuid
self.audit_id = audit.id
except exception.AuditNotFound:
self._audit_uuid = None
def _get_efficacy_indicators(self):
if self._efficacy_indicators is None:
self._set_efficacy_indicators(wtypes.Unset)
return self._efficacy_indicators
def _set_efficacy_indicators(self, value):
efficacy_indicators = []
if value == wtypes.Unset and not self._efficacy_indicators:
try:
_efficacy_indicators = objects.EfficacyIndicator.list(
pecan.request.context,
filters={"action_plan_uuid": self.uuid})
for indicator in _efficacy_indicators:
efficacy_indicator = efficacyindicator.EfficacyIndicator(
context=pecan.request.context,
name=indicator.name,
description=indicator.description,
unit=indicator.unit,
value=float(indicator.value),
)
efficacy_indicators.append(efficacy_indicator.as_dict())
self._efficacy_indicators = efficacy_indicators
except exception.EfficacyIndicatorNotFound as exc:
LOG.exception(exc)
elif value and self._efficacy_indicators != value:
self._efficacy_indicators = value
def _get_strategy(self, value):
if value == wtypes.Unset:
return None
strategy = None
try:
if utils.is_uuid_like(value) or utils.is_int_like(value):
strategy = objects.Strategy.get(
pecan.request.context, value)
else:
strategy = objects.Strategy.get_by_name(
pecan.request.context, value)
except exception.StrategyNotFound:
pass
if strategy:
self.strategy_id = strategy.id
return strategy
def _get_strategy_uuid(self):
return self._strategy_uuid
def _set_strategy_uuid(self, value):
if value and self._strategy_uuid != value:
self._strategy_uuid = None
strategy = self._get_strategy(value)
if strategy:
self._strategy_uuid = strategy.uuid
def _get_strategy_name(self):
return self._strategy_name
def _set_strategy_name(self, value):
if value and self._strategy_name != value:
self._strategy_name = None
strategy = self._get_strategy(value)
if strategy:
self._strategy_name = strategy.name
uuid = wtypes.wsattr(types.uuid, readonly=True)
"""Unique UUID for this action plan"""
audit_uuid = wtypes.wsproperty(types.uuid, _get_audit_uuid,
_set_audit_uuid,
mandatory=True)
"""The UUID of the audit this port belongs to"""
strategy_uuid = wtypes.wsproperty(
wtypes.text, _get_strategy_uuid, _set_strategy_uuid, mandatory=False)
"""Strategy UUID the action plan refers to"""
strategy_name = wtypes.wsproperty(
wtypes.text, _get_strategy_name, _set_strategy_name, mandatory=False)
"""The name of the strategy this action plan refers to"""
efficacy_indicators = wtypes.wsproperty(
types.jsontype, _get_efficacy_indicators, _set_efficacy_indicators,
mandatory=True)
"""The list of efficacy indicators associated to this action plan"""
global_efficacy = wtypes.wsattr(types.jsontype, readonly=True)
"""The global efficacy of this action plan"""
state = wtypes.text
"""This action plan state"""
links = wtypes.wsattr([link.Link], readonly=True)
"""A list containing a self link and associated action links"""
hostname = wtypes.wsattr(wtypes.text, mandatory=False)
"""Hostname the actionplan is running on"""
def __init__(self, **kwargs):
super(ActionPlan, self).__init__()
self.fields = []
fields = list(objects.ActionPlan.fields)
for field in fields:
# Skip fields we do not expose.
if not hasattr(self, field):
continue
self.fields.append(field)
setattr(self, field, kwargs.get(field, wtypes.Unset))
self.fields.append('audit_uuid')
self.fields.append('efficacy_indicators')
setattr(self, 'audit_uuid', kwargs.get('audit_id', wtypes.Unset))
fields.append('strategy_uuid')
setattr(self, 'strategy_uuid', kwargs.get('strategy_id', wtypes.Unset))
fields.append('strategy_name')
setattr(self, 'strategy_name', kwargs.get('strategy_id', wtypes.Unset))
@staticmethod
def _convert_with_links(action_plan, url, expand=True):
if not expand:
action_plan.unset_fields_except(
['uuid', 'state', 'efficacy_indicators', 'global_efficacy',
'updated_at', 'audit_uuid', 'strategy_uuid', 'strategy_name'])
action_plan.links = [
link.Link.make_link(
'self', url,
'action_plans', action_plan.uuid),
link.Link.make_link(
'bookmark', url,
'action_plans', action_plan.uuid,
bookmark=True)]
return action_plan
@classmethod
def convert_with_links(cls, rpc_action_plan, expand=True):
action_plan = ActionPlan(**rpc_action_plan.as_dict())
hide_fields_in_newer_versions(action_plan)
return cls._convert_with_links(action_plan, pecan.request.host_url,
expand)
@classmethod
def sample(cls, expand=True):
sample = cls(uuid='9ef4d84c-41e8-4418-9220-ce55be0436af',
state='ONGOING',
created_at=datetime.datetime.utcnow(),
deleted_at=None,
updated_at=datetime.datetime.utcnow())
sample._audit_uuid = 'abcee106-14d3-4515-b744-5a26885cf6f6'
sample._efficacy_indicators = [{'description': 'Test indicator',
'name': 'test_indicator',
'unit': '%'}]
sample._global_efficacy = {'description': 'Global efficacy',
'name': 'test_global_efficacy',
'unit': '%'}
return cls._convert_with_links(sample, 'http://localhost:9322', expand)
class ActionPlanCollection(collection.Collection):
"""API representation of a collection of action_plans."""
action_plans = [ActionPlan]
"""A list containing action_plans objects"""
def __init__(self, **kwargs):
self._type = 'action_plans'
@staticmethod
def convert_with_links(rpc_action_plans, limit, url=None, expand=False,
**kwargs):
ap_collection = ActionPlanCollection()
ap_collection.action_plans = [ActionPlan.convert_with_links(
p, expand) for p in rpc_action_plans]
ap_collection.next = ap_collection.get_next(limit, url=url, **kwargs)
return ap_collection
@classmethod
def sample(cls):
sample = cls()
sample.action_plans = [ActionPlan.sample(expand=False)]
return sample
class ActionPlansController(rest.RestController):
"""REST controller for Actions."""
def __init__(self):
super(ActionPlansController, self).__init__()
self.applier_client = rpcapi.ApplierAPI()
from_actionsPlans = False
"""A flag to indicate if the requests to this controller are coming
from the top-level resource ActionPlan."""
_custom_actions = {
'start': ['POST'],
'detail': ['GET']
}
def _get_action_plans_collection(self, marker, limit,
sort_key, sort_dir, expand=False,
resource_url=None, audit_uuid=None,
strategy=None):
additional_fields = ['audit_uuid', 'strategy_uuid', 'strategy_name']
api_utils.validate_sort_key(
sort_key, list(objects.ActionPlan.fields) + additional_fields)
limit = api_utils.validate_limit(limit)
api_utils.validate_sort_dir(sort_dir)
marker_obj = None
if marker:
marker_obj = objects.ActionPlan.get_by_uuid(
pecan.request.context, marker)
filters = {}
if audit_uuid:
filters['audit_uuid'] = audit_uuid
if strategy:
if utils.is_uuid_like(strategy):
filters['strategy_uuid'] = strategy
else:
filters['strategy_name'] = strategy
need_api_sort = api_utils.check_need_api_sort(sort_key,
additional_fields)
sort_db_key = (sort_key if not need_api_sort
else None)
action_plans = objects.ActionPlan.list(
pecan.request.context,
limit,
marker_obj, sort_key=sort_db_key,
sort_dir=sort_dir, filters=filters)
action_plans_collection = ActionPlanCollection.convert_with_links(
action_plans, limit, url=resource_url, expand=expand,
sort_key=sort_key, sort_dir=sort_dir)
if need_api_sort:
api_utils.make_api_sort(action_plans_collection.action_plans,
sort_key, sort_dir)
return action_plans_collection
@wsme_pecan.wsexpose(ActionPlanCollection, types.uuid, int, wtypes.text,
wtypes.text, types.uuid, wtypes.text)
def get_all(self, marker=None, limit=None,
sort_key='id', sort_dir='asc', audit_uuid=None, strategy=None):
"""Retrieve a list of action plans.
:param marker: pagination marker for large data sets.
:param limit: maximum number of resources to return in a single result.
:param sort_key: column to sort results by. Default: id.
:param sort_dir: direction to sort. "asc" or "desc". Default: asc.
:param audit_uuid: Optional UUID of an audit, to get only actions
for that audit.
:param strategy: strategy UUID or name to filter by
"""
context = pecan.request.context
policy.enforce(context, 'action_plan:get_all',
action='action_plan:get_all')
return self._get_action_plans_collection(
marker, limit, sort_key, sort_dir,
audit_uuid=audit_uuid, strategy=strategy)
@wsme_pecan.wsexpose(ActionPlanCollection, types.uuid, int, wtypes.text,
wtypes.text, types.uuid, wtypes.text)
def detail(self, marker=None, limit=None,
sort_key='id', sort_dir='asc', audit_uuid=None, strategy=None):
"""Retrieve a list of action_plans with detail.
:param marker: pagination marker for large data sets.
:param limit: maximum number of resources to return in a single result.
:param sort_key: column to sort results by. Default: id.
:param sort_dir: direction to sort. "asc" or "desc". Default: asc.
:param audit_uuid: Optional UUID of an audit, to get only actions
for that audit.
:param strategy: strategy UUID or name to filter by
"""
context = pecan.request.context
policy.enforce(context, 'action_plan:detail',
action='action_plan:detail')
        # NOTE(lucasagomes): /detail should only work against collections
parent = pecan.request.path.split('/')[:-1][-1]
if parent != "action_plans":
raise exception.HTTPNotFound
expand = True
resource_url = '/'.join(['action_plans', 'detail'])
return self._get_action_plans_collection(
marker, limit, sort_key, sort_dir, expand,
resource_url, audit_uuid=audit_uuid, strategy=strategy)
@wsme_pecan.wsexpose(ActionPlan, types.uuid)
def get_one(self, action_plan_uuid):
"""Retrieve information about the given action plan.
        :param action_plan_uuid: UUID of an action plan.
"""
if self.from_actionsPlans:
raise exception.OperationNotPermitted
context = pecan.request.context
action_plan = api_utils.get_resource('ActionPlan', action_plan_uuid)
policy.enforce(
context, 'action_plan:get', action_plan, action='action_plan:get')
return ActionPlan.convert_with_links(action_plan)
@wsme_pecan.wsexpose(None, types.uuid, status_code=HTTPStatus.NO_CONTENT)
def delete(self, action_plan_uuid):
"""Delete an action plan.
        :param action_plan_uuid: UUID of an action plan.
"""
context = pecan.request.context
action_plan = api_utils.get_resource(
'ActionPlan', action_plan_uuid, eager=True)
policy.enforce(context, 'action_plan:delete', action_plan,
action='action_plan:delete')
allowed_states = (ap_objects.State.SUCCEEDED,
ap_objects.State.RECOMMENDED,
ap_objects.State.FAILED,
ap_objects.State.SUPERSEDED,
ap_objects.State.CANCELLED)
if action_plan.state not in allowed_states:
raise exception.DeleteError(
state=action_plan.state)
action_plan.soft_delete()
@wsme.validate(types.uuid, [ActionPlanPatchType])
@wsme_pecan.wsexpose(ActionPlan, types.uuid,
body=[ActionPlanPatchType])
def patch(self, action_plan_uuid, patch):
"""Update an existing action plan.
        :param action_plan_uuid: UUID of an action plan.
:param patch: a json PATCH document to apply to this action plan.
"""
if self.from_actionsPlans:
raise exception.OperationNotPermitted
context = pecan.request.context
action_plan_to_update = api_utils.get_resource(
'ActionPlan', action_plan_uuid, eager=True)
policy.enforce(context, 'action_plan:update', action_plan_to_update,
action='action_plan:update')
try:
action_plan_dict = action_plan_to_update.as_dict()
action_plan = ActionPlan(**api_utils.apply_jsonpatch(
action_plan_dict, patch))
except api_utils.JSONPATCH_EXCEPTIONS as e:
raise exception.PatchError(patch=patch, reason=e)
launch_action_plan = False
cancel_action_plan = False
# transitions that are allowed via PATCH
allowed_patch_transitions = [
(ap_objects.State.RECOMMENDED,
ap_objects.State.PENDING),
(ap_objects.State.RECOMMENDED,
ap_objects.State.CANCELLED),
(ap_objects.State.ONGOING,
ap_objects.State.CANCELLING),
(ap_objects.State.PENDING,
ap_objects.State.CANCELLED),
]
# todo: improve this in blueprint watcher-api-validation
if hasattr(action_plan, 'state'):
transition = (action_plan_to_update.state, action_plan.state)
if transition not in allowed_patch_transitions:
error_message = _("State transition not allowed: "
"(%(initial_state)s -> %(new_state)s)")
raise exception.PatchError(
patch=patch,
reason=error_message % dict(
initial_state=action_plan_to_update.state,
new_state=action_plan.state))
if action_plan.state == ap_objects.State.PENDING:
launch_action_plan = True
if action_plan.state == ap_objects.State.CANCELLED:
cancel_action_plan = True
# Update only the fields that have changed
for field in objects.ActionPlan.fields:
try:
patch_val = getattr(action_plan, field)
except AttributeError:
# Ignore fields that aren't exposed in the API
continue
if patch_val == wtypes.Unset:
patch_val = None
if action_plan_to_update[field] != patch_val:
action_plan_to_update[field] = patch_val
if (field == 'state' and
patch_val == objects.action_plan.State.PENDING):
launch_action_plan = True
action_plan_to_update.save()
        # NOTE: if the action plan is cancelled from the PENDING or RECOMMENDED
        # state, only the action states are updated here
if cancel_action_plan:
filters = {'action_plan_uuid': action_plan.uuid}
actions = objects.Action.list(pecan.request.context,
filters=filters, eager=True)
for a in actions:
a.state = objects.action.State.CANCELLED
a.save()
if launch_action_plan:
self.applier_client.launch_action_plan(pecan.request.context,
action_plan.uuid)
action_plan_to_update = objects.ActionPlan.get_by_uuid(
pecan.request.context,
action_plan_uuid)
return ActionPlan.convert_with_links(action_plan_to_update)
@wsme_pecan.wsexpose(ActionPlan, types.uuid)
def start(self, action_plan_uuid, **kwargs):
"""Start an action_plan
:param action_plan_uuid: UUID of an action_plan.
"""
action_plan_to_start = api_utils.get_resource(
'ActionPlan', action_plan_uuid, eager=True)
context = pecan.request.context
policy.enforce(context, 'action_plan:start', action_plan_to_start,
action='action_plan:start')
if action_plan_to_start['state'] != \
objects.action_plan.State.RECOMMENDED:
raise exception.StartError(
state=action_plan_to_start.state)
action_plan_to_start['state'] = objects.action_plan.State.PENDING
action_plan_to_start.save()
self.applier_client.launch_action_plan(pecan.request.context,
action_plan_uuid)
action_plan_to_start = objects.ActionPlan.get_by_uuid(
pecan.request.context, action_plan_uuid)
return ActionPlan.convert_with_links(action_plan_to_start)
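# --- Illustrative usage (not part of the original file): a minimal client-side
# sketch of driving this controller over HTTP with `requests`. The endpoint,
# port, token header and UUID are assumptions; real deployments normally sit
# behind Keystone authentication. Kept as a comment so this controller module
# stays side-effect free.
#
#     import requests
#
#     API = 'http://localhost:9322/v1/action_plans'   # hypothetical endpoint
#     HEADERS = {'X-Auth-Token': '<token>'}           # hypothetical token
#     uuid = '9ef4d84c-41e8-4418-9220-ce55be0436af'
#
#     # Start a RECOMMENDED action plan by PATCHing its state to PENDING,
#     # matching the allowed_patch_transitions table in patch() above.
#     patch = [{'op': 'replace', 'path': '/state', 'value': 'PENDING'}]
#     resp = requests.patch('{}/{}'.format(API, uuid), json=patch, headers=HEADERS)
#     resp.raise_for_status()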
| 39.149425
| 79
| 0.637279
| 2,766
| 23,842
| 5.267534
| 0.161605
| 0.082361
| 0.020178
| 0.012354
| 0.328552
| 0.273644
| 0.218257
| 0.177763
| 0.174674
| 0.15628
| 0
| 0.00402
| 0.28001
| 23,842
| 608
| 80
| 39.213816
| 0.844751
| 0.195453
| 0
| 0.190722
| 0
| 0
| 0.055565
| 0.003942
| 0
| 0
| 0
| 0.003289
| 0
| 1
| 0.074742
| false
| 0.005155
| 0.054124
| 0.012887
| 0.231959
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
0ad57f93e09c3cfa475ee8a3a4f941a9c684524d
| 1,613
|
py
|
Python
|
run.py
|
shark803/Torch_serve_example_NLP
|
7f7984a1668f21aced3a7a1e8ddac3c8e0ff0105
|
[
"MIT"
] | 1
|
2021-11-19T07:59:58.000Z
|
2021-11-19T07:59:58.000Z
|
run.py
|
shark803/Torch_serve_example_NLP
|
7f7984a1668f21aced3a7a1e8ddac3c8e0ff0105
|
[
"MIT"
] | null | null | null |
run.py
|
shark803/Torch_serve_example_NLP
|
7f7984a1668f21aced3a7a1e8ddac3c8e0ff0105
|
[
"MIT"
] | null | null | null |
# coding: UTF-8
import time
import torch
import numpy as np
from train_eval import train, init_network
from importlib import import_module
import argparse
parser = argparse.ArgumentParser(description='Chinese Text Classification')
parser.add_argument('--model', type=str, required=True, help='choose a model: TextCNN')
parser.add_argument('--embedding', default='pre_trained', type=str, help='random or pre_trained')
parser.add_argument('--word', default=False, type=bool, help='True for word, False for char')
args = parser.parse_args()
if __name__ == '__main__':
    dataset = 'THUCNews'  # dataset name
    # Sogou News: embedding_SougouNews.npz, Tencent: embedding_Tencent.npz, random init: random
# embedding = 'random'
model_name = args.model # TextCNN
from utils import build_dataset, build_iterator, get_time_dif
    model_module = import_module('models.' + model_name)
from config import Config
config = Config(dataset)
np.random.seed(1)
torch.manual_seed(1)
torch.cuda.manual_seed_all(1)
    torch.backends.cudnn.deterministic = True  # ensure reproducible results across runs
start_time = time.time()
print("Loading data...")
vocab, train_data, dev_data, test_data = build_dataset(config, args.word)
train_iter = build_iterator(train_data, config)
dev_iter = build_iterator(dev_data, config)
test_iter = build_iterator(test_data, config)
time_dif = get_time_dif(start_time)
print("Time usage:", time_dif)
# train
config.n_vocab = len(vocab)
    model = model_module.Model().to(config.device)
init_network(model)
print(model.parameters)
train(config, model, train_iter, dev_iter, test_iter)
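# --- Illustrative usage (not part of the original file): the script is meant
# to be launched from the command line, e.g.
#
#     python run.py --model TextCNN --embedding pre_trained
#     python run.py --model TextCNN --embedding random --word True
#
# (Note: argparse's type=bool treats any non-empty string as True, so omit
# --word entirely to keep the default False.)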
| 32.918367
| 97
| 0.726596
| 223
| 1,613
| 5.017937
| 0.403587
| 0.04647
| 0.045576
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.002959
| 0.16181
| 1,613
| 48
| 98
| 33.604167
| 0.824704
| 0.081215
| 0
| 0
| 0
| 0
| 0.12483
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.257143
| 0
| 0.257143
| 0.085714
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
0ad630d29820371f228b1287947197de5ede3fb0
| 5,954
|
py
|
Python
|
tests/mb_util.py
|
vasilydenisenko/modbus_rtu_slave
|
8a531b776ab82c60b5d335f0565468f19a7801f5
|
[
"MIT"
] | null | null | null |
tests/mb_util.py
|
vasilydenisenko/modbus_rtu_slave
|
8a531b776ab82c60b5d335f0565468f19a7801f5
|
[
"MIT"
] | null | null | null |
tests/mb_util.py
|
vasilydenisenko/modbus_rtu_slave
|
8a531b776ab82c60b5d335f0565468f19a7801f5
|
[
"MIT"
] | null | null | null |
# MIT License
# Copyright (c) 2021 Vasily Denisenko, Sergey Kuznetsov
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import mb_bsp
PDU_SIZE_REG = 0
CONFIG_REG = 1
SLAVE_ADDR_REG = 2
CS_REG = 3
MB_MAX_WRITE_REGNUM = 123
MB_MAX_READ_REGNUM = 125
MB_MAX_REG_ADDR = 65535
MB_MAX_REG_VAL = 65535
MB_MAX_SLAVE_ADDR = 247
MB_MIN_SLAVE_ADDR = 1
MB_MAX_PDU_SIZE = 253
MB_MIN_PDU_SIZE = 1
FCODE_0x3 = 0x3
FCODE_0x6 = 0x6
FCODE_0x10 = 0x10
def incr_err_count():
incr_err_count.count += 1
setattr(incr_err_count, 'count', 0)
def wait_mb_master_status(status):
mb_bsp.wait_master_status(status) # 'FSM status' or 'PDU status'
if mb_bsp.alarm_cb.status_timeout == 1:
        print('*** Test FAILED: ', status, ' timeout ***')
mb_bsp.alarm_cb.status_timeout = 0
incr_err_count()
def config_modbus(modbus_role, slave_addr, pdu, config_val):
wait_mb_master_status('FSM status')
if modbus_role == 'Master':
mb_bsp.write_mb_master_cs(CONFIG_REG, config_val) # Set configuration
mb_bsp.write_mb_master_cs(SLAVE_ADDR_REG, slave_addr) # Set slave address
mb_bsp.write_mb_master_cs(PDU_SIZE_REG, len(pdu)) # Set request PDU size
mb_bsp.write_mb_master_pdu(pdu) # Set request PDU
else:
mb_bsp.write_mb_slave_cs(CONFIG_REG, config_val) # Set configuration
mb_bsp.write_mb_slave_cs(SLAVE_ADDR_REG, slave_addr) # Set slave address
def generate_0x03_pdu(addr, regnum):
pdu = list()
ref_pdu = list()
pdu.append(0x3)
ref_pdu.append(0x3)
addr_h = (addr & 0xff00) >> 8
pdu.append(addr_h)
addr_l = (addr & 0xff)
pdu.append(addr_l)
regnum_h = (regnum & 0xff00) >> 8
pdu.append(regnum_h)
regnum_l = regnum & 0xff
pdu.append(regnum_l)
bytecount = regnum << 1
ref_pdu.append(bytecount)
for i in range(bytecount):
ref_pdu.append(0)
return [pdu, ref_pdu]
def generate_0x06_pdu(addr, regval):
pdu = list()
pdu.append(0x6)
addr_h = (addr & 0xff00) >> 8
pdu.append(addr_h)
addr_l = (addr & 0xff)
pdu.append(addr_l)
regval_h = (regval[0] & 0xff00) >> 8
pdu.append(regval_h)
regval_l = regval[0] & 0xff
pdu.append(regval_l)
ref_pdu = pdu.copy()
return [pdu, ref_pdu]
def generate_0x10_pdu(addr, regnum, regval):
pdu = list()
pdu.append(0x10)
addr_h = (addr & 0xff00) >> 8
pdu.append(addr_h)
addr_l = (addr & 0xff)
pdu.append(addr_l)
regnum_h = (regnum & 0xff00) >> 8
pdu.append(regnum_h)
regnum_l = regnum & 0xff
pdu.append(regnum_l)
ref_pdu = pdu.copy()
bytecount = regnum_l << 1
pdu.append(bytecount)
for i in range(regnum_l):
regval_h = (regval[i] & 0xff00) >> 8
pdu.append(regval_h)
regval_l = regval[i] & 0xff
pdu.append(regval_l)
return [pdu, ref_pdu]
def print_test_result(result_ok):
if result_ok:
msg = '\tTest Successful'
else:
msg = '\tTest FAILED'
print()
print('***************************')
print(msg)
print('***************************')
print()
def get_total_error_count(modbus_role):
count = 0
error_tuple = mb_bsp.get_error_count()
if modbus_role == 'Both':
for err_list in error_tuple:
for i in err_list:
count += i
elif modbus_role == 'Master':
for i in error_tuple[0]:
count += i
elif modbus_role == 'Slave':
for i in error_tuple[1]:
count += i
return count
def get_single_error_count(modbus_role, error_type):
error_tuple = mb_bsp.get_error_count()
count = 0
if modbus_role == 'Master':
if error_type == 'parity':
count = error_tuple[0][0]
elif error_type == 'start bit':
count = error_tuple[0][1]
elif error_type == 'stop bit':
count = error_tuple[0][2]
elif error_type == 'address':
count = error_tuple[0][3]
elif error_type == 'crc':
count = error_tuple[0][4]
elif modbus_role == 'Slave':
if error_type == 'parity':
count = error_tuple[1][0]
elif error_type == 'start bit':
count = error_tuple[1][1]
elif error_type == 'stop bit':
count = error_tuple[1][2]
elif error_type == 'address':
count = error_tuple[1][3]
elif error_type == 'crc':
count = error_tuple[1][4]
return count
def print_error_count():
error_tuple = mb_bsp.get_error_count()
print()
print('master_parity_err_count = ', error_tuple[0][0])
print('master_start_bit_err_count = ', error_tuple[0][1])
print('master_stop_bit_err_count = ', error_tuple[0][2])
print('master_addr_err_count = ', error_tuple[0][3])
print('master_crc_err_count = ', error_tuple[0][4])
print('slave_parity_err_count = ', error_tuple[1][0])
print('slave_start_bit_err_count = ', error_tuple[1][1])
print('slave_stop_bit_err_count = ', error_tuple[1][2])
print('slave_addr_err_count = ', error_tuple[1][3])
print('slave_crc_err_count = ', error_tuple[1][4])
print('--------------------------------')
print()
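# --- Illustrative usage (not part of the original file): a minimal sketch of
# the PDU generators above. Register address and values are arbitrary; running
# it only assumes mb_bsp is importable, as the module already requires.
if __name__ == '__main__':
    req, ref = generate_0x03_pdu(addr=0x0010, regnum=2)
    print('0x03 request PDU :', [hex(b) for b in req])
    print('0x03 reference   :', [hex(b) for b in ref])
    req, ref = generate_0x10_pdu(addr=0x0010, regnum=2, regval=[0x1234, 0xABCD])
    print('0x10 request PDU :', [hex(b) for b in req])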
| 25.553648
| 81
| 0.673161
| 915
| 5,954
| 4.113661
| 0.204372
| 0.069075
| 0.083688
| 0.042508
| 0.450319
| 0.349362
| 0.276567
| 0.221838
| 0.185707
| 0.107598
| 0
| 0.032054
| 0.203561
| 5,954
| 233
| 82
| 25.553648
| 0.761704
| 0.205576
| 0
| 0.423841
| 0
| 0
| 0.114662
| 0.056325
| 0
| 0
| 0.020787
| 0
| 0
| 1
| 0.066225
| false
| 0
| 0.006623
| 0
| 0.10596
| 0.139073
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
0ad85408ba998c356a370a0f1582159d01f77a69
| 8,390
|
py
|
Python
|
carto/maps.py
|
danicarrion/carto-python
|
631b018f065960baa35473e2087ce598560b9e17
|
[
"BSD-3-Clause"
] | 85
|
2016-08-07T16:46:58.000Z
|
2022-03-23T01:44:02.000Z
|
carto/maps.py
|
danicarrion/carto-python
|
631b018f065960baa35473e2087ce598560b9e17
|
[
"BSD-3-Clause"
] | 109
|
2016-08-02T18:40:04.000Z
|
2021-08-23T08:08:02.000Z
|
carto/maps.py
|
danicarrion/carto-python
|
631b018f065960baa35473e2087ce598560b9e17
|
[
"BSD-3-Clause"
] | 29
|
2016-11-29T03:42:47.000Z
|
2022-01-23T17:37:11.000Z
|
"""
Module for working with named and anonymous maps
.. module:: carto.maps
:platform: Unix, Windows
:synopsis: Module for working with named and anonymous maps
.. moduleauthor:: Daniel Carrion <daniel@carto.com>
.. moduleauthor:: Alberto Romeu <alrocar@carto.com>
"""
try:
from urllib.parse import urljoin
except ImportError:
from urlparse import urljoin
from pyrestcli.resources import Manager, Resource
from .exceptions import CartoException, CartoRateLimitException
API_VERSION = "v1"
NAMED_API_ENDPOINT = "api/{api_version}/map/named/"
ANONYMOUS_API_ENDPOINT = "api/{api_version}/map/"
class BaseMap(Resource):
"""
Base class for NamedMap and AnonymousMap
"""
def __init__(self, auth_client):
"""
Initializes a BaseMap instance
:param auth_client: Auth client
"""
super(BaseMap, self).__init__(auth_client)
def get_tile_url(self, x, y, z, layer_id=None, feature_id=None,
filter=None, extension="png"):
"""
Prepares a URL to get data (raster or vector) from a NamedMap or
AnonymousMap
:param x: The x tile
:param y: The y tile
:param z: The zoom level
:param layer_id: Can be a number (referring to the # layer of your \
map), all layers of your map, or a list of layers.
To show just the basemap layer, enter the value 0
To show the first layer, enter the value 1
To show all layers, enter the value 'all'
To show a list of layers, enter the comma separated \
layer value as '0,1,2'
:param feature_id: The id of the feature
:param filter: The filter to be applied to the layer
:param extension: The format of the data to be retrieved: png, mvt, ...
:type x: int
:type y: int
:type z: int
:type layer_id: str
:type feature_id: str
:type filter: str
:type extension: str
:return: A URL to download data
:rtype: str
:raise: CartoException
"""
base_url = self.client.base_url + self.Meta.collection_endpoint
template_id = self.template_id if hasattr(self, 'template_id') \
else self.layergroupid
if layer_id is not None and feature_id is not None:
url = urljoin(base_url,
"{template_id}/{layer}/attributes/{feature_id}"). \
format(template_id=template_id,
layer=layer_id,
feature_id=feature_id)
elif layer_id is not None and filter is not None:
url = urljoin(base_url,
"{template_id}/{filter}/{z}/{x}/{y}.{extension}"). \
format(template_id=template_id,
filter=filter,
z=z, x=x, y=y,
extension=extension)
elif layer_id is not None:
url = urljoin(base_url,
"{template_id}/{layer}/{z}/{x}/{y}.{extension}"). \
format(template_id=template_id,
layer=layer_id,
z=z, x=x, y=y,
extension=extension)
else:
url = urljoin(base_url, "{template_id}/{z}/{x}/{y}.{extension}"). \
format(
template_id=template_id,
z=z, x=x, y=y,
extension=extension)
if hasattr(self, 'auth') and self.auth is not None \
and len(self.auth['valid_tokens']) > 0:
url = urljoin(url, "?auth_token={auth_token}"). \
format(auth_token=self.auth['valid_tokens'][0])
return url
class NamedMap(BaseMap):
"""
Equivalent to creating a named map in CARTO.
"""
class Meta:
collection_endpoint = NAMED_API_ENDPOINT.format(
api_version=API_VERSION)
id_field = "template_id"
name_field = "name"
    def __str__(self):
        try:
            # Python 2: `unicode` exists, so return an encoded byte string
            return unicode(self.name).encode("utf-8")
        except (AttributeError, NameError):
            # Python 3 (`unicode` is undefined) or the name is missing:
            # fall back to the default repr
            return super(NamedMap, self).__repr__()
def __init__(self, auth_client):
"""
Initializes a NamedMap instance
:param auth_client: Auth client
"""
self.fields = ["version",
"name",
"auth",
"placeholders",
"layergroup",
"view"]
        # Optional fields can be assigned by some responses (create,
        # instantiate), but are not saved to the backend
self.optional_fields = ["template_id", "layergroupid", "last_updated"]
super(NamedMap, self).__init__(auth_client)
def instantiate(self, params, auth=None):
"""
Allows you to fetch the map tiles of a created map
:param params: The json with the styling info for the named map
:param auth: The auth client
:type params: dict
:type auth: :class:`carto.auth.APIKeyAuthClient`
:return:
:raise: CartoException
"""
try:
endpoint = (self.Meta.collection_endpoint
+ "{template_id}"). \
format(template_id=self.template_id)
if (auth is not None):
endpoint = (endpoint + "?auth_token={auth_token}"). \
format(auth_token=auth)
self.send(endpoint, "POST", json=params)
except CartoRateLimitException as e:
raise e
except Exception as e:
raise CartoException(e)
def update_from_dict(self, attribute_dict):
"""
Method overriden from the base class
"""
if 'template' in attribute_dict:
self.update_from_dict(attribute_dict['template'])
setattr(self,
self.Meta.id_field, attribute_dict['template']['name'])
return
try:
for k, v in attribute_dict.items():
if k in self.fields + self.optional_fields:
setattr(self, k, v)
except Exception:
setattr(self, self.Meta.id_field, attribute_dict)
class AnonymousMap(BaseMap):
"""
Equivalent to creating an anonymous map in CARTO.
"""
class Meta:
collection_endpoint = ANONYMOUS_API_ENDPOINT.format(
api_version=API_VERSION)
def __init__(self, auth_client):
"""
Initializes an AnonymousMap instance
:param auth_client: Auth client
"""
self.optional_fields = ['cdn_url', 'last_updated', 'layergroupid', 'metadata']
super(AnonymousMap, self).__init__(auth_client)
def instantiate(self, params):
"""
Allows you to fetch the map tiles of a created map
:param params: The json with the styling info for the named map
:type params: dict
:return:
:raise: CartoException
"""
try:
self.send(self.Meta.collection_endpoint, "POST", json=params)
except CartoRateLimitException as e:
raise e
except Exception as e:
raise CartoException(e)
def update_from_dict(self, attribute_dict):
for k, v in attribute_dict.items():
if k in self.fields + self.optional_fields:
setattr(self, k, v)
class NamedMapManager(Manager):
"""
Manager for the NamedMap class
"""
resource_class = NamedMap
json_collection_attribute = "template_ids"
def create(self, **kwargs):
"""
Creates a named map
:param kwargs: Attributes for creating the named map. Specifically
an attribute `template` must contain the JSON object
defining the named map
:type kwargs: kwargs
:return: New named map object
:rtype: NamedMap
:raise: CartoException
"""
resource = self.resource_class(self.client)
resource.update_from_dict(kwargs['template'])
resource.save(force_create=True)
return resource
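# --- Illustrative usage (not part of the original file): a minimal sketch,
# assuming an APIKeyAuthClient from carto.auth; the template dict below is a
# bare skeleton, not a complete CARTO named-map template. Kept as a comment
# since it performs network calls.
#
#     from carto.auth import APIKeyAuthClient
#
#     auth_client = APIKeyAuthClient(base_url='https://<user>.carto.com/',
#                                    api_key='<key>')
#     manager = NamedMapManager(auth_client)
#     named = manager.create(template={'name': 'tpl_demo',
#                                      'version': '0.0.1',
#                                      'placeholders': {},
#                                      'layergroup': {'layers': []}})
#     tile_url = named.get_tile_url(x=0, y=0, z=0, layer_id='all')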
| 33.293651
| 86
| 0.555662
| 938
| 8,390
| 4.815565
| 0.203625
| 0.044277
| 0.013947
| 0.009741
| 0.423733
| 0.410228
| 0.341377
| 0.263006
| 0.193048
| 0.148771
| 0
| 0.001666
| 0.356138
| 8,390
| 251
| 87
| 33.426295
| 0.834506
| 0.288439
| 0
| 0.352941
| 0
| 0
| 0.096576
| 0.050428
| 0
| 0
| 0
| 0
| 0
| 1
| 0.084034
| false
| 0
| 0.042017
| 0
| 0.235294
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
0ad8ce46348b78515a8db8b2c9bc54898f1ab6f9
| 1,208
|
py
|
Python
|
pytorch-frontend/benchmarks/operator_benchmark/pt/embeddingbag_test.py
|
AndreasKaratzas/stonne
|
2915fcc46cc94196303d81abbd1d79a56d6dd4a9
|
[
"MIT"
] | 206
|
2020-11-28T22:56:38.000Z
|
2022-03-27T02:33:04.000Z
|
pytorch-frontend/benchmarks/operator_benchmark/pt/embeddingbag_test.py
|
AndreasKaratzas/stonne
|
2915fcc46cc94196303d81abbd1d79a56d6dd4a9
|
[
"MIT"
] | 19
|
2020-12-09T23:13:14.000Z
|
2022-01-24T23:24:08.000Z
|
pytorch-frontend/benchmarks/operator_benchmark/pt/embeddingbag_test.py
|
AndreasKaratzas/stonne
|
2915fcc46cc94196303d81abbd1d79a56d6dd4a9
|
[
"MIT"
] | 28
|
2020-11-29T15:25:12.000Z
|
2022-01-20T02:16:27.000Z
|
import operator_benchmark as op_bench
import torch
import numpy
from . import configs
"""EmbeddingBag Operator Benchmark"""
class EmbeddingBagBenchmark(op_bench.TorchBenchmarkBase):
def init(self, embeddingbags, dim, mode, input_size, offset, sparse, include_last_offset, device):
self.embedding = torch.nn.EmbeddingBag(
num_embeddings=embeddingbags,
embedding_dim=dim,
mode=mode,
include_last_offset=include_last_offset,
sparse=sparse).to(device=device)
numpy.random.seed((1 << 32) - 1)
self.input = torch.tensor(numpy.random.randint(0, embeddingbags, input_size), device=device).long()
        # the legacy torch.LongTensor constructor does not accept non-CPU devices;
        # build both pieces with torch.tensor so they live on `device` before cat
        offsets = torch.tensor([offset], dtype=torch.long, device=device)
        self.offset = torch.cat((offsets, torch.tensor([self.input.size(0)], dtype=torch.long, device=device)), 0)
self.set_module_name('embeddingbag')
def forward(self):
return self.embedding(self.input, self.offset)
op_bench.generate_pt_test(configs.embeddingbag_short_configs, EmbeddingBagBenchmark)
op_bench.generate_pt_gradient_test(configs.embeddingbag_short_configs, EmbeddingBagBenchmark)
if __name__ == "__main__":
op_bench.benchmark_runner.main()
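# --- Illustrative usage (not part of the original file): operator_benchmark
# modules are launched directly; the flag below is an assumption about the
# op_bench runner's CLI.
#
#     python -m pt.embeddingbag_test --iterations 100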
| 38.967742
| 107
| 0.724338
| 144
| 1,208
| 5.826389
| 0.395833
| 0.041716
| 0.060787
| 0.040524
| 0.133492
| 0.133492
| 0
| 0
| 0
| 0
| 0
| 0.006979
| 0.169702
| 1,208
| 30
| 108
| 40.266667
| 0.829511
| 0
| 0
| 0
| 0
| 0
| 0.017079
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.086957
| false
| 0
| 0.173913
| 0.043478
| 0.347826
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
0ad9fee81c50ef01672c1f7b553d66bc07bc9155
| 3,972
|
py
|
Python
|
python/dgl/geometry/capi.py
|
lfchener/dgl
|
77f4287a4118db64c46f4f413a426e1419a09d53
|
[
"Apache-2.0"
] | 9,516
|
2018-12-08T22:11:31.000Z
|
2022-03-31T13:04:33.000Z
|
python/dgl/geometry/capi.py
|
lfchener/dgl
|
77f4287a4118db64c46f4f413a426e1419a09d53
|
[
"Apache-2.0"
] | 2,494
|
2018-12-08T22:43:00.000Z
|
2022-03-31T21:16:27.000Z
|
python/dgl/geometry/capi.py
|
lfchener/dgl
|
77f4287a4118db64c46f4f413a426e1419a09d53
|
[
"Apache-2.0"
] | 2,529
|
2018-12-08T22:56:14.000Z
|
2022-03-31T13:07:41.000Z
|
"""Python interfaces to DGL farthest point sampler."""
from dgl._ffi.base import DGLError
import numpy as np
from .._ffi.function import _init_api
from .. import backend as F
from .. import ndarray as nd
def _farthest_point_sampler(data, batch_size, sample_points, dist, start_idx, result):
r"""Farthest Point Sampler
Parameters
----------
data : tensor
A tensor of shape (N, d) where N is the number of points and d is the dimension.
batch_size : int
The number of batches in the ``data``. N should be divisible by batch_size.
sample_points : int
The number of points to sample in each batch.
dist : tensor
Pre-allocated tensor of shape (N, ) holding each point's distance to the sampled set.
start_idx : tensor of int
Pre-allocated tensor of shape (batch_size, ) for the starting sample in each batch.
result : tensor of int
Pre-allocated tensor of shape (sample_points * batch_size, ) for the sampled index.
Returns
-------
No return value. The input variable ``result`` will be overwritten with sampled indices.
"""
assert F.shape(data)[0] >= sample_points * batch_size
assert F.shape(data)[0] % batch_size == 0
_CAPI_FarthestPointSampler(F.zerocopy_to_dgl_ndarray(data),
batch_size, sample_points,
F.zerocopy_to_dgl_ndarray(dist),
F.zerocopy_to_dgl_ndarray(start_idx),
F.zerocopy_to_dgl_ndarray(result))
def _neighbor_matching(graph_idx, num_nodes, edge_weights=None, relabel_idx=True):
"""
Description
-----------
The neighbor matching procedure of edge coarsening used in
`Metis <http://cacs.usc.edu/education/cs653/Karypis-METIS-SIAMJSC98.pdf>`__
and
`Graclus <https://www.cs.utexas.edu/users/inderjit/public_papers/multilevel_pami.pdf>`__
for homogeneous graph coarsening. This procedure keeps picking an unmarked
vertex and matching it with one of its unmarked neighbors (the one that
maximizes its edge weight) until no match can be done.
If no edge weight is given, this procedure will randomly pick a neighbor for each
vertex.
The GPU implementation is based on `A GPU Algorithm for Greedy Graph Matching
<http://www.staff.science.uu.nl/~bisse101/Articles/match12.pdf>`__
NOTE: The input graph must be a bi-directed (undirected) graph. Call :obj:`dgl.to_bidirected`
if you are not sure your graph is bi-directed.
Parameters
----------
graph_idx : HeteroGraphIndex
The input homogeneous graph.
num_nodes : int
The number of nodes in this homogeneous graph.
edge_weights : tensor, optional
The edge weight tensor holding non-negative scalar weight for each edge.
default: :obj:`None`
relabel_idx : bool, optional
If true, relabel resulting node labels to have consecutive node ids.
default: :obj:`True`
Returns
-------
a 1-D tensor
A vector in which each element indicates the cluster ID of a vertex.
"""
edge_weight_capi = nd.NULL["int64"]
if edge_weights is not None:
edge_weight_capi = F.zerocopy_to_dgl_ndarray(edge_weights)
node_label = F.full_1d(
num_nodes, -1, getattr(F, graph_idx.dtype), F.to_backend_ctx(graph_idx.ctx))
node_label_capi = F.zerocopy_to_dgl_ndarray_for_write(node_label)
_CAPI_NeighborMatching(graph_idx, edge_weight_capi, node_label_capi)
if F.reduce_sum(node_label < 0).item() != 0:
raise DGLError("Find unmatched node")
# reorder node id
# TODO: actually we can add `return_inverse` option for `unique`
# function in backend for efficiency.
if relabel_idx:
node_label_np = F.zerocopy_to_numpy(node_label)
_, node_label_np = np.unique(node_label_np, return_inverse=True)
return F.tensor(node_label_np)
else:
return node_label
_init_api('dgl.geometry', __name__)
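A hedged illustration of the pre-allocation contract documented for `_farthest_point_sampler`; the tensor shapes follow the docstring, and the snippet assumes DGL built with the PyTorch backend.

```python
import torch
from dgl.geometry.capi import _farthest_point_sampler

N, d, batch_size, sample_points = 8, 3, 2, 2
data = torch.rand(N, d)                                 # (N, d) point cloud
dist = torch.zeros(N)                                   # to-sample distances
start_idx = torch.zeros(batch_size, dtype=torch.long)   # first sample per batch
result = torch.zeros(sample_points * batch_size, dtype=torch.long)

_farthest_point_sampler(data, batch_size, sample_points, dist, start_idx, result)
print(result)  # sampled indices, written in place
```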
| 38.563107
| 95
| 0.680514
| 558
| 3,972
| 4.646953
| 0.370968
| 0.03818
| 0.029695
| 0.032395
| 0.121481
| 0.04705
| 0.027767
| 0.027767
| 0
| 0
| 0
| 0.00659
| 0.235901
| 3,972
| 102
| 96
| 38.941176
| 0.847776
| 0.558912
| 0
| 0
| 0
| 0
| 0.023529
| 0
| 0
| 0
| 0
| 0.009804
| 0.064516
| 1
| 0.064516
| false
| 0
| 0.16129
| 0
| 0.290323
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
0adab04d82e555974b5ee3aecff08feca7c75415
| 6,478
|
py
|
Python
|
scidb/core/data.py
|
oxdc/sci.db
|
0a751a0e05e7ad4c83c350e32e32ea9ce5831cbb
|
[
"MIT"
] | null | null | null |
scidb/core/data.py
|
oxdc/sci.db
|
0a751a0e05e7ad4c83c350e32e32ea9ce5831cbb
|
[
"MIT"
] | null | null | null |
scidb/core/data.py
|
oxdc/sci.db
|
0a751a0e05e7ad4c83c350e32e32ea9ce5831cbb
|
[
"MIT"
] | null | null | null |
import shutil
import hashlib
from pathlib import Path
from typing import TextIO, BinaryIO, IO, Union
from datetime import datetime
from os.path import getmtime
from .low import ObservableDict
class Data:
def __init__(self, data_name: str, parent, bucket,
protected_parent_methods: Union[None, dict] = None):
self.__data_name__ = data_name
self.__parent__ = parent
self.__bucket__ = bucket
self.__protected_parent_methods__ = protected_parent_methods
self.__protected_parent_methods__['increase_data_count']()
self.init_metadata()
self.init_properties()
@property
def database(self):
return self.__bucket__.db
@property
def db(self):
return self.__bucket__.db
@property
def bucket(self):
return self.__bucket__
def init_metadata(self):
if self.__data_name__ not in self.__parent__.metadata:
self.__parent__.metadata[self.__data_name__] = dict()
def init_properties(self):
if self.__data_name__ not in self.__parent__.properties:
self.__parent__.properties[self.__data_name__] = dict()
def set_metadata(self, metadata: Union[None, dict], merge: bool = True):
if metadata is None:
return
if merge:
metadata = {**self.metadata, **metadata}
self.__parent__.metadata[self.__data_name__] = metadata
def set_properties(self, properties: Union[None, dict], merge: bool = True):
if properties is None:
return
if merge:
properties = {**self.properties, **properties}
self.__parent__.properties[self.__data_name__] = properties
@property
def parent(self):
return self.__parent__
@property
def path(self) -> Path:
return self.__parent__.path / self.__data_name__
@property
def name(self) -> str:
return self.__data_name__
@property
def metadata(self) -> ObservableDict:
return self.__parent__.metadata[self.__data_name__]
@property
def properties(self) -> ObservableDict:
return self.__parent__.properties[self.__data_name__]
def rename(self, new_name: str):
shutil.move(str(self.path), str(self.__parent__.path / new_name))
self.__data_name__ = new_name
def reader(self, binary: bool = False, **kwargs) -> Union[IO, BinaryIO, TextIO, None]:
mode = 'r'
mode += 'b' if binary else ''
return open(str(self.path), mode=mode, **kwargs)
def creator(self,
binary: bool = False,
confirm: bool = False,
feedback: bool = False,
**kwargs) -> Union[IO, BinaryIO, TextIO, None]:
if confirm and not feedback:
return None
mode = 'x'
mode += 'b' if binary else ''
return open(str(self.path), mode=mode, **kwargs)
def writer(self,
binary: bool = False,
append: bool = True,
allow_overwrite: bool = False,
confirm: bool = True,
feedback: bool = False,
**kwargs) -> Union[IO, BinaryIO, TextIO, None]:
if not allow_overwrite and not append:
raise PermissionError('Trying to overwrite existing data.')
if confirm and not feedback:
return
mode = 'a' if append else 'w'
mode += 'b' if binary else ''
return open(str(self.path), mode=mode, **kwargs)
def __repr__(self):
return f"Data('{self.__data_name__}')"
def import_file(self, src_path: Union[str, Path], allow_overwrite=False, confirm=True, feedback=False):
if self.path.exists() and not allow_overwrite:
return
if confirm and not feedback:
return
shutil.copyfile(str(src_path), str(self.path))
def export_file(self, dst_path: Union[str, Path], allow_overwrite=False):
if Path(dst_path).exists() and not allow_overwrite:
return
shutil.copyfile(str(self.path), str(dst_path))
def __calc_hash__(self, h, buffer_size: int = 131072):
if not self.path.exists():
return None
with open(str(self.path), 'rb') as file_reader:
while True:
data = file_reader.read(buffer_size)
if not data:
break
h.update(data)
return h.hexdigest()
def md5(self, buffer_size: int = 131072, require_update: bool = False) -> Union[str, None]:
if not self.path.exists():
return None
last_modified_time = getmtime(str(self.path))
if require_update \
or 'md5' not in self.metadata \
or 'md5-timestamp' not in self.metadata \
or self.metadata['md5-timestamp'] < last_modified_time:
result = self.__calc_hash__(hashlib.md5(), buffer_size)
self.metadata['md5'] = result
self.metadata['md5-timestamp'] = datetime.now().timestamp()
return result
else:
return self.metadata['md5']
def sha1(self, buffer_size: int = 131072, require_update: bool = False) -> Union[str, None]:
if not self.path.exists():
return None
last_modified_time = getmtime(str(self.path))
if require_update \
or 'sha1' not in self.metadata \
or 'sha1-timestamp' not in self.metadata \
or self.metadata['sha1-timestamp'] < last_modified_time:
result = self.__calc_hash__(hashlib.sha1(), buffer_size)
self.metadata['sha1'] = result
self.metadata['sha1-timestamp'] = datetime.now().timestamp()
return result
else:
return self.metadata['sha1']
def sha256(self, buffer_size: int = 131072, require_update: bool = False) -> Union[str, None]:
if not self.path.exists():
return None
last_modified_time = getmtime(str(self.path))
if require_update \
or 'sha256' not in self.metadata \
or 'sha256-timestamp' not in self.metadata \
or self.metadata['sha256-timestamp'] < last_modified_time:
result = self.__calc_hash__(hashlib.sha256(), buffer_size)
self.metadata['sha256'] = result
self.metadata['sha256-timestamp'] = datetime.now().timestamp()
return result
else:
return self.metadata['sha256']
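The three digest methods above repeat one caching pattern; a possible refactoring, sketched below, keys it by algorithm name via `hashlib.new`. `cached_digest` is a hypothetical helper, not part of this module.

```python
import hashlib
from datetime import datetime
from os.path import getmtime

def cached_digest(data, algo: str, buffer_size: int = 131072,
                  require_update: bool = False):
    """Return the cached digest of a Data instance, recomputing if stale."""
    if not data.path.exists():
        return None
    stamp_key = f'{algo}-timestamp'
    last_modified = getmtime(str(data.path))
    if (require_update or algo not in data.metadata
            or stamp_key not in data.metadata
            or data.metadata[stamp_key] < last_modified):
        result = data.__calc_hash__(hashlib.new(algo), buffer_size)
        data.metadata[algo] = result
        data.metadata[stamp_key] = datetime.now().timestamp()
        return result
    return data.metadata[algo]
```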
| 35.988889
| 102
| 0.599105
| 749
| 6,478
| 4.878505
| 0.142857
| 0.065681
| 0.045977
| 0.027915
| 0.552271
| 0.493706
| 0.437055
| 0.32266
| 0.289819
| 0.206076
| 0
| 0.014075
| 0.298086
| 6,478
| 179
| 103
| 36.189944
| 0.789532
| 0
| 0
| 0.350649
| 0
| 0
| 0.039673
| 0.004322
| 0
| 0
| 0
| 0
| 0
| 1
| 0.155844
| false
| 0
| 0.051948
| 0.058442
| 0.409091
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
0adacd25859bed18399a4d523ba68cd8adb2bc90
| 39,932
|
py
|
Python
|
tensorflow/python/keras/optimizer_v2/optimizer_v2.py
|
PaulWang1905/tensorflow
|
ebf12d22b4801fb8dab5034cc94562bf7cc33fa0
|
[
"Apache-2.0"
] | 9
|
2019-12-29T01:47:37.000Z
|
2021-12-21T13:47:41.000Z
|
tensorflow/python/keras/optimizer_v2/optimizer_v2.py
|
PaulWang1905/tensorflow
|
ebf12d22b4801fb8dab5034cc94562bf7cc33fa0
|
[
"Apache-2.0"
] | null | null | null |
tensorflow/python/keras/optimizer_v2/optimizer_v2.py
|
PaulWang1905/tensorflow
|
ebf12d22b4801fb8dab5034cc94562bf7cc33fa0
|
[
"Apache-2.0"
] | 1
|
2020-05-28T11:22:49.000Z
|
2020-05-28T11:22:49.000Z
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Version 2 of class Optimizer."""
# pylint: disable=g-bad-name
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import abc
import functools
import six
from tensorflow.python.distribute import distribution_strategy_context as distribute_ctx
from tensorflow.python.distribute import reduce_util as ds_reduce_util
from tensorflow.python.eager import backprop
from tensorflow.python.eager import context
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_util
from tensorflow.python.keras import backend
from tensorflow.python.keras import initializers
from tensorflow.python.keras.engine import base_layer_utils
from tensorflow.python.keras.optimizer_v2 import learning_rate_schedule
from tensorflow.python.keras.utils import tf_utils
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import clip_ops
from tensorflow.python.ops import gradients
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.ops import variables as tf_variables
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.saved_model import revived_types
from tensorflow.python.training.tracking import base as trackable
from tensorflow.python.util import nest
from tensorflow.python.util.tf_export import keras_export
def _deduplicate_indexed_slices(values, indices):
"""Sums `values` associated with any non-unique `indices`.
Args:
values: A `Tensor` with rank >= 1.
indices: A one-dimensional integer `Tensor`, indexing into the first
dimension of `values` (as in an IndexedSlices object).
Returns:
A tuple of (`summed_values`, `unique_indices`) where `unique_indices` is a
de-duplicated version of `indices` and `summed_values` contains the sum of
`values` slices associated with each unique index.
"""
unique_indices, new_index_positions = array_ops.unique(indices)
summed_values = math_ops.unsorted_segment_sum(
values, new_index_positions,
array_ops.shape(unique_indices)[0])
return (summed_values, unique_indices)
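# Worked example of the dedup semantics above (an added sketch, not part of
# the original module): indices [0, 0, 2] collapse to unique indices [0, 2],
# and the two `values` slices that shared index 0 are summed.
#
#   example_values  = [[1.0], [2.0], [3.0]]
#   example_indices = [0, 0, 2]
#   _deduplicate_indexed_slices(example_values, example_indices)
#   -> summed_values == [[3.0], [3.0]], unique_indices == [0, 2]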
@six.add_metaclass(abc.ABCMeta)
@keras_export("keras.optimizers.Optimizer")
class OptimizerV2(trackable.Trackable):
"""Updated base class for optimizers.
This class defines the API to add Ops to train a model. You never use this
class directly, but instead instantiate one of its subclasses such as
`tf.keras.optimizers.SGD`, `tf.keras.optimizers.Adam`.
### Usage
```python
# Create an optimizer with the desired parameters.
opt = tf.keras.optimizers.SGD(learning_rate=0.1)
# `loss` is a callable that takes no argument and returns the value
# to minimize.
loss = lambda: 3 * var1 * var1 + 2 * var2 * var2
# In graph mode, returns op that minimizes the loss by updating the listed
# variables.
opt_op = opt.minimize(loss, var_list=[var1, var2])
opt_op.run()
# In eager mode, simply call minimize to update the list of variables.
opt.minimize(loss, var_list=[var1, var2])
```
### Custom training loop with Keras models
In Keras models, sometimes variables are created when the model is first
called, instead of construction time. Examples include 1) sequential models
without input shape pre-defined, or 2) subclassed models. Pass var_list as
callable in these cases.
Example:
```python
opt = tf.keras.optimizers.SGD(learning_rate=0.1)
model = tf.keras.Sequential()
model.add(tf.keras.layers.Dense(num_hidden, activation='relu'))
model.add(tf.keras.layers.Dense(num_classes, activation='sigmoid'))
loss_fn = lambda: tf.keras.losses.mse(model(input), output)
var_list_fn = lambda: model.trainable_weights
for input, output in data:
opt.minimize(loss_fn, var_list_fn)
```
### Processing gradients before applying them.
Calling `minimize()` takes care of both computing the gradients and
applying them to the variables. If you want to process the gradients
before applying them you can instead use the optimizer in three steps:
1. Compute the gradients with `tf.GradientTape`.
2. Process the gradients as you wish.
3. Apply the processed gradients with `apply_gradients()`.
Example:
```python
# Create an optimizer.
opt = tf.keras.optimizers.SGD(learning_rate=0.1)
# Compute the gradients for a list of variables.
with tf.GradientTape() as tape:
loss = <call_loss_function>
vars = <list_of_variables>
grads = tape.gradient(loss, vars)
processed_grads = [process_gradient(g) for g in grads]
grads_and_vars = zip(processed_grads, var_list)
# grads_and_vars is a list of tuples (gradient, variable). Do whatever you
# need to the 'gradient' part, for example cap them, etc.
capped_grads_and_vars = [(MyCapper(gv[0]), gv[1]) for gv in grads_and_vars]
# Ask the optimizer to apply the capped gradients.
opt.apply_gradients(capped_grads_and_vars)
```
### Use with `tf.distribute.Strategy`.
This optimizer class is `tf.distribute.Strategy` aware, which means it
automatically sums gradients across all replicas. To average gradients,
you divide your loss by the global batch size, which is done
automatically if you use `tf.keras` built-in training or evaluation loops.
See the `reduction` argument of your loss which should be set to
`tf.keras.losses.Reduction.SUM_OVER_BATCH_SIZE` for averaging or
`tf.keras.losses.Reduction.SUM` for not.
If you are not using these and you want to average gradients, you should use
`tf.math.reduce_sum` to add up your per-example losses and then divide by the
global batch size. Note that when using `tf.distribute.Strategy`, the first
component of a tensor's shape is the *replica-local* batch size, which is off
by a factor equal to the number of replicas being used to compute a single
step. As a result, using `tf.math.reduce_mean` will give the wrong answer,
resulting in gradients that can be many times too big.
### Variable Constraint
All Keras optimizers respect variable constraints. If constraint function is
passed to any variable, the constraint will be applied to the variable after
the gradient has been applied to the variable.
Important: If gradient is sparse tensor, variable constraint is not supported.
### Thread Compatibility
The entire optimizer is currently thread compatible, not thread-safe. The user
needs to perform synchronization if necessary.
### Slots
Many optimizer subclasses, such as `Adam` and `Adagrad` allocate and manage
additional variables associated with the variables to train. These are called
<i>Slots</i>. Slots have names and you can ask the optimizer for the names of
the slots that it uses. Once you have a slot name you can ask the optimizer
for the variable it created to hold the slot value.
This can be useful if you want to log or debug a training algorithm, report stats
about the slots, etc.
### Hyper parameters
These are arguments passed to the optimizer subclass constructor
(the `__init__` method), and then passed to `self._set_hyper()`.
They can be either regular Python values (like 1.0), tensors, or
callables. If they are callable, the callable will be called during
`apply_gradients()` to get the value for the hyper parameter.
Hyper parameters can be overwritten through user code:
Example:
```python
# Create an optimizer with the desired parameters.
opt = tf.keras.optimizers.SGD(learning_rate=0.1)
# `loss` is a callable that takes no argument and returns the value
# to minimize.
loss = lambda: 3 * var1 + 2 * var2
# In eager mode, simply call minimize to update the list of variables.
opt.minimize(loss, var_list=[var1, var2])
# update learning rate
opt.learning_rate = 0.05
opt.minimize(loss, var_list=[var1, var2])
```
### Write a customized optimizer.
If you intend to create your own optimization algorithm, simply inherit from
this class and override the following methods:
- resource_apply_dense (update variable given gradient tensor is dense)
- resource_apply_sparse (update variable given gradient tensor is sparse)
- create_slots (if your optimizer algorithm requires additional variables)
- get_config (serialization of the optimizer, include all hyper parameters)
"""
def __init__(self, name, **kwargs):
"""Create a new Optimizer.
This must be called by the constructors of subclasses.
Note that Optimizer instances should not bind to a single graph,
and so shouldn't keep Tensors as member variables. Generally
you should be able to use the _set_hyper()/state.get_hyper()
facility instead.
This class is stateful and thread-compatible.
Args:
name: A non-empty string. The name to use for accumulators created
for the optimizer.
**kwargs: keyword arguments. Allowed to be {`clipnorm`, `clipvalue`, `lr`,
`decay`}. `clipnorm` is clip gradients by norm; `clipvalue` is clip
gradients by value, `decay` is included for backward compatibility to
allow time inverse decay of learning rate. `lr` is included for backward
compatibility, recommended to use `learning_rate` instead.
Raises:
ValueError: If name is malformed.
RuntimeError: If _create_slots has been overridden instead of
_create_vars.
"""
allowed_kwargs = {"clipnorm", "clipvalue", "lr", "decay"}
for k in kwargs:
if k not in allowed_kwargs:
raise TypeError("Unexpected keyword argument "
"passed to optimizer: " + str(k))
# checks that all keyword arguments are non-negative.
if kwargs[k] < 0:
raise ValueError("Expected {} >= 0, received: {}".format(k, kwargs[k]))
self._use_locking = True
self._name = name
self._hyper = {}
# dict: {variable name : {slot name : variable}}
self._slots = {}
self._slot_names = []
self._weights = []
self._iterations = None
# For implementing Trackable. Stores information about how to restore
# slot variables which have not yet been created
# (trackable._CheckpointPosition objects).
# {slot_name :
# {_var_key(variable_to_train): [checkpoint_position, ... ], ... },
# ... }
self._deferred_slot_restorations = {}
decay = kwargs.pop("decay", 0.0)
if decay < 0.:
raise ValueError("decay cannot be less than 0: {}".format(decay))
self._initial_decay = decay
if "clipnorm" in kwargs:
self.clipnorm = kwargs.pop("clipnorm")
if "clipvalue" in kwargs:
self.clipvalue = kwargs.pop("clipvalue")
self._hypers_created = False
def minimize(self, loss, var_list, grad_loss=None, name=None):
"""Minimize `loss` by updating `var_list`.
This method simply computes gradient using `tf.GradientTape` and calls
`apply_gradients()`. If you want to process the gradient before applying
then call `tf.GradientTape` and `apply_gradients()` explicitly instead
of using this function.
Args:
loss: A callable taking no arguments which returns the value to minimize.
var_list: list or tuple of `Variable` objects to update to minimize
`loss`, or a callable returning the list or tuple of `Variable` objects.
Use callable when the variable list would otherwise be incomplete before
`minimize` since the variables are created the first time `loss` is
called.
grad_loss: Optional. A `Tensor` holding the gradient computed for `loss`.
name: Optional name for the returned operation.
Returns:
An Operation that updates the variables in `var_list`. If `global_step`
was not `None`, that operation also increments `global_step`.
Raises:
ValueError: If some of the variables are not `Variable` objects.
"""
grads_and_vars = self._compute_gradients(
loss, var_list=var_list, grad_loss=grad_loss)
return self.apply_gradients(grads_and_vars, name=name)
def _compute_gradients(self, loss, var_list, grad_loss=None):
"""Compute gradients of `loss` for the variables in `var_list`.
This is the first part of `minimize()`. It returns a list
of (gradient, variable) pairs where "gradient" is the gradient
for "variable". Note that "gradient" can be a `Tensor`, an
`IndexedSlices`, or `None` if there is no gradient for the
given variable.
Args:
loss: A callable taking no arguments which returns the value to minimize.
var_list: list or tuple of `Variable` objects to update to minimize
`loss`, or a callable returning the list or tuple of `Variable` objects.
Use callable when the variable list would otherwise be incomplete before
`minimize` and the variables are created the first time `loss`
is called.
grad_loss: Optional. A `Tensor` holding the gradient computed for `loss`.
Returns:
A list of (gradient, variable) pairs. Variable is always present, but
gradient can be `None`.
Raises:
TypeError: If `var_list` contains anything else than `Variable` objects.
ValueError: If some arguments are invalid, or var_list is None.
"""
# TODO(josh11b): Test that we handle weight decay in a reasonable way.
with backprop.GradientTape() as tape:
if not callable(var_list):
tape.watch(var_list)
loss_value = loss()
if callable(var_list):
var_list = var_list()
var_list = nest.flatten(var_list)
grads = tape.gradient(loss_value, var_list, grad_loss)
if hasattr(self, "clipnorm"):
grads = [clip_ops.clip_by_norm(g, self.clipnorm) for g in grads]
if hasattr(self, "clipvalue"):
grads = [
clip_ops.clip_by_value(g, -self.clipvalue, self.clipvalue)
for g in grads
]
grads_and_vars = list(zip(grads, var_list))
self._assert_valid_dtypes([
v for g, v in grads_and_vars
if g is not None and v.dtype != dtypes.resource
])
return grads_and_vars
def get_gradients(self, loss, params):
"""Returns gradients of `loss` with respect to `params`.
Arguments:
loss: Loss tensor.
params: List of variables.
Returns:
List of gradient tensors.
Raises:
ValueError: In case any gradient cannot be computed (e.g. if gradient
function not implemented).
"""
params = nest.flatten(params)
with backend.get_graph().as_default():
grads = gradients.gradients(loss, params)
for grad, param in zip(grads, params):
if grad is None:
raise ValueError("Variable {} has `None` for gradient. "
"Please make sure that all of your ops have a "
"gradient defined (i.e. are differentiable). "
"Common ops without gradient: "
"K.argmax, K.round, K.eval.".format(param))
if hasattr(self, "clipnorm"):
grads = [clip_ops.clip_by_norm(g, self.clipnorm) for g in grads]
if hasattr(self, "clipvalue"):
grads = [
clip_ops.clip_by_value(g, -self.clipvalue, self.clipvalue)
for g in grads
]
return grads
def apply_gradients(self, grads_and_vars, name=None):
"""Apply gradients to variables.
This is the second part of `minimize()`. It returns an `Operation` that
applies gradients.
Args:
grads_and_vars: List of (gradient, variable) pairs.
name: Optional name for the returned operation. Default to the name
passed to the `Optimizer` constructor.
Returns:
An `Operation` that applies the specified gradients. If `global_step`
was not None, that operation also increments `global_step`.
Raises:
TypeError: If `grads_and_vars` is malformed.
ValueError: If none of the variables have gradients.
"""
grads_and_vars = _filter_grads(grads_and_vars)
var_list = [v for (_, v) in grads_and_vars]
# Create iteration if necessary.
with ops.init_scope():
_ = self.iterations
self._create_hypers()
self._create_slots(var_list)
self._prepare(var_list)
return distribute_ctx.get_replica_context().merge_call(
self._distributed_apply, args=(grads_and_vars,), kwargs={"name": name})
def _distributed_apply(self, distribution, grads_and_vars, name):
"""`apply_gradients` using a `DistributionStrategy`."""
reduced_grads = distribution.extended.batch_reduce_to(
ds_reduce_util.ReduceOp.SUM, grads_and_vars)
var_list = [v for _, v in grads_and_vars]
grads_and_vars = zip(reduced_grads, var_list)
def apply_grad_to_update_var(var, grad):
"""Apply gradient to variable."""
if isinstance(var, ops.Tensor):
raise NotImplementedError("Trying to update a Tensor ", var)
if isinstance(grad, ops.IndexedSlices):
if var.constraint is not None:
raise RuntimeError(
"Cannot use a constraint function on a sparse variable.")
return self._resource_apply_sparse_duplicate_indices(
grad.values, var, grad.indices)
update_op = self._resource_apply_dense(grad, var)
if var.constraint is not None:
with ops.control_dependencies([update_op]):
return var.assign(var.constraint(var))
else:
return update_op
update_ops = []
with backend.name_scope(name or self._name):
for grad, var in grads_and_vars:
scope_name = ("" if ops.executing_eagerly_outside_functions() else
"_" + var.op.name)
with backend.name_scope("update" + scope_name):
update_ops.extend(
distribution.extended.update(
var, apply_grad_to_update_var, args=(grad,), group=False))
any_symbolic = any(isinstance(i, ops.Operation) or
tf_utils.is_symbolic_tensor(i) for i in update_ops)
if not context.executing_eagerly() or any_symbolic:
# If the current context is graph mode or any of the update ops are
# symbolic then the step update should be carried out under a graph
# context. (eager updates execute immediately)
with ops._get_graph_from_inputs(update_ops).as_default(): # pylint: disable=protected-access
with ops.control_dependencies(update_ops):
return self._iterations.assign_add(1).op
return self._iterations.assign_add(1)
def get_updates(self, loss, params):
grads = self.get_gradients(loss, params)
grads_and_vars = list(zip(grads, params))
self._assert_valid_dtypes([
v for g, v in grads_and_vars
if g is not None and v.dtype != dtypes.resource
])
return [self.apply_gradients(grads_and_vars)]
def _set_hyper(self, name, value):
"""set hyper `name` to value. value can be callable, tensor, numeric."""
if isinstance(value, trackable.Trackable):
self._track_trackable(value, name, overwrite=True)
if name not in self._hyper:
self._hyper[name] = value
else:
prev_value = self._hyper[name]
if (callable(prev_value)
or isinstance(prev_value,
(ops.Tensor, int, float,
learning_rate_schedule.LearningRateSchedule))
or isinstance(value, learning_rate_schedule.LearningRateSchedule)):
self._hyper[name] = value
else:
backend.set_value(self._hyper[name], value)
def _get_hyper(self, name, dtype=None):
if not self._hypers_created:
self._create_hypers()
value = self._hyper[name]
if isinstance(value, learning_rate_schedule.LearningRateSchedule):
return value
if callable(value):
value = value()
if dtype:
return math_ops.cast(value, dtype)
else:
return value
def __getattribute__(self, name):
"""Overridden to support hyperparameter access."""
try:
return super(OptimizerV2, self).__getattribute__(name)
except AttributeError as e:
# Needed to avoid infinite recursion with __setattr__.
if name == "_hyper":
raise e
# Backwards compatibility with Keras optimizers.
if name == "lr":
name = "learning_rate"
if name in self._hyper:
return self._get_hyper(name)
raise e
def __setattr__(self, name, value):
"""Override setattr to support dynamic hyperparameter setting."""
# Backwards compatibility with Keras optimizers.
if name == "lr":
name = "learning_rate"
if hasattr(self, "_hyper") and name in self._hyper:
self._set_hyper(name, value)
else:
super(OptimizerV2, self).__setattr__(name, value)
def get_slot_names(self):
"""A list of names for this optimizer's slots."""
return self._slot_names
def add_slot(self, var, slot_name, initializer="zeros"):
"""Add a new slot variable for `var`."""
if slot_name not in self._slot_names:
self._slot_names.append(slot_name)
var_key = _var_key(var)
slot_dict = self._slots.setdefault(var_key, {})
weight = slot_dict.get(slot_name, None)
if weight is None:
if isinstance(initializer, six.string_types) or callable(initializer):
initializer = initializers.get(initializer)
initial_value = functools.partial(
initializer, shape=var.shape, dtype=var.dtype)
else:
initial_value = initializer
strategy = distribute_ctx.get_strategy()
with strategy.extended.colocate_vars_with(var):
weight = tf_variables.Variable(
name="%s/%s" % (var._shared_name, slot_name), # pylint: disable=protected-access
dtype=var.dtype,
trainable=False,
initial_value=initial_value)
backend.track_variable(weight)
slot_dict[slot_name] = weight
self._restore_slot_variable(
slot_name=slot_name, variable=var,
slot_variable=weight)
self._weights.append(weight)
return weight
def get_slot(self, var, slot_name):
var_key = _var_key(var)
slot_dict = self._slots[var_key]
return slot_dict[slot_name]
def _prepare(self, var_list):
pass
def _create_hypers(self):
if self._hypers_created:
return
# Iterate hyper values deterministically.
for name, value in sorted(self._hyper.items()):
if isinstance(value, ops.Tensor) or callable(value):
continue
else:
self._hyper[name] = self.add_weight(
name,
shape=[],
trainable=False,
initializer=value,
aggregation=tf_variables.VariableAggregation.ONLY_FIRST_REPLICA)
self._hypers_created = True
@property
def iterations(self):
"""Variable. The number of training steps this Optimizer has run."""
if self._iterations is None:
self._iterations = self.add_weight(
"iter",
shape=[],
dtype=dtypes.int64,
trainable=False,
aggregation=tf_variables.VariableAggregation.ONLY_FIRST_REPLICA)
self._weights.append(self._iterations)
return self._iterations
@iterations.setter
def iterations(self, variable):
if self._iterations is not None:
raise RuntimeError("Cannot set `iterations` to a new Variable after "
"the Optimizer weights have been created")
self._iterations = variable
self._weights.append(self._iterations)
def _decayed_lr(self, var_dtype):
"""Get decayed learning rate as a Tensor with dtype=var_dtype."""
lr_t = self._get_hyper("learning_rate", var_dtype)
if isinstance(lr_t, learning_rate_schedule.LearningRateSchedule):
local_step = math_ops.cast(self.iterations, var_dtype)
lr_t = math_ops.cast(lr_t(local_step), var_dtype)
if self._initial_decay > 0.:
local_step = math_ops.cast(self.iterations, var_dtype)
decay_t = self._get_hyper("decay", var_dtype)
lr_t = lr_t / (1. + decay_t * local_step)
return lr_t
@abc.abstractmethod
def get_config(self):
"""Returns the config of the optimimizer.
An optimizer config is a Python dictionary (serializable)
containing the configuration of an optimizer.
The same optimizer can be reinstantiated later
(without any saved state) from this configuration.
Returns:
Python dictionary.
"""
config = {"name": self._name}
if hasattr(self, "clipnorm"):
config["clipnorm"] = self.clipnorm
if hasattr(self, "clipvalue"):
config["clipvalue"] = self.clipvalue
return config
@classmethod
def from_config(cls, config, custom_objects=None):
"""Creates an optimizer from its config.
This method is the reverse of `get_config`,
capable of instantiating the same optimizer from the config
dictionary.
Arguments:
config: A Python dictionary, typically the output of get_config.
custom_objects: A Python dictionary mapping names to additional Python
objects used to create this optimizer, such as a function used for a
hyperparameter.
Returns:
An optimizer instance.
"""
if "lr" in config:
config["learning_rate"] = config.pop("lr")
if "learning_rate" in config:
if isinstance(config["learning_rate"], dict):
config["learning_rate"] = learning_rate_schedule.deserialize(
config["learning_rate"], custom_objects=custom_objects)
return cls(**config)
def _serialize_hyperparameter(self, hyperparameter_name):
"""Serialize a hyperparameter that can be a float, callable, or Tensor."""
value = self._hyper[hyperparameter_name]
if isinstance(value, learning_rate_schedule.LearningRateSchedule):
return learning_rate_schedule.serialize(value)
if callable(value):
return value()
if tensor_util.is_tensor(value):
return backend.get_value(value)
return value
def variables(self):
"""Returns variables of this Optimizer based on the order created."""
return self._weights
@property
def weights(self):
"""Returns variables of this Optimizer based on the order created."""
return self._weights
def get_weights(self):
params = self.weights
return backend.batch_get_value(params)
# TODO(tanzheny): Maybe share this logic with base_layer.
def set_weights(self, weights):
params = self.weights
if len(params) != len(weights):
raise ValueError(
"You called `set_weights(weights)` on optimizer " + self._name +
" with a weight list of length " + str(len(weights)) +
", but the optimizer was expecting " + str(len(params)) +
" weights. Provided weights: " + str(weights)[:50] + "...")
if not params:
return
weight_value_tuples = []
param_values = backend.batch_get_value(params)
for pv, p, w in zip(param_values, params, weights):
if pv.shape != w.shape:
raise ValueError("Optimizer weight shape " + str(pv.shape) +
" not compatible with "
"provided weight shape " + str(w.shape))
weight_value_tuples.append((p, w))
backend.batch_set_value(weight_value_tuples)
def add_weight(self,
name,
shape,
dtype=None,
initializer="zeros",
trainable=None,
synchronization=tf_variables.VariableSynchronization.AUTO,
aggregation=tf_variables.VariableAggregation.NONE):
if dtype is None:
dtype = dtypes.float32
if isinstance(initializer, six.string_types) or callable(initializer):
initializer = initializers.get(initializer)
if synchronization == tf_variables.VariableSynchronization.ON_READ:
if trainable:
raise ValueError(
"Synchronization value can be set to "
"VariableSynchronization.ON_READ only for non-trainable variables. "
"You have specified trainable=True and "
"synchronization=VariableSynchronization.ON_READ.")
else:
# Set trainable to be false when variable is to be synced on read.
trainable = False
elif trainable is None:
trainable = True
variable = self._add_variable_with_custom_getter(
name=name,
shape=shape,
getter=base_layer_utils.make_variable,
overwrite=True,
initializer=initializer,
dtype=dtype,
trainable=trainable,
use_resource=True,
synchronization=synchronization,
aggregation=aggregation)
backend.track_variable(variable)
return variable
def _assert_valid_dtypes(self, tensors):
"""Asserts tensors are all valid types (see `_valid_dtypes`).
Args:
tensors: Tensors to check.
Raises:
ValueError: If any tensor is not a valid type.
"""
valid_dtypes = self._valid_dtypes()
for t in tensors:
dtype = t.dtype.base_dtype
if dtype not in valid_dtypes:
raise ValueError("Invalid type %r for %s, expected: %s." %
(dtype, t.name, [v for v in valid_dtypes]))
def _valid_dtypes(self):
"""Valid types for loss, variables and gradients.
Subclasses should override to allow other float types.
Returns:
Valid types for loss, variables and gradients.
"""
return set(
[dtypes.float16, dtypes.bfloat16, dtypes.float32, dtypes.float64])
def _call_if_callable(self, param):
"""Call the function if param is callable."""
return param() if callable(param) else param
def _resource_apply_dense(self, grad, handle):
"""Add ops to apply dense gradients to the variable `handle`.
Args:
grad: a `Tensor` representing the gradient.
handle: a `Tensor` of dtype `resource` which points to the variable to be
updated.
Returns:
An `Operation` which updates the value of the variable.
"""
raise NotImplementedError()
def _resource_apply_sparse_duplicate_indices(self, grad, handle, indices):
"""Add ops to apply sparse gradients to `handle`, with repeated indices.
Optimizers which override this method must deal with repeated indices. See
the docstring of `_apply_sparse_duplicate_indices` for details. By default
the correct behavior, to sum non-unique indices and their associated
gradients, is enforced by first pre-processing `grad` and `indices` and
passing them on to `_resource_apply_sparse`. Optimizers which deal correctly
with duplicate indices may instead override this method to avoid the
overhead of summing.
Args:
grad: a `Tensor` representing the gradient for the affected indices.
handle: a `Tensor` of dtype `resource` which points to the variable to be
updated.
indices: a `Tensor` of integral type representing the indices for which
the gradient is nonzero. Indices may be repeated.
Returns:
An `Operation` which updates the value of the variable.
"""
summed_grad, unique_indices = _deduplicate_indexed_slices(
values=grad, indices=indices)
return self._resource_apply_sparse(summed_grad, handle, unique_indices)
def _resource_apply_sparse(self, grad, handle, indices):
"""Add ops to apply sparse gradients to the variable `handle`.
Similar to `_apply_sparse`, the `indices` argument to this method has been
de-duplicated. Optimizers which deal correctly with non-unique indices may
instead override `_resource_apply_sparse_duplicate_indices` to avoid this
overhead.
Args:
grad: a `Tensor` representing the gradient for the affected indices.
handle: a `Tensor` of dtype `resource` which points to the variable to be
updated.
indices: a `Tensor` of integral type representing the indices for which
the gradient is nonzero. Indices are unique.
Returns:
An `Operation` which updates the value of the variable.
"""
raise NotImplementedError()
def _resource_scatter_add(self, x, i, v):
with ops.control_dependencies(
[resource_variable_ops.resource_scatter_add(x.handle, i, v)]):
return x.value()
def _resource_scatter_update(self, x, i, v):
with ops.control_dependencies(
[resource_variable_ops.resource_scatter_update(x.handle, i, v)]):
return x.value()
# ---------------
# For implementing the trackable interface
# ---------------
def _restore_slot_variable(self, slot_name, variable, slot_variable):
"""Restore a newly created slot variable's value."""
variable_key = _var_key(variable)
deferred_restorations = self._deferred_slot_restorations.get(
slot_name, {}).pop(variable_key, [])
# Iterate over restores, highest restore UID first to minimize the number
# of assignments.
deferred_restorations.sort(key=lambda position: position.restore_uid,
reverse=True)
for checkpoint_position in deferred_restorations:
checkpoint_position.restore(slot_variable)
def _create_or_restore_slot_variable(
self, slot_variable_position, slot_name, variable):
"""Restore a slot variable's value, possibly creating it.
Called when a variable which has an associated slot variable is created or
restored. When executing eagerly, we create the slot variable with a
restoring initializer.
No new variables are created when graph building. Instead,
_restore_slot_variable catches these after normal creation and adds restore
ops to the graph. This method is nonetheless important when graph building
for the case when a slot variable has already been created but `variable`
has just been added to a dependency graph (causing us to realize that the
slot variable needs to be restored).
Args:
slot_variable_position: A `trackable._CheckpointPosition` object
indicating the slot variable `Trackable` object to be restored.
slot_name: The name of this `Optimizer`'s slot to restore into.
variable: The variable object this slot is being created for.
"""
variable_key = _var_key(variable)
slot_dict = self._slots.get(variable_key, {})
slot_variable = slot_dict.get(slot_name, None)
if (slot_variable is None and context.executing_eagerly() and
slot_variable_position.is_simple_variable()
# Defer slot variable creation if there is an active variable creator
# scope. Generally we'd like to eagerly create/restore slot variables
# when possible, but this may mean that scopes intended to catch
# `variable` also catch its eagerly created slot variable
# unintentionally (specifically make_template would add a dependency on
# a slot variable if not for this case). Deferring is mostly harmless
# (aside from double initialization), and makes variable creator scopes
# behave the same way they do when graph building.
and not ops.get_default_graph()._variable_creator_stack): # pylint: disable=protected-access
initializer = trackable.CheckpointInitialValue(
checkpoint_position=slot_variable_position)
slot_variable = self.add_slot(
var=variable,
initializer=initializer,
slot_name=slot_name)
# Slot variables are not owned by any one object (because we don't want to
# save the slot variable if the optimizer is saved without the non-slot
# variable, or if the non-slot variable is saved without the optimizer;
# it's a dependency hypergraph with edges of the form (optimizer, non-slot
# variable, variable)). So we don't _track_ slot variables anywhere, and
# instead special-case this dependency and otherwise pretend it's a normal
# graph.
if slot_variable is not None:
# If we've either made this slot variable, or if we've pulled out an
# existing slot variable, we should restore it.
slot_variable_position.restore(slot_variable)
else:
# We didn't make the slot variable. Defer restoring until it gets created
# normally. We keep a list rather than the one with the highest restore
# UID in case slot variables have their own dependencies, in which case
# those could differ between restores.
self._deferred_slot_restorations.setdefault(
slot_name, {}).setdefault(variable_key, []).append(
slot_variable_position)
def _filter_grads(grads_and_vars):
"""Filter out iterable with grad equal to None."""
grads_and_vars = tuple(grads_and_vars)
if not grads_and_vars:
return grads_and_vars
filtered = []
vars_with_empty_grads = []
for grad, var in grads_and_vars:
if grad is None:
vars_with_empty_grads.append(var)
else:
filtered.append((grad, var))
filtered = tuple(filtered)
if not filtered:
raise ValueError("No gradients provided for any variable: %s." %
([v.name for _, v in grads_and_vars],))
if vars_with_empty_grads:
logging.warning(
("Gradients does not exist for variables %s when minimizing the loss."),
([v.name for v in vars_with_empty_grads]))
return filtered
def _var_key(var):
"""Key for representing a primary variable, for looking up slots.
In graph mode the name is derived from the var shared name.
In eager mode the name is derived from the var unique id.
If distribution strategy exists, get the primary variable first.
Args:
var: the variable.
Returns:
the unique name of the variable.
"""
# pylint: disable=protected-access
# Get the distributed variable if it exists.
if getattr(var, "_distributed_container", None) is not None:
var = var._distributed_container()
if var._in_graph_mode:
return var._shared_name
return var._unique_id
def _get_slot_key_from_var(var, slot_name):
"""Get the slot key for the variable: var_name/slot_name."""
name = _var_key(var)
return name + "/" + slot_name
class _RestoredOptimizer(OptimizerV2):
"""A non-functional Optimizer implementation for checkpoint compatibility.
Holds slot variables and hyperparameters when an optimizer is restored from a
SavedModel. These variables may be referenced in functions along with ops
created by the original optimizer, but currently we do not support using the
optimizer object itself (e.g. through `apply_gradients`).
"""
# TODO(allenl): Make the restored optimizer functional by tracing its apply
# methods.
def __init__(self):
super(_RestoredOptimizer, self).__init__("_RestoredOptimizer")
self._hypers_created = True
def get_config(self):
# TODO(allenl): Save and restore the Optimizer's config
raise NotImplementedError(
"Restoring functional Optimzers from SavedModels is not currently "
"supported. Please file a feature request if this limitation bothers "
"you.")
revived_types.register_revived_type(
"optimizer",
lambda obj: isinstance(obj, OptimizerV2),
versions=[revived_types.VersionedTypeRegistration(
object_factory=lambda proto: _RestoredOptimizer(),
version=1,
min_producer_version=1,
min_consumer_version=1,
setter=_RestoredOptimizer._set_hyper # pylint: disable=protected-access
)])
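A plain-Python check of the inverse time decay applied in `_decayed_lr` above, `lr_t = lr / (1 + decay * step)`; the numbers are illustrative.

```python
lr, decay = 0.1, 0.5
for step in range(4):
    print(step, lr / (1.0 + decay * step))
# 0 -> 0.1, 1 -> 0.0667, 2 -> 0.05, 3 -> 0.04
```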
| 38.806608
| 101
| 0.698162
| 5,320
| 39,932
| 5.088722
| 0.148496
| 0.015957
| 0.014184
| 0.004137
| 0.242797
| 0.187906
| 0.167886
| 0.140957
| 0.131575
| 0.120789
| 0
| 0.002698
| 0.220375
| 39,932
| 1,028
| 102
| 38.844358
| 0.866889
| 0.470525
| 0
| 0.209644
| 0
| 0
| 0.075533
| 0.007458
| 0
| 0
| 0
| 0.002918
| 0.006289
| 1
| 0.092243
| false
| 0.004193
| 0.060797
| 0
| 0.247379
| 0.002096
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
0adb9e87674ba38043bf368fb738d4c5e8ba7c5c
| 362
|
py
|
Python
|
escola/teste_get.py
|
danielrosendos/djangoRestFramework
|
946bb95b8dd9976d1920302ce724572ffd9f98cf
|
[
"MIT"
] | 2
|
2020-07-26T15:17:23.000Z
|
2020-07-26T16:50:18.000Z
|
escola/teste_get.py
|
sport129/djangoRestFramework
|
946bb95b8dd9976d1920302ce724572ffd9f98cf
|
[
"MIT"
] | 3
|
2021-03-30T14:12:18.000Z
|
2021-06-04T23:44:47.000Z
|
escola/teste_get.py
|
sport129/djangoRestFramework
|
946bb95b8dd9976d1920302ce724572ffd9f98cf
|
[
"MIT"
] | null | null | null |
import requests
headers = {
'content-type': 'application/json',
'Authorization': 'Token 80ca9f249b80e7226cdc7fcaada8d7297352f0f9'
}
url_base_cursos = 'http://127.0.0.1:8000/api/v2/cursos'
url_base_avaliacoes = 'http://127.0.0.1:8000/api/v2/avaliacoes'
resultado = requests.get(url=url_base_cursos, headers=headers)
assert resultado.status_code == 200
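The script defines `url_base_avaliacoes` but never uses it; a parallel request against that endpoint, under the same token assumptions, would look like this.

```python
resultado_avaliacoes = requests.get(url=url_base_avaliacoes, headers=headers)
assert resultado_avaliacoes.status_code == 200
```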
| 27.846154
| 69
| 0.756906
| 48
| 362
| 5.5625
| 0.5625
| 0.078652
| 0.097378
| 0.067416
| 0.142322
| 0.142322
| 0.142322
| 0.142322
| 0
| 0
| 0
| 0.147239
| 0.099448
| 362
| 13
| 70
| 27.846154
| 0.671779
| 0
| 0
| 0
| 0
| 0
| 0.443526
| 0.110193
| 0
| 0
| 0
| 0
| 0.111111
| 1
| 0
| false
| 0
| 0.111111
| 0
| 0.111111
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
0adc55ed2f06787ab63a1224266a2dd707ce1b10
| 6,455
|
py
|
Python
|
python/avi/sdk/utils/waf_policy/vdi_waf_policy.py
|
aaronjwood/alb-sdk
|
ae4c47b2228651d3f5095e7c14f081aa4adbb732
|
[
"Apache-2.0"
] | null | null | null |
python/avi/sdk/utils/waf_policy/vdi_waf_policy.py
|
aaronjwood/alb-sdk
|
ae4c47b2228651d3f5095e7c14f081aa4adbb732
|
[
"Apache-2.0"
] | null | null | null |
python/avi/sdk/utils/waf_policy/vdi_waf_policy.py
|
aaronjwood/alb-sdk
|
ae4c47b2228651d3f5095e7c14f081aa4adbb732
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2021 VMware, Inc.
import argparse
import json
import re
import logging
import os
import sys
from avi.sdk.avi_api import ApiSession
API_VERSION = "18.2.13"
SYSTEM_WAF_POLICY_VDI='System-WAF-Policy-VDI'
logger = logging.getLogger(__name__)
def add_allowlist_rule(waf_policy_obj):
# add an allowlist rule to allow requests with a URI beginning with /ice/
allowlist_rule={
"index": 0,
"name": "allowlist-start-with-ice",
"description": "WAF will buffer the whole request body first and then release to backend. With VDI, client wants to stream data between client and server for some URLs like /ice/..., we should allow these URLs",
"actions": [
"WAF_POLICY_WHITELIST_ACTION_ALLOW"
],
"match": {
"path": {
"match_case": "SENSITIVE",
"match_str": [
"/ice/"
],
"match_criteria": "BEGINS_WITH"
}
}
}
index = 0
waf_policy_obj.setdefault("whitelist", {}).setdefault("rules", [])
for rule in waf_policy_obj["whitelist"]["rules"][:]:
if rule["name"] == "allowlist-start-with-ice":
waf_policy_obj["whitelist"]["rules"].remove(rule)
if rule["index"]>index:
index = rule["index"]
allowlist_rule["index"] = index+1
waf_policy_obj["whitelist"]["rules"].append(allowlist_rule)
def get_id_from_group(group):
pattern = re.compile(r"[^\d]*(?P<group_id>\d\d\d)")
match = pattern.match(group["name"])
assert match, "can not extract group id from group '{}'".format(group["name"])
groupid = int(match.group("group_id"))
assert groupid == 0 or 100 <= groupid <= 999, "group id for group '{}' not in expected range".format(group["name"])
return groupid
def disable_crs_response_rules(waf_policy_obj):
# disable response-side rules and some specific rules
for crs_group in waf_policy_obj.get("crs_groups", []):
group_id = get_id_from_group(crs_group)
if group_id >= 950:
crs_group["enable"] = False
for rule in crs_group.get("rules", []):
if rule["rule_id"] == "920330" or rule["rule_id"] == "932105":
rule["enable"] = False
def add_pre_crs_group(waf_policy_obj):
# add a rule to parse the body as XML for requests with the /broker/xml URI
xml_rule = [
{
"index": 0,
"name": "enforce XML parsing for /broker/xml",
"description": "Clients often send the wrong Content-Type header. We ignore the header and enforce the request body to be parsed as XML in WAF",
"rule": "SecRule REQUEST_METHOD \"@streq POST\" \"phase:1,id:4099822,t:none,nolog,pass,chain\" \n SecRule REQUEST_URI \"@streq /broker/xml\" \"t:none,ctl:requestBodyProcessor=XML\""
}
]
pre_crs_group = {
"index": 0,
"name": "VDI_409_ENFORCE_XML",
"rules": xml_rule
}
index = 0
if "pre_crs_groups" not in waf_policy_obj:
waf_policy_obj["pre_crs_groups"] = list()
for pre_crs in waf_policy_obj["pre_crs_groups"]:
if pre_crs["name"] == "VDI_409_ENFORCE_XML":
pre_crs["rules"] = xml_rule
pre_crs["enable"] = True
return
if pre_crs["index"] > index:
index = pre_crs["index"]
pre_crs_group["index"] = index + 1
waf_policy_obj["pre_crs_groups"].append(pre_crs_group)
def get_crs(api):
tested_crs = "CRS-2021-1"
resp = api.get("wafcrs?name=" + tested_crs)
if resp.status_code not in range(200, 300):
if resp.status_code == 404:
logger.error("Controller does not have CRS %s, please install first." % tested_crs)
return None
logger.error('Error : %s', resp.text)
sys.exit(1)
waf_crs = json.loads(resp.text)["results"]
return waf_crs[0]
def create_vdi_waf_policy(api, args):
waf_policy_obj = {
"name": SYSTEM_WAF_POLICY_VDI,
"mode": "WAF_MODE_DETECTION_ONLY",
"waf_profile_ref": "/api/wafprofile?name=System-WAF-Profile"
}
waf_crs = get_crs(api)
if waf_crs is None:
return
waf_policy_obj["waf_crs_ref"]="/api/wafcrs?name="+waf_crs["name"]
waf_policy_obj["crs_groups"] = list()
for group in waf_crs["groups"]:
waf_policy_obj["crs_groups"].append(group)
add_allowlist_rule(waf_policy_obj)
disable_crs_response_rules(waf_policy_obj)
add_pre_crs_group(waf_policy_obj)
resp = api.post('wafpolicy', data=json.dumps(waf_policy_obj))
if resp.status_code in range(200, 300):
logger.debug('Create WAF policy successfully')
else:
logger.error('Error : %s' % resp.text)
def update_waf_policy(api, args, waf_policy_obj):
add_allowlist_rule(waf_policy_obj)
disable_crs_response_rules(waf_policy_obj)
add_pre_crs_group(waf_policy_obj)
resp = api.put('wafpolicy/%s' % waf_policy_obj['uuid'], data=json.dumps(waf_policy_obj))
if resp.status_code in range(200, 300):
logger.debug('Updated WAF policy successfully')
else:
logger.error('Error : %s' % resp.text)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('-u', '--user', action="store", help='controller user',
default='admin')
parser.add_argument('-p', '--password', action="store", help='controller user password',
default='admin')
parser.add_argument('-t', '--tenant', action="store", help='tenant name',
default='admin')
parser.add_argument('-a', '--authtoken', help='Authentication token')
parser.add_argument('-c', '--controller_ip', action="store", help='controller ip')
args = parser.parse_args()
if args.password:
api = ApiSession.get_session(args.controller_ip, args.user, args.password,
tenant=args.tenant, api_version=API_VERSION)
elif args.authtoken:
api = ApiSession.get_session(args.controller_ip, args.user, tenant=args.tenant,
token=args.authtoken, api_version=API_VERSION)
else:
logging.error("Either password or authtokentoken must be provided.")
sys.exit(1)
waf_policy_obj = api.get_object_by_name('wafpolicy', SYSTEM_WAF_POLICY_VDI)
if not waf_policy_obj:
create_vdi_waf_policy(api, args)
else:
update_waf_policy(api, args, waf_policy_obj)
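A quick, self-contained check of `add_allowlist_rule`: starting from an empty policy dict, the helper creates the whitelist section and appends the /ice/ rule at index 1.

```python
policy = {}
add_allowlist_rule(policy)
print(policy["whitelist"]["rules"][0]["name"])   # allowlist-start-with-ice
print(policy["whitelist"]["rules"][0]["index"])  # 1
```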
| 38.652695
| 219
| 0.632223
| 866
| 6,455
| 4.463049
| 0.243649
| 0.093144
| 0.090039
| 0.019405
| 0.311255
| 0.22044
| 0.170505
| 0.147477
| 0.129884
| 0.105563
| 0
| 0.0165
| 0.239504
| 6,455
| 166
| 220
| 38.885542
| 0.770829
| 0.032533
| 0
| 0.194444
| 0
| 0.013889
| 0.259295
| 0.030449
| 0
| 0
| 0
| 0
| 0.013889
| 1
| 0.048611
| false
| 0.034722
| 0.048611
| 0
| 0.131944
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
0adcde8b96a5cb82b17bdf29ba072f1b54339883
| 4,101
|
py
|
Python
|
src/api/bkuser_core/tests/bkiam/test_constants.py
|
Chace-wang/bk-user
|
057f270d66a1834312306c9fba1f4e95521f10b1
|
[
"MIT"
] | null | null | null |
src/api/bkuser_core/tests/bkiam/test_constants.py
|
Chace-wang/bk-user
|
057f270d66a1834312306c9fba1f4e95521f10b1
|
[
"MIT"
] | null | null | null |
src/api/bkuser_core/tests/bkiam/test_constants.py
|
Chace-wang/bk-user
|
057f270d66a1834312306c9fba1f4e95521f10b1
|
[
"MIT"
] | 1
|
2021-12-31T06:48:41.000Z
|
2021-12-31T06:48:41.000Z
|
# -*- coding: utf-8 -*-
"""
TencentBlueKing is pleased to support the open source community by making 蓝鲸智云-用户管理(Bk-User) available.
Copyright (C) 2017-2021 THL A29 Limited, a Tencent company. All rights reserved.
Licensed under the MIT License (the "License"); you may not use this file except in compliance with the License.
You may obtain a copy of the License at http://opensource.org/licenses/MIT
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
"""
import pytest
from bkuser_core.bkiam.constants import ResourceType
from bkuser_core.categories.models import Department, ProfileCategory
from bkuser_core.tests.utils import make_simple_department
pytestmark = pytest.mark.django_db
class TestResourceTypeEnum:
@pytest.mark.parametrize(
"is_leaf, path, f, v",
[
(True, "/category,5/department,3440/department,3443/", "parent_id", 3443),
(False, "/category,5/department,3440/department,3443/", "id", 3443),
(True, "/category,5/", "category_id", 5),
(False, "/category,5/", "category_id", 5),
(True, "/department,3440/department,3443/", "parent_id", 3443),
(False, "/department,3440/department,3443/", "id", 3443),
],
)
def test_get_key_mapping(self, is_leaf, path, f, v):
key_mapping = ResourceType.get_key_mapping(ResourceType.DEPARTMENT)
path_method = key_mapping["department._bk_iam_path_"]
data = {"value": path}
if not is_leaf:
data["node_type"] = "non-leaf"
result_f, result_v = path_method(data)
assert result_f == f
assert result_v == v
@pytest.mark.parametrize(
"dep_chain, expected",
[
(
[1000, 1001, 1002],
{"_bk_iam_path_": "/category,1/department,1000/department,1001/department,1002/"},
),
(
[1000],
{"_bk_iam_path_": "/category,1/department,1000/"},
),
],
)
def test_get_attributes_mapping(self, dep_chain, expected):
target_parent = None
for d in dep_chain:
parent_id = target_parent if not target_parent else target_parent.pk
target_parent = make_simple_department(str(d), force_create_params={"id": d}, parent_id=parent_id)
attributes_mapping = ResourceType.get_attributes_mapping(target_parent)
assert attributes_mapping == expected
def test_get_attributes_mapping_other(self):
pc = ProfileCategory.objects.get_default()
attributes_mapping = ResourceType.get_attributes_mapping(pc)
assert attributes_mapping == {}
@pytest.mark.parametrize(
"dep_chain,expected",
[
(
["a", "b", "c"],
[
("category", "默认目录"),
("department", "a"),
("department", "b"),
("department", "c"),
],
),
(
["a", "b"],
[("category", "默认目录"), ("department", "a"), ("department", "b")],
),
],
)
def test_get_resource_nodes_dep(self, dep_chain, expected):
target_parent = None
for d in dep_chain:
parent_id = target_parent if not target_parent else target_parent.pk
target_parent = make_simple_department(d, parent_id=parent_id)
# Only the parent is attached here, so the mptt tree needs rebuilding
Department.tree_objects.rebuild()
nodes = ResourceType.get_instance_resource_nodes(target_parent)
assert [(x["type"], x["name"]) for x in nodes] == expected
def test_get_resource_nodes_other(self):
pc = ProfileCategory.objects.get_default()
nodes = ResourceType.get_instance_resource_nodes(pc)
assert [(x["type"], x["name"]) for x in nodes] == [("category", "默认目录")]
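# --- Illustration (added; not part of the test module) ---
# A minimal, self-contained sketch of the mapping the parametrized cases above
# exercise: the last "type,id" segment of an IAM path becomes either a
# parent_id filter (leaf nodes) or an id filter (non-leaf nodes), while a pure
# category path maps to category_id. sketch_path_to_filter is hypothetical and
# only mirrors the expected behaviour; it is not the real ResourceType code.
def sketch_path_to_filter(path, is_leaf=True):
    segments = [s for s in path.strip("/").split("/") if s]
    node_type, node_id = segments[-1].split(",")
    if node_type == "category":
        return "category_id", int(node_id)
    return ("parent_id" if is_leaf else "id"), int(node_id)

assert sketch_path_to_filter("/category,5/department,3440/department,3443/") == ("parent_id", 3443)
assert sketch_path_to_filter("/category,5/", is_leaf=False) == ("category_id", 5)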
| 39.432692 | 115 | 0.613997 | 477 | 4,101 | 5.075472 | 0.348008 | 0.05948 | 0.020653 | 0.046262 | 0.484924 | 0.403139 | 0.234614 | 0.172656 | 0.135481 | 0.113176 | 0 | 0.033077 | 0.270178 | 4,101 | 103 | 116 | 39.815534 | 0.77581 | 0.176298 | 0 | 0.2625 | 0 | 0 | 0.163847 | 0.078955 | 0 | 0 | 0 | 0 | 0.075 | 1 | 0.0625 | false | 0 | 0.05 | 0 | 0.125 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
0adf4b5bea842a306db59cff9711a1e6a19b7ae0 | 3,753 | py | Python | improver_tests/precipitation_type/test_utilities.py | cpelley/improver | ebf77fe2adc85ed7aec74c26671872a2e4388ded | ["BSD-3-Clause"] | 77 | 2017-04-26T07:47:40.000Z | 2022-03-31T09:40:49.000Z | improver_tests/precipitation_type/test_utilities.py | cpelley/improver | ebf77fe2adc85ed7aec74c26671872a2e4388ded | ["BSD-3-Clause"] | 1,440 | 2017-03-29T10:04:15.000Z | 2022-03-28T10:11:29.000Z | improver_tests/precipitation_type/test_utilities.py | MoseleyS/improver | ca028e3a1c842e3ff00b188c8ea6eaedd0a07149 | ["BSD-3-Clause"] | 72 | 2017-03-17T16:53:45.000Z | 2022-02-16T09:41:37.000Z |
# -*- coding: utf-8 -*-
# -----------------------------------------------------------------------------
# (C) British Crown Copyright 2017-2021 Met Office.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
""" Tests of precipitation_type utilities"""
import numpy as np
import pytest
from iris.exceptions import CoordinateNotFoundError
from improver.metadata.constants import FLOAT_DTYPE
from improver.precipitation_type.utilities import make_shower_condition_cube
from improver.synthetic_data.set_up_test_cubes import set_up_probability_cube
def set_up_test_cube(n_thresholds=1):
"""Set up a cube testing shower condition conversion"""
thresholds = np.arange(n_thresholds)
shape = [2, 2]
shape = [n_thresholds, *shape] if n_thresholds > 0 else shape
data = np.ones(shape, dtype=FLOAT_DTYPE)
cube = set_up_probability_cube(
data,
thresholds,
variable_name="texture_of_cloud_area_fraction",
threshold_units=1,
spatial_grid="equalarea",
)
return cube
def test_basic():
"""Test that with a valid input the cube is transformed into a shower
condition cube."""
cube = set_up_test_cube()
result = make_shower_condition_cube(cube)
threshold_coord = result.coord(var_name="threshold")
assert result.name() == "probability_of_shower_condition_above_threshold"
assert result.dtype == FLOAT_DTYPE
assert (result.data == cube.data).all()
assert threshold_coord.name() == "shower_condition"
assert threshold_coord.units == 1
def test_no_threshold_coord():
"""Test an exception is raised if the proxy diagnostic cube does not have
a threshold coordinate."""
cube = set_up_test_cube()
cube.remove_coord("texture_of_cloud_area_fraction")
expected = "Input has no threshold coordinate and cannot be used"
with pytest.raises(CoordinateNotFoundError, match=expected):
make_shower_condition_cube(cube)
def test_multi_valued_threshold_coord():
"""Test an exception is raised if the proxy diagnostic cube has a multi
valued threshold coordinate."""
cube = set_up_test_cube(n_thresholds=2)
expected = "Expected a single valued threshold coordinate.*"
with pytest.raises(ValueError, match=expected):
make_shower_condition_cube(cube)
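# --- Usage sketch (added; not part of the test module) ---
# Assuming improver and its iris dependency are importable, converting a
# texture probability cube into a shower-condition cube looks like:
#
#   cube = set_up_test_cube()
#   shower_cube = make_shower_condition_cube(cube)
#   shower_cube.name()  # "probability_of_shower_condition_above_threshold"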
| 39.925532
| 79
| 0.742073
| 507
| 3,753
| 5.34714
| 0.402367
| 0.044264
| 0.035042
| 0.033936
| 0.217263
| 0.164146
| 0.151236
| 0.095168
| 0.095168
| 0.095168
| 0
| 0.005146
| 0.171596
| 3,753
| 93
| 80
| 40.354839
| 0.866838
| 0.523848
| 0
| 0.102564
| 0
| 0
| 0.139616
| 0.062245
| 0
| 0
| 0
| 0
| 0.128205
| 1
| 0.102564
| false
| 0
| 0.153846
| 0
| 0.282051
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
0ae04a483b4283bc6fdc84bd651d77ab70b6120c
| 5,149
|
py
|
Python
|
app/api/v1/models/items.py
|
bryan-munene/Store-Manager-DB
|
40b24039189aea6854d7fcf33ccb648bb6642231
|
[
"MIT"
] | null | null | null |
app/api/v1/models/items.py
|
bryan-munene/Store-Manager-DB
|
40b24039189aea6854d7fcf33ccb648bb6642231
|
[
"MIT"
] | 4
|
2018-10-25T00:57:18.000Z
|
2018-10-25T21:29:09.000Z
|
app/api/v1/models/items.py
|
bryan-munene/Store-Manager-DB
|
40b24039189aea6854d7fcf33ccb648bb6642231
|
[
"MIT"
] | null | null | null |
from .db_conn import ModelSetup
class ItemsModel(ModelSetup):
'''Handles the data logic of the items section'''
def __init__(
self,
name=None,
price=None,
quantity=None,
category_id=None,
reorder_point=None,
auth=None):
'''Initializes the variables for the items class'''
self.name = name
self.price = price
self.quantity = quantity
self.category_id = category_id
self.reorder_point = reorder_point
self.auth = auth
def add_item(
self,
name,
price,
quantity,
image,
category_id,
reorder_point,
auth):
'''Adds an item given the above arguments, then returns the created item'''
model = ModelSetup()
self.conn = model.conn
self.cur = model.cur
query = """INSERT INTO items(name, price, quantity, image, category, reorder_point, created_by)\
VALUES(%s,%s,%s,%s,%s,%s,%s);"""
self.cur.execute(
query,
(name,
price,
quantity,
image,
category_id,
reorder_point,
auth))
self.conn.commit()
query_confirm = """SELECT * FROM items WHERE name = %s AND price = %s;"""
self.cur.execute(query_confirm, (name, price))
self.item = self.cur.fetchone()
return self.item
def get_all(self):
'''gets all item records in the database and returns them'''
model = ModelSetup()
self.conn = model.conn
self.cur = model.cur
query = """SELECT * FROM items;"""
self.cur.execute(query)
self.items = self.cur.fetchall()
return self.items
def get_by_id(self, item_id):
'''retrieves one item by finding them using their unique item_id'''
model = ModelSetup()
self.conn = model.conn
self.cur = model.cur
query = """SELECT * FROM items WHERE item_id = %s;"""
self.cur.execute(query, (item_id, ))
self.item = self.cur.fetchone()
return self.item
def get_by_category(self, category):
'''retrieves all items that belong to the given category'''
model = ModelSetup()
self.conn = model.conn
self.cur = model.cur
query = """SELECT * FROM items WHERE category LIKE %s;"""
self.cur.execute(query, (category, ))
self.item = self.cur.fetchall()
return self.item
def get_by_name_and_price(self, name, price):
'''retrieves one item using its unique name-and-price combination'''
model = ModelSetup()
self.conn = model.conn
self.cur = model.cur
query = """SELECT * FROM items WHERE name LIKE %s AND price = %s;"""
self.cur.execute(query, (name, price))
self.item = self.cur.fetchone()
return self.item
def update_item(
self,
item_id,
price,
quantity,
image,
category_id,
reorder_point,
auth):
'''updates an item's details; the stored values are replaced with those provided'''
model = ModelSetup()
self.conn = model.conn
self.cur = model.cur
query = """UPDATE items
SET price = %s, quantity = %s, image = %s, category = %s, reorder_point = %s, created_by = %s
WHERE item_id= %s
"""
self.cur.execute(
query,
(price,
quantity,
image,
category_id,
reorder_point,
auth,
item_id))
self.conn.commit()
query_confirm = """SELECT * FROM items WHERE item_id = %s;"""
self.cur.execute(query_confirm, (item_id, ))
self.item = self.cur.fetchone()
return self.item
def update_item_quantity(self, item_id, quantity):
'''updates an item's quantity; the stored quantity is set to the value provided'''
model = ModelSetup()
self.conn = model.conn
self.cur = model.cur
query = """UPDATE items
SET quantity = %s
WHERE item_id= %s
"""
self.cur.execute(query, (quantity, item_id))
self.conn.commit()
query_confirm = """SELECT * FROM items WHERE item_id = %s;"""
self.cur.execute(query_confirm, (item_id, ))
self.item = self.cur.fetchone()
return self.item
def delete_item(self, item_id):
'''deletes an item identified by its item_id'''
model = ModelSetup()
self.conn = model.conn
self.cur = model.cur
query = """DELETE FROM items WHERE item_id = %s"""
self.cur.execute(query, (item_id, ))
self.conn.commit()
query_confirm = """SELECT * FROM items;"""
self.cur.execute(query_confirm)
self.items = self.cur.fetchall()
return self.items
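# --- Usage sketch (added; not part of the model module) ---
# Assuming ModelSetup holds a configured database connection, a caller might
# use the model like this; every argument value below is a placeholder:
#
#   model = ItemsModel()
#   item = model.add_item('pencil', 10, 100, 'pencil.png', 1, 20, 'admin')
#   model.update_item_quantity(item_id=item[0], quantity=150)
#   model.get_all()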
| 31.206061 | 112 | 0.543212 | 595 | 5,149 | 4.594958 | 0.147899 | 0.07169 | 0.061448 | 0.083394 | 0.664228 | 0.618508 | 0.604974 | 0.58376 | 0.501829 | 0.393563 | 0 | 0 | 0.350554 | 5,149 | 164 | 113 | 31.396341 | 0.817584 | 0.126238 | 0 | 0.633588 | 0 | 0.007634 | 0.168615 | 0.00652 | 0 | 0 | 0 | 0 | 0 | 1 | 0.068702 | false | 0 | 0.007634 | 0 | 0.145038 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
0ae122f08d00736fbd1d09356f366ff9dcd6baf8 | 4,215 | py | Python | site/src/sphinx/_extensions/api.py | linxGnu/armeria | 7f4b10e66acc377dd16929157aeb417b729ce55a | ["Apache-2.0"] | null | null | null | site/src/sphinx/_extensions/api.py | linxGnu/armeria | 7f4b10e66acc377dd16929157aeb417b729ce55a | ["Apache-2.0"] | null | null | null | site/src/sphinx/_extensions/api.py | linxGnu/armeria | 7f4b10e66acc377dd16929157aeb417b729ce55a | ["Apache-2.0"] | null | null | null |
from docutils.parsers.rst.roles import register_canonical_role, set_classes
from docutils.parsers.rst import directives
from docutils import nodes
from sphinx.writers.html import HTMLTranslator
from sphinx.errors import ExtensionError
import os
import re
def api_role(role, rawtext, text, lineno, inliner, options={}, content=[]):
return api_role_internal(False, role, rawtext, text, lineno, inliner, options, content)
def apiplural_role(role, rawtext, text, lineno, inliner, options={}, content=[]):
return api_role_internal(True, role, rawtext, text, lineno, inliner, options, content)
def api_role_internal(plural, role, rawtext, text, lineno, inliner, options, content):
set_classes(options)
classes = ['code', 'api-reference']
if 'classes' in options:
classes.extend(options['classes'])
node = nodes.literal(rawtext, text, classes=classes, api_reference=True, is_plural=plural)
return [node], []
def api_visit_literal(self, node, next_visitor):
if 'api_reference' not in node.attributes:
return next_visitor(self, node)
env = self.builder.env
javadoc_dir = os.path.abspath(env.config['javadoc_dir'])
# Build the mappings from a simple class name to its Javadoc file.
if not hasattr(env, '__javadoc_cache__'):
env.__javadoc_mappings__ = javadoc_mappings = {}
for dirname, subdirs, files in os.walk(javadoc_dir):
for basename in files:
if re.match(r'^[^A-Z]', basename) or not basename.endswith('.html'):
# Ignore the non-class files. We rely on the simple assumption that
# a class name always starts with an upper-case English alphabet.
continue
simple_class_name = basename[:-5].replace('.', '$')
javadoc_mappings[simple_class_name] = os.path.relpath(dirname, javadoc_dir) \
.replace(os.sep, '/') + '/' + basename
else:
javadoc_mappings = env.__javadoc_mappings__
text = node.astext()
if text.startswith('@'):
text = text[1:]
is_annotation = True
else:
is_annotation = False
if text.find('.') != -1:
# FQCN or package name.
if re.fullmatch(r'^[^A-Z$]+$', text):
# Package
uri = text.replace('.', '/') + '/package-summary.html'
else:
# Class
uri = text.replace('.', '/').replace('$', '.') + '.html'
text = re.sub(r'^.*\.', '', text).replace('$', '.')
else:
# Simple class name; find from the pre-calculated mappings.
if text not in javadoc_mappings:
raise ExtensionError('Cannot find a class from Javadoc: ' + text)
uri = javadoc_mappings[text]
text = text.replace('$', '.')
# Prepend the frame index.html path.
uri = os.path.relpath(javadoc_dir, env.app.outdir).replace(os.sep, '/') + '/index.html?' + uri
# Prepend the '@' back again if necessary
if is_annotation:
text = '@' + text
# Emit the tags.
self.body.append(self.starttag(node, 'code', suffix='', CLASS='docutils literal javadoc'))
self.body.append(self.starttag(node, 'a', suffix='', CLASS='reference external javadoc', HREF=uri))
self.body.append(text + '</a>')
# Append a plural suffix.
if node.attributes['is_plural']:
self.body.append(self.starttag(node, 'span', suffix='', CLASS='plural-suffix'))
if re.fullmatch(r'^.*(ch|s|sh|x|z)$', text):
self.body.append('es')
else:
self.body.append('s')
self.body.append('</span>')
self.body.append('</code>')
raise nodes.SkipNode
def setup(app):
app.add_config_value('javadoc_dir', os.path.join(app.outdir, 'apidocs'), 'html')
# Register the 'javadoc' role.
api_role.options = {'class': directives.class_option}
register_canonical_role('api', api_role)
register_canonical_role('apiplural', apiplural_role)
# Intercept the rendering of HTML literals.
old_visitor = HTMLTranslator.visit_literal
HTMLTranslator.visit_literal = lambda self, node: api_visit_literal(self, node, old_visitor)
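# --- Configuration sketch (added; not part of the extension) ---
# In a Sphinx conf.py the extension could be enabled roughly like this; the
# javadoc path is a placeholder:
#
#   extensions = ['api']
#   javadoc_dir = 'build/site/apidocs'
#
# after which :api:`HttpClient` and :apiplural:`HttpClient` render as links
# into the generated Javadoc.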
| 37.633929 | 103 | 0.629656 | 517 | 4,215 | 5 | 0.29207 | 0.024758 | 0.043327 | 0.040619 | 0.155513 | 0.137718 | 0.102901 | 0.086654 | 0.051838 | 0.051838 | 0 | 0.000931 | 0.235113 | 4,215 | 111 | 104 | 37.972973 | 0.800868 | 0.112218 | 0 | 0.067568 | 0 | 0 | 0.093079 | 0.005633 | 0 | 0 | 0 | 0 | 0 | 1 | 0.067568 | false | 0.013514 | 0.094595 | 0.027027 | 0.216216 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
0ae22c03054218a911ddc84125341497677c75ac | 2,045 | py | Python | ros_buildfarm/debian_repo.py | j-rivero/ros_buildfarm | 840d2dc1dd5db00d5407da4644cd2bcbc5e0ac88 | ["Apache-2.0"] | null | null | null | ros_buildfarm/debian_repo.py | j-rivero/ros_buildfarm | 840d2dc1dd5db00d5407da4644cd2bcbc5e0ac88 | ["Apache-2.0"] | 1 | 2019-12-12T21:08:01.000Z | 2019-12-12T21:08:01.000Z | ros_buildfarm/debian_repo.py | j-rivero/ros_buildfarm | 840d2dc1dd5db00d5407da4644cd2bcbc5e0ac88 | ["Apache-2.0"] | null | null | null |
# Copyright 2014 Open Source Robotics Foundation, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import os
from .common import PlatformPackageDescriptor
from .http_cache import fetch_and_cache_gzip
def get_debian_repo_index(debian_repository_baseurl, target, cache_dir):
url = os.path.join(
debian_repository_baseurl, 'dists', target.os_code_name, 'main')
if target.arch == 'source':
url = os.path.join(url, 'source', 'Sources.gz')
else:
url = os.path.join(url, 'binary-%s' % target.arch, 'Packages.gz')
cache_filename = fetch_and_cache_gzip(url, cache_dir)
logging.debug('Reading file: %s' % cache_filename)
# split package blocks
with open(cache_filename, 'rb') as f:
blocks = f.read().decode('utf8').split('\n\n')
blocks = [b.splitlines() for b in blocks if b]
# extract version number of every package
package_versions = {}
for lines in blocks:
prefix = 'Package: '
assert lines[0].startswith(prefix)
debian_pkg_name = lines[0][len(prefix):]
prefix = 'Version: '
versions = [l[len(prefix):] for l in lines if l.startswith(prefix)]
version = versions[0] if len(versions) == 1 else None
prefix = 'Source: '
source_names = [l[len(prefix):] for l in lines if l.startswith(prefix)]
source_name = source_names[0] if len(source_names) == 1 else None
package_versions[debian_pkg_name] = PlatformPackageDescriptor(version, source_name)
return package_versions
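# --- Usage sketch (added; not part of the module) ---
# `target` only needs `os_code_name` and `arch` attributes; the stand-in class
# and URL below are placeholders:
#
#   class _Target:
#       os_code_name = 'focal'
#       arch = 'amd64'
#
#   index = get_debian_repo_index(
#       'http://packages.ros.org/ros/ubuntu', _Target(), '/tmp/debian_cache')
#   index['some-debian-package']  # -> PlatformPackageDescriptor(version, source_name)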
| 36.517857 | 91 | 0.695355 | 288 | 2,045 | 4.822917 | 0.444444 | 0.043197 | 0.019438 | 0.028078 | 0.080634 | 0.057595 | 0.057595 | 0.057595 | 0.057595 | 0.057595 | 0 | 0.009208 | 0.203423 | 2,045 | 55 | 92 | 37.181818 | 0.843462 | 0.310513 | 0 | 0 | 0 | 0 | 0.073888 | 0 | 0 | 0 | 0 | 0 | 0.034483 | 1 | 0.034483 | false | 0 | 0.137931 | 0 | 0.206897 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
0ae2b8b9a2e89b056cf58f74862944546c4ef4a9 | 48,440 | py | Python | Framwork-Backpropagation/utils/utils_v2.py | ConvolutedDog/Implicit-Im2col-for-Backpropagation | 529a62f52903326b9289091b7d0abb45e6c7bb31 | ["Apache-2.0"] | null | null | null | Framwork-Backpropagation/utils/utils_v2.py | ConvolutedDog/Implicit-Im2col-for-Backpropagation | 529a62f52903326b9289091b7d0abb45e6c7bb31 | ["Apache-2.0"] | null | null | null | Framwork-Backpropagation/utils/utils_v2.py | ConvolutedDog/Implicit-Im2col-for-Backpropagation | 529a62f52903326b9289091b7d0abb45e6c7bb31 | ["Apache-2.0"] | null | null | null |
# Copyright 2022 ConvolutedDog (https://github.com/ConvolutedDog/)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#!/usr/bin/python3
import torch
import torch.nn as nn
import torch.nn.functional as F
from graphviz import Digraph, render
from torch.autograd import Variable
@torch.no_grad()
def cross_entropy_loss(y_predict, y_true):
print('\n=========================== Layer:'+' {0:18}'.format('cross_entropy_loss')+' Start ===========================')
print('# y_predict.shape: ', list(y_predict.shape))
print('# y_true.shape: ', list(y_true.shape))
y_shift = torch.sub(y_predict, torch.max(y_predict, dim=1, keepdim=True).values)
y_exp = torch.exp(y_shift)
y_probability = torch.div(y_exp, torch.sum(y_exp, dim=1, keepdim=True))
ypred_loss = torch.mean(-torch.sum(torch.mul(y_true, torch.log(y_probability)), dim=1, keepdim=True))
dLoss_dypred = y_probability - y_true
print('# dLoss_dypred.shape: ', list(dLoss_dypred.shape))
print('# Self calculated loss: ', ypred_loss.item())
print('=========================== Layer:'+' {0:18}'.format('cross_entropy_loss')+' End =============================')
return ypred_loss, dLoss_dypred
@torch.no_grad()
def fc_backward(dLoss_dnextz, z, w):
print('# next_dz.shape: ', list(dLoss_dnextz.shape))
print('# z.shape: ', list(z.shape))
print('# weight.shape: ', list(w.shape))
print('# bias.shape: ', '['+str(dLoss_dnextz.shape[1])+']')
N = z.shape[0]
if len(z.shape) == 4:
z = z.view(z.size(0), -1)
dLoss_dz = torch.matmul(dLoss_dnextz, w) #delta
dLoss_dfcW = torch.matmul(dLoss_dnextz.t(), z)
dLoss_dfcB = torch.sum(dLoss_dnextz, dim=0)
print('# dz.shape: ', list(dLoss_dz.shape))
print('# dweight.shape: ', list(dLoss_dfcW.shape))
print('# dbias.shape: ', list(dLoss_dfcB.shape))
return dLoss_dz, dLoss_dfcW/N, dLoss_dfcB/N
@torch.no_grad()
def view_backward(dLoss_dnextz, last_z, params):
print('# next_dz.shape: ', list(dLoss_dnextz.shape))
print('# last_z.shape: ', list(last_z.shape))
if params:
pooling = params[0]
stride = params[1]
padding = params[2]
output_size = (int((last_z.shape[2]-pooling[0]+2*padding[0])/stride[0]+1), \
int((last_z.shape[3]-pooling[0]+2*padding[0])/stride[0]+1))
dLoss_dz = dLoss_dnextz.reshape(last_z.shape[0], last_z.shape[1], output_size[0], output_size[1])
else:
dLoss_dz = dLoss_dnextz.reshape(last_z.shape)
print('# dz.shape: ', list(dLoss_dz.shape))
return dLoss_dz
def add_backward(dLoss_dnextz):
print('# next_dz.shape: ', list(dLoss_dnextz.shape))
dLoss_dz = dLoss_dnextz
print('# dz.shape: ', list(dLoss_dz.shape))
return dLoss_dz
@torch.no_grad()
def relu_backward(next_dz, z):
print('# next_dz.shape: ', list(next_dz.shape))
print('# z.shape: ', list(z.shape))
zeros_tensor = torch.zeros_like(next_dz)
dLoss_dz = torch.where(torch.gt(z, 0), next_dz, zeros_tensor)
print('# dz.shape: ', list(dLoss_dz.shape))
return dLoss_dz
@torch.no_grad()
def dropback_backward(next_dz, mask, p):
print('# zeros probability: ', p)
print('# next_dz.shape: ', list(next_dz.shape))
print('# mask.shape: ', list(mask.shape))
zeros_tensor = torch.zeros_like(mask)
dLoss_dz = torch.mul(torch.where(torch.eq(mask, 1.), next_dz, zeros_tensor), 1./(1.-p))
print('# dz.shape: ', list(dLoss_dz.shape))
return dLoss_dz
@torch.no_grad()
def max_pooling_backward(next_dz, z, pooling, strides, padding=(0, 0)):
print('# next_dz.shape: ', list(next_dz.shape))
print('# z.shape: ', list(z.shape))
print('# padding: ', padding)
print('# strides: ', strides)
N, C, H, W = z.shape
_, _, out_h, out_w = next_dz.shape
padding_z = F.pad(z, pad=(padding[1],padding[1],padding[0],\
padding[0],0,0), mode='constant', value=0)
padding_dz = torch.zeros_like(padding_z)
for n in torch.arange(N):
for c in torch.arange(C):
for i in torch.arange(out_h):
for j in torch.arange(out_w):
flat_idx = torch.argmax(padding_z[n, c,
strides[0] * i:strides[0] * i + pooling[0],
strides[1] * j:strides[1] * j + pooling[1]])
h_idx = strides[0] * i + flat_idx // pooling[1]
w_idx = strides[1] * j + flat_idx % pooling[1]
padding_dz[n, c, h_idx, w_idx] += next_dz[n, c, i, j]
dz = _remove_padding(padding_dz, padding) # padding_z[:, :, padding[0]:-padding[0], padding[1]:-padding[1]]
print('# dz.shape: ', list(dz.shape))
return dz
@torch.no_grad()
def batchnorm2d_backward(next_dz, z, eps, gamma=torch.Tensor([1.,1.,1.])):
print('# next_dz.shape: ', list(next_dz.shape))
print('# z.shape: ', list(z.shape))
print('# eps: ', eps)
print('# gamma.shape: ', list(gamma.shape))
N, C, H, W = z.shape
m = N*H*W
shape = [N,C,H,W]
import numpy as np
ax = list(np.arange(len(shape)))
shape.pop(1)
ax.pop(1)
axis = tuple(ax)
dxhut = torch.zeros_like(next_dz)
for c in range(C):
dxhut[:,c] = next_dz[:,c]*gamma[c]
dz1 = m*dxhut
mu = z.mean(axis=axis, keepdim=True)
xmu = z - mu
xmu2 = xmu**2
var = xmu2.sum(axis=axis, keepdim=True)/m
ivar = 1./torch.pow(var+eps, 0.5)
dz2 = (ivar**2)*((dxhut*xmu).sum(axis=axis, keepdim=True))*xmu
dz3 = dxhut.sum(axis=axis, keepdim=True)
dz = ivar/m*(dz1-dz2-dz3)
print('# dz.shape: ', list(dz.shape))
return dz
@torch.no_grad()
def average_pooling_backward(next_dz, z, pooling, strides, padding=(0, 0)):
print('# next_dz.shape: ', list(next_dz.shape))
print('# z.shape: ', list(z.shape))
print('# padding: ', padding)
print('# strides: ', strides)
N, C, H, W = z.shape
_, _, out_h, out_w = next_dz.shape
padding_z = F.pad(z, pad=(padding[1],padding[1],padding[0],\
padding[0],0,0), mode='constant', value=0)
padding_dz = torch.zeros_like(padding_z)
for n in torch.arange(N):
for c in torch.arange(C):
for i in torch.arange(out_h):
for j in torch.arange(out_w):
padding_dz[n, c,
strides[0] * i:strides[0] * i + pooling[0],
strides[1] * j:strides[1] * j + pooling[1]] += next_dz[n, c, i, j] / (pooling[0] * pooling[1])
dz = _remove_padding(padding_dz, padding) # padding_z[:, :, padding[0]:-padding[0], padding[1]:-padding[1]]
print('# dz.shape: ', list(dz.shape))
return dz
@torch.no_grad()
def _remove_padding(z, padding):
if padding[0] > 0 and padding[1] > 0:
return z[:, :, padding[0]:-padding[0], padding[1]:-padding[1]]
elif padding[0] > 0:
return z[:, :, padding[0]:-padding[0], :]
elif padding[1] > 0:
return z[:, :, :, padding[1]:-padding[1]]
else:
return z
@torch.no_grad()
def conv_backward(next_dz, K, z, padding=(0, 0), strides=(1, 1)):
N, C, H, W = z.shape
D, C, k1, k2 = K.shape
N, D, H1, W1 = next_dz.shape
print('# next_dz.shape: ', list(next_dz.shape))
print('# z.shape: ', list(z.shape))
print('# weight.shape: ', list(K.shape))
print('# bias.shape: ', '['+str(K.shape[0])+']')
print('# padding: ', padding)
print('# strides: ', strides)
padding_next_dz = _insert_zeros(next_dz, strides)
flip_K = torch.flip(K, (2, 3))
swap_flip_K = torch.swapaxes(flip_K, 0, 1)
ppadding_next_dz = F.pad(padding_next_dz, pad=(k2-1-padding[1],k2-1-padding[1],\
k1-1-padding[0],k1-1-padding[0],0,0), mode='constant', value=0)
dz = _conv_forward(ppadding_next_dz, swap_flip_K)
swap_z = torch.swapaxes(z, 0, 1)
dK = _conv_forward(torch.swapaxes(F.pad(z, pad=(padding[1],padding[1],\
padding[0],padding[0],0,0), mode='constant', value=0), 0, 1), torch.swapaxes(padding_next_dz, 0, 1))
db = torch.sum(torch.sum(torch.sum(next_dz, axis=-1), axis=-1), axis=0) # sum over height and width, then over the batch dimension
print('# dz.shape: ', list(dz.shape))
print('# dweight.shape: ', list(dK.transpose(0,1).shape))
print('# dbias.shape: ', list(db.shape))
return dz, (dK/N).transpose(0,1), db/N
@torch.no_grad()
def _conv_forward(x, weight, strides=(1,1)):
n, c, h_in, w_in = x.shape
d, c, k, j = weight.shape
x_pad = x
x_pad = x_pad.unfold(2, k, strides[0])
x_pad = x_pad.unfold(3, j, strides[1])
out = torch.einsum(
'nchwkj,dckj->ndhw',
x_pad, weight)
return out
@torch.no_grad()
def _insert_zeros(dz, strides):
N, D, H, W = dz.shape
H_last = (H-1)*(strides[0]-1) + H
W_last = (W-1)*(strides[1]-1) + W
pz = torch.zeros(N, D, H_last, W_last)
for n in range(N):
for d in range(D):
for h in range(0, H_last, strides[0]):
for w in range(0, W_last, strides[1]):
pz[n,d,h,w] = dz[n,d,h//strides[0],w//strides[1]]
return pz
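# --- Illustration (added; not part of the original utilities) ---
# _insert_zeros dilates a gradient map so a strided convolution can be
# back-propagated as a stride-1 convolution. With strides=(2, 2) a 2x2 map
# becomes 3x3, with zeros interleaved between the original entries:
#
#   dz = torch.arange(4.).reshape(1, 1, 2, 2)
#   _insert_zeros(dz, (2, 2))
#   # tensor([[[[0., 0., 1.],
#   #           [0., 0., 0.],
#   #           [2., 0., 3.]]]])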
@torch.no_grad()
def judge_tensors_equal(tensor_A, tensor_B):
if not tensor_A.shape == tensor_B.shape:
print('Shape of the two compared tensors is not equal.')
return None
error = 0
error_tolerance = 0.001
np_A = tensor_A.detach().numpy()
np_B = tensor_B.detach().numpy()
if len(tensor_A.shape) == 4:
N, C, H, W = tensor_A.shape
for n in range(N):
for c in range(C):
for h in range(H):
for w in range(W):
if np_A[n,c,h,w]-np_B[n,c,h,w] > error_tolerance or np_B[n,c,h,w]-np_A[n,c,h,w] > error_tolerance:
error += 1
if error%20 == 0:
pass
print('error', np_A[n,c,h,w], np_B[n,c,h,w])
else:
if n*c*h*w % 20000000000000 == 0:
pass
#print('right', np_A[n,c,h,w], np_B[n,c,h,w])
#print('Error rate: ', error/(N*C*H*W))
print('4D-error-rate: ', end=' ')
return error/(N*C*H*W)
elif len(tensor_A.shape) == 1:
C = tensor_A.shape[0]
for c in range(C):
if np_A[c]-np_B[c] > error_tolerance or np_B[c]-np_A[c] > error_tolerance:
#print(np_A[c], np_B[c])
error += 1
#print('Error rate: ', error/C)
print('1D-error-rate: ', end=' ')
return error/C
elif len(tensor_A.shape) == 2:
N, C = tensor_A.shape
for n in range(N):
for c in range(C):
if np_A[n,c]-np_B[n,c] > error_tolerance or np_B[n,c]-np_A[n,c] > error_tolerance:
#print(np_A[n,c], np_B[n,c])
error += 1
#print('Error rate: ', error/(C*N))
print('2D-error-rate: ', end=' ')
return error/(C*N)
@torch.no_grad()
def get_featuremap(featuremap_dir=None):
import os
featuremap = []
if featuremap_dir is None:
pth_dir = "./tmp_file/"
else:
pth_dir = featuremap_dir
files = os.listdir(pth_dir)
file_nums = []
for i in range(len(files)):
if '.pth' in files[i]:
file_nums.append(int(files[i].split('.pth')[0]))
file_nums.sort()
for file_num in file_nums:
tensor = torch.load(pth_dir+str(file_num)+'.pth')
featuremap.append(tensor)
delete_allpths(pth_dir=None)
return featuremap
@torch.no_grad()
def get_structure_parameters_v1(model):
layers = []
for layer in model.modules():
if not ':' in str(layer):
layers.append(layer)
parameters = []
fc_conv_weights = []
for layer in layers:
if isinstance(layer, nn.Conv2d):
layer_name = 'Conv2d'
Conv2d_params = {}
Conv2d_params['layer_name'] = layer_name
# in_channel
in_channel = layer.__dict__.get('in_channels')
Conv2d_params['in_channel'] = in_channel
# out_channel
out_channel = layer.__dict__.get('out_channels')
Conv2d_params['out_channel'] = out_channel
# kernel_size
kernel_size = layer.__dict__.get('kernel_size')
if not isinstance(kernel_size, tuple):
Conv2d_params['kernel_size'] = (kernel_size, kernel_size)
else:
Conv2d_params['kernel_size'] = kernel_size
# stride
stride = layer.__dict__.get('stride')
if not isinstance(stride, tuple):
Conv2d_params['stride'] = (stride, stride)
else:
Conv2d_params['stride'] = stride
# padding
padding = layer.__dict__.get('padding')
if not isinstance(padding, tuple):
Conv2d_params['padding'] = (padding, padding)
else:
Conv2d_params['padding'] = padding
# return
fc_conv_weights.append(layer.weight)
parameters.append(Conv2d_params)
elif isinstance(layer, nn.ReLU):
layer_name = 'ReLU'
parameters.append({'layer_name': layer_name})
elif isinstance(layer, nn.MaxPool2d):
layer_name = 'MaxPool2d'
MaxPool2d_params = {}
MaxPool2d_params['layer_name'] = layer_name
# kernel_size
kernel_size = layer.__dict__.get('kernel_size')
if not isinstance(kernel_size, tuple):
MaxPool2d_params['kernel_size'] = (kernel_size, kernel_size)
else:
MaxPool2d_params['kernel_size'] = kernel_size
# stride
stride = layer.__dict__.get('stride')
if not isinstance(stride, tuple):
MaxPool2d_params['stride'] = (stride, stride)
else:
MaxPool2d_params['stride'] = stride
# padding
padding = layer.__dict__.get('padding')
if not isinstance(padding, tuple):
MaxPool2d_params['padding'] = (padding, padding)
else:
MaxPool2d_params['padding'] = padding
# return
parameters.append(MaxPool2d_params)
elif isinstance(layer, nn.AvgPool2d):
layer_name = 'AvgPool2d'
AvgPool2d_params = {}
AvgPool2d_params['layer_name'] = layer_name
# kernel_size
kernel_size = layer.__dict__.get('kernel_size')
if not isinstance(kernel_size, tuple):
AvgPool2d_params['kernel_size'] = (kernel_size, kernel_size)
else:
AvgPool2d_params['kernel_size'] = kernel_size
# stride
stride = layer.__dict__.get('stride')
if not isinstance(stride, tuple):
AvgPool2d_params['stride'] = (stride, stride)
else:
AvgPool2d_params['stride'] = stride
# padding
padding = layer.__dict__.get('padding')
if not isinstance(padding, tuple):
AvgPool2d_params['padding'] = (padding, padding)
else:
AvgPool2d_params['padding'] = padding
# return
parameters.append(AvgPool2d_params)
elif isinstance(layer, nn.Dropout):
layer_name = 'Dropout'
Dropout_params = {}
Dropout_params['layer_name'] = layer_name
# p
p = layer.__dict__.get('p')
Dropout_params['p'] = p
# return
parameters.append(Dropout_params)
elif isinstance(layer, nn.BatchNorm2d):
layer_name = 'BatchNorm2d'
BatchNorm2d_params = {}
BatchNorm2d_params['layer_name'] = layer_name
# num_features
num_features = layer.__dict__.get('num_features')
BatchNorm2d_params['num_features'] = num_features
# eps
eps = layer.__dict__.get('eps')
BatchNorm2d_params['eps'] = eps
# return
fc_conv_weights.append(layer.weight)
parameters.append(BatchNorm2d_params)
elif isinstance(layer, nn.Linear):
layer_name = 'Linear'
Linear_params = {}
Linear_params['layer_name'] = layer_name
# in_features
in_features = layer.__dict__.get('in_features')
Linear_params['in_features'] = in_features
# out_features
out_features = layer.__dict__.get('out_features')
Linear_params['out_features'] = out_features
# return
fc_conv_weights.append(layer.weight)
parameters.append(Linear_params)
elif isinstance(layer, nn.AdaptiveAvgPool2d):
layer_name = 'AdaptiveAvgPool2d'
AdaptiveAvgPool2d_params = {}
AdaptiveAvgPool2d_params['layer_name'] = layer_name
# output_size
output_size = layer.__dict__.get('output_size')
if not isinstance(output_size, tuple):
AdaptiveAvgPool2d_params['output_size'] = (output_size, output_size)
else:
AdaptiveAvgPool2d_params['output_size'] = output_size
# return
parameters.append(AdaptiveAvgPool2d_params)
else:
print('The layer has not been processed in get_structure_parameters_v1!')
return parameters, fc_conv_weights
@torch.no_grad()
def delete_allpths(pth_dir=None):
import os
if pth_dir is None:
pth_dir = "./tmp_file/"
for root, dirs, files in os.walk(pth_dir, topdown=False):
for name in files:
if name.endswith('.pth',):
os.remove(os.path.join(root, name))
@torch.no_grad()
def mul_items(tensor_size):
x = list(tensor_size)
mul = 1.
for i in range(len(x)):
mul *= x[i]
return mul
@torch.no_grad()
def gradient_backward_v1(model, img, label, num_class=1000):
return_dz = []
parameters, fc_conv_weights = get_structure_parameters_v1(model)
featuremap = get_featuremap(featuremap_dir=None)
featuremap.insert(0, img) ###
y_true = F.one_hot(label, num_classes=num_class).float()
loss, dLoss_dz = cross_entropy_loss(featuremap[-1], y_true)
print('Self calculated loss: ', loss)
featuremap.pop()
return_dz.append(dLoss_dz)
dW_dB_fc_conv = []
for i in range(len(parameters)-1, -1, -1):
layer = parameters[i]
print('\n======================== {0:3} Layer: '.format(str(i))+'{0:9}'.format(layer['layer_name'])+' Backward Start ========================')
if layer['layer_name'] == 'Conv2d':
z = featuremap[-1]
weight_z = fc_conv_weights[-1]
try:
padding = layer['padding']
except:
padding = (0, 0)
stride = layer['stride']
dLoss_dz, dLoss_dW, dLoss_dB = conv_backward(dLoss_dz, weight_z, z, padding, stride)
return_dz.append(dLoss_dz)
fc_conv_weights.pop()
if not len(featuremap) == 1:
lastpop = featuremap.pop()
if not len(dLoss_dz.shape) == len(lastpop.shape):
dLoss_dz = dLoss_dz.reshape(lastpop.shape)
elif layer['layer_name'] == 'ReLU':
z = featuremap[-1]
dLoss_dz = relu_backward(dLoss_dz, z)
return_dz.append(dLoss_dz)
lastpop = featuremap.pop()
if not len(dLoss_dz.shape) == len(lastpop.shape):
dLoss_dz = dLoss_dz.reshape(lastpop.shape)
elif layer['layer_name'] == 'MaxPool2d':
z = featuremap[-1]
pooling = layer['kernel_size']
stride = layer['stride']
padding = layer['padding']
dLoss_dz = max_pooling_backward(dLoss_dz, z, pooling, stride, padding)
return_dz.append(dLoss_dz)
lastpop = featuremap.pop()
if not len(dLoss_dz.shape) == len(lastpop.shape):
dLoss_dz = dLoss_dz.reshape(lastpop.shape)
elif layer['layer_name'] == 'AvgPool2d':
z = featuremap[-1]
pooling = layer['kernel_size']
stride = layer['stride']
padding = layer['padding']
dLoss_dz = average_pooling_backward(dLoss_dz, z, pooling, stride, padding)
return_dz.append(dLoss_dz)
lastpop = featuremap.pop()
if not len(dLoss_dz.shape) == len(lastpop.shape):
dLoss_dz = dLoss_dz.reshape(lastpop.shape)
elif layer['layer_name'] == 'Linear':
weight_z = fc_conv_weights[-1]
z = featuremap[-1]
dLoss_dz, dLoss_dW, dLoss_dB = fc_backward(dLoss_dz, z, weight_z)
return_dz.append(dLoss_dz)
fc_conv_weights.pop()
lastpop = featuremap.pop()
if not len(dLoss_dz.shape) == len(lastpop.shape):
dLoss_dz = dLoss_dz.reshape(lastpop.shape)
elif layer['layer_name'] == 'Dropout':
p = layer['p']
mask = featuremap[-1]
dLoss_dz = dropback_backward(dLoss_dz, mask, p)
return_dz.append(dLoss_dz)
featuremap.pop()
lastpop = featuremap.pop()
if not len(dLoss_dz.shape) == len(lastpop.shape):
dLoss_dz = dLoss_dz.reshape(lastpop.shape)
elif layer['layer_name'] == 'BatchNorm2d':
eps = layer['eps']
z = featuremap[-1]
gamma = fc_conv_weights[-1]
dLoss_dz = batchnorm2d_backward(dLoss_dz, z, eps, gamma)
return_dz.append(dLoss_dz)
fc_conv_weights.pop()
lastpop = featuremap.pop()
if not len(dLoss_dz.shape) == len(lastpop.shape):
dLoss_dz = dLoss_dz.reshape(lastpop.shape)
else:
print('Not completed in gradient_backward_v1!')
print('======================== {0:3} Layer: '.format(str(i))+'{0:9}'.format(layer['layer_name'])+' Backward End ==========================')
delete_allpths(pth_dir=None)
return return_dz, dLoss_dW, dLoss_dB
@torch.no_grad()
def make_dot(var, params=None):
""" Produces Graphviz representation of PyTorch autograd graph
Blue nodes are the Variables that require grad, orange are Tensors
saved for backward in torch.autograd.Function
Args:
var: output Variable
params: dict of (name, Variable) to add names to node that
require grad (TODO: make optional)
"""
if params is not None:
assert isinstance(params.values()[0], Variable)
param_map = {id(v): k for k, v in params.items()}
node_attr = dict(style='filled',
shape='box',
align='left',
fontsize='12',
ranksep='0.1',
height='0.2')
dot = Digraph(node_attr=node_attr, graph_attr=dict(size="12,12"))
seen = set()
def size_to_str(size):
return '('+(', ').join(['%d' % v for v in size])+')'
def add_nodes(var):
if var not in seen:
if torch.is_tensor(var):
dot.node(str(id(var)), size_to_str(var.size()), fillcolor='orange')
elif hasattr(var, 'variable'):
u = var.variable
name = param_map[id(u)] if params is not None else ''
node_name = '%s\n %s' % (name, size_to_str(u.size()))
dot.node(str(id(var)), node_name, fillcolor='lightblue')
else:
dot.node(str(id(var)), str(type(var).__name__))
seen.add(var)
if hasattr(var, 'next_functions'):
for u in var.next_functions:
if u[0] is not None:
dot.edge(str(id(u[0])), str(id(var)))
add_nodes(u[0])
if hasattr(var, 'saved_tensors'):
for t in var.saved_tensors:
dot.edge(str(id(t)), str(id(var)))
add_nodes(t)
print(var)
add_nodes(var.grad_fn)
return dot
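# --- Usage sketch (added; not part of the original utilities) ---
# Rendering the autograd graph of a model; the torchvision model and input
# shape are placeholders:
#
#   import torchvision
#   model = torchvision.models.alexnet()
#   x = torch.randn(1, 3, 224, 224)
#   dot = make_dot(model(x))
#   dot.render('alexnet_graph')  # graphviz writes alexnet_graph.pdf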
def generate_g(model, x):
delete_allpths(pth_dir=None)
print('\n=========================== Store network model Results Start =========================')
y = model(x)
print('=========================== Store network model Results End ===========================\n')
if 'GoogLeNet' in str(model).split('\n')[0]:
g = make_dot(y[0])
return g
else:
g = make_dot(y)
return g
@torch.no_grad()
def exchange_name(name):
if 'Relu' in name:
return 'ReLU'
elif 'AddmmBackward' in name:
return 'Linear'
elif 'ViewBackward' in name:
return 'View'
elif 'Mean' in name or 'Avg' in name:
return 'AvgPool2d'
elif 'BatchNorm' in name:
return 'BatchNorm2d'
elif 'Conv' in name:
return 'Conv2d'
elif 'MaxPool' in name:
return 'MaxPool2d'
elif 'MulBackward' in name:
return 'Dropout_2'
elif 'DivBackward' in name:
return 'Dropout_1'
elif 'AddBackward' in name:
return 'Add'
elif 'Cat' in name:
return 'Cat'
elif 'Hardtanh' in name:
return 'ReLU6'
else:
return 'None'
@torch.no_grad()
def generate_connections(g):
graph = str(g).split('\n')
labels = {}
connections = []
for i in range(len(graph)):
if 'label' in graph[i] and graph[i][-1] == '"':
labels[(graph[i]+graph[i+1][1:]).split('\t')[1].split(' ')[0]]=\
(graph[i]+graph[i+1][1:]).split('\t')[1].split('"')[1]
if 'label' in graph[i] and graph[i][-1] == ']':
labels[graph[i].split('\t')[1].split(' ')[0]]=\
graph[i].split('\t')[1].split('=')[1].split(']')[0]
for i in range(len(graph)):
if '->' in graph[i]:
connections.append({labels[graph[i].split('\t')[1].split(' -> ')[0]]+'_'+\
graph[i].split('\t')[1].split(' -> ')[0]:\
labels[graph[i].split('\t')[1].split(' -> ')[1]]+'_'+\
graph[i].split('\t')[1].split(' -> ')[1]})
pop_index = []
for i in range(len(connections)):
item_key = list(connections[i].keys())[0]
if '(' in item_key or 'TBackward' in item_key:
pop_index.append(connections[i])
for i in range(len(pop_index)-1, -1, -1):
connections.remove(pop_index[i])
new_connections = []
for item in connections:
key, value = list(item.items())[0]
key1 = exchange_name(key.split('_')[0]) + '_' + key.split('_')[1]
value1 = exchange_name(value.split('_')[0]) + '_' + value.split('_')[1]
if 'None' in key1 or 'None' in value1:
print('Not completed for '+key+' or '+value+'! Check exchange_name function!')
exit()
new_connections.append({key1: value1})
if not len(new_connections) == len(connections):
print('Generate connections not done! Check generate_connections function!')
exit()
new_connections.insert(0, {list(new_connections[0].values())[0]: None})
new_connections.append({'None': 'None'})
return connections, new_connections
@torch.no_grad()
def get_split_connections(connections):
return_connections = []
tmp_split = []
for i in range(len(connections)):
item = connections[i]
if len(tmp_split) == 0:
tmp_split.append(item)
continue
value = list(item.values())[0]
last_key = list(tmp_split[-1].keys())[0]
if value == last_key:
tmp_split.append(item)
else:
return_connections.append(tmp_split)
tmp_split = [item]
return return_connections
@torch.no_grad()
def find_start_end(list_dic_key_value, i, j):
key1 = list(list_dic_key_value[i].values())[0]
key2 = list(list_dic_key_value[j].keys())[0]
start = 0
end = len(list_dic_key_value)-1
for index in range(len(list_dic_key_value)):
if key1 == list(list_dic_key_value[index].keys())[0]:
start = index
break
for index in range(len(list_dic_key_value)):
if key2 == list(list_dic_key_value[index].keys())[0]:
end = index
break
return start+1, end-1
@torch.no_grad()
def merge_connections(connections):
import copy
last_connections = copy.deepcopy(connections)
connections.append({'None':'None'})
num_Throwed = 0
notchoosed = []
print('\n=========================== Restore network model Start ===============================')
for i in range(len(connections)):
print('# Restore network model: processing {}/{}'.format(i, len(connections)-1))
item_key = list(connections[i].keys())[0]
if not 'None' in item_key:
if i == 0:
pass
else:
last_item_key = list(connections[i-1].keys())[0]
if not connections[i][item_key] == last_item_key:
for j in range(i+1, len(connections)):
if not list(connections[j].values())[0] == list(connections[j-1].keys())[0]:
notchoosed.append(i)
start, end = find_start_end(connections, i, j-1)
tmp = []
tmp.append(connections[start:end+1])
tmp.append(connections[i:j-1])
last_connections[start:end+1] = [tmp]
for kk in range(end-start):
last_connections.insert(start, 'Throwed')
num_Throwed += 1
break
if not notchoosed == []:
last_connections = last_connections[:notchoosed[0]]
else:
pass
for i in range(num_Throwed):
last_connections.remove('Throwed')
if last_connections[-1] == {'None': 'None'}:
last_connections.remove({'None': 'None'})
print('=========================== Restore network model End =================================\n')
return last_connections
@torch.no_grad()
def find_next_layer_by_name(layers, name, start_i):
for i in range(start_i, len(layers)):
layer = layers[i]
if name in str(layer):
return layer, i
@torch.no_grad()
def get_layers(last_connections, model):
return_layers = []
tmp_layers = []
for layer in model.modules():
if not ':' in str(layer):
tmp_layers.append(layer)
index_tmp_layers = 0
for i in range(len(last_connections)-1, -1, -1):
if not isinstance(last_connections[i], list):
# a single layer, no branches
current_layer_name = list(last_connections[i].keys())[0].split('_')[0]
if 'ReLU' in current_layer_name:
return_layers.insert(0, torch.nn.ReLU(inplace=True))
elif 'Add' in current_layer_name:
return_layers.insert(0, 'Add')
elif 'View' in current_layer_name:
return_layers.insert(0, 'View')
else:
tmp = find_next_layer_by_name(tmp_layers, current_layer_name, index_tmp_layers)
return_layers.insert(0, tmp[0])
if isinstance(last_connections[i-1], list):
index_tmp_layers = tmp[1] + 1
elif not list(last_connections[i-1].keys())[0].split('_')[0] == 'Dropout':
index_tmp_layers = tmp[1] + 1
else:
return_layers.insert(0, [])
for j in range(len(last_connections[i])):
return_layers[0].append([])
if len(last_connections[i][j]) == 0:
continue
for k in range(len(last_connections[i][j])-1, -1, -1):
current_layer_name = list(last_connections[i][j][k].keys())[0].split('_')[0]
if 'ReLU' in current_layer_name:
return_layers[0][j].insert(0, torch.nn.ReLU(inplace=True))
elif 'Add' in current_layer_name:
return_layers[0][j].insert(0, 'Add')
elif 'View' in current_layer_name:
return_layers.insert(0, 'View')
else:
tmp = find_next_layer_by_name(tmp_layers, current_layer_name, index_tmp_layers)
return_layers[0][j].insert(0, tmp[0])
if not list(last_connections[i][j][k-1].keys())[0].split('_')[0] == 'Dropout':
index_tmp_layers = tmp[1] + 1
return return_layers
@torch.no_grad()
def get_tensors(last_connections):
tensors = get_featuremap(featuremap_dir=None)
index_tensors = 0
import copy
last_tensors = copy.deepcopy(last_connections)
for i in range(len(last_connections)-1, -1, -1):
if not isinstance(last_connections[i], list):
current_layer_name = list(last_connections[i].keys())[0].split('_')[0]
if 'Add' in current_layer_name:
last_tensors[i] = 'Add'
elif 'View' in current_layer_name:
last_tensors[i] = 'View'
else:
last_tensors[i] = tensors[index_tensors]
index_tensors += 1
else:
for j in range(len(last_connections[i])):
if len(last_connections[i][j]) == 0:
continue
for k in range(len(last_connections[i][j])-1, -1, -1):
current_layer_name = list(last_connections[i][j][k].keys())[0].split('_')[0]
if 'Add' in current_layer_name:
last_tensors[i][j][k] = 'Add'
elif 'View' in current_layer_name:
last_tensors[i][j][k] = 'View'
else:
last_tensors[i][j][k] = tensors[index_tensors]
index_tensors += 1
for i in range(len(last_tensors)-1, -1, -1):
if isinstance(last_tensors[i], str):
# Add or View
if last_tensors[i] == 'Add':
last_tensors[i] = last_tensors[i+1][0][0] + last_tensors[i+1][1][0]
if last_tensors[i] == 'View':
last_tensors[i] = last_tensors[i+1].view(last_tensors[i+1].size(0), -1)
elif isinstance(last_tensors[i], list):
for j in range(len(last_tensors[i])):
if len(last_tensors[i][j]) == 0:
last_tensors[i][j].append(last_tensors[i+1])
return last_tensors
@torch.no_grad()
def get_structure_parameters(return_layers):
import copy
parameters = copy.deepcopy(return_layers)
fc_conv_weights = copy.deepcopy(return_layers)
for i in range(len(return_layers)):
layer = return_layers[i]
if isinstance(layer, nn.Conv2d):
layer_name = 'Conv2d'
Conv2d_params = {}
Conv2d_params['layer_name'] = layer_name
# in_channel
in_channel = layer.__dict__.get('in_channels')
Conv2d_params['in_channel'] = in_channel
# out_channel
out_channel = layer.__dict__.get('out_channels')
Conv2d_params['out_channel'] = out_channel
# kernel_size
kernel_size = layer.__dict__.get('kernel_size')
if not isinstance(kernel_size, tuple):
Conv2d_params['kernel_size'] = (kernel_size, kernel_size)
else:
Conv2d_params['kernel_size'] = kernel_size
# stride
stride = layer.__dict__.get('stride')
if not isinstance(stride, tuple):
Conv2d_params['stride'] = (stride, stride)
else:
Conv2d_params['stride'] = stride
# padding
padding = layer.__dict__.get('padding')
if not isinstance(padding, tuple):
Conv2d_params['padding'] = (padding, padding)
else:
Conv2d_params['padding'] = padding
# return
fc_conv_weights[i] = layer.weight
parameters[i] = Conv2d_params
elif isinstance(layer, nn.ReLU):
layer_name = 'ReLU'
parameters[i] = {'layer_name': layer_name}
elif layer == 'Add':
layer_name = 'Add'
parameters[i] = {'layer_name': layer_name}
elif layer == 'View':
layer_name = 'View'
parameters[i] = {'layer_name': layer_name}
elif layer == 'Cat':
layer_name = 'Cat'
parameters[i] = {'layer_name': layer_name}
elif isinstance(layer, nn.MaxPool2d):
layer_name = 'MaxPool2d'
MaxPool2d_params = {}
MaxPool2d_params['layer_name'] = layer_name
# kernel_size
kernel_size = layer.__dict__.get('kernel_size')
if not isinstance(kernel_size, tuple):
MaxPool2d_params['kernel_size'] = (kernel_size, kernel_size)
else:
MaxPool2d_params['kernel_size'] = kernel_size
# stride
stride = layer.__dict__.get('stride')
if not isinstance(stride, tuple):
MaxPool2d_params['stride'] = (stride, stride)
else:
MaxPool2d_params['stride'] = stride
# padding
padding = layer.__dict__.get('padding')
if not isinstance(padding, tuple):
MaxPool2d_params['padding'] = (padding, padding)
else:
MaxPool2d_params['padding'] = padding
# return
parameters[i] = MaxPool2d_params
elif isinstance(layer, nn.AvgPool2d):
layer_name = 'AvgPool2d'
AvgPool2d_params = {}
AvgPool2d_params['layer_name'] = layer_name
# kernel_size
kernel_size = layer.__dict__.get('kernel_size')
if not isinstance(kernel_size, tuple):
AvgPool2d_params['kernel_size'] = (kernel_size, kernel_size)
else:
AvgPool2d_params['kernel_size'] = kernel_size
# stride
stride = layer.__dict__.get('stride')
if not isinstance(stride, tuple):
AvgPool2d_params['stride'] = (stride, stride)
else:
AvgPool2d_params['stride'] = stride
# padding
padding = layer.__dict__.get('padding')
if not isinstance(padding, tuple):
AvgPool2d_params['padding'] = (padding, padding)
else:
AvgPool2d_params['padding'] = padding
# return
parameters[i] = AvgPool2d_params
elif isinstance(layer, nn.Dropout):
layer_name = 'Dropout'
Dropout_params = {}
Dropout_params['layer_name'] = layer_name
# p
p = layer.__dict__.get('p')
Dropout_params['p'] = p
# return
parameters[i] = Dropout_params
elif isinstance(layer, nn.BatchNorm2d):
layer_name = 'BatchNorm2d'
BatchNorm2d_params = {}
BatchNorm2d_params['layer_name'] = layer_name
# num_features
num_features = layer.__dict__.get('num_features')
BatchNorm2d_params['num_features'] = num_features
# eps
eps = layer.__dict__.get('eps')
BatchNorm2d_params['eps'] = eps
# return
fc_conv_weights[i] = layer.weight
parameters[i] = BatchNorm2d_params
elif isinstance(layer, nn.Linear):
layer_name = 'Linear'
Linear_params = {}
Linear_params['layer_name'] = layer_name
# in_features
in_features = layer.__dict__.get('in_features')
Linear_params['in_features'] = in_features
# out_features
out_features = layer.__dict__.get('out_features')
Linear_params['out_features'] = out_features
# return
fc_conv_weights[i] = layer.weight
parameters[i] = Linear_params
elif isinstance(layer, nn.AdaptiveAvgPool2d):
layer_name = 'AdaptiveAvgPool2d'
AdaptiveAvgPool2d_params = {}
AdaptiveAvgPool2d_params['layer_name'] = layer_name
# output_size
output_size = layer.__dict__.get('output_size')
if not isinstance(output_size, tuple):
AdaptiveAvgPool2d_params['output_size'] = (output_size, output_size)
else:
AdaptiveAvgPool2d_params['output_size'] = output_size
# return
parameters[i] = AdaptiveAvgPool2d_params
elif isinstance(layer, list):
for j in range(len(layer)):
for k in range(len(layer[j])):
tmp_layer = layer[j][k]
###
if isinstance(tmp_layer, nn.Conv2d):
layer_name = 'Conv2d'
Conv2d_params = {}
Conv2d_params['layer_name'] = layer_name
# in_channel
in_channel = tmp_layer.__dict__.get('in_channels')
Conv2d_params['in_channel'] = in_channel
# out_channel
out_channel = tmp_layer.__dict__.get('out_channels')
Conv2d_params['out_channel'] = out_channel
# kernel_size
kernel_size = tmp_layer.__dict__.get('kernel_size')
if not isinstance(kernel_size, tuple):
Conv2d_params['kernel_size'] = (kernel_size, kernel_size)
else:
Conv2d_params['kernel_size'] = kernel_size
# stride
stride = tmp_layer.__dict__.get('stride')
if not isinstance(stride, tuple):
Conv2d_params['stride'] = (stride, stride)
else:
Conv2d_params['stride'] = stride
# padding
padding = tmp_layer.__dict__.get('padding')
if not isinstance(padding, tuple):
Conv2d_params['padding'] = (padding, padding)
else:
Conv2d_params['padding'] = padding
# return
fc_conv_weights[i][j][k] = tmp_layer.weight
parameters[i][j][k] = Conv2d_params
elif isinstance(tmp_layer, nn.ReLU):
layer_name = 'ReLU'
parameters[i][j][k] = {'layer_name': layer_name}
elif tmp_layer == 'Add':
layer_name = 'Add'
parameters[i][j][k] = {'layer_name': layer_name}
elif tmp_layer == 'View':
layer_name = 'View'
parameters[i][j][k] = {'layer_name': layer_name}
elif tmp_layer == 'Cat':
layer_name = 'Cat'
parameters[i][j][k] = {'layer_name': layer_name}
elif isinstance(tmp_layer, nn.MaxPool2d):
layer_name = 'MaxPool2d'
MaxPool2d_params = {}
MaxPool2d_params['layer_name'] = layer_name
# kernel_size
kernel_size = tmp_layer.__dict__.get('kernel_size')
if not isinstance(kernel_size, tuple):
MaxPool2d_params['kernel_size'] = (kernel_size, kernel_size)
else:
MaxPool2d_params['kernel_size'] = kernel_size
# stride
stride = tmp_layer.__dict__.get('stride')
if not isinstance(stride, tuple):
MaxPool2d_params['stride'] = (stride, stride)
else:
MaxPool2d_params['stride'] = stride
# padding
padding = tmp_layer.__dict__.get('padding')
if not isinstance(padding, tuple):
MaxPool2d_params['padding'] = (padding, padding)
else:
MaxPool2d_params['padding'] = padding
# return
parameters[i][j][k] = MaxPool2d_params
elif isinstance(tmp_layer, nn.AvgPool2d):
layer_name = 'AvgPool2d'
AvgPool2d_params = {}
AvgPool2d_params['layer_name'] = layer_name
# kernel_size
kernel_size = tmp_layer.__dict__.get('kernel_size')
if not isinstance(kernel_size, tuple):
AvgPool2d_params['kernel_size'] = (kernel_size, kernel_size)
else:
AvgPool2d_params['kernel_size'] = kernel_size
# stride
stride = tmp_layer.__dict__.get('stride')
if not isinstance(stride, tuple):
AvgPool2d_params['stride'] = (stride, stride)
else:
AvgPool2d_params['stride'] = stride
# padding
padding = tmp_layer.__dict__.get('padding')
if not isinstance(padding, tuple):
AvgPool2d_params['padding'] = (padding, padding)
else:
AvgPool2d_params['padding'] = padding
# return
parameters[i][j][k] = AvgPool2d_params
elif isinstance(tmp_layer, nn.Dropout):
layer_name = 'Dropout'
Dropout_params = {}
Dropout_params['layer_name'] = layer_name
# p
p = tmp_layer.__dict__.get('p')
Dropout_params['p'] = p
# return
parameters[i][j][k] = Dropout_params
elif isinstance(tmp_layer, nn.BatchNorm2d):
layer_name = 'BatchNorm2d'
BatchNorm2d_params = {}
BatchNorm2d_params['layer_name'] = layer_name
# num_features
num_features = tmp_layer.__dict__.get('num_features')
BatchNorm2d_params['num_features'] = num_features
# eps
eps = tmp_layer.__dict__.get('eps')
BatchNorm2d_params['eps'] = eps
# return
fc_conv_weights[i][j][k] = tmp_layer.weight
parameters[i][j][k] = BatchNorm2d_params
elif isinstance(tmp_layer, nn.Linear):
layer_name = 'Linear'
Linear_params = {}
Linear_params['layer_name'] = layer_name
# in_features
in_features = tmp_layer.__dict__.get('in_features')
Linear_params['in_features'] = in_features
# out_features
out_features = tmp_layer.__dict__.get('out_features')
Linear_params['out_features'] = out_features
# return
fc_conv_weights[i][j][k] = tmp_layer.weight
parameters[i][j][k] = Linear_params
elif isinstance(tmp_layer, nn.AdaptiveAvgPool2d):
layer_name = 'AdaptiveAvgPool2d'
AdaptiveAvgPool2d_params = {}
AdaptiveAvgPool2d_params['layer_name'] = layer_name
# output_size
output_size = tmp_layer.__dict__.get('output_size')
if not isinstance(output_size, tuple):
AdaptiveAvgPool2d_params['output_size'] = (output_size, output_size)
else:
AdaptiveAvgPool2d_params['output_size'] = output_size
# return
parameters[i][j][k] = AdaptiveAvgPool2d_params
###
else:
print('The layer has not been processed in get_structure_parameters!')
return parameters, fc_conv_weights
def gradient_backward_v2(model, img, label, num_class=1000, g_view=False):
x = Variable(img)
g = generate_g(model, x)
if g_view:
g.view()
delete_allpths(pth_dir=None)
print('\n=========================== Generate Tensors Start ====================================')
result = model(img)
print('=========================== Generate Tensors End ======================================\n')
Loss = nn.CrossEntropyLoss()
if 'GoogLeNet' in str(model).split('\n')[0]:
loss_torch = Loss(result[0], label)
else:
loss_torch = Loss(result, label)
_, connections = generate_connections(g)
last_connections = merge_connections(connections)
return_layers = get_layers(last_connections, model)
return_tensors = get_tensors(last_connections)
parameters, fc_conv_weights = get_structure_parameters(return_layers)
'''
print('================')
for i in range(len(last_connections)):
print(i, last_connections[i])
print('================')
print('================')
for i in range(len(return_layers)):
print(i, return_layers[i])
print('================')
print('================')
for i in range(len(parameters)):
print(i, parameters[i])
print('================')
print('================')
for i in range(len(return_tensors)):
if not isinstance(return_tensors[i], list) and not isinstance(return_tensors[i], str):
print('=========', i, return_tensors[i].shape)
print('================')
'''
import copy
return_dz = copy.deepcopy(last_connections)
featuremap = return_tensors
featuremap.append(img)
y_true = F.one_hot(label, num_classes=num_class).float()
loss, dLoss_dz = cross_entropy_loss(featuremap[0], y_true)
featuremap.pop(0)
return_dz.append(dLoss_dz)
#####################tensors
'''
for i in range(len(last_connections)):
print(last_connections[i])
for i in range(len(featuremap)):
if not isinstance(featuremap[i], list):
print('=========', i, featuremap[i].shape)
else:
for j in range(len(featuremap[i])):
for k in range(len(featuremap[i][j])):
print(' =========', i, j, k, featuremap[i][j][k].shape)
'''
#####################
    # traverse the preceding n layers in reverse order
for i in range(len(parameters)):
layer = parameters[i]
if not isinstance(layer, list):
print('\n======================== {0:3} Layer: '.format(str(len(parameters)-1-i))+'{0:11}'.format(layer['layer_name'])+' Backward Start ========================')
if layer['layer_name'] == 'Conv2d':
z = featuremap[i]
weight_z = fc_conv_weights[i]
                padding = layer.get('padding', (0, 0))
stride = layer['stride']
dLoss_dz, dLoss_dW, dLoss_dB = conv_backward(dLoss_dz, weight_z, z, padding, stride)
return_dz[i] = dLoss_dz
elif layer['layer_name'] == 'ReLU':
z = featuremap[i]
dLoss_dz = relu_backward(dLoss_dz, z)
return_dz[i] = dLoss_dz
elif layer['layer_name'] == 'MaxPool2d':
z = featuremap[i]
pooling = layer['kernel_size']
stride = layer['stride']
padding = layer['padding']
dLoss_dz = max_pooling_backward(dLoss_dz, z, pooling, stride, padding)
return_dz[i] = dLoss_dz
elif layer['layer_name'] == 'AvgPool2d':
z = featuremap[i]
pooling = layer['kernel_size']
stride = layer['stride']
padding = layer['padding']
dLoss_dz = average_pooling_backward(dLoss_dz, z, pooling, stride, padding)
return_dz[i] = dLoss_dz
elif layer['layer_name'] == 'Linear':
weight_z = fc_conv_weights[i]
z = featuremap[i]
dLoss_dz, dLoss_dW, dLoss_dB = fc_backward(dLoss_dz, z, weight_z)
return_dz[i] = dLoss_dz
elif layer['layer_name'] == 'View':
last_z = featuremap[i+1]
if 'Pool' in parameters[i+1]['layer_name']:
params = (parameters[i+1]['kernel_size'], parameters[i+1]['stride'], parameters[i+1]['padding'])
else:
params = None
dLoss_dz = view_backward(dLoss_dz, last_z, params)
return_dz[i] = dLoss_dz
elif layer['layer_name'] == 'Add':
dLoss_dz = add_backward(dLoss_dz)
return_dz[i] = dLoss_dz
elif layer['layer_name'] == 'Dropout':
if parameters[i-1]['layer_name'] == 'Dropout':
return_dz[i] = dLoss_dz
                    print('# Skip this layer because the layer has already been calculated!')
print('======================== {0:3} Layer: '.format(str(len(parameters)-1-i))+'{0:11}'.\
format(layer['layer_name'])+' Backward End ==========================')
continue
p = layer['p']
mask = featuremap[i]
dLoss_dz = dropback_backward(dLoss_dz, mask, p)
return_dz[i] = dLoss_dz
elif layer['layer_name'] == 'BatchNorm2d':
eps = layer['eps']
z = featuremap[i]
gamma = fc_conv_weights[i]
dLoss_dz = batchnorm2d_backward(dLoss_dz, z, eps, gamma)
return_dz[i] = dLoss_dz
print('======================== {0:3} Layer: '.format(str(len(parameters)-1-i))+'{0:11}'.format(layer['layer_name'])+' Backward End ==========================')
elif isinstance(layer, list):
import copy
tmp_dLoss_dz = []
for j in range(len(layer)):
tmp_dLoss_dz.append(copy.deepcopy(dLoss_dz))
for k in range(len(layer[j])):
tmp_layer = layer[j][k]
print('\n=========================== {0:3} Branch: '.format(str(len(parameters)-1-i))+'{0:11}'.format(tmp_layer['layer_name'])+' Backward Start ====================')
if tmp_layer['layer_name'] == 'Conv2d':
if k+1 >= len(featuremap[i-1][j]):
z = featuremap[i]
else:
z = featuremap[i-1][j][k+1]
weight_z = fc_conv_weights[i][j][k]
                        padding = tmp_layer.get('padding', (0, 0))
stride = tmp_layer['stride']
tmp_dLoss_dz[-1], dLoss_dW, dLoss_dB = conv_backward(tmp_dLoss_dz[-1], weight_z, z, padding, stride)
return_dz[i][j][k] = tmp_dLoss_dz[-1]
elif tmp_layer['layer_name'] == 'ReLU':
z = featuremap[i-1][j][k+1]
tmp_dLoss_dz[-1] = relu_backward(tmp_dLoss_dz[-1], z)
return_dz[i][j][k] = tmp_dLoss_dz[-1]
elif tmp_layer['layer_name'] == 'BatchNorm2d':
eps = tmp_layer['eps']
z = featuremap[i-1][j][k+1]
gamma = fc_conv_weights[i][j][k]
tmp_dLoss_dz[-1] = batchnorm2d_backward(tmp_dLoss_dz[-1], z, eps, gamma)
return_dz[i][j][k] = tmp_dLoss_dz[-1]
print('=========================== {0:3} Branch: '.format(str(len(parameters)-1-i))+'{0:11}'.format(tmp_layer['layer_name'])+' Backward End ======================')
print(tmp_dLoss_dz[0].shape, tmp_dLoss_dz[1].shape)
dLoss_dz = tmp_dLoss_dz[0] + tmp_dLoss_dz[1]
else:
print('Not completed in gradient_backward!')
print('# Torch calculated loss: ', loss_torch.detach().numpy())
loss_torch.backward()
if 'VGG' in str(model) or 'AlexNet' in str(model):
print(judge_tensors_equal(dLoss_dW, model.features[0].weight.grad))
elif 'ResNet' in str(model):
print(judge_tensors_equal(dLoss_dW, model.conv1.weight.grad))
delete_allpths(pth_dir=None)
return return_dz, dLoss_dW, dLoss_dB
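# A minimal usage sketch of gradient_backward_v2, assuming torchvision is
# installed and the backward helpers referenced above are importable; the
# model choice, input shape and label are assumptions chosen for illustration.
if __name__ == '__main__':
    import torchvision.models as models
    demo_model = models.alexnet()
    demo_img = torch.randn(1, 3, 224, 224)
    demo_label = torch.tensor([3])
    dz, dW, dB = gradient_backward_v2(demo_model, demo_img, demo_label, num_class=1000)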
| 34.624732 | 172 | 0.63429 | 6,781 | 48,440 | 4.297744 | 0.060905 | 0.041382 | 0.021 | 0.024706 | 0.689051 | 0.639879 | 0.592458 | 0.556703 | 0.53162 | 0.509556 | 0 | 0.016974 | 0.197275 | 48,440 | 1,399 | 173 | 34.624732 | 0.732512 | 0.040235 | 0 | 0.552056 | 0 | 0 | 0.123108 | 0.019954 | 0 | 0 | 0 | 0.000715 | 0.000875 | 1 | 0.028871 | false | 0.0035 | 0.011374 | 0.000875 | 0.08399 | 0.070866 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
0ae2d03accd91cc3db5f01917f5d31fdecbb74e5 | 4,372 | py | Python | ark_nlp/factory/utils/attack.py | yubuyuabc/ark-nlp | 165d35cfacd7476791c0aeba19bf43f4f8079553 | ["Apache-2.0"] | 1 | 2022-03-23T05:10:55.000Z | 2022-03-23T05:10:55.000Z | ark_nlp/factory/utils/attack.py | yubuyuabc/ark-nlp | 165d35cfacd7476791c0aeba19bf43f4f8079553 | ["Apache-2.0"] | null | null | null | ark_nlp/factory/utils/attack.py | yubuyuabc/ark-nlp | 165d35cfacd7476791c0aeba19bf43f4f8079553 | ["Apache-2.0"] | null | null | null |
import torch
class FGM(object):
"""
基于FGM算法的攻击机制
Args:
module (:obj:`torch.nn.Module`): 模型
Examples::
>>> # 初始化
>>> fgm = FGM(module)
>>> for batch_input, batch_label in data:
>>> # 正常训练
>>> loss = module(batch_input, batch_label)
>>> loss.backward() # 反向传播,得到正常的grad
>>> # 对抗训练
>>> fgm.attack() # 在embedding上添加对抗扰动
>>> loss_adv = module(batch_input, batch_label)
>>> loss_adv.backward() # 反向传播,并在正常的grad基础上,累加对抗训练的梯度
>>> fgm.restore() # 恢复embedding参数
>>> # 梯度下降,更新参数
>>> optimizer.step()
>>> optimizer.zero_grad()
Reference:
[1] https://zhuanlan.zhihu.com/p/91269728
"""
def __init__(self, module):
self.module = module
self.backup = {}
def attack(
self,
epsilon=1.,
emb_name='word_embeddings'
):
for name, param in self.module.named_parameters():
if param.requires_grad and emb_name in name:
self.backup[name] = param.data.clone()
norm = torch.norm(param.grad)
if norm != 0 and not torch.isnan(norm):
r_at = epsilon * param.grad / norm
param.data.add_(r_at)
def restore(
self,
emb_name='word_embeddings'
):
for name, param in self.module.named_parameters():
if param.requires_grad and emb_name in name:
assert name in self.backup
param.data = self.backup[name]
self.backup = {}
class PGD(object):
"""
基于PGD算法的攻击机制
Args:
module (:obj:`torch.nn.Module`): 模型
Examples::
>>> pgd = PGD(module)
>>> K = 3
>>> for batch_input, batch_label in data:
>>> # 正常训练
>>> loss = module(batch_input, batch_label)
>>> loss.backward() # 反向传播,得到正常的grad
>>> pgd.backup_grad()
>>> # 对抗训练
>>> for t in range(K):
>>> pgd.attack(is_first_attack=(t==0)) # 在embedding上添加对抗扰动, first attack时备份param.data
>>> if t != K-1:
>>> optimizer.zero_grad()
>>> else:
>>> pgd.restore_grad()
>>> loss_adv = module(batch_input, batch_label)
>>> loss_adv.backward() # 反向传播,并在正常的grad基础上,累加对抗训练的梯度
>>> pgd.restore() # 恢复embedding参数
>>> # 梯度下降,更新参数
>>> optimizer.step()
>>> optimizer.zero_grad()
Reference:
[1] https://zhuanlan.zhihu.com/p/91269728
"""
def __init__(self, module):
self.module = module
self.emb_backup = {}
self.grad_backup = {}
def attack(
self,
epsilon=1.,
alpha=0.3,
emb_name='emb.',
is_first_attack=False
):
        # emb_name should be set to the name of the embedding parameter in your model
for name, param in self.module.named_parameters():
if param.requires_grad and emb_name in name:
if is_first_attack:
self.emb_backup[name] = param.data.clone()
norm = torch.norm(param.grad)
if norm != 0 and not torch.isnan(norm):
r_at = alpha * param.grad / norm
param.data.add_(r_at)
param.data = self.project(name, param.data, epsilon)
def restore(self, emb_name='emb.'):
        # emb_name should be set to the name of the embedding parameter in your model
for name, param in self.module.named_parameters():
if param.requires_grad and emb_name in name:
assert name in self.emb_backup
param.data = self.emb_backup[name]
self.emb_backup = {}
def project(self, param_name, param_data, epsilon):
r = param_data - self.emb_backup[param_name]
if torch.norm(r) > epsilon:
r = epsilon * r / torch.norm(r)
return self.emb_backup[param_name] + r
def backup_grad(self):
for name, param in self.module.named_parameters():
if param.requires_grad:
self.grad_backup[name] = param.grad.clone()
def restore_grad(self):
for name, param in self.module.named_parameters():
if param.requires_grad:
param.grad = self.grad_backup[name]
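# A hedged numeric sketch (illustrative, assuming only torch): PGD.project
# above clips the accumulated perturbation r = param - backup to an L2 ball of
# radius epsilon, i.e. r <- epsilon * r / ||r|| whenever ||r|| > epsilon.
if __name__ == '__main__':
    backup = torch.zeros(4)
    param = torch.tensor([3.0, 0.0, 0.0, 0.0])
    epsilon = 1.0
    r = param - backup
    if torch.norm(r) > epsilon:
        r = epsilon * r / torch.norm(r)
    print(backup + r)  # tensor([1., 0., 0., 0.]) -- projected back onto the ball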
| 31.681159 | 101 | 0.52699 | 478 | 4,372 | 4.648536 | 0.177824 | 0.044554 | 0.040954 | 0.054005 | 0.738074 | 0.670567 | 0.646265 | 0.646265 | 0.588659 | 0.588659 | 0 | 0.009571 | 0.354758 | 4,372 | 137 | 102 | 31.912409 | 0.778093 | 0.368253 | 0 | 0.575758 | 0 | 0 | 0.014879 | 0 | 0 | 0 | 0 | 0 | 0.030303 | 1 | 0.136364 | false | 0 | 0.015152 | 0 | 0.19697 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
0ae341f931ab8799a80b73c9036820e58b4d7de6 | 5,790 | py | Python | core.py | sreejithr/deepfake | c7115ce90ea281e2eb95d75f436efa102c8f2e3c | ["MIT"] | null | null | null | core.py | sreejithr/deepfake | c7115ce90ea281e2eb95d75f436efa102c8f2e3c | ["MIT"] | 3 | 2021-09-08T02:24:48.000Z | 2022-03-12T00:44:53.000Z | core.py | sreejithr/deepfake | c7115ce90ea281e2eb95d75f436efa102c8f2e3c | ["MIT"] | null | null | null |
import cv2
import torch
import yaml
import imageio
import throttle
import numpy as np
import matplotlib.pyplot as plt
from argparse import ArgumentParser
from skimage.transform import resize
from scipy.spatial import ConvexHull
from modules.generator import OcclusionAwareGenerator
from modules.keypoint_detector import KPDetector
from sync_batchnorm import DataParallelWithCallback
#from animate import normalize_kp
# command = [ffmpeg,
# '-y',
# '-f', 'rawvideo',
# '-vcodec','rawvideo',
# '-pix_fmt', 'bgr24',
# '-s', dimension,
# '-i', '-',
# '-c:v', 'libx264',
# '-pix_fmt', 'yuv420p',
# '-preset', 'ultrafast',
# '-f', 'flv',
# 'rtmp://10.10.10.80/live/mystream']
def normalize_kp(kp_source, kp_driving, kp_driving_initial, adapt_movement_scale=False,
use_relative_movement=False, use_relative_jacobian=False):
if adapt_movement_scale:
source_area = ConvexHull(kp_source['value'][0].data.cpu().numpy()).volume
driving_area = ConvexHull(kp_driving_initial['value'][0].data.cpu().numpy()).volume
adapt_movement_scale = np.sqrt(source_area) / np.sqrt(driving_area)
else:
adapt_movement_scale = 1
kp_new = {k: v for k, v in kp_driving.items()}
if use_relative_movement:
kp_value_diff = (kp_driving['value'] - kp_driving_initial['value'])
kp_value_diff *= adapt_movement_scale
kp_new['value'] = kp_value_diff + kp_source['value']
if use_relative_jacobian:
jacobian_diff = torch.matmul(kp_driving['jacobian'], torch.inverse(kp_driving_initial['jacobian']))
kp_new['jacobian'] = torch.matmul(jacobian_diff, kp_source['jacobian'])
return kp_new
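# Editorial note on the relative mode above: with
#   scale = sqrt(hull_volume(kp_source)) / sqrt(hull_volume(kp_driving_initial))
# the update amounts to
#   kp_new['value'] = kp_source['value'] + (kp_driving['value'] - kp_driving_initial['value']) * scale
# so the driving video's keypoint *motion*, not its absolute pose, is transferred.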
def load_checkpoints(config_path, checkpoint_path, cpu=False):
with open(config_path) as f:
        config = yaml.load(f, Loader=yaml.FullLoader)  # explicit Loader avoids PyYAML's unsafe-load warning
generator = OcclusionAwareGenerator(**config['model_params']['generator_params'],
**config['model_params']['common_params'])
if not cpu:
generator.cuda()
kp_detector = KPDetector(**config['model_params']['kp_detector_params'],
**config['model_params']['common_params'])
if not cpu:
kp_detector.cuda()
if cpu:
checkpoint = torch.load(checkpoint_path, map_location=torch.device('cpu'))
else:
checkpoint = torch.load(checkpoint_path)
generator.load_state_dict(checkpoint['generator'])
kp_detector.load_state_dict(checkpoint['kp_detector'])
if not cpu:
generator = DataParallelWithCallback(generator)
kp_detector = DataParallelWithCallback(kp_detector)
generator.eval()
kp_detector.eval()
return generator, kp_detector
@throttle.wrap(1, 2)
def forward(source_image, driving_frame, kp_source, kp_driving_initial, generator, kp_detector, relative=True, adapt_scale=True, cpu=True):
kp_driving = kp_detector(driving_frame)
kp_norm = normalize_kp(
kp_source=kp_source,
kp_driving=kp_driving,
kp_driving_initial=kp_driving_initial,
use_relative_movement=relative,
use_relative_jacobian=relative,
adapt_movement_scale=adapt_scale
)
out = generator(source_image, kp_source=kp_source, kp_driving=kp_norm)
return np.transpose(out["prediction"].data.cpu().numpy(), [0, 2, 3, 1])[0]
if __name__ == "__main__":
parser = ArgumentParser()
parser.add_argument("--config", required=True, help="path to config")
parser.add_argument("--source_image", required=True, help="path to source image")
parser.add_argument("--checkpoint", default="vox-cpk.pth.tar", help="path to checkpoint")
parser.add_argument("--relative", dest="relative", action="store_true", help="use relative or absolute keypoint coordinates")
parser.add_argument("--adapt_scale", dest="adapt_scale", action="store_true", help="adapt movement scale based on convex hull of keypoints")
parser.add_argument("--cpu", dest="cpu", action="store_true", help="CPU mode")
parser.set_defaults(relative=False)
parser.set_defaults(adapt_scale=False)
opt = parser.parse_args()
generator, kp_detector = load_checkpoints(config_path=opt.config, checkpoint_path=opt.checkpoint, cpu=opt.cpu)
source_image = imageio.imread(opt.source_image)
source_image = resize(source_image, (256, 256))[..., :3]
source = torch.tensor(source_image[np.newaxis].astype(np.float32)).permute(0, 3, 1, 2)
if not opt.cpu:
source = source.cuda()
kp_source = kp_detector(source)
#out = cv2.VideoWriter('outpy.avi', cv2.VideoWriter_fourcc('M','J','P','G'), 30, (256, 256))
kp_driving_initial = None
camera = cv2.VideoCapture(0)
ret, frame = camera.read()
while True:
ret, frame = camera.read()
        resized = resize(frame, (256, 256))[..., :3]
# y = torch.tensor(np.array(resized))
# x = y.cpu().numpy()
# image = cv2.cvtColor(x, cv2.COLOR_BGR2RGB)
# # x = y.permute(1, 2, 0)
# plt.imshow(np.array(image))
# plt.show()
        driving_resized = torch.tensor(np.array(resized)[np.newaxis].astype(np.float32)).permute(0, 3, 1, 2)
        if not opt.cpu:
            driving_resized = driving_resized.cuda()  # move the torch tensor (not the numpy array) to the GPU
        if kp_driving_initial is None:
kp_driving_initial = kp_detector(driving_resized)
fake_frame = forward(
source,
driving_resized,
kp_source,
kp_driving_initial,
generator,
kp_detector,
relative=opt.relative,
adapt_scale=opt.adapt_scale,
cpu=opt.cpu
)
cv2.imshow("frame", fake_frame)
#x = np.squeeze(driving_resized, axis=(0,))
#x = driving_resized[0].permute(1, 2, 0)
# plt_driving = driving_resized #permute(2, 3, 1)
#print(plt_driving.shape)
#plt.imshow(x)
#plt.show()
if cv2.waitKey(1) & 0xFF == ord('q'):
break
camera.release()
cv2.destroyAllWindows()
| 34.260355 | 142 | 0.68342 | 755 | 5,790 | 5.007947 | 0.250331 | 0.045226 | 0.046549 | 0.022481 | 0.180376 | 0.123512 | 0.085163 | 0.070881 | 0.070881 | 0.021158 | 0 | 0.017722 | 0.181347 | 5,790 | 168 | 143 | 34.464286 | 0.779958 | 0.126598 | 0 | 0.099099 | 0 | 0 | 0.100418 | 0 | 0 | 0 | 0.000795 | 0 | 0 | 1 | 0.027027 | false | 0 | 0.117117 | 0 | 0.171171 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
0ae6683abfd956b5c3952439b03a59e007c9300a | 2,402 | py | Python | models/1-Tom/train/kaggle-hubmap-main/src/02_train/transforms.py | navekshasood/HuBMAP---Hacking-the-Kidney | 018100fe4bfa5e8764b9df5a9d188e2c670ac061 | ["MIT"] | null | null | null | models/1-Tom/train/kaggle-hubmap-main/src/02_train/transforms.py | navekshasood/HuBMAP---Hacking-the-Kidney | 018100fe4bfa5e8764b9df5a9d188e2c670ac061 | ["MIT"] | null | null | null | models/1-Tom/train/kaggle-hubmap-main/src/02_train/transforms.py | navekshasood/HuBMAP---Hacking-the-Kidney | 018100fe4bfa5e8764b9df5a9d188e2c670ac061 | ["MIT"] | null | null | null |
import numpy as np
from albumentations import (Compose, HorizontalFlip, VerticalFlip, Rotate, RandomRotate90,
                            ShiftScaleRotate, ElasticTransform,
                            GridDistortion, RandomSizedCrop, RandomCrop, CenterCrop,
                            HueSaturationValue, IAASharpen,
                            RandomGamma, RandomBrightness, RandomBrightnessContrast,
                            GaussianBlur, CLAHE,
                            Cutout, CoarseDropout, GaussNoise, ChannelShuffle, ToGray, OpticalDistortion,
                            Normalize, OneOf, NoOp)
from albumentations.pytorch import ToTensorV2 as ToTensor
from get_config import get_config
config = get_config()
MEAN = np.array([0.485, 0.456, 0.406])
STD = np.array([0.229, 0.224, 0.225])
def get_transforms_train():
transform_train = Compose([
#Basic
RandomRotate90(p=1),
HorizontalFlip(p=0.5),
#Morphology
ShiftScaleRotate(shift_limit=0, scale_limit=(-0.2,0.2), rotate_limit=(-30,30),
interpolation=1, border_mode=0, value=(0,0,0), p=0.5),
GaussNoise(var_limit=(0,50.0), mean=0, p=0.5),
GaussianBlur(blur_limit=(3,7), p=0.5),
#Color
RandomBrightnessContrast(brightness_limit=0.35, contrast_limit=0.5,
brightness_by_max=True,p=0.5),
HueSaturationValue(hue_shift_limit=30, sat_shift_limit=30,
val_shift_limit=0, p=0.5),
CoarseDropout(max_holes=2,
max_height=config['input_resolution'][0]//4, max_width=config['input_resolution'][1]//4,
min_holes=1,
min_height=config['input_resolution'][0]//16, min_width=config['input_resolution'][1]//16,
fill_value=0, mask_fill_value=0, p=0.5),
Normalize(mean=(MEAN[0], MEAN[1], MEAN[2]),
std=(STD[0], STD[1], STD[2])),
ToTensor(),
])
return transform_train
def get_transforms_valid():
transform_valid = Compose([
Normalize(mean=(MEAN[0], MEAN[1], MEAN[2]),
std=(STD[0], STD[1], STD[2])),
ToTensor(),
    ])
return transform_valid
def denormalize(z, mean=MEAN.reshape(-1,1,1), std=STD.reshape(-1,1,1)):
return std*z + mean
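# A minimal usage sketch (illustrative): albumentations pipelines take named
# arrays and return a dict of augmented outputs; the 256x256 shapes here are
# assumptions for the example.
if __name__ == '__main__':
    image = np.zeros((256, 256, 3), dtype=np.uint8)
    mask = np.zeros((256, 256), dtype=np.uint8)
    augmented = get_transforms_train()(image=image, mask=mask)
    print(augmented['image'].shape, augmented['mask'].shape)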
| 40.711864 | 113 | 0.572856 | 272 | 2,402 | 4.911765 | 0.352941 | 0.011976 | 0.015719 | 0.011976 | 0.181138 | 0.098802 | 0.098802 | 0.098802 | 0.098802 | 0.098802 | 0 | 0.06411 | 0.305162 | 2,402 | 58 | 114 | 41.413793 | 0.736369 | 0.008326 | 0 | 0.177778 | 0 | 0 | 0.026902 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.066667 | false | 0 | 0.088889 | 0.022222 | 0.222222 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
0ae709052ebf9505470ee0404f1013ba86cb8e0e | 13,017 | py | Python | cubspack/geometry.py | Majikat/cubspack | 16aa6df0603d48d757d74837d3457a1934601d89 | ["Apache-2.0"] | 11 | 2018-06-18T12:05:34.000Z | 2021-02-24T19:00:24.000Z | cubspack/geometry.py | Majikat/cubspack | 16aa6df0603d48d757d74837d3457a1934601d89 | ["Apache-2.0"] | null | null | null | cubspack/geometry.py | Majikat/cubspack | 16aa6df0603d48d757d74837d3457a1934601d89 | ["Apache-2.0"] | 2 | 2018-04-08T17:30:00.000Z | 2018-09-27T08:38:42.000Z |
# -*- coding: utf-8 -*-
from math import sqrt
class Point(object):
__slots__ = ('x', 'y', 'z')
def __init__(self, x, y, z):
self.x = x
self.y = y
self.z = z
def __eq__(self, other):
return (self.x == other.x and self.y == other.y and self.z == other.z)
def __repr__(self):
return "P({}, {}, {})".format(self.x, self.y, self.z)
def distance(self, point):
"""Calculate distance to another point"""
return sqrt((self.x - point.x)**2 + (self.y - point.y)**2 + (
self.z - point.z)**2)
def distance_squared(self, point):
return (self.x - point.x)**2 + (self.y - point.y)**2 + (
self.z - point.z)**2
class Segment(object):
__slots__ = ('start', 'end')
def __init__(self, start, end):
"""Arguments:
start (Point): Segment start point
end (Point): Segment end point
"""
assert(isinstance(start, Point) and isinstance(end, Point))
self.start = start
self.end = end
def __eq__(self, other):
        if not isinstance(other, self.__class__):
            return False
return self.start == other.start and self.end == other.end
def __repr__(self):
return "S({}, {})".format(self.start, self.end)
@property
def length_squared(self):
"""Faster than length and useful for some comparisons"""
return self.start.distance_squared(self.end)
@property
def length(self):
return self.start.distance(self.end)
@property
def top(self):
return max(self.start.y, self.end.y)
@property
def bottom(self):
return min(self.start.y, self.end.y)
@property
def right(self):
return max(self.start.x, self.end.x)
@property
def left(self):
return min(self.start.x, self.end.x)
@property
def ineye(self):
return max(self.start.z, self.end.z)
@property
def outeye(self):
return min(self.start.z, self.end.z)
class HSegment(Segment):
"""Horizontal Segment"""
def __init__(self, start, length):
"""Create an Horizontal segment given its left most end point and its length.
Arguments:
- start (Point): Starting Point
- length (number): segment length
"""
assert(isinstance(start, Point) and not isinstance(length, Point))
super(HSegment, self).__init__(
start, Point(start.x + length, start.y, start.z))
@property
def length(self):
return self.end.x - self.start.x
class VSegment(Segment):
"""Vertical Segment"""
def __init__(self, start, length):
"""Create a Vertical segment given its bottom most end point and its length.
Arguments:
- start (Point): Starting Point
- length (number): segment length
"""
assert(isinstance(start, Point) and not isinstance(length, Point))
super(VSegment, self).__init__(
start, Point(start.x, start.y + length, start.z))
@property
def length(self):
return self.end.y - self.start.y
class DSegment(Segment):
"""In-Depth Segment"""
def __init__(self, start, length):
"""Create an In-Depth segment given its bottom most end point and its length.
Arguments:
- start (Point): Starting Point
- length (number): segment length
"""
assert(isinstance(start, Point) and not isinstance(length, Point))
        super(DSegment, self).__init__(
start, Point(start.x, start.y, start.z + length))
@property
def length(self):
return self.end.z - self.start.z
class Cuboid(object):
"""Basic cuboid primitive class.
x, y, z-> Lower right corner coordinates
width -
height -
depth -
"""
__slots__ = ('width', 'height', 'depth', 'x', 'y', 'z', 'rid')
def __init__(self, x, y, z, width, height, depth, rid=None):
"""Initiating the Cuboid
Args:
x (int, float):
y (int, float):
z (int, float):
width (int, float):
height (int, float):
depth (int, float):
rid (identifier object):
"""
assert(height >= 0 and width >= 0 and depth >= 0)
self.width = width
self.height = height
self.depth = depth
self.x = x
self.y = y
self.z = z
self.rid = rid
@property
def bottom(self):
"""Cuboid bottom edge y coordinate"""
return self.y
@property
def top(self):
"""Cuboid top edge y coordiante"""
return self.y + self.height
@property
def left(self):
"""Cuboid left edge x coordinate"""
return self.x
@property
def right(self):
"""Cuboid right edge x coordinate"""
return self.x + self.width
@property
def outeye(self):
"""Cuboid farther from eye edge z coordinate"""
return self.z
@property
def ineye(self):
"""Cuboid nearer from eye edge z coordinate"""
return self.z + self.depth
@property
def corner_top_l(self):
return Point(self.left, self.top, self.outeye)
@property
def corner_top_r(self):
return Point(self.right, self.top, self.outeye)
@property
def corner_bot_r(self):
return Point(self.right, self.bottom, self.outeye)
@property
def corner_bot_l(self):
return Point(self.left, self.bottom, self.outeye)
@property
def corner_top_l_out(self):
return Point(self.left, self.top, self.ineye)
@property
def corner_top_r_out(self):
return Point(self.right, self.top, self.ineye)
@property
def corner_bot_r_out(self):
return Point(self.right, self.bottom, self.ineye)
@property
def corner_bot_l_out(self):
return Point(self.left, self.bottom, self.ineye)
def __lt__(self, other):
"""Compare cuboids by volume (used for sorting)"""
return self.volume() < other.volume()
def __eq__(self, other):
"""Equal cuboids have same properties."""
if not isinstance(other, self.__class__):
return False
return (self.width == other.width and
self.height == other.height and
self.depth == other.depth and
self.x == other.x and
self.y == other.y and
self.z == other.z)
def __hash__(self):
return hash(
(self.x, self.y, self.z, self.width, self.height, self.depth))
def __iter__(self):
"""Iterate through cuboid corners"""
yield self.corner_top_l
yield self.corner_top_r
yield self.corner_bot_r
yield self.corner_bot_l
yield self.corner_top_l_out
yield self.corner_top_r_out
yield self.corner_bot_r_out
yield self.corner_bot_l_out
def __repr__(self):
return "R({}, {}, {}, {}, {}, {})".format(
self.x, self.y, self.z, self.width, self.height, self.depth)
def volume(self):
"""Cuboid volume"""
return self.width * self.height * self.depth
def move(self, x, y, z):
"""Move Cuboid to x,y,z coordinates
Arguments:
x (int, float): X coordinate
y (int, float): Y coordinate
z (int, float): Z coordinate
"""
self.x = x
self.y = y
self.z = z
def contains(self, cub):
"""Tests if another cuboid is contained by this one
Arguments:
            cub (Cuboid): The other cuboid
Returns:
bool: True if it is inside this one, False otherwise
"""
return (cub.y >= self.y and
cub.x >= self.x and
cub.z >= self.z and
cub.y + cub.height <= self.y + self.height and
cub.x + cub.width <= self.x + self.width and
cub.z + cub.depth <= self.z + self.depth)
def intersects(self, cub, edges=False):
"""Detect intersections between this cuboid and cub.
Args:
cub (Cuboid): Cuboid to test for intersections.
edges (bool): Accept edge touching cuboids as intersects or not
Returns:
bool: True if the cuboids intersect, False otherwise
"""
# Not even touching
if (self.bottom > cub.top or
self.top < cub.bottom or
self.left > cub.right or
self.right < cub.left or
self.outeye > cub.ineye or
self.ineye < cub.outeye):
return False
# Discard edge intersects
if not edges:
if (self.bottom == cub.top or
self.top == cub.bottom or
self.left == cub.right or
self.right == cub.left or
self.outeye == cub.ineye or
self.ineye == cub.outeye):
return False
# Discard corner intersects
if (self.left == cub.right and self.bottom == cub.top and
self.outeye == cub.ineye or
self.left == cub.right and cub.bottom == self.top and
self.outeye == cub.ineye or
self.left == cub.right and self.bottom == cub.top and
cub.outeye == self.ineye or
self.left == cub.right and cub.bottom == self.top and
cub.outeye == self.ineye or
cub.left == self.right and self.bottom == cub.top and
self.outeye == cub.ineye or
cub.left == self.right and cub.bottom == self.top and
self.outeye == cub.ineye or
cub.left == self.right and self.bottom == cub.top and
cub.outeye == self.ineye or
cub.left == self.right and cub.bottom == self.top and
cub.outeye == self.ineye):
return False
return True
def intersection(self, cub, edges=False):
"""Returns the cuboid resulting of the intersection of this and cub
If the cuboids are only touching by their edges, and the
argument 'edges' is True the cuboid returned will have a volume of 0.
Returns None if there is no intersection.
Arguments:
cub (Cuboid): The other cuboid.
edges (bool): If true, touching edges are considered an
intersection, and a cuboid of 0 height or width or depth will be
returned
Returns:
Cuboid: Intersection.
None: There was no intersection.
"""
if not self.intersects(cub, edges=edges):
return None
bottom = max(self.bottom, cub.bottom)
left = max(self.left, cub.left)
top = min(self.top, cub.top)
right = min(self.right, cub.right)
outeye = max(self.outeye, cub.outeye)
ineye = min(self.ineye, cub.ineye)
return Cuboid(
left, bottom, outeye,
right - left, top - bottom, ineye - outeye)
def join(self, other):
"""Try to join a cuboid to this one.
If the result is also a cuboid and the operation is successful then
this cuboid is modified to the union.
Arguments:
other (Cuboid): Cuboid to join
Returns:
bool: True when successfully joined, False otherwise
"""
if self.contains(other):
return True
if other.contains(self):
self.x = other.x
self.y = other.y
self.z = other.z
self.width = other.width
self.height = other.height
self.depth = other.depth
return True
if not self.intersects(other, edges=True):
return False
# Other cuboid is Up/Down from this
if self.left == other.left and self.width == other.width and \
                self.outeye == other.outeye and self.depth == other.depth:
y_min = min(self.bottom, other.bottom)
y_max = max(self.top, other.top)
self.y = y_min
self.height = y_max - y_min
return True
# Other cuboid is Right/Left from this
if self.bottom == other.bottom and self.height == other.height and \
                self.outeye == other.outeye and self.depth == other.depth:
x_min = min(self.left, other.left)
x_max = max(self.right, other.right)
self.x = x_min
self.width = x_max - x_min
return True
        # Other cuboid is In/Out (along the depth axis) from this
if self.bottom == other.bottom and self.height == other.height and \
self.left == other.left and self.width == other.width:
z_min = min(self.outeye, other.outeye)
z_max = max(self.ineye, other.ineye)
self.z = z_min
self.depth = z_max - z_min
return True
return False
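# A minimal usage sketch (illustrative) of the Cuboid API defined above:
if __name__ == '__main__':
    a = Cuboid(0, 0, 0, 4, 4, 4)
    b = Cuboid(2, 2, 2, 4, 4, 4)
    assert a.intersects(b)
    print(a.intersection(b))  # R(2, 2, 2, 2, 2, 2) -> overlap volume 8
    assert a.contains(Cuboid(1, 1, 1, 2, 2, 2))
    stacked = Cuboid(0, 4, 0, 4, 2, 4)  # sits on top of a, touching edges
    assert a.join(stacked) and a.height == 6  # a grows in place to cover both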
| 29.517007 | 85 | 0.556657 | 1,666 | 13,017 | 4.259304 | 0.103241 | 0.024662 | 0.019166 | 0.021421 | 0.495068 | 0.448985 | 0.402198 | 0.360203 | 0.281285 | 0.257046 | 0 | 0.001385 | 0.334563 | 13,017 | 440 | 86 | 29.584091 | 0.817825 | 0.217101 | 0 | 0.384 | 0 | 0 | 0.008383 | 0 | 0 | 0 | 0 | 0 | 0.02 | 1 | 0.192 | false | 0 | 0.004 | 0.096 | 0.436 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
0ae84e0cfa142229ba7d5efbff2238d28b93f418 | 16,661 | py | Python | app/recipe/tests/test_recipe_api.py | tahmadvand/recipe_app_api | 40b4cc6960d7dc4858373b5f6ccca980ed0eeac8 | ["MIT"] | null | null | null | app/recipe/tests/test_recipe_api.py | tahmadvand/recipe_app_api | 40b4cc6960d7dc4858373b5f6ccca980ed0eeac8 | ["MIT"] | null | null | null | app/recipe/tests/test_recipe_api.py | tahmadvand/recipe_app_api | 40b4cc6960d7dc4858373b5f6ccca980ed0eeac8 | ["MIT"] | null | null | null |
from django.contrib.auth import get_user_model
from django.test import TestCase
from django.urls import reverse
from rest_framework import status
from rest_framework.test import APIClient
# use that for making our API requests
from core.models import Recipe, Tag, Ingredient
from ..serializers import RecipeSerializer, RecipeDetailSerializer
import tempfile
# allows you to call a function which will then create a temp file
# somewhere in the system and then you can remove that file after
# you've used it
import os
# this allows us to perform things like
# creating path names and also checking if files exist on the system
from PIL import Image
# pillow, this will import our image class which will let us then
# create test images which we can then upload to our API
RECIPES_URL = reverse('recipe:recipe-list')
# since we're going to need to access the URL in more
# or less all the tests let's assign that as a variable
# at top of the class in all capitals.
# app : identifier of the URL in the app
# /api/recipe/recipes
# /api/recipe/recipes/1/ (id) --> detail url
def image_upload_url(recipe_id):
"""Return URL for recipe image upload"""
return reverse('recipe:recipe-upload-image', args=[recipe_id])
# generate our upload image url
# you're going to need the existing recipe ID in order to upload an image
def detail_url(recipe_id):
"""Return recipe detail URL"""
return reverse('recipe:recipe-detail', args=[recipe_id])
# name of the end point that the default router will create
# for our viewset because we're going to have a detail action
# this is how you specify arguments with the reverse function
# you just pass in args and then you pass in a list of the
# arguments you want to add
# here we have single item
def sample_tag(user, name='Main course'):
"""Create and return a sample tag"""
return Tag.objects.create(user=user, name=name)
def sample_ingredient(user, name='Cinnamon'):
"""Create and return a sample ingredient"""
return Ingredient.objects.create(user=user, name=name)
def sample_recipe(user, **params):
"""Create and return a sample recipe"""
defaults = {
'title': 'Sample recipe',
'time_minutes': 10,
'price': 5.00,
}
defaults.update(params)
return Recipe.objects.create(user=user, **defaults)
# convert the dictionary into the argument
# when you use the two asterisks when calling a
# function it has the reverse effect.
class PublicRecipeApiTests(TestCase):
"""Test unauthenticated recipe API access"""
def setUp(self):
self.client = APIClient()
def test_required_auth(self):
"""Test the authenticaiton is required"""
res = self.client.get(RECIPES_URL)
self.assertEqual(res.status_code, status.HTTP_401_UNAUTHORIZED)
class PrivateRecipeApiTests(TestCase):
"""Test authenticated recipe API access"""
def setUp(self):
self.client = APIClient()
self.user = get_user_model().objects.create_user(
'test@londonappdev.com',
'testpass'
)
self.client.force_authenticate(self.user)
def test_retrieve_recipes(self):
"""Test retrieving list of recipes"""
sample_recipe(user=self.user)
sample_recipe(user=self.user)
# we're going to access them by retrieving
# all of the recipes from our database.
res = self.client.get(RECIPES_URL)
recipes = Recipe.objects.all().order_by('-id')
serializer = RecipeSerializer(recipes, many=True)
self.assertEqual(res.status_code, status.HTTP_200_OK)
self.assertEqual(res.data, serializer.data)
def test_recipes_limited_to_user(self):
"""Test retrieving recipes for user"""
# test recipes are limited to the authenticated user.
user2 = get_user_model().objects.create_user(
'other@londonappdev.com',
'pass'
)
sample_recipe(user=user2)
sample_recipe(user=self.user)
res = self.client.get(RECIPES_URL)
# filter our recipes by the authenticated user
recipes = Recipe.objects.filter(user=self.user)
serializer = RecipeSerializer(recipes, many=True)
# many=true: this is because we were returning the list view
# or we wanted to simulate the list view in our serializer
self.assertEqual(res.status_code, status.HTTP_200_OK)
self.assertEqual(len(res.data), 1)
self.assertEqual(res.data, serializer.data)
def test_view_recipe_detail(self):
"""Test viewing a recipe detail"""
recipe = sample_recipe(user=self.user)
recipe.tags.add(sample_tag(user=self.user))
recipe.ingredients.add(sample_ingredient(user=self.user))
url = detail_url(recipe.id)
res = self.client.get(url)
serializer = RecipeDetailSerializer(recipe)
# in this case we just want to serialize a single object
self.assertEqual(res.data, serializer.data)
def test_create_basic_recipe(self):
"""Test creating recipe"""
payload = {
'title': 'Test recipe',
'time_minutes': 30,
'price': 10.00,
}
res = self.client.post(RECIPES_URL, payload)
# post this payload dictionary to our recipes URL.
self.assertEqual(res.status_code, status.HTTP_201_CREATED)
# this is the standard HTTP response code for creating objects
# in an API.
recipe = Recipe.objects.get(id=res.data['id'])
# When you create an object using the Django rest framework the
# default behavior is that it will return a dictionary containing
# the created object This is how I know that if we do res.data and
# retrieve the id key this will get the id of the created object.
# Next what we're going to do is we're going to loop through each
# one of these keys and then we're going to check
# that is the correct value assigned to our recipe model.
for key in payload.keys():
self.assertEqual(payload[key], getattr(recipe, key))
# assertion for each one of these keys, check that it is
# equal to the same key in the recipe
# payload[key]: This will actually get the value of the
# key in our payload object
# getattr: that allows you to retrieve an attribute from
# an object by passing in a variable. (instead of recipe.key)
def test_create_recipe_with_tags(self):
"""Test creating a recipe with tags"""
tag1 = sample_tag(user=self.user, name='Tag 1')
tag2 = sample_tag(user=self.user, name='Tag 2')
payload = {
'title': 'Test recipe with two tags',
'tags': [tag1.id, tag2.id],
'time_minutes': 30,
'price': 10.00
}
res = self.client.post(RECIPES_URL, payload)
self.assertEqual(res.status_code, status.HTTP_201_CREATED)
recipe = Recipe.objects.get(id=res.data['id'])
# retrieve the created recipe
tags = recipe.tags.all()
# retrieve the tags that were created with the recipe
self.assertEqual(tags.count(), 2)
# because we expect two tags to be assigned.
self.assertIn(tag1, tags)
self.assertIn(tag2, tags)
# check if the tags that we created as our sample tags are
# the same as the tags that are in our queryset.
def test_create_recipe_with_ingredients(self):
"""Test creating recipe with ingredients"""
ingredient1 = sample_ingredient(user=self.user, name='Ingredient 1')
ingredient2 = sample_ingredient(user=self.user, name='Ingredient 2')
payload = {
'title': 'Test recipe with ingredients',
'ingredients': [ingredient1.id, ingredient2.id],
'time_minutes': 45,
'price': 15.00
}
res = self.client.post(RECIPES_URL, payload)
self.assertEqual(res.status_code, status.HTTP_201_CREATED)
recipe = Recipe.objects.get(id=res.data['id'])
ingredients = recipe.ingredients.all()
# get the ingredients queryset
self.assertEqual(ingredients.count(), 2)
self.assertIn(ingredient1, ingredients)
self.assertIn(ingredient2, ingredients)
# test partial update and full update of an object
# there are two ways in which you can update an object using the
# API there's two different HTTP methods: put, patch
# patch: Patch is used to update the fields that are provided
# in the payload so the only fields that it will change are the
# fields that are provided and any fields that are omitted from
# the request will not be modified in the object that's being updated.
def test_partial_update_recipe(self):
"""Test updating a recipe with patch"""
# make a request to change a field in our recipe.
recipe = sample_recipe(user=self.user)
recipe.tags.add(sample_tag(user=self.user))
# add a tag to the recipe
new_tag = sample_tag(user=self.user, name='Curry')
# add a new tag and what we're going to do is we're going
# to swap out this tag that we create here and we're going
# to replace it with a new tag
payload = {'title': 'Partially Updated sample recipe',
'tags': [new_tag.id]}
# tags will be replaced with this new tag so the existing tag that
# we created won't be assigned to it
url = detail_url(recipe.id)
# the way that you update an object using the Django rest framework
# view sets is you use the detail URL so that is the URL of the
# recipe with the ID of the recipe that we want to update.
self.client.patch(url, payload)
# make request
# We're going to retrieve an update to the recipe from the
# database and then we're going to check the fields that
# are assigned and just make sure they match what we expect.
recipe.refresh_from_db()
# refreshes the details in our recipe from the database
# typically when you create a new model and you have a
# reference to a model the details of that won't change
# unless you do refresh from dB if the values have changed
# in the database.
self.assertEqual(recipe.title, payload['title'])
tags = recipe.tags.all()
self.assertEqual(len(tags), 1)
self.assertIn(new_tag, tags)
# check that the tag new tag is in the tags that we retrieved
# test full update
# put: it will replace the object that we're updating with the full
# object that is provided in the request that means if you exclude
# any fields in the payload those fields will actually be removed
# from the object that you're updating
def test_full_update_recipe(self):
"""Test updating a recipe with put"""
recipe = sample_recipe(user=self.user)
recipe.tags.add(sample_tag(user=self.user))
payload = {
'title': 'Fully Updated sample recipe',
'time_minutes': 25,
'price': 5.00
}
url = detail_url(recipe.id)
self.client.put(url, payload)
recipe.refresh_from_db()
self.assertEqual(recipe.title, payload['title'])
self.assertEqual(recipe.time_minutes, payload['time_minutes'])
self.assertEqual(recipe.price, payload['price'])
tags = recipe.tags.all()
self.assertEqual(len(tags), 0)
# we will check that the tags assigned are zero now as I explained
# because when we do a HTTP put if we omit a field
# that should clear the value of that field so now our recipe
# that did have a sample tag assigned should not have any tags
# assigned
class RecipeImageUploadTests(TestCase):
# what happens at the setup of the test
def setUp(self):
self.client = APIClient()
self.user = get_user_model().objects.create_user('user', 'testpass')
self.client.force_authenticate(self.user)
# authenticate our user
self.recipe = sample_recipe(user=self.user)
# after the test runs it runs tear down
def tearDown(self):
self.recipe.image.delete()
# make sure that our file system is kept clean after our test
# removing all of the test files that we create
# delete the image if it exists in the recipe
def test_upload_image_to_recipe(self):
"""Test uploading an image to recipe"""
url = image_upload_url(self.recipe.id)
# going to use the sample recipe that gets created
# it creates a named temporary file on the system at a random
# location usually in the /temp folder
# create a temporary file we're going to write an image
# to that file and then we're going to upload that file
# through the API like you would with a HTTP POST and then
# we're going to run some assertions to check that it
# uploaded correctly
with tempfile.NamedTemporaryFile(suffix='.jpg') as ntf:
img = Image.new('RGB', (10, 10))
# creates black square
img.save(ntf, format='JPEG')
ntf.seek(0)
# it's the way that Python reads files so because we've
# saved the file it will be the seeking will be done to the
# end of the file so if you try to access it then it would
# just be blank because you've already read up to the end
# of the file so use this seek function to set
# the pointer back to the beginning of the file
res = self.client.post(url, {'image': ntf}, format='multipart')
# assertions
# refreshing the database for our recipe
self.recipe.refresh_from_db()
self.assertEqual(res.status_code, status.HTTP_200_OK)
# check that the images in the response so that's the path to
# the image that should be accessible
self.assertIn('image', res.data)
# check that the path exists for the image that is saved to our model
self.assertTrue(os.path.exists(self.recipe.image.path))
def test_upload_image_bad_request(self):
"""Test uploading an invalid image"""
url = image_upload_url(self.recipe.id)
res = self.client.post(url, {'image': 'notimage'}, format='multipart')
self.assertEqual(res.status_code, status.HTTP_400_BAD_REQUEST)
def test_filter_recipes_by_tags(self):
"""Test returning recipes with specific tags"""
recipe1 = sample_recipe(user=self.user, title='Thai vegetable curry')
recipe2 = sample_recipe(user=self.user, title='Aubergine with tahini')
tag1 = sample_tag(user=self.user, name='Vegan')
tag2 = sample_tag(user=self.user, name='Vegetarian')
recipe1.tags.add(tag1)
recipe2.tags.add(tag2)
recipe3 = sample_recipe(user=self.user, title='Fish and chips')
res = self.client.get(
RECIPES_URL,
{'tags': '{},{}'.format(tag1.id, tag2.id)}
)
# this will create a comma separated list string and assign
# it to the tags get parameter
# if our filtering is working
# should only return the first two recipe
# test the response:
serializer1 = RecipeSerializer(recipe1)
serializer2 = RecipeSerializer(recipe2)
serializer3 = RecipeSerializer(recipe3)
# serialize the recipes and we're going to check if
# they exist in the responses returned
self.assertIn(serializer1.data, res.data)
self.assertIn(serializer2.data, res.data)
self.assertNotIn(serializer3.data, res.data)
# check the return result
def test_filter_recipes_by_ingredients(self):
"""Test returning recipes with specific ingredients"""
recipe1 = sample_recipe(user=self.user, title='Posh beans on toast')
recipe2 = sample_recipe(user=self.user, title='Chicken cacciatore')
ingredient1 = sample_ingredient(user=self.user, name='Feta cheese')
ingredient2 = sample_ingredient(user=self.user, name='Chicken')
recipe1.ingredients.add(ingredient1)
recipe2.ingredients.add(ingredient2)
recipe3 = sample_recipe(user=self.user, title='Steak and mushrooms')
# test API
res = self.client.get(
RECIPES_URL,
{'ingredients': '{},{}'.format(ingredient1.id, ingredient2.id)}
)
serializer1 = RecipeSerializer(recipe1)
serializer2 = RecipeSerializer(recipe2)
serializer3 = RecipeSerializer(recipe3)
self.assertIn(serializer1.data, res.data)
self.assertIn(serializer2.data, res.data)
self.assertNotIn(serializer3.data, res.data)
| 40.43932 | 78 | 0.667547 | 2,339 | 16,661 | 4.689183 | 0.174861 | 0.022611 | 0.02954 | 0.015044 | 0.343636 | 0.292943 | 0.244621 | 0.165664 | 0.131109 | 0.093363 | 0 | 0.00967 | 0.248965 | 16,661 | 411 | 79 | 40.537713 | 0.866858 | 0.398596 | 0 | 0.353234 | 0 | 0 | 0.073004 | 0.007045 | 0 | 0 | 0 | 0 | 0.169154 | 1 | 0.109453 | false | 0.014925 | 0.049751 | 0 | 0.199005 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
0ae8c65cafc822a3267fba35c6ed220e7f320711 | 11,646 | py | Python | gwcs/coordinate_frames.py | migueldvb/gwcs | 4eb2abdb1d9d49ee10c1edbcae0d1cec4c758c39 | ["BSD-3-Clause"] | null | null | null | gwcs/coordinate_frames.py | migueldvb/gwcs | 4eb2abdb1d9d49ee10c1edbcae0d1cec4c758c39 | ["BSD-3-Clause"] | null | null | null | gwcs/coordinate_frames.py | migueldvb/gwcs | 4eb2abdb1d9d49ee10c1edbcae0d1cec4c758c39 | ["BSD-3-Clause"] | null | null | null |
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
Defines coordinate frames and ties them to data axes.
"""
from __future__ import absolute_import, division, unicode_literals, print_function
import numpy as np
from astropy import units as u
from astropy import utils as astutil
from astropy import coordinates as coord
from astropy.extern import six
from . import utils as gwutils
__all__ = ['Frame2D', 'CelestialFrame', 'SpectralFrame', 'CompositeFrame',
'CoordinateFrame']
STANDARD_REFERENCE_FRAMES = [frame.upper() for frame in coord.builtin_frames.__all__]
STANDARD_REFERENCE_POSITION = ["GEOCENTER", "BARYCENTER", "HELIOCENTER",
"TOPOCENTER", "LSR", "LSRK", "LSRD",
"GALACTIC_CENTER", "LOCAL_GROUP_CENTER"]
class CoordinateFrame(object):
"""
Base class for CoordinateFrames.
Parameters
----------
naxes : int
Number of axes.
axes_type : str
One of ["SPATIAL", "SPECTRAL", "TIME"]
axes_order : tuple of int
A dimension in the input data that corresponds to this axis.
reference_frame : astropy.coordinates.builtin_frames
Reference frame (usually used with output_frame to convert to world coordinate objects).
reference_position : str
Reference position - one of `STANDARD_REFERENCE_POSITION`
unit : list of astropy.units.Unit
Unit for each axis.
axes_names : list
Names of the axes in this frame.
name : str
Name of this frame.
"""
def __init__(self, naxes, axes_type, axes_order, reference_frame=None,
reference_position=None, unit=None, axes_names=None,
name=None):
self._naxes = naxes
self._axes_order = tuple(axes_order)
if isinstance(axes_type, six.string_types):
self._axes_type = (axes_type,)
else:
self._axes_type = tuple(axes_type)
self._reference_frame = reference_frame
if unit is not None:
if astutil.isiterable(unit):
unit = tuple(unit)
else:
unit = (unit,)
if len(unit) != naxes:
raise ValueError("Number of units does not match number of axes.")
else:
self._unit = tuple([u.Unit(au) for au in unit])
if axes_names is not None:
if isinstance(axes_names, six.string_types):
axes_names = (axes_names,)
else:
axes_names = tuple(axes_names)
if len(axes_names) != naxes:
raise ValueError("Number of axes names does not match number of axes.")
else:
axes_names = tuple([""] * naxes)
self._axes_names = axes_names
if name is None:
self._name = self.__class__.__name__
else:
self._name = name
if reference_position is not None:
self._reference_position = reference_position
else:
self._reference_position = None
super(CoordinateFrame, self).__init__()
def __repr__(self):
fmt = '<{0}(name="{1}", unit={2}, axes_names={3}, axes_order={4}'.format(
self.__class__.__name__, self.name,
self.unit, self.axes_names, self.axes_order)
if self.reference_position is not None:
fmt += ', reference_position="{0}"'.format(self.reference_position)
if self.reference_frame is not None:
fmt += ", reference_frame={0}".format(self.reference_frame)
fmt += ")>"
return fmt
def __str__(self):
if self._name is not None:
return self._name
else:
return self.__class__.__name__
@property
def name(self):
""" A custom name of this frame."""
return self._name
@name.setter
def name(self, val):
""" A custom name of this frame."""
self._name = val
@property
def naxes(self):
""" The number of axes intheis frame."""
return self._naxes
@property
def unit(self):
"""The unit of this frame."""
return self._unit
@property
def axes_names(self):
""" Names of axes in the frame."""
return self._axes_names
@property
def axes_order(self):
""" A tuple of indices which map inputs to axes."""
return self._axes_order
@property
def reference_frame(self):
return self._reference_frame
@property
def reference_position(self):
try:
return self._reference_position
except AttributeError:
return None
def input_axes(self, start_frame=None):
"""
Computes which axes in `start_frame` contribute to each axis in the current frame.
Parameters
----------
start_frame : ~gwcs.coordinate_frames.CoordinateFrame
A frame in the WCS pipeline
The transform between start_frame and the current frame is used to compute the
mapping inputs: outputs.
"""
sep = self._separable(start_frame)
inputs = []
for ax in self.axes_order:
inputs.append(list(sep[ax].nonzero()[0]))
return inputs
@property
def axes_type(self):
""" Type of this frame : 'SPATIAL', 'SPECTRAL', 'TIME'. """
return self._axes_type
def coordinates(self, *args):
""" Create world coordinates object"""
raise NotImplementedError("Subclasses may implement this")
class CelestialFrame(CoordinateFrame):
"""
Celestial Frame Representation
Parameters
----------
axes_order : tuple of int
A dimension in the input data that corresponds to this axis.
reference_frame : astropy.coordinates.builtin_frames
A reference frame.
reference_position : str
Reference position.
unit : str or units.Unit instance or iterable of those
Units on axes.
axes_names : list
Names of the axes in this frame.
name : str
Name of this frame.
"""
def __init__(self, axes_order=None, reference_frame=None,
unit=None, axes_names=None,
name=None):
naxes = 2
if reference_frame is not None:
if reference_frame.name.upper() in STANDARD_REFERENCE_FRAMES:
_axes_names = list(reference_frame.representation_component_names.values())
if 'distance' in _axes_names:
_axes_names.remove('distance')
if axes_names is None:
axes_names = _axes_names
naxes = len(_axes_names)
_unit = list(reference_frame.representation_component_units.values())
if unit is None and _unit:
unit = _unit
if axes_order is None:
axes_order = tuple(range(naxes))
if unit is None:
unit = tuple([u.degree] * naxes)
axes_type = ['SPATIAL'] * naxes
super(CelestialFrame, self).__init__(naxes=naxes, axes_type=axes_type,
axes_order=axes_order,
reference_frame=reference_frame,
unit=unit,
axes_names=axes_names,
name=name)
def coordinates(self, *args):
"""
Create a SkyCoord object.
Parameters
----------
args : float
inputs to wcs.input_frame
"""
        # Reorder axes if necessary.
        return coord.SkyCoord(*args, unit=self.unit, frame=self._reference_frame)
class SpectralFrame(CoordinateFrame):
"""
Represents Spectral Frame
Parameters
----------
axes_order : tuple or int
A dimension in the input data that corresponds to this axis.
reference_frame : astropy.coordinates.builtin_frames
Reference frame (usually used with output_frame to convert to world coordinate objects).
unit : str or units.Unit instance
Spectral unit.
axes_names : str
Spectral axis name.
name : str
Name for this frame.
"""
def __init__(self, axes_order=(0,), reference_frame=None, unit=None,
axes_names=None, name=None, reference_position=None):
super(SpectralFrame, self).__init__(naxes=1, axes_type="SPECTRAL", axes_order=axes_order,
axes_names=axes_names, reference_frame=reference_frame,
unit=unit, name=name,
reference_position=reference_position)
def coordinates(self, *args):
if np.isscalar(args):
return args * self.unit[0]
else:
return args[0] * self.unit[0]
class CompositeFrame(CoordinateFrame):
"""
Represents one or more frames.
Parameters
----------
frames : list
List of frames (TimeFrame, CelestialFrame, SpectralFrame, CoordinateFrame).
name : str
Name for this frame.
"""
def __init__(self, frames, name=None):
self._frames = frames[:]
naxes = sum([frame._naxes for frame in self._frames])
axes_type = list(range(naxes))
unit = list(range(naxes))
axes_names = list(range(naxes))
axes_order = []
for frame in frames:
axes_order.extend(frame.axes_order)
for frame in frames:
for ind, axtype, un, n in zip(frame.axes_order, frame.axes_type,
frame.unit, frame.axes_names):
axes_type[ind] = axtype
axes_names[ind] = n
unit[ind] = un
if len(np.unique(axes_order)) != len(axes_order):
raise ValueError("Incorrect numbering of axes, "
"axes_order should contain unique numbers, "
"got {}.".format(axes_order))
super(CompositeFrame, self).__init__(naxes, axes_type=axes_type,
axes_order=axes_order,
unit=unit, axes_names=axes_names,
name=name)
@property
def frames(self):
return self._frames
def __repr__(self):
return repr(self.frames)
def coordinates(self, *args):
coo = []
for frame in self.frames:
fargs = [args[i] for i in frame.axes_order]
print(frame, fargs, frame.axes_order)
coo.append(frame.coordinates(*fargs))
return coo
class Frame2D(CoordinateFrame):
"""
A 2D coordinate frame.
Parameters
----------
axes_order : tuple of int
A dimension in the input data that corresponds to this axis.
unit : list of astropy.units.Unit
Unit for each axis.
axes_names : list
Names of the axes in this frame.
name : str
Name of this frame.
"""
def __init__(self, axes_order=(0, 1), unit=(u.pix, u.pix), axes_names=('x', 'y'),
name=None):
super(Frame2D, self).__init__(2, ["SPATIAL", "SPATIAL"], axes_order, name=name,
axes_names=axes_names, unit=unit)
def coordinates(self, *args):
args = [args[i] for i in self.axes_order]
coo = tuple([arg * un for arg, un in zip(args, self.unit)])
return coo
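# A minimal usage sketch (illustrative), assuming astropy is available; the
# axis ordering, units and names below are assumptions chosen for the example.
if __name__ == '__main__':
    sky = CelestialFrame(reference_frame=coord.ICRS(), axes_order=(0, 1))
    spec = SpectralFrame(axes_order=(2,), unit=[u.micron], axes_names=['wavelength'])
    world = CompositeFrame([sky, spec], name='sky_and_spectral')
    print(world.coordinates(5.0, 10.0, 1.5))  # [SkyCoord in ICRS, 1.5 micron Quantity]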
| 32.713483 | 99 | 0.574618 | 1,319 | 11,646 | 4.855193 | 0.156937 | 0.05762 | 0.01827 | 0.022486 | 0.313086 | 0.232355 | 0.199563 | 0.190818 | 0.173329 | 0.150531 | 0 | 0.002846 | 0.336339 | 11,646 | 355 | 100 | 32.805634 | 0.825721 | 0.236133 | 0 | 0.206186 | 0 | 0 | 0.060014 | 0.002858 | 0 | 0 | 0 | 0 | 0 | 1 | 0.123711 | false | 0 | 0.036082 | 0.015464 | 0.28866 | 0.010309 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
0aeade2b44478bdc750fc6e4297d377345ef5136 | 500 | py | Python | brownie_fund_me/scripts/fund_and_withdraw.py | WangCHEN9/solidity_demos | cf28111a1e972ab9dde70f6d3fac22c897d8b660 | ["MIT"] | null | null | null | brownie_fund_me/scripts/fund_and_withdraw.py | WangCHEN9/solidity_demos | cf28111a1e972ab9dde70f6d3fac22c897d8b660 | ["MIT"] | null | null | null | brownie_fund_me/scripts/fund_and_withdraw.py | WangCHEN9/solidity_demos | cf28111a1e972ab9dde70f6d3fac22c897d8b660 | ["MIT"] | null | null | null |
from brownie import FundMe
from scripts.helpful_scripts import get_account
def fund():
    fund_me = FundMe[-1]  # most recently deployed FundMe contract
    account = get_account()
    entrance_fee = fund_me.getEntranceFee()
    print(f"entrance fee is {entrance_fee}")
    print("funding...")
    fund_me.fund({"from": account, "value": entrance_fee})
def withdraw():
fund_me = FundMe[-1]
account = get_account()
fund_me.withdraw({"from": account})
def main():
fund()
withdraw()
if __name__ == "__main__":
main()
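Hedged usage note: a script like this is normally executed through Brownie's CLI from the project root, so that the FundMe deployment artifacts and configured accounts are available. The network name below is an assumption, not taken from the source.
# brownie run scripts/fund_and_withdraw.py --network development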
| 18.518519
| 58
| 0.654
| 63
| 500
| 4.873016
| 0.380952
| 0.09772
| 0.078176
| 0.084691
| 0.19544
| 0.19544
| 0.19544
| 0
| 0
| 0
| 0
| 0.005025
| 0.204
| 500
| 26
| 59
| 19.230769
| 0.766332
| 0
| 0
| 0.222222
| 0
| 0
| 0.112
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.166667
| false
| 0
| 0.111111
| 0
| 0.277778
| 0.111111
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
0aeb5c0e9a64382d41d3447557ec9fb64a32a973
| 409
|
py
|
Python
|
ex019.py
|
jefernathan/Python
|
2f840a625e8d46d41ab36df07ef50ae15a03c5ab
|
[
"MIT"
] | null | null | null |
ex019.py
|
jefernathan/Python
|
2f840a625e8d46d41ab36df07ef50ae15a03c5ab
|
[
"MIT"
] | null | null | null |
ex019.py
|
jefernathan/Python
|
2f840a625e8d46d41ab36df07ef50ae15a03c5ab
|
[
"MIT"
] | null | null | null |
# A teacher wants to randomly pick one of his four students to erase the board.
# Write a program that helps him by reading the students' names and printing the chosen one on screen.
from random import choice
nome1 = input('Enter a name: ')
nome2 = input('Enter another name: ')
nome3 = input('Enter one more name: ')
nome4 = input('Enter the last name: ')
nome = [nome1, nome2, nome3, nome4]
print(choice(nome))
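A slightly more general sketch (my addition, not part of the exercise): reading the four names in a loop instead of four separate variables.
from random import choice

nomes = [input(f'Enter name #{i + 1}: ') for i in range(4)]
print(choice(nomes))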
| 34.083333
| 173
| 0.728606
| 66
| 409
| 4.515152
| 0.590909
| 0.147651
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.02381
| 0.178484
| 409
| 11
| 174
| 37.181818
| 0.863095
| 0.418093
| 0
| 0
| 0
| 0
| 0.330508
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.142857
| 0
| 0.142857
| 0.142857
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
0aeb7979679122962a3fff866f48391b6b9c9278
| 489
|
py
|
Python
|
contacts/admin.py
|
liviamendes/agenda-django-project
|
d602bb5e762ea477c3c97b5a475ad79036c0c93d
|
[
"MIT"
] | null | null | null |
contacts/admin.py
|
liviamendes/agenda-django-project
|
d602bb5e762ea477c3c97b5a475ad79036c0c93d
|
[
"MIT"
] | null | null | null |
contacts/admin.py
|
liviamendes/agenda-django-project
|
d602bb5e762ea477c3c97b5a475ad79036c0c93d
|
[
"MIT"
] | null | null | null |
from django.contrib import admin
from .models import Categoria, Contact
class ContactAdmin(admin.ModelAdmin):
list_display = ('id', 'name', 'last_name', 'phone', 'email', 'creation_date', 'categoria', 'show')
list_display_links = ('id', 'name', 'last_name')
list_filter = ('categoria',)
list_per_page = 10
search_fields = ('name', 'last_name', 'phone')
list_editable = ('phone', 'show')
admin.site.register(Categoria)
admin.site.register(Contact, ContactAdmin)
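For context, a models.py consistent with this admin configuration might look roughly like the sketch below. The field names come from the admin options above, but every field type is a guess, not taken from the actual project.
# Hypothetical models.py matching the admin above (field types are assumptions).
from django.db import models

class Categoria(models.Model):
    nome = models.CharField(max_length=50)  # assumed field

class Contact(models.Model):
    name = models.CharField(max_length=50)
    last_name = models.CharField(max_length=50, blank=True)
    phone = models.CharField(max_length=20)
    email = models.EmailField(blank=True)
    creation_date = models.DateTimeField(auto_now_add=True)
    categoria = models.ForeignKey(Categoria, on_delete=models.SET_NULL, null=True)
    show = models.BooleanField(default=True)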
| 30.5625
| 102
| 0.691207
| 59
| 489
| 5.525424
| 0.525424
| 0.07362
| 0.110429
| 0.08589
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.004796
| 0.147239
| 489
| 15
| 103
| 32.6
| 0.776978
| 0
| 0
| 0
| 0
| 0
| 0.208589
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.181818
| 0
| 0.818182
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
0aec7fad0f474867079a857e5fa0aa0966e20a00
| 2,472
|
py
|
Python
|
upload_from_folder.py
|
robinrobinzon/fastpic
|
966f1aa8c6d7e98651727e7ed7f6b25970d5da11
|
[
"MIT"
] | null | null | null |
upload_from_folder.py
|
robinrobinzon/fastpic
|
966f1aa8c6d7e98651727e7ed7f6b25970d5da11
|
[
"MIT"
] | null | null | null |
upload_from_folder.py
|
robinrobinzon/fastpic
|
966f1aa8c6d7e98651727e7ed7f6b25970d5da11
|
[
"MIT"
] | null | null | null |
import datetime
import os
import shutil
import tempfile
from joblib import Parallel, delayed
from fastpic_upload import upload_file_to_fastpic
_n_jobs_for_upload = 20
_root_folders_set = (
'/path/to/folder',
)
_spoiler_for_each_file = True
def process_one_pic(result_key, pic_path, tmp_dir):
pic_url, pic_link = upload_file_to_fastpic(pic_path, tmp_dir)
print(pic_url)
return result_key, (pic_url, pic_link)
def upload_from_folder(folder_path):
pics_to_upload = {}
for root, dirs, files in os.walk(folder_path):
for file in files:
if file.split('.')[-1] not in ('jpg', 'jpeg', 'bmp', 'png'):
continue
file_path = os.path.join(root, file)
pics_to_upload[file] = file_path
    print('Found {} photos to upload'.format(len(pics_to_upload)))
result = {}
tmp_dir = tempfile.mkdtemp()
try:
sub_results = Parallel(n_jobs=_n_jobs_for_upload, backend='threading')(
delayed(process_one_pic)(key, pics_to_upload[key], tmp_dir) for key in sorted(pics_to_upload))
for sub_result in sub_results:
result[sub_result[0]] = sub_result[1]
finally:
shutil.rmtree(tmp_dir)
return result
def print_result_to_file(result, result_file_path):
with open(result_file_path, 'w', encoding='utf8', newline='') as codes_file:
        codes_file.write('[spoiler="Скриншоты"]')  # "Скриншоты" means "Screenshots" (BB-code output)
codes_file.write(os.linesep)
codes_file.write(os.linesep)
for result_key in sorted(result):
if _spoiler_for_each_file:
codes_file.write('[spoiler="{}"]'.format(result_key))
codes_file.write(os.linesep)
url, link = result[result_key]
codes_file.write('[url={}][img]{}[/img][/url]'.format(link, url))
if _spoiler_for_each_file:
codes_file.write(os.linesep)
codes_file.write('[/spoiler]')
codes_file.write(os.linesep)
codes_file.write(os.linesep)
codes_file.write('[/spoiler]')
def main():
for root_folder in _root_folders_set:
result = upload_from_folder(root_folder)
print_result_to_file(result, os.path.join(root_folder, 'result_codes.txt'))
if __name__ == '__main__':
started = datetime.datetime.now()
print(started, 'started')
main()
finished = datetime.datetime.now()
print(finished, 'all done in', finished - started)
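The core pattern above is joblib's Parallel/delayed fan-out over a thread pool. A minimal standalone sketch of the same pattern (toy function, not the uploader):
from joblib import Parallel, delayed

def square(x):
    return x * x

# Run square() over the inputs using 4 worker threads, preserving input order.
results = Parallel(n_jobs=4, backend='threading')(delayed(square)(i) for i in range(8))
print(results)  # [0, 1, 4, 9, 16, 25, 36, 49]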
| 29.783133
| 106
| 0.651294
| 335
| 2,472
| 4.465672
| 0.256716
| 0.072193
| 0.102941
| 0.064171
| 0.229947
| 0.141043
| 0.141043
| 0.141043
| 0.104947
| 0.061497
| 0
| 0.00316
| 0.231796
| 2,472
| 82
| 107
| 30.146341
| 0.784623
| 0
| 0
| 0.16129
| 0
| 0
| 0.075709
| 0.019433
| 0
| 0
| 0
| 0
| 0
| 1
| 0.064516
| false
| 0
| 0.096774
| 0
| 0.193548
| 0.112903
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
0aecc3617c0fed4d5c58d568836e4b90d9b9886f
| 1,994
|
py
|
Python
|
tools/accuracy_checker/openvino/tools/accuracy_checker/postprocessor/clip_segmentation_mask.py
|
TolyaTalamanov/open_model_zoo
|
1697e60712df4ca72635a2080a197b9d3bc24129
|
[
"Apache-2.0"
] | 2,201
|
2018-10-15T14:37:19.000Z
|
2020-07-16T02:05:51.000Z
|
tools/accuracy_checker/openvino/tools/accuracy_checker/postprocessor/clip_segmentation_mask.py
|
Pandinosaurus/open_model_zoo
|
2543996541346418919c5cddfb71e33e2cdef080
|
[
"Apache-2.0"
] | 759
|
2018-10-18T07:43:55.000Z
|
2020-07-16T01:23:12.000Z
|
tools/accuracy_checker/openvino/tools/accuracy_checker/postprocessor/clip_segmentation_mask.py
|
Pandinosaurus/open_model_zoo
|
2543996541346418919c5cddfb71e33e2cdef080
|
[
"Apache-2.0"
] | 808
|
2018-10-16T14:03:49.000Z
|
2020-07-15T11:41:45.000Z
|
"""
Copyright (c) 2018-2022 Intel Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import numpy as np
from .postprocessor import PostprocessorWithSpecificTargets
from ..representation import BrainTumorSegmentationAnnotation, BrainTumorSegmentationPrediction
from ..config import NumberField, ConfigError
class ClipSegmentationMask(PostprocessorWithSpecificTargets):
__provider__ = 'clip_segmentation_mask'
annotation_types = (BrainTumorSegmentationAnnotation, )
prediction_types = (BrainTumorSegmentationPrediction, )
@classmethod
def parameters(cls):
parameters = super().parameters()
parameters.update({
'min_value': NumberField(value_type=int, min_value=0, optional=True, default=0, description="Min value"),
'max_value': NumberField(value_type=int, description="Max value")
})
return parameters
def configure(self):
self.min_value = self.get_value_from_config('min_value')
self.max_value = self.get_value_from_config('max_value')
if self.max_value < self.min_value:
raise ConfigError('max_value should be greater than min_value')
def process_image(self, annotation, prediction):
for target in annotation:
target.mask = np.clip(target.mask, a_min=self.min_value, a_max=self.max_value)
for target in prediction:
target.mask = np.clip(target.mask, a_min=self.min_value, a_max=self.max_value)
return annotation, prediction
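The whole postprocessor reduces to np.clip applied to each mask; a tiny worked example of that call (illustration only):
import numpy as np

mask = np.array([0, 3, 7, -2])
print(np.clip(mask, a_min=1, a_max=5))  # [1 3 5 1]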
| 38.346154
| 117
| 0.739218
| 248
| 1,994
| 5.798387
| 0.459677
| 0.05007
| 0.03338
| 0.022253
| 0.157163
| 0.11822
| 0.080668
| 0.080668
| 0.080668
| 0.080668
| 0
| 0.008605
| 0.184052
| 1,994
| 51
| 118
| 39.098039
| 0.87523
| 0.284855
| 0
| 0.074074
| 0
| 0
| 0.083216
| 0.015515
| 0
| 0
| 0
| 0
| 0
| 1
| 0.111111
| false
| 0
| 0.148148
| 0
| 0.481481
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
0aee1a078e80effb05eed8b8321db099a4b35623
| 1,925
|
py
|
Python
|
tests/test_utils.py
|
isabella232/pynacl
|
b3f6c320569d858ba61d4bdf2ac788564528c1c9
|
[
"Apache-2.0"
] | 756
|
2015-01-03T17:49:44.000Z
|
2022-03-31T13:54:33.000Z
|
tests/test_utils.py
|
isabella232/pynacl
|
b3f6c320569d858ba61d4bdf2ac788564528c1c9
|
[
"Apache-2.0"
] | 540
|
2015-01-02T10:54:33.000Z
|
2022-03-05T18:47:01.000Z
|
tests/test_utils.py
|
isabella232/pynacl
|
b3f6c320569d858ba61d4bdf2ac788564528c1c9
|
[
"Apache-2.0"
] | 217
|
2015-01-09T00:48:01.000Z
|
2022-03-26T08:53:32.000Z
|
# Copyright 2013 Donald Stufft and individual contributors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
import nacl.secret
import nacl.utils
def test_random_bytes_produces():
assert len(nacl.utils.random(16)) == 16
def test_random_bytes_produces_different_bytes():
assert nacl.utils.random(16) != nacl.utils.random(16)
def test_string_fixer():
assert str(nacl.secret.SecretBox(b"\x00" * 32)) == str(b"\x00" * 32)
def test_deterministic_random_bytes():
expected = (
b"0d8e6cc68715648926732e7ea73250cfaf2d58422083904c841a8ba"
b"33b986111f346ba50723a68ae283524a6bded09f83be6b80595856f"
b"72e25b86918e8b114bafb94bc8abedd73daab454576b7c5833eb0bf"
b"982a1bb4587a5c970ff0810ca3b791d7e12"
)
seed = (
b"\x00\x01\x02\x03\x04\x05\x06\x07\x08\x09\x0a\x0b\x0c\x0d"
b"\x0e\x0f\x10\x11\x12\x13\x14\x15\x16\x17\x18\x19\x1a\x1b"
b"\x1c\x1d\x1e\x1f"
)
assert (
nacl.utils.randombytes_deterministic(
100, seed, encoder=nacl.utils.encoding.HexEncoder
)
== expected
)
def test_deterministic_random_bytes_invalid_seed_length():
expected = "Deterministic random bytes must be generated from 32 bytes"
seed = b"\x00\x01\x02\x03\x04\x05\x06\x07\x08\x09\x0a"
with pytest.raises(TypeError) as e:
nacl.utils.randombytes_deterministic(100, seed)
assert expected in str(e.value)
| 32.083333
| 75
| 0.725195
| 257
| 1,925
| 5.350195
| 0.540856
| 0.045818
| 0.032727
| 0.037091
| 0.196364
| 0.113455
| 0.055273
| 0.055273
| 0.055273
| 0.055273
| 0
| 0.14864
| 0.178701
| 1,925
| 59
| 76
| 32.627119
| 0.721063
| 0.298182
| 0
| 0
| 0
| 0.090909
| 0.327599
| 0.266268
| 0
| 0
| 0
| 0
| 0.151515
| 1
| 0.151515
| false
| 0
| 0.090909
| 0
| 0.242424
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
0aefad001e36b9eae9b3eb392972175239563b8d
| 2,893
|
py
|
Python
|
guesstheword.py
|
Cha0sNation/RandomPython
|
7ba41d78f27bd90e9c09efcd4d5c26eac93e74ec
|
[
"MIT"
] | null | null | null |
guesstheword.py
|
Cha0sNation/RandomPython
|
7ba41d78f27bd90e9c09efcd4d5c26eac93e74ec
|
[
"MIT"
] | null | null | null |
guesstheword.py
|
Cha0sNation/RandomPython
|
7ba41d78f27bd90e9c09efcd4d5c26eac93e74ec
|
[
"MIT"
] | null | null | null |
#! /home/cha0snation/anaconda3/bin/python
import random
def setup():
words = ["banana", "apple", "orange", "peach", "grape", "watermelon"]
output = []
word = words[random.randint(0, len(words) - 1)]
playing = True
tries = 5
return [words, output, word, tries, playing]
def check_finished(output, tries):
if tries == 0:
print("You ran out of tries")
print()
return True
count = 0
for letter in output:
if letter != "_":
count += 1
if count == len(output):
print_output(output)
print()
print()
return True
return False
def check_letter(guess, word, tries):
    # Note: mutates the module-level "output" list, like the rest of this script.
    correct = False
    for index, letter in enumerate(word):
        if letter == guess:
            output[index] = guess
            correct = True
        if index == len(word) - 1:
            if not correct:
                print("Incorrect guess")
                print()
                return tries - 1
            else:
                return tries
def check_same(guess, output):
same = False
for i in output:
if i == guess:
same = True
if same:
print("You already found that letter")
print()
print_output(output)
print()
print()
while True:
guess = str(input("Guess: "))
if len(guess) == 1:
break
return guess
else:
return guess
def print_output(output):
for i in output:
print("{0} ".format(i), end="")
if __name__ == "__main__":
words, output, word, tries, playing = setup()
while playing:
print("Try to guess the word:")
if tries == 1:
print("You have {0} try left.".format(tries))
else:
print("You have {0} tries left.".format(tries))
# print("DEBUG: word is {0}".format(word))
if output == []:
for i in word:
output.append("_")
for i in range(len(output)):
print("_ ", end="")
else:
print_output(output)
print()
print()
try:
while True:
guess = str(input("Guess: "))
if len(guess) == 1:
break
except (EOFError, KeyboardInterrupt):
print()
break
except ValueError:
print("Invalid guess")
break
print()
guess = check_same(guess, output)
tries = check_letter(guess, word, tries)
if check_finished(output, tries):
choice = input("Do you want to play again ? (y or n): ")
print()
if choice.lower().startswith("y"):
words, output, word, tries, playing = setup()
else:
playing = False
| 24.726496
| 73
| 0.483927
| 306
| 2,893
| 4.506536
| 0.27451
| 0.047861
| 0.049311
| 0.04351
| 0.187092
| 0.108774
| 0.062364
| 0.062364
| 0.062364
| 0.062364
| 0
| 0.009855
| 0.403733
| 2,893
| 116
| 74
| 24.939655
| 0.789565
| 0.027999
| 0
| 0.4
| 0
| 0
| 0.089324
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.052632
| false
| 0
| 0.010526
| 0
| 0.147368
| 0.263158
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
0af0f43e75ad092a7a05698be61aa6dca9c4178e
| 2,131
|
py
|
Python
|
web_app/index.py
|
svakulenk0/ArtDATIS
|
29e646f7bcb931e733ee248cc973411ffb18be64
|
[
"MIT"
] | null | null | null |
web_app/index.py
|
svakulenk0/ArtDATIS
|
29e646f7bcb931e733ee248cc973411ffb18be64
|
[
"MIT"
] | 9
|
2020-03-24T17:57:03.000Z
|
2022-03-12T00:08:07.000Z
|
web_app/index.py
|
svakulenk0/ArtDATIS
|
29e646f7bcb931e733ee248cc973411ffb18be64
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
Created on Dec 8, 2019
.. codeauthor: svitlana vakulenko
<svitlana.vakulenko@gmail.com>
Index docs into ES
https://qbox.io/blog/building-an-elasticsearch-index-with-python
'''
from settings import *
import glob
import re
# first n characters of each document, used for the preview
LIMIT_START = 100
txts_path = '%s/artdatis/tagging/OCRed/typed/' % DATA_PATH
text_corpus = []
def corpus_iterator():
# filter out and collect text files
for file_path in glob.glob(txts_path+'*_text.txt'):
with open(file_path, encoding="utf-8") as file:
text = file.read()
# filter duplicates
if text not in text_corpus:
text_corpus.append(text)
text = re.sub(' +', ' ', text)
start_text = text.lstrip()[:LIMIT_START]
with open(file_path.split('_text.txt')[0]+'_path.txt') as path_file:
path = path_file.read().strip().replace(DATA_PATH, '/images')
yield {
"_index": INDEX_NAME,
"_type": TYPE_NAME,
"_source": {"file_path": path, "text": text, "start_text": start_text},
}
print("Loaded %d documents"%len(text_corpus))
from elasticsearch import Elasticsearch
from elasticsearch.helpers import bulk
# create ES client, create index
es = Elasticsearch(hosts = [ES_HOST])
if es.indices.exists(INDEX_NAME):
print("deleting '%s' index..." % (INDEX_NAME))
res = es.indices.delete(index = INDEX_NAME)
print(" response: '%s'" % (res))
request_body = {
"settings" : {
"number_of_shards": 1,
"number_of_replicas": 0
}
}
print("creating '%s' index..." % (INDEX_NAME))
res = es.indices.create(index = INDEX_NAME, body = request_body)
print(" response: '%s'" % (res))
# bulk index the data
print("bulk indexing...")
bulk(es, corpus_iterator())
# sanity check
res = es.search(index = INDEX_NAME, size=2, body={"query": {"match_all": {}}})
print("results:")
for hit in res['hits']['hits']:
print(hit["_source"])
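For reference, each item yielded by corpus_iterator() is one bulk action for elasticsearch.helpers.bulk. Conceptually it looks like the sketch below; the concrete index and type names live in the settings module, which is not shown, so the values here are assumptions.
action = {
    "_index": "artdatis",  # assumed value of INDEX_NAME
    "_type": "doc",        # assumed value of TYPE_NAME
    "_source": {"file_path": "/images/x.jpg", "text": "...", "start_text": "..."},
}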
| 30.014085
| 99
| 0.603003
| 269
| 2,131
| 4.613383
| 0.442379
| 0.050766
| 0.067687
| 0.025786
| 0.043513
| 0.043513
| 0.043513
| 0
| 0
| 0
| 0
| 0.008755
| 0.249648
| 2,131
| 70
| 100
| 30.442857
| 0.767355
| 0.175504
| 0
| 0.045455
| 0
| 0
| 0.174512
| 0.01837
| 0
| 0
| 0
| 0
| 0
| 1
| 0.022727
| false
| 0
| 0.113636
| 0
| 0.136364
| 0.181818
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
0af106828dec53475f13db7b60f12e654896ac46
| 277
|
py
|
Python
|
src/tokens.py
|
PythonIsMagic/ponyup
|
3b2630d573cd46d0569f713c6d4c3790688dc62d
|
[
"MIT"
] | 1
|
2022-03-22T12:41:35.000Z
|
2022-03-22T12:41:35.000Z
|
src/tokens.py
|
PythonIsMagic/ponyup
|
3b2630d573cd46d0569f713c6d4c3790688dc62d
|
[
"MIT"
] | null | null | null |
src/tokens.py
|
PythonIsMagic/ponyup
|
3b2630d573cd46d0569f713c6d4c3790688dc62d
|
[
"MIT"
] | 1
|
2022-03-22T12:41:37.000Z
|
2022-03-22T12:41:37.000Z
|
"""
A Token is a button or other object on the table that represents a position,
a game state, a player state, or some other piece of info.
"""
class Token(object):
def __init__(self, name, table):
self.table = table
self.name = name
self.seat = None
| 25.181818
| 131
| 0.65343
| 43
| 277
| 4.116279
| 0.627907
| 0.090395
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.263538
| 277
| 10
| 132
| 27.7
| 0.867647
| 0.472924
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.2
| false
| 0
| 0
| 0
| 0.4
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
0af1366c588c694d1d5fccc2c589b64a4b89883f
| 1,089
|
py
|
Python
|
Chapter09/interpolation_search.py
|
Xiangs18/Algorithms-with-Python-Second-Edition
|
96844e1ae7054e099772dc691c1f41f15c2bfba5
|
[
"MIT"
] | null | null | null |
Chapter09/interpolation_search.py
|
Xiangs18/Algorithms-with-Python-Second-Edition
|
96844e1ae7054e099772dc691c1f41f15c2bfba5
|
[
"MIT"
] | null | null | null |
Chapter09/interpolation_search.py
|
Xiangs18/Algorithms-with-Python-Second-Edition
|
96844e1ae7054e099772dc691c1f41f15c2bfba5
|
[
"MIT"
] | null | null | null |
def nearest_mid(input_list, lower_bound_index, upper_bound_index, search_value):
return lower_bound_index + (
(upper_bound_index - lower_bound_index)
// (input_list[upper_bound_index] - input_list[lower_bound_index])
) * (search_value - input_list[lower_bound_index])
def interpolation_search(ordered_list, term):
size_of_list = len(ordered_list) - 1
index_of_first_element = 0
index_of_last_element = size_of_list
while index_of_first_element <= index_of_last_element:
mid_point = nearest_mid(
ordered_list, index_of_first_element, index_of_last_element, term
)
if mid_point > index_of_last_element or mid_point < index_of_first_element:
return None
if ordered_list[mid_point] == term:
return mid_point
if term > ordered_list[mid_point]:
index_of_first_element = mid_point + 1
else:
index_of_last_element = mid_point - 1
store = [2, 4, 5, 12, 43, 54, 60, 77]
a = interpolation_search(store, 2)
print("Index position of value 2 is ", a)
| 37.551724
| 83
| 0.693297
| 157
| 1,089
| 4.343949
| 0.267516
| 0.102639
| 0.109971
| 0.139296
| 0.409091
| 0.325513
| 0.108504
| 0.108504
| 0
| 0
| 0
| 0.0227
| 0.231405
| 1,089
| 28
| 84
| 38.892857
| 0.792115
| 0
| 0
| 0
| 0
| 0
| 0.02663
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.083333
| false
| 0
| 0
| 0.041667
| 0.208333
| 0.041667
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
0af230c3ec87bec2b40fe4cc74ba6765304b22f0
| 13,752
|
py
|
Python
|
src/macro_pack.py
|
lulinsheng/macro_pack
|
4e9d0178354bad2aa557298f44ba5d4385a72a2b
|
[
"Apache-2.0"
] | null | null | null |
src/macro_pack.py
|
lulinsheng/macro_pack
|
4e9d0178354bad2aa557298f44ba5d4385a72a2b
|
[
"Apache-2.0"
] | null | null | null |
src/macro_pack.py
|
lulinsheng/macro_pack
|
4e9d0178354bad2aa557298f44ba5d4385a72a2b
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/python3
# encoding: utf-8
import os
import sys
import getopt
import logging
import shutil
import psutil
from modules.com_run import ComGenerator
from modules.web_server import ListenServer
from modules.Wlisten_server import WListenServer
from modules.payload_builder_factory import PayloadBuilderFactory
from common import utils, mp_session, help
from common.utils import MSTypes
from common.definitions import VERSION, LOGLEVEL
if sys.platform == "win32":
try:
import win32com.client #@UnresolvedImport @UnusedImport
    except ImportError:
print("Error: Could not find win32com.")
sys.exit(1)
MP_TYPE="Pro"
if utils.checkModuleExist("pro_core"):
from pro_modules.utilities.dcom_run import DcomGenerator
from pro_modules.payload_builders.containers import ContainerGenerator
from pro_core.payload_builder_factory_pro import PayloadBuilderFactoryPro
from pro_core import arg_mgt_pro, mp_session_pro
else:
MP_TYPE="Community"
from colorama import init
from termcolor import colored
# {PyArmor Protection Code}
# {PyArmor Plugins}
# use Colorama to make Termcolor work on Windows too
init()
WORKING_DIR = "temp"
BANNER = help.getToolPres()
def main(argv):
global MP_TYPE
logLevel = LOGLEVEL
# initialize macro_pack session object
working_directory = os.path.join(os.getcwd(), WORKING_DIR)
if MP_TYPE == "Pro":
mpSession = mp_session_pro.MpSessionPro(working_directory, VERSION, MP_TYPE)
else:
mpSession = mp_session.MpSession(working_directory, VERSION, MP_TYPE)
try:
longOptions = ["embed=", "listen=", "port=", "webdav-listen=", "generate=", "quiet", "input-file=", "encode",
"obfuscate", "obfuscate-form", "obfuscate-names", "obfuscate-declares", "obfuscate-strings",
"obfuscate-names-charset=", "obfuscate-names-minlen=", "obfuscate-names-maxlen=",
"file=","template=","listtemplates","listformats","icon=", "start-function=","uac-bypass",
"unicode-rtlo=", "dde", "print", "force-yes", "help"]
shortOptions= "e:l:w:s:f:t:G:hqmop"
# only for Pro release
if MP_TYPE == "Pro":
longOptions.extend(arg_mgt_pro.proArgsLongOptions)
shortOptions += arg_mgt_pro.proArgsShortOptions
# Only enabled on windows
if sys.platform == "win32":
longOptions.extend(["run=", "run-visible"])
opts, args = getopt.getopt(argv, shortOptions, longOptions) # @UnusedVariable
except getopt.GetoptError:
help.printUsage(BANNER, sys.argv[0])
sys.exit(2)
for opt, arg in opts:
if opt in ("-o", "--obfuscate"):
mpSession.obfuscateForm = True
mpSession.obfuscateNames = True
mpSession.obfuscateStrings = True
mpSession.obfuscateDeclares = True
elif opt=="--obfuscate-form":
mpSession.obfuscateForm = True
elif opt=="--obfuscate-declares":
mpSession.obfuscateDeclares = True
elif opt=="--obfuscate-names":
mpSession.obfuscateNames = True
elif opt=="--obfuscate-names-charset":
try:
mpSession.obfuscatedNamesCharset = arg
except ValueError:
help.printUsage(BANNER, sys.argv[0])
sys.exit(0)
elif opt=="--obfuscate-names-minlen":
try:
mpSession.obfuscatedNamesMinLen = int(arg)
except ValueError:
help.printUsage(BANNER, sys.argv[0])
sys.exit(0)
if mpSession.obfuscatedNamesMinLen < 4 or mpSession.obfuscatedNamesMinLen > 255:
help.printUsage(BANNER, sys.argv[0])
sys.exit(0)
elif opt=="--obfuscate-names-maxlen":
try:
mpSession.obfuscatedNamesMaxLen = int(arg)
except ValueError:
help.printUsage(BANNER, sys.argv[0])
sys.exit(0)
if mpSession.obfuscatedNamesMaxLen < 4 or mpSession.obfuscatedNamesMaxLen > 255:
help.printUsage(BANNER, sys.argv[0])
sys.exit(0)
elif opt=="--obfuscate-strings":
mpSession.obfuscateStrings = True
elif opt=="-s" or opt=="--start-function":
mpSession.startFunction = arg
elif opt=="-l" or opt=="--listen":
mpSession.listen = True
mpSession.listenRoot = os.path.abspath(arg)
elif opt=="--port":
mpSession.listenPort = int(arg)
mpSession.WlistenPort = int(arg)
elif opt=="--icon":
mpSession.icon = arg
elif opt=="-w" or opt=="--webdav-listen":
mpSession.Wlisten = True
mpSession.WRoot = os.path.abspath(arg)
elif opt == "-f" or opt== "--input-file":
mpSession.fileInput = arg
elif opt == "-e" or opt== "--embed":
mpSession.embeddedFilePath = os.path.abspath(arg)
elif opt=="-t" or opt=="--template":
mpSession.template = arg
elif opt == "--listtemplates":
help.printTemplatesUsage(BANNER, sys.argv[0])
sys.exit(0)
elif opt=="-q" or opt=="--quiet":
logLevel = "WARN"
elif opt=="-p" or opt=="--print":
mpSession.printFile = True
elif opt == "--dde":
if sys.platform == "win32":
mpSession.ddeMode = True
elif opt == "--run":
if sys.platform == "win32":
mpSession.runTarget = os.path.abspath(arg)
elif opt == "--run-visible":
if sys.platform == "win32":
mpSession.runVisible = True
elif opt == "--force-yes":
mpSession.forceYes = True
elif opt=="--uac-bypass":
mpSession.uacBypass = True
elif opt == "--unicode-rtlo":
mpSession.unicodeRtlo = arg
elif opt in ("-G", "--generate"):
mpSession.outputFilePath = os.path.abspath(arg)
elif opt == "--listformats":
help.printAvailableFormats(BANNER)
sys.exit(0)
elif opt=="-h" or opt=="--help":
help.printUsage(BANNER, sys.argv[0])
sys.exit(0)
else:
if MP_TYPE == "Pro":
arg_mgt_pro.processProArg(opt, arg, mpSession, BANNER)
else:
help.printUsage(BANNER, sys.argv[0])
sys.exit(0)
if logLevel == "INFO":
os.system('cls' if os.name == 'nt' else 'clear')
# Logging
logging.basicConfig(level=getattr(logging, logLevel),format="%(message)s", handlers=[utils.ColorLogFiler()])
logging.info(colored(BANNER, 'green'))
logging.info(" [+] Preparations...")
# check input args
if mpSession.fileInput is None:
# Argument not supplied, try to get file content from stdin
if not os.isatty(0): # check if something is being piped
logging.info(" [-] Waiting for piped input feed...")
mpSession.stdinContent = sys.stdin.readlines()
# Close Stdin pipe, so we can call input() later without triggering EOF
#sys.stdin.close()
if sys.platform == "win32":
sys.stdin = open("conIN$")
else:
sys.stdin = sys.__stdin__
else:
if not os.path.isfile(mpSession.fileInput):
logging.error(" [!] ERROR: Could not find %s!" % mpSession.fileInput)
sys.exit(2)
else:
logging.info(" [-] Input file path: %s" % mpSession.fileInput)
if MP_TYPE == "Pro":
if mpSession.communityMode:
logging.warning(" [!] Running in community mode (pro features not applied)")
MP_TYPE="Community"
else:
arg_mgt_pro.verify(mpSession)
# Check output file format
if mpSession.outputFilePath:
if not os.path.isdir(os.path.dirname(mpSession.outputFilePath)):
logging.error(" [!] Could not find output folder %s." % os.path.dirname(mpSession.outputFilePath))
sys.exit(2)
if mpSession.outputFileType == MSTypes.UNKNOWN:
logging.error(" [!] %s is not a supported extension. Use --listformats to view supported MacroPack formats." % os.path.splitext(mpSession.outputFilePath)[1])
sys.exit(2)
else:
logging.info(" [-] Target output format: %s" % mpSession.outputFileType)
elif not mpSession.listen and not mpSession.Wlisten and mpSession.runTarget is None and (MP_TYPE != "Pro" or mpSession.dcomTarget is None):
logging.error(" [!] You need to provide an output file! (get help using %s -h)" % os.path.basename(utils.getRunningApp()))
sys.exit(2)
if not mpSession.isTrojanMode:
# verify that output file does not already exist
if os.path.isfile(mpSession.outputFilePath):
logging.error(" [!] ERROR: Output file %s already exist!" % mpSession.outputFilePath)
sys.exit(2)
#Create temporary folder
logging.info(" [-] Temporary working dir: %s" % working_directory)
if not os.path.exists(working_directory):
os.makedirs(working_directory)
try:
# Create temporary work file.
if mpSession.ddeMode or mpSession.template or (mpSession.outputFileType not in MSTypes.VB_FORMATS+[MSTypes.VBA] and not mpSession.htaMacro):
inputFile = os.path.join(working_directory, "command.cmd")
else:
inputFile = os.path.join(working_directory, utils.randomAlpha(9)) + ".vba"
if mpSession.stdinContent is not None:
import time
time.sleep(0.4) # Needed to avoid some weird race condition
logging.info(" [-] Store std input in file...")
f = open(inputFile, 'w')
f.writelines(mpSession.stdinContent)
f.close()
else:
# Create temporary work file
if mpSession.fileInput is not None:
                # Check there are no binary chars in the input file
if utils.isBinaryString(open(mpSession.fileInput, 'rb').read(1024)):
logging.error(" [!] ERROR: Invalid format for %s. Input should be text format containing your VBA script." % mpSession.fileInput)
logging.info(" [+] Cleaning...")
if os.path.isdir(working_directory):
shutil.rmtree(working_directory)
sys.exit(2)
logging.info(" [-] Store input file...")
shutil.copy2(mpSession.fileInput, inputFile)
if os.path.isfile(inputFile):
logging.info(" [-] Temporary input file: %s" % inputFile)
# Edit outputfile name to spoof extension if unicodeRtlo option is enabled
if mpSession.unicodeRtlo:
# Reminder; mpSession.unicodeRtlo contains the extension we want to spoof, such as "jpg"
logging.info(" [+] Inject %s false extension with unicode RTLO" % mpSession.unicodeRtlo)
# Separate document path and extension
(fileName, fileExtension) = os.path.splitext(mpSession.outputFilePath)
logging.info(" [-] Extension %s " % fileExtension)
# Append unicode RTLO to file name
fileName += '\u202e'
# Append extension to spoof in reverse order
fileName += '\u200b' + mpSession.unicodeRtlo[::-1] # Prepend invisible space so filename does not end with flagged extension
# Append file extension
fileName += fileExtension
mpSession.outputFilePath = fileName
logging.info(" [-] File name modified to: %s" % mpSession.outputFilePath)
# Retrieve the right payload builder
if mpSession.outputFileType != MSTypes.UNKNOWN:
if MP_TYPE == "Pro" and not mpSession.communityMode:
payloadBuilder = PayloadBuilderFactoryPro().getPayloadBuilder(mpSession)
else:
payloadBuilder = PayloadBuilderFactory().getPayloadBuilder(mpSession)
# Build payload
if payloadBuilder is not None:
payloadBuilder.run()
if MP_TYPE == "Pro":
generator = ContainerGenerator(mpSession)
generator.run()
#run com attack
if mpSession.runTarget:
generator = ComGenerator(mpSession)
generator.run()
if MP_TYPE == "Pro":
#run dcom attack
if mpSession.dcom:
generator = DcomGenerator(mpSession)
generator.run()
# Activate Web server
if mpSession.listen:
listener = ListenServer(mpSession)
listener.run()
# Activate WebDav server
if mpSession.Wlisten:
Wlistener = WListenServer(mpSession)
Wlistener.run()
except Exception:
logging.exception(" [!] Exception caught!")
except KeyboardInterrupt:
logging.error(" [!] Keyboard interrupt caught!")
logging.info(" [+] Cleaning...")
if os.path.isdir(working_directory):
shutil.rmtree(working_directory)
logging.info(" Done!\n")
sys.exit(0)
if __name__ == '__main__':
# check if running from explorer, if yes restart from cmd line
# running_from = psutil.Process(os.getpid()).parent().parent().name()
# if running_from == 'explorer.exe':
# os.system("cmd.exe /k \"%s\"" % utils.getRunningApp())
# PyArmor Plugin: checkPlug()
main(sys.argv[1:])
| 40.210526
| 171
| 0.592568
| 1,449
| 13,752
| 5.572119
| 0.257419
| 0.023408
| 0.013624
| 0.015606
| 0.183924
| 0.125093
| 0.076666
| 0.076666
| 0.072331
| 0.06428
| 0
| 0.007341
| 0.296684
| 13,752
| 341
| 172
| 40.328446
| 0.82744
| 0.104639
| 0
| 0.296154
| 0
| 0
| 0.144068
| 0.011653
| 0
| 0
| 0
| 0
| 0
| 1
| 0.003846
| false
| 0.011538
| 0.080769
| 0
| 0.084615
| 0.053846
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
0af340336c716992b681bade66c39e840439919b
| 6,148
|
py
|
Python
|
etl/load/elasticsearch.py
|
bilalelhoudaigui/plant-brapi-etl-data-lookup-gnpis
|
973dc444eac6d1cc80c020dd8b9a4656f70eeafb
|
[
"BSD-3-Clause"
] | 3
|
2018-06-04T09:14:55.000Z
|
2018-10-25T14:32:03.000Z
|
etl/load/elasticsearch.py
|
bilalelhoudaigui/plant-brapi-etl-data-lookup-gnpis
|
973dc444eac6d1cc80c020dd8b9a4656f70eeafb
|
[
"BSD-3-Clause"
] | 18
|
2020-06-04T07:08:17.000Z
|
2022-02-02T17:02:17.000Z
|
etl/load/elasticsearch.py
|
bilalelhoudaigui/plant-brapi-etl-data-lookup-gnpis
|
973dc444eac6d1cc80c020dd8b9a4656f70eeafb
|
[
"BSD-3-Clause"
] | 4
|
2019-04-18T12:53:19.000Z
|
2019-11-22T08:53:19.000Z
|
# Load json bulk files into elasticsearch
import json
import os
import time
import traceback
import elasticsearch
from etl.common.store import list_entity_files
from etl.common.utils import get_folder_path, get_file_path, create_logger, first, replace_template
class ElasticSearchException(Exception):
pass
# Init Elasticsearch and test connection
def init_es_client(url, logger):
es_client = elasticsearch.Elasticsearch([url])
try:
info = es_client.info()
logger.debug('Connected to node "{}" of cluster "{}" on "{}"'.format(info['name'], info['cluster_name'], url))
except elasticsearch.exceptions.ConnectionError as e:
logger.error('Connection error: Elasticsearch unavailable on "{}".\nPlease check your configuration'.format(url))
raise e
return es_client
def check_error(response):
if response.get('errors'):
raise ElasticSearchException(response)
def create_index(es_client, index_name, logger):
logger.debug('Creating index "{}"...'.format(index_name))
check_error(es_client.indices.create(index_name))
def delete_index(es_client, index_name, logger):
logger.debug('Deleting index "{}"...'.format(index_name))
check_error(es_client.indices.delete(index_name))
def create_template(es_client, es_config, document_type, base_index_name, logger):
template_name = 'template_elixir_' + base_index_name
template_pattern = base_index_name + '-d*'
mapping = es_config['document-mappings'].get(document_type+"_mapping")
if not mapping:
return
logger.debug('Creating template "{}" on pattern "{}"...'.format(template_name, template_pattern))
template_body = {'template': template_pattern, 'mappings': mapping}
if 'index-settings' in es_config:
template_body['settings'] = es_config['index-settings']
check_error(es_client.indices.put_template(name=template_name, body=template_body))
def bulk_index(es_client, index_name, file_path, logger):
file_name = os.path.basename(file_path)
logger.debug('Bulk indexing file "{}" in index "{}"...'.format(file_name, index_name))
with open(file_path, 'r') as file:
check_error(es_client.bulk(index=index_name, body=file.read(), timeout='2000ms'))
def create_alias(es_client, alias_name, base_index_name, logger):
logger.debug('Creating alias "{}" for index "{}"'.format(alias_name, base_index_name))
check_error(es_client.indices.put_alias(alias_name, base_index_name))
def get_indices(es_client, base_index_name):
indices = es_client.cat.indices(base_index_name + '-d*', params={'h': 'index'})
index_names = list(map(lambda i: i['index'], indices))
index_names.sort(reverse=True)
return index_names
def load_source(source, config, source_bulk_dir, log_dir):
"""
Full Elasticsearch documents indexing
"""
source_name = source['schema:identifier']
action = 'load-elasticsearch-' + source_name
log_file = get_file_path([log_dir, action], ext='.log', recreate=True)
logger = create_logger(source_name, log_file, config['options']['verbose'])
load_config = config['load-elasticsearch']
es_client = init_es_client(load_config['url'], logger)
logger.info("Loading '{}' into elasticsearch '{}'...".format(source_bulk_dir, load_config['url']))
try:
if not os.path.exists(source_bulk_dir):
raise FileNotFoundError(
'No such file or directory: \'{}\'.\n'
'Please make sure you have run the BrAPI extraction and Elasticsearch document transformation'
' before trying to launch the transformation process.'
.format(source_bulk_dir))
bulk_files = list(list_entity_files(source_bulk_dir))
all_document_types = set(map(first, bulk_files))
document_types = load_config.get('document-types') or all_document_types
document_types = document_types.intersection(all_document_types)
index_by_document = dict()
logger.info("Preparing index with template mapping...")
timestamp = int(time.time())
for document_type in document_types:
base_index_name = replace_template(
load_config['index-template'],
{'source': source['schema:identifier'], 'documentType': document_type}
).lower()
create_template(es_client, load_config, document_type, base_index_name, logger)
index_name = base_index_name + '-d' + str(timestamp)
create_index(es_client, index_name, logger)
index_by_document[document_type] = base_index_name, index_name
logger.info("Bulk indexing...")
for document_type, file_path in bulk_files:
if document_type in index_by_document:
base_index_name, index_name = index_by_document[document_type]
bulk_index(es_client, index_name, file_path, logger)
logger.info("Creating index aliases and deleting old indices...")
for document_type, (base_index_name, index_name) in index_by_document.items():
create_alias(es_client, index_name, base_index_name, logger)
            new_index, *old_indices = get_indices(es_client, base_index_name)
            # Keep the two newest indices; delete anything older.
            for old_index in old_indices[1:]:
                delete_index(es_client, old_index, logger)
logger.info("SUCCEEDED Loading {}.".format(source_name))
except Exception as e:
logger.debug(traceback.format_exc())
logger.debug(getattr(e, 'long_message', ''))
logger.info("FAILED Loading {} Elasticsearch documents.\n"
"=> Check the logs ({}) for more details."
.format(source_name, log_file))
def main(config):
log_dir = config['log-dir']
bulk_dir = os.path.join(config['data-dir'], 'json-bulk')
if not os.path.exists(bulk_dir):
raise Exception('No json bulk folder found in ' + bulk_dir)
sources = config['sources']
for (source_name, source) in sources.items():
source_bulk_dir = get_folder_path([bulk_dir, source_name])
load_source(source, config, source_bulk_dir, log_dir)
| 40.183007
| 121
| 0.689655
| 788
| 6,148
| 5.101523
| 0.214467
| 0.071642
| 0.051741
| 0.025373
| 0.218159
| 0.169403
| 0.151244
| 0.08209
| 0.062687
| 0
| 0
| 0.001011
| 0.195348
| 6,148
| 152
| 122
| 40.447368
| 0.811603
| 0.019031
| 0
| 0.018349
| 0
| 0
| 0.177752
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.091743
| false
| 0.009174
| 0.06422
| 0
| 0.192661
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
0af3b89835e63f3225a17831847f039cebf091f8
| 6,798
|
py
|
Python
|
geoplot/crs.py
|
redfrexx/geoplot
|
8231baab0e286f1dec870dd5e8c6c8218e5b5da7
|
[
"MIT"
] | null | null | null |
geoplot/crs.py
|
redfrexx/geoplot
|
8231baab0e286f1dec870dd5e8c6c8218e5b5da7
|
[
"MIT"
] | null | null | null |
geoplot/crs.py
|
redfrexx/geoplot
|
8231baab0e286f1dec870dd5e8c6c8218e5b5da7
|
[
"MIT"
] | null | null | null |
"""
This module defines the ``geoplot`` coordinate reference system classes, wrappers on
``cartopy.crs`` objects meant to be used as parameters to the ``projection`` parameter of all
front-end ``geoplot`` outputs. For the list of Cartopy CRS objects this module derives from,
refer to http://scitools.org.uk/cartopy/docs/latest/crs/projections.html.
"""
import cartopy.crs as ccrs
import geopandas as gpd
class Base:
# TODO: RotatedPole
"""
Generate instances of ``cartopy.crs``.*name* where *name* matches the instance's class name.
    Methods
    -------
    `load` : Return a Cartopy CRS initialized with defaults from the `centerings` dictionary,
        overridden by initialization parameters.
    `_as_mpl_axes` : Return the result of calling cartopy's ``_as_mpl_axes`` for `self.load`
        called with empty `df` and `centerings`.
"""
def __init__(self, **kwargs):
"""Save parameters that initialize Cartopy CRSs."""
self.args = kwargs
def load(self, df, centerings):
"""
A meta-method which abstracts the internals of individual projections' load procedures.
Parameters
----------
df : GeoDataFrame
The GeoDataFrame which has been passed as input to the plotter at the top level.
This data is needed to calculate reasonable centering variables in cases in which the
user does not already provide them; which is, incidentally, the reason behind all of
this funny twice-instantiation loading in the first place.
centerings: dict
A dictionary containing names and centering methods. Certain projections have certain
centering parameters whilst others lack them. For example, the geospatial projection
contains both ``central_longitude`` and ``central_latitude`` instance parameter, which
together control the center of the plot, while the North Pole Stereo projection has
only a ``central_longitude`` instance parameter, implying that latitude is fixed (as
indeed it is, as this projection is centered on the North Pole!).
A top-level centerings method is provided in each of the ``geoplot`` top-level plot
functions; each of the projection wrapper classes defined here in turn selects the
        functions from this list relevant to this particular instance and passes them to
the ``_generic_load`` method here.
We then in turn execute these functions to get defaults for our ``df`` and pass them
off to our output ``cartopy.crs`` instance.
Returns
-------
crs : ``cartopy.crs`` object instance
Returns a ``cartopy.crs`` object instance whose appropriate instance variables have
been set to reasonable defaults wherever not already provided by the user.
"""
return getattr(ccrs, self.__class__.__name__)(**{**centerings, **self.args})
def _as_mpl_axes(self):
"""
When ``matplotlib`` is provided a projection via a ``projection`` keyword argument, it
expects to get something with a callable ``as_mpl_axes`` method. The precise details of
what this method does, exactly, are not important: it suffices to know that every
``cartopy`` coordinate reference system object has one.
When we pass a ``geoplot.crs`` crs object to a ``geoplot`` function, the loading and
centering of the data occurs automatically (using the function defined immediately above).
Since we control what ``geoplot`` does at execution, we gracefully integrate this two-step
procedure into the function body.
But there are also use cases outside of our control in which we are forced to pass a
``geoplot.crs`` object without having first called ``load``: most prominently, when
creating a plot containing subplots, the "overall" projection must be pre-loaded. It's
possible to get around this by using ``cartopy.crs`` objects instead, but this is
        inelegant. This method is a better way: when a ``geoplot.crs`` object is called by
        ``matplotlib``, it silently swaps itself out for a vanilla version of its ``cartopy.crs``
mirror, and calls that function's ``_as_mpl_axes`` instead.
Parameters
----------
proj : geoplot.crs projection instance
The instance in question (self, in the method body).
Returns
-------
Mutates into a ``cartopy.crs`` object and returns the result of executing ``_as_mpl_axes``
on that object instead.
"""
proj = self.load(gpd.GeoDataFrame(), dict())
return proj._as_mpl_axes()
class Filtering(Base):
"""CRS that `load`s with `centering` restricted to keys in `self.filter_`."""
def load(self, df, centerings):
"""Call `load` method with `centerings` filtered to keys in `self.filter_`."""
return super().load(
df,
{key: value
for key, value in centerings.items()
if key in self.filter_}
)
class LongitudeCentering(Filtering):
"""Form a CRS that centers by longitude."""
filter_ = {'central_longitude'}
class LatitudeCentering(Filtering):
"""For a CRS that centers by latitude."""
filter_ = {'central_latitude'}
PlateCarree,\
LambertCylindrical,\
Mercator,\
Miller,\
Mollweide,\
Robinson,\
Sinusoidal,\
InterruptedGoodeHomolosine,\
Geostationary,\
NorthPolarStereo,\
SouthPolarStereo = tuple(
type(name, (LongitudeCentering,), {})
for name in ('PlateCarree',
'LambertCylindrical',
'Mercator',
'Miller',
'Mollweide',
'Robinson',
'Sinusoidal',
'InterruptedGoodeHomolosine',
'Geostationary',
'NorthPolarStereo',
'SouthPolarStereo')
)
Gnomonic = type('Gnomonic', (LatitudeCentering,), {})
AlbersEqualArea,\
AzimuthalEquidistant,\
LambertConformal,\
Orthographic,\
Stereographic,\
TransverseMercator,\
LambertAzimuthalEqualArea,\
UTM,\
OSGB,\
EuroPP,\
OSNI = tuple(
type(name, (Base,), {})
for name in ('AlbersEqualArea',
'AzimuthalEquidistant',
'LambertConformal',
'Orthographic',
'Stereographic',
'TransverseMercator',
'LambertAzimuthalEqualArea',
'UTM',
'OSGB',
'EuroPP',
'OSNI')
)
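A hedged usage sketch: these classes are meant to be handed to geoplot's projection parameter. The shapefile path below is a placeholder, not from the source.
import geopandas as gpd
import geoplot
import geoplot.crs as gcrs

world = gpd.read_file('world.shp')  # placeholder dataset path
geoplot.polyplot(world, projection=gcrs.Orthographic(central_longitude=10))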
| 39.523256
| 98
| 0.624595
| 759
| 6,798
| 5.536232
| 0.380764
| 0.026178
| 0.014993
| 0.00476
| 0.159448
| 0.131842
| 0.131842
| 0.131842
| 0.131842
| 0.131842
| 0
| 0
| 0.288614
| 6,798
| 171
| 99
| 39.754386
| 0.8689
| 0.601942
| 0
| 0.028169
| 0
| 0
| 0.143761
| 0.023056
| 0
| 0
| 0
| 0.005848
| 0
| 1
| 0.056338
| false
| 0
| 0.028169
| 0
| 0.211268
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
0af473baeece942d5629ff430bbc40a3d23df7c3
| 559
|
py
|
Python
|
tmoga/utils/SDE.py
|
zjg540066169/tmoga
|
a3c3ecd0d72fc7c57fd5e5a624780e7ebf199c61
|
[
"Apache-2.0"
] | 2
|
2021-10-06T04:45:52.000Z
|
2022-03-20T01:18:05.000Z
|
tmoga/utils/SDE.py
|
zjg540066169/tmoga
|
a3c3ecd0d72fc7c57fd5e5a624780e7ebf199c61
|
[
"Apache-2.0"
] | 1
|
2022-03-20T01:45:09.000Z
|
2022-03-21T15:17:21.000Z
|
tmoga/utils/SDE.py
|
zjg540066169/tmoga
|
a3c3ecd0d72fc7c57fd5e5a624780e7ebf199c61
|
[
"Apache-2.0"
] | 3
|
2021-10-09T08:08:44.000Z
|
2022-03-20T01:18:07.000Z
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Provides a function to calculate the SDE (shift-based density estimation) distance.
@auth: Jungang Zou
@date: 2021/05/05
"""
def SDE(front, values1, values2):
shifted_dict = {}
for i in front:
shifted_dict[i] = [(values1[i], values2[i])]
shifted_list = []
for j in front:
if i == j:
continue
else:
shifted_list.append((min(values1[i], values1[j]), min(values2[i], values2[j])))
shifted_dict[i].append(shifted_list)
return shifted_dict
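A tiny worked example of SDE() (my addition): for each solution, the result pairs its own objective point with the list of shifted neighbor points.
front = [0, 1]
values1 = [1, 2]
values2 = [4, 3]
print(SDE(front, values1, values2))
# {0: [(1, 4), [(1, 3)]], 1: [(2, 3), [(1, 3)]]}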
| 25.409091
| 95
| 0.554562
| 71
| 559
| 4.267606
| 0.507042
| 0.145215
| 0.079208
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.046154
| 0.302326
| 559
| 22
| 96
| 25.409091
| 0.730769
| 0.221825
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.083333
| false
| 0
| 0
| 0
| 0.166667
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
0af54c84e47849c156e92dd294fed072b3ed4861
| 1,183
|
py
|
Python
|
tests/v3_validation/cattlevalidationtest/core/test_logs_api.py
|
bmdepesa/validation-tests
|
23e7ab95ce76744483a0657f790b42a88a93436d
|
[
"Apache-2.0"
] | 7
|
2015-11-18T17:43:08.000Z
|
2021-07-14T09:48:18.000Z
|
tests/v3_validation/cattlevalidationtest/core/test_logs_api.py
|
bmdepesa/validation-tests
|
23e7ab95ce76744483a0657f790b42a88a93436d
|
[
"Apache-2.0"
] | 175
|
2015-07-09T18:41:24.000Z
|
2021-06-10T21:23:27.000Z
|
tests/v3_validation/cattlevalidationtest/core/test_logs_api.py
|
bmdepesa/validation-tests
|
23e7ab95ce76744483a0657f790b42a88a93436d
|
[
"Apache-2.0"
] | 25
|
2015-08-08T04:54:24.000Z
|
2021-05-25T21:10:37.000Z
|
from common_fixtures import * # NOQA
import websocket as ws
import pytest
def get_logs(client):
hosts = client.list_host(kind='docker', removed_null=True)
assert len(hosts) > 0
in_log = random_str()
cmd = '/bin/bash -c "echo {}; sleep 2"'.format(in_log)
c = client.create_container(image=TEST_IMAGE_UUID, command=cmd)
c = client.wait_success(c)
logs = c.logs()
return logs, in_log, c
def test_logs_token(client):
logs, in_log, c = get_logs(client)
conn = ws.create_connection(logs.url + '?token='+logs.token)
result = conn.recv()
assert result is not None
assert in_log in result
delete_all(client, [c])
def test_logs_no_token(client):
logs, _, c = get_logs(client)
with pytest.raises(Exception) as excinfo:
ws.create_connection(logs.url)
assert 'Handshake status 401' in str(excinfo.value)
delete_all(client, [c])
def test_host_api_garbage_token(client):
logs, _, c = get_logs(client)
with pytest.raises(Exception) as excinfo:
ws.create_connection(logs.url+'?token=random.garbage.token')
assert 'Handshake status 401' in str(excinfo.value)
delete_all(client, [c])
| 28.853659
| 68
| 0.687236
| 176
| 1,183
| 4.426136
| 0.380682
| 0.032092
| 0.066752
| 0.053915
| 0.455712
| 0.455712
| 0.372272
| 0.372272
| 0.372272
| 0.372272
| 0
| 0.008403
| 0.195266
| 1,183
| 40
| 69
| 29.575
| 0.809874
| 0.003381
| 0
| 0.290323
| 0
| 0
| 0.094308
| 0.02294
| 0
| 0
| 0
| 0
| 0.16129
| 1
| 0.129032
| false
| 0
| 0.096774
| 0
| 0.258065
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
0af634a53b2ebcc4683b0c1863c9043af5a4905d
| 1,090
|
py
|
Python
|
drybell/drybell_lfs_spark.py
|
jsnlp/snorkel-tutorials
|
b4cda9f918daf77f4011ec1598c08d9bd7e51c39
|
[
"Apache-2.0"
] | 315
|
2019-07-27T22:49:20.000Z
|
2022-03-30T10:02:02.000Z
|
drybell/drybell_lfs_spark.py
|
jsnlp/snorkel-tutorials
|
b4cda9f918daf77f4011ec1598c08d9bd7e51c39
|
[
"Apache-2.0"
] | 133
|
2019-07-25T02:07:37.000Z
|
2022-03-29T12:08:32.000Z
|
drybell/drybell_lfs_spark.py
|
jsnlp/snorkel-tutorials
|
b4cda9f918daf77f4011ec1598c08d9bd7e51c39
|
[
"Apache-2.0"
] | 173
|
2019-08-13T02:27:11.000Z
|
2022-03-30T05:26:40.000Z
|
from pyspark.sql import Row
from snorkel.labeling.lf import labeling_function
from snorkel.labeling.lf.nlp_spark import spark_nlp_labeling_function
from snorkel.preprocess import preprocessor
from drybell_lfs import load_celebrity_knowledge_base
ABSTAIN = -1
NEGATIVE = 0
POSITIVE = 1
@preprocessor()
def combine_text(x):
return Row(title=x.title, body=x.body, article=f"{x.title} {x.body}")
@spark_nlp_labeling_function(text_field="article", pre=[combine_text])
def article_mentions_person(x):
for ent in x.doc.ents:
if ent.label_ == "PERSON":
return ABSTAIN
return NEGATIVE
@spark_nlp_labeling_function(
text_field="article",
pre=[combine_text],
resources=dict(celebrity_knowledge_base=load_celebrity_knowledge_base()),
)
def person_in_db(x, celebrity_knowledge_base):
for ent in x.doc.ents:
if ent.label_ == "PERSON" and ent.text.lower() in celebrity_knowledge_base:
return POSITIVE
return ABSTAIN
@labeling_function()
def body_contains_fortune(x):
return POSITIVE if "fortune" in x.body else ABSTAIN
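These labeling functions would normally be applied to a Spark RDD of Rows. A sketch under the assumption that Snorkel's Spark applier is available at the import path used by the Snorkel tutorials; df_rdd is a placeholder RDD.
from snorkel.labeling.apply.spark import SparkLFApplier

lfs = [article_mentions_person, person_in_db, body_contains_fortune]
applier = SparkLFApplier(lfs)
L = applier.apply(df_rdd)  # label matrix with one row per document, one column per LF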
| 26.585366
| 83
| 0.748624
| 155
| 1,090
| 5.019355
| 0.335484
| 0.102828
| 0.141388
| 0.092545
| 0.22108
| 0.22108
| 0.22108
| 0.22108
| 0.22108
| 0.22108
| 0
| 0.003275
| 0.159633
| 1,090
| 40
| 84
| 27.25
| 0.84607
| 0
| 0
| 0.133333
| 0
| 0
| 0.046789
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.133333
| false
| 0
| 0.166667
| 0.066667
| 0.5
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
0af886d3e8e59b20a8f0a8f86ad88dbe765599d2
| 14,441
|
py
|
Python
|
python/influx/database_tables.py
|
SA-22C-smoothswing/spectrum-protect-sppmon
|
8a9c70f65d9faf6ffc35f3400383dcaa6e0fcbc6
|
[
"Apache-2.0"
] | null | null | null |
python/influx/database_tables.py
|
SA-22C-smoothswing/spectrum-protect-sppmon
|
8a9c70f65d9faf6ffc35f3400383dcaa6e0fcbc6
|
[
"Apache-2.0"
] | null | null | null |
python/influx/database_tables.py
|
SA-22C-smoothswing/spectrum-protect-sppmon
|
8a9c70f65d9faf6ffc35f3400383dcaa6e0fcbc6
|
[
"Apache-2.0"
] | null | null | null |
"""Provides all database and table structures used for the influx database.
Classes:
Datatype
Database
Table
RetentionPolicy
"""
from __future__ import annotations
from enum import Enum, unique
import re
import json
from typing import Any, Dict, List, Set, Tuple, Union
import influx.influx_queries as Queries
from utils.execption_utils import ExceptionUtils
from utils.influx_utils import InfluxUtils
from utils.spp_utils import SppUtils
@unique
class Datatype(Enum):
"""
This enum differentiates between the different Influx-Types.
By declaring the type SPPMon will automatically insert the data in the right format.
The order of the types within the enum is important: bool is a int, but a int is not a bool.
    Important: only use `TIMESTAMP` for epoch timestamps, *NOT* for durations or counts.
    `TIMESTAMP` is automatically converted into second format.
Note: The return type is just a helper and not of a big use.
Methods:
        get_auto_datatype - get Datatype enum by value type analysis
"""
NONE = type(None)
"""Undeclared, only use as a placeholder."""
STRING = str
"""Special symbols and \" will be escaped."""
BOOL = bool
"""Any boolean, be aware it is a subtype of int.
TODO Untested, saves as Boolean within Influx.
"""
INT = int
"""Appends a 'i' at end of number to declare. Fails if the data is mixed with any other type."""
FLOAT = float
"""Unchanged value. Default Influx numeric data type. Mixing with ints works."""
TIMESTAMP = type(int)
"""Automatic transform a timestamp into seconds. Important: Only use for Epoch timestamps, not duration or counter.
Caution: Type is just a placeholder, do not set to int - causing problems!
"""
@staticmethod
def get_auto_datatype(value: Any) -> Datatype:
"""get Datatype enum by value typ analysis. Usage should be avoided.
Only use if no datatype is declared. It skips time-type and fails if ints are mixed with floats.
        If no type is detected, it emits a warning and returns `NONE`.
Arguments:
value {Union[str, float, int, bool, None]} -- Value to be analyzed
Returns:
Datatype -- type of value or `NONE`.
"""
for enum in Datatype:
if(enum is Datatype.TIMESTAMP):
continue
if(isinstance(value, enum.value)):
return enum
ExceptionUtils.error_message(f"No auto type found for {value}")
return Datatype.NONE
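A quick illustration (my addition) of why the enum order matters here, as the class docstring notes - bool must be checked before int:
print(Datatype.get_auto_datatype(True))  # Datatype.BOOL (bool is a subtype of int)
print(Datatype.get_auto_datatype(3))     # Datatype.INT
print(Datatype.get_auto_datatype(2.5))   # Datatype.FLOAT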
class RetentionPolicy:
"""Represents a influxdb retention policy.
    This policy declares after which amount of time a dataset is deleted from the DB.
Attributes
name - name of RP
database - associated database
duration - time until the data is purged
        replication - how often the data is replicated
shard_duration - Size of memory-groups
default - whether this is the default RP
Methods
to_dict - creates a dict out of the values
"""
@property
def name(self) -> str:
"""name of the Retention Policy"""
return self.__name
@property
def database(self) -> Database:
"""associated database"""
return self.__database
@property
def duration(self) -> str:
"""time until the data is purged"""
return self.__duration
@property
def replication(self) -> int:
"""How often the date is replicated. We only have 1 db instance so replication is always 1"""
return self.__replication
@property
def shard_duration(self) -> str:
"""Size of memory-groups. Default time is 0s, then the db decides what to take"""
return self.__shard_duration
@property
def default(self) -> bool:
""" whether this is the default RP"""
return self.__default
def __init__(self, name: str, database: Database, duration: str,
replication: int = 1, shard_duration: str = "0s",
default: bool = False) -> None:
if(not name):
raise ValueError("need retention policy name for creation")
if(not database):
raise ValueError("need retention policy database for creation")
if(not duration):
raise ValueError("need retention policy duration for creation")
if(not replication):
raise ValueError("need retention policy replication factor for creation")
if(not shard_duration):
raise ValueError("need retention policy shard duration for creation")
if(default is None):
raise ValueError("need retention policy default setting for creation")
self.__name = name
self.__database = database
self.__replication = replication
self.__shard_duration = shard_duration
self.__default = default
try:
# declared as str due to the use of the transform method below
self.__duration: str = InfluxUtils.transform_time_literal(duration, single_vals=False)
except ValueError as error:
ExceptionUtils.exception_info(error)
raise ValueError(f"duration for retention policy {name} is not in the correct time format")
try:
# declared as str due to the use of the transform method below
self.__shard_duration: str = InfluxUtils.transform_time_literal(shard_duration, single_vals=False)
except ValueError as error:
ExceptionUtils.exception_info(error)
raise ValueError(f"shard duration for retention policy {name} is not in the correct time format")
def to_dict(self) -> Dict[str, Union[str, int, bool]]:
"""Used to create a dict out of the values, able to compare to influxdb-created dict"""
return {
'name': self.name,
'duration': self.duration,
'shardGroupDuration': self.__shard_duration,
'replicaN': self.__replication,
'default': self.default
}
def __str__(self) -> str:
return f"{self.database.name}.{self.name}"
def __repr__(self) -> str:
return f"Retention Policy: {self.name}"
def __eq__(self, o: object) -> bool:
if(isinstance(o, RetentionPolicy)):
return o.to_dict() == self.to_dict()
return False
def __hash__(self) -> int:
return hash(json.dumps(self.to_dict(), sort_keys=True))
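# --- Illustrative sketch (not part of the original module): declaring a
# retention policy and inspecting the dict used for comparison against
# influxdb. The database name and the "90d" duration literal are hypothetical;
# transform_time_literal normalizes the literal. Only defined, never called.
def _demo_retention_policy() -> None:
    db = Database("sppmon")
    rp = RetentionPolicy(name="autogen", database=db, duration="90d", default=True)
    # to_dict() yields the same shape as the influxdb-created policy dict,
    # so the two can be compared directly via __eq__.
    print(rp.to_dict())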
class Table:
"""Represents a measurement in influx. Contains pre-defined tag and field definitions.
Attributes
name - name of table
fields - dict of field name with datatype
tags - tags as list of str
time_key - key name of the timestamp field
retention_policy - retention policy associated with this table
database - table is declared within this database
Methods
split_by_table_def - Split the given dict into a pre-defined set of tags, fields and a timestamp.
"""
@property
def fields(self) -> Dict[str, Datatype]:
"""fields of the table, name is key, value is datatype"""
return self.__fields
@property
def tags(self) -> List[str]:
"""tags of the table, datatype always string"""
return self.__tags
@property
def time_key(self) -> str:
"""name of the timestamp key"""
return self.__time_key
@property
def name(self) -> str:
"""name of the table"""
return self.__name
@property
def retention_policy(self) -> RetentionPolicy:
"""retention policy associated with this table"""
return self.__retention_policy
@property
def database(self) -> Database:
"""table is declared within this database"""
return self.__database
__bad_measurement_characters: List[str] = [' ', ',']
"""those chars need to be escaped within a measurement/table name"""
def __init__(self, database: Database, name: str, fields: Optional[Dict[str, Datatype]] = None,
tags: Optional[List[str]] = None, time_key: str = 'time', retention_policy: Optional[RetentionPolicy] = None) -> None:
if(not database):
raise ValueError("need database to create table")
if(not name):
raise ValueError("need str name to create table")
if(not time_key):
raise ValueError("time key cannot be None")
if(not fields):
fields = {}
if(not tags):
tags = []
if(not retention_policy):
retention_policy = next(filter(lambda rp: rp.default, database.retention_policies))
self.__database: Database = database
self.__fields: Dict[str, Datatype] = fields
self.__tags: List[str] = tags
self.__time_key: str = time_key
self.__retention_policy = retention_policy
# escape not allowed characters in Measurement
for bad_character in self.__bad_measurement_characters:
if(bad_character in name):
name = name.replace(bad_character, '\\%c' % bad_character)
self.__name: str = name
def __str__(self) -> str:
return f"{self.database.name}.{self.retention_policy.name}.{self.name}"
def __repr__(self) -> str:
return f"Table: {self.name}"
def split_by_table_def(self, mydict: Dict[str, Any]) -> Tuple[
Dict[str, Any], Dict[str, Any], Union[str, int, None]]:
"""Split the given dict into a pre-defined set of tags, fields and a timestamp.
None-Values and empty strings are ignored.
If there are no fields declared, it will split by a default pattern.
Undeclared columns will produce a warning.
This function uses the tag/field and timestamp definition declared within this table.
Arguments:
self {Table} -- Table with a predefined set of tags and fields
mydict {Dict[str, Any]} -- dict with columns as keys. None-values are ignored
Raises:
ValueError: If no dict is given or not of type dict.
Returns:
(Dict[str, Any], Dict[str, Any], int) -- Tuple of: tags, fields, timestamp
"""
if(not mydict):
raise ValueError("need at least one value in dict to split")
# if table is not defined use default split
if(not self.fields):
return InfluxUtils.default_split(mydict=mydict)
# fill dicts
# table.fields is a dict, we only need the keys
fields: Dict[str, Any] = dict.fromkeys(self.fields.keys(), None)
tags: Dict[str, Any] = dict.fromkeys(self.tags, None)
# what field should be recorded as time
time_stamp_field = self.time_key
# helper variable to only overwrite if it is not the time_stamp_field
time_overwrite_allowed = True
# the actual timestamp saved
time_stamp: Union[str, int, None] = None
for (key, value) in mydict.items():
# Ignore empty entries
if(value is None or (isinstance(value, str) and not value)):
continue
# Check timestamp value if it matches any of predefined time names
if(key == time_stamp_field or key in InfluxUtils.time_key_names):
# sppmonCTS has lowest priority, only set if otherwise None
if(time_stamp is None and key == SppUtils.capture_time_key):
time_stamp = value
# time_stamp_field is highest priority. Do not overwrite it.
elif(key == time_stamp_field):
time_overwrite_allowed: bool = False
time_stamp = value
# if time_stamp_field is not used yet, overwrite sppmonCaptureTime or others
elif(time_overwrite_allowed):
time_stamp = value
# if no overwrite allowed, continue and drop field
else:
continue
# Otherwise check for Keys or Fields
if(key in fields):
fields[key] = value
elif(key in tags):
tags[key] = value
elif(key in InfluxUtils.time_key_names or key == time_stamp_field):
continue
else:
ExceptionUtils.error_message(f"Not all columns for table {self.name} are declared: {key}")
# before key+"MISSING" : Removed to avoid death-circle on repeated queries.
fields[key] = value
return (tags, fields, time_stamp)
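# --- Illustrative sketch (not part of the original module): what
# split_by_table_def does with a raw row dict. All names ("jobs", "jobId",
# "duration", "start") are hypothetical. Only defined, never called.
def _demo_split_by_table_def() -> None:
    db = Database("sppmon")
    rp = RetentionPolicy(name="autogen", database=db, duration="90d", default=True)
    table = Table(db, "jobs", fields={"duration": Datatype.INT},
                  tags=["jobId"], time_key="start", retention_policy=rp)
    tags, fields, time_stamp = table.split_by_table_def(
        {"jobId": "42", "duration": 13, "start": 1600000000})
    # -> tags {'jobId': '42'}, fields {'duration': 13}, time_stamp 1600000000
    print(tags, fields, time_stamp)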
class Database:
"""
Represents an instance of an influx database. Define all table definitions within the init method.
Attributes
name - name of the database
tables - tables with predefined tags & fields
retention_policies - Set of all provided Retention Policies
continuous_queries - Set of all provided Continuous Queries
Methods
__getitem__ - [] access on the tables via name. Creates empty table if missing.
"""
@property
def tables(self) -> Dict[str, Table]:
"""Dict with table definitions to look up"""
return self.__tables
@property
def retention_policies(self) -> Set[RetentionPolicy]:
"""Set of all provided Retention Policies"""
return self.__retention_policies
@property
def continuous_queries(self) -> Set[Queries.ContinuousQuery]:
"""Set of all provided Continuous Queries"""
return self.__continuous_queries
@property
def name(self) -> str:
"""name of the database, also used as reference"""
return self.__name
def __getitem__(self, table_name: str) -> Table:
"""Aquire a instance of a predefined table, returns a empty table if it was not defined. []-Access.
Arguments:
table_name {str} -- name of the table you want to aquire
Returns:
Table -- Instance of a predefined table, otherwise new empty table
"""
return self.tables.get(table_name, Table(self, table_name))
def __str__(self) -> str:
return self.name
def __repr__(self) -> str:
return f'Database: {self.name}'
def __init__(self, name: str):
self.__name: str = name
self.__tables: Dict[str, Table] = {}
self.__retention_policies: Set[RetentionPolicy] = set()
self.__continuous_queries: Set[Queries.ContinuousQuery] = set()
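# --- Illustrative sketch (not part of the original module): []-access on a
# Database. Unknown names yield a fresh empty Table, which requires a default
# retention policy to exist on the database first. Only defined, never called.
def _demo_database_getitem() -> None:
    db = Database("sppmon")
    db.retention_policies.add(
        RetentionPolicy(name="autogen", database=db, duration="90d", default=True))
    ad_hoc = db["notDeclared"]  # not predefined -> new empty Table
    print(repr(ad_hoc))  # Table: notDeclared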
| 36.012469
| 119
| 0.628696
| 1,799
| 14,441
| 4.912729
| 0.1801
| 0.037339
| 0.019348
| 0.019009
| 0.259335
| 0.206721
| 0.101607
| 0.086445
| 0.063363
| 0.0568
| 0
| 0.000489
| 0.291531
| 14,441
| 400
| 120
| 36.1025
| 0.863356
| 0.326224
| 0
| 0.277778
| 0
| 0
| 0.09887
| 0.01083
| 0
| 0
| 0
| 0.0025
| 0
| 1
| 0.156566
| false
| 0
| 0.045455
| 0.035354
| 0.414141
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
0af95702c3886ad24fef9b7d2bef0b353d7f0d8a
| 5,779
|
py
|
Python
|
eval_encoder.py
|
lithium0003/Image2UTF8-Transformer
|
2620af2a8bdaf332e25b39ce05d610e21e6492fc
|
[
"MIT"
] | null | null | null |
eval_encoder.py
|
lithium0003/Image2UTF8-Transformer
|
2620af2a8bdaf332e25b39ce05d610e21e6492fc
|
[
"MIT"
] | null | null | null |
eval_encoder.py
|
lithium0003/Image2UTF8-Transformer
|
2620af2a8bdaf332e25b39ce05d610e21e6492fc
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
import tensorflow as tf
physical_devices = tf.config.list_physical_devices('GPU')
try:
tf.config.experimental.set_memory_growth(physical_devices[0], True)
except Exception:
# No GPU present, invalid device, or virtual devices already initialized.
pass
import numpy as np
import os, time, csv
import tqdm
import umap
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import datetime
import signal
import net
from matplotlib import rcParams
rcParams['font.family'] = 'sans-serif'
rcParams['font.sans-serif'] = ['Hiragino Maru Gothic Pro', 'Yu Gothic', 'Meirio', 'Takao', 'IPAexGothic', 'IPAPGothic', 'Noto Sans CJK JP']
class SimpleEncodeDecoder:
def __init__(self):
self.save_dir = './result/step1/'
self.result_dir = './result/plot/'
os.makedirs(self.result_dir, exist_ok=True)
checkpoint_dir = self.save_dir
self.max_epoch = 300
self.steps_per_epoch = 1000
self.batch_size = 64
lr = tf.keras.optimizers.schedules.ExponentialDecay(1e-3, 1e5, 0.5)
self.optimizer = tf.keras.optimizers.Adam(lr)
self.encoder = net.FeatureBlock()
self.encoder.summary()
self.decoder = net.SimpleDecoderBlock()
self.decoder.summary()
inputs = {
'image': tf.keras.Input(shape=(128,128,3)),
}
feature_out = self.encoder(inputs)
outputs = self.decoder(feature_out)
self.model = tf.keras.Model(inputs, outputs, name='SimpleEncodeDecoder')
checkpoint = tf.train.Checkpoint(optimizer=self.optimizer,
model=self.model)
last = tf.train.latest_checkpoint(checkpoint_dir)
checkpoint.restore(last)
self.manager = tf.train.CheckpointManager(
checkpoint, directory=checkpoint_dir, max_to_keep=2)
if last is not None:
self.init_epoch = int(os.path.basename(last).split('-')[1])
print('loaded %d epoch'%self.init_epoch)
else:
self.init_epoch = 0
self.model.summary()
def eval(self):
self.data = net.FontData()
print("Plot: ", self.init_epoch + 1)
acc = self.make_plot(self.data.test_data(self.batch_size), (self.init_epoch + 1))
print('acc', acc)
@tf.function
def eval_substep(self, inputs):
input_data = {
'image': inputs['input'],
}
feature = self.encoder(input_data)
outputs = self.decoder(feature)
target_id = inputs['index']
target_id1 = inputs['idx1']
target_id2 = inputs['idx2']
pred_id1 = tf.nn.softmax(outputs['id1'], -1)
pred_id2 = tf.nn.softmax(outputs['id2'], -1)
return {
'feature': feature,
'pred_id1': pred_id1,
'pred_id2': pred_id2,
'target_id': target_id,
'target_id1': target_id1,
'target_id2': target_id2,
}
def make_plot(self, test_ds, epoch):
result = []
labels = []
with open(os.path.join(self.result_dir,'test_result-%d.txt'%epoch),'w') as txt:
correct_count = 0
failed_count = 0
with tqdm.tqdm(total=len(self.data.test_keys)) as pbar:
for inputs in test_ds:
pred = self.eval_substep(inputs)
result += [pred['feature']]
labels += [pred['target_id']]
for i in range(pred['target_id1'].shape[0]):
txt.write('---\n')
target = pred['target_id'][i].numpy()
txt.write('target: id %d = %s\n'%(target, self.data.glyphs[target-1]))
predid1 = np.argmax(pred['pred_id1'][i])
predid2 = np.argmax(pred['pred_id2'][i])
predid = predid1 * 100 + predid2
if predid == 0 or predid > self.data.id_count + 1:
# id 0 and out-of-range ids both decode to nothing
txt.write('predict: id %d nothing (p=%f)\n'%(predid, pred['pred_id1'][i][predid1] * pred['pred_id2'][i][predid2]))
else:
txt.write('predict: id %d = %s (p=%f)\n'%(predid, self.data.glyphs[predid-1], pred['pred_id1'][i][predid1] * pred['pred_id2'][i][predid2]))
if target == predid:
txt.write('Correct!\n')
correct_count += 1
else:
txt.write('Failed!\n')
failed_count += 1
pbar.update(1)
acc = correct_count / (correct_count + failed_count)
txt.write('==============\n')
txt.write('Correct = %d\n'%correct_count)
txt.write('Failed = %d\n'%failed_count)
txt.write('accuracy = %f\n'%acc)
result = np.concatenate(result)
labels = np.concatenate(labels)
print('run UMAP')
X_reduced = umap.UMAP(metric='cosine').fit_transform(result)
fig, ax = plt.subplots(figsize=(50, 50))
ax.scatter(X_reduced[:, 0], X_reduced[:, 1], c=labels, cmap=plt.get_cmap('hsv'))
print('plot UMAP')
for i, label in enumerate(labels):
ax.annotate(self.data.glyphs[label-1], (X_reduced[i,0], X_reduced[i,1]))
plt.savefig(os.path.join(self.result_dir,'test_result-%d.png'%epoch), dpi=300)
plt.close('all')
return acc
def eval():
encoder = SimpleEncodeDecoder()
encoder.eval()
if __name__ == '__main__':
eval()
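# --- Illustrative sketch (not part of the original script): the projection
# step of make_plot, isolated. It relies on the umap-learn package imported
# above; cosine distance matches the call in make_plot. Only defined,
# never called.
def _project_features(features):
    """Reduce (N, D) encoder features to (N, 2) for the scatter plot."""
    return umap.UMAP(metric='cosine').fit_transform(features)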
| 37.525974
| 167
| 0.554767
| 693
| 5,779
| 4.484848
| 0.310245
| 0.028314
| 0.020914
| 0.015444
| 0.086229
| 0.080438
| 0.080438
| 0.080438
| 0.080438
| 0.058559
| 0
| 0.022977
| 0.307147
| 5,779
| 153
| 168
| 37.771242
| 0.753247
| 0.015055
| 0
| 0.053435
| 0
| 0
| 0.113708
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.038168
| false
| 0.007634
| 0.091603
| 0
| 0.152672
| 0.038168
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
0afa87a4b421519306afb64f3b1e1263669a468c
| 22,351
|
py
|
Python
|
clipper_admin/clipper_admin/clipper_admin.py
|
SimonZsx/clipper
|
457088be2ebe68c68b94d90389d1308e35b4c844
|
[
"Apache-2.0"
] | 2
|
2019-04-24T13:46:28.000Z
|
2019-05-28T06:59:26.000Z
|
clipper_admin/clipper_admin/clipper_admin.py
|
SimonZsx/clipper
|
457088be2ebe68c68b94d90389d1308e35b4c844
|
[
"Apache-2.0"
] | null | null | null |
clipper_admin/clipper_admin/clipper_admin.py
|
SimonZsx/clipper
|
457088be2ebe68c68b94d90389d1308e35b4c844
|
[
"Apache-2.0"
] | 4
|
2019-04-03T11:03:57.000Z
|
2019-06-26T08:22:38.000Z
|
from __future__ import absolute_import, division, print_function
import logging
import docker
import tempfile
import requests
from requests.exceptions import RequestException
import json
import pprint
import time
import re
import os
import tarfile
import sys
from cloudpickle import CloudPickler
import pickle
import numpy as np
from google.protobuf.json_format import MessageToDict
if sys.version_info < (3, 0):
try:
from cStringIO import StringIO
except ImportError:
from StringIO import StringIO
PY3 = False
else:
from io import BytesIO as StringIO
PY3 = True
import grpc
from .rpc import model_pb2_grpc
from .rpc import model_pb2
from .rpc import prediction_pb2_grpc
from .rpc import prediction_pb2
from .rpc import management_pb2
from .rpc import management_pb2_grpc
from .container_manager import CONTAINERLESS_MODEL_IMAGE, ClusterAdapter
from .exceptions import ClipperException, UnconnectedException
from .version import __version__, __registry__
from . import graph_parser
DEFAULT_LABEL = []
DEFAULT_PREDICTION_CACHE_SIZE_BYTES = 33554432
CLIPPER_TEMP_DIR = "/tmp/clipper" # Used Internally for Test; Not Windows Compatible
logging.basicConfig(
format='%(asctime)s %(levelname)-8s %(message)s',
datefmt='%y-%m-%d:%H:%M:%S',
level=logging.INFO)
# logging.basicConfig(
# format='%(asctime)s %(levelname)-8s [%(filename)s:%(lineno)d] %(message)s',
# datefmt='%y-%m-%d:%H:%M:%S',
# level=logging.INFO)
logger = logging.getLogger(__name__)
deploy_regex_str = "[a-z0-9]([-a-z0-9]*[a-z0-9])?\Z"
deployment_regex = re.compile(deploy_regex_str)
def _validate_versioned_model_name(name, version):
if deployment_regex.match(name) is None:
raise ClipperException(
"Invalid value: {name}: a model name must be a valid DNS-1123 "
" subdomain. It must consist of lower case "
"alphanumeric characters, '-' or '.', and must start and end with "
"an alphanumeric character (e.g. 'example.com', regex used for "
"validation is '{reg}'".format(name=name, reg=deploy_regex_str))
if deployment_regex.match(version) is None:
raise ClipperException(
"Invalid value: {version}: a model version must be a valid DNS-1123 "
" subdomain. It must consist of lower case "
"alphanumeric characters, '-' or '.', and must start and end with "
"an alphanumeric character (e.g. 'example.com', regex used for "
"validation is '{reg}'".format(
version=version, reg=deploy_regex_str))
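# --- Illustrative sketch (not part of the original module): what the
# DNS-1123 validation above accepts and rejects. The names are hypothetical.
# Only defined, never called.
def _demo_name_validation():
    _validate_versioned_model_name("image-classifier", "1")  # passes silently
    try:
        _validate_versioned_model_name("Bad_Name", "1")  # uppercase/underscore
    except ClipperException as error:
        print("rejected:", error)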
class ClipperConnection(object):
def __init__(self, container_manager):
self.connected = False
self.cm = container_manager
#############TEST################
self.runtime_dag = ""
self.lock = False
#################################
self.logger = ClusterAdapter(logger, {
'cluster_name': self.cm.cluster_identifier
})
def start_clipper(self,
mgmt_frontend_image='{}/management_frontend:{}'.format(
__registry__, __version__),
cache_size=DEFAULT_PREDICTION_CACHE_SIZE_BYTES):
try:
self.cm.start_clipper(mgmt_frontend_image)
# while True:
# try:
# query_frontend_url = "http://{host}/metrics".format(
# host=self.cm.get_query_addr())
# mgmt_frontend_url = "http://{host}/admin/ping".format(
# host=self.cm.get_admin_addr())
# for name, url in [('query frontend', query_frontend_url),
# ('management frontend', mgmt_frontend_url)]:
# r = requests.get(url, timeout=5)
# if r.status_code != requests.codes.ok:
# raise RequestException(
# "{name} end point {url} health check failed".format(name=name, url=url))
# break
# except RequestException as e:
# self.logger.info("Clipper still initializing: \n {}".format(e))
# time.sleep(1)
self.logger.info("Clipper is running")
self.connected = True
except ClipperException as e:
self.logger.warning("Error starting Clipper: {}".format(e.msg))
raise e
def connect(self):
"""Connect to a running Clipper cluster."""
self.cm.connect()
self.connected = True
self.logger.info(
"Successfully connected to Clipper cluster at {}".format(
self.cm.get_query_addr()))
def build_and_deploy_DAG(self,
name,
version,
dag_description,
labels):
if not self.connected:
raise UnconnectedException()
def build_and_deploy_model(self,
name,
version,
input_type,
model_data_path,
base_image,
labels=None,
container_registry=None,
num_replicas=1,
batch_size=-1,
pkgs_to_install=None):
if not self.connected:
raise UnconnectedException()
image = self.build_model(name, version, model_data_path, base_image,
container_registry, pkgs_to_install)
self.deploy_model(name, version, input_type, image, labels,
num_replicas, batch_size)
def build_model(self,
name,
version,
model_data_path,
base_image,
container_registry=None,
pkgs_to_install=None):
version = str(version)
_validate_versioned_model_name(name, version)
run_cmd = ''
if pkgs_to_install:
run_as_lst = 'RUN apt-get -y install build-essential && pip install'.split(
' ')
run_cmd = ' '.join(run_as_lst + pkgs_to_install)
with tempfile.NamedTemporaryFile(
mode="w+b", suffix="tar") as context_file:
# Create build context tarfile
with tarfile.TarFile(
fileobj=context_file, mode="w") as context_tar:
context_tar.add(model_data_path)
# From https://stackoverflow.com/a/740854/814642
try:
df_contents = StringIO(
str.encode(
"FROM {container_name}\n{run_command}\nCOPY {data_path} /model/\n".
format(
container_name=base_image,
data_path=model_data_path,
run_command=run_cmd)))
df_tarinfo = tarfile.TarInfo('Dockerfile')
df_contents.seek(0, os.SEEK_END)
df_tarinfo.size = df_contents.tell()
df_contents.seek(0)
context_tar.addfile(df_tarinfo, df_contents)
except TypeError:
df_contents = StringIO(
"FROM {container_name}\n{run_command}\nCOPY {data_path} /model/\n".
format(
container_name=base_image,
data_path=model_data_path,
run_command=run_cmd))
df_tarinfo = tarfile.TarInfo('Dockerfile')
df_contents.seek(0, os.SEEK_END)
df_tarinfo.size = df_contents.tell()
df_contents.seek(0)
context_tar.addfile(df_tarinfo, df_contents)
# Exit Tarfile context manager to finish the tar file
# Seek back to beginning of file for reading
context_file.seek(0)
image = "{cluster}-{name}:{version}".format(
cluster=self.cm.cluster_identifier, name=name, version=version)
if container_registry is not None:
image = "{reg}/{image}".format(
reg=container_registry, image=image)
docker_client = docker.from_env()
self.logger.info(
"Building model Docker image with model data from {}".format(
model_data_path))
image_result, build_logs = docker_client.images.build(
fileobj=context_file, custom_context=True, tag=image)
for b in build_logs:
if 'stream' in b and b['stream'] != '\n': #log build steps only
self.logger.info(b['stream'].rstrip())
self.logger.info("Pushing model Docker image to {}".format(image))
for line in docker_client.images.push(repository=image, stream=True):
self.logger.debug(line)
return image
def deploy_model(self,
name,
version,
input_type,
image,
labels=None,
num_replicas=1,
batch_size=-1):
if not self.connected:
raise UnconnectedException()
version = str(version)
_validate_versioned_model_name(name, version)
self.cm.deploy_model(
name=name,
version=version,
input_type=input_type,
image=image,
num_replicas=num_replicas)
# self.register_model(
# name,
# version,
# input_type,
# image=image,
# labels=labels,
# batch_size=batch_size)
self.logger.info("Done deploying model {name}:{version}.".format(
name=name, version=version))
def connect_host(self, host_ip, host_port):
# NOTE: the Docker daemon port is hardcoded to "2375" here; host_port is ignored.
self.cm.connect_host(host_ip, "2375")
def add_model(self,
model_name,
model_version,
image,
input_type="string",
output_type="string",
stateful=False):
modelinfo = management_pb2.ModelInfo(modelname=model_name,
modelversion=model_version,
image=image,
inputtype=input_type,
outputtype=output_type,
stateful=stateful).SerializeToString()
self.cm.grpc_client("zsxhku/grpcclient", "--addmodel %s %s %s "%("localhost","33333", modelinfo))
return
def deploy_DAG(self, name, version, dag_description=None, runtime=""):
if not self.connected:
raise UnconnectedException()
# model_info = self.get_all_models()
dag_description_ = dag_description
#self.logger.info("dag_description: %s"%(dag_description_))
#if(dag_description==None):
# dag_description_=self.get_dag_description()
nodes_list = graph_parser.get_all_nodes(dag_description_)
container_info = []
proxy_info = []
backup_info = []
count = 1
for model_info in nodes_list:
model_name,model_version,model_image = graph_parser.get_name_version(model_info)
container_name, container_id, host = self.cm.add_replica(model_name, model_version, "22222", model_image, runtime=runtime)
self.logger.info("Started %s with container %s:%s (HOST:%s)"%(model_name, container_name, container_id, host))
container_ip = self.cm.get_container_ip(host, container_id)
proxy_name, proxy_id = self.cm.set_proxy("mxschen/ai-proxy:latest", container_name, container_ip, host)
## get the ip of the instances
proxy_ip = self.cm.get_container_ip(host, proxy_id)
proxy_info.append([proxy_name,proxy_id,proxy_ip])
container_info.append([container_name, container_id, container_ip])
if graph_parser.is_stateful(model_info):
backup_name, backup_id, backup_host = self.cm.add_replica(model_name, model_version, "22222", model_image)
self.logger.info("[Backup] Started %s with container %s:%s (HOST:%s)"%(model_name, backup_name, backup_id, backup_host))
backup_ip = self.cm.get_container_ip(backup_host, backup_id)
backup_proxy_name, backup_proxy_id = self.cm.set_proxy("mxschen/ai-proxy:latest", backup_name, backup_ip, backup_host)
backup_proxy_ip= self.cm.get_container_ip(backup_host, backup_proxy_id)
backup_info.append([backup_name, backup_id, backup_ip, backup_proxy_name, backup_proxy_id, backup_proxy_ip])
else:
backup_info.append([])
#self.cm.check_container_status(host, container_id, 0.3, 20)
#self.cm.check_container_status(host, proxy_id, 0.3, 20)
#time.sleep(25)
#self.logger.info("proxy_ip:%s"%(proxy_ip))
self.cm.grpc_client("zsxhku/grpcclient", "--setmodel %s %s %s %s %s %s"%(proxy_ip, "22223", container_name, count, container_ip, "22222" ))
self.logger.info('[DEPLOYMENT] Finished setting model info to proxy')
if(graph_parser.is_stateful(model_info)):
self.cm.grpc_client("zsxhku/grpcclient", "--setmodel %s %s %s %s %s %s"%(backup_info[-1][-1], "22223", backup_info[-1][0], count, backup_info[-1][2], "22222" ))
self.logger.info('[DEPLOYMENT][Backup] Finished setting model info to proxy')
count += 1
# self.cm.grpc_client("zsxhku/grpcclient", "--setproxy %s %s %s %s"%(container_ip, "22222", proxy_name, "22223"))
# self.logger.info('[DEPLOYMENT] Finished setting proxy info to model')
# if(graph_parser.is_stateful(model_info)):
# self.cm.grpc_client("zsxhku/grpcclient", "--setproxy %s %s %s %s"%(backup_info[-1][2], "22222", backup_info[-1][3], "22223"))
# self.logger.info('[DEPLOYMENT][Backup] Finished setting proxy info to model')
runtime_dag_id = name+version+str(1)
## Starting frontend
frontend_name, frontend_container_id = self.cm.add_frontend("localhost", "mxschen/frontend",runtime_dag_id, proxy_info[0][2], "22223", max_workers=2048)
frontend_ip = self.cm.get_container_ip("localhost", frontend_container_id)
frontend_info = [frontend_name, frontend_container_id, frontend_ip]
self.logger.info("[DEPLOYMENT] ################ Started Frontend #################")
#expand the dag description with the model/proxy instances info
expanded_dag = graph_parser.expand_dag(dag_description_, name, version, container_info, proxy_info, backup_info, frontend_info)
self.runtime_dag = expanded_dag
# TODO: need to modularize
self.cm.grpc_client("zsxhku/grpcclient", "--addruntimedag %s %s %s %s %s %s %s"%('1', name, version, 'old' , self.cm.admin_ip, self.cm.admin_port, expanded_dag))
self.logger.info("Added new runtime DAG to admin daemon\n%s"%(expanded_dag))
# tell each proxy the runtime DAG info
for tup in proxy_info:
proxy_name = tup[0]
proxy_id = tup[1]
proxy_ip = tup[2]
self.cm.grpc_client("zsxhku/grpcclient", "--setdag %s %s %s"%(proxy_ip, "22223", expanded_dag))
self.logger.info('[DEPLOYMENT] Finished setting DAG for proxy {proxy_name} '.format(proxy_name=proxy_name))
# tell the backups the runtime DAG info
for tup in backup_info:
if tup:
self.cm.grpc_client("zsxhku/grpcclient", "--setdag %s %s %s"%(tup[-1], "22223", expanded_dag))
self.logger.info('[DEPLOYMENT][Backup] Finished setting DAG for proxy {proxy_name} '.format(proxy_name=tup[-1]))
return
def inspect_instance(self):
"""Fetches performance metrics from the running Clipper cluster.
Returns
-------
str
The JSON string containing the current set of metrics
for this instance. On error, the string will be an error message
(not JSON formatted).
Raises
------
:py:exc:`clipper.UnconnectedException`
:py:exc:`clipper.ClipperException`
"""
def get_query_addr(self):
"""Get the IP address at which the query frontend can be reached request predictions.
Returns
-------
str
The address as an IP address or hostname.
Raises
------
:py:exc:`clipper.UnconnectedException`
versions. All replicas for each version of each model will be stopped.
"""
if not self.connected:
raise UnconnectedException()
return self.cm.get_query_addr()
def stop_models(self, model_names):
"""Stops all versions of the specified models.
This is a convenience method to avoid the need to explicitly list all versions
of a model when calling :py:meth:`clipper_admin.ClipperConnection.stop_versioned_models`.
Parameters
----------
model_names : list(str)
A list of model names. All replicas of all versions of each model specified in the list
will be stopped.
Raises
------
:py:exc:`clipper.UnconnectedException`
"""
# if not self.connected:
# raise UnconnectedException()
# model_info = self.get_all_models(verbose=True)
# model_dict = {}
# for m in model_info:
# if m["model_name"] in model_names:
# if m["model_name"] in model_dict:
# model_dict[m["model_name"]].append(m["model_version"])
# else:
# model_dict[m["model_name"]] = [m["model_version"]]
# self.cm.stop_models(model_dict)
# pp = pprint.PrettyPrinter(indent=4)
# self.logger.info(
# "Stopped all containers for these models and versions:\n{}".format(
# pp.pformat(model_dict)))
def stop_versioned_models(self, model_versions_dict):
"""Stops the specified versions of the specified models.
Parameters
----------
model_versions_dict : dict(str, list(str))
For each entry in the dict, the key is a model name and the value is a list of model
versions. All replicas for each version of each model will be stopped.
Raises
------
:py:exc:`clipper.UnconnectedException`
Note
----
This method will stop the currently deployed versions of models if you specify them. You
almost certainly want to use one of the other stop_* methods. Use with caution.
"""
# if not self.connected:
# raise UnconnectedException()
# self.cm.stop_models(model_versions_dict)
# pp = pprint.PrettyPrinter(indent=4)
# self.logger.info(
# "Stopped all containers for these models and versions:\n{}".format(
# pp.pformat(model_versions_dict)))
def stop_inactive_model_versions(self, model_names):
"""Stops all model containers serving stale versions of the specified models.
For example, if you have deployed versions 1, 2, and 3 of model "music_recommender"
and version 3 is the current version::
clipper_conn.stop_inactive_model_versions(["music_recommender"])
will stop any containers serving versions 1 and 2 but will leave containers serving
version 3 untouched.
Parameters
----------
model_names : list(str)
The names of the models whose old containers you want to stop.
Raises
------
:py:exc:`clipper.UnconnectedException`
"""
# if not self.connected:
# raise UnconnectedException()
# model_info = self.get_all_models(verbose=True)
# model_dict = {}
# for m in model_info:
# if m["model_name"] in model_names and not m["is_current_version"]:
# if m["model_name"] in model_dict:
# model_dict[m["model_name"]].append(m["model_version"])
# else:
# model_dict[m["model_name"]] = [m["model_version"]]
# self.cm.stop_models(model_dict)
# pp = pprint.PrettyPrinter(indent=4)
# self.logger.info(
# "Stopped all containers for these models and versions:\n{}".format(
# pp.pformat(model_dict)))
def stop_all_model_containers(self):
"""Stops all model containers started via Clipper admin commands.
This method can be used to clean up leftover Clipper model containers even if the
Clipper management frontend or Redis has crashed. It can also be called without calling
``connect`` first.
"""
self.cm.stop_all_model_containers()
self.logger.info("Stopped all Clipper model containers")
def stop_all(self, graceful=True):
"""Stops all processes that were started via Clipper admin commands.
This includes the query and management frontend Docker containers and all model containers.
If you started Redis independently, this will not affect Redis. It can also be called
without calling ``connect`` first.
If graceful=False, Clipper will issue a Docker kill when running in Docker mode. This parameter
has no effect in Kubernetes.
"""
self.cm.stop_all(graceful=graceful)
self.logger.info(
"Stopped all Clipper cluster and all model containers")
| 39.629433
| 176
| 0.579437
| 2,542
| 22,351
| 4.894571
| 0.166798
| 0.018325
| 0.027005
| 0.003858
| 0.457402
| 0.39865
| 0.304131
| 0.257836
| 0.257836
| 0.228098
| 0
| 0.011674
| 0.321641
| 22,351
| 563
| 177
| 39.699822
| 0.80893
| 0.291441
| 0
| 0.284698
| 0
| 0
| 0.135436
| 0.013385
| 0
| 0
| 0
| 0.001776
| 0
| 1
| 0.064057
| false
| 0
| 0.113879
| 0
| 0.19573
| 0.007117
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
0afc21eecdc60b266d8862b6f28eebf607699a5d
| 48,451
|
py
|
Python
|
chevah/compat/testing/testcase.py
|
chevah/compat
|
d22e5f551a628f8a1652c9f2eea306e17930cb8f
|
[
"BSD-3-Clause"
] | 5
|
2016-12-03T22:54:50.000Z
|
2021-11-17T11:17:39.000Z
|
chevah/compat/testing/testcase.py
|
chevah/compat
|
d22e5f551a628f8a1652c9f2eea306e17930cb8f
|
[
"BSD-3-Clause"
] | 76
|
2015-01-22T16:00:31.000Z
|
2022-02-09T22:13:34.000Z
|
chevah/compat/testing/testcase.py
|
chevah/compat
|
d22e5f551a628f8a1652c9f2eea306e17930cb8f
|
[
"BSD-3-Clause"
] | 1
|
2016-12-10T15:57:31.000Z
|
2016-12-10T15:57:31.000Z
|
# -*- coding: utf-8 -*-
# Copyright (c) 2011 Adi Roiban.
# See LICENSE for details.
"""
TestCase used for Chevah project.
"""
from __future__ import print_function
from __future__ import division
from __future__ import absolute_import
from six import text_type
from six.moves import range
import contextlib
import inspect
import threading
import os
import platform
import socket
import sys
import time
from bunch import Bunch
from mock import patch, Mock
from nose import SkipTest
try:
from twisted.internet.defer import Deferred
from twisted.internet.posixbase import (
_SocketWaker, _UnixWaker, _SIGCHLDWaker
)
from twisted.python.failure import Failure
except ImportError:
# Twisted support is optional.
_SocketWaker = None
_UnixWaker = None
_SIGCHLDWaker = None
from chevah.compat import (
DefaultAvatar,
LocalFilesystem,
process_capabilities,
system_users,
SuperAvatar,
)
from chevah.compat.administration import os_administration
from chevah.compat.testing.assertion import AssertionMixin
from chevah.compat.testing.mockup import mk
from chevah.compat.testing.constant import (
TEST_NAME_MARKER,
)
from chevah.compat.testing.filesystem import LocalTestFilesystem
# For Python below 2.7 we use the separate unittest2 module.
# It comes by default in Python 2.7.
if sys.version_info[0:2] < (2, 7):
from unittest2 import TestCase
# Shut up you linter.
TestCase
else:
from unittest import TestCase
try:
# Import reactor last in case some other modules are changing the reactor.
from twisted.internet import reactor
except ImportError:
reactor = None
def _get_hostname():
"""
Return hostname as resolved by default DNS resolver.
"""
return socket.gethostname()
class TwistedTestCase(TestCase):
"""
Test case for Twisted specific code.
Provides support for running deferreds and for starting/stopping the
reactor during tests.
"""
# Number of seconds to wait for a deferred to have a result.
DEFERRED_TIMEOUT = 1
# List of names for delayed calls which should not be waited for
# when running the reactor.
EXCEPTED_DELAYED_CALLS = []
EXCEPTED_READERS = [
_UnixWaker,
_SocketWaker,
_SIGCHLDWaker,
]
# Scheduled event to stop waiting for a deferred.
_reactor_timeout_call = None
def setUp(self):
super(TwistedTestCase, self).setUp()
self._timeout_reached = False
self._reactor_timeout_failure = None
@property
def _caller_success_member(self):
"""
Retrieve the 'success' member from the currently running test case, looked up in the caller's stack frames.
"""
success = None
for i in range(2, 6):
try:
success = inspect.stack()[i][0].f_locals['success']
break
except KeyError:
success = None
if success is None:
raise AssertionError('Failed to find "success" attribute.')
return success
def tearDown(self):
try:
if self._caller_success_member:
# Check for a clean reactor at shutdown, only if test
# passed.
self.assertIsNone(self._reactor_timeout_failure)
self._assertReactorIsClean()
finally:
self._cleanReactor()
super(TwistedTestCase, self).tearDown()
def _reactorQueueToString(self):
"""
Return a string representation of all delayed calls from reactor
queue.
"""
result = []
for delayed in reactor.getDelayedCalls(): # noqa:cover
result.append(text_type(delayed.func))
return '\n'.join(result)
def _threadPoolQueue(self):
"""
Return the current tasks of the thread pool, or [] when the threadpool does
not exist.
This should only be called at cleanup as it removes elements from
the Twisted thread queue, which will never be called.
"""
if not reactor.threadpool:
return []
result = []
while len(reactor.threadpool._team._pending):
result.append(reactor.threadpool._team._pending.pop())
return result
def _threadPoolThreads(self):
"""
Return the current threads from the pool, or an empty list when the
threadpool does not exist.
"""
if not reactor.threadpool:
return []
else:
return reactor.threadpool.threads
def _threadPoolWorking(self):
"""
Return the working threads from the pool, or an empty list when the
threadpool does not exist or has no job.
"""
if not reactor.threadpool:
return []
else:
return reactor.threadpool.working
@classmethod
def _cleanReactor(cls):
"""
Remove all delayed calls, readers and writers from the reactor.
This is only for cleanup purpose and should not be used by normal
tests.
"""
if not reactor:
return
try:
reactor.removeAll()
except (RuntimeError, KeyError):
# FIXME:863:
# When running threaded tests, the reactor is touched from the test
# case itself, which runs in one thread, and from the fixtures/cleanup
# code, which is executed from another thread.
# removeAll might fail since it detects that internal state
# is changed from other source.
pass
reactor.threadCallQueue = []
for delayed_call in reactor.getDelayedCalls():
try:
delayed_call.cancel()
except (ValueError, AttributeError):
# AlreadyCancelled and AlreadyCalled are ValueError.
# Might be canceled from the separate thread.
# AttributeError can occur when we do multi-threading.
pass
def _raiseReactorTimeoutError(self, timeout):
"""
Signal a timeout error while executing the reactor.
"""
self._timeout_reached = True
failure = AssertionError(
'Reactor took more than %.2f seconds to execute.' % timeout)
self._reactor_timeout_failure = failure
def _initiateTestReactor(self, timeout):
"""
Do the steps required to initiate a reactor for testing.
"""
self._timeout_reached = False
# Set up timeout.
self._reactor_timeout_call = reactor.callLater(
timeout, self._raiseReactorTimeoutError, timeout)
# Don't start the reactor if it is already started.
# This can happen if we prevent stop in a previous run.
if reactor._started:
return
reactor._startedBefore = False
reactor._started = False
reactor._justStopped = False
reactor.startRunning()
def _iterateTestReactor(self, debug=False):
"""
Iterate the reactor.
"""
reactor.runUntilCurrent()
if debug: # noqa:cover
# When debug is enabled with iterate using a small delay in steps,
# to have a much better debug output.
# Otherwise the debug messages will flood the output.
print(
u'delayed: %s\n'
u'threads: %s\n'
u'writers: %s\n'
u'readers: %s\n'
u'threadpool size: %s\n'
u'threadpool threads: %s\n'
u'threadpool working: %s\n'
u'\n' % (
self._reactorQueueToString(),
reactor.threadCallQueue,
reactor.getWriters(),
reactor.getReaders(),
reactor.getThreadPool().q.qsize(),
self._threadPoolThreads(),
self._threadPoolWorking(),
)
)
t2 = reactor.timeout()
# For testing we want to force to reactor to wake at an
# interval of at most 1 second.
if t2 is None or t2 > 1:
t2 = 0.1
t = reactor.running and t2
reactor.doIteration(t)
else:
# FIXME:4428:
# When not executed in debug mode, some test will fail as they
# will not spin the reactor.
# To not slow down all the tests, we run with a very small value.
reactor.doIteration(0.000001)
def _shutdownTestReactor(self, prevent_stop=False):
"""
Called at the end of a test reactor run.
When prevent_stop=True, the reactor will not be stopped.
"""
if not self._timeout_reached:
# Everything fine, disable timeout.
if (
self._reactor_timeout_call and
not self._reactor_timeout_call.cancelled
):
self._reactor_timeout_call.cancel()
if prevent_stop:
# Don't continue with stop procedure.
return
# Let the reactor know that we want to stop reactor.
reactor.stop()
# Let the reactor run one more time to execute the stop code.
reactor.iterate()
# Set flag to fake a clean reactor.
reactor._startedBefore = False
reactor._started = False
reactor._justStopped = False
reactor.running = False
# Start running has consumed the startup events, so we need
# to restore them.
reactor.addSystemEventTrigger(
'during', 'startup', reactor._reallyStartRunning)
def _assertReactorIsClean(self):
"""
Check that the reactor has no delayed calls, readers or writers.
This should only be called at teardown.
"""
if reactor is None:
return
def raise_failure(location, reason):
raise AssertionError(
'Reactor is not clean. %s: %s' % (location, reason))
if reactor._started: # noqa:cover
# Reactor was not stopped, so stop it before raising the error.
self._shutdownTestReactor()
raise AssertionError('Reactor was not stopped.')
# Look at threads queue.
if len(reactor.threadCallQueue) > 0:
raise_failure('queued threads', reactor.threadCallQueue)
if reactor.threadpool and len(reactor.threadpool.working) > 0:
raise_failure('active threads', reactor.threadCallQueue)
pool_queue = self._threadPoolQueue()
if pool_queue:
raise_failure('threadpool queue', pool_queue)
if self._threadPoolWorking():
raise_failure('threadpool working', self._threadPoolWorking())
if self._threadPoolThreads():
raise_failure('threadpool threads', self._threadPoolThreads())
if len(reactor.getWriters()) > 0: # noqa:cover
raise_failure('writers', text_type(reactor.getWriters()))
for reader in reactor.getReaders():
excepted = False
for reader_type in self.EXCEPTED_READERS:
if isinstance(reader, reader_type):
excepted = True
break
if not excepted: # noqa:cover
raise_failure('readers', text_type(reactor.getReaders()))
for delayed_call in reactor.getDelayedCalls():
if delayed_call.active():
delayed_str = self._getDelayedCallName(delayed_call)
if delayed_str in self.EXCEPTED_DELAYED_CALLS:
continue
raise_failure('delayed calls', delayed_str)
def _runDeferred(
self, deferred, timeout=None, debug=False, prevent_stop=False):
"""
This is a low-level method. In most tests you would like to use
`getDeferredFailure` or `getDeferredResult`.
Run the deferred in the reactor loop.
Starts the reactor, waits for the deferred's execution,
raises an error on timeout, and stops the reactor.
This will do recursive calls, in case the original deferred returns
another deferred.
Usage::
checker = mk.credentialsChecker()
credentials = mk.credentials()
deferred = checker.requestAvatarId(credentials)
self._runDeferred(deferred)
self.assertIsNotFailure(deferred)
self.assertEqual('something', deferred.result)
"""
if not isinstance(deferred, Deferred):
raise AssertionError('This is not a deferred.')
if timeout is None:
timeout = self.DEFERRED_TIMEOUT
try:
self._initiateTestReactor(timeout=timeout)
self._executeDeferred(deferred, timeout, debug=debug)
finally:
self._shutdownTestReactor(
prevent_stop=prevent_stop)
def _executeDeferred(self, deferred, timeout, debug):
"""
Does the actual deferred execution.
"""
if not deferred.called:
deferred_done = False
while not deferred_done:
self._iterateTestReactor(debug=debug)
deferred_done = deferred.called
if self._timeout_reached:
raise AssertionError(
'Deferred took more than %d seconds to execute.' % timeout)
# Check executing all deferred from chained callbacks.
result = deferred.result
while isinstance(result, Deferred):
self._executeDeferred(result, timeout=timeout, debug=debug)
result = deferred.result
def executeReactor(self, timeout=None, debug=False, run_once=False):
"""
Run the reactor until no more delayed calls, readers,
writers or threads are in the queues.
Set run_once=True to only run the reactor once. This is useful if
you have a persistent deferred which will be removed only at the end
of the test.
Only use this for very high level integration code, where you don't
have the chance to get a "root" deferred.
In most tests you would like to use one of the
`getDeferredFailure` or `getDeferredResult`.
Usage::
protocol = mk.makeFTPProtocol()
transport = mk.makeStringTransportProtocol()
protocol.makeConnection(transport)
transport.protocol = protocol
protocol.lineReceived('FEAT')
self.executeReactor()
result = transport.value()
self.assertStartsWith('211-Features:\n', result)
"""
if timeout is None:
timeout = self.DEFERRED_TIMEOUT
self._initiateTestReactor(timeout=timeout)
# Set it to True to enter the first loop.
have_callbacks = True
while have_callbacks and not self._timeout_reached:
self._iterateTestReactor(debug=debug)
have_callbacks = False
# Check for active jobs in thread pool.
if reactor.threadpool:
if (
reactor.threadpool.working or
(reactor.threadpool.q.qsize() > 0)
):
time.sleep(0.01)
have_callbacks = True
continue
# Look at delayed calls.
for delayed in reactor.getDelayedCalls():
# We skip our own timeout call.
if delayed is self._reactor_timeout_call:
continue
if not delayed.func:
# Was already called.
continue
delayed_str = self._getDelayedCallName(delayed)
is_exception = False
for excepted_callback in self.EXCEPTED_DELAYED_CALLS:
if excepted_callback in delayed_str:
is_exception = True
if not is_exception:
# No need to look for other delayed calls.
have_callbacks = True
break
# No need to look for other things as we already know that we need
# to wait at least for delayed calls.
if have_callbacks:
continue
if run_once:
if have_callbacks:
raise AssertionError(
'Reactor queue still contains delayed deferred.\n'
'%s' % (self._reactorQueueToString()))
break
# Look at writers buffers:
if len(reactor.getWriters()) > 0:
have_callbacks = True
continue
for reader in reactor.getReaders():
have_callbacks = True
for excepted_reader in self.EXCEPTED_READERS:
if isinstance(reader, excepted_reader):
have_callbacks = False
break
if have_callbacks:
break
if have_callbacks:
continue
# Look at threads queue and active thread.
if len(reactor.threadCallQueue) > 0:
have_callbacks = True
continue
if reactor.threadpool and len(reactor.threadpool.working) > 0:
have_callbacks = True
continue
self._shutdownTestReactor()
def executeDelayedCalls(self, timeout=None, debug=False):
"""
Run the reactor until no more delayed calls are scheduled.
This will wait for delayed calls to be executed and will not stop
the reactor.
"""
if timeout is None:
timeout = self.DEFERRED_TIMEOUT
self._initiateTestReactor(timeout=timeout)
while not self._timeout_reached:
self._iterateTestReactor(debug=debug)
delayed_calls = reactor.getDelayedCalls()
try:
delayed_calls.remove(self._reactor_timeout_call)
except ValueError: # noqa:cover
# The timeout call might no longer be there.
pass
if not delayed_calls:
break
self._shutdownTestReactor(prevent_stop=True)
if self._reactor_timeout_failure is not None:
self._reactor_timeout_failure = None
# We stop the reactor on failures.
self._shutdownTestReactor()
raise AssertionError(
'executeDelayedCalls took more than %s' % (timeout,))
def executeReactorUntil(
self, callable, timeout=None, debug=False, prevent_stop=True):
"""
Run the reactor until callable returns `True`.
"""
if timeout is None:
timeout = self.DEFERRED_TIMEOUT
self._initiateTestReactor(timeout=timeout)
while not self._timeout_reached:
self._iterateTestReactor(debug=debug)
if callable(reactor):
break
self._shutdownTestReactor(prevent_stop=prevent_stop)
def iterateReactor(self, count=1, timeout=None, debug=False):
"""
Iterate the reactor without stopping it.
"""
iterations = [False] * (count - 1)
iterations.append(True)
self.executeReactorUntil(
lambda _: iterations.pop(0), timeout=timeout, debug=debug)
def iterateReactorWithStop(self, count=1, timeout=None, debug=False):
"""
Iterate the reactor and stop it at the end.
"""
iterations = [False] * (count - 1)
iterations.append(True)
self.executeReactorUntil(
lambda _: iterations.pop(0),
timeout=timeout,
debug=debug,
prevent_stop=False,
)
def iterateReactorForSeconds(self, duration=1, debug=False):
"""
Iterate the reactor for `duration` seconds.
"""
start = time.time()
self.executeReactorUntil(
lambda _: time.time() - start > duration,
timeout=duration + 0.1,
debug=debug,
prevent_stop=False,
)
def _getDelayedCallName(self, delayed_call):
"""
Return a string representation of the delayed call.
"""
raw_name = text_type(delayed_call.func)
raw_name = raw_name.replace('<function ', '')
raw_name = raw_name.replace('<bound method ', '')
return raw_name.split(' ', 1)[0]
def getDeferredFailure(
self, deferred, timeout=None, debug=False, prevent_stop=False):
"""
Run the deferred and return the failure.
Usage::
checker = mk.credentialsChecker()
credentials = mk.credentials()
deferred = checker.requestAvatarId(credentials)
failure = self.getDeferredFailure(deferred)
self.assertFailureType(AuthenticationError, failure)
"""
self._runDeferred(
deferred,
timeout=timeout,
debug=debug,
prevent_stop=prevent_stop,
)
self.assertIsFailure(deferred)
failure = deferred.result
self.ignoreFailure(deferred)
return failure
def successResultOf(self, deferred):
"""
Return the current success result of C{deferred} or raise
C{self.failException}.
@param deferred: A L{Deferred<twisted.internet.defer.Deferred>} which
has a success result. This means
L{Deferred.callback<twisted.internet.defer.Deferred.callback>} or
L{Deferred.errback<twisted.internet.defer.Deferred.errback>} has
been called on it and it has reached the end of its callback chain
and the last callback or errback returned a
non-L{failure.Failure}.
@type deferred: L{Deferred<twisted.internet.defer.Deferred>}
@raise SynchronousTestCase.failureException: If the
L{Deferred<twisted.internet.defer.Deferred>} has no result or has
a failure result.
@return: The result of C{deferred}.
"""
# FIXME:1370:
# Remove / re-route this code after upgrading to Twisted 13.0.
result = []
deferred.addBoth(result.append)
if not result:
self.fail(
"Success result expected on %r, found no result instead" % (
deferred,))
elif isinstance(result[0], Failure):
self.fail(
"Success result expected on %r, "
"found failure result instead:\n%s" % (
deferred, result[0].getBriefTraceback().decode(
'utf-8', errors='replace')))
else:
return result[0]
def failureResultOf(self, deferred, *expectedExceptionTypes):
"""
Return the current failure result of C{deferred} or raise
C{self.failException}.
@param deferred: A L{Deferred<twisted.internet.defer.Deferred>} which
has a failure result. This means
L{Deferred.callback<twisted.internet.defer.Deferred.callback>} or
L{Deferred.errback<twisted.internet.defer.Deferred.errback>} has
been called on it and it has reached the end of its callback chain
and the last callback or errback raised an exception or returned a
L{failure.Failure}.
@type deferred: L{Deferred<twisted.internet.defer.Deferred>}
@param expectedExceptionTypes: Exception types to expect - if
provided, and the exception wrapped by the failure result is
not one of the types provided, then this test will fail.
@raise SynchronousTestCase.failureException: If the
L{Deferred<twisted.internet.defer.Deferred>} has no result, has a
success result, or has an unexpected failure result.
@return: The failure result of C{deferred}.
@rtype: L{failure.Failure}
"""
# FIXME:1370:
# Remove / re-route this code after upgrading to Twisted 13
result = []
deferred.addBoth(result.append)
if not result:
self.fail(
"Failure result expected on %r, found no result instead" % (
deferred,))
elif not isinstance(result[0], Failure):
self.fail(
"Failure result expected on %r, "
"found success result (%r) instead" % (deferred, result[0]))
elif (expectedExceptionTypes and
not result[0].check(*expectedExceptionTypes)):
expectedString = " or ".join([
'.'.join((t.__module__, t.__name__)) for t in
expectedExceptionTypes])
self.fail(
"Failure of type (%s) expected on %r, "
"found type %r instead: %s" % (
expectedString, deferred, result[0].type,
result[0].getBriefTraceback().decode(
'utf-8', errors='replace')))
else:
return result[0]
def assertNoResult(self, deferred):
"""
Assert that C{deferred} does not have a result at this point.
If the assertion succeeds, then the result of C{deferred} is left
unchanged. Otherwise, any L{failure.Failure} result is swallowed.
@param deferred: A L{Deferred<twisted.internet.defer.Deferred>}
without a result. This means that neither
L{Deferred.callback<twisted.internet.defer.Deferred.callback>} nor
L{Deferred.errback<twisted.internet.defer.Deferred.errback>} has
been called, or that the
L{Deferred<twisted.internet.defer.Deferred>} is waiting on another
L{Deferred<twisted.internet.defer.Deferred>} for a result.
@type deferred: L{Deferred<twisted.internet.defer.Deferred>}
@raise SynchronousTestCase.failureException: If the
L{Deferred<twisted.internet.defer.Deferred>} has a result.
"""
# FIXME:1370:
# Remove / re-route this code after upgrading to Twisted 13
result = []
def cb(res):
result.append(res)
return res
deferred.addBoth(cb)
if result:
# If there is already a failure, the self.fail below will
# report it, so swallow it in the deferred
deferred.addErrback(lambda _: None)
self.fail(
"No result expected on %r, found %r instead" % (
deferred, result[0]))
def getDeferredResult(
self, deferred, timeout=None, debug=False, prevent_stop=False):
"""
Run the deferred and return the result.
Usage::
checker = mk.credentialsChecker()
credentials = mk.credentials()
deferred = checker.requestAvatarId(credentials)
result = self.getDeferredResult(deferred)
self.assertEqual('something', result)
"""
self._runDeferred(
deferred,
timeout=timeout,
debug=debug,
prevent_stop=prevent_stop,
)
self.assertIsNotFailure(deferred)
return deferred.result
def assertWasCalled(self, deferred):
"""
Check that deferred was called.
"""
if not deferred.called:
raise AssertionError('This deferred was not called yet.')
def ignoreFailure(self, deferred):
"""
Ignore the current failure on the deferred.
It transforms a failure into the result `None` so that the failure
will not be raised at reactor shutdown as unhandled.
"""
deferred.addErrback(lambda failure: None)
def assertIsFailure(self, deferred):
"""
Check that deferred is a failure.
"""
if not isinstance(deferred.result, Failure):
raise AssertionError('Deferred is not a failure.')
def assertIsNotFailure(self, deferred):
"""
Raise assertion error if deferred is a Failure.
The failed deferred is handled by this method, to avoid propagating
the error into the reactor.
"""
self.assertWasCalled(deferred)
if isinstance(deferred.result, Failure):
error = deferred.result
self.ignoreFailure(deferred)
raise AssertionError(
'Deferred contains a failure: %s' % (error))
def _get_os_version():
"""
On non-Linux this is just the os_name.
On Linux it is the distribution name and the version.
On Windows it is `nt` followed by the major and minor NT version.
It is not the marketing name.
We only support the Windows NT family.
See: https://en.wikipedia.org/wiki/Windows_NT#Releases
On OSX it returns `osx` followed by the version.
It is not the version of the underlying Darwin OS.
See: https://en.wikipedia.org/wiki/MacOS#Release_history
"""
if os.name == 'nt':
parts = platform.version().split('.')
return 'nt-%s.%s' % (parts[0], parts[1])
# We are now in Unix zone.
os_name = os.uname()[0].lower()
if os_name == 'darwin':
parts = platform.mac_ver()[0].split('.')
return 'osx-%s.%s' % (parts[0], parts[1])
if os_name == 'sunos':
parts = platform.release().split('.')
return 'solaris-%s' % (parts[1],)
if os_name == 'aix': # noqa:cover
return 'aix-%s.%s' % (platform.version(), platform.release())
if os_name != 'linux':
return process_capabilities.os_name
# We delay the import as it will call lsb_release.
import ld
distro_name = ld.id()
if distro_name == 'arch':
# Arch has no version.
return 'arch'
if distro_name in ['centos', 'ol']:
# Normalize all RHEL variants.
distro_name = 'rhel'
distro_version = ld.version().split('.', 1)[0]
return '%s-%s' % (distro_name, distro_version)
def _get_cpu_type():
"""
Return the CPU type as used in the brink.sh script.
"""
base = platform.processor()
if base == 'aarch64':
return 'arm64'
if base == 'x86_64':
return 'x64'
return base
_CI_NAMES = Bunch(
LOCAL='local',
GITHUB='github-actions',
TRAVIS='travis',
BUILDBOT='buildbot',
UNKNOWN='unknown-ci',
AZURE='azure-pipelines',
)
def _get_ci_name():
"""
Return the name of the CI on which the tests are currently executed.
"""
if os.environ.get('BUILDBOT', '').lower() == 'true':
return _CI_NAMES.BUILDBOT
if os.environ.get('GITHUB_ACTIONS', '').lower() == 'true':
return _CI_NAMES.GITHUB
if os.environ.get('TRAVIS', '').lower() == 'true':
return _CI_NAMES.TRAVIS
if os.environ.get('INFRASTRUCTURE', '') == 'AZUREPIPELINES':
return _CI_NAMES.AZURE
if os.environ.get('CI', '').lower() == 'true':
return _CI_NAMES.UNKNOWN
return _CI_NAMES.LOCAL
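# --- Illustrative sketch (not part of the original module): what the
# detection helpers above report. Output depends on the host; the values in
# the comments are examples only. Only defined, never called.
def _demo_environment_report():
    print('os version:', _get_os_version())  # e.g. 'rhel-8' or 'nt-10.0'
    print('cpu type:', _get_cpu_type())      # e.g. 'x64' or 'arm64'
    print('ci name:', _get_ci_name())        # 'local' outside any CI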
class ChevahTestCase(TwistedTestCase, AssertionMixin):
"""
Test case for Chevah tests.
Checks that temporary folder is clean at exit.
"""
os_name = process_capabilities.os_name
os_family = process_capabilities.os_family
os_version = _get_os_version()
cpu_type = process_capabilities.cpu_type
ci_name = _get_ci_name()
CI = _CI_NAMES
TEST_LANGUAGE = os.getenv('TEST_LANG', 'EN')
# List of partial thread names to ignore during the tearDown.
# No need for the full thread name.
excepted_threads = [
'MainThread',
'threaded_reactor',
'GlobalPool-WorkerHandler',
'GlobalPool-TaskHandler',
'GlobalPool-ResultHandler',
'PoolThread-twisted.internet.reactor',
]
# We assume that the hostname does not change during the test run;
# caching it should save a few DNS queries.
hostname = _get_hostname()
Bunch = Bunch
Mock = Mock
#: Obsolete. Please use self.patch and self.patchObject.
Patch = patch
_environ_user = None
_drop_user = '-'
def setUp(self):
super(ChevahTestCase, self).setUp()
self.__cleanup__ = []
self._cleanup_stack = []
self._teardown_errors = []
self.test_segments = None
def tearDown(self):
self.callCleanup()
self._checkTemporaryFiles()
threads = threading.enumerate()
if len(threads) > 1:
for thread in threads:
thread_name = thread.getName()
if self._isExceptedThread(thread_name):
continue
self._teardown_errors.append(AssertionError(
'There are still active threads, '
'beside the main thread: %s - %s' % (
thread_name, threads)))
super(ChevahTestCase, self).tearDown()
errors, self._teardown_errors = self._teardown_errors, None
if errors:
raise AssertionError('Cleanup errors: %r' % (errors,))
def _isExceptedThread(self, name):
"""
Return `True` if it is OK for the thread to exist after the test is done.
"""
for exception in self.excepted_threads:
if name in exception:
return True
if exception in name:
return True
return False
def addCleanup(self, function, *args, **kwargs):
"""
Overwrite unit-test behaviour to run cleanup method before tearDown.
"""
self.__cleanup__.append((function, args, kwargs))
def callCleanup(self):
"""
Call all cleanup methods.
If a cleanup fails, the remaining cleanups are still called and all
failures are reported together at tearDown.
"""
for function, args, kwargs in reversed(self.__cleanup__):
try:
function(*args, **kwargs)
except Exception as error: # noqa:cover
self._teardown_errors.append(error)
self.__cleanup__ = []
def enterCleanup(self):
"""
Called when start using stacked cleanups.
"""
self._cleanup_stack.append(self.__cleanup__)
self.__cleanup__ = []
def exitCleanup(self):
"""
To be called at the end of a stacked cleanup.
"""
self.callCleanup()
self.__cleanup__ = self._cleanup_stack.pop()
@contextlib.contextmanager
def stackedCleanup(self):
"""
Context manager for stacked cleanups.
"""
try:
self.enterCleanup()
yield
finally:
self.exitCleanup()
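# A hedged usage sketch of the stacked cleanup (method names as defined
# above): cleanups registered inside the `with` block run when the block
# exits, independently of the test-level cleanup list.
#
#     def test_download(self):
#         with self.stackedCleanup():
#             path, segments = self.tempFile(content='data')
#             ...  # cleanups registered here run when the block exits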
def _checkTemporaryFiles(self):
"""
Check that no temporary files or folders are present.
"""
# FIXME:922:
# Move all filesystem checks into a specialized class
if self.test_segments:
if mk.fs.isFolder(self.test_segments):
mk.fs.deleteFolder(
self.test_segments, recursive=True)
else:
mk.fs.deleteFile(self.test_segments)
checks = [
self.assertTempIsClean,
self.assertWorkingFolderIsClean,
]
errors = []
for check in checks:
try:
check()
except AssertionError as error:
errors.append(error.message)
if errors: # noqa:cover
self._teardown_errors.append(AssertionError(
'There are temporary files or folders left over.\n %s' % (
'\n'.join(errors))))
def shortDescription(self): # noqa:cover
"""
The short description for the test.
bla.bla.tests. is removed.
The format is customized for Chevah Nose runner.
This is only called when we run with -v or we show the error.
"""
class_name = text_type(self.__class__)[8:-2]
class_name = class_name.replace('.Test', ':Test')
tests_start = class_name.find('.tests.') + 7
class_name = class_name[tests_start:]
return "%s - %s.%s" % (
self._testMethodName,
class_name,
self._testMethodName)
def assertRaises(self, exception_class, callback=None, *args, **kwargs):
"""
Wrapper around the stdlib call to allow non-context usage.
"""
super_assertRaises = super(ChevahTestCase, self).assertRaises
if callback is None:
return super_assertRaises(exception_class)
with super_assertRaises(exception_class) as context:
callback(*args, **kwargs)
return context.exception
def assertSequenceEqual(self, first, second, msg=None, seq_type=None):
super(ChevahTestCase, self).assertSequenceEqual(
first, second, msg, seq_type)
for first_element, second_element in zip(first, second):
self.assertEqual(first_element, second_element)
def assertDictEqual(self, first, second, msg=None):
super(ChevahTestCase, self).assertDictEqual(first, second, msg)
first_keys = sorted(first.keys())
second_keys = sorted(second.keys())
first_values = [first[key] for key in first_keys]
second_values = [second[key] for key in second_keys]
self.assertSequenceEqual(first_keys, second_keys, msg, list)
self.assertSequenceEqual(first_values, second_values, msg, list)
def assertSetEqual(self, first, second, msg=None):
super(ChevahTestCase, self).assertSetEqual(first, second, msg)
first_elements = sorted(first)
second_elements = sorted(second)
self.assertSequenceEqual(first_elements, second_elements, msg, list)
def _baseAssertEqual(self, first, second, msg=None):
"""
Update to stdlib to make sure we don't compare str with unicode.
"""
if (
isinstance(first, text_type) and
not isinstance(second, text_type)
): # noqa:cover
if not msg:
msg = u'First is unicode while second is str for "%s".' % (
first,)
raise AssertionError(msg.encode('utf-8'))
if (
not isinstance(first, text_type) and
isinstance(second, text_type)
): # noqa:cover
if not msg:
msg = u'First is str while second is unicode for "%s".' % (
first,)
raise AssertionError(msg.encode('utf-8'))
return super(ChevahTestCase, self)._baseAssertEqual(
first, second, msg=msg)
@staticmethod
def getHostname():
"""
Return the hostname of the current system.
"""
return _get_hostname()
@classmethod
def initialize(cls, drop_user):
"""
Initialize the testing environment.
"""
cls._drop_user = drop_user
os.environ['DROP_USER'] = drop_user
if 'LOGNAME' in os.environ and 'USER' not in os.environ:
os.environ['USER'] = os.environ['LOGNAME']
if 'USER' in os.environ and 'USERNAME' not in os.environ:
os.environ['USERNAME'] = os.environ['USER']
if 'USERNAME' in os.environ and 'USER' not in os.environ:
os.environ['USER'] = os.environ['USERNAME']
cls._environ_user = os.environ['USER']
cls.cleanTemporaryFolder()
@classmethod
def dropPrivileges(cls):
'''Drop privileges to normal users.'''
if cls._drop_user == '-':
return
os.environ['USERNAME'] = cls._drop_user
os.environ['USER'] = cls._drop_user
# Test suite should be started as root and we drop effective user
# privileges.
system_users.dropPrivileges(username=cls._drop_user)
@staticmethod
def skipTest(message=''):
'''Return a SkipTest exception.'''
return SkipTest(message)
@property
def _caller_success_member(self):
'''Retrieve the 'success' member from the test case.'''
success_state = None
# We search starting with the second frame, since the first frame is
# the current one and we don't care about it.
for level in inspect.stack()[1:]:
try:
success_state = level[0].f_locals['success']
break
except KeyError:
success_state = None
if success_state is None:
raise AssertionError('Failed to find "success" attribute.')
return success_state
@staticmethod
def patch(*args, **kwargs):
"""
Helper for generic patching.
"""
return patch(*args, **kwargs)
@staticmethod
def patchObject(*args, **kwargs):
"""
Helper for patching objects.
"""
return patch.object(*args, **kwargs)
def now(self):
"""
Return current Unix timestamp.
"""
return time.time()
@classmethod
def cleanTemporaryFolder(cls):
"""
Clean all test files from temporary folder.
Return a list of members which were removed.
"""
return cls._cleanFolder(mk.fs.temp_segments)
@classmethod
def cleanWorkingFolder(cls):
path = mk.fs.getAbsoluteRealPath('.')
segments = mk.fs.getSegmentsFromRealPath(path)
return cls._cleanFolder(segments, only_marked=True)
@classmethod
def _cleanFolder(cls, folder_segments, only_marked=False):
"""
Clean all test files from folder_segments.
Return a list of members which were removed.
"""
if not mk.fs.exists(folder_segments):
return []
# In case we are running the test suite as super user,
# we use super filesystem for cleaning.
if cls._environ_user == cls._drop_user:
temp_avatar = SuperAvatar()
else:
temp_avatar = DefaultAvatar()
temp_filesystem = LocalFilesystem(avatar=temp_avatar)
temp_members = []
for member in (temp_filesystem.getFolderContent(folder_segments)):
if only_marked and member.find(TEST_NAME_MARKER) == -1:
continue
temp_members.append(member)
segments = folder_segments[:]
segments.append(member)
if temp_filesystem.isFolder(segments):
temp_filesystem.deleteFolder(segments, recursive=True)
else:
temp_filesystem.deleteFile(segments)
return temp_members
@classmethod
def getPeakMemoryUsage(cls):
"""
Return the maximum memory usage in kilobytes.
"""
if cls.os_family == 'posix':
import resource
return resource.getrusage(resource.RUSAGE_SELF).ru_maxrss
elif cls.os_family == 'nt':
from wmi import WMI
local_wmi = WMI('.')
query = (
u'SELECT PeakWorkingSetSize '
u'FROM Win32_Process '
u'WHERE Handle=%d' % os.getpid())
result = local_wmi.query(query.encode('utf-8'))
peak_working_set_size = int(result[0].PeakWorkingSetSize)
# FIXME:2099:
# Windows XP reports the value in bytes instead of kilobytes.
return int(peak_working_set_size)
else:
raise AssertionError('OS not supported.')
def folderInTemp(self, *args, **kwargs):
"""
Create a folder in the default temp folder and mark it for cleanup.
"""
kwargs['cleanup'] = self.addCleanup
return mk.fs.folderInTemp(*args, **kwargs)
def fileInTemp(self, *args, **kwargs):
"""
Create a file in the default temp folder and mark it for cleanup.
"""
kwargs['cleanup'] = self.addCleanup
return mk.fs.fileInTemp(*args, **kwargs)
def assertIn(self, target, source):
"""
Overwrite stdlib to swap the arguments.
"""
if source not in target:
message = u'%s not in %s.' % (repr(source), repr(target))
raise AssertionError(message.encode('utf-8'))
def assertIsInstance(self, expected_type, value, msg=None):
"""
Raise an exception if `value` is not an instance of `expected_type`.
"""
# Python 2.7's stdlib already defines assertIsInstance, but with the
# arguments swapped.
if not inspect.isclass(expected_type):
expected_type, value = value, expected_type
if not isinstance(value, expected_type):
raise AssertionError(
"Expecting type %s, but got %s. %s" % (
expected_type, type(value), msg))
def tempPath(self, prefix='', suffix=''):
"""
Return (path, segments) for a path which is not created yet.
"""
return mk.fs.makePathInTemp(prefix=prefix, suffix=suffix)
def tempPathCleanup(self, prefix='', suffix=''):
"""
Return (path, segments) for a path which is not created yet but which
will be automatically removed.
"""
return mk.fs.pathInTemp(
cleanup=self.addCleanup, prefix=prefix, suffix=suffix)
def tempFile(self, content='', prefix='', suffix='', cleanup=True):
"""
Return (path, segments) for a new file created in temp which is
auto cleaned.
"""
segments = mk.fs.createFileInTemp(prefix=prefix, suffix=suffix)
path = mk.fs.getRealPathFromSegments(segments)
if cleanup:
self.addCleanup(mk.fs.deleteFile, segments)
# Open the file outside the try block so that a failed open does not
# trigger close() on an unbound name.
opened_file = mk.fs.openFileForWriting(segments)
try:
    opened_file.write(content)
finally:
    opened_file.close()
return (path, segments)
def tempFolder(self, name=None, prefix='', suffix=''):
"""
Create a new temp folder and return its path and segments, which is
auto cleaned.
"""
segments = mk.fs.createFolderInTemp(
foldername=name, prefix=prefix, suffix=suffix)
path = mk.fs.getRealPathFromSegments(segments)
self.addCleanup(mk.fs.deleteFolder, segments, recursive=True)
return (path, segments)
class FileSystemTestCase(ChevahTestCase):
"""
Common test case for all file-system tests using a real OS account.
"""
@classmethod
def setUpClass(cls):
# FIXME:924:
# Disabled when we can not find the home folder path.
if not process_capabilities.get_home_folder:
raise cls.skipTest()
super(FileSystemTestCase, cls).setUpClass()
cls.os_user = cls.setUpTestUser()
home_folder_path = system_users.getHomeFolder(
username=cls.os_user.name, token=cls.os_user.token)
cls.avatar = mk.makeFilesystemOSAvatar(
name=cls.os_user.name,
home_folder_path=home_folder_path,
token=cls.os_user.token,
)
cls.filesystem = LocalFilesystem(avatar=cls.avatar)
@classmethod
def tearDownClass(cls):
if not cls.os_user.windows_create_local_profile:
os_administration.deleteHomeFolder(cls.os_user)
os_administration.deleteUser(cls.os_user)
super(FileSystemTestCase, cls).tearDownClass()
@classmethod
def setUpTestUser(cls):
"""
Set-up OS user for file system testing.
"""
from chevah.compat.testing import TEST_ACCOUNT_GROUP
user = mk.makeTestUser(home_group=TEST_ACCOUNT_GROUP)
os_administration.addUser(user)
return user
def setUp(self):
super(FileSystemTestCase, self).setUp()
# Initialized only to clean the home folder.
test_filesystem = LocalTestFilesystem(avatar=self.avatar)
test_filesystem.cleanHomeFolder()
class OSAccountFileSystemTestCase(FileSystemTestCase):
"""
Test case for tests that need a dedicated local OS account present.
"""
#: User will be created before running the test case and removed on
#: teardown.
CREATE_TEST_USER = None
@classmethod
def setUpTestUser(cls):
"""
Add `CREATE_TEST_USER` to local OS.
"""
os_administration.addUser(cls.CREATE_TEST_USER)
return cls.CREATE_TEST_USER
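# A hedged usage sketch (TEST_ACCOUNT stands for a hypothetical TestUser
# object, and the attribute access on the avatar is illustrative); the
# account is created before the test case runs and removed on teardown:
#
#     class TestPosixHomeFolder(OSAccountFileSystemTestCase):
#         CREATE_TEST_USER = TEST_ACCOUNT
#
#         def test_existing_home(self):
#             home_segments = mk.fs.getSegmentsFromRealPath(
#                 self.avatar.home_folder_path)
#             self.assertTrue(self.filesystem.isFolder(home_segments))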
# ---------------------------------------------------------------------------
# generative_deep_learning/build_network.py
# slaily/deep-learning-bits @ cb9ce7ec539efbdfcaa023d141466f919bd31b71 | MIT | 1,912 bytes
# ---------------------------------------------------------------------------
import keras
import numpy as np

from keras import layers
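# This listing assumes `text`, `chars`, `char_indices`, `maxlen`, `x` and `y`
# were produced by an earlier vectorization step that is not part of this
# file. A minimal sketch of that step, assuming `text` already holds the
# training corpus as a single string:
maxlen = 60
step = 3
sentences = [text[i: i + maxlen] for i in range(0, len(text) - maxlen, step)]
next_chars = [text[i + maxlen] for i in range(0, len(text) - maxlen, step)]
chars = sorted(set(text))
char_indices = {char: index for index, char in enumerate(chars)}
x = np.zeros((len(sentences), maxlen, len(chars)), dtype=bool)
y = np.zeros((len(sentences), len(chars)), dtype=bool)
for i, sentence in enumerate(sentences):
    for t, char in enumerate(sentence):
        x[i, t, char_indices[char]] = 1
    y[i, char_indices[next_chars[i]]] = 1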
# Single-layer LSTM model for next-character prediction
model = keras.models.Sequential()
model.add(layers.LSTM(128, input_shape=(maxlen, len(chars))))
model.add(layers.Dense(len(chars), activation='softmax'))
# Model compilation configuration
optimizer = keras.optimizers.RMSprop(lr=0.01)
model.compile(loss='categorical_crossentropy', optimizer=optimizer)
# Function to sample the next character given the model’s predictions
def sample(preds, temperature=1.0):
preds = np.asarray(preds).astype('float64')
preds = np.log(preds) / temperature
exp_preds = np.exp(preds)
preds = exp_preds / np.sum(exp_preds)
probas = np.random.multinomial(1, preds, 1)
return np.argmax(probas)
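# For intuition: with preds = [0.5, 0.3, 0.2], temperature 0.2 sharpens the
# reweighted distribution to roughly [0.92, 0.07, 0.01] (near-greedy), while
# temperature 1.2 flattens it to roughly [0.47, 0.31, 0.22].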
# Text-generation loop
import sys
import random
# Trains the model for 59 epochs (1 through 59), sampling text after each
for epoch in range(1, 60):
print(f'Epoch: {epoch}')
model.fit(x, y, batch_size=128, epochs=1)
# Selects a text seed at random
start_index = random.randint(0, len(text) - maxlen - 1)
generated_text = text[start_index: start_index + maxlen]
print(f'--- Generating with seed: {generated_text} ---')
# Tries a range of different sampling temperatures
for temperature in [0.2, 0.5, 1.0, 1.2]:
print(f'--- Temperature {temperature} ---')
sys.stdout.write(generated_text)
# Generates 400 characters, starting from the seed text
for i in range(400):
sampled = np.zeros((1, maxlen, len(chars)))
for t, char in enumerate(generated_text):
sampled[0, t, char_indices[char]] = 1.
# Samples the next character
preds = model.predict(sampled, verbose=0)[0]
next_index = sample(preds, temperature)
next_char = chars[next_index]
generated_text += next_char
generated_text = generated_text[1:]
sys.stdout.write(next_char)
# ---------------------------------------------------------------------------
# examples/MDF/states.py
# 29riyasaxena/MDF @ 476e6950d0f14f29463eb4f6e3be518dfb2160a5 | Apache-2.0 | 2,610 bytes | 12 stars
# ---------------------------------------------------------------------------
"""
Example of ModECI MDF - Testing state variables
"""
from modeci_mdf.mdf import *
import sys
def main():
mod = Model(id="States")
mod_graph = Graph(id="state_example")
mod.graphs.append(mod_graph)
## Counter node
counter_node = Node(id="counter_node")
p1 = Parameter(id="increment", value=1)
counter_node.parameters.append(p1)
p2 = Parameter(id="count", value="count + increment")
counter_node.parameters.append(p2)
op1 = OutputPort(id="out_port", value=p2.id)
counter_node.output_ports.append(op1)
mod_graph.nodes.append(counter_node)
## Sine node...
sine_node = Node(id="sine_node")
sine_node.parameters.append(Parameter(id="amp", value=3))
sine_node.parameters.append(Parameter(id="period", value=0.4))
s1 = Parameter(
id="level", default_initial_value=0, time_derivative="6.283185 * rate / period"
)
sine_node.parameters.append(s1)
s2 = Parameter(
id="rate",
default_initial_value=1,
time_derivative="-1 * 6.283185 * level / period",
)
sine_node.parameters.append(s2)
op1 = OutputPort(id="out_port", value="amp * level")
sine_node.output_ports.append(op1)
mod_graph.nodes.append(sine_node)
new_file = mod.to_json_file("%s.json" % mod.id)
new_file = mod.to_yaml_file("%s.yaml" % mod.id)
if "-run" in sys.argv:
verbose = True
# verbose = False
from modeci_mdf.utils import load_mdf, print_summary
from modeci_mdf.execution_engine import EvaluableGraph
eg = EvaluableGraph(mod_graph, verbose)
dt = 0.01
duration = 2
t = 0
recorded = {}
times = []
s = []
while t <= duration:
times.append(t)
print("====== Evaluating at t = %s ======" % (t))
if t == 0:
eg.evaluate() # replace with initialize?
else:
eg.evaluate(time_increment=dt)
s.append(eg.enodes["sine_node"].evaluable_outputs["out_port"].curr_value)
t += dt
if "-nogui" not in sys.argv:
import matplotlib.pyplot as plt
plt.plot(times, s)
plt.show()
if "-graph" in sys.argv:
mod.to_graph_image(
engine="dot",
output_format="png",
view_on_render=False,
level=3,
filename_root="states",
only_warn_on_fail=True, # Makes sure test of this doesn't fail on Windows on GitHub Actions
)
return mod_graph
if __name__ == "__main__":
main()
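# Typical invocations (flags exactly as handled above):
#   python states.py                # write States.json and States.yaml
#   python states.py -run           # also simulate 2 seconds with dt=0.01
#   python states.py -run -nogui    # simulate without the matplotlib plot
#   python states.py -graph        # render states.png via graphviz 'dot'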
# ---------------------------------------------------------------------------
# gaia_tools/xmatch/__init__.py
# henrysky/gaia_tools @ c151a1d8f6896d8ef5a379291baa8a1f027bd53b | MIT | 20,118 bytes | 44 stars
# ---------------------------------------------------------------------------
# Tools for cross-matching catalogs
import csv
import sys
import os
import os.path
import platform
import shutil
import subprocess
import tempfile
import warnings
WIN32= platform.system() == 'Windows'
import numpy
import astropy.coordinates as acoords
from astropy.table import Table
from astropy import units as u
from ..load.download import _ERASESTR
def xmatch(cat1,cat2,maxdist=2,
colRA1='RA',colDec1='DEC',epoch1=None,
colRA2='RA',colDec2='DEC',epoch2=None,
colpmRA2='pmra',colpmDec2='pmdec',
swap=False,
col_field=None):
"""
NAME:
xmatch
PURPOSE:
cross-match two catalogs (incl. proper motion in cat2 if epochs are different)
INPUT:
cat1 - First catalog
cat2 - Second catalog
maxdist= (2) maximum distance in arcsec
colRA1= ('RA') name of the tag in cat1 with the right ascension in degree in cat1 (assumed to be ICRS)
colDec1= ('DEC') name of the tag in cat1 with the declination in degree in cat1 (assumed to be ICRS)
epoch1= (2000.) epoch of the coordinates in cat1
colRA2= ('RA') name of the tag in cat2 with the right ascension in degree in cat2 (assumed to be ICRS)
colDec2= ('DEC') name of the tag in cat2 with the declination in degree in cat2 (assumed to be ICRS)
epoch2= (2000.) epoch of the coordinates in cat2
colpmRA2= ('pmra') name of the tag in cat2 with the proper motion in right ascension in degree in cat2 (assumed to be ICRS; includes cos(Dec)) [only used when epochs are different]
colpmDec2= ('pmdec') name of the tag in cat2 with the proper motion in declination in degree in cat2 (assumed to be ICRS) [only used when epochs are different]
swap= (False) if False, find closest matches in cat2 for each cat1 source; if True, do the opposite (important when one of the catalogs has duplicates)
col_field= (None) if None, simply cross-match on RA and Dec; if a string, then cross-match on RA and Dec with additional matching in the data tag specified by the string
OUTPUT:
(index into cat1 of matching objects,
index into cat2 of matching objects,
angular separation between matching objects)
HISTORY:
2016-09-12 - Written - Bovy (UofT)
2016-09-21 - Account for Gaia epoch 2015 - Bovy (UofT)
2019-07-07 - add additional catalog field matching - Leung (UofT)
"""
if epoch1 is None:
if 'ref_epoch' in cat1.dtype.fields:
epoch1= cat1['ref_epoch']
else:
epoch1= 2000.
if epoch2 is None:
if 'ref_epoch' in cat2.dtype.fields:
epoch2= cat2['ref_epoch']
else:
epoch2= 2000.
_check_epoch(cat1,epoch1)
_check_epoch(cat2,epoch2)
depoch= epoch2-epoch1
if numpy.any(depoch != 0.):
# Use proper motion to get both catalogs at the same time
dra=cat2[colpmRA2]/numpy.cos(cat2[colDec2]/180.*numpy.pi)\
/3600000.*depoch
ddec= cat2[colpmDec2]/3600000.*depoch
# Don't shift objects with non-existing proper motion
dra[numpy.isnan(cat2[colpmRA2])]= 0.
ddec[numpy.isnan(cat2[colpmDec2])]= 0.
else:
dra= 0.
ddec= 0.
mc1= acoords.SkyCoord(cat1[colRA1],cat1[colDec1],
unit=(u.degree, u.degree),frame='icrs')
mc2= acoords.SkyCoord(cat2[colRA2]-dra,cat2[colDec2]-ddec,
unit=(u.degree, u.degree),frame='icrs')
if col_field is not None:
try: # check if the field actually exists in both cat1/cat2
cat1[col_field]
cat2[col_field]
except KeyError:
    raise KeyError("'%s' does not exist in both catalogs" % col_field)
uniques = numpy.unique(cat1[col_field])
if swap:
    # Initialize distances to -1 so entries that never get matched can
    # be detected and filtered out at the end.
    d2d = numpy.ones(len(cat2)) * -1.
idx = numpy.zeros(len(cat2), dtype=int)
else:
d2d = numpy.ones(len(cat1)) * -1.
idx = numpy.zeros(len(cat1), dtype=int)
for unique in uniques:  # loop over each unique value of the matching field
idx_1 = numpy.arange(cat1[colRA1].shape[0])[cat1[col_field] == unique]
idx_2 = numpy.arange(cat2[colRA2].shape[0])[cat2[col_field] == unique]
if idx_1.shape[0] == 0 or idx_2.shape[0] == 0:  # value exists in only one of the catalogs
continue
if swap:
temp_idx, temp_d2d, d3d = mc2[idx_2].match_to_catalog_sky(mc1[idx_1])
m1 = numpy.arange(len(cat2))
idx[cat2[col_field] == unique] = idx_1[temp_idx]
d2d[cat2[col_field] == unique] = temp_d2d
else:
temp_idx, temp_d2d, d3d = mc1[idx_1].match_to_catalog_sky(mc2[idx_2])
m1 = numpy.arange(len(cat1))
idx[cat1[col_field] == unique] = idx_2[temp_idx]
d2d[cat1[col_field] == unique] = temp_d2d
d2d = d2d * temp_d2d.unit  # re-attach an angular unit to d2d so the "<" comparison below works
else:
if swap:
idx,d2d,d3d = mc2.match_to_catalog_sky(mc1)
m1= numpy.arange(len(cat2))
else:
idx,d2d,d3d = mc1.match_to_catalog_sky(mc2)
m1= numpy.arange(len(cat1))
# Filter out untouched entries (distance still -1) as well as matches beyond maxdist
mindx= ((d2d < maxdist*u.arcsec) & (0.*u.arcsec <= d2d))
m1= m1[mindx]
m2= idx[mindx]
if swap:
return (m2,m1,d2d[mindx])
else:
return (m1,m2,d2d[mindx])
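# A hedged usage sketch (cat1/cat2 stand for hypothetical structured arrays
# or astropy Tables with 'RA'/'DEC' columns in degrees):
#
#     m1, m2, sep = xmatch(cat1, cat2, maxdist=2)
#     matched1, matched2 = cat1[m1], cat2[m2]  # row-aligned matched sources
#     print(sep.to(u.arcsec))                  # angular separations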
def cds(cat,xcat='vizier:I/350/gaiaedr3',maxdist=2,colRA='RA',colDec='DEC',
selection='best',epoch=None,colpmRA='pmra',colpmDec='pmdec',
savefilename=None,gaia_all_columns=False):
"""
NAME:
cds
PURPOSE:
Cross-match against a catalog in the CDS archive using the CDS cross-matching service (http://cdsxmatch.u-strasbg.fr/xmatch); uses the curl interface
INPUT:
cat - a catalog to cross match, requires 'RA' and 'DEC' keywords (see below)
xcat= ('vizier:I/350/gaiaedr3') name of the catalog to cross-match against, in a format understood by the CDS cross-matching service (see http://cdsxmatch.u-strasbg.fr/xmatch/doc/available-tables.html; things like 'vizier:Tycho2' or 'vizier:I/345/gaia2')
maxdist= (2) maximum distance in arcsec
colRA= ('RA') name of the tag in cat with the right ascension
colDec= ('DEC') name of the tag in cat with the declination
selection= ('best') select either all matches or the best match according to CDS (see 'selection' at http://cdsxmatch.u-strasbg.fr/xmatch/doc/API-calls.html)
epoch= (2000.) epoch of the coordinates in cat
colpmRA= ('pmra') name of the tag in cat with the proper motion in right ascension in degree in cat (assumed to be ICRS; includes cos(Dec)) [only used when epoch != 2000.]
colpmDec= ('pmdec') name of the tag in cat with the proper motion in declination in degree in cat (assumed to be ICRS) [only used when epoch != 2000.]
gaia_all_columns= (False) set to True if you are matching against Gaia DR2 and want *all* columns returned; this runs a query at the Gaia Archive, which may or may not work...
savefilename= (None) if set, save the output from CDS to this path; can match back using cds_matchback
OUTPUT:
(xcat entries for those that match,
indices into cat of matching sources: index[0] is cat index of xcat[0])
HISTORY:
2016-09-12 - Written based on RC catalog code - Bovy (UofT)
2016-09-21 - Account for Gaia epoch 2015 - Bovy (UofT)
2018-05-08 - Added gaia_all_columns - Bovy (UofT)
"""
if epoch is None:
if 'ref_epoch' in cat.dtype.fields:
epoch= cat['ref_epoch']
else:
epoch= 2000.
_check_epoch(cat,epoch)
depoch= epoch-2000.
if numpy.any(depoch != 0.):
# Use proper motion to get both catalogs at the same time
dra=cat[colpmRA]/numpy.cos(cat[colDec]/180.*numpy.pi)\
/3600000.*depoch
ddec= cat[colpmDec]/3600000.*depoch
# Don't shift objects with non-existing proper motion
dra[numpy.isnan(cat[colpmRA])]= 0.
ddec[numpy.isnan(cat[colpmDec])]= 0.
else:
dra= numpy.zeros(len(cat))
ddec= numpy.zeros(len(cat))
if selection != 'all': selection= 'best'
if selection == 'all':
raise NotImplementedError("selection='all' CDS cross-match not currently implemented")
# Write positions
posfilename= tempfile.mktemp('.csv',dir=os.getcwd())
resultfilename= tempfile.mktemp('.csv',dir=os.getcwd())
with open(posfilename,'w') as csvfile:
wr= csv.writer(csvfile,delimiter=',',quoting=csv.QUOTE_MINIMAL)
wr.writerow(['RA','DEC'])
for ii in range(len(cat)):
wr.writerow([(cat[ii][colRA]-dra[ii]+360.) % 360.,
             cat[ii][colDec]-ddec[ii]])
_cds_match_batched(resultfilename,posfilename,maxdist,selection,xcat)
# Directly match on input RA
ma= cds_load(resultfilename)
if gaia_all_columns:
from astroquery.gaia import Gaia
# Write another temporary file with the XML output of the cross-match
tab= Table(numpy.array([ma['source_id'],ma['RA'],ma['DEC']]).T,
names=('source_id','RA','DEC'),
dtype=('int64','float64','float64'))
xmlfilename= tempfile.mktemp('.xml',dir=os.getcwd())
tab.write(xmlfilename,format='votable')
# Get the data release identifier from the catalog name
table_identifier = xcat.split('/')[-1]
if table_identifier == 'gaia2':
table_identifier = 'gaiadr2'
try:
job= Gaia.launch_job_async(
"""select g.*, m.RA as mRA, m.DEC as mDEC
from %s.gaia_source as g
inner join tap_upload.my_table as m on m.source_id = g.source_id""" % table_identifier,
upload_resource=xmlfilename,
upload_table_name="my_table")
ma= job.get_results()
except Exception:
    print("gaia_tools.xmatch.cds failed to retrieve all gaia columns, returning just the default returned by the CDS xMatch instead...")
else:
ma.rename_column('mra','RA')
ma.rename_column('mdec','DEC')
finally:
os.remove(xmlfilename)
# Remove temporary files
os.remove(posfilename)
if savefilename is None:
os.remove(resultfilename)
else:
shutil.move(resultfilename,savefilename)
# Match back to the original catalog
mai= cds_matchback(cat,ma,colRA=colRA,colDec=colDec,epoch=epoch,
colpmRA=colpmRA,colpmDec=colpmDec)
return (ma,mai)
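# A hedged usage sketch: match a local catalog against Gaia EDR3 through the
# CDS service and index back into the original rows:
#
#     ma, mai = cds(cat, xcat='vizier:I/350/gaiaedr3', maxdist=2)
#     gaia_columns = ma         # CDS/Gaia columns for the matched sources
#     original_rows = cat[mai]  # corresponding rows of the input catalog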
def _cds_match_batched(resultfilename,posfilename,maxdist,selection,xcat,
nruns_necessary=1):
"""CDS xMatch (sometimes?) fails for large matches, because of a time-out,
so we recursively split until the batches are small enough to not fail"""
# Figure out which of the hierarchy we are running
try:
runs= ''.join([str(int(r)-1)
for r in posfilename.split('csv.')[-1].split('.')])
except ValueError:
runs= ''
nruns= 2**len(runs)
if nruns >= nruns_necessary:
# Only run this level's match if we don't already know that we should
# be using smaller batches
_cds_basic_match(resultfilename,posfilename,maxdist,selection,xcat)
try:
ma= cds_load(resultfilename)
except ValueError: # Assume this is the time-out failure
pass
else:
return nruns
# xMatch failed because of time-out, split
posfilename1= posfilename+'.1'
posfilename2= posfilename+'.2'
resultfilename1= resultfilename+'.1'
resultfilename2= resultfilename+'.2'
# Figure out which of the hierarchy we are running
runs= ''.join([str(int(r)-1)
for r in posfilename1.split('csv.')[-1].split('.')])
nruns= 2**len(runs)
thisrun1= 1+int(runs,2)
thisrun2= 1+int(''.join([str(int(r)-1)
for r in posfilename2.split('csv.')[-1].split('.')]),2)
# Count the number of objects
with open(posfilename,'r') as posfile:
num_lines= sum(1 for line in posfile)
# Write the header line
with open(posfilename1,'w') as posfile1:
with open(posfilename,'r') as posfile:
posfile1.write(posfile.readline())
with open(posfilename2,'w') as posfile2:
with open(posfilename,'r') as posfile:
posfile2.write(posfile.readline())
# Cut in half
cnt= 0
with open(posfilename,'r') as posfile:
with open(posfilename1,'a') as posfile1:
with open(posfilename2,'a') as posfile2:
for line in posfile:
if cnt == 0:
cnt+= 1
continue
if cnt < num_lines//2:
posfile1.write(line)
cnt+= 1  # once past the halfway point the count no longer matters
else:
posfile2.write(line)
# Run each
sys.stdout.write('\r'+"Working on CDS xMatch batch {} / {} ...\r"\
.format(thisrun1,nruns))
sys.stdout.flush()
nruns_necessary= _cds_match_batched(resultfilename1,posfilename1,
maxdist,selection,xcat,
nruns_necessary=nruns_necessary)
sys.stdout.write('\r'+"Working on CDS xMatch batch {} / {} ...\r"\
.format(thisrun2,nruns))
sys.stdout.flush()
nruns_necessary= _cds_match_batched(resultfilename2,posfilename2,
maxdist,selection,xcat,
nruns_necessary=nruns_necessary)
sys.stdout.write('\r'+_ERASESTR+'\r')
sys.stdout.flush()
# Combine results
with open(resultfilename,'w') as resultfile:
with open(resultfilename1,'r') as resultfile1:
for line in resultfile1:
resultfile.write(line)
with open(resultfilename2,'r') as resultfile2:
for line in resultfile2:
if line[0] == 'a': continue
resultfile.write(line)
# Remove intermediate files
os.remove(posfilename1)
os.remove(posfilename2)
os.remove(resultfilename1)
os.remove(resultfilename2)
return nruns_necessary
def _cds_basic_match(resultfilename,posfilename,maxdist,selection,xcat):
# Send to CDS for matching
result= open(resultfilename,'w')
try:
subprocess.check_call(['curl',
'-X','POST',
'-F','request=xmatch',
'-F','distMaxArcsec=%i' % maxdist,
'-F','selection=%s' % selection,
'-F','RESPONSEFORMAT=csv',
'-F','cat1=@%s' % os.path.basename(posfilename),
'-F','colRA1=RA',
'-F','colDec1=DEC',
'-F','cat2=%s' % xcat,
'http://cdsxmatch.u-strasbg.fr/xmatch/api/v1/sync'],
stdout=result)
except subprocess.CalledProcessError:
os.remove(posfilename)
if os.path.exists(resultfilename):
result.close()
os.remove(resultfilename)
result.close()
return None
def cds_load(filename):
if WIN32:
# Windows does not have float128, but source_id is a 64-bit integer
# that would lose precision as float64. Work around this by first
# reading everything as float64 and then declaring source_id as an
# integer column; source_id is always an integer, so its precision is kept.
data = numpy.genfromtxt(filename, delimiter=',', skip_header=0,
filling_values=-9999.99, names=True, max_rows=1,
dtype='float64')  # read only the first row; we just need the column names
to_list = list(data.dtype.names)
# Construct a dtype list where everything is float64, except
# 'source_id', which becomes an unsigned 64-bit integer.
dtype_list = [('{}'.format(i), numpy.float64) for i in to_list]
dtype_list[dtype_list.index(('source_id', numpy.float64))] = ('source_id', numpy.uint64)
return numpy.genfromtxt(filename, delimiter=',', skip_header=0,
filling_values=-9999.99, names=True,
dtype=dtype_list)
else:
return numpy.genfromtxt(filename, delimiter=',', skip_header=0,
filling_values=-9999.99, names=True,
dtype='float128')
def cds_matchback(cat,xcat,colRA='RA',colDec='DEC',selection='best',
epoch=None,colpmRA='pmra',colpmDec='pmdec',):
"""
NAME:
cds_matchback
PURPOSE:
Match a matched catalog from xmatch.cds back to the original catalog
INPUT:
cat - original catalog
xcat - matched catalog returned by xmatch.cds
colRA= ('RA') name of the tag in cat with the right ascension
colDec= ('DEC') name of the tag in cat with the declination
selection= ('best') select either all matches or the best match according to CDS (see 'selection' at http://cdsxmatch.u-strasbg.fr/xmatch/doc/API-calls.html)
epoch= (2000.) epoch of the coordinates in cat
colpmRA= ('pmra') name of the tag in cat with the proper motion in right ascension in degree in cat (assumed to be ICRS; includes cos(Dec)) [only used when epoch != 2000.]
colpmDec= ('pmdec') name of the tag in cat with the proper motion in declination in degree in cat (assumed to be ICRS) [only used when epoch != 2000.]
OUTPUT:
Array indices into cat of xcat entries: index[0] is cat index of xcat[0]
HISTORY:
2016-09-12 - Written - Bovy (UofT)
2018-05-04 - Account for non-zero epoch difference - Bovy (UofT)
"""
if selection != 'all': selection= 'best'
if selection == 'all':
raise NotImplementedError("selection='all' CDS cross-match not currently implemented")
if epoch is None:
if 'ref_epoch' in cat.dtype.fields:
epoch= cat['ref_epoch']
else:
epoch= 2000.
_check_epoch(cat,epoch)
depoch= epoch-2000.
if numpy.any(depoch != 0.):
# Use proper motion to get both catalogs at the same time
dra=cat[colpmRA]/numpy.cos(cat[colDec]/180.*numpy.pi)\
/3600000.*depoch
ddec= cat[colpmDec]/3600000.*depoch
# Don't shift objects with non-existing proper motion
dra[numpy.isnan(cat[colpmRA])]= 0.
ddec[numpy.isnan(cat[colpmDec])]= 0.
else:
dra= numpy.zeros(len(cat))
ddec= numpy.zeros(len(cat))
# Match to a very small tolerance, because the match is against the
# *original* coordinates, not the matched coordinates returned by CDS
mc1= acoords.SkyCoord(cat[colRA]-dra,cat[colDec]-ddec,
unit=(u.degree, u.degree),frame='icrs')
mc2= acoords.SkyCoord(xcat['RA'],xcat['DEC'],
unit=(u.degree, u.degree),frame='icrs')
idx,d2d,d3d = mc2.match_to_catalog_sky(mc1)
mindx= d2d < 1e-5*u.arcsec
return idx[mindx]
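# Hedged example: reload a result saved earlier via cds(..., savefilename=...)
# and recover the original catalog indices:
#
#     ma = cds_load('my_xmatch_result.csv')   # hypothetical file name
#     idx = cds_matchback(cat, ma)
#     matched_rows = cat[idx]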
def _check_epoch(cat,epoch):
warn_about_epoch= False
if 'ref_epoch' in cat.dtype.fields:
if 'designation' not in cat.dtype.fields: # Assume this is DR1
if numpy.any(numpy.fabs(epoch-2015.) > 0.01):
warn_about_epoch= True
elif 'Gaia DR2' in cat['designation'][0].decode('utf-8'):
if numpy.any(numpy.fabs(epoch-2015.5) > 0.01):
warn_about_epoch= True
if warn_about_epoch:
warnings.warn("You appear to be using a Gaia catalog, but are not setting the epoch to 2015. (DR1) or 2015.5 (DR2), which may lead to incorrect matches")
return None
# ---------------------------------------------------------------------------
# rllib/agents/dqn/dqn_torch_policy.py
# ThomasLecat/ray @ eb025ea8cb27583e8ef6287f5654f23d1ab270ef | Apache-2.0 | 14,873 bytes
# ---------------------------------------------------------------------------
from typing import Dict, List, Tuple
import gym
import ray
from ray.rllib.agents.a3c.a3c_torch_policy import apply_grad_clipping
from ray.rllib.agents.dqn.dqn_tf_policy import (
PRIO_WEIGHTS, Q_SCOPE, Q_TARGET_SCOPE, postprocess_nstep_and_prio)
from ray.rllib.agents.dqn.dqn_torch_model import DQNTorchModel
from ray.rllib.agents.dqn.simple_q_torch_policy import TargetNetworkMixin
from ray.rllib.models.catalog import ModelCatalog
from ray.rllib.models.modelv2 import ModelV2
from ray.rllib.models.torch.torch_action_dist import (TorchCategorical,
TorchDistributionWrapper)
from ray.rllib.policy.policy import Policy
from ray.rllib.policy.sample_batch import SampleBatch
from ray.rllib.policy.torch_policy import LearningRateSchedule
from ray.rllib.policy.torch_policy_template import build_torch_policy
from ray.rllib.utils.error import UnsupportedSpaceException
from ray.rllib.utils.exploration.parameter_noise import ParameterNoise
from ray.rllib.utils.framework import try_import_torch
from ray.rllib.utils.torch_ops import (FLOAT_MIN, huber_loss,
reduce_mean_ignore_inf,
softmax_cross_entropy_with_logits)
from ray.rllib.utils.typing import TensorType, TrainerConfigDict
torch, nn = try_import_torch()
F = None
if nn:
F = nn.functional
class QLoss:
def __init__(self,
q_t_selected,
q_logits_t_selected,
q_tp1_best,
q_probs_tp1_best,
importance_weights,
rewards,
done_mask,
gamma=0.99,
n_step=1,
num_atoms=1,
v_min=-10.0,
v_max=10.0):
if num_atoms > 1:
# Distributional Q-learning which corresponds to an entropy loss
z = torch.arange(0.0, num_atoms, dtype=torch.float32)  # same values as the deprecated, end-inclusive torch.range(0.0, num_atoms - 1)
z = v_min + z * (v_max - v_min) / float(num_atoms - 1)
# (batch_size, 1) * (1, num_atoms) = (batch_size, num_atoms)
r_tau = torch.unsqueeze(
rewards, -1) + gamma**n_step * torch.unsqueeze(
1.0 - done_mask, -1) * torch.unsqueeze(z, 0)
r_tau = torch.clamp(r_tau, v_min, v_max)
b = (r_tau - v_min) / ((v_max - v_min) / float(num_atoms - 1))
lb = torch.floor(b)
ub = torch.ceil(b)
# An indispensable check that most implementations miss: when b happens
# to be an integer, lb == ub, so pr_j(s', a*) would be discarded
# because (ub-b) == (b-lb) == 0.
floor_equal_ceil = (ub - lb < 0.5).float()
# (batch_size, num_atoms, num_atoms)
l_project = F.one_hot(lb.long(), num_atoms)
# (batch_size, num_atoms, num_atoms)
u_project = F.one_hot(ub.long(), num_atoms)
ml_delta = q_probs_tp1_best * (ub - b + floor_equal_ceil)
mu_delta = q_probs_tp1_best * (b - lb)
ml_delta = torch.sum(
l_project * torch.unsqueeze(ml_delta, -1), dim=1)
mu_delta = torch.sum(
u_project * torch.unsqueeze(mu_delta, -1), dim=1)
m = ml_delta + mu_delta
# Rainbow paper claims that using this cross entropy loss for
# priority is robust and insensitive to `prioritized_replay_alpha`
self.td_error = softmax_cross_entropy_with_logits(
logits=q_logits_t_selected, labels=m)
self.loss = torch.mean(self.td_error * importance_weights)
self.stats = {
# TODO: better Q stats for dist dqn
"mean_td_error": torch.mean(self.td_error),
}
else:
q_tp1_best_masked = (1.0 - done_mask) * q_tp1_best
# compute RHS of bellman equation
q_t_selected_target = rewards + gamma**n_step * q_tp1_best_masked
# compute the error (potentially clipped)
self.td_error = q_t_selected - q_t_selected_target.detach()
self.loss = torch.mean(
importance_weights.float() * huber_loss(self.td_error))
self.stats = {
"mean_q": torch.mean(q_t_selected),
"min_q": torch.min(q_t_selected),
"max_q": torch.max(q_t_selected),
"mean_td_error": torch.mean(self.td_error),
}
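# Shape walk-through for the distributional (num_atoms > 1) branch above,
# with hypothetical sizes batch_size=32 and num_atoms=51: z is (51,); r_tau,
# b, lb and ub are (32, 51); l_project/u_project are (32, 51, 51) one-hot
# tensors; m is (32, 51) and each row sums to 1, matching
# q_logits_t_selected in the cross-entropy term.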
class ComputeTDErrorMixin:
def __init__(self):
def compute_td_error(obs_t, act_t, rew_t, obs_tp1, done_mask,
importance_weights):
input_dict = self._lazy_tensor_dict({SampleBatch.CUR_OBS: obs_t})
input_dict[SampleBatch.ACTIONS] = act_t
input_dict[SampleBatch.REWARDS] = rew_t
input_dict[SampleBatch.NEXT_OBS] = obs_tp1
input_dict[SampleBatch.DONES] = done_mask
input_dict[PRIO_WEIGHTS] = importance_weights
# Do forward pass on loss to update td error attribute
build_q_losses(self, self.model, None, input_dict)
return self.q_loss.td_error
self.compute_td_error = compute_td_error
def build_q_model_and_distribution(
policy: Policy, obs_space: gym.Space, action_space: gym.Space,
config: TrainerConfigDict) -> Tuple[ModelV2, TorchDistributionWrapper]:
if not isinstance(action_space, gym.spaces.Discrete):
raise UnsupportedSpaceException(
"Action space {} is not supported for DQN.".format(action_space))
if config["hiddens"]:
# try to infer the last layer size, otherwise fall back to 256
num_outputs = ([256] + config["model"]["fcnet_hiddens"])[-1]
config["model"]["no_final_linear"] = True
else:
num_outputs = action_space.n
# TODO(sven): Move option to add LayerNorm after each Dense
# generically into ModelCatalog.
add_layer_norm = (
isinstance(getattr(policy, "exploration", None), ParameterNoise)
or config["exploration_config"]["type"] == "ParameterNoise")
policy.q_model = ModelCatalog.get_model_v2(
obs_space=obs_space,
action_space=action_space,
num_outputs=num_outputs,
model_config=config["model"],
framework="torch",
model_interface=DQNTorchModel,
name=Q_SCOPE,
q_hiddens=config["hiddens"],
dueling=config["dueling"],
num_atoms=config["num_atoms"],
use_noisy=config["noisy"],
v_min=config["v_min"],
v_max=config["v_max"],
sigma0=config["sigma0"],
# TODO(sven): Move option to add LayerNorm after each Dense
# generically into ModelCatalog.
add_layer_norm=add_layer_norm)
policy.q_func_vars = policy.q_model.variables()
policy.target_q_model = ModelCatalog.get_model_v2(
obs_space=obs_space,
action_space=action_space,
num_outputs=num_outputs,
model_config=config["model"],
framework="torch",
model_interface=DQNTorchModel,
name=Q_TARGET_SCOPE,
q_hiddens=config["hiddens"],
dueling=config["dueling"],
num_atoms=config["num_atoms"],
use_noisy=config["noisy"],
v_min=config["v_min"],
v_max=config["v_max"],
sigma0=config["sigma0"],
# TODO(sven): Move option to add LayerNorm after each Dense
# generically into ModelCatalog.
add_layer_norm=add_layer_norm)
policy.target_q_func_vars = policy.target_q_model.variables()
return policy.q_model, TorchCategorical
def get_distribution_inputs_and_class(
policy: Policy,
model: ModelV2,
obs_batch: TensorType,
*,
explore: bool = True,
is_training: bool = False,
**kwargs) -> Tuple[TensorType, type, List[TensorType]]:
q_vals = compute_q_values(policy, model, obs_batch, explore, is_training)
q_vals = q_vals[0] if isinstance(q_vals, tuple) else q_vals
policy.q_values = q_vals
return policy.q_values, TorchCategorical, [] # state-out
def build_q_losses(policy: Policy, model, _,
train_batch: SampleBatch) -> TensorType:
config = policy.config
# Q-network evaluation.
q_t, q_logits_t, q_probs_t = compute_q_values(
policy,
policy.q_model,
train_batch[SampleBatch.CUR_OBS],
explore=False,
is_training=True)
# Target Q-network evaluation.
q_tp1, q_logits_tp1, q_probs_tp1 = compute_q_values(
policy,
policy.target_q_model,
train_batch[SampleBatch.NEXT_OBS],
explore=False,
is_training=True)
# Q scores for actions which we know were selected in the given state.
one_hot_selection = F.one_hot(train_batch[SampleBatch.ACTIONS],
policy.action_space.n)
q_t_selected = torch.sum(
torch.where(q_t > FLOAT_MIN, q_t,
torch.tensor(0.0, device=policy.device)) *
one_hot_selection, 1)
q_logits_t_selected = torch.sum(
q_logits_t * torch.unsqueeze(one_hot_selection, -1), 1)
# compute estimate of best possible value starting from state at t + 1
if config["double_q"]:
q_tp1_using_online_net, q_logits_tp1_using_online_net, \
q_dist_tp1_using_online_net = compute_q_values(
policy,
policy.q_model,
train_batch[SampleBatch.NEXT_OBS],
explore=False,
is_training=True)
q_tp1_best_using_online_net = torch.argmax(q_tp1_using_online_net, 1)
q_tp1_best_one_hot_selection = F.one_hot(q_tp1_best_using_online_net,
policy.action_space.n)
q_tp1_best = torch.sum(
torch.where(q_tp1 > FLOAT_MIN, q_tp1,
torch.tensor(0.0, device=policy.device)) *
q_tp1_best_one_hot_selection, 1)
q_probs_tp1_best = torch.sum(
q_probs_tp1 * torch.unsqueeze(q_tp1_best_one_hot_selection, -1), 1)
else:
q_tp1_best_one_hot_selection = F.one_hot(
torch.argmax(q_tp1, 1), policy.action_space.n)
q_tp1_best = torch.sum(
torch.where(q_tp1 > FLOAT_MIN, q_tp1,
torch.tensor(0.0, device=policy.device)) *
q_tp1_best_one_hot_selection, 1)
q_probs_tp1_best = torch.sum(
q_probs_tp1 * torch.unsqueeze(q_tp1_best_one_hot_selection, -1), 1)
policy.q_loss = QLoss(
q_t_selected, q_logits_t_selected, q_tp1_best, q_probs_tp1_best,
train_batch[PRIO_WEIGHTS], train_batch[SampleBatch.REWARDS],
train_batch[SampleBatch.DONES].float(), config["gamma"],
config["n_step"], config["num_atoms"], config["v_min"],
config["v_max"])
return policy.q_loss.loss
def adam_optimizer(policy: Policy,
config: TrainerConfigDict) -> "torch.optim.Optimizer":
return torch.optim.Adam(
policy.q_func_vars, lr=policy.cur_lr, eps=config["adam_epsilon"])
def build_q_stats(policy: Policy, batch) -> Dict[str, TensorType]:
return dict({
"cur_lr": policy.cur_lr,
}, **policy.q_loss.stats)
def setup_early_mixins(policy: Policy, obs_space, action_space,
config: TrainerConfigDict) -> None:
LearningRateSchedule.__init__(policy, config["lr"], config["lr_schedule"])
def after_init(policy: Policy, obs_space: gym.Space, action_space: gym.Space,
config: TrainerConfigDict) -> None:
ComputeTDErrorMixin.__init__(policy)
TargetNetworkMixin.__init__(policy, obs_space, action_space, config)
# Move target net to device (this is done automatically for the
# policy.model, but not for any other models the policy has).
policy.target_q_model = policy.target_q_model.to(policy.device)
def compute_q_values(policy: Policy,
model: ModelV2,
obs: TensorType,
explore,
is_training: bool = False):
config = policy.config
model_out, state = model({
SampleBatch.CUR_OBS: obs,
"is_training": is_training,
}, [], None)
if config["num_atoms"] > 1:
(action_scores, z, support_logits_per_action, logits,
probs_or_logits) = model.get_q_value_distributions(model_out)
else:
(action_scores, logits,
probs_or_logits) = model.get_q_value_distributions(model_out)
if config["dueling"]:
state_score = model.get_state_value(model_out)
if policy.config["num_atoms"] > 1:
support_logits_per_action_mean = torch.mean(
support_logits_per_action, dim=1)
support_logits_per_action_centered = (
support_logits_per_action - torch.unsqueeze(
support_logits_per_action_mean, dim=1))
support_logits_per_action = torch.unsqueeze(
state_score, dim=1) + support_logits_per_action_centered
support_prob_per_action = nn.functional.softmax(
    support_logits_per_action, dim=-1)
value = torch.sum(z * support_prob_per_action, dim=-1)
logits = support_logits_per_action
probs_or_logits = support_prob_per_action
else:
advantages_mean = reduce_mean_ignore_inf(action_scores, 1)
advantages_centered = action_scores - torch.unsqueeze(
advantages_mean, 1)
value = state_score + advantages_centered
else:
value = action_scores
return value, logits, probs_or_logits
def grad_process_and_td_error_fn(policy: Policy,
optimizer: "torch.optim.Optimizer",
loss: TensorType) -> Dict[str, TensorType]:
# Clip grads if configured.
return apply_grad_clipping(policy, optimizer, loss)
def extra_action_out_fn(policy: Policy, input_dict, state_batches, model,
action_dist) -> Dict[str, TensorType]:
return {"q_values": policy.q_values}
DQNTorchPolicy = build_torch_policy(
name="DQNTorchPolicy",
loss_fn=build_q_losses,
get_default_config=lambda: ray.rllib.agents.dqn.dqn.DEFAULT_CONFIG,
make_model_and_action_dist=build_q_model_and_distribution,
action_distribution_fn=get_distribution_inputs_and_class,
stats_fn=build_q_stats,
postprocess_fn=postprocess_nstep_and_prio,
optimizer_fn=adam_optimizer,
extra_grad_process_fn=grad_process_and_td_error_fn,
extra_learn_fetches_fn=lambda policy: {"td_error": policy.q_loss.td_error},
extra_action_out_fn=extra_action_out_fn,
before_init=setup_early_mixins,
after_init=after_init,
mixins=[
TargetNetworkMixin,
ComputeTDErrorMixin,
LearningRateSchedule,
])
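# Hedged usage sketch (assuming this version of RLlib and a registered Gym
# environment id):
#
#     from ray.rllib.agents.dqn import DQNTrainer
#     trainer = DQNTrainer(env="CartPole-v0",
#                          config={"framework": "torch"})
#     print(trainer.train()["episode_reward_mean"])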
# ---------------------------------------------------------------------------
# tests/integration/states/test_cmd.py
# l2ol33rt/salt @ ff68bbd9f4bda992a3e039822fb32f141e94347c | Apache-2.0 | 6,865 bytes
# ---------------------------------------------------------------------------
# -*- coding: utf-8 -*-
'''
Tests for the cmd state
'''
# Import python libs
from __future__ import absolute_import
import errno
import os
import textwrap
import tempfile
# Import Salt Testing libs
from tests.support.case import ModuleCase
from tests.support.paths import TMP_STATE_TREE
from tests.support.mixins import SaltReturnAssertsMixin
# Import salt libs
import salt.utils
IS_WINDOWS = salt.utils.is_windows()
class CMDTest(ModuleCase, SaltReturnAssertsMixin):
'''
Validate the cmd state
'''
def test_run_simple(self):
'''
cmd.run
'''
cmd = 'dir' if IS_WINDOWS else 'ls'
ret = self.run_state('cmd.run', name=cmd, cwd=tempfile.gettempdir())
self.assertSaltTrueReturn(ret)
def test_test_run_simple(self):
'''
cmd.run test interface
'''
ret = self.run_state('cmd.run', name='ls',
cwd=tempfile.gettempdir(), test=True)
self.assertSaltNoneReturn(ret)
class CMDRunRedirectTest(ModuleCase, SaltReturnAssertsMixin):
'''
Validate the cmd state of run_redirect
'''
def setUp(self):
self.state_name = 'run_redirect'
state_filename = self.state_name + '.sls'
self.state_file = os.path.join(TMP_STATE_TREE, state_filename)
# Create the testfile and release the handle
fd, self.test_file = tempfile.mkstemp()
try:
os.close(fd)
except OSError as exc:
if exc.errno != errno.EBADF:
raise exc
# Create the testfile and release the handle
fd, self.test_tmp_path = tempfile.mkstemp()
try:
os.close(fd)
except OSError as exc:
if exc.errno != errno.EBADF:
raise exc
super(CMDRunRedirectTest, self).setUp()
def tearDown(self):
for path in (self.state_file, self.test_tmp_path, self.test_file):
try:
os.remove(path)
except OSError:
# Not all of the tests leave files around that we want to remove.
# Some of the tests create the sls files in the test itself, and
# some use files from the integration test file state tree.
pass
super(CMDRunRedirectTest, self).tearDown()
def test_run_unless(self):
'''
test cmd.run unless
'''
state_key = 'cmd_|-{0}_|-{0}_|-run'.format(self.test_tmp_path)
with salt.utils.fopen(self.state_file, 'w') as fb_:
fb_.write(textwrap.dedent('''
{0}:
cmd.run:
- unless: echo cheese > {1}
'''.format(self.test_tmp_path, self.test_file)))
ret = self.run_function('state.sls', [self.state_name])
self.assertTrue(ret[state_key]['result'])
def test_run_unless_multiple_cmds(self):
'''
test cmd.run using multiple unless options where the first cmd in the
list will pass, but the second will fail. This tests the fix for issue
#35384. (The fix is in PR #35545.)
'''
sls = self.run_function('state.sls', mods='issue-35384')
self.assertSaltTrueReturn(sls)
# We must assert against the comment here to make sure the comment reads that the
# command "echo "hello"" was run. This ensures that we made it to the last unless
# command in the state. If the comment reads "unless execution succeeded", or similar,
# then the unless state run bailed out after the first unless command succeeded,
# which is the bug we're regression testing for.
self.assertEqual(sls['cmd_|-cmd_run_unless_multiple_|-echo "hello"_|-run']['comment'],
'Command "echo "hello"" run')
def test_run_creates_exists(self):
'''
test cmd.run creates already there
'''
state_key = 'cmd_|-echo >> {0}_|-echo >> {0}_|-run'.format(self.test_file)
with salt.utils.fopen(self.state_file, 'w') as fb_:
fb_.write(textwrap.dedent('''
echo >> {0}:
cmd.run:
- creates: {0}
'''.format(self.test_file)))
ret = self.run_function('state.sls', [self.state_name])
self.assertTrue(ret[state_key]['result'])
self.assertEqual(len(ret[state_key]['changes']), 0)
def test_run_creates_new(self):
'''
test cmd.run creates not there
'''
os.remove(self.test_file)
state_key = 'cmd_|-echo >> {0}_|-echo >> {0}_|-run'.format(self.test_file)
with salt.utils.fopen(self.state_file, 'w') as fb_:
fb_.write(textwrap.dedent('''
echo >> {0}:
cmd.run:
- creates: {0}
'''.format(self.test_file)))
ret = self.run_function('state.sls', [self.state_name])
self.assertTrue(ret[state_key]['result'])
self.assertEqual(len(ret[state_key]['changes']), 4)
def test_run_redirect(self):
'''
test cmd.run with shell redirect
'''
state_key = 'cmd_|-echo test > {0}_|-echo test > {0}_|-run'.format(self.test_file)
with salt.utils.fopen(self.state_file, 'w') as fb_:
fb_.write(textwrap.dedent('''
echo test > {0}:
cmd.run
'''.format(self.test_file)))
ret = self.run_function('state.sls', [self.state_name])
self.assertTrue(ret[state_key]['result'])
class CMDRunWatchTest(ModuleCase, SaltReturnAssertsMixin):
'''
Validate the cmd state of run_watch
'''
def setUp(self):
self.state_name = 'run_watch'
state_filename = self.state_name + '.sls'
self.state_file = os.path.join(TMP_STATE_TREE, state_filename)
super(CMDRunWatchTest, self).setUp()
def tearDown(self):
os.remove(self.state_file)
super(CMDRunWatchTest, self).tearDown()
def test_run_watch(self):
'''
test cmd.run watch
'''
saltines_key = 'cmd_|-saltines_|-echo changed=true_|-run'
biscuits_key = 'cmd_|-biscuits_|-echo biscuits_|-wait'
with salt.utils.fopen(self.state_file, 'w') as fb_:
fb_.write(textwrap.dedent('''
saltines:
cmd.run:
- name: echo changed=true
- cwd: /
- stateful: True
biscuits:
cmd.wait:
- name: echo biscuits
- cwd: /
- watch:
- cmd: saltines
'''))
ret = self.run_function('state.sls', [self.state_name])
self.assertTrue(ret[saltines_key]['result'])
self.assertTrue(ret[biscuits_key]['result'])
# ---------------------------------------------------------------------------
# mars/tensor/execution/datastore.py
# ChenQuan/mars @ 46fc9747e99210cebfabfc2d85bcc8272440d1a3 | Apache-2.0 | 2,591 bytes
# ---------------------------------------------------------------------------
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 1999-2018 Alibaba Group Holding Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
try:
import tiledb
except ImportError: # pragma: no cover
tiledb = None
from ...lib.sparse import SparseNDArray
from ...lib.sparse.core import sps
from ..expressions import datastore
from .utils import get_tiledb_ctx
def _store_tiledb(ctx, chunk):
tiledb_ctx = get_tiledb_ctx(chunk.op.tiledb_config)
uri = chunk.op.tiledb_uri
key = chunk.op.tiledb_key
timestamp = chunk.op.tiledb_timestamp
axis_offsets = chunk.op.axis_offsets
if not chunk.issparse():
# dense
to_store = np.ascontiguousarray(ctx[chunk.op.input.key])
slcs = []
for axis in range(chunk.ndim):
axis_offset = axis_offsets[axis]
axis_length = chunk.op.input.shape[axis]
slcs.append(slice(axis_offset, axis_offset + axis_length))
with tiledb.DenseArray(tiledb_ctx, uri, mode='w',
key=key, timestamp=timestamp) as arr:
arr[tuple(slcs)] = to_store
ctx[chunk.key] = np.empty((0,) * chunk.ndim, dtype=chunk.dtype)
else:
# sparse
to_store = ctx[chunk.op.input.key].spmatrix.tocoo()
if to_store.nnz > 0:
with tiledb.SparseArray(tiledb_ctx, uri, mode='w',
key=key, timestamp=timestamp) as arr:
if chunk.ndim == 1:
vec = to_store.col if to_store.shape[0] == 1 else to_store.row
vec += axis_offsets[0]
arr[vec] = to_store.data
else:
i, j = to_store.row + axis_offsets[0], to_store.col + axis_offsets[1]
arr[i, j] = to_store.data
ctx[chunk.key] = SparseNDArray(sps.csr_matrix((0, 0), dtype=chunk.dtype),
shape=chunk.shape)
def register_data_store_handler():
from ...executor import register
register(datastore.TensorTileDBDataStore, _store_tiledb)
| 37.550725
| 89
| 0.63296
| 349
| 2,591
| 4.581662
| 0.406877
| 0.048155
| 0.03252
| 0.020013
| 0.08005
| 0.057536
| 0.057536
| 0.057536
| 0.057536
| 0.057536
| 0
| 0.012144
| 0.269008
| 2,591
| 68
| 90
| 38.102941
| 0.832101
| 0.247781
| 0
| 0.093023
| 0
| 0
| 0.001036
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.046512
| false
| 0
| 0.186047
| 0
| 0.232558
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
e4041f8f3f0e170375ff7b152259c16fb293ef71
| 1,689
|
py
|
Python
|
fastgc/model/mlp.py
|
ppmlguy/fastgradclip
|
0d8bff42ab13fa3471c520a2823050ccf0ff4a21
|
[
"MIT"
] | 2
|
2020-10-16T10:14:25.000Z
|
2021-03-25T17:19:34.000Z
|
fastgc/model/mlp.py
|
ppmlguy/fastgradclip
|
0d8bff42ab13fa3471c520a2823050ccf0ff4a21
|
[
"MIT"
] | null | null | null |
fastgc/model/mlp.py
|
ppmlguy/fastgradclip
|
0d8bff42ab13fa3471c520a2823050ccf0ff4a21
|
[
"MIT"
] | null | null | null |
import torch
import torch.nn as nn
import torch.nn.functional as F
from fastgc.model.penet import PeGradNet
from fastgc.layers.linear import Linear
from fastgc.activation import activation
class MLP(PeGradNet):
def __init__(self, input_size, hidden_sizes, output_size, act_func='sigmoid',
train_alg='batch'):
"""
Parameters:
------------------
- input_size: integer, the number of features in the input
- hidden_sizes: a list of integers, a list object containing number of units for hidden layers
- output_size: an integer, the length of output vector
- act_func: string, name of activation function to use for each hidden layer
- train_alg: string, allowed values are {'batch', 'reweight', 'naive'}
"""
super(MLP, self).__init__()
self.input_size = input_size
layer_sizes = [input_size] + hidden_sizes
self.linears = nn.ModuleList([Linear(in_size, out_size, bias=True)
for in_size, out_size in zip(layer_sizes[:-1],
layer_sizes[1:])])
self.output_layer = Linear(hidden_sizes[-1], output_size, bias=True)
self.act = activation[act_func]
        self.train_alg = train_alg
# list of layers in the network
self.layers = [layer for layer in self.linears]
self.layers.append(self.output_layer)
def forward(self, x):
x = x.view(-1, self.input_size)
out = x
for layer in self.linears:
out = self.act(layer(out))
logits = self.output_layer(out)
return logits
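# Minimal usage sketch (illustrative; the sizes are hypothetical and 'sigmoid'
# is assumed to be a valid key of fastgc.activation.activation):
#   model = MLP(input_size=784, hidden_sizes=[256, 128], output_size=10)
#   logits = model(torch.randn(32, 784))  # -> tensor of shape (32, 10)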
| 35.1875
| 102
| 0.605684
| 216
| 1,689
| 4.560185
| 0.342593
| 0.054822
| 0.039594
| 0.034518
| 0.04264
| 0
| 0
| 0
| 0
| 0
| 0
| 0.003378
| 0.298993
| 1,689
| 47
| 103
| 35.93617
| 0.828547
| 0.248668
| 0
| 0
| 0
| 0
| 0.010033
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.074074
| false
| 0
| 0.222222
| 0
| 0.37037
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
e407a1b65cd96d68a622c0a025047b036e6148f4
| 21,659
|
py
|
Python
|
test_vector_handlers/src/awses_test_vectors/manifests/full_message/decrypt_generation.py
|
farleyb-amazon/aws-encryption-sdk-python
|
7950abd73ee333407d2dadd02ef2d57c3df464cf
|
[
"Apache-2.0"
] | 95
|
2018-08-20T23:10:00.000Z
|
2022-02-17T02:54:32.000Z
|
test_vector_handlers/src/awses_test_vectors/manifests/full_message/decrypt_generation.py
|
farleyb-amazon/aws-encryption-sdk-python
|
7950abd73ee333407d2dadd02ef2d57c3df464cf
|
[
"Apache-2.0"
] | 220
|
2018-08-01T20:56:29.000Z
|
2022-03-28T18:12:35.000Z
|
test_vector_handlers/src/awses_test_vectors/manifests/full_message/decrypt_generation.py
|
farleyb-amazon/aws-encryption-sdk-python
|
7950abd73ee333407d2dadd02ef2d57c3df464cf
|
[
"Apache-2.0"
] | 63
|
2018-08-01T19:37:33.000Z
|
2022-03-20T17:14:15.000Z
|
# Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
"""
AWS Encryption SDK Decrypt Message Generation manifest handler.
Described in AWS Crypto Tools Test Vector Framework feature #0006 AWS Encryption SDK Decrypt Message Generation.
"""
import json
import os
import uuid
from copy import copy
import attr
import six
from aws_encryption_sdk.caches.local import LocalCryptoMaterialsCache
from aws_encryption_sdk.materials_managers.base import CryptoMaterialsManager
from aws_encryption_sdk.materials_managers.caching import CachingCryptoMaterialsManager
from aws_encryption_sdk.materials_managers.default import DefaultCryptoMaterialsManager
from awses_test_vectors.internal.defaults import ENCODING
from awses_test_vectors.internal.util import (
dictionary_validator,
file_reader,
file_writer,
iterable_validator,
membership_validator,
validate_manifest_type,
)
from awses_test_vectors.manifests.full_message.decrypt import (
DecryptionMethod,
MessageDecryptionManifest,
MessageDecryptionTestResult,
MessageDecryptionTestScenario,
)
from awses_test_vectors.manifests.full_message.encrypt import MessageEncryptionTestScenario
from awses_test_vectors.manifests.keys import KeysManifest
try:
from aws_encryption_sdk.identifiers import AlgorithmSuite
except ImportError:
from aws_encryption_sdk.identifiers import Algorithm as AlgorithmSuite
from awses_test_vectors.manifests.master_key import MasterKeySpec, master_key_provider_from_master_key_specs
try: # Python 3.5.0 and 3.5.1 have incompatible typing modules
from typing import IO, Callable, Dict, Iterable, Optional # noqa pylint: disable=unused-import
from awses_test_vectors.internal.mypy_types import ( # noqa pylint: disable=unused-import
ENCRYPT_SCENARIO_SPEC,
PLAINTEXTS_SPEC,
)
except ImportError: # pragma: no cover
# We only actually need these imports when running the mypy checks
pass
SUPPORTED_VERSIONS = (2,)
class TamperingMethod:
"""Base class for all tampering methods."""
@classmethod
def from_tampering_spec(cls, spec):
"""Load from a tampering specification"""
if spec is None:
return TamperingMethod()
if spec == "truncate":
return TruncateTamperingMethod()
if spec == "mutate":
return MutateTamperingMethod()
if spec == "half-sign":
return HalfSigningTamperingMethod()
((tampering_tag, tampering_values_spec),) = spec.items()
if tampering_tag == "change-edk-provider-info":
return ChangeEDKProviderInfoTamperingMethod.from_values_spec(tampering_values_spec)
raise ValueError("Unrecognized tampering method tag: " + tampering_tag)
# pylint: disable=R0201
def run_scenario_with_tampering(self, ciphertext_writer, generation_scenario, plaintext_uri):
"""
Run a given scenario, tampering with the input or the result.
        return: a list of (ciphertext, result) pairs.
"""
materials_manager = DefaultCryptoMaterialsManager(
generation_scenario.encryption_scenario.master_key_provider_fn()
)
ciphertext_to_decrypt = generation_scenario.encryption_scenario.run(materials_manager)
if generation_scenario.result:
expected_result = generation_scenario.result
else:
expected_result = MessageDecryptionTestResult.expect_output(
plaintext_uri=plaintext_uri, plaintext=generation_scenario.encryption_scenario.plaintext
)
return [
generation_scenario.decryption_test_scenario_pair(ciphertext_writer, ciphertext_to_decrypt, expected_result)
]
class ChangeEDKProviderInfoTamperingMethod(TamperingMethod):
"""Tampering method that changes the provider info on all EDKs."""
new_provider_infos = attr.ib(validator=iterable_validator(list, six.string_types))
def __init__(self, new_provider_infos):
"""Create a new instance for a given new provider info value."""
self.new_provider_infos = new_provider_infos
@classmethod
def from_values_spec(cls, values_spec):
"""Load from a tampering parameters specification"""
return ChangeEDKProviderInfoTamperingMethod(values_spec)
# pylint: disable=R0201
def run_scenario_with_tampering(self, ciphertext_writer, generation_scenario, _plaintext_uri):
"""
Run a given scenario, tampering with the input or the result.
return: a list of (ciphertext, result) pairs.
"""
master_key_provider = generation_scenario.encryption_scenario.master_key_provider_fn()
# Use a caching CMM to avoid generating a new data key every time.
cache = LocalCryptoMaterialsCache(10)
caching_cmm = CachingCryptoMaterialsManager(
master_key_provider=master_key_provider,
cache=cache,
max_age=60.0,
max_messages_encrypted=100,
)
return [
self.run_scenario_with_new_provider_info(
ciphertext_writer, generation_scenario, caching_cmm, new_provider_info
)
for new_provider_info in self.new_provider_infos
]
def run_scenario_with_new_provider_info(
self, ciphertext_writer, generation_scenario, materials_manager, new_provider_info
):
"""Run with tampering for a specific new provider info value"""
tampering_materials_manager = ProviderInfoChangingCryptoMaterialsManager(materials_manager, new_provider_info)
ciphertext_to_decrypt = generation_scenario.encryption_scenario.run(tampering_materials_manager)
expected_result = MessageDecryptionTestResult.expect_error(
"Incorrect encrypted data key provider info: " + new_provider_info
)
return generation_scenario.decryption_test_scenario_pair(
ciphertext_writer, ciphertext_to_decrypt, expected_result
)
class ProviderInfoChangingCryptoMaterialsManager(CryptoMaterialsManager):
"""
Custom CMM that modifies the provider info field on EDKS.
THIS IS ONLY USED TO CREATE INVALID MESSAGES and should never be used in
production!
"""
wrapped_cmm = attr.ib(validator=attr.validators.instance_of(CryptoMaterialsManager))
new_provider_info = attr.ib(validator=attr.validators.instance_of(six.string_types))
def __init__(self, materials_manager, new_provider_info):
"""Create a new CMM that wraps a the given CMM."""
self.wrapped_cmm = materials_manager
self.new_provider_info = new_provider_info
def get_encryption_materials(self, request):
"""
Request materials from the wrapped CMM, and then change the provider info
on each EDK.
"""
result = self.wrapped_cmm.get_encryption_materials(request)
for encrypted_data_key in result.encrypted_data_keys:
encrypted_data_key.key_provider.key_info = self.new_provider_info
return result
def decrypt_materials(self, request):
"""Thunks to the wrapped CMM"""
return self.wrapped_cmm.decrypt_materials(request)
BITS_PER_BYTE = 8
class TruncateTamperingMethod(TamperingMethod):
"""Tampering method that truncates a good message at every byte (except zero)."""
# pylint: disable=R0201
def run_scenario_with_tampering(self, ciphertext_writer, generation_scenario, _plaintext_uri):
"""
Run a given scenario, tampering with the input or the result.
return: a list of (ciphertext, result) pairs.
"""
ciphertext_to_decrypt = generation_scenario.encryption_scenario.run()
        return [
            generation_scenario.decryption_test_scenario_pair(
                ciphertext_writer,
                ciphertext_to_decrypt[0:length],
                MessageDecryptionTestResult.expect_error("Truncated at byte {}".format(length)),
            )
            for length in range(1, len(ciphertext_to_decrypt))
        ]
class MutateTamperingMethod(TamperingMethod):
"""Tampering method that produces a message with a single bit flipped, for every possible bit."""
# pylint: disable=R0201
def run_scenario_with_tampering(self, ciphertext_writer, generation_scenario, _plaintext_uri):
"""
Run a given scenario, tampering with the input or the result.
return: a list of (ciphertext, result) pairs.
"""
ciphertext_to_decrypt = generation_scenario.encryption_scenario.run()
        return [
            generation_scenario.decryption_test_scenario_pair(
                ciphertext_writer,
                MutateTamperingMethod.flip_bit(ciphertext_to_decrypt, bit),
                MessageDecryptionTestResult.expect_error("Bit {} flipped".format(bit)),
            )
            for bit in range(0, len(ciphertext_to_decrypt) * BITS_PER_BYTE)
        ]

    @classmethod
    def flip_bit(cls, ciphertext, bit):
        """Flip only the given bit in the given ciphertext"""
        byte_index, bit_index = divmod(bit, BITS_PER_BYTE)
        result = bytearray(ciphertext)
        result[byte_index] ^= 1 << (BITS_PER_BYTE - bit_index - 1)
        return bytes(result)
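# Quick sanity check of the bit-flip helper (illustrative note, not part of
# the original handler): flipping the same bit twice restores the input.
#   >>> MutateTamperingMethod.flip_bit(b"\x00\xff", 0)
#   b'\x80\xff'
#   >>> MutateTamperingMethod.flip_bit(b"\x80\xff", 0)
#   b'\x00\xff'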
class HalfSigningTamperingMethod(TamperingMethod):
"""Tampering method that changes the provider info on all EDKs."""
# pylint: disable=R0201
def run_scenario_with_tampering(self, ciphertext_writer, generation_scenario, _plaintext_uri):
"""
Run a given scenario, tampering with the input or the result.
return: a list of (ciphertext, result) pairs.
"""
tampering_materials_manager = HalfSigningCryptoMaterialsManager(
generation_scenario.encryption_scenario.master_key_provider_fn()
)
ciphertext_to_decrypt = generation_scenario.encryption_scenario.run(tampering_materials_manager)
expected_result = MessageDecryptionTestResult.expect_error(
"Unsigned message using a data key with a public key"
)
return [
generation_scenario.decryption_test_scenario_pair(ciphertext_writer, ciphertext_to_decrypt, expected_result)
]
class HalfSigningCryptoMaterialsManager(CryptoMaterialsManager):
"""
Custom CMM that generates materials for an unsigned algorithm suite
that includes the "aws-crypto-public-key" encryption context.
THIS IS ONLY USED TO CREATE INVALID MESSAGES and should never be used in
production! It is imitating what a malicious decryptor without encryption
permissions might do, to attempt to forge an unsigned message from a decrypted
signed message, and therefore this is an important case for ESDKs to reject.
"""
wrapped_default_cmm = attr.ib(validator=attr.validators.instance_of(CryptoMaterialsManager))
def __init__(self, master_key_provider):
"""
Create a new CMM that wraps a new DefaultCryptoMaterialsManager
based on the given master key provider.
"""
self.wrapped_default_cmm = DefaultCryptoMaterialsManager(master_key_provider)
def get_encryption_materials(self, request):
"""
Generate half-signing materials by requesting signing materials
from the wrapped default CMM, and then changing the algorithm suite
        and removing the signing key from the result.
"""
if request.algorithm == AlgorithmSuite.AES_256_GCM_HKDF_SHA512_COMMIT_KEY:
signing_request = copy(request)
signing_request.algorithm = AlgorithmSuite.AES_256_GCM_HKDF_SHA512_COMMIT_KEY_ECDSA_P384
result = self.wrapped_default_cmm.get_encryption_materials(signing_request)
result.algorithm = request.algorithm
result.signing_key = None
return result
raise NotImplementedError(
"The half-sign tampering method is only supported on the "
"AES_256_GCM_HKDF_SHA512_COMMIT_KEY algorithm suite."
)
def decrypt_materials(self, request):
"""Thunks to the wrapped default CMM"""
return self.wrapped_default_cmm.decrypt_materials(request)
@attr.s
class MessageDecryptionTestScenarioGenerator(object):
# pylint: disable=too-many-instance-attributes
"""Data class for a single full message decrypt test scenario.
Handles serialization and deserialization to and from manifest specs.
:param MessageEncryptionTestScenario encryption_scenario: Encryption parameters
:param tampering_method: Optional method used to tamper with the ciphertext
:type tampering_method: :class:`TamperingMethod`
:param decryption_method:
:param decryption_master_key_specs: Iterable of master key specifications
:type decryption_master_key_specs: iterable of :class:`MasterKeySpec`
:param Callable decryption_master_key_provider_fn:
:param result:
"""
encryption_scenario = attr.ib(validator=attr.validators.instance_of(MessageEncryptionTestScenario))
tampering_method = attr.ib(validator=attr.validators.optional(attr.validators.instance_of(TamperingMethod)))
decryption_method = attr.ib(validator=attr.validators.optional(attr.validators.instance_of(DecryptionMethod)))
decryption_master_key_specs = attr.ib(validator=iterable_validator(list, MasterKeySpec))
decryption_master_key_provider_fn = attr.ib(validator=attr.validators.is_callable())
result = attr.ib(validator=attr.validators.optional(attr.validators.instance_of(MessageDecryptionTestResult)))
@classmethod
def from_scenario(cls, scenario, keys, plaintexts):
"""Load from a scenario specification.
:param dict scenario: Scenario specification JSON
:param KeysManifest keys: Loaded keys
:param dict plaintexts: Mapping of plaintext names to plaintext values
:return: Loaded test scenario
:rtype: MessageDecryptionTestScenarioGenerator
"""
encryption_scenario_spec = scenario["encryption-scenario"]
encryption_scenario = MessageEncryptionTestScenario.from_scenario(encryption_scenario_spec, keys, plaintexts)
tampering = scenario.get("tampering")
tampering_method = TamperingMethod.from_tampering_spec(tampering)
decryption_method_spec = scenario.get("decryption-method")
decryption_method = DecryptionMethod(decryption_method_spec) if decryption_method_spec else None
if "decryption-master-keys" in scenario:
decryption_master_key_specs = [
MasterKeySpec.from_scenario(spec) for spec in scenario["decryption-master-keys"]
]
def decryption_master_key_provider_fn():
return master_key_provider_from_master_key_specs(keys, decryption_master_key_specs)
else:
decryption_master_key_specs = encryption_scenario.master_key_specs
decryption_master_key_provider_fn = encryption_scenario.master_key_provider_fn
result_spec = scenario.get("result")
result = MessageDecryptionTestResult.from_result_spec(result_spec, None) if result_spec else None
return cls(
encryption_scenario=encryption_scenario,
tampering_method=tampering_method,
decryption_method=decryption_method,
decryption_master_key_specs=decryption_master_key_specs,
decryption_master_key_provider_fn=decryption_master_key_provider_fn,
result=result,
)
def run(self, ciphertext_writer, plaintext_uri):
"""Run this scenario, writing the resulting ciphertext with ``ciphertext_writer`` and returning
a :class:`MessageDecryptionTestScenario` that describes the matching decrypt scenario.
:param callable ciphertext_writer: Callable that will write the requested named ciphertext and
return a URI locating the written data
:param str plaintext_uri: URI locating the written plaintext data for this scenario
:return: Decrypt test scenario that describes the generated scenario
:rtype: MessageDecryptionTestScenario
"""
return dict(self.tampering_method.run_scenario_with_tampering(ciphertext_writer, self, plaintext_uri))
def decryption_test_scenario_pair(self, ciphertext_writer, ciphertext_to_decrypt, expected_result):
"""Create a new (name, decryption scenario) pair"""
ciphertext_name = str(uuid.uuid4())
ciphertext_uri = ciphertext_writer(ciphertext_name, ciphertext_to_decrypt)
return (
ciphertext_name,
MessageDecryptionTestScenario(
ciphertext_uri=ciphertext_uri,
ciphertext=ciphertext_to_decrypt,
master_key_specs=self.decryption_master_key_specs,
master_key_provider_fn=self.decryption_master_key_provider_fn,
decryption_method=self.decryption_method,
result=expected_result,
),
)
@attr.s
class MessageDecryptionGenerationManifest(object):
"""AWS Encryption SDK Decryption Message Generation manifest handler.
Described in AWS Crypto Tools Test Vector Framework feature #0006 AWS Encryption SDK Decrypt Message Generation.
:param int version: Version of this manifest
:param KeysManifest keys: Loaded keys
:param dict plaintexts: Mapping of plaintext names to plaintext values
:param dict tests: Mapping of test scenario names to :class:`MessageDecryptionGenerationManifest`s
"""
version = attr.ib(validator=membership_validator(SUPPORTED_VERSIONS))
keys = attr.ib(validator=attr.validators.instance_of(KeysManifest))
plaintexts = attr.ib(validator=dictionary_validator(six.string_types, six.binary_type))
tests = attr.ib(validator=dictionary_validator(six.string_types, MessageDecryptionTestScenarioGenerator))
type_name = "awses-decrypt-generate"
@staticmethod
def _generate_plaintexts(plaintexts_specs):
# type: (PLAINTEXTS_SPEC) -> Dict[str, bytes]
"""Generate required plaintext values.
:param dict plaintexts_specs: Mapping of plaintext name to size in bytes
:return: Mapping of plaintext name to randomly generated bytes
:rtype: dict
"""
return {name: os.urandom(size) for name, size in plaintexts_specs.items()}
@classmethod
def from_file(cls, input_file):
# type: (IO) -> MessageDecryptionGenerationManifest
"""Load from a file containing a full message encrypt manifest.
:param file input_file: File object for file containing JSON manifest
:return: Loaded manifest
:rtype: MessageEncryptionManifest
"""
raw_manifest = json.load(input_file)
validate_manifest_type(
type_name=cls.type_name, manifest_version=raw_manifest["manifest"], supported_versions=SUPPORTED_VERSIONS
)
parent_dir = os.path.abspath(os.path.dirname(input_file.name))
reader = file_reader(parent_dir)
raw_keys_manifest = json.loads(reader(raw_manifest["keys"]).decode(ENCODING))
keys = KeysManifest.from_manifest_spec(raw_keys_manifest)
plaintexts = cls._generate_plaintexts(raw_manifest["plaintexts"])
tests = {}
for name, scenario in raw_manifest["tests"].items():
try:
tests[name] = MessageDecryptionTestScenarioGenerator.from_scenario(
scenario=scenario, keys=keys, plaintexts=plaintexts
)
except NotImplementedError:
continue
return cls(version=raw_manifest["manifest"]["version"], keys=keys, plaintexts=plaintexts, tests=tests)
def run_and_write_to_dir(self, target_directory, json_indent=None):
# type: (str, Optional[int]) -> None
"""Process all known encrypt test scenarios and write the resulting data and manifests to disk.
:param str target_directory: Directory in which to write all output
:param int json_indent: Number of spaces to indent JSON files (optional: default is to write minified)
"""
root_dir = os.path.abspath(target_directory)
root_writer = file_writer(root_dir)
root_writer("keys.json", json.dumps(self.keys.manifest_spec, indent=json_indent).encode(ENCODING))
plaintext_writer = file_writer(os.path.join(root_dir, "plaintexts"))
plaintext_uris = {name: plaintext_writer(name, plaintext) for name, plaintext in self.plaintexts.items()}
ciphertext_writer = file_writer(os.path.join(root_dir, "ciphertexts"))
test_scenarios = {
decrypt_scenario_name: decrypt_scenario
for name, scenario in self.tests.items()
for decrypt_scenario_name, decrypt_scenario in scenario.run(
ciphertext_writer, plaintext_uris[scenario.encryption_scenario.plaintext_name]
).items()
}
decrypt_manifest = MessageDecryptionManifest(
keys_uri="file://keys.json", keys=self.keys, test_scenarios=test_scenarios
)
root_writer("manifest.json", json.dumps(decrypt_manifest.manifest_spec, indent=json_indent).encode(ENCODING))
| 43.755556
| 120
| 0.722009
| 2,431
| 21,659
| 6.191279
| 0.160839
| 0.020929
| 0.02259
| 0.015149
| 0.349678
| 0.303701
| 0.262773
| 0.229021
| 0.20889
| 0.194007
| 0
| 0.004625
| 0.211367
| 21,659
| 494
| 121
| 43.84413
| 0.876529
| 0.273466
| 0
| 0.156934
| 0
| 0
| 0.035686
| 0.008256
| 0
| 0
| 0
| 0
| 0
| 1
| 0.083942
| false
| 0.00365
| 0.080292
| 0.00365
| 0.335766
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
e40ca767179088e9b2626907b90dc14b9802c60c
| 10,237
|
py
|
Python
|
atmpro1_vsm2.py
|
joselynzhao/One-shot-Person-Re-ID-ATM
|
d039b1a66410f87cfe931774eba54a5f1a1a0260
|
[
"MIT"
] | 3
|
2020-07-28T03:16:51.000Z
|
2020-11-23T05:39:54.000Z
|
atmpro1_vsm2.py
|
joselynzhao/One-shot-Person-Re-ID-ATM
|
d039b1a66410f87cfe931774eba54a5f1a1a0260
|
[
"MIT"
] | null | null | null |
atmpro1_vsm2.py
|
joselynzhao/One-shot-Person-Re-ID-ATM
|
d039b1a66410f87cfe931774eba54a5f1a1a0260
|
[
"MIT"
] | null | null | null |
#!/usr/bin/python3.6
# -*- coding: utf-8 -*-
# @Time : 2020/9/3 11:03 AM
# @Author : Joselynzhao
# @Email : zhaojing17@forxmail.com
# @File : atmpro1_vsm2.py
# @Software: PyCharm
# @Desc :
#!/usr/bin/python3.6
# -*- coding: utf-8 -*-
# @Time : 2020/9/1 7:07 PM
# @Author : Joselynzhao
# @Email : zhaojing17@forxmail.com
# @File : atmpro1_vsm.py
# @Software: PyCharm
# @Desc :
#!/usr/bin/python3.6
# -*- coding: utf-8 -*-
# @Time : 2020/8/26 8:26 PM
# @Author : Joselynzhao
# @Email : zhaojing17@forxmail.com
# @File : atmpro1.py
# @Software: PyCharm
# @Desc :
from my_reid.eug import *
from my_reid import datasets
from my_reid import models
import numpy as np
import torch
import argparse
import os
import warnings
warnings.filterwarnings("ignore")
from my_reid.utils.logging import Logger
import os.path as osp
import sys
from torch.backends import cudnn
from my_reid.utils.serialization import load_checkpoint
from torch import nn
import time
import pickle
import torch.distributed as dist
from torch.nn.parallel import DistributedDataParallel as DDP
from torch.utils.data.distributed import DistributedSampler
from pathlib import Path
def resume(savepath):
import re
pattern = re.compile(r'step_(\d+)\.ckpt')
start_step = -1
ckpt_file = ""
# find start step
files = os.listdir(savepath)
files.sort()
for filename in files:
try:
iter_ = int(pattern.search(filename).groups()[0])
print(iter_)
if iter_ > start_step:
start_step = iter_
ckpt_file = osp.join(savepath, filename)
except:
continue
# if need resume
if start_step >= 0:
print("continued from iter step", start_step)
else:
print("resume failed", start_step, files)
return start_step, ckpt_file
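# Example (illustrative): if savepath contains ['log.txt', 'step_0.ckpt',
# 'step_12.ckpt'], resume() returns (12, '<savepath>/step_12.ckpt'); with no
# matching checkpoint files it returns (-1, '').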
def main(args):
father = Path('/mnt/')
    if father.exists():  # running on the server
        data_dir = Path('/mnt/share/datasets/RE-ID/data')  # server
        logs_dir = Path('/mnt/home/{}'.format(args.log_name))  # server
    else:  # local machine
        data_dir = Path('/home/joselyn/workspace/ATM_SERIES/data')  # use this path when running locally
        logs_dir = Path('/home/joselyn/workspace/ATM_SERIES/{}'.format(args.log_name))  # use this path when running locally
    cudnn.benchmark = True
    cudnn.enabled = True
    save_path = os.path.join(logs_dir, args.dataset, args.exp_name, args.exp_order)  # path down to the experiment number
    total_step = 100 // args.EF + 1
    sys.stdout = Logger(osp.join(save_path, 'log' + str(args.EF) + time.strftime(".%m_%d_%H:%M:%S") + '.txt'))
    dataf_file = open(osp.join(save_path, 'dataf.txt'), 'a')  # saves performance data (measured in feature space)
    data_file = open(osp.join(save_path, 'data.txt'), 'a')  # saves performance data (measured in feature space)
    kf_file = open(osp.join(save_path, 'kf.txt'), 'a')
    # data format: label_pre_r, select_pre_r, label_pre_t, select_pre_t, plus the tagper data
    tagper_path = osp.join(save_path, 'tagper')  # save path for the tagper
if not Path(tagper_path).exists():
os.mkdir(tagper_path)
    '''Record the configuration info and paths'''
print('-'*20+'config_info'+'-'*20)
config_file = open(osp.join(save_path, 'config.txt'), 'w')
config_info = str(args).split('(')[1].strip(')').split(',')
config_info.sort()
for one in config_info:
key,value=map(str,one.split('='))
config_file.write(key.strip()+'='+value.strip('\'')+'\n')
print(key.strip()+'='+value.strip('\''))
config_file.write('save_path='+save_path)
print('save_path='+save_path)
print('-' * 20 + 'config_info' + '-' * 20)
config_file.close()
    train_time_file = open(osp.join(save_path, 'time.txt'), 'a')  # records only the time spent training
    # data format: step_time total_time
total_time = 0
# get all the labeled and unlabeled data for training
dataset_all = datasets.create(args.dataset, osp.join(data_dir, args.dataset))
num_all_examples = len(dataset_all.train)
l_data, u_data = get_init_shot_in_cam1(dataset_all,
load_path="./examples/{}_init_{}.pickle".format(dataset_all.name, args.init),
init=args.init)
resume_step, ckpt_file = -1, ''
if args.resume:
resume_step, ckpt_file = resume(save_path)
# initial the EUG algorithm
eug = EUG(batch_size=args.batch_size, num_classes=dataset_all.num_train_ids,
dataset=dataset_all, l_data=l_data, u_data=u_data, save_path=save_path, max_frames=args.max_frames,
embeding_fea_size=args.fea, momentum=args.momentum, lamda=args.lamda)
tagper = EUG(batch_size=args.batch_size, num_classes=dataset_all.num_train_ids,
dataset=dataset_all, l_data=l_data, u_data=u_data, save_path=tagper_path,
max_frames=args.max_frames,
embeding_fea_size=args.fea, momentum=args.momentum, lamda=args.lamda)
new_train_data = l_data
unselected_data = u_data
    iter_mode = 2  # iteration mode; decides whether the tagper gets trained
for step in range(total_step):
# for resume
if step < resume_step:
continue
ratio = (step + 1) * args.EF / 100
ratio_t = (step+1+args.t) * args.EF /100
nums_to_select = int(len(u_data) * ratio)
nums_to_select_tagper = int(len(u_data) * ratio_t)
if nums_to_select >= len(u_data):
break
        # decay args.vsm_lambda from 0.5 towards 0
        vsm_lambda = args.vsm_lambda * step / (1 - (total_step / 2)) + args.vsm_lambda
        vsm_lambda += 1
        print("Running: EF={}%, step {}:\t Nums_to_be_selected {} \t Ratio {} \t Logs-dir {}".format(
            args.EF, step, nums_to_select, ratio, save_path))
# train the model or load ckpt
start_time = time.time()
print("training reid model")
eug.train(new_train_data, unselected_data, step, loss=args.loss, epochs=args.epochs, step_size=args.step_size,
init_lr=0.1) if step != resume_step else eug.resume(ckpt_file, step)
        # evaluate performance for eug only
        # mAP, rank1, rank5, rank10, rank20 = 0, 0, 0, 0, 0
        mAP, rank1, rank5, rank10, rank20 = eug.evaluate(dataset_all.query, dataset_all.gallery)
        # write the metrics to the data file
        data_file.write('{} {:.2%} {:.2%} {:.2%} {:.2%} {:.2%}\n'.format(step, mAP, rank1, rank5, rank10, rank20))
        pred_y, pred_score, label_pre, dists = eug.estimate_label_vsm()
        selected_idx = eug.select_top_data_vsm2(pred_score, dists, args.topk, vsm_lambda, min(nums_to_select_tagper, len(u_data) - 50) if iter_mode == 2 else min(nums_to_select, len(u_data)))  # fetch twice as much data directly; holding back 50 samples keeps unselected_data non-empty
new_train_data, unselected_data, select_pre= eug.generate_new_train_data(selected_idx, pred_y)
raw_label_pre, raw_select_pre = label_pre,select_pre
t_label_pre,t_select_pre = 0,0
raw_select_pre_t = 0
# label_pre_t,select_pre_t=0,0
if iter_mode==2:
raw_select_pre_t = raw_select_pre
print("training tagper model")
selected_idx = eug.select_top_data_vsm2(pred_score,dists,args.topk,vsm_lambda, min(nums_to_select, len(u_data)))
_, _, raw_select_pre = eug.generate_new_train_data(selected_idx, pred_y)
# kf_file.write('{} {:.2%} {:.2%}'.format(step, label_pre, select_pre))
tagper.resume(osp.join(save_path,'step_{}.ckpt'.format(step)),step)
tagper.train(new_train_data, unselected_data, step, loss=args.loss, epochs=args.epochs, step_size=args.step_size, init_lr=0.1)
pred_y, pred_score, label_pre,dists= tagper.estimate_label_vsm()
            selected_idx = tagper.select_top_data_vsm2(pred_score, dists, args.topk, vsm_lambda, min(nums_to_select, len(u_data)))  # target number of samples to select
new_train_data, unselected_data, select_pre= tagper.generate_new_train_data(selected_idx, pred_y)
t_label_pre,t_select_pre = label_pre,select_pre
label_pre,select_pre = t_label_pre,t_select_pre
            if nums_to_select_tagper >= len(u_data):
                iter_mode = 1  # switch mode
                print('tagper is stopped')
        else:  # mode == 1
# raw_select_pre = raw_select_pre_t
# raw_select_pre_t = 0
label_pre,select_pre = raw_label_pre,raw_select_pre
end_time = time.time()
step_time = end_time - start_time
total_time = step_time + total_time
train_time_file.write('{} {:.6} {:.6}\n'.format(step, step_time, total_time))
kf_file.write('{} {} {} {:.2%} {:.2%} {:.2%} {:.2%} {:.2%}\n'.format(step,nums_to_select,nums_to_select_tagper,raw_label_pre,raw_select_pre,raw_select_pre_t,t_label_pre,t_select_pre))
dataf_file.write(
'{} {:.2%} {:.2%}\n'.format(step, label_pre, select_pre))
dataf_file.close()
train_time_file.close()
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Progressive Learning for One-Example re-ID')
parser.add_argument('-d', '--dataset', type=str, default='mars',
choices=datasets.names())
parser.add_argument('-b', '--batch-size', type=int, default=16)
parser.add_argument('-f', '--fea', type=int, default=1024)
parser.add_argument('--EF', type=int, default=10)
    parser.add_argument('--t', type=float, default=2)  # no longer the tagper sampling multiplier; now the number of steps across which to sample
parser.add_argument('--exp_order', type=str, default='0')
parser.add_argument('--exp_name', type=str, default='atm')
parser.add_argument('--exp_aim', type=str, default='for paper')
parser.add_argument('--run_file',type=str,default='train.py')
parser.add_argument('--log_name',type=str,default='pl_logs')
parser.add_argument('--topk',type=int,default=2)
parser.add_argument('--vsm_lambda',type=float,default=0.5)
parser.add_argument('--resume', type=str, default='Yes')
parser.add_argument('--max_frames', type=int, default=900)
parser.add_argument('--loss', type=str, default='ExLoss', choices=['CrossEntropyLoss', 'ExLoss'])
parser.add_argument('--init', type=float, default=-1)
parser.add_argument('-m', '--momentum', type=float, default=0.5)
parser.add_argument('-e', '--epochs', type=int, default=70)
parser.add_argument('-s', '--step_size', type=int, default=55)
parser.add_argument('--lamda', type=float, default=0.5)
main(parser.parse_args())
| 41.783673
| 225
| 0.65019
| 1,460
| 10,237
| 4.296575
| 0.197945
| 0.037303
| 0.054201
| 0.01913
| 0.38578
| 0.344492
| 0.295234
| 0.227164
| 0.184441
| 0.184441
| 0
| 0.020705
| 0.207385
| 10,237
| 244
| 226
| 41.954918
| 0.752403
| 0.123376
| 0
| 0.053571
| 0
| 0
| 0.099921
| 0.015061
| 0
| 0
| 0
| 0
| 0
| 1
| 0.011905
| false
| 0
| 0.125
| 0
| 0.142857
| 0.065476
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
7c0e42d68dd892a292e20be61de2cca89811eb9b
| 6,252
|
py
|
Python
|
consumer/tests/test__index_handler.py
|
eHealthAfrica/aether-elasticsearch-consumer
|
fc29a1da8cfd7482257b1023b50a1a43372886c5
|
[
"Apache-2.0"
] | null | null | null |
consumer/tests/test__index_handler.py
|
eHealthAfrica/aether-elasticsearch-consumer
|
fc29a1da8cfd7482257b1023b50a1a43372886c5
|
[
"Apache-2.0"
] | 8
|
2018-08-02T09:11:22.000Z
|
2021-09-13T14:12:22.000Z
|
consumer/tests/test__index_handler.py
|
eHealthAfrica/aether-elasticsearch-consumer
|
fc29a1da8cfd7482257b1023b50a1a43372886c5
|
[
"Apache-2.0"
] | 1
|
2019-10-29T11:29:32.000Z
|
2019-10-29T11:29:32.000Z
|
# Copyright (C) 2019 by eHealth Africa : http://www.eHealthAfrica.org
#
# See the NOTICE file distributed with this work for additional information
# regarding copyright ownership.
#
# Licensed under the Apache License, Version 2.0 (the 'License');
# you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import json
import pytest
import requests
import responses
from time import sleep
from elasticsearch.exceptions import NotFoundError
from aet.logger import get_logger
from app import index_handler
from . import * # noqa # fixtures
LOG = get_logger('TEST-IDX')
# convenience function for jsonpath: `first` comes from the fixtures import above
@responses.activate
@pytest.mark.unit
def test__handle_http():
responses.add(
responses.GET,
'http://bad-url',
json={'error': 'not found'},
status=404
)
res = requests.get('http://bad-url')
with pytest.raises(requests.exceptions.HTTPError):
index_handler.handle_http(res)
@pytest.mark.unit
def test__get_es_index_from_autoconfig(SubscriptionDefinition, ComplexSchema):
es_options = SubscriptionDefinition.get('es_options')
tenant = 'dev'
name = 'a-topic'
alias = es_options.get('alias_name')
index = index_handler.get_es_index_from_subscription(
es_options, name, tenant, ComplexSchema
)
LOG.debug(json.dumps(index, indent=2))
assert(first('$.name', index) == f'{tenant}.{name}')
geo_name = es_options['geo_point_name']
assert(first(
f'$.body.mappings._doc.properties.{geo_name}', index) is not None)
@pytest.mark.unit
def test__get_index_for_topic(SubscriptionDefinition, ComplexSchema):
name = 'Person'
es_options = SubscriptionDefinition.get('es_options')
geo_name = es_options.get('geo_point_name')
auto_ts = es_options.get('auto_timestamp')
index = index_handler.get_index_for_topic(name, geo_name, auto_ts, ComplexSchema)
index = index.get('mappings', None)
assert(len(index) == 1)
assert(first('$._doc', index) is not None)
assert(first(f'$._doc.properties.{geo_name}.type', index) == 'geo_point')
assert(first(f'$._doc._meta.aet_auto_ts', index) == auto_ts)
@pytest.mark.unit
def test__get_es_types_from_schema(ComplexSchema):
res = index_handler.get_es_types_from_schema(ComplexSchema)
assert(first('$.beds.type', res) == 'integer')
assert(first('$.username.type', res) == 'keyword')
assert(first('$._start.type', res) == 'date')
assert(first('$.geometry.type', res) == 'object')
assert(first('$.meta.type', res) == 'object')
assert(first('$.mandatory_date.type', res) == 'date')
assert(first('$.mandatory_date.format', res) == 'date')
assert(first('$.optional_dt.type', res) == 'date')
assert(first('$.optional_dt.format', res) == 'epoch_millis')
assert(len(list(res.keys())) == 55)
@pytest.mark.unit
def test__make_kibana_index(AutoGenSchema):
name = 'kibana-index-name'
res = index_handler.make_kibana_index(name, AutoGenSchema)
assert(res.get('attributes', {}).get('title') == name)
@pytest.mark.unit
def test___find_timestamp(ComplexSchema):
result = index_handler._find_timestamp(ComplexSchema)
assert(result == 'timestamp')
@pytest.mark.unit
def test___format_lookups(ComplexSchema):
formatted = index_handler._format_lookups(ComplexSchema)
assert(
json.dumps(
formatted.get(
'operational_status'), sort_keys=True) ==
json.dumps(
SAMPLE_FIELD_LOOKUP.get(
'operational_status'), sort_keys=True)
)
@pytest.mark.unit
def test___format_single_lookup(ComplexSchema):
matching = ComplexSchema.get_node('MySurvey.operational_status')
res = index_handler._format_single_lookup(matching)
assert(
json.dumps(res, sort_keys=True) ==
json.dumps(SAMPLE_FIELD_LOOKUP.get(
'operational_status'), sort_keys=True)
)
@pytest.mark.unit
def test__get_alias_from_namespace():
namespace = 'A_Gather_Form_V1'
res = index_handler.get_alias_from_namespace(namespace)
assert(res == 'A_Gather_Form')
@pytest.mark.integration
def test__update_es_index(TestElasticsearch, PolySchemaA, PolySchemaB):
# register index with mapping
es = TestElasticsearch.get_session()
doc_id = 'poly-test-doc'
doc = {
'id': doc_id,
'poly': '1001'
}
index_a = index_handler.get_es_index_from_subscription(
es_options={},
name='test1',
tenant='test-tenant',
schema=PolySchemaA
)
index_name = index_a.get('name')
index_b = index_handler.get_es_index_from_subscription(
es_options={},
name='test1',
tenant='test-tenant',
schema=PolySchemaB
)
alias = index_handler.get_alias_from_namespace(PolySchemaA.name)
# register schema A
index_handler.update_es_index(es, index_a, 'test-tenant', alias)
# put doc
es.create(
index=index_name,
id=doc_id,
body=doc
)
es.indices.refresh(index=index_name)
res = es.search(index=index_name, body={
"query": {"term": {"poly": "1001"}}
})
assert(res.get('hits').get('max_score') < 1.0) # find imperfect by string
res = es.search(index=index_name, body={
"query": {"term": {"poly": 1001}}
})
assert(res.get('hits').get('max_score') < 1.0) # find imperfect by string
# migrate to schema B
index_handler.update_es_index(es, index_b, 'test-tenant', alias)
es.indices.refresh(index=index_name)
res = es.search(index=index_name, body={
"query": {"term": {"poly": "1001"}}
})
assert(res.get('hits').get('max_score') == 1.0) # find by string
res = es.search(index=index_name, body={
"query": {"term": {"poly": 1001}}
})
assert(res.get('hits').get('max_score') == 1.0) # find by int
| 31.736041
| 85
| 0.679303
| 812
| 6,252
| 5.008621
| 0.261084
| 0.044259
| 0.030981
| 0.03762
| 0.3612
| 0.313253
| 0.22744
| 0.198918
| 0.198918
| 0.198918
| 0
| 0.009053
| 0.1873
| 6,252
| 196
| 86
| 31.897959
| 0.79138
| 0.143154
| 0
| 0.288732
| 0
| 0
| 0.15503
| 0.031907
| 0
| 0
| 0
| 0
| 0.176056
| 1
| 0.070423
| false
| 0
| 0.06338
| 0
| 0.133803
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
7c0f8b607ed4a4992f5429c04c93d80a3e6a70fc
| 9,656
|
py
|
Python
|
tests/test_api_transaction.py
|
preston-wagner/authorizesauce
|
130ee30f500c8b5bf9a6384296ca4f5d5bb565e7
|
[
"MIT"
] | null | null | null |
tests/test_api_transaction.py
|
preston-wagner/authorizesauce
|
130ee30f500c8b5bf9a6384296ca4f5d5bb565e7
|
[
"MIT"
] | null | null | null |
tests/test_api_transaction.py
|
preston-wagner/authorizesauce
|
130ee30f500c8b5bf9a6384296ca4f5d5bb565e7
|
[
"MIT"
] | 1
|
2020-06-17T15:48:46.000Z
|
2020-06-17T15:48:46.000Z
|
from datetime import date
from six import BytesIO, binary_type, u
from six.moves.urllib.parse import parse_qsl, urlencode
from unittest2 import TestCase
import mock
from authorizesauce.apis.transaction import PROD_URL, TEST_URL, TransactionAPI
from authorizesauce.data import Address, CreditCard
from authorizesauce.exceptions import AuthorizeConnectionError, \
AuthorizeResponseError
class MockResponse(BytesIO):
class Headers(dict):
def getparam(self, *args, **kwargs):
"""Python 2 version"""
return None
def get_content_charset(self, failobj=None, *args, **kwargs):
"""Python 3 version"""
return failobj
def __init__(self, *args, **kwargs):
BytesIO.__init__(self, *args, **kwargs)
self.headers = self.Headers()
SUCCESS = MockResponse(
b'1;1;1;This transaction has been approved.;IKRAGJ;Y;2171062816;;;20.00;CC'
b';auth_only;;Jeffrey;Schenck;;45 Rose Ave;Venice;CA;90291;USA;;;;;;;;;;;;'
b';;;;;375DD9293D7605E20DF0B437EE2A7B92;P;2;;;;;;;;;;;XXXX1111;Visa;;;;;;;'
b';;;;;;;;;;Y')
PARSED_SUCCESS = {
'cvv_response': 'P',
'authorization_code': 'IKRAGJ',
'response_code': '1',
'amount': '20.00',
'transaction_type': 'auth_only',
'avs_response': 'Y',
'response_reason_code': '1',
'response_reason_text': 'This transaction has been approved.',
'transaction_id': '2171062816',
}
ERROR = MockResponse(
b'2;1;2;This transaction has been declined.;000000;N;2171062816;;;20.00;CC'
b';auth_only;;Jeffrey;Schenck;;45 Rose Ave;Venice;CA;90291;USA;;;;;;;;;;;;'
b';;;;;375DD9293D7605E20DF0B437EE2A7B92;N;1;;;;;;;;;;;XXXX1111;Visa;;;;;;;'
b';;;;;;;;;;Y')
PARSED_ERROR = {
'cvv_response': 'N',
'authorization_code': '000000',
'response_code': '2',
'amount': '20.00',
'transaction_type': 'auth_only',
'avs_response': 'N',
'response_reason_code': '2',
'response_reason_text': 'This transaction has been declined.',
'transaction_id': '2171062816',
}
def _unicode_str(s):
if isinstance(s, binary_type):
return s.decode('unicode_escape')
return s
def _are_params_eq(params1, params2):
_params1, _params2 = map(_unicode_str, (params1, params2))
return frozenset(parse_qsl(_params1)) == frozenset(parse_qsl(_params2))
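# e.g. _are_params_eq('a=1&b=2', b'b=2&a=1') -> True: the query strings are
# compared as unordered sets of (key, value) pairs, so parameter order and
# bytes-vs-str differences are ignored.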
class TransactionAPITests(TestCase):
def setUp(self):
self.api = TransactionAPI('123', '456')
self.success = lambda *args, **kwargs: SUCCESS.seek(0) or SUCCESS
self.error = lambda *args, **kwargs: ERROR.seek(0) or ERROR
self.year = date.today().year + 10
self.credit_card = CreditCard('4111111111111111', self.year, 1, '911')
self.address = Address('45 Rose Ave', 'Venice', 'CA', '90291')
def test_basic_api(self):
api = TransactionAPI('123', '456')
self.assertEqual(api.url, TEST_URL)
api = TransactionAPI('123', '456', debug=False)
self.assertEqual(api.url, PROD_URL)
@mock.patch('authorizesauce.apis.transaction.urlopen')
def test_make_call(self, urlopen):
urlopen.side_effect = self.success
params = {'a': '1', 'b': '2'}
result = self.api._make_call(params)
self.assertEqual(urlopen.call_args[0][0], TEST_URL)
self.assertTrue(_are_params_eq(
urlopen.call_args[1]['data'], urlencode(params)
))
self.assertEqual(result, PARSED_SUCCESS)
@mock.patch('authorizesauce.apis.transaction.urlopen')
def test_make_call_with_unicode(self, urlopen):
urlopen.side_effect = self.success
result = self.api._make_call({u('\xe3'): '1', 'b': u('\xe3')})
self.assertEqual(urlopen.call_args[0][0], TEST_URL)
self.assertTrue(_are_params_eq(
urlopen.call_args[1]['data'], 'b=%C3%A3&%C3%A3=1'
))
self.assertEqual(result, PARSED_SUCCESS)
@mock.patch('authorizesauce.apis.transaction.urlopen')
def test_make_call_connection_error(self, urlopen):
urlopen.side_effect = IOError('Borked')
self.assertRaises(AuthorizeConnectionError, self.api._make_call,
{'a': '1', 'b': '2'})
@mock.patch('authorizesauce.apis.transaction.urlopen')
def test_make_call_response_error(self, urlopen):
urlopen.side_effect = self.error
try:
self.api._make_call({'a': '1', 'b': '2'})
except AuthorizeResponseError as e:
self.assertTrue(str(e).startswith(
'This transaction has been declined.'
))
self.assertEqual(e.full_response, PARSED_ERROR)
def test_add_params(self):
self.assertEqual(self.api._add_params({}), {})
params = self.api._add_params({}, credit_card=self.credit_card)
self.assertEqual(params, {
'x_card_num': '4111111111111111',
'x_exp_date': '01-{0}'.format(self.year),
'x_card_code': '911',
})
params = self.api._add_params({}, address=self.address)
self.assertEqual(params, {
'x_address': '45 Rose Ave',
'x_city': 'Venice',
'x_state': 'CA',
'x_zip': '90291',
'x_country': 'US',
})
params = self.api._add_params(
{}, credit_card=self.credit_card, address=self.address
)
self.assertEqual(params, {
'x_card_num': '4111111111111111',
'x_exp_date': '01-{0}'.format(self.year),
'x_card_code': '911',
'x_address': '45 Rose Ave',
'x_city': 'Venice',
'x_state': 'CA',
'x_zip': '90291',
'x_country': 'US',
})
@mock.patch('authorizesauce.apis.transaction.urlopen')
def test_auth(self, urlopen):
urlopen.side_effect = self.success
result = self.api.auth(20, self.credit_card, self.address)
self.assertEqual(urlopen.call_args[0][0], TEST_URL)
self.assertTrue(urlopen.call_args[1]['data'], (
'x_login=123&x_zip=90291&x_card_num=4111111111111111&'
'x_amount=20.00&x_tran_key=456&x_city=Venice&x_country=US&'
'x_version=3.1&x_state=CA&x_delim_char=%3B&'
'x_address=45+Rose+Ave&x_exp_date=01-{0}&x_test_request=FALSE'
'&x_card_code=911&x_type=AUTH_ONLY&x_delim_data=TRUE'.format(
str(self.year)
)
))
self.assertEqual(result, PARSED_SUCCESS)
@mock.patch('authorizesauce.apis.transaction.urlopen')
def test_capture(self, urlopen):
urlopen.side_effect = self.success
result = self.api.capture(20, self.credit_card, self.address)
self.assertEqual(urlopen.call_args[0][0], TEST_URL)
self.assertTrue(urlopen.call_args[1]['data'], (
'x_login=123&x_zip=90291&x_card_num=4111111111111111&'
'x_amount=20.00&x_tran_key=456&x_city=Venice&x_country=US&'
'x_version=3.1&x_state=CA&x_delim_char=%3B&'
'x_address=45+Rose+Ave&x_exp_date=01-{0}&x_test_request=FALSE'
'&x_card_code=911&x_type=AUTH_ONLY&x_delim_data=TRUE'.format(
str(self.year)
)
))
self.assertEqual(result, PARSED_SUCCESS)
@mock.patch('authorizesauce.apis.transaction.urlopen')
def test_settle(self, urlopen):
urlopen.side_effect = self.success
# Test without specified amount
result = self.api.settle('123456')
self.assertEqual(urlopen.call_args[0][0], TEST_URL)
self.assertTrue(urlopen.call_args[1]['data'], (
'https://test.authorize.net/gateway/transact.dll?x_login=123'
'&x_trans_id=123456&x_version=3.1&x_delim_char=%3B'
'&x_type=PRIOR_AUTH_CAPTURE&x_delim_data=TRUE&x_tran_key=456'
'&x_test_request=FALSE'
))
self.assertEqual(result, PARSED_SUCCESS)
# Test with specified amount
result = self.api.settle('123456', amount=10)
self.assertEqual(urlopen.call_args[0][0], TEST_URL)
self.assertTrue(urlopen.call_args[1]['data'], (
'https://test.authorize.net/gateway/transact.dll?x_login=123'
'&x_trans_id=123456&x_version=3.1&x_delim_char=%3B'
'&x_type=PRIOR_AUTH_CAPTURE&x_amount=10.00&x_delim_data=TRUE'
'&x_tran_key=456&x_test_request=FALSE'
))
self.assertEqual(result, PARSED_SUCCESS)
@mock.patch('authorizesauce.apis.transaction.urlopen')
def test_credit(self, urlopen):
urlopen.side_effect = self.success
# Test with transaction_id, amount
result = self.api.credit('1111', '123456', 10)
self.assertEqual(urlopen.call_args[0][0], TEST_URL)
self.assertTrue(urlopen.call_args[1]['data'], (
'https://test.authorize.net/gateway/transact.dll?x_login=123'
'&x_trans_id=123456&x_version=3.1&x_amount=10.00'
'&x_delim_char=%3B&x_type=CREDIT&x_card_num=1111'
'&x_delim_data=TRUE&x_tran_key=456&x_test_request=FALSE'
))
self.assertEqual(result, PARSED_SUCCESS)
@mock.patch('authorizesauce.apis.transaction.urlopen')
def test_void(self, urlopen):
urlopen.side_effect = self.success
result = self.api.void('123456')
self.assertEqual(urlopen.call_args[0][0], TEST_URL)
self.assertTrue(urlopen.call_args[1]['data'], (
'https://test.authorize.net/gateway/transact.dll?x_login=123'
'&x_trans_id=123456&x_version=3.1&x_delim_char=%3B&x_type=VOID'
'&x_delim_data=TRUE&x_tran_key=456&x_test_request=FALSE'
))
self.assertEqual(result, PARSED_SUCCESS)
| 40.06639
| 79
| 0.629453
| 1,225
| 9,656
| 4.730612
| 0.149388
| 0.059534
| 0.041415
| 0.041933
| 0.69698
| 0.662985
| 0.630026
| 0.586885
| 0.556514
| 0.541329
| 0
| 0.065495
| 0.222038
| 9,656
| 240
| 80
| 40.233333
| 0.705937
| 0.012842
| 0
| 0.487923
| 0
| 0.009662
| 0.310117
| 0.185629
| 0
| 0
| 0
| 0
| 0.15942
| 1
| 0.082126
| false
| 0
| 0.038647
| 0
| 0.15942
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
7c11512944aa360a8ca2b2179d573b01222bea5e
| 2,621
|
py
|
Python
|
build_json.py
|
sungpyocho/covid19-aichi-tools
|
5170bf405f67b14179fe10838701ec5baa9d6cc1
|
[
"MIT"
] | null | null | null |
build_json.py
|
sungpyocho/covid19-aichi-tools
|
5170bf405f67b14179fe10838701ec5baa9d6cc1
|
[
"MIT"
] | null | null | null |
build_json.py
|
sungpyocho/covid19-aichi-tools
|
5170bf405f67b14179fe10838701ec5baa9d6cc1
|
[
"MIT"
] | null | null | null |
import csv
import io
import json
import pandas as pd
import sys
from dateutil import tz
from datetime import datetime, date, time, timedelta
# Japan Standard Time (UTC + 09:00)
JST = tz.gettz('Asia/Tokyo')
JST_current_time = datetime.now(tz=JST).strftime('%Y/%m/%d %H:%M')
patients_list = []
patients_summary_dic = {}
# Get the command-line arguments (note: no handling for invalid input)
args = sys.argv
with open('data/patients.csv', 'r', encoding="utf-8") as csvfile:
reader = csv.DictReader(csvfile)
for row in reader:
patients_list.append(row)
patients_summary_dic.setdefault(row['date'], 0)
patients_summary_dic[row['date']] += 1
# Build the list of dates
strdt = datetime.strptime("2020-01-26", '%Y-%m-%d')  # start date
enddt = datetime.strptime(args[1], '%Y-%m-%d')  # end date
# Number of days between the two dates (+1 so the list includes the last day)
days_num = (enddt - strdt).days + 1
datelist = []
for i in range(days_num):
datelist.append(strdt + timedelta(days = i))
patients_summary_list = []
# Walk the dates from newest to oldest and start output from the first day with a nonzero subtotal
foundZero = True
for date in reversed(datelist):
if (not (date.strftime('%Y-%m-%d') in patients_summary_dic)) and foundZero:
continue
else:
foundZero = False
patients_summary_dic.setdefault(date.strftime('%Y-%m-%d'), 0)
patients_summary_list.append({
"日付": date.strftime('%Y-%m-%d'),
"小計": patients_summary_dic[date.strftime('%Y-%m-%d')]
})
patients_summary_list = patients_summary_list[::-1]  # restore ascending date order
# Convert main_summary_history.csv into a pandas DataFrame
main_summary_history_df = pd.read_csv('data/main_summary_history.csv', keep_default_na=False)
# Load the inspection counts
inspections_summary_list = []
with open('data/inspections_summary.csv', 'r', encoding="utf-8") as csvfile:
reader = csv.DictReader(csvfile)
for row in reader:
inspections_summary_list.append({
"日付": datetime.strptime(row['検査日'], '%Y/%m/%d').strftime('%Y-%m-%d'),
"小計": int(row['検査件数(件)']),
"合算": row['合算']
})
data = {
"lastUpdate": JST_current_time,
"patients": {
"date": JST_current_time,
"data": patients_list
},
"patients_summary" : {
"date": JST_current_time,
"data": patients_summary_list
},
"inspections_summary" : {
"date": JST_current_time,
"data": inspections_summary_list
},
"main_summary_history": {
"date": JST_current_time,
"data": json.loads(main_summary_history_df.to_json(orient='records', force_ascii=False))
}
}
sys.stdout = io.TextIOWrapper(sys.stdout.buffer, encoding='utf-8')
print(json.dumps(data, indent=4, ensure_ascii=False))
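# Shape of the emitted JSON (illustrative; values are examples):
# {
#     "lastUpdate": "2020/04/01 12:00",
#     "patients": {"date": "...", "data": [ ...rows of patients.csv... ]},
#     "patients_summary": {"date": "...", "data": [{"日付": "2020-01-26", "小計": 0}, ...]},
#     "inspections_summary": {"date": "...", "data": [{"日付": "...", "小計": 0, "合算": "..."}, ...]},
#     "main_summary_history": {"date": "...", "data": [ ...rows of main_summary_history.csv... ]}
# }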
| 28.48913
| 96
| 0.655857
| 343
| 2,621
| 4.819242
| 0.341108
| 0.108893
| 0.016334
| 0.039927
| 0.195402
| 0.136721
| 0.078645
| 0.078645
| 0.078645
| 0.078645
| 0
| 0.011342
| 0.192675
| 2,621
| 91
| 97
| 28.802198
| 0.769849
| 0.076307
| 0
| 0.147059
| 0
| 0
| 0.137344
| 0.023651
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.102941
| 0
| 0.102941
| 0.014706
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
7c138f84c229bf0a17e877706fc36f489907d8bf
| 23,732
|
py
|
Python
|
scipy/optimize/_numdiff.py
|
jeremiedbb/scipy
|
2bea64c334b18fd445a7945b350d7ace2dc22913
|
[
"BSD-3-Clause"
] | 1
|
2019-12-19T16:51:27.000Z
|
2019-12-19T16:51:27.000Z
|
scipy/optimize/_numdiff.py
|
jeremiedbb/scipy
|
2bea64c334b18fd445a7945b350d7ace2dc22913
|
[
"BSD-3-Clause"
] | null | null | null |
scipy/optimize/_numdiff.py
|
jeremiedbb/scipy
|
2bea64c334b18fd445a7945b350d7ace2dc22913
|
[
"BSD-3-Clause"
] | null | null | null |
"""Routines for numerical differentiation."""
from __future__ import division
import numpy as np
from numpy.linalg import norm
from scipy.sparse.linalg import LinearOperator
from ..sparse import issparse, csc_matrix, csr_matrix, coo_matrix, find
from ._group_columns import group_dense, group_sparse
EPS = np.finfo(np.float64).eps
def _adjust_scheme_to_bounds(x0, h, num_steps, scheme, lb, ub):
"""Adjust final difference scheme to the presence of bounds.
Parameters
----------
x0 : ndarray, shape (n,)
Point at which we wish to estimate derivative.
h : ndarray, shape (n,)
Desired finite difference steps.
num_steps : int
Number of `h` steps in one direction required to implement finite
difference scheme. For example, 2 means that we need to evaluate
f(x0 + 2 * h) or f(x0 - 2 * h)
scheme : {'1-sided', '2-sided'}
Whether steps in one or both directions are required. In other
words '1-sided' applies to forward and backward schemes, '2-sided'
applies to center schemes.
lb : ndarray, shape (n,)
Lower bounds on independent variables.
ub : ndarray, shape (n,)
Upper bounds on independent variables.
Returns
-------
h_adjusted : ndarray, shape (n,)
        Adjusted step sizes. Step size decreases only if a sign flip or
        switching to a one-sided scheme doesn't allow taking a full step.
use_one_sided : ndarray of bool, shape (n,)
Whether to switch to one-sided scheme. Informative only for
``scheme='2-sided'``.
"""
if scheme == '1-sided':
use_one_sided = np.ones_like(h, dtype=bool)
elif scheme == '2-sided':
h = np.abs(h)
use_one_sided = np.zeros_like(h, dtype=bool)
else:
raise ValueError("`scheme` must be '1-sided' or '2-sided'.")
if np.all((lb == -np.inf) & (ub == np.inf)):
return h, use_one_sided
h_total = h * num_steps
h_adjusted = h.copy()
lower_dist = x0 - lb
upper_dist = ub - x0
if scheme == '1-sided':
x = x0 + h_total
violated = (x < lb) | (x > ub)
fitting = np.abs(h_total) <= np.maximum(lower_dist, upper_dist)
h_adjusted[violated & fitting] *= -1
forward = (upper_dist >= lower_dist) & ~fitting
h_adjusted[forward] = upper_dist[forward] / num_steps
backward = (upper_dist < lower_dist) & ~fitting
h_adjusted[backward] = -lower_dist[backward] / num_steps
elif scheme == '2-sided':
central = (lower_dist >= h_total) & (upper_dist >= h_total)
forward = (upper_dist >= lower_dist) & ~central
h_adjusted[forward] = np.minimum(
h[forward], 0.5 * upper_dist[forward] / num_steps)
use_one_sided[forward] = True
backward = (upper_dist < lower_dist) & ~central
h_adjusted[backward] = -np.minimum(
h[backward], 0.5 * lower_dist[backward] / num_steps)
use_one_sided[backward] = True
min_dist = np.minimum(upper_dist, lower_dist) / num_steps
adjusted_central = (~central & (np.abs(h_adjusted) <= min_dist))
h_adjusted[adjusted_central] = min_dist[adjusted_central]
use_one_sided[adjusted_central] = False
return h_adjusted, use_one_sided
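# Illustrative check of the adjustment logic above (the values are made up
# for demonstration and are not part of the original module): near an upper
# bound, the 2-sided scheme should fall back to a one-sided backward step.
#
#     >>> _adjust_scheme_to_bounds(np.array([0.95]), np.array([0.1]), 1,
#     ...                          '2-sided', np.array([0.0]), np.array([1.0]))
#     (array([-0.1]), array([ True]))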
relative_step = {"2-point": EPS**0.5,
"3-point": EPS**(1/3),
"cs": EPS**0.5}
def _compute_absolute_step(rel_step, x0, method):
if rel_step is None:
rel_step = relative_step[method]
sign_x0 = (x0 >= 0).astype(float) * 2 - 1
return rel_step * sign_x0 * np.maximum(1.0, np.abs(x0))
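# Sketch of the default step selection above (illustrative; exact digits
# depend on the platform's float64 epsilon). The step is roughly
# EPS**0.5 * sign(x0) * max(1, |x0|) for the '2-point' method:
#
#     >>> _compute_absolute_step(None, np.array([0.0, -2.0]), '2-point')
#     array([ 1.49011612e-08, -2.98023224e-08])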
def _prepare_bounds(bounds, x0):
lb, ub = [np.asarray(b, dtype=float) for b in bounds]
if lb.ndim == 0:
lb = np.resize(lb, x0.shape)
if ub.ndim == 0:
ub = np.resize(ub, x0.shape)
return lb, ub
def group_columns(A, order=0):
"""Group columns of a 2-D matrix for sparse finite differencing [1]_.
Two columns are in the same group if, in each row, at least one of them
has a zero. A greedy sequential algorithm is used to construct groups.
Parameters
----------
A : array_like or sparse matrix, shape (m, n)
Matrix of which to group columns.
order : int, iterable of int with shape (n,) or None
Permutation array which defines the order of column enumeration.
If int or None, a random permutation is used, with `order` used as
a random seed. Default is 0, that is, use a random permutation but
guarantee repeatability.
Returns
-------
groups : ndarray of int, shape (n,)
Contains values from 0 to n_groups-1, where n_groups is the number
of groups found. Each value ``groups[i]`` is the index of the group
to which the ith column is assigned. The procedure is helpful only
if n_groups is significantly smaller than n.
References
----------
.. [1] A. Curtis, M. J. D. Powell, and J. Reid, "On the estimation of
sparse Jacobian matrices", Journal of the Institute of Mathematics
and its Applications, 13 (1974), pp. 117-120.
"""
if issparse(A):
A = csc_matrix(A)
else:
A = np.atleast_2d(A)
A = (A != 0).astype(np.int32)
if A.ndim != 2:
raise ValueError("`A` must be 2-dimensional.")
m, n = A.shape
if order is None or np.isscalar(order):
rng = np.random.RandomState(order)
order = rng.permutation(n)
else:
order = np.asarray(order)
if order.shape != (n,):
raise ValueError("`order` has incorrect shape.")
A = A[:, order]
if issparse(A):
groups = group_sparse(m, n, A.indices, A.indptr)
else:
groups = group_dense(m, n, A)
groups[order] = groups.copy()
return groups
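# Illustrative example (not part of the original module): for a tridiagonal
# sparsity pattern, columns i and i + 3 never share a row, so only a few
# groups are needed (three for this pattern with the default ordering):
#
#     >>> A = np.eye(5) + np.eye(5, k=1) + np.eye(5, k=-1)
#     >>> groups = group_columns(A)
#     >>> int(np.max(groups)) + 1
#     3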
def approx_derivative(fun, x0, method='3-point', rel_step=None, f0=None,
bounds=(-np.inf, np.inf), sparsity=None,
as_linear_operator=False, args=(), kwargs={}):
"""Compute finite difference approximation of the derivatives of a
vector-valued function.
If a function maps from R^n to R^m, its derivatives form an m-by-n matrix
called the Jacobian, where element (i, j) is the partial derivative of
f[i] with respect to x[j].
Parameters
----------
fun : callable
Function of which to estimate the derivatives. The argument x
passed to this function is ndarray of shape (n,) (never a scalar
even if n=1). It must return 1-D array_like of shape (m,) or a scalar.
x0 : array_like of shape (n,) or float
Point at which to estimate the derivatives. Float will be converted
to a 1-D array.
method : {'3-point', '2-point', 'cs'}, optional
Finite difference method to use:
- '2-point' - use the first-order accurate forward or backward
difference.
- '3-point' - use the central difference at interior points and the
second-order accurate forward or backward difference
near the boundary.
- 'cs' - use a complex-step finite difference scheme. This assumes
that the user function is real-valued and can be
analytically continued to the complex plane. Otherwise, it
produces bogus results.
rel_step : None or array_like, optional
Relative step size to use. The absolute step size is computed as
``h = rel_step * sign(x0) * max(1, abs(x0))``, possibly adjusted to
fit into the bounds. For ``method='3-point'`` the sign of `h` is
ignored. If None (default) then step is selected automatically,
see Notes.
f0 : None or array_like, optional
If not None, it is assumed to be equal to ``fun(x0)``; in this case
``fun(x0)`` is not called. Default is None.
bounds : tuple of array_like, optional
Lower and upper bounds on independent variables. Defaults to no bounds.
Each bound must match the size of `x0` or be a scalar, in the latter
case the bound will be the same for all variables. Use it to limit the
range of function evaluation. Bounds checking is not implemented
when `as_linear_operator` is True.
sparsity : {None, array_like, sparse matrix, 2-tuple}, optional
Defines the sparsity structure of the Jacobian matrix. If the Jacobian
matrix is known to have only a few non-zero elements in each row, then
it is possible to estimate several of its columns by a single function
evaluation [3]_. To perform such economical computations two ingredients
are required:
* structure : array_like or sparse matrix of shape (m, n). A zero
element means that the corresponding element of the Jacobian
is identically zero.
* groups : array_like of shape (n,). A column grouping for a given
sparsity structure, use `group_columns` to obtain it.
A single array or a sparse matrix is interpreted as a sparsity
structure, and groups are computed inside the function. A tuple is
interpreted as (structure, groups). If None (default), a standard
dense differencing will be used.
Note that sparse differencing makes sense only for large Jacobian
matrices where each row contains few non-zero elements.
as_linear_operator : bool, optional
When True, the function returns a `scipy.sparse.linalg.LinearOperator`.
Otherwise it returns a dense array or a sparse matrix depending on
`sparsity`. The linear operator provides an efficient way of computing
``J.dot(p)`` for any vector ``p`` of shape (n,), but does not allow
direct access to individual elements of the matrix. By default
`as_linear_operator` is False.
args, kwargs : tuple and dict, optional
Additional arguments passed to `fun`. Both empty by default.
The calling signature is ``fun(x, *args, **kwargs)``.
Returns
-------
J : {ndarray, sparse matrix, LinearOperator}
Finite difference approximation of the Jacobian matrix.
If `as_linear_operator` is True, a LinearOperator with shape (m, n)
is returned. Otherwise a dense array or sparse matrix is returned,
depending on how `sparsity` is defined. If `sparsity` is None, an
ndarray with shape (m, n) is returned; if `sparsity` is not None, a
csr_matrix with shape (m, n) is returned. Sparse matrices and linear
operators are always returned as 2-D structures; for ndarrays, if
m=1 the result is returned as a 1-D gradient array with shape (n,).
See Also
--------
check_derivative : Check correctness of a function computing derivatives.
Notes
-----
If `rel_step` is not provided, it is assigned ``EPS**(1/s)``, where EPS is
the machine epsilon for float64 numbers, s=2 for the '2-point' method and
s=3 for the '3-point' method. Such a relative step approximately minimizes
the sum of truncation and round-off errors, see [1]_.
A finite difference scheme for the '3-point' method is selected
automatically. The well-known central difference scheme is used for points
sufficiently far from the boundary, and the 3-point forward or backward
scheme is used for points near the boundary. Both schemes are second-order
accurate in terms of Taylor expansion. Refer to [2]_ for the formulas of
the 3-point forward and backward difference schemes.
For dense differencing, when m=1 the Jacobian is returned with shape (n,);
on the other hand, when n=1 the Jacobian is returned with shape (m, 1).
Our motivation is the following: a) it handles the case of gradient
computation (m=1) in a conventional way; b) it clearly separates these two
different cases; c) in all cases np.atleast_2d can be called to get a 2-D
Jacobian with correct dimensions.
References
----------
.. [1] W. H. Press et. al. "Numerical Recipes. The Art of Scientific
Computing. 3rd edition", sec. 5.7.
.. [2] A. Curtis, M. J. D. Powell, and J. Reid, "On the estimation of
sparse Jacobian matrices", Journal of the Institute of Mathematics
and its Applications, 13 (1974), pp. 117-120.
.. [3] B. Fornberg, "Generation of Finite Difference Formulas on
Arbitrarily Spaced Grids", Mathematics of Computation 51, 1988.
Examples
--------
>>> import numpy as np
>>> from scipy.optimize import approx_derivative
>>>
>>> def f(x, c1, c2):
... return np.array([x[0] * np.sin(c1 * x[1]),
... x[0] * np.cos(c2 * x[1])])
...
>>> x0 = np.array([1.0, 0.5 * np.pi])
>>> approx_derivative(f, x0, args=(1, 2))
array([[ 1., 0.],
[-1., 0.]])
Bounds can be used to limit the region of function evaluation.
In the example below we compute left and right derivative at point 1.0.
>>> def g(x):
... return x**2 if x >= 1 else x
...
>>> x0 = 1.0
>>> approx_derivative(g, x0, bounds=(-np.inf, 1.0))
array([ 1.])
>>> approx_derivative(g, x0, bounds=(1.0, np.inf))
array([ 2.])
"""
if method not in ['2-point', '3-point', 'cs']:
raise ValueError("Unknown method '%s'. " % method)
x0 = np.atleast_1d(x0)
if x0.ndim > 1:
raise ValueError("`x0` must have at most 1 dimension.")
lb, ub = _prepare_bounds(bounds, x0)
if lb.shape != x0.shape or ub.shape != x0.shape:
raise ValueError("Inconsistent shapes between bounds and `x0`.")
if as_linear_operator and not (np.all(np.isinf(lb))
and np.all(np.isinf(ub))):
raise ValueError("Bounds not supported when "
"`as_linear_operator` is True.")
def fun_wrapped(x):
f = np.atleast_1d(fun(x, *args, **kwargs))
if f.ndim > 1:
raise RuntimeError("`fun` return value has "
"more than 1 dimension.")
return f
if f0 is None:
f0 = fun_wrapped(x0)
else:
f0 = np.atleast_1d(f0)
if f0.ndim > 1:
raise ValueError("`f0` passed has more than 1 dimension.")
if np.any((x0 < lb) | (x0 > ub)):
raise ValueError("`x0` violates bound constraints.")
if as_linear_operator:
if rel_step is None:
rel_step = relative_step[method]
return _linear_operator_difference(fun_wrapped, x0,
f0, rel_step, method)
else:
h = _compute_absolute_step(rel_step, x0, method)
if method == '2-point':
h, use_one_sided = _adjust_scheme_to_bounds(
x0, h, 1, '1-sided', lb, ub)
elif method == '3-point':
h, use_one_sided = _adjust_scheme_to_bounds(
x0, h, 1, '2-sided', lb, ub)
elif method == 'cs':
use_one_sided = False
if sparsity is None:
return _dense_difference(fun_wrapped, x0, f0, h,
use_one_sided, method)
else:
if not issparse(sparsity) and len(sparsity) == 2:
structure, groups = sparsity
else:
structure = sparsity
groups = group_columns(sparsity)
if issparse(structure):
structure = csc_matrix(structure)
else:
structure = np.atleast_2d(structure)
groups = np.atleast_1d(groups)
return _sparse_difference(fun_wrapped, x0, f0, h,
use_one_sided, structure,
groups, method)
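# Illustrative use of ``as_linear_operator`` (the toy function below is
# hypothetical and not part of the original module). The operator computes
# J.dot(p) with a couple of extra function evaluations per product:
#
#     >>> J = approx_derivative(lambda x: np.array([x[0]**2, x[0] * x[1]]),
#     ...                       np.array([3.0, 2.0]), as_linear_operator=True)
#     >>> J.dot(np.array([1.0, 0.0]))   # ~ first column of the Jacobian
#     array([6., 2.])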
def _linear_operator_difference(fun, x0, f0, h, method):
m = f0.size
n = x0.size
if method == '2-point':
def matvec(p):
if np.array_equal(p, np.zeros_like(p)):
return np.zeros(m)
dx = h / norm(p)
x = x0 + dx*p
df = fun(x) - f0
return df / dx
elif method == '3-point':
def matvec(p):
if np.array_equal(p, np.zeros_like(p)):
return np.zeros(m)
dx = 2*h / norm(p)
x1 = x0 - (dx/2)*p
x2 = x0 + (dx/2)*p
f1 = fun(x1)
f2 = fun(x2)
df = f2 - f1
return df / dx
elif method == 'cs':
def matvec(p):
if np.array_equal(p, np.zeros_like(p)):
return np.zeros(m)
dx = h / norm(p)
x = x0 + dx*p*1.j
f1 = fun(x)
df = f1.imag
return df / dx
else:
raise RuntimeError("Never be here.")
return LinearOperator((m, n), matvec)
def _dense_difference(fun, x0, f0, h, use_one_sided, method):
m = f0.size
n = x0.size
J_transposed = np.empty((n, m))
h_vecs = np.diag(h)
for i in range(h.size):
if method == '2-point':
x = x0 + h_vecs[i]
dx = x[i] - x0[i] # Recompute dx as exactly representable number.
df = fun(x) - f0
elif method == '3-point' and use_one_sided[i]:
x1 = x0 + h_vecs[i]
x2 = x0 + 2 * h_vecs[i]
dx = x2[i] - x0[i]
f1 = fun(x1)
f2 = fun(x2)
df = -3.0 * f0 + 4 * f1 - f2
elif method == '3-point' and not use_one_sided[i]:
x1 = x0 - h_vecs[i]
x2 = x0 + h_vecs[i]
dx = x2[i] - x1[i]
f1 = fun(x1)
f2 = fun(x2)
df = f2 - f1
elif method == 'cs':
f1 = fun(x0 + h_vecs[i]*1.j)
df = f1.imag
dx = h_vecs[i, i]
else:
raise RuntimeError("Never be here.")
J_transposed[i] = df / dx
if m == 1:
J_transposed = np.ravel(J_transposed)
return J_transposed.T
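# Note on the one-sided 3-point branch above (a restatement of the standard
# finite difference formula, added here for clarity): with f0 = f(x0),
# f1 = f(x0 + h), f2 = f(x0 + 2h) and dx = 2h,
#
#     f'(x0) ~= (-3*f0 + 4*f1 - f2) / (2*h) = df / dx,
#
# which is second-order accurate in h.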
def _sparse_difference(fun, x0, f0, h, use_one_sided,
structure, groups, method):
m = f0.size
n = x0.size
row_indices = []
col_indices = []
fractions = []
n_groups = np.max(groups) + 1
for group in range(n_groups):
# Perturb variables which are in the same group simultaneously.
e = np.equal(group, groups)
h_vec = h * e
if method == '2-point':
x = x0 + h_vec
dx = x - x0
df = fun(x) - f0
# The result is written to columns which correspond to perturbed
# variables.
cols, = np.nonzero(e)
# Find all non-zero elements in selected columns of Jacobian.
i, j, _ = find(structure[:, cols])
# Restore column indices in the full array.
j = cols[j]
elif method == '3-point':
# Here we do conceptually the same but separate one-sided
# and two-sided schemes.
x1 = x0.copy()
x2 = x0.copy()
mask_1 = use_one_sided & e
x1[mask_1] += h_vec[mask_1]
x2[mask_1] += 2 * h_vec[mask_1]
mask_2 = ~use_one_sided & e
x1[mask_2] -= h_vec[mask_2]
x2[mask_2] += h_vec[mask_2]
dx = np.zeros(n)
dx[mask_1] = x2[mask_1] - x0[mask_1]
dx[mask_2] = x2[mask_2] - x1[mask_2]
f1 = fun(x1)
f2 = fun(x2)
cols, = np.nonzero(e)
i, j, _ = find(structure[:, cols])
j = cols[j]
mask = use_one_sided[j]
df = np.empty(m)
rows = i[mask]
df[rows] = -3 * f0[rows] + 4 * f1[rows] - f2[rows]
rows = i[~mask]
df[rows] = f2[rows] - f1[rows]
elif method == 'cs':
f1 = fun(x0 + h_vec*1.j)
df = f1.imag
dx = h_vec
cols, = np.nonzero(e)
i, j, _ = find(structure[:, cols])
j = cols[j]
else:
raise ValueError("Never be here.")
# All that's left is to compute the fraction. We store i, j and
# fractions as separate arrays and later construct coo_matrix.
row_indices.append(i)
col_indices.append(j)
fractions.append(df[i] / dx[j])
row_indices = np.hstack(row_indices)
col_indices = np.hstack(col_indices)
fractions = np.hstack(fractions)
J = coo_matrix((fractions, (row_indices, col_indices)), shape=(m, n))
return csr_matrix(J)
def check_derivative(fun, jac, x0, bounds=(-np.inf, np.inf), args=(),
kwargs={}):
"""Check correctness of a function computing derivatives (Jacobian or
gradient) by comparison with a finite difference approximation.
Parameters
----------
fun : callable
Function of which to estimate the derivatives. The argument x
passed to this function is ndarray of shape (n,) (never a scalar
even if n=1). It must return 1-D array_like of shape (m,) or a scalar.
jac : callable
Function which computes Jacobian matrix of `fun`. It must work with
argument x the same way as `fun`. The return value must be array_like
or sparse matrix with an appropriate shape.
x0 : array_like of shape (n,) or float
Point at which to estimate the derivatives. Float will be converted
to 1-D array.
bounds : 2-tuple of array_like, optional
Lower and upper bounds on independent variables. Defaults to no bounds.
Each bound must match the size of `x0` or be a scalar, in the latter
case the bound will be the same for all variables. Use it to limit the
range of function evaluation.
args, kwargs : tuple and dict, optional
Additional arguments passed to `fun` and `jac`. Both empty by default.
The calling signature is ``fun(x, *args, **kwargs)`` and the same
for `jac`.
Returns
-------
accuracy : float
The maximum among all relative errors for elements with absolute values
greater than 1 and absolute errors for elements with absolute values
less than or equal to 1. If `accuracy` is on the order of 1e-6 or lower,
then it is likely that your `jac` implementation is correct.
See Also
--------
approx_derivative : Compute finite difference approximation of derivative.
Examples
--------
>>> import numpy as np
>>> from scipy.optimize import check_derivative
>>>
>>>
>>> def f(x, c1, c2):
... return np.array([x[0] * np.sin(c1 * x[1]),
... x[0] * np.cos(c2 * x[1])])
...
>>> def jac(x, c1, c2):
... return np.array([
... [np.sin(c1 * x[1]), c1 * x[0] * np.cos(c1 * x[1])],
... [np.cos(c2 * x[1]), -c2 * x[0] * np.sin(c2 * x[1])]
... ])
...
>>>
>>> x0 = np.array([1.0, 0.5 * np.pi])
>>> check_derivative(f, jac, x0, args=(1, 2))
2.4492935982947064e-16
"""
J_to_test = jac(x0, *args, **kwargs)
if issparse(J_to_test):
J_diff = approx_derivative(fun, x0, bounds=bounds, sparsity=J_to_test,
args=args, kwargs=kwargs)
J_to_test = csr_matrix(J_to_test)
abs_err = J_to_test - J_diff
i, j, abs_err_data = find(abs_err)
J_diff_data = np.asarray(J_diff[i, j]).ravel()
return np.max(np.abs(abs_err_data) /
np.maximum(1, np.abs(J_diff_data)))
else:
J_diff = approx_derivative(fun, x0, bounds=bounds,
args=args, kwargs=kwargs)
abs_err = np.abs(J_to_test - J_diff)
return np.max(abs_err / np.maximum(1, np.abs(J_diff)))
| 37.08125
| 79
| 0.583727
| 3,358
| 23,732
| 4.030673
| 0.149792
| 0.013594
| 0.016254
| 0.007093
| 0.334318
| 0.25652
| 0.226745
| 0.185962
| 0.170743
| 0.151385
| 0
| 0.025248
| 0.312405
| 23,732
| 639
| 80
| 37.13928
| 0.804204
| 0.488454
| 0
| 0.296167
| 0
| 0
| 0.050031
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.045296
| false
| 0.003484
| 0.020906
| 0
| 0.135889
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
7c147e3dd10a5e110c033ad9ba1df174aabe3c39
| 20,303
|
py
|
Python
|
tests/models/test_hparams.py
|
abhinavg97/pytorch-lightning
|
0d54cf25a2dba33e4640ac52768a83406e7a0a94
|
[
"Apache-2.0"
] | 1
|
2020-10-26T09:02:08.000Z
|
2020-10-26T09:02:08.000Z
|
tests/models/test_hparams.py
|
vivektalwar13071999/pytorch-lightning
|
7c4f80a1afe3d7b0f1e9ee834aacaf8439195cdf
|
[
"Apache-2.0"
] | null | null | null |
tests/models/test_hparams.py
|
vivektalwar13071999/pytorch-lightning
|
7c4f80a1afe3d7b0f1e9ee834aacaf8439195cdf
|
[
"Apache-2.0"
] | null | null | null |
# Copyright The PyTorch Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import pickle
from argparse import Namespace
import cloudpickle
import pytest
import torch
from fsspec.implementations.local import LocalFileSystem
from omegaconf import OmegaConf, Container
from torch.nn import functional as F
from torch.utils.data import DataLoader
from pytorch_lightning import Trainer, LightningModule
from pytorch_lightning.core.saving import save_hparams_to_yaml, load_hparams_from_yaml
from pytorch_lightning.utilities import AttributeDict, is_picklable
from tests.base import EvalModelTemplate, TrialMNIST, BoringModel
class SaveHparamsModel(EvalModelTemplate):
""" Tests that a model can take an object """
def __init__(self, hparams):
super().__init__()
self.save_hyperparameters(hparams)
class AssignHparamsModel(EvalModelTemplate):
""" Tests that a model can take an object with explicit setter """
def __init__(self, hparams):
super().__init__()
self.hparams = hparams
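# Quick illustration of the two styles exercised above (values arbitrary;
# this sketch is not part of the original test suite):
#
#     >>> SaveHparamsModel(Namespace(test_arg=14)).hparams.test_arg
#     14
#     >>> AssignHparamsModel({'test_arg': 14}).hparams.test_arg
#     14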
# -------------------------
# STANDARD TESTS
# -------------------------
def _run_standard_hparams_test(tmpdir, model, cls, try_overwrite=False):
"""
Tests for the existence of an arg 'test_arg=14'
"""
hparam_type = type(model.hparams)
# test proper property assignments
assert model.hparams.test_arg == 14
# verify we can train
trainer = Trainer(default_root_dir=tmpdir, max_epochs=1, overfit_batches=2)
trainer.fit(model)
# make sure the raw checkpoint saved the properties
raw_checkpoint_path = _raw_checkpoint_path(trainer)
raw_checkpoint = torch.load(raw_checkpoint_path)
assert LightningModule.CHECKPOINT_HYPER_PARAMS_KEY in raw_checkpoint
assert raw_checkpoint[LightningModule.CHECKPOINT_HYPER_PARAMS_KEY]['test_arg'] == 14
# verify that model loads correctly
model2 = cls.load_from_checkpoint(raw_checkpoint_path)
assert model2.hparams.test_arg == 14
assert isinstance(model2.hparams, hparam_type)
if try_overwrite:
# verify that we can overwrite the property
model3 = cls.load_from_checkpoint(raw_checkpoint_path, test_arg=78)
assert model3.hparams.test_arg == 78
return raw_checkpoint_path
@pytest.mark.parametrize("cls", [SaveHparamsModel, AssignHparamsModel])
def test_namespace_hparams(tmpdir, cls):
# init model
model = cls(hparams=Namespace(test_arg=14))
# run standard test suite
_run_standard_hparams_test(tmpdir, model, cls)
@pytest.mark.parametrize("cls", [SaveHparamsModel, AssignHparamsModel])
def test_dict_hparams(tmpdir, cls):
# init model
model = cls(hparams={'test_arg': 14})
# run standard test suite
_run_standard_hparams_test(tmpdir, model, cls)
@pytest.mark.parametrize("cls", [SaveHparamsModel, AssignHparamsModel])
def test_omega_conf_hparams(tmpdir, cls):
# init model
conf = OmegaConf.create(dict(test_arg=14, mylist=[15.4, dict(a=1, b=2)]))
model = cls(hparams=conf)
assert isinstance(model.hparams, Container)
# run standard test suite
raw_checkpoint_path = _run_standard_hparams_test(tmpdir, model, cls)
model2 = cls.load_from_checkpoint(raw_checkpoint_path)
assert isinstance(model2.hparams, Container)
# config specific tests
assert model2.hparams.test_arg == 14
assert model2.hparams.mylist[0] == 15.4
def test_explicit_args_hparams(tmpdir):
"""
Tests that a model can take explicitly named args and assign them
"""
# define model
class LocalModel(EvalModelTemplate):
def __init__(self, test_arg, test_arg2):
super().__init__()
self.save_hyperparameters('test_arg', 'test_arg2')
model = LocalModel(test_arg=14, test_arg2=90)
# run standard test suite
raw_checkpoint_path = _run_standard_hparams_test(tmpdir, model, LocalModel)
model = LocalModel.load_from_checkpoint(raw_checkpoint_path, test_arg2=120)
# config specific tests
assert model.hparams.test_arg2 == 120
def test_implicit_args_hparams(tmpdir):
"""
Tests that a model can take implicitly captured args and assign them
"""
# define model
class LocalModel(EvalModelTemplate):
def __init__(self, test_arg, test_arg2):
super().__init__()
self.save_hyperparameters()
model = LocalModel(test_arg=14, test_arg2=90)
# run standard test suite
raw_checkpoint_path = _run_standard_hparams_test(tmpdir, model, LocalModel)
model = LocalModel.load_from_checkpoint(raw_checkpoint_path, test_arg2=120)
# config specific tests
assert model.hparams.test_arg2 == 120
def test_explicit_missing_args_hparams(tmpdir):
"""
Tests that a model can save an explicit subset of its args
"""
# define model
class LocalModel(EvalModelTemplate):
def __init__(self, test_arg, test_arg2):
super().__init__()
self.save_hyperparameters('test_arg')
model = LocalModel(test_arg=14, test_arg2=90)
# test proper property assignments
assert model.hparams.test_arg == 14
# verify we can train
trainer = Trainer(default_root_dir=tmpdir, max_epochs=2, overfit_batches=0.5)
trainer.fit(model)
# make sure the raw checkpoint saved the properties
raw_checkpoint_path = _raw_checkpoint_path(trainer)
raw_checkpoint = torch.load(raw_checkpoint_path)
assert LightningModule.CHECKPOINT_HYPER_PARAMS_KEY in raw_checkpoint
assert raw_checkpoint[LightningModule.CHECKPOINT_HYPER_PARAMS_KEY]['test_arg'] == 14
# verify that model loads correctly
model = LocalModel.load_from_checkpoint(raw_checkpoint_path, test_arg2=123)
assert model.hparams.test_arg == 14
assert 'test_arg2' not in model.hparams # test_arg2 is not registered in class init
return raw_checkpoint_path
# -------------------------
# SPECIFIC TESTS
# -------------------------
def test_class_nesting():
class MyModule(LightningModule):
def forward(self):
...
# make sure PL modules are always nn.Module
a = MyModule()
assert isinstance(a, torch.nn.Module)
def test_outside():
a = MyModule()
_ = a.hparams
class A:
def test(self):
a = MyModule()
_ = a.hparams
def test2(self):
test_outside()
test_outside()
A().test2()
A().test()
class SubClassEvalModel(EvalModelTemplate):
any_other_loss = torch.nn.CrossEntropyLoss()
def __init__(self, *args, subclass_arg=1200, **kwargs):
super().__init__(*args, **kwargs)
self.save_hyperparameters()
class SubSubClassEvalModel(SubClassEvalModel):
pass
class AggSubClassEvalModel(SubClassEvalModel):
def __init__(self, *args, my_loss=torch.nn.CrossEntropyLoss(), **kwargs):
super().__init__(*args, **kwargs)
self.save_hyperparameters()
class UnconventionalArgsEvalModel(EvalModelTemplate):
""" A model that has unconventional names for "self", "*args" and "**kwargs". """
def __init__(obj, *more_args, other_arg=300, **more_kwargs):
# intentionally named obj
super().__init__(*more_args, **more_kwargs)
obj.save_hyperparameters()
class DictConfSubClassEvalModel(SubClassEvalModel):
def __init__(self, *args, dict_conf=OmegaConf.create(dict(my_param='something')), **kwargs):
super().__init__(*args, **kwargs)
self.save_hyperparameters()
@pytest.mark.parametrize("cls", [
EvalModelTemplate,
SubClassEvalModel,
SubSubClassEvalModel,
AggSubClassEvalModel,
UnconventionalArgsEvalModel,
DictConfSubClassEvalModel,
])
def test_collect_init_arguments(tmpdir, cls):
""" Test that the model automatically saves the arguments passed into the constructor """
extra_args = {}
if cls is AggSubClassEvalModel:
extra_args.update(my_loss=torch.nn.CosineEmbeddingLoss())
elif cls is DictConfSubClassEvalModel:
extra_args.update(dict_conf=OmegaConf.create(dict(my_param='anything')))
model = cls(**extra_args)
assert model.hparams.batch_size == 32
model = cls(batch_size=179, **extra_args)
assert model.hparams.batch_size == 179
if isinstance(model, SubClassEvalModel):
assert model.hparams.subclass_arg == 1200
if isinstance(model, AggSubClassEvalModel):
assert isinstance(model.hparams.my_loss, torch.nn.CosineEmbeddingLoss)
# verify that the checkpoint saved the correct values
trainer = Trainer(default_root_dir=tmpdir, max_epochs=2, overfit_batches=0.5)
trainer.fit(model)
raw_checkpoint_path = _raw_checkpoint_path(trainer)
raw_checkpoint = torch.load(raw_checkpoint_path)
assert LightningModule.CHECKPOINT_HYPER_PARAMS_KEY in raw_checkpoint
assert raw_checkpoint[LightningModule.CHECKPOINT_HYPER_PARAMS_KEY]['batch_size'] == 179
# verify that model loads correctly
model = cls.load_from_checkpoint(raw_checkpoint_path)
assert model.hparams.batch_size == 179
if isinstance(model, AggSubClassEvalModel):
assert isinstance(model.hparams.my_loss, torch.nn.CosineEmbeddingLoss)
if isinstance(model, DictConfSubClassEvalModel):
assert isinstance(model.hparams.dict_conf, Container)
assert model.hparams.dict_conf['my_param'] == 'anything'
# verify that we can overwrite whatever we want
model = cls.load_from_checkpoint(raw_checkpoint_path, batch_size=99)
assert model.hparams.batch_size == 99
def _raw_checkpoint_path(trainer) -> str:
raw_checkpoint_paths = os.listdir(trainer.checkpoint_callback.dirpath)
raw_checkpoint_paths = [x for x in raw_checkpoint_paths if '.ckpt' in x]
assert raw_checkpoint_paths
raw_checkpoint_path = raw_checkpoint_paths[0]
raw_checkpoint_path = os.path.join(trainer.checkpoint_callback.dirpath, raw_checkpoint_path)
return raw_checkpoint_path
class LocalVariableModelSuperLast(EvalModelTemplate):
""" This model has the super().__init__() call at the end. """
def __init__(self, arg1, arg2, *args, **kwargs):
self.argument1 = arg1 # arg2 intentionally not set
arg1 = 'overwritten'
local_var = 1234
super().__init__(*args, **kwargs) # this is intentionally here at the end
class LocalVariableModelSuperFirst(EvalModelTemplate):
""" This model has the _auto_collect_arguments() call at the end. """
def __init__(self, arg1, arg2, *args, **kwargs):
super().__init__(*args, **kwargs)
self.argument1 = arg1 # arg2 intentionally not set
arg1 = 'overwritten'
local_var = 1234
self.save_hyperparameters() # this is intentionally here at the end
@pytest.mark.parametrize("cls", [
LocalVariableModelSuperFirst,
# LocalVariableModelSuperLast,
])
def test_collect_init_arguments_with_local_vars(cls):
""" Tests that only the arguments are collected and not local variables. """
model = cls(arg1=1, arg2=2)
assert 'local_var' not in model.hparams
assert model.hparams['arg1'] == 'overwritten'
assert model.hparams['arg2'] == 2
# @pytest.mark.parametrize("cls,config", [
# (SaveHparamsModel, Namespace(my_arg=42)),
# (SaveHparamsModel, dict(my_arg=42)),
# (SaveHparamsModel, OmegaConf.create(dict(my_arg=42))),
# (AssignHparamsModel, Namespace(my_arg=42)),
# (AssignHparamsModel, dict(my_arg=42)),
# (AssignHparamsModel, OmegaConf.create(dict(my_arg=42))),
# ])
# def test_single_config_models(tmpdir, cls, config):
# """ Test that the model automatically saves the arguments passed into the constructor """
# model = cls(config)
#
# # no matter how you do it, it should be assigned
# assert model.hparams.my_arg == 42
#
# # verify that the checkpoint saved the correct values
# trainer = Trainer(default_root_dir=tmpdir, max_epochs=2, overfit_batches=0.5)
# trainer.fit(model)
#
# # verify that model loads correctly
# raw_checkpoint_path = _raw_checkpoint_path(trainer)
# model = cls.load_from_checkpoint(raw_checkpoint_path)
# assert model.hparams.my_arg == 42
class AnotherArgModel(EvalModelTemplate):
def __init__(self, arg1):
super().__init__()
self.save_hyperparameters(arg1)
class OtherArgsModel(EvalModelTemplate):
def __init__(self, arg1, arg2):
super().__init__()
self.save_hyperparameters(arg1, arg2)
@pytest.mark.parametrize("cls,config", [
(AnotherArgModel, dict(arg1=42)),
(OtherArgsModel, dict(arg1=3.14, arg2='abc')),
])
def test_single_config_models_fail(tmpdir, cls, config):
""" Test fail on passing unsupported config type. """
with pytest.raises(ValueError):
_ = cls(**config)
@pytest.mark.parametrize("past_key", ['module_arguments'])
def test_load_past_checkpoint(tmpdir, past_key):
model = EvalModelTemplate()
# verify we can train
trainer = Trainer(default_root_dir=tmpdir, max_epochs=1)
trainer.fit(model)
# make sure the raw checkpoint saved the properties
raw_checkpoint_path = _raw_checkpoint_path(trainer)
raw_checkpoint = torch.load(raw_checkpoint_path)
raw_checkpoint[past_key] = raw_checkpoint[LightningModule.CHECKPOINT_HYPER_PARAMS_KEY]
raw_checkpoint['hparams_type'] = 'Namespace'
raw_checkpoint[past_key]['batch_size'] = -17
del raw_checkpoint[LightningModule.CHECKPOINT_HYPER_PARAMS_KEY]
# save back the checkpoint
torch.save(raw_checkpoint, raw_checkpoint_path)
# verify that model loads correctly
model2 = EvalModelTemplate.load_from_checkpoint(raw_checkpoint_path)
assert model2.hparams.batch_size == -17
def test_hparams_pickle(tmpdir):
ad = AttributeDict({'key1': 1, 'key2': 'abc'})
pkl = pickle.dumps(ad)
assert ad == pickle.loads(pkl)
pkl = cloudpickle.dumps(ad)
assert ad == pickle.loads(pkl)
class UnpickleableArgsEvalModel(EvalModelTemplate):
""" A model that has an attribute that cannot be pickled. """
def __init__(self, foo='bar', pickle_me=(lambda x: x + 1), **kwargs):
super().__init__(**kwargs)
assert not is_picklable(pickle_me)
self.save_hyperparameters()
def test_hparams_pickle_warning(tmpdir):
model = UnpickleableArgsEvalModel()
trainer = Trainer(default_root_dir=tmpdir, max_steps=1)
with pytest.warns(UserWarning, match="attribute 'pickle_me' removed from hparams because it cannot be pickled"):
trainer.fit(model)
assert 'pickle_me' not in model.hparams
def test_hparams_save_yaml(tmpdir):
hparams = dict(batch_size=32, learning_rate=0.001, data_root='./any/path/here',
nested=dict(any_num=123, anystr='abcd'))
path_yaml = os.path.join(tmpdir, 'testing-hparams.yaml')
save_hparams_to_yaml(path_yaml, hparams)
assert load_hparams_from_yaml(path_yaml) == hparams
save_hparams_to_yaml(path_yaml, Namespace(**hparams))
assert load_hparams_from_yaml(path_yaml) == hparams
save_hparams_to_yaml(path_yaml, AttributeDict(hparams))
assert load_hparams_from_yaml(path_yaml) == hparams
save_hparams_to_yaml(path_yaml, OmegaConf.create(hparams))
assert load_hparams_from_yaml(path_yaml) == hparams
class NoArgsSubClassEvalModel(EvalModelTemplate):
def __init__(self):
super().__init__()
class SimpleNoArgsModel(LightningModule):
def __init__(self):
super().__init__()
self.l1 = torch.nn.Linear(28 * 28, 10)
def forward(self, x):
return torch.relu(self.l1(x.view(x.size(0), -1)))
def training_step(self, batch, batch_nb):
x, y = batch
loss = F.cross_entropy(self(x), y)
return {'loss': loss, 'log': {'train_loss': loss}}
def test_step(self, batch, batch_nb):
x, y = batch
loss = F.cross_entropy(self(x), y)
return {'loss': loss, 'log': {'train_loss': loss}}
def configure_optimizers(self):
return torch.optim.Adam(self.parameters(), lr=0.02)
@pytest.mark.parametrize("cls", [
SimpleNoArgsModel,
NoArgsSubClassEvalModel,
])
def test_model_nohparams_train_test(tmpdir, cls):
"""Test models that do not tae any argument in init."""
model = cls()
trainer = Trainer(
max_epochs=1,
default_root_dir=tmpdir,
)
train_loader = DataLoader(TrialMNIST(os.getcwd(), train=True, download=True), batch_size=32)
trainer.fit(model, train_loader)
test_loader = DataLoader(TrialMNIST(os.getcwd(), train=False, download=True), batch_size=32)
trainer.test(test_dataloaders=test_loader)
def test_model_ignores_non_exist_kwargument(tmpdir):
"""Test that the model takes only valid class arguments."""
class LocalModel(EvalModelTemplate):
def __init__(self, batch_size=15):
super().__init__(batch_size=batch_size)
self.save_hyperparameters()
model = LocalModel()
assert model.hparams.batch_size == 15
# verify that the checkpoint saved the correct values
trainer = Trainer(default_root_dir=tmpdir, max_epochs=1)
trainer.fit(model)
# verify that we can overwrite whatever we want
raw_checkpoint_path = _raw_checkpoint_path(trainer)
model = LocalModel.load_from_checkpoint(raw_checkpoint_path, non_exist_kwarg=99)
assert 'non_exist_kwarg' not in model.hparams
class SuperClassPositionalArgs(EvalModelTemplate):
def __init__(self, hparams):
super().__init__()
self._hparams = None # pretend EvalModelTemplate did not call self.save_hyperparameters()
self.hparams = hparams
class SubClassVarArgs(SuperClassPositionalArgs):
""" Loading this model should accept hparams and init in the super class """
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
def test_args(tmpdir):
""" Test for inheritance: super class takes positional arg, subclass takes varargs. """
hparams = dict(test=1)
model = SubClassVarArgs(hparams)
trainer = Trainer(default_root_dir=tmpdir, max_epochs=1)
trainer.fit(model)
raw_checkpoint_path = _raw_checkpoint_path(trainer)
with pytest.raises(TypeError, match="__init__\(\) got an unexpected keyword argument 'test'"):
SubClassVarArgs.load_from_checkpoint(raw_checkpoint_path)
class RuntimeParamChangeModelSaving(BoringModel):
def __init__(self, **kwargs):
super().__init__()
self.save_hyperparameters()
class RuntimeParamChangeModelAssign(BoringModel):
def __init__(self, **kwargs):
super().__init__()
self.hparams = kwargs
@pytest.mark.parametrize("cls", [RuntimeParamChangeModelSaving, RuntimeParamChangeModelAssign])
def test_init_arg_with_runtime_change(tmpdir, cls):
"""Test that we save/export only the initial hparams, no other runtime change allowed"""
model = cls(running_arg=123)
assert model.hparams.running_arg == 123
model.hparams.running_arg = -1
assert model.hparams.running_arg == -1
model.hparams = Namespace(abc=42)
assert model.hparams.abc == 42
trainer = Trainer(
default_root_dir=tmpdir,
limit_train_batches=2,
limit_val_batches=2,
limit_test_batches=2,
max_epochs=1,
)
trainer.fit(model)
path_yaml = os.path.join(trainer.logger.log_dir, trainer.logger.NAME_HPARAMS_FILE)
hparams = load_hparams_from_yaml(path_yaml)
assert hparams.get('running_arg') == 123
class UnsafeParamModel(BoringModel):
def __init__(self, my_path, any_param=123):
super().__init__()
self.save_hyperparameters()
def test_model_with_fsspec_as_parameter(tmpdir):
model = UnsafeParamModel(LocalFileSystem(tmpdir))
trainer = Trainer(
default_root_dir=tmpdir,
limit_train_batches=2,
limit_val_batches=2,
limit_test_batches=2,
max_epochs=1,
)
trainer.fit(model)
trainer.test()
| 33.174837
| 116
| 0.713441
| 2,501
| 20,303
| 5.513395
| 0.153139
| 0.061281
| 0.050548
| 0.025455
| 0.548046
| 0.477192
| 0.430271
| 0.391471
| 0.345348
| 0.308217
| 0
| 0.015879
| 0.184209
| 20,303
| 611
| 117
| 33.229133
| 0.81664
| 0.19736
| 0
| 0.429379
| 0
| 0
| 0.030618
| 0
| 0
| 0
| 0
| 0
| 0.135593
| 1
| 0.144068
| false
| 0.002825
| 0.039548
| 0.00565
| 0.276836
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
7c149f4f2e879ee66f71bed92f16a685a097e92b
| 20,142
|
py
|
Python
|
tests/space_test.py
|
hadrianmontes/jax-md
|
cea1cc6b22db6044a502eeeab4bddde35ac15d94
|
[
"ECL-2.0",
"Apache-2.0"
] | 713
|
2019-05-14T19:02:00.000Z
|
2022-03-31T17:42:23.000Z
|
tests/space_test.py
|
hadrianmontes/jax-md
|
cea1cc6b22db6044a502eeeab4bddde35ac15d94
|
[
"ECL-2.0",
"Apache-2.0"
] | 109
|
2019-05-15T13:27:09.000Z
|
2022-03-17T16:15:59.000Z
|
tests/space_test.py
|
hadrianmontes/jax-md
|
cea1cc6b22db6044a502eeeab4bddde35ac15d94
|
[
"ECL-2.0",
"Apache-2.0"
] | 117
|
2019-05-17T13:23:37.000Z
|
2022-03-18T10:32:29.000Z
|
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for jax_md.space."""
from absl.testing import absltest
from absl.testing import parameterized
from jax.config import config as jax_config
from jax import random
import jax.numpy as jnp
from jax import grad, jit, jacfwd
from jax import test_util as jtu
from jax_md import space, test_util, quantity, energy
from jax_md.util import *
from functools import partial
from unittest import SkipTest
test_util.update_test_tolerance(5e-5, 5e-13)
jax_config.parse_flags_with_absl()
jax_config.enable_omnistaging()
FLAGS = jax_config.FLAGS
PARTICLE_COUNT = 10
STOCHASTIC_SAMPLES = 10
SHIFT_STEPS = 10
SPATIAL_DIMENSION = [2, 3]
BOX_FORMATS = ['scalar', 'vector', 'matrix']
if FLAGS.jax_enable_x64:
POSITION_DTYPE = [f32, f64]
else:
POSITION_DTYPE = [f32]
def make_periodic_general_test_system(N, dim, dtype, box_format):
assert box_format in BOX_FORMATS
box_size = quantity.box_size_at_number_density(N, 1.0, dim)
box = dtype(box_size)
if box_format == 'vector':
box = jnp.array(jnp.ones(dim) * box_size, dtype)
elif box_format == 'matrix':
box = jnp.array(jnp.eye(dim) * box_size, dtype)
d, s = space.periodic(jnp.diag(box) if box_format == 'matrix' else box)
d_gf, s_gf = space.periodic_general(box)
d_g, s_g = space.periodic_general(box, fractional_coordinates=False)
key = random.PRNGKey(0)
R_f = random.uniform(key, (N, dim), dtype=dtype)
R = space.transform(box, R_f)
E = jit(energy.soft_sphere_pair(d))
E_gf = jit(energy.soft_sphere_pair(d_gf))
E_g = jit(energy.soft_sphere_pair(d_g))
return R_f, R, box, (s, E), (s_gf, E_gf), (s_g, E_g)
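# Summary of the helper above (added for clarity; not part of the original
# file): it builds one physical system in three equivalent representations:
#   * (s, E):       space.periodic on an axis-aligned box, real coordinates R
#   * (s_gf, E_gf): space.periodic_general with fractional coordinates R_f
#   * (s_g, E_g):   space.periodic_general with real coordinates R
# The tests below check that energies, forces and shifts agree pairwise.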
# pylint: disable=invalid-name
class SpaceTest(jtu.JaxTestCase):
# pylint: disable=g-complex-comprehension
@parameterized.named_parameters(jtu.cases_from_list(
{
'testcase_name': '_dim={}_dtype={}'.format(dim, dtype.__name__),
'spatial_dimension': dim,
'dtype': dtype
} for dim in SPATIAL_DIMENSION for dtype in POSITION_DTYPE))
def test_transform(self, spatial_dimension, dtype):
key = random.PRNGKey(0)
for _ in range(STOCHASTIC_SAMPLES):
key, split1, split2 = random.split(key, 3)
R = random.normal(
split1, (PARTICLE_COUNT, spatial_dimension), dtype=dtype)
T = random.normal(
split2, (spatial_dimension, spatial_dimension), dtype=dtype)
R_prime_exact = jnp.array(jnp.einsum('ij,kj->ki', T, R), dtype=dtype)
R_prime = space.transform(T, R)
self.assertAllClose(R_prime_exact, R_prime)
@parameterized.named_parameters(jtu.cases_from_list(
{
'testcase_name': '_dim={}'.format(dim),
'spatial_dimension': dim
} for dim in SPATIAL_DIMENSION))
def test_transform_grad(self, spatial_dimension):
key = random.PRNGKey(0)
for _ in range(STOCHASTIC_SAMPLES):
key, split1, split2 = random.split(key, 3)
R = random.normal(split1, (PARTICLE_COUNT, spatial_dimension))
T = random.normal(split2, (spatial_dimension, spatial_dimension))
R_prime = space.transform(T, R)
energy_direct = lambda R: jnp.sum(R ** 2)
energy_indirect = lambda T, R: jnp.sum(space.transform(T, R) ** 2)
grad_direct = grad(energy_direct)(R_prime)
grad_indirect = grad(energy_indirect, 1)(T, R)
self.assertAllClose(grad_direct, grad_indirect)
@parameterized.named_parameters(jtu.cases_from_list(
{
'testcase_name': '_dim={}_dtype={}'.format(dim, dtype.__name__),
'spatial_dimension': dim,
'dtype': dtype
} for dim in SPATIAL_DIMENSION for dtype in POSITION_DTYPE))
def test_transform_inverse(self, spatial_dimension, dtype):
key = random.PRNGKey(0)
tol = 1e-13
if dtype is f32:
tol = 1e-5
for _ in range(STOCHASTIC_SAMPLES):
key, split1, split2 = random.split(key, 3)
R = random.normal(
split1, (PARTICLE_COUNT, spatial_dimension), dtype=dtype)
T = random.normal(
split2, (spatial_dimension, spatial_dimension), dtype=dtype)
T_inv = space.inverse(T)
R_test = space.transform(T_inv, space.transform(T, R))
self.assertAllClose(R, R_test)
@parameterized.named_parameters(jtu.cases_from_list(
{
'testcase_name': '_dim={}_dtype={}'.format(dim, dtype.__name__),
'spatial_dimension': dim,
'dtype': dtype
} for dim in SPATIAL_DIMENSION for dtype in POSITION_DTYPE))
def test_canonicalize_displacement_or_metric(self, spatial_dimension, dtype):
key = random.PRNGKey(0)
displacement, _ = space.periodic_general(jnp.eye(spatial_dimension))
metric = space.metric(displacement)
test_metric = space.canonicalize_displacement_or_metric(displacement)
metric = space.map_product(metric)
test_metric = space.map_product(test_metric)
for _ in range(STOCHASTIC_SAMPLES):
key, split1, split2 = random.split(key, 3)
R = random.normal(
split1, (PARTICLE_COUNT, spatial_dimension), dtype=dtype)
self.assertAllClose(metric(R, R), test_metric(R, R))
@parameterized.named_parameters(jtu.cases_from_list(
{
'testcase_name': '_dim={}_dtype={}'.format(dim, dtype.__name__),
'spatial_dimension': dim,
'dtype': dtype
} for dim in SPATIAL_DIMENSION for dtype in POSITION_DTYPE))
def test_periodic_displacement(self, spatial_dimension, dtype):
key = random.PRNGKey(0)
for _ in range(STOCHASTIC_SAMPLES):
key, split = random.split(key)
R = random.uniform(
split, (PARTICLE_COUNT, spatial_dimension), dtype=dtype)
dR = space.map_product(space.pairwise_displacement)(R, R)
dR_wrapped = space.periodic_displacement(f32(1.0), dR)
dR_direct = dR
dr_direct = space.distance(dR)
dr_direct = jnp.reshape(dr_direct, dr_direct.shape + (1,))
if spatial_dimension == 2:
for i in range(-1, 2):
for j in range(-1, 2):
dR_shifted = dR + jnp.array([i, j], dtype=R.dtype)
dr_shifted = space.distance(dR_shifted)
dr_shifted = jnp.reshape(dr_shifted, dr_shifted.shape + (1,))
dR_direct = jnp.where(dr_shifted < dr_direct, dR_shifted, dR_direct)
dr_direct = jnp.where(dr_shifted < dr_direct, dr_shifted, dr_direct)
elif spatial_dimension == 3:
for i in range(-1, 2):
for j in range(-1, 2):
for k in range(-1, 2):
dR_shifted = dR + jnp.array([i, j, k], dtype=R.dtype)
dr_shifted = space.distance(dR_shifted)
dr_shifted = jnp.reshape(dr_shifted, dr_shifted.shape + (1,))
dR_direct = jnp.where(
dr_shifted < dr_direct, dR_shifted, dR_direct)
dr_direct = jnp.where(
dr_shifted < dr_direct, dr_shifted, dr_direct)
dR_direct = jnp.array(dR_direct, dtype=dR.dtype)
assert dR_wrapped.dtype == dtype
self.assertAllClose(dR_wrapped, dR_direct)
@parameterized.named_parameters(jtu.cases_from_list(
{
'testcase_name': '_dim={}_dtype={}'.format(dim, dtype.__name__),
'spatial_dimension': dim,
'dtype': dtype
} for dim in SPATIAL_DIMENSION for dtype in POSITION_DTYPE))
def test_periodic_shift(self, spatial_dimension, dtype):
key = random.PRNGKey(0)
for _ in range(STOCHASTIC_SAMPLES):
key, split1, split2 = random.split(key, 3)
R = random.uniform(
split1, (PARTICLE_COUNT, spatial_dimension), dtype=dtype)
dR = jnp.sqrt(f32(0.1)) * random.normal(
split2, (PARTICLE_COUNT, spatial_dimension), dtype=dtype)
dR = jnp.where(dR > 0.49, f32(0.49), dR)
dR = jnp.where(dR < -0.49, f32(-0.49), dR)
R_shift = space.periodic_shift(f32(1.0), R, dR)
assert R_shift.dtype == R.dtype
assert jnp.all(R_shift < 1.0)
assert jnp.all(R_shift > 0.0)
dR_after = space.periodic_displacement(f32(1.0), R_shift - R)
assert dR_after.dtype == R.dtype
self.assertAllClose(dR_after, dR)
@parameterized.named_parameters(jtu.cases_from_list(
{
'testcase_name': '_dim={}_dtype={}'.format(dim, dtype.__name__),
'spatial_dimension': dim,
'dtype': dtype
} for dim in SPATIAL_DIMENSION for dtype in POSITION_DTYPE))
def test_periodic_against_periodic_general(self, spatial_dimension, dtype):
key = random.PRNGKey(0)
tol = 1e-13
if dtype is f32:
tol = 1e-5
for _ in range(STOCHASTIC_SAMPLES):
key, split1, split2, split3 = random.split(key, 4)
max_box_size = f32(10.0)
box_size = max_box_size * random.uniform(
split1, (spatial_dimension,), dtype=dtype)
transform = jnp.diag(box_size)
R = random.uniform(
split2, (PARTICLE_COUNT, spatial_dimension), dtype=dtype)
R_scaled = R * box_size
dR = random.normal(
split3, (PARTICLE_COUNT, spatial_dimension), dtype=dtype)
disp_fn, shift_fn = space.periodic(box_size)
general_disp_fn, general_shift_fn = space.periodic_general(transform)
disp_fn = space.map_product(disp_fn)
general_disp_fn = space.map_product(general_disp_fn)
self.assertAllClose(disp_fn(R_scaled, R_scaled), general_disp_fn(R, R))
assert disp_fn(R_scaled, R_scaled).dtype == dtype
self.assertAllClose(
shift_fn(R_scaled, dR), general_shift_fn(R, dR) * box_size)
assert shift_fn(R_scaled, dR).dtype == dtype
@parameterized.named_parameters(jtu.cases_from_list(
{
'testcase_name': '_dim={}_dtype={}'.format(dim, dtype.__name__),
'spatial_dimension': dim,
'dtype': dtype
} for dim in SPATIAL_DIMENSION for dtype in POSITION_DTYPE))
def test_periodic_against_periodic_general_grad(self, spatial_dimension, dtype):
key = random.PRNGKey(0)
tol = 1e-13
if dtype is f32:
tol = 1e-5
for _ in range(STOCHASTIC_SAMPLES):
key, split1, split2, split3 = random.split(key, 4)
max_box_size = f32(10.0)
box_size = max_box_size * random.uniform(
split1, (spatial_dimension,), dtype=dtype)
transform = jnp.diag(box_size)
R = random.uniform(
split2, (PARTICLE_COUNT, spatial_dimension), dtype=dtype)
R_scaled = R * box_size
dR = random.normal(
split3, (PARTICLE_COUNT, spatial_dimension), dtype=dtype)
disp_fn, shift_fn = space.periodic(box_size)
general_disp_fn, general_shift_fn = space.periodic_general(transform)
disp_fn = space.map_product(disp_fn)
general_disp_fn = space.map_product(general_disp_fn)
grad_fn = grad(lambda R: jnp.sum(disp_fn(R, R) ** 2))
general_grad_fn = grad(lambda R: jnp.sum(general_disp_fn(R, R) ** 2))
self.assertAllClose(grad_fn(R_scaled), general_grad_fn(R))
assert general_grad_fn(R).dtype == dtype
@parameterized.named_parameters(jtu.cases_from_list(
{
'testcase_name': '_dim={}_dtype={}'.format(dim, dtype.__name__),
'spatial_dimension': dim,
'dtype': dtype,
} for dim in SPATIAL_DIMENSION for dtype in POSITION_DTYPE))
def test_periodic_general_dynamic(self, spatial_dimension, dtype):
key = random.PRNGKey(0)
eye = jnp.eye(spatial_dimension)
for _ in range(STOCHASTIC_SAMPLES):
key, split_T0_scale, split_T0_dT = random.split(key, 3)
key, split_T1_scale, split_T1_dT = random.split(key, 3)
key, split_t, split_R, split_dR = random.split(key, 4)
size_0 = 10.0 * random.uniform(split_T0_scale, ())
dtransform_0 = 0.5 * random.normal(
split_T0_dT, (spatial_dimension, spatial_dimension))
T_0 = jnp.array(size_0 * (eye + dtransform_0), dtype=dtype)
size_1 = 10.0 * random.uniform(split_T1_scale, (), dtype=dtype)
dtransform_1 = 0.5 * random.normal(
split_T1_dT, (spatial_dimension, spatial_dimension), dtype=dtype)
T_1 = jnp.array(size_1 * (eye + dtransform_1), dtype=dtype)
disp_fn, shift_fn = space.periodic_general(T_0)
true_disp_fn, true_shift_fn = space.periodic_general(T_1)
disp_fn = partial(disp_fn, box=T_1)
disp_fn = space.map_product(disp_fn)
true_disp_fn = space.map_product(true_disp_fn)
R = random.uniform(
split_R, (PARTICLE_COUNT, spatial_dimension), dtype=dtype)
dR = random.normal(
split_dR, (PARTICLE_COUNT, spatial_dimension), dtype=dtype)
self.assertAllClose(
disp_fn(R, R), jnp.array(true_disp_fn(R, R), dtype=dtype))
self.assertAllClose(
shift_fn(R, dR, box=T_1), jnp.array(true_shift_fn(R, dR), dtype=dtype))
@parameterized.named_parameters(jtu.cases_from_list(
{
'testcase_name': '_dim={}_dtype={}'.format(dim, dtype.__name__),
'spatial_dimension': dim,
'dtype': dtype,
} for dim in SPATIAL_DIMENSION for dtype in POSITION_DTYPE))
def test_periodic_general_wrapped_vs_unwrapped(
self, spatial_dimension, dtype):
key = random.PRNGKey(0)
eye = jnp.eye(spatial_dimension, dtype=dtype)
tol = 1e-13
if dtype is f32:
tol = 2e-5
for _ in range(STOCHASTIC_SAMPLES):
key, split_R, split_T = random.split(key, 3)
dT = random.normal(
split_T, (spatial_dimension, spatial_dimension), dtype=dtype)
T = eye + dT + jnp.transpose(dT)
R = random.uniform(
split_R, (PARTICLE_COUNT, spatial_dimension), dtype=dtype)
R0 = R
unwrapped_R = R
displacement, shift = space.periodic_general(T)
_, unwrapped_shift = space.periodic_general(T, wrapped=False)
displacement = space.map_product(displacement)
for _ in range(SHIFT_STEPS):
key, split = random.split(key)
dR = random.normal(
split, (PARTICLE_COUNT, spatial_dimension), dtype=dtype)
R = shift(R, dR)
unwrapped_R = unwrapped_shift(unwrapped_R, dR)
self.assertAllClose(
displacement(R, R0),
displacement(unwrapped_R, R0))
assert not (jnp.all(unwrapped_R > 0) and jnp.all(unwrapped_R < 1))
@parameterized.named_parameters(jtu.cases_from_list(
{
'testcase_name': f'_dim={dim}_dtype={dtype.__name__}_box_format={box_format}',
'spatial_dimension': dim,
'dtype': dtype,
'box_format': box_format
} for dim in SPATIAL_DIMENSION
for dtype in POSITION_DTYPE
for box_format in BOX_FORMATS))
def test_periodic_general_energy(self, spatial_dimension, dtype, box_format):
N = 16
R_f, R, box, (s, E), (s_gf, E_gf), (s_g, E_g) = \
make_periodic_general_test_system(N, spatial_dimension, dtype, box_format)
self.assertAllClose(E(R), E_gf(R_f))
self.assertAllClose(E(R), E_g(R))
@parameterized.named_parameters(jtu.cases_from_list(
{
'testcase_name': f'_dim={dim}_dtype={dtype.__name__}_box_format={box_format}',
'spatial_dimension': dim,
'dtype': dtype,
'box_format': box_format
} for dim in SPATIAL_DIMENSION
for dtype in POSITION_DTYPE
for box_format in BOX_FORMATS))
def test_periodic_general_force(self, spatial_dimension, dtype, box_format):
N = 16
R_f, R, box, (s, E), (s_gf, E_gf), (s_g, E_g) = \
make_periodic_general_test_system(N, spatial_dimension, dtype, box_format)
self.assertAllClose(grad(E)(R), grad(E_gf)(R_f))
self.assertAllClose(grad(E)(R), grad(E_g)(R))
@parameterized.named_parameters(jtu.cases_from_list(
{
'testcase_name': f'_dim={dim}_dtype={dtype.__name__}_box_format={box_format}',
'spatial_dimension': dim,
'dtype': dtype,
'box_format': box_format
} for dim in SPATIAL_DIMENSION
for dtype in POSITION_DTYPE
for box_format in BOX_FORMATS))
def test_periodic_general_shift(self, spatial_dimension, dtype, box_format):
N = 16
R_f, R, box, (s, E), (s_gf, E_gf), (s_g, E_g) = \
make_periodic_general_test_system(N, spatial_dimension, dtype, box_format)
R_new = s(R, grad(E)(R))
R_gf_new = s_gf(R_f, grad(E_gf)(R_f))
R_g_new = s_g(R, grad(E_g)(R))
self.assertAllClose(R_new, space.transform(box, R_gf_new))
self.assertAllClose(R_new, R_g_new)
@parameterized.named_parameters(jtu.cases_from_list(
{
'testcase_name': f'_dim={dim}_dtype={dtype.__name__}_box_format={box_format}',
'spatial_dimension': dim,
'dtype': dtype,
'box_format': box_format
} for dim in SPATIAL_DIMENSION
for dtype in POSITION_DTYPE
for box_format in BOX_FORMATS))
def test_periodic_general_deform(self, spatial_dimension, dtype, box_format):
N = 16
R_f, R, box, (s, E), (s_gf, E_gf), (s_g, E_g) = \
make_periodic_general_test_system(N, spatial_dimension, dtype, box_format)
deformed_box = box * 0.9
self.assertAllClose(E_gf(R_f, box=deformed_box),
E_g(R, new_box=deformed_box))
@parameterized.named_parameters(jtu.cases_from_list(
{
'testcase_name': f'_dim={dim}_dtype={dtype.__name__}_box_format={box_format}',
'spatial_dimension': dim,
'dtype': dtype,
'box_format': box_format
} for dim in SPATIAL_DIMENSION
for dtype in POSITION_DTYPE
for box_format in BOX_FORMATS))
def test_periodic_general_deform_grad(self,
spatial_dimension, dtype, box_format):
N = 16
R_f, R, box, (s, E), (s_gf, E_gf), (s_g, E_g) = \
make_periodic_general_test_system(N, spatial_dimension, dtype, box_format)
deformed_box = box * 0.9
self.assertAllClose(grad(E_gf)(R_f, box=deformed_box),
grad(E_g)(R, new_box=deformed_box))
self.assertAllClose(jacfwd(E_gf)(R_f, box=deformed_box),
jacfwd(E_g)(R, new_box=deformed_box))
@parameterized.named_parameters(jtu.cases_from_list(
{
'testcase_name': f'_dim={dim}_dtype={dtype.__name__}_box_format={box_format}',
'spatial_dimension': dim,
'dtype': dtype,
'box_format': box_format
} for dim in SPATIAL_DIMENSION
for dtype in POSITION_DTYPE
for box_format in BOX_FORMATS))
def test_periodic_general_deform_shift(self,
spatial_dimension, dtype, box_format):
N = 16
R_f, R, box, (s, E), (s_gf, E_gf), (s_g, E_g) = \
make_periodic_general_test_system(N, spatial_dimension, dtype, box_format)
deformed_box = box * 0.9
R_new = s_g(R, grad(E_g)(R), new_box=deformed_box)
R_gf_new = space.transform(deformed_box, s_gf(R_f, grad(E_gf)(R_f)))
self.assertAllClose(R_new, R_gf_new)
@parameterized.named_parameters(jtu.cases_from_list(
{
'testcase_name': f'_dim={dim}_dtype={dtype.__name__}_box_format={box_format}',
'spatial_dimension': dim,
'dtype': dtype,
'box_format': box_format
} for dim in SPATIAL_DIMENSION
for dtype in POSITION_DTYPE
for box_format in BOX_FORMATS))
def test_periodic_general_grad_box(self, spatial_dimension, dtype, box_format):
if box_format == 'scalar':
raise SkipTest('Scalar case fails due to JAX Issue #5849.')
N = 16
R_f, R, box, (s, E), (s_gf, E_gf), (s_g, E_g) = \
make_periodic_general_test_system(N, spatial_dimension, dtype, box_format)
@grad
def box_energy_g_fn(box):
return E_g(R, new_box=box)
@grad
def box_energy_gf_fn(box):
return E_gf(R_f, box=box)
self.assertAllClose(box_energy_g_fn(box), box_energy_gf_fn(box))
if __name__ == '__main__':
absltest.main()
| 35.52381
| 88
| 0.666369
| 2,840
| 20,142
| 4.416901
| 0.085915
| 0.118622
| 0.073661
| 0.043527
| 0.714126
| 0.670839
| 0.645089
| 0.593192
| 0.563217
| 0.560985
| 0
| 0.015215
| 0.220137
| 20,142
| 566
| 89
| 35.586572
| 0.783359
| 0.031874
| 0
| 0.541667
| 0
| 0
| 0.067248
| 0.020483
| 0
| 0
| 0
| 0
| 0.076389
| 1
| 0.046296
| false
| 0
| 0.025463
| 0.00463
| 0.081019
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
7c14cbf83bd9f7d5d27ebfe3490cc6f31c415451
| 246
|
py
|
Python
|
functions/batch-custom-action/status-api/lambda.py
|
TrollPursePublishing/trollpurse-trollops
|
27e54cfd1ba1eed27097e2e3038dfab56691cf49
|
[
"Xnet",
"Linux-OpenIB",
"X11"
] | 2
|
2020-11-18T06:04:27.000Z
|
2021-04-22T12:38:15.000Z
|
functions/batch-custom-action/status-api/lambda.py
|
TrollPursePublishing/trollpurse-ops
|
27e54cfd1ba1eed27097e2e3038dfab56691cf49
|
[
"Xnet",
"Linux-OpenIB",
"X11"
] | null | null | null |
functions/batch-custom-action/status-api/lambda.py
|
TrollPursePublishing/trollpurse-ops
|
27e54cfd1ba1eed27097e2e3038dfab56691cf49
|
[
"Xnet",
"Linux-OpenIB",
"X11"
] | null | null | null |
import boto3
batch_client = boto3.client('batch')
def lambda_handler(event, context):
    """Return the current status of the AWS Batch job named in the event."""
    describe_response = batch_client.describe_jobs(
        jobs=[event.get('jobId', '')]
    )
    # describe_jobs returns an empty list for unknown job ids; fall back to ''.
    return describe_response.get('jobs', [{}])[0].get('status', '')
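# Illustrative invocation (requires valid AWS credentials and an existing
# Batch job; the job id and the returned status below are hypothetical):
#
#     >>> lambda_handler({'jobId': 'example-job-id'}, None)
#     'RUNNING'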
| 22.363636
| 65
| 0.678862
| 30
| 246
| 5.366667
| 0.566667
| 0.136646
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.014151
| 0.138211
| 246
| 10
| 66
| 24.6
| 0.745283
| 0
| 0
| 0
| 0
| 0
| 0.081301
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.142857
| false
| 0
| 0.142857
| 0
| 0.428571
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
7c170adc77db7c06c4c5968ae2d5e3df343748b4
| 776
|
py
|
Python
|
python97/chapter05/list_gen.py
|
youaresherlock/PythonPractice
|
2e22d3fdcb26353cb0d8215c150e84d11bc9a022
|
[
"Apache-2.0"
] | null | null | null |
python97/chapter05/list_gen.py
|
youaresherlock/PythonPractice
|
2e22d3fdcb26353cb0d8215c150e84d11bc9a022
|
[
"Apache-2.0"
] | null | null | null |
python97/chapter05/list_gen.py
|
youaresherlock/PythonPractice
|
2e22d3fdcb26353cb0d8215c150e84d11bc9a022
|
[
"Apache-2.0"
] | 1
|
2019-11-05T01:10:15.000Z
|
2019-11-05T01:10:15.000Z
|
#!/usr/bin/python
# -*- coding:utf8 -*-
# List comprehensions (list generation expressions)
# 1. Extract the odd numbers between 1 and 20
# odd_list = []
# for i in range(21):
# if i % 2 == 1:
# odd_list.append(i)
# odd_list = [i for i in range(21) if i % 2 == 1]
# print(odd_list)
# 2. A case with more complex logic: if the number is odd, square it
# List comprehensions perform better than building the list with explicit operations
def handle_item(item):
return item * item
odd_list = [handle_item(i) for i in range(21) if i % 2 == 1]
print(odd_list)
# Generator expression
odd_gen = (i for i in range(21) if i % 2 == 1)
print(type(odd_gen))
for item in odd_gen:
print(item)
# Dictionary comprehension
my_dict = {"bobby1": 22, "bobby2": 23, "imooc.com": 5}
reversed_dict = {value:key for key, value in my_dict.items()}
print(reversed_dict)
# Set comprehension
my_set = set(my_dict.keys())
my_set = {key for key, value in my_dict.items()}
print(type(my_set))
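# --- Hedged addendum (not part of the original file) ---
# A generator expression is evaluated lazily, so its object size stays small
# and constant, while the equivalent list grows with the number of elements.
import sys
nums_list = [i for i in range(100000)]
nums_gen = (i for i in range(100000))
print(sys.getsizeof(nums_list), sys.getsizeof(nums_gen))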
| 15.836735
| 61
| 0.627577
| 137
| 776
| 3.408759
| 0.350365
| 0.089936
| 0.051392
| 0.094218
| 0.359743
| 0.359743
| 0.359743
| 0.359743
| 0.359743
| 0.184154
| 0
| 0.047308
| 0.210052
| 776
| 48
| 62
| 16.166667
| 0.714519
| 0.338918
| 0
| 0
| 0
| 0
| 0.043299
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.071429
| false
| 0
| 0
| 0.071429
| 0.142857
| 0.357143
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
7c17743faf77b54c0516f30699a3b1dc9b050a25
| 11,409
|
py
|
Python
|
src/streamlink/plugin/plugin.py
|
isqad/streamlink
|
f6708f1d38d056177ac3d614ebbb740d956d46f0
|
[
"BSD-2-Clause"
] | 1
|
2017-11-26T18:48:29.000Z
|
2017-11-26T18:48:29.000Z
|
src/streamlink/plugin/plugin.py
|
isqad/streamlink
|
f6708f1d38d056177ac3d614ebbb740d956d46f0
|
[
"BSD-2-Clause"
] | null | null | null |
src/streamlink/plugin/plugin.py
|
isqad/streamlink
|
f6708f1d38d056177ac3d614ebbb740d956d46f0
|
[
"BSD-2-Clause"
] | 1
|
2021-06-03T23:08:48.000Z
|
2021-06-03T23:08:48.000Z
|
import ast
import operator
import re
from collections import OrderedDict
from functools import partial
from ..cache import Cache
from ..exceptions import PluginError, NoStreamsError
from ..options import Options
# FIXME: This is a crude attempt at making a bitrate's
# weight end up similar to the weight of a resolution.
# Someone who knows math, please fix.
BIT_RATE_WEIGHT_RATIO = 2.8
ALT_WEIGHT_MOD = 0.01
QUALITY_WEIGTHS_EXTRA = {
"other": {
"live": 1080,
},
"tv": {
"hd": 1080,
"sd": 576,
},
"quality": {
"ehq": 720,
"hq": 576,
"sq": 360,
},
}
FILTER_OPERATORS = {
"<": operator.lt,
"<=": operator.le,
">": operator.gt,
">=": operator.ge,
}
PARAMS_REGEX = r"(\w+)=({.+?}|\[.+?\]|\(.+?\)|'(?:[^'\\]|\\')*'|\"(?:[^\"\\]|\\\")*\"|\S+)"
HIGH_PRIORITY = 30
NORMAL_PRIORITY = 20
LOW_PRIORITY = 10
NO_PRIORITY = 0
def stream_weight(stream):
for group, weights in QUALITY_WEIGTHS_EXTRA.items():
if stream in weights:
return weights[stream], group
match = re.match(r"^(\d+)(k|p)?(\d+)?(\+)?(?:_(\d+)k)?(?:_(alt)(\d)?)?$", stream)
if match:
weight = 0
if match.group(6):
if match.group(7):
weight -= ALT_WEIGHT_MOD * int(match.group(7))
else:
weight -= ALT_WEIGHT_MOD
name_type = match.group(2)
if name_type == "k": # bit rate
bitrate = int(match.group(1))
weight += bitrate / BIT_RATE_WEIGHT_RATIO
return weight, "bitrate"
elif name_type == "p": # resolution
weight += int(match.group(1))
if match.group(3): # fps eg. 60p or 50p
weight += int(match.group(3))
if match.group(4) == "+":
weight += 1
if match.group(5): # bit rate classifier for resolution
weight += int(match.group(5)) / BIT_RATE_WEIGHT_RATIO
return weight, "pixels"
return 0, "none"
def iterate_streams(streams):
for name, stream in streams:
if isinstance(stream, list):
for sub_stream in stream:
yield (name, sub_stream)
else:
yield (name, stream)
def stream_type_priority(stream_types, stream):
stream_type = type(stream[1]).shortname()
try:
prio = stream_types.index(stream_type)
except ValueError:
try:
prio = stream_types.index("*")
except ValueError:
prio = 99
return prio
def stream_sorting_filter(expr, stream_weight):
match = re.match(r"(?P<op><=|>=|<|>)?(?P<value>[\w+]+)", expr)
if not match:
raise PluginError("Invalid filter expression: {0}".format(expr))
op, value = match.group("op", "value")
op = FILTER_OPERATORS.get(op, operator.eq)
filter_weight, filter_group = stream_weight(value)
def func(quality):
weight, group = stream_weight(quality)
if group == filter_group:
return not op(weight, filter_weight)
return True
return func
def parse_url_params(url):
split = url.split(" ", 1)
url = split[0]
params = split[1] if len(split) > 1 else ''
return url, parse_params(params)
def parse_params(params):
rval = {}
matches = re.findall(PARAMS_REGEX, params)
for key, value in matches:
try:
value = ast.literal_eval(value)
except Exception:
pass
rval[key] = value
return rval
class Plugin(object):
"""A plugin can retrieve stream information from the URL specified.
:param url: URL that the plugin will operate on
"""
cache = None
logger = None
module = "unknown"
options = Options()
session = None
@classmethod
def bind(cls, session, module):
cls.cache = Cache(filename="plugin-cache.json",
key_prefix=module)
cls.logger = session.logger.new_module("plugin." + module)
cls.module = module
cls.session = session
def __init__(self, url):
self.url = url
@classmethod
def can_handle_url(cls, url):
raise NotImplementedError
@classmethod
def set_option(cls, key, value):
cls.options.set(key, value)
@classmethod
def get_option(cls, key):
return cls.options.get(key)
@classmethod
def stream_weight(cls, stream):
return stream_weight(stream)
@classmethod
def default_stream_types(cls, streams):
stream_types = ["rtmp", "hls", "hds", "http"]
for name, stream in iterate_streams(streams):
stream_type = type(stream).shortname()
if stream_type not in stream_types:
stream_types.append(stream_type)
return stream_types
@classmethod
def broken(cls, issue=None):
def func(*args, **kwargs):
msg = (
"This plugin has been marked as broken. This is likely due to "
"changes to the service preventing a working implementation. "
)
if issue:
msg += "More info: https://github.com/streamlink/streamlink/issues/{0}".format(issue)
raise PluginError(msg)
def decorator(*args, **kwargs):
return func
return decorator
@classmethod
def priority(cls, url):
"""
Return the plugin priority for a given URL, by default it returns
NORMAL priority.
:return: priority level
"""
return NORMAL_PRIORITY
def streams(self, stream_types=None, sorting_excludes=None):
"""Attempts to extract available streams.
Returns a :class:`dict` containing the streams, where the key is
the name of the stream, most commonly the quality and the value
is a :class:`Stream` object.
The result can contain the synonyms **best** and **worst** which
points to the streams which are likely to be of highest and
lowest quality respectively.
If multiple streams with the same name are found, the order of
streams specified in *stream_types* will determine which stream
gets to keep the name while the rest will be renamed to
"<name>_<stream type>".
The synonyms can be fine tuned with the *sorting_excludes*
parameter. This can be either of these types:
- A list of filter expressions in the format
*[operator]<value>*. For example the filter ">480p" will
exclude streams ranked higher than "480p" from the list
used in the synonyms ranking. Valid operators are >, >=, <
and <=. If no operator is specified then equality will be
tested.
- A function that is passed to filter() with a list of
stream names as input.
:param stream_types: A list of stream types to return.
:param sorting_excludes: Specify which streams to exclude from
the best/worst synonyms.
.. versionchanged:: 1.4.2
Added *priority* parameter.
.. versionchanged:: 1.5.0
Renamed *priority* to *stream_types* and changed behaviour
slightly.
.. versionchanged:: 1.5.0
Added *sorting_excludes* parameter.
.. versionchanged:: 1.6.0
*sorting_excludes* can now be a list of filter expressions
or a function that is passed to filter().
"""
try:
ostreams = self._get_streams()
if isinstance(ostreams, dict):
ostreams = ostreams.items()
# Flatten the iterator to a list so we can reuse it.
if ostreams:
ostreams = list(ostreams)
except NoStreamsError:
return {}
except (IOError, OSError, ValueError) as err:
raise PluginError(err)
if not ostreams:
return {}
if stream_types is None:
stream_types = self.default_stream_types(ostreams)
# Add streams depending on stream type and priorities
sorted_streams = sorted(iterate_streams(ostreams),
key=partial(stream_type_priority,
stream_types))
streams = {}
for name, stream in sorted_streams:
stream_type = type(stream).shortname()
# Use * as wildcard to match other stream types
if "*" not in stream_types and stream_type not in stream_types:
continue
# drop _alt from any stream names
if name.endswith("_alt"):
name = name[:-len("_alt")]
existing = streams.get(name)
if existing:
existing_stream_type = type(existing).shortname()
if existing_stream_type != stream_type:
name = "{0}_{1}".format(name, stream_type)
if name in streams:
name = "{0}_alt".format(name)
num_alts = len(list(filter(lambda n: n.startswith(name), streams.keys())))
# We shouldn't need more than 2 alt streams
if num_alts >= 2:
continue
elif num_alts > 0:
name = "{0}{1}".format(name, num_alts + 1)
# Validate stream name and discard the stream if it's bad.
match = re.match("([A-z0-9_+]+)", name)
if match:
name = match.group(1)
else:
self.logger.debug("The stream '{0}' has been ignored "
"since it is badly named.", name)
continue
# Force lowercase name and replace space with underscore.
streams[name.lower()] = stream
        # Create the best/worst synonyms
def stream_weight_only(s):
return (self.stream_weight(s)[0] or
(len(streams) == 1 and 1))
stream_names = filter(stream_weight_only, streams.keys())
sorted_streams = sorted(stream_names, key=stream_weight_only)
if isinstance(sorting_excludes, list):
for expr in sorting_excludes:
filter_func = stream_sorting_filter(expr, self.stream_weight)
sorted_streams = list(filter(filter_func, sorted_streams))
elif callable(sorting_excludes):
sorted_streams = list(filter(sorting_excludes, sorted_streams))
final_sorted_streams = OrderedDict()
for stream_name in sorted(streams, key=stream_weight_only):
final_sorted_streams[stream_name] = streams[stream_name]
if len(sorted_streams) > 0:
best = sorted_streams[-1]
worst = sorted_streams[0]
final_sorted_streams["worst"] = streams[worst]
final_sorted_streams["best"] = streams[best]
return final_sorted_streams
def get_streams(self, *args, **kwargs):
"""Deprecated since version 1.9.0.
Has been renamed to :func:`Plugin.streams`, this is an alias
for backwards compatibility.
"""
return self.streams(*args, **kwargs)
def _get_streams(self):
raise NotImplementedError
__all__ = ["Plugin"]
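# --- Hedged sketch (not part of the original module) ---
# The minimal surface a concrete plugin implements is can_handle_url()
# plus _get_streams(); the class name and URL pattern here are hypothetical.
class ExamplePlugin(Plugin):
    @classmethod
    def can_handle_url(cls, url):
        return url.startswith("https://example.invalid/")
    def _get_streams(self):
        # Return a mapping of stream name -> Stream object; empty for the sketch.
        return {}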
| 29.104592
| 101
| 0.573582
| 1,333
| 11,409
| 4.781695
| 0.250563
| 0.034515
| 0.009413
| 0.008472
| 0.085817
| 0.037967
| 0.009099
| 0
| 0
| 0
| 0
| 0.012887
| 0.326672
| 11,409
| 391
| 102
| 29.179028
| 0.816845
| 0.22193
| 0
| 0.131579
| 0
| 0.004386
| 0.068714
| 0.015884
| 0
| 0
| 0
| 0.002558
| 0
| 1
| 0.096491
| false
| 0.004386
| 0.035088
| 0.017544
| 0.25
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
7c1898e479d14fbe657ed1376514f87c04d2b942
| 2,971
|
py
|
Python
|
swav/vissl/vissl/data/ssl_transforms/img_patches_tensor.py
|
lhoestq/DeDLOC
|
36f5a6d043c3d727f9d098a35fba94aa351a5cd4
|
[
"Apache-2.0"
] | null | null | null |
swav/vissl/vissl/data/ssl_transforms/img_patches_tensor.py
|
lhoestq/DeDLOC
|
36f5a6d043c3d727f9d098a35fba94aa351a5cd4
|
[
"Apache-2.0"
] | null | null | null |
swav/vissl/vissl/data/ssl_transforms/img_patches_tensor.py
|
lhoestq/DeDLOC
|
36f5a6d043c3d727f9d098a35fba94aa351a5cd4
|
[
"Apache-2.0"
] | null | null | null |
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import logging
import math
from typing import Any, Dict
import numpy as np
from classy_vision.dataset.transforms import register_transform
from classy_vision.dataset.transforms.classy_transform import ClassyTransform
@register_transform("ImgPatchesFromTensor")
class ImgPatchesFromTensor(ClassyTransform):
"""
Create image patches from a torch Tensor or numpy array.
This transform was proposed in Jigsaw - https://arxiv.org/abs/1603.09246
Args:
num_patches (int): how many image patches to create
patch_jitter (int): space to leave between patches
"""
def __init__(self, num_patches=9, patch_jitter=21):
self.num_patches = num_patches
self.patch_jitter = patch_jitter
assert self.patch_jitter > 0, "Negative jitter not supported"
self.grid_side_len = int(math.sqrt(self.num_patches)) # usually = 3
logging.info(
f"ImgPatchesFromTensor: num_patches: {num_patches} "
f"patch_jitter: {patch_jitter}"
)
def __call__(self, image):
"""
Input image which is a torch.Tensor object of shape 3 x H x W
"""
data = []
grid_size = int(image.shape[1] / self.grid_side_len)
patch_size = grid_size - self.patch_jitter
jitter = np.random.randint(
0, self.patch_jitter, (2, self.grid_side_len, self.grid_side_len)
)
for i in range(self.grid_side_len):
for j in range(self.grid_side_len):
x_offset = i * grid_size
y_offset = j * grid_size
grid_cell = image[
:, y_offset : y_offset + grid_size, x_offset : x_offset + grid_size
]
patch = grid_cell[
:,
jitter[1, i, j] : jitter[1, i, j] + patch_size,
jitter[0, i, j] : jitter[0, i, j] + patch_size,
]
assert patch.shape[1] == patch_size, "Image not cropped properly"
assert patch.shape[2] == patch_size, "Image not cropped properly"
# copy patch data so that all patches are different in underlying memory
data.append(np.copy(patch))
return data
@classmethod
def from_config(cls, config: Dict[str, Any]) -> "ImgPatchesFromTensor":
"""
Instantiates ImgPatchesFromTensor from configuration.
Args:
            config (Dict): arguments for the transform
Returns:
ImgPatchesFromTensor instance.
"""
num_patches = config.get("num_patches", 9)
patch_jitter = config.get("patch_jitter", 21)
logging.info(f"ImgPatchesFromTensor | Using num_patches: {num_patches}")
logging.info(f"ImgPatchesFromTensor | Using patch_jitter: {patch_jitter}")
return cls(num_patches=num_patches, patch_jitter=patch_jitter)
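# --- Hedged usage sketch (not part of the original file) ---
# Applying the transform to a random 3 x 255 x 255 array yields 9 patches:
# 255 divides evenly into the 3x3 grid, so each grid cell is 85 px and each
# patch is 85 - 21 = 64 px per side.
if __name__ == "__main__":
    transform = ImgPatchesFromTensor(num_patches=9, patch_jitter=21)
    patches = transform(np.random.rand(3, 255, 255))
    print(len(patches), patches[0].shape)  # 9 (3, 64, 64)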
| 37.607595
| 88
| 0.623023
| 363
| 2,971
| 4.898072
| 0.338843
| 0.092801
| 0.040495
| 0.050619
| 0.17604
| 0.060742
| 0
| 0
| 0
| 0
| 0
| 0.012814
| 0.290811
| 2,971
| 78
| 89
| 38.089744
| 0.831039
| 0.208347
| 0
| 0
| 0
| 0
| 0.148661
| 0.009375
| 0
| 0
| 0
| 0
| 0.06383
| 1
| 0.06383
| false
| 0
| 0.12766
| 0
| 0.255319
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
7c1a65d75547f91601127884078028e187b93021
| 588
|
py
|
Python
|
prodapt_solutions/config/cliargs.py
|
DineshDevaraj/interview_answers
|
8d3d631dc96dc97ebef80604d6455c2c57c8823d
|
[
"MIT"
] | null | null | null |
prodapt_solutions/config/cliargs.py
|
DineshDevaraj/interview_answers
|
8d3d631dc96dc97ebef80604d6455c2c57c8823d
|
[
"MIT"
] | null | null | null |
prodapt_solutions/config/cliargs.py
|
DineshDevaraj/interview_answers
|
8d3d631dc96dc97ebef80604d6455c2c57c8823d
|
[
"MIT"
] | null | null | null |
import argparse
from helper.metaclasses_definition import Singleton
class CliArgs(metaclass=Singleton):
LogLevel = None
BankName = None
InputFilepath = None
@staticmethod
def init():
my_parser = argparse.ArgumentParser()
my_parser.add_argument('--bank-name', required=True)
my_parser.add_argument('--input-filepath')
my_parser.add_argument('--log-level')
args = my_parser.parse_args()
CliArgs.BankName = args.bank_name
CliArgs.InputFilepath = args.input_filepath
CliArgs.LogLevel = args.log_level
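# --- Hedged usage sketch (not part of the original file) ---
# Run as e.g. `python main.py --bank-name HDFC --log-level DEBUG`
# (the script name and argument values are hypothetical):
# CliArgs.init()
# print(CliArgs.BankName, CliArgs.InputFilepath, CliArgs.LogLevel)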
| 24.5
| 60
| 0.685374
| 65
| 588
| 6
| 0.492308
| 0.102564
| 0.084615
| 0.146154
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.221088
| 588
| 23
| 61
| 25.565217
| 0.851528
| 0
| 0
| 0
| 0
| 0
| 0.064736
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.0625
| false
| 0
| 0.125
| 0
| 0.4375
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
7c1c295aedd09d62a7ca4222595cff9f7fd4e5fc
| 1,237
|
py
|
Python
|
plugins/flytekit-papermill/setup.py
|
TeoZosa/flytekit
|
c4f33c6deaf36a3feaf397cfc6de3bd62e986733
|
[
"Apache-2.0"
] | null | null | null |
plugins/flytekit-papermill/setup.py
|
TeoZosa/flytekit
|
c4f33c6deaf36a3feaf397cfc6de3bd62e986733
|
[
"Apache-2.0"
] | null | null | null |
plugins/flytekit-papermill/setup.py
|
TeoZosa/flytekit
|
c4f33c6deaf36a3feaf397cfc6de3bd62e986733
|
[
"Apache-2.0"
] | null | null | null |
from setuptools import setup
PLUGIN_NAME = "papermill"
microlib_name = f"flytekitplugins-{PLUGIN_NAME}"
plugin_requires = [
"flytekit>=0.16.0b0,<1.0.0",
"flytekitplugins-spark>=0.16.0b0,<1.0.0,!=0.24.0b0",
"papermill>=1.2.0",
"nbconvert>=6.0.7",
"ipykernel>=5.0.0",
]
__version__ = "0.0.0+develop"
setup(
name=microlib_name,
version=__version__,
author="flyteorg",
author_email="admin@flyte.org",
description="This is the flytekit papermill plugin",
namespace_packages=["flytekitplugins"],
packages=[f"flytekitplugins.{PLUGIN_NAME}"],
install_requires=plugin_requires,
license="apache2",
python_requires=">=3.7",
classifiers=[
"Intended Audience :: Science/Research",
"Intended Audience :: Developers",
"License :: OSI Approved :: Apache Software License",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
"Topic :: Scientific/Engineering",
"Topic :: Scientific/Engineering :: Artificial Intelligence",
"Topic :: Software Development",
"Topic :: Software Development :: Libraries",
"Topic :: Software Development :: Libraries :: Python Modules",
],
)
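# --- Hedged usage note (not part of the original file) ---
# A plugin package like this is typically installed in editable mode during
# development: `pip install -e plugins/flytekit-papermill`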
| 30.170732
| 71
| 0.645918
| 133
| 1,237
| 5.864662
| 0.481203
| 0.015385
| 0.092308
| 0.066667
| 0.023077
| 0.023077
| 0
| 0
| 0
| 0
| 0
| 0.040527
| 0.202102
| 1,237
| 40
| 72
| 30.925
| 0.749747
| 0
| 0
| 0
| 0
| 0.028571
| 0.566694
| 0.14228
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.028571
| 0
| 0.028571
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
7c1e9749d62da31f126224b5dcf3c15abd4025bd
| 10,568
|
py
|
Python
|
base/frontends/views.py
|
danielecook/upvote.pub
|
fdda3c0895427ddc76f4680d0d63f2d4bac59da0
|
[
"MIT"
] | 1
|
2020-09-13T09:16:44.000Z
|
2020-09-13T09:16:44.000Z
|
base/frontends/views.py
|
danielecook/upvote.pub
|
fdda3c0895427ddc76f4680d0d63f2d4bac59da0
|
[
"MIT"
] | null | null | null |
base/frontends/views.py
|
danielecook/upvote.pub
|
fdda3c0895427ddc76f4680d0d63f2d4bac59da0
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
"""
import os
import markdown2
from flask import (Blueprint,
request,
render_template,
flash, g,
session,
redirect,
url_for,
abort,
Markup)
from werkzeug.security import check_password_hash, generate_password_hash
from logzero import logger
from base import db, app
from base import search as search_module # don't override function name
from base.users.forms import RegisterForm, LoginForm
from base.users.models import User
from base.threads.models import Thread, Publication
from base.subreddits.models import Subreddit
from base.users.decorators import requires_login
from base.utils.user_utils import get_school
from base.subreddits.forms import subreddit_subs, sub_form
from base.utils.email import send_email
from base.utils.misc import random_string, validate_sort_type
mod = Blueprint('frontends', __name__, url_prefix='')
@mod.before_request
def before_request():
g.user = None
if session.get('user_id'):
g.user = User.query.get(session['user_id'])
def home_subreddit():
logger.info(g.user)
if g.get('user'):
subreddit_subs = g.user.subreddit_subs.get('subs')
subs = Thread.query.order_by(db.desc(Thread.hotness), db.desc(Thread.hotness)) \
.filter(Subreddit.name.in_(subreddit_subs))
else:
subs = Thread.query.order_by(db.desc(Thread.hotness), db.desc(Thread.hotness))
return subs
def get_subreddits():
"""
Fetch user subreddits otherwise fetch a list of defaults
"""
if g.get('user'):
subreddit_subs = g.user.subreddit_subs.get('subs')
subreddits = Subreddit.query.filter(Subreddit.name.in_(subreddit_subs))
else:
# Default set of subreddits
subreddits = Subreddit.query.all()
return subreddits
def process_thread_paginator(trending=False, rs=None, subreddit=None, sort_type='hot'):
"""
abstracted because many sources pull from a thread listing
source (subreddit permalink, homepage, etc)
"""
threads_per_page = 15
cur_page = request.args.get('page') or 1
cur_page = int(cur_page)
thread_paginator = None
# if we are passing in a resultset, that means we are just looking to
# quickly paginate some arbitrary data, no sorting
if rs:
thread_paginator = rs.paginate(cur_page,
per_page=threads_per_page,
error_out=True)
return thread_paginator
# sexy line of code :)
base_query = subreddit.threads if subreddit else Thread.query
# Filter by user subs
logger.info(g.user)
if g.user:
subreddit_subs = g.user.subreddit_subs.get('subs')
base_query = base_query.join(Subreddit).filter(Subreddit.name.in_(subreddit_subs))
# Sorting
if sort_type == 'hot':
base_query = base_query.order_by(db.desc(Thread.hotness))
elif sort_type == 'top':
base_query = base_query.order_by(db.desc(Thread.votes))
elif sort_type == 'comments':
base_query = base_query.order_by(db.desc(Thread.n_comments))
elif sort_type == 'new':
base_query = base_query.order_by(db.desc(Thread.created_on))
elif sort_type == 'publication_date':
base_query = base_query.join(Publication).order_by(db.desc(Publication.pub_date))
thread_paginator = base_query.paginate(cur_page, per_page=threads_per_page, error_out=True)
return thread_paginator
@mod.route('/')
def home(sort_type='hot'):
"""
If not trending we order by creation date
"""
atom_url = url_for('subreddits.atom_feed', subreddit_name='frontpage', _external=True)
trending = True if request.path.endswith('trending') else False
page_title = "Trending" if trending else "Frontpage"
thread_paginator = process_thread_paginator(trending=trending)
return render_template('home.html',
atom_url=atom_url,
page_title=page_title,
cur_subreddit=home_subreddit(),
thread_paginator=thread_paginator)
@mod.route('/.atom')
@mod.route('/.xml')
@mod.route('/.rss')
def atom_redirect():
return redirect(url_for("subreddits.atom_feed", subreddit_name="frontpage"))
@mod.route('/h/<string:page>')
def render_markdown(page):
page_md = f"base/markdown/{page}.md"
if not os.path.exists(page_md):
abort(404)
with open(page_md, 'r') as f:
content = f.read()
md = markdown2.markdown(content,
extras = ['fenced-code-blocks',
'nofollow',
'target-blank-links',
'toc',
'tables',
'footnotes',
'metadata',
'markdown-in-html'])
return render_template('markdown.html',
page=md,
**md.metadata)
@mod.route('/search/', methods=['GET'])
def search():
"""
Allows users to search threads and comments
"""
query = request.args.get('query')
page_title=f"Search results for '{query}'"
rs = search_module.search(query, orderby='creation', search_title=True,
search_text=True)
thread_paginator = process_thread_paginator(rs=rs)
#rs = rs.all()
num_searches = rs.count()
subreddits = get_subreddits()
return render_template('home.html',
page_title=page_title,
cur_subreddit=home_subreddit(),
thread_paginator=thread_paginator,
num_searches=num_searches)
@mod.route('/login/', methods=['GET', 'POST'])
def login():
"""
We had to do some extra work to route the user back to
his or her original place before logging in
"""
if g.user:
return redirect(url_for('frontends.home'))
next = ''
if request.method == 'GET':
if 'next' in request.args:
next = request.args['next']
form = LoginForm(request.form)
# make sure data is valid, but doesn't validate password is right
if form.validate_on_submit():
# continue where we left off if so
user = User.query.filter_by(email=form.email.data).first()
        # we use werkzeug to validate the user's password
if user and check_password_hash(user.password, form.password.data):
# the session can't be modified as it's signed,
# it's a safe place to store the user id
session['user_id'] = user.id
if 'next' in request.form and request.form['next']:
return redirect(request.form['next'])
return redirect(url_for('frontends.home'))
flash('Wrong email or password', 'danger')
return render_template("login.html", form=form, next=next)
@mod.route('/logout/', methods=['GET', 'POST'])
@requires_login
def logout():
session.pop('user_id', None)
return redirect(url_for('frontends.home'))
@mod.route('/confirm-email/<string:token>')
def confirm_email(token):
"""
Confirm user email
"""
user = User.query.filter_by(email_token=token).first()
if user.email_token == token:
user.email_verified = True
db.session.commit()
flash("Thank you for confirming your email! You can now submit and comment.", 'success')
return redirect(url_for('frontends.home'))
@mod.route('/register/', methods=['GET', 'POST'])
def register():
"""
Registration page
"""
if g.user:
# If the user is logged in send them home
return redirect(url_for('frontends.home'))
next = ''
if request.method == 'GET':
if 'next' in request.args:
next = request.args['next']
form = RegisterForm(request.form)
if form.validate_on_submit():
# create an user instance not yet stored in the database
user = User(username=form.username.data,
email=form.email.data, \
password=generate_password_hash(form.password.data),
university=get_school(form.email.data),
email_token=random_string())
# Insert the record in our database and commit it
db.session.add(user)
email_confirm_link = url_for('frontends.confirm_email', token = user.email_token)
email_response = send_email("Confirm upvote.pub email",
"""Please visit the link below to confirm your email:\n\n{}{}""".format(request.url_root.strip("/"), email_confirm_link),
user.email)
# Log the user in, as he now has an id
db.session.commit()
session['user_id'] = user.id
flash('Thanks for signing up! Please confirm your email by following the link sent in the confirmation email.', 'success')
if 'next' in request.form and request.form['next']:
return redirect(request.form['next'])
return redirect(url_for('frontends.home'))
return render_template("register.html", form=form, next=next)
@mod.route('/subs/', methods=['GET', 'POST'])
def view_all():
"""
"""
subreddit_list = Subreddit.query.all()
form = None
if g.user:
if request.form:
form = subreddit_subs(request.form)
if form.validate_on_submit():
form_subs = form.data.get('subs')
form_subs = list(set([x['sub_name'] for x in form_subs if x['value']]))
g.user.subreddit_subs = {'subs': form_subs}
flash("Updated Subs", 'success')
db.session.commit()
else:
form = subreddit_subs()
for subreddit in subreddit_list:
sform = sub_form()
sform.sub_name = subreddit.name
sform.sub_group = subreddit.group
if g.user:
sform.value=subreddit.name in g.user.subreddit_subs['subs']
form.subs.append_entry(sform)
return render_template('subreddits/subs.html',
cur_subreddit=None,
page_title='subs',
form=form,
subreddit_list=subreddit_list)
| 35.582492
| 157
| 0.599924
| 1,281
| 10,568
| 4.79313
| 0.226386
| 0.029642
| 0.018241
| 0.014821
| 0.305212
| 0.267101
| 0.24772
| 0.20456
| 0.176222
| 0.145277
| 0
| 0.001201
| 0.290973
| 10,568
| 296
| 158
| 35.702703
| 0.81823
| 0.099167
| 0
| 0.236715
| 0
| 0
| 0.105976
| 0.008061
| 0
| 0
| 0
| 0
| 0
| 1
| 0.062802
| false
| 0.019324
| 0.077295
| 0.004831
| 0.231884
| 0.009662
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
7c1ed9a736672c0c84e29905bebe37cc7b644280
| 2,949
|
py
|
Python
|
Jarvis.py
|
vijayeshmt/Securitylock
|
5877663a170a22ab8b5931dcef07c74f149cf9b8
|
[
"CC0-1.0"
] | 1
|
2021-05-27T09:05:00.000Z
|
2021-05-27T09:05:00.000Z
|
Jarvis.py
|
vijayeshmt/Securitylock
|
5877663a170a22ab8b5931dcef07c74f149cf9b8
|
[
"CC0-1.0"
] | null | null | null |
Jarvis.py
|
vijayeshmt/Securitylock
|
5877663a170a22ab8b5931dcef07c74f149cf9b8
|
[
"CC0-1.0"
] | null | null | null |
import pyttsx3
import datetime
import speech_recognition as sr
import wikipedia
import webbrowser
import os
import smtplib
engine = pyttsx3.init('sapi5')
voices = engine.getProperty('voices')
engine.setProperty('voice', voices[0].id)
# To change the voice to female change 0 to 1.
def speak(audio):
engine.say(audio)
engine.runAndWait()
pass
def take_command():
"""
It takes microphone input from the user and returns a string
:return:
"""
r = sr.Recognizer()
with sr.Microphone() as source:
print("Listening...")
r.pause_threshold = 1.5 # It will wait 1.5 seconds to complete a sentence
audio = r.listen(source)
#Do read details
try:
print("Recognizing")
query = r.recognize_google(audio,language='en-in')
print(f'user said : {query}\n')
except Exception as e:
#print(e)
print("Say that again please")
return "None"
return query
def sendEmail(to,content):
    server = smtplib.SMTP('smtp.gmail.com', 587)  # 587 is Gmail's TLS submission port; the original value 28 is not an SMTP port
    # server.connect("smtp.gmail.com",465)
    # server.ehlo()
    server.starttls()  # must be called before login when using port 587
    server.login('jayeshvijayesh@gmail.com', '########')
server.sendmail('jayeshvijayesh@gmail.com',to,content)
server.close()
def wish_me():
hour = int(datetime.datetime.now().hour)
if hour >= 0 and hour < 12:
speak("Good morning")
elif hour >= 12 and hour < 18:
speak("Good afternoon")
else:
speak("Good night")
speak("I am JARVIS how can i help you")
if __name__ == '__main__':
wish_me()
while True:
        query = take_command().lower()
if 'wikipedia' in query:
speak("Searching wikipedia")
query = query.replace('wikipedia','')
            results = wikipedia.summary(query, sentences=2)  # increase or decrease sentences to read more or less
speak("According to wikipedia")
#print(results)
speak(results)
elif 'open youtube' in query:
# webbrowser.Chrome.open_new("youtube.com")
webbrowser.open("youtube.com")
elif "open google" in query:
webbrowser.open("google.com")
elif "play music" in query:
music_dir = "D:\\vijayesh\\music"
songs = os.listdir(music_dir)
print(songs)
os.startfile(os.path.join(music_dir,songs[1]))
elif "the time" in query:
strtime = datetime.datetime.now().strftime("%H:%M:%S")
speak(f"The time is {strtime}")
elif " open pycharm" in query:
pycharmpath ="C:\\Program Files\\JetBrains\\PyCharm Community Edition 2021"
os.startfile(pycharmpath)
#elif "open command" in query:
# filelocation = "path of the particular file like above"
# os.startfile(filelocation)
elif " email to vijayesh" or "email to vijesh" in query:
try:
speak("What should i say")#error present
content = take_command()
to = "jayeshvijayesh@gmail.com"
sendEmail(to,content)
speak("Email has been sent")
exit()
except Exception as e:
print(e)
speak("Sorry,I am not able to send this email")
exit()
| 26.097345
| 121
| 0.664632
| 401
| 2,949
| 4.837905
| 0.456359
| 0.028866
| 0.034021
| 0.018557
| 0.024742
| 0.024742
| 0
| 0
| 0
| 0
| 0
| 0.011961
| 0.206172
| 2,949
| 112
| 122
| 26.330357
| 0.816745
| 0.166158
| 0
| 0.075949
| 0
| 0
| 0.261618
| 0.041738
| 0
| 0
| 0
| 0
| 0
| 1
| 0.050633
| false
| 0.012658
| 0.088608
| 0
| 0.164557
| 0.075949
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
7c1ff3b3368700c34adbc70fc88801c1bc52b535
| 2,838
|
py
|
Python
|
utils/data_loader.py
|
dilum1995/DAugmentor
|
6cc86dccf826415a88b8226265e16ae96b5cc05b
|
[
"MIT"
] | 1
|
2020-08-02T13:06:03.000Z
|
2020-08-02T13:06:03.000Z
|
utils/data_loader.py
|
dilum1995/DAugmentor
|
6cc86dccf826415a88b8226265e16ae96b5cc05b
|
[
"MIT"
] | null | null | null |
utils/data_loader.py
|
dilum1995/DAugmentor
|
6cc86dccf826415a88b8226265e16ae96b5cc05b
|
[
"MIT"
] | null | null | null |
import pandas as pd
import os
import numpy as np
import cv2
from utils import constants as const
import matplotlib.pyplot as plt
class DataLoader:
def load_data():
        '''
        Handles the data loading and pre-processing.
        :return: (xtrain, ytrain), (xtest, ytest)
        '''
print('**** Read data into DAugmentor ****')
x_train = []
y_train = []
x_test = []
y_test = []
# setting the path to metadata
path = const.PATH
metadata_csv_path = os.path.join(path, const.FILE_METADATA)
test_img_dir_path = os.path.join(path, const.DIR_TEST)
train_img_dir_path = os.path.join(path, const.DIR_TRAIN)
print(metadata_csv_path)
# setting the path to train data
x_train_path = os.path.join(path, const.DIR_TRAIN)
print(x_train_path)
# setting the path to train data
x_test_path = os.path.join(path, const.DIR_TEST)
# reading meta data file as dataframe
df = pd.read_csv(metadata_csv_path, delimiter=',')
# dataset format:
# image_name
# label
# data_type
data_type_row = df["data_type"].tolist()
image_row = df["image_name"].tolist()
label_row = df["label"].tolist()
data_rows = len(data_type_row)
for row in range(data_rows):
if (data_type_row[row] == "TRAIN"):
# setting the path of the current image
img_path = os.path.join(train_img_dir_path, image_row[row])
# reading image
image = cv2.imread(img_path, cv2.IMREAD_GRAYSCALE)
                # downscaling image to 128x128
image = cv2.resize(image, (128, 128))
x_train.append(image)
print("Loaded: " + img_path)
# extracting labels
y_train.append(label_row[row])
if (data_type_row[row] == "TEST"):
# setting the path of the current image
img_path = os.path.join(test_img_dir_path, image_row[row])
# reading image
image = cv2.imread(img_path, cv2.IMREAD_GRAYSCALE)
                # downscaling image to 128x128
image = cv2.resize(image, (128, 128))
x_test.append(image)
print("Loaded: " + img_path)
# extracting labels
y_test.append(label_row[row])
xtrain = np.asarray(x_train)
ytrain = np.asarray(y_train)
xtest = np.asarray(x_test)
ytest = np.asarray(y_test)
print(x_train[0].shape)
print(x_train[0].shape)
print(xtrain[0].shape)
print(x_test[0].shape)
#(X_train, y_train), (X_test, y_test)
return (xtrain, ytrain), (xtest, ytest)
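# --- Hedged usage sketch (not part of the original file) ---
# load_data() takes no arguments (not even self), so it is effectively a
# static method; it expects const.PATH to point at a directory holding the
# metadata CSV plus the TRAIN/TEST image folders.
# (xtrain, ytrain), (xtest, ytest) = DataLoader.load_data()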
| 31.88764
| 75
| 0.565891
| 361
| 2,838
| 4.232687
| 0.235457
| 0.031414
| 0.045812
| 0.064136
| 0.541885
| 0.484293
| 0.443717
| 0.443717
| 0.353403
| 0.221204
| 0
| 0.016411
| 0.33439
| 2,838
| 89
| 76
| 31.88764
| 0.792483
| 0.180056
| 0
| 0.16
| 0
| 0
| 0.037281
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.02
| false
| 0
| 0.12
| 0
| 0.18
| 0.18
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
7c2027c5e127752f77dcae4527133dc870a9894e
| 288
|
py
|
Python
|
CompilerPython/LexerPython/main.py
|
valternunez/Compiler
|
879cecbbeb1c21d9d19021664ace62442273d3ba
|
[
"MIT"
] | null | null | null |
CompilerPython/LexerPython/main.py
|
valternunez/Compiler
|
879cecbbeb1c21d9d19021664ace62442273d3ba
|
[
"MIT"
] | null | null | null |
CompilerPython/LexerPython/main.py
|
valternunez/Compiler
|
879cecbbeb1c21d9d19021664ace62442273d3ba
|
[
"MIT"
] | null | null | null |
from lexer import *
import sys
if len(sys.argv) != 2:
print("usage: main.py file")
else:
lex = Lexer(sys.argv[1])
with open(sys.argv[1]) as f:
while True:
c = f.read(1)
if not c:
break
print(lex.scan().toString())
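# --- Hedged usage note (not part of the original file) ---
# Run as e.g. `python main.py program.src` (filename hypothetical); the loop
# scans and prints one token per iteration until the input is exhausted.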
| 19.2
| 40
| 0.496528
| 42
| 288
| 3.404762
| 0.666667
| 0.146853
| 0.111888
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.021858
| 0.364583
| 288
| 14
| 41
| 20.571429
| 0.759563
| 0
| 0
| 0
| 0
| 0
| 0.065972
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.166667
| 0
| 0.166667
| 0.166667
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
7c20c3110a71ede08c1358d9822f7b43bb07338f
| 4,903
|
py
|
Python
|
3D/Train_Module_3D.py
|
geometatqueens/RCNN
|
2e1e67264969f05a2f554595577dfb1025938245
|
[
"Unlicense"
] | 1
|
2020-04-30T21:31:59.000Z
|
2020-04-30T21:31:59.000Z
|
3D/Train_Module_3D.py
|
geometatqueens/RCNN
|
2e1e67264969f05a2f554595577dfb1025938245
|
[
"Unlicense"
] | null | null | null |
3D/Train_Module_3D.py
|
geometatqueens/RCNN
|
2e1e67264969f05a2f554595577dfb1025938245
|
[
"Unlicense"
] | null | null | null |
"""The present code is the Version 1.0 of the RCNN approach to perform MPS
in 3D for categorical variables. It has been developed by S. Avalos and J. Ortiz in the
Geometallurygical Group at Queen's University as part of a PhD program.
The code is not free of bugs but running end-to-end.
Any comments and further improvements are well recevied to: 17saa6@queensu.ca
April 16, 2019.
Geomet Group - Queen's University - Canada"""
# Do not display the AVX message about using GPU
import os
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
#from tensorflow.python.client import device_lib
#print(device_lib.list_local_devices())
#os.environ["CUDA_DEVICE_ORDER"]="PCI_BUS_ID" # see issue #152
#os.environ["CUDA_VISIBLE_DEVICES"]="0"
## #########################
import numpy as np
import tensorflow as tf
import time
import External_Functions_3D as fns_nested
import gc
for ind0 in range(1):
start_time_AllTrain = time.time()
HyperPar = []
HyperPar.append(50) # SGsizex - Num 0
HyperPar.append(50) # SGsizey - Num 1
HyperPar.append(50) # SGsizez - Num 2
HyperPar.append(int(7)) # Search_x - Num 3
HyperPar.append(int(7)) # Search_y - Num 4
HyperPar.append(int(7)) # Search_z - Num 5
HyperPar.append(int(7)) # IPsizex - Num 6
HyperPar.append(int(7)) # IPsizey - Num 7
HyperPar.append(int(7)) # IPsizez - Num 8
    HyperPar.append(50) # Percentage of Data Conditioning - Num 9 (scaled: a value of 10 represents 1%)
HyperPar.append(1) # MinDC - Num 10
HyperPar.append(1500) # Num Fully Connected - Num 11
HyperPar.append(3) # wdnh - Num 12
HyperPar.append(16) # convdepth - Num 13
HyperPar.append(2) # num of categories - Num 14
print("SG: ", int(HyperPar[3]),"x",int(HyperPar[4]),"x",int(HyperPar[5]), "IP: ", int(HyperPar[6]),"x",int(HyperPar[7]),"x",int(HyperPar[8]))
Ncicles = 500
Nepoch = 1
#Nbatch = 250
Nsamples = 512
TrainingImage = "TI_Collaboration_1of4_50x50x50_newRepresentation.dat"
LocModel = 'Models/3D_NewRepresentation/Allperc/%sx%sx%s_%sx%sx%s_4ConvNets_4HL_BN_3FC%s_ws%sx%sx%s_%sconvdepth/FeatMaps'%(int(HyperPar[3]),int(HyperPar[4]),int(HyperPar[5]), int(HyperPar[6]),int(HyperPar[7]),int(HyperPar[8]), int(HyperPar[11]), int(HyperPar[12]),int(HyperPar[12]),int(HyperPar[12]), int(HyperPar[13]))
#LocModel = 'Models/3D_NewRepresentation/New%sperc/%sx%sx%s_%sx%sx%s_4ConvNets_4HL_BN_3FC%s_ws%sx%sx%s_%sconvdepth/FeatMaps'%(int(HyperPar[9]), int(HyperPar[3]),int(HyperPar[4]),int(HyperPar[5]), int(HyperPar[6]),int(HyperPar[7]),int(HyperPar[8]), int(HyperPar[11]), int(HyperPar[12]),int(HyperPar[12]),int(HyperPar[12]), int(HyperPar[13]))
LocFile = 'Models/3D_NewRepresentation/Allperc/%sx%sx%s_%sx%sx%s_4ConvNets_4HL_BN_3FC%s_ws%sx%sx%s_%sconvdepth'%(int(HyperPar[3]),int(HyperPar[4]),int(HyperPar[5]), int(HyperPar[6]),int(HyperPar[7]),int(HyperPar[8]), int(HyperPar[11]), int(HyperPar[12]),int(HyperPar[12]),int(HyperPar[12]), int(HyperPar[13]))
#LocFile = 'Models/3D_NewRepresentation/New%sperc/%sx%sx%s_%sx%sx%s_4ConvNets_4HL_BN_3FC%s_ws%sx%sx%s_%sconvdepth'%(int(HyperPar[9]), int(HyperPar[3]),int(HyperPar[4]),int(HyperPar[5]), int(HyperPar[6]),int(HyperPar[7]),int(HyperPar[8]), int(HyperPar[11]), int(HyperPar[12]),int(HyperPar[12]),int(HyperPar[12]), int(HyperPar[13]))
print("[Graph]")
#fns_nested.CreateGraph_4ConvNets_4HL_NFeaConv_wdnhxwdnh_BN_3D_NoBN(HyperPar=HyperPar, LocModel=LocModel)
fns_nested.CreateGraph_4ConvNets_4HL_NFeaConv_wdnhxwdnh_BN_3D(HyperPar=HyperPar, LocModel=LocModel)
# To save the TI
TempSimGrid = fns_nested.Grid(HyperPar=HyperPar, DBname=TrainingImage, Lvl=3,Training=False, Padding=True)
TempSimGrid.SavePlot(name=LocModel+'_TI.png', Level=1)
MaxLR, MinLR = 0.01, 0.001
StepLR = 10
PointStart = 1
for indTrain in range(Ncicles):
#HyperPar[9] = np.random.randint(41)+10
cuos = indTrain%(2*StepLR)
if cuos < StepLR:
LearningRate = np.around(((MaxLR - MinLR)/StepLR)*cuos + MinLR, decimals=7)
else:
LearningRate = np.around(((MaxLR - MinLR)/StepLR)*(StepLR - cuos) + MaxLR, decimals=7)
start_time_1 = time.time()
print ("Cicle: {}".format(indTrain+PointStart), "Learning Rate: ", LearningRate)
TempSimGrid = fns_nested.Grid(HyperPar=HyperPar, DBname=TrainingImage, Lvl=5, Training=True, Padding=True)
print("[Sim]")
TempSimGrid.Simulate_4ConvNets_BN_3D(LocModel=LocModel, Cicle=(indTrain+PointStart), Plot=True)
print("[Saving Grid]")
TempSimGrid.SaveGrid(file="{}/TrainReas_{}.txt".format(LocFile, indTrain+PointStart))
print("[Train]")
TempSimGrid.Train_4ConvNets_BN_3D(Epochs=Nepoch, Num_samples=Nsamples, LocModel=LocModel, LR=LearningRate)
print("--%s seconds of whole training process-" % (np.around((time.time() - start_time_1), decimals=2)))
gc.collect()
print(" ")
print("--%s minutes of ALL training-" % ((time.time() - start_time_AllTrain)/60))
| 53.879121
| 343
| 0.713237
| 743
| 4,903
| 4.585464
| 0.312248
| 0.16789
| 0.017611
| 0.056355
| 0.377458
| 0.356325
| 0.335192
| 0.335192
| 0.335192
| 0.267684
| 0
| 0.048894
| 0.124006
| 4,903
| 91
| 344
| 53.879121
| 0.744354
| 0.373241
| 0
| 0.172414
| 0
| 0.034483
| 0.151505
| 0.088577
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.103448
| 0
| 0.103448
| 0.155172
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
7c21319778186a2abea07c3db5dcc502d14e209f
| 1,306
|
py
|
Python
|
feature_flags_project/feature_flags/providers.py
|
steuke/django_feature_flags_example
|
00e33378999d6d567c37593c17289405fc7b5847
|
[
"MIT"
] | null | null | null |
feature_flags_project/feature_flags/providers.py
|
steuke/django_feature_flags_example
|
00e33378999d6d567c37593c17289405fc7b5847
|
[
"MIT"
] | 3
|
2021-09-22T18:56:38.000Z
|
2021-11-29T16:11:59.000Z
|
feature_flags_project/feature_flags/providers.py
|
steuke/django_feature_flags_example
|
00e33378999d6d567c37593c17289405fc7b5847
|
[
"MIT"
] | null | null | null |
import logging
from typing import Dict
from django.http import HttpRequest
logger = logging.getLogger(__name__)
class FeatureFlagProvider:
def is_feature_enabled(self, feature_name: str, user_id: str = None, attributes: Dict = None):
raise NotImplementedError("You must override FeatureFlagProvider.is_feature_enabled()")
def _attributes_from_request(request: HttpRequest) -> Dict:
if not request:
return dict()
attributes = dict()
try:
attributes["is_staff"] = request.user.is_staff
return attributes
except Exception:
logger.exception(
"Unexpected exception while trying to parse http-request for feature-attributes."
)
return dict()
def is_feature_enabled(feature_name: str, request: HttpRequest) -> bool:
from django.conf import settings
is_enabled = False
attributes = _attributes_from_request(request)
try:
is_enabled = settings.FEATURE_FLAG_PROVIDER.is_feature_enabled(
feature_name=feature_name, user_id="dontcare", attributes=attributes
)
logger.info(f"Feature '{feature_name}' is enabled={is_enabled}")
except Exception:
logger.exception(f"Exception while trying to check feature-flag state for '{feature_name}'")
return is_enabled
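# --- Hedged sketch (not part of the original module) ---
# A trivial concrete provider; the class name is hypothetical. The call site
# above suggests settings.FEATURE_FLAG_PROVIDER holds an *instance*.
class AlwaysOnProvider(FeatureFlagProvider):
    def is_feature_enabled(self, feature_name: str, user_id: str = None, attributes: Dict = None):
        # Every feature is reported as enabled, regardless of attributes.
        return True
# In settings.py (assumption):
# FEATURE_FLAG_PROVIDER = AlwaysOnProvider()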
| 32.65
| 100
| 0.712098
| 153
| 1,306
| 5.856209
| 0.346405
| 0.073661
| 0.071429
| 0.042411
| 0.060268
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.20827
| 1,306
| 39
| 101
| 33.487179
| 0.866538
| 0
| 0
| 0.193548
| 0
| 0
| 0.20827
| 0.030628
| 0
| 0
| 0
| 0
| 0
| 1
| 0.096774
| false
| 0
| 0.129032
| 0
| 0.387097
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
7c23d8601d0a15002cc4ed3c1cea741aa47089e1
| 34,227
|
py
|
Python
|
src/plottoolbox/functions/kde.py
|
timcera/plottoolbox
|
b5f4b634d366eb5ba244e2f1fd33a7ef0eba7298
|
[
"BSD-3-Clause"
] | null | null | null |
src/plottoolbox/functions/kde.py
|
timcera/plottoolbox
|
b5f4b634d366eb5ba244e2f1fd33a7ef0eba7298
|
[
"BSD-3-Clause"
] | 6
|
2021-09-06T21:26:12.000Z
|
2022-03-30T11:55:56.000Z
|
src/plottoolbox/functions/kde.py
|
timcera/plottoolbox
|
b5f4b634d366eb5ba244e2f1fd33a7ef0eba7298
|
[
"BSD-3-Clause"
] | null | null | null |
# -*- coding: utf-8 -*-
"""Collection of functions for the manipulation of time series."""
from __future__ import absolute_import, division, print_function
import itertools
import os
import warnings
import mando
import numpy as np
import pandas as pd
from mando.rst_text_formatter import RSTHelpFormatter
from tstoolbox import tsutils
from .. import plotutils
warnings.filterwarnings("ignore")
@mando.command("kde", formatter_class=RSTHelpFormatter, doctype="numpy")
@tsutils.doc(plotutils.ldocstrings)
def kde_cli(
input_ts="-",
columns=None,
start_date=None,
end_date=None,
clean=False,
skiprows=None,
index_type="datetime",
names=None,
ofilename="plot.png",
xtitle="",
ytitle="",
title="",
figsize="10,6.0",
legend=None,
legend_names=None,
subplots=False,
sharex=True,
sharey=False,
colors="auto",
linestyles="auto",
markerstyles=" ",
bar_hatchstyles="auto",
style="auto",
logx=False,
logy=False,
xaxis="arithmetic",
yaxis="arithmetic",
xlim=None,
ylim=None,
secondary_y=False,
mark_right=True,
scatter_matrix_diagonal="kde",
bootstrap_size=50,
bootstrap_samples=500,
norm_xaxis=False,
norm_yaxis=False,
lognorm_xaxis=False,
lognorm_yaxis=False,
xy_match_line="",
grid=False,
label_rotation=None,
label_skip=1,
force_freq=None,
drawstyle="default",
por=False,
invert_xaxis=False,
invert_yaxis=False,
round_index=None,
plotting_position="weibull",
prob_plot_sort_values="descending",
source_units=None,
target_units=None,
lag_plot_lag=1,
plot_styles="bright",
hlines_y=None,
hlines_xmin=None,
hlines_xmax=None,
hlines_colors=None,
hlines_linestyles="-",
vlines_x=None,
vlines_ymin=None,
vlines_ymax=None,
vlines_colors=None,
vlines_linestyles="-",
):
r"""Kernel density estimation of probability density function.
"kde" will create a plot of estimation of the probability density function
based on the data called kernel density estimation (KDE).
{ydata}
Parameters
----------
{input_ts}
ofilename : str
[optional, defaults to 'plot.png']
Output filename for the plot. Extension defines
the type, for example 'filename.png' will create a PNG file.
If used within Python, and `ofilename` is None will return the
Matplotlib figure that can then be changed or added to as
needed.
lag_plot_lag
[optional, default to 1]
The lag used if ``type`` "lag_plot" is chosen.
xtitle : str
[optional, default depends on ``type``]
Title of x-axis.
ytitle : str
[optional, default depends on ``type``]
Title of y-axis.
title : str
[optional, defaults to '']
Title of chart.
figsize : str
[optional, defaults to '10,6.5']
The 'width,height' of plot in inches.
legend
[optional, defaults to True]
Whether to display the legend.
legend_names : str
[optional, defaults to None]
Legend would normally use the time-series names associated with
the input data. The 'legend_names' option allows you to
override the names in the data set. You must supply a comma
separated list of strings for each time-series in the data set.
subplots
[optional, defaults to False]
Make separate subplots for each time series.
sharex
[optional, default to True]
In case subplots=True, share x axis.
sharey
[optional, default to False]
In case subplots=True, share y axis.
colors
[optional, default is 'auto']
The default 'auto' will cycle through matplotlib colors in the chosen
style.
At the command line supply a comma separated matplotlib
color codes, or within Python a list of color code strings.
Can identify colors in four different ways.
1. Use 'CN' where N is a number from 0 to 9 that gets the Nth color
from the current style.
2. Single character code from the table below.
+------+---------+
| Code | Color |
+======+=========+
| b | blue |
+------+---------+
| g | green |
+------+---------+
| r | red |
+------+---------+
| c | cyan |
+------+---------+
| m | magenta |
+------+---------+
| y | yellow |
+------+---------+
| k | black |
+------+---------+
3. Number between 0 and 1 that represents the level of gray, where 0 is
        white and 1 is black.
4. Any of the HTML color names.
+------------------+
| HTML Color Names |
+==================+
| red |
+------------------+
| burlywood |
+------------------+
| chartreuse |
+------------------+
| ...etc. |
+------------------+
Color reference:
http://matplotlib.org/api/colors_api.html
linestyles
[optional, default to 'auto']
If 'auto' will iterate through the available matplotlib line types.
Otherwise on the command line a comma separated list, or a list of
strings if using the Python API.
To not display lines use a space (' ') as the linestyle code.
Separated 'colors', 'linestyles', and 'markerstyles' instead of using
the 'style' keyword.
+---------+--------------+
| Code | Lines |
+=========+==============+
| ``-`` | solid |
+---------+--------------+
| -- | dashed |
+---------+--------------+
| -. | dash_dot |
+---------+--------------+
| : | dotted |
+---------+--------------+
| None | draw nothing |
+---------+--------------+
| ' ' | draw nothing |
+---------+--------------+
| '' | draw nothing |
+---------+--------------+
Line reference:
http://matplotlib.org/api/artist_api.html
markerstyles
[optional, default to ' ']
The default ' ' will not plot a marker. If 'auto' will iterate through
the available matplotlib marker types. Otherwise on the command line
a comma separated list, or a list of strings if using the Python API.
Separated 'colors', 'linestyles', and 'markerstyles' instead of using
the 'style' keyword.
+-------+----------------+
| Code | Markers |
+=======+================+
| . | point |
+-------+----------------+
| o | circle |
+-------+----------------+
| v | triangle down |
+-------+----------------+
| ^ | triangle up |
+-------+----------------+
| < | triangle left |
+-------+----------------+
| > | triangle right |
+-------+----------------+
| 1 | tri_down |
+-------+----------------+
| 2 | tri_up |
+-------+----------------+
| 3 | tri_left |
+-------+----------------+
| 4 | tri_right |
+-------+----------------+
| 8 | octagon |
+-------+----------------+
| s | square |
+-------+----------------+
| p | pentagon |
+-------+----------------+
| ``*`` | star |
+-------+----------------+
| h | hexagon1 |
+-------+----------------+
| H | hexagon2 |
+-------+----------------+
| ``+`` | plus |
+-------+----------------+
| x | x |
+-------+----------------+
| D | diamond |
+-------+----------------+
| d | thin diamond |
+-------+----------------+
| _ | hlines_y |
+-------+----------------+
| None | nothing |
+-------+----------------+
| ' ' | nothing |
+-------+----------------+
| '' | nothing |
+-------+----------------+
Marker reference:
http://matplotlib.org/api/markers_api.html
style
[optional, default is None]
Still available, but if None is replaced by 'colors', 'linestyles', and
'markerstyles' options. Currently the 'style' option will override the
others.
Comma separated matplotlib style strings per time-series. Just
combine codes in 'ColorMarkerLine' order, for example 'r*--' is
a red dashed line with star marker.
bar_hatchstyles
[optional, default to "auto", only used if type equal to "bar", "barh",
"bar_stacked", and "barh_stacked"]
If 'auto' will iterate through the available matplotlib hatch types.
Otherwise on the command line a comma separated list, or a list of
strings if using the Python API.
+-----------------+-------------------+
| bar_hatchstyles | Description |
+=================+===================+
| / | diagonal hatching |
+-----------------+-------------------+
| ``\`` | back diagonal |
+-----------------+-------------------+
| ``|`` | vertical |
+-----------------+-------------------+
| - | horizontal |
+-----------------+-------------------+
| + | crossed |
+-----------------+-------------------+
| x | crossed diagonal |
+-----------------+-------------------+
| o | small circle |
+-----------------+-------------------+
| O | large circle |
+-----------------+-------------------+
| . | dots |
+-----------------+-------------------+
| * | stars |
+-----------------+-------------------+
logx
DEPRECATED: use '--xaxis="log"' instead.
logy
DEPRECATED: use '--yaxis="log"' instead.
xlim
[optional, default is based on range of x values]
Comma separated lower and upper limits for the x-axis of the
plot. For example, '--xlim 1,1000' would limit the plot from
1 to 1000, where '--xlim ,1000' would base the lower limit on
the data and set the upper limit to 1000.
ylim
[optional, default is based on range of y values]
Comma separated lower and upper limits for the y-axis of the
plot. See `xlim` for examples.
xaxis : str
[optional, default is 'arithmetic']
Defines the type of the xaxis. One of 'arithmetic', 'log'.
yaxis : str
[optional, default is 'arithmetic']
Defines the type of the yaxis. One of 'arithmetic', 'log'.
secondary_y
[optional, default is False]
Whether to plot on the secondary y-axis. If a list/tuple, which
time-series to plot on secondary y-axis.
mark_right
[optional, default is True]
When using a secondary_y axis, should the legend label the axis of the
various time-series automatically.
scatter_matrix_diagonal : str
[optional, defaults to 'kde']
If plot type is 'scatter_matrix', this specifies the plot along the
diagonal. One of 'kde' for Kernel Density Estimation or 'hist'
for a histogram.
bootstrap_size : int
[optional, defaults to 50]
The size of the random subset for 'bootstrap' plot.
bootstrap_samples
[optional, defaults to 500]
The number of random subsets of 'bootstrap_size'.
norm_xaxis
DEPRECATED: use '--type="norm_xaxis"' instead.
norm_yaxis
DEPRECATED: use '--type="norm_yaxis"' instead.
lognorm_xaxis
DEPRECATED: use '--type="lognorm_xaxis"' instead.
lognorm_yaxis
DEPRECATED: use '--type="lognorm_yaxis"' instead.
xy_match_line : str
[optional, defaults is '']
Will add a match line where x == y. Set to a line style code.
grid
[optional, default is False]
Whether to plot grid lines on the major ticks.
label_rotation : int
[optional]
Rotation for major labels for bar plots.
label_skip : int
[optional]
Skip for major labels for bar plots.
drawstyle : str
[optional, default is 'default']
'default' connects the points with lines. The
steps variants produce step-plots. 'steps' is equivalent to 'steps-pre'
and is maintained for backward-compatibility.
ACCEPTS::
['default' | 'steps' | 'steps-pre' | 'steps-mid' | 'steps-post']
por
[optional]
Plot from first good value to last good value. Strips NANs
from beginning and end.
{force_freq}
invert_xaxis
[optional, default is False]
Invert the x-axis.
invert_yaxis
[optional, default is False]
Invert the y-axis.
plotting_position : str
[optional, default is 'weibull']
{plotting_position_table}
Only used for norm_xaxis, norm_yaxis, lognorm_xaxis,
lognorm_yaxis, weibull_xaxis, and weibull_yaxis.
prob_plot_sort_values : str
[optional, default is 'descending']
How to sort the values for the probability plots.
Only used for norm_xaxis, norm_yaxis, lognorm_xaxis,
lognorm_yaxis, weibull_xaxis, and weibull_yaxis.
{columns}
{start_date}
{end_date}
{clean}
{skiprows}
{index_type}
{names}
{source_units}
{target_units}
{round_index}
plot_styles: str
[optional, default is "default"]
Set the style of the plot. One or more of Matplotlib styles "classic",
"Solarize_Light2", "bmh", "dark_background", "fast", "fivethirtyeight",
"ggplot", "grayscale", "seaborn", "seaborn-bright",
"seaborn-colorblind", "seaborn-dark", "seaborn-dark-palette",
"seaborn-darkgrid", "seaborn-deep", "seaborn-muted",
"seaborn-notebook", "seaborn-paper", "seaborn-pastel",
"seaborn-poster", "seaborn-talk", "seaborn-ticks", "seaborn-white",
"seaborn-whitegrid", "tableau-colorblind10", and
SciencePlots styles "science", "grid", "ieee", "scatter", "notebook",
"high-vis", "bright", "vibrant", "muted", and "retro".
        If multiple styles then each overrides some or all of the
characteristics of the previous.
Color Blind Appropriate Styles
The styles "seaborn-colorblind", "tableau-colorblind10", "bright",
"vibrant", and "muted" are all styles that are setup to be able to be
distinguished by someone with color blindness.
Black, White, and Gray Styles
The "ieee" style is appropriate for black, white, and gray, however the
"ieee" also will change the chart size to fit in a column of the "IEEE"
journal.
The "grayscale" is another style useful for photo-copyable black,
        white, and gray.
Matplotlib styles:
https://matplotlib.org/3.3.1/gallery/style_sheets/style_sheets_reference.html
SciencePlots styles:
https://github.com/garrettj403/SciencePlots
hlines_y:
[optional, defaults to None]
Number or list of y values where to place a horizontal line.
hlines_xmin:
[optional, defaults to None]
List of minimum x values at which to start each horizontal line. If a
list, it must be the same length as `hlines_y`. If a single number, it
is used as the minimum x value for all horizontal lines. A missing
value or None starts at the minimum x value of the entire plot.
hlines_xmax:
[optional, defaults to None]
List of maximum x values at which to end each horizontal line. If a
list, it must be the same length as `hlines_y`. If a single number, it
is used as the maximum x value for all horizontal lines. A missing
value or None ends at the maximum x value of the entire plot.
hlines_colors:
[optional, defaults to None]
List of colors for the horizontal lines. If a single color, it is used
for all horizontal lines. If a list, it must be the same length as
`hlines_y`. If None, colors are taken from the color palette of the
current plot style.
hlines_linestyles:
[optional, defaults to None]
List of linestyles for the horizontal lines. If a single linestyle, it
is used for all horizontal lines. If a list, it must be the same length
as `hlines_y`. If None, linestyles are taken from the standard
linestyles list.
vlines_x:
[optional, defaults to None]
List of x values at which to place vertical lines.
vlines_ymin:
[optional, defaults to None]
List of minimum y values at which to start each vertical line. If a
list, it must be the same length as `vlines_x`. If a single number, it
is used as the minimum y value for all vertical lines. A missing value
or None starts at the minimum y value of the entire plot.
vlines_ymax:
[optional, defaults to None]
List of maximum y values at which to end each vertical line. If a
list, it must be the same length as `vlines_x`. If a single number, it
is used as the maximum y value for all vertical lines. A missing value
or None ends at the maximum y value of the entire plot.
vlines_colors:
[optional, defaults to None]
List of colors for the vertical lines. If a single color, it is used
for all vertical lines. If a list, it must be the same length as
`vlines_x`. If None, colors are taken from the color palette of the
current plot style.
vlines_linestyles:
[optional, defaults to None]
List of linestyles for the vertical lines. If a single linestyle, it
is used for all vertical lines. If a list, it must be the same length
as `vlines_x`. If None, linestyles are taken from the standard
linestyles list.
"""
plt = kde(
input_ts=input_ts,
columns=columns,
start_date=start_date,
end_date=end_date,
clean=clean,
skiprows=skiprows,
index_type=index_type,
names=names,
ofilename=ofilename,
xtitle=xtitle,
ytitle=ytitle,
title=title,
figsize=figsize,
legend=legend,
legend_names=legend_names,
subplots=subplots,
sharex=sharex,
sharey=sharey,
colors=colors,
linestyles=linestyles,
markerstyles=markerstyles,
bar_hatchstyles=bar_hatchstyles,
style=style,
logx=logx,
logy=logy,
xaxis=xaxis,
yaxis=yaxis,
xlim=xlim,
ylim=ylim,
secondary_y=secondary_y,
mark_right=mark_right,
scatter_matrix_diagonal=scatter_matrix_diagonal,
bootstrap_size=bootstrap_size,
bootstrap_samples=bootstrap_samples,
norm_xaxis=norm_xaxis,
norm_yaxis=norm_yaxis,
lognorm_xaxis=lognorm_xaxis,
lognorm_yaxis=lognorm_yaxis,
xy_match_line=xy_match_line,
grid=grid,
label_rotation=label_rotation,
label_skip=label_skip,
force_freq=force_freq,
drawstyle=drawstyle,
por=por,
invert_xaxis=invert_xaxis,
invert_yaxis=invert_yaxis,
round_index=round_index,
plotting_position=plotting_position,
prob_plot_sort_values=prob_plot_sort_values,
source_units=source_units,
target_units=target_units,
lag_plot_lag=lag_plot_lag,
plot_styles=plot_styles,
hlines_y=hlines_y,
hlines_xmin=hlines_xmin,
hlines_xmax=hlines_xmax,
hlines_colors=hlines_colors,
hlines_linestyles=hlines_linestyles,
vlines_x=vlines_x,
vlines_ymin=vlines_ymin,
vlines_ymax=vlines_ymax,
vlines_colors=vlines_colors,
vlines_linestyles=vlines_linestyles,
)
# @tsutils.validator(
# ofilename=[str, ["pass", []], 1],
# type=[str, ["domain", ["kde",],], 1,],
# lag_plot_lag=[int, ["range", [1, None]], 1],
# xtitle=[str, ["pass", []], 1],
# ytitle=[str, ["pass", []], 1],
# title=[str, ["pass", []], 1],
# figsize=[float, ["range", [0, None]], 2],
# legend=[bool, ["domain", [True, False]], 1],
# legend_names=[str, ["pass", []], 1],
# subplots=[bool, ["domain", [True, False]], 1],
# sharex=[bool, ["domain", [True, False]], 1],
# sharey=[bool, ["domain", [True, False]], 1],
# colors=[str, ["pass", []], None],
# linestyles=[str, ["domain", ["auto", None, "", " ", " "] + plotutils.LINE_LIST], None],
# markerstyles=[str, ["domain", ["auto", None, "", " ", " "] + plotutils.MARKER_LIST], None],
# bar_hatchstyles=[str, ["domain", ["auto", None, "", " ", " "] + plotutils.HATCH_LIST], None],
# style=[str, ["pass", []], None],
# xlim=[float, ["pass", []], 2],
# ylim=[float, ["pass", []], 2],
# xaxis=[str, ["domain", ["arithmetic", "log"]], 1],
# yaxis=[str, ["domain", ["arithmetic", "log"]], 1],
# secondary_y=[bool, ["domain", [True, False]], 1],
# mark_right=[bool, ["domain", [True, False]], 1],
# scatter_matrix_diagonal=[str, ["domain", ["kde", "hist"]], 1],
# bootstrap_size=[int, ["range", [0, None]], 1],
# xy_match_line=[str, ["pass", []], 1],
# grid=[bool, ["domain", [True, False]], 1],
# label_rotation=[float, ["pass", []], 1],
# label_skip=[int, ["range", [1, None]], 1],
# drawstyle=[str, ["pass", []], 1],
# por=[bool, ["domain", [True, False]], 1],
# invert_xaxis=[bool, ["domain", [True, False]], 1],
# invert_yaxis=[bool, ["domain", [True, False]], 1],
# plotting_position=[
# str,
# [
# "domain",
# ["weibull", "benard", "tukey", "gumbel", "hazen", "cunnane", "california"],
# ],
# 1,
# ],
# prob_plot_sort_values=[str, ["domain", ["ascending", "descending"]], 1],
# plot_styles=[
# str,
# [
# "domain",
# [
# "classic",
# "Solarize_Light2",
# "bmh",
# "dark_background",
# "fast",
# "fivethirtyeight",
# "ggplot",
# "grayscale",
# "seaborn",
# "seaborn-bright",
# "seaborn-colorblind",
# "seaborn-dark",
# "seaborn-dark-palette",
# "seaborn-darkgrid",
# "seaborn-deep",
# "seaborn-muted",
# "seaborn-notebook",
# "seaborn-paper",
# "seaborn-pastel",
# "seaborn-poster",
# "seaborn-talk",
# "seaborn-ticks",
# "seaborn-white",
# "seaborn-whitegrid",
# "tableau-colorblind10",
# "science",
# "grid",
# "ieee",
# "scatter",
# "notebook",
# "high-vis",
# "bright",
# "vibrant",
# "muted",
# "retro",
# ],
# ],
# None,
# ],
# hlines_y=[float, ["pass", []], None],
# hlines_xmin=[float, ["pass", []], None],
# hlines_xmax=[float, ["pass", []], None],
# hlines_colors=[str, ["pass", []], None],
# hlines_linestyles=[
# str,
# ["domain", ["auto", None, "", " ", " "] + plotutils.LINE_LIST],
# None,
# ],
# vlines_x=[float, ["pass", []], None],
# vlines_ymin=[float, ["pass", []], None],
# vlines_ymax=[float, ["pass", []], None],
# vlines_colors=[str, ["pass", []], None],
# vlines_linestyles=[
# str,
# ["domain", ["auto", None, "", " ", " "] + plotutils.LINE_LIST],
# None,
# ],
# )
def kde(
input_ts="-",
columns=None,
start_date=None,
end_date=None,
clean=False,
skiprows=None,
index_type="datetime",
names=None,
ofilename="plot.png",
xtitle="",
ytitle="",
title="",
figsize="10,6.0",
legend=None,
legend_names=None,
subplots=False,
sharex=True,
sharey=False,
colors="auto",
linestyles="auto",
markerstyles=" ",
bar_hatchstyles="auto",
style="auto",
logx=False,
logy=False,
xaxis="arithmetic",
yaxis="arithmetic",
xlim=None,
ylim=None,
secondary_y=False,
mark_right=True,
scatter_matrix_diagonal="kde",
bootstrap_size=50,
bootstrap_samples=500,
norm_xaxis=False,
norm_yaxis=False,
lognorm_xaxis=False,
lognorm_yaxis=False,
xy_match_line="",
grid=False,
label_rotation=None,
label_skip=1,
force_freq=None,
drawstyle="default",
por=False,
invert_xaxis=False,
invert_yaxis=False,
round_index=None,
plotting_position="weibull",
prob_plot_sort_values="descending",
source_units=None,
target_units=None,
lag_plot_lag=1,
plot_styles="bright",
hlines_y=None,
hlines_xmin=None,
hlines_xmax=None,
hlines_colors=None,
hlines_linestyles="-",
vlines_x=None,
vlines_ymin=None,
vlines_ymax=None,
vlines_colors=None,
vlines_linestyles="-",
**kwds,
):
r"""Plot data."""
# Need to work around some old option defaults with the implementation of
# mando
legend = bool(legend == "" or legend == "True" or legend is None)
type = "kde"
import matplotlib
matplotlib.use("Agg")
import matplotlib.pyplot as plt
from matplotlib.ticker import FixedLocator
tsd = tsutils.common_kwds(
input_ts,
skiprows=skiprows,
names=names,
index_type=index_type,
start_date=start_date,
end_date=end_date,
pick=columns,
round_index=round_index,
dropna="all",
source_units=source_units,
target_units=target_units,
clean=clean,
por=por,
)
tsd, lnames = plotutils.check(type, tsd, legend_names)
# This is to help pretty print the frequency
try:
try:
pltfreq = str(tsd.index.freq, "utf-8").lower()
except TypeError:
pltfreq = str(tsd.index.freq).lower()
if pltfreq.split(" ")[0][1:] == "1":
beginstr = 3
else:
beginstr = 1
if pltfreq == "none":
short_freq = ""
else:
# short freq string (day) OR (2 day)
short_freq = "({})".format(pltfreq[beginstr:-1])
except AttributeError:
short_freq = ""
if colors == "auto":
colors = None
else:
colors = tsutils.make_list(colors)
if linestyles == "auto":
linestyles = plotutils.LINE_LIST
else:
linestyles = tsutils.make_list(linestyles)
if bar_hatchstyles == "auto":
bar_hatchstyles = plotutils.HATCH_LIST
else:
bar_hatchstyles = tsutils.make_list(bar_hatchstyles)
if markerstyles == "auto":
markerstyles = plotutils.MARKER_LIST
else:
markerstyles = tsutils.make_list(markerstyles)
if markerstyles is None:
markerstyles = " "
if style != "auto":
nstyle = tsutils.make_list(style)
if len(nstyle) != len(tsd.columns):
raise ValueError(
tsutils.error_wrapper(
"""
You have to have the same number of style strings as time-series to plot.
You supplied '{}' for style which has {} style strings,
but you have {} time-series.
""".format(
style, len(nstyle), len(tsd.columns)
)
)
)
colors = []
markerstyles = []
linestyles = []
for st in nstyle:
colors.append(st[0])
if len(st) == 1:
markerstyles.append(" ")
linestyles.append("-")
continue
if st[1] in plotutils.MARKER_LIST:
markerstyles.append(st[1])
try:
linestyles.append(st[2:])
except IndexError:
linestyles.append(" ")
else:
markerstyles.append(" ")
linestyles.append(st[1:])
if linestyles is None:
linestyles = [" "]
else:
linestyles = [" " if i in [" ", None] else i for i in linestyles]
markerstyles = [" " if i is None else i for i in markerstyles]
if colors is not None:
icolors = itertools.cycle(colors)
else:
icolors = None
imarkerstyles = itertools.cycle(markerstyles)
ilinestyles = itertools.cycle(linestyles)
# Only for bar, barh, bar_stacked, and barh_stacked.
ibar_hatchstyles = itertools.cycle(bar_hatchstyles)
if (
logx is True
or logy is True
or norm_xaxis is True
or norm_yaxis is True
or lognorm_xaxis is True
or lognorm_yaxis is True
):
warnings.warn(
"""
*
* The --logx, --logy, --norm_xaxis, --norm_yaxis, --lognorm_xaxis, and
* --lognorm_yaxis options are deprecated.
*
* For --logx use --xaxis="log"
* For --logy use --yaxis="log"
* For --norm_xaxis use --type="norm_xaxis"
* For --norm_yaxis use --type="norm_yaxis"
* For --lognorm_xaxis use --type="lognorm_xaxis"
* For --lognorm_yaxis use --type="lognorm_yaxis"
*
"""
)
if xaxis == "log":
logx = True
if yaxis == "log":
logy = True
xlim = plotutils.know_your_limits(xlim, axis=xaxis)
ylim = plotutils.know_your_limits(ylim, axis=yaxis)
plot_styles = tsutils.make_list(plot_styles) + ["no-latex"]
style_loc = os.path.join(
os.path.dirname(__file__), os.pardir, "SciencePlots_styles"
)
plot_styles = [
os.path.join(style_loc, i + ".mplstyle")
if os.path.exists(os.path.join(style_loc, i + ".mplstyle"))
else i
for i in plot_styles
]
plt.style.use(plot_styles)
figsize = tsutils.make_list(figsize, n=2)
_, ax = plt.subplots(figsize=figsize)
if type in ["kde", "probability_density"]:
ax = tsd.plot.kde(
legend=legend,
subplots=subplots,
sharex=sharex,
sharey=sharey,
style=None,
logx=logx,
logy=logy,
xlim=xlim,
ylim=ylim,
secondary_y=secondary_y,
figsize=figsize,
)
for index, line in enumerate(ax.lines):
if icolors is not None:
c = next(icolors)
else:
c = None
if imarkerstyles is not None:
m = next(imarkerstyles)
else:
m = None
if ilinestyles is not None:
l = next(ilinestyles)
else:
l = None
if c is not None:
plt.setp(line, color=c)
plt.setp(line, marker=m)
plt.setp(line, linestyle=l)
ytitle = ytitle or "Density"
if legend is True:
plt.legend(loc="best")
if hlines_y is not None:
hlines_y = tsutils.make_list(hlines_y)
hlines_xmin = tsutils.make_list(hlines_xmin)
hlines_xmax = tsutils.make_list(hlines_xmax)
hlines_colors = tsutils.make_list(hlines_colors)
hlines_linestyles = tsutils.make_list(hlines_linestyles)
nxlim = ax.get_xlim()
if hlines_xmin is None:
hlines_xmin = nxlim[0]
if hlines_xmax is None:
hlines_xmax = nxlim[1]
if vlines_x is not None:
vlines_x = tsutils.make_list(vlines_x)
vlines_ymin = tsutils.make_list(vlines_ymin)
vlines_ymax = tsutils.make_list(vlines_ymax)
vlines_colors = tsutils.make_list(vlines_colors)
vlines_linestyles = tsutils.make_list(vlines_linestyles)
nylim = ax.get_ylim()
if vlines_ymin is None:
vlines_ymin = nylim[0]
if vlines_ymax is None:
vlines_ymax = nylim[1]
if type in [
"time",
"xy",
"bar",
"bar_stacked",
"histogram",
"norm_xaxis",
"lognorm_xaxis",
"weibull_xaxis",
"norm_yaxis",
"lognorm_yaxis",
"weibull_yaxis",
]:
if hlines_y is not None:
if type in ["norm_yaxis", "lognorm_yaxis", "weibull_yaxis"]:
hlines_y = ppf(tsutils.make_list(hlines_y))
plt.hlines(
hlines_y,
hlines_xmin,
hlines_xmax,
colors=hlines_colors,
linestyles=hlines_linestyles,
)
if vlines_x is not None:
if type in ["norm_xaxis", "lognorm_xaxis", "weibull_xaxis"]:
vlines_x = ppf(tsutils.make_list(vlines_x))
plt.vlines(
vlines_x,
vlines_ymin,
vlines_ymax,
colors=vlines_colors,
linestyles=vlines_linestyles,
)
plt.xlabel(xtitle)
plt.ylabel(ytitle)
if invert_xaxis is True:
plt.gca().invert_xaxis()
if invert_yaxis is True:
plt.gca().invert_yaxis()
plt.grid(grid)
plt.title(title)
plt.tight_layout()
if ofilename is not None:
plt.savefig(ofilename)
return plt
kde.__doc__ = kde_cli.__doc__
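# --- Usage sketch (not part of the original module) ---
# A minimal, hypothetical example of calling kde() directly from Python. The
# DataFrame, file name, and option values below are illustrative assumptions;
# if input_ts does not accept an in-memory DataFrame in this toolbox, pass a
# CSV file name instead.
#
#     import numpy as np
#     import pandas as pd
#
#     df = pd.DataFrame(
#         {"flow": np.random.lognormal(size=365)},
#         index=pd.date_range("2000-01-01", periods=365, freq="D"),
#     )
#     kde(
#         input_ts=df,
#         ofilename="kde.png",
#         plot_styles="bright",
#         hlines_y=[0.1],
#         hlines_colors="red",
#     )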
| 29.918706
| 100
| 0.530984
| 3,699
| 34,227
| 4.785347
| 0.148418
| 0.01949
| 0.019321
| 0.013672
| 0.402011
| 0.351393
| 0.322581
| 0.298853
| 0.274109
| 0.257726
| 0
| 0.005824
| 0.322815
| 34,227
| 1,143
| 101
| 29.944882
| 0.757874
| 0.563327
| 0
| 0.440909
| 0
| 0
| 0.040892
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.004545
| false
| 0
| 0.029545
| 0
| 0.036364
| 0.002273
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
7c241e9ea6651f1832b530bacf0b946a3f610e8c
| 2,255
|
py
|
Python
|
src/models/GNN.py
|
3verlyn/DL-abstract-argumentation
|
885e442077f5f8e576092c6648077e00ceb79dff
|
[
"MIT"
] | 6
|
2020-05-01T10:04:16.000Z
|
2021-12-12T06:35:00.000Z
|
src/models/GNN.py
|
3verlyn/DL-abstract-argumentation
|
885e442077f5f8e576092c6648077e00ceb79dff
|
[
"MIT"
] | 3
|
2020-05-01T09:58:16.000Z
|
2021-12-05T09:24:42.000Z
|
src/models/GNN.py
|
3verlyn/DL-abstract-argumentation
|
885e442077f5f8e576092c6648077e00ceb79dff
|
[
"MIT"
] | 3
|
2021-12-01T12:09:40.000Z
|
2022-03-08T07:35:10.000Z
|
from collections import OrderedDict
import torch
import torch.nn as nn
from torch_geometric.data.batch import Batch
class GNN(nn.Module):
def __init__(self, mp_steps, **config):
super().__init__()
self.mp_steps = mp_steps
self.update_fns = self.assign_update_fns()
self.readout_fns = self.assign_readout_fns()
def assign_update_fns(self) -> OrderedDict:
raise NotImplementedError
def assign_readout_fns(self) -> dict:
raise NotImplementedError
def forward(self, batch: Batch, output_all_steps=True):
edge_index = batch.edge_index
sections = (
torch.bincount(batch.batch).tolist() if hasattr(batch, "batch") else None
)
hiddens = self.initialize(batch)
del batch
# update attributes with update and aggregation step
outputs = {element: [] for element in self.readout_fns.keys()}
for step in range(self.mp_steps):
hiddens = self.step(edge_index=edge_index, sections=sections, **hiddens)
if not output_all_steps and (step + 1) != self.mp_steps:
continue
for element, readout_fn in self.readout_fns.items():
outputs[element].append(readout_fn(**hiddens))
return outputs
def initialize(self, batch):
hiddens = {}
# initialize attributes through embeddings and initialize lstm states to None
for element in self.embeddings.keys():
embedding = self.embeddings[element](batch[f"{element}_input"])
hiddens.update(
{
f"{element}_input": embedding,
f"{element}_embedding": embedding.clone(),
f"{element}_lstm": None,
}
)
return hiddens
def step(self, edge_index, sections, **hiddens):
"""
Perform a message passing step by propagating information and updating each element
"""
for element, update_fn in self.update_fns.items():
hiddens[f"{element}_embedding"], hiddens[f"{element}_lstm"] = update_fn(
edge_index=edge_index, sections=sections, element=element, **hiddens
)
return hiddens
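# --- Illustrative subclass (not from the repository) ---
# A minimal sketch of how a concrete model might plug into the GNN base class.
# The element name "node", the embedding sizes, and the sum-aggregation update
# below are assumptions for illustration only.
class ToyNodeGNN(GNN):
    def __init__(self, mp_steps, num_node_types=4, hidden_dim=8):
        super().__init__(mp_steps)
        # one embedding table per element; initialize() looks up batch["node_input"]
        self.embeddings = nn.ModuleDict(
            {"node": nn.Embedding(num_node_types, hidden_dim)}
        )
        self.msg_linear = nn.Linear(hidden_dim, hidden_dim)
        self.readout_linear = nn.Linear(hidden_dim, 1)

    def assign_update_fns(self) -> OrderedDict:
        return OrderedDict([("node", self.update_node)])

    def assign_readout_fns(self) -> dict:
        return {"node": self.readout_node}

    def update_node(self, edge_index, sections, element, **hiddens):
        h = hiddens["node_embedding"]
        src, dst = edge_index
        # sum messages from source nodes into their destination nodes
        agg = torch.zeros_like(h).index_add_(0, dst, h[src])
        new_h = torch.relu(self.msg_linear(h + agg))
        # the second return value is stored as the per-element lstm state
        return new_h, hiddens["node_lstm"]

    def readout_node(self, **hiddens):
        return self.readout_linear(hiddens["node_embedding"])
# Hypothetical usage with torch_geometric:
#     from torch_geometric.data import Batch, Data
#     data = Data(node_input=torch.tensor([0, 1]), edge_index=torch.tensor([[0], [1]]))
#     outputs = ToyNodeGNN(mp_steps=2)(Batch.from_data_list([data]))
#     # outputs["node"] holds one readout tensor per message passing step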
| 32.214286
| 91
| 0.613747
| 254
| 2,255
| 5.259843
| 0.314961
| 0.047156
| 0.032934
| 0.022455
| 0.050898
| 0.050898
| 0
| 0
| 0
| 0
| 0
| 0.000627
| 0.292683
| 2,255
| 69
| 92
| 32.681159
| 0.836991
| 0.092683
| 0
| 0.085106
| 0
| 0
| 0.049975
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.12766
| false
| 0
| 0.085106
| 0
| 0.297872
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
7c247e4df77036ee1f8b8a7c4396fc03bed606ad
| 977
|
py
|
Python
|
configs/baselines/DACN/GNN/GCN_res_layer.py
|
vivek-r-2000/BoundaryNet
|
fce8d51a516646c1001116d03872dbba9e4c5082
|
[
"MIT"
] | 17
|
2021-06-07T12:30:23.000Z
|
2022-03-07T06:32:25.000Z
|
configs/baselines/DACN/GNN/GCN_res_layer.py
|
vivek-r-2000/BoundaryNet
|
fce8d51a516646c1001116d03872dbba9e4c5082
|
[
"MIT"
] | 2
|
2021-07-13T13:24:14.000Z
|
2022-03-08T07:21:09.000Z
|
configs/baselines/DACN/GNN/GCN_res_layer.py
|
vivek-r-2000/BoundaryNet
|
fce8d51a516646c1001116d03872dbba9e4c5082
|
[
"MIT"
] | 4
|
2021-06-26T15:12:44.000Z
|
2021-11-08T16:36:52.000Z
|
import math
import torch
import torch.nn as nn
from torch.nn.modules.module import Module
from GNN.GCN_layer import GraphConvolution
class GraphResConvolution(Module):
"""
Simple GCN layer, similar to https://arxiv.org/abs/1609.02907
"""
def __init__(self, state_dim, name=''):
super(GraphResConvolution, self).__init__()
self.state_dim = state_dim
self.gcn_1 = GraphConvolution(state_dim, '%s_1' % name)
self.gcn_2 = GraphConvolution(state_dim, '%s_2' % name)
self.relu1 = nn.ReLU()
self.relu2 = nn.ReLU()
self.name = name
def forward(self, input, adj):
output_1 = self.gcn_1(input, adj)
output_1_relu = self.relu1(output_1)
output_2 = self.gcn_2(output_1_relu, adj)
output_2_res = output_2 + input
output = self.relu2(output_2_res)
return output
def __repr__(self):
return self.__class__.__name__ + ' (' + self.name + ')'
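# --- Usage sketch (not from the repository) ---
# GraphConvolution's exact interface is defined in GNN/GCN_layer.py; the shapes
# below are assumptions. The residual block keeps the state dimension fixed so
# that `output_2 + input` is well defined:
#
#     layer = GraphResConvolution(state_dim=64, name='res1')
#     x = torch.randn(16, 64)   # 16 nodes with 64-dim features (assumed shape)
#     adj = torch.eye(16)       # placeholder adjacency (assumed shape)
#     out = layer(x, adj)       # same shape as x because of the residual add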
| 23.829268
| 65
| 0.63869
| 131
| 977
| 4.427481
| 0.328244
| 0.068966
| 0.044828
| 0.055172
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.036885
| 0.250768
| 977
| 41
| 66
| 23.829268
| 0.755464
| 0.062436
| 0
| 0
| 0
| 0
| 0.012209
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.130435
| false
| 0
| 0.217391
| 0.043478
| 0.478261
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
7c24dd7d64e797088cd127f5acf19696ee37ca0f
| 28,569
|
py
|
Python
|
mtools/util/logfile.py
|
lukasvosyka/mtools
|
b94620cef48a9eb71b6a7fa93ad88f70cd36982f
|
[
"Apache-2.0"
] | null | null | null |
mtools/util/logfile.py
|
lukasvosyka/mtools
|
b94620cef48a9eb71b6a7fa93ad88f70cd36982f
|
[
"Apache-2.0"
] | null | null | null |
mtools/util/logfile.py
|
lukasvosyka/mtools
|
b94620cef48a9eb71b6a7fa93ad88f70cd36982f
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python3
from __future__ import print_function
import os
import re
import sys
from datetime import datetime
from math import ceil
from mtools.util.input_source import InputSource
from mtools.util.logevent import LogEvent
class LogFile(InputSource):
"""Log file wrapper class. Handles open file streams or stdin."""
def __init__(self, filehandle):
"""Provide logfile as open file stream or stdin."""
self.filehandle = filehandle
self.name = filehandle.name
self.from_stdin = filehandle.name == "<stdin>"
self._bounds_calculated = False
self._start = None
self._end = None
self._filesize = None
self._num_lines = None
self._restarts = None
self._binary = None
self._timezone = None
self._hostname = None
self._port = None
self._rs_state = None
self._repl_set = None
self._repl_set_members = None
self._repl_set_version = None
self._repl_set_protocol = None
self._storage_engine = None
self._datetime_format = None
self._year_rollover = None
self._shards = None
self._csrs = None
self._chunks_moved_from = None
self._chunks_moved_to = None
self._chunk_splits = None
# Track previous file position for loop detection in _find_curr_line()
self.prev_pos = None
self._has_level = None
# make sure bounds are calculated before starting to iterate,
# including potential year rollovers
self._calculate_bounds()
@property
def start(self):
"""
Lazy evaluation of start and end of logfile.
Returns None for stdin input currently.
"""
if not self._start:
self._calculate_bounds()
return self._start
@property
def end(self):
"""
Lazy evaluation of start and end of logfile.
Returns None for stdin input currently.
"""
if not self._end:
self._calculate_bounds()
return self._end
@property
def timezone(self):
"""Lazy evaluation of timezone of logfile."""
if not self._timezone:
self._calculate_bounds()
return self._timezone
@property
def filesize(self):
"""
Lazy evaluation of start and end of logfile.
Returns None for stdin input currently.
"""
if self.from_stdin:
return None
if not self._filesize:
self._calculate_bounds()
return self._filesize
@property
def datetime_format(self):
"""Lazy evaluation of the datetime format."""
if not self._datetime_format:
self._calculate_bounds()
return self._datetime_format
@property
def has_level(self):
"""Lazy evaluation of the whether the logfile has any level lines."""
if self._has_level is None:
self._iterate_lines()
return self._has_level
@property
def year_rollover(self):
"""Lazy evaluation of the datetime format."""
if self._year_rollover is None:
self._calculate_bounds()
return self._year_rollover
@property
def num_lines(self):
"""
Lazy evaluation of the number of lines.
Returns None for stdin input currently.
"""
if self.from_stdin:
return None
if not self._num_lines:
self._iterate_lines()
return self._num_lines
@property
def restarts(self):
"""Lazy evaluation of all restarts."""
if not self._num_lines:
self._iterate_lines()
return self._restarts
@property
def rs_state(self):
"""Lazy evaluation of all restarts."""
if not self._num_lines:
self._iterate_lines()
return self._rs_state
@property
def binary(self):
"""Lazy evaluation of the binary name."""
if not self._num_lines:
self._iterate_lines()
return self._binary
@property
def hostname(self):
"""Lazy evaluation of the binary name."""
if not self._num_lines:
self._iterate_lines()
return self._hostname
@property
def port(self):
"""Lazy evaluation of the binary name."""
if not self._num_lines:
self._iterate_lines()
return self._port
@property
def versions(self):
"""Return all version changes."""
versions = []
for v, _ in self.restarts:
if len(versions) == 0 or v != versions[-1]:
versions.append(v)
return versions
@property
def repl_set(self):
"""Return the replSet (if available)."""
if not self._num_lines:
self._iterate_lines()
return self._repl_set
@property
def repl_set_members(self):
"""Return the replSet (if available)."""
if not self._num_lines:
self._iterate_lines()
return self._repl_set_members
@property
def repl_set_version(self):
"""Return the replSet (if available)."""
if not self._num_lines:
self._iterate_lines()
return self._repl_set_version
@property
def repl_set_protocol(self):
"""Return the replSet protocolVersion (if available)."""
if not self._num_lines:
self._iterate_lines()
return self._repl_set_protocol
@property
def storage_engine(self):
"""Return storage engine if available."""
if not self._num_lines:
self._iterate_lines()
return self._storage_engine
@property
def shards(self):
"""Lazily return the shards (if available)"""
if not self._shards:
self._find_sharding_info()
return self._shards
@property
def csrs(self):
"""Lazily return the CSRS (if available)"""
if not self._csrs:
self._find_sharding_info()
return self._csrs
@property
def chunks_moved_to(self):
"""Lazily return the chunks moved to this shard (if available)"""
if not self._chunks_moved_to:
self._find_sharding_info()
return self._chunks_moved_to
@property
def chunks_moved_from(self):
"""Lazily return the chunks moved from this shard (if available)"""
if not self._chunks_moved_from:
self._find_sharding_info()
return self._chunks_moved_from
@property
def chunk_splits(self):
"""Lazily return the chunks split in this shard (if available)"""
if not self._chunk_splits:
self._find_sharding_info()
return self._chunk_splits
def next(self):
"""Get next line, adjust for year rollover and hint datetime format."""
# use readline here because next() iterator uses internal readahead
# buffer so seek position is wrong
line = self.filehandle.readline()
if isinstance(line, bytes):
line = line.decode('utf-8', 'replace')
if line == '':
raise StopIteration
line = line.rstrip('\n')
le = LogEvent(line)
# hint format and nextpos from previous line
if self._datetime_format and self._datetime_nextpos is not None:
ret = le.set_datetime_hint(self._datetime_format,
self._datetime_nextpos,
self.year_rollover)
if not ret:
# logevent indicates timestamp format has changed,
# invalidate hint info
self._datetime_format = None
self._datetime_nextpos = None
elif le.datetime:
# gather new hint info from another logevent
self._datetime_format = le.datetime_format
self._datetime_nextpos = le._datetime_nextpos
return le
def __iter__(self):
"""
Iterate over LogFile object.
Return a LogEvent object for each line (generator).
"""
le = None
while True:
try:
le = self.next()
except StopIteration as e:
# end of log file, get end date
if not self.end and self.from_stdin:
if le and le.datetime:
self._end = le.datetime
# future iterations start from the beginning
if not self.from_stdin:
self.filehandle.seek(0)
# return (instead of raising StopIteration exception) per PEP 479
return
# get start date for stdin input
if not self.start and self.from_stdin:
if le and le.datetime:
self._start = le.datetime
try:
yield le
except StopIteration:
return
states = (['PRIMARY', 'SECONDARY', 'DOWN', 'STARTUP', 'STARTUP2',
'RECOVERING', 'ROLLBACK', 'ARBITER', 'UNKNOWN'])
def __len__(self):
"""Return the number of lines in a log file."""
return self.num_lines
def _iterate_lines(self):
"""Count number of lines (can be expensive)."""
self._num_lines = 0
self._restarts = []
self._rs_state = []
ln = 0
for ln, line in enumerate(self.filehandle):
if isinstance(line, bytes):
line = line.decode("utf-8", "replace")
if (self._has_level is None and
line[28:31].strip() in LogEvent.log_levels and
line[31:39].strip() in LogEvent.log_components):
self._has_level = True
# find version string (fast check to eliminate most lines)
if "version" in line[:100]:
logevent = LogEvent(line)
restart = self._check_for_restart(logevent)
if restart:
self._restarts.append((restart, logevent))
if "starting :" in line or "starting:" in line:
# look for hostname, port
match = re.search('port=(?P<port>\d+).*host=(?P<host>\S+)',
line)
if match:
self._hostname = match.group('host')
self._port = match.group('port')
""" For 3.0 the "[initandlisten] options:" long entry contained the
"engine" field if WiredTiger was the storage engine. There were
only two engines, MMAPv1 and WiredTiger
"""
if "[initandlisten] options:" in line:
match = re.search('replSet: "(?P<replSet>\S+)"', line)
if match:
self._repl_set = match.group('replSet')
match = re.search('engine: "(?P<engine>\S+)"', line)
if match:
self._storage_engine = match.group('engine')
else:
self._storage_engine = 'mmapv1'
""" For 3.2 the "[initandlisten] options:" no longer contains the
"engine" field So now we have to look for the "[initandlisten]
wiredtiger_open config:" which was present in 3.0, but would
now tell us definitively that wiredTiger is being used
"""
if "[initandlisten] wiredtiger_open config:" in line:
self._storage_engine = 'wiredTiger'
if "command admin.$cmd command: { replSetInitiate:" in line:
match = re.search('{ _id: "(?P<replSet>\S+)", '
'members: (?P<replSetMembers>[^]]+ ])', line)
if match:
self._repl_set = match.group('replSet')
self._repl_set_members = match.group('replSetMembers')
# Replica set config logging in MongoDB 3.0+
new_config = ("New replica set config in use: ")
if new_config in line:
match = re.search('{ _id: "(?P<replSet>\S+)", '
'version: (?P<replSetVersion>\d+), ', line)
if match:
self._repl_set = match.group('replSet')
self._repl_set_version = match.group('replSetVersion')
match = re.search(', protocolVersion: (?P<replSetProtocol>\d+), ', line)
if match:
self._repl_set_protocol = match.group('replSetProtocol')
match = re.search('members: (?P<replSetMembers>[^]]+ ])', line)
if match:
self._repl_set_members = match.group('replSetMembers')
# if ("is now in state" in line and
# next(state for state in states if line.endswith(state))):
if "is now in state" in line:
tokens = line.split()
# 2.6
if tokens[1].endswith(']'):
pos = 4
else:
pos = 5
host = tokens[pos]
rs_state = tokens[-1]
state = (host, rs_state, LogEvent(line))
self._rs_state.append(state)
continue
if "[rsMgr] replSet" in line:
tokens = line.split()
if self._hostname:
host = self._hostname + ':' + self._port
else:
host = os.path.basename(self.name)
host += ' (self)'
if tokens[-1] in self.states:
rs_state = tokens[-1]
else:
# 2.6
if tokens[1].endswith(']'):
pos = 2
else:
pos = 6
rs_state = ' '.join(tokens[pos:])
state = (host, rs_state, LogEvent(line))
self._rs_state.append(state)
continue
self._num_lines = ln + 1
# reset logfile
self.filehandle.seek(0)
def _check_for_restart(self, logevent):
if (logevent.thread == 'initandlisten' and
"db version v" in logevent.line_str):
self._binary = 'mongod'
elif logevent.thread == 'mongosMain' and ('MongoS' in logevent.line_str or
'mongos' in logevent.line_str):
self._binary = 'mongos'
else:
return False
version = re.search(r'(\d\.\d\.\d+)', logevent.line_str)
if version:
version = version.group(1)
return version
else:
return False
def _calculate_bounds(self):
"""Calculate beginning and end of logfile."""
if self._bounds_calculated:
# Assume no need to recalc bounds for lifetime of a Logfile object
return
if self.from_stdin:
return False
# we should be able to find a valid log line within max_start_lines
max_start_lines = 10
lines_checked = 0
# get start datetime
for line in self.filehandle:
logevent = LogEvent(line)
lines_checked += 1
if logevent.datetime:
self._start = logevent.datetime
self._timezone = logevent.datetime.tzinfo
self._datetime_format = logevent.datetime_format
self._datetime_nextpos = logevent._datetime_nextpos
break
if lines_checked > max_start_lines:
break
# sanity check before attempting to find end date
if (self._start is None):
raise SystemExit("Error: <%s> does not appear to be a supported "
"MongoDB log file format" % self.filehandle.name)
# get end datetime (lines are at most 10k,
# go back 30k at most to make sure we catch one)
self.filehandle.seek(0, 2)
self._filesize = self.filehandle.tell()
self.filehandle.seek(-min(self._filesize, 30000), 2)
for line in reversed(self.filehandle.readlines()):
logevent = LogEvent(line)
if logevent.datetime:
self._end = logevent.datetime
break
# if there was a roll-over, subtract 1 year from start time
if self._end < self._start:
self._start = self._start.replace(year=self._start.year - 1)
self._year_rollover = self._end
else:
self._year_rollover = False
# reset logfile
self.filehandle.seek(0)
self._bounds_calculated = True
return True
def _find_curr_line(self, prev=False):
"""
Internal helper function.
Find the current (or previous if prev=True) line in a log file based on
the current seek position.
"""
curr_pos = self.filehandle.tell()
# jump back 15k characters (at most) and find last newline char
jump_back = min(self.filehandle.tell(), 15000)
self.filehandle.seek(-jump_back, 1)
buff = self.filehandle.read(jump_back)
self.filehandle.seek(curr_pos, 0)
if prev and self.prev_pos is not None and self.prev_pos == curr_pos:
# Number of characters to show before/after the log offset
error_context = 300
self.filehandle.seek(-error_context, 1)
buff = self.filehandle.read(curr_pos)
hr = "-" * 60
print("Fatal log parsing loop detected trying to find previous "
"log line near offset %s in %s:\n\n%s\n%s\n"
"<--- (current log parsing offset) \n%s\n%s\n"
% (curr_pos, self.name, hr, buff[:error_context],
buff[error_context:error_context + 1], hr),
file=sys.stderr)
raise SystemExit("Cannot parse %s with requested options"
% self.filehandle.name)
else:
self.prev_pos = curr_pos
if isinstance(buff, bytes):
buff = buff.decode("utf-8", "replace")
newline_pos = buff.rfind('\n')
if prev:
newline_pos = buff[:newline_pos].rfind('\n')
# move back to last newline char
if newline_pos == -1:
self.filehandle.seek(0)
return self.next()
self.filehandle.seek(newline_pos - jump_back + 1, 1)
# roll forward until we found a line with a datetime
try:
logevent = self.next()
while not logevent.datetime:
logevent = self.next()
return logevent
except StopIteration:
# reached end of file
return None
def _find_sharding_info(self):
"""
Iterate over file and find any sharding related information
"""
self._shards = []
self._chunks_moved_from = []
self._chunks_moved_to = []
self._chunk_splits = []
prev_line = ""
for line in self.filehandle:
if isinstance(line, bytes):
line = line.decode("utf-8", "replace")
if self.binary == "mongos":
if "Starting new replica set monitor for" in line:
if "[mongosMain]" in line:
match = re.search("for (?P<csrsName>\w+)/"
"(?P<replSetMembers>\S+)", line)
if match:
csrs_info = (match.group('csrsName'),
match.group('replSetMembers'))
self._csrs = csrs_info
else:
match = re.search("for (?P<shardName>\w+)/"
"(?P<replSetMembers>\S+)", line)
if match:
shard_info = (match.group('shardName'),
match.group('replSetMembers'))
self._shards.append(shard_info)
elif self.binary == "mongod":
logevent = LogEvent(line)
if "New replica set config in use" in line:
if "configsvr: true" in line:
match = re.search(' _id: "(?P<replSet>\S+)".*'
'members: (?P<replSetMembers>[^]]+ ])', line)
if match:
self._csrs = (
match.group('replSet'),
match.group('replSetMembers')
)
if "Starting new replica set monitor for" in line:
match = re.search("for (?P<replSet>\w+)/"
"(?P<replSetMembers>\S+)", line)
if match:
if self._csrs and match.group('replSet') != self._csrs[0]:
self._shards.append((
match.group('replSet'),
match.group('replSetMembers')
))
elif not self._csrs:
self._csrs = (
match.group('replSet'),
match.group('replSetMembers')
)
if "moveChunk.from" in line:
logevent = LogEvent(line)
match = re.search('ns: "(?P<namespace>\S+)".*'
'details: { (?P<range>.*\}).*'
'to: "(?P<movedTo>\S+)".*note: "(?P<note>\S+)"', line)
if match:
time = logevent.datetime
chunk_range = match.group('range')
namespace = match.group('namespace')
moved_to = match.group('movedTo')
note = match.group('note')
if note == "success":
errmsg = None
steps = re.findall('(?P<steps>step \d of \d): (?P<stepTimes>\d+)', line)
else:
match = re.search(':: caused by :: (?P<errmsg>\S+):', prev_line)
steps = None
if match:
errmsg = match.group('errmsg')
else:
errmsg = "Unknown"
chunk_migration = (time, chunk_range, moved_to, namespace, steps, note, errmsg)
self._chunks_moved_from.append(chunk_migration)
if "moveChunk.to" in line:
logevent = LogEvent(line)
match = re.search('ns: "(?P<namespace>\S+)".*'
'details: { (?P<range>.*\}).*.*note: "(?P<note>\S+)"', line)
if match:
time = logevent.datetime
chunk_range = match.group('range')
namespace = match.group('namespace')
# TODO: alter this to find moved from shard name when SERVER-45770 TICKET is added
moved_from = "Unknown"
note = match.group('note')
if note == "success":
errmsg = None
steps = re.findall('(?P<steps>step \d of \d): (?P<stepTimes>\d+)', line)
else:
steps = None
match = re.search('errmsg: "(?P<errmsg>.*)"', line)
if match:
errmsg = match.group('errmsg')
chunk_migration = (time, chunk_range, moved_from, namespace, steps, note, errmsg)
self._chunks_moved_to.append(chunk_migration)
if "Finding the split vector for" in line:
logevent = LogEvent(line)
match = re.search('for (?P<namespace>\S+).*'
'numSplits: (?P<numSplits>\d+)', line)
if match:
time = logevent.datetime
split_range = None
namespace = match.group("namespace")
numSplits = match.group('numSplits')
success = None
time_taken = 0
error = None
self._chunk_splits.append((time, split_range, namespace, numSplits, success, time_taken, error))
elif "splitVector" in line:
logevent = LogEvent(line)
match = re.search('splitVector: "(?P<namespace>\S+)".*,'
' (?P<range>min:.*), max.*op_msg (?P<time_taken>\d+)', line)
if match:
time = logevent.datetime
split_range = match.group("range")
namespace = match.group("namespace")
time_taken = match.group("time_taken")
numSplits = 0
success = True
error = None
self._chunk_splits.append((time, split_range, namespace, numSplits, success, time_taken, error))
elif "Unable to auto-split chunk" in line:
logevent = LogEvent(line)
match = re.search("chunk \[(?P<range>.*)\) "
'in namespace (?P<namespace>\S+)'
' :: caused by :: (?P<error>\S+): ', line)
if match:
time = logevent.datetime
split_range = match.group("range")
namespace = match.group("namespace")
numSplits = 0
success = False
time_taken = 0
error = match.group("error")
self._chunk_splits.append((time, split_range, namespace, numSplits, success, time_taken, error))
elif "jumbo" in line:
logevent = LogEvent(line)
match = re.search('migration (?P<namespace>\S+): \[(?P<range>.*)\)', prev_line)
if match:
time = logevent.datetime
split_range = match.group("range")
namespace = match.group("namespace")
numSplits = 0
success = False
time_taken = 0
error = "Jumbo"
self._chunk_splits.append((time, split_range, namespace, numSplits, success, time_taken, error))
prev_line = line
# reset logfile
self.filehandle.seek(0)
def fast_forward(self, start_dt):
"""
Fast-forward file to given start_dt datetime obj using binary search.
Only fast for files. Streams need to be forwarded manually, and it will
miss the first line that would otherwise match (as it consumes the log
line).
"""
if self.from_stdin:
# skip lines until start_dt is reached
return
else:
# fast bisection path
max_mark = self.filesize
step_size = max_mark
# check if start_dt is already smaller than first datetime
self.filehandle.seek(0)
le = self.next()
if le.datetime and le.datetime >= start_dt:
self.filehandle.seek(0)
return
le = None
self.filehandle.seek(0)
# search for lower bound
while abs(step_size) > 100:
step_size = ceil(step_size / 2.)
self.filehandle.seek(step_size, 1)
le = self._find_curr_line()
if not le:
break
if le.datetime >= start_dt:
step_size = -abs(step_size)
else:
step_size = abs(step_size)
if not le:
return
# now walk backwards until we found a truly smaller line
while self.filehandle.tell() >= 2 and (le.datetime is None or
le.datetime >= start_dt):
self.filehandle.seek(-2, 1)
le = self._find_curr_line(prev=True)
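# --- Usage sketch (not part of mtools) ---
# A hypothetical example of reading a MongoDB log with LogFile; "mongod.log"
# and the start datetime are placeholders.
#
#     from datetime import datetime
#
#     with open("mongod.log") as fh:
#         logfile = LogFile(fh)
#         print(logfile.start, logfile.end, logfile.binary, logfile.hostname)
#         # binary-search to the first event at or after the given datetime
#         logfile.fast_forward(datetime(2020, 1, 1, tzinfo=logfile.timezone))
#         for logevent in logfile:
#             pass  # LogEvent objects from the fast-forwarded position onward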
| 35.755945
| 116
| 0.509573
| 2,991
| 28,569
| 4.704112
| 0.14109
| 0.028429
| 0.015352
| 0.018479
| 0.399716
| 0.32914
| 0.291898
| 0.253518
| 0.231059
| 0.201919
| 0
| 0.006625
| 0.397704
| 28,569
| 798
| 117
| 35.800752
| 0.811065
| 0.128846
| 0
| 0.428058
| 0
| 0.001799
| 0.098892
| 0.014378
| 0
| 0
| 0
| 0.001253
| 0
| 1
| 0.061151
| false
| 0
| 0.014388
| 0
| 0.154676
| 0.003597
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
7c26833e5360e6495c23a5b485ec7547b6bafa06
| 2,136
|
py
|
Python
|
tests/svg.py
|
Tillsten/pyqtgraph
|
0045863165fe526988c58cf4f8232ae2d261a5ee
|
[
"MIT"
] | null | null | null |
tests/svg.py
|
Tillsten/pyqtgraph
|
0045863165fe526988c58cf4f8232ae2d261a5ee
|
[
"MIT"
] | null | null | null |
tests/svg.py
|
Tillsten/pyqtgraph
|
0045863165fe526988c58cf4f8232ae2d261a5ee
|
[
"MIT"
] | null | null | null |
"""
SVG export test
"""
import test
import pyqtgraph as pg
app = pg.mkQApp()
class SVGTest(test.TestCase):
#def test_plotscene(self):
#pg.setConfigOption('foreground', (0,0,0))
#w = pg.GraphicsWindow()
#w.show()
#p1 = w.addPlot()
#p2 = w.addPlot()
#p1.plot([1,3,2,3,1,6,9,8,4,2,3,5,3], pen={'color':'k'})
#p1.setXRange(0,5)
#p2.plot([1,5,2,3,4,6,1,2,4,2,3,5,3], pen={'color':'k', 'cosmetic':False, 'width': 0.3})
#app.processEvents()
#app.processEvents()
#ex = pg.exporters.SVGExporter.SVGExporter(w.scene())
#ex.export(fileName='test.svg')
def test_simple(self):
scene = pg.QtGui.QGraphicsScene()
#rect = pg.QtGui.QGraphicsRectItem(0, 0, 100, 100)
#scene.addItem(rect)
#rect.setPos(20,20)
#rect.translate(50, 50)
#rect.rotate(30)
#rect.scale(0.5, 0.5)
#rect1 = pg.QtGui.QGraphicsRectItem(0, 0, 100, 100)
#rect1.setParentItem(rect)
#rect1.setFlag(rect1.ItemIgnoresTransformations)
#rect1.setPos(20, 20)
#rect1.scale(2,2)
#el1 = pg.QtGui.QGraphicsEllipseItem(0, 0, 100, 100)
#el1.setParentItem(rect1)
##grp = pg.ItemGroup()
#grp.setParentItem(rect)
#grp.translate(200,0)
##grp.rotate(30)
#rect2 = pg.QtGui.QGraphicsRectItem(0, 0, 100, 25)
#rect2.setFlag(rect2.ItemClipsChildrenToShape)
#rect2.setParentItem(grp)
#rect2.setPos(0,25)
#rect2.rotate(30)
#el = pg.QtGui.QGraphicsEllipseItem(0, 0, 100, 50)
#el.translate(10,-5)
#el.scale(0.5,2)
#el.setParentItem(rect2)
grp2 = pg.ItemGroup()
scene.addItem(grp2)
grp2.scale(100,100)
rect3 = pg.QtGui.QGraphicsRectItem(0,0,2,2)
rect3.setPen(pg.mkPen(width=1, cosmetic=False))
grp2.addItem(rect3)
ex = pg.exporters.SVGExporter.SVGExporter(scene)
ex.export(fileName='test.svg')
if __name__ == '__main__':
test.unittest.main()
| 30.514286
| 96
| 0.557116
| 269
| 2,136
| 4.386617
| 0.304833
| 0.013559
| 0.021186
| 0.084746
| 0.285593
| 0.204237
| 0.077966
| 0.023729
| 0
| 0
| 0
| 0.095455
| 0.279026
| 2,136
| 70
| 97
| 30.514286
| 0.670779
| 0.535112
| 0
| 0
| 0
| 0
| 0.016913
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.0625
| false
| 0
| 0.125
| 0
| 0.25
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
7c26b3633189c7cbd7b00d1addad30f94587f9ec
| 993
|
py
|
Python
|
src/api/models/enums/apschedulerevents.py
|
jedicontributors/pythondataintegrator
|
3e877b367ab9b20185476128ec053db41087879f
|
[
"MIT"
] | 14
|
2020-12-19T15:06:13.000Z
|
2022-01-12T19:52:17.000Z
|
src/api/models/enums/apschedulerevents.py
|
jedicontributors/pythondataintegrator
|
3e877b367ab9b20185476128ec053db41087879f
|
[
"MIT"
] | 43
|
2021-01-06T22:05:22.000Z
|
2022-03-10T10:30:30.000Z
|
src/api/models/enums/apschedulerevents.py
|
jedicontributors/pythondataintegrator
|
3e877b367ab9b20185476128ec053db41087879f
|
[
"MIT"
] | 4
|
2020-12-18T23:10:09.000Z
|
2021-04-02T13:03:12.000Z
|
EVENT_SCHEDULER_STARTED = EVENT_SCHEDULER_START = 2 ** 0
EVENT_SCHEDULER_SHUTDOWN = 2 ** 1
EVENT_SCHEDULER_PAUSED = 2 ** 2
EVENT_SCHEDULER_RESUMED = 2 ** 3
EVENT_EXECUTOR_ADDED = 2 ** 4
EVENT_EXECUTOR_REMOVED = 2 ** 5
EVENT_JOBSTORE_ADDED = 2 ** 6
EVENT_JOBSTORE_REMOVED = 2 ** 7
EVENT_ALL_JOBS_REMOVED = 2 ** 8
EVENT_JOB_ADDED = 2 ** 9
EVENT_JOB_REMOVED = 2 ** 10
EVENT_JOB_MODIFIED = 2 ** 11
EVENT_JOB_EXECUTED = 2 ** 12
EVENT_JOB_ERROR = 2 ** 13
EVENT_JOB_MISSED = 2 ** 14
EVENT_JOB_SUBMITTED = 2 ** 15
EVENT_JOB_MAX_INSTANCES = 2 ** 16
EVENT_ALL = (EVENT_SCHEDULER_STARTED | EVENT_SCHEDULER_SHUTDOWN | EVENT_SCHEDULER_PAUSED |
EVENT_SCHEDULER_RESUMED | EVENT_EXECUTOR_ADDED | EVENT_EXECUTOR_REMOVED |
EVENT_JOBSTORE_ADDED | EVENT_JOBSTORE_REMOVED | EVENT_ALL_JOBS_REMOVED |
EVENT_JOB_ADDED | EVENT_JOB_REMOVED | EVENT_JOB_MODIFIED | EVENT_JOB_EXECUTED |
EVENT_JOB_ERROR | EVENT_JOB_MISSED | EVENT_JOB_SUBMITTED | EVENT_JOB_MAX_INSTANCES)
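# --- Illustrative helpers (not part of the original module) ---
# Each constant above is a distinct power of two, so a mask of interesting
# events is built with bitwise OR and an incoming event code is tested with
# bitwise AND. The mask and helper below are examples only.
EXAMPLE_FAILURE_MASK = EVENT_JOB_ERROR | EVENT_JOB_MISSED | EVENT_JOB_MAX_INSTANCES


def is_failure_event(event_code: int) -> bool:
    """Return True if event_code matches any failure event in the example mask."""
    return bool(event_code & EXAMPLE_FAILURE_MASK)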
| 45.136364
| 96
| 0.75428
| 145
| 993
| 4.648276
| 0.262069
| 0.189911
| 0.062315
| 0.077151
| 0.103858
| 0
| 0
| 0
| 0
| 0
| 0
| 0.05
| 0.17422
| 993
| 22
| 96
| 45.136364
| 0.771951
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
7c272bc2beff83ce709b4ecff735eaf333a85378
| 25,166
|
py
|
Python
|
scripts/build/build/targets.py
|
mrninhvn/matter
|
c577b233db9d2f3a6f87108a062b1699a40c5169
|
[
"Apache-2.0"
] | 2
|
2022-03-29T12:17:41.000Z
|
2022-03-30T13:25:20.000Z
|
scripts/build/build/targets.py
|
mrninhvn/matter
|
c577b233db9d2f3a6f87108a062b1699a40c5169
|
[
"Apache-2.0"
] | null | null | null |
scripts/build/build/targets.py
|
mrninhvn/matter
|
c577b233db9d2f3a6f87108a062b1699a40c5169
|
[
"Apache-2.0"
] | 2
|
2022-02-24T15:42:39.000Z
|
2022-03-04T20:38:07.000Z
|
# Copyright (c) 2021 Project CHIP Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from itertools import combinations
from typing import List
from builders.ameba import AmebaApp, AmebaBoard, AmebaBuilder
from builders.android import AndroidApp, AndroidBoard, AndroidBuilder
from builders.cc13x2x7_26x2x7 import cc13x2x7_26x2x7App, cc13x2x7_26x2x7Builder
from builders.cyw30739 import Cyw30739App, Cyw30739Board, Cyw30739Builder
from builders.efr32 import Efr32App, Efr32Board, Efr32Builder
from builders.esp32 import Esp32App, Esp32Board, Esp32Builder
from builders.host import HostApp, HostBoard, HostBuilder
from builders.infineon import InfineonApp, InfineonBoard, InfineonBuilder
from builders.k32w import K32WApp, K32WBuilder
from builders.mbed import MbedApp, MbedBoard, MbedBuilder, MbedProfile
from builders.nrf import NrfApp, NrfBoard, NrfConnectBuilder
from builders.qpg import QpgApp, QpgBoard, QpgBuilder
from builders.telink import TelinkApp, TelinkBoard, TelinkBuilder
from builders.tizen import TizenApp, TizenBoard, TizenBuilder
from builders.bl602 import Bl602App, Bl602Board, Bl602Builder
from builders.imx import IMXApp, IMXBuilder
class Target:
"""Represents a build target:
Has a name identifier plus parameters on how to build it (what
builder class to use and what arguments are required to produce
the specified build)
"""
def __init__(self, name, builder_class, **kwargs):
self.name = name
self.builder_class = builder_class
self.glob_blacklist_reason = None
self.create_kw_args = kwargs
def Clone(self):
"""Creates a clone of self."""
clone = Target(self.name, self.builder_class,
**self.create_kw_args.copy())
clone.glob_blacklist_reason = self.glob_blacklist_reason
return clone
def Extend(self, suffix, **kargs):
"""Creates a clone of the current object extending its build parameters.
Arguments:
suffix: appended with a "-" as separator to the clone name
**kargs: arguments needed to produce the new build variant
"""
clone = self.Clone()
clone.name += "-" + suffix
clone.create_kw_args.update(kargs)
return clone
def Create(self, runner, repository_path: str, output_prefix: str,
enable_flashbundle: bool):
builder = self.builder_class(
repository_path, runner=runner, **self.create_kw_args)
builder.target = self
builder.identifier = self.name
builder.output_dir = os.path.join(output_prefix, self.name)
builder.enable_flashbundle(enable_flashbundle)
return builder
def GlobBlacklist(self, reason):
clone = self.Clone()
if clone.glob_blacklist_reason:
clone.glob_blacklist_reason += ", "
clone.glob_blacklist_reason += reason
else:
clone.glob_blacklist_reason = reason
return clone
@property
def IsGlobBlacklisted(self):
return self.glob_blacklist_reason is not None
@property
def GlobBlacklistReason(self):
return self.glob_blacklist_reason
class AcceptAnyName:
def Accept(self, name: str):
return True
class AcceptNameWithSubstrings:
def __init__(self, substr: List[str]):
self.substr = substr
def Accept(self, name: str):
for s in self.substr:
if s in name:
return True
return False
class BuildVariant:
def __init__(self, name: str, validator=AcceptAnyName(),
conflicts: List[str] = [], requires: List[str] = [],
**buildargs):
self.name = name
self.validator = validator
self.conflicts = conflicts
self.buildargs = buildargs
self.requires = requires
def HasConflicts(items: List[BuildVariant]) -> bool:
for a, b in combinations(items, 2):
if (a.name in b.conflicts) or (b.name in a.conflicts):
return True
return False
def AllRequirementsMet(items: List[BuildVariant]) -> bool:
"""
Check that item.requires is satisfied for all items in the given list
"""
available = set([item.name for item in items])
for item in items:
for requirement in item.requires:
if requirement not in available:
return False
return True
class VariantBuilder:
"""Handles creating multiple build variants based on a starting target.
"""
def __init__(self, targets: List[Target] = []):
# note the clone in case the default arg is used
self.targets = targets[:]
self.variants = []
self.glob_whitelist = []
def WhitelistVariantNameForGlob(self, name):
"""
Whitelist the specified variant to be allowed for globbing.
By default we do not want a 'build all' to select all variants, so
variants are generally glob-blacklisted.
"""
self.glob_whitelist.append(name)
def AppendVariant(self, **args):
"""
Add another variant to the accepted variants. Arguments are constructor
arguments for BuildVariant.
Example usage:
builder.AppendVariant(name="ipv6only", enable_ipv4=False)
"""
self.variants.append(BuildVariant(**args))
def AllVariants(self):
"""
Yields a list of acceptable variants for the given targets.
Handles conflict resolution between build variants and globbing
whitelist targets.
"""
for target in self.targets:
yield target
# skip variants that do not work for this target
ok_variants = [
v for v in self.variants if v.validator.Accept(target.name)]
# Build every possible variant
for variant_count in range(1, len(ok_variants) + 1):
for subgroup in combinations(ok_variants, variant_count):
if HasConflicts(subgroup):
continue
if not AllRequirementsMet(subgroup):
continue
# Target ready to be created - no conflicts
variant_target = target.Clone()
for option in subgroup:
variant_target = variant_target.Extend(
option.name, **option.buildargs)
# Only a few are whitelisted for globs
name = '-'.join([o.name for o in subgroup])
if name not in self.glob_whitelist:
if not variant_target.IsGlobBlacklisted:
variant_target = variant_target.GlobBlacklist(
'Reduce default build variants')
yield variant_target
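# --- Illustrative helper (not part of the original script) ---
# A minimal sketch of how VariantBuilder expands a target into variant builds;
# the target name is hypothetical and nothing is actually built.
def _example_variant_expansion():
    vb = VariantBuilder(targets=[Target('example-light', HostBuilder)])
    vb.AppendVariant(name="ipv6only", enable_ipv4=False)
    vb.AppendVariant(name="tsan", conflicts=['asan'], use_tsan=True)
    vb.AppendVariant(name="asan", conflicts=['tsan'], use_asan=True)
    vb.WhitelistVariantNameForGlob('ipv6only')
    for target in vb.AllVariants():
        # "tsan" and "asan" never appear together because HasConflicts() skips them
        print(target.name, "(glob-blacklisted)" if target.IsGlobBlacklisted else "")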
def HostTargets():
target = Target(HostBoard.NATIVE.PlatformName(), HostBuilder)
target_native = target.Extend(HostBoard.NATIVE.BoardName(), board=HostBoard.NATIVE)
targets = [target_native]
# x64 linux supports cross compile
cross_compile = (HostBoard.NATIVE.PlatformName() == 'linux') and (HostBoard.NATIVE.BoardName() != HostBoard.ARM64.BoardName())
if cross_compile:
targets.append(target.Extend('arm64', board=HostBoard.ARM64))
app_targets = []
# Don't cross compile some builds
app_targets.append(
target_native.Extend('rpc-console', app=HostApp.RPC_CONSOLE))
app_targets.append(
target_native.Extend('tv-app', app=HostApp.TV_APP))
app_targets.append(
target_native.Extend('tv-casting-app', app=HostApp.TV_CASTING_APP))
app_targets.append(
target_native.Extend('nl-test-runner', app=HostApp.NL_TEST_RUNNER))
for target in targets:
app_targets.append(target.Extend(
'all-clusters', app=HostApp.ALL_CLUSTERS))
if (HostBoard.NATIVE.PlatformName() == 'darwin'):
app_targets.append(target.Extend(
'chip-tool-darwin', app=HostApp.CHIP_TOOL_DARWIN))
app_targets.append(target.Extend('chip-tool', app=HostApp.CHIP_TOOL))
app_targets.append(target.Extend('thermostat', app=HostApp.THERMOSTAT))
app_targets.append(target.Extend('minmdns', app=HostApp.MIN_MDNS))
app_targets.append(target.Extend('light', app=HostApp.LIGHT))
app_targets.append(target.Extend('lock', app=HostApp.LOCK))
app_targets.append(target.Extend('shell', app=HostApp.SHELL))
app_targets.append(target.Extend(
'ota-provider', app=HostApp.OTA_PROVIDER, enable_ble=False))
app_targets.append(target.Extend(
'ota-requestor', app=HostApp.OTA_REQUESTOR, enable_ble=False))
app_targets.append(target.Extend('python-bindings', app=HostApp.PYTHON_BINDINGS))
builder = VariantBuilder()
# Possible build variants. Note that number of potential
# builds is exponential here
builder.AppendVariant(name="same-event-loop", validator=AcceptNameWithSubstrings(
['-chip-tool', '-chip-tool-darwin']), separate_event_loop=False),
builder.AppendVariant(name="no-interactive", validator=AcceptNameWithSubstrings(
['-chip-tool']), interactive_mode=False),
builder.AppendVariant(name="ipv6only", enable_ipv4=False),
builder.AppendVariant(name="no-ble", enable_ble=False),
builder.AppendVariant(name="no-wifi", enable_wifi=False),
builder.AppendVariant(name="tsan", conflicts=['asan'], use_tsan=True),
builder.AppendVariant(name="asan", conflicts=['tsan'], use_asan=True),
builder.AppendVariant(name="libfuzzer", requires=[
"clang"], use_libfuzzer=True),
builder.AppendVariant(name="clang", use_clang=True),
builder.AppendVariant(name="test", extra_tests=True),
builder.WhitelistVariantNameForGlob('no-interactive-ipv6only')
builder.WhitelistVariantNameForGlob('ipv6only')
for target in app_targets:
if ('-rpc-console' in target.name) or ('-python-bindings' in target.name) or ('nl-test-runner' in target.name):
# Single-variant builds
yield target
else:
builder.targets.append(target)
for target in builder.AllVariants():
if cross_compile and 'chip-tool' in target.name and 'arm64' in target.name and '-no-interactive' not in target.name:
# Interactive builds will not compile by default on arm cross compiles
# because libreadline is not part of the default sysroot
yield target.GlobBlacklist('Arm crosscompile does not support libreadline-dev')
else:
yield target
# Without extra build variants
yield target_native.Extend('chip-cert', app=HostApp.CERT_TOOL)
yield target_native.Extend('address-resolve-tool', app=HostApp.ADDRESS_RESOLVE)
yield target_native.Extend('address-resolve-tool-clang', app=HostApp.ADDRESS_RESOLVE,
use_clang=True).GlobBlacklist("Reduce default build variants")
yield target_native.Extend('address-resolve-tool-platform-mdns', app=HostApp.ADDRESS_RESOLVE,
use_platform_mdns=True).GlobBlacklist("Reduce default build variants")
yield target_native.Extend('address-resolve-tool-platform-mdns-ipv6only', app=HostApp.ADDRESS_RESOLVE,
use_platform_mdns=True, enable_ipv4=False).GlobBlacklist("Reduce default build variants")
test_target = Target(HostBoard.NATIVE.PlatformName(), HostBuilder)
for board in [HostBoard.NATIVE, HostBoard.FAKE]:
yield test_target.Extend(board.BoardName() + '-tests', board=board, app=HostApp.TESTS)
def Esp32Targets():
esp32_target = Target('esp32', Esp32Builder)
yield esp32_target.Extend('m5stack-all-clusters', board=Esp32Board.M5Stack, app=Esp32App.ALL_CLUSTERS)
yield esp32_target.Extend('m5stack-all-clusters-ipv6only', board=Esp32Board.M5Stack, app=Esp32App.ALL_CLUSTERS,
enable_ipv4=False)
yield esp32_target.Extend('m5stack-all-clusters-rpc', board=Esp32Board.M5Stack, app=Esp32App.ALL_CLUSTERS,
enable_rpcs=True)
yield esp32_target.Extend('m5stack-all-clusters-rpc-ipv6only', board=Esp32Board.M5Stack, app=Esp32App.ALL_CLUSTERS,
enable_rpcs=True, enable_ipv4=False)
yield esp32_target.Extend('c3devkit-all-clusters', board=Esp32Board.C3DevKit, app=Esp32App.ALL_CLUSTERS)
devkitc = esp32_target.Extend('devkitc', board=Esp32Board.DevKitC)
yield devkitc.Extend('all-clusters', app=Esp32App.ALL_CLUSTERS)
yield devkitc.Extend('all-clusters-ipv6only', app=Esp32App.ALL_CLUSTERS, enable_ipv4=False)
yield devkitc.Extend('shell', app=Esp32App.SHELL)
yield devkitc.Extend('light', app=Esp32App.LIGHT)
yield devkitc.Extend('lock', app=Esp32App.LOCK)
yield devkitc.Extend('bridge', app=Esp32App.BRIDGE)
yield devkitc.Extend('temperature-measurement', app=Esp32App.TEMPERATURE_MEASUREMENT)
yield devkitc.Extend('temperature-measurement-rpc', app=Esp32App.TEMPERATURE_MEASUREMENT, enable_rpcs=True)
yield esp32_target.Extend('qemu-tests', board=Esp32Board.QEMU, app=Esp32App.TESTS)
def Efr32Targets():
efr_target = Target('efr32', Efr32Builder)
board_targets = [
efr_target.Extend('brd4161a', board=Efr32Board.BRD4161A),
efr_target.Extend('brd4163a', board=Efr32Board.BRD4163A).GlobBlacklist(
'only user requested'),
efr_target.Extend('brd4164a', board=Efr32Board.BRD4164A).GlobBlacklist(
'only user requested'),
efr_target.Extend('brd4166a', board=Efr32Board.BRD4166A).GlobBlacklist(
'only user requested'),
efr_target.Extend('brd4170a', board=Efr32Board.BRD4170A).GlobBlacklist(
'only user requested'),
efr_target.Extend('brd4186a', board=Efr32Board.BRD4186A).GlobBlacklist(
'only user requested'),
efr_target.Extend('brd4187a', board=Efr32Board.BRD4187A).GlobBlacklist(
'only user requested'),
efr_target.Extend('brd4304a', board=Efr32Board.BRD4304A).GlobBlacklist(
'only user requested')
]
builder = VariantBuilder()
for board_target in board_targets:
builder.targets.append(board_target.Extend(
'window-covering', app=Efr32App.WINDOW_COVERING))
builder.targets.append(board_target.Extend(
'switch', app=Efr32App.SWITCH))
builder.targets.append(board_target.Extend(
'unit-test', app=Efr32App.UNIT_TEST))
builder.targets.append(
board_target.Extend('light', app=Efr32App.LIGHT))
builder.targets.append(board_target.Extend('lock', app=Efr32App.LOCK))
    # Possible build variants. Note that the number of potential
    # builds is exponential here.
builder.AppendVariant(name="rpc", validator=AcceptNameWithSubstrings(
['-light', '-lock']), enable_rpcs=True)
builder.AppendVariant(name="with-ota-requestor", enable_ota_requestor=True)
builder.WhitelistVariantNameForGlob('rpc')
for target in builder.AllVariants():
yield target
def NrfTargets():
target = Target('nrf', NrfConnectBuilder)
yield target.Extend('native-posix-64-tests', board=NrfBoard.NATIVE_POSIX_64, app=NrfApp.UNIT_TESTS)
targets = [
target.Extend('nrf5340dk', board=NrfBoard.NRF5340DK),
target.Extend('nrf52840dk', board=NrfBoard.NRF52840DK),
]
# Enable nrf52840dongle for all-clusters and lighting app only
yield target.Extend('nrf52840dongle-all-clusters', board=NrfBoard.NRF52840DONGLE, app=NrfApp.ALL_CLUSTERS)
yield target.Extend('nrf52840dongle-light', board=NrfBoard.NRF52840DONGLE, app=NrfApp.LIGHT)
for target in targets:
yield target.Extend('all-clusters', app=NrfApp.ALL_CLUSTERS)
yield target.Extend('lock', app=NrfApp.LOCK)
yield target.Extend('light', app=NrfApp.LIGHT)
yield target.Extend('shell', app=NrfApp.SHELL)
yield target.Extend('pump', app=NrfApp.PUMP)
yield target.Extend('pump-controller', app=NrfApp.PUMP_CONTROLLER)
rpc = target.Extend('light-rpc', app=NrfApp.LIGHT, enable_rpcs=True)
if '-nrf5340dk-' in rpc.name:
rpc = rpc.GlobBlacklist(
'Compile failure due to pw_build args not forwarded to proto compiler. '
'https://pigweed-review.googlesource.com/c/pigweed/pigweed/+/66760')
yield rpc
def AndroidTargets():
target = Target('android', AndroidBuilder)
yield target.Extend('arm-chip-tool', board=AndroidBoard.ARM, app=AndroidApp.CHIP_TOOL)
yield target.Extend('arm64-chip-tool', board=AndroidBoard.ARM64, app=AndroidApp.CHIP_TOOL)
yield target.Extend('x64-chip-tool', board=AndroidBoard.X64, app=AndroidApp.CHIP_TOOL)
yield target.Extend('x86-chip-tool', board=AndroidBoard.X86, app=AndroidApp.CHIP_TOOL)
yield target.Extend('arm64-chip-test', board=AndroidBoard.ARM64, app=AndroidApp.CHIP_TEST)
yield target.Extend('androidstudio-arm-chip-tool', board=AndroidBoard.AndroidStudio_ARM, app=AndroidApp.CHIP_TOOL)
yield target.Extend('androidstudio-arm64-chip-tool', board=AndroidBoard.AndroidStudio_ARM64, app=AndroidApp.CHIP_TOOL)
yield target.Extend('androidstudio-x86-chip-tool', board=AndroidBoard.AndroidStudio_X86, app=AndroidApp.CHIP_TOOL)
yield target.Extend('androidstudio-x64-chip-tool', board=AndroidBoard.AndroidStudio_X64, app=AndroidApp.CHIP_TOOL)
yield target.Extend('arm64-chip-tvserver', board=AndroidBoard.ARM64, app=AndroidApp.CHIP_TVServer)
yield target.Extend('arm-chip-tvserver', board=AndroidBoard.ARM, app=AndroidApp.CHIP_TVServer)
yield target.Extend('x86-chip-tvserver', board=AndroidBoard.X86, app=AndroidApp.CHIP_TVServer)
yield target.Extend('x64-chip-tvserver', board=AndroidBoard.X64, app=AndroidApp.CHIP_TVServer)
yield target.Extend('arm64-chip-tv-casting-app', board=AndroidBoard.ARM64, app=AndroidApp.CHIP_TV_CASTING_APP)
yield target.Extend('arm-chip-tv-casting-app', board=AndroidBoard.ARM, app=AndroidApp.CHIP_TV_CASTING_APP)
def MbedTargets():
target = Target('mbed', MbedBuilder)
targets = [
target.Extend('CY8CPROTO_062_4343W',
board=MbedBoard.CY8CPROTO_062_4343W),
]
app_targets = []
for target in targets:
app_targets.append(target.Extend('lock', app=MbedApp.LOCK))
app_targets.append(target.Extend('light', app=MbedApp.LIGHT))
app_targets.append(target.Extend(
'all-clusters', app=MbedApp.ALL_CLUSTERS))
app_targets.append(target.Extend('pigweed', app=MbedApp.PIGWEED))
app_targets.append(target.Extend('shell', app=MbedApp.SHELL))
for target in app_targets:
yield target.Extend('release', profile=MbedProfile.RELEASE)
yield target.Extend('develop', profile=MbedProfile.DEVELOP).GlobBlacklist(
            'Compile only for debugging purposes - '
'https://os.mbed.com/docs/mbed-os/latest/program-setup/build-profiles-and-rules.html')
yield target.Extend('debug', profile=MbedProfile.DEBUG).GlobBlacklist(
            'Compile only for debugging purposes - '
'https://os.mbed.com/docs/mbed-os/latest/program-setup/build-profiles-and-rules.html')
def InfineonTargets():
target = Target('infineon', InfineonBuilder)
yield target.Extend('p6-lock', board=InfineonBoard.P6BOARD, app=InfineonApp.LOCK)
yield target.Extend('p6-all-clusters', board=InfineonBoard.P6BOARD, app=InfineonApp.ALL_CLUSTERS)
yield target.Extend('p6-light', board=InfineonBoard.P6BOARD, app=InfineonApp.LIGHT)
def AmebaTargets():
ameba_target = Target('ameba', AmebaBuilder)
yield ameba_target.Extend('amebad-all-clusters', board=AmebaBoard.AMEBAD, app=AmebaApp.ALL_CLUSTERS)
yield ameba_target.Extend('amebad-light', board=AmebaBoard.AMEBAD, app=AmebaApp.LIGHT)
yield ameba_target.Extend('amebad-pigweed', board=AmebaBoard.AMEBAD, app=AmebaApp.PIGWEED)
def K32WTargets():
target = Target('k32w', K32WBuilder)
yield target.Extend('light-ota-se', app=K32WApp.LIGHT, release=True, disable_ble=True, se05x=True).GlobBlacklist("Only on demand build")
yield target.Extend('light-release-no-ota', app=K32WApp.LIGHT, tokenizer=True, disable_ota=True, release=True)
yield target.Extend('shell-release', app=K32WApp.SHELL, release=True)
yield target.Extend('lock-release', app=K32WApp.LOCK, release=True)
yield target.Extend('lock-low-power-release', app=K32WApp.LOCK,
low_power=True, release=True).GlobBlacklist("Only on demand build")
def cc13x2x7_26x2x7Targets():
target = Target('cc13x2x7_26x2x7', cc13x2x7_26x2x7Builder)
yield target.Extend('lock-ftd', app=cc13x2x7_26x2x7App.LOCK, openthread_ftd=True)
yield target.Extend('lock-mtd', app=cc13x2x7_26x2x7App.LOCK, openthread_ftd=False)
yield target.Extend('pump', app=cc13x2x7_26x2x7App.PUMP)
yield target.Extend('pump-controller', app=cc13x2x7_26x2x7App.PUMP_CONTROLLER)
yield target.Extend('all-clusters', app=cc13x2x7_26x2x7App.ALL_CLUSTERS)
yield target.Extend('shell', app=cc13x2x7_26x2x7App.SHELL)
def Cyw30739Targets():
yield Target('cyw30739-cyw930739m2evb_01-light', Cyw30739Builder,
board=Cyw30739Board.CYW930739M2EVB_01, app=Cyw30739App.LIGHT)
yield Target('cyw30739-cyw930739m2evb_01-lock', Cyw30739Builder,
board=Cyw30739Board.CYW930739M2EVB_01, app=Cyw30739App.LOCK)
yield Target('cyw30739-cyw930739m2evb_01-ota-requestor', Cyw30739Builder,
board=Cyw30739Board.CYW930739M2EVB_01, app=Cyw30739App.OTA_REQUESTOR).GlobBlacklist(
"Running out of XIP flash space")
yield Target('cyw30739-cyw930739m2evb_01-ota-requestor-no-progress-logging', Cyw30739Builder,
board=Cyw30739Board.CYW930739M2EVB_01, app=Cyw30739App.OTA_REQUESTOR, progress_logging=False)
def QorvoTargets():
target = Target('qpg', QpgBuilder)
yield target.Extend('lock', board=QpgBoard.QPG6105, app=QpgApp.LOCK)
yield target.Extend('light', board=QpgBoard.QPG6105, app=QpgApp.LIGHT)
yield target.Extend('shell', board=QpgBoard.QPG6105, app=QpgApp.SHELL)
yield target.Extend('persistent-storage', board=QpgBoard.QPG6105, app=QpgApp.PERSISTENT_STORAGE)
def TizenTargets():
# Possible build variants.
# NOTE: The number of potential builds is exponential here.
builder = VariantBuilder()
builder.AppendVariant(name="no-ble", enable_ble=False)
builder.AppendVariant(name="no-wifi", enable_wifi=False)
builder.AppendVariant(name="asan", use_asan=True)
target = Target('tizen-arm', TizenBuilder, board=TizenBoard.ARM)
builder.targets.append(target.Extend('light', app=TizenApp.LIGHT))
for target in builder.AllVariants():
yield target
def Bl602Targets():
target = Target('bl602', Bl602Builder)
yield target.Extend('light', board=Bl602Board.BL602BOARD, app=Bl602App.LIGHT)
def IMXTargets():
target = Target('imx', IMXBuilder)
yield target.Extend('chip-tool', app=IMXApp.CHIP_TOOL)
yield target.Extend('lighting-app', app=IMXApp.LIGHT)
yield target.Extend('thermostat', app=IMXApp.THERMOSTAT)
yield target.Extend('all-clusters-app', app=IMXApp.ALL_CLUSTERS)
yield target.Extend('ota-provider-app', app=IMXApp.OTA_PROVIDER)
yield target.Extend('chip-tool-release', app=IMXApp.CHIP_TOOL, release=True)
yield target.Extend('lighting-app-release', app=IMXApp.LIGHT, release=True)
yield target.Extend('thermostat-release', app=IMXApp.THERMOSTAT, release=True)
yield target.Extend('all-clusters-app-release', app=IMXApp.ALL_CLUSTERS, release=True)
yield target.Extend('ota-provider-app-release', app=IMXApp.OTA_PROVIDER, release=True)
ALL = []
target_generators = [
HostTargets(),
Esp32Targets(),
Efr32Targets(),
NrfTargets(),
AndroidTargets(),
MbedTargets(),
InfineonTargets(),
AmebaTargets(),
K32WTargets(),
cc13x2x7_26x2x7Targets(),
Cyw30739Targets(),
QorvoTargets(),
TizenTargets(),
Bl602Targets(),
IMXTargets(),
]
for generator in target_generators:
for target in generator:
ALL.append(target)
# Simple targets added one by one
ALL.append(Target('telink-tlsr9518adk80d-light', TelinkBuilder,
board=TelinkBoard.TLSR9518ADK80D, app=TelinkApp.LIGHT))
ALL.append(Target('telink-tlsr9518adk80d-light-switch', TelinkBuilder,
board=TelinkBoard.TLSR9518ADK80D, app=TelinkApp.SWITCH))
# have a consistent order overall
ALL.sort(key=lambda t: t.name)
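
# Minimal lookup sketch, illustrative only: the sorted ALL list can be filtered by
# substring to locate registered targets. `_targets_matching` is a hypothetical
# helper, not part of the build tooling itself.
def _targets_matching(substring):
    """Return every registered target whose name contains `substring`."""
    return [t for t in ALL if substring in t.name]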
| 42.510135
| 140
| 0.695581
| 2,934
| 25,166
| 5.871166
| 0.160873
| 0.072449
| 0.055265
| 0.025543
| 0.409613
| 0.303088
| 0.218913
| 0.133055
| 0.080518
| 0.054453
| 0
| 0.036247
| 0.197528
| 25,166
| 591
| 141
| 42.582064
| 0.816737
| 0.095645
| 0
| 0.162907
| 0
| 0.005013
| 0.129666
| 0.035727
| 0
| 0
| 0
| 0
| 0
| 1
| 0.080201
| false
| 0
| 0.047619
| 0.007519
| 0.172932
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
7c279f6e16ec9934410f291dea61230ff38bf396
| 4,608
|
py
|
Python
|
src/musegan/data.py
|
TRINITRONIC/musegan
|
0a62e0303a8ff357d7f385dcc6edba76afb132b2
|
[
"MIT"
] | null | null | null |
src/musegan/data.py
|
TRINITRONIC/musegan
|
0a62e0303a8ff357d7f385dcc6edba76afb132b2
|
[
"MIT"
] | null | null | null |
src/musegan/data.py
|
TRINITRONIC/musegan
|
0a62e0303a8ff357d7f385dcc6edba76afb132b2
|
[
"MIT"
] | null | null | null |
"""This file contains functions for loading and preprocessing pianoroll data.
"""
import logging
import numpy as np
import tensorflow.compat.v1 as tf
from musegan.config import SHUFFLE_BUFFER_SIZE, PREFETCH_SIZE
LOGGER = logging.getLogger(__name__)
# --- Data loader --------------------------------------------------------------
def load_data_from_npy(filename):
"""Load and return the training data from a npy file."""
return np.load(filename)
def load_data_from_npz(filename):
"""Load and return the training data from a npz file (sparse format)."""
with np.load(filename) as f:
data = np.zeros(f['shape'], np.bool_)
        data[tuple(f['nonzero'])] = True  # index with a tuple of per-axis coordinate arrays
return data
def load_data(data_source, data_filename):
"""Load and return the training data."""
if data_source == 'sa':
import SharedArray as sa
return sa.attach(data_filename)
if data_source == 'npy':
return load_data_from_npy(data_filename)
if data_source == 'npz':
return load_data_from_npz(data_filename)
raise ValueError("Expect `data_source` to be one of 'sa', 'npy', 'npz'. "
"But get " + str(data_source))
# --- Dataset Utilities -------------------------------------------------------
def random_transpose(pianoroll):
"""Randomly transpose a pianoroll with [-5, 6] semitones."""
semitone = np.random.randint(-5, 6)
if semitone > 0:
pianoroll[:, semitone:, 1:] = pianoroll[:, :-semitone, 1:]
pianoroll[:, :semitone, 1:] = 0
elif semitone < 0:
pianoroll[:, :semitone, 1:] = pianoroll[:, -semitone:, 1:]
pianoroll[:, semitone:, 1:] = 0
return pianoroll
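
# Illustrative check, assuming (for this sketch) a (time, pitch, track) array
# layout: the pitch axis is shifted by a random amount for all tracks except index 0.
def _demo_random_transpose():
    roll = np.zeros((4, 84, 3), np.float32)
    roll[:, 40, 1:] = 1.  # one sustained note on tracks 1 and 2
    shifted = random_transpose(roll.copy())
    return int(np.nonzero(shifted[0, :, 1])[0][0])  # a pitch index in 35..45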
def set_pianoroll_shape(pianoroll, data_shape):
"""Set the pianoroll shape and return the pianoroll."""
pianoroll.set_shape(data_shape)
return pianoroll
def set_label_shape(label):
"""Set the label shape and return the label."""
label.set_shape([1])
return label
# --- Sampler ------------------------------------------------------------------
def get_samples(n_samples, data, labels=None, use_random_transpose=False):
"""Return some random samples of the training data."""
indices = np.random.choice(len(data), n_samples, False)
if np.issubdtype(data.dtype, np.bool_):
sample_data = data[indices] * 2. - 1.
else:
sample_data = data[indices]
if use_random_transpose:
sample_data = np.array([random_transpose(x) for x in sample_data])
if labels is None:
return sample_data
return sample_data, labels[indices]
# --- Tensorflow Dataset -------------------------------------------------------
def _gen_data(data, labels=None):
"""Data Generator."""
if labels is None:
for item in data:
if np.issubdtype(data.dtype, np.bool_):
yield item * 2. - 1.
else:
yield item
else:
for i, item in enumerate(data):
if np.issubdtype(data.dtype, np.bool_):
yield (item * 2. - 1., labels[i])
else:
yield (item, labels[i])
def get_dataset(data, labels=None, batch_size=None, data_shape=None,
use_random_transpose=False, num_threads=1):
"""Create and return a tensorflow dataset from an array."""
if labels is None:
dataset = tf.data.Dataset.from_generator(
lambda: _gen_data(data), tf.float32)
if use_random_transpose:
dataset = dataset.map(
lambda pianoroll: tf.py_func(
random_transpose, [pianoroll], tf.float32),
num_parallel_calls=num_threads)
dataset = dataset.map(lambda pianoroll: set_pianoroll_shape(
pianoroll, data_shape), num_parallel_calls=num_threads)
else:
assert len(data) == len(labels), (
"Lengths of `data` and `lables` do not match.")
dataset = tf.data.Dataset.from_generator(
lambda: _gen_data(data, labels), [tf.float32, tf.int32])
if use_random_transpose:
dataset = dataset.map(
lambda pianoroll, label: (
tf.py_func(random_transpose, [pianoroll], tf.float32),
label),
num_parallel_calls=num_threads)
dataset = dataset.map(
lambda pianoroll, label: (set_pianoroll_shape(
pianoroll, data_shape), set_label_shape(label)),
num_parallel_calls=num_threads)
dataset = dataset.shuffle(SHUFFLE_BUFFER_SIZE).repeat().batch(batch_size)
return dataset.prefetch(PREFETCH_SIZE)
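
# Minimal end-to-end sketch, illustrative only: feed random data through
# `get_dataset`; the batch size and shape below are arbitrary example values.
def _demo_get_dataset():
    data = np.random.uniform(-1., 1., (32, 4, 84, 3)).astype(np.float32)
    dataset = get_dataset(data, batch_size=8, data_shape=data.shape[1:])
    iterator = tf.data.make_one_shot_iterator(dataset)
    return iterator.get_next()  # a (8, 4, 84, 3) float32 batch tensor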
| 39.724138
| 80
| 0.59809
| 556
| 4,608
| 4.771583
| 0.223022
| 0.050886
| 0.040709
| 0.040709
| 0.401055
| 0.352808
| 0.335846
| 0.282699
| 0.232944
| 0.162835
| 0
| 0.009494
| 0.24566
| 4,608
| 115
| 81
| 40.069565
| 0.75374
| 0.175781
| 0
| 0.266667
| 0
| 0
| 0.033663
| 0
| 0
| 0
| 0
| 0
| 0.011111
| 1
| 0.1
| false
| 0
| 0.055556
| 0
| 0.277778
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
7c2b65379c3bd0e388f419a0d07e73a9770aad35
| 48,787
|
py
|
Python
|
visnav/algo/orig/tools.py
|
oknuutti/hw_visnav
|
5254b8bdd146548413554c00e6e76264a2540e8b
|
[
"MIT"
] | null | null | null |
visnav/algo/orig/tools.py
|
oknuutti/hw_visnav
|
5254b8bdd146548413554c00e6e76264a2540e8b
|
[
"MIT"
] | null | null | null |
visnav/algo/orig/tools.py
|
oknuutti/hw_visnav
|
5254b8bdd146548413554c00e6e76264a2540e8b
|
[
"MIT"
] | null | null | null |
import math
import time
import numpy as np
import numba as nb
import quaternion # adds to numpy # noqa # pylint: disable=unused-import
import sys
import scipy
from astropy.coordinates import SkyCoord
from scipy.interpolate import RectBivariateSpline
from scipy.interpolate import NearestNDInterpolator
# from scipy.spatial.ckdtree import cKDTree
from visnav.settings import *
class PositioningException(Exception):
pass
class Stopwatch:
# from https://www.safaribooksonline.com/library/view/python-cookbook-3rd/9781449357337/ch13s13.html
def __init__(self, elapsed=0.0, func=time.perf_counter):
self._elapsed = elapsed
self._func = func
self._start = None
@property
def elapsed(self):
return self._elapsed + ((self._func() - self._start) if self.running else 0)
def start(self):
if self._start is not None:
raise RuntimeError('Already started')
self._start = self._func()
def stop(self):
if self._start is None:
raise RuntimeError('Not started')
end = self._func()
self._elapsed += end - self._start
self._start = None
def reset(self):
self._elapsed = 0.0
@property
def running(self):
return self._start is not None
def __enter__(self):
self.start()
return self
def __exit__(self, *args):
self.stop()
def sphere_angle_radius(loc, r):
return np.arcsin(r / np.linalg.norm(loc, axis=1))
def dist_across_and_along_vect(A, b):
""" A: array of vectors, b: axis vector """
lat, lon, r = cartesian2spherical(*b)
q = ypr_to_q(lat, lon, 0).conj()
R = quaternion.as_rotation_matrix(q)
Ab = R.dot(A.T).T
d = Ab[:, 0:1]
r = np.linalg.norm(Ab[:, 1:3], axis=1).reshape((-1, 1))
return r, d
def point_vector_dist(A, B, dist_along_v=False):
""" A: point, B: vector """
# (length of b)**2
normB2 = (B ** 2).sum(-1).reshape((-1, 1))
# a dot b vector product (project a on b but also times length of b)
diagAB = (A * B).sum(-1).reshape((-1, 1))
# A projected along B (projection = a dot b/||b|| * b/||b||)
A_B = (diagAB / normB2) * B
# vector from projected A to A, it is perpendicular to B
AB2A = A - A_B
# diff vector lengths
normD = np.sqrt((AB2A ** 2).sum(-1)).reshape((-1, 1))
return (normD, diagAB / np.sqrt(normB2)) if dist_along_v else normD
def sc_asteroid_max_shift_error(A, B):
"""
Calculate max error between two set of vertices when projected to camera,
A = estimated vertex positions
B = true vertex positions
Error is a vector perpendicular to B, i.e. A - A||
"""
# diff vector lengths
normD = point_vector_dist(A, B)
# max length of diff vectors
return np.max(normD)
@nb.njit(nb.f8[:](nb.f8[:], nb.f8[:]))
def cross3d(left, right):
# for short vectors cross product is faster in pure python than with numpy.cross
x = ((left[1] * right[2]) - (left[2] * right[1]))
y = ((left[2] * right[0]) - (left[0] * right[2]))
z = ((left[0] * right[1]) - (left[1] * right[0]))
return np.array((x, y, z))
def normalize_v(v):
norm = np.linalg.norm(v)
return v / norm if norm != 0 else v
@nb.njit(nb.types.f8[:](nb.types.f8[:]))
def normalize_v_f8(v):
norm = np.linalg.norm(v)
return v / norm if norm != 0 else v
def generate_field_fft(shape, sd=(0.33, 0.33, 0.34), len_sc=(0.5, 0.5 / 4, 0.5 / 16)):
from visnav.algo.image import ImageProc
sds = sd if getattr(sd, '__len__', False) else [sd]
len_scs = len_sc if getattr(len_sc, '__len__', False) else [len_sc]
assert len(shape) == 2, 'only 2d shapes are valid'
assert len(sds) == len(len_scs), 'len(sd) differs from len(len_sc)'
n = np.prod(shape)
kernel = np.sum(
np.stack([1 / len_sc * sd * n * ImageProc.gkern2d(shape, 1 / len_sc) for sd, len_sc in zip(sds, len_scs)],
axis=2), axis=2)
    f_img = np.random.normal(0, 1, shape) + 1j * np.random.normal(0, 1, shape)  # 1j replaces np.complex(0, 1), removed in NumPy 1.24
f_img = np.real(np.fft.ifft2(np.fft.fftshift(kernel * f_img)))
return f_img
@nb.njit(nb.types.f8[:](nb.types.f8[:], nb.types.f8[:], nb.types.f8[:]))
def _surf_normal(x1, x2, x3):
# a, b, c = np.array(x1, dtype=np.float64), np.array(x2, dtype=np.float64), np.array(x3, dtype=np.float64)
return normalize_v_f8(cross3d(x2-x1, x3-x1))
def surf_normal(x1, x2, x3):
a, b, c = np.array(x1, dtype=np.float64), np.array(x2, dtype=np.float64), np.array(x3, dtype=np.float64)
return _surf_normal(a, b, c)
# return normalize_v_f8(cross3d(b-a, c-a))
def vector_projection(a, b):
return a.dot(b) / b.dot(b) * b
def vector_rejection(a, b):
return a - vector_projection(a, b)
def angle_between_v(v1, v2):
# Notice: only returns angles between 0 and 180 deg
try:
v1 = np.reshape(v1, (1, -1))
v2 = np.reshape(v2, (-1, 1))
n1 = v1 / np.linalg.norm(v1)
n2 = v2 / np.linalg.norm(v2)
cos_angle = n1.dot(n2)
except TypeError as e:
raise Exception('Bad vectors:\n\tv1: %s\n\tv2: %s' % (v1, v2)) from e
return math.acos(np.clip(cos_angle, -1, 1))
def angle_between_v_mx(a, B, normalize=True):
Bn = B / np.linalg.norm(B, axis=1).reshape((-1, 1)) if normalize else B
an = normalize_v(a).reshape((-1, 1)) if normalize else a
return np.arccos(np.clip(Bn.dot(an), -1.0, 1.0))
def angle_between_mx(A, B):
return angle_between_rows(A, B)
def angle_between_rows(A, B, normalize=True):
assert A.shape[1] == 3 and B.shape[1] == 3, 'matrices need to be of shape (n, 3) and (m, 3)'
if A.shape[0] == B.shape[0]:
# from https://stackoverflow.com/questions/50772176/calculate-the-angle-between-the-rows-of-two-matrices-in-numpy/50772253
cos_angles = np.einsum('ij,ij->i', A, B)
if normalize:
p2 = np.einsum('ij,ij->i', A, A)
p3 = np.einsum('ij,ij->i', B, B)
cos_angles /= np.sqrt(p2 * p3)
else:
if normalize:
A = A / np.linalg.norm(A, axis=1).reshape((-1, 1))
B = B / np.linalg.norm(B, axis=1).reshape((-1, 1))
cos_angles = B.dot(A.T)
return np.arccos(np.clip(cos_angles, -1.0, 1.0))
def rand_q(angle):
r = normalize_v(np.random.normal(size=3))
return angleaxis_to_q(np.hstack((angle, r)))
def angle_between_q(q1, q2):
# from https://chrischoy.github.io/research/measuring-rotation/
qd = q1.conj() * q2
return abs(wrap_rads(2 * math.acos(qd.normalized().w)))
def angle_between_q_arr(q1, q2):
qd = quaternion.as_float_array(q1.conj() * q2)
qd = qd / np.linalg.norm(qd, axis=1).reshape((-1, 1))
return np.abs(wrap_rads(2 * np.arccos(qd[:, 0])))
def angle_between_ypr(ypr1, ypr2):
q1 = ypr_to_q(*ypr1)
q2 = ypr_to_q(*ypr2)
return angle_between_q(q1, q2)
def distance_mx(A, B):
    assert A.shape[1] == B.shape[1], 'matrices must have the same number of columns'
k = A.shape[1]
O = np.repeat(A.reshape((-1, 1, k)), B.shape[0], axis=1) - np.repeat(B.reshape((1, -1, k)), A.shape[0], axis=0)
D = np.linalg.norm(O, axis=2)
return D
def q_to_unitbase(q):
U0 = quaternion.as_quat_array([[0, 1, 0, 0], [0, 0, 1, 0], [0, 0, 0, 1.]])
Uq = q * U0 * q.conj()
return quaternion.as_float_array(Uq)[:, 1:]
def equatorial_to_ecliptic(ra, dec):
""" translate from equatorial ra & dec to ecliptic ones """
sc = SkyCoord(ra, dec, unit='deg', frame='icrs', obstime='J2000') \
.transform_to('barycentrictrueecliptic')
return sc.lat.value, sc.lon.value
def q_to_angleaxis(q, compact=False):
theta = math.acos(np.clip(q.w, -1, 1)) * 2.0
v = normalize_v(np.array([q.x, q.y, q.z]))
if compact:
return theta * v
else:
return np.array((theta,) + tuple(v))
def angleaxis_to_q(rv):
""" first angle, then axis """
if len(rv) == 4:
theta = rv[0]
v = normalize_v(np.array(rv[1:]))
elif len(rv) == 3:
theta = math.sqrt(sum(x ** 2 for x in rv))
v = np.array(rv) / (1 if theta == 0 else theta)
else:
raise Exception('Invalid angle-axis vector: %s' % (rv,))
w = math.cos(theta / 2)
v = v * math.sin(theta / 2)
return np.quaternion(w, *v).normalized()
def ypr_to_q(lat, lon, roll):
# Tait-Bryan angles, aka yaw-pitch-roll, nautical angles, cardan angles
# intrinsic euler rotations z-y'-x'', pitch=-lat, yaw=lon
return (
np.quaternion(math.cos(lon / 2), 0, 0, math.sin(lon / 2))
* np.quaternion(math.cos(-lat / 2), 0, math.sin(-lat / 2), 0)
* np.quaternion(math.cos(roll / 2), math.sin(roll / 2), 0, 0)
)
def eul_to_q(angles, order='xyz', reverse=False):
assert len(angles) == len(order), 'len(angles) != len(order)'
q = quaternion.one
idx = {'x': 0, 'y': 1, 'z': 2}
for angle, axis in zip(angles, order):
w = math.cos(angle / 2)
v = [0, 0, 0]
v[idx[axis]] = math.sin(angle / 2)
dq = np.quaternion(w, *v)
q = (dq * q) if reverse else (q * dq)
return q
def q_to_ypr(q):
# from https://math.stackexchange.com/questions/687964/getting-euler-tait-bryan-angles-from-quaternion-representation
q0, q1, q2, q3 = quaternion.as_float_array(q)
roll = np.arctan2(q2 * q3 + q0 * q1, .5 - q1 ** 2 - q2 ** 2)
lat = -np.arcsin(np.clip(-2 * (q1 * q3 - q0 * q2), -1, 1))
lon = np.arctan2(q1 * q2 + q0 * q3, .5 - q2 ** 2 - q3 ** 2)
return lat, lon, roll
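
# Round-trip sketch, illustrative only: ypr_to_q followed by q_to_ypr recovers the
# original angles (away from the gimbal-lock latitudes).
def _demo_ypr_roundtrip(lat=0.3, lon=-1.1, roll=0.7):
    q = ypr_to_q(lat, lon, roll)
    return np.allclose(q_to_ypr(q), (lat, lon, roll))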
def mean_q(qs, ws=None):
"""
returns a (weighted) mean of a set of quaternions
    the idea is to rotate a bit towards each new quaternion from the accumulated rotation so far
    NOTE: not tested properly; might not return the same mean quaternion if the input order changes
"""
wtot = 0
qtot = quaternion.one
for q, w in zip(qs, np.ones((len(qs),)) if ws is None else ws):
ddaa = q_to_angleaxis(qtot.conj() * q)
ddaa[0] = wrap_rads(ddaa[0]) * w / (w + wtot)
qtot = angleaxis_to_q(ddaa) * qtot
wtot += w
return qtot
def q_times_v(q, v):
qv = np.quaternion(0, *v)
qv2 = q * qv * q.conj()
return np.array([qv2.x, qv2.y, qv2.z])
def q_times_mx(q, mx):
qqmx = q * mx2qmx(mx) * q.conj()
aqqmx = quaternion.as_float_array(qqmx)
return aqqmx[:, 1:]
def mx2qmx(mx):
qmx = np.zeros((mx.shape[0], 4))
qmx[:, 1:] = mx
return quaternion.as_quat_array(qmx)
def wrap_rads(a):
return (a + math.pi) % (2 * math.pi) - math.pi
def wrap_degs(a):
return (a + 180) % 360 - 180
def eccentric_anomaly(eccentricity, mean_anomaly, tol=1e-6):
# from http://www.jgiesen.de/kepler/kepler.html
E = mean_anomaly if eccentricity < 0.8 else math.pi
    F = E - eccentricity * math.sin(mean_anomaly) - mean_anomaly
for i in range(30):
if abs(F) < tol:
break
E = E - F / (1.0 - eccentricity * math.cos(E))
F = E - eccentricity * math.sin(E) - mean_anomaly
return round(E / tol) * tol
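
# Worked example, illustrative only: for e = 0.3 and mean anomaly M = 1.0 rad the
# returned E satisfies Kepler's equation E - e*sin(E) = M to within a few times the
# requested tolerance (the result is rounded to the tolerance grid).
def _demo_eccentric_anomaly(e=0.3, M=1.0):
    E = eccentric_anomaly(e, M, tol=1e-6)
    return E, abs(E - e * math.sin(E) - M)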
def solar_elongation(ast_v, sc_q):
sco_x, sco_y, sco_z = q_to_unitbase(sc_q)
if USE_ICRS:
sc = SkyCoord(x=ast_v[0], y=ast_v[1], z=ast_v[2], frame='icrs',
unit='m', representation_type='cartesian', obstime='J2000') \
.transform_to('hcrs') \
.represent_as('cartesian')
ast_v = np.array([sc.x.value, sc.y.value, sc.z.value])
# angle between camera axis and the sun, 0: right ahead, pi: behind
elong = angle_between_v(-ast_v, sco_x)
# direction the sun is at when looking along camera axis
nvec = np.cross(sco_x, ast_v)
direc = angle_between_v(nvec, sco_z)
# decide if direction needs to be negative or not
if np.cross(nvec, sco_z).dot(sco_x) < 0:
direc = -direc
return elong, direc
def find_nearest_lesser(array, value):
I = np.where(array < value)
idx = (np.abs(array - value)).argmin()
return array[I[idx]], I[idx]
def find_nearest_greater(array, value):
I = np.where(array > value)
idx = (np.abs(array - value)).argmin()
return array[I[idx]], I[idx]
def find_nearest(array, value):
idx = (np.abs(array - value)).argmin()
return array[idx], idx
def find_nearest_arr(array, value, ord=None, fun=None):
diff = array - value
idx = np.linalg.norm(diff if fun is None else list(map(fun, diff)), ord=ord, axis=1).argmin()
return array[idx], idx
def find_nearest_n(array, value, r, ord=None, fun=None):
diff = array - value
d = np.linalg.norm(diff if fun is None else list(map(fun, diff)), ord=ord, axis=1)
idxs = np.where(d < r)
return idxs[0]
def find_nearest_each(haystack, needles, ord=None):
assert len(haystack.shape) == 2 and len(needles.shape) == 2 and haystack.shape[1] == needles.shape[1], \
'wrong shapes for haystack and needles, %s and %s, respectively' % (haystack.shape, needles.shape)
c = haystack.shape[1]
diff_mx = np.repeat(needles.reshape((-1, 1, c)), haystack.shape[0], axis=1) - np.repeat(
haystack.reshape((1, -1, c)), needles.shape[0], axis=0)
norm_mx = np.linalg.norm(diff_mx, axis=2, ord=ord)
idxs = norm_mx.argmin(axis=1)
return haystack[idxs], idxs
def cartesian2spherical(x, y, z):
r = math.sqrt(x ** 2 + y ** 2 + z ** 2)
theta = math.acos(z / r)
phi = math.atan2(y, x)
lat = math.pi / 2 - theta
lon = phi
return np.array([lat, lon, r])
def spherical2cartesian(lat, lon, r):
theta = math.pi / 2 - lat
phi = lon
x = r * math.sin(theta) * math.cos(phi)
y = r * math.sin(theta) * math.sin(phi)
z = r * math.cos(theta)
return np.array([x, y, z])
def spherical2cartesian_arr(A, r=None):
theta = math.pi / 2 - A[:, 0]
phi = A[:, 1]
r = (r or A[:, 2])
x = r * np.sin(theta)
y = x * np.sin(phi)
x *= np.cos(phi)
# x = r * np.sin(theta) * np.cos(phi)
# y = r * np.sin(theta) * np.sin(phi)
z = r * np.cos(theta)
return np.vstack([x, y, z]).T
def discretize_v(v, tol=None, lat_range=(-math.pi / 2, math.pi / 2), points=None):
"""
simulate feature database by giving closest light direction with given tolerance
"""
if tol is not None and points is not None or tol is None and points is None:
assert False, 'Give either tol or points'
elif tol is not None:
points = bf2_lat_lon(tol, lat_range=lat_range)
lat, lon, r = cartesian2spherical(*v)
(nlat, nlon), idx = find_nearest_arr(
points,
np.array((lat, lon)),
ord=2,
fun=wrap_rads,
)
ret = spherical2cartesian(nlat, nlon, r)
return ret, idx
def discretize_q(q, tol=None, lat_range=(-math.pi / 2, math.pi / 2), points=None):
"""
simulate feature database by giving closest lat & roll with given tolerance
and set lon to zero as feature detectors are rotation invariant (in opengl coords)
"""
if tol is not None and points is not None or tol is None and points is None:
assert False, 'Give either tol or points'
elif tol is not None:
points = bf2_lat_lon(tol, lat_range=lat_range)
lat, lon, roll = q_to_ypr(q)
(nlat, nroll), idx = find_nearest_arr(
points,
np.array((lat, roll)),
ord=2,
fun=wrap_rads,
)
nq0 = ypr_to_q(nlat, 0, nroll)
return nq0, idx
def bf_lat_lon(tol, lat_range=(-math.pi / 2, math.pi / 2)):
# tol**2 == (step/2)**2 + (step/2)**2 -- 7deg is quite nice in terms of len(lon)*len(lat) == 1260
step = math.sqrt(2) * tol
lat_steps = np.linspace(*lat_range, num=math.ceil((lat_range[1] - lat_range[0]) / step), endpoint=False)[1:]
lon_steps = np.linspace(-math.pi, math.pi, num=math.ceil(2 * math.pi / step), endpoint=False)
return lat_steps, lon_steps
def bf2_lat_lon(tol, lat_range=(-math.pi / 2, math.pi / 2)):
# tol**2 == (step/2)**2 + (step/2)**2 -- 7deg is quite nice in terms of len(lon)*len(lat) == 1260
step = math.sqrt(2) * tol
lat_steps = np.linspace(*lat_range, num=math.ceil((lat_range[1] - lat_range[0]) / step), endpoint=False)[1:]
# similar to https://www.cmu.edu/biolphys/deserno/pdf/sphere_equi.pdf
points = []
for lat in lat_steps:
Mphi = math.ceil(2 * math.pi * math.cos(lat) / step)
lon_steps = np.linspace(-math.pi, math.pi, num=Mphi, endpoint=False)
points.extend(zip([lat] * len(lon_steps), lon_steps))
return points
def robust_mean(arr, discard_percentile=0.2, ret_n=False, axis=None):
J = np.logical_not(np.isnan(arr))
if axis is not None:
J = np.all(J, axis=1 if axis == 0 else 0)
if axis == 0:
arr = arr[J, :]
elif axis == 1:
arr = arr[:, J]
else:
arr = arr[J]
low = np.percentile(arr, discard_percentile, axis=axis)
high = np.percentile(arr, 100 - discard_percentile, axis=axis)
I = np.logical_and(low < arr, arr < high)
if axis is not None:
I = np.all(I, axis=1 if axis == 0 else 0)
m = np.mean(arr[:, I] if axis == 1 else arr[I], axis=axis)
return (m, np.sum(I, axis=axis)) if ret_n else m
def robust_std(arr, discard_percentile=0.2, mean=None, axis=None):
corr = 1
if mean is None:
mean, n = robust_mean(arr, discard_percentile=discard_percentile, ret_n=True, axis=axis)
corr = n / (n - 1)
return np.sqrt(robust_mean((arr - mean) ** 2, discard_percentile=discard_percentile, axis=axis) * corr)
def mv_normal(mean, cov=None, L=None, size=None):
if size is None:
final_shape = []
elif isinstance(size, (int, np.integer)):
final_shape = [size]
else:
final_shape = size
final_shape = list(final_shape[:])
final_shape.append(mean.shape[0])
if L is None and cov is None \
or L is not None and cov is not None:
raise ValueError("you must provide either cov or L (cholesky decomp result)")
if len(mean.shape) != 1:
raise ValueError("mean must be 1 dimensional")
if L is not None:
if (len(L.shape) != 2) or (L.shape[0] != L.shape[1]):
raise ValueError("L must be 2 dimensional and square")
if mean.shape[0] != L.shape[0]:
raise ValueError("mean and L must have same length")
if cov is not None:
if (len(cov.shape) != 2) or (cov.shape[0] != cov.shape[1]):
raise ValueError("cov must be 2 dimensional and square")
if mean.shape[0] != cov.shape[0]:
raise ValueError("mean and cov must have same length")
L = np.linalg.cholesky(cov)
from numpy.random import standard_normal
z = standard_normal(final_shape).reshape(mean.shape[0], -1)
x = L.dot(z).T
x += mean
x.shape = tuple(final_shape)
return x, L
def point_cloud_vs_model_err(points: np.ndarray, model) -> np.ndarray:
faces = np.array([f[0] for f in model.faces], dtype='uint')
vertices = np.array(model.vertices)
errs = get_model_errors(points, vertices, faces)
return errs
# @nb.njit(nb.f8[:](nb.f8[:, :], nb.f8[:, :]), nogil=True)
@nb.njit(nb.f8(nb.f8[:, :], nb.f8[:, :]), nogil=True, cache=True)
def poly_line_intersect(poly, line):
    extend_line = True  # only referenced by the disabled "return intersect point" branch below
eps = 1e-6
none = np.inf # np.zeros(1)
v0v1 = poly[1, :] - poly[0, :]
v0v2 = poly[2, :] - poly[0, :]
dir = line[1, :] - line[0, :]
line_len = math.sqrt(np.sum(dir ** 2))
if line_len < eps:
return none
dir = dir / line_len
pvec = cross3d(dir, v0v2).ravel()
det = np.dot(v0v1, pvec)
if abs(det) < eps:
return none
# backface culling
if False and det < 0:
return none
# frontface culling
if False and det > 0:
return none
inv_det = 1.0 / det
tvec = line[0, :] - poly[0, :]
u = tvec.dot(pvec) * inv_det
if u + eps < 0 or u - eps > 1:
return none
qvec = cross3d(tvec, v0v1).ravel()
v = dir.dot(qvec) * inv_det
if v + eps < 0 or u + v - eps > 1:
return none
t = v0v2.dot(qvec) * inv_det
if True:
# return error directly
return t - line_len
else:
# return actual 3d intersect point
if not extend_line and t - eps > line_len:
return none
return line[0, :] + t * dir
# INVESTIGATE: parallel = True does not speed up at all (or marginally) for some reason even though all cores are in use
@nb.njit(nb.f8(nb.u4[:, :], nb.f8[:, :], nb.f8[:, :]), nogil=True, parallel=False, cache=True)
def intersections(faces, vertices, line):
# pts = np.zeros((10, 3))
# i = 0
min_err = np.ones(faces.shape[0]) * np.inf
for k in nb.prange(1, faces.shape[0]):
err = poly_line_intersect(vertices[faces[k, :], :], line)
min_err[k] = err
# if abs(err) < min_err:
# min_err = err
# if len(pt) == 3:
# pts[i, :] = pt
# i += 1
# if i >= pts.shape[0]:
# print('too many intersects')
# i -= 1
i = np.argmin(np.abs(min_err))
return min_err[i] # pts[0:i, :]
# @nb.jit(nb.f8[:](nb.f8[:, :], nb.f8[:, :], nb.i4[:, :]), nogil=True, parallel=False)
def get_model_errors(points, vertices, faces):
count = len(points)
show_progress(count // 10, 0)
j = 0
devs = np.empty(points.shape[0])
for i in nb.prange(count):
vx = points[i, :]
err = intersections(faces, vertices, np.array(((0, 0, 0), vx)))
if math.isinf(err): # len(pts) == 0:
print('no intersections!')
continue
if False:
idx = np.argmin([np.linalg.norm(pt - vx) for pt in pts])
err = np.linalg.norm(pts[idx]) - np.linalg.norm(vx)
devs[i] = err
if j < i // 10:
show_progress(count // 10, i // 10)
j = i // 10
return devs
def crop_model(model, cam_v, cam_q, x_fov, y_fov):
assert False, 'not implemented'
def augment_model(model, multiplier=3, length_scales=(0, 0.1, 1), sds=(1e-5, 1.6e-4, 2.4e-4)):
assert multiplier > 1 and multiplier % 1 == 0, 'multiplier must be integer and >1'
from scipy.interpolate import LinearNDInterpolator
try:
from sklearn.gaussian_process.kernels import Matern, WhiteKernel
    except ImportError:
print('Requires scikit-learn, install using "conda install scikit-learn"')
sys.exit()
points = np.array(model.vertices)
max_rng = np.max(np.ptp(points, axis=0))
# white noise to ensure positive definite covariance matrix
ls = dict(zip(length_scales, sds))
sd0 = ls.pop(0, 1e-5)
kernel = WhiteKernel(noise_level=sd0 * max_rng)
for l, s in ls.items():
kernel += s ** 2 * Matern(length_scale=l * max_rng, nu=1.5)
assert False, 'not implemented'
# TODO: how is the covariance mx constructed again?
y_cov = kernel(points)
# TODO: sample gp ??? how to tie existing points and generate the new points in between?
aug_points, L = mv_normal(points, cov=y_cov)
# TODO: how to interpolate faces?
pass
# interpolate texture
# TODO: augment texture
interp = LinearNDInterpolator(points, model.texcoords)
aug_texcoords = interp(aug_points)
data = model.as_dict()
    data['faces'] = aug_faces  # NOTE: aug_faces is not defined; unreachable until the TODOs above are resolved
data['vertices'] = aug_points
data['texcoords'] = aug_texcoords
from visnav.iotools import objloader
aug_model = objloader.ShapeModel(data=data)
aug_model.recalc_norms()
return aug_model, L
def apply_noise(model, support=(None, None), L=(None, None), len_sc=SHAPE_MODEL_NOISE_LEN_SC,
noise_lv=SHAPE_MODEL_NOISE_LV['lo'], only_z=False,
tx_noise=0, tx_noise_len_sc=SHAPE_MODEL_NOISE_LEN_SC, tx_hf_noise=True):
Sv, St = support
Lv, Lt = L
inplace = noise_lv == 0 and model.texfile is None
if noise_lv > 0:
noisy_points, avg_dev, Lv = points_with_noise(points=model.vertices, support=Sv, L=Lv,
noise_lv=noise_lv, len_sc=len_sc, only_z=only_z)
else:
noisy_points, avg_dev, Lv = model.vertices, 0, None
tex = model.tex
if tx_noise > 0:
if inplace:
model.tex = np.ones(model.tex.shape)
Lt = Lv if Lt is None and tx_noise == noise_lv and tx_noise_len_sc == len_sc else Lt
tex, tx_avg_dev, Lt = texture_noise(model, support=St, L=Lt, noise_sd=tx_noise,
len_sc=tx_noise_len_sc, hf_noise=tx_hf_noise)
if inplace:
model.tex = tex
noisy_model = model
else:
data = model.as_dict()
data['vertices'] = noisy_points
if tx_noise > 0:
data['tex'] = tex
data['texfile'] = None
from visnav.iotools import objloader
noisy_model = objloader.ShapeModel(data=data)
if noise_lv > 0:
noisy_model.recalc_norms()
else:
noisy_model.normals = model.normals
return noisy_model, avg_dev, (Lv, Lt)
def texture_noise(model, support=None, L=None, noise_sd=SHAPE_MODEL_NOISE_LV['lo'],
len_sc=SHAPE_MODEL_NOISE_LEN_SC, max_rng=None, max_n=1e4, hf_noise=True):
tex = model.load_texture()
if tex is None:
print('tools.texture_noise: no texture loaded')
return [None] * 3
r = np.sqrt(max_n / np.prod(tex.shape[:2]))
    ny, nx = (np.array(tex.shape[:2]) * r).astype(int)  # the np.int alias was removed in NumPy 1.24
n = nx * ny
tx_grid_xx, tx_grid_yy = np.meshgrid(np.linspace(0, 1, nx), np.linspace(0, 1, ny))
tx_grid = np.hstack((tx_grid_xx.reshape((-1, 1)), tx_grid_yy.reshape((-1, 1))))
support = support if support else model
points = np.array(support.vertices)
max_rng = np.max(np.ptp(points, axis=0)) if max_rng is None else max_rng
# use vertices for distances, find corresponding vertex for each pixel
y_cov = None
if L is None:
try:
from sklearn.gaussian_process.kernels import Matern, WhiteKernel
        except ImportError:
print('Requires scikit-learn, install using "conda install scikit-learn"')
sys.exit()
kernel = 1.0 * noise_sd * Matern(length_scale=len_sc * max_rng, nu=1.5) \
+ 0.5 * noise_sd * Matern(length_scale=0.1 * len_sc * max_rng, nu=1.5) \
+ WhiteKernel(
noise_level=1e-5 * noise_sd * max_rng) # white noise for positive definite covariance matrix only
# texture coordinates given so that x points left and *Y POINTS UP*
tex_img_coords = np.array(support.texcoords)
tex_img_coords[:, 1] = 1 - tex_img_coords[:, 1]
_, idxs = find_nearest_each(haystack=tex_img_coords, needles=tx_grid)
tx2vx = support.texture_to_vertex_map()
y_cov = kernel(points[tx2vx[idxs], :] - np.mean(points, axis=0))
if 0:
# for debugging distances
import matplotlib.pyplot as plt
import cv2
from visnav.algo.image import ImageProc
orig_tx = cv2.imread(os.path.join(DATA_DIR, '67p+tex.png'), cv2.IMREAD_GRAYSCALE)
gx, gy = np.gradient(points[tx2vx[idxs], :].reshape((ny, nx, 3)), axis=(1, 0))
gxy = np.linalg.norm(gx, axis=2) + np.linalg.norm(gy, axis=2)
gxy = (gxy - np.min(gxy)) / (np.max(gxy) - np.min(gxy))
grad_img = cv2.resize((gxy * 255).astype('uint8'), orig_tx.shape)
overlaid = ImageProc.merge((orig_tx, grad_img))
plt.figure(1)
plt.imshow(overlaid)
plt.show()
# sample gp
e0, L = mv_normal(np.zeros(n), cov=y_cov, L=L)
e0 = e0.reshape((ny, nx))
# interpolate for final texture
x = np.linspace(np.min(tx_grid_xx), np.max(tx_grid_xx), tex.shape[1])
y = np.linspace(np.min(tx_grid_yy), np.max(tx_grid_yy), tex.shape[0])
interp0 = RectBivariateSpline(tx_grid_xx[0, :], tx_grid_yy[:, 0], e0, kx=1, ky=1)
err0 = interp0(x, y)
if 0:
import matplotlib.pyplot as plt
import cv2
from visnav.algo.image import ImageProc
orig_tx = cv2.imread(os.path.join(DATA_DIR, '67p+tex.png'), cv2.IMREAD_GRAYSCALE)
err_ = err0 if 1 else e0
eimg = (err_ - np.min(err_)) / (np.max(err_) - np.min(err_))
eimg = cv2.resize((eimg * 255).astype('uint8'), orig_tx.shape)
overlaid = ImageProc.merge((orig_tx, eimg))
plt.figure(1)
plt.imshow(overlaid)
plt.show()
err1 = 0
if hf_noise:
e1, L = mv_normal(np.zeros(n), L=L)
e1 = e1.reshape((ny, nx))
interp1 = RectBivariateSpline(tx_grid_xx[0, :], tx_grid_yy[:, 0], e1, kx=1, ky=1)
err_coef = interp1(x, y)
lo, hi = np.min(err_coef), np.max(err_coef)
err_coef = (err_coef - lo) / (hi - lo)
len_sc = 10
err1 = generate_field_fft(tex.shape, (6 * noise_sd, 4 * noise_sd),
(len_sc / 1000, len_sc / 4500)) if hf_noise else 0
err1 *= err_coef
noisy_tex = tex + err0 + err1
noisy_tex /= np.max(noisy_tex)
if 0:
import matplotlib.pyplot as plt
plt.figure(1)
plt.imshow(noisy_tex)
plt.figure(2)
plt.imshow(err0)
plt.figure(3)
plt.imshow(err1)
plt.show()
return noisy_tex, np.std(err0 + err1), L
class NearestKernelNDInterpolator(NearestNDInterpolator):
def __init__(self, *args, k_nearest=None, kernel='gaussian', kernel_sc=None,
kernel_eps=1e-12, query_eps=0.05, max_distance=None, **kwargs):
"""
Parameters
----------
kernel : one of the following functions of distance that give weight to neighbours:
'linear': (kernel_sc/(r + kernel_eps))
'quadratic': (kernel_sc/(r + kernel_eps))**2
'cubic': (kernel_sc/(r + kernel_eps))**3
'gaussian': exp(-(r/kernel_sc)**2)
k_nearest : if given, uses k_nearest neighbours for interpolation regardless of their distances
"""
choices = ('linear', 'quadratic', 'cubic', 'gaussian')
assert kernel in choices, 'kernel must be one of %s' % (choices,)
self._tree_options = kwargs.get('tree_options', {})
super(NearestKernelNDInterpolator, self).__init__(*args, **kwargs)
if max_distance is None:
if kernel_sc is None:
d, _ = self.tree.query(self.points, k=k_nearest)
kernel_sc = np.mean(d) * k_nearest / (k_nearest - 1)
max_distance = kernel_sc * 3
        assert kernel_sc is not None, 'kernel_sc needs to be set'
self.kernel = kernel
self.kernel_sc = kernel_sc
self.kernel_eps = kernel_eps
self.k_nearest = k_nearest
self.max_distance = max_distance
self.query_eps = query_eps
def _linear(self, r):
if scipy.sparse.issparse(r):
return self.kernel_sc / (r + self.kernel_eps)
else:
return self.kernel_sc / (r + self.kernel_eps)
def _quadratic(self, r):
if scipy.sparse.issparse(r):
return np.power(self.kernel_sc / (r.data + self.kernel_eps), 2, out=r.data)
else:
return (self.kernel_sc / (r + self.kernel_eps)) ** 2
def _cubic(self, r):
if scipy.sparse.issparse(r):
return self.kernel_sc / (r + self.kernel_eps).power(3)
else:
return (self.kernel_sc / (r + self.kernel_eps)) ** 3
def _gaussian(self, r):
if scipy.sparse.issparse(r):
            return np.exp(-(r.data / self.kernel_sc) ** 2, out=r.data)  # negate after squaring, matching the dense branch below
else:
return np.exp(-(r / self.kernel_sc) ** 2)
def __call__(self, *args):
"""
Evaluate interpolator at given points.
Parameters
----------
xi : ndarray of float, shape (..., ndim)
Points where to interpolate data at.
"""
from scipy.interpolate.interpnd import _ndim_coords_from_arrays
xi = _ndim_coords_from_arrays(args, ndim=self.points.shape[1])
xi = self._check_call_shape(xi)
xi = self._scale_x(xi)
r, idxs = self.tree.query(xi, self.k_nearest, eps=self.query_eps,
distance_upper_bound=self.max_distance or np.inf)
w = getattr(self, '_' + self.kernel)(r).reshape((-1, self.k_nearest, 1)) + self.kernel_eps
w /= np.sum(w, axis=1).reshape((-1, 1, 1))
        yt = np.vstack((self.values, [0]))  # if idxs[i, j] == len(values), then the i:th point doesn't have a j:th match
yi = np.sum(yt[idxs, :] * w, axis=1)
return yi
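
# Usage sketch, illustrative only: Gaussian-weighted k-nearest-neighbour
# interpolation of scattered 3D samples; all numbers are arbitrary examples.
def _demo_nearest_kernel_interp():
    pts = np.random.uniform(-1, 1, (200, 3))
    vals = np.sin(3 * pts[:, :1])  # keep a trailing value dimension
    interp = NearestKernelNDInterpolator(pts, vals, k_nearest=8,
                                         kernel='gaussian', kernel_sc=0.2)
    return interp(np.zeros((1, 3)))  # weighted estimate near the origin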
def points_with_noise(points, support=None, L=None, noise_lv=SHAPE_MODEL_NOISE_LV['lo'],
len_sc=SHAPE_MODEL_NOISE_LEN_SC, max_rng=None, only_z=False):
try:
from sklearn.gaussian_process.kernels import Matern, WhiteKernel
    except ImportError:
print('Requires scikit-learn, install using "conda install scikit-learn"')
sys.exit()
if support is None:
support = points # [random.sample(list(range(len(points))), min(3000,len(points)))]
n = len(support)
mean = np.mean(points, axis=0)
max_rng = np.max(np.ptp(points, axis=0)) if max_rng is None else max_rng
y_cov = None
if L is None:
kernel = 0.6 * noise_lv * Matern(length_scale=len_sc * max_rng, nu=1.5) \
+ 0.4 * noise_lv * Matern(length_scale=0.1 * len_sc * max_rng, nu=1.5) \
+ WhiteKernel(
noise_level=1e-5 * noise_lv * max_rng) # white noise for positive definite covariance matrix only
y_cov = kernel(support - mean)
# sample gp
e0, L = mv_normal(np.zeros(n), cov=y_cov, L=L)
err = np.exp(e0.astype(points.dtype)).reshape((-1, 1))
if len(err) == len(points):
full_err = err
if DEBUG:
print('using orig gp sampled err')
else:
# interpolate
sc = 0.05 * len_sc * max_rng
interp = NearestKernelNDInterpolator(support - mean, err, k_nearest=12, kernel='gaussian',
kernel_sc=sc, max_distance=sc * 6)
full_err = interp(points - mean).astype(points.dtype)
# maybe extrapolate
nanidx = tuple(np.isnan(full_err).flat)
if np.any(nanidx):
            assert False, "shouldn't happen"
# if DEBUG or not BATCH_MODE:
# print('%sx nans'%np.sum(nanidx))
# naninterp = NearestNDInterpolator(support, err)
# try:
# full_err[nanidx,] = naninterp(points[nanidx, :]).astype(points.dtype)
# except IndexError as e:
# raise IndexError('%s,%s,%s'%(err.shape, full_err.shape, points.shape)) from e
# extra high frequency noise
# white_noise = 1 if True else np.exp(np.random.normal(scale=0.2*noise_lv*max_rng, size=(len(full_err),1)))
if only_z:
add_err_z = (max_rng / 2) * (full_err - 1)
add_err = np.concatenate((np.zeros((len(full_err), 2)), add_err_z), axis=1)
noisy_points = points + add_err
devs = np.abs(noisy_points[:, 2] - points[:, 2]) / (max_rng / 2)
assert np.isclose(devs.flatten(), np.abs(full_err - 1).flatten()).all(), 'something wrong'
else:
# noisy_points = (points-mean)*full_err*white_noise +mean
# r = np.sqrt(np.sum((points - mean)**2, axis=-1)).reshape(-1, 1)
# noisy_points = (points - mean) * (1 + np.log(full_err)/r) + mean
noisy_points = (points - mean) * full_err + mean
devs = np.sqrt(np.sum((noisy_points - points) ** 2, axis=-1) / np.sum((points - mean) ** 2, axis=-1))
if DEBUG or not BATCH_MODE:
print('noise (lv=%.3f): %.3f, %.3f; avg=%.3f' % (
(noise_lv,) + tuple(np.percentile(devs, (68, 95))) + (np.mean(devs),)))
if False:
import matplotlib.pyplot as plt
plt.figure(1, figsize=(8, 8))
# plt.plot(np.concatenate((points[:,0], err0[:,0], err[:,0], points[:,0]*err[:,0])))
plt.subplot(2, 2, 1)
plt.plot(points[:, 0])
plt.title('original', fontsize=12)
plt.subplot(2, 2, 2)
        plt.plot(np.ravel(e0))  # raw GP sample before exponentiation
plt.title('norm-err', fontsize=12)
plt.subplot(2, 2, 3)
plt.plot(err[:, 0])
plt.title('exp-err', fontsize=12)
plt.subplot(2, 2, 4)
plt.plot(noisy_points[:, 0])
plt.title('noisy', fontsize=12)
plt.tight_layout()
plt.show()
assert False, 'exiting'
return noisy_points, np.mean(devs), L
def foreground_idxs(array, max_val=None):
iy, ix = np.where(array < max_val)
idxs = np.concatenate(((iy,), (ix,)), axis=0).T
return idxs
def interp2(array, x, y, max_val=None, max_dist=30, idxs=None, discard_bg=False):
assert y < array.shape[0] and x < array.shape[1], 'out of bounds %s: %s' % (array.shape, (y, x))
v = array[int(y):int(y) + 2, int(x):int(x) + 2]
xf = x - int(x)
yf = y - int(y)
w = np.array((
((1 - yf) * (1 - xf), (1 - yf) * xf),
(yf * (1 - xf), yf * xf),
))
# ignore background depths
if max_val is not None:
idx = v.reshape(1, -1) < max_val * 0.999
else:
idx = ~np.isnan(v.reshape(1, -1))
w_sum = np.sum(w.reshape(1, -1)[idx])
if w_sum > 0:
# ignore background values
val = np.sum(w.reshape(1, -1)[idx] * v.reshape(1, -1)[idx]) / w_sum
elif discard_bg:
return float('nan')
else:
# no foreground values in 2x2 matrix, find nearest foreground value
if idxs is None:
idxs = foreground_idxs(array, max_val)
fallback = len(idxs) == 0
if not fallback:
dist = np.linalg.norm(idxs - np.array((y, x)), axis=1)
i = np.argmin(dist)
val = array[idxs[i, 0], idxs[i, 1]]
# print('\n%s, %s, %s, %s, %s, %s, %s'%(v, x,y,dist[i],idxs[i,1],idxs[i,0],val))
fallback = dist[i] > max_dist
if fallback:
val = np.sum(w * v) / np.sum(w)
return val
def solve_rotation(src_q, dst_q):
""" q*src_q*q.conj() == dst_q, solve for q """
# based on http://web.cs.iastate.edu/~cs577/handouts/quaternion.pdf
# and https://en.wikipedia.org/wiki/Quaternions_and_spatial_rotation#Pairs_of_unit_quaternions_as_rotations_in_4D_space
    # NOTE: not certain whether this works correctly
M = np.zeros((4, 4))
for i in range(len(src_q)):
si = src_q[i]
Pi = np.array((
(si.w, -si.x, -si.y, -si.z),
(si.x, si.w, si.z, -si.y),
(si.y, -si.z, si.w, si.x),
(si.z, si.y, -si.x, si.w),
))
qi = dst_q[i]
Qi = np.array((
(qi.w, -qi.x, -qi.y, -qi.z),
(qi.x, qi.w, -qi.z, qi.y),
(qi.y, qi.z, qi.w, -qi.x),
(qi.z, -qi.y, qi.x, qi.w),
))
M += Pi.T * Qi
w, v = np.linalg.eig(M)
i = np.argmax(w)
res_q = np.quaternion(*v[:, i])
# alt = v.dot(w)
# print('%s,%s'%(res_q, alt))
# res_q = np.quaternion(*alt).normalized()
return res_q
def solve_q_bf(src_q, dst_q):
qs = []
d = []
for res_q in (
np.quaternion(0, 0, 0, 1).normalized(),
np.quaternion(0, 0, 1, 0).normalized(),
np.quaternion(0, 0, 1, 1).normalized(),
np.quaternion(0, 0, -1, 1).normalized(),
np.quaternion(0, 1, 0, 0).normalized(),
np.quaternion(0, 1, 0, 1).normalized(),
np.quaternion(0, 1, 0, -1).normalized(),
np.quaternion(0, 1, 1, 0).normalized(),
np.quaternion(0, 1, -1, 0).normalized(),
np.quaternion(0, 1, 1, 1).normalized(),
np.quaternion(0, 1, 1, -1).normalized(),
np.quaternion(0, 1, -1, 1).normalized(),
np.quaternion(0, 1, -1, -1).normalized(),
np.quaternion(1, 0, 0, 1).normalized(),
np.quaternion(1, 0, 0, -1).normalized(),
np.quaternion(1, 0, 1, 0).normalized(),
np.quaternion(1, 0, -1, 0).normalized(),
np.quaternion(1, 0, 1, 1).normalized(),
np.quaternion(1, 0, 1, -1).normalized(),
np.quaternion(1, 0, -1, 1).normalized(),
np.quaternion(1, 0, -1, -1).normalized(),
np.quaternion(1, 1, 0, 0).normalized(),
np.quaternion(1, -1, 0, 0).normalized(),
np.quaternion(1, 1, 0, 1).normalized(),
np.quaternion(1, 1, 0, -1).normalized(),
np.quaternion(1, -1, 0, 1).normalized(),
np.quaternion(1, -1, 0, -1).normalized(),
np.quaternion(1, 1, 1, 0).normalized(),
np.quaternion(1, 1, -1, 0).normalized(),
np.quaternion(1, -1, 1, 0).normalized(),
np.quaternion(1, -1, -1, 0).normalized(),
np.quaternion(1, 1, 1, -1).normalized(),
np.quaternion(1, 1, -1, 1).normalized(),
np.quaternion(1, 1, -1, -1).normalized(),
np.quaternion(1, -1, 1, 1).normalized(),
np.quaternion(1, -1, 1, -1).normalized(),
np.quaternion(1, -1, -1, 1).normalized(),
np.quaternion(1, -1, -1, -1).normalized(),
):
tq = res_q * src_q * res_q.conj()
qs.append(res_q)
# d.append(1-np.array((tq.w, tq.x, tq.y, tq.z)).dot(np.array((dst_q.w, dst_q.x, dst_q.y, dst_q.z)))**2)
d.append(angle_between_q(tq, dst_q))
i = np.argmin(d)
return qs[i]
def hover_annotate(fig, ax, line, annotations):
annot = ax.annotate("", xy=(0, 0), xytext=(-20, 20), textcoords="offset points",
bbox=dict(boxstyle="round", fc="w"),
arrowprops=dict(arrowstyle="->"))
annot.set_visible(False)
def update_annot(ind):
idx = ind["ind"][0]
try:
# for regular plots
x, y = line.get_data()
annot.xy = (x[idx], y[idx])
except AttributeError:
# for scatter plots
annot.xy = tuple(line.get_offsets()[idx])
text = ", ".join([annotations[n] for n in ind["ind"]])
annot.set_text(text)
annot.get_bbox_patch().set_alpha(0.4)
def hover(event):
vis = annot.get_visible()
if event.inaxes == ax:
cont, ind = line.contains(event)
if cont:
update_annot(ind)
annot.set_visible(True)
fig.canvas.draw_idle()
else:
if vis:
annot.set_visible(False)
fig.canvas.draw_idle()
fig.canvas.mpl_connect("motion_notify_event", hover)
def plot_vectors(pts3d, scatter=True, conseq=True, neg_z=True):
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
fig = plt.figure()
ax = Axes3D(fig)
if scatter:
ax.scatter(pts3d[:, 0], pts3d[:, 1], pts3d[:, 2])
else:
if conseq:
ax.set_prop_cycle('color', map(lambda c: '%f' % c, np.linspace(1, 0, len(pts3d))))
for i, v1 in enumerate(pts3d):
if v1 is not None:
ax.plot((0, v1[0]), (0, v1[1]), (0, v1[2]))
ax.set_xlabel('X')
ax.set_ylabel('Y')
ax.set_zlabel('Z')
if neg_z:
ax.view_init(90, -90)
else:
ax.view_init(-90, -90)
plt.show()
def numeric(s):
try:
float(s)
except ValueError:
return False
return True
def pseudo_huber_loss(a, delta):
# from https://en.wikipedia.org/wiki/Huber_loss
# first +1e-15 is to avoid divide by zero, second to avoid loss becoming zero if delta > 1e7 due to float precision
return delta ** 2 * (np.sqrt(1 + a ** 2 / (delta ** 2 + 1e-15)) - 1 + 1e-15)
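
# Quick numeric check, illustrative only: the pseudo-Huber loss is roughly
# quadratic for |a| << delta (~a**2/2) and roughly linear for |a| >> delta.
def _demo_pseudo_huber():
    return pseudo_huber_loss(np.array([0.1, 10.0]), delta=1.0)  # ~[0.005, 9.05]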
def fixed_precision(val, precision, as_str=False):
if val == 0:
return ('%%.%df' % precision) % val if as_str else val
d = math.ceil(math.log10(abs(val))) - precision
c = 10 ** d
fp_val = round(val / c) * c
return ('%%.%df' % max(0, -d)) % fp_val if as_str else fp_val
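
# Example, illustrative only: round to two significant digits, optionally
# formatting the result as a string.
def _demo_fixed_precision():
    return fixed_precision(0.012345, 2), fixed_precision(12345, 2, as_str=True)  # ~0.012, '12000'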
def plot_quats(quats, conseq=True, wait=True):
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
fig = plt.figure()
ax = Axes3D(fig)
ax.set_xlim(-1, 1)
ax.set_ylim(-1, 1)
ax.set_zlim(-1, 1)
ax.set_xlabel('x')
ax.set_ylabel('y')
ax.set_zlabel('z')
if conseq:
ax.set_prop_cycle('color', map(lambda c: '%f' % c, np.linspace(1, 0, len(quats))))
for i, q in enumerate(quats):
if q is not None:
lat, lon, _ = q_to_ypr(q)
v1 = spherical2cartesian(lat, lon, 1)
v2 = (v1 + normalize_v(np.cross(np.cross(v1, np.array([0, 0, 1])), v1)) * 0.1) * 0.85
v2 = q_times_v(q, v2)
ax.plot((0, v1[0], v2[0]), (0, v1[1], v2[1]), (0, v1[2], v2[2]))
while (wait and not plt.waitforbuttonpress()):
pass
def plot_poses(poses, conseq=True, wait=True, arrow_len=1):
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
fig = plt.figure()
ax = Axes3D(fig)
ax.set_xlabel('x')
ax.set_ylabel('y')
ax.set_zlabel('z')
if conseq:
plt.hsv()
# ax.set_prop_cycle('color', map(lambda c: '%f' % c, np.linspace(.7, 0, len(poses))))
for i, pose in enumerate(poses):
if pose is not None:
q = np.quaternion(*pose[3:])
lat, lon, _ = q_to_ypr(q)
v1 = spherical2cartesian(lat, lon, 1) * arrow_len
v2 = (v1 + normalize_v(np.cross(np.cross(v1, np.array([0, 0, 1])), v1)) * 0.1 * arrow_len) * 0.85
v2 = q_times_v(q, v2)
ax.plot((pose[0], v1[0], v2[0]), (pose[1], v1[1], v2[1]), (pose[2], v1[2], v2[2]))
while (wait and not plt.waitforbuttonpress()):
pass
#
# Not sure if unitbase_to_q works; kept just in case it's still needed:
#
# def unitbase_to_q(b_dst, b_src = [[1, 0, 0], [0, 1, 0], [0, 0, 1]]):
# # based on http://stackoverflow.com/questions/16648452/calculating-\
# # quaternion-for-transformation-between-2-3d-cartesian-coordinate-syst
# # , which is based on http://dx.doi.org/10.1117/12.57955
#
# M = np.zeros((3, 3))
#
# for i, v in enumerate(b_src):
# x = np.matrix(np.outer(v, b_dst[i]))
# M = M + x
#
# N11 = M[0, 0] + M[1, 1] + M[2, 2]
# N22 = M[0, 0] - M[1, 1] - M[2, 2]
# N33 = -M[0, 0] + M[1, 1] - M[2, 2]
# N44 = -M[0, 0] - M[1, 1] + M[2, 2]
# N12 = M[1, 2] - M[2, 1]
# N13 = M[2, 0] - M[0, 2]
# N14 = M[0, 1] - M[1, 0]
# N21 = N12
# N23 = M[0, 1] + M[1, 0]
# N24 = M[2, 0] + M[0, 2]
# N31 = N13
# N32 = N23
# N34 = M[1, 2] + M[2, 1]
# N41 = N14
# N42 = N24
# N43 = N34
#
# N=np.matrix([[N11, N12, N13, N14],\
# [N21, N22, N23, N24],\
# [N31, N32, N33, N34],\
# [N41, N42, N43, N44]])
#
# values, vectors = np.linalg.eig(N)
# quat = vectors[:, np.argmax(values)]
# #quat = np.array(quat).reshape(-1,).tolist()
#
# return np.quaternion(*quat)
import tracemalloc
import os
import linecache
def display_top(top_stats, key_type='lineno', limit=10):
# snapshot = snapshot.filter_traces((
# tracemalloc.Filter(False, "<frozen importlib._bootstrap>"),
# tracemalloc.Filter(False, "<unknown>"),
# ))
# top_stats = snapshot.statistics(key_type, cumulative=True)
print("Top %s lines" % limit)
for index, stat in enumerate(top_stats[:limit], 1):
frame = stat.traceback[0]
# replace "/path/to/module/file.py" with "module/file.py"
filename = os.sep.join(frame.filename.split(os.sep)[-2:])
print("#%s: %s:%s: %.1f MB (x%.0f)"
% (index, filename, frame.lineno, stat.size / 1024 / 1024, stat.count))
line = linecache.getline(frame.filename, frame.lineno).strip()
if line:
print(' %s' % line)
other = top_stats[limit:]
if other:
size = sum(stat.size for stat in other)
print("%s other: %.1f MB" % (len(other), size / 1024 / 1024))
total = sum(stat.size for stat in top_stats)
print("Total allocated size: %.1f MB" % (total / 1024 / 1024))
def show_progress(tot, i):
digits = int(math.ceil(math.log10(tot + 1)))
if i == 0:
print('%s/%d' % ('0' * digits, tot), end='', flush=True)
else:
print(('%s%0' + str(digits) + 'd/%d') % ('\b' * (digits * 2 + 1), i + 1, tot), end='', flush=True)
def smooth1d(xt, x, Y, weight_fun=lambda d: 0.9 ** abs(d)):
    if xt.ndim != 1 or x.ndim != 1:
        raise ValueError("smooth1d only accepts 1 dimension arrays for location")
    if x.shape[0] != Y.shape[0]:
        raise ValueError("different length x and Y")
    D = np.repeat(np.expand_dims(xt, 1), len(x), axis=1) - np.repeat(np.expand_dims(x, 0), len(xt), axis=0)
    weights = np.array(list(map(weight_fun, D.flatten()))).reshape(D.shape)
    Yt = np.sum(Y * weights, axis=1) / np.sum(weights, axis=1)
    return Yt
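# Minimal usage sketch for smooth1d (hypothetical data, illustration only):
# it estimates values on the grid xt as kernel-weighted averages of (x, Y).
#
#   x = np.linspace(0, 10, 20)          # known sample locations
#   Y = np.sin(x)                       # values at those locations
#   xt = np.linspace(0, 10, 100)        # locations to estimate
#   Yt = smooth1d(xt, x, Y)             # smoothed estimates on xt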
| 33.576738 | 130 | 0.579335 | 7,551 | 48,787 | 3.631572 | 0.12899 | 0.006491 | 0.029684 | 0.020969 | 0.302494 | 0.248851 | 0.223033 | 0.205492 | 0.191817 | 0.177595 | 0 | 0.039014 | 0.266567 | 48,787 | 1,452 | 131 | 33.599862 | 0.727349 | 0.152377 | 0 | 0.182663 | 0 | 0 | 0.039557 | 0.000562 | 0 | 0 | 0 | 0.000689 | 0.017544 | 1 | 0.093911 | false | 0.004128 | 0.038184 | 0.011352 | 0.231166 | 0.014448 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
7c2bf254c4e2082b3c9d6ed73d3f8891d0fa09df | 4,245 | py | Python | cirtorch/filters/sobel.py | Tarekbouamer/Image-Retrieval-for-Image-Based-Localization | fcad9af4f558bebb3cbec1d08e49603a452f439d | ["BSD-3-Clause"] | 3 | 2021-01-15T13:58:22.000Z | 2021-01-22T00:03:34.000Z | cirtorch/filters/sobel.py | Tarekbouamer/Image-Retrieval-for-Image-Based-Localization | fcad9af4f558bebb3cbec1d08e49603a452f439d | ["BSD-3-Clause"] | null | null | null | cirtorch/filters/sobel.py | Tarekbouamer/Image-Retrieval-for-Image-Based-Localization | fcad9af4f558bebb3cbec1d08e49603a452f439d | ["BSD-3-Clause"] | null | null | null |
import torch
import torch.nn as nn
import torch.nn.functional as F
from .kernels import (
    get_spatial_gradient_kernel2d,
    get_spatial_gradient_kernel3d,
    normalize_kernel2d
)


def spatial_gradient(input, mode='sobel', order=1, normalized=True):
    """
    Computes the first order image derivative in both x and y using a Sobel operator.
    """
    if not len(input.shape) == 4:
        raise ValueError("Invalid input shape, we expect BxCxHxW. Got: {}"
                         .format(input.shape))
    # allocate kernel
    kernel = get_spatial_gradient_kernel2d(mode, order)
    if normalized:
        kernel = normalize_kernel2d(kernel)
    # prepare kernel
    b, c, h, w = input.shape
    tmp_kernel = kernel.to(input).detach()
    tmp_kernel = tmp_kernel.unsqueeze(1).unsqueeze(1)
    # convolve input tensor with sobel kernel
    kernel_flip = tmp_kernel.flip(-3)
    # pad with "replicate" for spatial dims, but with zeros for channel
    spatial_pad = [
        kernel.size(1) // 2,
        kernel.size(1) // 2,
        kernel.size(2) // 2,
        kernel.size(2) // 2
    ]
    out_channels = 3 if order == 2 else 2
    padded_inp = F.pad(input.reshape(b * c, 1, h, w), spatial_pad, 'replicate')[:, :, None]
    return F.conv3d(padded_inp, kernel_flip, padding=0).view(b, c, out_channels, h, w)


def spatial_gradient3d(input, mode='diff', order=1):
    """
    Computes the first and second order volume derivative in x, y and d using a diff operator.
    """
    if not len(input.shape) == 5:
        raise ValueError("Invalid input shape, we expect BxCxDxHxW. Got: {}"
                         .format(input.shape))
    # allocate kernel
    kernel = get_spatial_gradient_kernel3d(mode, order)
    # prepare kernel
    b, c, d, h, w = input.shape
    tmp_kernel = kernel.to(input).detach()
    tmp_kernel = tmp_kernel.repeat(c, 1, 1, 1, 1)
    # convolve input tensor with grad kernel
    kernel_flip = tmp_kernel.flip(-3)
    # pad with "replicate" for spatial dims, but with zeros for channel
    spatial_pad = [
        kernel.size(2) // 2,
        kernel.size(2) // 2,
        kernel.size(3) // 2,
        kernel.size(3) // 2,
        kernel.size(4) // 2,
        kernel.size(4) // 2
    ]
    out_ch = 6 if order == 2 else 3
    return F.conv3d(F.pad(
        input, spatial_pad, 'replicate'), kernel_flip, padding=0, groups=c).view(b, c, out_ch, d, h, w)


def sobel(input, normalized=True, eps=1e-6):
    """
    Computes the Sobel operator and returns the magnitude per channel.
    """
    if not len(input.shape) == 4:
        raise ValueError("Invalid input shape, we expect BxCxHxW. Got: {}"
                         .format(input.shape))
    # compute the x/y gradients
    edges = spatial_gradient(input, normalized=normalized)
    # unpack the edges
    gx = edges[:, :, 0]
    gy = edges[:, :, 1]
    # compute gradient magnitude
    magnitude = torch.sqrt(gx * gx + gy * gy + eps)
    return magnitude


class SpatialGradient(nn.Module):
    """
    Computes the first order image derivative in both x and y using a Sobel operator.
    """
    def __init__(self, mode='sobel', order=1, normalized=True):
        super(SpatialGradient, self).__init__()
        self.normalized = normalized
        self.order = order
        self.mode = mode

    def forward(self, input):
        return spatial_gradient(input, self.mode, self.order, self.normalized)


class SpatialGradient3d(nn.Module):
    """
    Computes the first and second order volume derivative in x, y and d using a diff operator.
    """
    def __init__(self, mode='diff', order=1):
        super(SpatialGradient3d, self).__init__()
        self.order = order
        self.mode = mode
        self.kernel = get_spatial_gradient_kernel3d(mode, order)

    def forward(self, input):
        return spatial_gradient3d(input, self.mode, self.order)


class Sobel(nn.Module):
    """
    Computes the Sobel operator and returns the magnitude per channel.
    """
    def __init__(self, normalized=True, eps=1e-6):
        super(Sobel, self).__init__()
        self.normalized = normalized
        self.eps = eps

    def forward(self, input):
        return sobel(input, self.normalized, self.eps)
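# Minimal usage sketch (the 8x1x64x64 input shape is an arbitrary example):
#
#   import torch
#   img = torch.rand(8, 1, 64, 64)      # BxCxHxW batch
#   grads = SpatialGradient()(img)      # BxCx2xHxW: d/dx and d/dy
#   mag = Sobel()(img)                  # BxCxHxW gradient magnitude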
| 28.877551 | 103 | 0.627562 | 568 | 4,245 | 4.570423 | 0.209507 | 0.042373 | 0.033898 | 0.01849 | 0.627889 | 0.54584 | 0.440678 | 0.389445 | 0.374807 | 0.374807 | 0 | 0.020051 | 0.259835 | 4,245 | 146 | 104 | 29.075342 | 0.806174 | 0.193404 | 0 | 0.3625 | 0 | 0 | 0.054259 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.1125 | false | 0 | 0.05 | 0.0375 | 0.275 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
7c2c03c407ba0a2ba9a613836bc2fb4601d6b4a8 | 896 | py | Python | PythonCookbook/concurrent_test/findrobots.py | xu6148152/Binea_Python_Project | d943eb5f4685d08f080b372dcf1a7cbd5d63efed | ["MIT"] | null | null | null | PythonCookbook/concurrent_test/findrobots.py | xu6148152/Binea_Python_Project | d943eb5f4685d08f080b372dcf1a7cbd5d63efed | ["MIT"] | null | null | null | PythonCookbook/concurrent_test/findrobots.py | xu6148152/Binea_Python_Project | d943eb5f4685d08f080b372dcf1a7cbd5d63efed | ["MIT"] | null | null | null |
# -*- encoding: utf-8 -*-
import gzip
import io
import glob
from concurrent import futures
def find_robots(filename):
    '''
    Find all of the hosts that access robots.txt in a single log file
    '''
    robots = set()
    with gzip.open(filename) as f:
        for line in io.TextIOWrapper(f, encoding='ascii'):
            fields = line.split()
            if fields[6] == '/robots.txt':
                robots.add(fields[0])
    return robots


def find_all_robots(logdir):
    '''
    Find all hosts across an entire sequence of files
    '''
    files = glob.glob(logdir + '/*.log.gz')
    all_robots = set()
    with futures.ProcessPoolExecutor() as pool:
        for robots in pool.map(find_robots, files):
            all_robots.update(robots)
    return all_robots


if __name__ == '__main__':
    robots = find_all_robots('logs')
    for ipaddr in robots:
        print(ipaddr)
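# For context, find_robots assumes Apache-style access logs, where the quoted
# request line makes the path the seventh whitespace-separated token, so
# fields[0] is the host and fields[6] the path. A hypothetical line:
#
#   205.188.209.10 - - [10/Jul/2012:00:01:02 -0500] "GET /robots.txt HTTP/1.0" 200 71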
| 23.578947 | 69 | 0.618304 | 118 | 896 | 4.550847 | 0.491525 | 0.083799 | 0.048417 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.004587 | 0.270089 | 896 | 37 | 70 | 24.216216 | 0.816514 | 0.157366 | 0 | 0 | 0 | 0 | 0.051176 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.086957 | false | 0 | 0.173913 | 0 | 0.347826 | 0.043478 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
7c2c664c7e1b0b10556e368192b5c6b6dfeac1d6 | 13,634 | py | Python | cnnblstm_with_adabn/cnnblstm_with_adabn.py | Fassial/Air-Writing-with-TL | 9b9047c5bd5aef3a869e2d5166be1c0cf0c5ccf0 | ["MIT"] | 1 | 2021-06-16T16:45:01.000Z | 2021-06-16T16:45:01.000Z | cnnblstm_with_adabn/cnnblstm_with_adabn.py | Fassial/Air-Writing-with-TL | 9b9047c5bd5aef3a869e2d5166be1c0cf0c5ccf0 | ["MIT"] | null | null | null | cnnblstm_with_adabn/cnnblstm_with_adabn.py | Fassial/Air-Writing-with-TL | 9b9047c5bd5aef3a869e2d5166be1c0cf0c5ccf0 | ["MIT"] | 1 | 2020-04-21T01:31:26.000Z | 2020-04-21T01:31:26.000Z |
import os
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
import numpy as np
import matplotlib.pyplot as plt
# local model
import sys
sys.path.append("../network")
import Coral
from lstm import LSTMHardSigmoid
from AdaBN import AdaBN
sys.path.append("../network/AutoEncoder")
import AutoEncoder
class cnnblstm_with_adabn(nn.Module):
PARAMS_FILE = "params.pkl"
PARAMS_AE = "params_ae.pkl"
NET1_ADABN = "net1_adabn"
NET2_ADABN = "net2_adabn"
NET3_ADABN = "net3_adabn"
def __init__(self, time_steps = 800, n_features = 3, n_outputs = 10, use_cuda = False, params_dir = "./params", enable_CORAL = False):
super(cnnblstm_with_adabn, self).__init__()
self.time_steps = time_steps
self.n_features = n_features
self.n_outputs = n_outputs
self.use_cuda = use_cuda
self.params_dir = params_dir
if not os.path.exists(self.params_dir):
os.mkdir(self.params_dir)
self.enable_CORAL = enable_CORAL
self.n_filters = 128
self.kernel_size = 15
self.n_hidden = 150 # 150
self.n_layers = 1
self.bidirectional = True
# self.ae = AutoEncoder.load_AE(type = "ConvAE", time_steps = self.time_steps, n_features = self.n_features, use_cuda = self.use_cuda, params_pkl = os.path.join(self.params_dir, cnnblstm_with_adabn.PARAMS_AE))
# build net1 cnn
self.net1 = nn.Sequential(
nn.Conv1d(in_channels = self.n_features, out_channels = self.n_filters, kernel_size = self.kernel_size),
# nn.Conv1d(in_channels = self.ae.n_filters3, out_channels = self.n_filters, kernel_size = self.kernel_size),
nn.ReLU(),
# nn.Sigmoid(),
nn.Dropout(p = 0.5),
nn.MaxPool1d(kernel_size = 2)
)
# build net1_adabn
self.net1_adabn = AdaBN(self.n_filters, variables_dir = os.path.join(self.params_dir, cnnblstm_with_adabn.NET1_ADABN), use_cuda = self.use_cuda)
# build net2 blstm
# self.net2 = nn.LSTM(input_size = self.n_filters, hidden_size = self.n_hidden, num_layers = self.n_layers, dropout = 0.2, batch_first = True, bidirectional = self.bidirectional, bias = True)
self.net2 = LSTMHardSigmoid(input_size = self.n_filters, hidden_size = self.n_hidden, num_layers = self.n_layers, dropout = 0.2, batch_first = True, bidirectional = self.bidirectional, bias = True)
# build net2_adabn
if self.bidirectional:
n_blstm_output = self.n_hidden * 2
else:
n_blstm_output = self.n_hidden
self.net2_adabn = AdaBN(n_blstm_output, variables_dir = os.path.join(self.params_dir, cnnblstm_with_adabn.NET2_ADABN), use_cuda = self.use_cuda)
# build net3 fc
self.net3 = nn.Sequential(
nn.Linear(n_blstm_output, 50, bias = True),
nn.ReLU(),
# nn.Sigmoid(),
)
# build net3_adabn
self.net3_adabn = AdaBN(50, variables_dir = os.path.join(self.params_dir, cnnblstm_with_adabn.NET3_ADABN), use_cuda = self.use_cuda)
# build net4 fc
self.net4 = nn.Sequential(
nn.Dropout(p = 0.2),
nn.Linear(50, self.n_outputs, bias = True),
nn.Softmax(dim = 1)
)
def init_hidden(self, batch_size):
"""
init blstm's hidden states
"""
if self.bidirectional:
n_layers = self.n_layers * 2
else:
n_layers = self.n_layers
if self.use_cuda:
hidden_state = torch.zeros(n_layers, batch_size, self.n_hidden).cuda()
cell_state = torch.zeros(n_layers, batch_size, self.n_hidden).cuda()
else:
hidden_state = torch.zeros(n_layers, batch_size, self.n_hidden)
cell_state = torch.zeros(n_layers, batch_size, self.n_hidden)
self.hidden = (hidden_state, cell_state)
def reset_parameters(self):
"""
temp useless
Here we reproduce Keras default initialization weights for consistency with Keras version
"""
# get weights & bias set
net1_weights = ((name, param.data) for name, param in self.named_parameters() if (("weight" in name) and (("net1" in name) and ("net1_adabn" not in name))))
net1_biases = ((name, param.data) for name, param in self.named_parameters() if (("bias" in name) and (("net1" in name) and ("net1_adabn" not in name))))
# net2_weights = ((name, param.data) for name, param in self.named_parameters() if (("weight" in name) and (("net2" in name) and ("net2_adabn" not in name))))
# net2_biases = ((name, param.data) for name, param in self.named_parameters() if (("bias" in name) and (("net2" in name) and ("net2_adabn" not in name))))
net3_weights = ((name, param.data) for name, param in self.named_parameters() if (("weight" in name) and (("net3" in name) and ("net3_adabn" not in name))))
net3_biases = ((name, param.data) for name, param in self.named_parameters() if (("bias" in name) and (("net3" in name) and ("net3_adabn" not in name))))
net4_weights = ((name, param.data) for name, param in self.named_parameters() if (("weight" in name) and (("net4" in name) and ("net4_adabn" not in name))))
net4_biases = ((name, param.data) for name, param in self.named_parameters() if (("bias" in name) and (("net4" in name) and ("net4_adabn" not in name))))
# init weights & bias
# self.ae.reset_parameters()
for name, params_data in net1_weights:
# print(name)
nn.init.xavier_uniform_(params_data)
for name, params_data in net1_biases:
nn.init.constant_(params_data, 0)
self.net1_adabn.reset_parameters()
self.net2.reset_parameters() # lstm reset parameters
self.net2_adabn.reset_parameters()
for name, params_data in net3_weights:
nn.init.xavier_uniform_(params_data)
for name, params_data in net3_biases:
nn.init.constant_(params_data, 0)
self.net3_adabn.reset_parameters()
for name, params_data in net4_weights:
nn.init.xavier_uniform_(params_data)
for name, params_data in net4_biases:
nn.init.constant_(params_data, 0)
def forward(self, input):
"""
compute the output of input according to the entire network model
"""
# print(input.shape)
# AutoEncoder
# input = self.ae.encoder(input)
# input = self.ae(input)
# MaxPool1d
maxPool1d_output = self.net1(input)
# maxPool1d_adabn_output = maxPool1d_output
maxPool1d_adabn_output, maxPool1d_output = self.net1_adabn(maxPool1d_output), None
maxPool1d_adabn_t_output = maxPool1d_adabn_output.permute(0, 2, 1).contiguous()
# BiLSTM
(bilstm_output, _), maxPool1d_adabn_t_output = self.net2(maxPool1d_adabn_t_output, None), None
# MaxPooling1D time_steps
bilstm_output = bilstm_output.permute(0, 2, 1)
maxPooling_output, bilstm_output = F.max_pool1d(bilstm_output, kernel_size = bilstm_output.size(2)).squeeze(2), None
# maxPooling_adabn_output = maxPooling_output
maxPooling_adabn_output, maxPooling_output = self.net2_adabn(maxPooling_output), None
# get classifier
net3_output, maxPooling_adabn_output = self.net3(maxPooling_adabn_output), None
net3_adabn_output, net3_output = self.net3_adabn(net3_output), None
linear2_softmax_output, net3_adabn_output = self.net4(net3_adabn_output), None
return linear2_softmax_output
def update_adabn_running_stats(self):
"""
update adabn running states, update mu_j with mu_j_next to start next round
"""
self.net1_adabn.update_running_stats()
self.net2_adabn.update_running_stats()
self.net3_adabn.update_running_stats()
def trainAllLayers(self, train_x, train_y, test_x = None, learning_rate = 0.001, n_epoches = 20, batch_size = 20, shuffle = True):
"""
train all layers of network model
"""
# print(os.environ["CUDA_VISIBLE_DEVICES"])
# CORAL
if self.enable_CORAL:
if test_x is None:
print("ERROR: (in cnnblstm_with_adabn.trainAllLayers) test_x == None!")
return
# review train_x & test_x
train_x = train_x.view(-1, self.time_steps * self.n_features)
test_x = test_x.view(-1, self.time_steps * self.n_features)
# get CORAL(train_x, test_x)
train_x = Coral.CORAL_torch(train_x, test_x)
# review train_x
train_x = train_x.view(-1, self.n_features, self.time_steps)
# optimize all cnn parameters
params = [{"params": model.parameters()} for model in self.children() if model not in [self.ae]]
optimizer = torch.optim.Adam(params, lr = learning_rate)
# the target label is not one-hotted
loss_func = nn.CrossEntropyLoss()
# init params
self.reset_parameters()
# load params
self.load_params()
# set train mode True
self.train()
# get parallel model
parallel_cba = self
if self.use_cuda:
# print("we use cuda!")
parallel_cba = torch.nn.DataParallel(self, device_ids = range(torch.cuda.device_count()))
# parallel_cba = parallel_cba.cuda()
# if use_cuda
if self.use_cuda:
train_x = train_x.cuda()
train_y = train_y.cuda()
"""
# get autoencoder
self.ae = AutoEncoder.train_AE(self.ae, train_x, train_x, n_epoches = 20)
self.ae.save_params()
"""
# get train_data
train_data = torch.utils.data.TensorDataset(train_x, train_y)
# Data Loader for easy mini-batch return in training
train_loader = torch.utils.data.DataLoader(dataset = train_data, batch_size = batch_size, shuffle = shuffle)
# training and testing
for epoch in range(n_epoches):
# init loss & acc
train_loss = 0
train_acc = 0
for step, (b_x, b_y) in enumerate(train_loader): # gives batch data
b_x = b_x.view(-1, self.n_features, self.time_steps) # reshape x to (batch, n_features, time_step)
if self.use_cuda:
b_x, b_y = Variable(b_x).cuda(), Variable(b_y).cuda()
else:
b_x, b_y = Variable(b_x), Variable(b_y)
"""
# get hidden
if self.use_cuda:
self.init_hidden(b_x.size(0) // torch.cuda.device_count())
else:
self.init_hidden(b_x.size(0))
"""
# update adabn running stats
self.update_adabn_running_stats()
# get output
output = parallel_cba(b_x) # CNN_BLSTM output
# get loss
loss = loss_func(output, b_y) # cross entropy loss
train_loss += loss.item() * len(b_y)
_, pre = torch.max(output, 1)
num_acc = (pre == b_y).sum()
train_acc += num_acc.item()
# backward
optimizer.zero_grad() # clear gradients for this training step
loss.backward() # backpropagation, compute gradients
optimizer.step() # apply gradients
# print loss
# if (step + 1) % 5 == 0:
# print("[{}/{}], train loss is: {:.6f}, train acc is: {:.6f}".format(step, len(train_loader), train_loss / ((step + 1) * batch_size), train_acc / ((step + 1) * batch_size)))
print("[{}/{}], train loss is: {:.6f}, train acc is: {:.6f}".format(len(train_loader), len(train_loader), train_loss / (len(train_loader) * batch_size), train_acc / (len(train_loader) * batch_size)))
# save params
self.save_params()
# print("train finish!")
def getTestAccuracy(self, test_x, test_y):
"""
test network model with test set
"""
# init params
self.reset_parameters()
# load params
self.load_params()
# set eval
self.eval()
# get parallel model
parallel_cba = self
if self.use_cuda:
# print("we use cuda!")
parallel_cba = torch.nn.DataParallel(self, device_ids = range(torch.cuda.device_count()))
# parallel_cba = parallel_cba.cuda()
# cuda test_data
with torch.no_grad():
if self.use_cuda:
test_x, test_y = Variable(test_x).cuda(), Variable(test_y).cuda()
else:
test_x, test_y = Variable(test_x), Variable(test_y)
"""
# get hidden
if self.use_cuda:
self.init_hidden(test_x.size(0) // torch.cuda.device_count())
else:
self.init_hidden(test_x.size(0))
"""
# update adabn running stats
self.update_adabn_running_stats()
# get output
with torch.no_grad():
output = parallel_cba(test_x)
# print(output)
prediction = torch.max(output, 1)[1]
pred_y = prediction.cpu().data.numpy()
# print(pred_y)
target_y = test_y.cpu().data.numpy()
# print(test_y)
accuracy = float((pred_y == target_y).astype(int).sum()) / float(target_y.size)
# print("Accuracy: ", str(accuracy))
return accuracy
def save_params(self):
"""
save params & adabn's inner stats
"""
self.save_adabn_variables()
torch.save(self.state_dict(), os.path.join(self.params_dir, cnnblstm_with_adabn.PARAMS_FILE))
# self.ae.save_params()
# print("save_params success!")
def save_adabn_variables(self):
"""
save adabn's inner stats
"""
self.net1_adabn.save_attrs()
self.net2_adabn.save_attrs()
self.net3_adabn.save_attrs()
def load_params(self):
"""
load params & adabn's inner stats
"""
self.load_adabn_variables()
if os.path.exists(os.path.join(self.params_dir, cnnblstm_with_adabn.PARAMS_FILE)):
if self.use_cuda:
self.load_state_dict(torch.load(os.path.join(self.params_dir, cnnblstm_with_adabn.PARAMS_FILE), map_location = torch.device('cuda')))
else:
self.load_state_dict(torch.load(os.path.join(self.params_dir, cnnblstm_with_adabn.PARAMS_FILE), map_location = torch.device('cpu')))
# print("load_params success!")
# self.ae.load_params()
def load_adabn_variables(self):
"""
load adabn's inner stats
"""
self.net1_adabn.load_attrs()
self.net2_adabn.load_attrs()
self.net3_adabn.load_attrs()
def get_model(self, pre_trained = False):
"""
get pretrained model
"""
if pre_trained:
self.load_params()
return self
if __name__ == '__main__':
use_cuda = torch.cuda.is_available()
if use_cuda:
cnnblstm = cnnblstm_with_adabn(use_cuda = use_cuda).cuda()
else:
cnnblstm = cnnblstm_with_adabn(use_cuda = use_cuda)
print(cnnblstm)
# get train_x, train_y
train_x = torch.rand(20, 3, 800, dtype = torch.float32)
train_y = torch.randint(10, (20, ), dtype = torch.int64)
# train_y = torch.LongTensor(20, 1).random_() % 10
print(train_x.type())
# train_y = torch.zeros(20, 10).scatter_(1, train_y, 1)
print(train_y)
cnnblstm.trainAllLayers(train_x, train_y)  # trainAllLayers takes tensors and builds its own TensorDataset
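# hypothetical held-out split to exercise getTestAccuracy (random data, so
# accuracy should hover near chance; illustration only)
test_x = torch.rand(10, 3, 800, dtype = torch.float32)
test_y = torch.randint(10, (10, ), dtype = torch.int64)
print("test accuracy: %.4f" % cnnblstm.getTestAccuracy(test_x, test_y))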
| 36.068783 | 211 | 0.710943 | 2,080 | 13,634 | 4.404808 | 0.129327 | 0.022157 | 0.015717 | 0.01277 | 0.465837 | 0.407226 | 0.390963 | 0.359201 | 0.327003 | 0.313032 | 0 | 0.017791 | 0.163122 | 13,634 | 377 | 212 | 36.164456 | 0.785188 | 0.230747 | 0 | 0.1875 | 0 | 0 | 0.03476 | 0.005793 | 0 | 0 | 0 | 0 | 0 | 1 | 0.057692 | false | 0 | 0.057692 | 0 | 0.163462 | 0.024038 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
7c2d2c77ae28e087d253ce05110db6593a6b0fcc | 26,658 | py | Python | src/emmental/model.py | woffett/emmental | 87884fcd89662cca45f0ea0f78cff73380cc47c8 | ["MIT"] | null | null | null | src/emmental/model.py | woffett/emmental | 87884fcd89662cca45f0ea0f78cff73380cc47c8 | ["MIT"] | null | null | null | src/emmental/model.py | woffett/emmental | 87884fcd89662cca45f0ea0f78cff73380cc47c8 | ["MIT"] | null | null | null |
"""Emmental model."""
import itertools
import logging
import os
from collections import defaultdict
from collections.abc import Iterable
from typing import Any, Callable, Dict, List, Optional, Set, Tuple, Union
import numpy as np
import torch
from numpy import ndarray
from torch import Tensor, nn as nn
from torch.nn import ModuleDict
from tqdm import tqdm
from emmental.data import EmmentalDataLoader
from emmental.meta import Meta
from emmental.scorer import Scorer
from emmental.task import EmmentalTask
from emmental.utils.utils import construct_identifier, move_to_device, prob_to_pred
logger = logging.getLogger(__name__)
class EmmentalModel(nn.Module):
"""A class to build multi-task model.
Args:
name: Name of the model, defaults to None.
tasks: A task or a list of tasks.
"""
def __init__(
self,
name: Optional[str] = None,
tasks: Optional[Union[EmmentalTask, List[EmmentalTask]]] = None,
) -> None:
"""Initialize EmmentalModel."""
super().__init__()
self.name = name if name is not None else type(self).__name__
# Initiate the model attributes
self.module_pool: ModuleDict = ModuleDict()
self.task_names: Set[str] = set()
self.task_flows: Dict[str, Any] = dict() # TODO: make it concrete
self.loss_funcs: Dict[str, Callable] = dict()
self.output_funcs: Dict[str, Callable] = dict()
self.scorers: Dict[str, Scorer] = dict()
self.action_outputs: Dict[
str, Optional[List[Union[Tuple[str, str], Tuple[str, int]]]]
] = dict()
self.weights: Dict[str, float] = dict()
# Build network with given tasks
if tasks is not None:
self.add_tasks(tasks)
if Meta.config["meta_config"]["verbose"]:
logger.info(
f"Created emmental model {self.name} that contains "
f"task {self.task_names}."
)
# Move model to specified device
self._move_to_device()
def _move_to_device(self) -> None:
"""Move model to specified device."""
if Meta.config["model_config"]["device"] != -1:
if torch.cuda.is_available():
device = (
f"cuda:{Meta.config['model_config']['device']}"
if isinstance(Meta.config["model_config"]["device"], int)
else Meta.config["model_config"]["device"]
)
if Meta.config["meta_config"]["verbose"]:
logger.info(f"Moving model to GPU ({device}).")
self.to(torch.device(device))
else:
if Meta.config["meta_config"]["verbose"]:
logger.info("No cuda device available. Switch to cpu instead.")
def _to_dataparallel(self) -> None:
for key in self.module_pool.keys():
self.module_pool[key] = torch.nn.DataParallel(self.module_pool[key])
def _to_distributed_dataparallel(self) -> None:
for key in self.module_pool.keys():
self.module_pool[
key
] = torch.nn.parallel.DistributedDataParallel( # type: ignore
self.module_pool[key],
device_ids=[Meta.config["learner_config"]["local_rank"]],
output_device=Meta.config["learner_config"]["local_rank"],
find_unused_parameters=True,
)
def add_tasks(self, tasks: Union[EmmentalTask, List[EmmentalTask]]) -> None:
"""Build the MTL network using all tasks.
Args:
tasks: A task or a list of tasks.
"""
if not isinstance(tasks, Iterable):
tasks = [tasks]
for task in tasks:
self.add_task(task)
def add_task(self, task: EmmentalTask) -> None:
"""Add a single task into MTL network.
Args:
task: A task to add.
"""
if not isinstance(task, EmmentalTask):
raise ValueError(f"Unrecognized task type {task}.")
if task.name in self.task_names:
raise ValueError(
f"Found duplicate task {task.name}, different task should use "
f"different task name."
)
# Combine module_pool from all tasks
for key in task.module_pool.keys():
if key in self.module_pool.keys():
task.module_pool[key] = self.module_pool[key]
else:
self.module_pool[key] = task.module_pool[key]
# Collect task name
self.task_names.add(task.name)
# Collect task flow
self.task_flows[task.name] = task.task_flow
# Collect loss function
self.loss_funcs[task.name] = task.loss_func
# Collect output function
self.output_funcs[task.name] = task.output_func
# Collect action outputs
self.action_outputs[task.name] = task.action_outputs
# Collect scorer
self.scorers[task.name] = task.scorer
# Collect weight
self.weights[task.name] = task.weight
# Move model to specified device
self._move_to_device()
def update_task(self, task: EmmentalTask) -> None:
"""Update a existing task in MTL network.
Args:
task: A task to update.
"""
# Update module_pool with task
for key in task.module_pool.keys():
# Update the model's module with the task's module
self.module_pool[key] = task.module_pool[key]
# Update task flow
self.task_flows[task.name] = task.task_flow
# Update loss function
self.loss_funcs[task.name] = task.loss_func
# Update output function
self.output_funcs[task.name] = task.output_func
# Update action outputs
self.action_outputs[task.name] = task.action_outputs
# Update scorer
self.scorers[task.name] = task.scorer
# Update weight
self.weights[task.name] = task.weight
# Move model to specified device
self._move_to_device()
def remove_task(self, task_name: str) -> None:
"""Remove a existing task from MTL network.
Args:
task_name: The task name to remove.
"""
if task_name not in self.task_flows:
if Meta.config["meta_config"]["verbose"]:
logger.info(f"Task ({task_name}) not in the current model, skip...")
return
# Remove task by task_name
if Meta.config["meta_config"]["verbose"]:
logger.info(f"Removing Task {task_name}.")
self.task_names.remove(task_name)
del self.task_flows[task_name]
del self.loss_funcs[task_name]
del self.output_funcs[task_name]
del self.action_outputs[task_name]
del self.scorers[task_name]
del self.weights[task_name]
# TODO: remove the modules only associate with that task
def __repr__(self) -> str:
"""Represent the model as a string."""
cls_name = type(self).__name__
return f"{cls_name}(name={self.name})"
def flow(self, X_dict: Dict[str, Any], task_names: List[str]) -> Dict[str, Any]:
"""Forward based on input and task flow.
Note:
We assume that all shared modules from all tasks are based on the
same input.
Args:
X_dict: The input data
task_names: The task names that need to be forwarded.
Returns:
The output of all forwarded modules
"""
X_dict = move_to_device(X_dict, Meta.config["model_config"]["device"])
output_dict = dict(_input_=X_dict)
# Call forward for each task
for task_name in task_names:
for action in self.task_flows[task_name]:
if action["name"] not in output_dict:
if action["inputs"]:
try:
input = [
output_dict[action_name][output_index]
for action_name, output_index in action["inputs"]
]
except Exception:
raise ValueError(f"Unrecognized action {action}.")
output = self.module_pool[action["module"]].forward(*input)
else:
output = self.module_pool[action["module"]].forward(output_dict)
if isinstance(output, tuple):
output = list(output)
if not isinstance(output, list) and not isinstance(output, dict):
output = [output]
output_dict[action["name"]] = output
return output_dict
def forward( # type: ignore
self,
uids: List[str],
X_dict: Dict[str, Any],
Y_dict: Dict[str, Tensor],
task_to_label_dict: Dict[str, str],
return_action_outputs=False,
) -> Union[
Tuple[
Dict[str, List[str]],
Dict[str, ndarray],
Dict[str, ndarray],
Dict[str, ndarray],
Dict[str, Dict[str, ndarray]],
],
Tuple[
Dict[str, List[str]],
Dict[str, ndarray],
Dict[str, ndarray],
Dict[str, ndarray],
],
]:
"""Forward function.
Args:
uids: The uids of input data.
X_dict: The input data.
Y_dict: The output data.
task_to_label_dict: The task to label mapping.
return_action_outputs: Whether to return action_outputs or not,
defaults to False.
Returns:
The (active) uids, loss, prob, gold, action_output (optional) in the batch of
all tasks.
"""
uid_dict: Dict[str, List[str]] = defaultdict(list)
loss_dict: Dict[str, ndarray] = defaultdict(float)
gold_dict: Dict[str, ndarray] = defaultdict(list)
prob_dict: Dict[str, ndarray] = defaultdict(list)
out_dict: Dict[str, Dict[str, ndarray]] = defaultdict(lambda: defaultdict(list))
task_names = (
list(task_to_label_dict.keys())
if isinstance(task_to_label_dict, dict)
else list(task_to_label_dict)
)
output_dict = self.flow(X_dict, task_names)
if Y_dict is not None:
# Calculate logit and loss for each task
for task_name, label_name in task_to_label_dict.items():
Y = Y_dict[label_name]
# Select the active samples
if Meta.config["learner_config"]["ignore_index"] is not None:
if len(Y.size()) == 1:
active = (
Y.detach() != Meta.config["learner_config"]["ignore_index"]
)
else:
active = torch.any(
Y.detach() != Meta.config["learner_config"]["ignore_index"],
dim=1,
)
else:
active = torch.BoolTensor([True] * Y.size()[0]) # type: ignore
# Only calculate the loss when active example exists
if active.any():
uid_dict[task_name] = [*itertools.compress(uids, active.numpy())]
loss_dict[task_name] = self.loss_funcs[task_name](
output_dict,
move_to_device(
Y_dict[label_name], Meta.config["model_config"]["device"]
),
move_to_device(active, Meta.config["model_config"]["device"]),
)
prob_dict[task_name] = (
self.output_funcs[task_name](output_dict)[
move_to_device(
active, Meta.config["model_config"]["device"]
)
]
.cpu()
.detach()
.numpy()
)
gold_dict[task_name] = Y_dict[label_name][active].cpu().numpy()
if (
return_action_outputs
and self.action_outputs[task_name] is not None
):
for action_name, output_index in self.action_outputs[task_name]:
out_dict[task_name][f"{action_name}_{output_index}"] = (
output_dict[action_name][output_index][
move_to_device(
active, Meta.config["model_config"]["device"]
)
]
.cpu()
.detach()
.numpy()
)
else:
# Calculate logit for each task
for task_name in task_to_label_dict:
uid_dict[task_name] = uids
prob_dict[task_name] = (
self.output_funcs[task_name](output_dict).cpu().detach().numpy()
)
if return_action_outputs and self.action_outputs[task_name] is not None:
for action_name, output_index in self.action_outputs[task_name]:
out_dict[task_name][f"{action_name}_{output_index}"] = (
output_dict[action_name][output_index]
.cpu()
.detach()
.numpy()
)
loss_dict = None
gold_dict = None
if return_action_outputs:
return uid_dict, loss_dict, prob_dict, gold_dict, out_dict
else:
return uid_dict, loss_dict, prob_dict, gold_dict
@torch.no_grad()
def predict(
self,
dataloader: EmmentalDataLoader,
return_preds: bool = False,
return_action_outputs: bool = True,
) -> Dict[str, Any]:
"""Predict from dataloader.
Args:
dataloader: The dataloader to predict.
return_preds: Whether to return predictions or not, defaults to False.
return_action_outputs: Whether to return action_outputs or not, defaults to True.
Returns:
The result dict.
"""
self.eval()
uid_dict: Dict[str, List[str]] = defaultdict(list)
prob_dict: Dict[str, List[Union[ndarray, int, float]]] = defaultdict(list)
pred_dict: Dict[str, List[ndarray]] = defaultdict(list)
gold_dict: Dict[str, List[Union[ndarray, int, float]]] = defaultdict(list)
out_dict: Dict[str, Dict[str, List[Union[ndarray, int, float]]]] = defaultdict(
lambda: defaultdict(list)
)
loss_dict: Dict[str, Union[ndarray, float]] = defaultdict(list) # type: ignore
if not dataloader.is_learnable:
gold_dict = None
loss_dict = None
# Collect dataloader information
task_to_label_dict = dataloader.task_to_label_dict
uid = dataloader.uid
for batch_num, bdict in tqdm(
enumerate(dataloader),
total=len(dataloader),
desc=f"Evaluating {dataloader.data_name} ({dataloader.split})",
):
if isinstance(bdict, dict):
X_bdict = bdict
Y_bdict = None
else:
X_bdict, Y_bdict = bdict
if not dataloader.is_learnable:
Y_bdict = None
if return_action_outputs:
(
uid_bdict,
loss_bdict,
prob_bdict,
gold_bdict,
out_bdict,
) = self.forward( # type: ignore
X_bdict[uid],
X_bdict,
Y_bdict,
task_to_label_dict,
return_action_outputs=return_action_outputs,
)
else:
(
uid_bdict,
loss_bdict,
prob_bdict,
gold_bdict,
) = self.forward( # type: ignore
X_bdict[uid],
X_bdict,
Y_bdict,
task_to_label_dict,
return_action_outputs=return_action_outputs,
)
out_bdict = None
for task_name in uid_bdict.keys():
uid_dict[task_name].extend(uid_bdict[task_name])
prob_dict[task_name].extend(prob_bdict[task_name])
if dataloader.is_learnable:
gold_dict[task_name].extend(gold_bdict[task_name])
if len(loss_bdict[task_name].size()) == 0:
if loss_dict[task_name] == []:
loss_dict[task_name] = 0
loss_dict[task_name] += loss_bdict[task_name].item() * len(
uid_bdict[task_name]
)
else:
loss_dict[task_name].extend( # type: ignore
loss_bdict[task_name].cpu().numpy()
)
if return_action_outputs and out_bdict:
for task_name in out_bdict.keys():
for action_name in out_bdict[task_name].keys():
out_dict[task_name][action_name].extend(
out_bdict[task_name][action_name]
)
# Calculate average loss
if dataloader.is_learnable:
for task_name in uid_dict.keys():
if not isinstance(loss_dict[task_name], list):
loss_dict[task_name] /= len(uid_dict[task_name])
res = {
"uids": uid_dict,
"golds": gold_dict,
"probs": prob_dict,
"losses": loss_dict,
}
if return_action_outputs:
res["outputs"] = out_dict
if return_preds:
for task_name, prob in prob_dict.items():
pred_dict[task_name] = prob_to_pred(prob)
res["preds"] = pred_dict
return res
@torch.no_grad()
def score(
self,
dataloaders: Union[EmmentalDataLoader, List[EmmentalDataLoader]],
return_average: bool = True,
) -> Dict[str, float]:
"""Score the data from dataloader.
Args:
dataloaders: The dataloaders to score.
return_average: Whether to return average score.
Returns:
Score dict.
"""
self.eval()
if not isinstance(dataloaders, list):
dataloaders = [dataloaders]
metric_score_dict = dict()
if return_average:
micro_score_dict: defaultdict = defaultdict(list)
macro_score_dict: defaultdict = defaultdict(list)
macro_loss_dict: defaultdict = defaultdict(list)
for dataloader in dataloaders:
if not dataloader.is_learnable:
logger.warning(
f"Dataloader {dataloader.data_name} doesn't have gold data, "
f"continue..."
)
continue
predictions = self.predict(dataloader, return_preds=True)
for task_name in predictions["uids"].keys():
metric_score = self.scorers[task_name].score(
predictions["golds"][task_name],
predictions["probs"][task_name],
predictions["preds"][task_name],
predictions["uids"][task_name],
)
for metric_name, metric_value in metric_score.items():
identifier = construct_identifier(
task_name, dataloader.data_name, dataloader.split, metric_name
)
metric_score_dict[identifier] = metric_value
# Store the loss
identifier = construct_identifier(
task_name, dataloader.data_name, dataloader.split, "loss"
)
metric_score_dict[identifier] = np.mean(
predictions["losses"][task_name]
)
if return_average:
# Collect average score
identifier = construct_identifier(
task_name, dataloader.data_name, dataloader.split, "average"
)
metric_score_dict[identifier] = np.mean(list(metric_score.values()))
micro_score_dict[dataloader.split].extend(
list(metric_score.values())
)
macro_score_dict[dataloader.split].append(
metric_score_dict[identifier]
)
# Store the loss
identifier = construct_identifier(
task_name, dataloader.data_name, dataloader.split, "loss"
)
macro_loss_dict[dataloader.split].append(
metric_score_dict[identifier]
)
if return_average:
# Collect split-wise micro/macro average score
for split in micro_score_dict.keys():
identifier = construct_identifier(
"model", "all", split, "micro_average"
)
metric_score_dict[identifier] = np.mean(micro_score_dict[split])
identifier = construct_identifier(
"model", "all", split, "macro_average"
)
metric_score_dict[identifier] = np.mean(macro_score_dict[split])
identifier = construct_identifier("model", "all", split, "loss")
metric_score_dict[identifier] = np.mean(macro_loss_dict[split])
# Collect overall micro/macro average score/loss
if len(micro_score_dict):
identifier = construct_identifier(
"model", "all", "all", "micro_average"
)
metric_score_dict[identifier] = np.mean(
list(itertools.chain.from_iterable(micro_score_dict.values()))
)
if len(macro_score_dict):
identifier = construct_identifier(
"model", "all", "all", "macro_average"
)
metric_score_dict[identifier] = np.mean(
list(itertools.chain.from_iterable(macro_score_dict.values()))
)
if len(macro_loss_dict):
identifier = construct_identifier("model", "all", "all", "loss")
metric_score_dict[identifier] = np.mean(
list(itertools.chain.from_iterable(macro_loss_dict.values()))
)
# TODO: have a better way to handle global evaluation metrics
if Meta.config["learner_config"]["global_evaluation_metric_dict"]:
global_evaluation_metric_dict = Meta.config["learner_config"][
"global_evaluation_metric_dict"
]
for metric_name, metric in global_evaluation_metric_dict.items():
metric_score_dict[metric_name] = metric(metric_score_dict)
return metric_score_dict
def save(self, model_path: str) -> None:
"""Save the current model.
Args:
model_path: Saved model path.
"""
# Check existence of the model saving directory and create it if it does not exist.
if not os.path.exists(os.path.dirname(model_path)):
os.makedirs(os.path.dirname(model_path))
state_dict = {
"model": {
"name": self.name,
"module_pool": self.collect_state_dict(),
# "task_names": self.task_names,
# "task_flows": self.task_flows,
# "loss_funcs": self.loss_funcs,
# "output_funcs": self.output_funcs,
# "scorers": self.scorers,
}
}
try:
torch.save(state_dict, model_path)
except BaseException:
logger.warning("Saving failed... continuing anyway.")
if Meta.config["meta_config"]["verbose"]:
logger.info(f"[{self.name}] Model saved in {model_path}")
def load(self, model_path: str) -> None:
"""Load model state_dict from file and reinitialize the model weights.
Args:
model_path: Saved model path.
"""
if not os.path.exists(model_path):
logger.error("Loading failed... Model does not exist.")
try:
checkpoint = torch.load(model_path, map_location=torch.device("cpu"))
except BaseException:
logger.error(f"Loading failed... Cannot load model from {model_path}")
raise
self.load_state_dict(checkpoint["model"]["module_pool"])
if Meta.config["meta_config"]["verbose"]:
logger.info(f"[{self.name}] Model loaded from {model_path}")
# Move model to specified device
self._move_to_device()
def collect_state_dict(self) -> Dict[str, Any]:
"""Collect the state dict."""
state_dict: Dict[str, Any] = defaultdict(list)
for module_name, module in self.module_pool.items():
if hasattr(module, "module"):
state_dict[module_name] = module.module.state_dict() # type: ignore
else:
state_dict[module_name] = module.state_dict()
return state_dict
def load_state_dict(self, state_dict: Dict[str, Any]) -> None: # type: ignore
"""Load the state dict.
Args:
state_dict: The state dict to load.
"""
for module_name, module_state_dict in state_dict.items():
if module_name in self.module_pool:
if hasattr(self.module_pool[module_name], "module"):
self.module_pool[module_name].module.load_state_dict(
module_state_dict
)
else:
self.module_pool[module_name].load_state_dict(module_state_dict)
else:
logger.info(f"Missing {module_name} in module_pool, skip it..")
| 38.028531 | 88 | 0.534249 | 2,842 | 26,658 | 4.79064 | 0.100985 | 0.050533 | 0.017628 | 0.012119 | 0.465443 | 0.375835 | 0.321924 | 0.292031 | 0.213882 | 0.185163 | 0 | 0.000419 | 0.373734 | 26,658 | 700 | 89 | 38.082857 | 0.815094 | 0.120939 | 0 | 0.297959 | 0 | 0 | 0.071641 | 0.010072 | 0 | 0 | 0 | 0.004286 | 0 | 1 | 0.034694 | false | 0 | 0.034694 | 0 | 0.087755 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
7c2daa2465bd8777ef8940cbc518e195f59d4ad9 | 4,578 | py | Python | server/ws_server.py | jangxx/OVRT_Soundpad | 2f9b2cd19421bc7b5586a3dcded2998d381ba688 | ["MIT"] | 1 | 2021-09-29T01:45:35.000Z | 2021-09-29T01:45:35.000Z | server/ws_server.py | jangxx/OVRT_Soundpad | 2f9b2cd19421bc7b5586a3dcded2998d381ba688 | ["MIT"] | 2 | 2021-09-28T08:53:09.000Z | 2021-10-20T01:06:15.000Z | server/ws_server.py | jangxx/OVRT_Soundpad | 2f9b2cd19421bc7b5586a3dcded2998d381ba688 | ["MIT"] | null | null | null |
import asyncio, json
from config import Config
from soundpad_manager import SoundpadManager
from version import BRIDGE_VERSION
import websockets
from sanic.log import logger
# yes I know that it's very lazy to run a separate WS and HTTP server, when both could be run on the same port
# I don't like sanic's WS implementation tho and this is just a quick and dirty project anyway, so there is no reason to get all that fancy

class WebsocketServer:
    def __init__(self, config: Config, sp_manager: SoundpadManager):
        self._server = None
        self._config = config
        self._soundpad = sp_manager

        # ephemeral state
        self._state = {
            "edit_mode": False,
            "soundpad_connected": False,
            "version": BRIDGE_VERSION,
        }

        self._index_sockets = set()
        self._control_sockets = set()

    def start(self):
        port = self._config.get(["server", "ws_port"])
        logger.info(f"Websocket server is running on port {port}")
        self._server = asyncio.get_event_loop().run_until_complete(websockets.serve(self.connHandler, "localhost", port))

    async def stop(self):
        self._server.close()
        await self._server.wait_closed()

    async def changeState(self, key, value):
        self._state[key] = value
        await self.emitEvent("state-update", self._state)

    async def commandHandler(self, socket, command, params):
        if command == "register":
            if params["as"] == "index":
                self._index_sockets.add(socket)
            elif params["as"] == "control":
                self._control_sockets.add(socket)
            await self.emitEvent("settings-change", self._config.getExternalSerialized(), socket=socket, index_sockets=False, control_sockets=False)
            await self.emitEvent("state-update", self._state, socket=socket, index_sockets=False, control_sockets=False)
        elif command == "change-settings":
            if params["setting"] == ["board", "rows"] or params["setting"] == ["board", "columns"]:
                if not 1 <= params["value"] <= 10:
                    return  # invalid values are not allowed
            self._config.set(params["setting"], params["value"])
            await self.emitEvent("settings-change", self._config.getExternalSerialized())
        elif command == "set-edit-mode":
            self._state["edit_mode"] = params["value"]
            await self.emitEvent("state-update", self._state)
        elif command == "select-sound":
            if not 0 <= params['page'] <= 9 or not 0 <= params['row'] <= 9 or not 0 <= params['col'] <= 9:
                return  # out of bounds
            if params['page'] == 0 and self._config.exists(["sounds", f"{params['row']},{params['col']}"]):
                self._config.delete(["sounds", f"{params['row']},{params['col']}"])
            sound_index = f"{params['page']}:{params['row']},{params['col']}"
            self._config.set(["sounds", sound_index], params["sound"])
            await self.emitEvent("settings-change", self._config.getExternalSerialized(), index_sockets=False)
        elif command == "play-sound":
            sound_id = params["sound"]
            self._soundpad.playSound(sound_id)
        elif command == "stop-sound":
            self._soundpad.stopSound()
        elif command == "pause-sound":
            self._soundpad.pauseSound()
        elif command == "log":
            if "message" in params:
                logger.info("Log: " + params["message"])
            else:
                logger.info("Log: " + json.dumps(params))

    async def emitEvent(self, event, data, socket=None, index_sockets=True, control_sockets=True):
        msg = json.dumps({"type": "event", "event": event, "data": data})

        if socket is not None:
            await socket.send(msg)
        if index_sockets:
            for socket in self._index_sockets:
                await socket.send(msg)
        if control_sockets:
            for socket in self._control_sockets:
                await socket.send(msg)

    async def connHandler(self, socket, path):
        print("Client connected")
        try:
            async for raw_msg in socket:
                try:
                    msg = json.loads(raw_msg)
                except Exception as err:
                    logger.error(f"Could not parse JSON: {repr(err)}")
                    continue

                if "type" not in msg:
                    continue

                if msg["type"] == "command":
                    if "command" not in msg or "params" not in msg:
                        continue
                    try:
                        await self.commandHandler(socket, msg["command"], msg["params"])
                    except Exception as e:  # if we get garbage data just ignore it
                        print(f"Error in commandHandler: {msg['command']}({msg['params']}): {repr(e)}")
        except websockets.ConnectionClosedError:
            pass
        finally:
            if socket in self._index_sockets:
                self._index_sockets.discard(socket)
            if socket in self._control_sockets:
                self._control_sockets.discard(socket)
            print("Client disconnected")
| 33.661765 | 140 | 0.668633 | 600 | 4,578 | 4.975 | 0.29 | 0.033501 | 0.036181 | 0.023116 | 0.240536 | 0.166499 | 0.137018 | 0.124288 | 0 | 0 | 0 | 0.002714 | 0.195063 | 4,578 | 136 | 141 | 33.661765 | 0.807327 | 0.074487 | 0 | 0.147059 | 0 | 0 | 0.174561 | 0.035156 | 0 | 0 | 0 | 0 | 0 | 1 | 0.019608 | false | 0.019608 | 0.058824 | 0 | 0.107843 | 0.029412 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
7c2f595fee4e21dc84c6666b03b2174e6d5731e0 | 8,108 | py | Python | tensorforce/tests/test_model_save_restore.py | gian1312/suchen | df863140fd8df1ac2e195cbdfa4756f09f962270 | ["Apache-2.0"] | null | null | null | tensorforce/tests/test_model_save_restore.py | gian1312/suchen | df863140fd8df1ac2e195cbdfa4756f09f962270 | ["Apache-2.0"] | null | null | null | tensorforce/tests/test_model_save_restore.py | gian1312/suchen | df863140fd8df1ac2e195cbdfa4756f09f962270 | ["Apache-2.0"] | 1 | 2019-11-29T12:28:33.000Z | 2019-11-29T12:28:33.000Z |
from __future__ import absolute_import
from __future__ import print_function
from __future__ import division
import unittest
import pytest
from tensorforce import TensorForceError
from tensorforce.core.networks import LayeredNetwork
from tensorforce.models import DistributionModel
from tensorforce.tests.minimal_test import MinimalTest
from tensorforce.agents import PPOAgent
from tensorforce.execution import Runner
import tensorflow as tf
import numpy as np
from tensorforce.util import SavableComponent
import os
class SavableNetwork(LayeredNetwork, SavableComponent):
"""
Minimal implementation of a Network that can be saved and restored independently of the Model.
"""
def get_savable_variables(self):
return super(SavableNetwork, self).get_variables(include_nontrainable=False)
def _get_base_variable_scope(self):
return self.apply.variable_scope_name
def create_environment(spec):
return MinimalTest(spec)
def create_agent(environment, network_spec):
return PPOAgent(
update_mode=dict(
unit='episodes',
batch_size=4,
frequency=4
),
memory=dict(
type='latest',
include_next_states=False,
capacity=100
),
step_optimizer=dict(
type='adam',
learning_rate=1e-3
),
subsampling_fraction=0.3,
optimization_steps=20,
states=environment.states,
actions=environment.actions,
network=network_spec
)
class TestModelSaveRestore(unittest.TestCase):
@pytest.fixture(autouse=True)
def initdir(self, tmpdir):
tmpdir.chdir()
self._tmp_dir_path = str(tmpdir)
print("Using %s" % (self._tmp_dir_path, ))
def test_save_restore(self):
environment_spec = {"float": ()}
environment = create_environment(environment_spec)
network_spec = [
dict(type='dense', size=32)
]
agent = create_agent(environment, network_spec)
runner = Runner(agent=agent, environment=environment)
runner.run(episodes=100)
model_values = agent.model.session.run(agent.model.get_variables(
include_submodules=True,
include_nontrainable=False
))
save_path = agent.model.save(directory=self._tmp_dir_path + "/model")
print("Saved at: %s" % (save_path,))
runner.close()
agent = create_agent(environment, network_spec)
agent.model.restore(directory="", file=save_path)
restored_model_values = agent.model.session.run(agent.model.get_variables(
include_submodules=True,
include_nontrainable=False
))
assert len(model_values) == len(restored_model_values)
assert all([np.array_equal(v1, v2) for v1, v2 in zip(model_values, restored_model_values)])
agent.close()
def test_save_network(self):
"""
Test to validate that calls to save and restore of a SavableComponent successfully save and restore the
component's state.
"""
environment_spec = {"float": ()}
environment = create_environment(environment_spec)
network_spec = dict(
type=SavableNetwork,
layers=[dict(type='dense', size=1)]
)
agent = create_agent(environment, network_spec)
assert isinstance(agent.model.network, SavableComponent)
runner = Runner(agent=agent, environment=environment)
runner.run(episodes=100)
network_values = agent.model.session.run(agent.model.network.get_variables())
distribution = next(iter(agent.model.distributions.values()))
distribution_values = agent.model.session.run(distribution.get_variables())
save_path = self._tmp_dir_path + "/network"
agent.model.save_component(component_name=DistributionModel.COMPONENT_NETWORK, save_path=save_path)
runner.close()
assert os.path.isfile(save_path + ".data-00000-of-00001")
assert os.path.isfile(save_path + ".index")
agent = create_agent(environment, network_spec)
agent.model.restore_component(component_name=DistributionModel.COMPONENT_NETWORK, save_path=save_path)
# Ensure only the network variables are loaded
restored_network_values = agent.model.session.run(agent.model.network.get_variables(include_nontrainable=True))
distribution = next(iter(agent.model.distributions.values()))
restored_distribution_values = agent.model.session.run(distribution.get_variables())
assert len(restored_network_values) == len(network_values)
assert all([np.array_equal(v1, v2) for v1, v2 in zip(network_values, restored_network_values)])
assert len(restored_distribution_values) == len(distribution_values)
assert not all([np.array_equal(v1, v2) for v1, v2 in zip(distribution_values, restored_distribution_values)])
agent.close()
environment.close()
def test_pretrain_network(self):
"""
Simulates training outside of Tensorforce and then loading the parameters in the agent's network.
"""
environment_spec = {"float": ()}
environment = create_environment(environment_spec)
size = environment.states["shape"]
output_size = 1
save_path = self._tmp_dir_path + "/network"
g = tf.Graph()
with g.as_default():
x = tf.placeholder(dtype=environment.states["type"], shape=[None, size])
layer = tf.layers.Dense(units=output_size)
y = layer(x)
y_ = tf.placeholder(dtype=environment.states["type"], shape=[None, output_size])
loss = tf.losses.mean_squared_error(y_, y)
optimizer = tf.train.AdamOptimizer(learning_rate=0.1)
train_step = optimizer.minimize(loss)
batch_size = 64
with tf.Session(graph=g) as sess:
sess.run(tf.global_variables_initializer())
for epoch in range(100):
batch = np.random.random([batch_size, size])
correct = np.ones(shape=[batch.shape[0], output_size])
loss_value, _ = sess.run([loss, train_step], {x: batch, y_: correct})
if epoch % 10 == 0:
print("epoch %d: %f" % (epoch, loss_value))
var_map = {
"dense0/apply/linear/apply/W:0": layer.kernel,
"dense0/apply/linear/apply/b:0": layer.bias
}
saver = tf.train.Saver(var_list=var_map)
saver.save(sess=sess, write_meta_graph=False, save_path=save_path)
network_spec = dict(
type=SavableNetwork,
layers=[dict(type='dense', size=output_size)],
)
agent = create_agent(environment, network_spec)
agent.model.restore_component(component_name=agent.model.COMPONENT_NETWORK, save_path=save_path)
agent.close()
def test_non_savable_component(self):
environment_spec = {"float": ()}
environment = create_environment(environment_spec)
network_spec = [dict(type='dense', size=32)]
agent = create_agent(environment, network_spec)
expected_message = "Component network must implement SavableComponent but is "
with pytest.raises(TensorForceError) as excinfo:
agent.model.restore_component(component_name="network", save_path=self._tmp_dir_path + "/network")
assert expected_message in str(excinfo.value)
with pytest.raises(TensorForceError) as excinfo:
agent.model.save_component(component_name="network", save_path=self._tmp_dir_path + "/network")
assert expected_message in str(excinfo.value)
with pytest.raises(TensorForceError) as excinfo:
agent.model.restore_component(component_name="non-existent", save_path=self._tmp_dir_path + "/network")
assert "Component non-existent must implement SavableComponent but is None" == str(excinfo.value)
agent.close()
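# The suite is pytest-based (see the pytest.fixture / pytest.raises usage
# above), so a single-file run would look like:
#
#   pytest tensorforce/tests/test_model_save_restore.py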
| 39.940887 | 119 | 0.662309 | 928 | 8,108 | 5.570043 | 0.235991 | 0.042561 | 0.015477 | 0.021668 | 0.479783 | 0.451538 | 0.424453 | 0.394274 | 0.356742 | 0.322113 | 0 | 0.009753 | 0.241243 | 8,108 | 202 | 120 | 40.138614 | 0.830462 | 0.044524 | 0 | 0.291139 | 0 | 0 | 0.049759 | 0.007555 | 0 | 0 | 0 | 0 | 0.075949 | 1 | 0.056962 | false | 0 | 0.094937 | 0.025316 | 0.189873 | 0.025316 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
7c32d21e81a25b4bfc714d53125ce26089327176 | 263 | py | Python | what_can_i_cook/urls.py | s-maibuecher/what_can_i_cook | 07d0eb1e1862fad299477b800654e895d7f8829a | ["MIT"] | null | null | null | what_can_i_cook/urls.py | s-maibuecher/what_can_i_cook | 07d0eb1e1862fad299477b800654e895d7f8829a | ["MIT"] | null | null | null | what_can_i_cook/urls.py | s-maibuecher/what_can_i_cook | 07d0eb1e1862fad299477b800654e895d7f8829a | ["MIT"] | null | null | null |
from django.urls import path
from what_can_i_cook.views import WCICFilterView, WCICResultView
app_name = "wcic"
urlpatterns = [
    path("", WCICFilterView.as_view(), name="wcic-start"),
    path("results/", WCICResultView.as_view(), name="wcic-results"),
]
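# Assuming the app's URLconf is included at the site root, these names resolve
# through the "wcic" namespace (illustrative):
#
#   from django.urls import reverse
#   reverse("wcic:wcic-start")      # -> "/"
#   reverse("wcic:wcic-results")    # -> "/results/"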
| 20.230769 | 68 | 0.722433 | 33 | 263 | 5.575758 | 0.606061 | 0.130435 | 0.108696 | 0.152174 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.13308 | 263 | 12 | 69 | 21.916667 | 0.807018 | 0 | 0 | 0 | 0 | 0 | 0.129771 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.285714 | 0 | 0.285714 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
7c32daa41ae2a8f92a0d91d061b5264ea9984602 | 436 | py | Python | shared/templates/grub2_bootloader_argument/template.py | justchris1/scap-security-guide | 030097afa80041fcdffc537a49c09896efedadca | ["BSD-3-Clause"] | 1,138 | 2018-09-05T06:31:44.000Z | 2022-03-31T03:38:24.000Z | shared/templates/grub2_bootloader_argument/template.py | justchris1/scap-security-guide | 030097afa80041fcdffc537a49c09896efedadca | ["BSD-3-Clause"] | 4,743 | 2018-09-04T15:14:04.000Z | 2022-03-31T23:17:57.000Z | shared/templates/grub2_bootloader_argument/template.py | justchris1/scap-security-guide | 030097afa80041fcdffc537a49c09896efedadca | ["BSD-3-Clause"] | 400 | 2018-09-08T20:08:49.000Z | 2022-03-30T20:54:32.000Z |
import ssg.utils
def preprocess(data, lang):
    data["arg_name_value"] = data["arg_name"] + "=" + data["arg_value"]
    if lang == "oval":
        # escape dot, this is used in oval regex
        data["escaped_arg_name_value"] = data["arg_name_value"].replace(".", "\\.")
        # replace . with _, this is used in test / object / state ids
        data["sanitized_arg_name"] = ssg.utils.escape_id(data["arg_name"])
    return data
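# Worked example (hypothetical template data):
#
#   preprocess({"arg_name": "audit", "arg_value": "1"}, "oval")
#
# yields arg_name_value "audit=1", escaped_arg_name_value "audit=1" (no dots
# to escape) and sanitized_arg_name "audit".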
| 36.333333 | 83 | 0.623853 | 61 | 436 | 4.229508 | 0.47541 | 0.162791 | 0.170543 | 0.124031 | 0.178295 | 0.178295 | 0 | 0 | 0 | 0 | 0 | 0 | 0.21789 | 436 | 11 | 84 | 39.636364 | 0.756598 | 0.224771 | 0 | 0 | 0 | 0 | 0.304478 | 0.065672 | 0 | 0 | 0 | 0 | 0 | 1 | 0.142857 | false | 0 | 0.142857 | 0 | 0.428571 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
7c34376a6bdd5ec8372f4490b569f441abff9288 | 3,598 | py | Python | preprocess.py | NNDEV1/NMTWithLuongAttention | e6f11d9e8c5f999d413fa0dc51219e979a8f975c | ["MIT"] | 4 | 2021-07-09T19:17:47.000Z | 2022-01-04T14:54:11.000Z | preprocess.py | NNDEV1/NMTWithLuongAttention | e6f11d9e8c5f999d413fa0dc51219e979a8f975c | ["MIT"] | null | null | null | preprocess.py | NNDEV1/NMTWithLuongAttention | e6f11d9e8c5f999d413fa0dc51219e979a8f975c | ["MIT"] | null | null | null |
import re
import unicodedata

import tensorflow as tf
import os
import contractions
import pandas as pd
import numpy as np
import time
import rich
from rich.progress import track
import spacy

from config import params


# Preprocessing Text
class preprocess_text():

    def __init__(self):
        pass

    def remove_pattern(self, text, pattern=r'[^a-zA-Z0-9.!?, ]', replace_with=""):
        return re.sub(pattern, replace_with, text)

    def tokenize_sent(self, text, nlp):
        doc = nlp(text)
        return [sent.text for sent in doc.sents]

    def tokenize_words(self, text, nlp):
        doc = nlp(text)
        return " ".join(tok.text for tok in doc)

    def expand_contractions(self, text):
        return contractions.fix(text)

    def do_lemmatization(self, text, nlp):
        doc = nlp(text)
        return ' '.join(tok.lemma_ if tok.lemma_ != "-PRON-" else tok.text for tok in doc)

    def add_sos_eos(self, text, sos=False, eos=False):
        if sos and eos:
            return "<sos> " + text + " <eos>"
        if eos:
            return text + " <eos>"
        if sos:
            return "<sos> " + text
        return text

    def remove_accents(self, text):
        return unicodedata.normalize('NFKD', text).encode('ascii', 'ignore').decode('UTF-8', 'ignore')


def call_preprocessing(df_col, nlp_en=True, lower_=True, remove_pattern_=False, tokenize_words_=False,
                       expand_contractions_=False, do_lemmatization_=False,
                       sos=False, eos=False, remove_accents_=False):
    nlp = spacy.load('en_core_web_sm') if nlp_en else spacy.load('de_core_news_sm')
    prep = preprocess_text()
    if expand_contractions_:
        df_col = df_col.map(lambda text: prep.expand_contractions(text))
    if remove_accents_:
        df_col = df_col.map(lambda text: prep.remove_accents(text))
    if do_lemmatization_:
        df_col = df_col.map(lambda text: prep.do_lemmatization(text, nlp))
    if tokenize_words_:
        df_col = df_col.map(lambda text: prep.tokenize_words(text, nlp))
    if remove_pattern_:
        df_col = df_col.map(lambda text: prep.remove_pattern(text))
    if eos or sos:
        df_col = df_col.map(lambda text: prep.add_sos_eos(text, sos, eos))
    if lower_:
        df_col = df_col.map(lambda text: text.lower())
    return df_col


def tokenizer(df_col, nlp_en=True):
    # build the vocabulary and 1-based token <-> id lookup tables
    vocab = set()
    _ = [[vocab.update([tok]) for tok in text.split(" ")] for text in df_col]
    if not nlp_en:
        vocab.update(["<sos>"])
        vocab.update(["<eos>"])
    tokenize = dict(zip(vocab, range(1, 1 + len(vocab))))
    detokenize = dict(zip(range(1, 1 + len(vocab)), vocab))
    return tokenize, detokenize, len(vocab)


def padding(txt_toks, max_len):
    # right-pad the token string with "<pad>" up to max_len tokens
    curr_ls = txt_toks.split(" ")
    len_ls = len(curr_ls)
    _ = [curr_ls.append("<pad>") for i in range(max_len - len_ls) if len(curr_ls) < max_len]
    return " ".join(curr_ls)


def make_minibatches(df, col1='rev_eng_tok', col2='teach_force_tok', col3='target_tok'):
    # batch the encoder inputs, teacher-forcing inputs, and targets separately
    enc_seq = np.array([df[col1].values[i] for i in range(len(df[col1]))])
    enc_seq = tf.data.Dataset.from_tensor_slices(enc_seq).batch(params.batch_size)
    teach_force_seq = np.array([df[col2].values[i] for i in range(len(df[col2]))])
    teach_force_seq = tf.data.Dataset.from_tensor_slices(teach_force_seq).batch(params.batch_size)
    y = np.array([df[col3].values[i] for i in range(len(df[col3]))])
    y = tf.data.Dataset.from_tensor_slices(y).batch(params.batch_size)
    return enc_seq, teach_force_seq, y
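# A minimal usage sketch (not part of the original file); the column name and
# flag choices are illustrative assumptions:
#
#     df = pd.DataFrame({"eng": ["I can't cook.", "What do we eat?"]})
#     df["eng"] = call_preprocessing(df["eng"], nlp_en=True,
#                                    expand_contractions_=True, lower_=True)
#     tokenize, detokenize, vocab_size = tokenizer(df["eng"], nlp_en=True)
#     df["eng"] = df["eng"].map(lambda t: padding(t, max_len=10))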
| 32.414414
| 106
| 0.644525
| 533
| 3,598
| 4.129456
| 0.242026
| 0.040891
| 0.022263
| 0.031804
| 0.294866
| 0.223989
| 0.210813
| 0.141299
| 0.060881
| 0
| 0
| 0.00577
| 0.229294
| 3,598
| 110
| 107
| 32.709091
| 0.787955
| 0.005003
| 0
| 0.063291
| 0
| 0
| 0.044146
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.151899
| false
| 0.012658
| 0.139241
| 0.037975
| 0.481013
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
7c3462f9e646dbe27aad64fea0cc1723870ee413
| 1,665
|
py
|
Python
|
setup.py
|
johannesulf/dsigma
|
729337c94669f4a0fdacb51b175df1e13e26304c
|
[
"MIT"
] | 4
|
2020-06-09T01:09:58.000Z
|
2021-09-26T16:39:16.000Z
|
setup.py
|
johannesulf/dsigma
|
729337c94669f4a0fdacb51b175df1e13e26304c
|
[
"MIT"
] | null | null | null |
setup.py
|
johannesulf/dsigma
|
729337c94669f4a0fdacb51b175df1e13e26304c
|
[
"MIT"
] | null | null | null |
from setuptools import setup, find_packages
from distutils.extension import Extension
from distutils.command.sdist import sdist

try:
    from Cython.Build import cythonize
    USE_CYTHON = True
except ImportError:
    USE_CYTHON = False

ext = 'pyx' if USE_CYTHON else 'c'

extensions = [Extension(
    'dsigma.precompute_engine', ['dsigma/precompute_engine.{}'.format(ext)],
    extra_compile_args=['-Ofast', '-march=native'])]

if USE_CYTHON:
    extensions = cythonize(extensions)


class sdist_with_cythonize(sdist):
    def run(self):
        cythonize(['dsigma/precompute_engine.pyx'])
        sdist.run(self)


with open('README.md', 'r') as fstream:
    long_description = fstream.read()

setup(
    name='dsigma',
    version='0.5.0',
    description=('A Galaxy-Galaxy Lensing Pipeline'),
    long_description=long_description,
    long_description_content_type='text/markdown',
    classifiers=[
        'Intended Audience :: Science/Research',
        'Topic :: Scientific/Engineering :: Astronomy',
        'Operating System :: OS Independent',
        'Programming Language :: Python :: 3 :: Only',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.6',
        'Programming Language :: Python :: 3.7',
    ],
    keywords='astronomy, weak-lensing',
    url='https://github.com/johannesulf/dsigma',
    author='Johannes Lange, Song Huang',
    author_email='jolange@ucsc.edu',
    packages=find_packages(),
    install_requires=['numpy', 'astropy', 'scipy', 'scikit-learn',
                      'healpy'],
    python_requires='>=3.4',
    ext_modules=extensions,
    cmdclass={'sdist': sdist_with_cythonize}
)
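# A minimal usage sketch (not part of the original file): with Cython
# installed, the custom sdist command regenerates the C source before
# packaging, while a plain install builds the compiled extension:
#
#     python setup.py sdist   # cythonizes dsigma/precompute_engine.pyx first
#     pip install .           # compiles dsigma.precompute_engine with -Ofast -march=native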
| 30.272727
| 76
| 0.667868
| 185
| 1,665
| 5.875676
| 0.583784
| 0.033119
| 0.091996
| 0.095676
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.008221
| 0.196396
| 1,665
| 54
| 77
| 30.833333
| 0.804185
| 0
| 0
| 0
| 0
| 0
| 0.34955
| 0.060661
| 0
| 0
| 0
| 0
| 0
| 1
| 0.021739
| false
| 0
| 0.108696
| 0
| 0.152174
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
7c34972839ffa0fc13d463ba6725ab4c70743477
| 1,967
|
py
|
Python
|
face_detector/modules/mod_faceDetection.py
|
jtfan3/face_detection
|
82e3bc839bf12c956f3166c07012912a0638048f
|
[
"MIT"
] | null | null | null |
face_detector/modules/mod_faceDetection.py
|
jtfan3/face_detection
|
82e3bc839bf12c956f3166c07012912a0638048f
|
[
"MIT"
] | null | null | null |
face_detector/modules/mod_faceDetection.py
|
jtfan3/face_detection
|
82e3bc839bf12c956f3166c07012912a0638048f
|
[
"MIT"
] | null | null | null |
import cv2
import mediapipe as mp


class FaceDetection():

    # initialize the face detection class with arguments from https://google.github.io/mediapipe/solutions/face_detection.html
    def __init__(self, model_selection=0, threshold=0.5):
        self.model_selection = model_selection
        self.threshold = threshold
        self.mp_draw = mp.solutions.drawing_utils
        self.face_detection = mp.solutions.face_detection.FaceDetection(model_selection=self.model_selection, min_detection_confidence=self.threshold)

    # gets bounding boxes using self.face_detection, returns a list of elements, element = (score, bbox_dict)
    def get_bboxs(self, frame):
        mp_detections = self.face_detection.process(frame)
        score_bboxs = []
        if mp_detections.detections:
            for detection in mp_detections.detections:
                score = detection.score[0]
                mp_bbox = detection.location_data.relative_bounding_box
                bbox_dict = {
                    'x_min': mp_bbox.xmin,
                    'y_min': mp_bbox.ymin,
                    'w': mp_bbox.width,
                    'h': mp_bbox.height
                }
                score_bboxs.append([score, bbox_dict])
        return score_bboxs

    # draws the bbox onto the frame
    def draw_bbox(self, face_probs, bbox_dict, frame, col=(255, 0, 255), gender=None, gender_score=None):
        x_min, y_min, w, h = bbox_dict.values()
        frame_h, frame_w, _ = frame.shape
        # scale the relative coordinates up to pixel coordinates
        bbox = int(x_min * frame_w), int(y_min * frame_h), int(w * frame_w), int(h * frame_h)
        # prepare text, depending on what attributes we predict
        text = str(round(face_probs, 3))
        if gender:
            text = gender + ": " + str(round(gender_score, 2))
        # draw bbox
        cv2.rectangle(frame, bbox, col, 2)
        cv2.putText(frame, text, (bbox[0], bbox[1] - 10),
                    cv2.FONT_HERSHEY_COMPLEX, 0.5, col, 1)
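# A minimal usage sketch (not part of the original file); MediaPipe expects
# RGB input, so the BGR webcam frame is converted before detection:
#
#     detector = FaceDetection(model_selection=0, threshold=0.5)
#     cap = cv2.VideoCapture(0)
#     ok, frame = cap.read()
#     if ok:
#         rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
#         for score, bbox in detector.get_bboxs(rgb):
#             detector.draw_bbox(score, bbox, frame)
#         cv2.imshow("faces", frame)
#         cv2.waitKey(0)
#     cap.release()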
| 40.979167
| 154
| 0.620234
| 254
| 1,967
| 4.574803
| 0.38189
| 0.067126
| 0.046472
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.017731
| 0.283172
| 1,967
| 47
| 155
| 41.851064
| 0.806383
| 0.160142
| 0
| 0
| 0
| 0
| 0.008511
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.090909
| false
| 0
| 0.060606
| 0
| 0.212121
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
7c378f7b0a34c442460ca831372ef84873f73309
| 768
|
py
|
Python
|
pymc/mc_enum.py
|
cherish-web/pymc
|
9c322abfdcceca0a78b633d85da23e1290c036c8
|
[
"Apache-2.0"
] | 4
|
2021-05-01T12:43:24.000Z
|
2022-01-25T03:44:32.000Z
|
pymc/mc_enum.py
|
cherish-web/pymc
|
9c322abfdcceca0a78b633d85da23e1290c036c8
|
[
"Apache-2.0"
] | null | null | null |
pymc/mc_enum.py
|
cherish-web/pymc
|
9c322abfdcceca0a78b633d85da23e1290c036c8
|
[
"Apache-2.0"
] | 2
|
2021-07-10T03:56:08.000Z
|
2021-09-30T14:59:35.000Z
|
# _*_ coding: utf-8 _*_
# @Time : 2021/3/29 08:57 AM
# @Author : cherish_peng
# @Email : 1058386071@qq.com
# @File : cmd.py
# @Software : PyCharm
from enum import Enum


class EnumSubTitle(Enum):
    Request4e = 0x5400
    # request
    Request = 0x5000
    # response
    Respond = 0xD000
    Respond4e = 0xD400


class EnumEndCode(Enum):
    # normal response
    Ok = 0x0000
    # error response
    Err = 0x51C0


class EnumCmd(Enum):
    # batch read
    ReadBatch = 0x0401
    # batch write
    WriteBatch = 0x1401


class EnumSubCmd(Enum):
    # with memory extension module b7=0, b6=0: random read, other than monitor data registration
    # read/write by bit
    Bit = 0x0001
    # read/write by word
    Word = 0x0000
    # with memory extension module b7=1, b6=0: random read, other than monitor data registration
    # read/write by bit
    BitEx = 0x0081
    # read/write by word
    WordEx = 0x0080


class EnumType(Enum):
    # bit type
    Bit = 0
    # word type
    Word = 1
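# A minimal usage sketch (not part of the original file): these enums appear
# to encode MELSEC MC-protocol frame fields, so a word-wise batch-read request
# would combine them roughly like this:
#
#     subtitle = EnumSubTitle.Request   # 0x5000
#     cmd = EnumCmd.ReadBatch           # 0x0401
#     subcmd = EnumSubCmd.Word          # 0x0000, word access
#     print(hex(subtitle.value), hex(cmd.value), hex(subcmd.value))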
| 13.714286
| 36
| 0.584635
| 93
| 768
| 4.774194
| 0.752688
| 0.013514
| 0.031532
| 0.067568
| 0.085586
| 0
| 0
| 0
| 0
| 0
| 0
| 0.172348
| 0.3125
| 768
| 55
| 37
| 13.963636
| 0.668561
| 0.324219
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.146045
| 0
| 0
| 1
| 0
| false
| 0
| 0.05
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|