hexsha
stringlengths
40
40
size
int64
2
1.02M
ext
stringclasses
10 values
lang
stringclasses
1 value
max_stars_repo_path
stringlengths
4
245
max_stars_repo_name
stringlengths
6
130
max_stars_repo_head_hexsha
stringlengths
40
40
max_stars_repo_licenses
listlengths
1
10
max_stars_count
int64
1
191k
max_stars_repo_stars_event_min_datetime
stringlengths
24
24
max_stars_repo_stars_event_max_datetime
stringlengths
24
24
max_issues_repo_path
stringlengths
4
245
max_issues_repo_name
stringlengths
6
130
max_issues_repo_head_hexsha
stringlengths
40
40
max_issues_repo_licenses
listlengths
1
10
max_issues_count
int64
1
67k
max_issues_repo_issues_event_min_datetime
stringlengths
24
24
max_issues_repo_issues_event_max_datetime
stringlengths
24
24
max_forks_repo_path
stringlengths
4
245
max_forks_repo_name
stringlengths
6
130
max_forks_repo_head_hexsha
stringlengths
40
40
max_forks_repo_licenses
listlengths
1
10
max_forks_count
int64
1
105k
max_forks_repo_forks_event_min_datetime
stringlengths
24
24
max_forks_repo_forks_event_max_datetime
stringlengths
24
24
content
stringlengths
2
1.02M
avg_line_length
float64
1
417k
max_line_length
int64
1
987k
alphanum_fraction
float64
0
1
content_no_comment
stringlengths
0
1.01M
is_comment_constant_removed
bool
1 class
is_sharp_comment_removed
bool
1 class
f723bc9754ddb86c41340d88ffa3f486b80a42f3
258
py
Python
weight_converter/utils.py
adamriaz/weight-converter
1ac82ef2935e76ec6c78f322e995fbce6454b6c8
[ "MIT" ]
null
null
null
weight_converter/utils.py
adamriaz/weight-converter
1ac82ef2935e76ec6c78f322e995fbce6454b6c8
[ "MIT" ]
null
null
null
weight_converter/utils.py
adamriaz/weight-converter
1ac82ef2935e76ec6c78f322e995fbce6454b6c8
[ "MIT" ]
null
null
null
def divide_by_zero_check(func): """ Decorator for checking division by zero from user input """ def inner(value): if value.value == 0: raise ValueError('Cannot divide by zero!') return func(value) return inner
25.8
59
0.616279
def divide_by_zero_check(func): def inner(value): if value.value == 0: raise ValueError('Cannot divide by zero!') return func(value) return inner
true
true
f723be6ac409c60b2a0d167791d7a99d3f39abbb
2,575
py
Python
external_libs/spglib-1.9.9/python/test/test_hall_number_from_symmetry.py
shunsuke-sato/octopus
dcf68a185cdb13708395546b1557ca46aed969f6
[ "Apache-2.0" ]
4
2016-11-17T09:03:11.000Z
2019-10-17T06:31:08.000Z
external_libs/spglib-1.9.9/python/test/test_hall_number_from_symmetry.py
shunsuke-sato/octopus
dcf68a185cdb13708395546b1557ca46aed969f6
[ "Apache-2.0" ]
1
2020-08-11T19:14:06.000Z
2020-08-11T19:14:06.000Z
external_libs/spglib-1.9.9/python/test/test_hall_number_from_symmetry.py
shunsuke-sato/octopus
dcf68a185cdb13708395546b1557ca46aed969f6
[ "Apache-2.0" ]
5
2016-11-22T20:30:46.000Z
2020-05-29T23:24:51.000Z
import unittest import numpy as np from spglib import get_symmetry_dataset, get_hall_number_from_symmetry from vasp import read_vasp from os import listdir dirnames = ('cubic', 'hexagonal', 'monoclinic', 'orthorhombic', 'tetragonal', 'triclinic', 'trigonal', 'distorted', 'virtual_structure') class TestGetHallNumberFromSymmetry(unittest.TestCase): def setUp(self): self._filenames = [] for d in dirnames: self._filenames += ["%s/%s" % (d, fname) for fname in listdir("./data/%s" % d)] def tearDown(self): pass def test_get_hall_number_from_symmetry(self): for fname in self._filenames: spgnum = int(fname.split('-')[1]) cell = read_vasp("./data/%s" % fname) if 'distorted' in fname: dataset = get_symmetry_dataset(cell, symprec=1e-1) hall_number = get_hall_number_from_symmetry( dataset['rotations'], dataset['translations'], symprec=1e-1) if hall_number != dataset['hall_number']: print("%d != %d in %s" % (hall_number, dataset['hall_number'], fname)) ref_cell = (dataset['std_lattice'], dataset['std_positions'], dataset['std_types']) dataset = get_symmetry_dataset(ref_cell, symprec=1e-5) hall_number = get_hall_number_from_symmetry( dataset['rotations'], dataset['translations'], symprec=1e-5) print("Using refinced cell: %d, %d in %s" % (hall_number, dataset['hall_number'], fname)) else: dataset = get_symmetry_dataset(cell, symprec=1e-5) hall_number = get_hall_number_from_symmetry( dataset['rotations'], dataset['translations'], symprec=1e-5) self.assertEqual(hall_number, dataset['hall_number'], msg=("%d != %d in %s" % (hall_number, dataset['hall_number'], fname))) if __name__ == '__main__': suite = unittest.TestLoader().loadTestsFromTestCase( TestGetHallNumberFromSymmetry) unittest.TextTestRunner(verbosity=2).run(suite) # unittest.main()
39.015152
80
0.514951
import unittest import numpy as np from spglib import get_symmetry_dataset, get_hall_number_from_symmetry from vasp import read_vasp from os import listdir dirnames = ('cubic', 'hexagonal', 'monoclinic', 'orthorhombic', 'tetragonal', 'triclinic', 'trigonal', 'distorted', 'virtual_structure') class TestGetHallNumberFromSymmetry(unittest.TestCase): def setUp(self): self._filenames = [] for d in dirnames: self._filenames += ["%s/%s" % (d, fname) for fname in listdir("./data/%s" % d)] def tearDown(self): pass def test_get_hall_number_from_symmetry(self): for fname in self._filenames: spgnum = int(fname.split('-')[1]) cell = read_vasp("./data/%s" % fname) if 'distorted' in fname: dataset = get_symmetry_dataset(cell, symprec=1e-1) hall_number = get_hall_number_from_symmetry( dataset['rotations'], dataset['translations'], symprec=1e-1) if hall_number != dataset['hall_number']: print("%d != %d in %s" % (hall_number, dataset['hall_number'], fname)) ref_cell = (dataset['std_lattice'], dataset['std_positions'], dataset['std_types']) dataset = get_symmetry_dataset(ref_cell, symprec=1e-5) hall_number = get_hall_number_from_symmetry( dataset['rotations'], dataset['translations'], symprec=1e-5) print("Using refinced cell: %d, %d in %s" % (hall_number, dataset['hall_number'], fname)) else: dataset = get_symmetry_dataset(cell, symprec=1e-5) hall_number = get_hall_number_from_symmetry( dataset['rotations'], dataset['translations'], symprec=1e-5) self.assertEqual(hall_number, dataset['hall_number'], msg=("%d != %d in %s" % (hall_number, dataset['hall_number'], fname))) if __name__ == '__main__': suite = unittest.TestLoader().loadTestsFromTestCase( TestGetHallNumberFromSymmetry) unittest.TextTestRunner(verbosity=2).run(suite)
true
true
f723be8c97c6be2454140fb16008806dc90a7f1d
2,740
py
Python
data/get_data.py
OmarJabri7/SAIA
f45f1d8073d4b56f5bed6f378f791102b067317c
[ "MIT" ]
1
2022-03-22T19:11:48.000Z
2022-03-22T19:11:48.000Z
data/get_data.py
OmarJabri7/Disaster-Tweets-Kaggle
54dfee4684dbfd5bf6cb58cc3974abc051022022
[ "MIT" ]
null
null
null
data/get_data.py
OmarJabri7/Disaster-Tweets-Kaggle
54dfee4684dbfd5bf6cb58cc3974abc051022022
[ "MIT" ]
null
null
null
import pandas as pd import numpy as np from nltk.corpus import words import nltk import re import string from data_processing import DisasterProcessor X = pd.read_csv("emotion_data/tweet_emotions.csv") stop_wrds = nltk.corpus.stopwords.words("english") columns = X.columns columns = ["content"] preprocessor = DisasterProcessor() eng_words = set(words.words()) for column in columns: X[column] = X[column].apply( lambda x: ' '.join([re.sub("[$@&#]","",w) for w in x.lower().split(" ") if w])) table = str.maketrans('', '', string.punctuation) X[column] = X[column].apply( lambda x: ' '.join([w.translate(table) for w in x.split(" ") if w.isalpha()])) X[column] = X[column].apply( lambda x: preprocessor.utils_preprocess_text(x, flg_stemm=False, flg_lemm=True, lst_stopwords=stop_wrds)) X[column] = X[column].apply( lambda x: ' '.join([w for w in x.split(" ") if len(w) >= 2])) X["content"] = X["content"].apply( lambda x: ' '.join(([w for w in x.split(" ") if w in eng_words])) ) unique_words = list(X['content'].str.split(' ', expand=True).stack().unique()) # X.Sentence = X.Sentence.apply(lambda x: x if len(x) > 2 else np.nan) # X["clean_content"] = X["content"].str.replace('[#,@,&,=,[,http://]', '') print(np.unique(X["sentiment"])) X = X.loc[X['sentiment'].isin(['sadness','happiness','love','hate','fun','enthusiasm','relief','fear','anger', 'surprise', 'worry'])] # X = X["sentiment" in ['sadness','happiness','love','hate','fun','enthusiasm','relief','fear','anger']] X = X[['sentiment','content']] # happy = X.loc[X['sentiment'].isin(['happiness','fun','enthusiasm','relief']), 'content'].values happy = X.loc[X['sentiment'].isin(['happiness']), 'content'].values love = X.loc[X['sentiment'].isin(['love']),'content'].values # sadness = X.loc[X['sentiment'].isin(['sadness','worry']), 'content'].values sadness = X.loc[X['sentiment'].isin(['sadness']), 'content'].values # angry = X.loc[X['sentiment'].isin(['hate','anger']), 'content'].values angry = X.loc[X['sentiment'].isin(['anger']), 
'content'].values surprise = X.loc[X['sentiment'].isin(['surprise']), 'content'].values fear = X.loc[X['sentiment'].isin(['fear']),'content'].values # emotions = dict(Emotion = ['happy','love','sadness','angry','surprise','fear']) # data = {"Sentence" : [happy, love, sadness, angry, surprise, fear], # "Emotion" : ['joy','love','sadness','anger','surprise','fear'],} # data = {"Sentence" : [sadness, angry, fear], "Emotion" : ['sadness','anger','fear'],} new_df = pd.DataFrame(data) new_df = new_df.explode('Sentence', ignore_index=True) new_df.to_csv('emotion_data/add_data.txt', header=None, index=None, sep=';')
37.027027
113
0.630657
import pandas as pd import numpy as np from nltk.corpus import words import nltk import re import string from data_processing import DisasterProcessor X = pd.read_csv("emotion_data/tweet_emotions.csv") stop_wrds = nltk.corpus.stopwords.words("english") columns = X.columns columns = ["content"] preprocessor = DisasterProcessor() eng_words = set(words.words()) for column in columns: X[column] = X[column].apply( lambda x: ' '.join([re.sub("[$@&#]","",w) for w in x.lower().split(" ") if w])) table = str.maketrans('', '', string.punctuation) X[column] = X[column].apply( lambda x: ' '.join([w.translate(table) for w in x.split(" ") if w.isalpha()])) X[column] = X[column].apply( lambda x: preprocessor.utils_preprocess_text(x, flg_stemm=False, flg_lemm=True, lst_stopwords=stop_wrds)) X[column] = X[column].apply( lambda x: ' '.join([w for w in x.split(" ") if len(w) >= 2])) X["content"] = X["content"].apply( lambda x: ' '.join(([w for w in x.split(" ") if w in eng_words])) ) unique_words = list(X['content'].str.split(' ', expand=True).stack().unique()) print(np.unique(X["sentiment"])) X = X.loc[X['sentiment'].isin(['sadness','happiness','love','hate','fun','enthusiasm','relief','fear','anger', 'surprise', 'worry'])] X = X[['sentiment','content']] happy = X.loc[X['sentiment'].isin(['happiness']), 'content'].values love = X.loc[X['sentiment'].isin(['love']),'content'].values sadness = X.loc[X['sentiment'].isin(['sadness']), 'content'].values angry = X.loc[X['sentiment'].isin(['anger']), 'content'].values surprise = X.loc[X['sentiment'].isin(['surprise']), 'content'].values fear = X.loc[X['sentiment'].isin(['fear']),'content'].values data = {"Sentence" : [sadness, angry, fear], "Emotion" : ['sadness','anger','fear'],} new_df = pd.DataFrame(data) new_df = new_df.explode('Sentence', ignore_index=True) new_df.to_csv('emotion_data/add_data.txt', header=None, index=None, sep=';')
true
true
f723c10b490155fd4a2bd2ac7ff767f6f0cf72e9
8,494
py
Python
crosshair/opcode_intercept.py
samuelchassot/CrossHair
4eac7a23e470567cc23e6d0916ce6dd6820eacd8
[ "MIT" ]
null
null
null
crosshair/opcode_intercept.py
samuelchassot/CrossHair
4eac7a23e470567cc23e6d0916ce6dd6820eacd8
[ "MIT" ]
null
null
null
crosshair/opcode_intercept.py
samuelchassot/CrossHair
4eac7a23e470567cc23e6d0916ce6dd6820eacd8
[ "MIT" ]
null
null
null
from collections.abc import MutableMapping, Set import dis from types import CodeType from types import FrameType from sys import version_info from crosshair.core import CrossHairValue from crosshair.core import register_opcode_patch from crosshair.libimpl.builtinslib import SymbolicInt from crosshair.libimpl.builtinslib import AnySymbolicStr from crosshair.libimpl.builtinslib import LazyIntSymbolicStr from crosshair.simplestructs import LinearSet from crosshair.simplestructs import ShellMutableSequence from crosshair.simplestructs import ShellMutableSet from crosshair.simplestructs import SimpleDict from crosshair.simplestructs import SliceView from crosshair.tracers import COMPOSITE_TRACER from crosshair.tracers import TracingModule from crosshair.tracers import frame_stack_read from crosshair.tracers import frame_stack_write from crosshair.util import CrosshairInternal BINARY_SUBSCR = dis.opmap["BINARY_SUBSCR"] BUILD_STRING = dis.opmap["BUILD_STRING"] COMPARE_OP = dis.opmap["COMPARE_OP"] CONTAINS_OP = dis.opmap.get("CONTAINS_OP", 118) FORMAT_VALUE = dis.opmap["FORMAT_VALUE"] MAP_ADD = dis.opmap["MAP_ADD"] SET_ADD = dis.opmap["SET_ADD"] def frame_op_arg(frame): return frame.f_code.co_code[frame.f_lasti + 1] class SymbolicSubscriptInterceptor(TracingModule): opcodes_wanted = frozenset([BINARY_SUBSCR]) def trace_op(self, frame, codeobj, codenum): # Note that because this is called from inside a Python trace handler, tracing # is automatically disabled, so there's no need for a `with NoTracing():` guard. key = frame_stack_read(frame, -1) if isinstance(key, (int, float, str)): return # If we got this far, the index is likely symbolic (or perhaps a slice object) container = frame_stack_read(frame, -2) container_type = type(container) if container_type is dict: # SimpleDict won't hash the keys it's given! 
wrapped_dict = SimpleDict(list(container.items())) frame_stack_write(frame, -2, wrapped_dict) elif container_type is list: if isinstance(key, slice): if key.step not in (1, None): return start, stop = key.start, key.stop if isinstance(start, SymbolicInt) or isinstance(stop, SymbolicInt): view_wrapper = SliceView(container, 0, len(container)) frame_stack_write(frame, -2, ShellMutableSequence(view_wrapper)) else: pass # Nothing useful to do with concrete list and symbolic numeric index. _CONTAINMENT_OP_TYPES = tuple( i for (i, name) in enumerate(dis.cmp_op) if name in ("in", "not in") ) assert len(_CONTAINMENT_OP_TYPES) in (0, 2) class ContainmentInterceptor(TracingModule): opcodes_wanted = frozenset( [ COMPARE_OP, CONTAINS_OP, ] ) def trace_op(self, frame, codeobj, codenum): if codenum == COMPARE_OP: compare_type = frame_op_arg(frame) if compare_type not in _CONTAINMENT_OP_TYPES: return item = frame_stack_read(frame, -2) if not isinstance(item, CrossHairValue): return container = frame_stack_read(frame, -1) containertype = type(container) new_container = None if containertype is str: new_container = LazyIntSymbolicStr([ord(c) for c in container]) elif containertype is set: new_container = ShellMutableSet(LinearSet(container)) if new_container is not None: frame_stack_write(frame, -1, new_container) class BuildStringInterceptor(TracingModule): """ Adds symbolic handling for the BUILD_STRING opcode (used by f-strings). BUILD_STRING concatenates strings from the stack is a fast, but unforgiving way: it requires all the substrings to be real Python strings. We work around this by replacing the substrings with empty strings, computing the concatenation ourselves, and swaping our result in after the opcode completes. 
""" opcodes_wanted = frozenset([BUILD_STRING]) def trace_op(self, frame, codeobj, codenum): count = frame_op_arg(frame) real_result = "" for offset in range(-(count), 0): substr = frame_stack_read(frame, offset) if not isinstance(substr, (str, AnySymbolicStr)): raise CrosshairInternal # Because we know these are all symbolic or concrete strings, it's ok to # not have tracing on when we do the concatenation here: real_result += substr frame_stack_write(frame, offset, "") def post_op(): frame_stack_write(frame, -1, real_result) COMPOSITE_TRACER.set_postop_callback(codeobj, post_op) class FormatValueInterceptor(TracingModule): """Avoid realization during FORMAT_VALUE (used by f-strings).""" opcodes_wanted = frozenset([FORMAT_VALUE]) def trace_op(self, frame, codeobj, codenum): flags = frame_op_arg(frame) if flags not in (0x00, 0x01): return # formatting spec is present orig_obj = frame_stack_read(frame, -1) if not isinstance(orig_obj, AnySymbolicStr): return # Format a dummy empty string, and swap the original back in: frame_stack_write(frame, -1, "") def post_op(): frame_stack_write(frame, -1, orig_obj) COMPOSITE_TRACER.set_postop_callback(codeobj, post_op) class MapAddInterceptor(TracingModule): """De-optimize MAP_ADD over symbolics (used in dict comprehensions).""" opcodes_wanted = frozenset([MAP_ADD]) def trace_op(self, frame: FrameType, codeobj: CodeType, codenum: int) -> None: dict_offset = -(frame_op_arg(frame) + 2) dict_obj = frame_stack_read(frame, dict_offset) if not isinstance(dict_obj, (dict, MutableMapping)): raise CrosshairInternal top, second = frame_stack_read(frame, -1), frame_stack_read(frame, -2) # Key and value were swapped in Python 3.8 key, value = (second, top) if version_info >= (3, 8) else (top, second) if isinstance(dict_obj, dict): if isinstance(key, CrossHairValue): dict_obj = SimpleDict(list(dict_obj.items())) else: # Key and dict are concrete; continue as normal. 
return # Have the interpreter do a fake assinment, namely `{}[1] = 1` frame_stack_write(frame, dict_offset, {}) frame_stack_write(frame, -1, 1) frame_stack_write(frame, -2, 1) # And do our own assignment separately: dict_obj[key] = value # Later, overwrite the interpreter's result with ours: def post_op(): frame_stack_write(frame, dict_offset + 2, dict_obj) COMPOSITE_TRACER.set_postop_callback(codeobj, post_op) class SetAddInterceptor(TracingModule): """De-optimize SET_ADD over symbolics (used in set comprehensions).""" opcodes_wanted = frozenset([SET_ADD]) def trace_op(self, frame: FrameType, codeobj: CodeType, codenum: int) -> None: set_offset = -(frame_op_arg(frame) + 1) set_obj = frame_stack_read(frame, set_offset) if not isinstance(set_obj, Set): raise CrosshairInternal(type(set_obj)) item = frame_stack_read(frame, -1) if isinstance(set_obj, set): if isinstance(item, CrossHairValue): set_obj = ShellMutableSet(set_obj) else: # Set and value are concrete; continue as normal. return # Have the interpreter do a fake addition, namely `set().add(1)` frame_stack_write(frame, set_offset, set()) frame_stack_write(frame, -1, 1) # And do our own addition separately: set_obj.add(item) # Later, overwrite the interpreter's result with ours: def post_op(): frame_stack_write(frame, set_offset + 1, set_obj) COMPOSITE_TRACER.set_postop_callback(codeobj, post_op) def make_registrations(): register_opcode_patch(SymbolicSubscriptInterceptor()) register_opcode_patch(ContainmentInterceptor()) register_opcode_patch(BuildStringInterceptor()) register_opcode_patch(FormatValueInterceptor()) register_opcode_patch(MapAddInterceptor()) register_opcode_patch(SetAddInterceptor())
38.089686
88
0.674005
from collections.abc import MutableMapping, Set import dis from types import CodeType from types import FrameType from sys import version_info from crosshair.core import CrossHairValue from crosshair.core import register_opcode_patch from crosshair.libimpl.builtinslib import SymbolicInt from crosshair.libimpl.builtinslib import AnySymbolicStr from crosshair.libimpl.builtinslib import LazyIntSymbolicStr from crosshair.simplestructs import LinearSet from crosshair.simplestructs import ShellMutableSequence from crosshair.simplestructs import ShellMutableSet from crosshair.simplestructs import SimpleDict from crosshair.simplestructs import SliceView from crosshair.tracers import COMPOSITE_TRACER from crosshair.tracers import TracingModule from crosshair.tracers import frame_stack_read from crosshair.tracers import frame_stack_write from crosshair.util import CrosshairInternal BINARY_SUBSCR = dis.opmap["BINARY_SUBSCR"] BUILD_STRING = dis.opmap["BUILD_STRING"] COMPARE_OP = dis.opmap["COMPARE_OP"] CONTAINS_OP = dis.opmap.get("CONTAINS_OP", 118) FORMAT_VALUE = dis.opmap["FORMAT_VALUE"] MAP_ADD = dis.opmap["MAP_ADD"] SET_ADD = dis.opmap["SET_ADD"] def frame_op_arg(frame): return frame.f_code.co_code[frame.f_lasti + 1] class SymbolicSubscriptInterceptor(TracingModule): opcodes_wanted = frozenset([BINARY_SUBSCR]) def trace_op(self, frame, codeobj, codenum): key = frame_stack_read(frame, -1) if isinstance(key, (int, float, str)): return # If we got this far, the index is likely symbolic (or perhaps a slice object) container = frame_stack_read(frame, -2) container_type = type(container) if container_type is dict: # SimpleDict won't hash the keys it's given! 
wrapped_dict = SimpleDict(list(container.items())) frame_stack_write(frame, -2, wrapped_dict) elif container_type is list: if isinstance(key, slice): if key.step not in (1, None): return start, stop = key.start, key.stop if isinstance(start, SymbolicInt) or isinstance(stop, SymbolicInt): view_wrapper = SliceView(container, 0, len(container)) frame_stack_write(frame, -2, ShellMutableSequence(view_wrapper)) else: pass # Nothing useful to do with concrete list and symbolic numeric index. _CONTAINMENT_OP_TYPES = tuple( i for (i, name) in enumerate(dis.cmp_op) if name in ("in", "not in") ) assert len(_CONTAINMENT_OP_TYPES) in (0, 2) class ContainmentInterceptor(TracingModule): opcodes_wanted = frozenset( [ COMPARE_OP, CONTAINS_OP, ] ) def trace_op(self, frame, codeobj, codenum): if codenum == COMPARE_OP: compare_type = frame_op_arg(frame) if compare_type not in _CONTAINMENT_OP_TYPES: return item = frame_stack_read(frame, -2) if not isinstance(item, CrossHairValue): return container = frame_stack_read(frame, -1) containertype = type(container) new_container = None if containertype is str: new_container = LazyIntSymbolicStr([ord(c) for c in container]) elif containertype is set: new_container = ShellMutableSet(LinearSet(container)) if new_container is not None: frame_stack_write(frame, -1, new_container) class BuildStringInterceptor(TracingModule): opcodes_wanted = frozenset([BUILD_STRING]) def trace_op(self, frame, codeobj, codenum): count = frame_op_arg(frame) real_result = "" for offset in range(-(count), 0): substr = frame_stack_read(frame, offset) if not isinstance(substr, (str, AnySymbolicStr)): raise CrosshairInternal # Because we know these are all symbolic or concrete strings, it's ok to real_result += substr frame_stack_write(frame, offset, "") def post_op(): frame_stack_write(frame, -1, real_result) COMPOSITE_TRACER.set_postop_callback(codeobj, post_op) class FormatValueInterceptor(TracingModule): opcodes_wanted = frozenset([FORMAT_VALUE]) def trace_op(self, 
frame, codeobj, codenum): flags = frame_op_arg(frame) if flags not in (0x00, 0x01): return orig_obj = frame_stack_read(frame, -1) if not isinstance(orig_obj, AnySymbolicStr): return frame_stack_write(frame, -1, "") def post_op(): frame_stack_write(frame, -1, orig_obj) COMPOSITE_TRACER.set_postop_callback(codeobj, post_op) class MapAddInterceptor(TracingModule): opcodes_wanted = frozenset([MAP_ADD]) def trace_op(self, frame: FrameType, codeobj: CodeType, codenum: int) -> None: dict_offset = -(frame_op_arg(frame) + 2) dict_obj = frame_stack_read(frame, dict_offset) if not isinstance(dict_obj, (dict, MutableMapping)): raise CrosshairInternal top, second = frame_stack_read(frame, -1), frame_stack_read(frame, -2) key, value = (second, top) if version_info >= (3, 8) else (top, second) if isinstance(dict_obj, dict): if isinstance(key, CrossHairValue): dict_obj = SimpleDict(list(dict_obj.items())) else: return frame_stack_write(frame, dict_offset, {}) frame_stack_write(frame, -1, 1) frame_stack_write(frame, -2, 1) dict_obj[key] = value def post_op(): frame_stack_write(frame, dict_offset + 2, dict_obj) COMPOSITE_TRACER.set_postop_callback(codeobj, post_op) class SetAddInterceptor(TracingModule): opcodes_wanted = frozenset([SET_ADD]) def trace_op(self, frame: FrameType, codeobj: CodeType, codenum: int) -> None: set_offset = -(frame_op_arg(frame) + 1) set_obj = frame_stack_read(frame, set_offset) if not isinstance(set_obj, Set): raise CrosshairInternal(type(set_obj)) item = frame_stack_read(frame, -1) if isinstance(set_obj, set): if isinstance(item, CrossHairValue): set_obj = ShellMutableSet(set_obj) else: # Set and value are concrete; continue as normal. 
return # Have the interpreter do a fake addition, namely `set().add(1)` frame_stack_write(frame, set_offset, set()) frame_stack_write(frame, -1, 1) # And do our own addition separately: set_obj.add(item) # Later, overwrite the interpreter's result with ours: def post_op(): frame_stack_write(frame, set_offset + 1, set_obj) COMPOSITE_TRACER.set_postop_callback(codeobj, post_op) def make_registrations(): register_opcode_patch(SymbolicSubscriptInterceptor()) register_opcode_patch(ContainmentInterceptor()) register_opcode_patch(BuildStringInterceptor()) register_opcode_patch(FormatValueInterceptor()) register_opcode_patch(MapAddInterceptor()) register_opcode_patch(SetAddInterceptor())
true
true
f723c12709c80e71332f0ad4c801db953c05c2c8
1,652
py
Python
linptech/packet.py
yangguozhanzhao/linptech
92ee1538d11baf473535cd0ed6b879adcee66e70
[ "MIT" ]
1
2020-07-26T05:37:43.000Z
2020-07-26T05:37:43.000Z
linptech/packet.py
yangguozhanzhao/linptech
92ee1538d11baf473535cd0ed6b879adcee66e70
[ "MIT" ]
null
null
null
linptech/packet.py
yangguozhanzhao/linptech
92ee1538d11baf473535cd0ed6b879adcee66e70
[ "MIT" ]
2
2018-04-03T04:17:12.000Z
2018-10-12T09:44:20.000Z
from linptech.crc8 import crc8 import logging class Packet(object): ''' Base class for Packet. Mainly used for for packet generation and Packet.parse_msg(buf) for parsing message. parse_msg() returns subclass, if one is defined for the data type. ''' def __init__(self, data=None, optional="00"*7): if data is None: logging.warning('Packet.data is None') else: self.data = data if optional is None: logging.info('Packet.optional is None.') else: self.optional = optional @staticmethod def check(packet): """ check packet with crc """ if packet.startswith("550") and \ crc8(packet[2:10])==packet[10:12] and \ crc8(packet[12:-2])==packet[-2:]: return True else: return False @staticmethod def parse(packet): """ parse an packet to data and optional for receive """ if Packet.check(packet): try: data_len=int(packet[4:6],16) data=packet[12:12+data_len*2] optional=packet[12+data_len*2:26+data_len*2] return data,optional except Exception as e: logging.error("parse packet wrong:%s",e) return else : logging.error("packet is invalid") return @staticmethod def create(data=None, optional="00"*7): """ Creates an packet ready for sending. Uses data and optional. """ try: data_len = "{0:>02}".format(hex(int(len(data)/2))[2:]) m1 = "00"+data_len+"0701" m2 = data+optional packet = "55"+m1+crc8(m1)+m2+crc8(m2) return packet except Exception as e: logging.error("create packet wrong:%s",e) return if __name__ == "__main__": logging.getLogger().setLevel(logging.INFO) data="1f8000004581020101" Packet.create(data)
23.6
67
0.672518
from linptech.crc8 import crc8 import logging class Packet(object): def __init__(self, data=None, optional="00"*7): if data is None: logging.warning('Packet.data is None') else: self.data = data if optional is None: logging.info('Packet.optional is None.') else: self.optional = optional @staticmethod def check(packet): if packet.startswith("550") and \ crc8(packet[2:10])==packet[10:12] and \ crc8(packet[12:-2])==packet[-2:]: return True else: return False @staticmethod def parse(packet): if Packet.check(packet): try: data_len=int(packet[4:6],16) data=packet[12:12+data_len*2] optional=packet[12+data_len*2:26+data_len*2] return data,optional except Exception as e: logging.error("parse packet wrong:%s",e) return else : logging.error("packet is invalid") return @staticmethod def create(data=None, optional="00"*7): try: data_len = "{0:>02}".format(hex(int(len(data)/2))[2:]) m1 = "00"+data_len+"0701" m2 = data+optional packet = "55"+m1+crc8(m1)+m2+crc8(m2) return packet except Exception as e: logging.error("create packet wrong:%s",e) return if __name__ == "__main__": logging.getLogger().setLevel(logging.INFO) data="1f8000004581020101" Packet.create(data)
true
true
f723c13ed49b8c87b5f0275078a084b4c8f235a1
16,785
py
Python
test/latency_position_test.py
AustinHellerRepo/GameManager
2eee8e821f551b4683e59ea8cde7e61c26cf8878
[ "MIT" ]
null
null
null
test/latency_position_test.py
AustinHellerRepo/GameManager
2eee8e821f551b4683e59ea8cde7e61c26cf8878
[ "MIT" ]
null
null
null
test/latency_position_test.py
AustinHellerRepo/GameManager
2eee8e821f551b4683e59ea8cde7e61c26cf8878
[ "MIT" ]
null
null
null
from __future__ import annotations import unittest import time import matplotlib.pyplot as plt import numpy as np from typing import List, Tuple, Dict, Set, Callable, Type class Dot(): def __init__(self, position: Tuple[float, float], velocity: Tuple[float, float], acceleration: Tuple[float, float]): self.__position = position self.__velocity = velocity self.__acceleration = acceleration self.__time_index_offset = 0 self.__acceleration_delta = None # type: Tuple[float, float] self.__acceleration_delta_end_time_index = None # type: float self.__acceleration_delta_end_time_index_acceleration = None # type: Tuple[float, float] def set_positiion(self, *, position: Tuple[float, float]): self.__position = position def set_velocity(self, *, velocity: Tuple[float, float]): self.__velocity = velocity def set_acceleration(self, *, acceleration: Tuple[float, float]): self.__acceleration = acceleration def get_position(self, *, time_index: float) -> Tuple[float, float]: calculated_time_index = time_index + self.__time_index_offset position = list(self.__position) for dimension_index in range(len(position)): position[dimension_index] += self.__velocity[dimension_index] * calculated_time_index if self.__acceleration_delta_end_time_index is None: position[dimension_index] += (self.__acceleration[dimension_index] * calculated_time_index ** 2) / 2.0 else: if calculated_time_index < self.__acceleration_delta_end_time_index: position[dimension_index] += (self.__acceleration[dimension_index] * calculated_time_index ** 2) / 2.0 position[dimension_index] += (self.__acceleration_delta[dimension_index] * calculated_time_index ** 3) / 6.0 else: position[dimension_index] += (self.__acceleration[dimension_index] * self.__acceleration_delta_end_time_index ** 2) / 2.0 position[dimension_index] += (self.__acceleration_delta_end_time_index_acceleration[dimension_index] * (calculated_time_index - self.__acceleration_delta_end_time_index) ** 2) / 2.0 position[dimension_index] += 
(self.__acceleration_delta[dimension_index] * self.__acceleration_delta_end_time_index ** 3) / 6.0 return tuple(position) def get_velocity(self, *, time_index: float) -> Tuple[float, float]: calculated_time_index = time_index + self.__time_index_offset velocity = list(self.__velocity) for dimension_index in range(len(velocity)): if self.__acceleration_delta_end_time_index is None: velocity[dimension_index] += self.__acceleration[dimension_index] * calculated_time_index else: if calculated_time_index < self.__acceleration_delta_end_time_index: velocity[dimension_index] += self.__acceleration[dimension_index] * calculated_time_index velocity[dimension_index] += (self.__acceleration_delta[dimension_index] * calculated_time_index**2) / 2.0 else: velocity[dimension_index] += self.__acceleration[dimension_index] * self.__acceleration_delta_end_time_index velocity[dimension_index] += self.__acceleration_delta_end_time_index_acceleration[dimension_index] * (calculated_time_index - self.__acceleration_delta_end_time_index) velocity[dimension_index] += (self.__acceleration_delta[dimension_index] * self.__acceleration_delta_end_time_index**2) / 2.0 return tuple(velocity) def get_acceleration(self, *, time_index: float) -> Tuple[float, float]: calculated_time_index = time_index + self.__time_index_offset acceleration = [0] * len(self.__position) for dimension_index in range(len(acceleration)): if self.__acceleration_delta_end_time_index is None: acceleration[dimension_index] += self.__acceleration[dimension_index] else: if calculated_time_index < self.__acceleration_delta_end_time_index: acceleration[dimension_index] += self.__acceleration[dimension_index] acceleration[dimension_index] += (self.__acceleration_delta[dimension_index] * calculated_time_index) else: acceleration[dimension_index] += self.__acceleration_delta_end_time_index_acceleration[dimension_index] acceleration[dimension_index] += (self.__acceleration_delta[dimension_index] * 
self.__acceleration_delta_end_time_index) return tuple(self.__acceleration) def bounce(self, *, time_index: float): bounce_position = self.get_position( time_index=time_index ) bounce_velocity = self.get_velocity( time_index=time_index ) bounce_acceleration = self.get_acceleration( time_index=time_index ) self.__position = bounce_position self.__velocity = (bounce_velocity[0], -bounce_velocity[1]) self.__acceleration = bounce_acceleration calculated_time_index = time_index + self.__time_index_offset if self.__acceleration_delta_end_time_index is not None: self.__acceleration_delta_end_time_index -= calculated_time_index if self.__acceleration_delta_end_time_index <= 0: self.__acceleration_delta = None self.__acceleration_delta_end_time_index = None self.__acceleration_delta_end_time_index_acceleration = None self.__time_index_offset = -time_index def reflect(self, *, time_index: float): reflect_position = self.get_position( time_index=time_index ) reflect_velocity = self.get_velocity( time_index=time_index ) reflect_acceleration = self.get_acceleration( time_index=time_index ) self.__position = reflect_position self.__velocity = (-reflect_velocity[0], reflect_velocity[1]) self.__acceleration = reflect_acceleration calculated_time_index = time_index + self.__time_index_offset if self.__acceleration_delta_end_time_index is not None: self.__acceleration_delta_end_time_index -= calculated_time_index if self.__acceleration_delta_end_time_index <= 0: self.__acceleration_delta = None self.__acceleration_delta_end_time_index = None self.__acceleration_delta_end_time_index_acceleration = None self.__time_index_offset = -time_index def set_state(self, *, position: Tuple[float, float], velocity: Tuple[float, float], acceleration: Tuple[float, float], time_index: float): self.__position = position self.__velocity = velocity self.__acceleration = acceleration calculated_time_index = time_index + self.__time_index_offset if self.__acceleration_delta_end_time_index is not None: 
self.__acceleration_delta_end_time_index -= calculated_time_index if self.__acceleration_delta_end_time_index <= 0: self.__acceleration_delta = None self.__acceleration_delta_end_time_index = None self.__acceleration_delta_end_time_index_acceleration = None self.__time_index_offset = -time_index def set_acceleration_delta(self, *, time_index: float, acceleration_delta: Tuple[float, float], end_time_index: float): time_index_position = self.get_position( time_index=time_index ) time_index_velocity = self.get_velocity( time_index=time_index ) time_index_acceleration = self.get_acceleration( time_index=time_index ) self.__position = time_index_position self.__velocity = time_index_velocity self.__acceleration = time_index_acceleration self.__time_index_offset = -time_index self.__acceleration_delta = acceleration_delta self.__acceleration_delta_end_time_index = end_time_index self.__acceleration_delta_end_time_index_acceleration = time_index_acceleration def merge(self, *, dot: Dot, current_time_index: float, merge_time_index_offset: float): self_position = self.get_position( time_index=current_time_index ) self_velocity = self.get_velocity( time_index=current_time_index ) destination_position = dot.get_position( time_index=current_time_index + merge_time_index_offset ) destination_velocity = dot.get_velocity( time_index=current_time_index + merge_time_index_offset ) destination_acceleration = dot.get_acceleration( time_index=current_time_index + merge_time_index_offset ) acceleration_delta = [] acceleration = [] for dimension_index in range(len(self.__position)): temp_acceleration_delta = (-12 * destination_position[dimension_index] + 6 * destination_velocity[dimension_index] * merge_time_index_offset + 12 * self_position[dimension_index] + 6 * self_velocity[dimension_index] * merge_time_index_offset) / (merge_time_index_offset**3) temp_acceleration = (destination_velocity[dimension_index] - self_velocity[dimension_index]) / merge_time_index_offset - 0.5 * 
temp_acceleration_delta * merge_time_index_offset acceleration_delta.append(temp_acceleration_delta) acceleration.append(temp_acceleration) self.__position = self_position self.__velocity = self_velocity self.__acceleration = tuple(acceleration) self.__acceleration_delta = tuple(acceleration_delta) self.__acceleration_delta_end_time_index = merge_time_index_offset self.__acceleration_delta_end_time_index_acceleration = destination_acceleration self.__time_index_offset = -current_time_index class DotPlotter(): def __init__(self, minimum_position: Tuple[float, float], maximum_position: Tuple[float, float]): self.__minimum_position = minimum_position self.__maximum_position = maximum_position self.__dots = [] # type: List[Dot] self.__x = [] self.__y = [] self.__figure = None self.__scatter = None def add_dot(self, *, dot: Dot): self.__dots.append(dot) def __get_scatter(self, *, time_index: float) -> Tuple[List[float], List[float]]: scatter = ([], []) for dot in self.__dots: position = dot.get_position( time_index=time_index ) if position[1] < self.__minimum_position[1]: dot.bounce( time_index=time_index ) if position[0] < self.__minimum_position[0] or position[0] > self.__maximum_position[0]: dot.reflect( time_index=time_index ) scatter[0].append(position[0]) scatter[1].append(position[1]) print(f"position: {position}") return scatter def show(self): plt.ion() self.__figure, ax = plt.subplots() self.__scatter = ax.scatter(self.__x, self.__y, facecolors="none", edgecolors=["black", "red"], s=10) plt.xlim(self.__minimum_position[0], self.__maximum_position[0]) plt.ylim(self.__minimum_position[1], self.__maximum_position[1]) plt.draw() def refresh(self, *, time_index: float): x, y = self.__get_scatter( time_index=time_index ) self.__x.clear() self.__x.extend(x) self.__y.clear() self.__y.extend(y) self.__scatter.set_offsets(np.c_[self.__x, self.__y]) self.__figure.canvas.draw_idle() plt.pause(0.01) class LatencyPositionTest(unittest.TestCase): def test_initialize(self): 
dot_plotter = DotPlotter( minimum_position=(0, 0), maximum_position=(10, 10) ) self.assertIsNotNone(dot_plotter) def test_move_dot_along_path(self): dot_plotter = DotPlotter( minimum_position=(0, 0), maximum_position=(10, 10) ) dot = Dot( position=(1, 9), velocity=(1, 0), acceleration=(0, -1) ) dot_plotter.add_dot( dot=dot ) dot_plotter.show() print(f"refreshing") time_index = 0.0 time_index_delta = 0.05 while time_index < 20.0: dot_plotter.refresh( time_index=time_index ) time_index += time_index_delta plt.waitforbuttonpress() def test_move_dot_along_path_in_separate_windows(self): dot_plotters_total = 2 dot_plotters = [] for dot_plotter_index in range(dot_plotters_total): dot_plotter = DotPlotter( minimum_position=(0, 0), maximum_position=(10, 10) ) dot = Dot( position=(1, 9), velocity=(1, 0), acceleration=(0, -1) ) dot_plotter.add_dot( dot=dot ) dot_plotter.show() dot_plotters.append(dot_plotter) print(f"refreshing") time_index = 0.0 time_index_delta = 0.05 while time_index < 10.0: for dot_plotter in dot_plotters: dot_plotter.refresh( time_index=time_index ) time_index += time_index_delta plt.waitforbuttonpress() def test_move_dot_along_path_then_alter_state(self): dot_plotter = DotPlotter( minimum_position=(0, 0), maximum_position=(10, 10) ) dot = Dot( position=(1, 9), velocity=(1, 0), acceleration=(0, -1) ) def alter_dot(*, time_index: float): nonlocal dot dot.set_state( position=dot.get_position( time_index=time_index ), velocity=(-1, 1), acceleration=(0, -1), time_index=time_index ) dot_plotter.add_dot( dot=dot ) dot_plotter.show() print(f"refreshing") time_index = 0.0 time_index_delta = 0.05 maximum_time_index = 20.0 is_altered = False while time_index < maximum_time_index: dot_plotter.refresh( time_index=time_index ) time_index += time_index_delta if not is_altered and time_index > maximum_time_index / 2.0: alter_dot( time_index=time_index ) is_altered = True plt.waitforbuttonpress() def test_move_dot_along_path_then_set_acceleration_delta(self): 
dot_plotter = DotPlotter( minimum_position=(0, 0), maximum_position=(10, 10) ) dot = Dot( position=(1, 9), velocity=(1, 0), acceleration=(0, -1) ) def alter_dot(*, time_index: float): nonlocal dot dot.set_acceleration_delta( time_index=time_index, acceleration_delta=(0, 0.5), end_time_index=5.0 ) dot_plotter.add_dot( dot=dot ) dot_plotter.show() print(f"refreshing") time_index = 0.0 time_index_delta = 0.05 maximum_time_index = 30.0 alter_time_index = 10.0 is_altered = False while time_index < maximum_time_index: dot_plotter.refresh( time_index=time_index ) time_index += time_index_delta if not is_altered and time_index > alter_time_index: alter_dot( time_index=time_index ) is_altered = True plt.waitforbuttonpress() def test_move_two_dots_along_path_in_same_windows(self): dots_total = 2 dots = [] dot_plotter = DotPlotter( minimum_position=(0, 0), maximum_position=(10, 10) ) for dot_index in range(dots_total): dot = Dot( position=(1, 9), velocity=(dot_index + 1, 0), acceleration=(0, -1) ) dot_plotter.add_dot( dot=dot ) dots.append(dot) dot_plotter.show() print(f"refreshing") time_index = 0.0 time_index_delta = 0.05 maximum_time_index = 20.0 while time_index < maximum_time_index: dot_plotter.refresh( time_index=time_index ) time_index += time_index_delta plt.waitforbuttonpress() def test_move_two_dots_along_path_in_same_windows_but_first_gets_acceleration_delta(self): dots_total = 2 dots = [] dot_plotter = DotPlotter( minimum_position=(0, 0), maximum_position=(10, 10) ) for dot_index in range(dots_total): dot = Dot( position=(1, 9), velocity=(1, 0), acceleration=(0, -1) ) dot_plotter.add_dot( dot=dot ) dots.append(dot) dot_plotter.show() def alter_dot(*, time_index: float): nonlocal dots dots[0].set_acceleration_delta( time_index=time_index, acceleration_delta=(0, 0.5), end_time_index=5.0 ) print(f"refreshing") time_index = 0.0 time_index_delta = 0.05 maximum_time_index = 30.0 alter_time_index = 10.0 is_altered = False while time_index < maximum_time_index: 
dot_plotter.refresh( time_index=time_index ) time_index += time_index_delta if not is_altered and time_index > alter_time_index: alter_dot( time_index=time_index ) is_altered = True plt.waitforbuttonpress() def test_move_two_dots_along_path_in_same_windows_second_merges_specific_time_index_after_first_altered(self): dots_total = 2 dots = [] dot_plotter = DotPlotter( minimum_position=(0, 0), maximum_position=(10, 10) ) for dot_index in range(dots_total): dot = Dot( position=(1, 9), velocity=(1, 0), acceleration=(0, -1) ) dot_plotter.add_dot( dot=dot ) dots.append(dot) dot_plotter.show() def alter_dot(*, time_index: float): nonlocal dots if False: dots[0].set_acceleration_delta( time_index=time_index, acceleration_delta=(0, 0.5), end_time_index=1.0 ) else: dots[0].set_velocity( velocity=(-1, 1) ) def merge_dot(*, time_index: float): nonlocal dots dots[1].merge( dot=dots[0], current_time_index=time_index, merge_time_index_offset=1.0 ) print(f"refreshing") time_index = 0.0 time_index_delta = 0.01 maximum_time_index = 30.0 alter_time_index = 10.0 merge_time_index = 11.0 is_altered = False is_merged = False while time_index < maximum_time_index: dot_plotter.refresh( time_index=time_index ) time_index += time_index_delta if not is_altered and time_index > alter_time_index: alter_dot( time_index=time_index ) is_altered = True if not is_merged and time_index > merge_time_index: merge_dot( time_index=time_index ) is_merged = True plt.waitforbuttonpress()
28.305228
276
0.742985
from __future__ import annotations import unittest import time import matplotlib.pyplot as plt import numpy as np from typing import List, Tuple, Dict, Set, Callable, Type class Dot(): def __init__(self, position: Tuple[float, float], velocity: Tuple[float, float], acceleration: Tuple[float, float]): self.__position = position self.__velocity = velocity self.__acceleration = acceleration self.__time_index_offset = 0 self.__acceleration_delta = None self.__acceleration_delta_end_time_index = None self.__acceleration_delta_end_time_index_acceleration = None def set_positiion(self, *, position: Tuple[float, float]): self.__position = position def set_velocity(self, *, velocity: Tuple[float, float]): self.__velocity = velocity def set_acceleration(self, *, acceleration: Tuple[float, float]): self.__acceleration = acceleration def get_position(self, *, time_index: float) -> Tuple[float, float]: calculated_time_index = time_index + self.__time_index_offset position = list(self.__position) for dimension_index in range(len(position)): position[dimension_index] += self.__velocity[dimension_index] * calculated_time_index if self.__acceleration_delta_end_time_index is None: position[dimension_index] += (self.__acceleration[dimension_index] * calculated_time_index ** 2) / 2.0 else: if calculated_time_index < self.__acceleration_delta_end_time_index: position[dimension_index] += (self.__acceleration[dimension_index] * calculated_time_index ** 2) / 2.0 position[dimension_index] += (self.__acceleration_delta[dimension_index] * calculated_time_index ** 3) / 6.0 else: position[dimension_index] += (self.__acceleration[dimension_index] * self.__acceleration_delta_end_time_index ** 2) / 2.0 position[dimension_index] += (self.__acceleration_delta_end_time_index_acceleration[dimension_index] * (calculated_time_index - self.__acceleration_delta_end_time_index) ** 2) / 2.0 position[dimension_index] += (self.__acceleration_delta[dimension_index] * self.__acceleration_delta_end_time_index 
** 3) / 6.0 return tuple(position) def get_velocity(self, *, time_index: float) -> Tuple[float, float]: calculated_time_index = time_index + self.__time_index_offset velocity = list(self.__velocity) for dimension_index in range(len(velocity)): if self.__acceleration_delta_end_time_index is None: velocity[dimension_index] += self.__acceleration[dimension_index] * calculated_time_index else: if calculated_time_index < self.__acceleration_delta_end_time_index: velocity[dimension_index] += self.__acceleration[dimension_index] * calculated_time_index velocity[dimension_index] += (self.__acceleration_delta[dimension_index] * calculated_time_index**2) / 2.0 else: velocity[dimension_index] += self.__acceleration[dimension_index] * self.__acceleration_delta_end_time_index velocity[dimension_index] += self.__acceleration_delta_end_time_index_acceleration[dimension_index] * (calculated_time_index - self.__acceleration_delta_end_time_index) velocity[dimension_index] += (self.__acceleration_delta[dimension_index] * self.__acceleration_delta_end_time_index**2) / 2.0 return tuple(velocity) def get_acceleration(self, *, time_index: float) -> Tuple[float, float]: calculated_time_index = time_index + self.__time_index_offset acceleration = [0] * len(self.__position) for dimension_index in range(len(acceleration)): if self.__acceleration_delta_end_time_index is None: acceleration[dimension_index] += self.__acceleration[dimension_index] else: if calculated_time_index < self.__acceleration_delta_end_time_index: acceleration[dimension_index] += self.__acceleration[dimension_index] acceleration[dimension_index] += (self.__acceleration_delta[dimension_index] * calculated_time_index) else: acceleration[dimension_index] += self.__acceleration_delta_end_time_index_acceleration[dimension_index] acceleration[dimension_index] += (self.__acceleration_delta[dimension_index] * self.__acceleration_delta_end_time_index) return tuple(self.__acceleration) def bounce(self, *, time_index: float): 
bounce_position = self.get_position( time_index=time_index ) bounce_velocity = self.get_velocity( time_index=time_index ) bounce_acceleration = self.get_acceleration( time_index=time_index ) self.__position = bounce_position self.__velocity = (bounce_velocity[0], -bounce_velocity[1]) self.__acceleration = bounce_acceleration calculated_time_index = time_index + self.__time_index_offset if self.__acceleration_delta_end_time_index is not None: self.__acceleration_delta_end_time_index -= calculated_time_index if self.__acceleration_delta_end_time_index <= 0: self.__acceleration_delta = None self.__acceleration_delta_end_time_index = None self.__acceleration_delta_end_time_index_acceleration = None self.__time_index_offset = -time_index def reflect(self, *, time_index: float): reflect_position = self.get_position( time_index=time_index ) reflect_velocity = self.get_velocity( time_index=time_index ) reflect_acceleration = self.get_acceleration( time_index=time_index ) self.__position = reflect_position self.__velocity = (-reflect_velocity[0], reflect_velocity[1]) self.__acceleration = reflect_acceleration calculated_time_index = time_index + self.__time_index_offset if self.__acceleration_delta_end_time_index is not None: self.__acceleration_delta_end_time_index -= calculated_time_index if self.__acceleration_delta_end_time_index <= 0: self.__acceleration_delta = None self.__acceleration_delta_end_time_index = None self.__acceleration_delta_end_time_index_acceleration = None self.__time_index_offset = -time_index def set_state(self, *, position: Tuple[float, float], velocity: Tuple[float, float], acceleration: Tuple[float, float], time_index: float): self.__position = position self.__velocity = velocity self.__acceleration = acceleration calculated_time_index = time_index + self.__time_index_offset if self.__acceleration_delta_end_time_index is not None: self.__acceleration_delta_end_time_index -= calculated_time_index if self.__acceleration_delta_end_time_index <= 0: 
self.__acceleration_delta = None self.__acceleration_delta_end_time_index = None self.__acceleration_delta_end_time_index_acceleration = None self.__time_index_offset = -time_index def set_acceleration_delta(self, *, time_index: float, acceleration_delta: Tuple[float, float], end_time_index: float): time_index_position = self.get_position( time_index=time_index ) time_index_velocity = self.get_velocity( time_index=time_index ) time_index_acceleration = self.get_acceleration( time_index=time_index ) self.__position = time_index_position self.__velocity = time_index_velocity self.__acceleration = time_index_acceleration self.__time_index_offset = -time_index self.__acceleration_delta = acceleration_delta self.__acceleration_delta_end_time_index = end_time_index self.__acceleration_delta_end_time_index_acceleration = time_index_acceleration def merge(self, *, dot: Dot, current_time_index: float, merge_time_index_offset: float): self_position = self.get_position( time_index=current_time_index ) self_velocity = self.get_velocity( time_index=current_time_index ) destination_position = dot.get_position( time_index=current_time_index + merge_time_index_offset ) destination_velocity = dot.get_velocity( time_index=current_time_index + merge_time_index_offset ) destination_acceleration = dot.get_acceleration( time_index=current_time_index + merge_time_index_offset ) acceleration_delta = [] acceleration = [] for dimension_index in range(len(self.__position)): temp_acceleration_delta = (-12 * destination_position[dimension_index] + 6 * destination_velocity[dimension_index] * merge_time_index_offset + 12 * self_position[dimension_index] + 6 * self_velocity[dimension_index] * merge_time_index_offset) / (merge_time_index_offset**3) temp_acceleration = (destination_velocity[dimension_index] - self_velocity[dimension_index]) / merge_time_index_offset - 0.5 * temp_acceleration_delta * merge_time_index_offset acceleration_delta.append(temp_acceleration_delta) 
acceleration.append(temp_acceleration) self.__position = self_position self.__velocity = self_velocity self.__acceleration = tuple(acceleration) self.__acceleration_delta = tuple(acceleration_delta) self.__acceleration_delta_end_time_index = merge_time_index_offset self.__acceleration_delta_end_time_index_acceleration = destination_acceleration self.__time_index_offset = -current_time_index class DotPlotter(): def __init__(self, minimum_position: Tuple[float, float], maximum_position: Tuple[float, float]): self.__minimum_position = minimum_position self.__maximum_position = maximum_position self.__dots = [] self.__x = [] self.__y = [] self.__figure = None self.__scatter = None def add_dot(self, *, dot: Dot): self.__dots.append(dot) def __get_scatter(self, *, time_index: float) -> Tuple[List[float], List[float]]: scatter = ([], []) for dot in self.__dots: position = dot.get_position( time_index=time_index ) if position[1] < self.__minimum_position[1]: dot.bounce( time_index=time_index ) if position[0] < self.__minimum_position[0] or position[0] > self.__maximum_position[0]: dot.reflect( time_index=time_index ) scatter[0].append(position[0]) scatter[1].append(position[1]) print(f"position: {position}") return scatter def show(self): plt.ion() self.__figure, ax = plt.subplots() self.__scatter = ax.scatter(self.__x, self.__y, facecolors="none", edgecolors=["black", "red"], s=10) plt.xlim(self.__minimum_position[0], self.__maximum_position[0]) plt.ylim(self.__minimum_position[1], self.__maximum_position[1]) plt.draw() def refresh(self, *, time_index: float): x, y = self.__get_scatter( time_index=time_index ) self.__x.clear() self.__x.extend(x) self.__y.clear() self.__y.extend(y) self.__scatter.set_offsets(np.c_[self.__x, self.__y]) self.__figure.canvas.draw_idle() plt.pause(0.01) class LatencyPositionTest(unittest.TestCase): def test_initialize(self): dot_plotter = DotPlotter( minimum_position=(0, 0), maximum_position=(10, 10) ) self.assertIsNotNone(dot_plotter) def 
test_move_dot_along_path(self): dot_plotter = DotPlotter( minimum_position=(0, 0), maximum_position=(10, 10) ) dot = Dot( position=(1, 9), velocity=(1, 0), acceleration=(0, -1) ) dot_plotter.add_dot( dot=dot ) dot_plotter.show() print(f"refreshing") time_index = 0.0 time_index_delta = 0.05 while time_index < 20.0: dot_plotter.refresh( time_index=time_index ) time_index += time_index_delta plt.waitforbuttonpress() def test_move_dot_along_path_in_separate_windows(self): dot_plotters_total = 2 dot_plotters = [] for dot_plotter_index in range(dot_plotters_total): dot_plotter = DotPlotter( minimum_position=(0, 0), maximum_position=(10, 10) ) dot = Dot( position=(1, 9), velocity=(1, 0), acceleration=(0, -1) ) dot_plotter.add_dot( dot=dot ) dot_plotter.show() dot_plotters.append(dot_plotter) print(f"refreshing") time_index = 0.0 time_index_delta = 0.05 while time_index < 10.0: for dot_plotter in dot_plotters: dot_plotter.refresh( time_index=time_index ) time_index += time_index_delta plt.waitforbuttonpress() def test_move_dot_along_path_then_alter_state(self): dot_plotter = DotPlotter( minimum_position=(0, 0), maximum_position=(10, 10) ) dot = Dot( position=(1, 9), velocity=(1, 0), acceleration=(0, -1) ) def alter_dot(*, time_index: float): nonlocal dot dot.set_state( position=dot.get_position( time_index=time_index ), velocity=(-1, 1), acceleration=(0, -1), time_index=time_index ) dot_plotter.add_dot( dot=dot ) dot_plotter.show() print(f"refreshing") time_index = 0.0 time_index_delta = 0.05 maximum_time_index = 20.0 is_altered = False while time_index < maximum_time_index: dot_plotter.refresh( time_index=time_index ) time_index += time_index_delta if not is_altered and time_index > maximum_time_index / 2.0: alter_dot( time_index=time_index ) is_altered = True plt.waitforbuttonpress() def test_move_dot_along_path_then_set_acceleration_delta(self): dot_plotter = DotPlotter( minimum_position=(0, 0), maximum_position=(10, 10) ) dot = Dot( position=(1, 9), velocity=(1, 0), 
acceleration=(0, -1) ) def alter_dot(*, time_index: float): nonlocal dot dot.set_acceleration_delta( time_index=time_index, acceleration_delta=(0, 0.5), end_time_index=5.0 ) dot_plotter.add_dot( dot=dot ) dot_plotter.show() print(f"refreshing") time_index = 0.0 time_index_delta = 0.05 maximum_time_index = 30.0 alter_time_index = 10.0 is_altered = False while time_index < maximum_time_index: dot_plotter.refresh( time_index=time_index ) time_index += time_index_delta if not is_altered and time_index > alter_time_index: alter_dot( time_index=time_index ) is_altered = True plt.waitforbuttonpress() def test_move_two_dots_along_path_in_same_windows(self): dots_total = 2 dots = [] dot_plotter = DotPlotter( minimum_position=(0, 0), maximum_position=(10, 10) ) for dot_index in range(dots_total): dot = Dot( position=(1, 9), velocity=(dot_index + 1, 0), acceleration=(0, -1) ) dot_plotter.add_dot( dot=dot ) dots.append(dot) dot_plotter.show() print(f"refreshing") time_index = 0.0 time_index_delta = 0.05 maximum_time_index = 20.0 while time_index < maximum_time_index: dot_plotter.refresh( time_index=time_index ) time_index += time_index_delta plt.waitforbuttonpress() def test_move_two_dots_along_path_in_same_windows_but_first_gets_acceleration_delta(self): dots_total = 2 dots = [] dot_plotter = DotPlotter( minimum_position=(0, 0), maximum_position=(10, 10) ) for dot_index in range(dots_total): dot = Dot( position=(1, 9), velocity=(1, 0), acceleration=(0, -1) ) dot_plotter.add_dot( dot=dot ) dots.append(dot) dot_plotter.show() def alter_dot(*, time_index: float): nonlocal dots dots[0].set_acceleration_delta( time_index=time_index, acceleration_delta=(0, 0.5), end_time_index=5.0 ) print(f"refreshing") time_index = 0.0 time_index_delta = 0.05 maximum_time_index = 30.0 alter_time_index = 10.0 is_altered = False while time_index < maximum_time_index: dot_plotter.refresh( time_index=time_index ) time_index += time_index_delta if not is_altered and time_index > alter_time_index: 
alter_dot( time_index=time_index ) is_altered = True plt.waitforbuttonpress() def test_move_two_dots_along_path_in_same_windows_second_merges_specific_time_index_after_first_altered(self): dots_total = 2 dots = [] dot_plotter = DotPlotter( minimum_position=(0, 0), maximum_position=(10, 10) ) for dot_index in range(dots_total): dot = Dot( position=(1, 9), velocity=(1, 0), acceleration=(0, -1) ) dot_plotter.add_dot( dot=dot ) dots.append(dot) dot_plotter.show() def alter_dot(*, time_index: float): nonlocal dots if False: dots[0].set_acceleration_delta( time_index=time_index, acceleration_delta=(0, 0.5), end_time_index=1.0 ) else: dots[0].set_velocity( velocity=(-1, 1) ) def merge_dot(*, time_index: float): nonlocal dots dots[1].merge( dot=dots[0], current_time_index=time_index, merge_time_index_offset=1.0 ) print(f"refreshing") time_index = 0.0 time_index_delta = 0.01 maximum_time_index = 30.0 alter_time_index = 10.0 merge_time_index = 11.0 is_altered = False is_merged = False while time_index < maximum_time_index: dot_plotter.refresh( time_index=time_index ) time_index += time_index_delta if not is_altered and time_index > alter_time_index: alter_dot( time_index=time_index ) is_altered = True if not is_merged and time_index > merge_time_index: merge_dot( time_index=time_index ) is_merged = True plt.waitforbuttonpress()
true
true
f723c14378cc4d16c4baa11917ffffcdd73ab43e
190
py
Python
mmdet3d/ops/paconv/__init__.py
maskjp/mmdetection3d
98f332372b1a4c82bc2d57588a5d764f4176c869
[ "Apache-2.0" ]
1
2022-03-04T19:29:42.000Z
2022-03-04T19:29:42.000Z
mmdet3d/ops/paconv/__init__.py
maskjp/mmdetection3d
98f332372b1a4c82bc2d57588a5d764f4176c869
[ "Apache-2.0" ]
null
null
null
mmdet3d/ops/paconv/__init__.py
maskjp/mmdetection3d
98f332372b1a4c82bc2d57588a5d764f4176c869
[ "Apache-2.0" ]
null
null
null
# Copyright (c) OpenMMLab. All rights reserved. from .assign_score import assign_score_withk from .paconv import PAConv, PAConvCUDA __all__ = ['assign_score_withk', 'PAConv', 'PAConvCUDA']
31.666667
56
0.784211
from .assign_score import assign_score_withk from .paconv import PAConv, PAConvCUDA __all__ = ['assign_score_withk', 'PAConv', 'PAConvCUDA']
true
true
f723c28d10fea756d280aba926ac651ddd9a5b0d
354
py
Python
wiser_care_theme/wiser_care_theme/doctype/wiser_website_settings/wiser_website_settings.py
MostafaFekry/wiser_care_theme
9892442803dffeeb5e02136c87c2eb4cc9144b60
[ "MIT" ]
null
null
null
wiser_care_theme/wiser_care_theme/doctype/wiser_website_settings/wiser_website_settings.py
MostafaFekry/wiser_care_theme
9892442803dffeeb5e02136c87c2eb4cc9144b60
[ "MIT" ]
null
null
null
wiser_care_theme/wiser_care_theme/doctype/wiser_website_settings/wiser_website_settings.py
MostafaFekry/wiser_care_theme
9892442803dffeeb5e02136c87c2eb4cc9144b60
[ "MIT" ]
null
null
null
# -*- coding: utf-8 -*- # Copyright (c) 2019, Systematic and contributors # For license information, please see license.txt from __future__ import unicode_literals import frappe from frappe.model.document import Document class WiserWebsiteSettings(Document): def on_update(self): from frappe.website.render import clear_cache clear_cache("index")
27.230769
49
0.788136
from __future__ import unicode_literals import frappe from frappe.model.document import Document class WiserWebsiteSettings(Document): def on_update(self): from frappe.website.render import clear_cache clear_cache("index")
true
true
f723c54aee85d58c016b328753e3fa153669b6d6
2,533
py
Python
voctocore/tests/test_audiomix_multiple_sources.py
0xflotus/voctomix
3156f3546890e6ae8d379df17e5cc718eee14b15
[ "MIT" ]
521
2015-01-07T21:43:30.000Z
2022-03-17T22:07:13.000Z
voctocore/tests/test_audiomix_multiple_sources.py
0xflotus/voctomix
3156f3546890e6ae8d379df17e5cc718eee14b15
[ "MIT" ]
241
2015-05-27T10:11:09.000Z
2022-02-11T03:29:20.000Z
voctocore/tests/test_audiomix_multiple_sources.py
0xflotus/voctomix
3156f3546890e6ae8d379df17e5cc718eee14b15
[ "MIT" ]
111
2015-08-13T20:06:52.000Z
2022-03-11T09:48:46.000Z
import unittest from lib.errors.configuration_error import ConfigurationError from tests.helper.voctomix_test import VoctomixTest from lib.audiomix import AudioMix from lib.config import Config # noinspection PyUnusedLocal class AudiomixMultipleSources(VoctomixTest): def test_no_configured_audiosource_sets_first_to_full(self): audiomixer = AudioMix() self.assertListEqual(audiomixer.names, ["cam1", "cam2", "grabber"]) self.assertListEqual(audiomixer.volumes, [1.0, 0.0, 0.0]) def test_audiosource_sets_source_volume_to_full(self): Config.given("mix", "audiosource", "cam2") audiomixer = AudioMix() self.assertListEqual(audiomixer.names, ["cam1", "cam2", "grabber"]) self.assertListEqual(audiomixer.volumes, [0.0, 1.0, 0.0]) def test_per_source_volumes_set_volumes_to_configured_level(self): Config.given("source.cam1", "volume", "0.23") Config.given("source.cam2", "volume", "0.0") Config.given("source.grabber", "volume", "0.42") audiomixer = AudioMix() self.assertListEqual(audiomixer.names, ["cam1", "cam2", "grabber"]) self.assertListEqual(audiomixer.volumes, [0.23, 0.0, 0.42]) def test_audiosource_together_with_per_source_volumes_for_the_same_source_raises_an_error(self): Config.given("mix", "audiosource", "cam1") Config.given("source.cam1", "volume", "0.23") with self.assertRaises(ConfigurationError): audiomixer = AudioMix() def test_audiosource_together_with_per_source_volumes_for_different_sources_raises_an_error(self): Config.given("mix", "audiosource", "cam2") Config.given("source.cam1", "volume", "0.23") with self.assertRaises(ConfigurationError): audiomixer = AudioMix() def test_invalid_audiosource_raises_an_error(self): Config.given("mix", "audiosource", "camInvalid") with self.assertRaises(ConfigurationError): audiomixer = AudioMix() def test_configuring_audiosource_disables_ui_audio_selector(self): Config.given("mix", "audiosource", "cam1") audiomixer = AudioMix() self.assertEqual(Config.getboolean('audio', 'volumecontrol'), False) def 
test_configuring_per_source_volumes_disables_ui_audio_selector(self): Config.given("source.cam1", "volume", "1.0") audiomixer = AudioMix() self.assertEqual(Config.getboolean('audio', 'volumecontrol'), False) if __name__ == '__main__': unittest.main()
36.185714
102
0.698776
import unittest from lib.errors.configuration_error import ConfigurationError from tests.helper.voctomix_test import VoctomixTest from lib.audiomix import AudioMix from lib.config import Config class AudiomixMultipleSources(VoctomixTest): def test_no_configured_audiosource_sets_first_to_full(self): audiomixer = AudioMix() self.assertListEqual(audiomixer.names, ["cam1", "cam2", "grabber"]) self.assertListEqual(audiomixer.volumes, [1.0, 0.0, 0.0]) def test_audiosource_sets_source_volume_to_full(self): Config.given("mix", "audiosource", "cam2") audiomixer = AudioMix() self.assertListEqual(audiomixer.names, ["cam1", "cam2", "grabber"]) self.assertListEqual(audiomixer.volumes, [0.0, 1.0, 0.0]) def test_per_source_volumes_set_volumes_to_configured_level(self): Config.given("source.cam1", "volume", "0.23") Config.given("source.cam2", "volume", "0.0") Config.given("source.grabber", "volume", "0.42") audiomixer = AudioMix() self.assertListEqual(audiomixer.names, ["cam1", "cam2", "grabber"]) self.assertListEqual(audiomixer.volumes, [0.23, 0.0, 0.42]) def test_audiosource_together_with_per_source_volumes_for_the_same_source_raises_an_error(self): Config.given("mix", "audiosource", "cam1") Config.given("source.cam1", "volume", "0.23") with self.assertRaises(ConfigurationError): audiomixer = AudioMix() def test_audiosource_together_with_per_source_volumes_for_different_sources_raises_an_error(self): Config.given("mix", "audiosource", "cam2") Config.given("source.cam1", "volume", "0.23") with self.assertRaises(ConfigurationError): audiomixer = AudioMix() def test_invalid_audiosource_raises_an_error(self): Config.given("mix", "audiosource", "camInvalid") with self.assertRaises(ConfigurationError): audiomixer = AudioMix() def test_configuring_audiosource_disables_ui_audio_selector(self): Config.given("mix", "audiosource", "cam1") audiomixer = AudioMix() self.assertEqual(Config.getboolean('audio', 'volumecontrol'), False) def 
test_configuring_per_source_volumes_disables_ui_audio_selector(self): Config.given("source.cam1", "volume", "1.0") audiomixer = AudioMix() self.assertEqual(Config.getboolean('audio', 'volumecontrol'), False) if __name__ == '__main__': unittest.main()
true
true
f723c5c9d3b222d7776eada4ac0bace2610445b6
450
py
Python
V2RaycSpider1225/BusinessCentralLayer/middleware/work_io.py
kujie0121/V2RayCloudSpider
32cd65f7811374679fe09cfae2fda805d42fe7ab
[ "MIT" ]
1
2021-02-17T07:51:09.000Z
2021-02-17T07:51:09.000Z
V2RaycSpider1225/BusinessCentralLayer/middleware/work_io.py
kujie0121/V2RayCloudSpider
32cd65f7811374679fe09cfae2fda805d42fe7ab
[ "MIT" ]
null
null
null
V2RaycSpider1225/BusinessCentralLayer/middleware/work_io.py
kujie0121/V2RayCloudSpider
32cd65f7811374679fe09cfae2fda805d42fe7ab
[ "MIT" ]
null
null
null
__all__ = ['Middleware'] from gevent.queue import Queue # 工作栈 class Middleware: # cache of redis zeus = Queue() # Trash apollo = Queue() theseus = {} # 共享任务队列 poseidon = Queue() hera = Queue() # FIXME # 不明原因bug 使用dict(zip())方案生成的同样的变量, # 在经过同一个函数方案后输出竟然不一样 cache_redis_queue = {'ssr': {}, 'v2ray': {}} # cache_redis_queue = dict(zip(CRAWLER_SEQUENCE, [{}] * CRAWLER_SEQUENCE.__len__()))
16.666667
88
0.6
__all__ = ['Middleware'] from gevent.queue import Queue class Middleware: zeus = Queue() apollo = Queue() theseus = {} poseidon = Queue() hera = Queue() cache_redis_queue = {'ssr': {}, 'v2ray': {}}
true
true
f723c61d42e207f1fe86c447483a8df191033920
2,661
py
Python
tests/test_basics.py
JunyongYao/flask-backend-seed
9d16f56a9f34ebb1ec32eaab800b7ad6b10d0c9d
[ "MIT" ]
9
2017-10-20T09:26:09.000Z
2021-01-28T02:54:43.000Z
tests/test_basics.py
JunyongYao/flask-backend-seed
9d16f56a9f34ebb1ec32eaab800b7ad6b10d0c9d
[ "MIT" ]
2
2018-03-06T06:27:53.000Z
2018-04-19T01:47:38.000Z
tests/test_basics.py
JunyongYao/flask-backend-seed
9d16f56a9f34ebb1ec32eaab800b7ad6b10d0c9d
[ "MIT" ]
2
2019-07-18T22:32:28.000Z
2020-06-15T14:10:29.000Z
# -*- coding: utf-8 -*- import json import random import string import unittest from flask import current_app from config import config from app import create_app, db, redis, add_api_support class BasicsTestCase(unittest.TestCase): def setUp(self): test_app = create_app(config['testing']) test_app = add_api_support(test_app) self.assertTrue(current_app.config['TESTING']) self.app_context = test_app.app_context() self.app_context.push() self.test_client = test_app.test_client() db.drop_all() db.create_all() redis.flushall() def tearDown(self): db.session.remove() self.app_context.pop() @staticmethod def _parse_result(res_data): data = str(res_data, "utf-8") try: ret_data = json.loads(data) except ValueError: ret_data = data return ret_data @staticmethod def generate_random_string(length): return ''.join(random.choice(string.ascii_lowercase + string.digits) for x in range(length)) def create_test_user(self, name, pwd): from app.model.userModel import UserInfo stored_pwd = UserInfo.generate_sha_pwd(pwd) new_user = UserInfo(name=name, sha_pwd=stored_pwd, nickname=self.generate_random_string(5)) db.session.add(new_user) db.session.commit() return new_user def get_request(self, url, data=None, header=None): response = self.test_client.get(url, data=data, headers=header) return response.status_code, self._parse_result(response.data) def put_request(self, url, data=None, header=None): response = self.test_client.put(url, data=data, headers=header) return response.status_code, self._parse_result(response.data) def post_request(self, url, data=None, header=None): response = self.test_client.post(url, data=data, headers=header) return response.status_code, self._parse_result(response.data) def delete_request(self, url, data=None, header=None): response = self.test_client.delete(url, data=data, headers=header) return response.status_code, self._parse_result(response.data) def post_login(self, data): url = "/api/sample/login" return self.post_request(url, data=data) def 
get_user_info(self, header): url = "/api/sample/user_info" return self.get_request(url, header=header) def put_user_info(self, data, header): url = "/api/sample/user_info" return self.put_request(url, data=data, header=header)
32.45122
100
0.661405
import json import random import string import unittest from flask import current_app from config import config from app import create_app, db, redis, add_api_support class BasicsTestCase(unittest.TestCase): def setUp(self): test_app = create_app(config['testing']) test_app = add_api_support(test_app) self.assertTrue(current_app.config['TESTING']) self.app_context = test_app.app_context() self.app_context.push() self.test_client = test_app.test_client() db.drop_all() db.create_all() redis.flushall() def tearDown(self): db.session.remove() self.app_context.pop() @staticmethod def _parse_result(res_data): data = str(res_data, "utf-8") try: ret_data = json.loads(data) except ValueError: ret_data = data return ret_data @staticmethod def generate_random_string(length): return ''.join(random.choice(string.ascii_lowercase + string.digits) for x in range(length)) def create_test_user(self, name, pwd): from app.model.userModel import UserInfo stored_pwd = UserInfo.generate_sha_pwd(pwd) new_user = UserInfo(name=name, sha_pwd=stored_pwd, nickname=self.generate_random_string(5)) db.session.add(new_user) db.session.commit() return new_user def get_request(self, url, data=None, header=None): response = self.test_client.get(url, data=data, headers=header) return response.status_code, self._parse_result(response.data) def put_request(self, url, data=None, header=None): response = self.test_client.put(url, data=data, headers=header) return response.status_code, self._parse_result(response.data) def post_request(self, url, data=None, header=None): response = self.test_client.post(url, data=data, headers=header) return response.status_code, self._parse_result(response.data) def delete_request(self, url, data=None, header=None): response = self.test_client.delete(url, data=data, headers=header) return response.status_code, self._parse_result(response.data) def post_login(self, data): url = "/api/sample/login" return self.post_request(url, data=data) def get_user_info(self, header): 
url = "/api/sample/user_info" return self.get_request(url, header=header) def put_user_info(self, data, header): url = "/api/sample/user_info" return self.put_request(url, data=data, header=header)
true
true
f723c6be07f0c68bbd987b45988d3ea7dc170622
24,126
py
Python
refactorings/utils/utils_listener_fast.py
mossj77/CodART
ac83a49a4aa9310b09da12fb476a84586812310b
[ "MIT" ]
1
2021-10-10T23:56:49.000Z
2021-10-10T23:56:49.000Z
refactorings/utils/utils_listener_fast.py
pouorix/CodART
84b35a5a14e583d88319d7f6c2de8dc3b3dc83b2
[ "MIT" ]
null
null
null
refactorings/utils/utils_listener_fast.py
pouorix/CodART
84b35a5a14e583d88319d7f6c2de8dc3b3dc83b2
[ "MIT" ]
null
null
null
import re # regular expressions import antlr4 from antlr4.Token import CommonToken import antlr4.tree from antlr4.CommonTokenStream import CommonTokenStream from typing import List, Optional from gen.java.JavaParser import JavaParser from gen.java.JavaParserListener import JavaParserListener class Program: def __init__(self): self.packages = {} def __str__(self): return str(self.packages) class Package: def __init__(self): self.name = None self.classes = {} self.package_ctx = None def __str__(self): return str(self.name) + " " + str(self.classes) class TokensInfo: """Note that start and stop are inclusive.""" def __init__(self, parser_context=None): if parser_context is not None: self.token_stream: CommonTokenStream = parser_context.parser.getTokenStream() self.start: int = parser_context.start.tokenIndex self.stop: int = parser_context.stop.tokenIndex else: self.token_stream: CommonTokenStream = None self.start: int = None self.stop: int = None def get_token_index(self, tokens: list, start: int, stop: int): return tokens[start:stop] class FileInfo: def __init__(self, filename: str = None, package_name: str = None): self.filename: str = filename self.package_name: str = package_name self.all_imports = [] self.package_imports = [] self.class_imports = [] def has_imported_class(self, package_name: str, class_name: str) -> bool: if self.package_name == package_name: return True return ( any(lambda x: x.package_name == package_name for package_import in self.package_imports) or any(lambda x: x.package_name == package_name and x.class_name == class_name for class_import in self.class_imports) ) def has_imported_package(self, package_name: str): if self.package_name == package_name: return True return ( any(lambda x: x.package_name == package_name for package_import in self.package_imports) ) class SingleFileElement: """The base class for those elements that are extracted from a single file""" def __init__(self, parser_context, filename: str = None, _file_info: FileInfo 
= None): self.parser_context = parser_context self.filename = filename self.file_info = _file_info def get_token_stream(self) -> CommonTokenStream: return self.parser_context.parser.getTokenStream() def get_tokens_info(self) -> TokensInfo: return TokensInfo( self.parser_context ) def get_first_symbol(self) -> CommonToken: first_terminal = self.parser_context while not isinstance(first_terminal, antlr4.tree.Tree.TerminalNode): first_terminal = first_terminal.getChild(0) return first_terminal.getSymbol() def get_last_symbol(self) -> CommonToken: last_terminal = self.parser_context while not isinstance(last_terminal, antlr4.tree.Tree.TerminalNode): last_terminal = last_terminal.getChild(last_terminal.getChildCount() - 1) return last_terminal.getSymbol() def get_file_position_range(self) -> tuple: return ( self.get_first_symbol().start, self.get_last_symbol().stop ) def get_text_from_file(self, filename=None) -> str: if filename is None: filename = self.filename if filename is None: return None file = open(filename, 'r') text = file.read() file.close() return text[self.get_first_symbol().start:self.get_last_symbol().stop + 1] class ClassImport(SingleFileElement): """import package_name.class_name;""" def __init__(self, package_name: str = None, class_name: str = None, parser_context: JavaParser.ImportDeclarationContext = None, filename: str = None, file_info: FileInfo = None): self.package_name = package_name self.class_name = class_name self.parser_context = parser_context self.filename = filename self.file_info = file_info def __str__(self): return "import " + str(self.package_name) + '.' 
+ str(self.class_name) class PackageImport(SingleFileElement): """import package_name.*;""" def __init__(self, package_name: str = None, parser_context: JavaParser.ImportDeclarationContext = None, filename: str = None, file_info: FileInfo = None): self.package_name = package_name self.parser_context = parser_context self.filename = filename self.file_info = file_info def __str__(self): return "import " + str(self.package_name) + ".*" class Class(SingleFileElement): def __init__(self, name: str = None, super_class_name: str = None, package_name: str = None, parser_context: JavaParser.ClassDeclarationContext = None, filename: str = None, file_info: FileInfo = None): self.modifiers = [] self.modifiers_parser_contexts = [] self.name = name self.superclass_name = None self.superinterface_names = [] self.fields = {} self.methods = {} self.package_name = package_name self.parser_context = parser_context self.filename = filename self.file_info = file_info self.body_context = None def find_methods_with_name(self, name: str) -> list: result = [] for mk in self.methods: m = self.methods[mk] if m.name == name: result.append(m) return result def __str__(self): return str(self.modifiers) + " " + str(self.name) \ + ((" extends " + str(self.superclass_name)) if self.superclass_name is not None else "") \ + ((" implements " + str(self.superinterface_names)) if len(self.superinterface_names) > 0 else "") \ + " " + str(self.fields) \ + " " + str(self.methods) class Field(SingleFileElement): def __init__(self, datatype: str = None, name: str = None, initializer: str = None, package_name: str = None, class_name: str = None, parser_context: JavaParser.FieldDeclarationContext = None, filename: str = None, file_info: FileInfo = None): self.modifiers = [] self.modifiers_parser_contexts = [] self.datatype = datatype self.name = name self.initializer = initializer self.neighbor_names = [] self.all_variable_declarator_contexts = [] self.index_in_variable_declarators: int = None 
self.package_name = package_name self.class_name = class_name self.parser_context = parser_context self.filename = filename self.file_info = file_info def __str__(self): return str(self.modifiers) + " " + str(self.datatype) + " " + str(self.name) class Method(SingleFileElement): def __init__(self, returntype: str = None, name: str = None, body_text: str = None, package_name: str = None, class_name: str = None, parser_context=None, filename: str = None, file_info: FileInfo = None): self.modifiers = [] self.modifiers_parser_contexts = [] self.returntype = returntype self.name = name self.parameters = [] self.body_text = body_text self.body_method_invocations = {} self.body_local_vars_and_expr_names = [] # Type: either LocalVariable, ExpressionName or MethodInvocation self.package_name = package_name self.class_name = class_name self.parser_context = parser_context self.filename = filename self.file_info = file_info self.formalparam_context = None self.body_method_invocations_without_typename = {} self.method_declaration_context = None self.is_constructor = False def __str__(self): return str(self.modifiers) + " " + str(self.returntype) + " " + str(self.name) \ + str(tuple(self.parameters)) class LocalVariable: def __init__(self, datatype: str = None, identifier: str = None, parser_context: JavaParser.LocalVariableDeclarationContext = None): self.datatype = datatype self.identifier = identifier self.parser_context = parser_context class ExpressionName: def __init__(self, dot_separated_identifiers: list): self.dot_separated_identifiers = dot_separated_identifiers class MethodInvocation: def __init__(self, dot_separated_identifiers: list, parser_context: JavaParser.ExpressionContext = None): self.dot_separated_identifiers = dot_separated_identifiers self.parser_context = parser_context class UtilsListener(JavaParserListener): def __init__(self, filename): self.package = Package() self.last_modifiers = [] self.last_modifiers_contexts = [] self.current_class_identifier = 
None self.current_class_identifier_temp = None self.nest_count = 0 self.current_method_identifier = None self.current_method = None self.current_local_var_type = None self.current_local_var_ctx = None self.current_field_decl = None self.current_field_ids = None self.current_field_dims = None self.current_field_inits = None self.current_field_var_ctxs = None self.filename = filename self.file_info = FileInfo(filename=filename) self.field_enter_count = 0 def enterPackageDeclaration(self, ctx: JavaParser.PackageDeclarationContext): self.package.name = ctx.qualifiedName().getText() self.file_info.package_name = self.package.name self.package.package_ctx = ctx def enterImportDeclaration(self, ctx: JavaParser.ImportDeclarationContext): if ctx.STATIC() is None: name: str = ctx.qualifiedName().getText() if ctx.getText().endswith(".*;"): # Package import p = name package_import = PackageImport( package_name=p, parser_context=ctx, filename=self.filename, file_info=self.file_info ) self.file_info.all_imports.append(package_import) self.file_info.package_imports.append(package_import) else: # Class import p = None dot_i = name.rfind('.') if dot_i != -1: p = name[:dot_i] c = name[dot_i + 1:] else: c = name class_import = ClassImport( package_name=p, class_name=c, parser_context=ctx, filename=self.filename, file_info=self.file_info ) self.file_info.all_imports.append(class_import) self.file_info.class_imports.append(class_import) def enterTypeDeclaration(self, ctx: JavaParser.TypeDeclarationContext): self.last_modifiers.clear() self.last_modifiers_contexts.clear() for modifier in ctx.getChildren(lambda x: type(x) == JavaParser.ClassOrInterfaceModifierContext): self.last_modifiers.append(modifier.getText()) self.last_modifiers_contexts.append(modifier) def enterClassBodyDeclaration(self, ctx: JavaParser.ClassBodyDeclarationContext): self.last_modifiers.clear() self.last_modifiers_contexts.clear() for modifier in ctx.getChildren(lambda x: type(x) == JavaParser.ModifierContext): 
self.last_modifiers.append(modifier.getText()) self.last_modifiers_contexts.append(modifier) def enterClassDeclaration(self, ctx: JavaParser.ClassDeclarationContext): if self.current_class_identifier is None and self.nest_count == 0: self.current_class_identifier = ctx.IDENTIFIER().getText() self.current_class_ctx = ctx.IDENTIFIER() current_class = Class( package_name=self.package.name, parser_context=ctx, filename=self.filename, file_info=self.file_info ) current_class.modifiers = self.last_modifiers.copy() current_class.modifiers_parser_contexts = self.last_modifiers_contexts.copy() current_class.name = self.current_class_identifier if ctx.EXTENDS() is not None: current_class.superclass_name = ctx.typeType().getText() if ctx.IMPLEMENTS() is not None: for interface_type in ctx.typeList().getChildren(lambda x: type(x) == JavaParser.TypeTypeContext): current_class.superinterface_names.append(interface_type.getText()) self.package.classes[current_class.name] = current_class else: if self.nest_count == 0: self.current_class_identifier_temp = self.current_class_identifier self.current_class_identifier = None self.nest_count += 1 def enterClassBody(self, ctx: JavaParser.ClassBodyContext): if self.current_class_identifier is not None: self.package.classes[self.current_class_identifier].body_context = ctx def exitClassDeclaration(self, ctx: JavaParser.ClassDeclarationContext): if self.nest_count > 0: self.nest_count -= 1 if self.nest_count == 0: self.current_class_identifier = self.current_class_identifier_temp self.current_class_identifier_temp = None elif self.current_class_identifier is not None: self.current_class_identifier = None def enterFormalParameterList(self, ctx: JavaParser.FormalParameterListContext): if self.current_method is not None: self.current_method.formalparam_context = ctx def enterMethodDeclaration(self, ctx: JavaParser.MethodDeclarationContext): if self.current_class_identifier is not None: # method_header = ctx.methodHeader() 
self.current_method_identifier = ctx.IDENTIFIER().getText() method = Method( package_name=self.package.name, class_name=self.current_class_identifier, parser_context=ctx.parentCtx.parentCtx, filename=self.filename, file_info=self.file_info ) method.modifiers = self.last_modifiers.copy() method.modifiers_parser_contexts = self.last_modifiers_contexts.copy() method.returntype = ctx.typeTypeOrVoid().getText() method.name = self.current_method_identifier method.is_constructor = False # This is done on exit to collect params too, to support overloading. # self.package.classes[self.current_class_identifier].methods[method.name] = method self.current_method = method def enterFormalParameters(self, ctx: JavaParser.FormalParametersContext): if self.current_method is not None: self.current_method.method_declaration_context = ctx def enterFormalParameter(self, ctx: JavaParser.FormalParameterContext): if self.current_method is not None: self.current_method.parameters.append( (ctx.typeType().getText(), ctx.variableDeclaratorId().IDENTIFIER().getText()) ) def enterMethodBody(self, ctx: JavaParser.MethodBodyContext): if self.current_method is not None: self.current_method.body_text = ctx.getText() pass def general_exit_method_decl(self): if self.current_class_identifier is not None: if self.current_method is not None: method = self.current_method method_key = ("" if method.name is None else method.name) + '(' is_first = True for param in method.parameters: if not is_first: method_key += ',' is_first = False method_key += param[0] # the type method_key += ')' self.package.classes[self.current_class_identifier].methods[method_key] = method self.current_method_identifier = None self.current_method = None def exitMethodDeclaration(self, ctx: JavaParser.MethodDeclarationContext): self.general_exit_method_decl() def enterConstructorDeclaration(self, ctx: JavaParser.ConstructorDeclarationContext): if self.current_class_identifier is not None: self.current_method_identifier = 
ctx.IDENTIFIER().getText() method = Method( package_name=self.package.name, class_name=self.current_class_identifier, parser_context=ctx.parentCtx.parentCtx, filename=self.filename, file_info=self.file_info ) method.modifiers = self.last_modifiers.copy() method.modifiers_parser_contexts = self.last_modifiers_contexts.copy() method.returntype = None method.name = None # self.current_method_identifier method.body_text = ctx.constructorBody.getText() method.is_constructor = True # This is done on exit to collect params too, to support overloading. # self.package.classes[self.current_class_identifier].methods[method.name] = method self.current_method = method def exitConstructorDeclaration(self, ctx: JavaParser.ConstructorDeclarationContext): self.general_exit_method_decl() def enterMethodCall(self, ctx: JavaParser.MethodCallContext): if self.current_method is not None: if ctx.parentCtx.IDENTIFIER() != None: if ctx.parentCtx.IDENTIFIER() not in self.current_method.body_method_invocations: self.current_method.body_method_invocations[ctx.parentCtx.IDENTIFIER()] = [ ctx.IDENTIFIER().getText()] else: self.current_method.body_method_invocations[ctx.parentCtx.IDENTIFIER()].append( ctx.IDENTIFIER().getText()) else: a = len(ctx.parentCtx.children) if a == 1: if ctx.IDENTIFIER() != None: if self.current_class_ctx not in self.current_method.body_method_invocations_without_typename: self.current_method.body_method_invocations_without_typename[self.current_class_ctx] = [ctx] else: self.current_method.body_method_invocations_without_typename[self.current_class_ctx].append( ctx) # MethodInvocation txt = ctx.getText() ids = txt[:txt.find('(')].split('.') self.current_method.body_local_vars_and_expr_names.append( MethodInvocation(ids, ctx) ) def enterExpression(self, ctx: JavaParser.ExpressionContext): if self.current_method is not None: if ctx.methodCall() is not None: txt = ctx.getText() ids = txt[:txt.find('(')].split('.') self.current_method.body_local_vars_and_expr_names.append( 
MethodInvocation(ids, ctx) ) else: names = ctx.getText().split('.') should_add = True for name in names: if not re.match("^[A-Za-z0-9_]*$", name): should_add = False if should_add: self.current_method.body_local_vars_and_expr_names.append(ExpressionName(names)) def enterLocalVariableDeclaration(self, ctx: JavaParser.LocalVariableDeclarationContext): if self.current_method is not None: self.current_local_var_type = ctx.typeType().getText() self.current_local_var_ctx = ctx # The rest in: enterVariableDeclarator def exitLocalVariableDeclaration(self, ctx: JavaParser.LocalVariableDeclarationContext): self.current_local_var_type = None def enterFieldDeclaration(self, ctx: JavaParser.FieldDeclarationContext): self.field_enter_count += 1 if self.current_class_identifier is not None and self.field_enter_count == 1: modifiers = self.last_modifiers.copy() modifiers_contexts = self.last_modifiers_contexts.copy() datatype = ctx.typeType().getText() self.current_field_decl = (modifiers, datatype, ctx, modifiers_contexts) self.current_field_ids = [] self.current_field_dims = [] self.current_field_inits = [] self.current_field_var_ctxs = [] def enterVariableDeclarator(self, ctx: JavaParser.VariableDeclaratorContext): dims = "" v_id: str = ctx.variableDeclaratorId().getText() dims_i = v_id.find('[') if dims_i != -1: dims = v_id[dims_i:] if self.current_field_decl is not None: self.current_field_ids.append(ctx.variableDeclaratorId().IDENTIFIER().getText()) self.current_field_dims.append(dims) init = None init_ctx = ctx.variableInitializer() if init_ctx is not None: init = init_ctx.getText() self.current_field_inits.append(init) self.current_field_var_ctxs.append(ctx) if self.current_local_var_type is not None: if self.current_method is not None: self.current_method.body_local_vars_and_expr_names.append( LocalVariable( self.current_local_var_type + dims, ctx.variableDeclaratorId().IDENTIFIER().getText(), self.current_local_var_ctx ) ) def exitFieldDeclaration(self, ctx: 
JavaParser.FieldDeclarationContext): self.field_enter_count -= 1 if self.current_class_identifier is not None and self.field_enter_count == 0: for i in range(len(self.current_field_ids)): field_id = self.current_field_ids[i] dims = self.current_field_dims[i] field_init = self.current_field_inits[i] var_ctx = self.current_field_var_ctxs[i] field = Field( package_name=self.package.name, class_name=self.current_class_identifier, parser_context=self.current_field_decl[2], filename=self.filename, file_info=self.file_info ) field.modifiers = self.current_field_decl[0] field.modifiers_parser_contexts = self.current_field_decl[3] field.datatype = self.current_field_decl[1] + dims field.name = field_id field.initializer = field_init field.neighbor_names = [x for x in self.current_field_ids if x != field_id] field.all_variable_declarator_contexts = self.current_field_var_ctxs field.index_in_variable_declarators = i self.package.classes[self.current_class_identifier].fields[field.name] = field self.current_field_decl = None
40.822335
116
0.619829
import re import antlr4 from antlr4.Token import CommonToken import antlr4.tree from antlr4.CommonTokenStream import CommonTokenStream from typing import List, Optional from gen.java.JavaParser import JavaParser from gen.java.JavaParserListener import JavaParserListener class Program: def __init__(self): self.packages = {} def __str__(self): return str(self.packages) class Package: def __init__(self): self.name = None self.classes = {} self.package_ctx = None def __str__(self): return str(self.name) + " " + str(self.classes) class TokensInfo: def __init__(self, parser_context=None): if parser_context is not None: self.token_stream: CommonTokenStream = parser_context.parser.getTokenStream() self.start: int = parser_context.start.tokenIndex self.stop: int = parser_context.stop.tokenIndex else: self.token_stream: CommonTokenStream = None self.start: int = None self.stop: int = None def get_token_index(self, tokens: list, start: int, stop: int): return tokens[start:stop] class FileInfo: def __init__(self, filename: str = None, package_name: str = None): self.filename: str = filename self.package_name: str = package_name self.all_imports = [] self.package_imports = [] self.class_imports = [] def has_imported_class(self, package_name: str, class_name: str) -> bool: if self.package_name == package_name: return True return ( any(lambda x: x.package_name == package_name for package_import in self.package_imports) or any(lambda x: x.package_name == package_name and x.class_name == class_name for class_import in self.class_imports) ) def has_imported_package(self, package_name: str): if self.package_name == package_name: return True return ( any(lambda x: x.package_name == package_name for package_import in self.package_imports) ) class SingleFileElement: def __init__(self, parser_context, filename: str = None, _file_info: FileInfo = None): self.parser_context = parser_context self.filename = filename self.file_info = _file_info def get_token_stream(self) -> 
CommonTokenStream: return self.parser_context.parser.getTokenStream() def get_tokens_info(self) -> TokensInfo: return TokensInfo( self.parser_context ) def get_first_symbol(self) -> CommonToken: first_terminal = self.parser_context while not isinstance(first_terminal, antlr4.tree.Tree.TerminalNode): first_terminal = first_terminal.getChild(0) return first_terminal.getSymbol() def get_last_symbol(self) -> CommonToken: last_terminal = self.parser_context while not isinstance(last_terminal, antlr4.tree.Tree.TerminalNode): last_terminal = last_terminal.getChild(last_terminal.getChildCount() - 1) return last_terminal.getSymbol() def get_file_position_range(self) -> tuple: return ( self.get_first_symbol().start, self.get_last_symbol().stop ) def get_text_from_file(self, filename=None) -> str: if filename is None: filename = self.filename if filename is None: return None file = open(filename, 'r') text = file.read() file.close() return text[self.get_first_symbol().start:self.get_last_symbol().stop + 1] class ClassImport(SingleFileElement): def __init__(self, package_name: str = None, class_name: str = None, parser_context: JavaParser.ImportDeclarationContext = None, filename: str = None, file_info: FileInfo = None): self.package_name = package_name self.class_name = class_name self.parser_context = parser_context self.filename = filename self.file_info = file_info def __str__(self): return "import " + str(self.package_name) + '.' 
+ str(self.class_name) class PackageImport(SingleFileElement): def __init__(self, package_name: str = None, parser_context: JavaParser.ImportDeclarationContext = None, filename: str = None, file_info: FileInfo = None): self.package_name = package_name self.parser_context = parser_context self.filename = filename self.file_info = file_info def __str__(self): return "import " + str(self.package_name) + ".*" class Class(SingleFileElement): def __init__(self, name: str = None, super_class_name: str = None, package_name: str = None, parser_context: JavaParser.ClassDeclarationContext = None, filename: str = None, file_info: FileInfo = None): self.modifiers = [] self.modifiers_parser_contexts = [] self.name = name self.superclass_name = None self.superinterface_names = [] self.fields = {} self.methods = {} self.package_name = package_name self.parser_context = parser_context self.filename = filename self.file_info = file_info self.body_context = None def find_methods_with_name(self, name: str) -> list: result = [] for mk in self.methods: m = self.methods[mk] if m.name == name: result.append(m) return result def __str__(self): return str(self.modifiers) + " " + str(self.name) \ + ((" extends " + str(self.superclass_name)) if self.superclass_name is not None else "") \ + ((" implements " + str(self.superinterface_names)) if len(self.superinterface_names) > 0 else "") \ + " " + str(self.fields) \ + " " + str(self.methods) class Field(SingleFileElement): def __init__(self, datatype: str = None, name: str = None, initializer: str = None, package_name: str = None, class_name: str = None, parser_context: JavaParser.FieldDeclarationContext = None, filename: str = None, file_info: FileInfo = None): self.modifiers = [] self.modifiers_parser_contexts = [] self.datatype = datatype self.name = name self.initializer = initializer self.neighbor_names = [] self.all_variable_declarator_contexts = [] self.index_in_variable_declarators: int = None self.package_name = package_name 
self.class_name = class_name self.parser_context = parser_context self.filename = filename self.file_info = file_info def __str__(self): return str(self.modifiers) + " " + str(self.datatype) + " " + str(self.name) class Method(SingleFileElement): def __init__(self, returntype: str = None, name: str = None, body_text: str = None, package_name: str = None, class_name: str = None, parser_context=None, filename: str = None, file_info: FileInfo = None): self.modifiers = [] self.modifiers_parser_contexts = [] self.returntype = returntype self.name = name self.parameters = [] self.body_text = body_text self.body_method_invocations = {} self.body_local_vars_and_expr_names = [] self.package_name = package_name self.class_name = class_name self.parser_context = parser_context self.filename = filename self.file_info = file_info self.formalparam_context = None self.body_method_invocations_without_typename = {} self.method_declaration_context = None self.is_constructor = False def __str__(self): return str(self.modifiers) + " " + str(self.returntype) + " " + str(self.name) \ + str(tuple(self.parameters)) class LocalVariable: def __init__(self, datatype: str = None, identifier: str = None, parser_context: JavaParser.LocalVariableDeclarationContext = None): self.datatype = datatype self.identifier = identifier self.parser_context = parser_context class ExpressionName: def __init__(self, dot_separated_identifiers: list): self.dot_separated_identifiers = dot_separated_identifiers class MethodInvocation: def __init__(self, dot_separated_identifiers: list, parser_context: JavaParser.ExpressionContext = None): self.dot_separated_identifiers = dot_separated_identifiers self.parser_context = parser_context class UtilsListener(JavaParserListener): def __init__(self, filename): self.package = Package() self.last_modifiers = [] self.last_modifiers_contexts = [] self.current_class_identifier = None self.current_class_identifier_temp = None self.nest_count = 0 self.current_method_identifier 
= None self.current_method = None self.current_local_var_type = None self.current_local_var_ctx = None self.current_field_decl = None self.current_field_ids = None self.current_field_dims = None self.current_field_inits = None self.current_field_var_ctxs = None self.filename = filename self.file_info = FileInfo(filename=filename) self.field_enter_count = 0 def enterPackageDeclaration(self, ctx: JavaParser.PackageDeclarationContext): self.package.name = ctx.qualifiedName().getText() self.file_info.package_name = self.package.name self.package.package_ctx = ctx def enterImportDeclaration(self, ctx: JavaParser.ImportDeclarationContext): if ctx.STATIC() is None: name: str = ctx.qualifiedName().getText() if ctx.getText().endswith(".*;"): p = name package_import = PackageImport( package_name=p, parser_context=ctx, filename=self.filename, file_info=self.file_info ) self.file_info.all_imports.append(package_import) self.file_info.package_imports.append(package_import) else: p = None dot_i = name.rfind('.') if dot_i != -1: p = name[:dot_i] c = name[dot_i + 1:] else: c = name class_import = ClassImport( package_name=p, class_name=c, parser_context=ctx, filename=self.filename, file_info=self.file_info ) self.file_info.all_imports.append(class_import) self.file_info.class_imports.append(class_import) def enterTypeDeclaration(self, ctx: JavaParser.TypeDeclarationContext): self.last_modifiers.clear() self.last_modifiers_contexts.clear() for modifier in ctx.getChildren(lambda x: type(x) == JavaParser.ClassOrInterfaceModifierContext): self.last_modifiers.append(modifier.getText()) self.last_modifiers_contexts.append(modifier) def enterClassBodyDeclaration(self, ctx: JavaParser.ClassBodyDeclarationContext): self.last_modifiers.clear() self.last_modifiers_contexts.clear() for modifier in ctx.getChildren(lambda x: type(x) == JavaParser.ModifierContext): self.last_modifiers.append(modifier.getText()) self.last_modifiers_contexts.append(modifier) def enterClassDeclaration(self, ctx: 
JavaParser.ClassDeclarationContext): if self.current_class_identifier is None and self.nest_count == 0: self.current_class_identifier = ctx.IDENTIFIER().getText() self.current_class_ctx = ctx.IDENTIFIER() current_class = Class( package_name=self.package.name, parser_context=ctx, filename=self.filename, file_info=self.file_info ) current_class.modifiers = self.last_modifiers.copy() current_class.modifiers_parser_contexts = self.last_modifiers_contexts.copy() current_class.name = self.current_class_identifier if ctx.EXTENDS() is not None: current_class.superclass_name = ctx.typeType().getText() if ctx.IMPLEMENTS() is not None: for interface_type in ctx.typeList().getChildren(lambda x: type(x) == JavaParser.TypeTypeContext): current_class.superinterface_names.append(interface_type.getText()) self.package.classes[current_class.name] = current_class else: if self.nest_count == 0: self.current_class_identifier_temp = self.current_class_identifier self.current_class_identifier = None self.nest_count += 1 def enterClassBody(self, ctx: JavaParser.ClassBodyContext): if self.current_class_identifier is not None: self.package.classes[self.current_class_identifier].body_context = ctx def exitClassDeclaration(self, ctx: JavaParser.ClassDeclarationContext): if self.nest_count > 0: self.nest_count -= 1 if self.nest_count == 0: self.current_class_identifier = self.current_class_identifier_temp self.current_class_identifier_temp = None elif self.current_class_identifier is not None: self.current_class_identifier = None def enterFormalParameterList(self, ctx: JavaParser.FormalParameterListContext): if self.current_method is not None: self.current_method.formalparam_context = ctx def enterMethodDeclaration(self, ctx: JavaParser.MethodDeclarationContext): if self.current_class_identifier is not None: self.current_method_identifier = ctx.IDENTIFIER().getText() method = Method( package_name=self.package.name, class_name=self.current_class_identifier, 
parser_context=ctx.parentCtx.parentCtx, filename=self.filename, file_info=self.file_info ) method.modifiers = self.last_modifiers.copy() method.modifiers_parser_contexts = self.last_modifiers_contexts.copy() method.returntype = ctx.typeTypeOrVoid().getText() method.name = self.current_method_identifier method.is_constructor = False self.current_method = method def enterFormalParameters(self, ctx: JavaParser.FormalParametersContext): if self.current_method is not None: self.current_method.method_declaration_context = ctx def enterFormalParameter(self, ctx: JavaParser.FormalParameterContext): if self.current_method is not None: self.current_method.parameters.append( (ctx.typeType().getText(), ctx.variableDeclaratorId().IDENTIFIER().getText()) ) def enterMethodBody(self, ctx: JavaParser.MethodBodyContext): if self.current_method is not None: self.current_method.body_text = ctx.getText() pass def general_exit_method_decl(self): if self.current_class_identifier is not None: if self.current_method is not None: method = self.current_method method_key = ("" if method.name is None else method.name) + '(' is_first = True for param in method.parameters: if not is_first: method_key += ',' is_first = False method_key += param[0] method_key += ')' self.package.classes[self.current_class_identifier].methods[method_key] = method self.current_method_identifier = None self.current_method = None def exitMethodDeclaration(self, ctx: JavaParser.MethodDeclarationContext): self.general_exit_method_decl() def enterConstructorDeclaration(self, ctx: JavaParser.ConstructorDeclarationContext): if self.current_class_identifier is not None: self.current_method_identifier = ctx.IDENTIFIER().getText() method = Method( package_name=self.package.name, class_name=self.current_class_identifier, parser_context=ctx.parentCtx.parentCtx, filename=self.filename, file_info=self.file_info ) method.modifiers = self.last_modifiers.copy() method.modifiers_parser_contexts = self.last_modifiers_contexts.copy() 
method.returntype = None method.name = None method.body_text = ctx.constructorBody.getText() method.is_constructor = True self.current_method = method def exitConstructorDeclaration(self, ctx: JavaParser.ConstructorDeclarationContext): self.general_exit_method_decl() def enterMethodCall(self, ctx: JavaParser.MethodCallContext): if self.current_method is not None: if ctx.parentCtx.IDENTIFIER() != None: if ctx.parentCtx.IDENTIFIER() not in self.current_method.body_method_invocations: self.current_method.body_method_invocations[ctx.parentCtx.IDENTIFIER()] = [ ctx.IDENTIFIER().getText()] else: self.current_method.body_method_invocations[ctx.parentCtx.IDENTIFIER()].append( ctx.IDENTIFIER().getText()) else: a = len(ctx.parentCtx.children) if a == 1: if ctx.IDENTIFIER() != None: if self.current_class_ctx not in self.current_method.body_method_invocations_without_typename: self.current_method.body_method_invocations_without_typename[self.current_class_ctx] = [ctx] else: self.current_method.body_method_invocations_without_typename[self.current_class_ctx].append( ctx) txt = ctx.getText() ids = txt[:txt.find('(')].split('.') self.current_method.body_local_vars_and_expr_names.append( MethodInvocation(ids, ctx) ) def enterExpression(self, ctx: JavaParser.ExpressionContext): if self.current_method is not None: if ctx.methodCall() is not None: txt = ctx.getText() ids = txt[:txt.find('(')].split('.') self.current_method.body_local_vars_and_expr_names.append( MethodInvocation(ids, ctx) ) else: names = ctx.getText().split('.') should_add = True for name in names: if not re.match("^[A-Za-z0-9_]*$", name): should_add = False if should_add: self.current_method.body_local_vars_and_expr_names.append(ExpressionName(names)) def enterLocalVariableDeclaration(self, ctx: JavaParser.LocalVariableDeclarationContext): if self.current_method is not None: self.current_local_var_type = ctx.typeType().getText() self.current_local_var_ctx = ctx def exitLocalVariableDeclaration(self, ctx: 
JavaParser.LocalVariableDeclarationContext): self.current_local_var_type = None def enterFieldDeclaration(self, ctx: JavaParser.FieldDeclarationContext): self.field_enter_count += 1 if self.current_class_identifier is not None and self.field_enter_count == 1: modifiers = self.last_modifiers.copy() modifiers_contexts = self.last_modifiers_contexts.copy() datatype = ctx.typeType().getText() self.current_field_decl = (modifiers, datatype, ctx, modifiers_contexts) self.current_field_ids = [] self.current_field_dims = [] self.current_field_inits = [] self.current_field_var_ctxs = [] def enterVariableDeclarator(self, ctx: JavaParser.VariableDeclaratorContext): dims = "" v_id: str = ctx.variableDeclaratorId().getText() dims_i = v_id.find('[') if dims_i != -1: dims = v_id[dims_i:] if self.current_field_decl is not None: self.current_field_ids.append(ctx.variableDeclaratorId().IDENTIFIER().getText()) self.current_field_dims.append(dims) init = None init_ctx = ctx.variableInitializer() if init_ctx is not None: init = init_ctx.getText() self.current_field_inits.append(init) self.current_field_var_ctxs.append(ctx) if self.current_local_var_type is not None: if self.current_method is not None: self.current_method.body_local_vars_and_expr_names.append( LocalVariable( self.current_local_var_type + dims, ctx.variableDeclaratorId().IDENTIFIER().getText(), self.current_local_var_ctx ) ) def exitFieldDeclaration(self, ctx: JavaParser.FieldDeclarationContext): self.field_enter_count -= 1 if self.current_class_identifier is not None and self.field_enter_count == 0: for i in range(len(self.current_field_ids)): field_id = self.current_field_ids[i] dims = self.current_field_dims[i] field_init = self.current_field_inits[i] var_ctx = self.current_field_var_ctxs[i] field = Field( package_name=self.package.name, class_name=self.current_class_identifier, parser_context=self.current_field_decl[2], filename=self.filename, file_info=self.file_info ) field.modifiers = self.current_field_decl[0] 
field.modifiers_parser_contexts = self.current_field_decl[3] field.datatype = self.current_field_decl[1] + dims field.name = field_id field.initializer = field_init field.neighbor_names = [x for x in self.current_field_ids if x != field_id] field.all_variable_declarator_contexts = self.current_field_var_ctxs field.index_in_variable_declarators = i self.package.classes[self.current_class_identifier].fields[field.name] = field self.current_field_decl = None
true
true
f723c6d8b64443d90a1248040568d87fc7bf7641
4,464
py
Python
tensorflow/python/keras/optimizer_v2/utils.py
luisangel86a/tensorflow
77ee5e02721ba797fe01d47019e6017d2bb09ab7
[ "Apache-2.0" ]
1
2020-10-25T00:12:12.000Z
2020-10-25T00:12:12.000Z
tensorflow/python/keras/optimizer_v2/utils.py
luisangel86a/tensorflow
77ee5e02721ba797fe01d47019e6017d2bb09ab7
[ "Apache-2.0" ]
null
null
null
tensorflow/python/keras/optimizer_v2/utils.py
luisangel86a/tensorflow
77ee5e02721ba797fe01d47019e6017d2bb09ab7
[ "Apache-2.0" ]
null
null
null
# Copyright 2020 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Optimizer utilities.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function from tensorflow.python.distribute import central_storage_strategy from tensorflow.python.distribute import distribution_strategy_context as distribute_ctx from tensorflow.python.distribute import reduce_util as ds_reduce_util from tensorflow.python.ops import clip_ops from tensorflow.python.platform import tf_logging as logging def all_reduce_sum_gradients(grads_and_vars): """Returns all-reduced gradients aggregated via summation. Args: grads_and_vars: List of (gradient, variable) pairs. Returns: A list of all-reduced gradients. """ grads_and_vars = list(grads_and_vars) filtered_grads_and_vars = filter_empty_gradients(grads_and_vars) # We switch to a cross-replica context since there is a bug which causes # IndexedSlices to be converted to dense tensors when all-reduced in a # replica context. # TODO(b/150507409): Do not switch to a cross-replica context once the bug # is fixed. 
if filtered_grads_and_vars: reduced = distribute_ctx.get_replica_context().merge_call( _all_reduce_sum_fn, args=(filtered_grads_and_vars,)) else: reduced = [] # Copy 'reduced' but add None gradients back in reduced_with_nones = [] reduced_pos = 0 for g, _ in grads_and_vars: if g is None: reduced_with_nones.append(None) else: reduced_with_nones.append(reduced[reduced_pos]) reduced_pos += 1 assert reduced_pos == len(reduced), "Failed to add all gradients" return reduced_with_nones def make_gradient_clipnorm_fn(clipnorm): """Creates a gradient transformation function for clipping by norm.""" def gradient_clipnorm_fn(grads_and_vars): if isinstance(distribute_ctx.get_strategy(), central_storage_strategy.CentralStorageStrategy): raise ValueError( "`clipnorm` is not supported with `CenteralStorageStrategy`") clipped_grads_and_vars = [ (clip_ops.clip_by_norm(g, clipnorm), v) for g, v in grads_and_vars ] return clipped_grads_and_vars return gradient_clipnorm_fn def make_gradient_clipvalue_fn(clipvalue): """Creates a gradient transformation function for clipping by value.""" def gradient_clipvalue_fn(grads_and_vars): if isinstance(distribute_ctx.get_strategy(), central_storage_strategy.CentralStorageStrategy): raise ValueError( "`clipvalue` is not supported with `CenteralStorageStrategy`") clipped_grads_and_vars = [(clip_ops.clip_by_value(g, -clipvalue, clipvalue), v) for g, v in grads_and_vars] return clipped_grads_and_vars return gradient_clipvalue_fn def filter_empty_gradients(grads_and_vars): """Filter out `(grad, var)` pairs that have a gradient equal to `None`.""" grads_and_vars = tuple(grads_and_vars) if not grads_and_vars: return grads_and_vars filtered = [] vars_with_empty_grads = [] for grad, var in grads_and_vars: if grad is None: vars_with_empty_grads.append(var) else: filtered.append((grad, var)) filtered = tuple(filtered) if not filtered: raise ValueError("No gradients provided for any variable: %s." 
% ([v.name for _, v in grads_and_vars],)) if vars_with_empty_grads: logging.warning( ("Gradients do not exist for variables %s when minimizing the loss."), ([v.name for v in vars_with_empty_grads])) return filtered def _all_reduce_sum_fn(distribution, grads_and_vars): return distribution.extended.batch_reduce_to(ds_reduce_util.ReduceOp.SUM, grads_and_vars)
35.428571
88
0.714382
from __future__ import absolute_import from __future__ import division from __future__ import print_function from tensorflow.python.distribute import central_storage_strategy from tensorflow.python.distribute import distribution_strategy_context as distribute_ctx from tensorflow.python.distribute import reduce_util as ds_reduce_util from tensorflow.python.ops import clip_ops from tensorflow.python.platform import tf_logging as logging def all_reduce_sum_gradients(grads_and_vars): grads_and_vars = list(grads_and_vars) filtered_grads_and_vars = filter_empty_gradients(grads_and_vars) if filtered_grads_and_vars: reduced = distribute_ctx.get_replica_context().merge_call( _all_reduce_sum_fn, args=(filtered_grads_and_vars,)) else: reduced = [] reduced_with_nones = [] reduced_pos = 0 for g, _ in grads_and_vars: if g is None: reduced_with_nones.append(None) else: reduced_with_nones.append(reduced[reduced_pos]) reduced_pos += 1 assert reduced_pos == len(reduced), "Failed to add all gradients" return reduced_with_nones def make_gradient_clipnorm_fn(clipnorm): def gradient_clipnorm_fn(grads_and_vars): if isinstance(distribute_ctx.get_strategy(), central_storage_strategy.CentralStorageStrategy): raise ValueError( "`clipnorm` is not supported with `CenteralStorageStrategy`") clipped_grads_and_vars = [ (clip_ops.clip_by_norm(g, clipnorm), v) for g, v in grads_and_vars ] return clipped_grads_and_vars return gradient_clipnorm_fn def make_gradient_clipvalue_fn(clipvalue): def gradient_clipvalue_fn(grads_and_vars): if isinstance(distribute_ctx.get_strategy(), central_storage_strategy.CentralStorageStrategy): raise ValueError( "`clipvalue` is not supported with `CenteralStorageStrategy`") clipped_grads_and_vars = [(clip_ops.clip_by_value(g, -clipvalue, clipvalue), v) for g, v in grads_and_vars] return clipped_grads_and_vars return gradient_clipvalue_fn def filter_empty_gradients(grads_and_vars): grads_and_vars = tuple(grads_and_vars) if not grads_and_vars: return grads_and_vars 
filtered = [] vars_with_empty_grads = [] for grad, var in grads_and_vars: if grad is None: vars_with_empty_grads.append(var) else: filtered.append((grad, var)) filtered = tuple(filtered) if not filtered: raise ValueError("No gradients provided for any variable: %s." % ([v.name for _, v in grads_and_vars],)) if vars_with_empty_grads: logging.warning( ("Gradients do not exist for variables %s when minimizing the loss."), ([v.name for v in vars_with_empty_grads])) return filtered def _all_reduce_sum_fn(distribution, grads_and_vars): return distribution.extended.batch_reduce_to(ds_reduce_util.ReduceOp.SUM, grads_and_vars)
true
true
f723c71f72bc88cff1f65f82dbd10987ac1732ba
9,771
py
Python
stable_baselines3/common/vec_env/subproc_vec_env.py
qgallouedec/stable-baselines3
a6f5049a99a4c21a6f0bcce458ca3306cef310e0
[ "MIT" ]
null
null
null
stable_baselines3/common/vec_env/subproc_vec_env.py
qgallouedec/stable-baselines3
a6f5049a99a4c21a6f0bcce458ca3306cef310e0
[ "MIT" ]
null
null
null
stable_baselines3/common/vec_env/subproc_vec_env.py
qgallouedec/stable-baselines3
a6f5049a99a4c21a6f0bcce458ca3306cef310e0
[ "MIT" ]
null
null
null
import multiprocessing as mp from collections import OrderedDict from typing import Any, Callable, List, Optional, Sequence, Tuple, Type, Union import gym import numpy as np from stable_baselines3.common.vec_env.base_vec_env import ( CloudpickleWrapper, VecEnv, VecEnvIndices, VecEnvObs, VecEnvStepReturn, ) def _worker( remote: mp.connection.Connection, parent_remote: mp.connection.Connection, env_fn_wrapper: CloudpickleWrapper ) -> None: # Import here to avoid a circular import from stable_baselines3.common.env_util import is_wrapped parent_remote.close() env = env_fn_wrapper.var() while True: try: cmd, data = remote.recv() if cmd == "step": observation, reward, done, info = env.step(data) if done: # save final observation where user can get it, then reset info["terminal_observation"] = observation observation = env.reset() remote.send((observation, reward, done, info)) elif cmd == "seed": remote.send(env.seed(data)) elif cmd == "reset": observation = env.reset() remote.send(observation) elif cmd == "render": remote.send(env.render(data)) elif cmd == "close": env.close() remote.close() break elif cmd == "get_spaces": remote.send((env.observation_space, env.action_space)) elif cmd == "env_method": method = getattr(env, data[0]) remote.send(method(*data[1], **data[2])) elif cmd == "get_attr": remote.send(getattr(env, data)) elif cmd == "set_attr": remote.send(setattr(env, data[0], data[1])) elif cmd == "is_wrapped": remote.send(is_wrapped(env, data)) else: raise NotImplementedError(f"`{cmd}` is not implemented in the worker") except EOFError: break class SubprocVecEnv(VecEnv): """ Creates a multiprocess vectorized wrapper for multiple environments, distributing each environment to its own process, allowing significant speed up when the environment is computationally complex. For performance reasons, if your environment is not IO bound, the number of environments should not exceed the number of logical cores on your CPU. .. 
warning:: Only 'forkserver' and 'spawn' start methods are thread-safe, which is important when TensorFlow sessions or other non thread-safe libraries are used in the parent (see issue #217). However, compared to 'fork' they incur a small start-up cost and have restrictions on global variables. With those methods, users must wrap the code in an ``if __name__ == "__main__":`` block. For more information, see the multiprocessing documentation. :param env_fns: Environments to run in subprocesses :param start_method: method used to start the subprocesses. Must be one of the methods returned by multiprocessing.get_all_start_methods(). Defaults to 'forkserver' on available platforms, and 'spawn' otherwise. """ def __init__(self, env_fns: List[Callable[[], gym.Env]], start_method: Optional[str] = None): self.waiting = False self.closed = False n_envs = len(env_fns) if start_method is None: # Fork is not a thread safe method (see issue #217) # but is more user friendly (does not require to wrap the code in # a `if __name__ == "__main__":`) forkserver_available = "forkserver" in mp.get_all_start_methods() start_method = "forkserver" if forkserver_available else "spawn" ctx = mp.get_context(start_method) self.remotes, self.work_remotes = zip(*[ctx.Pipe() for _ in range(n_envs)]) self.processes = [] for work_remote, remote, env_fn in zip(self.work_remotes, self.remotes, env_fns): args = (work_remote, remote, CloudpickleWrapper(env_fn)) # daemon=True: if the main process crashes, we should not cause things to hang process = ctx.Process(target=_worker, args=args, daemon=True) # pytype:disable=attribute-error process.start() self.processes.append(process) work_remote.close() self.remotes[0].send(("get_spaces", None)) observation_space, action_space = self.remotes[0].recv() VecEnv.__init__(self, len(env_fns), observation_space, action_space) def step_async(self, actions: np.ndarray) -> None: for remote, action in zip(self.remotes, actions): remote.send(("step", action)) 
self.waiting = True def step_wait(self) -> VecEnvStepReturn: results = [remote.recv() for remote in self.remotes] self.waiting = False obs, rews, dones, infos = zip(*results) return _flatten_obs(obs, self.observation_space), np.stack(rews), np.stack(dones), infos def seed(self, seed: Optional[int] = None) -> List[Union[None, int]]: if seed is None: seed = np.random.randint(0, 2**32 - 1) for idx, remote in enumerate(self.remotes): remote.send(("seed", seed + idx)) return [remote.recv() for remote in self.remotes] def reset(self) -> VecEnvObs: for remote in self.remotes: remote.send(("reset", None)) obs = [remote.recv() for remote in self.remotes] return _flatten_obs(obs, self.observation_space) def close(self) -> None: if self.closed: return if self.waiting: for remote in self.remotes: remote.recv() for remote in self.remotes: remote.send(("close", None)) for process in self.processes: process.join() self.closed = True def get_images(self) -> Sequence[np.ndarray]: for pipe in self.remotes: # gather images from subprocesses # `mode` will be taken into account later pipe.send(("render", "rgb_array")) imgs = [pipe.recv() for pipe in self.remotes] return imgs def get_attr(self, attr_name: str, indices: VecEnvIndices = None) -> List[Any]: """Return attribute from vectorized environment (see base class).""" target_remotes = self._get_target_remotes(indices) for remote in target_remotes: remote.send(("get_attr", attr_name)) return [remote.recv() for remote in target_remotes] def set_attr(self, attr_name: str, value: Any, indices: VecEnvIndices = None) -> None: """Set attribute inside vectorized environments (see base class).""" target_remotes = self._get_target_remotes(indices) for remote in target_remotes: remote.send(("set_attr", (attr_name, value))) for remote in target_remotes: remote.recv() def env_method(self, method_name: str, *method_args, indices: VecEnvIndices = None, **method_kwargs) -> List[Any]: """Call instance methods of vectorized environments.""" 
target_remotes = self._get_target_remotes(indices) for remote in target_remotes: remote.send(("env_method", (method_name, method_args, method_kwargs))) return [remote.recv() for remote in target_remotes] def env_is_wrapped(self, wrapper_class: Type[gym.Wrapper], indices: VecEnvIndices = None) -> List[bool]: """Check if worker environments are wrapped with a given wrapper""" target_remotes = self._get_target_remotes(indices) for remote in target_remotes: remote.send(("is_wrapped", wrapper_class)) return [remote.recv() for remote in target_remotes] def _get_target_remotes(self, indices: VecEnvIndices) -> List[Any]: """ Get the connection object needed to communicate with the wanted envs that are in subprocesses. :param indices: refers to indices of envs. :return: Connection object to communicate between processes. """ indices = self._get_indices(indices) return [self.remotes[i] for i in indices] def _flatten_obs(obs: Union[List[VecEnvObs], Tuple[VecEnvObs]], space: gym.spaces.Space) -> VecEnvObs: """ Flatten observations, depending on the observation space. :param obs: observations. A list or tuple of observations, one per environment. Each environment observation may be a NumPy array, or a dict or tuple of NumPy arrays. :return: flattened observations. A flattened NumPy array or an OrderedDict or tuple of flattened numpy arrays. Each NumPy array has the environment index as its first axis. 
""" assert isinstance(obs, (list, tuple)), "expected list or tuple of observations per environment" assert len(obs) > 0, "need observations from at least one environment" if isinstance(space, gym.spaces.Dict): assert isinstance(space.spaces, OrderedDict), "Dict space must have ordered subspaces" assert isinstance(obs[0], dict), "non-dict observation for environment with Dict observation space" return OrderedDict([(k, np.stack([o[k] for o in obs])) for k in space.spaces.keys()]) elif isinstance(space, gym.spaces.Tuple): assert isinstance(obs[0], tuple), "non-tuple observation for environment with Tuple observation space" obs_len = len(space.spaces) return tuple(np.stack([o[i] for o in obs]) for i in range(obs_len)) else: return np.stack(obs)
43.816143
118
0.635861
import multiprocessing as mp from collections import OrderedDict from typing import Any, Callable, List, Optional, Sequence, Tuple, Type, Union import gym import numpy as np from stable_baselines3.common.vec_env.base_vec_env import ( CloudpickleWrapper, VecEnv, VecEnvIndices, VecEnvObs, VecEnvStepReturn, ) def _worker( remote: mp.connection.Connection, parent_remote: mp.connection.Connection, env_fn_wrapper: CloudpickleWrapper ) -> None: from stable_baselines3.common.env_util import is_wrapped parent_remote.close() env = env_fn_wrapper.var() while True: try: cmd, data = remote.recv() if cmd == "step": observation, reward, done, info = env.step(data) if done: info["terminal_observation"] = observation observation = env.reset() remote.send((observation, reward, done, info)) elif cmd == "seed": remote.send(env.seed(data)) elif cmd == "reset": observation = env.reset() remote.send(observation) elif cmd == "render": remote.send(env.render(data)) elif cmd == "close": env.close() remote.close() break elif cmd == "get_spaces": remote.send((env.observation_space, env.action_space)) elif cmd == "env_method": method = getattr(env, data[0]) remote.send(method(*data[1], **data[2])) elif cmd == "get_attr": remote.send(getattr(env, data)) elif cmd == "set_attr": remote.send(setattr(env, data[0], data[1])) elif cmd == "is_wrapped": remote.send(is_wrapped(env, data)) else: raise NotImplementedError(f"`{cmd}` is not implemented in the worker") except EOFError: break class SubprocVecEnv(VecEnv): def __init__(self, env_fns: List[Callable[[], gym.Env]], start_method: Optional[str] = None): self.waiting = False self.closed = False n_envs = len(env_fns) if start_method is None: forkserver_available = "forkserver" in mp.get_all_start_methods() start_method = "forkserver" if forkserver_available else "spawn" ctx = mp.get_context(start_method) self.remotes, self.work_remotes = zip(*[ctx.Pipe() for _ in range(n_envs)]) self.processes = [] for work_remote, remote, env_fn in 
zip(self.work_remotes, self.remotes, env_fns): args = (work_remote, remote, CloudpickleWrapper(env_fn)) process = ctx.Process(target=_worker, args=args, daemon=True) process.start() self.processes.append(process) work_remote.close() self.remotes[0].send(("get_spaces", None)) observation_space, action_space = self.remotes[0].recv() VecEnv.__init__(self, len(env_fns), observation_space, action_space) def step_async(self, actions: np.ndarray) -> None: for remote, action in zip(self.remotes, actions): remote.send(("step", action)) self.waiting = True def step_wait(self) -> VecEnvStepReturn: results = [remote.recv() for remote in self.remotes] self.waiting = False obs, rews, dones, infos = zip(*results) return _flatten_obs(obs, self.observation_space), np.stack(rews), np.stack(dones), infos def seed(self, seed: Optional[int] = None) -> List[Union[None, int]]: if seed is None: seed = np.random.randint(0, 2**32 - 1) for idx, remote in enumerate(self.remotes): remote.send(("seed", seed + idx)) return [remote.recv() for remote in self.remotes] def reset(self) -> VecEnvObs: for remote in self.remotes: remote.send(("reset", None)) obs = [remote.recv() for remote in self.remotes] return _flatten_obs(obs, self.observation_space) def close(self) -> None: if self.closed: return if self.waiting: for remote in self.remotes: remote.recv() for remote in self.remotes: remote.send(("close", None)) for process in self.processes: process.join() self.closed = True def get_images(self) -> Sequence[np.ndarray]: for pipe in self.remotes: pipe.send(("render", "rgb_array")) imgs = [pipe.recv() for pipe in self.remotes] return imgs def get_attr(self, attr_name: str, indices: VecEnvIndices = None) -> List[Any]: target_remotes = self._get_target_remotes(indices) for remote in target_remotes: remote.send(("get_attr", attr_name)) return [remote.recv() for remote in target_remotes] def set_attr(self, attr_name: str, value: Any, indices: VecEnvIndices = None) -> None: target_remotes = 
self._get_target_remotes(indices) for remote in target_remotes: remote.send(("set_attr", (attr_name, value))) for remote in target_remotes: remote.recv() def env_method(self, method_name: str, *method_args, indices: VecEnvIndices = None, **method_kwargs) -> List[Any]: target_remotes = self._get_target_remotes(indices) for remote in target_remotes: remote.send(("env_method", (method_name, method_args, method_kwargs))) return [remote.recv() for remote in target_remotes] def env_is_wrapped(self, wrapper_class: Type[gym.Wrapper], indices: VecEnvIndices = None) -> List[bool]: target_remotes = self._get_target_remotes(indices) for remote in target_remotes: remote.send(("is_wrapped", wrapper_class)) return [remote.recv() for remote in target_remotes] def _get_target_remotes(self, indices: VecEnvIndices) -> List[Any]: indices = self._get_indices(indices) return [self.remotes[i] for i in indices] def _flatten_obs(obs: Union[List[VecEnvObs], Tuple[VecEnvObs]], space: gym.spaces.Space) -> VecEnvObs: assert isinstance(obs, (list, tuple)), "expected list or tuple of observations per environment" assert len(obs) > 0, "need observations from at least one environment" if isinstance(space, gym.spaces.Dict): assert isinstance(space.spaces, OrderedDict), "Dict space must have ordered subspaces" assert isinstance(obs[0], dict), "non-dict observation for environment with Dict observation space" return OrderedDict([(k, np.stack([o[k] for o in obs])) for k in space.spaces.keys()]) elif isinstance(space, gym.spaces.Tuple): assert isinstance(obs[0], tuple), "non-tuple observation for environment with Tuple observation space" obs_len = len(space.spaces) return tuple(np.stack([o[i] for o in obs]) for i in range(obs_len)) else: return np.stack(obs)
true
true
f723c763e4fef4f96747ccb5bded293f8e7b9e66
1,057
py
Python
app/main/tools/test_report.py
BorrowHome/flasky-sandbox
70ef7aa087a0954f7ff4b4845f6599d8481ef0b1
[ "Apache-2.0" ]
1
2021-03-15T02:59:13.000Z
2021-03-15T02:59:13.000Z
app/main/tools/test_report.py
BorrowHome/flasky-sandbox
70ef7aa087a0954f7ff4b4845f6599d8481ef0b1
[ "Apache-2.0" ]
6
2021-03-19T09:49:44.000Z
2022-03-12T00:10:14.000Z
app/main/tools/test_report.py
BorrowHome/flasky-sandbox
70ef7aa087a0954f7ff4b4845f6599d8481ef0b1
[ "Apache-2.0" ]
2
2020-01-11T13:39:22.000Z
2020-07-02T03:57:43.000Z
import json from flask import request, render_template from app.utils.docx.docx import set_sand_docxtpl from config import Config from app.main import main @main.route('/test_report/', methods=['GET', 'POST']) def test_report(): if request.method == 'POST': file_location = Config.SAVE_DOCUMENT_PATH origin_data = request.get_data() str_data = str(origin_data, encoding='utf-8') dict_data = json.loads(str_data) with open(file_location + 'data.json', 'w') as f: json.dump(dict_data, f) set_sand_docxtpl(dict_data) return "数据" else: return render_template('test_report.html') @main.route('/update_report/', methods=['GET']) def update_report(): file_location = Config.SAVE_DOCUMENT_PATH location=request.args.get('location') with open(file_location + 'data.json', 'r') as f: dict_data = json.load(f) try: set_sand_docxtpl(dict_data,location) return "成功" except Exception as e: print(str(e)) return str(e)
26.425
57
0.655629
import json from flask import request, render_template from app.utils.docx.docx import set_sand_docxtpl from config import Config from app.main import main @main.route('/test_report/', methods=['GET', 'POST']) def test_report(): if request.method == 'POST': file_location = Config.SAVE_DOCUMENT_PATH origin_data = request.get_data() str_data = str(origin_data, encoding='utf-8') dict_data = json.loads(str_data) with open(file_location + 'data.json', 'w') as f: json.dump(dict_data, f) set_sand_docxtpl(dict_data) return "数据" else: return render_template('test_report.html') @main.route('/update_report/', methods=['GET']) def update_report(): file_location = Config.SAVE_DOCUMENT_PATH location=request.args.get('location') with open(file_location + 'data.json', 'r') as f: dict_data = json.load(f) try: set_sand_docxtpl(dict_data,location) return "成功" except Exception as e: print(str(e)) return str(e)
true
true
f723c8a42621a6c77b949415bc7d8ebb1efd5fee
8,382
py
Python
rstslide/plugins/Matplotlib/XKCDify.py
rartino/httk-rsttools
57c46362899105a72b3b6efc45b50bcda8e574a7
[ "MIT" ]
null
null
null
rstslide/plugins/Matplotlib/XKCDify.py
rartino/httk-rsttools
57c46362899105a72b3b6efc45b50bcda8e574a7
[ "MIT" ]
null
null
null
rstslide/plugins/Matplotlib/XKCDify.py
rartino/httk-rsttools
57c46362899105a72b3b6efc45b50bcda8e574a7
[ "MIT" ]
null
null
null
""" XKCD plot generator ------------------- Author: Jake Vanderplas This is a script that will take any matplotlib line diagram, and convert it to an XKCD-style plot. It will work for plots with line & text elements, including axes labels and titles (but not axes tick labels). The idea for this comes from work by Damon McDougall http://www.mail-archive.com/matplotlib-users@lists.sourceforge.net/msg25499.html """ import os import numpy as np import pylab as pl from scipy import interpolate, signal import matplotlib.font_manager as fm script_path = os.path.dirname(os.path.abspath(__file__)) # We need a special font for the code below. It can be downloaded this way: #import os #import urllib2 #if not os.path.exists('Humor-Sans.ttf'): # print 'Downloading the font Humor-sans.' # fhandle = urllib2.urlopen('http://antiyawn.com/uploads/Humor-Sans.ttf') # open('Humor-Sans.ttf', 'wb').write(fhandle.read()) def xkcd_line(x, y, xlim=None, ylim=None, mag=1.0, f1=30, f2=0.05, f3=15): """ Mimic a hand-drawn line from (x, y) data Parameters ---------- x, y : array_like arrays to be modified xlim, ylim : data range the assumed plot range for the modification. If not specified, they will be guessed from the data mag : float magnitude of distortions f1, f2, f3 : int, float, int filtering parameters. f1 gives the size of the window, f2 gives the high-frequency cutoff, f3 gives the size of the filter Returns ------- x, y : ndarrays The modified lines """ x = np.asarray(x) y = np.asarray(y) # get limits for rescaling if xlim is None: xlim = (x.min(), x.max()) if ylim is None: ylim = (y.min(), y.max()) if xlim[1] == xlim[0]: xlim = ylim if ylim[1] == ylim[0]: ylim = xlim # scale the data x_scaled = (x - xlim[0]) * 1. / (xlim[1] - xlim[0]) y_scaled = (y - ylim[0]) * 1. 
/ (ylim[1] - ylim[0]) # compute the total distance along the path dx = x_scaled[1:] - x_scaled[:-1] dy = y_scaled[1:] - y_scaled[:-1] dist_tot = np.sum(np.sqrt(dx * dx + dy * dy)) # number of interpolated points is proportional to the distance Nu = int(200 * dist_tot) u = np.arange(-1, Nu + 1) * 1. / (Nu - 1) # interpolate curve at sampled points k = min(3, len(x) - 1) res = interpolate.splprep([x_scaled, y_scaled], s=0, k=k) x_int, y_int = interpolate.splev(u, res[0]) # we'll perturb perpendicular to the drawn line dx = x_int[2:] - x_int[:-2] dy = y_int[2:] - y_int[:-2] dist = np.sqrt(dx * dx + dy * dy) # create a filtered perturbation coeffs = mag * np.random.normal(0, 0.01, len(x_int) - 2) b = signal.firwin(f1, f2 * dist_tot, window=('kaiser', f3)) response = signal.lfilter(b, 1, coeffs) x_int[1:-1] += response * dy / dist y_int[1:-1] += response * dx / dist # un-scale data x_int = x_int[1:-1] * (xlim[1] - xlim[0]) + xlim[0] y_int = y_int[1:-1] * (ylim[1] - ylim[0]) + ylim[0] return x_int, y_int def XKCDify(ax, mag=1.0, f1=50, f2=0.01, f3=15, forecolor='k', bgcolor='w', xaxis_loc=None, yaxis_loc=None, xaxis_arrow='+', yaxis_arrow='+', ax_extend=0.1, expand_axes=False): """Make axis look hand-drawn This adjusts all lines, text, legends, and axes in the figure to look like xkcd plots. Other plot elements are not modified. Parameters ---------- ax : Axes instance the axes to be modified. mag : float the magnitude of the distortion f1, f2, f3 : int, float, int filtering parameters. f1 gives the size of the window, f2 gives the high-frequency cutoff, f3 gives the size of the filter xaxis_loc, yaxis_log : float The locations to draw the x and y axes. If not specified, they will be drawn from the bottom left of the plot xaxis_arrow, yaxis_arrow : str where to draw arrows on the x/y axes. 
Options are '+', '-', '+-', or '' ax_extend : float How far (fractionally) to extend the drawn axes beyond the original axes limits expand_axes : bool if True, then expand axes to fill the figure (useful if there is only a single axes in the figure) """ # Get axes aspect ext = ax.get_window_extent().extents aspect = (ext[3] - ext[1]) / (ext[2] - ext[0]) xlim = ax.get_xlim() ylim = ax.get_ylim() xspan = xlim[1] - xlim[0] yspan = ylim[1] - xlim[0] xax_lim = (xlim[0] - ax_extend * xspan, xlim[1] + ax_extend * xspan) yax_lim = (ylim[0] - ax_extend * yspan, ylim[1] + ax_extend * yspan) if xaxis_loc is None: xaxis_loc = ylim[0] if yaxis_loc is None: yaxis_loc = xlim[0] # Draw axes xaxis = pl.Line2D([xax_lim[0], xax_lim[1]], [xaxis_loc, xaxis_loc], linestyle='-', color=forecolor) yaxis = pl.Line2D([yaxis_loc, yaxis_loc], [yax_lim[0], yax_lim[1]], linestyle='-', color=forecolor) # Label axes3, 0.5, 'hello', fontsize=14) ax.text(xax_lim[1], xaxis_loc - 0.05 * yspan, ax.get_xlabel(), fontsize=14, ha='right', va='top', rotation=5) ax.text(yaxis_loc - 0.05 * xspan, yax_lim[1], ax.get_ylabel(), fontsize=14, ha='right', va='top', rotation=85) ax.set_xlabel('') ax.set_ylabel('') # Add title ax.text(0.5 * (xax_lim[1] + xax_lim[0]), yax_lim[1], ax.get_title(), ha='center', va='bottom', fontsize=16) ax.set_title('') Nlines = len(ax.lines) lines = [xaxis, yaxis] + [ax.lines.pop(0) for i in range(Nlines)] for line in lines: x, y = line.get_data() x_int, y_int = xkcd_line(x, y, xlim, ylim, mag, f1, f2, f3) # create foreground and background line lw = line.get_linewidth() line.set_linewidth(2 * lw) line.set_data(x_int, y_int) # # don't add background line for axes # if (line is not xaxis) and (line is not yaxis): # line_bg = pl.Line2D(x_int, y_int, color=bgcolor, # linewidth=8 * lw) # ax.add_line(line_bg) ax.add_line(line) # Draw arrow-heads at the end of axes lines arr1 = 0.03 * np.array([-1, 0, -1]) arr2 = 0.02 * np.array([-1, 0, 1]) arr1[::2] += np.random.normal(0, 0.005, 2) 
arr2[::2] += np.random.normal(0, 0.005, 2) x, y = xaxis.get_data() if '+' in str(xaxis_arrow): ax.plot(x[-1] + arr1 * xspan * aspect, y[-1] + arr2 * yspan, color=forecolor, lw=2) if '-' in str(xaxis_arrow): ax.plot(x[0] - arr1 * xspan * aspect, y[0] - arr2 * yspan, color=forecolor, lw=2) x, y = yaxis.get_data() if '+' in str(yaxis_arrow): ax.plot(x[-1] + arr2 * xspan * aspect, y[-1] + arr1 * yspan, color=forecolor, lw=2) if '-' in str(yaxis_arrow): ax.plot(x[0] - arr2 * xspan * aspect, y[0] - arr1 * yspan, color=forecolor, lw=2) # Change all the fonts to humor-sans. prop = fm.FontProperties(fname=os.path.join(script_path, 'fonts', 'Humor-Sans.ttf'), size=16) for text in ax.texts: text.set_fontproperties(prop) # modify legend leg = ax.get_legend() if leg is not None: leg.set_frame_on(False) for child in leg.get_children(): if isinstance(child, pl.Line2D): x, y = child.get_data() child.set_data(xkcd_line(x, y, mag=1., f1=100, f2=0.001)) child.set_linewidth(2 * child.get_linewidth()) if isinstance(child, pl.Text): child.set_fontproperties(prop) # Set the axis limits ax.set_xlim(xax_lim[0] - 0.1 * xspan, xax_lim[1] + 0.1 * xspan) ax.set_ylim(yax_lim[0] - 0.1 * yspan, yax_lim[1] + 0.1 * yspan) # adjust the axes ax.set_xticks([]) ax.set_yticks([]) if expand_axes: ax.figure.set_facecolor(bgcolor) ax.set_axis_off() ax.set_position([0, 0, 1, 1]) return ax
31.393258
97
0.570866
import os import numpy as np import pylab as pl from scipy import interpolate, signal import matplotlib.font_manager as fm script_path = os.path.dirname(os.path.abspath(__file__)) def xkcd_line(x, y, xlim=None, ylim=None, mag=1.0, f1=30, f2=0.05, f3=15): x = np.asarray(x) y = np.asarray(y) if xlim is None: xlim = (x.min(), x.max()) if ylim is None: ylim = (y.min(), y.max()) if xlim[1] == xlim[0]: xlim = ylim if ylim[1] == ylim[0]: ylim = xlim x_scaled = (x - xlim[0]) * 1. / (xlim[1] - xlim[0]) y_scaled = (y - ylim[0]) * 1. / (ylim[1] - ylim[0]) dx = x_scaled[1:] - x_scaled[:-1] dy = y_scaled[1:] - y_scaled[:-1] dist_tot = np.sum(np.sqrt(dx * dx + dy * dy)) Nu = int(200 * dist_tot) u = np.arange(-1, Nu + 1) * 1. / (Nu - 1) k = min(3, len(x) - 1) res = interpolate.splprep([x_scaled, y_scaled], s=0, k=k) x_int, y_int = interpolate.splev(u, res[0]) dx = x_int[2:] - x_int[:-2] dy = y_int[2:] - y_int[:-2] dist = np.sqrt(dx * dx + dy * dy) # create a filtered perturbation coeffs = mag * np.random.normal(0, 0.01, len(x_int) - 2) b = signal.firwin(f1, f2 * dist_tot, window=('kaiser', f3)) response = signal.lfilter(b, 1, coeffs) x_int[1:-1] += response * dy / dist y_int[1:-1] += response * dx / dist # un-scale data x_int = x_int[1:-1] * (xlim[1] - xlim[0]) + xlim[0] y_int = y_int[1:-1] * (ylim[1] - ylim[0]) + ylim[0] return x_int, y_int def XKCDify(ax, mag=1.0, f1=50, f2=0.01, f3=15, forecolor='k', bgcolor='w', xaxis_loc=None, yaxis_loc=None, xaxis_arrow='+', yaxis_arrow='+', ax_extend=0.1, expand_axes=False): # Get axes aspect ext = ax.get_window_extent().extents aspect = (ext[3] - ext[1]) / (ext[2] - ext[0]) xlim = ax.get_xlim() ylim = ax.get_ylim() xspan = xlim[1] - xlim[0] yspan = ylim[1] - xlim[0] xax_lim = (xlim[0] - ax_extend * xspan, xlim[1] + ax_extend * xspan) yax_lim = (ylim[0] - ax_extend * yspan, ylim[1] + ax_extend * yspan) if xaxis_loc is None: xaxis_loc = ylim[0] if yaxis_loc is None: yaxis_loc = xlim[0] # Draw axes xaxis = pl.Line2D([xax_lim[0], xax_lim[1]], 
[xaxis_loc, xaxis_loc], linestyle='-', color=forecolor) yaxis = pl.Line2D([yaxis_loc, yaxis_loc], [yax_lim[0], yax_lim[1]], linestyle='-', color=forecolor) # Label axes3, 0.5, 'hello', fontsize=14) ax.text(xax_lim[1], xaxis_loc - 0.05 * yspan, ax.get_xlabel(), fontsize=14, ha='right', va='top', rotation=5) ax.text(yaxis_loc - 0.05 * xspan, yax_lim[1], ax.get_ylabel(), fontsize=14, ha='right', va='top', rotation=85) ax.set_xlabel('') ax.set_ylabel('') # Add title ax.text(0.5 * (xax_lim[1] + xax_lim[0]), yax_lim[1], ax.get_title(), ha='center', va='bottom', fontsize=16) ax.set_title('') Nlines = len(ax.lines) lines = [xaxis, yaxis] + [ax.lines.pop(0) for i in range(Nlines)] for line in lines: x, y = line.get_data() x_int, y_int = xkcd_line(x, y, xlim, ylim, mag, f1, f2, f3) # create foreground and background line lw = line.get_linewidth() line.set_linewidth(2 * lw) line.set_data(x_int, y_int) # # don't add background line for axes ax.add_line(line) arr1 = 0.03 * np.array([-1, 0, -1]) arr2 = 0.02 * np.array([-1, 0, 1]) arr1[::2] += np.random.normal(0, 0.005, 2) arr2[::2] += np.random.normal(0, 0.005, 2) x, y = xaxis.get_data() if '+' in str(xaxis_arrow): ax.plot(x[-1] + arr1 * xspan * aspect, y[-1] + arr2 * yspan, color=forecolor, lw=2) if '-' in str(xaxis_arrow): ax.plot(x[0] - arr1 * xspan * aspect, y[0] - arr2 * yspan, color=forecolor, lw=2) x, y = yaxis.get_data() if '+' in str(yaxis_arrow): ax.plot(x[-1] + arr2 * xspan * aspect, y[-1] + arr1 * yspan, color=forecolor, lw=2) if '-' in str(yaxis_arrow): ax.plot(x[0] - arr2 * xspan * aspect, y[0] - arr1 * yspan, color=forecolor, lw=2) prop = fm.FontProperties(fname=os.path.join(script_path, 'fonts', 'Humor-Sans.ttf'), size=16) for text in ax.texts: text.set_fontproperties(prop) leg = ax.get_legend() if leg is not None: leg.set_frame_on(False) for child in leg.get_children(): if isinstance(child, pl.Line2D): x, y = child.get_data() child.set_data(xkcd_line(x, y, mag=1., f1=100, f2=0.001)) child.set_linewidth(2 * 
child.get_linewidth()) if isinstance(child, pl.Text): child.set_fontproperties(prop) ax.set_xlim(xax_lim[0] - 0.1 * xspan, xax_lim[1] + 0.1 * xspan) ax.set_ylim(yax_lim[0] - 0.1 * yspan, yax_lim[1] + 0.1 * yspan) ax.set_xticks([]) ax.set_yticks([]) if expand_axes: ax.figure.set_facecolor(bgcolor) ax.set_axis_off() ax.set_position([0, 0, 1, 1]) return ax
true
true
f723c8c94f31ee4136915426bd76d4aad731bdda
5,323
py
Python
utils/utils_fit.py
bubbliiiing/yolox-tf2
0407c77858d436a6b370e591eea7963cc807f3b4
[ "Apache-2.0" ]
49
2021-11-01T06:02:21.000Z
2022-03-29T07:08:22.000Z
utils/utils_fit.py
bubbliiiing/yolox-tf2
0407c77858d436a6b370e591eea7963cc807f3b4
[ "Apache-2.0" ]
6
2021-11-17T08:35:09.000Z
2022-02-15T12:43:14.000Z
utils/utils_fit.py
bubbliiiing/yolox-tf2
0407c77858d436a6b370e591eea7963cc807f3b4
[ "Apache-2.0" ]
20
2021-11-19T12:03:21.000Z
2022-03-16T01:45:25.000Z
import os import tensorflow as tf from nets.yolo import get_yolo_loss from tqdm import tqdm #------------------------------# # 防止bug #------------------------------# def get_train_step_fn(strategy): @tf.function def train_step(imgs, targets, net, yolo_loss, optimizer): with tf.GradientTape() as tape: #------------------------------# # 计算loss #------------------------------# P5_output, P4_output, P3_output = net(imgs, training=True) args = [P5_output, P4_output, P3_output] + [targets] loss_value = yolo_loss(args) #------------------------------# # 添加上l2正则化参数 #------------------------------# loss_value = tf.reduce_sum(net.losses) + loss_value grads = tape.gradient(loss_value, net.trainable_variables) optimizer.apply_gradients(zip(grads, net.trainable_variables)) return loss_value if strategy == None: return train_step else: #----------------------# # 多gpu训练 #----------------------# @tf.function def distributed_train_step(imgs, targets, net, yolo_loss, optimizer): per_replica_losses = strategy.run(train_step, args=(imgs, targets, net, yolo_loss, optimizer,)) return strategy.reduce(tf.distribute.ReduceOp.MEAN, per_replica_losses, axis=None) return distributed_train_step #----------------------# # 防止bug #----------------------# def get_val_step_fn(strategy): @tf.function def val_step(imgs, targets, net, yolo_loss, optimizer): #------------------------------# # 计算loss #------------------------------# P5_output, P4_output, P3_output = net(imgs, training=False) args = [P5_output, P4_output, P3_output] + [targets] loss_value = yolo_loss(args) #------------------------------# # 添加上l2正则化参数 #------------------------------# loss_value = tf.reduce_sum(net.losses) + loss_value return loss_value if strategy == None: return val_step else: #----------------------# # 多gpu验证 #----------------------# @tf.function def distributed_val_step(imgs, targets, net, yolo_loss, optimizer): per_replica_losses = strategy.run(val_step, args=(imgs, targets, net, yolo_loss, optimizer,)) return 
strategy.reduce(tf.distribute.ReduceOp.MEAN, per_replica_losses, axis=None) return distributed_val_step def fit_one_epoch(net, yolo_loss, loss_history, eval_callback, optimizer, epoch, epoch_step, epoch_step_val, gen, gen_val, Epoch, input_shape, num_classes, save_period, save_dir, strategy): train_step = get_train_step_fn(strategy) val_step = get_val_step_fn(strategy) loss = 0 val_loss = 0 print('Start Train') with tqdm(total=epoch_step,desc=f'Epoch {epoch + 1}/{Epoch}',postfix=dict,mininterval=0.3) as pbar: for iteration, batch in enumerate(gen): if iteration >= epoch_step: break images, targets = batch[0], batch[1] loss_value = train_step(images, targets, net, yolo_loss, optimizer) loss = loss + loss_value pbar.set_postfix(**{'total_loss': float(loss) / (iteration + 1), 'lr' : optimizer.lr.numpy()}) pbar.update(1) print('Finish Train') print('Start Validation') with tqdm(total=epoch_step_val, desc=f'Epoch {epoch + 1}/{Epoch}',postfix=dict,mininterval=0.3) as pbar: for iteration, batch in enumerate(gen_val): if iteration >= epoch_step_val: break images, targets = batch[0], batch[1] loss_value = val_step(images, targets, net, yolo_loss, optimizer) val_loss = val_loss + loss_value pbar.set_postfix(**{'total_loss': float(val_loss) / (iteration + 1)}) pbar.update(1) print('Finish Validation') logs = {'loss': loss.numpy() / epoch_step, 'val_loss': val_loss.numpy() / epoch_step_val} loss_history.on_epoch_end([], logs) eval_callback.on_epoch_end(epoch, logs) print('Epoch:'+ str(epoch + 1) + '/' + str(Epoch)) print('Total Loss: %.3f || Val Loss: %.3f ' % (loss / epoch_step, val_loss / epoch_step_val)) #-----------------------------------------------# # 保存权值 #-----------------------------------------------# if (epoch + 1) % save_period == 0 or epoch + 1 == Epoch: net.save_weights(os.path.join(save_dir, "ep%03d-loss%.3f-val_loss%.3f.h5" % (epoch + 1, loss / epoch_step, val_loss / epoch_step_val))) if len(loss_history.val_loss) <= 1 or (val_loss / epoch_step_val) <= 
min(loss_history.val_loss): print('Save best model to best_epoch_weights.pth') net.save_weights(os.path.join(save_dir, "best_epoch_weights.h5")) net.save_weights(os.path.join(save_dir, "last_epoch_weights.h5"))
43.276423
144
0.520195
import os import tensorflow as tf from nets.yolo import get_yolo_loss from tqdm import tqdm ef get_train_step_fn(strategy): @tf.function def train_step(imgs, targets, net, yolo_loss, optimizer): with tf.GradientTape() as tape: P5_output, P4_output, P3_output = net(imgs, training=True) args = [P5_output, P4_output, P3_output] + [targets] loss_value = yolo_loss(args) loss_value = tf.reduce_sum(net.losses) + loss_value grads = tape.gradient(loss_value, net.trainable_variables) optimizer.apply_gradients(zip(grads, net.trainable_variables)) return loss_value if strategy == None: return train_step else: @tf.function def distributed_train_step(imgs, targets, net, yolo_loss, optimizer): per_replica_losses = strategy.run(train_step, args=(imgs, targets, net, yolo_loss, optimizer,)) return strategy.reduce(tf.distribute.ReduceOp.MEAN, per_replica_losses, axis=None) return distributed_train_step ef get_val_step_fn(strategy): @tf.function def val_step(imgs, targets, net, yolo_loss, optimizer): P5_output, P4_output, P3_output = net(imgs, training=False) args = [P5_output, P4_output, P3_output] + [targets] loss_value = yolo_loss(args) loss_value = tf.reduce_sum(net.losses) + loss_value return loss_value if strategy == None: return val_step else: @tf.function def distributed_val_step(imgs, targets, net, yolo_loss, optimizer): per_replica_losses = strategy.run(val_step, args=(imgs, targets, net, yolo_loss, optimizer,)) return strategy.reduce(tf.distribute.ReduceOp.MEAN, per_replica_losses, axis=None) return distributed_val_step def fit_one_epoch(net, yolo_loss, loss_history, eval_callback, optimizer, epoch, epoch_step, epoch_step_val, gen, gen_val, Epoch, input_shape, num_classes, save_period, save_dir, strategy): train_step = get_train_step_fn(strategy) val_step = get_val_step_fn(strategy) loss = 0 val_loss = 0 print('Start Train') with tqdm(total=epoch_step,desc=f'Epoch {epoch + 1}/{Epoch}',postfix=dict,mininterval=0.3) as pbar: for iteration, batch in enumerate(gen): if 
iteration >= epoch_step: break images, targets = batch[0], batch[1] loss_value = train_step(images, targets, net, yolo_loss, optimizer) loss = loss + loss_value pbar.set_postfix(**{'total_loss': float(loss) / (iteration + 1), 'lr' : optimizer.lr.numpy()}) pbar.update(1) print('Finish Train') print('Start Validation') with tqdm(total=epoch_step_val, desc=f'Epoch {epoch + 1}/{Epoch}',postfix=dict,mininterval=0.3) as pbar: for iteration, batch in enumerate(gen_val): if iteration >= epoch_step_val: break images, targets = batch[0], batch[1] loss_value = val_step(images, targets, net, yolo_loss, optimizer) val_loss = val_loss + loss_value pbar.set_postfix(**{'total_loss': float(val_loss) / (iteration + 1)}) pbar.update(1) print('Finish Validation') logs = {'loss': loss.numpy() / epoch_step, 'val_loss': val_loss.numpy() / epoch_step_val} loss_history.on_epoch_end([], logs) eval_callback.on_epoch_end(epoch, logs) print('Epoch:'+ str(epoch + 1) + '/' + str(Epoch)) print('Total Loss: %.3f || Val Loss: %.3f ' % (loss / epoch_step, val_loss / epoch_step_val)) if (epoch + 1) % save_period == 0 or epoch + 1 == Epoch: net.save_weights(os.path.join(save_dir, "ep%03d-loss%.3f-val_loss%.3f.h5" % (epoch + 1, loss / epoch_step, val_loss / epoch_step_val))) if len(loss_history.val_loss) <= 1 or (val_loss / epoch_step_val) <= min(loss_history.val_loss): print('Save best model to best_epoch_weights.pth') net.save_weights(os.path.join(save_dir, "best_epoch_weights.h5")) net.save_weights(os.path.join(save_dir, "last_epoch_weights.h5"))
true
true
f723c9484375a916d20b7a139369a7ec0dc8afb7
3,140
py
Python
warrior/WarriorCore/Classes/war_print_class.py
YutakaMizugaki/warriorframework
685761cf044182ec88ce86a942d4be1e150a1256
[ "Apache-2.0" ]
24
2017-06-06T15:48:08.000Z
2021-03-17T07:52:52.000Z
warrior/WarriorCore/Classes/war_print_class.py
YutakaMizugaki/warriorframework
685761cf044182ec88ce86a942d4be1e150a1256
[ "Apache-2.0" ]
272
2017-05-19T20:39:12.000Z
2021-12-13T19:34:51.000Z
warrior/WarriorCore/Classes/war_print_class.py
pavithra-gowda/warrior
19b153310552b986b86b5470fcfea9547a74c3a9
[ "Apache-2.0" ]
37
2017-05-17T21:24:37.000Z
2021-07-24T18:09:22.000Z
''' Copyright 2017, Fujitsu Network Communications, Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ''' import sys import re """ This class will trap stdout and redirects the message to logfile and stdout It takes console_logfile and write_to_stdout ( boolean flag) as arguments. !!! Important!!! DO NOT import any modules from warrior/Framework package that uses warrior/Framework/Utils/print_Utils.py at module level into this module as it will lead to cyclic imports. """ def print_main(message, print_type, color_message=None, *args, **kwargs): """The main print function will be called by other print functions """ if color_message is not None: print_string = print_type + " " + str(color_message) elif color_message is None: print_string = print_type + " " + str(message) if len(args) > 0: print_string = (print_type + " " + str(message) + str(args)) # set logging argument default to True, to write the message in the log file if isinstance(sys.stdout, RedirectPrint): sys.stdout.write((print_string + '\n'), logging=kwargs.get('logging', True)) else: sys.stdout.write(print_string + '\n') sys.stdout.flush() from Framework.Utils.testcase_Utils import TCOBJ if TCOBJ.pnote is False: TCOBJ.p_note_level(message, print_type) return print_string class RedirectPrint(object): """Class that has methods to redirect prints from stdout to correct console log files """ def __init__(self, console_logfile): """Constructor""" self.get_file(console_logfile) # self.write_to_stdout = write_to_stdout self.stdout = sys.stdout def 
get_file(self, console_logfile): """If the console logfile is not None redirect sys.stdout to console logfile """ self.file = console_logfile if self.file is not None: sys.stdout = self def write(self, data, logging=True): """ - Writes data to the sys.stdout - Writes data to log file only if the logging is True - Removes the ansii escape chars before writing to file """ self.stdout.write(data) ansi_escape = re.compile(r'\x1b[^m]*m') data = ansi_escape.sub('', data) # write to log file if logging is set to True if logging is True: self.file.write(data) self.file.flush() def isatty(self): """Check if sys.stdout is a tty """ # print self.stdout.isatty() return self.stdout.isatty() def flush(self): """flush logfile """ return self.stdout.flush()
35.280899
80
0.670064
import sys import re def print_main(message, print_type, color_message=None, *args, **kwargs): if color_message is not None: print_string = print_type + " " + str(color_message) elif color_message is None: print_string = print_type + " " + str(message) if len(args) > 0: print_string = (print_type + " " + str(message) + str(args)) if isinstance(sys.stdout, RedirectPrint): sys.stdout.write((print_string + '\n'), logging=kwargs.get('logging', True)) else: sys.stdout.write(print_string + '\n') sys.stdout.flush() from Framework.Utils.testcase_Utils import TCOBJ if TCOBJ.pnote is False: TCOBJ.p_note_level(message, print_type) return print_string class RedirectPrint(object): def __init__(self, console_logfile): self.get_file(console_logfile) self.stdout = sys.stdout def get_file(self, console_logfile): self.file = console_logfile if self.file is not None: sys.stdout = self def write(self, data, logging=True): self.stdout.write(data) ansi_escape = re.compile(r'\x1b[^m]*m') data = ansi_escape.sub('', data) if logging is True: self.file.write(data) self.file.flush() def isatty(self): return self.stdout.isatty() def flush(self): return self.stdout.flush()
true
true
f723c959dc405638e9be66e367589488a0bb7950
74,965
py
Python
doppyo/sugar.py
aaronspring/doppyo
e29e21fbb997f024f39d2e5e67decfc235b0dcca
[ "MIT" ]
null
null
null
doppyo/sugar.py
aaronspring/doppyo
e29e21fbb997f024f39d2e5e67decfc235b0dcca
[ "MIT" ]
null
null
null
doppyo/sugar.py
aaronspring/doppyo
e29e21fbb997f024f39d2e5e67decfc235b0dcca
[ "MIT" ]
null
null
null
""" Collection of old doppyo functions and useful tidbits for internal dcfp use Authors: Dougie Squire and Thomas Moore Date created: 01/10/2018 Python Version: 3.6 """ # =================================================================================================== # Packages # =================================================================================================== import numpy as np import pandas as pd import xarray as xr import cartopy from collections import Sequence from itertools import chain, count import matplotlib import matplotlib.pyplot as plt import matplotlib.ticker as mticker from cartopy.util import add_cyclic_point from cartopy.mpl.gridliner import LONGITUDE_FORMATTER, LATITUDE_FORMATTER # Load doppyo packages ----- from doppyo import utils # =================================================================================================== def rank_gufunc(x): ''' Returns ranked data along specified dimension ''' import bottleneck ranks = bottleneck.nanrankdata(x,axis=-1) ranks = ranks[...,0] return ranks def compute_rank(da_1, da_2, over_dim): ''' Feeds forecast and observation data to ufunc that ranks data along specified dimension''' # Add 'ensemble' coord to obs if one does not exist ----- if over_dim not in da_2.coords: da_2_pass = da_2.copy() da_2_pass.coords[over_dim] = -1 da_2_pass = da_2_pass.expand_dims(over_dim) else: da_2_pass = da_2.copy() # Only keep and combine instances that appear in both dataarrays (excluding the ensemble dim) ----- aligned = xr.align(da_2_pass, da_1, join='inner', exclude=over_dim) combined = xr.concat(aligned, dim=over_dim) return xr.apply_ufunc(rank_gufunc, combined, input_core_dims=[[over_dim]], dask='allowed', output_dtypes=[int]).rename('rank') # =================================================================================================== def categorize(da, bin_edges): """ Returns the indices of the bins to which each value in input array belongs Output indices are such that 
bin_edges[i-1] <= x < bin_edges[i] """ return xr.apply_ufunc(np.digitize, da, bin_edges, input_core_dims=[[],[]], dask='allowed', output_dtypes=[int]).rename('categorized') # =================================================================================================== def unstack_and_count(da, dims): """ Unstacks provided xarray object and returns the total number of elements along dims """ try: unstacked = da.unstack(da.dims[0]) except ValueError: unstacked = da if dims is None: return ((0 * unstacked) + 1) else: return ((0 * unstacked) + 1).sum(dim=dims, skipna=True) def compute_histogram(da, bin_edges, over_dims): """ Returns the histogram of data over the specified dimensions """ # To use groupby_bins, da must have a name ----- da = da.rename('data') hist = da.groupby_bins(da, bins=bin_edges, squeeze=False) \ .apply(unstack_and_count, dims=over_dims) \ .fillna(0) \ .rename({'data_bins' : 'bins'}) hist['bins'] = (bin_edges[0:-1]+bin_edges[1:])/2 # Add nans where data did not fall in any bin ----- return hist.astype(int).where(hist.sum('bins') != 0) # =================================================================================================== def calc_gradient(da, dim, x=None): """ Returns the gradient computed using second order accurate central differences in the interior points and either first order accurate one-sided (forward or backwards) differences at the boundaries See https://docs.scipy.org/doc/numpy-1.14.0/reference/generated/numpy.gradient.html """ # Replace dimension values if specified ----- da_n = da.copy() if x is None: x = da_n[dim] centre_chunk = range(len(x[dim])-2) f_hd = da_n.shift(**{dim:-2}) f = da_n.shift(**{dim:-1}) f_hs = da_n hs = x.shift(**{dim:-1}) - x hd = x.shift(**{dim:-2}) - x.shift(**{dim:-1}) c = (hs ** 2 * f_hd + (hd ** 2 - hs ** 2) * f - hd ** 2 * f_hs) / \ (hs * hd * (hd + hs)).isel(**{dim : centre_chunk}) c[dim] = x[dim][1:-1] l = (da_n.shift(**{dim:-1}) - da_n).isel(**{dim : 0}) / \ (x.shift(**{dim:-1}) - 
x).isel(**{dim : 0}) r = (-da_n.shift(**{dim:1}) + da_n).isel(**{dim : -1}) / \ (-x.shift(**{dim:1}) + x).isel(**{dim : -1}) grad = xr.concat([l, c, r], dim=dim) grad[dim] = da[dim] return grad # =================================================================================================== def bias_correct_ms(da_biased, da_target, da_target_clim=None, init_date_name='init_date', lead_time_name='lead_time'): """ Adjusts, per month and lead time, the mean and standard deviation of da_biased to match that of da_target. Author: Dougie Squire Date: 01/09/2018 Parameters ---------- da_biased : xarray DataArray Array containing values to be corrected. The time information of this array is anticipated in a lead_time/inital_date format da_target : xarray DataArray Array containing values to use for the correction. da_target_clim : xarray DataArray, optional Array containing a climatology of da_target. If da_target_clim is provided, this function returns both the corrected full field and the anomalies. Otherwise, returns only the anomalies init_date_name : str, optional Name of initial date dimension lead_time_name : str, optional Name of lead time dimension Returns ------- corrected : xarray DataArray Bias corrected array Examples -------- >>> biased = xr.DataArray(np.random.normal(size=(48,6)), ... coords=[('init_date', pd.date_range(start='1/1/2018', periods=48, freq='M')), ... ('lead_time', np.arange(6))]) >>> biased['lead_time'].attrs['units'] = 'M' >>> target = xr.DataArray(np.random.normal(size=(48)), ... 
coords=[('time', pd.date_range(start='1/1/2000', periods=48, freq='M'))]) >>> doppyo.utils.bias_correct_ms(biased, target) <xarray.DataArray (init_date: 48, lead_time: 6)> array([[ 9.336394e-02, 1.133997e-01, -5.851293e-01, -4.908594e-02, 7.952765e-01, 5.325052e-01], [-1.131123e+00, 1.603380e-01, -1.626906e+00, -1.811439e+00, -1.653359e-01, -1.871170e-01], [ 6.515435e-01, -1.064662e+00, 2.249610e+00, 6.881682e-01, -1.831233e-01, -1.159470e+00], ..., [-2.096226e+00, 3.143062e-04, 3.603787e-01, -1.515535e+00, 5.421578e-02, -6.446119e-01], [-8.186274e-01, -9.858171e-01, 1.933307e+00, 5.227265e-02, 5.443201e-01, -7.059492e-01], [ 2.253396e-02, 2.238470e+00, 1.138728e-01, -3.617103e-01, 1.678223e+00, -2.413158e+00]]) Coordinates: * lead_time (lead_time) int64 0 1 2 3 4 5 * init_date (init_date) datetime64[ns] 2018-01-31 2018-02-28 ... 2021-12-31 Notes ----------- Many years of initial dates (in da_biased) and times (in da_target) must exist for the mean and standard deviation to be computed reliably """ def _groupby_lead_and_mean(da, over_dims, init_date_name, lead_time_name): """ Groups provided array by lead time and computes mean """ return da.unstack('stacked_' + init_date_name + '_' + lead_time_name).groupby(lead_time_name).mean(over_dims, skipna=True) def _groupby_lead_and_std(da, over_dims, init_date_name, lead_time_name): """ Groups provided array by lead time and computes standard deviation """ return da.unstack('stacked_' + init_date_name + '_' + lead_time_name).groupby(lead_time_name).std(over_dims, skipna=True) def _unstack_and_shift_per_month(da, shift, init_date_name, lead_time_name): """ Unstacks and adjusts input array by a constant shift as a function of month """ da_us = da.unstack('stacked_' + init_date_name + '_' + lead_time_name) the_month = np.ndarray.flatten(da_us.month.values) the_month = int(np.unique(the_month[~np.isnan(the_month)])) return da_us - shift.sel(month=the_month) def _unstack_and_scale_per_month(da, scale, init_date_name, 
lead_time_name): """ Unstacks and scales input array by a constant value as a function of month """ da_us = da.unstack('stacked_' + init_date_name + '_' + lead_time_name) the_month = np.ndarray.flatten(da_us.month.values) the_month = int(np.unique(the_month[~np.isnan(the_month)])) return da_us * scale.sel(month=the_month) def _scale_per_month(da, scale): """ Scales input array by a constant value as a function of month """ return da.groupby('time.month') * scale _anomalize = lambda data, clim: datetime_to_leadtime( anomalize( leadtime_to_datetime(data),clim)) _rescale = lambda da, scale : datetime_to_leadtime( _scale_per_month( leadtime_to_datetime(da), scale)) da_biased = da_biased.copy() da_target = da_target.copy() month = (da_biased[init_date_name].dt.month + da_biased[lead_time_name]) % 12 month = month.where(month != 0, 12) # Correct the mean ----- da_biased.coords['month'] = month try: da_biased_mean = da_biased.groupby('month').apply(_groupby_lead_and_mean, over_dims=[init_date_name,'ensemble'], init_date_name=init_date_name, lead_time_name=lead_time_name) except ValueError: da_biased_mean = da_biased.groupby('month').apply(_groupby_lead_and_mean, over_dims=init_date_name, init_date_name=init_date_name, lead_time_name=lead_time_name) if da_target_clim is not None: da_target_mean = da_target.groupby('time.month').mean('time') da_meancorr = da_biased.groupby('month').apply(_unstack_and_shift_per_month, shift=(da_biased_mean - da_target_mean), init_date_name=init_date_name, lead_time_name=lead_time_name) \ .mean('month', skipna=True) da_meancorr[lead_time_name] = da_biased[lead_time_name] da_meancorr.coords['month'] = month # Compute the corrected anomalies ----- da_anom_meancorr = da_meancorr.groupby(init_date_name).apply(_anomalize, clim=da_target_clim) da_anom_meancorr.coords['month'] = month else: da_anom_meancorr = da_biased.groupby('month').apply(_unstack_and_shift_per_month, shift=(da_biased_mean), init_date_name=init_date_name, 
lead_time_name=lead_time_name) \ .mean('month', skipna=True) da_anom_meancorr[lead_time_name] = da_anom_meancorr[lead_time_name] da_anom_meancorr.coords['month'] = month # Correct the standard deviation ----- try: da_biased_std_tmp = da_anom_meancorr.groupby('month').apply(_groupby_lead_and_std, over_dims=[init_date_name,'ensemble'], init_date_name=init_date_name, lead_time_name=lead_time_name) except ValueError: da_biased_std_tmp = da_anom_meancorr.groupby('month').apply(_groupby_lead_and_std, over_dims=init_date_name, init_date_name=init_date_name, lead_time_name=lead_time_name) try: da_target_std = da_target.sel(lat=da_biased.lat, lon=da_biased.lon).groupby('time.month').std('time') except: da_target_std = da_target.groupby('time.month').std('time') da_anom_stdcorr_tmp = da_anom_meancorr.groupby('month').apply(_unstack_and_scale_per_month, scale=(da_target_std / da_biased_std_tmp), init_date_name=init_date_name, lead_time_name=lead_time_name) \ .mean('month', skipna=True) da_anom_stdcorr_tmp[lead_time_name] = da_biased[lead_time_name] da_anom_stdcorr_tmp.coords['month'] = month # This will "squeeze" each pdf at each lead time appropriately. However, the total variance across all leads for # a given month will now be incorrect. 
Thus, we now rescale as a function of month only try: da_biased_std = concat_times(da_anom_stdcorr_tmp).groupby('time.month').std(['time','ensemble']) except ValueError: da_biased_std = concat_times(da_anom_stdcorr_tmp).groupby('time.month').std('time') da_anom_stdcorr = da_anom_stdcorr_tmp.groupby(init_date_name).apply(_rescale, scale=(da_target_std / da_biased_std)) if da_target_clim is not None: da_stdcorr = da_anom_stdcorr.groupby(init_date_name).apply(_anomalize, clim=-da_target_clim) return da_stdcorr.drop('month'), da_anom_stdcorr.drop('month') else: return da_anom_stdcorr.drop('month') # =================================================================================================== def bias_correct_m(da_biased, da_target, da_target_clim=None, init_date_name='init_date', lead_time_name='lead_time'): """ Adjusts, per month and lead time, the mean of da_biased to match that of da_target Author: Dougie Squire Date: 01/09/2018 Parameters ---------- da_biased : xarray DataArray Array containing values to be corrected. The time information of this array is anticipated in a lead_time/inital_date format da_target : xarray DataArray Array containing values to use for the correction. da_target_clim : xarray DataArray, optional Array containing a climatology of da_target. If da_target_clim is provided, this function returns both the corrected full field and the anomalies. Otherwise, returns only the anomalies init_date_name : str, optional Name of initial date dimension lead_time_name : str, optional Name of lead time dimension Returns ------- corrected : xarray DataArray Bias corrected array Examples -------- >>> biased = xr.DataArray(np.random.normal(size=(48,6)), ... coords=[('init_date', pd.date_range(start='1/1/2018', periods=48, freq='M')), ... ('lead_time', np.arange(6))]) >>> biased['lead_time'].attrs['units'] = 'M' >>> target = xr.DataArray(np.random.normal(size=(48)), ... 
coords=[('time', pd.date_range(start='1/1/2000', periods=48, freq='M'))]) >>> doppyo.utils.bias_correct_m(biased, target) <xarray.DataArray (init_date: 48, lead_time: 6)> array([[ 0.541226, 0.693622, -0.367322, 0.820282, 0.111487, 0.078355], [-0.299829, 0.164297, -0.976883, 0.463365, -0.26428 , -0.536119], [ 0.078832, -0.260615, -0.235059, -0.349185, 0.567183, -1.543395], ..., [ 0.335494, -1.121158, 1.313004, 0.604279, 0.135053, 0.031851], [ 0.33103 , 0.876521, -0.980873, 0.640328, 1.053691, 0.166768], [ 1.207329, 0.021916, 0.210883, -0.189922, 0.075786, 0.047616]]) Coordinates: * init_date (init_date) datetime64[ns] 2018-01-31 2018-02-28 ... 2021-12-31 * lead_time (lead_time) int64 0 1 2 3 4 5 Notes ----------- Many years of initial dates (in da_biased) and times (in da_target) must exist for the mean to be computed reliably """ def _groupby_lead_and_mean(da, over_dims, init_date_name, lead_time_name): """ Groups provided array by lead time and computes mean """ return da.unstack('stacked_' + init_date_name + '_' + lead_time_name).groupby(lead_time_name).mean(over_dims, skipna=True) def _unstack_and_shift_per_month(da, shift, init_date_name, lead_time_name): """ Unstacks and adjusts input array by a constant shift as a function of month """ da_us = da.unstack('stacked_' + init_date_name + '_' + lead_time_name) the_month = np.ndarray.flatten(da_us.month.values) the_month = int(np.unique(the_month[~np.isnan(the_month)])) return da_us - shift.sel(month=the_month) _anomalize = lambda data, clim: datetime_to_leadtime( anomalize( leadtime_to_datetime(data),clim)) da_biased = da_biased.copy() da_target = da_target.copy() month = (da_biased[init_date_name].dt.month + da_biased[lead_time_name]) % 12 month = month.where(month != 0, 12) # Correct the mean ----- da_biased.coords['month'] = month try: da_biased_mean = da_biased.groupby('month').apply(_groupby_lead_and_mean, over_dims=[init_date_name,'ensemble'], init_date_name=init_date_name, lead_time_name=lead_time_name) 
except ValueError: da_biased_mean = da_biased.groupby('month').apply(_groupby_lead_and_mean, over_dims=init_date_name, init_date_name=init_date_name, lead_time_name=lead_time_name) if da_target_clim is not None: da_target_mean = da_target.groupby('time.month').mean('time') da_meancorr = da_biased.groupby('month').apply(_unstack_and_shift_per_month, shift=(da_biased_mean - da_target_mean), init_date_name=init_date_name, lead_time_name=lead_time_name) \ .mean('month', skipna=True) da_meancorr[lead_time_name] = da_biased[lead_time_name] da_meancorr.coords['month'] = month # Compute the corrected anomalies ----- da_anom_meancorr = da_meancorr.groupby(init_date_name).apply(_anomalize, clim=da_target_clim) da_anom_meancorr.coords['month'] = month else: da_anom_meancorr = da_biased.groupby('month').apply(_unstack_and_shift_per_month, shift=(da_biased_mean), init_date_name=init_date_name, lead_time_name=lead_time_name) \ .mean('month', skipna=True) da_anom_meancorr[lead_time_name] = da_anom_meancorr[lead_time_name] da_anom_meancorr.coords['month'] = month if da_target_clim is not None: da_meancorrr = da_anom_meancorr.groupby(init_date_name).apply(_anomalize, clim=-da_target_clim) return da_meancorr.drop('month'), da_anom_meancorr.drop('month') else: return da_anom_meancorr.drop('month') # =================================================================================================== def conditional_bias_correct(da_cmp, da_ref, over_dims): """ Return conditional bias corrected data using the approach of Goddard et al. 2013 """ cc = skill.compute_Pearson_corrcoef(da_cmp.mean('ensemble'), da_ref, over_dims=over_dims, subtract_local_mean=False) correct_cond_bias = (da_ref.std(over_dims) / da_cmp.mean('ensemble').std(over_dims)) * cc return da_cmp * correct_cond_bias # =================================================================================================== def trunc_time(time, freq): """ Truncates values in provided time array to provided frequency. E.g. 
2018-01-15T12:00 with freq = 'M' becomes 2018-01-01. """ return time.astype('<M8[' + freq + ']') # =================================================================================================== def month_delta(date_in, delta, trunc_to_start=False): """ Increments provided datetime64 array by delta months """ date_mod = pd.Timestamp(date_in) m, y = (date_mod.month + delta) % 12, date_mod.year + ((date_mod.month) + delta - 1) // 12 if not m: m = 12 d = min(date_mod.day, [31, 29 if y % 4 == 0 and not y % 400 == 0 else 28,31,30,31,30,31,31,30,31,30,31][m - 1]) if trunc_to_start: date_out = trunc_time(np.datetime64(date_mod.replace(day=d,month=m, year=y)),'M') else: date_out = np.datetime64(date_mod.replace(day=d,month=m, year=y)) return np.datetime64(date_out,'ns') # =================================================================================================== def year_delta(date_in, delta, trunc_to_start=False): """ Increments provided datetime64 array by delta years """ date_mod = month_delta(date_in, 12 * delta) if trunc_to_start: date_out = trunc_time(date_mod,'Y') else: date_out = date_mod return date_out # =================================================================================================== def datetime_to_leadtime(data_in): """ Converts time information from single datetime dimension to init_date/lead_time dimension pair """ init_date = data_in.time.values[0] lead_times = range(len(data_in.time)) try: freq = pd.infer_freq(data_in.time.values) # If pandas tries to assign start time to frequency (e.g. 
QS-OCT), remove this ----- if '-' in freq: freq = freq[:freq.find('-')] # Split frequency into numbers and strings ----- incr_string = ''.join([i for i in freq if i.isdigit()]) freq_incr = [int(incr_string) if incr_string else 1][0] freq_type = ''.join([i for i in freq if not i.isdigit()]) # Specify all lengths great than 1 month in months ----- if 'QS' in freq_type: freq = str(3*freq_incr) + 'MS' elif 'Q' in freq_type: freq = str(3*freq_incr) + 'M' elif ('YS' in freq_type) | ('AS' in freq_type): freq = str(12*freq_incr) + 'MS' elif ('Y' in freq_type) | ('A' in freq_type): freq = str(12*freq_incr) + 'M' except ValueError: dt = (data_in.time.values[1] - data_in.time.values[0]) / np.timedelta64(1, 's') month = data_in.time.dt.month[0] if dt == 60*60*24: freq = 'D' elif ((month == 1) | (month == 3) | (month == 5) | (month == 7) | (month == 8) | (month == 10) | (month == 12)) & (dt == 31*60*60*24): freq = 'MS' elif ((month == 4) | (month == 6) | (month == 9) | (month == 11)) & (dt == 30*60*60*24): freq = 'MS' elif (month == 2) & ((dt == 28*60*60*24) | (dt == 29*60*60*24)): freq = 'MS' elif (dt == 365*60*60*24) | (dt == 366*60*60*24): freq = 'A' else: freq = 'NA' data_out = data_in.rename({'time' : 'lead_time'}) data_out['lead_time'] = lead_times data_out['lead_time'].attrs['units'] = freq data_out.coords['init_date'] = init_date return data_out # =================================================================================================== def leadtime_to_datetime(data_in, init_date_name='init_date', lead_time_name='lead_time'): """ Converts time information from lead time/initial date dimension pair to single datetime dimension """ try: init_date = data_in[init_date_name].values[0] except IndexError: init_date = data_in[init_date_name].values lead_times = list(map(int, data_in[lead_time_name].values)) freq = data_in[lead_time_name].attrs['units'] # # Split frequency into numbers and strings ----- # incr_string = ''.join([i for i in freq if i.isdigit()]) # 
freq_incr = [int(incr_string) if incr_string else 1][0]
    # freq_type = ''.join([i for i in freq if not i.isdigit()])

    # Deal with special cases of monthly and yearly frequencies -----
    # if 'M' in freq_type:
    #     datetimes = np.array([month_delta(init_date, freq_incr * ix) for ix in lead_times])
    # elif ('A' in freq_type) | ('Y' in freq_type):
    #     datetimes = np.array([year_delta(init_date, freq_incr * ix) for ix in lead_times])
    # else:
    #     datetimes = (pd.date_range(init_date, periods=len(lead_times), freq=freq)).values
    # Rebuild the datetime axis from the initial date, the number of lead times, and the
    # frequency stored in the lead_time 'units' attribute -----
    datetimes = (pd.date_range(init_date, periods=len(lead_times), freq=freq)).values
    
    # Replace the init_date/lead_time pair with a single 'time' dimension -----
    data_out = data_in.drop(init_date_name)
    data_out = data_out.rename({lead_time_name : 'time'})
    data_out['time'] = datetimes
    
    return prune(data_out)


# ===================================================================================================
def get_nearest_point(da, lat, lon):
    """ Returns the nearest grid point to the specified lat/lon location """

    return da.sel(lat=lat,lon=lon,method='nearest')


# ===================================================================================================
# visualization tools
# ===================================================================================================
def plot_fields(data, title=None, headings=None, ncol=2, contour=False, vlims=None, clims=None, squeeze_row=1, squeeze_col=1, squeeze_cbar=1, shift_cbar=1, cmap='viridis', fontsize=12, invert=False):
    """ Plots tiles of figures 
    
        data : sequence of 2D xarray DataArrays, one per panel. Panels with both 'lat'
        and 'lon' dims are drawn on a cartopy PlateCarree map; anything else is drawn
        as a plain image/contour plot.
        vlims/clims : colour/contour limits -- either a single [min, max] pair shared
        by all panels (which also triggers a single shared colorbar) or one pair per
        panel.
    """

    def _depth(seq):
        # Nesting depth of seq; used to detect whether vlims is one shared pair
        # (depth 1) or a per-panel list of pairs -----
        for level in count():
            if not seq:
                return level
            seq = list(chain.from_iterable(s for s in seq if isinstance(s, Sequence)))

    matplotlib.rc('font', family='sans-serif')
    matplotlib.rc('font', serif='Helvetica') 
    # NOTE(review): usetex expects a bool; the string 'false' is truthy -- confirm
    # matplotlib accepts/normalises this as intended
    matplotlib.rc('text', usetex='false') 
    matplotlib.rcParams.update({'font.size': fontsize})

    nrow = int(np.ceil(len(data)/ncol));

    fig = plt.figure(figsize=(11*squeeze_col, nrow*4*squeeze_row))
    
    if (clims is not None) & (np.shape(vlims) != np.shape(clims)):
        raise ValueError('The input clims must be equal in size to vlims')
    
    # Check if vlims are given per figure or for all figures -----
    one_cbar = False
    if vlims is None:
        vlims = [[None, None]] * len(data)
    if _depth(vlims) == 1:
        one_cbar = True
        
    over_count = 1
    for idx,dat in enumerate(data):
        # Select shared or per-panel limits -----
        if one_cbar:
            vmin, vmax = vlims
            if clims is not None:
                cmin, cmax = clims
        else:
            vmin, vmax = vlims[idx]
            if clims is not None:
                cmin, cmax = clims[idx]
        
        if ('lat' in dat.dims) and ('lon' in dat.dims):
            # Geographic panel: draw on a PlateCarree projection centred on 180E -----
            trans = cartopy.crs.PlateCarree()
            ax = plt.subplot(nrow, ncol, over_count, projection=cartopy.crs.PlateCarree(central_longitude=180))
            extent = [dat.lon.min(), dat.lon.max(), 
                      dat.lat.min(), dat.lat.max()]

            if contour is True:
                if clims is not None:
                    # Filled contours at vlims plus white-over-black contour lines at clims -----
                    ax.coastlines(color='gray')
                    im = ax.contourf(dat.lon, dat.lat, dat, levels=np.linspace(vmin,vmax,12), origin='lower', transform=trans, vmin=vmin, vmax=vmax, cmap=cmap)
                    ax.contour(dat.lon, dat.lat, dat, levels=np.linspace(cmin,cmax,12), origin='lower', transform=trans, vmin=vmin, vmax=vmax, colors='w', linewidths=2)
                    ax.contour(dat.lon, dat.lat, dat, levels=np.linspace(cmin,cmax,12), origin='lower', transform=trans, vmin=vmin, vmax=vmax, colors='k', linewidths=1)
                else:
                    ax.coastlines(color='black')
                    im = ax.contourf(dat.lon, dat.lat, dat, origin='lower', transform=trans, vmin=vmin, vmax=vmax, cmap=cmap)
            else:
                ax.coastlines(color='black')
                im = ax.imshow(dat, origin='lower', extent=extent, transform=trans, vmin=vmin, vmax=vmax, cmap=cmap)

            # Gridline labels: only label the outer edge of each row of panels -----
            gl = ax.gridlines(crs=cartopy.crs.PlateCarree(), draw_labels=True)
            gl.xlines = False
            gl.ylines = False
            gl.xlabels_top = False
            if over_count % ncol == 0:
                gl.ylabels_left = False
            elif (over_count+ncol-1) % ncol == 0: 
                gl.ylabels_right = False
            else:
                gl.ylabels_left = False
                gl.ylabels_right = False
            gl.xlocator = mticker.FixedLocator([-90, 0, 90, 180])
            gl.ylocator = mticker.FixedLocator([-90, -60, 0, 60, 90])
            gl.xformatter = LONGITUDE_FORMATTER
            gl.yformatter = LATITUDE_FORMATTER

            if not one_cbar:
                # Per-panel colorbar; headings double as the colorbar label -----
                cbar = plt.colorbar(im, ax=ax, orientation="horizontal", aspect=30/squeeze_cbar, pad=shift_cbar*0.1)
                tick_locator = mticker.MaxNLocator(nbins=6)
                cbar.locator = tick_locator
                cbar.update_ticks()
                if headings is not None:
                    cbar.set_label(headings[idx], labelpad=5, fontsize=fontsize);
            elif headings is not None:
                ax.set_title(headings[idx], fontsize=fontsize)
        else:
            # Non-geographic panel: pick plot axes from whichever of lat/lon exists -----
            ax = plt.subplot(nrow, ncol, over_count)
            if 'lat' in dat.dims:
                x_plt = dat['lat']
                y_plt = dat[utils.get_other_dims(dat,'lat')[0]]
                # if dat.get_axis_num('lat') > 0:
                #     dat = dat.transpose()
            elif 'lon' in dat.dims:
                x_plt = dat['lon']
                y_plt = dat[utils.get_other_dims(dat,'lon')[0]]
                # if dat.get_axis_num('lon') > 0:
                #     dat = dat.transpose()
            else: 
                x_plt = dat[dat.dims[1]]
                y_plt = dat[dat.dims[0]]
                
            extent = [x_plt.min(), x_plt.max(), 
                      y_plt.min(), y_plt.max()]
            
            if contour is True:
                if clims is not None:
                    im = ax.contourf(x_plt, y_plt, dat, levels=np.linspace(vmin,vmax,12), vmin=vmin, vmax=vmax, cmap=cmap)
                    ax.contour(x_plt, y_plt, dat, levels=np.linspace(cmin,cmax,12), colors='w', linewidths=2)
                    ax.contour(x_plt, y_plt, dat, levels=np.linspace(cmin,cmax,12), colors='k', linewidths=1)
                else:
                    im = ax.contourf(x_plt, y_plt, dat, vmin=vmin, vmax=vmax, cmap=cmap)
            else:
                im = ax.imshow(dat, origin='lower', extent=extent, vmin=vmin, vmax=vmax, cmap=cmap)
                
            # Axis labels: only label the outer edge of each row of panels -----
            if over_count % ncol == 0:
                ax.yaxis.tick_right()
            elif (over_count+ncol-1) % ncol == 0: 
                ax.set_ylabel(y_plt.dims[0], fontsize=fontsize)
            else:
                ax.set_yticks([])
            # NOTE(review): true division here -- integer division (idx // ncol) looks
            # intended for a "bottom row" test; confirm
            if idx / ncol >= nrow - 1:
                ax.set_xlabel(x_plt.dims[0], fontsize=fontsize)
            
            if not one_cbar:
                cbar = plt.colorbar(im, ax=ax, orientation="horizontal", aspect=30/squeeze_cbar, pad=shift_cbar*0.1)
                tick_locator = mticker.MaxNLocator(nbins=6)
                cbar.locator = tick_locator
                cbar.update_ticks()
                if headings is not None:
                    cbar.set_label(headings[idx], labelpad=5, fontsize=fontsize);
            elif headings is not None:
                ax.set_title(headings[idx], fontsize=fontsize)
                
        if invert:
            ax.invert_yaxis()
            
        over_count += 1

    plt.tight_layout()
        
    if one_cbar:
        # Single shared colorbar along the bottom of the figure -----
        vmin, vmax = vlims
        fig.subplots_adjust(bottom=shift_cbar*0.16)
        cbar_ax = fig.add_axes([0.15, 0.13, 0.7, squeeze_cbar*0.020])
        cbar = fig.colorbar(im, cax=cbar_ax, orientation='horizontal');
        cbar_ax.set_xlabel(title, rotation=0, labelpad=15, fontsize=fontsize);
        cbar.set_ticks(np.linspace(vmin,vmax,5))
    elif title is not None:
        fig.suptitle(title, y=1)


# ===================================================================================================
def size_GB(xr_object):
    """
    How many GB (or GiB) is your xarray object?
    
    // Requires an xarray object
    
    // Returns: * equivalent GB (GBytes) - 10^9 conversion
                * equivalent GiB (GiBytes) - 2^ 30 conversion
        
    < Thomas Moore - thomas.moore@csiro.au - 10102018 >
    """
    bytes = xr_object.nbytes
    Ten2the9 = 10**9
    Two2the30 = 2**30
    GBytes = bytes / Ten2the9
    GiBytes = bytes / Two2the30
    #print out results
    print(xr_object.name, "is", GBytes, "GB", 'which is', GiBytes,"GiB")
    return GBytes,GiBytes


# ===================================================================================================
def get_pres_name(da):
    """ 
        Returns name of pressure dimension in input array
        
        Author: Dougie Squire
        Date: 03/03/2018
        
        Parameters
        ----------
        da : xarray DataArray
            Array with coordinate corresponding to pressure
            
        Returns
        -------
        name : str
            Name of dimension corresponding to pressure
            
        Examples
        --------
        >>> A = xr.DataArray(np.random.normal(size=(2,2,2,2,2)), 
        ...                  coords=[('lat', np.arange(2)), ('lon', np.arange(2)), 
        ...                          ('depth', np.arange(2)), ('level', np.arange(2)), 
        ...                          
('pfull', np.arange(2))])
        >>> doppyo.utils.get_pres_name(A)
        'pfull'
    """
    
    # Recognised pressure dimension names, in order of preference -----
    if 'pfull' in da.dims:
        return 'pfull'
    elif 'phalf' in da.dims:
        return 'phalf'
    else:
        raise KeyError('Unable to determine pressure dimension')
        pass  # NOTE(review): unreachable -- the raise above always exits


# ===================================================================================================
def did_event(da, event):
    """ Returns array containing True/False where event occurs/does not occur 
    
        Notes
        -----
        See http://www.cawcr.gov.au/projects/verification/
    """
    
    # Translate the textual event definition (e.g. '> 1') into an xarray comparison on
    # da, normalising C-style operators to their xarray/numpy equivalents.
    # NOTE(review): this evals a string built from the caller-supplied `event` -- only
    # use with trusted event strings
    eval_expr = event.replace(">", "da >").replace("<", "da <").replace("==", "da ==") \
                     .replace("=", "da ==").replace('&&', '&').replace('||', '|') \
                     .replace("and", "&").replace("or", "|")
    eval_expr = '(' + eval_expr + ').rename("event_logical")'
    
    return eval(eval_expr)


# ===================================================================================================
def compute_likelihood(da_logical, dim='ensemble'):
    """ Returns array of likelihoods computed along dim from logical event data 
    
        Notes
        -----
        See http://www.cawcr.gov.au/projects/verification/
    """
    
    if dim == None:  # NOTE(review): prefer `dim is None`
        # No reduction requested; pass the logical array straight through -----
        likelihood = da_logical
    else:
        likelihood = da_logical.mean(dim=dim).rename('likelihood')
        
    return likelihood


# ===================================================================================================
def atmos_energy_cycle(temp, u, v, omega, gh, terms=None, vgradz=False, spectral=False, n_wavenumbers=20, integrate=True, loop_triple_terms=False, lat_name=None, lon_name=None, plevel_name=None):
    """
        Returns all terms in the Lorenz energy cycle. Follows formulae and notation used in `Marques 
        et al. 2011 Global diagnostic energetics of five state-of-the-art climate models. Climate 
        Dynamics`. Note that this decomposition is in the space domain. A space-time decomposition 
        can also be carried out (though not in Fourier space, but this is not implemented here (see 
        `Oort. 1964 On Estimates of the atmospheric energy cycle. Monthly Weather Review`). 
        
Parameters ---------- temp : xarray DataArray Array containing fields of temperature with at least coordinates latitude, longitude and level (following standard naming - see Limitations) u : xarray DataArray Array containing fields of zonal velocity with at least coordinates latitude, longitude and level (following standard naming - see Limitations) v : xarray DataArray Array containing fields of meridional velocity with at least coordinates latitude, longitude and level (following standard naming - see Limitations) omega : xarray DataArray Array containing fields of vertical velocity (pressure coordinates) with at least coordinates latitude, longitude and level (following standard naming - see Limitations) gh : xarray DataArray Array containing fields of geopotential height with at least coordinates latitude, longitude and level (following standard naming - see Limitations) terms : str or sequence of str List of terms to compute. If None, returns all terms. Available options are: Pz; total available potential energy in the zonally averaged temperature distribution Kz; total kinetic energy in zonally averaged motion Pe; total eddy available potential energy [= sum_n Pn (n > 0 only) for spectral=True] (Note that for spectral=True, an additional term, Sn, quantifying the rate of transfer of available potential energy to eddies of wavenumber n from eddies of all other wavenumbers is also returned) Ke; total eddy kinetic energy [= sum_n Kn (n > 0 only) for spectral=True] (Note that for spectral=True, an additional term, Ln, quantifying the rate of transfer of kinetic energy to eddies of wavenumber n from eddies of all other wavenumbers is also returned) Cz; rate of conversion of zonal available potential energy to zonal kinetic energy Ca; rate of transfer of total available potential energy in the zonally averaged temperature distribution (Pz) to total eddy available potential energy (Pe) [= sum_n Rn (n > 0 only) for spectral=True] Ce; rate of transfer of total eddy 
available potential energy (Pe) to total eddy kinetic energy (Ke) [= sum_n Cn (n > 0 only) for spectral=True] Ck; rate of transfer of total eddy kinetic energy (Ke) to total kinetic energy in zonally averaged motion (Kz) [= sum_n Mn (n > 0 only) for spectral=True] Gz; rate of generation of zonal available potential energy due to the zonally averaged heating (Pz). Note that this term is computed as a residual (Cz + Ca) and cannot be returned in spectral space. If Gz is requested with spectral=True, Gz is returned in real-space only Ge; rate of generation of eddy available potential energy (Pe). Note that this term is computed as a residual (Ce - Ca) and cannot be returned in spectral space. If Ge is requested with spectral=True, Ge is returned in real-space only Dz; rate of viscous dissipation of zonal kinetic energy (Kz). Note that this term is computed as a residual (Cz - Ck) and cannot be returned in spectral space. If Dz is requested with spectral=True, Dz is returned in real-space only De; rate of dissipation of eddy kinetic energy (Ke). Note that this term is computed as a residual (Ce - Ck) and cannot be returned in spectral space. If De is requested with spectral=True, De is returned in real-space only vgradz : bool, optional If True, uses `v-grad-z` approach for computing terms relating to conversion of potential energy to kinetic energy. Otherwise, defaults to using the `omaga-alpha` approach (see reference above for details) spectral : bool, optional If True, computes all terms as a function of wavenumber on longitudinal bands. To use this option, longitudes must be regularly spaced. Note that Ge and De are computed as residuals and cannot be computed in spectral space n_wavenumbers : int, optional Number of wavenumbers to retain either side of wavenumber=0. Obviously only does anything if spectral=True integrate : bool, optional If True, computes and returns the integral of each term over the mass of the atmosphere. 
Otherwise, only the integrands are returned. Returns ------- atmos_energy_cycle : xarray Dataset Limitations ----------- All input array coordinates must follow standard naming (see doppyo.utils.get_lat_name(), doppyo.utils.get_lon_name(), etc) Pressure levels must be provided in units of hPa Notes ----- The following notation is used below (stackable, e.g. *_ZT indicates the time average of the zonal average): *_A -> area average over an isobaric surface *_a -> departure from area average *_Z -> zonal average *_z -> departure from zonal average *_T -> time average *_t -> departure from time average Additionally, capital variables indicate Fourier transforms: F(u) = U F(v) = V F(omega) = O F(gh) = A F(temp) = B """ def _flip_n(da): """ Flips data along wavenumber coordinate """ daf = da.copy() daf['n'] = -daf['n'] return daf.sortby(daf['n']) def _truncate(F, n_truncate, dim): """ Converts spatial frequency dim to wavenumber, n, and truncates all wavenumbers greater than n_truncate """ F[dim] = 360 * F[dim] F = F.rename({dim : 'n'}) F = F.where(abs(F.n) <= n_truncate, drop=True) return F, _flip_n(F) def _triple_terms(A, B, C): """ Calculate triple term summation of the form \int_{m=-inf}^{inf} A(m) * B(n) * C(n - m) """ # Use rolling operator to build shifted terms ----- Am = A.rename({'n' : 'm'}) Cnm = C.rolling(n=len(C.n), center=True).construct('m', fill_value=0) Cnm['m'] = -C['n'].values # Drop m = 0 and n < 0 ----- Am = Am.where(Am['m'] != 0, drop=True) Cnm = Cnm.where(Cnm['m'] != 0, drop=True) return (B * (Am * Cnm)).sum(dim='m', skipna=False) def _triple_terms_loop(A, B, C): """ Calculate triple term summation of the form \int_{m=-inf}^{inf} A(m) * B(n) * C(n - m) """ # Loop over all m's and perform rolling sum ----- ms = A['n'].where(A['n'] != 0, drop=True).values ABC = A.copy() * 0 for m in ms: Am = A.sel(n=m) Cnm = C.shift(n=int(m)).fillna(0) ABC = ABC + (Am * B * Cnm) return ABC if terms is None: terms = ['Pz', 'Kz', 'Pe', 'Ke', 'Cz', 'Ca', 'Ce', 'Ck', 
'Gz', 'Ge', 'Dz', 'De'] if isinstance(terms, str): terms = [terms] # Initialize some things ----- if lat_name is None: lat_name = utils.get_lat_name(temp) if lon_name is None: lon_name = utils.get_lon_name(temp) if plevel_name is None: plevel_name = utils.get_plevel_name(temp) degtorad = utils.constants().pi / 180 tan_lat = xr.ufuncs.tan(temp[lat_name] * degtorad) cos_lat = xr.ufuncs.cos(temp[lat_name] * degtorad) # Determine the stability parameter using Saltzman's approach ----- kappa = utils.constants().R_d / utils.constants().C_pd p_kap = (1000 / temp[plevel_name]) ** kappa theta_A = utils.average(temp * p_kap, [lat_name, lon_name], weights=cos_lat) dtheta_Adp = utils.differentiate_wrt(theta_A, dim=plevel_name, x=(theta_A[plevel_name] * 100)) gamma = - p_kap * (utils.constants().R_d) / ((temp[plevel_name] * 100) * utils.constants().C_pd) / dtheta_Adp # [1/K] energies = gamma.rename('gamma').to_dataset() # Compute zonal terms # ======================== if ('Pz' in terms): # Compute the total available potential energy in the zonally averaged temperature # distribution, Pz [also commonly called Az] ----- temp_A = utils.average(temp, [lat_name, lon_name], weights=cos_lat) temp_Z = temp.mean(dim=lon_name) temp_Za = temp_Z - temp_A Pz_int = gamma * utils.constants().C_pd / 2 * temp_Za ** 2 # [J/kg] energies['Pz_int'] = Pz_int if integrate: Pz = _int_over_atmos(Pz_int, lat_name, lon_name, plevel_name, lon_dim=temp[lon_name]) # [J/m^2] energies['Pz'] = Pz if ('Kz' in terms): # Compute the total kinetic energy in zonally averaged motion, Kz [also commonly # called Kz] ----- u_Z = u.mean(dim=lon_name) v_Z = v.mean(dim=lon_name) Kz_int = 0.5 * (u_Z ** 2 + v_Z ** 2) # [J/kg] energies['Kz_int'] = Kz_int if integrate: Kz = _int_over_atmos(Kz_int, lat_name, lon_name, plevel_name, lon_dim=u[lon_name]) # [J/m^2] energies['Kz'] = Kz if ('Cz' in terms): # Compute the rate of conversion of zonal available potential energy (Pz) to zonal kinetic # energy (Kz), Cz [also commonly 
called Cz] ----- if vgradz: if 'v_Z' not in locals(): v_Z = v.mean(dim=lon_name) gh_Z = gh.mean(dim=lon_name) dghdlat = utils.differentiate_wrt(gh_Z, dim=lat_name, x=(gh_Z[lat_name] * degtorad)) Cz_int = - (utils.constants().g / utils.constants().R_earth) * v_Z * dghdlat # [W/kg] energies['Cz_int'] = Cz_int if integrate: Cz = _int_over_atmos(Cz_int, lat_name, lon_name, plevel_name, lon_dim=gh[lon_name]) # [W/m^2] energies['Cz'] = Cz else: if 'temp_Za' not in locals(): temp_A = utils.average(temp, [lat_name, lon_name], weights=cos_lat) temp_Z = temp.mean(dim=lon_name) temp_Za = temp_Z - temp_A omega_A = utils.average(omega, [lat_name, lon_name], weights=cos_lat) omega_Z = omega.mean(dim=lon_name) omega_Za = omega_Z - omega_A Cz_int = - (utils.constants().R_d / (temp[plevel_name] * 100)) * omega_Za * temp_Za # [W/kg] energies['Cz_int'] = Cz_int if integrate: Cz = _int_over_atmos(Cz_int, lat_name, lon_name, plevel_name, lon_dim=omega[lon_name]) # [W/m^2] energies['Cz'] = Cz # Compute eddy terms in Fourier space if spectral=True # ========================================================== if spectral: if ('Pe' in terms): # Compute the total available potential energy eddies of wavenumber n, Pn ----- Bp, Bn = _truncate(utils.fft(temp, dim=lon_name, nfft=len(temp[lon_name]), twosided=True, shift=True) / len(temp[lon_name]), n_truncate=n_wavenumbers, dim='f_'+lon_name) Pn_int = (gamma * utils.constants().C_pd * abs(Bp) ** 2) energies['Pn_int'] = Pn_int if integrate: Pn = _int_over_atmos(Pn_int, lat_name, lon_name, plevel_name, lon_dim=temp[lon_name]) # [J/m^2] energies['Pn'] = Pn # Compute the rate of transfer of available potential energy to eddies of # wavenumber n from eddies of all other wavenumbers, Sn ----- Up, Un = _truncate(utils.fft(u, dim=lon_name, nfft=len(u[lon_name]), twosided=True, shift=True) / len(u[lon_name]), n_truncate=n_wavenumbers, dim='f_'+lon_name) Vp, Vn = _truncate(utils.fft(v, dim=lon_name, nfft=len(v[lon_name]), twosided=True, shift=True) / 
len(v[lon_name]), n_truncate=n_wavenumbers, dim='f_'+lon_name) Op, On = _truncate(utils.fft(omega, dim=lon_name, nfft=len(omega[lon_name]), twosided=True, shift=True) / len(omega[lon_name]), n_truncate=n_wavenumbers, dim='f_'+lon_name) dBpdlat = utils.differentiate_wrt(Bp, dim=lat_name, x=(Bp[lat_name] * degtorad)) dBndlat = utils.differentiate_wrt(Bn, dim=lat_name, x=(Bn[lat_name] * degtorad)) dBpdp = utils.differentiate_wrt(Bp, dim=plevel_name, x=(Bp[plevel_name] * 100)) dBndp = utils.differentiate_wrt(Bn, dim=plevel_name, x=(Bn[plevel_name] * 100)) if loop_triple_terms: BpBnUp = _triple_terms_loop(Bp, Bn, Up) BpBpUn = _triple_terms_loop(Bp, Bp, Un) BpglBnVp = _triple_terms_loop(Bp, dBndlat, Vp) BpglBpVn = _triple_terms_loop(Bp, dBpdlat, Vn) BpgpBnOp = _triple_terms_loop(Bp, dBndp, Op) BpgpBpOn = _triple_terms_loop(Bp, dBpdp, On) BpBnOp = _triple_terms_loop(Bp, Bn, Op) BpBpOn = _triple_terms_loop(Bp, Bp, On) else: BpBnUp = _triple_terms(Bp, Bn, Up) BpBpUn = _triple_terms(Bp, Bp, Un) BpglBnVp = _triple_terms(Bp, dBndlat, Vp) BpglBpVn = _triple_terms(Bp, dBpdlat, Vn) BpgpBnOp = _triple_terms(Bp, dBndp, Op) BpgpBpOn = _triple_terms(Bp, dBpdp, On) BpBnOp = _triple_terms(Bp, Bn, Op) BpBpOn = _triple_terms(Bp, Bp, On) Sn_int = -gamma * utils.constants().C_pd * (1j * Bp['n']) / \ (utils.constants().R_earth * xr.ufuncs.cos(Bp[lat_name] * degtorad)) * \ (BpBnUp + BpBpUn) + \ gamma * utils.constants().C_pd / utils.constants().R_earth * \ (BpglBnVp + BpglBpVn) + \ gamma * utils.constants().C_pd * (BpgpBnOp + BpgpBpOn) + \ gamma * utils.constants().R_d / Bp[plevel_name] * \ (BpBnOp + BpBpOn) energies['Sn_int'] = Sn_int if integrate: Sn = abs(_int_over_atmos(Sn_int, lat_name, lon_name, plevel_name, lon_dim=temp[lon_name])) # [W/m^2] energies['Sn'] = Sn if ('Ke' in terms): # Compute the total kinetic energy in eddies of wavenumber n, Kn ----- if 'U' not in locals(): Up, Un = _truncate(utils.fft(u, dim=lon_name, nfft=len(u[lon_name]), twosided=True, shift=True) / 
len(u[lon_name]), n_truncate=n_wavenumbers, dim='f_'+lon_name) if 'V' not in locals(): Vp, Vn = _truncate(utils.fft(v, dim=lon_name, nfft=len(v[lon_name]), twosided=True, shift=True) / len(v[lon_name]), n_truncate=n_wavenumbers, dim='f_'+lon_name) Kn_int = abs(Up) ** 2 + abs(Vp) ** 2 energies['Kn_int'] = Kn_int if integrate: Kn = _int_over_atmos(Kn_int, lat_name, lon_name, plevel_name, lon_dim=u[lon_name]) # [J/m^2] energies['Kn'] = Kn # Compute the rate of transfer of kinetic energy to eddies of wavenumber n from # eddies of all other wavenumbers, Ln ----- if 'O' not in locals(): Op, On = _truncate(utils.fft(omega, dim=lon_name, nfft=len(omega[lon_name]), twosided=True, shift=True) / len(omega[lon_name]), n_truncate=n_wavenumbers, dim='f_'+lon_name) dUpdp = utils.differentiate_wrt(Up, dim=plevel_name, x=(Up[plevel_name] * 100)) dVpdp = utils.differentiate_wrt(Vp, dim=plevel_name, x=(Vp[plevel_name] * 100)) dOpdp = utils.differentiate_wrt(Op, dim=plevel_name, x=(Op[plevel_name] * 100)) dOndp = utils.differentiate_wrt(On, dim=plevel_name, x=(On[plevel_name] * 100)) dVpcdl = utils.differentiate_wrt(Vp * cos_lat, dim=lat_name, x=(Vp[lat_name] * degtorad)) dVncdl = utils.differentiate_wrt(Vn * cos_lat, dim=lat_name, x=(Vn[lat_name] * degtorad)) dUpdl = utils.differentiate_wrt(Up, dim=lat_name, x=(Up[lat_name] * degtorad)) dVpdl = utils.differentiate_wrt(Vp, dim=lat_name, x=(Vp[lat_name] * degtorad)) if loop_triple_terms: UpUnUp = _triple_terms_loop(Up, Un, Up) UpUpUn = _triple_terms_loop(Up, Up, Un) VpVnUp = _triple_terms_loop(Vp, Vn, Up) VpVpUn = _triple_terms_loop(Vp, Vp, Un) VpUnUp = _triple_terms_loop(Vp, Un, Up) VpUpUn = _triple_terms_loop(Vp, Up, Un) UpVnUp = _triple_terms_loop(Up, Vn, Up) UpVpUn = _triple_terms_loop(Up, Vp, Un) gpUpUngpOp = _triple_terms_loop(dUpdp, Un, dOpdp) gpUpUpgpOn = _triple_terms_loop(dUpdp, Up, dOndp) gpVpVngpOp = _triple_terms_loop(dVpdp, Vn, dOpdp) gpVpVpgpOn = _triple_terms_loop(dVpdp, Vp, dOndp) glUpUnglVpc = 
_triple_terms_loop(dUpdl, Un, dVpcdl) glUpUpglVnc = _triple_terms_loop(dUpdl, Up, dVncdl) glVpVnglVpc = _triple_terms_loop(dVpdl, Vn, dVpcdl) glVpVpglVnc = _triple_terms_loop(dVpdl, Vp, dVncdl) else: UpUnUp = _triple_terms(Up, Un, Up) UpUpUn = _triple_terms(Up, Up, Un) VpVnUp = _triple_terms(Vp, Vn, Up) VpVpUn = _triple_terms(Vp, Vp, Un) VpUnUp = _triple_terms(Vp, Un, Up) VpUpUn = _triple_terms(Vp, Up, Un) UpVnUp = _triple_terms(Up, Vn, Up) UpVpUn = _triple_terms(Up, Vp, Un) gpUpUngpOp = _triple_terms(dUpdp, Un, dOpdp) gpUpUpgpOn = _triple_terms(dUpdp, Up, dOndp) gpVpVngpOp = _triple_terms(dVpdp, Vn, dOpdp) gpVpVpgpOn = _triple_terms(dVpdp, Vp, dOndp) glUpUnglVpc = _triple_terms(dUpdl, Un, dVpcdl) glUpUpglVnc = _triple_terms(dUpdl, Up, dVncdl) glVpVnglVpc = _triple_terms(dVpdl, Vn, dVpcdl) glVpVpglVnc = _triple_terms(dVpdl, Vp, dVncdl) Ln_int = -(1j * Up['n']) / (utils.constants().R_earth * cos_lat) * \ (UpUnUp - UpUpUn) + \ (1j * Vp['n']) / (utils.constants().R_earth * cos_lat) * \ (VpVnUp - VpVpUn) - \ tan_lat / utils.constants().R_earth * \ (VpUnUp + VpUpUn) + \ tan_lat / utils.constants().R_earth * \ (UpVnUp + UpVpUn) + \ (gpUpUngpOp + gpUpUpgpOn) + \ (gpVpVngpOp + gpVpVpgpOn) + \ 1 / (utils.constants().R_earth * cos_lat) * \ (glUpUnglVpc + glUpUpglVnc + glVpVnglVpc + glVpVpglVnc) energies['Ln_int'] = Ln_int if integrate: Ln = abs(_int_over_atmos(Ln_int, lat_name, lon_name, plevel_name, lon_dim=u[lon_name])) # [W/m^2] energies['Ln'] = Ln if ('Ca' in terms): # Compute the rate of transfer of zonal available potential energy to eddy # available potential energy in wavenumber n, Rn ----- if 'temp_Z' not in locals(): temp_Z = temp.mean(dim=lon_name) if 'V' not in locals(): Vp, Vn = _truncate(utils.fft(v, dim=lon_name, nfft=len(v[lon_name]), twosided=True, shift=True) / len(v[lon_name]), n_truncate=n_wavenumbers, dim='f_'+lon_name) if 'B' not in locals(): Bp, Bn = _truncate(utils.fft(temp, dim=lon_name, nfft=len(temp[lon_name]), twosided=True, shift=True) / 
len(temp[lon_name]), n_truncate=n_wavenumbers, dim='f_'+lon_name) if 'O' not in locals(): Op, On = _truncate(utils.fft(omega, dim=lon_name, nfft=len(omega[lon_name]), twosided=True, shift=True) / len(omega[lon_name]), n_truncate=n_wavenumbers, dim='f_'+lon_name) dtemp_Zdlat = utils.differentiate_wrt(temp_Z, dim=lat_name, x=(temp_Z[lat_name] * degtorad)) theta = temp * p_kap theta_Z = theta.mean(dim=lon_name) theta_Za = theta_Z - theta_A dtheta_Zadp = utils.differentiate_wrt(theta_Za, dim=plevel_name, x=(theta_Za[plevel_name] * 100)) Rn_int = gamma * utils.constants().C_pd * ((dtemp_Zdlat / utils.constants().R_earth) * (Vp * Bn + Vn * Bp) + (p_kap * dtheta_Zadp) * (Op * Bn + On * Bp)) # [W/kg] energies['Rn_int'] = Rn_int if integrate: Rn = abs(_int_over_atmos(Rn_int, lat_name, lon_name, plevel_name, lon_dim=temp[lon_name])) # [W/m^2] energies['Rn'] = Rn if ('Ce' in terms): # Compute the rate of conversion of available potential energy of wavenumber n # to eddy kinetic energy of wavenumber n, Cn ----- if vgradz: if 'U' not in locals(): Up, Un = _truncate(utils.fft(u, dim=lon_name, nfft=len(u[lon_name]), twosided=True, shift=True) / len(u[lon_name]), n_truncate=n_wavenumbers, dim='f_'+lon_name) if 'V' not in locals(): Vp, Vn = _truncate(utils.fft(v, dim=lon_name, nfft=len(v[lon_name]), twosided=True, shift=True) / len(v[lon_name]), n_truncate=n_wavenumbers, dim='f_'+lon_name) Ap, An = _truncate(utils.fft(gh, dim=lon_name, nfft=len(gh[lon_name]), twosided=True, shift=True) / len(gh[lon_name]), n_truncate=n_wavenumbers, dim='f_'+lon_name) dApdlat = utils.differentiate_wrt(Ap, dim=lat_name, x=(Ap[lat_name] * degtorad)) dAndlat = utils.differentiate_wrt(An, dim=lat_name, x=(An[lat_name] * degtorad)) Cn_int = (((-1j * utils.constants().g * Up['n']) / \ (utils.constants().R_earth * xr.ufuncs.cos(Up[lat_name] * degtorad))) * \ (Ap * Un - An * Up)) - \ ((utils.constants().g / utils.constants().R_earth) * \ (dApdlat * Vn + dAndlat * Vp)) # [W/kg] energies['Cn_int'] = Cn_int if 
integrate: Cn = abs(_int_over_atmos(Cn_int, lat_name, lon_name, plevel_name, lon_dim=u[lon_name])) # [W/m^2] energies['Cn'] = Cn else: if 'O' not in locals(): Op, On = _truncate(utils.fft(omega, dim=lon_name, nfft=len(omega[lon_name]), twosided=True, shift=True) / len(omega[lon_name]), n_truncate=n_wavenumbers, dim='f_'+lon_name) if 'B' not in locals(): Bp, Bn = _truncate(utils.fft(temp, dim=lon_name, nfft=len(temp[lon_name]), twosided=True, shift=True) / len(temp[lon_name]), n_truncate=n_wavenumbers, dim='f_'+lon_name) Cn_int = - (utils.constants().R_d / (omega[plevel_name] * 100)) * (Op * Bn + On * Bp) # [W/kg] energies['Cn_int'] = Cn_int if integrate: Cn = abs(_int_over_atmos(Cn_int, lat_name, lon_name, plevel_name, lon_dim=temp[lon_name])) # [W/m^2] energies['Cn'] = Cn if ('Ck' in terms): # Compute the rate of transfer of kinetic energy to the zonally averaged flow # from eddies of wavenumber n, Mn ----- if 'v_Z' not in locals(): v_Z = v.mean(dim=lon_name) if 'u_Z' not in locals(): u_Z = u.mean(dim=lon_name) if 'U' not in locals(): Up, Un = _truncate(utils.fft(u, dim=lon_name, nfft=len(u[lon_name]), twosided=True, shift=True) / len(u[lon_name]), n_truncate=n_wavenumbers, dim='f_'+lon_name) if 'V' not in locals(): Vp, Vn = _truncate(utils.fft(v, dim=lon_name, nfft=len(v[lon_name]), twosided=True, shift=True) / len(v[lon_name]), n_truncate=n_wavenumbers, dim='f_'+lon_name) if 'O' not in locals(): Op, On = _truncate(utils.fft(omega, dim=lon_name, nfft=len(omega[lon_name]), twosided=True, shift=True) / len(omega[lon_name]), n_truncate=n_wavenumbers, dim='f_'+lon_name) dv_Zdlat = utils.differentiate_wrt(v_Z, dim=lat_name, x=(v[lat_name] * degtorad)) du_Zndlat = utils.differentiate_wrt(u_Z / xr.ufuncs.cos(u[lat_name] * degtorad), dim=lat_name, x=(u[lat_name] * degtorad)) dv_Zdp = utils.differentiate_wrt(v_Z, dim=plevel_name, x=(v[plevel_name] * 100)) du_Zdp = utils.differentiate_wrt(u_Z, dim=plevel_name, x=(u[plevel_name] * 100)) Mn_int = (-2 * Up * Un * v_Z * 
tan_lat / utils.constants().R_earth) + \ (2 * Vp * Vn * dv_Zdlat / utils.constants().R_earth + (Vp * On + Vn * Op) * dv_Zdp) + \ ((Up * On + Un * Op) * du_Zdp) + \ ((Up * Vn + Un * Vp) * xr.ufuncs.cos(u[lat_name] * degtorad) / \ utils.constants().R_earth * du_Zndlat) # [W/kg] energies['Mn_int'] = Mn_int if integrate: Mn = abs(_int_over_atmos(Mn_int, lat_name, lon_name, plevel_name, lon_dim=u[lon_name])) # [W/m^2] energies['Mn'] = Mn else: if ('Pe' in terms): # Compute the total eddy available potential energy, Pe [also commonly called # Ae] ----- if 'temp_Z' not in locals(): temp_Z = temp.mean(dim=lon_name) temp_z = temp - temp_Z Pe_int = gamma * utils.constants().C_pd / 2 * (temp_z ** 2).mean(dim=lon_name) # [J/kg] energies['Pe_int'] = Pe_int if integrate: Pe = _int_over_atmos(Pe_int, lat_name, lon_name, plevel_name, lon_dim=temp[lon_name]) # [J/m^2] energies['Pe'] = Pe if ('Ke' in terms): # Compute the total eddy kinetic energy, Ke ----- if 'u_Z' not in locals(): u_Z = u.mean(dim=lon_name) if 'v_Z' not in locals(): v_Z = v.mean(dim=lon_name) u_z = u - u_Z v_z = v - v_Z Ke_int = 0.5 * (u_z ** 2 + v_z ** 2).mean(dim=lon_name) # [J/kg] energies['Ke_int'] = Ke_int if integrate: Ke = _int_over_atmos(Ke_int, lat_name, lon_name, plevel_name, lon_dim=u[lon_name]) # [J/m^2] energies['Ke'] = Ke if ('Ca' in terms): # Compute the rate of transfer of total available potential energy in the zonally # averaged temperature distribution (Pz) to total eddy available potential energy # (Pe), Ca ----- if 'v_Z' not in locals(): v_Z = v.mean(dim=lon_name) if 'temp_Z' not in locals(): temp_Z = temp.mean(dim=lon_name) if 'omega_Z' not in locals(): omega_Z = omega.mean(dim=lon_name) if 'theta_Z' not in locals(): theta = temp * p_kap theta_Z = theta.mean(dim=lon_name) if 'dtemp_Zdlat' not in locals(): dtemp_Zdlat = utils.differentiate_wrt(temp_Z, dim=lat_name, x=(temp_Z[lat_name] * degtorad)) v_z = v - v_Z temp_z = temp - temp_Z omega_z = omega - omega_Z oT_Z = (omega_z * 
temp_z).mean(dim=lon_name) oT_A = utils.average(omega_z * temp_z, [lat_name, lon_name], weights=cos_lat) oT_Za = oT_Z - oT_A theta_Za = theta_Z - theta_A dtheta_Zadp = utils.differentiate_wrt(theta_Za, dim=plevel_name, x=(theta_Za[plevel_name] * 100)) Ca_int = - gamma * utils.constants().C_pd * \ (((v_z * temp_z).mean(dim=lon_name) * dtemp_Zdlat / utils.constants().R_earth) + \ (p_kap * oT_Za * dtheta_Zadp)) # [W/kg] energies['Ca_int'] = Ca_int if integrate: Ca = _int_over_atmos(Ca_int, lat_name, lon_name, plevel_name, lon_dim=v[lon_name]) # [W/m^2] energies['Ca'] = Ca if ('Ce' in terms): # Compute the rate of transfer of total eddy available potential energy (Pe) to # total eddy kinetic energy (Ke), Ce ----- if 'temp_Z' not in locals(): temp_Z = temp.mean(dim=lon_name) if 'omega_Z' not in locals(): omega_Z = omega.mean(dim=lon_name) temp_z = temp - temp_Z omega_z = omega - omega_Z Ce_int = - (utils.constants().R_d / (temp[plevel_name] * 100)) * \ (omega_z * temp_z).mean(dim=lon_name) # [W/kg] energies['Ce_int'] = Ce_int if integrate: Ce = _int_over_atmos(Ce_int, lat_name, lon_name, plevel_name, lon_dim=temp[lon_name]) # [W/m^2] energies['Ce'] = Ce if ('Ck' in terms): # Compute the rate of transfer of total eddy kinetic energy (Ke) to total kinetic # energy in zonally averaged motion (Kz), Ck ----- if 'u_Z' not in locals(): u_Z = u.mean(dim=lon_name) if 'v_Z' not in locals(): v_Z = v.mean(dim=lon_name) if 'omega_Z' not in locals(): omega_Z = omega.mean(dim=lon_name) u_z = u - u_Z v_z = v - v_Z omega_z = omega - omega_Z du_Zndlat = utils.differentiate_wrt(u_Z / cos_lat, dim=lat_name, x=(u_Z[lat_name] * degtorad)) dv_Zdlat = utils.differentiate_wrt(v_Z, dim=lat_name, x=(v_Z[lat_name] * degtorad)) du_Zdp = utils.differentiate_wrt(u_Z, dim=plevel_name, x=(u_Z[plevel_name] * 100)) dv_Zdp = utils.differentiate_wrt(v_Z, dim=plevel_name, x=(v_Z[plevel_name] * 100)) Ck_int = (u_z * v_z).mean(dim=lon_name) * cos_lat * du_Zndlat / utils.constants().R_earth + \ (u_z * 
omega_z).mean(dim=lon_name) * du_Zdp + \ (v_z ** 2).mean(dim=lon_name) * dv_Zdlat / utils.constants().R_earth + \ (v_z * omega_z).mean(dim=lon_name) * dv_Zdp - \ (u_z ** 2).mean(dim=lon_name) * v_Z * tan_lat / utils.constants().R_earth energies['Ck_int'] = Ck_int if integrate: Ck = _int_over_atmos(Ck_int, lat_name, lon_name, plevel_name, lon_dim=temp[lon_name]) # [W/m^2] energies['Ck'] = Ck if ('Gz' in terms): # Compute the rate of generation of zonal available potential energy due to the zonally # averaged heating, Gz ----- if ('Cz' not in terms) | ('Ca' not in terms): raise ValueError('The rate of generation of zonal available potential energy, Gz, is computed from the sum of Cz and Ca. Please add these to the list, terms=[<terms>].') if spectral: warnings.warn('Rate of generation of zonal available potential energy is computed from the sum of Cz and Ca and cannot be computed in Fourier space. Returning Gz in real-space.') Ca_int = Rn_int.where(Rn_int.n > 0, drop=True).sum(dim='n').real # sum Rn to get Ca Gz_int = Cz_int + Ca_int energies['Gz_int'] = Gz_int if integrate: Gz = _int_over_atmos(Gz_int, lat_name, lon_name, plevel_name, lon_dim=temp[lon_name]) # [W/m^2] energies['Gz'] = Gz if ('Ge' in terms): # Compute the rate of generation of eddy available potential energy (Ae), Ge ----- if ('Ce' not in terms) | ('Ca' not in terms): raise ValueError('The rate of generation of eddy available potential energy, Ge, is computed from the residual of Ce and Ca. Please add these to the list, terms=[<terms>].') if spectral: warnings.warn('The rate of generation of eddy available potential energy is computed from the residual of Ce and Ca and cannot be computed in Fourier space. 
Returning Ge in real-space.') Ce_int = Cn_int.where(Cn_int.n > 0, drop=True).sum(dim='n').real # sum Cn to get Ce if 'Ca_int' not in locals(): Ca_int = Rn_int.where(Rn_int.n > 0, drop=True).sum(dim='n').real # sum Rn to get Ca Ge_int = Ce_int - Ca_int energies['Ge_int'] = Ge_int if integrate: Ge = _int_over_atmos(Ge_int, lat_name, lon_name, plevel_name, lon_dim=temp[lon_name]) # [W/m^2] energies['Ge'] = Ge if ('Dz' in terms): # Compute the rate of viscous dissipation of zonal kinetic energy, Dz ----- if ('Cz' not in terms) | ('Ck' not in terms): raise ValueError('The rate of viscous dissipation of zonal kinetic energy, Dz, is computed from the residual of Cz and Ck. Please add these to the list, terms=[<terms>].') if spectral: warnings.warn('The rate of viscous dissipation of zonal kinetic energy, Dz, is computed from the residual of Cz and Ck and cannot be computed in Fourier space. Returning De in real-space.') Ck_int = Mn_int.where(Mn_int.n > 0, drop=True).sum(dim='n').real # sum Mn to get Ck Dz_int = Cz_int - Ck_int energies['Dz_int'] = Dz_int if integrate: Dz = _int_over_atmos(Dz_int, lat_name, lon_name, plevel_name, lon_dim=temp[lon_name]) # [W/m^2] energies['Dz'] = Dz if ('De' in terms): # Compute the rate of dissipation of eddy kinetic energy (Ke), De ----- if ('Ce' not in terms) | ('Ck' not in terms): raise ValueError('The rate of viscous dissipation of eddy kinetic energy, De, is computed from the residual of Ce and Ck. Please add these to the list, terms=[<terms>].') if spectral: warnings.warn('The rate of viscous dissipation of eddy kinetic energy, De, is computed from the residual of Ce and Ck and cannot be computed in Fourier space. 
Returning De in real-space.') if 'Ce_int' not in locals(): Ce_int = Cn_int.where(Cn_int.n > 0, drop=True).sum(dim='n').real # sum Cn to get Ce if 'Ck_int' not in locals(): Ck_int = Mn_int.where(Mn_int.n > 0, drop=True).sum(dim='n').real # sum Mn to get Ck De_int = Ce_int - Ck_int energies['De_int'] = De_int if integrate: De = _int_over_atmos(De_int, lat_name, lon_name, plevel_name, lon_dim=temp[lon_name]) # [W/m^2] energies['De'] = De return energies # =================================================================================================== def auto_merge(paths, preprocess=None, parallel=True, **kwargs): """ Automatically merge a split xarray Dataset. This is designed to behave like `xarray.open_mfdataset`, except it supports concatenation along multiple dimensions. Parameters ---------- datasets : str or list of str or list of xarray.Dataset Either a glob expression or list of paths as you would pass to xarray.open_mfdataset, or a list of xarray datasets. If a list of datasets is passed, you should make sure that they are represented as dask arrays to avoid reading the whole dataset into memory. Returns ------- xarray.Dataset The merged dataset. 
""" if parallel: # wrap the open_dataset, getattr, and preprocess with delayed open_ = dask.delayed(xr.open_dataset) getattr_ = dask.delayed(getattr) if preprocess is not None: preprocess = dask.delayed(preprocess) else: open_ = open_dataset getattr_ = getattr datasets = [open_(p, **kwargs) for p in paths] file_objs = [getattr_(ds, '_file_obj') for ds in datasets] if preprocess is not None: datasets = [preprocess(ds) for ds in datasets] if parallel: # calling compute here will return the datasets/file_objs lists, # the underlying datasets will still be stored as dask arrays datasets, file_objs = dask.compute(datasets, file_objs) def _combine_along_last_dim(datasets): merged = [] # Determine the dimension along which the dataset is split split_dims = [d for d in datasets[0].dims if len(np.unique([ds[d].values[0] for ds in datasets])) > 1] # Concatenate along one of the split dimensions concat_dim = split_dims[-1] # Group along the remaining dimensions and concatenate within each # group. sorted_ds = sorted(datasets, key=lambda ds: tuple(ds[d].values[0] for d in split_dims)) for _, group in itertools.groupby( sorted_ds, key=lambda ds: tuple(ds[d].values[0] for d in split_dims[:-1]) ): merged.append(xr.auto_combine(group, concat_dim=concat_dim)) return merged merged = datasets while len(merged) > 1: merged = _combine_along_last_dim(merged) return merged[0]
48.837134
201
0.555499
import numpy as np import pandas as pd import xarray as xr import cartopy from collections import Sequence from itertools import chain, count import matplotlib import matplotlib.pyplot as plt import matplotlib.ticker as mticker from cartopy.util import add_cyclic_point from cartopy.mpl.gridliner import LONGITUDE_FORMATTER, LATITUDE_FORMATTER from doppyo import utils def rank_gufunc(x): import bottleneck ranks = bottleneck.nanrankdata(x,axis=-1) ranks = ranks[...,0] return ranks def compute_rank(da_1, da_2, over_dim): if over_dim not in da_2.coords: da_2_pass = da_2.copy() da_2_pass.coords[over_dim] = -1 da_2_pass = da_2_pass.expand_dims(over_dim) else: da_2_pass = da_2.copy() aligned = xr.align(da_2_pass, da_1, join='inner', exclude=over_dim) combined = xr.concat(aligned, dim=over_dim) return xr.apply_ufunc(rank_gufunc, combined, input_core_dims=[[over_dim]], dask='allowed', output_dtypes=[int]).rename('rank') def categorize(da, bin_edges): return xr.apply_ufunc(np.digitize, da, bin_edges, input_core_dims=[[],[]], dask='allowed', output_dtypes=[int]).rename('categorized') def unstack_and_count(da, dims): try: unstacked = da.unstack(da.dims[0]) except ValueError: unstacked = da if dims is None: return ((0 * unstacked) + 1) else: return ((0 * unstacked) + 1).sum(dim=dims, skipna=True) def compute_histogram(da, bin_edges, over_dims): da = da.rename('data') hist = da.groupby_bins(da, bins=bin_edges, squeeze=False) \ .apply(unstack_and_count, dims=over_dims) \ .fillna(0) \ .rename({'data_bins' : 'bins'}) hist['bins'] = (bin_edges[0:-1]+bin_edges[1:])/2 return hist.astype(int).where(hist.sum('bins') != 0) def calc_gradient(da, dim, x=None): da_n = da.copy() if x is None: x = da_n[dim] centre_chunk = range(len(x[dim])-2) f_hd = da_n.shift(**{dim:-2}) f = da_n.shift(**{dim:-1}) f_hs = da_n hs = x.shift(**{dim:-1}) - x hd = x.shift(**{dim:-2}) - x.shift(**{dim:-1}) c = (hs ** 2 * f_hd + (hd ** 2 - hs ** 2) * f - hd ** 2 * f_hs) / \ (hs * hd * (hd + hs)).isel(**{dim : 
centre_chunk}) c[dim] = x[dim][1:-1] l = (da_n.shift(**{dim:-1}) - da_n).isel(**{dim : 0}) / \ (x.shift(**{dim:-1}) - x).isel(**{dim : 0}) r = (-da_n.shift(**{dim:1}) + da_n).isel(**{dim : -1}) / \ (-x.shift(**{dim:1}) + x).isel(**{dim : -1}) grad = xr.concat([l, c, r], dim=dim) grad[dim] = da[dim] return grad def bias_correct_ms(da_biased, da_target, da_target_clim=None, init_date_name='init_date', lead_time_name='lead_time'): def _groupby_lead_and_mean(da, over_dims, init_date_name, lead_time_name): return da.unstack('stacked_' + init_date_name + '_' + lead_time_name).groupby(lead_time_name).mean(over_dims, skipna=True) def _groupby_lead_and_std(da, over_dims, init_date_name, lead_time_name): return da.unstack('stacked_' + init_date_name + '_' + lead_time_name).groupby(lead_time_name).std(over_dims, skipna=True) def _unstack_and_shift_per_month(da, shift, init_date_name, lead_time_name): da_us = da.unstack('stacked_' + init_date_name + '_' + lead_time_name) the_month = np.ndarray.flatten(da_us.month.values) the_month = int(np.unique(the_month[~np.isnan(the_month)])) return da_us - shift.sel(month=the_month) def _unstack_and_scale_per_month(da, scale, init_date_name, lead_time_name): da_us = da.unstack('stacked_' + init_date_name + '_' + lead_time_name) the_month = np.ndarray.flatten(da_us.month.values) the_month = int(np.unique(the_month[~np.isnan(the_month)])) return da_us * scale.sel(month=the_month) def _scale_per_month(da, scale): return da.groupby('time.month') * scale _anomalize = lambda data, clim: datetime_to_leadtime( anomalize( leadtime_to_datetime(data),clim)) _rescale = lambda da, scale : datetime_to_leadtime( _scale_per_month( leadtime_to_datetime(da), scale)) da_biased = da_biased.copy() da_target = da_target.copy() month = (da_biased[init_date_name].dt.month + da_biased[lead_time_name]) % 12 month = month.where(month != 0, 12) da_biased.coords['month'] = month try: da_biased_mean = da_biased.groupby('month').apply(_groupby_lead_and_mean, 
over_dims=[init_date_name,'ensemble'], init_date_name=init_date_name, lead_time_name=lead_time_name) except ValueError: da_biased_mean = da_biased.groupby('month').apply(_groupby_lead_and_mean, over_dims=init_date_name, init_date_name=init_date_name, lead_time_name=lead_time_name) if da_target_clim is not None: da_target_mean = da_target.groupby('time.month').mean('time') da_meancorr = da_biased.groupby('month').apply(_unstack_and_shift_per_month, shift=(da_biased_mean - da_target_mean), init_date_name=init_date_name, lead_time_name=lead_time_name) \ .mean('month', skipna=True) da_meancorr[lead_time_name] = da_biased[lead_time_name] da_meancorr.coords['month'] = month da_anom_meancorr = da_meancorr.groupby(init_date_name).apply(_anomalize, clim=da_target_clim) da_anom_meancorr.coords['month'] = month else: da_anom_meancorr = da_biased.groupby('month').apply(_unstack_and_shift_per_month, shift=(da_biased_mean), init_date_name=init_date_name, lead_time_name=lead_time_name) \ .mean('month', skipna=True) da_anom_meancorr[lead_time_name] = da_anom_meancorr[lead_time_name] da_anom_meancorr.coords['month'] = month try: da_biased_std_tmp = da_anom_meancorr.groupby('month').apply(_groupby_lead_and_std, over_dims=[init_date_name,'ensemble'], init_date_name=init_date_name, lead_time_name=lead_time_name) except ValueError: da_biased_std_tmp = da_anom_meancorr.groupby('month').apply(_groupby_lead_and_std, over_dims=init_date_name, init_date_name=init_date_name, lead_time_name=lead_time_name) try: da_target_std = da_target.sel(lat=da_biased.lat, lon=da_biased.lon).groupby('time.month').std('time') except: da_target_std = da_target.groupby('time.month').std('time') da_anom_stdcorr_tmp = da_anom_meancorr.groupby('month').apply(_unstack_and_scale_per_month, scale=(da_target_std / da_biased_std_tmp), init_date_name=init_date_name, lead_time_name=lead_time_name) \ .mean('month', skipna=True) da_anom_stdcorr_tmp[lead_time_name] = da_biased[lead_time_name] 
da_anom_stdcorr_tmp.coords['month'] = month try: da_biased_std = concat_times(da_anom_stdcorr_tmp).groupby('time.month').std(['time','ensemble']) except ValueError: da_biased_std = concat_times(da_anom_stdcorr_tmp).groupby('time.month').std('time') da_anom_stdcorr = da_anom_stdcorr_tmp.groupby(init_date_name).apply(_rescale, scale=(da_target_std / da_biased_std)) if da_target_clim is not None: da_stdcorr = da_anom_stdcorr.groupby(init_date_name).apply(_anomalize, clim=-da_target_clim) return da_stdcorr.drop('month'), da_anom_stdcorr.drop('month') else: return da_anom_stdcorr.drop('month') def bias_correct_m(da_biased, da_target, da_target_clim=None, init_date_name='init_date', lead_time_name='lead_time'): def _groupby_lead_and_mean(da, over_dims, init_date_name, lead_time_name): return da.unstack('stacked_' + init_date_name + '_' + lead_time_name).groupby(lead_time_name).mean(over_dims, skipna=True) def _unstack_and_shift_per_month(da, shift, init_date_name, lead_time_name): da_us = da.unstack('stacked_' + init_date_name + '_' + lead_time_name) the_month = np.ndarray.flatten(da_us.month.values) the_month = int(np.unique(the_month[~np.isnan(the_month)])) return da_us - shift.sel(month=the_month) _anomalize = lambda data, clim: datetime_to_leadtime( anomalize( leadtime_to_datetime(data),clim)) da_biased = da_biased.copy() da_target = da_target.copy() month = (da_biased[init_date_name].dt.month + da_biased[lead_time_name]) % 12 month = month.where(month != 0, 12) da_biased.coords['month'] = month try: da_biased_mean = da_biased.groupby('month').apply(_groupby_lead_and_mean, over_dims=[init_date_name,'ensemble'], init_date_name=init_date_name, lead_time_name=lead_time_name) except ValueError: da_biased_mean = da_biased.groupby('month').apply(_groupby_lead_and_mean, over_dims=init_date_name, init_date_name=init_date_name, lead_time_name=lead_time_name) if da_target_clim is not None: da_target_mean = da_target.groupby('time.month').mean('time') da_meancorr = 
da_biased.groupby('month').apply(_unstack_and_shift_per_month, shift=(da_biased_mean - da_target_mean), init_date_name=init_date_name, lead_time_name=lead_time_name) \ .mean('month', skipna=True) da_meancorr[lead_time_name] = da_biased[lead_time_name] da_meancorr.coords['month'] = month da_anom_meancorr = da_meancorr.groupby(init_date_name).apply(_anomalize, clim=da_target_clim) da_anom_meancorr.coords['month'] = month else: da_anom_meancorr = da_biased.groupby('month').apply(_unstack_and_shift_per_month, shift=(da_biased_mean), init_date_name=init_date_name, lead_time_name=lead_time_name) \ .mean('month', skipna=True) da_anom_meancorr[lead_time_name] = da_anom_meancorr[lead_time_name] da_anom_meancorr.coords['month'] = month if da_target_clim is not None: da_meancorrr = da_anom_meancorr.groupby(init_date_name).apply(_anomalize, clim=-da_target_clim) return da_meancorr.drop('month'), da_anom_meancorr.drop('month') else: return da_anom_meancorr.drop('month') def conditional_bias_correct(da_cmp, da_ref, over_dims): cc = skill.compute_Pearson_corrcoef(da_cmp.mean('ensemble'), da_ref, over_dims=over_dims, subtract_local_mean=False) correct_cond_bias = (da_ref.std(over_dims) / da_cmp.mean('ensemble').std(over_dims)) * cc return da_cmp * correct_cond_bias def trunc_time(time, freq): return time.astype('<M8[' + freq + ']') def month_delta(date_in, delta, trunc_to_start=False): date_mod = pd.Timestamp(date_in) m, y = (date_mod.month + delta) % 12, date_mod.year + ((date_mod.month) + delta - 1) // 12 if not m: m = 12 d = min(date_mod.day, [31, 29 if y % 4 == 0 and not y % 400 == 0 else 28,31,30,31,30,31,31,30,31,30,31][m - 1]) if trunc_to_start: date_out = trunc_time(np.datetime64(date_mod.replace(day=d,month=m, year=y)),'M') else: date_out = np.datetime64(date_mod.replace(day=d,month=m, year=y)) return np.datetime64(date_out,'ns') def year_delta(date_in, delta, trunc_to_start=False): date_mod = month_delta(date_in, 12 * delta) if trunc_to_start: date_out = 
trunc_time(date_mod,'Y') else: date_out = date_mod return date_out def datetime_to_leadtime(data_in): init_date = data_in.time.values[0] lead_times = range(len(data_in.time)) try: freq = pd.infer_freq(data_in.time.values) if '-' in freq: freq = freq[:freq.find('-')] incr_string = ''.join([i for i in freq if i.isdigit()]) freq_incr = [int(incr_string) if incr_string else 1][0] freq_type = ''.join([i for i in freq if not i.isdigit()]) if 'QS' in freq_type: freq = str(3*freq_incr) + 'MS' elif 'Q' in freq_type: freq = str(3*freq_incr) + 'M' elif ('YS' in freq_type) | ('AS' in freq_type): freq = str(12*freq_incr) + 'MS' elif ('Y' in freq_type) | ('A' in freq_type): freq = str(12*freq_incr) + 'M' except ValueError: dt = (data_in.time.values[1] - data_in.time.values[0]) / np.timedelta64(1, 's') month = data_in.time.dt.month[0] if dt == 60*60*24: freq = 'D' elif ((month == 1) | (month == 3) | (month == 5) | (month == 7) | (month == 8) | (month == 10) | (month == 12)) & (dt == 31*60*60*24): freq = 'MS' elif ((month == 4) | (month == 6) | (month == 9) | (month == 11)) & (dt == 30*60*60*24): freq = 'MS' elif (month == 2) & ((dt == 28*60*60*24) | (dt == 29*60*60*24)): freq = 'MS' elif (dt == 365*60*60*24) | (dt == 366*60*60*24): freq = 'A' else: freq = 'NA' data_out = data_in.rename({'time' : 'lead_time'}) data_out['lead_time'] = lead_times data_out['lead_time'].attrs['units'] = freq data_out.coords['init_date'] = init_date return data_out def leadtime_to_datetime(data_in, init_date_name='init_date', lead_time_name='lead_time'): try: init_date = data_in[init_date_name].values[0] except IndexError: init_date = data_in[init_date_name].values lead_times = list(map(int, data_in[lead_time_name].values)) freq = data_in[lead_time_name].attrs['units'] datetimes = (pd.date_range(init_date, periods=len(lead_times), freq=freq)).values data_out = data_in.drop(init_date_name) data_out = data_out.rename({lead_time_name : 'time'}) data_out['time'] = datetimes return prune(data_out) def 
get_nearest_point(da, lat, lon): return da.sel(lat=lat,lon=lon,method='nearest') def plot_fields(data, title=None, headings=None, ncol=2, contour=False, vlims=None, clims=None, squeeze_row=1, squeeze_col=1, squeeze_cbar=1, shift_cbar=1, cmap='viridis', fontsize=12, invert=False): def _depth(seq): for level in count(): if not seq: return level seq = list(chain.from_iterable(s for s in seq if isinstance(s, Sequence))) matplotlib.rc('font', family='sans-serif') matplotlib.rc('font', serif='Helvetica') matplotlib.rc('text', usetex='false') matplotlib.rcParams.update({'font.size': fontsize}) nrow = int(np.ceil(len(data)/ncol)); fig = plt.figure(figsize=(11*squeeze_col, nrow*4*squeeze_row)) if (clims is not None) & (np.shape(vlims) != np.shape(clims)): raise ValueError('The input clims must be equal in size to vlims') one_cbar = False if vlims is None: vlims = [[None, None]] * len(data) if _depth(vlims) == 1: one_cbar = True over_count = 1 for idx,dat in enumerate(data): if one_cbar: vmin, vmax = vlims if clims is not None: cmin, cmax = clims else: vmin, vmax = vlims[idx] if clims is not None: cmin, cmax = clims[idx] if ('lat' in dat.dims) and ('lon' in dat.dims): trans = cartopy.crs.PlateCarree() ax = plt.subplot(nrow, ncol, over_count, projection=cartopy.crs.PlateCarree(central_longitude=180)) extent = [dat.lon.min(), dat.lon.max(), dat.lat.min(), dat.lat.max()] if contour is True: if clims is not None: ax.coastlines(color='gray') im = ax.contourf(dat.lon, dat.lat, dat, levels=np.linspace(vmin,vmax,12), origin='lower', transform=trans, vmin=vmin, vmax=vmax, cmap=cmap) ax.contour(dat.lon, dat.lat, dat, levels=np.linspace(cmin,cmax,12), origin='lower', transform=trans, vmin=vmin, vmax=vmax, colors='w', linewidths=2) ax.contour(dat.lon, dat.lat, dat, levels=np.linspace(cmin,cmax,12), origin='lower', transform=trans, vmin=vmin, vmax=vmax, colors='k', linewidths=1) else: ax.coastlines(color='black') im = ax.contourf(dat.lon, dat.lat, dat, origin='lower', transform=trans, 
vmin=vmin, vmax=vmax, cmap=cmap) else: ax.coastlines(color='black') im = ax.imshow(dat, origin='lower', extent=extent, transform=trans, vmin=vmin, vmax=vmax, cmap=cmap) gl = ax.gridlines(crs=cartopy.crs.PlateCarree(), draw_labels=True) gl.xlines = False gl.ylines = False gl.xlabels_top = False if over_count % ncol == 0: gl.ylabels_left = False elif (over_count+ncol-1) % ncol == 0: gl.ylabels_right = False else: gl.ylabels_left = False gl.ylabels_right = False gl.xlocator = mticker.FixedLocator([-90, 0, 90, 180]) gl.ylocator = mticker.FixedLocator([-90, -60, 0, 60, 90]) gl.xformatter = LONGITUDE_FORMATTER gl.yformatter = LATITUDE_FORMATTER if not one_cbar: cbar = plt.colorbar(im, ax=ax, orientation="horizontal", aspect=30/squeeze_cbar, pad=shift_cbar*0.1) tick_locator = mticker.MaxNLocator(nbins=6) cbar.locator = tick_locator cbar.update_ticks() if headings is not None: cbar.set_label(headings[idx], labelpad=5, fontsize=fontsize); elif headings is not None: ax.set_title(headings[idx], fontsize=fontsize) else: ax = plt.subplot(nrow, ncol, over_count) if 'lat' in dat.dims: x_plt = dat['lat'] y_plt = dat[utils.get_other_dims(dat,'lat')[0]] elif 'lon' in dat.dims: x_plt = dat['lon'] y_plt = dat[utils.get_other_dims(dat,'lon')[0]] else: x_plt = dat[dat.dims[1]] y_plt = dat[dat.dims[0]] extent = [x_plt.min(), x_plt.max(), y_plt.min(), y_plt.max()] if contour is True: if clims is not None: im = ax.contourf(x_plt, y_plt, dat, levels=np.linspace(vmin,vmax,12), vmin=vmin, vmax=vmax, cmap=cmap) ax.contour(x_plt, y_plt, dat, levels=np.linspace(cmin,cmax,12), colors='w', linewidths=2) ax.contour(x_plt, y_plt, dat, levels=np.linspace(cmin,cmax,12), colors='k', linewidths=1) else: im = ax.contourf(x_plt, y_plt, dat, vmin=vmin, vmax=vmax, cmap=cmap) else: im = ax.imshow(dat, origin='lower', extent=extent, vmin=vmin, vmax=vmax, cmap=cmap) if over_count % ncol == 0: ax.yaxis.tick_right() elif (over_count+ncol-1) % ncol == 0: ax.set_ylabel(y_plt.dims[0], fontsize=fontsize) else: 
ax.set_yticks([]) if idx / ncol >= nrow - 1: ax.set_xlabel(x_plt.dims[0], fontsize=fontsize) if not one_cbar: cbar = plt.colorbar(im, ax=ax, orientation="horizontal", aspect=30/squeeze_cbar, pad=shift_cbar*0.1) tick_locator = mticker.MaxNLocator(nbins=6) cbar.locator = tick_locator cbar.update_ticks() if headings is not None: cbar.set_label(headings[idx], labelpad=5, fontsize=fontsize); elif headings is not None: ax.set_title(headings[idx], fontsize=fontsize) if invert: ax.invert_yaxis() over_count += 1 plt.tight_layout() if one_cbar: vmin, vmax = vlims fig.subplots_adjust(bottom=shift_cbar*0.16) cbar_ax = fig.add_axes([0.15, 0.13, 0.7, squeeze_cbar*0.020]) cbar = fig.colorbar(im, cax=cbar_ax, orientation='horizontal'); cbar_ax.set_xlabel(title, rotation=0, labelpad=15, fontsize=fontsize); cbar.set_ticks(np.linspace(vmin,vmax,5)) elif title is not None: fig.suptitle(title, y=1) def size_GB(xr_object): bytes = xr_object.nbytes Ten2the9 = 10**9 Two2the30 = 2**30 GBytes = bytes / Ten2the9 GiBytes = bytes / Two2the30 print(xr_object.name, "is", GBytes, "GB", 'which is', GiBytes,"GiB") return GBytes,GiBytes def get_pres_name(da): if 'pfull' in da.dims: return 'pfull' elif 'phalf' in da.dims: return 'phalf' else: raise KeyError('Unable to determine pressure dimension') pass def did_event(da, event): eval_expr = event.replace(">", "da >").replace("<", "da <").replace("==", "da ==") \ .replace("=", "da ==").replace('&&', '&').replace('||', '|') \ .replace("and", "&").replace("or", "|") eval_expr = '(' + eval_expr + ').rename("event_logical")' return eval(eval_expr) def compute_likelihood(da_logical, dim='ensemble'): if dim == None: likelihood = da_logical else: likelihood = da_logical.mean(dim=dim).rename('likelihood') return likelihood def atmos_energy_cycle(temp, u, v, omega, gh, terms=None, vgradz=False, spectral=False, n_wavenumbers=20, integrate=True, loop_triple_terms=False, lat_name=None, lon_name=None, plevel_name=None): def _flip_n(da): daf = da.copy() daf['n'] = 
-daf['n'] return daf.sortby(daf['n']) def _truncate(F, n_truncate, dim): F[dim] = 360 * F[dim] F = F.rename({dim : 'n'}) F = F.where(abs(F.n) <= n_truncate, drop=True) return F, _flip_n(F) def _triple_terms(A, B, C): Am = A.rename({'n' : 'm'}) Cnm = C.rolling(n=len(C.n), center=True).construct('m', fill_value=0) Cnm['m'] = -C['n'].values Am = Am.where(Am['m'] != 0, drop=True) Cnm = Cnm.where(Cnm['m'] != 0, drop=True) return (B * (Am * Cnm)).sum(dim='m', skipna=False) def _triple_terms_loop(A, B, C): ms = A['n'].where(A['n'] != 0, drop=True).values ABC = A.copy() * 0 for m in ms: Am = A.sel(n=m) Cnm = C.shift(n=int(m)).fillna(0) ABC = ABC + (Am * B * Cnm) return ABC if terms is None: terms = ['Pz', 'Kz', 'Pe', 'Ke', 'Cz', 'Ca', 'Ce', 'Ck', 'Gz', 'Ge', 'Dz', 'De'] if isinstance(terms, str): terms = [terms] # Initialize some things ----- if lat_name is None: lat_name = utils.get_lat_name(temp) if lon_name is None: lon_name = utils.get_lon_name(temp) if plevel_name is None: plevel_name = utils.get_plevel_name(temp) degtorad = utils.constants().pi / 180 tan_lat = xr.ufuncs.tan(temp[lat_name] * degtorad) cos_lat = xr.ufuncs.cos(temp[lat_name] * degtorad) # Determine the stability parameter using Saltzman's approach ----- kappa = utils.constants().R_d / utils.constants().C_pd p_kap = (1000 / temp[plevel_name]) ** kappa theta_A = utils.average(temp * p_kap, [lat_name, lon_name], weights=cos_lat) dtheta_Adp = utils.differentiate_wrt(theta_A, dim=plevel_name, x=(theta_A[plevel_name] * 100)) gamma = - p_kap * (utils.constants().R_d) / ((temp[plevel_name] * 100) * utils.constants().C_pd) / dtheta_Adp energies = gamma.rename('gamma').to_dataset() if ('Pz' in terms): temp_A = utils.average(temp, [lat_name, lon_name], weights=cos_lat) temp_Z = temp.mean(dim=lon_name) temp_Za = temp_Z - temp_A Pz_int = gamma * utils.constants().C_pd / 2 * temp_Za ** 2 energies['Pz_int'] = Pz_int if integrate: Pz = _int_over_atmos(Pz_int, lat_name, lon_name, plevel_name, lon_dim=temp[lon_name]) 
energies['Pz'] = Pz if ('Kz' in terms): u_Z = u.mean(dim=lon_name) v_Z = v.mean(dim=lon_name) Kz_int = 0.5 * (u_Z ** 2 + v_Z ** 2) energies['Kz_int'] = Kz_int if integrate: Kz = _int_over_atmos(Kz_int, lat_name, lon_name, plevel_name, lon_dim=u[lon_name]) energies['Kz'] = Kz if ('Cz' in terms): if vgradz: if 'v_Z' not in locals(): v_Z = v.mean(dim=lon_name) gh_Z = gh.mean(dim=lon_name) dghdlat = utils.differentiate_wrt(gh_Z, dim=lat_name, x=(gh_Z[lat_name] * degtorad)) Cz_int = - (utils.constants().g / utils.constants().R_earth) * v_Z * dghdlat energies['Cz_int'] = Cz_int if integrate: Cz = _int_over_atmos(Cz_int, lat_name, lon_name, plevel_name, lon_dim=gh[lon_name]) energies['Cz'] = Cz else: if 'temp_Za' not in locals(): temp_A = utils.average(temp, [lat_name, lon_name], weights=cos_lat) temp_Z = temp.mean(dim=lon_name) temp_Za = temp_Z - temp_A omega_A = utils.average(omega, [lat_name, lon_name], weights=cos_lat) omega_Z = omega.mean(dim=lon_name) omega_Za = omega_Z - omega_A Cz_int = - (utils.constants().R_d / (temp[plevel_name] * 100)) * omega_Za * temp_Za energies['Cz_int'] = Cz_int if integrate: Cz = _int_over_atmos(Cz_int, lat_name, lon_name, plevel_name, lon_dim=omega[lon_name]) energies['Cz'] = Cz if spectral: if ('Pe' in terms): Bp, Bn = _truncate(utils.fft(temp, dim=lon_name, nfft=len(temp[lon_name]), twosided=True, shift=True) / len(temp[lon_name]), n_truncate=n_wavenumbers, dim='f_'+lon_name) Pn_int = (gamma * utils.constants().C_pd * abs(Bp) ** 2) energies['Pn_int'] = Pn_int if integrate: Pn = _int_over_atmos(Pn_int, lat_name, lon_name, plevel_name, lon_dim=temp[lon_name]) energies['Pn'] = Pn Up, Un = _truncate(utils.fft(u, dim=lon_name, nfft=len(u[lon_name]), twosided=True, shift=True) / len(u[lon_name]), n_truncate=n_wavenumbers, dim='f_'+lon_name) Vp, Vn = _truncate(utils.fft(v, dim=lon_name, nfft=len(v[lon_name]), twosided=True, shift=True) / len(v[lon_name]), n_truncate=n_wavenumbers, dim='f_'+lon_name) Op, On = _truncate(utils.fft(omega, 
dim=lon_name, nfft=len(omega[lon_name]), twosided=True, shift=True) / len(omega[lon_name]), n_truncate=n_wavenumbers, dim='f_'+lon_name) dBpdlat = utils.differentiate_wrt(Bp, dim=lat_name, x=(Bp[lat_name] * degtorad)) dBndlat = utils.differentiate_wrt(Bn, dim=lat_name, x=(Bn[lat_name] * degtorad)) dBpdp = utils.differentiate_wrt(Bp, dim=plevel_name, x=(Bp[plevel_name] * 100)) dBndp = utils.differentiate_wrt(Bn, dim=plevel_name, x=(Bn[plevel_name] * 100)) if loop_triple_terms: BpBnUp = _triple_terms_loop(Bp, Bn, Up) BpBpUn = _triple_terms_loop(Bp, Bp, Un) BpglBnVp = _triple_terms_loop(Bp, dBndlat, Vp) BpglBpVn = _triple_terms_loop(Bp, dBpdlat, Vn) BpgpBnOp = _triple_terms_loop(Bp, dBndp, Op) BpgpBpOn = _triple_terms_loop(Bp, dBpdp, On) BpBnOp = _triple_terms_loop(Bp, Bn, Op) BpBpOn = _triple_terms_loop(Bp, Bp, On) else: BpBnUp = _triple_terms(Bp, Bn, Up) BpBpUn = _triple_terms(Bp, Bp, Un) BpglBnVp = _triple_terms(Bp, dBndlat, Vp) BpglBpVn = _triple_terms(Bp, dBpdlat, Vn) BpgpBnOp = _triple_terms(Bp, dBndp, Op) BpgpBpOn = _triple_terms(Bp, dBpdp, On) BpBnOp = _triple_terms(Bp, Bn, Op) BpBpOn = _triple_terms(Bp, Bp, On) Sn_int = -gamma * utils.constants().C_pd * (1j * Bp['n']) / \ (utils.constants().R_earth * xr.ufuncs.cos(Bp[lat_name] * degtorad)) * \ (BpBnUp + BpBpUn) + \ gamma * utils.constants().C_pd / utils.constants().R_earth * \ (BpglBnVp + BpglBpVn) + \ gamma * utils.constants().C_pd * (BpgpBnOp + BpgpBpOn) + \ gamma * utils.constants().R_d / Bp[plevel_name] * \ (BpBnOp + BpBpOn) energies['Sn_int'] = Sn_int if integrate: Sn = abs(_int_over_atmos(Sn_int, lat_name, lon_name, plevel_name, lon_dim=temp[lon_name])) energies['Sn'] = Sn if ('Ke' in terms): if 'U' not in locals(): Up, Un = _truncate(utils.fft(u, dim=lon_name, nfft=len(u[lon_name]), twosided=True, shift=True) / len(u[lon_name]), n_truncate=n_wavenumbers, dim='f_'+lon_name) if 'V' not in locals(): Vp, Vn = _truncate(utils.fft(v, dim=lon_name, nfft=len(v[lon_name]), twosided=True, shift=True) / 
len(v[lon_name]), n_truncate=n_wavenumbers, dim='f_'+lon_name) Kn_int = abs(Up) ** 2 + abs(Vp) ** 2 energies['Kn_int'] = Kn_int if integrate: Kn = _int_over_atmos(Kn_int, lat_name, lon_name, plevel_name, lon_dim=u[lon_name]) energies['Kn'] = Kn if 'O' not in locals(): Op, On = _truncate(utils.fft(omega, dim=lon_name, nfft=len(omega[lon_name]), twosided=True, shift=True) / len(omega[lon_name]), n_truncate=n_wavenumbers, dim='f_'+lon_name) dUpdp = utils.differentiate_wrt(Up, dim=plevel_name, x=(Up[plevel_name] * 100)) dVpdp = utils.differentiate_wrt(Vp, dim=plevel_name, x=(Vp[plevel_name] * 100)) dOpdp = utils.differentiate_wrt(Op, dim=plevel_name, x=(Op[plevel_name] * 100)) dOndp = utils.differentiate_wrt(On, dim=plevel_name, x=(On[plevel_name] * 100)) dVpcdl = utils.differentiate_wrt(Vp * cos_lat, dim=lat_name, x=(Vp[lat_name] * degtorad)) dVncdl = utils.differentiate_wrt(Vn * cos_lat, dim=lat_name, x=(Vn[lat_name] * degtorad)) dUpdl = utils.differentiate_wrt(Up, dim=lat_name, x=(Up[lat_name] * degtorad)) dVpdl = utils.differentiate_wrt(Vp, dim=lat_name, x=(Vp[lat_name] * degtorad)) if loop_triple_terms: UpUnUp = _triple_terms_loop(Up, Un, Up) UpUpUn = _triple_terms_loop(Up, Up, Un) VpVnUp = _triple_terms_loop(Vp, Vn, Up) VpVpUn = _triple_terms_loop(Vp, Vp, Un) VpUnUp = _triple_terms_loop(Vp, Un, Up) VpUpUn = _triple_terms_loop(Vp, Up, Un) UpVnUp = _triple_terms_loop(Up, Vn, Up) UpVpUn = _triple_terms_loop(Up, Vp, Un) gpUpUngpOp = _triple_terms_loop(dUpdp, Un, dOpdp) gpUpUpgpOn = _triple_terms_loop(dUpdp, Up, dOndp) gpVpVngpOp = _triple_terms_loop(dVpdp, Vn, dOpdp) gpVpVpgpOn = _triple_terms_loop(dVpdp, Vp, dOndp) glUpUnglVpc = _triple_terms_loop(dUpdl, Un, dVpcdl) glUpUpglVnc = _triple_terms_loop(dUpdl, Up, dVncdl) glVpVnglVpc = _triple_terms_loop(dVpdl, Vn, dVpcdl) glVpVpglVnc = _triple_terms_loop(dVpdl, Vp, dVncdl) else: UpUnUp = _triple_terms(Up, Un, Up) UpUpUn = _triple_terms(Up, Up, Un) VpVnUp = _triple_terms(Vp, Vn, Up) VpVpUn = _triple_terms(Vp, Vp, Un) 
VpUnUp = _triple_terms(Vp, Un, Up) VpUpUn = _triple_terms(Vp, Up, Un) UpVnUp = _triple_terms(Up, Vn, Up) UpVpUn = _triple_terms(Up, Vp, Un) gpUpUngpOp = _triple_terms(dUpdp, Un, dOpdp) gpUpUpgpOn = _triple_terms(dUpdp, Up, dOndp) gpVpVngpOp = _triple_terms(dVpdp, Vn, dOpdp) gpVpVpgpOn = _triple_terms(dVpdp, Vp, dOndp) glUpUnglVpc = _triple_terms(dUpdl, Un, dVpcdl) glUpUpglVnc = _triple_terms(dUpdl, Up, dVncdl) glVpVnglVpc = _triple_terms(dVpdl, Vn, dVpcdl) glVpVpglVnc = _triple_terms(dVpdl, Vp, dVncdl) Ln_int = -(1j * Up['n']) / (utils.constants().R_earth * cos_lat) * \ (UpUnUp - UpUpUn) + \ (1j * Vp['n']) / (utils.constants().R_earth * cos_lat) * \ (VpVnUp - VpVpUn) - \ tan_lat / utils.constants().R_earth * \ (VpUnUp + VpUpUn) + \ tan_lat / utils.constants().R_earth * \ (UpVnUp + UpVpUn) + \ (gpUpUngpOp + gpUpUpgpOn) + \ (gpVpVngpOp + gpVpVpgpOn) + \ 1 / (utils.constants().R_earth * cos_lat) * \ (glUpUnglVpc + glUpUpglVnc + glVpVnglVpc + glVpVpglVnc) energies['Ln_int'] = Ln_int if integrate: Ln = abs(_int_over_atmos(Ln_int, lat_name, lon_name, plevel_name, lon_dim=u[lon_name])) energies['Ln'] = Ln if ('Ca' in terms): if 'temp_Z' not in locals(): temp_Z = temp.mean(dim=lon_name) if 'V' not in locals(): Vp, Vn = _truncate(utils.fft(v, dim=lon_name, nfft=len(v[lon_name]), twosided=True, shift=True) / len(v[lon_name]), n_truncate=n_wavenumbers, dim='f_'+lon_name) if 'B' not in locals(): Bp, Bn = _truncate(utils.fft(temp, dim=lon_name, nfft=len(temp[lon_name]), twosided=True, shift=True) / len(temp[lon_name]), n_truncate=n_wavenumbers, dim='f_'+lon_name) if 'O' not in locals(): Op, On = _truncate(utils.fft(omega, dim=lon_name, nfft=len(omega[lon_name]), twosided=True, shift=True) / len(omega[lon_name]), n_truncate=n_wavenumbers, dim='f_'+lon_name) dtemp_Zdlat = utils.differentiate_wrt(temp_Z, dim=lat_name, x=(temp_Z[lat_name] * degtorad)) theta = temp * p_kap theta_Z = theta.mean(dim=lon_name) theta_Za = theta_Z - theta_A dtheta_Zadp = utils.differentiate_wrt(theta_Za, 
dim=plevel_name, x=(theta_Za[plevel_name] * 100)) Rn_int = gamma * utils.constants().C_pd * ((dtemp_Zdlat / utils.constants().R_earth) * (Vp * Bn + Vn * Bp) + (p_kap * dtheta_Zadp) * (Op * Bn + On * Bp)) energies['Rn_int'] = Rn_int if integrate: Rn = abs(_int_over_atmos(Rn_int, lat_name, lon_name, plevel_name, lon_dim=temp[lon_name])) energies['Rn'] = Rn if ('Ce' in terms): if vgradz: if 'U' not in locals(): Up, Un = _truncate(utils.fft(u, dim=lon_name, nfft=len(u[lon_name]), twosided=True, shift=True) / len(u[lon_name]), n_truncate=n_wavenumbers, dim='f_'+lon_name) if 'V' not in locals(): Vp, Vn = _truncate(utils.fft(v, dim=lon_name, nfft=len(v[lon_name]), twosided=True, shift=True) / len(v[lon_name]), n_truncate=n_wavenumbers, dim='f_'+lon_name) Ap, An = _truncate(utils.fft(gh, dim=lon_name, nfft=len(gh[lon_name]), twosided=True, shift=True) / len(gh[lon_name]), n_truncate=n_wavenumbers, dim='f_'+lon_name) dApdlat = utils.differentiate_wrt(Ap, dim=lat_name, x=(Ap[lat_name] * degtorad)) dAndlat = utils.differentiate_wrt(An, dim=lat_name, x=(An[lat_name] * degtorad)) Cn_int = (((-1j * utils.constants().g * Up['n']) / \ (utils.constants().R_earth * xr.ufuncs.cos(Up[lat_name] * degtorad))) * \ (Ap * Un - An * Up)) - \ ((utils.constants().g / utils.constants().R_earth) * \ (dApdlat * Vn + dAndlat * Vp)) energies['Cn_int'] = Cn_int if integrate: Cn = abs(_int_over_atmos(Cn_int, lat_name, lon_name, plevel_name, lon_dim=u[lon_name])) energies['Cn'] = Cn else: if 'O' not in locals(): Op, On = _truncate(utils.fft(omega, dim=lon_name, nfft=len(omega[lon_name]), twosided=True, shift=True) / len(omega[lon_name]), n_truncate=n_wavenumbers, dim='f_'+lon_name) if 'B' not in locals(): Bp, Bn = _truncate(utils.fft(temp, dim=lon_name, nfft=len(temp[lon_name]), twosided=True, shift=True) / len(temp[lon_name]), n_truncate=n_wavenumbers, dim='f_'+lon_name) Cn_int = - (utils.constants().R_d / (omega[plevel_name] * 100)) * (Op * Bn + On * Bp) energies['Cn_int'] = Cn_int if integrate: Cn 
= abs(_int_over_atmos(Cn_int, lat_name, lon_name, plevel_name, lon_dim=temp[lon_name])) energies['Cn'] = Cn if ('Ck' in terms): if 'v_Z' not in locals(): v_Z = v.mean(dim=lon_name) if 'u_Z' not in locals(): u_Z = u.mean(dim=lon_name) if 'U' not in locals(): Up, Un = _truncate(utils.fft(u, dim=lon_name, nfft=len(u[lon_name]), twosided=True, shift=True) / len(u[lon_name]), n_truncate=n_wavenumbers, dim='f_'+lon_name) if 'V' not in locals(): Vp, Vn = _truncate(utils.fft(v, dim=lon_name, nfft=len(v[lon_name]), twosided=True, shift=True) / len(v[lon_name]), n_truncate=n_wavenumbers, dim='f_'+lon_name) if 'O' not in locals(): Op, On = _truncate(utils.fft(omega, dim=lon_name, nfft=len(omega[lon_name]), twosided=True, shift=True) / len(omega[lon_name]), n_truncate=n_wavenumbers, dim='f_'+lon_name) dv_Zdlat = utils.differentiate_wrt(v_Z, dim=lat_name, x=(v[lat_name] * degtorad)) du_Zndlat = utils.differentiate_wrt(u_Z / xr.ufuncs.cos(u[lat_name] * degtorad), dim=lat_name, x=(u[lat_name] * degtorad)) dv_Zdp = utils.differentiate_wrt(v_Z, dim=plevel_name, x=(v[plevel_name] * 100)) du_Zdp = utils.differentiate_wrt(u_Z, dim=plevel_name, x=(u[plevel_name] * 100)) Mn_int = (-2 * Up * Un * v_Z * tan_lat / utils.constants().R_earth) + \ (2 * Vp * Vn * dv_Zdlat / utils.constants().R_earth + (Vp * On + Vn * Op) * dv_Zdp) + \ ((Up * On + Un * Op) * du_Zdp) + \ ((Up * Vn + Un * Vp) * xr.ufuncs.cos(u[lat_name] * degtorad) / \ utils.constants().R_earth * du_Zndlat) energies['Mn_int'] = Mn_int if integrate: Mn = abs(_int_over_atmos(Mn_int, lat_name, lon_name, plevel_name, lon_dim=u[lon_name])) energies['Mn'] = Mn else: if ('Pe' in terms): if 'temp_Z' not in locals(): temp_Z = temp.mean(dim=lon_name) temp_z = temp - temp_Z Pe_int = gamma * utils.constants().C_pd / 2 * (temp_z ** 2).mean(dim=lon_name) energies['Pe_int'] = Pe_int if integrate: Pe = _int_over_atmos(Pe_int, lat_name, lon_name, plevel_name, lon_dim=temp[lon_name]) energies['Pe'] = Pe if ('Ke' in terms): if 'u_Z' not in 
locals(): u_Z = u.mean(dim=lon_name) if 'v_Z' not in locals(): v_Z = v.mean(dim=lon_name) u_z = u - u_Z v_z = v - v_Z Ke_int = 0.5 * (u_z ** 2 + v_z ** 2).mean(dim=lon_name) energies['Ke_int'] = Ke_int if integrate: Ke = _int_over_atmos(Ke_int, lat_name, lon_name, plevel_name, lon_dim=u[lon_name]) energies['Ke'] = Ke if ('Ca' in terms): if 'v_Z' not in locals(): v_Z = v.mean(dim=lon_name) if 'temp_Z' not in locals(): temp_Z = temp.mean(dim=lon_name) if 'omega_Z' not in locals(): omega_Z = omega.mean(dim=lon_name) if 'theta_Z' not in locals(): theta = temp * p_kap theta_Z = theta.mean(dim=lon_name) if 'dtemp_Zdlat' not in locals(): dtemp_Zdlat = utils.differentiate_wrt(temp_Z, dim=lat_name, x=(temp_Z[lat_name] * degtorad)) v_z = v - v_Z temp_z = temp - temp_Z omega_z = omega - omega_Z oT_Z = (omega_z * temp_z).mean(dim=lon_name) oT_A = utils.average(omega_z * temp_z, [lat_name, lon_name], weights=cos_lat) oT_Za = oT_Z - oT_A theta_Za = theta_Z - theta_A dtheta_Zadp = utils.differentiate_wrt(theta_Za, dim=plevel_name, x=(theta_Za[plevel_name] * 100)) Ca_int = - gamma * utils.constants().C_pd * \ (((v_z * temp_z).mean(dim=lon_name) * dtemp_Zdlat / utils.constants().R_earth) + \ (p_kap * oT_Za * dtheta_Zadp)) energies['Ca_int'] = Ca_int if integrate: Ca = _int_over_atmos(Ca_int, lat_name, lon_name, plevel_name, lon_dim=v[lon_name]) energies['Ca'] = Ca if ('Ce' in terms): if 'temp_Z' not in locals(): temp_Z = temp.mean(dim=lon_name) if 'omega_Z' not in locals(): omega_Z = omega.mean(dim=lon_name) temp_z = temp - temp_Z omega_z = omega - omega_Z Ce_int = - (utils.constants().R_d / (temp[plevel_name] * 100)) * \ (omega_z * temp_z).mean(dim=lon_name) energies['Ce_int'] = Ce_int if integrate: Ce = _int_over_atmos(Ce_int, lat_name, lon_name, plevel_name, lon_dim=temp[lon_name]) energies['Ce'] = Ce if ('Ck' in terms): if 'u_Z' not in locals(): u_Z = u.mean(dim=lon_name) if 'v_Z' not in locals(): v_Z = v.mean(dim=lon_name) if 'omega_Z' not in locals(): omega_Z = 
omega.mean(dim=lon_name) u_z = u - u_Z v_z = v - v_Z omega_z = omega - omega_Z du_Zndlat = utils.differentiate_wrt(u_Z / cos_lat, dim=lat_name, x=(u_Z[lat_name] * degtorad)) dv_Zdlat = utils.differentiate_wrt(v_Z, dim=lat_name, x=(v_Z[lat_name] * degtorad)) du_Zdp = utils.differentiate_wrt(u_Z, dim=plevel_name, x=(u_Z[plevel_name] * 100)) dv_Zdp = utils.differentiate_wrt(v_Z, dim=plevel_name, x=(v_Z[plevel_name] * 100)) Ck_int = (u_z * v_z).mean(dim=lon_name) * cos_lat * du_Zndlat / utils.constants().R_earth + \ (u_z * omega_z).mean(dim=lon_name) * du_Zdp + \ (v_z ** 2).mean(dim=lon_name) * dv_Zdlat / utils.constants().R_earth + \ (v_z * omega_z).mean(dim=lon_name) * dv_Zdp - \ (u_z ** 2).mean(dim=lon_name) * v_Z * tan_lat / utils.constants().R_earth energies['Ck_int'] = Ck_int if integrate: Ck = _int_over_atmos(Ck_int, lat_name, lon_name, plevel_name, lon_dim=temp[lon_name]) energies['Ck'] = Ck if ('Gz' in terms): if ('Cz' not in terms) | ('Ca' not in terms): raise ValueError('The rate of generation of zonal available potential energy, Gz, is computed from the sum of Cz and Ca. Please add these to the list, terms=[<terms>].') if spectral: warnings.warn('Rate of generation of zonal available potential energy is computed from the sum of Cz and Ca and cannot be computed in Fourier space. Returning Gz in real-space.') Ca_int = Rn_int.where(Rn_int.n > 0, drop=True).sum(dim='n').real Gz_int = Cz_int + Ca_int energies['Gz_int'] = Gz_int if integrate: Gz = _int_over_atmos(Gz_int, lat_name, lon_name, plevel_name, lon_dim=temp[lon_name]) energies['Gz'] = Gz if ('Ge' in terms): if ('Ce' not in terms) | ('Ca' not in terms): raise ValueError('The rate of generation of eddy available potential energy, Ge, is computed from the residual of Ce and Ca. Please add these to the list, terms=[<terms>].') if spectral: warnings.warn('The rate of generation of eddy available potential energy is computed from the residual of Ce and Ca and cannot be computed in Fourier space. 
Returning Ge in real-space.') Ce_int = Cn_int.where(Cn_int.n > 0, drop=True).sum(dim='n').real if 'Ca_int' not in locals(): Ca_int = Rn_int.where(Rn_int.n > 0, drop=True).sum(dim='n').real Ge_int = Ce_int - Ca_int energies['Ge_int'] = Ge_int if integrate: Ge = _int_over_atmos(Ge_int, lat_name, lon_name, plevel_name, lon_dim=temp[lon_name]) energies['Ge'] = Ge if ('Dz' in terms): if ('Cz' not in terms) | ('Ck' not in terms): raise ValueError('The rate of viscous dissipation of zonal kinetic energy, Dz, is computed from the residual of Cz and Ck. Please add these to the list, terms=[<terms>].') if spectral: warnings.warn('The rate of viscous dissipation of zonal kinetic energy, Dz, is computed from the residual of Cz and Ck and cannot be computed in Fourier space. Returning De in real-space.') Ck_int = Mn_int.where(Mn_int.n > 0, drop=True).sum(dim='n').real Dz_int = Cz_int - Ck_int energies['Dz_int'] = Dz_int if integrate: Dz = _int_over_atmos(Dz_int, lat_name, lon_name, plevel_name, lon_dim=temp[lon_name]) energies['Dz'] = Dz if ('De' in terms): if ('Ce' not in terms) | ('Ck' not in terms): raise ValueError('The rate of viscous dissipation of eddy kinetic energy, De, is computed from the residual of Ce and Ck. Please add these to the list, terms=[<terms>].') if spectral: warnings.warn('The rate of viscous dissipation of eddy kinetic energy, De, is computed from the residual of Ce and Ck and cannot be computed in Fourier space. 
Returning De in real-space.') if 'Ce_int' not in locals(): Ce_int = Cn_int.where(Cn_int.n > 0, drop=True).sum(dim='n').real if 'Ck_int' not in locals(): Ck_int = Mn_int.where(Mn_int.n > 0, drop=True).sum(dim='n').real De_int = Ce_int - Ck_int energies['De_int'] = De_int if integrate: De = _int_over_atmos(De_int, lat_name, lon_name, plevel_name, lon_dim=temp[lon_name]) energies['De'] = De return energies def auto_merge(paths, preprocess=None, parallel=True, **kwargs): if parallel: open_ = dask.delayed(xr.open_dataset) getattr_ = dask.delayed(getattr) if preprocess is not None: preprocess = dask.delayed(preprocess) else: open_ = open_dataset getattr_ = getattr datasets = [open_(p, **kwargs) for p in paths] file_objs = [getattr_(ds, '_file_obj') for ds in datasets] if preprocess is not None: datasets = [preprocess(ds) for ds in datasets] if parallel: datasets, file_objs = dask.compute(datasets, file_objs) def _combine_along_last_dim(datasets): merged = [] split_dims = [d for d in datasets[0].dims if len(np.unique([ds[d].values[0] for ds in datasets])) > 1] concat_dim = split_dims[-1] sorted_ds = sorted(datasets, key=lambda ds: tuple(ds[d].values[0] for d in split_dims)) for _, group in itertools.groupby( sorted_ds, key=lambda ds: tuple(ds[d].values[0] for d in split_dims[:-1]) ): merged.append(xr.auto_combine(group, concat_dim=concat_dim)) return merged merged = datasets while len(merged) > 1: merged = _combine_along_last_dim(merged) return merged[0]
true
true
f723c95d5658ceeced474744cccb763f8b5be84d
2,382
py
Python
Foundations_of_Private_Computation/Split_Learning/concepts-definitions-code/ite-repo/demos/analytical_values/demo_k_ejt1.py
gonzalo-munillag/Private_AI_OpenMined
c23da9cc1c914d10646a0c0bc1a2497fe2cbaaca
[ "MIT" ]
5
2021-01-06T16:49:22.000Z
2021-02-19T05:34:27.000Z
Foundations_of_Private_Computation/Split_Learning/concepts-definitions-code/ite-repo/demos/analytical_values/demo_k_ejt1.py
gonzalo-munillag/Private_AI_OpenMined
c23da9cc1c914d10646a0c0bc1a2497fe2cbaaca
[ "MIT" ]
null
null
null
Foundations_of_Private_Computation/Split_Learning/concepts-definitions-code/ite-repo/demos/analytical_values/demo_k_ejt1.py
gonzalo-munillag/Private_AI_OpenMined
c23da9cc1c914d10646a0c0bc1a2497fe2cbaaca
[ "MIT" ]
null
null
null
#!/usr/bin/env python3 """ Demo for exponentiated Jensen-Tsallis kernel-1 estimators. Analytical vs estimated value is illustrated for spherical normal random variables. """ from numpy import eye from numpy.random import rand, multivariate_normal, randn from scipy import arange, zeros, ones import matplotlib.pyplot as plt from ite.cost.x_factory import co_factory from ite.cost.x_analytical_values import analytical_value_k_ejt1 def main(): # parameters: dim = 1 # dimension of the distribution num_of_samples_v = arange(1000, 50*1000+1, 2000) u = 0.8 # >0, parameter of the Jensen-Tsallis kernel cost_name = 'MKExpJT1_HT' # dim >= 1 # initialization: alpha = 2 # fixed; parameter of the Jensen-Tsallis kernel; for alpha = 2 we have # explicit formula for the Tsallis entropy, and hence for the # Jensen-Tsallis kernel(-1). distr = 'normal' # fixed num_of_samples_max = num_of_samples_v[-1] length = len(num_of_samples_v) co = co_factory(cost_name, mult=True, alpha=alpha, u=u) # cost object k_hat_v = zeros(length) # vector of estimated kernel values # distr, dim -> samples (y1,y2), distribution parameters (par1,par2), # analytical value (k): if distr == 'normal': # generate samples (y1,y2); y1~N(m1,s1^2xI), y2~N(m2,s2^2xI): m1, s1 = randn(dim), rand(1) m2, s2 = randn(dim), rand(1) y1 = multivariate_normal(m1, s1**2 * eye(dim), num_of_samples_max) y2 = multivariate_normal(m2, s2**2 * eye(dim), num_of_samples_max) par1 = {"mean": m1, "std": s1} par2 = {"mean": m2, "std": s2} else: raise Exception('Distribution=?') k = analytical_value_k_ejt1(distr, distr, u, par1, par2) # estimation: for (tk, num_of_samples) in enumerate(num_of_samples_v): k_hat_v[tk] = co.estimation(y1[0:num_of_samples], y2[0:num_of_samples]) # broadcast print("tk={0}/{1}".format(tk+1, length)) # plot: plt.plot(num_of_samples_v, k_hat_v, num_of_samples_v, ones(length)*k) plt.xlabel('Number of samples') plt.ylabel('Exponentiated Jensen-Tsallis kernel-1') plt.legend(('estimation', 'analytical value'), loc='best') 
plt.title("Estimator: " + cost_name) plt.show() if __name__ == "__main__": main()
33.549296
74
0.646096
from numpy import eye from numpy.random import rand, multivariate_normal, randn from scipy import arange, zeros, ones import matplotlib.pyplot as plt from ite.cost.x_factory import co_factory from ite.cost.x_analytical_values import analytical_value_k_ejt1 def main(): dim = 1 num_of_samples_v = arange(1000, 50*1000+1, 2000) u = 0.8 cost_name = 'MKExpJT1_HT' alpha = 2 distr = 'normal' num_of_samples_max = num_of_samples_v[-1] length = len(num_of_samples_v) co = co_factory(cost_name, mult=True, alpha=alpha, u=u) k_hat_v = zeros(length) if distr == 'normal': m1, s1 = randn(dim), rand(1) m2, s2 = randn(dim), rand(1) y1 = multivariate_normal(m1, s1**2 * eye(dim), num_of_samples_max) y2 = multivariate_normal(m2, s2**2 * eye(dim), num_of_samples_max) par1 = {"mean": m1, "std": s1} par2 = {"mean": m2, "std": s2} else: raise Exception('Distribution=?') k = analytical_value_k_ejt1(distr, distr, u, par1, par2) for (tk, num_of_samples) in enumerate(num_of_samples_v): k_hat_v[tk] = co.estimation(y1[0:num_of_samples], y2[0:num_of_samples]) print("tk={0}/{1}".format(tk+1, length)) plt.plot(num_of_samples_v, k_hat_v, num_of_samples_v, ones(length)*k) plt.xlabel('Number of samples') plt.ylabel('Exponentiated Jensen-Tsallis kernel-1') plt.legend(('estimation', 'analytical value'), loc='best') plt.title("Estimator: " + cost_name) plt.show() if __name__ == "__main__": main()
true
true
f723ca086eef0104ef4640b52705cddbd642cc36
2,063
py
Python
parsifal/reviews/migrations/0046_auto_20190717_2301.py
glauberferreira/parsifal-mec
66f85e0d48a270bddd1170caa2131bc74872462d
[ "MIT" ]
1
2019-06-13T16:09:26.000Z
2019-06-13T16:09:26.000Z
parsifal/reviews/migrations/0046_auto_20190717_2301.py
glauberferreira/parsifal-mec
66f85e0d48a270bddd1170caa2131bc74872462d
[ "MIT" ]
null
null
null
parsifal/reviews/migrations/0046_auto_20190717_2301.py
glauberferreira/parsifal-mec
66f85e0d48a270bddd1170caa2131bc74872462d
[ "MIT" ]
3
2019-10-05T04:16:59.000Z
2021-04-20T05:00:50.000Z
# -*- coding: utf-8 -*- from __future__ import unicode_literals from django.db import models, migrations import django.db.models.deletion from django.conf import settings class Migration(migrations.Migration): dependencies = [ migrations.swappable_dependency(settings.AUTH_USER_MODEL), ('reviews', '0045_articlefile'), ] operations = [ migrations.CreateModel( name='ArticleEvaluation', fields=[ ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)), ('status', models.CharField(default='U', max_length=1, choices=[('U', 'Unclassified'), ('R', 'Rejected'), ('A', 'Accepted'), ('D', 'Duplicated')])), ('comments', models.TextField(max_length=2000, null=True, blank=True)), ], ), migrations.RemoveField( model_name='article', name='comments', ), migrations.RemoveField( model_name='article', name='selection_criteria', ), migrations.RemoveField( model_name='article', name='status', ), migrations.AddField( model_name='articleevaluation', name='article', field=models.ForeignKey(related_name='evaluation_article', to='reviews.Article'), ), migrations.AddField( model_name='articleevaluation', name='review', field=models.ForeignKey(related_name='evaluation_review', to='reviews.Review'), ), migrations.AddField( model_name='articleevaluation', name='selection_criteria', field=models.ForeignKey(on_delete=django.db.models.deletion.SET_NULL, blank=True, to='reviews.SelectionCriteria', null=True), ), migrations.AddField( model_name='articleevaluation', name='user', field=models.ForeignKey(related_name='evaluation_user', to=settings.AUTH_USER_MODEL), ), ]
35.568966
164
0.596704
from __future__ import unicode_literals from django.db import models, migrations import django.db.models.deletion from django.conf import settings class Migration(migrations.Migration): dependencies = [ migrations.swappable_dependency(settings.AUTH_USER_MODEL), ('reviews', '0045_articlefile'), ] operations = [ migrations.CreateModel( name='ArticleEvaluation', fields=[ ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)), ('status', models.CharField(default='U', max_length=1, choices=[('U', 'Unclassified'), ('R', 'Rejected'), ('A', 'Accepted'), ('D', 'Duplicated')])), ('comments', models.TextField(max_length=2000, null=True, blank=True)), ], ), migrations.RemoveField( model_name='article', name='comments', ), migrations.RemoveField( model_name='article', name='selection_criteria', ), migrations.RemoveField( model_name='article', name='status', ), migrations.AddField( model_name='articleevaluation', name='article', field=models.ForeignKey(related_name='evaluation_article', to='reviews.Article'), ), migrations.AddField( model_name='articleevaluation', name='review', field=models.ForeignKey(related_name='evaluation_review', to='reviews.Review'), ), migrations.AddField( model_name='articleevaluation', name='selection_criteria', field=models.ForeignKey(on_delete=django.db.models.deletion.SET_NULL, blank=True, to='reviews.SelectionCriteria', null=True), ), migrations.AddField( model_name='articleevaluation', name='user', field=models.ForeignKey(related_name='evaluation_user', to=settings.AUTH_USER_MODEL), ), ]
true
true
f723ca1a6623fc6b30625bd969dcb19ab9192111
358
py
Python
Lessons/simpleWebBrowser.py
Luderio/Scientific-Computing-with-Python
c7eebcc3b46b68b3d5c08ad25fb802ae9ff42f7f
[ "MIT" ]
null
null
null
Lessons/simpleWebBrowser.py
Luderio/Scientific-Computing-with-Python
c7eebcc3b46b68b3d5c08ad25fb802ae9ff42f7f
[ "MIT" ]
null
null
null
Lessons/simpleWebBrowser.py
Luderio/Scientific-Computing-with-Python
c7eebcc3b46b68b3d5c08ad25fb802ae9ff42f7f
[ "MIT" ]
null
null
null
#Simple We Browser using sockets import socket mysock = socket.socket(socket.AF_INET, socket.SOCK_STREAM) mysock.connect(('data.pr4e.org', 80)) cmd = 'GET http://data.pr4e.org/romeo.txt HTTP/1.0\r\n\r\n'.encode() mysock.send(cmd) while True : data = mysock.recv(512) if len(data) < 1 : break print(data.decode()) mysock.close()
23.866667
68
0.664804
import socket mysock = socket.socket(socket.AF_INET, socket.SOCK_STREAM) mysock.connect(('data.pr4e.org', 80)) cmd = 'GET http://data.pr4e.org/romeo.txt HTTP/1.0\r\n\r\n'.encode() mysock.send(cmd) while True : data = mysock.recv(512) if len(data) < 1 : break print(data.decode()) mysock.close()
true
true
f723ca2e7cf32da6dc08be2afefe8b8e7395f254
287
py
Python
app.py
IIVIIIII/2020_Weathering
0759f2848ca912c8f1f9875f18c8e0aa604948f5
[ "MIT" ]
null
null
null
app.py
IIVIIIII/2020_Weathering
0759f2848ca912c8f1f9875f18c8e0aa604948f5
[ "MIT" ]
null
null
null
app.py
IIVIIIII/2020_Weathering
0759f2848ca912c8f1f9875f18c8e0aa604948f5
[ "MIT" ]
null
null
null
from flask import Flask, jsonify import data4app app = Flask(__name__) @app.route("/") def home(): return "Lets goooo!!!" @app.route("/<var>") def jsonified(var): data = data4app.get_data(var) return jsonify(data) if __name__ == "__main__": app.run(debug=True)
15.105263
33
0.648084
from flask import Flask, jsonify import data4app app = Flask(__name__) @app.route("/") def home(): return "Lets goooo!!!" @app.route("/<var>") def jsonified(var): data = data4app.get_data(var) return jsonify(data) if __name__ == "__main__": app.run(debug=True)
true
true
f723ca8574f5c324eb54931b511ea756f5d6462f
18,973
py
Python
sdk/python/tekton_pipeline/models/pod_template.py
jmcshane/experimental
3c47c7e87bcdadc6172941169f3f24fc3f159ae0
[ "Apache-2.0" ]
null
null
null
sdk/python/tekton_pipeline/models/pod_template.py
jmcshane/experimental
3c47c7e87bcdadc6172941169f3f24fc3f159ae0
[ "Apache-2.0" ]
null
null
null
sdk/python/tekton_pipeline/models/pod_template.py
jmcshane/experimental
3c47c7e87bcdadc6172941169f3f24fc3f159ae0
[ "Apache-2.0" ]
1
2020-07-30T15:55:45.000Z
2020-07-30T15:55:45.000Z
# Copyright 2020 The Tekton Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # coding: utf-8 """ Tekton Tekton Pipeline # noqa: E501 The version of the OpenAPI document: v0.17.2 Generated by: https://openapi-generator.tech """ import pprint import re # noqa: F401 import six from tekton_pipeline.configuration import Configuration class PodTemplate(object): """NOTE: This class is auto generated by OpenAPI Generator. Ref: https://openapi-generator.tech Do not edit the class manually. """ """ Attributes: openapi_types (dict): The key is attribute name and the value is attribute type. attribute_map (dict): The key is attribute name and the value is json key in definition. 
""" openapi_types = { 'affinity': 'V1Affinity', 'automount_service_account_token': 'bool', 'dns_config': 'V1PodDNSConfig', 'dns_policy': 'str', 'enable_service_links': 'bool', 'host_network': 'bool', 'image_pull_secrets': 'list[V1LocalObjectReference]', 'node_selector': 'dict(str, str)', 'priority_class_name': 'str', 'runtime_class_name': 'str', 'scheduler_name': 'str', 'security_context': 'V1PodSecurityContext', 'tolerations': 'list[V1Toleration]', 'volumes': 'list[V1Volume]' } attribute_map = { 'affinity': 'affinity', 'automount_service_account_token': 'automountServiceAccountToken', 'dns_config': 'dnsConfig', 'dns_policy': 'dnsPolicy', 'enable_service_links': 'enableServiceLinks', 'host_network': 'hostNetwork', 'image_pull_secrets': 'imagePullSecrets', 'node_selector': 'nodeSelector', 'priority_class_name': 'priorityClassName', 'runtime_class_name': 'runtimeClassName', 'scheduler_name': 'schedulerName', 'security_context': 'securityContext', 'tolerations': 'tolerations', 'volumes': 'volumes' } def __init__(self, affinity=None, automount_service_account_token=None, dns_config=None, dns_policy=None, enable_service_links=None, host_network=None, image_pull_secrets=None, node_selector=None, priority_class_name=None, runtime_class_name=None, scheduler_name=None, security_context=None, tolerations=None, volumes=None, local_vars_configuration=None): # noqa: E501 """PodTemplate - a model defined in OpenAPI""" # noqa: E501 if local_vars_configuration is None: local_vars_configuration = Configuration() self.local_vars_configuration = local_vars_configuration self._affinity = None self._automount_service_account_token = None self._dns_config = None self._dns_policy = None self._enable_service_links = None self._host_network = None self._image_pull_secrets = None self._node_selector = None self._priority_class_name = None self._runtime_class_name = None self._scheduler_name = None self._security_context = None self._tolerations = None self._volumes = None self.discriminator 
= None if affinity is not None: self.affinity = affinity if automount_service_account_token is not None: self.automount_service_account_token = automount_service_account_token if dns_config is not None: self.dns_config = dns_config if dns_policy is not None: self.dns_policy = dns_policy if enable_service_links is not None: self.enable_service_links = enable_service_links if host_network is not None: self.host_network = host_network if image_pull_secrets is not None: self.image_pull_secrets = image_pull_secrets if node_selector is not None: self.node_selector = node_selector if priority_class_name is not None: self.priority_class_name = priority_class_name if runtime_class_name is not None: self.runtime_class_name = runtime_class_name if scheduler_name is not None: self.scheduler_name = scheduler_name if security_context is not None: self.security_context = security_context if tolerations is not None: self.tolerations = tolerations if volumes is not None: self.volumes = volumes @property def affinity(self): """Gets the affinity of this PodTemplate. # noqa: E501 :return: The affinity of this PodTemplate. # noqa: E501 :rtype: V1Affinity """ return self._affinity @affinity.setter def affinity(self, affinity): """Sets the affinity of this PodTemplate. :param affinity: The affinity of this PodTemplate. # noqa: E501 :type: V1Affinity """ self._affinity = affinity @property def automount_service_account_token(self): """Gets the automount_service_account_token of this PodTemplate. # noqa: E501 AutomountServiceAccountToken indicates whether pods running as this service account should have an API token automatically mounted. # noqa: E501 :return: The automount_service_account_token of this PodTemplate. # noqa: E501 :rtype: bool """ return self._automount_service_account_token @automount_service_account_token.setter def automount_service_account_token(self, automount_service_account_token): """Sets the automount_service_account_token of this PodTemplate. 
AutomountServiceAccountToken indicates whether pods running as this service account should have an API token automatically mounted. # noqa: E501 :param automount_service_account_token: The automount_service_account_token of this PodTemplate. # noqa: E501 :type: bool """ self._automount_service_account_token = automount_service_account_token @property def dns_config(self): """Gets the dns_config of this PodTemplate. # noqa: E501 :return: The dns_config of this PodTemplate. # noqa: E501 :rtype: V1PodDNSConfig """ return self._dns_config @dns_config.setter def dns_config(self, dns_config): """Sets the dns_config of this PodTemplate. :param dns_config: The dns_config of this PodTemplate. # noqa: E501 :type: V1PodDNSConfig """ self._dns_config = dns_config @property def dns_policy(self): """Gets the dns_policy of this PodTemplate. # noqa: E501 Set DNS policy for the pod. Defaults to \"ClusterFirst\". Valid values are 'ClusterFirst', 'Default' or 'None'. DNS parameters given in DNSConfig will be merged with the policy selected with DNSPolicy. # noqa: E501 :return: The dns_policy of this PodTemplate. # noqa: E501 :rtype: str """ return self._dns_policy @dns_policy.setter def dns_policy(self, dns_policy): """Sets the dns_policy of this PodTemplate. Set DNS policy for the pod. Defaults to \"ClusterFirst\". Valid values are 'ClusterFirst', 'Default' or 'None'. DNS parameters given in DNSConfig will be merged with the policy selected with DNSPolicy. # noqa: E501 :param dns_policy: The dns_policy of this PodTemplate. # noqa: E501 :type: str """ self._dns_policy = dns_policy @property def enable_service_links(self): """Gets the enable_service_links of this PodTemplate. # noqa: E501 EnableServiceLinks indicates whether information about services should be injected into pod's environment variables, matching the syntax of Docker links. Optional: Defaults to true. # noqa: E501 :return: The enable_service_links of this PodTemplate. 
# noqa: E501 :rtype: bool """ return self._enable_service_links @enable_service_links.setter def enable_service_links(self, enable_service_links): """Sets the enable_service_links of this PodTemplate. EnableServiceLinks indicates whether information about services should be injected into pod's environment variables, matching the syntax of Docker links. Optional: Defaults to true. # noqa: E501 :param enable_service_links: The enable_service_links of this PodTemplate. # noqa: E501 :type: bool """ self._enable_service_links = enable_service_links @property def host_network(self): """Gets the host_network of this PodTemplate. # noqa: E501 HostNetwork specifies whether the pod may use the node network namespace # noqa: E501 :return: The host_network of this PodTemplate. # noqa: E501 :rtype: bool """ return self._host_network @host_network.setter def host_network(self, host_network): """Sets the host_network of this PodTemplate. HostNetwork specifies whether the pod may use the node network namespace # noqa: E501 :param host_network: The host_network of this PodTemplate. # noqa: E501 :type: bool """ self._host_network = host_network @property def image_pull_secrets(self): """Gets the image_pull_secrets of this PodTemplate. # noqa: E501 ImagePullSecrets gives the name of the secret used by the pod to pull the image if specified # noqa: E501 :return: The image_pull_secrets of this PodTemplate. # noqa: E501 :rtype: list[V1LocalObjectReference] """ return self._image_pull_secrets @image_pull_secrets.setter def image_pull_secrets(self, image_pull_secrets): """Sets the image_pull_secrets of this PodTemplate. ImagePullSecrets gives the name of the secret used by the pod to pull the image if specified # noqa: E501 :param image_pull_secrets: The image_pull_secrets of this PodTemplate. # noqa: E501 :type: list[V1LocalObjectReference] """ self._image_pull_secrets = image_pull_secrets @property def node_selector(self): """Gets the node_selector of this PodTemplate. 
# noqa: E501 NodeSelector is a selector which must be true for the pod to fit on a node. Selector which must match a node's labels for the pod to be scheduled on that node. More info: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/ # noqa: E501 :return: The node_selector of this PodTemplate. # noqa: E501 :rtype: dict(str, str) """ return self._node_selector @node_selector.setter def node_selector(self, node_selector): """Sets the node_selector of this PodTemplate. NodeSelector is a selector which must be true for the pod to fit on a node. Selector which must match a node's labels for the pod to be scheduled on that node. More info: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/ # noqa: E501 :param node_selector: The node_selector of this PodTemplate. # noqa: E501 :type: dict(str, str) """ self._node_selector = node_selector @property def priority_class_name(self): """Gets the priority_class_name of this PodTemplate. # noqa: E501 If specified, indicates the pod's priority. \"system-node-critical\" and \"system-cluster-critical\" are two special keywords which indicate the highest priorities with the former being the highest priority. Any other name must be defined by creating a PriorityClass object with that name. If not specified, the pod priority will be default or zero if there is no default. # noqa: E501 :return: The priority_class_name of this PodTemplate. # noqa: E501 :rtype: str """ return self._priority_class_name @priority_class_name.setter def priority_class_name(self, priority_class_name): """Sets the priority_class_name of this PodTemplate. If specified, indicates the pod's priority. \"system-node-critical\" and \"system-cluster-critical\" are two special keywords which indicate the highest priorities with the former being the highest priority. Any other name must be defined by creating a PriorityClass object with that name. If not specified, the pod priority will be default or zero if there is no default. 
# noqa: E501 :param priority_class_name: The priority_class_name of this PodTemplate. # noqa: E501 :type: str """ self._priority_class_name = priority_class_name @property def runtime_class_name(self): """Gets the runtime_class_name of this PodTemplate. # noqa: E501 RuntimeClassName refers to a RuntimeClass object in the node.k8s.io group, which should be used to run this pod. If no RuntimeClass resource matches the named class, the pod will not be run. If unset or empty, the \"legacy\" RuntimeClass will be used, which is an implicit class with an empty definition that uses the default runtime handler. More info: https://git.k8s.io/enhancements/keps/sig-node/runtime-class.md This is a beta feature as of Kubernetes v1.14. # noqa: E501 :return: The runtime_class_name of this PodTemplate. # noqa: E501 :rtype: str """ return self._runtime_class_name @runtime_class_name.setter def runtime_class_name(self, runtime_class_name): """Sets the runtime_class_name of this PodTemplate. RuntimeClassName refers to a RuntimeClass object in the node.k8s.io group, which should be used to run this pod. If no RuntimeClass resource matches the named class, the pod will not be run. If unset or empty, the \"legacy\" RuntimeClass will be used, which is an implicit class with an empty definition that uses the default runtime handler. More info: https://git.k8s.io/enhancements/keps/sig-node/runtime-class.md This is a beta feature as of Kubernetes v1.14. # noqa: E501 :param runtime_class_name: The runtime_class_name of this PodTemplate. # noqa: E501 :type: str """ self._runtime_class_name = runtime_class_name @property def scheduler_name(self): """Gets the scheduler_name of this PodTemplate. # noqa: E501 SchedulerName specifies the scheduler to be used to dispatch the Pod # noqa: E501 :return: The scheduler_name of this PodTemplate. 
# noqa: E501 :rtype: str """ return self._scheduler_name @scheduler_name.setter def scheduler_name(self, scheduler_name): """Sets the scheduler_name of this PodTemplate. SchedulerName specifies the scheduler to be used to dispatch the Pod # noqa: E501 :param scheduler_name: The scheduler_name of this PodTemplate. # noqa: E501 :type: str """ self._scheduler_name = scheduler_name @property def security_context(self): """Gets the security_context of this PodTemplate. # noqa: E501 :return: The security_context of this PodTemplate. # noqa: E501 :rtype: V1PodSecurityContext """ return self._security_context @security_context.setter def security_context(self, security_context): """Sets the security_context of this PodTemplate. :param security_context: The security_context of this PodTemplate. # noqa: E501 :type: V1PodSecurityContext """ self._security_context = security_context @property def tolerations(self): """Gets the tolerations of this PodTemplate. # noqa: E501 If specified, the pod's tolerations. # noqa: E501 :return: The tolerations of this PodTemplate. # noqa: E501 :rtype: list[V1Toleration] """ return self._tolerations @tolerations.setter def tolerations(self, tolerations): """Sets the tolerations of this PodTemplate. If specified, the pod's tolerations. # noqa: E501 :param tolerations: The tolerations of this PodTemplate. # noqa: E501 :type: list[V1Toleration] """ self._tolerations = tolerations @property def volumes(self): """Gets the volumes of this PodTemplate. # noqa: E501 List of volumes that can be mounted by containers belonging to the pod. More info: https://kubernetes.io/docs/concepts/storage/volumes # noqa: E501 :return: The volumes of this PodTemplate. # noqa: E501 :rtype: list[V1Volume] """ return self._volumes @volumes.setter def volumes(self, volumes): """Sets the volumes of this PodTemplate. List of volumes that can be mounted by containers belonging to the pod. 
More info: https://kubernetes.io/docs/concepts/storage/volumes # noqa: E501 :param volumes: The volumes of this PodTemplate. # noqa: E501 :type: list[V1Volume] """ self._volumes = volumes def to_dict(self): """Returns the model properties as a dict""" result = {} for attr, _ in six.iteritems(self.openapi_types): value = getattr(self, attr) if isinstance(value, list): result[attr] = list(map( lambda x: x.to_dict() if hasattr(x, "to_dict") else x, value )) elif hasattr(value, "to_dict"): result[attr] = value.to_dict() elif isinstance(value, dict): result[attr] = dict(map( lambda item: (item[0], item[1].to_dict()) if hasattr(item[1], "to_dict") else item, value.items() )) else: result[attr] = value return result def to_str(self): """Returns the string representation of the model""" return pprint.pformat(self.to_dict()) def __repr__(self): """For `print` and `pprint`""" return self.to_str() def __eq__(self, other): """Returns true if both objects are equal""" if not isinstance(other, PodTemplate): return False return self.to_dict() == other.to_dict() def __ne__(self, other): """Returns true if both objects are not equal""" if not isinstance(other, PodTemplate): return True return self.to_dict() != other.to_dict()
38.329293
485
0.669056
import pprint import re import six from tekton_pipeline.configuration import Configuration class PodTemplate(object): openapi_types = { 'affinity': 'V1Affinity', 'automount_service_account_token': 'bool', 'dns_config': 'V1PodDNSConfig', 'dns_policy': 'str', 'enable_service_links': 'bool', 'host_network': 'bool', 'image_pull_secrets': 'list[V1LocalObjectReference]', 'node_selector': 'dict(str, str)', 'priority_class_name': 'str', 'runtime_class_name': 'str', 'scheduler_name': 'str', 'security_context': 'V1PodSecurityContext', 'tolerations': 'list[V1Toleration]', 'volumes': 'list[V1Volume]' } attribute_map = { 'affinity': 'affinity', 'automount_service_account_token': 'automountServiceAccountToken', 'dns_config': 'dnsConfig', 'dns_policy': 'dnsPolicy', 'enable_service_links': 'enableServiceLinks', 'host_network': 'hostNetwork', 'image_pull_secrets': 'imagePullSecrets', 'node_selector': 'nodeSelector', 'priority_class_name': 'priorityClassName', 'runtime_class_name': 'runtimeClassName', 'scheduler_name': 'schedulerName', 'security_context': 'securityContext', 'tolerations': 'tolerations', 'volumes': 'volumes' } def __init__(self, affinity=None, automount_service_account_token=None, dns_config=None, dns_policy=None, enable_service_links=None, host_network=None, image_pull_secrets=None, node_selector=None, priority_class_name=None, runtime_class_name=None, scheduler_name=None, security_context=None, tolerations=None, volumes=None, local_vars_configuration=None): if local_vars_configuration is None: local_vars_configuration = Configuration() self.local_vars_configuration = local_vars_configuration self._affinity = None self._automount_service_account_token = None self._dns_config = None self._dns_policy = None self._enable_service_links = None self._host_network = None self._image_pull_secrets = None self._node_selector = None self._priority_class_name = None self._runtime_class_name = None self._scheduler_name = None self._security_context = None self._tolerations = 
None self._volumes = None self.discriminator = None if affinity is not None: self.affinity = affinity if automount_service_account_token is not None: self.automount_service_account_token = automount_service_account_token if dns_config is not None: self.dns_config = dns_config if dns_policy is not None: self.dns_policy = dns_policy if enable_service_links is not None: self.enable_service_links = enable_service_links if host_network is not None: self.host_network = host_network if image_pull_secrets is not None: self.image_pull_secrets = image_pull_secrets if node_selector is not None: self.node_selector = node_selector if priority_class_name is not None: self.priority_class_name = priority_class_name if runtime_class_name is not None: self.runtime_class_name = runtime_class_name if scheduler_name is not None: self.scheduler_name = scheduler_name if security_context is not None: self.security_context = security_context if tolerations is not None: self.tolerations = tolerations if volumes is not None: self.volumes = volumes @property def affinity(self): return self._affinity @affinity.setter def affinity(self, affinity): self._affinity = affinity @property def automount_service_account_token(self): return self._automount_service_account_token @automount_service_account_token.setter def automount_service_account_token(self, automount_service_account_token): self._automount_service_account_token = automount_service_account_token @property def dns_config(self): return self._dns_config @dns_config.setter def dns_config(self, dns_config): self._dns_config = dns_config @property def dns_policy(self): return self._dns_policy @dns_policy.setter def dns_policy(self, dns_policy): self._dns_policy = dns_policy @property def enable_service_links(self): return self._enable_service_links @enable_service_links.setter def enable_service_links(self, enable_service_links): self._enable_service_links = enable_service_links @property def host_network(self): return self._host_network 
@host_network.setter def host_network(self, host_network): self._host_network = host_network @property def image_pull_secrets(self): return self._image_pull_secrets @image_pull_secrets.setter def image_pull_secrets(self, image_pull_secrets): self._image_pull_secrets = image_pull_secrets @property def node_selector(self): return self._node_selector @node_selector.setter def node_selector(self, node_selector): self._node_selector = node_selector @property def priority_class_name(self): return self._priority_class_name @priority_class_name.setter def priority_class_name(self, priority_class_name): self._priority_class_name = priority_class_name @property def runtime_class_name(self): return self._runtime_class_name @runtime_class_name.setter def runtime_class_name(self, runtime_class_name): self._runtime_class_name = runtime_class_name @property def scheduler_name(self): return self._scheduler_name @scheduler_name.setter def scheduler_name(self, scheduler_name): self._scheduler_name = scheduler_name @property def security_context(self): return self._security_context @security_context.setter def security_context(self, security_context): self._security_context = security_context @property def tolerations(self): return self._tolerations @tolerations.setter def tolerations(self, tolerations): self._tolerations = tolerations @property def volumes(self): return self._volumes @volumes.setter def volumes(self, volumes): self._volumes = volumes def to_dict(self): result = {} for attr, _ in six.iteritems(self.openapi_types): value = getattr(self, attr) if isinstance(value, list): result[attr] = list(map( lambda x: x.to_dict() if hasattr(x, "to_dict") else x, value )) elif hasattr(value, "to_dict"): result[attr] = value.to_dict() elif isinstance(value, dict): result[attr] = dict(map( lambda item: (item[0], item[1].to_dict()) if hasattr(item[1], "to_dict") else item, value.items() )) else: result[attr] = value return result def to_str(self): return pprint.pformat(self.to_dict()) 
def __repr__(self): return self.to_str() def __eq__(self, other): if not isinstance(other, PodTemplate): return False return self.to_dict() == other.to_dict() def __ne__(self, other): if not isinstance(other, PodTemplate): return True return self.to_dict() != other.to_dict()
true
true
f723cb619eaf3108159317787a8063eb46bfbdab
413
py
Python
experiments/fdtd-2d/tmp_files/144.py
LoopTilingBenchmark/benchmark
52a3d2e70216552a498fd91de02a2fa9cb62122c
[ "BSD-2-Clause" ]
null
null
null
experiments/fdtd-2d/tmp_files/144.py
LoopTilingBenchmark/benchmark
52a3d2e70216552a498fd91de02a2fa9cb62122c
[ "BSD-2-Clause" ]
null
null
null
experiments/fdtd-2d/tmp_files/144.py
LoopTilingBenchmark/benchmark
52a3d2e70216552a498fd91de02a2fa9cb62122c
[ "BSD-2-Clause" ]
null
null
null
from chill import * source('/uufs/chpc.utah.edu/common/home/u1142914/lib/ytopt_vinu/polybench/polybench-code/stencils/fdtd-2d/kernel.c') destination('/uufs/chpc.utah.edu/common/home/u1142914/lib/ytopt_vinu/experiments/fdtd-2d/tmp_files/144.c') procedure('kernel_fdtd_2d') loop(0) known(' nx > 1 ') known(' ny > 1 ') tile(1,2,128,2) tile(1,4,32,4) tile(2,2,128,2) tile(2,4,32,4) tile(3,2,128,2) tile(3,4,32,4)
22.944444
116
0.72155
from chill import * source('/uufs/chpc.utah.edu/common/home/u1142914/lib/ytopt_vinu/polybench/polybench-code/stencils/fdtd-2d/kernel.c') destination('/uufs/chpc.utah.edu/common/home/u1142914/lib/ytopt_vinu/experiments/fdtd-2d/tmp_files/144.c') procedure('kernel_fdtd_2d') loop(0) known(' nx > 1 ') known(' ny > 1 ') tile(1,2,128,2) tile(1,4,32,4) tile(2,2,128,2) tile(2,4,32,4) tile(3,2,128,2) tile(3,4,32,4)
true
true
f723cba44ec9074d909b91e8b3bb8d8e84df91e5
506
py
Python
tests/test_base.py
not-nexus/shelf
ea59703082402ad3b6454482f0487418295fbd19
[ "MIT" ]
4
2016-11-07T13:02:18.000Z
2019-09-03T02:04:05.000Z
tests/test_base.py
not-nexus/shelf
ea59703082402ad3b6454482f0487418295fbd19
[ "MIT" ]
21
2016-11-30T20:44:52.000Z
2017-05-02T15:38:56.000Z
tests/test_base.py
not-nexus/shelf
ea59703082402ad3b6454482f0487418295fbd19
[ "MIT" ]
2
2017-01-24T14:36:04.000Z
2020-01-13T16:10:05.000Z
import pyproctor class TestBase(pyproctor.TestBase): @classmethod def setUpClass(cls): """ This exists to make sure that no matter what, tests will log on stdout. Every call to basicConfig after this point will be a no-op """ # AGI-731 # See jira for more information # # https://github.com/gabrielfalcao/HTTPretty/issues/280 # # logging.basicConfig( # stream=sys.stdout # )
25.3
64
0.561265
import pyproctor class TestBase(pyproctor.TestBase): @classmethod def setUpClass(cls):
true
true
f723cc233a58a2785fb04f6b38b7dd40a29e256b
97
py
Python
tests/test_import.py
Quiltomics/indexd
95274d40f16de881492c2db70a969eb77c8f5e7c
[ "Apache-2.0" ]
2
2019-06-10T15:30:51.000Z
2020-01-18T23:24:13.000Z
tests/test_import.py
lookcrabs/indexd
646a7f336148496b07462ce3d3f8e930fa08a06c
[ "Apache-2.0" ]
15
2019-03-19T21:57:31.000Z
2021-08-11T21:01:33.000Z
tests/test_import.py
NCI-GDC/indexd
d159a82e7da100c807621bc41f2626dae64b4be9
[ "Apache-2.0" ]
1
2020-11-05T15:03:24.000Z
2020-11-05T15:03:24.000Z
def test_import_index(): ''' Try to import the indexd package. ''' import indexd
16.166667
37
0.608247
def test_import_index(): import indexd
true
true
f723ccb1d235fd916b20e25c9c162899696706eb
4,292
py
Python
tests/python/contrib/test_gemm_acc32_vnni.py
jiangzoi/incubator-tvm
144c6f45f7217b9df2f5605e06f0903e470ac11c
[ "Apache-2.0" ]
9
2019-12-17T08:03:54.000Z
2022-01-19T02:34:23.000Z
tests/python/contrib/test_gemm_acc32_vnni.py
jiangzoi/incubator-tvm
144c6f45f7217b9df2f5605e06f0903e470ac11c
[ "Apache-2.0" ]
2
2020-06-18T21:15:42.000Z
2020-06-24T17:38:37.000Z
tests/python/contrib/test_gemm_acc32_vnni.py
jiangzoi/incubator-tvm
144c6f45f7217b9df2f5605e06f0903e470ac11c
[ "Apache-2.0" ]
3
2020-10-04T20:30:18.000Z
2022-01-24T18:03:52.000Z
# Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. # pylint: disable=import-self, invalid-name, unused-argument, too-many-lines, len-as-condition import tvm from tvm import te import numpy as np from topi.x86.tensor_intrin import dot_16x1x16_uint8_int8_int32_cascadelake from topi.x86.tensor_intrin import dot_16x1x16_uint8_int8_int32 import pytest @pytest.mark.skip("skip because feature not enabled") def test_fc_int8_acc32(): m = 1024 n = 1024 k = 1024 X = te.placeholder((m, k), name='X', dtype="uint8") W = te.placeholder((n, k), name='W', dtype="int8") peak = 280 print("Peak {} Gops/s".format(peak)) memory_ops = m * k + n * k + 2 * m * n gops_per_mm = 2 * m * n * k # For LLVM < 8.0, it shows "'cascadelake' is not a recognized processor for this target # (ignoring processor)" error with the following setting. After LLVM 8.0 is enabled in the # test, we should use cascadelake setting. def verify(target="llvm -mcpu=cascadelake"): if not tvm.runtime.enabled(target): print("skip because %s is not enabled..." 
% target) return ctx = tvm.context(target, 0) pc = dot_16x1x16_uint8_int8_int32_cascadelake() ak = te.reduce_axis((0, k), name='k') packedW = te.placeholder( (n // 16, 16 * (k // 4), 4), name='packedW', dtype="int8") t_fc = te.compute((m, n), lambda i, j: te.sum(X[i, ak].astype( "int32") * packedW[j / 16, (ak / 4) * 16 + j % 16, ak % 4].astype("int32"), axis=ak), name="F") t_sch = te.create_schedule(t_fc.op) a_x, a_y = t_fc.op.axis a_k, = t_fc.op.reduce_axis a_yo, a_yi = t_sch[t_fc].split(a_y, factor=16) a_xo, a_xi = t_sch[t_fc].split(a_x, factor=32) a_ko, a_ki = t_sch[t_fc].split(a_k, factor=4) a_koo, a_koi = t_sch[t_fc].split(a_ko, factor=4) t_sch[t_fc].reorder(a_yo, a_xo, a_xi, a_koo, a_koi, a_yi, a_ki) t_sch[t_fc].unroll(a_koi) t_sch[t_fc].tensorize(a_yi, pc) t_func = tvm.build(t_sch, [X, packedW, t_fc], target, name="intrinsic") t_evaluator = t_func.time_evaluator(t_func.entry_name, ctx, number=10) # generate the plain data a_ = np.random.uniform(1, 10, size=(m, k)).astype("uint8") b_ = np.random.uniform(1, 10, size=(n, k)).astype("int8") packW = np.random.uniform(1, 10, size=( n // 16, 16 * (k // 4), 4)).astype("int8") # This occurs in pre_compute stage for r_idx in range(n // 16): for s_idx in range(16 * (k // 4)): for t_idx in range(4): packW[r_idx][s_idx][t_idx] = b_[r_idx * 16 + s_idx % 16][(s_idx // 16) * 4 + t_idx] x = tvm.nd.array(a_, ctx) w = tvm.nd.array(packW, ctx) y = tvm.nd.array(np.zeros((m, n), dtype="int32"), ctx) result = t_evaluator(x, w, y) gops_per_sec = gops_per_mm / result.mean / 1e9 # verify the correctness tvm.testing.assert_allclose(y.asnumpy(), np.dot(a_, b_.T), rtol=0) print('Tensorization: running time: {:.3f} ms, {:.2f} Gops/s, effiency: {:.2f}'.format( result.mean * 1000, gops_per_sec, gops_per_sec / peak)) t_func.export_library("tensorize_acc32.o") verify() if __name__ == "__main__": # The test requires Cascade Lake and newer Intel machines to generate the # correct AVX512 VNNI instruction. So, disabling the test. 
# test_fc_int8_acc32() pass
40.11215
107
0.627213
import tvm from tvm import te import numpy as np from topi.x86.tensor_intrin import dot_16x1x16_uint8_int8_int32_cascadelake from topi.x86.tensor_intrin import dot_16x1x16_uint8_int8_int32 import pytest @pytest.mark.skip("skip because feature not enabled") def test_fc_int8_acc32(): m = 1024 n = 1024 k = 1024 X = te.placeholder((m, k), name='X', dtype="uint8") W = te.placeholder((n, k), name='W', dtype="int8") peak = 280 print("Peak {} Gops/s".format(peak)) memory_ops = m * k + n * k + 2 * m * n gops_per_mm = 2 * m * n * k # (ignoring processor)" error with the following setting. After LLVM 8.0 is enabled in the def verify(target="llvm -mcpu=cascadelake"): if not tvm.runtime.enabled(target): print("skip because %s is not enabled..." % target) return ctx = tvm.context(target, 0) pc = dot_16x1x16_uint8_int8_int32_cascadelake() ak = te.reduce_axis((0, k), name='k') packedW = te.placeholder( (n // 16, 16 * (k // 4), 4), name='packedW', dtype="int8") t_fc = te.compute((m, n), lambda i, j: te.sum(X[i, ak].astype( "int32") * packedW[j / 16, (ak / 4) * 16 + j % 16, ak % 4].astype("int32"), axis=ak), name="F") t_sch = te.create_schedule(t_fc.op) a_x, a_y = t_fc.op.axis a_k, = t_fc.op.reduce_axis a_yo, a_yi = t_sch[t_fc].split(a_y, factor=16) a_xo, a_xi = t_sch[t_fc].split(a_x, factor=32) a_ko, a_ki = t_sch[t_fc].split(a_k, factor=4) a_koo, a_koi = t_sch[t_fc].split(a_ko, factor=4) t_sch[t_fc].reorder(a_yo, a_xo, a_xi, a_koo, a_koi, a_yi, a_ki) t_sch[t_fc].unroll(a_koi) t_sch[t_fc].tensorize(a_yi, pc) t_func = tvm.build(t_sch, [X, packedW, t_fc], target, name="intrinsic") t_evaluator = t_func.time_evaluator(t_func.entry_name, ctx, number=10) a_ = np.random.uniform(1, 10, size=(m, k)).astype("uint8") b_ = np.random.uniform(1, 10, size=(n, k)).astype("int8") packW = np.random.uniform(1, 10, size=( n // 16, 16 * (k // 4), 4)).astype("int8") for r_idx in range(n // 16): for s_idx in range(16 * (k // 4)): for t_idx in range(4): packW[r_idx][s_idx][t_idx] = b_[r_idx * 16 + s_idx % 
16][(s_idx // 16) * 4 + t_idx] x = tvm.nd.array(a_, ctx) w = tvm.nd.array(packW, ctx) y = tvm.nd.array(np.zeros((m, n), dtype="int32"), ctx) result = t_evaluator(x, w, y) gops_per_sec = gops_per_mm / result.mean / 1e9 tvm.testing.assert_allclose(y.asnumpy(), np.dot(a_, b_.T), rtol=0) print('Tensorization: running time: {:.3f} ms, {:.2f} Gops/s, effiency: {:.2f}'.format( result.mean * 1000, gops_per_sec, gops_per_sec / peak)) t_func.export_library("tensorize_acc32.o") verify() if __name__ == "__main__": pass
true
true
f723ccc97ea70d87a17b104645eede99a1853ac3
2,675
py
Python
wings/api_client.py
KnowledgeCaptureAndDiscovery/wings-client
af1d068f4adc07d9060afa94dc99e0b2565be088
[ "Apache-2.0" ]
null
null
null
wings/api_client.py
KnowledgeCaptureAndDiscovery/wings-client
af1d068f4adc07d9060afa94dc99e0b2565be088
[ "Apache-2.0" ]
8
2019-07-28T17:04:38.000Z
2019-08-06T23:57:08.000Z
wings/api_client.py
KnowledgeCaptureAndDiscovery/wings-client
af1d068f4adc07d9060afa94dc99e0b2565be088
[ "Apache-2.0" ]
1
2019-07-29T22:53:41.000Z
2019-07-29T22:53:41.000Z
import atexit import importlib import logging import requests class ApiClient: def __init__(self, **kwargs): self.kwargs = kwargs self.session = requests.Session() self.libns = self.get_export_url() + "components/library.owl#" self.dcdom = self.get_export_url() + "data/ontology.owl#" self.dclib = self.get_export_url() + "data/library.owl#" self.xsdns = "http://www.w3.org/2001/XMLSchema#" self.topcls = "http://www.wings-workflows.org/ontology/component.owl#Component" if self.login(kwargs["password"]) is False: raise ValueError("Login failed") atexit.register(self.logout) def get_server(self): return self.server def get_username(self): return self.username def login(self, password): self.session.get(self.server + "/sparql") data = {"j_username": self.username, "j_password": password} response = self.session.post(self.server + "/j_security_check", data) if response.status_code == 403 or response.status_code == 200: return True return False def logout(self): self.session.get(self.server + "/jsp/login/logout.jsp") self.session.close() def session(self): return self.session def _initialize(self, name): try: module_ = importlib.import_module(".%s" % name, __package__) try: class_ = getattr(module_, name.title()) return class_(api_client=self) except AttributeError: logging.error("Class does not exist") except ImportError: logging.error("Module does not exist %s", name) def close(self): """ Shutdown sessions across all instantiated services """ self.logout() def __getattr__(self, attr): try: setattr(self, attr, self.kwargs[attr]) return getattr(self, attr) except KeyError: setattr(self, attr, self._initialize(attr)) return getattr(self, attr) def get_request_url(self): return self.server + "/users/" + self.username + "/" + self.domain + "/" def get_export_url(self): return ( self.export_url + "/export/users/" + self.username + "/" + self.domain + "/" ) @staticmethod def check_request(resp): try: resp.raise_for_status() except requests.exceptions.HTTPError: raise 
requests.exceptions.HTTPError except requests.exceptions.RequestException: raise requests.exceptions.RequestException return resp
31.104651
88
0.603738
import atexit import importlib import logging import requests class ApiClient: def __init__(self, **kwargs): self.kwargs = kwargs self.session = requests.Session() self.libns = self.get_export_url() + "components/library.owl#" self.dcdom = self.get_export_url() + "data/ontology.owl#" self.dclib = self.get_export_url() + "data/library.owl#" self.xsdns = "http://www.w3.org/2001/XMLSchema#" self.topcls = "http://www.wings-workflows.org/ontology/component.owl#Component" if self.login(kwargs["password"]) is False: raise ValueError("Login failed") atexit.register(self.logout) def get_server(self): return self.server def get_username(self): return self.username def login(self, password): self.session.get(self.server + "/sparql") data = {"j_username": self.username, "j_password": password} response = self.session.post(self.server + "/j_security_check", data) if response.status_code == 403 or response.status_code == 200: return True return False def logout(self): self.session.get(self.server + "/jsp/login/logout.jsp") self.session.close() def session(self): return self.session def _initialize(self, name): try: module_ = importlib.import_module(".%s" % name, __package__) try: class_ = getattr(module_, name.title()) return class_(api_client=self) except AttributeError: logging.error("Class does not exist") except ImportError: logging.error("Module does not exist %s", name) def close(self): self.logout() def __getattr__(self, attr): try: setattr(self, attr, self.kwargs[attr]) return getattr(self, attr) except KeyError: setattr(self, attr, self._initialize(attr)) return getattr(self, attr) def get_request_url(self): return self.server + "/users/" + self.username + "/" + self.domain + "/" def get_export_url(self): return ( self.export_url + "/export/users/" + self.username + "/" + self.domain + "/" ) @staticmethod def check_request(resp): try: resp.raise_for_status() except requests.exceptions.HTTPError: raise requests.exceptions.HTTPError except 
requests.exceptions.RequestException: raise requests.exceptions.RequestException return resp
true
true
f723ce701eeae1b756748f8b4615052fa8ad1b50
4,318
py
Python
ramp-database/ramp_database/tests/test_testing.py
agramfort/ramp-board
1c2cfe7af486e57ee0d4fb017b5266bb8ad152e3
[ "BSD-3-Clause" ]
null
null
null
ramp-database/ramp_database/tests/test_testing.py
agramfort/ramp-board
1c2cfe7af486e57ee0d4fb017b5266bb8ad152e3
[ "BSD-3-Clause" ]
null
null
null
ramp-database/ramp_database/tests/test_testing.py
agramfort/ramp-board
1c2cfe7af486e57ee0d4fb017b5266bb8ad152e3
[ "BSD-3-Clause" ]
null
null
null
import os import shutil import pytest from ramp_utils import read_config from ramp_utils import generate_ramp_config from ramp_utils.testing import database_config_template from ramp_utils.testing import ramp_config_template from ramp_database.utils import setup_db from ramp_database.utils import session_scope from ramp_database.model import Model from ramp_database.exceptions import NameClashError from ramp_database.tools.user import get_user_by_name from ramp_database.tools.event import get_problem from ramp_database.testing import create_test_db from ramp_database.testing import add_events from ramp_database.testing import add_users from ramp_database.testing import add_problems from ramp_database.testing import ramp_config_boston_housing from ramp_database.testing import ramp_config_iris from ramp_database.testing import setup_ramp_kit_ramp_data from ramp_database.testing import sign_up_teams_to_events from ramp_database.testing import submit_all_starting_kits @pytest.fixture(scope='module') def database_config(): return read_config(database_config_template()) @pytest.fixture(scope='module') def ramp_config(): return ramp_config_template() @pytest.fixture def session_scope_function(database_config, ramp_config): try: deployment_dir = create_test_db(database_config, ramp_config) with session_scope(database_config['sqlalchemy']) as session: yield session finally: shutil.rmtree(deployment_dir, ignore_errors=True) db, _ = setup_db(database_config['sqlalchemy']) Model.metadata.drop_all(db) def test_ramp_kit_ramp_data(session_scope_function, ramp_config): internal_ramp_config = generate_ramp_config(read_config(ramp_config)) setup_ramp_kit_ramp_data(internal_ramp_config, 'iris') msg_err = 'The RAMP kit repository was previously cloned.' 
with pytest.raises(ValueError, match=msg_err): setup_ramp_kit_ramp_data(internal_ramp_config, 'iris') # retrieve the path to the ramp kit to remove it shutil.rmtree(internal_ramp_config['ramp_kit_dir']) msg_err = 'The RAMP data repository was previously cloned.' with pytest.raises(ValueError, match=msg_err): setup_ramp_kit_ramp_data(internal_ramp_config, 'iris') setup_ramp_kit_ramp_data(internal_ramp_config, 'iris', force=True) def test_add_users(session_scope_function): add_users(session_scope_function) users = get_user_by_name(session_scope_function, None) for user in users: assert user.name in ('test_user', 'test_user_2', 'test_iris_admin') err_msg = 'username is already in use' with pytest.raises(NameClashError, match=err_msg): add_users(session_scope_function) def test_add_problems(session_scope_function): add_problems(session_scope_function) problems = get_problem(session_scope_function, None) for problem in problems: assert problem.name in ('iris', 'boston_housing') # trying to add twice the same problem will raise a git error since the # repositories already exist. msg_err = 'The RAMP kit repository was previously cloned.' 
with pytest.raises(ValueError, match=msg_err): add_problems(session_scope_function) def test_add_events(session_scope_function): add_problems(session_scope_function) add_events(session_scope_function) with pytest.raises(ValueError): add_events(session_scope_function) def test_sign_up_team_to_events(session_scope_function): add_users(session_scope_function) add_problems(session_scope_function) add_events(session_scope_function) sign_up_teams_to_events(session_scope_function) def test_submit_all_starting_kits(session_scope_function): add_users(session_scope_function) add_problems(session_scope_function) add_events(session_scope_function) sign_up_teams_to_events(session_scope_function) submit_all_starting_kits(session_scope_function) def test_ramp_config_iris(): filename = ramp_config_iris() assert os.path.join('tests', 'data', 'ramp_config_iris.yml') in filename def test_ramp_config_boston_housing(): filename = ramp_config_boston_housing() expected_path = os.path.join('tests', 'data', 'ramp_config_boston_housing.yml') assert expected_path in filename
34.822581
76
0.789717
import os import shutil import pytest from ramp_utils import read_config from ramp_utils import generate_ramp_config from ramp_utils.testing import database_config_template from ramp_utils.testing import ramp_config_template from ramp_database.utils import setup_db from ramp_database.utils import session_scope from ramp_database.model import Model from ramp_database.exceptions import NameClashError from ramp_database.tools.user import get_user_by_name from ramp_database.tools.event import get_problem from ramp_database.testing import create_test_db from ramp_database.testing import add_events from ramp_database.testing import add_users from ramp_database.testing import add_problems from ramp_database.testing import ramp_config_boston_housing from ramp_database.testing import ramp_config_iris from ramp_database.testing import setup_ramp_kit_ramp_data from ramp_database.testing import sign_up_teams_to_events from ramp_database.testing import submit_all_starting_kits @pytest.fixture(scope='module') def database_config(): return read_config(database_config_template()) @pytest.fixture(scope='module') def ramp_config(): return ramp_config_template() @pytest.fixture def session_scope_function(database_config, ramp_config): try: deployment_dir = create_test_db(database_config, ramp_config) with session_scope(database_config['sqlalchemy']) as session: yield session finally: shutil.rmtree(deployment_dir, ignore_errors=True) db, _ = setup_db(database_config['sqlalchemy']) Model.metadata.drop_all(db) def test_ramp_kit_ramp_data(session_scope_function, ramp_config): internal_ramp_config = generate_ramp_config(read_config(ramp_config)) setup_ramp_kit_ramp_data(internal_ramp_config, 'iris') msg_err = 'The RAMP kit repository was previously cloned.' with pytest.raises(ValueError, match=msg_err): setup_ramp_kit_ramp_data(internal_ramp_config, 'iris') shutil.rmtree(internal_ramp_config['ramp_kit_dir']) msg_err = 'The RAMP data repository was previously cloned.' 
with pytest.raises(ValueError, match=msg_err): setup_ramp_kit_ramp_data(internal_ramp_config, 'iris') setup_ramp_kit_ramp_data(internal_ramp_config, 'iris', force=True) def test_add_users(session_scope_function): add_users(session_scope_function) users = get_user_by_name(session_scope_function, None) for user in users: assert user.name in ('test_user', 'test_user_2', 'test_iris_admin') err_msg = 'username is already in use' with pytest.raises(NameClashError, match=err_msg): add_users(session_scope_function) def test_add_problems(session_scope_function): add_problems(session_scope_function) problems = get_problem(session_scope_function, None) for problem in problems: assert problem.name in ('iris', 'boston_housing') msg_err = 'The RAMP kit repository was previously cloned.' with pytest.raises(ValueError, match=msg_err): add_problems(session_scope_function) def test_add_events(session_scope_function): add_problems(session_scope_function) add_events(session_scope_function) with pytest.raises(ValueError): add_events(session_scope_function) def test_sign_up_team_to_events(session_scope_function): add_users(session_scope_function) add_problems(session_scope_function) add_events(session_scope_function) sign_up_teams_to_events(session_scope_function) def test_submit_all_starting_kits(session_scope_function): add_users(session_scope_function) add_problems(session_scope_function) add_events(session_scope_function) sign_up_teams_to_events(session_scope_function) submit_all_starting_kits(session_scope_function) def test_ramp_config_iris(): filename = ramp_config_iris() assert os.path.join('tests', 'data', 'ramp_config_iris.yml') in filename def test_ramp_config_boston_housing(): filename = ramp_config_boston_housing() expected_path = os.path.join('tests', 'data', 'ramp_config_boston_housing.yml') assert expected_path in filename
true
true
f723cf6dae464950f3d8b77edb889dcb5492b6ce
4,453
py
Python
python/ingestor/business.py
agahchen/RSBC-DataHub-API
d3742a09851d5753809e8eb8e1f7f6ca10b121ad
[ "Apache-2.0" ]
null
null
null
python/ingestor/business.py
agahchen/RSBC-DataHub-API
d3742a09851d5753809e8eb8e1f7f6ca10b121ad
[ "Apache-2.0" ]
null
null
null
python/ingestor/business.py
agahchen/RSBC-DataHub-API
d3742a09851d5753809e8eb8e1f7f6ca10b121ad
[ "Apache-2.0" ]
null
null
null
import python.common.middleware as middleware import python.common.actions as actions import python.common.rsi_email as rsi_email import python.common.rest as rest def get_available_time_slots() -> list: """ An application is ready for scheduling when all the payment rules are satisfied plus: - the application has been paid - the window to schedule the review has not elapsed """ return [ {"try": middleware.create_correlation_id, "fail": []}, {"try": middleware.determine_current_datetime, "fail": []}, {"try": middleware.clean_prohibition_number, "fail": []}, {"try": middleware.validate_prohibition_number, "fail": []}, {"try": middleware.get_vips_status, "fail": []}, {"try": middleware.prohibition_exists_in_vips, "fail": []}, {"try": middleware.user_submitted_last_name_matches_vips, "fail": []}, {"try": middleware.application_has_been_saved_to_vips, "fail": []}, {"try": middleware.get_payment_status, "fail": []}, {"try": middleware.received_valid_payment_status, "fail": []}, {"try": middleware.paid_not_more_than_24hrs_ago, "fail": []}, {"try": middleware.application_has_been_paid, "fail": []}, {"try": middleware.review_has_not_been_scheduled, "fail": []}, {"try": middleware.get_application_details, "fail": []}, {"try": middleware.valid_application_received_from_vips, "fail": []}, {"try": middleware.get_invoice_details, "fail": []}, {"try": middleware.calculate_schedule_window, "fail": []}, {"try": middleware.query_review_times_available, "fail": []}, {"try": middleware.does_applicant_have_enough_review_options, "fail": [ {"try": middleware.query_for_additional_review_times, "fail": []}, {"try": middleware.does_applicant_have_enough_review_options, "fail": [ {"try": rsi_email.insufficient_reviews_available, "fail": []}, ]} ]}, ] def ingest_form() -> list: return [ {"try": middleware.content_type_is_xml, "fail": [ {"try": rest.failed_validation, "fail": []}, ]}, {"try": middleware.content_length_within_bounds, "fail": [ {"try": rest.failed_validation, "fail": []}, ]}, 
{"try": middleware.form_name_provided, "fail": [ {"try": rest.failed_validation, "fail": []}, ]}, {"try": middleware.validate_form_name, "fail": [ {"try": rest.failed_validation, "fail": []}, ]}, {"try": middleware.add_encrypt_at_rest_attribute, "fail": []}, {"try": middleware.convert_xml_to_dictionary_object, "fail": [ {"try": rest.server_error, "fail": []}, ]}, {"try": middleware.get_xml_from_request, "fail": []}, {"try": middleware.base_64_encode_xml, "fail": []}, {"try": middleware.create_form_payload, "fail": []}, {"try": middleware.encode_payload, "fail": []}, {"try": middleware.get_queue_name_from_parameters, "fail": []}, {"try": actions.add_to_rabbitmq_queue, "fail": [ {"try": rest.server_error, "fail": []}, ]}, # Useful for debugging: {"try": rsi_email.send_form_xml_to_admin, "fail": []}, {"try": rest.okay, "fail": []} ] def is_okay_to_submit_evidence() -> list: """ Check to determine if an applicant can upload / submit evidence. To submit evidence an applicant must have: - submitted an application, - paid for a review, - scheduled a review date and - the review date must be at 24 hours in the future """ return [ {"try": middleware.create_correlation_id, "fail": []}, {"try": middleware.determine_current_datetime, "fail": []}, {"try": middleware.clean_prohibition_number, "fail": []}, {"try": middleware.validate_prohibition_number, "fail": []}, {"try": middleware.get_vips_status, "fail": []}, {"try": middleware.prohibition_exists_in_vips, "fail": []}, {"try": middleware.user_submitted_last_name_matches_vips, "fail": []}, {"try": middleware.application_has_been_saved_to_vips, "fail": []}, {"try": middleware.application_has_been_paid, "fail": []}, {"try": middleware.review_has_been_scheduled, "fail": []}, {"try": middleware.is_review_more_than_48_hours_in_the_future, "fail": []} ]
46.385417
89
0.614193
import python.common.middleware as middleware import python.common.actions as actions import python.common.rsi_email as rsi_email import python.common.rest as rest def get_available_time_slots() -> list: return [ {"try": middleware.create_correlation_id, "fail": []}, {"try": middleware.determine_current_datetime, "fail": []}, {"try": middleware.clean_prohibition_number, "fail": []}, {"try": middleware.validate_prohibition_number, "fail": []}, {"try": middleware.get_vips_status, "fail": []}, {"try": middleware.prohibition_exists_in_vips, "fail": []}, {"try": middleware.user_submitted_last_name_matches_vips, "fail": []}, {"try": middleware.application_has_been_saved_to_vips, "fail": []}, {"try": middleware.get_payment_status, "fail": []}, {"try": middleware.received_valid_payment_status, "fail": []}, {"try": middleware.paid_not_more_than_24hrs_ago, "fail": []}, {"try": middleware.application_has_been_paid, "fail": []}, {"try": middleware.review_has_not_been_scheduled, "fail": []}, {"try": middleware.get_application_details, "fail": []}, {"try": middleware.valid_application_received_from_vips, "fail": []}, {"try": middleware.get_invoice_details, "fail": []}, {"try": middleware.calculate_schedule_window, "fail": []}, {"try": middleware.query_review_times_available, "fail": []}, {"try": middleware.does_applicant_have_enough_review_options, "fail": [ {"try": middleware.query_for_additional_review_times, "fail": []}, {"try": middleware.does_applicant_have_enough_review_options, "fail": [ {"try": rsi_email.insufficient_reviews_available, "fail": []}, ]} ]}, ] def ingest_form() -> list: return [ {"try": middleware.content_type_is_xml, "fail": [ {"try": rest.failed_validation, "fail": []}, ]}, {"try": middleware.content_length_within_bounds, "fail": [ {"try": rest.failed_validation, "fail": []}, ]}, {"try": middleware.form_name_provided, "fail": [ {"try": rest.failed_validation, "fail": []}, ]}, {"try": middleware.validate_form_name, "fail": [ {"try": rest.failed_validation, 
"fail": []}, ]}, {"try": middleware.add_encrypt_at_rest_attribute, "fail": []}, {"try": middleware.convert_xml_to_dictionary_object, "fail": [ {"try": rest.server_error, "fail": []}, ]}, {"try": middleware.get_xml_from_request, "fail": []}, {"try": middleware.base_64_encode_xml, "fail": []}, {"try": middleware.create_form_payload, "fail": []}, {"try": middleware.encode_payload, "fail": []}, {"try": middleware.get_queue_name_from_parameters, "fail": []}, {"try": actions.add_to_rabbitmq_queue, "fail": [ {"try": rest.server_error, "fail": []}, ]}, {"try": rest.okay, "fail": []} ] def is_okay_to_submit_evidence() -> list: return [ {"try": middleware.create_correlation_id, "fail": []}, {"try": middleware.determine_current_datetime, "fail": []}, {"try": middleware.clean_prohibition_number, "fail": []}, {"try": middleware.validate_prohibition_number, "fail": []}, {"try": middleware.get_vips_status, "fail": []}, {"try": middleware.prohibition_exists_in_vips, "fail": []}, {"try": middleware.user_submitted_last_name_matches_vips, "fail": []}, {"try": middleware.application_has_been_saved_to_vips, "fail": []}, {"try": middleware.application_has_been_paid, "fail": []}, {"try": middleware.review_has_been_scheduled, "fail": []}, {"try": middleware.is_review_more_than_48_hours_in_the_future, "fail": []} ]
true
true
f723cf8e92377defaba93082120d82ab6f27e07b
343
py
Python
tests/test_models.py
devopsmakers/dj-prosftpd
fcef6b4a5b8b872c9830eaf0315f0467dd1944ff
[ "MIT" ]
null
null
null
tests/test_models.py
devopsmakers/dj-prosftpd
fcef6b4a5b8b872c9830eaf0315f0467dd1944ff
[ "MIT" ]
null
null
null
tests/test_models.py
devopsmakers/dj-prosftpd
fcef6b4a5b8b872c9830eaf0315f0467dd1944ff
[ "MIT" ]
null
null
null
#!/usr/bin/env python # -*- coding: utf-8 -*- """ test_dj-prosftpd ------------ Tests for `dj-prosftpd` models module. """ from django.test import TestCase from dj_prosftpd import models class TestDj_prosftpd(TestCase): def setUp(self): pass def test_something(self): pass def tearDown(self): pass
13.192308
38
0.620991
from django.test import TestCase from dj_prosftpd import models class TestDj_prosftpd(TestCase): def setUp(self): pass def test_something(self): pass def tearDown(self): pass
true
true
f723cfc4543d31b1df927182b8dd2e714298c92e
14,427
py
Python
fermi_blind_search/database.py
giacomov/fermi_blind_search
f8d52cb8b61519223918d197682b4f70c78cce10
[ "BSD-3-Clause" ]
null
null
null
fermi_blind_search/database.py
giacomov/fermi_blind_search
f8d52cb8b61519223918d197682b4f70c78cce10
[ "BSD-3-Clause" ]
null
null
null
fermi_blind_search/database.py
giacomov/fermi_blind_search
f8d52cb8b61519223918d197682b4f70c78cce10
[ "BSD-3-Clause" ]
1
2017-04-01T10:42:07.000Z
2017-04-01T10:42:07.000Z
#!/usr/bin/env python from contextlib import contextmanager import argparse import sys import sshtunnel from sqlalchemy import * from sqlalchemy.ext.declarative import declarative_base from sqlalchemy.orm import sessionmaker from fermi_blind_search.configuration import get_config from fermi_blind_search import myLogging _logger = myLogging.log.getLogger("database") # will store the engine that will connect to the database _engine = None # we need this to handle the tables Base = declarative_base() # defines the class that will connect to the database Session = sessionmaker() @contextmanager def database_connection(config): if config.get("SSH db tunnel", "remote_host") != '': """ As of now, we are not using this in the real time search. Instead we are using an autossh connection to facilitate tunneling. However, we are keeping the code here in case an ssh tunnel needs to be established from a python script in the future. """ with sshtunnel.SSHTunnelForwarder(config.get("SSH db tunnel", "remote_host"), ssh_username=config.get("SSH db tunnel", "username"), host_pkey_directories=[ config.get("SSH db tunnel", "key_directory")], remote_bind_address=('127.0.0.1', int(config.get("SSH db tunnel", "tunnel_port"))), local_bind_address=('localhost', int(config.get('Real time', 'db_port'))), ): db_instance = Database(config) try: yield db_instance except: raise finally: db_instance.close() else: db_instance = Database(config) try: yield db_instance except: raise finally: db_instance.close() class Database(object): def __init__(self, config): global Base global Session global _engine # initialize the engine using parameters from the config file if config.get("Real time", "is_sqlite") == "True": engine_url = "sqlite:///" + config.get("Real time", "db_path") else: engine_url = config.get("Real time", "db_dialect") + "://" + config.get("Real time", "db_username") + ":" + \ config.get("Real time", "db_password") + "@" + config.get("Real time", "db_host") + ":" + \ config.get("Real 
time", "db_port") + "/" + config.get("Real time", "db_path") _logger.debug("Database engine URL: %s" % engine_url) _engine = create_engine(engine_url) # bind the engine to the Base Base.metadata.bind = _engine # bind the engine to the session Session.configure(bind=_engine) self._config = config def create_tables(self): # create the Analysis and Results tables Base.metadata.create_all(_engine) _logger.info("Successfully created database tables") def delete_analysis_table(self): # drop the table from the DB try: Analysis.__table__.drop() except: try: # another way to drop the table Analysis.__table__.drop(_engine) except: _logger.error('ERROR: Could not delete Analysis Table') raise else: _logger.info("Successfully deleted Analysis table") def delete_results_table(self): # drop the table from the DB try: Results.__table__.drop() except: try: # another way to drop the table Results.__table__.drop(_engine) except: _logger.error('ERROR: Could not delete Results Table') raise else: _logger.info("Successfully delete Results table") def add_analysis(self, analysis_vals): # TODO: which check that analysis_vals contains the correct field? # TODO: do we want to add a check that the analysis doesn't already exist? assert (analysis_vals['met_start'] is not None and analysis_vals['duration'] is not None and analysis_vals['counts'] is not None and analysis_vals['directory'] is not None), \ "One of the parameters to enter the analysis into the database is missing. 
Parameters are met_start, " \ "duration, counts, and directory" assert isinstance(analysis_vals["counts"], int), "Counts is not an integer" try: # set the values of the analysis to be added to the table new_analysis = Analysis(met_start=analysis_vals['met_start'], duration=analysis_vals['duration'], counts=analysis_vals['counts'], directory=analysis_vals['directory']) _logger.info("Adding this Analysis to the database: %s" % new_analysis) except KeyError: _logger.error('ERROR: The analysis you want to add does not have the proper fields!') raise except: raise else: # open a session, add the analysis to the table, close the session session = Session() session.add(new_analysis) try: session.commit() except: raise else: _logger.debug("Successfully added analysis to db") def update_analysis_counts(self, met_start, duration, new_counts): # open a session with the DB session = Session() # get the analysis to be updated results = session.query(Analysis).filter(Analysis.met_start == met_start).filter( Analysis.duration == duration).all() # check that there is only one analysis that matches these parameters assert len(results) != 0, "Cannot update this analysis because it does not exist" assert len(results) == 1, 'More than one analysis exists with these parameters! This should never happen' analysis = results[0] _logger.info("Updating this analysis: %s to have %s counts" % (analysis, new_counts)) # update the counts column of the analysis in question analysis.counts = new_counts try: # commit the change session.commit() except: raise else: _logger.debug("Successfully updated analysis") def add_candidate(self, candidate_vals): # TODO: which check that condidate_vals contains the correct field? # TODO: do we want to add a check that the candidate doesn't already exist? 
assert (candidate_vals['ra'] is not None and candidate_vals['dec'] is not None and candidate_vals['met_start'] is not None and candidate_vals['interval'] is not None and candidate_vals['email'] is not None), \ "One of the parameters to enter the candidate into the database is missing. Parameters are ra, dec, " \ "met_start, interval, email" try: # set the values of the result to be added to the table new_candidate = Results(ra=candidate_vals['ra'], dec=candidate_vals['dec'], met_start=candidate_vals['met_start'], interval=candidate_vals['interval'], email=candidate_vals['email']) _logger.info("Adding this result to the database %s" % new_candidate) except KeyError: _logger.error('ERROR: The result you want to add does not have the proper fields') raise except: raise else: # open a session, add the result to the table, close the session session = Session() session.add(new_candidate) try: session.commit() except: raise else: _logger.debug("Successfully added result to database") return new_candidate def get_analysis_between_times(self, start, stop): _logger.info("Fetching analyses using data between %s and %s" % (start, stop)) # open a session session = Session() # get all analyses with met_start or met_stop (met_start + duration) times within the range [start,stop] return session.query(Analysis).filter(or_(and_(Analysis.met_start >= start, Analysis.met_start <= stop), and_(Analysis.met_start + Analysis.duration >= start, Analysis.met_start + Analysis.duration <= stop))).all() def get_exact_analysis(self, start, stop): _logger.info("Fetching analysis with met_start = %s and met_start + duration = %s" % (start, stop)) # open a session session = Session() # get all analyses with start time and stop times exactly matching the parameters return session.query(Analysis).filter(and_(Analysis.met_start == start, Analysis.met_start + Analysis.duration == stop)).all() def get_results(self, candidate_vals): # check that candidate vals has the correct fields to perform a search 
assert (candidate_vals['ra'] is not None and candidate_vals['dec'] is not None and candidate_vals['met_start'] is not None and candidate_vals['interval'] is not None), \ "One of the parameters to enter the candidate into the database is missing. Parameters are ra, dec, " \ "met_start, interval" # open a session session = Session() # get the tolerance ranges for determining if we have a match ra_tol = float(self._config.get("Real time", "ra_tol")) dec_tol = float(self._config.get("Real time", "dec_tol")) start_tol = float(self._config.get("Real time", "start_tol")) int_tol = float(self._config.get("Real time", "int_tol")) _logger.info("Fetching results within %s of ra, %s of dec, %s of met_start, and %s of interval of %s" % (ra_tol, dec_tol, start_tol, int_tol, candidate_vals)) # get all results that match the passed candidate within a certain tolerance return session.query(Results).filter(and_(candidate_vals['ra'] - ra_tol <= Results.ra, Results.ra <= candidate_vals['ra'] + ra_tol, candidate_vals['dec'] - dec_tol <= Results.dec, Results.dec <= candidate_vals['dec'] + dec_tol, candidate_vals['met_start'] - start_tol <= Results.met_start, Results.met_start <= candidate_vals['met_start'] + start_tol, candidate_vals['interval'] - int_tol <= Results.interval, Results.interval <= candidate_vals['interval'] + int_tol)).all() def get_results_to_email(self): _logger.info("Fetching results with email = False (0 in database)") # open a session session = Session() # get all results that have not been emailed yet return session.query(Results).filter(Results.email == 0).all() def update_result_email(self, candidate, email_val=False): _logger.info("Updating result: %s to have email value: %s" % (candidate, email_val)) # open a session session = Session() # update the value of the candidate candidate.email = email_val try: # commit the change session.commit() except: raise else: _logger.debug("Successfully updated result") def close(self): global _logger _logger.info("Closing 
database") Session.close_all() class Analysis(Base): # give the table a name __tablename__ = 'analysis' # define the columns of the table met_start = Column(Float(32), Sequence('analysis_met_start_seq'), primary_key=True) duration = Column(Float(32), Sequence('analysis_duration_seq'), primary_key=True) counts = Column(Integer) directory = Column(String(250)) def __repr__(self): # formatting string so that printing rows from the table is more readable return "<Analysis(met_start= %s, duration= %s, counts= %s, directory= %s)>" % \ (self.met_start, self.duration, self.counts, self.directory) class Results(Base): # give the table a name __tablename__ = 'results' # define the columns of the table ra = Column(Float(32)) dec = Column(Float(32)) met_start = Column(Float(32), Sequence('results_met_start_seq'), primary_key=True) interval = Column(Float(32), Sequence('results_interval_seq'), primary_key=True) email = Column(Boolean) def __repr__(self): # formatting string so that printing rows from the table is more readable return "<Results(ra= %s, dec= %s, met_start= %s, interval= %s, email=%s)>" % (self.ra, self.dec, self.met_start, self.interval, self.email) if __name__ == "__main__": # Allows you to quickly delete and re-create the database. parser = argparse.ArgumentParser() parser.add_argument('--config', help='Path to config file', type=get_config, required=True) parser.add_argument('--clear', help="If set, delete the database tables, and recreate them", action="store_true") args = parser.parse_args() configuration = args.config # start db connection db = Database(configuration) if args.clear: # delete the tables db.delete_analysis_table() db.delete_results_table() # re-create the tables db.create_tables()
37.27907
121
0.582311
from contextlib import contextmanager import argparse import sys import sshtunnel from sqlalchemy import * from sqlalchemy.ext.declarative import declarative_base from sqlalchemy.orm import sessionmaker from fermi_blind_search.configuration import get_config from fermi_blind_search import myLogging _logger = myLogging.log.getLogger("database") _engine = None Base = declarative_base() Session = sessionmaker() @contextmanager def database_connection(config): if config.get("SSH db tunnel", "remote_host") != '': with sshtunnel.SSHTunnelForwarder(config.get("SSH db tunnel", "remote_host"), ssh_username=config.get("SSH db tunnel", "username"), host_pkey_directories=[ config.get("SSH db tunnel", "key_directory")], remote_bind_address=('127.0.0.1', int(config.get("SSH db tunnel", "tunnel_port"))), local_bind_address=('localhost', int(config.get('Real time', 'db_port'))), ): db_instance = Database(config) try: yield db_instance except: raise finally: db_instance.close() else: db_instance = Database(config) try: yield db_instance except: raise finally: db_instance.close() class Database(object): def __init__(self, config): global Base global Session global _engine if config.get("Real time", "is_sqlite") == "True": engine_url = "sqlite:///" + config.get("Real time", "db_path") else: engine_url = config.get("Real time", "db_dialect") + "://" + config.get("Real time", "db_username") + ":" + \ config.get("Real time", "db_password") + "@" + config.get("Real time", "db_host") + ":" + \ config.get("Real time", "db_port") + "/" + config.get("Real time", "db_path") _logger.debug("Database engine URL: %s" % engine_url) _engine = create_engine(engine_url) Base.metadata.bind = _engine Session.configure(bind=_engine) self._config = config def create_tables(self): Base.metadata.create_all(_engine) _logger.info("Successfully created database tables") def delete_analysis_table(self): try: Analysis.__table__.drop() except: try: Analysis.__table__.drop(_engine) except: _logger.error('ERROR: 
Could not delete Analysis Table') raise else: _logger.info("Successfully deleted Analysis table") def delete_results_table(self): try: Results.__table__.drop() except: try: Results.__table__.drop(_engine) except: _logger.error('ERROR: Could not delete Results Table') raise else: _logger.info("Successfully delete Results table") def add_analysis(self, analysis_vals): assert (analysis_vals['met_start'] is not None and analysis_vals['duration'] is not None and analysis_vals['counts'] is not None and analysis_vals['directory'] is not None), \ "One of the parameters to enter the analysis into the database is missing. Parameters are met_start, " \ "duration, counts, and directory" assert isinstance(analysis_vals["counts"], int), "Counts is not an integer" try: # set the values of the analysis to be added to the table new_analysis = Analysis(met_start=analysis_vals['met_start'], duration=analysis_vals['duration'], counts=analysis_vals['counts'], directory=analysis_vals['directory']) _logger.info("Adding this Analysis to the database: %s" % new_analysis) except KeyError: _logger.error('ERROR: The analysis you want to add does not have the proper fields!') raise except: raise else: # open a session, add the analysis to the table, close the session session = Session() session.add(new_analysis) try: session.commit() except: raise else: _logger.debug("Successfully added analysis to db") def update_analysis_counts(self, met_start, duration, new_counts): # open a session with the DB session = Session() # get the analysis to be updated results = session.query(Analysis).filter(Analysis.met_start == met_start).filter( Analysis.duration == duration).all() # check that there is only one analysis that matches these parameters assert len(results) != 0, "Cannot update this analysis because it does not exist" assert len(results) == 1, 'More than one analysis exists with these parameters! 
This should never happen' analysis = results[0] _logger.info("Updating this analysis: %s to have %s counts" % (analysis, new_counts)) # update the counts column of the analysis in question analysis.counts = new_counts try: # commit the change session.commit() except: raise else: _logger.debug("Successfully updated analysis") def add_candidate(self, candidate_vals): # TODO: which check that condidate_vals contains the correct field? # TODO: do we want to add a check that the candidate doesn't already exist? assert (candidate_vals['ra'] is not None and candidate_vals['dec'] is not None and candidate_vals['met_start'] is not None and candidate_vals['interval'] is not None and candidate_vals['email'] is not None), \ "One of the parameters to enter the candidate into the database is missing. Parameters are ra, dec, " \ "met_start, interval, email" try: new_candidate = Results(ra=candidate_vals['ra'], dec=candidate_vals['dec'], met_start=candidate_vals['met_start'], interval=candidate_vals['interval'], email=candidate_vals['email']) _logger.info("Adding this result to the database %s" % new_candidate) except KeyError: _logger.error('ERROR: The result you want to add does not have the proper fields') raise except: raise else: session = Session() session.add(new_candidate) try: session.commit() except: raise else: _logger.debug("Successfully added result to database") return new_candidate def get_analysis_between_times(self, start, stop): _logger.info("Fetching analyses using data between %s and %s" % (start, stop)) session = Session() return session.query(Analysis).filter(or_(and_(Analysis.met_start >= start, Analysis.met_start <= stop), and_(Analysis.met_start + Analysis.duration >= start, Analysis.met_start + Analysis.duration <= stop))).all() def get_exact_analysis(self, start, stop): _logger.info("Fetching analysis with met_start = %s and met_start + duration = %s" % (start, stop)) session = Session() return session.query(Analysis).filter(and_(Analysis.met_start == 
start, Analysis.met_start + Analysis.duration == stop)).all() def get_results(self, candidate_vals): assert (candidate_vals['ra'] is not None and candidate_vals['dec'] is not None and candidate_vals['met_start'] is not None and candidate_vals['interval'] is not None), \ "One of the parameters to enter the candidate into the database is missing. Parameters are ra, dec, " \ "met_start, interval" session = Session() ra_tol = float(self._config.get("Real time", "ra_tol")) dec_tol = float(self._config.get("Real time", "dec_tol")) start_tol = float(self._config.get("Real time", "start_tol")) int_tol = float(self._config.get("Real time", "int_tol")) _logger.info("Fetching results within %s of ra, %s of dec, %s of met_start, and %s of interval of %s" % (ra_tol, dec_tol, start_tol, int_tol, candidate_vals)) return session.query(Results).filter(and_(candidate_vals['ra'] - ra_tol <= Results.ra, Results.ra <= candidate_vals['ra'] + ra_tol, candidate_vals['dec'] - dec_tol <= Results.dec, Results.dec <= candidate_vals['dec'] + dec_tol, candidate_vals['met_start'] - start_tol <= Results.met_start, Results.met_start <= candidate_vals['met_start'] + start_tol, candidate_vals['interval'] - int_tol <= Results.interval, Results.interval <= candidate_vals['interval'] + int_tol)).all() def get_results_to_email(self): _logger.info("Fetching results with email = False (0 in database)") session = Session() return session.query(Results).filter(Results.email == 0).all() def update_result_email(self, candidate, email_val=False): _logger.info("Updating result: %s to have email value: %s" % (candidate, email_val)) session = Session() candidate.email = email_val try: session.commit() except: raise else: _logger.debug("Successfully updated result") def close(self): global _logger _logger.info("Closing database") Session.close_all() class Analysis(Base): __tablename__ = 'analysis' met_start = Column(Float(32), Sequence('analysis_met_start_seq'), primary_key=True) duration = Column(Float(32), 
Sequence('analysis_duration_seq'), primary_key=True) counts = Column(Integer) directory = Column(String(250)) def __repr__(self): return "<Analysis(met_start= %s, duration= %s, counts= %s, directory= %s)>" % \ (self.met_start, self.duration, self.counts, self.directory) class Results(Base): __tablename__ = 'results' ra = Column(Float(32)) dec = Column(Float(32)) met_start = Column(Float(32), Sequence('results_met_start_seq'), primary_key=True) interval = Column(Float(32), Sequence('results_interval_seq'), primary_key=True) email = Column(Boolean) def __repr__(self): return "<Results(ra= %s, dec= %s, met_start= %s, interval= %s, email=%s)>" % (self.ra, self.dec, self.met_start, self.interval, self.email) if __name__ == "__main__": parser = argparse.ArgumentParser() parser.add_argument('--config', help='Path to config file', type=get_config, required=True) parser.add_argument('--clear', help="If set, delete the database tables, and recreate them", action="store_true") args = parser.parse_args() configuration = args.config db = Database(configuration) if args.clear: db.delete_analysis_table() db.delete_results_table() db.create_tables()
true
true
f723cfd084712344629373ab39822ddba59ccffa
2,538
py
Python
irrigator_pro/farms/migrations/0012_copy_field_list_to_field_for_waterhistory_and_probe.py
warnes/irrigatorpro
4838f8832bdbf87f394a0298adc5dabfc26e82e8
[ "MIT" ]
null
null
null
irrigator_pro/farms/migrations/0012_copy_field_list_to_field_for_waterhistory_and_probe.py
warnes/irrigatorpro
4838f8832bdbf87f394a0298adc5dabfc26e82e8
[ "MIT" ]
null
null
null
irrigator_pro/farms/migrations/0012_copy_field_list_to_field_for_waterhistory_and_probe.py
warnes/irrigatorpro
4838f8832bdbf87f394a0298adc5dabfc26e82e8
[ "MIT" ]
null
null
null
# -*- coding: utf-8 -*- from __future__ import unicode_literals from django.db import models, migrations import django.utils.timezone import sys from django.db import connection def convert_probe_field_list_to_field(apps, schema_editor): """ For each current Probe, copy field_list[0] into field, and create new records for field_list[1:] ** For some reason, probe.field_list.all() doesn't work here, so use direct SQL instead. ** """ Probe = apps.get_model("farms", "Probe") cursor = connection.cursor() cursor.execute("select * from farms_probe_field_list") previous_id = -1 for (id, probe_id, field_id) in cursor.fetchall(): #print "Working on record #%d: Probe #%d, Field #%d" %(id, probe_id, field_id) """ If there are multiple fields assigned just preserve the first one otherwise a unique constraing on the pair (cdop_season_id, radio_id) will be violated. This is necessary only for Alain's test server. """ if probe_id != previous_id: previous_id = probe_id probe = Probe.objects.get(id=probe_id) if probe.field_id is None: probe.field_id = field_id else: probe.id = None probe.pk = None probe.field_id = field_id probe.save() def convert_waterhistory_field_list_to_field(apps, schema_editor): """ For each current WaterHistory object, copy field_list[0] into field, and create new records for field_list[1:] ** For some reason, waterhistory.field_list.all() doesn't work here, so use direct SQL instead. 
** """ WaterHistory = apps.get_model("farms", "WaterHistory") cursor = connection.cursor() cursor.execute("select * from farms_waterhistory_field_list") for (id, waterhistory_id, field_id) in cursor.fetchall(): #print "Working on ", (id, waterhistory_id, field_id) waterhistory = WaterHistory.objects.get(id=waterhistory_id) if waterhistory.field_id is None: waterhistory.field_id = field_id else: waterhistory.id = None waterhistory.pk = None waterhistory.field_id = field_id waterhistory.save() class Migration(migrations.Migration): dependencies = [ ('farms', '0011_add_ForeignKey_for_field_to_waterhistory_and_probe'), ] operations = [ migrations.RunPython( convert_probe_field_list_to_field ), migrations.RunPython( convert_waterhistory_field_list_to_field), ]
29.511628
84
0.670607
from __future__ import unicode_literals from django.db import models, migrations import django.utils.timezone import sys from django.db import connection def convert_probe_field_list_to_field(apps, schema_editor): Probe = apps.get_model("farms", "Probe") cursor = connection.cursor() cursor.execute("select * from farms_probe_field_list") previous_id = -1 for (id, probe_id, field_id) in cursor.fetchall(): if probe_id != previous_id: previous_id = probe_id probe = Probe.objects.get(id=probe_id) if probe.field_id is None: probe.field_id = field_id else: probe.id = None probe.pk = None probe.field_id = field_id probe.save() def convert_waterhistory_field_list_to_field(apps, schema_editor): WaterHistory = apps.get_model("farms", "WaterHistory") cursor = connection.cursor() cursor.execute("select * from farms_waterhistory_field_list") for (id, waterhistory_id, field_id) in cursor.fetchall(): waterhistory = WaterHistory.objects.get(id=waterhistory_id) if waterhistory.field_id is None: waterhistory.field_id = field_id else: waterhistory.id = None waterhistory.pk = None waterhistory.field_id = field_id waterhistory.save() class Migration(migrations.Migration): dependencies = [ ('farms', '0011_add_ForeignKey_for_field_to_waterhistory_and_probe'), ] operations = [ migrations.RunPython( convert_probe_field_list_to_field ), migrations.RunPython( convert_waterhistory_field_list_to_field), ]
true
true
f723d086e096878de8624880928432132d608eb2
7,000
py
Python
rllib/agents/marwil/tests/test_marwil.py
kifarid/ray
43c97c2afb979987be82fa50048674e9b6776d5d
[ "Apache-2.0" ]
3
2021-08-29T20:41:21.000Z
2022-01-31T18:47:51.000Z
rllib/agents/marwil/tests/test_marwil.py
kifarid/ray
43c97c2afb979987be82fa50048674e9b6776d5d
[ "Apache-2.0" ]
61
2021-06-05T07:05:08.000Z
2022-03-19T07:14:56.000Z
rllib/agents/marwil/tests/test_marwil.py
kifarid/ray
43c97c2afb979987be82fa50048674e9b6776d5d
[ "Apache-2.0" ]
null
null
null
import numpy as np import os from pathlib import Path import unittest import ray import ray.rllib.agents.marwil as marwil from ray.rllib.evaluation.postprocessing import compute_advantages from ray.rllib.offline import JsonReader from ray.rllib.utils.framework import try_import_tf, try_import_torch from ray.rllib.utils.test_utils import check, check_compute_single_action, \ framework_iterator tf1, tf, tfv = try_import_tf() torch, _ = try_import_torch() class TestMARWIL(unittest.TestCase): @classmethod def setUpClass(cls): ray.init(num_cpus=4) @classmethod def tearDownClass(cls): ray.shutdown() def test_marwil_compilation_and_learning_from_offline_file(self): """Test whether a MARWILTrainer can be built with all frameworks. Learns from a historic-data file. To generate this data, first run: $ ./train.py --run=PPO --env=CartPole-v0 \ --stop='{"timesteps_total": 50000}' \ --config='{"output": "/tmp/out", "batch_mode": "complete_episodes"}' """ rllib_dir = Path(__file__).parent.parent.parent.parent print("rllib dir={}".format(rllib_dir)) data_file = os.path.join(rllib_dir, "tests/data/cartpole/large.json") print("data_file={} exists={}".format(data_file, os.path.isfile(data_file))) config = marwil.DEFAULT_CONFIG.copy() config["num_workers"] = 2 config["evaluation_num_workers"] = 1 config["evaluation_interval"] = 2 # Evaluate on actual environment. config["evaluation_config"] = {"input": "sampler"} # Learn from offline data. config["input"] = [data_file] num_iterations = 350 min_reward = 70.0 # Test for all frameworks. for _ in framework_iterator(config, frameworks=("tf", "torch")): trainer = marwil.MARWILTrainer(config=config, env="CartPole-v0") learnt = False for i in range(num_iterations): eval_results = trainer.train().get("evaluation") if eval_results: print("iter={} R={} ".format( i, eval_results["episode_reward_mean"])) # Learn until some reward is reached on an actual live env. 
if eval_results["episode_reward_mean"] > min_reward: print("learnt!") learnt = True break if not learnt: raise ValueError( "MARWILTrainer did not reach {} reward from expert " "offline data!".format(min_reward)) check_compute_single_action( trainer, include_prev_action_reward=True) trainer.stop() def test_marwil_loss_function(self): """ To generate the historic data used in this test case, first run: $ ./train.py --run=PPO --env=CartPole-v0 \ --stop='{"timesteps_total": 50000}' \ --config='{"output": "/tmp/out", "batch_mode": "complete_episodes"}' """ rllib_dir = Path(__file__).parent.parent.parent.parent print("rllib dir={}".format(rllib_dir)) data_file = os.path.join(rllib_dir, "tests/data/cartpole/small.json") print("data_file={} exists={}".format(data_file, os.path.isfile(data_file))) config = marwil.DEFAULT_CONFIG.copy() config["num_workers"] = 0 # Run locally. # Learn from offline data. config["input"] = [data_file] for fw, sess in framework_iterator(config, session=True): reader = JsonReader(inputs=[data_file]) batch = reader.next() trainer = marwil.MARWILTrainer(config=config, env="CartPole-v0") policy = trainer.get_policy() model = policy.model # Calculate our own expected values (to then compare against the # agent's loss output). 
cummulative_rewards = compute_advantages( batch, 0.0, config["gamma"], 1.0, False, False)["advantages"] if fw == "torch": cummulative_rewards = torch.tensor(cummulative_rewards) if fw != "tf": batch = policy._lazy_tensor_dict(batch) model_out, _ = model.from_batch(batch) vf_estimates = model.value_function() if fw == "tf": model_out, vf_estimates = \ policy.get_session().run([model_out, vf_estimates]) adv = cummulative_rewards - vf_estimates if fw == "torch": adv = adv.detach().cpu().numpy() adv_squared = np.mean(np.square(adv)) c_2 = 100.0 + 1e-8 * (adv_squared - 100.0) c = np.sqrt(c_2) exp_advs = np.exp(config["beta"] * (adv / c)) dist = policy.dist_class(model_out, model) logp = dist.logp(batch["actions"]) if fw == "torch": logp = logp.detach().cpu().numpy() elif fw == "tf": logp = sess.run(logp) # Calculate all expected loss components. expected_vf_loss = 0.5 * adv_squared expected_pol_loss = -1.0 * np.mean(exp_advs * logp) expected_loss = \ expected_pol_loss + config["vf_coeff"] * expected_vf_loss # Calculate the algorithm's loss (to check against our own # calculation above). batch.set_get_interceptor(None) postprocessed_batch = policy.postprocess_trajectory(batch) loss_func = marwil.marwil_tf_policy.marwil_loss if fw != "torch" \ else marwil.marwil_torch_policy.marwil_loss if fw != "tf": policy._lazy_tensor_dict(postprocessed_batch) loss_out = loss_func(policy, model, policy.dist_class, postprocessed_batch) else: loss_out, v_loss, p_loss = policy.get_session().run( [policy._loss, policy.loss.v_loss, policy.loss.p_loss], feed_dict=policy._get_loss_inputs_dict( postprocessed_batch, shuffle=False)) # Check all components. 
if fw == "torch": check(policy.v_loss, expected_vf_loss, decimals=4) check(policy.p_loss, expected_pol_loss, decimals=4) elif fw == "tf": check(v_loss, expected_vf_loss, decimals=4) check(p_loss, expected_pol_loss, decimals=4) else: check(policy.loss.v_loss, expected_vf_loss, decimals=4) check(policy.loss.p_loss, expected_pol_loss, decimals=4) check(loss_out, expected_loss, decimals=3) if __name__ == "__main__": import pytest import sys sys.exit(pytest.main(["-v", __file__]))
41.666667
79
0.576857
import numpy as np import os from pathlib import Path import unittest import ray import ray.rllib.agents.marwil as marwil from ray.rllib.evaluation.postprocessing import compute_advantages from ray.rllib.offline import JsonReader from ray.rllib.utils.framework import try_import_tf, try_import_torch from ray.rllib.utils.test_utils import check, check_compute_single_action, \ framework_iterator tf1, tf, tfv = try_import_tf() torch, _ = try_import_torch() class TestMARWIL(unittest.TestCase): @classmethod def setUpClass(cls): ray.init(num_cpus=4) @classmethod def tearDownClass(cls): ray.shutdown() def test_marwil_compilation_and_learning_from_offline_file(self): rllib_dir = Path(__file__).parent.parent.parent.parent print("rllib dir={}".format(rllib_dir)) data_file = os.path.join(rllib_dir, "tests/data/cartpole/large.json") print("data_file={} exists={}".format(data_file, os.path.isfile(data_file))) config = marwil.DEFAULT_CONFIG.copy() config["num_workers"] = 2 config["evaluation_num_workers"] = 1 config["evaluation_interval"] = 2 config["evaluation_config"] = {"input": "sampler"} config["input"] = [data_file] num_iterations = 350 min_reward = 70.0 for _ in framework_iterator(config, frameworks=("tf", "torch")): trainer = marwil.MARWILTrainer(config=config, env="CartPole-v0") learnt = False for i in range(num_iterations): eval_results = trainer.train().get("evaluation") if eval_results: print("iter={} R={} ".format( i, eval_results["episode_reward_mean"])) if eval_results["episode_reward_mean"] > min_reward: print("learnt!") learnt = True break if not learnt: raise ValueError( "MARWILTrainer did not reach {} reward from expert " "offline data!".format(min_reward)) check_compute_single_action( trainer, include_prev_action_reward=True) trainer.stop() def test_marwil_loss_function(self): rllib_dir = Path(__file__).parent.parent.parent.parent print("rllib dir={}".format(rllib_dir)) data_file = os.path.join(rllib_dir, "tests/data/cartpole/small.json") print("data_file={} 
exists={}".format(data_file, os.path.isfile(data_file))) config = marwil.DEFAULT_CONFIG.copy() config["num_workers"] = 0 config["input"] = [data_file] for fw, sess in framework_iterator(config, session=True): reader = JsonReader(inputs=[data_file]) batch = reader.next() trainer = marwil.MARWILTrainer(config=config, env="CartPole-v0") policy = trainer.get_policy() model = policy.model cummulative_rewards = compute_advantages( batch, 0.0, config["gamma"], 1.0, False, False)["advantages"] if fw == "torch": cummulative_rewards = torch.tensor(cummulative_rewards) if fw != "tf": batch = policy._lazy_tensor_dict(batch) model_out, _ = model.from_batch(batch) vf_estimates = model.value_function() if fw == "tf": model_out, vf_estimates = \ policy.get_session().run([model_out, vf_estimates]) adv = cummulative_rewards - vf_estimates if fw == "torch": adv = adv.detach().cpu().numpy() adv_squared = np.mean(np.square(adv)) c_2 = 100.0 + 1e-8 * (adv_squared - 100.0) c = np.sqrt(c_2) exp_advs = np.exp(config["beta"] * (adv / c)) dist = policy.dist_class(model_out, model) logp = dist.logp(batch["actions"]) if fw == "torch": logp = logp.detach().cpu().numpy() elif fw == "tf": logp = sess.run(logp) # Calculate all expected loss components. 
expected_vf_loss = 0.5 * adv_squared expected_pol_loss = -1.0 * np.mean(exp_advs * logp) expected_loss = \ expected_pol_loss + config["vf_coeff"] * expected_vf_loss # Calculate the algorithm's loss (to check against our own batch.set_get_interceptor(None) postprocessed_batch = policy.postprocess_trajectory(batch) loss_func = marwil.marwil_tf_policy.marwil_loss if fw != "torch" \ else marwil.marwil_torch_policy.marwil_loss if fw != "tf": policy._lazy_tensor_dict(postprocessed_batch) loss_out = loss_func(policy, model, policy.dist_class, postprocessed_batch) else: loss_out, v_loss, p_loss = policy.get_session().run( [policy._loss, policy.loss.v_loss, policy.loss.p_loss], feed_dict=policy._get_loss_inputs_dict( postprocessed_batch, shuffle=False)) if fw == "torch": check(policy.v_loss, expected_vf_loss, decimals=4) check(policy.p_loss, expected_pol_loss, decimals=4) elif fw == "tf": check(v_loss, expected_vf_loss, decimals=4) check(p_loss, expected_pol_loss, decimals=4) else: check(policy.loss.v_loss, expected_vf_loss, decimals=4) check(policy.loss.p_loss, expected_pol_loss, decimals=4) check(loss_out, expected_loss, decimals=3) if __name__ == "__main__": import pytest import sys sys.exit(pytest.main(["-v", __file__]))
true
true
f723d0fdbd1846134fc03f67f0123e5aedd5f7e6
1,938
py
Python
config.py
LucaMalavolta/q2
d4cd62c3ea898c99334ea84e2b41ec75db9558f7
[ "BSD-2-Clause" ]
null
null
null
config.py
LucaMalavolta/q2
d4cd62c3ea898c99334ea84e2b41ec75db9558f7
[ "BSD-2-Clause" ]
null
null
null
config.py
LucaMalavolta/q2
d4cd62c3ea898c99334ea84e2b41ec75db9558f7
[ "BSD-2-Clause" ]
null
null
null
import os import logging import matplotlib.pyplot as plt logger = logging.getLogger(__name__) path = os.path.dirname(os.path.realpath(__file__)) path = os.path.join(path, 'Data') COLORTEFF_PATH = os.path.join(path, 'ColorTeff') MODATM_PATH = os.path.join(path, 'ModelAtmospheres') ISOCHRONES_PATH = os.path.join(path, 'Isochrones') OTHER_PATH = os.path.join(path, 'Other') plt.rc("font", family='serif', serif='Ubuntu', monospace='Ubuntu Mono', \ size=14) plt.rc("axes", labelsize=15, titlesize=12) plt.rc("xtick", top=True, direction='in', labelsize=14) plt.rc("xtick.major", size=8, width=1) plt.rc("ytick", right=True, direction='in', labelsize=14) plt.rc("ytick.major", size=8, width=1) plt.rc("lines", markersize=10, markeredgewidth=2) plt.rc("lines", linewidth=3) def moog_is_available(): """You should be able to run MOOGSILENT from the command line in order to use the MOOG features included in q2. This function checks if MOOG is available on your system. If False, you wont be able to connect q2 to MOOG and many things will fail. """ if os.system('which MOOGSILENT >/dev/null'): logger.warning("MOOGSILENT is not available") return False else: logger.info("MOOGSILENT is available") return True def data_are_available(): """q2 needs data files with model atmosphere and isochrone grids. These files can be downloaded from: http://www.astrochasqui.com/projects/astro/share/q2Data.tar.gz They need to be extracted inside the q2 directory. 'tar xvfz q2Data.tar.gz' will create the Data folder. """ if os.path.exists(path): logger.info("Data folder exists") return True else: logger.warning("Data folder does not exist. See the 'Data' section "\ "at https://github.com/astroChasqui/q2") return False
36.566038
78
0.663055
import os import logging import matplotlib.pyplot as plt logger = logging.getLogger(__name__) path = os.path.dirname(os.path.realpath(__file__)) path = os.path.join(path, 'Data') COLORTEFF_PATH = os.path.join(path, 'ColorTeff') MODATM_PATH = os.path.join(path, 'ModelAtmospheres') ISOCHRONES_PATH = os.path.join(path, 'Isochrones') OTHER_PATH = os.path.join(path, 'Other') plt.rc("font", family='serif', serif='Ubuntu', monospace='Ubuntu Mono', \ size=14) plt.rc("axes", labelsize=15, titlesize=12) plt.rc("xtick", top=True, direction='in', labelsize=14) plt.rc("xtick.major", size=8, width=1) plt.rc("ytick", right=True, direction='in', labelsize=14) plt.rc("ytick.major", size=8, width=1) plt.rc("lines", markersize=10, markeredgewidth=2) plt.rc("lines", linewidth=3) def moog_is_available(): if os.system('which MOOGSILENT >/dev/null'): logger.warning("MOOGSILENT is not available") return False else: logger.info("MOOGSILENT is available") return True def data_are_available(): if os.path.exists(path): logger.info("Data folder exists") return True else: logger.warning("Data folder does not exist. See the 'Data' section "\ "at https://github.com/astroChasqui/q2") return False
true
true
f723d13c5a9085305d4224504015e2a6d6c0eef1
1,341
py
Python
concepts/recursion/fibonacci_sesies.py
dnootana/Python
2881bafe8bc378fa3cae50a747fcea1a55630c63
[ "MIT" ]
1
2021-02-19T11:00:11.000Z
2021-02-19T11:00:11.000Z
concepts/recursion/fibonacci_sesies.py
dnootana/Python
2881bafe8bc378fa3cae50a747fcea1a55630c63
[ "MIT" ]
null
null
null
concepts/recursion/fibonacci_sesies.py
dnootana/Python
2881bafe8bc378fa3cae50a747fcea1a55630c63
[ "MIT" ]
null
null
null
#!/usr/bin/env python3.8 """ given a number N generate N no of fibonacci numbers """ from memoize import memoize number = 25 fib_list = [None] * (number) def fibonacci(num): """ fibonacci series using iteration """ a, b = 0, 1 for i in range(num-1): fib_list[i], fib_list[i+1] = a, b a, b = b, a+b return a fibonacci(number) print("fibonacci series using iteration : ", fib_list) fib_list = [None] * (number) def fibonacci_a(num): fib_list[0:2] = [0,1] for i in range(2,num): fib_list[i] = fib_list[i-2] + fib_list[i-1] fibonacci_a(number) print("fibonacci series using iteration : ", fib_list) fib_list = [None] * (number) def fibonacci1(num): """ fibonacci series using recursion """ global fib_list if fib_list[num] is None: if num == 0 or num == 1: fib_list[num] = num else: fib_list[num] = fibonacci1(num-1) + fibonacci1(num-2) return fib_list[num] fibonacci1(number-1) print("fibonacci series using recursion : ",fib_list) fib_list = [None] * (number) def fibonacci2(num): """ fibonacci series using recursion with memoization """ global fib_list def fib(N): if N==0 or N==1: return N else: return fib(N-1) + fib(N-2) fib = memoize(fib) for i in range(num): fib_list[i] = fib(i) fibonacci2(number) print("fibonacci series using recursion with memoization : ", fib_list)
19.434783
71
0.671141
from memoize import memoize number = 25 fib_list = [None] * (number) def fibonacci(num): a, b = 0, 1 for i in range(num-1): fib_list[i], fib_list[i+1] = a, b a, b = b, a+b return a fibonacci(number) print("fibonacci series using iteration : ", fib_list) fib_list = [None] * (number) def fibonacci_a(num): fib_list[0:2] = [0,1] for i in range(2,num): fib_list[i] = fib_list[i-2] + fib_list[i-1] fibonacci_a(number) print("fibonacci series using iteration : ", fib_list) fib_list = [None] * (number) def fibonacci1(num): global fib_list if fib_list[num] is None: if num == 0 or num == 1: fib_list[num] = num else: fib_list[num] = fibonacci1(num-1) + fibonacci1(num-2) return fib_list[num] fibonacci1(number-1) print("fibonacci series using recursion : ",fib_list) fib_list = [None] * (number) def fibonacci2(num): global fib_list def fib(N): if N==0 or N==1: return N else: return fib(N-1) + fib(N-2) fib = memoize(fib) for i in range(num): fib_list[i] = fib(i) fibonacci2(number) print("fibonacci series using recursion with memoization : ", fib_list)
true
true
f723d38041e06dcc3fe6b444f4b546e4e28b9d49
754
py
Python
pydatastructs/linear_data_structures/__init__.py
hpnightowl/pydatastructs
ec69ef887fee200390bff41ab6859a4ab0b26fbf
[ "BSD-3-Clause" ]
null
null
null
pydatastructs/linear_data_structures/__init__.py
hpnightowl/pydatastructs
ec69ef887fee200390bff41ab6859a4ab0b26fbf
[ "BSD-3-Clause" ]
null
null
null
pydatastructs/linear_data_structures/__init__.py
hpnightowl/pydatastructs
ec69ef887fee200390bff41ab6859a4ab0b26fbf
[ "BSD-3-Clause" ]
1
2021-02-05T04:49:55.000Z
2021-02-05T04:49:55.000Z
__all__ = [] from pydatastructs.linear_data_structures import arrays, linked_lists, algorithms from pydatastructs.linear_data_structures.arrays import OneDimensionalArray, DynamicOneDimensionalArray, \ MultiDimensionalArray from pydatastructs.linear_data_structures.algorithms import merge_sort_parallel, brick_sort, brick_sort_parallel, \ heapsort, matrix_multiply_parallel, counting_sort, bucket_sort, cocktail_shaker_sort, quick_sort, \ longest_common_subsequence from pydatastructs.linear_data_structures.linked_lists import SinglyLinkedList, DoublyLinkedList, \ SinglyCircularLinkedList, DoublyCircularLinkedList, SkipList __all__.extend(arrays.__all__) __all__.extend(linked_lists.__all__) __all__.extend(algorithms.__all__)
37.7
115
0.855438
__all__ = [] from pydatastructs.linear_data_structures import arrays, linked_lists, algorithms from pydatastructs.linear_data_structures.arrays import OneDimensionalArray, DynamicOneDimensionalArray, \ MultiDimensionalArray from pydatastructs.linear_data_structures.algorithms import merge_sort_parallel, brick_sort, brick_sort_parallel, \ heapsort, matrix_multiply_parallel, counting_sort, bucket_sort, cocktail_shaker_sort, quick_sort, \ longest_common_subsequence from pydatastructs.linear_data_structures.linked_lists import SinglyLinkedList, DoublyLinkedList, \ SinglyCircularLinkedList, DoublyCircularLinkedList, SkipList __all__.extend(arrays.__all__) __all__.extend(linked_lists.__all__) __all__.extend(algorithms.__all__)
true
true
f723d3bddf9b5128923113de82df064942d27440
391
py
Python
Epitome/wsgi.py
pbout/ept
7da64e606c9c163ffc8285cd8c77288807f4f477
[ "RSA-MD" ]
null
null
null
Epitome/wsgi.py
pbout/ept
7da64e606c9c163ffc8285cd8c77288807f4f477
[ "RSA-MD" ]
null
null
null
Epitome/wsgi.py
pbout/ept
7da64e606c9c163ffc8285cd8c77288807f4f477
[ "RSA-MD" ]
null
null
null
""" WSGI config for Epitome project. It exposes the WSGI callable as a module-level variable named ``application``. For more information on this file, see https://docs.djangoproject.com/en/1.9/howto/deployment/wsgi/ """ import os from django.core.wsgi import get_wsgi_application os.environ.setdefault("DJANGO_SETTINGS_MODULE", "Epitome.settings") application = get_wsgi_application()
23
78
0.785166
import os from django.core.wsgi import get_wsgi_application os.environ.setdefault("DJANGO_SETTINGS_MODULE", "Epitome.settings") application = get_wsgi_application()
true
true
f723d4daf4d337355a5c29418d6ad94c37865a78
3,109
py
Python
meiduotest/meiduotest/settings.py
littrell0/meiduo_project_test
3d2f6a6e528e6c45a5ffc1db0ddc5de9b4e52bf8
[ "MIT" ]
null
null
null
meiduotest/meiduotest/settings.py
littrell0/meiduo_project_test
3d2f6a6e528e6c45a5ffc1db0ddc5de9b4e52bf8
[ "MIT" ]
null
null
null
meiduotest/meiduotest/settings.py
littrell0/meiduo_project_test
3d2f6a6e528e6c45a5ffc1db0ddc5de9b4e52bf8
[ "MIT" ]
null
null
null
""" Django settings for meiduotest project. Generated by 'django-admin startproject' using Django 1.11.11. For more information on this file, see https://docs.djangoproject.com/en/1.11/topics/settings/ For the full list of settings and their values, see https://docs.djangoproject.com/en/1.11/ref/settings/ """ import os # Build paths inside the project like this: os.path.join(BASE_DIR, ...) BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__))) # Quick-start development settings - unsuitable for production # See https://docs.djangoproject.com/en/1.11/howto/deployment/checklist/ # SECURITY WARNING: keep the secret key used in production secret! SECRET_KEY = '8zj*1u%iy2m+d0hg$@#5d(nrr5_-2u))cqkf^_3wj&f3ayr)o=' # SECURITY WARNING: don't run with debug turned on in production! DEBUG = True ALLOWED_HOSTS = [] # Application definition INSTALLED_APPS = [ 'django.contrib.admin', 'django.contrib.auth', 'django.contrib.contenttypes', 'django.contrib.sessions', 'django.contrib.messages', 'django.contrib.staticfiles', ] MIDDLEWARE = [ 'django.middleware.security.SecurityMiddleware', 'django.contrib.sessions.middleware.SessionMiddleware', 'django.middleware.common.CommonMiddleware', 'django.middleware.csrf.CsrfViewMiddleware', 'django.contrib.auth.middleware.AuthenticationMiddleware', 'django.contrib.messages.middleware.MessageMiddleware', 'django.middleware.clickjacking.XFrameOptionsMiddleware', ] ROOT_URLCONF = 'meiduotest.urls' TEMPLATES = [ { 'BACKEND': 'django.template.backends.django.DjangoTemplates', 'DIRS': [], 'APP_DIRS': True, 'OPTIONS': { 'context_processors': [ 'django.template.context_processors.debug', 'django.template.context_processors.request', 'django.contrib.auth.context_processors.auth', 'django.contrib.messages.context_processors.messages', ], }, }, ] WSGI_APPLICATION = 'meiduotest.wsgi.application' # Database # https://docs.djangoproject.com/en/1.11/ref/settings/#databases DATABASES = { 'default': { 'ENGINE': 'django.db.backends.sqlite3', 
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'), } } # Password validation # https://docs.djangoproject.com/en/1.11/ref/settings/#auth-password-validators AUTH_PASSWORD_VALIDATORS = [ { 'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator', }, { 'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator', }, { 'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator', }, { 'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator', }, ] # Internationalization # https://docs.djangoproject.com/en/1.11/topics/i18n/ LANGUAGE_CODE = 'en-us' TIME_ZONE = 'UTC' USE_I18N = True USE_L10N = True USE_TZ = True # Static files (CSS, JavaScript, Images) # https://docs.djangoproject.com/en/1.11/howto/static-files/ STATIC_URL = '/static/'
25.694215
91
0.69733
import os BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__))) SECRET_KEY = '8zj*1u%iy2m+d0hg$@#5d(nrr5_-2u))cqkf^_3wj&f3ayr)o=' DEBUG = True ALLOWED_HOSTS = [] # Application definition INSTALLED_APPS = [ 'django.contrib.admin', 'django.contrib.auth', 'django.contrib.contenttypes', 'django.contrib.sessions', 'django.contrib.messages', 'django.contrib.staticfiles', ] MIDDLEWARE = [ 'django.middleware.security.SecurityMiddleware', 'django.contrib.sessions.middleware.SessionMiddleware', 'django.middleware.common.CommonMiddleware', 'django.middleware.csrf.CsrfViewMiddleware', 'django.contrib.auth.middleware.AuthenticationMiddleware', 'django.contrib.messages.middleware.MessageMiddleware', 'django.middleware.clickjacking.XFrameOptionsMiddleware', ] ROOT_URLCONF = 'meiduotest.urls' TEMPLATES = [ { 'BACKEND': 'django.template.backends.django.DjangoTemplates', 'DIRS': [], 'APP_DIRS': True, 'OPTIONS': { 'context_processors': [ 'django.template.context_processors.debug', 'django.template.context_processors.request', 'django.contrib.auth.context_processors.auth', 'django.contrib.messages.context_processors.messages', ], }, }, ] WSGI_APPLICATION = 'meiduotest.wsgi.application' # Database # https://docs.djangoproject.com/en/1.11/ref/settings/#databases DATABASES = { 'default': { 'ENGINE': 'django.db.backends.sqlite3', 'NAME': os.path.join(BASE_DIR, 'db.sqlite3'), } } # Password validation # https://docs.djangoproject.com/en/1.11/ref/settings/#auth-password-validators AUTH_PASSWORD_VALIDATORS = [ { 'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator', }, { 'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator', }, { 'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator', }, { 'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator', }, ] # Internationalization # https://docs.djangoproject.com/en/1.11/topics/i18n/ LANGUAGE_CODE = 'en-us' TIME_ZONE = 'UTC' USE_I18N = True 
USE_L10N = True USE_TZ = True # Static files (CSS, JavaScript, Images) # https://docs.djangoproject.com/en/1.11/howto/static-files/ STATIC_URL = '/static/'
true
true
f723d5a84455b1eb99d7bfc740ea30ad3707a3a0
4,334
py
Python
mmdet/models/seg_heads/panoptic_fpn_head.py
SeHwanJoo/mmdetection_body
1e1cadc6df91926fc99c4afbae383df0ea9cfed3
[ "Apache-2.0" ]
7
2021-08-08T08:34:30.000Z
2022-01-10T18:37:47.000Z
mmdet/models/seg_heads/panoptic_fpn_head.py
SeHwanJoo/mmdetection_body
1e1cadc6df91926fc99c4afbae383df0ea9cfed3
[ "Apache-2.0" ]
null
null
null
mmdet/models/seg_heads/panoptic_fpn_head.py
SeHwanJoo/mmdetection_body
1e1cadc6df91926fc99c4afbae383df0ea9cfed3
[ "Apache-2.0" ]
2
2021-08-11T05:57:50.000Z
2022-01-04T11:13:32.000Z
import torch import torch.nn as nn from mmcv.runner import ModuleList from ..builder import HEADS from ..utils import ConvUpsample from .base_semantic_head import BaseSemanticHead @HEADS.register_module() class PanopticFPNHead(BaseSemanticHead): """PanopticFPNHead used in Panoptic FPN. Arg: num_classes (int): Number of classes, including all stuff classes and one thing class. in_channels (int): Number of channels in the input feature map. inner_channels (int): Number of channels in inner features. start_level (int): The start level of the input features used in PanopticFPN. end_level (int): The end level of the used features, the `end_level`-th layer will not be used. fg_range (tuple): Range of the foreground classes. bg_range (tuple): Range of the background classes. conv_cfg (dict): Dictionary to construct and config conv layer. Default: None. norm_cfg (dict): Dictionary to construct and config norm layer. Use ``GN`` by default. init_cfg (dict or list[dict], optional): Initialization config dict. loss_seg (dict): the loss of the semantic head. 
""" def __init__(self, num_classes, in_channels=256, inner_channels=128, start_level=0, end_level=4, fg_range=(1, 80), bg_range=(81, 133), conv_cfg=None, norm_cfg=dict(type='GN', num_groups=32, requires_grad=True), init_cfg=None, loss_seg=dict( type='CrossEntropyLoss', ignore_index=-1, loss_weight=1.0)): super(PanopticFPNHead, self).__init__(num_classes, init_cfg, loss_seg) self.fg_range = fg_range self.bg_range = bg_range self.fg_nums = self.fg_range[1] - self.fg_range[0] + 1 self.bg_nums = self.bg_range[1] - self.bg_range[0] + 1 # Used feature layers are [start_level, end_level) self.start_level = start_level self.end_level = end_level self.num_stages = end_level - start_level self.inner_channels = inner_channels self.conv_upsample_layers = ModuleList() for i in range(start_level, end_level): self.conv_upsample_layers.append( ConvUpsample( in_channels, inner_channels, num_layers=i if i > 0 else 1, num_upsample=i if i > 0 else 0, conv_cfg=conv_cfg, norm_cfg=norm_cfg, )) self.conv_logits = nn.Conv2d(inner_channels, num_classes, 1) def _set_things_to_void(self, gt_semantic_seg): """Merge thing classes to one class.""" gt_semantic_seg = gt_semantic_seg.int() fg_mask = (gt_semantic_seg >= self.fg_range[0]) * ( gt_semantic_seg <= self.fg_range[1]) bg_mask = (gt_semantic_seg >= self.bg_range[0]) * ( gt_semantic_seg <= self.bg_range[1]) new_gt_seg = fg_mask.int() * (self.bg_nums + 1) new_gt_seg = torch.where(bg_mask, gt_semantic_seg - self.fg_nums, new_gt_seg) return new_gt_seg def loss(self, seg_preds, gt_semantic_seg, label_bias=-1): """The loss of PanopticFPN head. Things classes will be merged to one class in PanopticFPN. 
""" gt_semantic_seg = self._set_things_to_void(gt_semantic_seg) return super().loss(seg_preds, gt_semantic_seg, label_bias) def init_weights(self): super().init_weights() nn.init.normal_(self.conv_logits.weight.data, 0, 0.01) self.conv_logits.bias.data.zero_() def forward(self, x): # the number of subnets must be not more than # the length of features. assert self.num_stages <= len(x) feats = [] for i, layer in enumerate(self.conv_upsample_layers): f = layer(x[self.start_level + i]) feats.append(f) feats = torch.sum(torch.stack(feats, dim=0), dim=0) seg_preds = self.conv_logits(feats) out = dict(seg_preds=seg_preds, feats=feats) return out
38.696429
78
0.601292
import torch import torch.nn as nn from mmcv.runner import ModuleList from ..builder import HEADS from ..utils import ConvUpsample from .base_semantic_head import BaseSemanticHead @HEADS.register_module() class PanopticFPNHead(BaseSemanticHead): def __init__(self, num_classes, in_channels=256, inner_channels=128, start_level=0, end_level=4, fg_range=(1, 80), bg_range=(81, 133), conv_cfg=None, norm_cfg=dict(type='GN', num_groups=32, requires_grad=True), init_cfg=None, loss_seg=dict( type='CrossEntropyLoss', ignore_index=-1, loss_weight=1.0)): super(PanopticFPNHead, self).__init__(num_classes, init_cfg, loss_seg) self.fg_range = fg_range self.bg_range = bg_range self.fg_nums = self.fg_range[1] - self.fg_range[0] + 1 self.bg_nums = self.bg_range[1] - self.bg_range[0] + 1 self.start_level = start_level self.end_level = end_level self.num_stages = end_level - start_level self.inner_channels = inner_channels self.conv_upsample_layers = ModuleList() for i in range(start_level, end_level): self.conv_upsample_layers.append( ConvUpsample( in_channels, inner_channels, num_layers=i if i > 0 else 1, num_upsample=i if i > 0 else 0, conv_cfg=conv_cfg, norm_cfg=norm_cfg, )) self.conv_logits = nn.Conv2d(inner_channels, num_classes, 1) def _set_things_to_void(self, gt_semantic_seg): gt_semantic_seg = gt_semantic_seg.int() fg_mask = (gt_semantic_seg >= self.fg_range[0]) * ( gt_semantic_seg <= self.fg_range[1]) bg_mask = (gt_semantic_seg >= self.bg_range[0]) * ( gt_semantic_seg <= self.bg_range[1]) new_gt_seg = fg_mask.int() * (self.bg_nums + 1) new_gt_seg = torch.where(bg_mask, gt_semantic_seg - self.fg_nums, new_gt_seg) return new_gt_seg def loss(self, seg_preds, gt_semantic_seg, label_bias=-1): gt_semantic_seg = self._set_things_to_void(gt_semantic_seg) return super().loss(seg_preds, gt_semantic_seg, label_bias) def init_weights(self): super().init_weights() nn.init.normal_(self.conv_logits.weight.data, 0, 0.01) self.conv_logits.bias.data.zero_() def forward(self, x): assert 
self.num_stages <= len(x) feats = [] for i, layer in enumerate(self.conv_upsample_layers): f = layer(x[self.start_level + i]) feats.append(f) feats = torch.sum(torch.stack(feats, dim=0), dim=0) seg_preds = self.conv_logits(feats) out = dict(seg_preds=seg_preds, feats=feats) return out
true
true
f723d8eb5daadf4e81d3c79aabb857dd8a2823c2
276
py
Python
dashboard/apps.py
lynetteoh/COVID19dashboard
61193d35acf004999443c47a30f0b9f9c6220c03
[ "MIT" ]
null
null
null
dashboard/apps.py
lynetteoh/COVID19dashboard
61193d35acf004999443c47a30f0b9f9c6220c03
[ "MIT" ]
null
null
null
dashboard/apps.py
lynetteoh/COVID19dashboard
61193d35acf004999443c47a30f0b9f9c6220c03
[ "MIT" ]
null
null
null
from django.apps import AppConfig import pandas as pd import sys class DashboardConfig(AppConfig): name = 'dashboard' def ready(self): if 'runserver' not in sys.argv: return True from dashboard.models import Case, State, Country
21.230769
57
0.65942
from django.apps import AppConfig import pandas as pd import sys class DashboardConfig(AppConfig): name = 'dashboard' def ready(self): if 'runserver' not in sys.argv: return True from dashboard.models import Case, State, Country
true
true
f723d9b3d877c48369368b51d3310d0d2b722ba2
372
py
Python
exercicios-turtle/.history/flower_20210624115429.py
Aleff13/poo-ufsc
bc1574df26f840a3c0fd5b1e0c72e5d69f61493d
[ "MIT" ]
1
2021-11-28T18:49:21.000Z
2021-11-28T18:49:21.000Z
exercicios-turtle/.history/flower_20210624115429.py
Aleff13/poo-ufsc
bc1574df26f840a3c0fd5b1e0c72e5d69f61493d
[ "MIT" ]
null
null
null
exercicios-turtle/.history/flower_20210624115429.py
Aleff13/poo-ufsc
bc1574df26f840a3c0fd5b1e0c72e5d69f61493d
[ "MIT" ]
null
null
null
import turtle tortuguita = turtle.Turtle() tortuguita.color('blue') tortuguita.speed(100) for i in range (18): tortuguita.circle(200,100) tortuguita.left(110) tortuguita.up() tortuguita.left(35) tortuguita.forward(160) tortuguita.down() tortuguita.dot(70,"black") tortuguita.left(35) tortuguita.up() tortuguita.color('green') tortuguita.forward() turtle.done()
17.714286
30
0.752688
import turtle tortuguita = turtle.Turtle() tortuguita.color('blue') tortuguita.speed(100) for i in range (18): tortuguita.circle(200,100) tortuguita.left(110) tortuguita.up() tortuguita.left(35) tortuguita.forward(160) tortuguita.down() tortuguita.dot(70,"black") tortuguita.left(35) tortuguita.up() tortuguita.color('green') tortuguita.forward() turtle.done()
true
true
f723d9cabe29840070a01c4f2c8c2878b8b99f27
5,505
py
Python
ccc/elasticsearch.py
mliepold/cc-utils
3f8c4b0d11d6a52d1605026f478371411daab81e
[ "BSD-3-Clause" ]
15
2018-04-18T13:25:30.000Z
2022-03-04T09:25:41.000Z
ccc/elasticsearch.py
mliepold/cc-utils
3f8c4b0d11d6a52d1605026f478371411daab81e
[ "BSD-3-Clause" ]
221
2018-04-12T06:29:43.000Z
2022-03-27T03:01:40.000Z
ccc/elasticsearch.py
mliepold/cc-utils
3f8c4b0d11d6a52d1605026f478371411daab81e
[ "BSD-3-Clause" ]
29
2018-04-11T14:42:23.000Z
2021-11-09T16:26:32.000Z
# Copyright (c) 2019-2020 SAP SE or an SAP affiliate company. All rights reserved. This file is # licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import datetime import functools import os import json import elasticsearch import ci.util import concourse.util import model.elasticsearch def default_client_if_available(): if not ci.util._running_on_ci(): return None cfg_factory = ci.util.ctx().cfg_factory() cfg_set = cfg_factory.cfg_set(ci.util.current_config_set_name()) es_config = cfg_set.elasticsearch() return from_cfg(elasticsearch_cfg=es_config) def from_cfg( elasticsearch_cfg:model.elasticsearch.ElasticSearchConfig ): return ElasticSearchClient( elasticsearch=_from_cfg(elasticsearch_cfg=elasticsearch_cfg) ) def _from_cfg( elasticsearch_cfg:model.elasticsearch.ElasticSearchConfig ): credentials = elasticsearch_cfg.credentials() return elasticsearch.Elasticsearch( elasticsearch_cfg.endpoints(), http_auth=(credentials.username(), credentials.passwd()), ) @functools.lru_cache() def _metadata_dict(): # XXX mv to concourse package; deduplicate with notify step if not ci.util._running_on_ci(): return {} build = concourse.util.find_own_running_build() pipeline_metadata = concourse.util.get_pipeline_metadata() config_set = ci.util.ctx().cfg_factory().cfg_set(pipeline_metadata.current_config_set_name) concourse_cfg = config_set.concourse() meta_dict = { 'build-id': build.id(), 'build-name': 
build.build_number(), 'build-job-name': pipeline_metadata.job_name, 'build-team-name': pipeline_metadata.team_name, 'build-pipeline-name': pipeline_metadata.pipeline_name, 'atc-external-url': concourse_cfg.external_url(), } # XXX deduplicate; mv to concourse package meta_dict['concourse_url'] = ci.util.urljoin( meta_dict['atc-external-url'], 'teams', meta_dict['build-team-name'], 'pipelines', meta_dict['build-pipeline-name'], 'jobs', meta_dict['build-job-name'], 'builds', meta_dict['build-name'], ) # XXX do not hard-code env variables meta_dict['effective_version'] = os.environ.get('EFFECTIVE_VERSION') meta_dict['component_name'] = os.environ.get('COMPONENT_NAME') meta_dict['creation_date'] = datetime.datetime.now().isoformat() return meta_dict class ElasticSearchClient: def __init__( self, elasticsearch: elasticsearch.Elasticsearch, ): self._api = elasticsearch def store_document( self, index: str, body: dict, inject_metadata=True, *args, **kwargs, ): ci.util.check_type(index, str) ci.util.check_type(body, dict) if 'doc_type' in kwargs: raise ValueError( ''' doc_type attribute has been deprecated - see: https://www.elastic.co/guide/en/elasticsearch/reference/6.0/removal-of-types.html ''' ) if inject_metadata and _metadata_dict(): md = _metadata_dict() body['cc_meta'] = md return self._api.index( index=index, doc_type='_doc', body=body, *args, **kwargs, ) def store_documents( self, index: str, body: [dict], inject_metadata=True, *args, **kwargs, ): # Bulk-loading uses a special format: A json specifying index name and doc-type # (always _doc) followed by the actual document json. These pairs (one for each document) # are then converted to newline delimited json # The index json does not change for bulk-loading into a single index. 
index_json = json.dumps({ 'index': { '_index': index, '_type': '_doc' } }) return self.store_bulk( body='\n'.join([f'{index_json}\n{json.dumps(d)}' for d in body]), inject_metadata=inject_metadata, *args, **kwargs, ) def store_bulk( self, body: str, inject_metadata=True, *args, **kwargs, ): ci.util.check_type(body, str) if inject_metadata and _metadata_dict(): def inject_meta(line): parsed = json.loads(line) if 'index' not in parsed: parsed['cc_meta'] = md return json.dumps(parsed) return line md = _metadata_dict() patched_body = '\n'.join([inject_meta(line) for line in body.splitlines()]) body = patched_body return self._api.bulk( body=body, *args, **kwargs, )
29.438503
97
0.622525
import datetime import functools import os import json import elasticsearch import ci.util import concourse.util import model.elasticsearch def default_client_if_available(): if not ci.util._running_on_ci(): return None cfg_factory = ci.util.ctx().cfg_factory() cfg_set = cfg_factory.cfg_set(ci.util.current_config_set_name()) es_config = cfg_set.elasticsearch() return from_cfg(elasticsearch_cfg=es_config) def from_cfg( elasticsearch_cfg:model.elasticsearch.ElasticSearchConfig ): return ElasticSearchClient( elasticsearch=_from_cfg(elasticsearch_cfg=elasticsearch_cfg) ) def _from_cfg( elasticsearch_cfg:model.elasticsearch.ElasticSearchConfig ): credentials = elasticsearch_cfg.credentials() return elasticsearch.Elasticsearch( elasticsearch_cfg.endpoints(), http_auth=(credentials.username(), credentials.passwd()), ) @functools.lru_cache() def _metadata_dict(): if not ci.util._running_on_ci(): return {} build = concourse.util.find_own_running_build() pipeline_metadata = concourse.util.get_pipeline_metadata() config_set = ci.util.ctx().cfg_factory().cfg_set(pipeline_metadata.current_config_set_name) concourse_cfg = config_set.concourse() meta_dict = { 'build-id': build.id(), 'build-name': build.build_number(), 'build-job-name': pipeline_metadata.job_name, 'build-team-name': pipeline_metadata.team_name, 'build-pipeline-name': pipeline_metadata.pipeline_name, 'atc-external-url': concourse_cfg.external_url(), } meta_dict['concourse_url'] = ci.util.urljoin( meta_dict['atc-external-url'], 'teams', meta_dict['build-team-name'], 'pipelines', meta_dict['build-pipeline-name'], 'jobs', meta_dict['build-job-name'], 'builds', meta_dict['build-name'], ) meta_dict['effective_version'] = os.environ.get('EFFECTIVE_VERSION') meta_dict['component_name'] = os.environ.get('COMPONENT_NAME') meta_dict['creation_date'] = datetime.datetime.now().isoformat() return meta_dict class ElasticSearchClient: def __init__( self, elasticsearch: elasticsearch.Elasticsearch, ): self._api = elasticsearch def 
store_document( self, index: str, body: dict, inject_metadata=True, *args, **kwargs, ): ci.util.check_type(index, str) ci.util.check_type(body, dict) if 'doc_type' in kwargs: raise ValueError( ''' doc_type attribute has been deprecated - see: https://www.elastic.co/guide/en/elasticsearch/reference/6.0/removal-of-types.html ''' ) if inject_metadata and _metadata_dict(): md = _metadata_dict() body['cc_meta'] = md return self._api.index( index=index, doc_type='_doc', body=body, *args, **kwargs, ) def store_documents( self, index: str, body: [dict], inject_metadata=True, *args, **kwargs, ): index_json = json.dumps({ 'index': { '_index': index, '_type': '_doc' } }) return self.store_bulk( body='\n'.join([f'{index_json}\n{json.dumps(d)}' for d in body]), inject_metadata=inject_metadata, *args, **kwargs, ) def store_bulk( self, body: str, inject_metadata=True, *args, **kwargs, ): ci.util.check_type(body, str) if inject_metadata and _metadata_dict(): def inject_meta(line): parsed = json.loads(line) if 'index' not in parsed: parsed['cc_meta'] = md return json.dumps(parsed) return line md = _metadata_dict() patched_body = '\n'.join([inject_meta(line) for line in body.splitlines()]) body = patched_body return self._api.bulk( body=body, *args, **kwargs, )
true
true
f723da3d31fc0d1c210b41c7779cc8ee4f4cd08c
6,876
py
Python
src/wallet/wallet_block_store.py
DONG-Jason/chia-blockchain
27b28d62f6b315e45bc00231e007c775f07a414a
[ "Apache-2.0" ]
null
null
null
src/wallet/wallet_block_store.py
DONG-Jason/chia-blockchain
27b28d62f6b315e45bc00231e007c775f07a414a
[ "Apache-2.0" ]
null
null
null
src/wallet/wallet_block_store.py
DONG-Jason/chia-blockchain
27b28d62f6b315e45bc00231e007c775f07a414a
[ "Apache-2.0" ]
null
null
null
from typing import Dict, Optional, Tuple, List import aiosqlite from src.consensus.sub_block_record import SubBlockRecord from src.types.header_block import HeaderBlock from src.util.ints import uint32, uint64 from src.wallet.block_record import HeaderBlockRecord from src.types.sized_bytes import bytes32 class WalletBlockStore: """ This object handles HeaderBlocks and SubBlocks stored in DB used by wallet. """ db: aiosqlite.Connection @classmethod async def create(cls, connection: aiosqlite.Connection): self = cls() self.db = connection await self.db.execute( "CREATE TABLE IF NOT EXISTS header_blocks(header_hash text PRIMARY KEY, sub_height int, height int," " timestamp int, block blob)" ) await self.db.execute("CREATE INDEX IF NOT EXISTS header_hash on header_blocks(header_hash)") await self.db.execute("CREATE INDEX IF NOT EXISTS timestamp on header_blocks(timestamp)") await self.db.execute("CREATE INDEX IF NOT EXISTS sub_height on header_blocks(sub_height)") await self.db.execute("CREATE INDEX IF NOT EXISTS height on header_blocks(height)") # Sub block records await self.db.execute( "CREATE TABLE IF NOT EXISTS sub_block_records(header_hash " "text PRIMARY KEY, prev_hash text, sub_height bigint, height int, weight bigint, total_iters text," "sub_block blob, is_peak tinyint)" ) # Height index so we can look up in order of height for sync purposes await self.db.execute("CREATE INDEX IF NOT EXISTS sub_block_height on sub_block_records(sub_height)") await self.db.execute("CREATE INDEX IF NOT EXISTS height on sub_block_records(height)") await self.db.execute("CREATE INDEX IF NOT EXISTS hh on sub_block_records(header_hash)") await self.db.execute("CREATE INDEX IF NOT EXISTS peak on sub_block_records(is_peak)") await self.db.commit() await self.db.commit() return self async def _clear_database(self): cursor_2 = await self.db.execute("DELETE FROM header_blocks") await cursor_2.close() await self.db.commit() async def rollback_lca_to_block(self, block_index): # TODO pass 
async def add_block_record(self, block_record: HeaderBlockRecord, sub_block: SubBlockRecord): """ Adds a block record to the database. This block record is assumed to be connected to the chain, but it may or may not be in the LCA path. """ if block_record.header.foliage_block is not None: timestamp = block_record.header.foliage_block.timestamp else: timestamp = uint64(0) cursor = await self.db.execute( "INSERT OR REPLACE INTO header_blocks VALUES(?, ?, ?, ?, ?)", ( block_record.header_hash.hex(), block_record.sub_block_height, sub_block.height, timestamp, bytes(block_record), ), ) await cursor.close() cursor_2 = await self.db.execute( "INSERT OR REPLACE INTO sub_block_records VALUES(?, ?, ?, ?, ?, ?, ?, ?)", ( block_record.header.header_hash.hex(), block_record.header.prev_header_hash.hex(), block_record.header.sub_block_height, block_record.header.height, block_record.header.weight.to_bytes(128 // 8, "big", signed=False).hex(), block_record.header.total_iters.to_bytes(128 // 8, "big", signed=False).hex(), bytes(sub_block), False, ), ) await cursor_2.close() await self.db.commit() async def get_header_block(self, header_hash: bytes32) -> Optional[HeaderBlock]: """Gets a block record from the database, if present""" cursor = await self.db.execute("SELECT * from header_blocks WHERE header_hash=?", (header_hash.hex(),)) row = await cursor.fetchone() await cursor.close() if row is not None: hbr = HeaderBlockRecord.from_bytes(row[4]) return hbr.header else: return None async def get_header_block_at(self, sub_heights: List[uint32]) -> List[HeaderBlock]: if len(sub_heights) == 0: return [] heights_db = tuple(sub_heights) formatted_str = f'SELECT block from header_blocks WHERE sub_height in ({"?," * (len(heights_db) - 1)}?)' cursor = await self.db.execute(formatted_str, heights_db) rows = await cursor.fetchall() await cursor.close() return [HeaderBlock.from_bytes(row[0]) for row in rows] async def get_header_block_record(self, header_hash: bytes32) -> 
Optional[HeaderBlockRecord]: """Gets a block record from the database, if present""" cursor = await self.db.execute("SELECT * from header_blocks WHERE header_hash=?", (header_hash.hex(),)) row = await cursor.fetchone() await cursor.close() if row is not None: hbr = HeaderBlockRecord.from_bytes(row[4]) return hbr else: return None async def get_sub_block_record(self, header_hash: bytes32) -> Optional[SubBlockRecord]: cursor = await self.db.execute( "SELECT sub_block from sub_block_records WHERE header_hash=?", (header_hash.hex(),), ) row = await cursor.fetchone() await cursor.close() if row is not None: return SubBlockRecord.from_bytes(row[0]) return None async def get_sub_block_records( self, ) -> Tuple[Dict[bytes32, SubBlockRecord], Optional[bytes32]]: """ Returns a dictionary with all sub blocks, as well as the header hash of the peak, if present. """ cursor = await self.db.execute("SELECT * from sub_block_records") rows = await cursor.fetchall() await cursor.close() ret: Dict[bytes32, SubBlockRecord] = {} peak: Optional[bytes32] = None for row in rows: header_hash = bytes.fromhex(row[0]) ret[header_hash] = SubBlockRecord.from_bytes(row[6]) if row[7]: assert peak is None # Sanity check, only one peak peak = header_hash return ret, peak async def set_peak(self, header_hash: bytes32) -> None: cursor_1 = await self.db.execute("UPDATE sub_block_records SET is_peak=0 WHERE is_peak=1") await cursor_1.close() cursor_2 = await self.db.execute( "UPDATE sub_block_records SET is_peak=1 WHERE header_hash=?", (header_hash.hex(),), ) await cursor_2.close() await self.db.commit()
39.745665
112
0.627981
from typing import Dict, Optional, Tuple, List import aiosqlite from src.consensus.sub_block_record import SubBlockRecord from src.types.header_block import HeaderBlock from src.util.ints import uint32, uint64 from src.wallet.block_record import HeaderBlockRecord from src.types.sized_bytes import bytes32 class WalletBlockStore: db: aiosqlite.Connection @classmethod async def create(cls, connection: aiosqlite.Connection): self = cls() self.db = connection await self.db.execute( "CREATE TABLE IF NOT EXISTS header_blocks(header_hash text PRIMARY KEY, sub_height int, height int," " timestamp int, block blob)" ) await self.db.execute("CREATE INDEX IF NOT EXISTS header_hash on header_blocks(header_hash)") await self.db.execute("CREATE INDEX IF NOT EXISTS timestamp on header_blocks(timestamp)") await self.db.execute("CREATE INDEX IF NOT EXISTS sub_height on header_blocks(sub_height)") await self.db.execute("CREATE INDEX IF NOT EXISTS height on header_blocks(height)") await self.db.execute( "CREATE TABLE IF NOT EXISTS sub_block_records(header_hash " "text PRIMARY KEY, prev_hash text, sub_height bigint, height int, weight bigint, total_iters text," "sub_block blob, is_peak tinyint)" ) await self.db.execute("CREATE INDEX IF NOT EXISTS sub_block_height on sub_block_records(sub_height)") await self.db.execute("CREATE INDEX IF NOT EXISTS height on sub_block_records(height)") await self.db.execute("CREATE INDEX IF NOT EXISTS hh on sub_block_records(header_hash)") await self.db.execute("CREATE INDEX IF NOT EXISTS peak on sub_block_records(is_peak)") await self.db.commit() await self.db.commit() return self async def _clear_database(self): cursor_2 = await self.db.execute("DELETE FROM header_blocks") await cursor_2.close() await self.db.commit() async def rollback_lca_to_block(self, block_index): pass async def add_block_record(self, block_record: HeaderBlockRecord, sub_block: SubBlockRecord): if block_record.header.foliage_block is not None: timestamp = 
block_record.header.foliage_block.timestamp else: timestamp = uint64(0) cursor = await self.db.execute( "INSERT OR REPLACE INTO header_blocks VALUES(?, ?, ?, ?, ?)", ( block_record.header_hash.hex(), block_record.sub_block_height, sub_block.height, timestamp, bytes(block_record), ), ) await cursor.close() cursor_2 = await self.db.execute( "INSERT OR REPLACE INTO sub_block_records VALUES(?, ?, ?, ?, ?, ?, ?, ?)", ( block_record.header.header_hash.hex(), block_record.header.prev_header_hash.hex(), block_record.header.sub_block_height, block_record.header.height, block_record.header.weight.to_bytes(128 // 8, "big", signed=False).hex(), block_record.header.total_iters.to_bytes(128 // 8, "big", signed=False).hex(), bytes(sub_block), False, ), ) await cursor_2.close() await self.db.commit() async def get_header_block(self, header_hash: bytes32) -> Optional[HeaderBlock]: cursor = await self.db.execute("SELECT * from header_blocks WHERE header_hash=?", (header_hash.hex(),)) row = await cursor.fetchone() await cursor.close() if row is not None: hbr = HeaderBlockRecord.from_bytes(row[4]) return hbr.header else: return None async def get_header_block_at(self, sub_heights: List[uint32]) -> List[HeaderBlock]: if len(sub_heights) == 0: return [] heights_db = tuple(sub_heights) formatted_str = f'SELECT block from header_blocks WHERE sub_height in ({"?," * (len(heights_db) - 1)}?)' cursor = await self.db.execute(formatted_str, heights_db) rows = await cursor.fetchall() await cursor.close() return [HeaderBlock.from_bytes(row[0]) for row in rows] async def get_header_block_record(self, header_hash: bytes32) -> Optional[HeaderBlockRecord]: cursor = await self.db.execute("SELECT * from header_blocks WHERE header_hash=?", (header_hash.hex(),)) row = await cursor.fetchone() await cursor.close() if row is not None: hbr = HeaderBlockRecord.from_bytes(row[4]) return hbr else: return None async def get_sub_block_record(self, header_hash: bytes32) -> Optional[SubBlockRecord]: cursor = await 
self.db.execute( "SELECT sub_block from sub_block_records WHERE header_hash=?", (header_hash.hex(),), ) row = await cursor.fetchone() await cursor.close() if row is not None: return SubBlockRecord.from_bytes(row[0]) return None async def get_sub_block_records( self, ) -> Tuple[Dict[bytes32, SubBlockRecord], Optional[bytes32]]: cursor = await self.db.execute("SELECT * from sub_block_records") rows = await cursor.fetchall() await cursor.close() ret: Dict[bytes32, SubBlockRecord] = {} peak: Optional[bytes32] = None for row in rows: header_hash = bytes.fromhex(row[0]) ret[header_hash] = SubBlockRecord.from_bytes(row[6]) if row[7]: assert peak is None peak = header_hash return ret, peak async def set_peak(self, header_hash: bytes32) -> None: cursor_1 = await self.db.execute("UPDATE sub_block_records SET is_peak=0 WHERE is_peak=1") await cursor_1.close() cursor_2 = await self.db.execute( "UPDATE sub_block_records SET is_peak=1 WHERE header_hash=?", (header_hash.hex(),), ) await cursor_2.close() await self.db.commit()
true
true
f723db4534fac7bbdbc1fca243f9cc9d68e9c713
56,361
py
Python
plugins/modules/oci_loadbalancer_backend_set.py
LaudateCorpus1/oci-ansible-collection
2b1cd87b4d652a97c1ca752cfc4fdc4bdb37a7e7
[ "Apache-2.0" ]
null
null
null
plugins/modules/oci_loadbalancer_backend_set.py
LaudateCorpus1/oci-ansible-collection
2b1cd87b4d652a97c1ca752cfc4fdc4bdb37a7e7
[ "Apache-2.0" ]
null
null
null
plugins/modules/oci_loadbalancer_backend_set.py
LaudateCorpus1/oci-ansible-collection
2b1cd87b4d652a97c1ca752cfc4fdc4bdb37a7e7
[ "Apache-2.0" ]
null
null
null
#!/usr/bin/python # Copyright (c) 2020, 2022 Oracle and/or its affiliates. # This software is made available to you under the terms of the GPL 3.0 license or the Apache 2.0 license. # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) # Apache License v2.0 # See LICENSE.TXT for details. # GENERATED FILE - DO NOT EDIT - MANUAL CHANGES WILL BE OVERWRITTEN from __future__ import absolute_import, division, print_function __metaclass__ = type ANSIBLE_METADATA = { "metadata_version": "1.1", "status": ["preview"], "supported_by": "community", } DOCUMENTATION = """ --- module: oci_loadbalancer_backend_set short_description: Manage a BackendSet resource in Oracle Cloud Infrastructure description: - This module allows the user to create, update and delete a BackendSet resource in Oracle Cloud Infrastructure - For I(state=present), adds a backend set to a load balancer. version_added: "2.9.0" author: Oracle (@oracle) options: name: description: - A friendly name for the backend set. It must be unique and it cannot be changed. - Valid backend set names include only alphanumeric characters, dashes, and underscores. Backend set names cannot contain spaces. Avoid entering confidential information. - "Example: `example_backend_set`" type: str required: true policy: description: - The load balancer policy for the backend set. To get a list of available policies, use the L(ListPolicies,https://docs.cloud.oracle.com/en-us/iaas/api/#/en/loadbalancer/20170115/LoadBalancerPolicy/ListPolicies) operation. - "Example: `LEAST_CONNECTIONS`" - Required for create using I(state=present), update using I(state=present) with name present. type: str backends: description: - "" - Required for update using I(state=present) with name present. type: list elements: dict suboptions: ip_address: description: - The IP address of the backend server. - "Example: `10.0.0.3`" type: str required: true port: description: - The communication port for the backend server. 
- "Example: `8080`" type: int required: true weight: description: - The load balancing policy weight assigned to the server. Backend servers with a higher weight receive a larger proportion of incoming traffic. For example, a server weighted '3' receives 3 times the number of new connections as a server weighted '1'. For more information on load balancing policies, see L(How Load Balancing Policies Work,https://docs.cloud.oracle.com/Content/Balance/Reference/lbpolicies.htm). - "Example: `3`" type: int backup: description: - "Whether the load balancer should treat this server as a backup unit. If `true`, the load balancer forwards no ingress traffic to this backend server unless all other backend servers not marked as \\"backup\\" fail the health check policy." - "**Note:** You cannot add a backend server marked as `backup` to a backend set that uses the IP Hash policy." - "Example: `false`" type: bool drain: description: - "Whether the load balancer should drain this server. Servers marked \\"drain\\" receive no new incoming traffic." - "Example: `false`" type: bool offline: description: - Whether the load balancer should treat this server as offline. Offline servers receive no incoming traffic. - "Example: `false`" type: bool health_checker: description: - "" - Required for create using I(state=present), update using I(state=present) with name present. type: dict suboptions: protocol: description: - The protocol the health check must use; either HTTP or TCP. - "Example: `HTTP`" type: str required: true url_path: description: - The path against which to run the health check. - "Example: `/healthcheck`" type: str port: description: - The backend server port against which to run the health check. If the port is not specified, the load balancer uses the port information from the `Backend` object. - "Example: `8080`" type: int return_code: description: - The status code a healthy backend server should return. 
- "Example: `200`" type: int retries: description: - "The number of retries to attempt before a backend server is considered \\"unhealthy\\". This number also applies when recovering a server to the \\"healthy\\" state." - "Example: `3`" type: int timeout_in_millis: description: - The maximum time, in milliseconds, to wait for a reply to a health check. A health check is successful only if a reply returns within this timeout period. - "Example: `3000`" type: int interval_in_millis: description: - The interval between health checks, in milliseconds. - "Example: `10000`" type: int response_body_regex: description: - A regular expression for parsing the response body from the backend server. - "Example: `^((?!false).|\\\\s)*$`" type: str ssl_configuration: description: - "" - This parameter is updatable. type: dict suboptions: verify_depth: description: - The maximum depth for peer certificate chain verification. - "Example: `3`" type: int verify_peer_certificate: description: - Whether the load balancer listener should verify peer certificates. - "Example: `true`" type: bool trusted_certificate_authority_ids: description: - Ids for OCI certificates service CA or CA bundles for the load balancer to trust. - "Example: `[ocid1.cabundle.oc1.us-ashburn-1.amaaaaaaav3bgsaagl4zzyqdop5i2vuwoqewdvauuw34llqa74otq2jdsfyq]`" type: list elements: str certificate_ids: description: - Ids for OCI certificates service certificates. Currently only a single Id may be passed. - "Example: `[ocid1.certificate.oc1.us-ashburn-1.amaaaaaaav3bgsaa5o2q7rh5nfmkkukfkogasqhk6af2opufhjlqg7m6jqzq]`" type: list elements: str certificate_name: description: - A friendly name for the certificate bundle. It must be unique and it cannot be changed. Valid certificate bundle names include only alphanumeric characters, dashes, and underscores. Certificate bundle names cannot contain spaces. Avoid entering confidential information. 
- "Example: `example_certificate_bundle`" type: str protocols: description: - A list of SSL protocols the load balancer must support for HTTPS or SSL connections. - The load balancer uses SSL protocols to establish a secure connection between a client and a server. A secure connection ensures that all data passed between the client and the server is private. - "The Load Balancing service supports the following protocols:" - "* TLSv1 * TLSv1.1 * TLSv1.2" - If this field is not specified, TLSv1.2 is the default. - "**Warning:** All SSL listeners created on a given port must use the same set of SSL protocols." - "**Notes:**" - "* The handshake to establish an SSL connection fails if the client supports none of the specified protocols. * You must ensure compatibility between the specified SSL protocols and the ciphers configured in the cipher suite. * For all existing load balancer listeners and backend sets that predate this feature, the `GET` operation displays a list of SSL protocols currently used by those resources." - "example: `[\\"TLSv1.1\\", \\"TLSv1.2\\"]`" type: list elements: str cipher_suite_name: description: - The name of the cipher suite to use for HTTPS or SSL connections. - If this field is not specified, the default is `oci-default-ssl-cipher-suite-v1`. - "**Notes:**" - "* You must ensure compatibility between the specified SSL protocols and the ciphers configured in the cipher suite. Clients cannot perform an SSL handshake if there is an incompatible configuration. * You must ensure compatibility between the ciphers configured in the cipher suite and the configured certificates. For example, RSA-based ciphers require RSA certificates and ECDSA-based ciphers require ECDSA certificates. * If the cipher configuration is not modified after load balancer creation, the `GET` operation returns `oci-default-ssl-cipher-suite-v1` as the value of this field in the SSL configuration for existing listeners that predate this feature. 
* If the cipher configuration was modified using Oracle operations after load balancer creation, the `GET` operation returns `oci-customized-ssl-cipher-suite` as the value of this field in the SSL configuration for existing listeners that predate this feature. * The `GET` operation returns `oci-wider-compatible-ssl-cipher-suite-v1` as the value of this field in the SSL configuration for existing backend sets that predate this feature. * If the `GET` operation on a listener returns `oci-customized-ssl-cipher-suite` as the value of this field, you must specify an appropriate predefined or custom cipher suite name when updating the resource. * The `oci-customized-ssl-cipher-suite` Oracle reserved cipher suite name is not accepted as valid input for this field." - "example: `example_cipher_suite`" type: str server_order_preference: description: - When this attribute is set to ENABLED, the system gives preference to the server ciphers over the client ciphers. - "**Note:** This configuration is applicable only when the load balancer is acting as an SSL/HTTPS server. This field is ignored when the `SSLConfiguration` object is associated with a backend set." type: str choices: - "ENABLED" - "DISABLED" session_persistence_configuration: description: - "" - This parameter is updatable. type: dict suboptions: cookie_name: description: - "The name of the cookie used to detect a session initiated by the backend server. Use '*' to specify that any cookie set by the backend causes the session to persist." - "Example: `example_cookie`" type: str required: true disable_fallback: description: - Whether the load balancer is prevented from directing traffic from a persistent session client to a different backend server if the original server is unavailable. Defaults to false. - "Example: `false`" type: bool lb_cookie_session_persistence_configuration: description: - "" - This parameter is updatable. 
type: dict suboptions: cookie_name: description: - "The name of the cookie inserted by the load balancer. If this field is not configured, the cookie name defaults to \\"X-Oracle-BMC-LBS-Route\\"." - "Example: `example_cookie`" - "**Notes:**" - "* Ensure that the cookie name used at the backend application servers is different from the cookie name used at the load balancer. To minimize the chance of name collision, Oracle recommends that you use a prefix such as \\"X-Oracle-OCI-\\" for this field." - "* If a backend server and the load balancer both insert cookies with the same name, the client or browser behavior can vary depending on the domain and path values associated with the cookie. If the name, domain, and path values of the `Set-cookie` generated by a backend server and the `Set-cookie` generated by the load balancer are all the same, the client or browser treats them as one cookie and returns only one of the cookie values in subsequent requests. If both `Set-cookie` names are the same, but the domain and path names are different, the client or browser treats them as two different cookies." type: str disable_fallback: description: - Whether the load balancer is prevented from directing traffic from a persistent session client to a different backend server if the original server is unavailable. Defaults to false. - "Example: `false`" type: bool domain: description: - The domain in which the cookie is valid. The `Set-cookie` header inserted by the load balancer contains a domain attribute with the specified value. - This attribute has no default value. If you do not specify a value, the load balancer does not insert the domain attribute into the `Set-cookie` header. - "**Notes:**" - "* L(RFC 6265 - HTTP State Management Mechanism,https://www.ietf.org/rfc/rfc6265.txt) describes client and browser behavior when the domain attribute is present or not present in the `Set-cookie` header." 
- If the value of the `Domain` attribute is `example.com` in the `Set-cookie` header, the client includes the same cookie in the `Cookie` header when making HTTP requests to `example.com`, `www.example.com`, and `www.abc.example.com`. If the `Domain` attribute is not present, the client returns the cookie only for the domain to which the original request was made. - "* Ensure that this attribute specifies the correct domain value. If the `Domain` attribute in the `Set-cookie` header does not include the domain to which the original request was made, the client or browser might reject the cookie. As specified in RFC 6265, the client accepts a cookie with the `Domain` attribute value `example.com` or `www.example.com` sent from `www.example.com`. It does not accept a cookie with the `Domain` attribute `abc.example.com` or `www.abc.example.com` sent from `www.example.com`." - "Example: `example.com`" type: str path: description: - The path in which the cookie is valid. The `Set-cookie header` inserted by the load balancer contains a `Path` attribute with the specified value. - Clients include the cookie in an HTTP request only if the path portion of the request-uri matches, or is a subdirectory of, the cookie's `Path` attribute. - The default value is `/`. - "Example: `/example`" type: str max_age_in_seconds: description: - The amount of time the cookie remains valid. The `Set-cookie` header inserted by the load balancer contains a `Max-Age` attribute with the specified value. - The specified value must be at least one second. There is no default value for this attribute. If you do not specify a value, the load balancer does not include the `Max-Age` attribute in the `Set-cookie` header. In most cases, the client or browser retains the cookie until the current session ends, as defined by the client. - "Example: `3600`" type: int is_secure: description: - Whether the `Set-cookie` header should contain the `Secure` attribute. 
If `true`, the `Set-cookie` header inserted by the load balancer contains the `Secure` attribute, which directs the client or browser to send the cookie only using a secure protocol. - "**Note:** If you set this field to `true`, you cannot associate the corresponding backend set with an HTTP listener." - "Example: `true`" type: bool is_http_only: description: - Whether the `Set-cookie` header should contain the `HttpOnly` attribute. If `true`, the `Set-cookie` header inserted by the load balancer contains the `HttpOnly` attribute, which limits the scope of the cookie to HTTP requests. This attribute directs the client or browser to omit the cookie when providing access to cookies through non-HTTP APIs. For example, it restricts the cookie from JavaScript channels. - "Example: `true`" type: bool load_balancer_id: description: - The L(OCID,https://docs.cloud.oracle.com/Content/General/Concepts/identifiers.htm) of the load balancer on which to add a backend set. type: str aliases: ["id"] required: true state: description: - The state of the BackendSet. - Use I(state=present) to create or update a BackendSet. - Use I(state=absent) to delete a BackendSet. 
type: str required: false default: 'present' choices: ["present", "absent"] extends_documentation_fragment: [ oracle.oci.oracle, oracle.oci.oracle_creatable_resource, oracle.oci.oracle_wait_options ] """ EXAMPLES = """ - name: Create backend_set oci_loadbalancer_backend_set: # required name: name_example policy: policy_example health_checker: # required protocol: protocol_example # optional url_path: url_path_example port: 56 return_code: 56 retries: 56 timeout_in_millis: 56 interval_in_millis: 56 response_body_regex: response_body_regex_example load_balancer_id: "ocid1.loadbalancer.oc1..xxxxxxEXAMPLExxxxxx" # optional backends: - # required ip_address: ip_address_example port: 56 # optional weight: 56 backup: true drain: true offline: true ssl_configuration: # optional verify_depth: 56 verify_peer_certificate: true trusted_certificate_authority_ids: [ "trusted_certificate_authority_ids_example" ] certificate_ids: [ "certificate_ids_example" ] certificate_name: certificate_name_example protocols: [ "protocols_example" ] cipher_suite_name: cipher_suite_name_example server_order_preference: ENABLED session_persistence_configuration: # required cookie_name: cookie_name_example # optional disable_fallback: true lb_cookie_session_persistence_configuration: # optional cookie_name: cookie_name_example disable_fallback: true domain: domain_example path: path_example max_age_in_seconds: 56 is_secure: true is_http_only: true - name: Update backend_set oci_loadbalancer_backend_set: # required name: name_example policy: policy_example backends: - # required ip_address: ip_address_example port: 56 # optional weight: 56 backup: true drain: true offline: true health_checker: # required protocol: protocol_example # optional url_path: url_path_example port: 56 return_code: 56 retries: 56 timeout_in_millis: 56 interval_in_millis: 56 response_body_regex: response_body_regex_example load_balancer_id: "ocid1.loadbalancer.oc1..xxxxxxEXAMPLExxxxxx" # optional ssl_configuration: # 
optional verify_depth: 56 verify_peer_certificate: true trusted_certificate_authority_ids: [ "trusted_certificate_authority_ids_example" ] certificate_ids: [ "certificate_ids_example" ] certificate_name: certificate_name_example protocols: [ "protocols_example" ] cipher_suite_name: cipher_suite_name_example server_order_preference: ENABLED session_persistence_configuration: # required cookie_name: cookie_name_example # optional disable_fallback: true lb_cookie_session_persistence_configuration: # optional cookie_name: cookie_name_example disable_fallback: true domain: domain_example path: path_example max_age_in_seconds: 56 is_secure: true is_http_only: true - name: Delete backend_set oci_loadbalancer_backend_set: # required name: name_example load_balancer_id: "ocid1.loadbalancer.oc1..xxxxxxEXAMPLExxxxxx" state: absent """ RETURN = """ backend_set: description: - Details of the BackendSet resource acted upon by the current operation returned: on success type: complex contains: name: description: - A friendly name for the backend set. It must be unique and it cannot be changed. - Valid backend set names include only alphanumeric characters, dashes, and underscores. Backend set names cannot contain spaces. Avoid entering confidential information. - "Example: `example_backend_set`" returned: on success type: str sample: name_example policy: description: - The load balancer policy for the backend set. To get a list of available policies, use the L(ListPolicies,https://docs.cloud.oracle.com/en-us/iaas/api/#/en/loadbalancer/20170115/LoadBalancerPolicy/ListPolicies) operation. - "Example: `LEAST_CONNECTIONS`" returned: on success type: str sample: policy_example backends: description: - "" returned: on success type: complex contains: name: description: - A read-only field showing the IP address and port that uniquely identify this backend server in the backend set. 
- "Example: `10.0.0.3:8080`" returned: on success type: str sample: name_example ip_address: description: - The IP address of the backend server. - "Example: `10.0.0.3`" returned: on success type: str sample: ip_address_example port: description: - The communication port for the backend server. - "Example: `8080`" returned: on success type: int sample: 56 weight: description: - The load balancing policy weight assigned to the server. Backend servers with a higher weight receive a larger proportion of incoming traffic. For example, a server weighted '3' receives 3 times the number of new connections as a server weighted '1'. For more information on load balancing policies, see L(How Load Balancing Policies Work,https://docs.cloud.oracle.com/Content/Balance/Reference/lbpolicies.htm). - "Example: `3`" returned: on success type: int sample: 56 drain: description: - "Whether the load balancer should drain this server. Servers marked \\"drain\\" receive no new incoming traffic." - "Example: `false`" returned: on success type: bool sample: true backup: description: - "Whether the load balancer should treat this server as a backup unit. If `true`, the load balancer forwards no ingress traffic to this backend server unless all other backend servers not marked as \\"backup\\" fail the health check policy." - "**Note:** You cannot add a backend server marked as `backup` to a backend set that uses the IP Hash policy." - "Example: `false`" returned: on success type: bool sample: true offline: description: - Whether the load balancer should treat this server as offline. Offline servers receive no incoming traffic. - "Example: `false`" returned: on success type: bool sample: true health_checker: description: - "" returned: on success type: complex contains: protocol: description: - The protocol the health check must use; either HTTP or TCP. 
- "Example: `HTTP`" returned: on success type: str sample: protocol_example url_path: description: - The path against which to run the health check. - "Example: `/healthcheck`" returned: on success type: str sample: url_path_example port: description: - The backend server port against which to run the health check. If the port is not specified, the load balancer uses the port information from the `Backend` object. - "Example: `8080`" returned: on success type: int sample: 56 return_code: description: - "The status code a healthy backend server should return. If you configure the health check policy to use the HTTP protocol, you can use common HTTP status codes such as \\"200\\"." - "Example: `200`" returned: on success type: int sample: 56 retries: description: - "The number of retries to attempt before a backend server is considered \\"unhealthy\\". This number also applies when recovering a server to the \\"healthy\\" state. Defaults to 3." - "Example: `3`" returned: on success type: int sample: 56 timeout_in_millis: description: - The maximum time, in milliseconds, to wait for a reply to a health check. A health check is successful only if a reply returns within this timeout period. Defaults to 3000 (3 seconds). - "Example: `3000`" returned: on success type: int sample: 56 interval_in_millis: description: - The interval between health checks, in milliseconds. The default is 10000 (10 seconds). - "Example: `10000`" returned: on success type: int sample: 56 response_body_regex: description: - A regular expression for parsing the response body from the backend server. - "Example: `^((?!false).|\\\\s)*$`" returned: on success type: str sample: response_body_regex_example ssl_configuration: description: - "" returned: on success type: complex contains: verify_depth: description: - The maximum depth for peer certificate chain verification. 
- "Example: `3`" returned: on success type: int sample: 56 verify_peer_certificate: description: - Whether the load balancer listener should verify peer certificates. - "Example: `true`" returned: on success type: bool sample: true trusted_certificate_authority_ids: description: - Ids for OCI certificates service CA or CA bundles for the load balancer to trust. - "Example: `[ocid1.cabundle.oc1.us-ashburn-1.amaaaaaaav3bgsaagl4zzyqdop5i2vuwoqewdvauuw34llqa74otq2jdsfyq]`" returned: on success type: list sample: [] certificate_ids: description: - Ids for OCI certificates service certificates. Currently only a single Id may be passed. - "Example: `[ocid1.certificate.oc1.us-ashburn-1.amaaaaaaav3bgsaa5o2q7rh5nfmkkukfkogasqhk6af2opufhjlqg7m6jqzq]`" returned: on success type: list sample: [] certificate_name: description: - A friendly name for the certificate bundle. It must be unique and it cannot be changed. Valid certificate bundle names include only alphanumeric characters, dashes, and underscores. Certificate bundle names cannot contain spaces. Avoid entering confidential information. - "Example: `example_certificate_bundle`" returned: on success type: str sample: certificate_name_example server_order_preference: description: - When this attribute is set to ENABLED, the system gives preference to the server ciphers over the client ciphers. - "**Note:** This configuration is applicable only when the load balancer is acting as an SSL/HTTPS server. This field is ignored when the `SSLConfiguration` object is associated with a backend set." returned: on success type: str sample: ENABLED cipher_suite_name: description: - The name of the cipher suite to use for HTTPS or SSL connections. - If this field is not specified, the default is `oci-default-ssl-cipher-suite-v1`. - "**Notes:**" - "* You must ensure compatibility between the specified SSL protocols and the ciphers configured in the cipher suite. 
Clients cannot perform an SSL handshake if there is an incompatible configuration. * You must ensure compatibility between the ciphers configured in the cipher suite and the configured certificates. For example, RSA-based ciphers require RSA certificates and ECDSA-based ciphers require ECDSA certificates. * If the cipher configuration is not modified after load balancer creation, the `GET` operation returns `oci-default-ssl-cipher-suite-v1` as the value of this field in the SSL configuration for existing listeners that predate this feature. * If the cipher configuration was modified using Oracle operations after load balancer creation, the `GET` operation returns `oci-customized-ssl-cipher-suite` as the value of this field in the SSL configuration for existing listeners that predate this feature. * The `GET` operation returns `oci-wider-compatible-ssl-cipher-suite-v1` as the value of this field in the SSL configuration for existing backend sets that predate this feature. * If the `GET` operation on a listener returns `oci-customized-ssl-cipher-suite` as the value of this field, you must specify an appropriate predefined or custom cipher suite name when updating the resource. * The `oci-customized-ssl-cipher-suite` Oracle reserved cipher suite name is not accepted as valid input for this field." - "example: `example_cipher_suite`" returned: on success type: str sample: cipher_suite_name_example protocols: description: - A list of SSL protocols the load balancer must support for HTTPS or SSL connections. - The load balancer uses SSL protocols to establish a secure connection between a client and a server. A secure connection ensures that all data passed between the client and the server is private. - "The Load Balancing service supports the following protocols:" - "* TLSv1 * TLSv1.1 * TLSv1.2" - If this field is not specified, TLSv1.2 is the default. - "**Warning:** All SSL listeners created on a given port must use the same set of SSL protocols." 
- "**Notes:**" - "* The handshake to establish an SSL connection fails if the client supports none of the specified protocols. * You must ensure compatibility between the specified SSL protocols and the ciphers configured in the cipher suite. * For all existing load balancer listeners and backend sets that predate this feature, the `GET` operation displays a list of SSL protocols currently used by those resources." - "example: `[\\"TLSv1.1\\", \\"TLSv1.2\\"]`" returned: on success type: list sample: [] session_persistence_configuration: description: - "" returned: on success type: complex contains: cookie_name: description: - "The name of the cookie used to detect a session initiated by the backend server. Use '*' to specify that any cookie set by the backend causes the session to persist." - "Example: `example_cookie`" returned: on success type: str sample: cookie_name_example disable_fallback: description: - Whether the load balancer is prevented from directing traffic from a persistent session client to a different backend server if the original server is unavailable. Defaults to false. - "Example: `false`" returned: on success type: bool sample: true lb_cookie_session_persistence_configuration: description: - "" returned: on success type: complex contains: cookie_name: description: - "The name of the cookie inserted by the load balancer. If this field is not configured, the cookie name defaults to \\"X-Oracle-BMC-LBS-Route\\"." - "Example: `example_cookie`" - "**Notes:**" - "* Ensure that the cookie name used at the backend application servers is different from the cookie name used at the load balancer. To minimize the chance of name collision, Oracle recommends that you use a prefix such as \\"X-Oracle-OCI-\\" for this field." - "* If a backend server and the load balancer both insert cookies with the same name, the client or browser behavior can vary depending on the domain and path values associated with the cookie. 
If the name, domain, and path values of the `Set-cookie` generated by a backend server and the `Set-cookie` generated by the load balancer are all the same, the client or browser treats them as one cookie and returns only one of the cookie values in subsequent requests. If both `Set-cookie` names are the same, but the domain and path names are different, the client or browser treats them as two different cookies." returned: on success type: str sample: cookie_name_example disable_fallback: description: - Whether the load balancer is prevented from directing traffic from a persistent session client to a different backend server if the original server is unavailable. Defaults to false. - "Example: `false`" returned: on success type: bool sample: true domain: description: - The domain in which the cookie is valid. The `Set-cookie` header inserted by the load balancer contains a domain attribute with the specified value. - This attribute has no default value. If you do not specify a value, the load balancer does not insert the domain attribute into the `Set-cookie` header. - "**Notes:**" - "* L(RFC 6265 - HTTP State Management Mechanism,https://www.ietf.org/rfc/rfc6265.txt) describes client and browser behavior when the domain attribute is present or not present in the `Set-cookie` header." - If the value of the `Domain` attribute is `example.com` in the `Set-cookie` header, the client includes the same cookie in the `Cookie` header when making HTTP requests to `example.com`, `www.example.com`, and `www.abc.example.com`. If the `Domain` attribute is not present, the client returns the cookie only for the domain to which the original request was made. - "* Ensure that this attribute specifies the correct domain value. If the `Domain` attribute in the `Set-cookie` header does not include the domain to which the original request was made, the client or browser might reject the cookie. 
As specified in RFC 6265, the client accepts a cookie with the `Domain` attribute value `example.com` or `www.example.com` sent from `www.example.com`. It does not accept a cookie with the `Domain` attribute `abc.example.com` or `www.abc.example.com` sent from `www.example.com`." - "Example: `example.com`" returned: on success type: str sample: domain_example path: description: - The path in which the cookie is valid. The `Set-cookie header` inserted by the load balancer contains a `Path` attribute with the specified value. - Clients include the cookie in an HTTP request only if the path portion of the request-uri matches, or is a subdirectory of, the cookie's `Path` attribute. - The default value is `/`. - "Example: `/example`" returned: on success type: str sample: path_example max_age_in_seconds: description: - The amount of time the cookie remains valid. The `Set-cookie` header inserted by the load balancer contains a `Max-Age` attribute with the specified value. - The specified value must be at least one second. There is no default value for this attribute. If you do not specify a value, the load balancer does not include the `Max-Age` attribute in the `Set-cookie` header. In most cases, the client or browser retains the cookie until the current session ends, as defined by the client. - "Example: `3600`" returned: on success type: int sample: 56 is_secure: description: - Whether the `Set-cookie` header should contain the `Secure` attribute. If `true`, the `Set-cookie` header inserted by the load balancer contains the `Secure` attribute, which directs the client or browser to send the cookie only using a secure protocol. - "**Note:** If you set this field to `true`, you cannot associate the corresponding backend set with an HTTP listener." - "Example: `true`" returned: on success type: bool sample: true is_http_only: description: - Whether the `Set-cookie` header should contain the `HttpOnly` attribute. 
If `true`, the `Set-cookie` header inserted by the load balancer contains the `HttpOnly` attribute, which limits the scope of the cookie to HTTP requests. This attribute directs the client or browser to omit the cookie when providing access to cookies through non-HTTP APIs. For example, it restricts the cookie from JavaScript channels. - "Example: `true`" returned: on success type: bool sample: true sample: { "name": "name_example", "policy": "policy_example", "backends": [{ "name": "name_example", "ip_address": "ip_address_example", "port": 56, "weight": 56, "drain": true, "backup": true, "offline": true }], "health_checker": { "protocol": "protocol_example", "url_path": "url_path_example", "port": 56, "return_code": 56, "retries": 56, "timeout_in_millis": 56, "interval_in_millis": 56, "response_body_regex": "response_body_regex_example" }, "ssl_configuration": { "verify_depth": 56, "verify_peer_certificate": true, "trusted_certificate_authority_ids": [], "certificate_ids": [], "certificate_name": "certificate_name_example", "server_order_preference": "ENABLED", "cipher_suite_name": "cipher_suite_name_example", "protocols": [] }, "session_persistence_configuration": { "cookie_name": "cookie_name_example", "disable_fallback": true }, "lb_cookie_session_persistence_configuration": { "cookie_name": "cookie_name_example", "disable_fallback": true, "domain": "domain_example", "path": "path_example", "max_age_in_seconds": 56, "is_secure": true, "is_http_only": true } } """ from ansible.module_utils.basic import AnsibleModule from ansible_collections.oracle.oci.plugins.module_utils import ( oci_common_utils, oci_wait_utils, ) from ansible_collections.oracle.oci.plugins.module_utils.oci_resource_utils import ( OCIResourceHelperBase, get_custom_class, ) try: from oci.load_balancer import LoadBalancerClient from oci.load_balancer.models import CreateBackendSetDetails from oci.load_balancer.models import UpdateBackendSetDetails HAS_OCI_PY_SDK = True except ImportError: 
HAS_OCI_PY_SDK = False class BackendSetHelperGen(OCIResourceHelperBase): """Supported operations: create, update, get, list and delete""" def get_possible_entity_types(self): return super(BackendSetHelperGen, self).get_possible_entity_types() + [ "backendset", "backendsets", "loadBalancerbackendset", "loadBalancerbackendsets", "backendsetresource", "backendsetsresource", "loadbalancer", ] def get_module_resource_id_param(self): return "name" def get_module_resource_id(self): return self.module.params.get("name") def get_get_fn(self): return self.client.get_backend_set def get_resource(self): return oci_common_utils.call_with_backoff( self.client.get_backend_set, load_balancer_id=self.module.params.get("load_balancer_id"), backend_set_name=self.module.params.get("name"), ) def get_required_kwargs_for_list(self): required_list_method_params = [ "load_balancer_id", ] return dict( (param, self.module.params[param]) for param in required_list_method_params ) def get_optional_kwargs_for_list(self): return dict() def list_resources(self): required_kwargs = self.get_required_kwargs_for_list() optional_kwargs = self.get_optional_kwargs_for_list() kwargs = oci_common_utils.merge_dicts(required_kwargs, optional_kwargs) return oci_common_utils.list_all_resources( self.client.list_backend_sets, **kwargs ) def get_create_model_class(self): return CreateBackendSetDetails def is_update(self): if not self.module.params.get("state") == "present": return False return self.does_resource_exist() def is_create(self): if not self.module.params.get("state") == "present": return False return not self.does_resource_exist() def create_resource(self): create_details = self.get_create_model() return oci_wait_utils.call_and_wait( call_fn=self.client.create_backend_set, call_fn_args=(), call_fn_kwargs=dict( create_backend_set_details=create_details, load_balancer_id=self.module.params.get("load_balancer_id"), ), waiter_type=oci_wait_utils.WORK_REQUEST_WAITER_KEY, 
operation=oci_common_utils.CREATE_OPERATION_KEY, waiter_client=self.get_waiter_client(), resource_helper=self, wait_for_states=oci_common_utils.get_work_request_completed_states(), ) def get_update_model_class(self): return UpdateBackendSetDetails def update_resource(self): update_details = self.get_update_model() return oci_wait_utils.call_and_wait( call_fn=self.client.update_backend_set, call_fn_args=(), call_fn_kwargs=dict( update_backend_set_details=update_details, load_balancer_id=self.module.params.get("load_balancer_id"), backend_set_name=self.module.params.get("name"), ), waiter_type=oci_wait_utils.WORK_REQUEST_WAITER_KEY, operation=oci_common_utils.UPDATE_OPERATION_KEY, waiter_client=self.get_waiter_client(), resource_helper=self, wait_for_states=oci_common_utils.get_work_request_completed_states(), ) def delete_resource(self): return oci_wait_utils.call_and_wait( call_fn=self.client.delete_backend_set, call_fn_args=(), call_fn_kwargs=dict( load_balancer_id=self.module.params.get("load_balancer_id"), backend_set_name=self.module.params.get("name"), ), waiter_type=oci_wait_utils.WORK_REQUEST_WAITER_KEY, operation=oci_common_utils.DELETE_OPERATION_KEY, waiter_client=self.get_waiter_client(), resource_helper=self, wait_for_states=oci_common_utils.get_work_request_completed_states(), ) BackendSetHelperCustom = get_custom_class("BackendSetHelperCustom") class ResourceHelper(BackendSetHelperCustom, BackendSetHelperGen): pass def main(): module_args = oci_common_utils.get_common_arg_spec( supports_create=True, supports_wait=True ) module_args.update( dict( name=dict(type="str", required=True), policy=dict(type="str"), backends=dict( type="list", elements="dict", options=dict( ip_address=dict(type="str", required=True), port=dict(type="int", required=True), weight=dict(type="int"), backup=dict(type="bool"), drain=dict(type="bool"), offline=dict(type="bool"), ), ), health_checker=dict( type="dict", options=dict( protocol=dict(type="str", required=True), 
url_path=dict(type="str"), port=dict(type="int"), return_code=dict(type="int"), retries=dict(type="int"), timeout_in_millis=dict(type="int"), interval_in_millis=dict(type="int"), response_body_regex=dict(type="str"), ), ), ssl_configuration=dict( type="dict", options=dict( verify_depth=dict(type="int"), verify_peer_certificate=dict(type="bool"), trusted_certificate_authority_ids=dict(type="list", elements="str"), certificate_ids=dict(type="list", elements="str"), certificate_name=dict(type="str"), protocols=dict(type="list", elements="str"), cipher_suite_name=dict(type="str"), server_order_preference=dict( type="str", choices=["ENABLED", "DISABLED"] ), ), ), session_persistence_configuration=dict( type="dict", options=dict( cookie_name=dict(type="str", required=True), disable_fallback=dict(type="bool"), ), ), lb_cookie_session_persistence_configuration=dict( type="dict", options=dict( cookie_name=dict(type="str"), disable_fallback=dict(type="bool"), domain=dict(type="str"), path=dict(type="str"), max_age_in_seconds=dict(type="int"), is_secure=dict(type="bool"), is_http_only=dict(type="bool"), ), ), load_balancer_id=dict(aliases=["id"], type="str", required=True), state=dict(type="str", default="present", choices=["present", "absent"]), ) ) module = AnsibleModule(argument_spec=module_args, supports_check_mode=True) if not HAS_OCI_PY_SDK: module.fail_json(msg="oci python sdk required for this module.") resource_helper = ResourceHelper( module=module, resource_type="backend_set", service_client_class=LoadBalancerClient, namespace="load_balancer", ) result = dict(changed=False) if resource_helper.is_delete(): result = resource_helper.delete() elif resource_helper.is_update(): result = resource_helper.update() elif resource_helper.is_create(): result = resource_helper.create() module.exit_json(**result) if __name__ == "__main__": main()
49.657269
149
0.558134
from __future__ import absolute_import, division, print_function __metaclass__ = type ANSIBLE_METADATA = { "metadata_version": "1.1", "status": ["preview"], "supported_by": "community", } DOCUMENTATION = """ --- module: oci_loadbalancer_backend_set short_description: Manage a BackendSet resource in Oracle Cloud Infrastructure description: - This module allows the user to create, update and delete a BackendSet resource in Oracle Cloud Infrastructure - For I(state=present), adds a backend set to a load balancer. version_added: "2.9.0" author: Oracle (@oracle) options: name: description: - A friendly name for the backend set. It must be unique and it cannot be changed. - Valid backend set names include only alphanumeric characters, dashes, and underscores. Backend set names cannot contain spaces. Avoid entering confidential information. - "Example: `example_backend_set`" type: str required: true policy: description: - The load balancer policy for the backend set. To get a list of available policies, use the L(ListPolicies,https://docs.cloud.oracle.com/en-us/iaas/api/#/en/loadbalancer/20170115/LoadBalancerPolicy/ListPolicies) operation. - "Example: `LEAST_CONNECTIONS`" - Required for create using I(state=present), update using I(state=present) with name present. type: str backends: description: - "" - Required for update using I(state=present) with name present. type: list elements: dict suboptions: ip_address: description: - The IP address of the backend server. - "Example: `10.0.0.3`" type: str required: true port: description: - The communication port for the backend server. - "Example: `8080`" type: int required: true weight: description: - The load balancing policy weight assigned to the server. Backend servers with a higher weight receive a larger proportion of incoming traffic. For example, a server weighted '3' receives 3 times the number of new connections as a server weighted '1'. 
For more information on load balancing policies, see L(How Load Balancing Policies Work,https://docs.cloud.oracle.com/Content/Balance/Reference/lbpolicies.htm). - "Example: `3`" type: int backup: description: - "Whether the load balancer should treat this server as a backup unit. If `true`, the load balancer forwards no ingress traffic to this backend server unless all other backend servers not marked as \\"backup\\" fail the health check policy." - "**Note:** You cannot add a backend server marked as `backup` to a backend set that uses the IP Hash policy." - "Example: `false`" type: bool drain: description: - "Whether the load balancer should drain this server. Servers marked \\"drain\\" receive no new incoming traffic." - "Example: `false`" type: bool offline: description: - Whether the load balancer should treat this server as offline. Offline servers receive no incoming traffic. - "Example: `false`" type: bool health_checker: description: - "" - Required for create using I(state=present), update using I(state=present) with name present. type: dict suboptions: protocol: description: - The protocol the health check must use; either HTTP or TCP. - "Example: `HTTP`" type: str required: true url_path: description: - The path against which to run the health check. - "Example: `/healthcheck`" type: str port: description: - The backend server port against which to run the health check. If the port is not specified, the load balancer uses the port information from the `Backend` object. - "Example: `8080`" type: int return_code: description: - The status code a healthy backend server should return. - "Example: `200`" type: int retries: description: - "The number of retries to attempt before a backend server is considered \\"unhealthy\\". This number also applies when recovering a server to the \\"healthy\\" state." - "Example: `3`" type: int timeout_in_millis: description: - The maximum time, in milliseconds, to wait for a reply to a health check. 
A health check is successful only if a reply returns within this timeout period. - "Example: `3000`" type: int interval_in_millis: description: - The interval between health checks, in milliseconds. - "Example: `10000`" type: int response_body_regex: description: - A regular expression for parsing the response body from the backend server. - "Example: `^((?!false).|\\\\s)*$`" type: str ssl_configuration: description: - "" - This parameter is updatable. type: dict suboptions: verify_depth: description: - The maximum depth for peer certificate chain verification. - "Example: `3`" type: int verify_peer_certificate: description: - Whether the load balancer listener should verify peer certificates. - "Example: `true`" type: bool trusted_certificate_authority_ids: description: - Ids for OCI certificates service CA or CA bundles for the load balancer to trust. - "Example: `[ocid1.cabundle.oc1.us-ashburn-1.amaaaaaaav3bgsaagl4zzyqdop5i2vuwoqewdvauuw34llqa74otq2jdsfyq]`" type: list elements: str certificate_ids: description: - Ids for OCI certificates service certificates. Currently only a single Id may be passed. - "Example: `[ocid1.certificate.oc1.us-ashburn-1.amaaaaaaav3bgsaa5o2q7rh5nfmkkukfkogasqhk6af2opufhjlqg7m6jqzq]`" type: list elements: str certificate_name: description: - A friendly name for the certificate bundle. It must be unique and it cannot be changed. Valid certificate bundle names include only alphanumeric characters, dashes, and underscores. Certificate bundle names cannot contain spaces. Avoid entering confidential information. - "Example: `example_certificate_bundle`" type: str protocols: description: - A list of SSL protocols the load balancer must support for HTTPS or SSL connections. - The load balancer uses SSL protocols to establish a secure connection between a client and a server. A secure connection ensures that all data passed between the client and the server is private. 
- "The Load Balancing service supports the following protocols:" - "* TLSv1 * TLSv1.1 * TLSv1.2" - If this field is not specified, TLSv1.2 is the default. - "**Warning:** All SSL listeners created on a given port must use the same set of SSL protocols." - "**Notes:**" - "* The handshake to establish an SSL connection fails if the client supports none of the specified protocols. * You must ensure compatibility between the specified SSL protocols and the ciphers configured in the cipher suite. * For all existing load balancer listeners and backend sets that predate this feature, the `GET` operation displays a list of SSL protocols currently used by those resources." - "example: `[\\"TLSv1.1\\", \\"TLSv1.2\\"]`" type: list elements: str cipher_suite_name: description: - The name of the cipher suite to use for HTTPS or SSL connections. - If this field is not specified, the default is `oci-default-ssl-cipher-suite-v1`. - "**Notes:**" - "* You must ensure compatibility between the specified SSL protocols and the ciphers configured in the cipher suite. Clients cannot perform an SSL handshake if there is an incompatible configuration. * You must ensure compatibility between the ciphers configured in the cipher suite and the configured certificates. For example, RSA-based ciphers require RSA certificates and ECDSA-based ciphers require ECDSA certificates. * If the cipher configuration is not modified after load balancer creation, the `GET` operation returns `oci-default-ssl-cipher-suite-v1` as the value of this field in the SSL configuration for existing listeners that predate this feature. * If the cipher configuration was modified using Oracle operations after load balancer creation, the `GET` operation returns `oci-customized-ssl-cipher-suite` as the value of this field in the SSL configuration for existing listeners that predate this feature. 
* The `GET` operation returns `oci-wider-compatible-ssl-cipher-suite-v1` as the value of this field in the SSL configuration for existing backend sets that predate this feature. * If the `GET` operation on a listener returns `oci-customized-ssl-cipher-suite` as the value of this field, you must specify an appropriate predefined or custom cipher suite name when updating the resource. * The `oci-customized-ssl-cipher-suite` Oracle reserved cipher suite name is not accepted as valid input for this field." - "example: `example_cipher_suite`" type: str server_order_preference: description: - When this attribute is set to ENABLED, the system gives preference to the server ciphers over the client ciphers. - "**Note:** This configuration is applicable only when the load balancer is acting as an SSL/HTTPS server. This field is ignored when the `SSLConfiguration` object is associated with a backend set." type: str choices: - "ENABLED" - "DISABLED" session_persistence_configuration: description: - "" - This parameter is updatable. type: dict suboptions: cookie_name: description: - "The name of the cookie used to detect a session initiated by the backend server. Use '*' to specify that any cookie set by the backend causes the session to persist." - "Example: `example_cookie`" type: str required: true disable_fallback: description: - Whether the load balancer is prevented from directing traffic from a persistent session client to a different backend server if the original server is unavailable. Defaults to false. - "Example: `false`" type: bool lb_cookie_session_persistence_configuration: description: - "" - This parameter is updatable. type: dict suboptions: cookie_name: description: - "The name of the cookie inserted by the load balancer. If this field is not configured, the cookie name defaults to \\"X-Oracle-BMC-LBS-Route\\"." 
- "Example: `example_cookie`" - "**Notes:**" - "* Ensure that the cookie name used at the backend application servers is different from the cookie name used at the load balancer. To minimize the chance of name collision, Oracle recommends that you use a prefix such as \\"X-Oracle-OCI-\\" for this field." - "* If a backend server and the load balancer both insert cookies with the same name, the client or browser behavior can vary depending on the domain and path values associated with the cookie. If the name, domain, and path values of the `Set-cookie` generated by a backend server and the `Set-cookie` generated by the load balancer are all the same, the client or browser treats them as one cookie and returns only one of the cookie values in subsequent requests. If both `Set-cookie` names are the same, but the domain and path names are different, the client or browser treats them as two different cookies." type: str disable_fallback: description: - Whether the load balancer is prevented from directing traffic from a persistent session client to a different backend server if the original server is unavailable. Defaults to false. - "Example: `false`" type: bool domain: description: - The domain in which the cookie is valid. The `Set-cookie` header inserted by the load balancer contains a domain attribute with the specified value. - This attribute has no default value. If you do not specify a value, the load balancer does not insert the domain attribute into the `Set-cookie` header. - "**Notes:**" - "* L(RFC 6265 - HTTP State Management Mechanism,https://www.ietf.org/rfc/rfc6265.txt) describes client and browser behavior when the domain attribute is present or not present in the `Set-cookie` header." - If the value of the `Domain` attribute is `example.com` in the `Set-cookie` header, the client includes the same cookie in the `Cookie` header when making HTTP requests to `example.com`, `www.example.com`, and `www.abc.example.com`. 
If the `Domain` attribute is not present, the client returns the cookie only for the domain to which the original request was made. - "* Ensure that this attribute specifies the correct domain value. If the `Domain` attribute in the `Set-cookie` header does not include the domain to which the original request was made, the client or browser might reject the cookie. As specified in RFC 6265, the client accepts a cookie with the `Domain` attribute value `example.com` or `www.example.com` sent from `www.example.com`. It does not accept a cookie with the `Domain` attribute `abc.example.com` or `www.abc.example.com` sent from `www.example.com`." - "Example: `example.com`" type: str path: description: - The path in which the cookie is valid. The `Set-cookie header` inserted by the load balancer contains a `Path` attribute with the specified value. - Clients include the cookie in an HTTP request only if the path portion of the request-uri matches, or is a subdirectory of, the cookie's `Path` attribute. - The default value is `/`. - "Example: `/example`" type: str max_age_in_seconds: description: - The amount of time the cookie remains valid. The `Set-cookie` header inserted by the load balancer contains a `Max-Age` attribute with the specified value. - The specified value must be at least one second. There is no default value for this attribute. If you do not specify a value, the load balancer does not include the `Max-Age` attribute in the `Set-cookie` header. In most cases, the client or browser retains the cookie until the current session ends, as defined by the client. - "Example: `3600`" type: int is_secure: description: - Whether the `Set-cookie` header should contain the `Secure` attribute. If `true`, the `Set-cookie` header inserted by the load balancer contains the `Secure` attribute, which directs the client or browser to send the cookie only using a secure protocol. 
- "**Note:** If you set this field to `true`, you cannot associate the corresponding backend set with an HTTP listener." - "Example: `true`" type: bool is_http_only: description: - Whether the `Set-cookie` header should contain the `HttpOnly` attribute. If `true`, the `Set-cookie` header inserted by the load balancer contains the `HttpOnly` attribute, which limits the scope of the cookie to HTTP requests. This attribute directs the client or browser to omit the cookie when providing access to cookies through non-HTTP APIs. For example, it restricts the cookie from JavaScript channels. - "Example: `true`" type: bool load_balancer_id: description: - The L(OCID,https://docs.cloud.oracle.com/Content/General/Concepts/identifiers.htm) of the load balancer on which to add a backend set. type: str aliases: ["id"] required: true state: description: - The state of the BackendSet. - Use I(state=present) to create or update a BackendSet. - Use I(state=absent) to delete a BackendSet. type: str required: false default: 'present' choices: ["present", "absent"] extends_documentation_fragment: [ oracle.oci.oracle, oracle.oci.oracle_creatable_resource, oracle.oci.oracle_wait_options ] """ EXAMPLES = """ - name: Create backend_set oci_loadbalancer_backend_set: # required name: name_example policy: policy_example health_checker: # required protocol: protocol_example # optional url_path: url_path_example port: 56 return_code: 56 retries: 56 timeout_in_millis: 56 interval_in_millis: 56 response_body_regex: response_body_regex_example load_balancer_id: "ocid1.loadbalancer.oc1..xxxxxxEXAMPLExxxxxx" # optional backends: - # required ip_address: ip_address_example port: 56 # optional weight: 56 backup: true drain: true offline: true ssl_configuration: # optional verify_depth: 56 verify_peer_certificate: true trusted_certificate_authority_ids: [ "trusted_certificate_authority_ids_example" ] certificate_ids: [ "certificate_ids_example" ] certificate_name: certificate_name_example protocols: [ 
"protocols_example" ] cipher_suite_name: cipher_suite_name_example server_order_preference: ENABLED session_persistence_configuration: # required cookie_name: cookie_name_example # optional disable_fallback: true lb_cookie_session_persistence_configuration: # optional cookie_name: cookie_name_example disable_fallback: true domain: domain_example path: path_example max_age_in_seconds: 56 is_secure: true is_http_only: true - name: Update backend_set oci_loadbalancer_backend_set: # required name: name_example policy: policy_example backends: - # required ip_address: ip_address_example port: 56 # optional weight: 56 backup: true drain: true offline: true health_checker: # required protocol: protocol_example # optional url_path: url_path_example port: 56 return_code: 56 retries: 56 timeout_in_millis: 56 interval_in_millis: 56 response_body_regex: response_body_regex_example load_balancer_id: "ocid1.loadbalancer.oc1..xxxxxxEXAMPLExxxxxx" # optional ssl_configuration: # optional verify_depth: 56 verify_peer_certificate: true trusted_certificate_authority_ids: [ "trusted_certificate_authority_ids_example" ] certificate_ids: [ "certificate_ids_example" ] certificate_name: certificate_name_example protocols: [ "protocols_example" ] cipher_suite_name: cipher_suite_name_example server_order_preference: ENABLED session_persistence_configuration: # required cookie_name: cookie_name_example # optional disable_fallback: true lb_cookie_session_persistence_configuration: # optional cookie_name: cookie_name_example disable_fallback: true domain: domain_example path: path_example max_age_in_seconds: 56 is_secure: true is_http_only: true - name: Delete backend_set oci_loadbalancer_backend_set: # required name: name_example load_balancer_id: "ocid1.loadbalancer.oc1..xxxxxxEXAMPLExxxxxx" state: absent """ RETURN = """ backend_set: description: - Details of the BackendSet resource acted upon by the current operation returned: on success type: complex contains: name: description: - A 
friendly name for the backend set. It must be unique and it cannot be changed. - Valid backend set names include only alphanumeric characters, dashes, and underscores. Backend set names cannot contain spaces. Avoid entering confidential information. - "Example: `example_backend_set`" returned: on success type: str sample: name_example policy: description: - The load balancer policy for the backend set. To get a list of available policies, use the L(ListPolicies,https://docs.cloud.oracle.com/en-us/iaas/api/#/en/loadbalancer/20170115/LoadBalancerPolicy/ListPolicies) operation. - "Example: `LEAST_CONNECTIONS`" returned: on success type: str sample: policy_example backends: description: - "" returned: on success type: complex contains: name: description: - A read-only field showing the IP address and port that uniquely identify this backend server in the backend set. - "Example: `10.0.0.3:8080`" returned: on success type: str sample: name_example ip_address: description: - The IP address of the backend server. - "Example: `10.0.0.3`" returned: on success type: str sample: ip_address_example port: description: - The communication port for the backend server. - "Example: `8080`" returned: on success type: int sample: 56 weight: description: - The load balancing policy weight assigned to the server. Backend servers with a higher weight receive a larger proportion of incoming traffic. For example, a server weighted '3' receives 3 times the number of new connections as a server weighted '1'. For more information on load balancing policies, see L(How Load Balancing Policies Work,https://docs.cloud.oracle.com/Content/Balance/Reference/lbpolicies.htm). - "Example: `3`" returned: on success type: int sample: 56 drain: description: - "Whether the load balancer should drain this server. Servers marked \\"drain\\" receive no new incoming traffic." 
- "Example: `false`" returned: on success type: bool sample: true backup: description: - "Whether the load balancer should treat this server as a backup unit. If `true`, the load balancer forwards no ingress traffic to this backend server unless all other backend servers not marked as \\"backup\\" fail the health check policy." - "**Note:** You cannot add a backend server marked as `backup` to a backend set that uses the IP Hash policy." - "Example: `false`" returned: on success type: bool sample: true offline: description: - Whether the load balancer should treat this server as offline. Offline servers receive no incoming traffic. - "Example: `false`" returned: on success type: bool sample: true health_checker: description: - "" returned: on success type: complex contains: protocol: description: - The protocol the health check must use; either HTTP or TCP. - "Example: `HTTP`" returned: on success type: str sample: protocol_example url_path: description: - The path against which to run the health check. - "Example: `/healthcheck`" returned: on success type: str sample: url_path_example port: description: - The backend server port against which to run the health check. If the port is not specified, the load balancer uses the port information from the `Backend` object. - "Example: `8080`" returned: on success type: int sample: 56 return_code: description: - "The status code a healthy backend server should return. If you configure the health check policy to use the HTTP protocol, you can use common HTTP status codes such as \\"200\\"." - "Example: `200`" returned: on success type: int sample: 56 retries: description: - "The number of retries to attempt before a backend server is considered \\"unhealthy\\". This number also applies when recovering a server to the \\"healthy\\" state. Defaults to 3." - "Example: `3`" returned: on success type: int sample: 56 timeout_in_millis: description: - The maximum time, in milliseconds, to wait for a reply to a health check. 
A health check is successful only if a reply returns within this timeout period. Defaults to 3000 (3 seconds). - "Example: `3000`" returned: on success type: int sample: 56 interval_in_millis: description: - The interval between health checks, in milliseconds. The default is 10000 (10 seconds). - "Example: `10000`" returned: on success type: int sample: 56 response_body_regex: description: - A regular expression for parsing the response body from the backend server. - "Example: `^((?!false).|\\\\s)*$`" returned: on success type: str sample: response_body_regex_example ssl_configuration: description: - "" returned: on success type: complex contains: verify_depth: description: - The maximum depth for peer certificate chain verification. - "Example: `3`" returned: on success type: int sample: 56 verify_peer_certificate: description: - Whether the load balancer listener should verify peer certificates. - "Example: `true`" returned: on success type: bool sample: true trusted_certificate_authority_ids: description: - Ids for OCI certificates service CA or CA bundles for the load balancer to trust. - "Example: `[ocid1.cabundle.oc1.us-ashburn-1.amaaaaaaav3bgsaagl4zzyqdop5i2vuwoqewdvauuw34llqa74otq2jdsfyq]`" returned: on success type: list sample: [] certificate_ids: description: - Ids for OCI certificates service certificates. Currently only a single Id may be passed. - "Example: `[ocid1.certificate.oc1.us-ashburn-1.amaaaaaaav3bgsaa5o2q7rh5nfmkkukfkogasqhk6af2opufhjlqg7m6jqzq]`" returned: on success type: list sample: [] certificate_name: description: - A friendly name for the certificate bundle. It must be unique and it cannot be changed. Valid certificate bundle names include only alphanumeric characters, dashes, and underscores. Certificate bundle names cannot contain spaces. Avoid entering confidential information. 
- "Example: `example_certificate_bundle`" returned: on success type: str sample: certificate_name_example server_order_preference: description: - When this attribute is set to ENABLED, the system gives preference to the server ciphers over the client ciphers. - "**Note:** This configuration is applicable only when the load balancer is acting as an SSL/HTTPS server. This field is ignored when the `SSLConfiguration` object is associated with a backend set." returned: on success type: str sample: ENABLED cipher_suite_name: description: - The name of the cipher suite to use for HTTPS or SSL connections. - If this field is not specified, the default is `oci-default-ssl-cipher-suite-v1`. - "**Notes:**" - "* You must ensure compatibility between the specified SSL protocols and the ciphers configured in the cipher suite. Clients cannot perform an SSL handshake if there is an incompatible configuration. * You must ensure compatibility between the ciphers configured in the cipher suite and the configured certificates. For example, RSA-based ciphers require RSA certificates and ECDSA-based ciphers require ECDSA certificates. * If the cipher configuration is not modified after load balancer creation, the `GET` operation returns `oci-default-ssl-cipher-suite-v1` as the value of this field in the SSL configuration for existing listeners that predate this feature. * If the cipher configuration was modified using Oracle operations after load balancer creation, the `GET` operation returns `oci-customized-ssl-cipher-suite` as the value of this field in the SSL configuration for existing listeners that predate this feature. * The `GET` operation returns `oci-wider-compatible-ssl-cipher-suite-v1` as the value of this field in the SSL configuration for existing backend sets that predate this feature. 
* If the `GET` operation on a listener returns `oci-customized-ssl-cipher-suite` as the value of this field, you must specify an appropriate predefined or custom cipher suite name when updating the resource. * The `oci-customized-ssl-cipher-suite` Oracle reserved cipher suite name is not accepted as valid input for this field." - "example: `example_cipher_suite`" returned: on success type: str sample: cipher_suite_name_example protocols: description: - A list of SSL protocols the load balancer must support for HTTPS or SSL connections. - The load balancer uses SSL protocols to establish a secure connection between a client and a server. A secure connection ensures that all data passed between the client and the server is private. - "The Load Balancing service supports the following protocols:" - "* TLSv1 * TLSv1.1 * TLSv1.2" - If this field is not specified, TLSv1.2 is the default. - "**Warning:** All SSL listeners created on a given port must use the same set of SSL protocols." - "**Notes:**" - "* The handshake to establish an SSL connection fails if the client supports none of the specified protocols. * You must ensure compatibility between the specified SSL protocols and the ciphers configured in the cipher suite. * For all existing load balancer listeners and backend sets that predate this feature, the `GET` operation displays a list of SSL protocols currently used by those resources." - "example: `[\\"TLSv1.1\\", \\"TLSv1.2\\"]`" returned: on success type: list sample: [] session_persistence_configuration: description: - "" returned: on success type: complex contains: cookie_name: description: - "The name of the cookie used to detect a session initiated by the backend server. Use '*' to specify that any cookie set by the backend causes the session to persist." 
- "Example: `example_cookie`" returned: on success type: str sample: cookie_name_example disable_fallback: description: - Whether the load balancer is prevented from directing traffic from a persistent session client to a different backend server if the original server is unavailable. Defaults to false. - "Example: `false`" returned: on success type: bool sample: true lb_cookie_session_persistence_configuration: description: - "" returned: on success type: complex contains: cookie_name: description: - "The name of the cookie inserted by the load balancer. If this field is not configured, the cookie name defaults to \\"X-Oracle-BMC-LBS-Route\\"." - "Example: `example_cookie`" - "**Notes:**" - "* Ensure that the cookie name used at the backend application servers is different from the cookie name used at the load balancer. To minimize the chance of name collision, Oracle recommends that you use a prefix such as \\"X-Oracle-OCI-\\" for this field." - "* If a backend server and the load balancer both insert cookies with the same name, the client or browser behavior can vary depending on the domain and path values associated with the cookie. If the name, domain, and path values of the `Set-cookie` generated by a backend server and the `Set-cookie` generated by the load balancer are all the same, the client or browser treats them as one cookie and returns only one of the cookie values in subsequent requests. If both `Set-cookie` names are the same, but the domain and path names are different, the client or browser treats them as two different cookies." returned: on success type: str sample: cookie_name_example disable_fallback: description: - Whether the load balancer is prevented from directing traffic from a persistent session client to a different backend server if the original server is unavailable. Defaults to false. - "Example: `false`" returned: on success type: bool sample: true domain: description: - The domain in which the cookie is valid. 
The `Set-cookie` header inserted by the load balancer contains a domain attribute with the specified value. - This attribute has no default value. If you do not specify a value, the load balancer does not insert the domain attribute into the `Set-cookie` header. - "**Notes:**" - "* L(RFC 6265 - HTTP State Management Mechanism,https://www.ietf.org/rfc/rfc6265.txt) describes client and browser behavior when the domain attribute is present or not present in the `Set-cookie` header." - If the value of the `Domain` attribute is `example.com` in the `Set-cookie` header, the client includes the same cookie in the `Cookie` header when making HTTP requests to `example.com`, `www.example.com`, and `www.abc.example.com`. If the `Domain` attribute is not present, the client returns the cookie only for the domain to which the original request was made. - "* Ensure that this attribute specifies the correct domain value. If the `Domain` attribute in the `Set-cookie` header does not include the domain to which the original request was made, the client or browser might reject the cookie. As specified in RFC 6265, the client accepts a cookie with the `Domain` attribute value `example.com` or `www.example.com` sent from `www.example.com`. It does not accept a cookie with the `Domain` attribute `abc.example.com` or `www.abc.example.com` sent from `www.example.com`." - "Example: `example.com`" returned: on success type: str sample: domain_example path: description: - The path in which the cookie is valid. The `Set-cookie header` inserted by the load balancer contains a `Path` attribute with the specified value. - Clients include the cookie in an HTTP request only if the path portion of the request-uri matches, or is a subdirectory of, the cookie's `Path` attribute. - The default value is `/`. - "Example: `/example`" returned: on success type: str sample: path_example max_age_in_seconds: description: - The amount of time the cookie remains valid. 
The `Set-cookie` header inserted by the load balancer contains a `Max-Age` attribute with the specified value. - The specified value must be at least one second. There is no default value for this attribute. If you do not specify a value, the load balancer does not include the `Max-Age` attribute in the `Set-cookie` header. In most cases, the client or browser retains the cookie until the current session ends, as defined by the client. - "Example: `3600`" returned: on success type: int sample: 56 is_secure: description: - Whether the `Set-cookie` header should contain the `Secure` attribute. If `true`, the `Set-cookie` header inserted by the load balancer contains the `Secure` attribute, which directs the client or browser to send the cookie only using a secure protocol. - "**Note:** If you set this field to `true`, you cannot associate the corresponding backend set with an HTTP listener." - "Example: `true`" returned: on success type: bool sample: true is_http_only: description: - Whether the `Set-cookie` header should contain the `HttpOnly` attribute. If `true`, the `Set-cookie` header inserted by the load balancer contains the `HttpOnly` attribute, which limits the scope of the cookie to HTTP requests. This attribute directs the client or browser to omit the cookie when providing access to cookies through non-HTTP APIs. For example, it restricts the cookie from JavaScript channels. 
- "Example: `true`" returned: on success type: bool sample: true sample: { "name": "name_example", "policy": "policy_example", "backends": [{ "name": "name_example", "ip_address": "ip_address_example", "port": 56, "weight": 56, "drain": true, "backup": true, "offline": true }], "health_checker": { "protocol": "protocol_example", "url_path": "url_path_example", "port": 56, "return_code": 56, "retries": 56, "timeout_in_millis": 56, "interval_in_millis": 56, "response_body_regex": "response_body_regex_example" }, "ssl_configuration": { "verify_depth": 56, "verify_peer_certificate": true, "trusted_certificate_authority_ids": [], "certificate_ids": [], "certificate_name": "certificate_name_example", "server_order_preference": "ENABLED", "cipher_suite_name": "cipher_suite_name_example", "protocols": [] }, "session_persistence_configuration": { "cookie_name": "cookie_name_example", "disable_fallback": true }, "lb_cookie_session_persistence_configuration": { "cookie_name": "cookie_name_example", "disable_fallback": true, "domain": "domain_example", "path": "path_example", "max_age_in_seconds": 56, "is_secure": true, "is_http_only": true } } """ from ansible.module_utils.basic import AnsibleModule from ansible_collections.oracle.oci.plugins.module_utils import ( oci_common_utils, oci_wait_utils, ) from ansible_collections.oracle.oci.plugins.module_utils.oci_resource_utils import ( OCIResourceHelperBase, get_custom_class, ) try: from oci.load_balancer import LoadBalancerClient from oci.load_balancer.models import CreateBackendSetDetails from oci.load_balancer.models import UpdateBackendSetDetails HAS_OCI_PY_SDK = True except ImportError: HAS_OCI_PY_SDK = False class BackendSetHelperGen(OCIResourceHelperBase): def get_possible_entity_types(self): return super(BackendSetHelperGen, self).get_possible_entity_types() + [ "backendset", "backendsets", "loadBalancerbackendset", "loadBalancerbackendsets", "backendsetresource", "backendsetsresource", "loadbalancer", ] def 
get_module_resource_id_param(self): return "name" def get_module_resource_id(self): return self.module.params.get("name") def get_get_fn(self): return self.client.get_backend_set def get_resource(self): return oci_common_utils.call_with_backoff( self.client.get_backend_set, load_balancer_id=self.module.params.get("load_balancer_id"), backend_set_name=self.module.params.get("name"), ) def get_required_kwargs_for_list(self): required_list_method_params = [ "load_balancer_id", ] return dict( (param, self.module.params[param]) for param in required_list_method_params ) def get_optional_kwargs_for_list(self): return dict() def list_resources(self): required_kwargs = self.get_required_kwargs_for_list() optional_kwargs = self.get_optional_kwargs_for_list() kwargs = oci_common_utils.merge_dicts(required_kwargs, optional_kwargs) return oci_common_utils.list_all_resources( self.client.list_backend_sets, **kwargs ) def get_create_model_class(self): return CreateBackendSetDetails def is_update(self): if not self.module.params.get("state") == "present": return False return self.does_resource_exist() def is_create(self): if not self.module.params.get("state") == "present": return False return not self.does_resource_exist() def create_resource(self): create_details = self.get_create_model() return oci_wait_utils.call_and_wait( call_fn=self.client.create_backend_set, call_fn_args=(), call_fn_kwargs=dict( create_backend_set_details=create_details, load_balancer_id=self.module.params.get("load_balancer_id"), ), waiter_type=oci_wait_utils.WORK_REQUEST_WAITER_KEY, operation=oci_common_utils.CREATE_OPERATION_KEY, waiter_client=self.get_waiter_client(), resource_helper=self, wait_for_states=oci_common_utils.get_work_request_completed_states(), ) def get_update_model_class(self): return UpdateBackendSetDetails def update_resource(self): update_details = self.get_update_model() return oci_wait_utils.call_and_wait( call_fn=self.client.update_backend_set, call_fn_args=(), 
call_fn_kwargs=dict( update_backend_set_details=update_details, load_balancer_id=self.module.params.get("load_balancer_id"), backend_set_name=self.module.params.get("name"), ), waiter_type=oci_wait_utils.WORK_REQUEST_WAITER_KEY, operation=oci_common_utils.UPDATE_OPERATION_KEY, waiter_client=self.get_waiter_client(), resource_helper=self, wait_for_states=oci_common_utils.get_work_request_completed_states(), ) def delete_resource(self): return oci_wait_utils.call_and_wait( call_fn=self.client.delete_backend_set, call_fn_args=(), call_fn_kwargs=dict( load_balancer_id=self.module.params.get("load_balancer_id"), backend_set_name=self.module.params.get("name"), ), waiter_type=oci_wait_utils.WORK_REQUEST_WAITER_KEY, operation=oci_common_utils.DELETE_OPERATION_KEY, waiter_client=self.get_waiter_client(), resource_helper=self, wait_for_states=oci_common_utils.get_work_request_completed_states(), ) BackendSetHelperCustom = get_custom_class("BackendSetHelperCustom") class ResourceHelper(BackendSetHelperCustom, BackendSetHelperGen): pass def main(): module_args = oci_common_utils.get_common_arg_spec( supports_create=True, supports_wait=True ) module_args.update( dict( name=dict(type="str", required=True), policy=dict(type="str"), backends=dict( type="list", elements="dict", options=dict( ip_address=dict(type="str", required=True), port=dict(type="int", required=True), weight=dict(type="int"), backup=dict(type="bool"), drain=dict(type="bool"), offline=dict(type="bool"), ), ), health_checker=dict( type="dict", options=dict( protocol=dict(type="str", required=True), url_path=dict(type="str"), port=dict(type="int"), return_code=dict(type="int"), retries=dict(type="int"), timeout_in_millis=dict(type="int"), interval_in_millis=dict(type="int"), response_body_regex=dict(type="str"), ), ), ssl_configuration=dict( type="dict", options=dict( verify_depth=dict(type="int"), verify_peer_certificate=dict(type="bool"), trusted_certificate_authority_ids=dict(type="list", elements="str"), 
certificate_ids=dict(type="list", elements="str"), certificate_name=dict(type="str"), protocols=dict(type="list", elements="str"), cipher_suite_name=dict(type="str"), server_order_preference=dict( type="str", choices=["ENABLED", "DISABLED"] ), ), ), session_persistence_configuration=dict( type="dict", options=dict( cookie_name=dict(type="str", required=True), disable_fallback=dict(type="bool"), ), ), lb_cookie_session_persistence_configuration=dict( type="dict", options=dict( cookie_name=dict(type="str"), disable_fallback=dict(type="bool"), domain=dict(type="str"), path=dict(type="str"), max_age_in_seconds=dict(type="int"), is_secure=dict(type="bool"), is_http_only=dict(type="bool"), ), ), load_balancer_id=dict(aliases=["id"], type="str", required=True), state=dict(type="str", default="present", choices=["present", "absent"]), ) ) module = AnsibleModule(argument_spec=module_args, supports_check_mode=True) if not HAS_OCI_PY_SDK: module.fail_json(msg="oci python sdk required for this module.") resource_helper = ResourceHelper( module=module, resource_type="backend_set", service_client_class=LoadBalancerClient, namespace="load_balancer", ) result = dict(changed=False) if resource_helper.is_delete(): result = resource_helper.delete() elif resource_helper.is_update(): result = resource_helper.update() elif resource_helper.is_create(): result = resource_helper.create() module.exit_json(**result) if __name__ == "__main__": main()
true
true
f723dcbf78bff84e6aa9186e1ba18550f5791807
4,002
py
Python
CodingEasy/settings.py
Atif0604/CodingEasy
75d7e88dd7ab514ee4fdaa4b1b80175d78c5a91c
[ "MIT" ]
40
2021-12-22T15:16:03.000Z
2022-03-26T08:24:04.000Z
CodingEasy/settings.py
Atif0604/CodingEasy
75d7e88dd7ab514ee4fdaa4b1b80175d78c5a91c
[ "MIT" ]
222
2021-12-14T05:37:10.000Z
2022-03-31T16:38:59.000Z
CodingEasy/settings.py
Atif0604/CodingEasy
75d7e88dd7ab514ee4fdaa4b1b80175d78c5a91c
[ "MIT" ]
89
2021-12-14T05:00:23.000Z
2022-03-29T10:55:25.000Z
""" Django settings for CodingEasy project. Generated by 'django-admin startproject' using Django 4.0.1. For more information on this file, see https://docs.djangoproject.com/en/4.0/topics/settings/ For the full list of settings and their values, see https://docs.djangoproject.com/en/4.0/ref/settings/ """ from pathlib import Path # Build paths inside the project like this: BASE_DIR / 'subdir'. BASE_DIR = Path(__file__).resolve().parent.parent # Quick-start development settings - unsuitable for production # See https://docs.djangoproject.com/en/4.0/howto/deployment/checklist/ # SECURITY WARNING: keep the secret key used in production secret! SECRET_KEY = 'django-insecure-x_1=q*b(j*34f(dg0^2sa)-f$k^!0d(qa=@geze9s@8)-(!hy5' # SECURITY WARNING: don't run with debug turned on in production! DEBUG = True ALLOWED_HOSTS = ["*"] # Application definition INSTALLED_APPS = [ 'django.contrib.admin', 'django.contrib.auth', 'django.contrib.contenttypes', 'django.contrib.sessions', 'django.contrib.messages', 'django.contrib.staticfiles', 'home.apps.HomeConfig', 'blog.apps.BlogConfig', 'crispy_forms', ] MIDDLEWARE = [ 'django.middleware.security.SecurityMiddleware', 'django.contrib.sessions.middleware.SessionMiddleware', 'django.middleware.common.CommonMiddleware', 'django.middleware.csrf.CsrfViewMiddleware', 'django.contrib.auth.middleware.AuthenticationMiddleware', 'django.contrib.messages.middleware.MessageMiddleware', 'django.middleware.clickjacking.XFrameOptionsMiddleware', ] ROOT_URLCONF = 'CodingEasy.urls' TEMPLATES = [ { 'BACKEND': 'django.template.backends.django.DjangoTemplates', 'DIRS': [BASE_DIR / 'templates'], 'APP_DIRS': True, 'OPTIONS': { 'context_processors': [ 'django.template.context_processors.debug', 'django.template.context_processors.request', 'django.contrib.auth.context_processors.auth', 'django.contrib.messages.context_processors.messages', ], }, }, ] WSGI_APPLICATION = 'CodingEasy.wsgi.application' # Database # 
https://docs.djangoproject.com/en/4.0/ref/settings/#databases DATABASES = { 'default': { 'ENGINE': 'django.db.backends.sqlite3', 'NAME': BASE_DIR / 'db.sqlite3', } } # Password validation # https://docs.djangoproject.com/en/4.0/ref/settings/#auth-password-validators AUTH_PASSWORD_VALIDATORS = [ { 'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator', }, { 'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator', }, { 'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator', }, { 'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator', }, ] # Internationalization # https://docs.djangoproject.com/en/4.0/topics/i18n/ LANGUAGE_CODE = 'en-us' TIME_ZONE = 'UTC' USE_I18N = True USE_TZ = True # Static files (CSS, JavaScript, Images) # https://docs.djangoproject.com/en/4.0/howto/static-files/ STATIC_URL = 'static/' STATICFILES_DIRS = [ BASE_DIR / "static", '/var/www/static/', ] # Make a directory to save user profile images MEDIA_ROOT = BASE_DIR / 'media' MEDIA_URL = '/media/' # Default primary key field type # https://docs.djangoproject.com/en/4.0/ref/settings/#default-auto-field DEFAULT_AUTO_FIELD = 'django.db.models.BigAutoField' # Crispy Forms Styling Engine definition CRISPY_TEMPLATE_PACK = 'bootstrap4' # Register the Login Redirect URL LOGIN_REDIRECT_URL = 'home-index' # Restricted pages automatically redirect at login form LOGIN_URL = 'login' EMAIL_BACKEND = 'django.core.mail.backends.smtp.EmailBackend' EMAIL_HOST = 'smtp.gmail.com' EMAIL_PORT = 587 EMAIL_USE_TLS = True EMAIL_HOST_USER = '##########' # Put your gmail address here EMAIL_HOST_PASSWORD = '##########' # Put your gmail address password here
26.156863
91
0.703648
from pathlib import Path BASE_DIR = Path(__file__).resolve().parent.parent SECRET_KEY = 'django-insecure-x_1=q*b(j*34f(dg0^2sa)-f$k^!0d(qa=@geze9s@8)-(!hy5' DEBUG = True ALLOWED_HOSTS = ["*"] # Application definition INSTALLED_APPS = [ 'django.contrib.admin', 'django.contrib.auth', 'django.contrib.contenttypes', 'django.contrib.sessions', 'django.contrib.messages', 'django.contrib.staticfiles', 'home.apps.HomeConfig', 'blog.apps.BlogConfig', 'crispy_forms', ] MIDDLEWARE = [ 'django.middleware.security.SecurityMiddleware', 'django.contrib.sessions.middleware.SessionMiddleware', 'django.middleware.common.CommonMiddleware', 'django.middleware.csrf.CsrfViewMiddleware', 'django.contrib.auth.middleware.AuthenticationMiddleware', 'django.contrib.messages.middleware.MessageMiddleware', 'django.middleware.clickjacking.XFrameOptionsMiddleware', ] ROOT_URLCONF = 'CodingEasy.urls' TEMPLATES = [ { 'BACKEND': 'django.template.backends.django.DjangoTemplates', 'DIRS': [BASE_DIR / 'templates'], 'APP_DIRS': True, 'OPTIONS': { 'context_processors': [ 'django.template.context_processors.debug', 'django.template.context_processors.request', 'django.contrib.auth.context_processors.auth', 'django.contrib.messages.context_processors.messages', ], }, }, ] WSGI_APPLICATION = 'CodingEasy.wsgi.application' # Database # https://docs.djangoproject.com/en/4.0/ref/settings/#databases DATABASES = { 'default': { 'ENGINE': 'django.db.backends.sqlite3', 'NAME': BASE_DIR / 'db.sqlite3', } } # Password validation # https://docs.djangoproject.com/en/4.0/ref/settings/#auth-password-validators AUTH_PASSWORD_VALIDATORS = [ { 'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator', }, { 'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator', }, { 'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator', }, { 'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator', }, ] # Internationalization # 
https://docs.djangoproject.com/en/4.0/topics/i18n/ LANGUAGE_CODE = 'en-us' TIME_ZONE = 'UTC' USE_I18N = True USE_TZ = True # Static files (CSS, JavaScript, Images) # https://docs.djangoproject.com/en/4.0/howto/static-files/ STATIC_URL = 'static/' STATICFILES_DIRS = [ BASE_DIR / "static", '/var/www/static/', ] # Make a directory to save user profile images MEDIA_ROOT = BASE_DIR / 'media' MEDIA_URL = '/media/' # Default primary key field type # https://docs.djangoproject.com/en/4.0/ref/settings/#default-auto-field DEFAULT_AUTO_FIELD = 'django.db.models.BigAutoField' # Crispy Forms Styling Engine definition CRISPY_TEMPLATE_PACK = 'bootstrap4' # Register the Login Redirect URL LOGIN_REDIRECT_URL = 'home-index' # Restricted pages automatically redirect at login form LOGIN_URL = 'login' EMAIL_BACKEND = 'django.core.mail.backends.smtp.EmailBackend' EMAIL_HOST = 'smtp.gmail.com' EMAIL_PORT = 587 EMAIL_USE_TLS = True EMAIL_HOST_USER = '
true
true
f723def610c1a3cf3e0216947eb0d00eb4392e68
9,125
py
Python
tf_object_detection/to_tfrecords.py
AndresGarciaEscalante/bstld
cc37fb3388b7731be9e76fd1c4e2be13b6716afe
[ "MIT" ]
null
null
null
tf_object_detection/to_tfrecords.py
AndresGarciaEscalante/bstld
cc37fb3388b7731be9e76fd1c4e2be13b6716afe
[ "MIT" ]
null
null
null
tf_object_detection/to_tfrecords.py
AndresGarciaEscalante/bstld
cc37fb3388b7731be9e76fd1c4e2be13b6716afe
[ "MIT" ]
null
null
null
#!/usr/bin/env python3 """ Creates full-image tfrecords to use the Bosch Small Traffic Lights Dataset with the Tensorflow Object Detection API. The training set is split into training and validation. Tfrecords are created for a training, validation, and test set. Labels are grouped by their respective colors to simplify training and because the test-set does not contain any arrows. Depending on the training method, you may want to look into creating random crops from the images which can increase training performance due to translated inputs. The tfrecords come without any image augmentation. The created tfrecords will be about 18GB. Usage: In the folder with the extracted traffic lights dataset, run python /path/to/this/file/to_tfrecords.py and it will create the tfrecords there. The path of the annotation files, tfrecords, and dataset folder can be specified. Note that this is a tutorial file. There are only few checks and no logging. """ import argparse from collections import OrderedDict, defaultdict import hashlib import os from random import shuffle import cv2 import tensorflow as tf import tqdm # https://github.com/tensorflow/models/blob/master/research/object_detection/g3doc/installation.md from object_detection.utils import dataset_util import sys # getting the name of the directory # where the this file is present. current = os.path.dirname(os.path.realpath(__file__)) # Getting the parent directory name # where the current directory is present. parent = os.path.dirname(current) # adding the parent directory to # the sys.path. 
sys.path.append(parent) from read_label_file import get_all_labels from tf_object_detection import constants def label_id(label_string): """ For detections without classification """ # For object proposals only, you could return 1 return constants.TF_ID_MAP[constants.SIMPLIFIED_CLASSES[label_string]] def modified_label_string(label_string): """ To simplify the problem, training classes are grouped by color """ return constants.SIMPLIFIED_CLASSES[label_string].encode('utf8') def list_of_dicts_to_dict_of_lists(list_of_dicts): """ [{'a': 0, 'b':3}, {'a': 3, 'b':5}] --> {'a': [0, 3], 'b': [3, 5]}""" assert isinstance(list_of_dicts, list) dict_lists = defaultdict(list) for some_dict in list_of_dicts: for key, value in some_dict.items(): dict_lists[key].append(value) return dict_lists def clip(some_value): """ Clip values outside [0, 1]. float -> float """ # Just in case some very eager annotators detected lights outside the image. It happens return max(0, min(some_value, 1)) def create_object_detection_tfrecords(labels, tfrecords_path, dataset_folder, set_name=''): """ Creates a tfrecord dataset specific to tensorflow/models/research/objection_detection params: labels: list of annotations as defined in annotation yamls tfrecords_path: output path to create tfrecords dataset_folder: path to bstld folder, must include rgb directory """ #shuffle(labels) writer = tf.io.TFRecordWriter(tfrecords_path) for label in tqdm.tqdm(labels, desc='Creating {}-set'.format(set_name)): image_path = os.path.join(dataset_folder, label['path']) image = cv2.imread(image_path) if image is None: print('Did you extract the training, validation, and additional images?') raise IOError('Missing: {}'.format(image_path)) height, width, _ = image.shape boxes = list_of_dicts_to_dict_of_lists(label['boxes']) classes = boxes['label'] xmin = list(map(lambda x: clip(x / float(width)), boxes['x_min'])) ymin = list(map(lambda y: clip(y / float(height)), boxes['y_min'])) xmax = list(map(lambda x: clip(x / 
float(width)), boxes['x_max'])) ymax = list(map(lambda y: clip(y / float(height)), boxes['y_max'])) assert len(xmin) == len(xmax) == len(ymin) assert len(ymax) == len(classes) == len(label['boxes']) if not classes: continue # We don't need empty images, there are enough negatives _, image = cv2.imencode('.png', image) # Assuming that works image = image.tostring() sha256 = hashlib.sha256(image).hexdigest() image_format = 'png' complete_example = tf.train.Example(features=tf.train.Features(feature={ 'image/height': dataset_util.int64_feature(height), 'image/width': dataset_util.int64_feature(width), 'image/filename': dataset_util.bytes_feature(image_path.encode('utf8')), 'image/encoded': tf.train.Feature(bytes_list=tf.train.BytesList(value=[image])), 'image/format': dataset_util.bytes_feature(image_format.encode('utf8')), 'image/source_id': dataset_util.bytes_feature(image_path.encode('utf8')), 'image/key/sha256': dataset_util.bytes_feature(sha256.encode('utf8')), 'image/object/bbox/xmin': dataset_util.float_list_feature(xmin), 'image/object/bbox/xmax': dataset_util.float_list_feature(xmax), 'image/object/bbox/ymin': dataset_util.float_list_feature(ymin), 'image/object/bbox/ymax': dataset_util.float_list_feature(ymax), 'image/object/class/text': dataset_util.bytes_list_feature( list(map(modified_label_string, classes))), 'image/object/class/label': dataset_util.int64_list_feature( list(map(label_id, classes))), })) writer.write(complete_example.SerializeToString()) writer.close() def split_train_labels(train_labels): # one entry for each image in a folder/video to check their sizes later train_videos = [os.path.split(os.path.split(train_label['path'])[0])[1] for train_label in train_labels] # NOTE Because set order is not guaranteed (and we want to support different Python versions) video_dict = OrderedDict().fromkeys(train_videos) video_lengths = [train_videos.count(video) for video in video_dict.keys()] # The first three videos are used for the validation set. 
# Note that this may not be a completely clean validation set as the sequences # were captured independently but may be on the same day and are taken within # the same general area. This split is for object detection demonstation # purposes only. For clean dataset separation, the sequences would need to be # recorded on separate days and preferably in different areas. # # validation samples: 933, training samples: 4160 (+215 additional) num_valid_samples = sum(video_lengths[:3]) return train_labels[num_valid_samples:], train_labels[:num_valid_samples] def create_datasets(config): """ Splits labels and creates datasets """ train_labels = get_all_labels(config['train_yaml']) test_labels = get_all_labels(config['test_yaml']) if config['additional_yaml']: additional_labels = get_all_labels(config['additional_yaml']) # Split training labels into training and validation for "more correct" validation train_labels, valid_labels = split_train_labels(train_labels) train_labels.extend(additional_labels) # add unappealing images to training set if not os.path.isdir(config['dataset_folder']) or\ not os.path.isdir(os.path.join(config['dataset_folder'], 'rgb')): print('Dataset_folder needs to contain extracted dataset, including the rgb folder') print('{} does not fulfill those requirements'.format(config['dataset_folder'])) create_object_detection_tfrecords( train_labels, config['train_tfrecord'], config['dataset_folder'], 'train') create_object_detection_tfrecords( valid_labels, config['valid_tfrecord'], config['dataset_folder'], 'valid') create_object_detection_tfrecords( test_labels, config['test_tfrecord'], config['dataset_folder'], 'test') print('Done creating tfrecords') def parse_args(): """ Command line args to tfrecords creation config """ parser = argparse.ArgumentParser(description=__doc__) parser.add_argument('--train_yaml', default='train.yaml', help='Path to train.yaml') parser.add_argument('--test_yaml', default='test.yaml', help='Path to test.yaml') 
parser.add_argument('--additional_yaml', default='additional_train.yaml', help='Path to train_additional.yaml') parser.add_argument('--dataset_folder', default='.', help='Path to dataset folder') parser.add_argument('--train_tfrecord', default='train.tfrecords', help='Path to train.tfrecord') parser.add_argument('--valid_tfrecord', default='valid.tfrecords', help='Path to valid.tfrecord') parser.add_argument('--test_tfrecord', default='test.tfrecords', help='Path to test.tfrecord') args = vars(parser.parse_args()) return args if __name__ == '__main__': config = parse_args() create_datasets(config)
43.452381
98
0.699288
import argparse from collections import OrderedDict, defaultdict import hashlib import os from random import shuffle import cv2 import tensorflow as tf import tqdm from object_detection.utils import dataset_util import sys current = os.path.dirname(os.path.realpath(__file__)) parent = os.path.dirname(current) sys.path.append(parent) from read_label_file import get_all_labels from tf_object_detection import constants def label_id(label_string): return constants.TF_ID_MAP[constants.SIMPLIFIED_CLASSES[label_string]] def modified_label_string(label_string): return constants.SIMPLIFIED_CLASSES[label_string].encode('utf8') def list_of_dicts_to_dict_of_lists(list_of_dicts): assert isinstance(list_of_dicts, list) dict_lists = defaultdict(list) for some_dict in list_of_dicts: for key, value in some_dict.items(): dict_lists[key].append(value) return dict_lists def clip(some_value): return max(0, min(some_value, 1)) def create_object_detection_tfrecords(labels, tfrecords_path, dataset_folder, set_name=''): writer = tf.io.TFRecordWriter(tfrecords_path) for label in tqdm.tqdm(labels, desc='Creating {}-set'.format(set_name)): image_path = os.path.join(dataset_folder, label['path']) image = cv2.imread(image_path) if image is None: print('Did you extract the training, validation, and additional images?') raise IOError('Missing: {}'.format(image_path)) height, width, _ = image.shape boxes = list_of_dicts_to_dict_of_lists(label['boxes']) classes = boxes['label'] xmin = list(map(lambda x: clip(x / float(width)), boxes['x_min'])) ymin = list(map(lambda y: clip(y / float(height)), boxes['y_min'])) xmax = list(map(lambda x: clip(x / float(width)), boxes['x_max'])) ymax = list(map(lambda y: clip(y / float(height)), boxes['y_max'])) assert len(xmin) == len(xmax) == len(ymin) assert len(ymax) == len(classes) == len(label['boxes']) if not classes: continue _, image = cv2.imencode('.png', image) # Assuming that works image = image.tostring() sha256 = hashlib.sha256(image).hexdigest() 
image_format = 'png' complete_example = tf.train.Example(features=tf.train.Features(feature={ 'image/height': dataset_util.int64_feature(height), 'image/width': dataset_util.int64_feature(width), 'image/filename': dataset_util.bytes_feature(image_path.encode('utf8')), 'image/encoded': tf.train.Feature(bytes_list=tf.train.BytesList(value=[image])), 'image/format': dataset_util.bytes_feature(image_format.encode('utf8')), 'image/source_id': dataset_util.bytes_feature(image_path.encode('utf8')), 'image/key/sha256': dataset_util.bytes_feature(sha256.encode('utf8')), 'image/object/bbox/xmin': dataset_util.float_list_feature(xmin), 'image/object/bbox/xmax': dataset_util.float_list_feature(xmax), 'image/object/bbox/ymin': dataset_util.float_list_feature(ymin), 'image/object/bbox/ymax': dataset_util.float_list_feature(ymax), 'image/object/class/text': dataset_util.bytes_list_feature( list(map(modified_label_string, classes))), 'image/object/class/label': dataset_util.int64_list_feature( list(map(label_id, classes))), })) writer.write(complete_example.SerializeToString()) writer.close() def split_train_labels(train_labels): # one entry for each image in a folder/video to check their sizes later train_videos = [os.path.split(os.path.split(train_label['path'])[0])[1] for train_label in train_labels] # NOTE Because set order is not guaranteed (and we want to support different Python versions) video_dict = OrderedDict().fromkeys(train_videos) video_lengths = [train_videos.count(video) for video in video_dict.keys()] # The first three videos are used for the validation set. # Note that this may not be a completely clean validation set as the sequences # were captured independently but may be on the same day and are taken within # the same general area. This split is for object detection demonstation # purposes only. For clean dataset separation, the sequences would need to be # recorded on separate days and preferably in different areas. 
# # validation samples: 933, training samples: 4160 (+215 additional) num_valid_samples = sum(video_lengths[:3]) return train_labels[num_valid_samples:], train_labels[:num_valid_samples] def create_datasets(config): train_labels = get_all_labels(config['train_yaml']) test_labels = get_all_labels(config['test_yaml']) if config['additional_yaml']: additional_labels = get_all_labels(config['additional_yaml']) # Split training labels into training and validation for "more correct" validation train_labels, valid_labels = split_train_labels(train_labels) train_labels.extend(additional_labels) # add unappealing images to training set if not os.path.isdir(config['dataset_folder']) or\ not os.path.isdir(os.path.join(config['dataset_folder'], 'rgb')): print('Dataset_folder needs to contain extracted dataset, including the rgb folder') print('{} does not fulfill those requirements'.format(config['dataset_folder'])) create_object_detection_tfrecords( train_labels, config['train_tfrecord'], config['dataset_folder'], 'train') create_object_detection_tfrecords( valid_labels, config['valid_tfrecord'], config['dataset_folder'], 'valid') create_object_detection_tfrecords( test_labels, config['test_tfrecord'], config['dataset_folder'], 'test') print('Done creating tfrecords') def parse_args(): parser = argparse.ArgumentParser(description=__doc__) parser.add_argument('--train_yaml', default='train.yaml', help='Path to train.yaml') parser.add_argument('--test_yaml', default='test.yaml', help='Path to test.yaml') parser.add_argument('--additional_yaml', default='additional_train.yaml', help='Path to train_additional.yaml') parser.add_argument('--dataset_folder', default='.', help='Path to dataset folder') parser.add_argument('--train_tfrecord', default='train.tfrecords', help='Path to train.tfrecord') parser.add_argument('--valid_tfrecord', default='valid.tfrecords', help='Path to valid.tfrecord') parser.add_argument('--test_tfrecord', default='test.tfrecords', help='Path to 
test.tfrecord') args = vars(parser.parse_args()) return args if __name__ == '__main__': config = parse_args() create_datasets(config)
true
true
f723e0a0bf9e03267963a7c69b5889fe7fcdda12
2,069
py
Python
scripts/generate_delta_sysroot_unittest.py
hustwei/chromite
10eb79abeb64e859362546214b7e039096ac9830
[ "BSD-3-Clause" ]
null
null
null
scripts/generate_delta_sysroot_unittest.py
hustwei/chromite
10eb79abeb64e859362546214b7e039096ac9830
[ "BSD-3-Clause" ]
null
null
null
scripts/generate_delta_sysroot_unittest.py
hustwei/chromite
10eb79abeb64e859362546214b7e039096ac9830
[ "BSD-3-Clause" ]
null
null
null
# Copyright (c) 2013 The Chromium OS Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. """Unittests for generate_delta_sysroot.""" from __future__ import print_function import os from chromite.lib import cros_build_lib from chromite.lib import cros_test_lib from chromite.scripts import generate_delta_sysroot as gds # pylint: disable=W0212 def _Parse(argv): return gds._ParseCommandLine(argv) class InterfaceTest(cros_test_lib.OutputTestCase, cros_test_lib.TempDirTestCase): """Test the commandline interface of the script""" def testNoBoard(self): """Test no board specified.""" argv = ['--out-dir', '/path/to/nowhere'] self.assertParseError(argv) def testNoOutDir(self): """Test no out dir specified.""" argv = ['--board', 'link'] self.assertParseError(argv) def testCorrectArgv(self): """Test successful parsing""" argv = ['--board', 'link', '--out-dir', self.tempdir] options = _Parse(argv) gds.FinishParsing(options) def testTestsSet(self): """Test successful parsing""" argv = ['--board', 'link', '--out-dir', self.tempdir] options = _Parse(argv) self.assertTrue(options.build_tests) def testNoTestsSet(self): """Test successful parsing""" argv = ['--board', 'link', '--out-dir', self.tempdir, '--skip-tests'] options = _Parse(argv) self.assertFalse(options.build_tests) def assertParseError(self, argv): """Helper to assert parsing error, given argv.""" with self.OutputCapturer(): self.assertRaises2(SystemExit, _Parse, argv) class TestCreateBatchFile(cros_test_lib.TempDirTestCase): """Test the batch file creation.""" def testSourceDirDoesNotExist(self): """Test error is raised if there is no source directory.""" no_source = os.path.join(self.tempdir, 'foo/bar/cow') self.assertRaises2( cros_build_lib.RunCommandError, gds.CreateBatchFile, no_source, self.tempdir, os.path.join(self.tempdir, 'batch'))
29.985507
73
0.697438
from __future__ import print_function import os from chromite.lib import cros_build_lib from chromite.lib import cros_test_lib from chromite.scripts import generate_delta_sysroot as gds def _Parse(argv): return gds._ParseCommandLine(argv) class InterfaceTest(cros_test_lib.OutputTestCase, cros_test_lib.TempDirTestCase): def testNoBoard(self): argv = ['--out-dir', '/path/to/nowhere'] self.assertParseError(argv) def testNoOutDir(self): argv = ['--board', 'link'] self.assertParseError(argv) def testCorrectArgv(self): argv = ['--board', 'link', '--out-dir', self.tempdir] options = _Parse(argv) gds.FinishParsing(options) def testTestsSet(self): argv = ['--board', 'link', '--out-dir', self.tempdir] options = _Parse(argv) self.assertTrue(options.build_tests) def testNoTestsSet(self): argv = ['--board', 'link', '--out-dir', self.tempdir, '--skip-tests'] options = _Parse(argv) self.assertFalse(options.build_tests) def assertParseError(self, argv): with self.OutputCapturer(): self.assertRaises2(SystemExit, _Parse, argv) class TestCreateBatchFile(cros_test_lib.TempDirTestCase): def testSourceDirDoesNotExist(self): no_source = os.path.join(self.tempdir, 'foo/bar/cow') self.assertRaises2( cros_build_lib.RunCommandError, gds.CreateBatchFile, no_source, self.tempdir, os.path.join(self.tempdir, 'batch'))
true
true
f723e168770bfc02d7b2018b83bc4abe150a4e30
678
py
Python
ex9_1_applications_agumentation.py
soyoung9306/-3-keras
e65f40171aadef3fe0b59c649b55b3f0bd09ca41
[ "MIT" ]
200
2017-10-23T05:05:34.000Z
2022-01-25T00:58:45.000Z
ex9_1_applications_agumentation.py
MyeongHaHwang/keraspp
4090fcc86072cda816d1d6056b5113ace49534ae
[ "MIT" ]
8
2018-02-07T08:33:49.000Z
2020-09-11T20:59:30.000Z
ex9_1_applications_agumentation.py
MyeongHaHwang/keraspp
4090fcc86072cda816d1d6056b5113ace49534ae
[ "MIT" ]
135
2017-12-15T05:41:47.000Z
2021-12-15T12:21:09.000Z
""" CH 9.1 Applications/Image Augmentation """ from sklearn import model_selection from keras import datasets import keras assert keras.backend.image_data_format() == 'channels_last' from keraspp import aigen class Machine(aigen.Machine_Generator): def __init__(self): (x_train, y_train), (x_test, y_test) = datasets.cifar10.load_data() _, X, _, y = model_selection.train_test_split(x_train, y_train, test_size=0.02) X = X.astype(float) gen_param_dict = {'rotation_range': 10} super().__init__(X, y, nb_classes=10, gen_param_dict=gen_param_dict) def main(): m = Machine() m.run() if __name__ == '__main__': main()
23.37931
87
0.690265
from sklearn import model_selection from keras import datasets import keras assert keras.backend.image_data_format() == 'channels_last' from keraspp import aigen class Machine(aigen.Machine_Generator): def __init__(self): (x_train, y_train), (x_test, y_test) = datasets.cifar10.load_data() _, X, _, y = model_selection.train_test_split(x_train, y_train, test_size=0.02) X = X.astype(float) gen_param_dict = {'rotation_range': 10} super().__init__(X, y, nb_classes=10, gen_param_dict=gen_param_dict) def main(): m = Machine() m.run() if __name__ == '__main__': main()
true
true
f723e1dd1990acf6f94ec2386a12f7b48ddc2589
274
py
Python
core/index_db/index_object.py
AliRezaBeigy/Gitools
b7defd332bb144cb45962a351b1f56e941c8ca4b
[ "MIT" ]
6
2021-01-06T05:18:06.000Z
2022-03-17T06:44:29.000Z
core/index_db/index_object.py
AliRezaBeigy/Gitools
b7defd332bb144cb45962a351b1f56e941c8ca4b
[ "MIT" ]
null
null
null
core/index_db/index_object.py
AliRezaBeigy/Gitools
b7defd332bb144cb45962a351b1f56e941c8ca4b
[ "MIT" ]
1
2021-04-15T20:51:26.000Z
2021-04-15T20:51:26.000Z
class IndexObject:
    """One entry of a Git pack index: object hash, CRC, and pack offsets."""

    # NOTE(review): `hash` shadows the builtin of the same name; kept because
    # the attribute name is part of the public interface.
    hash: str
    crc32: int
    # Declared but never assigned in __init__ — presumably filled in later
    # (e.g. once the next object's start offset is known); verify against
    # the index parser.
    pack_end_offset: int
    pack_start_offset: int

    def __init__(self, hash: str, crc32: int, pack_start_offset: int):
        """Record the object's identity and where it begins in the pack file."""
        self.crc32 = crc32
        self.hash = hash
        self.pack_start_offset = pack_start_offset
27.4
70
0.667883
class IndexObject: hash: str crc32: int pack_end_offset: int pack_start_offset: int def __init__(self, hash: str, crc32: int, pack_start_offset: int): self.hash = hash self.crc32 = crc32 self.pack_start_offset = pack_start_offset
true
true
f723e22ccbf007fd40e2124e7515473ede314c02
1,440
py
Python
setup.py
dharif23/xtermcolors
fb35b9b7a04fbf7a0ea236bb94275240c6322b1a
[ "MIT" ]
null
null
null
setup.py
dharif23/xtermcolors
fb35b9b7a04fbf7a0ea236bb94275240c6322b1a
[ "MIT" ]
null
null
null
setup.py
dharif23/xtermcolors
fb35b9b7a04fbf7a0ea236bb94275240c6322b1a
[ "MIT" ]
null
null
null
#!/usr/bin/env python
# -*- coding: utf-8 -*-

__author__ = 'Dalwar Hossain'
__email__ = 'dalwar.hossain@protonmail.com'

from setuptools import setup


def readme():
    """Return the contents of README.md for use as the long description."""
    with open('README.md') as readme_file:
        return readme_file.read()


# Trove classifiers describing supported interpreters and project status.
_CLASSIFIERS = [
    'Development Status :: 4 - Beta',
    'Intended Audience :: Developers',
    'Intended Audience :: Science/Research',
    'License :: OSI Approved :: MIT License',
    'Operating System :: OS Independent',
    'Programming Language :: Python :: 2',
    'Programming Language :: Python :: 2.7',
    'Programming Language :: Python :: 3',
    'Programming Language :: Python :: 3.4',
    'Programming Language :: Python :: 3.5',
    'Programming Language :: Python :: 3.6',
    'Programming Language :: Python :: 3.7',
    'Topic :: Software Development :: Libraries :: Python Modules',
]

setup(
    name='pyrainbowterm',
    version='1.0',
    description='pyrainbowterm - Smart custom print function with color and log information support',
    long_description=readme(),
    classifiers=_CLASSIFIERS,
    keywords='terminal colors xterm python colored output',
    url='https://github.com/dharif23/pyrainbowterm',
    author='Dalwar Hossain',
    author_email='dalwar.hossain@protonmail.com',
    license='MIT',
    packages=['pyrainbowterm'],
    include_package_data=True,
    zip_safe=False,
)
32.727273
103
0.603472
__author__ = 'Dalwar Hossain' __email__ = 'dalwar.hossain@protonmail.com' from setuptools import setup def readme(): with open('README.md') as f: return f.read() setup(name='pyrainbowterm', version='1.0', description='pyrainbowterm - Smart custom print function with color and log information support', long_description=readme(), classifiers=[ 'Development Status :: 4 - Beta', 'Intended Audience :: Developers', 'Intended Audience :: Science/Research', 'License :: OSI Approved :: MIT License', 'Operating System :: OS Independent', 'Programming Language :: Python :: 2', 'Programming Language :: Python :: 2.7', 'Programming Language :: Python :: 3', 'Programming Language :: Python :: 3.4', 'Programming Language :: Python :: 3.5', 'Programming Language :: Python :: 3.6', 'Programming Language :: Python :: 3.7', 'Topic :: Software Development :: Libraries :: Python Modules'], keywords='terminal colors xterm python colored output', url='https://github.com/dharif23/pyrainbowterm', author='Dalwar Hossain', author_email='dalwar.hossain@protonmail.com', license='MIT', packages=['pyrainbowterm'], include_package_data=True, zip_safe=False, )
true
true
f723e3633555478d9d24ed98d7498972bfe2deda
13,424
py
Python
pyabsa/core/tc/prediction/text_classifier.py
yangheng95/PyABSA
f5b46047a58fa8054a0469486be3f1cada933814
[ "MIT" ]
199
2021-06-07T15:07:28.000Z
2022-03-31T11:53:28.000Z
pyabsa/core/tc/prediction/text_classifier.py
yangheng95/PyABSA
f5b46047a58fa8054a0469486be3f1cada933814
[ "MIT" ]
98
2021-06-06T06:01:02.000Z
2022-03-31T15:48:28.000Z
pyabsa/core/tc/prediction/text_classifier.py
yangheng95/PyABSA
f5b46047a58fa8054a0469486be3f1cada933814
[ "MIT" ]
55
2021-06-10T08:52:17.000Z
2022-03-31T11:08:58.000Z
# -*- coding: utf-8 -*-
# file: text_classifier.py
# author: yangheng <yangheng@m.scnu.edu.cn>
# Copyright (C) 2020. All Rights Reserved.

import json
import os
import pickle
import random

import numpy
import torch
from findfile import find_file
from termcolor import colored
from torch.utils.data import DataLoader
from transformers import AutoTokenizer, AutoModel  # AutoTokenizer appears unused in this module

from pyabsa.functional.dataset import detect_infer_dataset

from ..models import GloVeClassificationModelList, BERTClassificationModelList
from ..classic.__glove__.dataset_utils.data_utils_for_inferring import GloVeClassificationDataset
from ..classic.__bert__.dataset_utils.data_utils_for_inferring import BERTClassificationDataset
from ..classic.__glove__.dataset_utils.data_utils_for_training import LABEL_PADDING, build_embedding_matrix, build_tokenizer
from pyabsa.utils.pyabsa_utils import print_args, TransformerConnectionError


class TextClassifier:
    """Inference wrapper around a trained PyABSA text-classification model.

    Accepts either the (model, opt, tokenizer) triple produced by training,
    or a string path to a saved checkpoint, and exposes single-text and
    batch inference over that model.
    """

    def __init__(self, model_arg=None, label_map=None, eval_batch_size=128):
        """Load a classifier.

        :param model_arg: either a (model, opt, tokenizer) sequence coming
            straight from training, or a string path to a checkpoint that
            contains a .config plus .state_dict/.model files (and a
            .tokenizer for transformer-based models).
        :param label_map: deprecated index->label override; see set_label_map.
        :param eval_batch_size: batch size used by the inference DataLoader.
        """
        # Candidate weight initializers, kept for parity with training code.
        # NOTE(review): 'xavier_normal_' maps to torch.nn.init.xavier_normal
        # (no trailing underscore), the deprecated alias — confirm intent.
        self.initializers = {
            'xavier_uniform_': torch.nn.init.xavier_uniform_,
            'xavier_normal_': torch.nn.init.xavier_normal,
            'orthogonal_': torch.nn.init.orthogonal_
        }
        # load from a training: a non-string argument is the in-memory
        # (model, opt, tokenizer) triple handed over by the trainer.
        if not isinstance(model_arg, str):
            print('Load text classifier from training')
            self.model = model_arg[0]
            self.opt = model_arg[1]
            self.tokenizer = model_arg[2]
        else:
            # Otherwise treat model_arg as a checkpoint path and locate the
            # individual checkpoint artifacts inside it by file extension.
            try:
                if 'fine-tuned' in model_arg:
                    raise ValueError('Do not support to directly load a fine-tuned model, please load a .state_dict or .model instead!')
                print('Load text classifier from', model_arg)
                state_dict_path = find_file(model_arg, '.state_dict', exclude_key=['__MACOSX'])
                model_path = find_file(model_arg, '.model', exclude_key=['__MACOSX'])
                tokenizer_path = find_file(model_arg, '.tokenizer', exclude_key=['__MACOSX'])
                config_path = find_file(model_arg, '.config', exclude_key=['__MACOSX'])

                print('config: {}'.format(config_path))
                print('state_dict: {}'.format(state_dict_path))
                print('model: {}'.format(model_path))
                print('tokenizer: {}'.format(tokenizer_path))

                # The pickled opt object carries all training-time settings.
                self.opt = pickle.load(open(config_path, mode='rb'))

                if state_dict_path or model_path:
                    # A model name absent from the GloVe list is assumed to be
                    # a transformer (BERT-style) architecture.
                    if not hasattr(GloVeClassificationModelList, self.opt.model.__name__.upper()):
                        # 'pretrained_bert_name' is the legacy option name;
                        # migrate it onto 'pretrained_bert'.
                        if 'pretrained_bert_name' in self.opt.args or 'pretrained_bert' in self.opt.args:
                            if 'pretrained_bert_name' in self.opt.args:
                                self.opt.pretrained_bert = self.opt.pretrained_bert_name
                        if state_dict_path:
                            try:
                                self.bert = AutoModel.from_pretrained(self.opt.pretrained_bert)
                                self.model = self.opt.model(self.bert, self.opt)
                            except ValueError:
                                # from_pretrained raises ValueError when the
                                # hub model cannot be fetched.
                                raise TransformerConnectionError()
                        elif model_path:
                            if model_path:
                                self.model = torch.load(model_path, map_location='cpu')
                        # Transformer checkpoints must ship their tokenizer.
                        if tokenizer_path:
                            self.tokenizer = pickle.load(open(tokenizer_path, mode='rb'))
                        else:
                            raise ValueError('No .tokenizer found!')
                    else:
                        # GloVe-based model: rebuild tokenizer and embedding
                        # matrix from the original dataset files.
                        self.tokenizer = build_tokenizer(
                            dataset_list=self.opt.dataset_file,
                            max_seq_len=self.opt.max_seq_len,
                            dat_fname='{0}_tokenizer.dat'.format(os.path.basename(self.opt.dataset_name)),
                            opt=self.opt
                        )
                        if model_path:
                            self.model = torch.load(model_path, map_location='cpu')
                        else:
                            self.embedding_matrix = build_embedding_matrix(
                                word2idx=self.tokenizer.word2idx,
                                embed_dim=self.opt.embed_dim,
                                dat_fname='{0}_{1}_embedding_matrix.dat'.format(str(self.opt.embed_dim), os.path.basename(self.opt.dataset_name)),
                                opt=self.opt
                            )
                            self.model = self.opt.model(self.embedding_matrix, self.opt).to(self.opt.device)
                            # NOTE(review): load_state_dict is only reached on
                            # this GloVe+state_dict path; the transformer
                            # state_dict branch above never restores trained
                            # weights — confirm against upstream.
                            self.model.load_state_dict(torch.load(state_dict_path, map_location='cpu'))

                print('Config used in Training:')
                print_args(self.opt, mode=1)

            except Exception as e:
                # Any failure above is surfaced as a single RuntimeError with
                # the original exception text attached.
                raise RuntimeError('Exception: {} Fail to load the model from {}! '.format(e, model_arg))

            # Reject checkpoints that belong to a different PyABSA task.
            if not hasattr(GloVeClassificationModelList, self.model.__class__.__name__) \
                    and not hasattr(BERTClassificationModelList, self.model.__class__.__name__):
                raise KeyError('The checkpoint you are loading is not from classifier model.')

        # Choose the dataset wrapper matching the model family.
        if hasattr(BERTClassificationModelList, self.opt.model.__name__):
            self.dataset = BERTClassificationDataset(tokenizer=self.tokenizer, opt=self.opt)
        elif hasattr(GloVeClassificationModelList, self.opt.model.__name__):
            self.dataset = GloVeClassificationDataset(tokenizer=self.tokenizer, opt=self.opt)

        self.opt.inputs_cols = self.model.inputs

        self.infer_dataloader = None
        self.opt.eval_batch_size = eval_batch_size

        # Re-seed all RNGs so repeated inference runs are reproducible.
        if self.opt.seed is not None:
            random.seed(self.opt.seed)
            numpy.random.seed(self.opt.seed)
            torch.manual_seed(self.opt.seed)
            torch.cuda.manual_seed(self.opt.seed)
            torch.backends.cudnn.deterministic = True
            torch.backends.cudnn.benchmark = False

        # NOTE(review): self-assignment is a no-op kept from upstream.
        self.opt.initializer = self.opt.initializer

        self.label_map = None
        self.set_label_map(label_map)

    def set_label_map(self, label_map):
        """Install a deprecated index->label override used when printing."""
        if label_map:
            print(colored('Warning: label map is deprecated, please directly set labels within dataset.', 'red'))
            # The padding index must always map to an empty label.
            label_map[LABEL_PADDING] = ''
            self.label_map = label_map

    def to(self, device=None):
        """Move the model to *device* and remember the choice in opt."""
        self.opt.device = device
        self.model.to(device)

    def cpu(self):
        """Move the model to the CPU."""
        self.opt.device = 'cpu'
        self.model.to('cpu')

    def cuda(self, device='cuda:0'):
        """Move the model to a CUDA device (default cuda:0)."""
        self.opt.device = device
        self.model.to(device)

    def _log_write_args(self):
        """Print parameter counts and every non-None option in self.opt."""
        n_trainable_params, n_nontrainable_params = 0, 0
        for p in self.model.parameters():
            n_params = torch.prod(torch.tensor(p.shape))
            if p.requires_grad:
                n_trainable_params += n_params
            else:
                n_nontrainable_params += n_params
        print(
            'n_trainable_params: {0}, n_nontrainable_params: {1}'.format(n_trainable_params, n_nontrainable_params))
        for arg in vars(self.opt):
            if getattr(self.opt, arg) is not None:
                print('>>> {0}: {1}'.format(arg, getattr(self.opt, arg)))

    def batch_infer(self, target_file=None,
                    print_result=True,
                    save_result=False,
                    clear_input_samples=True,
                    ignore_error=True):
        """Run inference over a whole dataset file.

        :param target_file: dataset path resolved via detect_infer_dataset.
        :param print_result: echo each prediction to stdout.
        :param save_result: also dump results to a JSON file in the CWD.
        :param clear_input_samples: drop previously queued samples first.
        :param ignore_error: skip malformed lines instead of raising.
        :return: list of per-sample result dicts (see _infer).
        """
        if clear_input_samples:
            self.clear_input_samples()

        save_path = os.path.join(os.getcwd(), 'text_classification.result.json')

        target_file = detect_infer_dataset(target_file, task='text_classification')
        if not target_file:
            raise FileNotFoundError('Can not find inference datasets!')

        self.dataset.prepare_infer_dataset(target_file, ignore_error=ignore_error)
        self.infer_dataloader = DataLoader(dataset=self.dataset, batch_size=self.opt.eval_batch_size, pin_memory=True, shuffle=False)
        return self._infer(save_path=save_path if save_result else None, print_result=print_result)

    def infer(self, text: str = None,
              print_result=True,
              clear_input_samples=True):
        """Run inference on a single raw text string.

        :param text: the input sentence to classify.
        :param print_result: echo the prediction to stdout.
        :param clear_input_samples: drop previously queued samples first.
        :return: list with the result dict for this sample (see _infer).
        """
        if clear_input_samples:
            self.clear_input_samples()
        if text:
            self.dataset.prepare_infer_sample(text)
        else:
            raise RuntimeError('Please specify your datasets path!')
        self.infer_dataloader = DataLoader(dataset=self.dataset, batch_size=self.opt.eval_batch_size, shuffle=False)
        return self._infer(print_result=print_result)

    def merge_results(self, results):
        """Merge APC results that share the same input text.

        Consecutive results whose whitespace-normalized text matches are
        folded into one entry whose label/ref fields become lists.
        """
        final_res = []
        for result in results:
            # Compare texts with all whitespace stripped so that tokenization
            # differences do not prevent merging.
            if final_res and "".join(final_res[-1]['text'].split()) == "".join(result['text'].split()):
                final_res[-1]['label'].append(result['label'])
                final_res[-1]['ref_label'].append(result['ref_label'])
                final_res[-1]['ref_check'].append(result['ref_check'])
            else:
                final_res.append(
                    {
                        'text': result['text'].replace('  ', ' '),
                        'label': [result['label']],
                        'ref_label': [result['ref_label']],
                        'ref_check': [result['ref_check']]
                    }
                )
        return final_res

    def _infer(self, save_path=None, print_result=True):
        """Run the model over self.infer_dataloader and collect results.

        :param save_path: if set, dump the results as JSON to this path.
        :param print_result: echo each prediction (colored by correctness).
        :return: list of dicts with keys text/label/ref_label/ref_check.
        """

        # NOTE(review): _params is computed but never used — dead code.
        _params = filter(lambda p: p.requires_grad, self.model.parameters())

        correct = {True: 'Correct', False: 'Wrong'}
        results = []

        with torch.no_grad():
            self.model.eval()
            n_correct = 0
            n_labeled = 0
            n_total = 0
            for _, sample in enumerate(self.infer_dataloader):
                # 'label' is excluded from the model inputs; it is only used
                # below as the reference value.
                inputs = [sample[col].to(self.opt.device) for col in self.opt.inputs_cols if col != 'label']
                self.model.eval()
                outputs = self.model(inputs)
                sen_logits = outputs
                t_probs = torch.softmax(sen_logits, dim=-1).cpu().numpy()
                for i, i_probs in enumerate(t_probs):
                    # NOTE(review): the `and int(...)` clause sends a predicted
                    # index of 0 through the legacy branch below even when
                    # index_to_label exists — looks unintentional; verify.
                    if 'index_to_label' in self.opt.args and int(i_probs.argmax(axis=-1)):
                        sent = self.opt.index_to_label[int(i_probs.argmax(axis=-1))]
                        # -999 is the sentinel for "no reference label".
                        if sample['label'] != -999:
                            real_sent = sample['label'][i] if isinstance(sample['label'][i], str) else self.opt.index_to_label.get(int(sample['label'][i]), 'N.A.')
                        else:
                            real_sent = 'N.A.'
                        if real_sent != -999 and real_sent != '-999':
                            n_labeled += 1
                            if sent == real_sent:
                                n_correct += 1
                    else:  # for the former versions until 1.2.0
                        sent = int(i_probs.argmax(axis=-1))
                        real_sent = int(sample['label'][i])
                    text_raw = sample['text_raw'][i]

                    results.append({
                        'text': text_raw,
                        'label': sent,
                        'ref_label': real_sent,
                        'ref_check': correct[sent == real_sent] if real_sent != '-999' else '',
                    })
                    n_total += 1

            # Only print aggregate stats for multi-batch (i.e. dataset) runs.
            if len(self.infer_dataloader) > 1:
                print('Total samples:{}'.format(n_total))
                print('Labeled samples:{}'.format(n_labeled))
                print('Prediction Accuracy:{}%'.format(100 * n_correct / n_labeled if n_labeled else 'N.A.'))

            try:
                if print_result:
                    for result in results:
                        text_printing = result['text']
                        if result['ref_label'] != -999:
                            # Green when prediction matches the reference,
                            # red otherwise.
                            if result['label'] == result['ref_label']:
                                text_info = colored(' -> {}(ref:{})'.format(result['label'], result['ref_label']), 'green')
                            else:
                                text_info = colored(' -> {}(ref:{})'.format(result['label'], result['ref_label']), 'red')
                        else:
                            text_info = ' -> {}'.format(result['label'])
                        text_printing += text_info
                        print(text_printing)
                if save_path:
                    fout = open(save_path, 'w', encoding='utf8')
                    json.dump(json.JSONEncoder().encode({'results': results}), fout, ensure_ascii=False)
                    print('inference result saved in: {}'.format(save_path))
            except Exception as e:
                # Best-effort output: printing/saving failures must not lose
                # the computed results.
                print('Can not save result: {}, Exception: {}'.format(text_raw, e))

        return results

    def clear_input_samples(self):
        """Drop any samples previously queued for inference."""
        self.dataset.all_data = []
45.505085
163
0.560489
import json import os import pickle import random import numpy import torch from findfile import find_file from termcolor import colored from torch.utils.data import DataLoader from transformers import AutoTokenizer, AutoModel from pyabsa.functional.dataset import detect_infer_dataset from ..models import GloVeClassificationModelList, BERTClassificationModelList from ..classic.__glove__.dataset_utils.data_utils_for_inferring import GloVeClassificationDataset from ..classic.__bert__.dataset_utils.data_utils_for_inferring import BERTClassificationDataset from ..classic.__glove__.dataset_utils.data_utils_for_training import LABEL_PADDING, build_embedding_matrix, build_tokenizer from pyabsa.utils.pyabsa_utils import print_args, TransformerConnectionError class TextClassifier: def __init__(self, model_arg=None, label_map=None, eval_batch_size=128): self.initializers = { 'xavier_uniform_': torch.nn.init.xavier_uniform_, 'xavier_normal_': torch.nn.init.xavier_normal, 'orthogonal_': torch.nn.init.orthogonal_ } if not isinstance(model_arg, str): print('Load text classifier from training') self.model = model_arg[0] self.opt = model_arg[1] self.tokenizer = model_arg[2] else: try: if 'fine-tuned' in model_arg: raise ValueError('Do not support to directly load a fine-tuned model, please load a .state_dict or .model instead!') print('Load text classifier from', model_arg) state_dict_path = find_file(model_arg, '.state_dict', exclude_key=['__MACOSX']) model_path = find_file(model_arg, '.model', exclude_key=['__MACOSX']) tokenizer_path = find_file(model_arg, '.tokenizer', exclude_key=['__MACOSX']) config_path = find_file(model_arg, '.config', exclude_key=['__MACOSX']) print('config: {}'.format(config_path)) print('state_dict: {}'.format(state_dict_path)) print('model: {}'.format(model_path)) print('tokenizer: {}'.format(tokenizer_path)) self.opt = pickle.load(open(config_path, mode='rb')) if state_dict_path or model_path: if not hasattr(GloVeClassificationModelList, 
self.opt.model.__name__.upper()): if 'pretrained_bert_name' in self.opt.args or 'pretrained_bert' in self.opt.args: if 'pretrained_bert_name' in self.opt.args: self.opt.pretrained_bert = self.opt.pretrained_bert_name if state_dict_path: try: self.bert = AutoModel.from_pretrained(self.opt.pretrained_bert) self.model = self.opt.model(self.bert, self.opt) except ValueError: raise TransformerConnectionError() elif model_path: if model_path: self.model = torch.load(model_path, map_location='cpu') if tokenizer_path: self.tokenizer = pickle.load(open(tokenizer_path, mode='rb')) else: raise ValueError('No .tokenizer found!') else: self.tokenizer = build_tokenizer( dataset_list=self.opt.dataset_file, max_seq_len=self.opt.max_seq_len, dat_fname='{0}_tokenizer.dat'.format(os.path.basename(self.opt.dataset_name)), opt=self.opt ) if model_path: self.model = torch.load(model_path, map_location='cpu') else: self.embedding_matrix = build_embedding_matrix( word2idx=self.tokenizer.word2idx, embed_dim=self.opt.embed_dim, dat_fname='{0}_{1}_embedding_matrix.dat'.format(str(self.opt.embed_dim), os.path.basename(self.opt.dataset_name)), opt=self.opt ) self.model = self.opt.model(self.embedding_matrix, self.opt).to(self.opt.device) self.model.load_state_dict(torch.load(state_dict_path, map_location='cpu')) print('Config used in Training:') print_args(self.opt, mode=1) except Exception as e: raise RuntimeError('Exception: {} Fail to load the model from {}! 
'.format(e, model_arg)) if not hasattr(GloVeClassificationModelList, self.model.__class__.__name__) \ and not hasattr(BERTClassificationModelList, self.model.__class__.__name__): raise KeyError('The checkpoint you are loading is not from classifier model.') if hasattr(BERTClassificationModelList, self.opt.model.__name__): self.dataset = BERTClassificationDataset(tokenizer=self.tokenizer, opt=self.opt) elif hasattr(GloVeClassificationModelList, self.opt.model.__name__): self.dataset = GloVeClassificationDataset(tokenizer=self.tokenizer, opt=self.opt) self.opt.inputs_cols = self.model.inputs self.infer_dataloader = None self.opt.eval_batch_size = eval_batch_size if self.opt.seed is not None: random.seed(self.opt.seed) numpy.random.seed(self.opt.seed) torch.manual_seed(self.opt.seed) torch.cuda.manual_seed(self.opt.seed) torch.backends.cudnn.deterministic = True torch.backends.cudnn.benchmark = False self.opt.initializer = self.opt.initializer self.label_map = None self.set_label_map(label_map) def set_label_map(self, label_map): if label_map: print(colored('Warning: label map is deprecated, please directly set labels within dataset.', 'red')) label_map[LABEL_PADDING] = '' self.label_map = label_map def to(self, device=None): self.opt.device = device self.model.to(device) def cpu(self): self.opt.device = 'cpu' self.model.to('cpu') def cuda(self, device='cuda:0'): self.opt.device = device self.model.to(device) def _log_write_args(self): n_trainable_params, n_nontrainable_params = 0, 0 for p in self.model.parameters(): n_params = torch.prod(torch.tensor(p.shape)) if p.requires_grad: n_trainable_params += n_params else: n_nontrainable_params += n_params print( 'n_trainable_params: {0}, n_nontrainable_params: {1}'.format(n_trainable_params, n_nontrainable_params)) for arg in vars(self.opt): if getattr(self.opt, arg) is not None: print('>>> {0}: {1}'.format(arg, getattr(self.opt, arg))) def batch_infer(self, target_file=None, print_result=True, save_result=False, 
clear_input_samples=True, ignore_error=True): if clear_input_samples: self.clear_input_samples() save_path = os.path.join(os.getcwd(), 'text_classification.result.json') target_file = detect_infer_dataset(target_file, task='text_classification') if not target_file: raise FileNotFoundError('Can not find inference datasets!') self.dataset.prepare_infer_dataset(target_file, ignore_error=ignore_error) self.infer_dataloader = DataLoader(dataset=self.dataset, batch_size=self.opt.eval_batch_size, pin_memory=True, shuffle=False) return self._infer(save_path=save_path if save_result else None, print_result=print_result) def infer(self, text: str = None, print_result=True, clear_input_samples=True): if clear_input_samples: self.clear_input_samples() if text: self.dataset.prepare_infer_sample(text) else: raise RuntimeError('Please specify your datasets path!') self.infer_dataloader = DataLoader(dataset=self.dataset, batch_size=self.opt.eval_batch_size, shuffle=False) return self._infer(print_result=print_result) def merge_results(self, results): final_res = [] for result in results: if final_res and "".join(final_res[-1]['text'].split()) == "".join(result['text'].split()): final_res[-1]['label'].append(result['label']) final_res[-1]['ref_label'].append(result['ref_label']) final_res[-1]['ref_check'].append(result['ref_check']) else: final_res.append( { 'text': result['text'].replace(' ', ' '), 'label': [result['label']], 'ref_label': [result['ref_label']], 'ref_check': [result['ref_check']] } ) return final_res def _infer(self, save_path=None, print_result=True): _params = filter(lambda p: p.requires_grad, self.model.parameters()) correct = {True: 'Correct', False: 'Wrong'} results = [] with torch.no_grad(): self.model.eval() n_correct = 0 n_labeled = 0 n_total = 0 for _, sample in enumerate(self.infer_dataloader): inputs = [sample[col].to(self.opt.device) for col in self.opt.inputs_cols if col != 'label'] self.model.eval() outputs = self.model(inputs) sen_logits = outputs 
t_probs = torch.softmax(sen_logits, dim=-1).cpu().numpy() for i, i_probs in enumerate(t_probs): if 'index_to_label' in self.opt.args and int(i_probs.argmax(axis=-1)): sent = self.opt.index_to_label[int(i_probs.argmax(axis=-1))] if sample['label'] != -999: real_sent = sample['label'][i] if isinstance(sample['label'][i], str) else self.opt.index_to_label.get(int(sample['label'][i]), 'N.A.') else: real_sent = 'N.A.' if real_sent != -999 and real_sent != '-999': n_labeled += 1 if sent == real_sent: n_correct += 1 else: sent = int(i_probs.argmax(axis=-1)) real_sent = int(sample['label'][i]) text_raw = sample['text_raw'][i] results.append({ 'text': text_raw, 'label': sent, 'ref_label': real_sent, 'ref_check': correct[sent == real_sent] if real_sent != '-999' else '', }) n_total += 1 if len(self.infer_dataloader) > 1: print('Total samples:{}'.format(n_total)) print('Labeled samples:{}'.format(n_labeled)) print('Prediction Accuracy:{}%'.format(100 * n_correct / n_labeled if n_labeled else 'N.A.')) try: if print_result: for result in results: text_printing = result['text'] if result['ref_label'] != -999: if result['label'] == result['ref_label']: text_info = colored(' -> {}(ref:{})'.format(result['label'], result['ref_label']), 'green') else: text_info = colored(' -> {}(ref:{})'.format(result['label'], result['ref_label']), 'red') else: text_info = ' -> {}'.format(result['label']) text_printing += text_info print(text_printing) if save_path: fout = open(save_path, 'w', encoding='utf8') json.dump(json.JSONEncoder().encode({'results': results}), fout, ensure_ascii=False) print('inference result saved in: {}'.format(save_path)) except Exception as e: print('Can not save result: {}, Exception: {}'.format(text_raw, e)) return results def clear_input_samples(self): self.dataset.all_data = []
true
true
f723e39e9c8312aae14d30f2933e41983a1269e0
16,210
py
Python
syfertext/language.py
AlexKer/SyferText
021eea2255d9d8e1fc49c98c7b5f98b9e516ba21
[ "Apache-2.0" ]
null
null
null
syfertext/language.py
AlexKer/SyferText
021eea2255d9d8e1fc49c98c7b5f98b9e516ba21
[ "Apache-2.0" ]
null
null
null
syfertext/language.py
AlexKer/SyferText
021eea2255d9d8e1fc49c98c7b5f98b9e516ba21
[ "Apache-2.0" ]
null
null
null
from .tokenizer import Tokenizer
from .vocab import Vocab
from .doc import Doc
from .pointers.doc_pointer import DocPointer
from .pipeline import SubPipeline

from syft.generic.object import AbstractObject
from syft.workers.base import BaseWorker
from syft.generic.string import String
from syft.generic.pointers.string_pointer import StringPointer
from syft.generic.pointers.object_pointer import ObjectPointer

from typing import List, Union, Tuple


class BaseDefaults(object):
    """Defines the default factories used by the Language class."""

    @classmethod
    def create_vocab(cls, model_name) -> Vocab:
        """Create the Vocab object that holds the vocabulary along with
        vocabulary meta data.

        Todo:
            I started by a very simple Vocab class that contains only a
            variable called 'vectors' of type DICT to hold word vectors
            vocab.vectors['word'] = float. To be reviewed for more complex
            functionality.
        """
        return Vocab(model_name)

    @classmethod
    def create_tokenizer(cls, vocab) -> Tokenizer:
        """Create the Tokenizer object used to build the Doc object, which is
        the main container for annotated tokens.
        """
        return Tokenizer(vocab)


class Language(AbstractObject):
    """Inspired by spaCy's Language class.

    Orchestrates the interactions between the different components of the
    pipeline to accomplish core text-processing tasks. It creates the Doc
    object, which is the container into which all text-processing pipeline
    components feed their results.
    """

    def __init__(
        self,
        model_name,
        id: int = None,
        owner: BaseWorker = None,
        tags: List[str] = None,
        description: str = None,
    ):
        # Default factories used to build pipeline components.
        self.Defaults = BaseDefaults

        # Create the vocabulary.
        self.vocab = self.Defaults.create_vocab(model_name)

        # Maps the name of each text-processing component of the pipeline to
        # the object that is charged to accomplish the job.
        self.factories = {"tokenizer": self.Defaults.create_tokenizer(self.vocab)}

        # The pipeline template only contains the tokenizer at initialization.
        self.pipeline_template = [{"remote": True, "name": "tokenizer"}]

        # Initialize the main pipeline.
        self._reset_pipeline()

        super(Language, self).__init__(id=id, owner=owner, tags=tags, description=description)

    @property
    def pipe_names(self) -> List[str]:
        """Returns a list of component names in the pipeline in order of
        execution.

        Returns:
            (list): List of all pipeline component names in order of execution.
        """
        return [pipe_template["name"] for pipe_template in self.pipeline_template]

    def _parse_pipeline_template(self):
        """Parse the `pipeline_template` property into the
        `subpipeline_templates` property.

        Adjacent pipe templates that share the same `remote` value are
        aggregated into a single subpipeline template of the form
        `{'remote': <bool>, 'names': [<pipe names>]}`.
        """
        # Start the first subpipeline template with the tokenizer (index 0);
        # the tokenizer always has 'remote' set to True.
        subpipeline_template = dict(
            remote=self.pipeline_template[0]["remote"],
            names=[self.pipeline_template[0]["name"]],
        )

        # Initialize the subpipeline templates list as a class property.
        self.subpipeline_templates = [subpipeline_template]

        for pipe_template in self.pipeline_template[1:]:
            if pipe_template["remote"] == subpipeline_template["remote"]:
                # Same `remote` value: extend the current subpipeline template.
                subpipeline_template["names"].append(pipe_template["name"])
            else:
                # `remote` value changed: start a new subpipeline template.
                subpipeline_template = dict(
                    remote=pipe_template["remote"], names=[pipe_template["name"]]
                )
                self.subpipeline_templates.append(subpipeline_template)

    def _reset_pipeline(self):
        """Reset the `pipeline` class property."""
        # Creates/refreshes the instance variable self.subpipeline_templates.
        self._parse_pipeline_template()

        # One empty dict per subpipeline; each dict is later keyed by the
        # location ID of the worker holding that subpipeline.
        self.pipeline = [dict() for _ in range(len(self.subpipeline_templates))]

    def add_pipe(
        self,
        component: callable,
        remote: bool = False,
        name: str = None,
        before: str = None,
        after: str = None,
        first: bool = False,
        last: bool = True,
    ):
        """Adds a pipe template `{'remote': remote, 'name': name}` to the
        pipeline template at the requested position, then re-derives the
        subpipeline templates and resets the pipeline.

        Args:
            component (callable): A callable that takes a Doc object and
                modifies it inplace. Must also expose a `factory()` method.
            remote (bool): If True, the pipe component will be sent to the
                remote worker where the Doc object resides. If False, the pipe
                will operate locally, either on a Doc object directly, or on a
                DocPointer returned by the previous component in the pipeline.
                Defaults to False.
            name (str): The name of the pipeline component to be added.
            before (str): Name of the pipeline component before which the new
                component is to be added. Defaults to None.
            after (str): Name of the pipeline component after which the new
                component is to be added. Defaults to None.
            first (bool): If True, insert right after the tokenizer.
                Defaults to False.
            last (bool): If True (the default) and no other position is
                requested, append at the end of the pipeline.
        """
        # The component argument must be callable.
        # [TODO] An exception with a custom error message should be thrown
        assert hasattr(component, "__call__"), "Argument `component` is not a callable."

        # Make sure the `component` argument is an object that has a
        # `factory()` method.
        assert hasattr(
            component, "factory"
        ), "Argument `component` should be an object that has a `factory()` method"

        # [TODO] The following requirement should be relaxed and a name should
        # be automatically assigned in case `name` is None, as done by spaCy.
        assert (
            isinstance(name, str) and len(name) >= 1
        ), "Argument `name` should be of type `str` with at least one character."

        # [TODO] Add custom error message
        assert (
            name not in self.pipe_names
        ), "Pipeline component name '{}' that you have chosen is already used by another pipeline component.".format(
            name
        )

        # At most one explicit position among 'before', 'after' and 'first'
        # may be requested. `last` is deliberately excluded from this check:
        # it defaults to True and only acts as a fallback, so counting it (as
        # the previous version did) made every call that set `before`, `after`
        # or `first` fail this assertion.
        assert (
            sum([bool(before), bool(after), bool(first)]) < 2
        ), "Only one among arguments 'before', 'after' or 'first' should be set."

        # Add the new pipe component to the list of factories.
        self.factories[name] = component

        # The pipe template that will be added to the pipeline template.
        pipe_template = dict(remote=remote, name=name)

        # Insert the pipe template at the right position.
        # NOTE: `list.insert` only accepts positional arguments; the previous
        # `insert(index=..., element=...)` calls raised a TypeError.
        if first:
            # The index 0 is reserved for the tokenizer.
            self.pipeline_template.insert(1, pipe_template)
        elif before is not None:
            assert before in self.pipe_names, (
                "component cannot be added to the pipeline, "
                "please double check argument values of the `add_pipe` method call."
            )
            self.pipeline_template.insert(self.pipe_names.index(before), pipe_template)
        elif after is not None:
            assert after in self.pipe_names, (
                "component cannot be added to the pipeline, "
                "please double check argument values of the `add_pipe` method call."
            )
            self.pipeline_template.insert(self.pipe_names.index(after) + 1, pipe_template)
        else:
            # Default placement (`last`): append at the end of the pipeline.
            self.pipeline_template.append(pipe_template)

        # Reset the pipeline; this recreates self.pipeline.
        self._reset_pipeline()

    def remove_pipe(self, name: str) -> dict:
        """Removes the pipe whose name is `name` from the pipeline template.

        Args:
            name (str): The name of the pipeline component to remove.

        Returns:
            (dict): The removed pipe template `{'remote': ..., 'name': ...}`.
        """
        # [TODO] Add custom error message
        assert (
            name in self.pipe_names
        ), "No pipeline component with the specified name '{}' was found".format(name)

        # Delete the pipe template using its position in the template list.
        pipe = self.pipeline_template.pop(self.pipe_names.index(name))

        # Reset the pipeline. This already re-parses the pipeline template
        # into subpipeline templates, so no separate parse step is needed.
        self._reset_pipeline()

        return pipe

    def _run_subpipeline_from_template(
        self,
        template_index: int,
        input: Union[str, String, StringPointer, Doc, DocPointer] = None,
    ) -> Union[Doc, DocPointer]:
        """Runs the subpipeline at position `template_index` of self.pipeline
        on the appropriate worker.

        The worker on which the subpipeline is run is the same worker on which
        `input` lives when the `remote` property of the subpipeline template
        is True; otherwise it is the local worker. If no subpipeline is yet
        created for the specified worker, one is created using the template
        and added to the pipeline.

        Args:
            template_index (int): The index of the subpipeline template in
                `self.subpipeline_templates`.
            input (str, String, StringPointer, Doc, DocPointer): The input on
                which the subpipeline operates. It can be either the text to
                tokenize (or a pointer to it) for the subpipeline at index 0,
                or the Doc (or its pointer) for all subsequent subpipelines.

        Returns:
            (Doc or DocPointer): The new or updated Doc object or a pointer to
            a Doc object.
        """
        # Get the location ID of the worker where the text to be tokenized,
        # or the Doc to be processed, is located.
        if isinstance(input, ObjectPointer):
            location_id = input.location.id
        else:
            location_id = self.owner.id

        # Create a new SubPipeline object if one doesn't already exist on the
        # worker where the input is located.
        if location_id not in self.pipeline[template_index]:

            subpipeline_template = self.subpipeline_templates[template_index]

            # Is the subpipeline meant to run remotely?
            remote = subpipeline_template["remote"]

            # Instantiate a subpipeline and load the subpipeline template.
            subpipeline = SubPipeline()
            subpipeline.load_template(template=subpipeline_template, factories=self.factories)

            # Add the subpipeline to the pipeline.
            self.pipeline[template_index][location_id] = subpipeline

            # Send the subpipeline to the worker where the input is located.
            if (
                isinstance(input, ObjectPointer)  # Is the input remote?
                and input.location != self.owner
                and remote  # Is the subpipeline sendable?
            ):
                self.pipeline[template_index][location_id] = self.pipeline[template_index][
                    location_id
                ].send(input.location)

        # Apply the subpipeline and get the Doc or the Doc ID. A str/int
        # return value signifies the ID of the Doc object on the remote
        # worker, so a DocPointer is created for it.
        doc_or_id = self.pipeline[template_index][location_id](input)

        if isinstance(doc_or_id, (int, str)):
            doc = DocPointer(location=input.location, id_at_location=doc_or_id, owner=self.owner)
        else:
            doc = doc_or_id

        return doc

    def __call__(self, text: Union[str, String, StringPointer]) -> Union[Doc, DocPointer]:
        """Tokenizes `text` and runs all pipeline components on it, returning
        the resulting Doc object.

        Args:
            text (str, String or StringPointer): The text to be tokenized and
                processed by the pipeline components.

        Returns:
            (Doc or DocPointer): The Doc object or a pointer to a Doc object.
            This object provides access to all token data.
        """
        # The first subpipeline is the one that has the tokenizer.
        doc = self._run_subpipeline_from_template(template_index=0, input=text)

        # Apply the rest of the subpipelines sequentially; each one modifies
        # the document `doc` inplace.
        for i in range(1, len(self.pipeline)):
            doc = self._run_subpipeline_from_template(template_index=i, input=doc)

        return doc
39.440389
117
0.613017
from .tokenizer import Tokenizer
from .vocab import Vocab
from .doc import Doc
from .pointers.doc_pointer import DocPointer
from .pipeline import SubPipeline

from syft.generic.object import AbstractObject
from syft.workers.base import BaseWorker
from syft.generic.string import String
from syft.generic.pointers.string_pointer import StringPointer
from syft.generic.pointers.object_pointer import ObjectPointer

from typing import List, Union, Tuple


class BaseDefaults(object):
    """Default factories for the Language class."""

    @classmethod
    def create_vocab(cls, model_name) -> Vocab:
        """Create the Vocab object holding the vocabulary."""
        return Vocab(model_name)

    @classmethod
    def create_tokenizer(cls, vocab) -> Tokenizer:
        """Create the Tokenizer used to build Doc objects."""
        return Tokenizer(vocab)


class Language(AbstractObject):
    """Orchestrates the text-processing pipeline components and creates the
    Doc object into which they feed their results.
    """

    def __init__(
        self,
        model_name,
        id: int = None,
        owner: BaseWorker = None,
        tags: List[str] = None,
        description: str = None,
    ):
        self.Defaults = BaseDefaults
        self.vocab = self.Defaults.create_vocab(model_name)

        # Maps each pipe component name to its implementing object.
        self.factories = {"tokenizer": self.Defaults.create_tokenizer(self.vocab)}

        # The pipeline template starts with only the tokenizer.
        self.pipeline_template = [{"remote": True, "name": "tokenizer"}]

        self._reset_pipeline()

        super(Language, self).__init__(id=id, owner=owner, tags=tags, description=description)

    @property
    def pipe_names(self) -> List[str]:
        """Pipeline component names in order of execution."""
        return [pipe_template["name"] for pipe_template in self.pipeline_template]

    def _parse_pipeline_template(self):
        """Aggregate adjacent pipe templates sharing the same `remote` value
        into subpipeline templates (`self.subpipeline_templates`).
        """
        subpipeline_template = dict(
            remote=self.pipeline_template[0]["remote"],
            names=[self.pipeline_template[0]["name"]],
        )
        self.subpipeline_templates = [subpipeline_template]

        for pipe_template in self.pipeline_template[1:]:
            if pipe_template["remote"] == subpipeline_template["remote"]:
                subpipeline_template["names"].append(pipe_template["name"])
            else:
                subpipeline_template = dict(
                    remote=pipe_template["remote"], names=[pipe_template["name"]]
                )
                self.subpipeline_templates.append(subpipeline_template)

    def _reset_pipeline(self):
        """Re-derive the subpipeline templates and empty the pipeline."""
        self._parse_pipeline_template()
        # One dict per subpipeline, keyed later by worker location ID.
        self.pipeline = [dict() for _ in range(len(self.subpipeline_templates))]

    def add_pipe(
        self,
        component: callable,
        remote: bool = False,
        name: str = None,
        before: str = None,
        after: str = None,
        first: bool = False,
        last: bool = True,
    ):
        """Add a pipe template `{'remote': remote, 'name': name}` at the
        requested position and reset the pipeline.
        """
        assert hasattr(component, "__call__"), "Argument `component` is not a callable."

        assert hasattr(
            component, "factory"
        ), "Argument `component` should be an object that has a `factory()` method"

        assert (
            isinstance(name, str) and len(name) >= 1
        ), "Argument `name` should be of type `str` with at least one character."

        assert (
            name not in self.pipe_names
        ), "Pipeline component name '{}' that you have chosen is already used by another pipeline component.".format(
            name
        )

        # `last` defaults to True and is only a fallback, so it is not counted
        # here; counting it (as before) rejected every `before`/`after`/`first`
        # call.
        assert (
            sum([bool(before), bool(after), bool(first)]) < 2
        ), "Only one among arguments 'before', 'after' or 'first' should be set."

        self.factories[name] = component

        pipe_template = dict(remote=remote, name=name)

        # NOTE: `list.insert` takes positional arguments only; the previous
        # `insert(index=..., element=...)` calls raised a TypeError.
        if first:
            # Index 0 is reserved for the tokenizer.
            self.pipeline_template.insert(1, pipe_template)
        elif before is not None:
            assert before in self.pipe_names, (
                "component cannot be added to the pipeline, "
                "please double check argument values of the `add_pipe` method call."
            )
            self.pipeline_template.insert(self.pipe_names.index(before), pipe_template)
        elif after is not None:
            assert after in self.pipe_names, (
                "component cannot be added to the pipeline, "
                "please double check argument values of the `add_pipe` method call."
            )
            self.pipeline_template.insert(self.pipe_names.index(after) + 1, pipe_template)
        else:
            # Default placement (`last`).
            self.pipeline_template.append(pipe_template)

        self._reset_pipeline()

    def remove_pipe(self, name: str) -> dict:
        """Remove and return the pipe template named `name`."""
        assert (
            name in self.pipe_names
        ), "No pipeline component with the specified name '{}' was found".format(name)

        pipe = self.pipeline_template.pop(self.pipe_names.index(name))

        # _reset_pipeline already re-parses the pipeline template.
        self._reset_pipeline()

        return pipe

    def _run_subpipeline_from_template(
        self,
        template_index: int,
        input: Union[str, String, StringPointer, Doc, DocPointer] = None,
    ) -> Union[Doc, DocPointer]:
        """Run the subpipeline at `template_index` on the worker holding
        `input` (when the template is remote) or on the local worker,
        creating and sending the SubPipeline object on first use.
        """
        # Worker where the input is located.
        if isinstance(input, ObjectPointer):
            location_id = input.location.id
        else:
            location_id = self.owner.id

        if location_id not in self.pipeline[template_index]:
            # Get the subpipeline template.
            subpipeline_template = self.subpipeline_templates[template_index]

            # Is the pipeline a remote one?
            remote = subpipeline_template["remote"]

            # Instantiate a subpipeline and load the subpipeline template.
            subpipeline = SubPipeline()
            subpipeline.load_template(template=subpipeline_template, factories=self.factories)

            # Add the subpipeline to the pipeline.
            self.pipeline[template_index][location_id] = subpipeline

            # Send the subpipeline to the worker where the input is located.
            if (
                isinstance(input, ObjectPointer)  # Is the input remote?
                and input.location != self.owner
                and remote  # Is the subpipeline sendable?
            ):
                self.pipeline[template_index][location_id] = self.pipeline[template_index][
                    location_id
                ].send(input.location)

        # Apply the subpipeline; a str/int result is the ID of a Doc on the
        # remote worker, so wrap it in a DocPointer.
        doc_or_id = self.pipeline[template_index][location_id](input)

        if isinstance(doc_or_id, (int, str)):
            doc = DocPointer(location=input.location, id_at_location=doc_or_id, owner=self.owner)
        else:
            doc = doc_or_id

        return doc

    def __call__(self, text: Union[str, String, StringPointer]) -> Union[Doc, DocPointer]:
        """Tokenize `text` and run every pipeline component on it."""
        # The first subpipeline is the one that has the tokenizer.
        doc = self._run_subpipeline_from_template(template_index=0, input=text)

        # Each subsequent subpipeline modifies the document `doc` inplace.
        for i in range(1, len(self.pipeline)):
            doc = self._run_subpipeline_from_template(template_index=i, input=doc)

        return doc
true
true
f723e3b85ffbd830c43dccf93a9dd2bd55bc2e30
31,635
py
Python
sdk/python/pulumi_google_native/apigee/v1/rate_plan.py
AaronFriel/pulumi-google-native
75d1cda425e33d4610348972cd70bddf35f1770d
[ "Apache-2.0" ]
44
2021-04-18T23:00:48.000Z
2022-02-14T17:43:15.000Z
sdk/python/pulumi_google_native/apigee/v1/rate_plan.py
AaronFriel/pulumi-google-native
75d1cda425e33d4610348972cd70bddf35f1770d
[ "Apache-2.0" ]
354
2021-04-16T16:48:39.000Z
2022-03-31T17:16:39.000Z
sdk/python/pulumi_google_native/apigee/v1/rate_plan.py
AaronFriel/pulumi-google-native
75d1cda425e33d4610348972cd70bddf35f1770d
[ "Apache-2.0" ]
8
2021-04-24T17:46:51.000Z
2022-01-05T10:40:21.000Z
# coding=utf-8 # *** WARNING: this file was generated by the Pulumi SDK Generator. *** # *** Do not edit by hand unless you're certain you know what you are doing! *** import warnings import pulumi import pulumi.runtime from typing import Any, Mapping, Optional, Sequence, Union, overload from ... import _utilities from . import outputs from ._enums import * from ._inputs import * __all__ = ['RatePlanArgs', 'RatePlan'] @pulumi.input_type class RatePlanArgs: def __init__(__self__, *, apiproduct_id: pulumi.Input[str], organization_id: pulumi.Input[str], apiproduct: Optional[pulumi.Input[str]] = None, billing_period: Optional[pulumi.Input['RatePlanBillingPeriod']] = None, consumption_pricing_rates: Optional[pulumi.Input[Sequence[pulumi.Input['GoogleCloudApigeeV1RateRangeArgs']]]] = None, consumption_pricing_type: Optional[pulumi.Input['RatePlanConsumptionPricingType']] = None, currency_code: Optional[pulumi.Input[str]] = None, description: Optional[pulumi.Input[str]] = None, display_name: Optional[pulumi.Input[str]] = None, end_time: Optional[pulumi.Input[str]] = None, fixed_fee_frequency: Optional[pulumi.Input[int]] = None, fixed_recurring_fee: Optional[pulumi.Input['GoogleTypeMoneyArgs']] = None, revenue_share_rates: Optional[pulumi.Input[Sequence[pulumi.Input['GoogleCloudApigeeV1RevenueShareRangeArgs']]]] = None, revenue_share_type: Optional[pulumi.Input['RatePlanRevenueShareType']] = None, setup_fee: Optional[pulumi.Input['GoogleTypeMoneyArgs']] = None, start_time: Optional[pulumi.Input[str]] = None, state: Optional[pulumi.Input['RatePlanState']] = None): """ The set of arguments for constructing a RatePlan resource. :param pulumi.Input[str] apiproduct: Name of the API product that the rate plan is associated with. :param pulumi.Input['RatePlanBillingPeriod'] billing_period: Frequency at which the customer will be billed. 
:param pulumi.Input[Sequence[pulumi.Input['GoogleCloudApigeeV1RateRangeArgs']]] consumption_pricing_rates: API call volume ranges and the fees charged when the total number of API calls is within a given range. The method used to calculate the final fee depends on the selected pricing model. For example, if the pricing model is `STAIRSTEP` and the ranges are defined as follows: ``` { "start": 1, "end": 100, "fee": 75 }, { "start": 101, "end": 200, "fee": 100 }, } ``` Then the following fees would be charged based on the total number of API calls (assuming the currency selected is `USD`): * 1 call costs $75 * 50 calls cost $75 * 150 calls cost $100 The number of API calls cannot exceed 200. :param pulumi.Input['RatePlanConsumptionPricingType'] consumption_pricing_type: Pricing model used for consumption-based charges. :param pulumi.Input[str] currency_code: Currency to be used for billing. Consists of a three-letter code as defined by the [ISO 4217](https://en.wikipedia.org/wiki/ISO_4217) standard. :param pulumi.Input[str] description: Description of the rate plan. :param pulumi.Input[str] display_name: Display name of the rate plan. :param pulumi.Input[str] end_time: Time when the rate plan will expire in milliseconds since epoch. Set to 0 or `null` to indicate that the rate plan should never expire. :param pulumi.Input[int] fixed_fee_frequency: Frequency at which the fixed fee is charged. :param pulumi.Input['GoogleTypeMoneyArgs'] fixed_recurring_fee: Fixed amount that is charged at a defined interval and billed in advance of use of the API product. The fee will be prorated for the first billing period. :param pulumi.Input[Sequence[pulumi.Input['GoogleCloudApigeeV1RevenueShareRangeArgs']]] revenue_share_rates: Details of the revenue sharing model. :param pulumi.Input['RatePlanRevenueShareType'] revenue_share_type: Method used to calculate the revenue that is shared with developers. 
:param pulumi.Input['GoogleTypeMoneyArgs'] setup_fee: Initial, one-time fee paid when purchasing the API product. :param pulumi.Input[str] start_time: Time when the rate plan becomes active in milliseconds since epoch. :param pulumi.Input['RatePlanState'] state: Current state of the rate plan (draft or published). """ pulumi.set(__self__, "apiproduct_id", apiproduct_id) pulumi.set(__self__, "organization_id", organization_id) if apiproduct is not None: pulumi.set(__self__, "apiproduct", apiproduct) if billing_period is not None: pulumi.set(__self__, "billing_period", billing_period) if consumption_pricing_rates is not None: pulumi.set(__self__, "consumption_pricing_rates", consumption_pricing_rates) if consumption_pricing_type is not None: pulumi.set(__self__, "consumption_pricing_type", consumption_pricing_type) if currency_code is not None: pulumi.set(__self__, "currency_code", currency_code) if description is not None: pulumi.set(__self__, "description", description) if display_name is not None: pulumi.set(__self__, "display_name", display_name) if end_time is not None: pulumi.set(__self__, "end_time", end_time) if fixed_fee_frequency is not None: pulumi.set(__self__, "fixed_fee_frequency", fixed_fee_frequency) if fixed_recurring_fee is not None: pulumi.set(__self__, "fixed_recurring_fee", fixed_recurring_fee) if revenue_share_rates is not None: pulumi.set(__self__, "revenue_share_rates", revenue_share_rates) if revenue_share_type is not None: pulumi.set(__self__, "revenue_share_type", revenue_share_type) if setup_fee is not None: pulumi.set(__self__, "setup_fee", setup_fee) if start_time is not None: pulumi.set(__self__, "start_time", start_time) if state is not None: pulumi.set(__self__, "state", state) @property @pulumi.getter(name="apiproductId") def apiproduct_id(self) -> pulumi.Input[str]: return pulumi.get(self, "apiproduct_id") @apiproduct_id.setter def apiproduct_id(self, value: pulumi.Input[str]): pulumi.set(self, "apiproduct_id", value) @property 
@pulumi.getter(name="organizationId") def organization_id(self) -> pulumi.Input[str]: return pulumi.get(self, "organization_id") @organization_id.setter def organization_id(self, value: pulumi.Input[str]): pulumi.set(self, "organization_id", value) @property @pulumi.getter def apiproduct(self) -> Optional[pulumi.Input[str]]: """ Name of the API product that the rate plan is associated with. """ return pulumi.get(self, "apiproduct") @apiproduct.setter def apiproduct(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "apiproduct", value) @property @pulumi.getter(name="billingPeriod") def billing_period(self) -> Optional[pulumi.Input['RatePlanBillingPeriod']]: """ Frequency at which the customer will be billed. """ return pulumi.get(self, "billing_period") @billing_period.setter def billing_period(self, value: Optional[pulumi.Input['RatePlanBillingPeriod']]): pulumi.set(self, "billing_period", value) @property @pulumi.getter(name="consumptionPricingRates") def consumption_pricing_rates(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['GoogleCloudApigeeV1RateRangeArgs']]]]: """ API call volume ranges and the fees charged when the total number of API calls is within a given range. The method used to calculate the final fee depends on the selected pricing model. For example, if the pricing model is `STAIRSTEP` and the ranges are defined as follows: ``` { "start": 1, "end": 100, "fee": 75 }, { "start": 101, "end": 200, "fee": 100 }, } ``` Then the following fees would be charged based on the total number of API calls (assuming the currency selected is `USD`): * 1 call costs $75 * 50 calls cost $75 * 150 calls cost $100 The number of API calls cannot exceed 200. 
""" return pulumi.get(self, "consumption_pricing_rates") @consumption_pricing_rates.setter def consumption_pricing_rates(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['GoogleCloudApigeeV1RateRangeArgs']]]]): pulumi.set(self, "consumption_pricing_rates", value) @property @pulumi.getter(name="consumptionPricingType") def consumption_pricing_type(self) -> Optional[pulumi.Input['RatePlanConsumptionPricingType']]: """ Pricing model used for consumption-based charges. """ return pulumi.get(self, "consumption_pricing_type") @consumption_pricing_type.setter def consumption_pricing_type(self, value: Optional[pulumi.Input['RatePlanConsumptionPricingType']]): pulumi.set(self, "consumption_pricing_type", value) @property @pulumi.getter(name="currencyCode") def currency_code(self) -> Optional[pulumi.Input[str]]: """ Currency to be used for billing. Consists of a three-letter code as defined by the [ISO 4217](https://en.wikipedia.org/wiki/ISO_4217) standard. """ return pulumi.get(self, "currency_code") @currency_code.setter def currency_code(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "currency_code", value) @property @pulumi.getter def description(self) -> Optional[pulumi.Input[str]]: """ Description of the rate plan. """ return pulumi.get(self, "description") @description.setter def description(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "description", value) @property @pulumi.getter(name="displayName") def display_name(self) -> Optional[pulumi.Input[str]]: """ Display name of the rate plan. """ return pulumi.get(self, "display_name") @display_name.setter def display_name(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "display_name", value) @property @pulumi.getter(name="endTime") def end_time(self) -> Optional[pulumi.Input[str]]: """ Time when the rate plan will expire in milliseconds since epoch. Set to 0 or `null` to indicate that the rate plan should never expire. 
""" return pulumi.get(self, "end_time") @end_time.setter def end_time(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "end_time", value) @property @pulumi.getter(name="fixedFeeFrequency") def fixed_fee_frequency(self) -> Optional[pulumi.Input[int]]: """ Frequency at which the fixed fee is charged. """ return pulumi.get(self, "fixed_fee_frequency") @fixed_fee_frequency.setter def fixed_fee_frequency(self, value: Optional[pulumi.Input[int]]): pulumi.set(self, "fixed_fee_frequency", value) @property @pulumi.getter(name="fixedRecurringFee") def fixed_recurring_fee(self) -> Optional[pulumi.Input['GoogleTypeMoneyArgs']]: """ Fixed amount that is charged at a defined interval and billed in advance of use of the API product. The fee will be prorated for the first billing period. """ return pulumi.get(self, "fixed_recurring_fee") @fixed_recurring_fee.setter def fixed_recurring_fee(self, value: Optional[pulumi.Input['GoogleTypeMoneyArgs']]): pulumi.set(self, "fixed_recurring_fee", value) @property @pulumi.getter(name="revenueShareRates") def revenue_share_rates(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['GoogleCloudApigeeV1RevenueShareRangeArgs']]]]: """ Details of the revenue sharing model. """ return pulumi.get(self, "revenue_share_rates") @revenue_share_rates.setter def revenue_share_rates(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['GoogleCloudApigeeV1RevenueShareRangeArgs']]]]): pulumi.set(self, "revenue_share_rates", value) @property @pulumi.getter(name="revenueShareType") def revenue_share_type(self) -> Optional[pulumi.Input['RatePlanRevenueShareType']]: """ Method used to calculate the revenue that is shared with developers. 
""" return pulumi.get(self, "revenue_share_type") @revenue_share_type.setter def revenue_share_type(self, value: Optional[pulumi.Input['RatePlanRevenueShareType']]): pulumi.set(self, "revenue_share_type", value) @property @pulumi.getter(name="setupFee") def setup_fee(self) -> Optional[pulumi.Input['GoogleTypeMoneyArgs']]: """ Initial, one-time fee paid when purchasing the API product. """ return pulumi.get(self, "setup_fee") @setup_fee.setter def setup_fee(self, value: Optional[pulumi.Input['GoogleTypeMoneyArgs']]): pulumi.set(self, "setup_fee", value) @property @pulumi.getter(name="startTime") def start_time(self) -> Optional[pulumi.Input[str]]: """ Time when the rate plan becomes active in milliseconds since epoch. """ return pulumi.get(self, "start_time") @start_time.setter def start_time(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "start_time", value) @property @pulumi.getter def state(self) -> Optional[pulumi.Input['RatePlanState']]: """ Current state of the rate plan (draft or published). 
""" return pulumi.get(self, "state") @state.setter def state(self, value: Optional[pulumi.Input['RatePlanState']]): pulumi.set(self, "state", value) class RatePlan(pulumi.CustomResource): @overload def __init__(__self__, resource_name: str, opts: Optional[pulumi.ResourceOptions] = None, apiproduct: Optional[pulumi.Input[str]] = None, apiproduct_id: Optional[pulumi.Input[str]] = None, billing_period: Optional[pulumi.Input['RatePlanBillingPeriod']] = None, consumption_pricing_rates: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['GoogleCloudApigeeV1RateRangeArgs']]]]] = None, consumption_pricing_type: Optional[pulumi.Input['RatePlanConsumptionPricingType']] = None, currency_code: Optional[pulumi.Input[str]] = None, description: Optional[pulumi.Input[str]] = None, display_name: Optional[pulumi.Input[str]] = None, end_time: Optional[pulumi.Input[str]] = None, fixed_fee_frequency: Optional[pulumi.Input[int]] = None, fixed_recurring_fee: Optional[pulumi.Input[pulumi.InputType['GoogleTypeMoneyArgs']]] = None, organization_id: Optional[pulumi.Input[str]] = None, revenue_share_rates: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['GoogleCloudApigeeV1RevenueShareRangeArgs']]]]] = None, revenue_share_type: Optional[pulumi.Input['RatePlanRevenueShareType']] = None, setup_fee: Optional[pulumi.Input[pulumi.InputType['GoogleTypeMoneyArgs']]] = None, start_time: Optional[pulumi.Input[str]] = None, state: Optional[pulumi.Input['RatePlanState']] = None, __props__=None): """ Create a rate plan that is associated with an API product in an organization. 
Using rate plans, API product owners can monetize their API products by configuring one or more of the following: - Billing frequency - Initial setup fees for using an API product - Payment funding model (postpaid only) - Fixed recurring or consumption-based charges for using an API product - Revenue sharing with developer partners An API product can have multiple rate plans associated with it but *only one* rate plan can be active at any point of time. **Note: From the developer's perspective, they purchase API products not rate plans. Auto-naming is currently not supported for this resource. :param str resource_name: The name of the resource. :param pulumi.ResourceOptions opts: Options for the resource. :param pulumi.Input[str] apiproduct: Name of the API product that the rate plan is associated with. :param pulumi.Input['RatePlanBillingPeriod'] billing_period: Frequency at which the customer will be billed. :param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['GoogleCloudApigeeV1RateRangeArgs']]]] consumption_pricing_rates: API call volume ranges and the fees charged when the total number of API calls is within a given range. The method used to calculate the final fee depends on the selected pricing model. For example, if the pricing model is `STAIRSTEP` and the ranges are defined as follows: ``` { "start": 1, "end": 100, "fee": 75 }, { "start": 101, "end": 200, "fee": 100 }, } ``` Then the following fees would be charged based on the total number of API calls (assuming the currency selected is `USD`): * 1 call costs $75 * 50 calls cost $75 * 150 calls cost $100 The number of API calls cannot exceed 200. :param pulumi.Input['RatePlanConsumptionPricingType'] consumption_pricing_type: Pricing model used for consumption-based charges. :param pulumi.Input[str] currency_code: Currency to be used for billing. Consists of a three-letter code as defined by the [ISO 4217](https://en.wikipedia.org/wiki/ISO_4217) standard. 
:param pulumi.Input[str] description: Description of the rate plan. :param pulumi.Input[str] display_name: Display name of the rate plan. :param pulumi.Input[str] end_time: Time when the rate plan will expire in milliseconds since epoch. Set to 0 or `null` to indicate that the rate plan should never expire. :param pulumi.Input[int] fixed_fee_frequency: Frequency at which the fixed fee is charged. :param pulumi.Input[pulumi.InputType['GoogleTypeMoneyArgs']] fixed_recurring_fee: Fixed amount that is charged at a defined interval and billed in advance of use of the API product. The fee will be prorated for the first billing period. :param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['GoogleCloudApigeeV1RevenueShareRangeArgs']]]] revenue_share_rates: Details of the revenue sharing model. :param pulumi.Input['RatePlanRevenueShareType'] revenue_share_type: Method used to calculate the revenue that is shared with developers. :param pulumi.Input[pulumi.InputType['GoogleTypeMoneyArgs']] setup_fee: Initial, one-time fee paid when purchasing the API product. :param pulumi.Input[str] start_time: Time when the rate plan becomes active in milliseconds since epoch. :param pulumi.Input['RatePlanState'] state: Current state of the rate plan (draft or published). """ ... @overload def __init__(__self__, resource_name: str, args: RatePlanArgs, opts: Optional[pulumi.ResourceOptions] = None): """ Create a rate plan that is associated with an API product in an organization. Using rate plans, API product owners can monetize their API products by configuring one or more of the following: - Billing frequency - Initial setup fees for using an API product - Payment funding model (postpaid only) - Fixed recurring or consumption-based charges for using an API product - Revenue sharing with developer partners An API product can have multiple rate plans associated with it but *only one* rate plan can be active at any point of time. 
**Note: From the developer's perspective, they purchase API products not rate plans. Auto-naming is currently not supported for this resource. :param str resource_name: The name of the resource. :param RatePlanArgs args: The arguments to use to populate this resource's properties. :param pulumi.ResourceOptions opts: Options for the resource. """ ... def __init__(__self__, resource_name: str, *args, **kwargs): resource_args, opts = _utilities.get_resource_args_opts(RatePlanArgs, pulumi.ResourceOptions, *args, **kwargs) if resource_args is not None: __self__._internal_init(resource_name, opts, **resource_args.__dict__) else: __self__._internal_init(resource_name, *args, **kwargs) def _internal_init(__self__, resource_name: str, opts: Optional[pulumi.ResourceOptions] = None, apiproduct: Optional[pulumi.Input[str]] = None, apiproduct_id: Optional[pulumi.Input[str]] = None, billing_period: Optional[pulumi.Input['RatePlanBillingPeriod']] = None, consumption_pricing_rates: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['GoogleCloudApigeeV1RateRangeArgs']]]]] = None, consumption_pricing_type: Optional[pulumi.Input['RatePlanConsumptionPricingType']] = None, currency_code: Optional[pulumi.Input[str]] = None, description: Optional[pulumi.Input[str]] = None, display_name: Optional[pulumi.Input[str]] = None, end_time: Optional[pulumi.Input[str]] = None, fixed_fee_frequency: Optional[pulumi.Input[int]] = None, fixed_recurring_fee: Optional[pulumi.Input[pulumi.InputType['GoogleTypeMoneyArgs']]] = None, organization_id: Optional[pulumi.Input[str]] = None, revenue_share_rates: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['GoogleCloudApigeeV1RevenueShareRangeArgs']]]]] = None, revenue_share_type: Optional[pulumi.Input['RatePlanRevenueShareType']] = None, setup_fee: Optional[pulumi.Input[pulumi.InputType['GoogleTypeMoneyArgs']]] = None, start_time: Optional[pulumi.Input[str]] = None, state: Optional[pulumi.Input['RatePlanState']] = None, __props__=None): 
if opts is None: opts = pulumi.ResourceOptions() if not isinstance(opts, pulumi.ResourceOptions): raise TypeError('Expected resource options to be a ResourceOptions instance') if opts.version is None: opts.version = _utilities.get_version() if opts.id is None: if __props__ is not None: raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource') __props__ = RatePlanArgs.__new__(RatePlanArgs) __props__.__dict__["apiproduct"] = apiproduct if apiproduct_id is None and not opts.urn: raise TypeError("Missing required property 'apiproduct_id'") __props__.__dict__["apiproduct_id"] = apiproduct_id __props__.__dict__["billing_period"] = billing_period __props__.__dict__["consumption_pricing_rates"] = consumption_pricing_rates __props__.__dict__["consumption_pricing_type"] = consumption_pricing_type __props__.__dict__["currency_code"] = currency_code __props__.__dict__["description"] = description __props__.__dict__["display_name"] = display_name __props__.__dict__["end_time"] = end_time __props__.__dict__["fixed_fee_frequency"] = fixed_fee_frequency __props__.__dict__["fixed_recurring_fee"] = fixed_recurring_fee if organization_id is None and not opts.urn: raise TypeError("Missing required property 'organization_id'") __props__.__dict__["organization_id"] = organization_id __props__.__dict__["revenue_share_rates"] = revenue_share_rates __props__.__dict__["revenue_share_type"] = revenue_share_type __props__.__dict__["setup_fee"] = setup_fee __props__.__dict__["start_time"] = start_time __props__.__dict__["state"] = state __props__.__dict__["created_at"] = None __props__.__dict__["last_modified_at"] = None __props__.__dict__["name"] = None super(RatePlan, __self__).__init__( 'google-native:apigee/v1:RatePlan', resource_name, __props__, opts) @staticmethod def get(resource_name: str, id: pulumi.Input[str], opts: Optional[pulumi.ResourceOptions] = None) -> 'RatePlan': """ Get an existing RatePlan resource's state with 
the given name, id, and optional extra properties used to qualify the lookup. :param str resource_name: The unique name of the resulting resource. :param pulumi.Input[str] id: The unique provider ID of the resource to lookup. :param pulumi.ResourceOptions opts: Options for the resource. """ opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id)) __props__ = RatePlanArgs.__new__(RatePlanArgs) __props__.__dict__["apiproduct"] = None __props__.__dict__["billing_period"] = None __props__.__dict__["consumption_pricing_rates"] = None __props__.__dict__["consumption_pricing_type"] = None __props__.__dict__["created_at"] = None __props__.__dict__["currency_code"] = None __props__.__dict__["description"] = None __props__.__dict__["display_name"] = None __props__.__dict__["end_time"] = None __props__.__dict__["fixed_fee_frequency"] = None __props__.__dict__["fixed_recurring_fee"] = None __props__.__dict__["last_modified_at"] = None __props__.__dict__["name"] = None __props__.__dict__["revenue_share_rates"] = None __props__.__dict__["revenue_share_type"] = None __props__.__dict__["setup_fee"] = None __props__.__dict__["start_time"] = None __props__.__dict__["state"] = None return RatePlan(resource_name, opts=opts, __props__=__props__) @property @pulumi.getter def apiproduct(self) -> pulumi.Output[str]: """ Name of the API product that the rate plan is associated with. """ return pulumi.get(self, "apiproduct") @property @pulumi.getter(name="billingPeriod") def billing_period(self) -> pulumi.Output[str]: """ Frequency at which the customer will be billed. """ return pulumi.get(self, "billing_period") @property @pulumi.getter(name="consumptionPricingRates") def consumption_pricing_rates(self) -> pulumi.Output[Sequence['outputs.GoogleCloudApigeeV1RateRangeResponse']]: """ API call volume ranges and the fees charged when the total number of API calls is within a given range. The method used to calculate the final fee depends on the selected pricing model. 
For example, if the pricing model is `STAIRSTEP` and the ranges are defined as follows: ``` { "start": 1, "end": 100, "fee": 75 }, { "start": 101, "end": 200, "fee": 100 }, } ``` Then the following fees would be charged based on the total number of API calls (assuming the currency selected is `USD`): * 1 call costs $75 * 50 calls cost $75 * 150 calls cost $100 The number of API calls cannot exceed 200. """ return pulumi.get(self, "consumption_pricing_rates") @property @pulumi.getter(name="consumptionPricingType") def consumption_pricing_type(self) -> pulumi.Output[str]: """ Pricing model used for consumption-based charges. """ return pulumi.get(self, "consumption_pricing_type") @property @pulumi.getter(name="createdAt") def created_at(self) -> pulumi.Output[str]: """ Time that the rate plan was created in milliseconds since epoch. """ return pulumi.get(self, "created_at") @property @pulumi.getter(name="currencyCode") def currency_code(self) -> pulumi.Output[str]: """ Currency to be used for billing. Consists of a three-letter code as defined by the [ISO 4217](https://en.wikipedia.org/wiki/ISO_4217) standard. """ return pulumi.get(self, "currency_code") @property @pulumi.getter def description(self) -> pulumi.Output[str]: """ Description of the rate plan. """ return pulumi.get(self, "description") @property @pulumi.getter(name="displayName") def display_name(self) -> pulumi.Output[str]: """ Display name of the rate plan. """ return pulumi.get(self, "display_name") @property @pulumi.getter(name="endTime") def end_time(self) -> pulumi.Output[str]: """ Time when the rate plan will expire in milliseconds since epoch. Set to 0 or `null` to indicate that the rate plan should never expire. """ return pulumi.get(self, "end_time") @property @pulumi.getter(name="fixedFeeFrequency") def fixed_fee_frequency(self) -> pulumi.Output[int]: """ Frequency at which the fixed fee is charged. 
""" return pulumi.get(self, "fixed_fee_frequency") @property @pulumi.getter(name="fixedRecurringFee") def fixed_recurring_fee(self) -> pulumi.Output['outputs.GoogleTypeMoneyResponse']: """ Fixed amount that is charged at a defined interval and billed in advance of use of the API product. The fee will be prorated for the first billing period. """ return pulumi.get(self, "fixed_recurring_fee") @property @pulumi.getter(name="lastModifiedAt") def last_modified_at(self) -> pulumi.Output[str]: """ Time the rate plan was last modified in milliseconds since epoch. """ return pulumi.get(self, "last_modified_at") @property @pulumi.getter def name(self) -> pulumi.Output[str]: """ Name of the rate plan. """ return pulumi.get(self, "name") @property @pulumi.getter(name="revenueShareRates") def revenue_share_rates(self) -> pulumi.Output[Sequence['outputs.GoogleCloudApigeeV1RevenueShareRangeResponse']]: """ Details of the revenue sharing model. """ return pulumi.get(self, "revenue_share_rates") @property @pulumi.getter(name="revenueShareType") def revenue_share_type(self) -> pulumi.Output[str]: """ Method used to calculate the revenue that is shared with developers. """ return pulumi.get(self, "revenue_share_type") @property @pulumi.getter(name="setupFee") def setup_fee(self) -> pulumi.Output['outputs.GoogleTypeMoneyResponse']: """ Initial, one-time fee paid when purchasing the API product. """ return pulumi.get(self, "setup_fee") @property @pulumi.getter(name="startTime") def start_time(self) -> pulumi.Output[str]: """ Time when the rate plan becomes active in milliseconds since epoch. """ return pulumi.get(self, "start_time") @property @pulumi.getter def state(self) -> pulumi.Output[str]: """ Current state of the rate plan (draft or published). """ return pulumi.get(self, "state")
53.168067
724
0.676656
import warnings import pulumi import pulumi.runtime from typing import Any, Mapping, Optional, Sequence, Union, overload from ... import _utilities from . import outputs from ._enums import * from ._inputs import * __all__ = ['RatePlanArgs', 'RatePlan'] @pulumi.input_type class RatePlanArgs: def __init__(__self__, *, apiproduct_id: pulumi.Input[str], organization_id: pulumi.Input[str], apiproduct: Optional[pulumi.Input[str]] = None, billing_period: Optional[pulumi.Input['RatePlanBillingPeriod']] = None, consumption_pricing_rates: Optional[pulumi.Input[Sequence[pulumi.Input['GoogleCloudApigeeV1RateRangeArgs']]]] = None, consumption_pricing_type: Optional[pulumi.Input['RatePlanConsumptionPricingType']] = None, currency_code: Optional[pulumi.Input[str]] = None, description: Optional[pulumi.Input[str]] = None, display_name: Optional[pulumi.Input[str]] = None, end_time: Optional[pulumi.Input[str]] = None, fixed_fee_frequency: Optional[pulumi.Input[int]] = None, fixed_recurring_fee: Optional[pulumi.Input['GoogleTypeMoneyArgs']] = None, revenue_share_rates: Optional[pulumi.Input[Sequence[pulumi.Input['GoogleCloudApigeeV1RevenueShareRangeArgs']]]] = None, revenue_share_type: Optional[pulumi.Input['RatePlanRevenueShareType']] = None, setup_fee: Optional[pulumi.Input['GoogleTypeMoneyArgs']] = None, start_time: Optional[pulumi.Input[str]] = None, state: Optional[pulumi.Input['RatePlanState']] = None): pulumi.set(__self__, "apiproduct_id", apiproduct_id) pulumi.set(__self__, "organization_id", organization_id) if apiproduct is not None: pulumi.set(__self__, "apiproduct", apiproduct) if billing_period is not None: pulumi.set(__self__, "billing_period", billing_period) if consumption_pricing_rates is not None: pulumi.set(__self__, "consumption_pricing_rates", consumption_pricing_rates) if consumption_pricing_type is not None: pulumi.set(__self__, "consumption_pricing_type", consumption_pricing_type) if currency_code is not None: pulumi.set(__self__, "currency_code", 
currency_code) if description is not None: pulumi.set(__self__, "description", description) if display_name is not None: pulumi.set(__self__, "display_name", display_name) if end_time is not None: pulumi.set(__self__, "end_time", end_time) if fixed_fee_frequency is not None: pulumi.set(__self__, "fixed_fee_frequency", fixed_fee_frequency) if fixed_recurring_fee is not None: pulumi.set(__self__, "fixed_recurring_fee", fixed_recurring_fee) if revenue_share_rates is not None: pulumi.set(__self__, "revenue_share_rates", revenue_share_rates) if revenue_share_type is not None: pulumi.set(__self__, "revenue_share_type", revenue_share_type) if setup_fee is not None: pulumi.set(__self__, "setup_fee", setup_fee) if start_time is not None: pulumi.set(__self__, "start_time", start_time) if state is not None: pulumi.set(__self__, "state", state) @property @pulumi.getter(name="apiproductId") def apiproduct_id(self) -> pulumi.Input[str]: return pulumi.get(self, "apiproduct_id") @apiproduct_id.setter def apiproduct_id(self, value: pulumi.Input[str]): pulumi.set(self, "apiproduct_id", value) @property @pulumi.getter(name="organizationId") def organization_id(self) -> pulumi.Input[str]: return pulumi.get(self, "organization_id") @organization_id.setter def organization_id(self, value: pulumi.Input[str]): pulumi.set(self, "organization_id", value) @property @pulumi.getter def apiproduct(self) -> Optional[pulumi.Input[str]]: return pulumi.get(self, "apiproduct") @apiproduct.setter def apiproduct(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "apiproduct", value) @property @pulumi.getter(name="billingPeriod") def billing_period(self) -> Optional[pulumi.Input['RatePlanBillingPeriod']]: return pulumi.get(self, "billing_period") @billing_period.setter def billing_period(self, value: Optional[pulumi.Input['RatePlanBillingPeriod']]): pulumi.set(self, "billing_period", value) @property @pulumi.getter(name="consumptionPricingRates") def consumption_pricing_rates(self) -> 
Optional[pulumi.Input[Sequence[pulumi.Input['GoogleCloudApigeeV1RateRangeArgs']]]]: return pulumi.get(self, "consumption_pricing_rates") @consumption_pricing_rates.setter def consumption_pricing_rates(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['GoogleCloudApigeeV1RateRangeArgs']]]]): pulumi.set(self, "consumption_pricing_rates", value) @property @pulumi.getter(name="consumptionPricingType") def consumption_pricing_type(self) -> Optional[pulumi.Input['RatePlanConsumptionPricingType']]: return pulumi.get(self, "consumption_pricing_type") @consumption_pricing_type.setter def consumption_pricing_type(self, value: Optional[pulumi.Input['RatePlanConsumptionPricingType']]): pulumi.set(self, "consumption_pricing_type", value) @property @pulumi.getter(name="currencyCode") def currency_code(self) -> Optional[pulumi.Input[str]]: return pulumi.get(self, "currency_code") @currency_code.setter def currency_code(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "currency_code", value) @property @pulumi.getter def description(self) -> Optional[pulumi.Input[str]]: return pulumi.get(self, "description") @description.setter def description(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "description", value) @property @pulumi.getter(name="displayName") def display_name(self) -> Optional[pulumi.Input[str]]: return pulumi.get(self, "display_name") @display_name.setter def display_name(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "display_name", value) @property @pulumi.getter(name="endTime") def end_time(self) -> Optional[pulumi.Input[str]]: return pulumi.get(self, "end_time") @end_time.setter def end_time(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "end_time", value) @property @pulumi.getter(name="fixedFeeFrequency") def fixed_fee_frequency(self) -> Optional[pulumi.Input[int]]: return pulumi.get(self, "fixed_fee_frequency") @fixed_fee_frequency.setter def fixed_fee_frequency(self, value: Optional[pulumi.Input[int]]): 
pulumi.set(self, "fixed_fee_frequency", value) @property @pulumi.getter(name="fixedRecurringFee") def fixed_recurring_fee(self) -> Optional[pulumi.Input['GoogleTypeMoneyArgs']]: return pulumi.get(self, "fixed_recurring_fee") @fixed_recurring_fee.setter def fixed_recurring_fee(self, value: Optional[pulumi.Input['GoogleTypeMoneyArgs']]): pulumi.set(self, "fixed_recurring_fee", value) @property @pulumi.getter(name="revenueShareRates") def revenue_share_rates(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['GoogleCloudApigeeV1RevenueShareRangeArgs']]]]: return pulumi.get(self, "revenue_share_rates") @revenue_share_rates.setter def revenue_share_rates(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['GoogleCloudApigeeV1RevenueShareRangeArgs']]]]): pulumi.set(self, "revenue_share_rates", value) @property @pulumi.getter(name="revenueShareType") def revenue_share_type(self) -> Optional[pulumi.Input['RatePlanRevenueShareType']]: return pulumi.get(self, "revenue_share_type") @revenue_share_type.setter def revenue_share_type(self, value: Optional[pulumi.Input['RatePlanRevenueShareType']]): pulumi.set(self, "revenue_share_type", value) @property @pulumi.getter(name="setupFee") def setup_fee(self) -> Optional[pulumi.Input['GoogleTypeMoneyArgs']]: return pulumi.get(self, "setup_fee") @setup_fee.setter def setup_fee(self, value: Optional[pulumi.Input['GoogleTypeMoneyArgs']]): pulumi.set(self, "setup_fee", value) @property @pulumi.getter(name="startTime") def start_time(self) -> Optional[pulumi.Input[str]]: return pulumi.get(self, "start_time") @start_time.setter def start_time(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "start_time", value) @property @pulumi.getter def state(self) -> Optional[pulumi.Input['RatePlanState']]: return pulumi.get(self, "state") @state.setter def state(self, value: Optional[pulumi.Input['RatePlanState']]): pulumi.set(self, "state", value) class RatePlan(pulumi.CustomResource): @overload def __init__(__self__, resource_name: 
str, opts: Optional[pulumi.ResourceOptions] = None, apiproduct: Optional[pulumi.Input[str]] = None, apiproduct_id: Optional[pulumi.Input[str]] = None, billing_period: Optional[pulumi.Input['RatePlanBillingPeriod']] = None, consumption_pricing_rates: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['GoogleCloudApigeeV1RateRangeArgs']]]]] = None, consumption_pricing_type: Optional[pulumi.Input['RatePlanConsumptionPricingType']] = None, currency_code: Optional[pulumi.Input[str]] = None, description: Optional[pulumi.Input[str]] = None, display_name: Optional[pulumi.Input[str]] = None, end_time: Optional[pulumi.Input[str]] = None, fixed_fee_frequency: Optional[pulumi.Input[int]] = None, fixed_recurring_fee: Optional[pulumi.Input[pulumi.InputType['GoogleTypeMoneyArgs']]] = None, organization_id: Optional[pulumi.Input[str]] = None, revenue_share_rates: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['GoogleCloudApigeeV1RevenueShareRangeArgs']]]]] = None, revenue_share_type: Optional[pulumi.Input['RatePlanRevenueShareType']] = None, setup_fee: Optional[pulumi.Input[pulumi.InputType['GoogleTypeMoneyArgs']]] = None, start_time: Optional[pulumi.Input[str]] = None, state: Optional[pulumi.Input['RatePlanState']] = None, __props__=None): ... @overload def __init__(__self__, resource_name: str, args: RatePlanArgs, opts: Optional[pulumi.ResourceOptions] = None): ... 
def __init__(__self__, resource_name: str, *args, **kwargs): resource_args, opts = _utilities.get_resource_args_opts(RatePlanArgs, pulumi.ResourceOptions, *args, **kwargs) if resource_args is not None: __self__._internal_init(resource_name, opts, **resource_args.__dict__) else: __self__._internal_init(resource_name, *args, **kwargs) def _internal_init(__self__, resource_name: str, opts: Optional[pulumi.ResourceOptions] = None, apiproduct: Optional[pulumi.Input[str]] = None, apiproduct_id: Optional[pulumi.Input[str]] = None, billing_period: Optional[pulumi.Input['RatePlanBillingPeriod']] = None, consumption_pricing_rates: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['GoogleCloudApigeeV1RateRangeArgs']]]]] = None, consumption_pricing_type: Optional[pulumi.Input['RatePlanConsumptionPricingType']] = None, currency_code: Optional[pulumi.Input[str]] = None, description: Optional[pulumi.Input[str]] = None, display_name: Optional[pulumi.Input[str]] = None, end_time: Optional[pulumi.Input[str]] = None, fixed_fee_frequency: Optional[pulumi.Input[int]] = None, fixed_recurring_fee: Optional[pulumi.Input[pulumi.InputType['GoogleTypeMoneyArgs']]] = None, organization_id: Optional[pulumi.Input[str]] = None, revenue_share_rates: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['GoogleCloudApigeeV1RevenueShareRangeArgs']]]]] = None, revenue_share_type: Optional[pulumi.Input['RatePlanRevenueShareType']] = None, setup_fee: Optional[pulumi.Input[pulumi.InputType['GoogleTypeMoneyArgs']]] = None, start_time: Optional[pulumi.Input[str]] = None, state: Optional[pulumi.Input['RatePlanState']] = None, __props__=None): if opts is None: opts = pulumi.ResourceOptions() if not isinstance(opts, pulumi.ResourceOptions): raise TypeError('Expected resource options to be a ResourceOptions instance') if opts.version is None: opts.version = _utilities.get_version() if opts.id is None: if __props__ is not None: raise TypeError('__props__ is only valid when passed in 
combination with a valid opts.id to get an existing resource') __props__ = RatePlanArgs.__new__(RatePlanArgs) __props__.__dict__["apiproduct"] = apiproduct if apiproduct_id is None and not opts.urn: raise TypeError("Missing required property 'apiproduct_id'") __props__.__dict__["apiproduct_id"] = apiproduct_id __props__.__dict__["billing_period"] = billing_period __props__.__dict__["consumption_pricing_rates"] = consumption_pricing_rates __props__.__dict__["consumption_pricing_type"] = consumption_pricing_type __props__.__dict__["currency_code"] = currency_code __props__.__dict__["description"] = description __props__.__dict__["display_name"] = display_name __props__.__dict__["end_time"] = end_time __props__.__dict__["fixed_fee_frequency"] = fixed_fee_frequency __props__.__dict__["fixed_recurring_fee"] = fixed_recurring_fee if organization_id is None and not opts.urn: raise TypeError("Missing required property 'organization_id'") __props__.__dict__["organization_id"] = organization_id __props__.__dict__["revenue_share_rates"] = revenue_share_rates __props__.__dict__["revenue_share_type"] = revenue_share_type __props__.__dict__["setup_fee"] = setup_fee __props__.__dict__["start_time"] = start_time __props__.__dict__["state"] = state __props__.__dict__["created_at"] = None __props__.__dict__["last_modified_at"] = None __props__.__dict__["name"] = None super(RatePlan, __self__).__init__( 'google-native:apigee/v1:RatePlan', resource_name, __props__, opts) @staticmethod def get(resource_name: str, id: pulumi.Input[str], opts: Optional[pulumi.ResourceOptions] = None) -> 'RatePlan': opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id)) __props__ = RatePlanArgs.__new__(RatePlanArgs) __props__.__dict__["apiproduct"] = None __props__.__dict__["billing_period"] = None __props__.__dict__["consumption_pricing_rates"] = None __props__.__dict__["consumption_pricing_type"] = None __props__.__dict__["created_at"] = None __props__.__dict__["currency_code"] = None 
__props__.__dict__["description"] = None __props__.__dict__["display_name"] = None __props__.__dict__["end_time"] = None __props__.__dict__["fixed_fee_frequency"] = None __props__.__dict__["fixed_recurring_fee"] = None __props__.__dict__["last_modified_at"] = None __props__.__dict__["name"] = None __props__.__dict__["revenue_share_rates"] = None __props__.__dict__["revenue_share_type"] = None __props__.__dict__["setup_fee"] = None __props__.__dict__["start_time"] = None __props__.__dict__["state"] = None return RatePlan(resource_name, opts=opts, __props__=__props__) @property @pulumi.getter def apiproduct(self) -> pulumi.Output[str]: return pulumi.get(self, "apiproduct") @property @pulumi.getter(name="billingPeriod") def billing_period(self) -> pulumi.Output[str]: return pulumi.get(self, "billing_period") @property @pulumi.getter(name="consumptionPricingRates") def consumption_pricing_rates(self) -> pulumi.Output[Sequence['outputs.GoogleCloudApigeeV1RateRangeResponse']]: return pulumi.get(self, "consumption_pricing_rates") @property @pulumi.getter(name="consumptionPricingType") def consumption_pricing_type(self) -> pulumi.Output[str]: return pulumi.get(self, "consumption_pricing_type") @property @pulumi.getter(name="createdAt") def created_at(self) -> pulumi.Output[str]: return pulumi.get(self, "created_at") @property @pulumi.getter(name="currencyCode") def currency_code(self) -> pulumi.Output[str]: return pulumi.get(self, "currency_code") @property @pulumi.getter def description(self) -> pulumi.Output[str]: return pulumi.get(self, "description") @property @pulumi.getter(name="displayName") def display_name(self) -> pulumi.Output[str]: return pulumi.get(self, "display_name") @property @pulumi.getter(name="endTime") def end_time(self) -> pulumi.Output[str]: return pulumi.get(self, "end_time") @property @pulumi.getter(name="fixedFeeFrequency") def fixed_fee_frequency(self) -> pulumi.Output[int]: return pulumi.get(self, "fixed_fee_frequency") @property 
@pulumi.getter(name="fixedRecurringFee") def fixed_recurring_fee(self) -> pulumi.Output['outputs.GoogleTypeMoneyResponse']: return pulumi.get(self, "fixed_recurring_fee") @property @pulumi.getter(name="lastModifiedAt") def last_modified_at(self) -> pulumi.Output[str]: return pulumi.get(self, "last_modified_at") @property @pulumi.getter def name(self) -> pulumi.Output[str]: return pulumi.get(self, "name") @property @pulumi.getter(name="revenueShareRates") def revenue_share_rates(self) -> pulumi.Output[Sequence['outputs.GoogleCloudApigeeV1RevenueShareRangeResponse']]: return pulumi.get(self, "revenue_share_rates") @property @pulumi.getter(name="revenueShareType") def revenue_share_type(self) -> pulumi.Output[str]: return pulumi.get(self, "revenue_share_type") @property @pulumi.getter(name="setupFee") def setup_fee(self) -> pulumi.Output['outputs.GoogleTypeMoneyResponse']: return pulumi.get(self, "setup_fee") @property @pulumi.getter(name="startTime") def start_time(self) -> pulumi.Output[str]: return pulumi.get(self, "start_time") @property @pulumi.getter def state(self) -> pulumi.Output[str]: return pulumi.get(self, "state")
true
true
f723e43173495e3c39cb07e034e93d2ac42f8401
2,275
py
Python
frappe/cache_manager.py
karthikeyan5/frappe
d2c652ef3a3cc6997eedcc3925e359e216b8a569
[ "MIT" ]
null
null
null
frappe/cache_manager.py
karthikeyan5/frappe
d2c652ef3a3cc6997eedcc3925e359e216b8a569
[ "MIT" ]
null
null
null
frappe/cache_manager.py
karthikeyan5/frappe
d2c652ef3a3cc6997eedcc3925e359e216b8a569
[ "MIT" ]
null
null
null
# Copyright (c) 2018, Frappe Technologies Pvt. Ltd. and Contributors # MIT License. See license.txt from __future__ import unicode_literals import frappe import frappe.defaults from frappe.desk.notifications import (delete_notification_count_for, clear_notifications) common_default_keys = ["__default", "__global"] def clear_user_cache(user=None): cache = frappe.cache() groups = ("bootinfo", "user_recent", "roles", "user_doc", "lang", "defaults", "user_permissions", "home_page", "linked_with", "desktop_icons", 'portal_menu_items') # this will automatically reload the global cache # so it is important to clear this first clear_notifications(user) if user: for name in groups: cache.hdel(name, user) cache.delete_keys("user:" + user) clear_defaults_cache(user) else: for name in groups: cache.delete_key(name) clear_defaults_cache() clear_global_cache() def clear_global_cache(): from frappe.website.render import clear_cache as clear_website_cache clear_doctype_cache() clear_website_cache() frappe.cache().delete_value(["app_hooks", "installed_apps", "app_modules", "module_app", "notification_config", 'system_settings', 'scheduler_events', 'time_zone', 'webhooks', 'active_domains', 'active_modules']) frappe.setup_module_map() def clear_defaults_cache(user=None): if user: for p in ([user] + common_default_keys): frappe.cache().hdel("defaults", p) elif frappe.flags.in_install!="frappe": frappe.cache().delete_key("defaults") def clear_doctype_cache(doctype=None): cache = frappe.cache() if getattr(frappe.local, 'meta_cache') and (doctype in frappe.local.meta_cache): del frappe.local.meta_cache[doctype] for key in ('is_table', 'doctype_modules'): cache.delete_value(key) groups = ["meta", "form_meta", "table_columns", "last_modified", "linked_doctypes", 'notifications', 'workflow'] def clear_single(dt): for name in groups: cache.hdel(name, dt) if doctype: clear_single(doctype) # clear all parent doctypes for dt in frappe.db.get_all('DocField', 'parent', dict(fieldtype='Table', 
options=doctype)): clear_single(dt.parent) # clear all notifications delete_notification_count_for(doctype) else: # clear all for name in groups: cache.delete_value(name)
27.083333
94
0.745495
from __future__ import unicode_literals import frappe import frappe.defaults from frappe.desk.notifications import (delete_notification_count_for, clear_notifications) common_default_keys = ["__default", "__global"] def clear_user_cache(user=None): cache = frappe.cache() groups = ("bootinfo", "user_recent", "roles", "user_doc", "lang", "defaults", "user_permissions", "home_page", "linked_with", "desktop_icons", 'portal_menu_items') clear_notifications(user) if user: for name in groups: cache.hdel(name, user) cache.delete_keys("user:" + user) clear_defaults_cache(user) else: for name in groups: cache.delete_key(name) clear_defaults_cache() clear_global_cache() def clear_global_cache(): from frappe.website.render import clear_cache as clear_website_cache clear_doctype_cache() clear_website_cache() frappe.cache().delete_value(["app_hooks", "installed_apps", "app_modules", "module_app", "notification_config", 'system_settings', 'scheduler_events', 'time_zone', 'webhooks', 'active_domains', 'active_modules']) frappe.setup_module_map() def clear_defaults_cache(user=None): if user: for p in ([user] + common_default_keys): frappe.cache().hdel("defaults", p) elif frappe.flags.in_install!="frappe": frappe.cache().delete_key("defaults") def clear_doctype_cache(doctype=None): cache = frappe.cache() if getattr(frappe.local, 'meta_cache') and (doctype in frappe.local.meta_cache): del frappe.local.meta_cache[doctype] for key in ('is_table', 'doctype_modules'): cache.delete_value(key) groups = ["meta", "form_meta", "table_columns", "last_modified", "linked_doctypes", 'notifications', 'workflow'] def clear_single(dt): for name in groups: cache.hdel(name, dt) if doctype: clear_single(doctype) for dt in frappe.db.get_all('DocField', 'parent', dict(fieldtype='Table', options=doctype)): clear_single(dt.parent) delete_notification_count_for(doctype) else: for name in groups: cache.delete_value(name)
true
true
f723e7c0843a98cbd38780ee04a037d43c171c68
15,765
py
Python
dataset_loader.py
fqnchina/NeuralRouting
333dc95cb2d9a779de88e2349883a0002111d1b3
[ "MIT" ]
58
2021-03-25T19:18:56.000Z
2022-03-30T04:59:32.000Z
dataset_loader.py
fqnchina/NeuralRouting
333dc95cb2d9a779de88e2349883a0002111d1b3
[ "MIT" ]
null
null
null
dataset_loader.py
fqnchina/NeuralRouting
333dc95cb2d9a779de88e2349883a0002111d1b3
[ "MIT" ]
6
2021-06-19T03:48:50.000Z
2021-07-02T13:05:04.000Z
"""Dataset loaders for NeuralRouting: build point-pair-feature (PPF) patches
from RGB-D frames for training (LevelDataset_PPF) and testing (TestDataset_PPF)."""
import numpy as np
import torch
from torch.utils.data import Dataset, DataLoader
from torchvision import transforms as tfs
from PIL import Image
import os, cv2, copy, time
from config import *

# args.
# Module-level shortcuts pulled from the global `opt` config object.
image_height, image_width = opt.image_height, opt.image_width
intrinsics = opt.intrinsics
close_radius, far_radiuses = 0, opt.far_radiuses
n_neighb_pts = opt.n_neighb_pts


def isSon(son, fa):
    """Return True if route `son` has route `fa` as a prefix (i.e. `son` is a
    descendant of tree node `fa`); only the first len(fa) entries are compared."""
    for i in range(len(fa)):
        if son[i] != fa[i]:
            return False
    return True


# todo: to be migrated...
def depth2local(depth):
    """Back-project a depth map (H, W) into camera-space 3D coordinates (H, W, 3)
    using the pinhole model with the module-level `intrinsics`."""
    # depth: float32, meter.
    cx, cy, fx, fy = intrinsics[0, 2], intrinsics[1, 2], intrinsics[0, 0], intrinsics[1, 1]
    u_base = np.tile(np.arange(image_width), (image_height, 1))
    v_base = np.tile(np.arange(image_height)[:, np.newaxis], (1, image_width))
    X = (u_base - cx) * depth / fx
    Y = (v_base - cy) * depth / fy
    coord_local = np.stack((X, Y, depth), axis=2)
    return coord_local


def partial_pts(pts_all_in, p, r_min, r_max):
    """Return a copy of the points whose xyz distance from point `p` lies in [r_min, r_max]."""
    # pts_all_in.shape (#points, #channel)
    pts_all = copy.deepcopy(pts_all_in)
    p_mat = p[np.newaxis, 0:3].repeat(pts_all.shape[0], axis=0)
    norms = np.linalg.norm((p_mat - pts_all[:, 0:3]), axis=1)
    return pts_all[np.logical_and(norms >= r_min, norms <= r_max)]


def sample_pts(pts_in, num):
    """Randomly sample exactly `num` points, duplicating the set (by doubling)
    first if there are fewer than `num` available."""
    # pts_in.shape (#points, #channel)
    pts = copy.deepcopy(pts_in)
    while pts.shape[0] < num:
        pts = np.concatenate((pts, pts), axis=0)
    rand_ids = np.arange(pts.shape[0])
    np.random.shuffle(rand_ids)
    return pts[rand_ids[0:num], :]


def sample_pts_rc(pts_in, rcs_in, num):
    """Like sample_pts, but also return the matching (row, col) coordinates.

    NOTE(review): after duplication the shuffled ids index the *grown* `pts`
    array but `rcs_in` keeps its original length — presumably callers only use
    this when pts_in already has >= num points; confirm.
    """
    # pts_in.shape (#points, #channel)
    pts = copy.deepcopy(pts_in)
    rcs = copy.deepcopy(rcs_in)
    while pts.shape[0] < num:
        pts = np.concatenate((pts, pts), axis=0)
    rand_ids = np.arange(pts.shape[0])
    np.random.shuffle(rand_ids)
    return pts[rand_ids[0:num], :], rcs_in[rand_ids[0:num], :]


def sample_pts9d_r3d(pts_in, num, radius):
    """Drop points farther than `radius` from the origin, then sample `num` of
    the survivors; return None if fewer than 500 points remain (avoids looping
    forever while doubling a near-empty set)."""
    # pts_in.shape (#points, #channel)
    pts = copy.deepcopy(pts_in)
    thresh = 500
    # remove background by 3d radius
    xyz = pts[:, 0:3]
    pts = pts[np.linalg.norm(xyz, axis=1) <= radius]
    # print('pt num after r3d {}'.format(pts.shape[0]))
    if pts.shape[0] < thresh:  # avoid infinite loop.
        return None
    while pts.shape[0] < num:
        pts = np.concatenate((pts, pts), axis=0)
    rand_ids = np.arange(pts.shape[0])
    np.random.shuffle(rand_ids)
    return pts[rand_ids[0:num], :]


def shift_pts(pts_in, cen):
    """Return a copy of the points translated so `cen` becomes the origin
    (only the first three, xyz, channels are shifted)."""
    # pts_in.shape (#points, #channel)
    pts = copy.deepcopy(pts_in)
    cen_mat = cen[np.newaxis, :].repeat(pts.shape[0], axis=0)
    pts[:, 0:3] = pts[:, 0:3] - cen_mat
    return pts


def shift_pts6d(pts_in, cen):
    """Return a copy of the points with `cen` subtracted from every channel."""
    # pts_in.shape (#points, #channel)
    pts = copy.deepcopy(pts_in)
    cen_mat = cen[np.newaxis, :].repeat(pts.shape[0], axis=0)
    pts[:, :] = pts[:, :] - cen_mat
    return pts


def shift_pts9d(pts_in, cen):
    """Subtract a 9-d center from every point, but zero the center's normal
    channels (3:6) first so normals are left untranslated."""
    # pts_in.shape (#points, #channel)
    cpt = copy.deepcopy(cen)
    cpt[3:6] = np.zeros(3)  # remove shift of normal
    pts = copy.deepcopy(pts_in)
    cpt_mat = cpt[np.newaxis, :].repeat(pts.shape[0], axis=0)
    pts[:, :] = pts[:, :] - cpt_mat
    return pts


def make_ppf(pts9d, cen9d):  # (N,9), (9,)
    """Build point-pair features between center `cen9d` and each of `pts9d`.

    Output is (N, 7): [|d|, n1.d_hat, n2.d_hat, n1.n2, r, g, b] where d is the
    offset from center to point, n1 the center normal, n2 the point normal.
    """
    # prepare
    n_pts = pts9d.shape[0]
    d = pts9d[:, 0:3]
    n2 = pts9d[:, 3:6]
    n1 = np.repeat(cen9d[3:6].reshape(1, 3), n_pts, axis=0)
    # ppf
    dim1 = np.linalg.norm(d, axis=1).reshape(n_pts, 1)
    d = d / (dim1.reshape(n_pts, 1))
    dim2 = np.sum(n1 * d, axis=1).reshape(n_pts, 1)
    dim3 = np.sum(n2 * d, axis=1).reshape(n_pts, 1)
    dim4 = np.sum(n1 * n2, axis=1).reshape(n_pts, 1)
    ppf = np.concatenate((dim1, dim2, dim3, dim4), axis=1)
    ppf7d = np.concatenate((ppf, pts9d[:, 6:9]), axis=1)
    return ppf7d


def compute_points_normal(pts):
    """Estimate per-pixel normals from an (H, W, 3) point map by PCA plane
    fitting on a 5x5 neighborhood; normals are flipped to face the camera
    (negative z). Border pixels and degenerate neighborhoods stay zero."""
    raw_shape = pts.shape
    normal = np.zeros((raw_shape))  # (r,c,3)
    t0 = time.time()
    for r in range(2, raw_shape[0] - 2):
        for c in range(2, raw_shape[1] - 2):
            pts_local = pts[r - 2:r + 3, c - 2:c + 3, :]  # (5,5,3)
            pts_local = pts_local.reshape(-1, 3)  # (N,3)
            pts_local = pts_local[np.linalg.norm(pts_local - pts[r, c, :], axis=1) < 0.1]  # remove outliers.
            if pts_local.shape[0] < 4:
                continue
            pts_local = pts_local - np.mean(pts_local, axis=0)
            # covariance of the centered neighborhood; its smallest-eigenvalue
            # eigenvector is the fitted plane normal
            C = pts_local.T @ pts_local / pts_local.shape[0]
            e, v = np.linalg.eig(C)
            d = v[:, np.where(e == np.min(e))[0][0]]
            n = d / np.linalg.norm(d)
            if np.dot(n, np.array([0, 0, 1])) > 0:
                n = -n
            normal[r, c, :] = n
    t1 = time.time()
    print('preprocess data: compute normal cost {:.2f}s'.format(t1 - t0))
    return normal


# for depth adaptive 2d
def partial_pts_2d(pts_rc, cen_rc, list_drdc):
    """Gather points from `pts_rc` (a 4x-downscaled (H/4, W/4, C) grid) at the
    pixel offsets `list_drdc` around `cen_rc`, clamping to the image bounds."""
    result = None
    r_max, c_max = int(image_height / 4 - 1), int(image_width / 4 - 1)
    # offsets are specified at full resolution; the grid is downscaled by 4
    mat_drdc = (np.array(list_drdc) / 4).astype(int)
    mat_cen_rc = np.array(cen_rc)
    mat_targ_rc = cen_rc + mat_drdc
    mat_targ_rc[mat_targ_rc < 0] = 0
    targ_r = mat_targ_rc[:, 0]
    targ_r[targ_r > r_max] = r_max
    targ_c = mat_targ_rc[:, 1]
    targ_c[targ_c > c_max] = c_max
    result = pts_rc[targ_r, targ_c]
    return copy.deepcopy(result)


# for depth adaptive 2d
def partial_pts_2d_rc(pts_rc, cen_rc, list_drdc):
    """Same as partial_pts_2d, but additionally return the clamped (row, col)
    coordinates that were sampled, as an (N, 2) array."""
    result = None
    r_max, c_max = int(image_height / 4 - 1), int(image_width / 4 - 1)
    mat_drdc = (np.array(list_drdc) / 4).astype(int)
    mat_cen_rc = np.array(cen_rc)
    mat_targ_rc = cen_rc + mat_drdc
    mat_targ_rc[mat_targ_rc < 0] = 0
    targ_r = mat_targ_rc[:, 0]
    targ_r[targ_r > r_max] = r_max
    targ_c = mat_targ_rc[:, 1]
    targ_c[targ_c > c_max] = c_max
    result = pts_rc[targ_r, targ_c]
    return copy.deepcopy(result), copy.deepcopy(
        np.concatenate((targ_r.reshape(targ_r.shape[0], 1), targ_c.reshape(targ_c.shape[0], 1)), axis=1))


# for depth adaptive 2d with dynamics label
def partial_pts_2d_with_label(pts_rc, cen_rc, list_drdc, mask):
    """Like partial_pts_2d, but keep only sampled pixels whose `mask` value is
    0 (static); dynamic pixels (mask 255) are excluded. Returns points in grid
    order, not offset order, since a boolean mask is used for the gather."""
    # mask: 0 for static pixel, 255 for dynamic pixel.
    result = None
    r_max, c_max = int(image_height / 4 - 1), int(image_width / 4 - 1)
    mat_drdc = (np.array(list_drdc) / 4).astype(int)
    mat_cen_rc = np.array(cen_rc)
    mat_targ_rc = cen_rc + mat_drdc
    mat_targ_rc[mat_targ_rc < 0] = 0
    targ_r = mat_targ_rc[:, 0]
    targ_r[targ_r > r_max] = r_max
    targ_c = mat_targ_rc[:, 1]
    targ_c[targ_c > c_max] = c_max
    # m1: static pixels; m2: sampled pixels; keep their intersection
    m1 = np.zeros((mask.shape[0], mask.shape[1]))
    m1[mask == 0] = 1
    m2 = np.zeros((mask.shape[0], mask.shape[1]))
    m2[targ_r, targ_c] = 1
    m3 = np.logical_and(m1, m2)
    result = pts_rc[m3]
    return copy.deepcopy(result)


class LevelDataset_PPF(Dataset):
    """Training dataset for one routing-tree level: yields PPF neighborhoods
    and the ground-truth routing labels for sampled pixels of each frame."""

    def __init__(self, data_dir, the_list, n_pts_per_frame=opt.n_pts_per_frame, neighbor_da2d=None, far_radius=None,
                 enable_color_aug=True, specified_node=None):
        # the_list: sequence of (frame_id, rc_route) pairs, where rc_route
        # holds per-pixel (row, col, route-label) entries.
        super().__init__()
        self.data_dir, self.the_list = data_dir, the_list
        self.n_pts_per_frame = n_pts_per_frame
        self.neighbor_da2d = neighbor_da2d  # (n_pts, dim_pt).
        self.far_radius = far_radius  # scalar.
        self.enable_color_aug = enable_color_aug
        self.specified_node = specified_node  # restrict samples to descendants of this node

    def __len__(self):
        return len(self.the_list)

    def __getitem__(self, idx):
        """Load one frame, compute (or load cached) normals, then sample up to
        n_pts_per_frame pixels and build their PPF neighbor tensors.

        Returns (pt_in, nb_in, route_labs, fid, rc_list).
        NOTE(review): if neighbor_da2d is None, `sid` is never bound and the
        trailing slices raise NameError — presumably neighbor_da2d is always
        provided; confirm against callers.
        """
        fid, rc_route = self.the_list[idx]
        # load
        depth = cv2.imread('{}/{}'.format(self.data_dir, opt.depth_format.format(fid)), cv2.IMREAD_UNCHANGED) / 1000.0
        color = cv2.imread('{}/{}'.format(self.data_dir, opt.color_format.format(fid)), cv2.IMREAD_UNCHANGED)[:, :, 0:3]
        # color jitter
        if self.enable_color_aug:
            img = Image.fromarray(color)
            if np.random.rand() < 0.5:
                img = tfs.ColorJitter(brightness=1.)(img)
            if np.random.rand() < 0.5:
                img = tfs.ColorJitter(contrast=1.)(img)
            if np.random.rand() < 0.5:
                img = tfs.ColorJitter(saturation=1.)(img)
            color = np.array(img)
        if np.max(color) > 1:
            # normalize 0..255 colors to roughly [-0.5, 0.5]
            color = color / 255. - 0.5
        local = depth2local(depth)
        # downscale everything by 4 via strided indexing
        r_ids, c_ids = list(range(0, image_height, 4)), list(range(0, image_width, 4))
        depth, color, local = depth[r_ids, :][:, c_ids], color[r_ids, :, :][:, c_ids, :], local[r_ids, :, :][:, c_ids, :]
        # normal by 3d neighbor plane fitting.
        normal_path = '{}/frame-{:06d}.scaled.normal.npy'.format(self.data_dir, fid)
        if os.path.exists(normal_path):
            # print('fid {}'.format(fid))  # to debug rio10 scene09 10
            # normal = np.load(normal_path)
            if os.path.getsize(normal_path) > 1:
                normal = np.load(normal_path, encoding='bytes', allow_pickle=True)
            else:
                # zero-byte cache file: recompute and overwrite
                normal = compute_points_normal(local)
                np.save(normal_path, normal)
        else:
            normal = compute_points_normal(local)
            np.save(normal_path, normal)
        # 9-channel grid: xyz (0:3) + normal (3:6) + rgb (6:9)
        lclnmlclr = np.concatenate((np.concatenate((local, normal), axis=2), color), axis=2)
        # build a patch
        rand_ids = np.arange(len(rc_route))
        np.random.shuffle(rand_ids)
        selected_ids = rand_ids[0:self.n_pts_per_frame * 2]  # more candidates
        pt_in = torch.zeros((self.n_pts_per_frame, 7, 1))
        nb_in = torch.zeros((self.n_pts_per_frame, 7, opt.n_neighb_pts))
        # NOTE(review): `ary` is not defined anywhere in this module — it is
        # presumably a fill value for "invalid label" (e.g. the tree arity)
        # that should come from config; confirm before running this path.
        route_labs = torch.zeros((self.n_pts_per_frame, opt.tree_height - 1)).fill_(ary)
        rc_list = []
        # da2d+3d neighbor
        if not self.neighbor_da2d is None:
            sid = 0
            for tmp_idx in range(len(selected_ids)):
                r, c = rc_route[selected_ids[tmp_idx]][0], rc_route[selected_ids[tmp_idx]][1]
                # skip pixels with invalid (NaN) normals
                if np.isnan(lclnmlclr[r, c, 3]):
                    continue
                if self.specified_node:
                    if not isSon(rc_route[selected_ids[tmp_idx]][2], self.specified_node):
                        continue
                route_labs[sid] = torch.Tensor(rc_route[selected_ids[tmp_idx]][2])
                rc_list.append([r, c])
                pt_in[sid] = torch.Tensor(
                    np.concatenate((np.array([[0.], [0.], [0.], [0.]]), color[r, c, 0:3][:, np.newaxis]), axis=0))
                # scale the 2d offsets by inverse depth (depth-adaptive sampling)
                da2d_list = (np.array(self.neighbor_da2d) / depth[r, c]).astype(int)
                # ppf
                pts9d = shift_pts9d(sample_pts(partial_pts_2d(lclnmlclr, (r, c), da2d_list), opt.n_neighb_pts),
                                    lclnmlclr[r, c, :])
                cen9d = copy.deepcopy(lclnmlclr[r, c, :])
                cen9d[0:3] = np.zeros(3)
                ppf7d = make_ppf(pts9d, cen9d)  # (N,9), (9,)
                ppf7d[np.isnan(ppf7d)] = 0.0
                nb_in[sid] = torch.Tensor(ppf7d).transpose(1, 0)
                # remove background by 3d radius
                xyz = pts9d[:, 0:3]
                ids_out_of_bound = np.linalg.norm(xyz, axis=1) > self.far_radius
                nb_in[sid, :, ids_out_of_bound] = 0.
                # count
                sid += 1
                if sid >= self.n_pts_per_frame:
                    break
        # trim buffers to the number of points actually collected
        pt_in = pt_in[:sid]
        nb_in = nb_in[:sid]
        route_labs = route_labs[:sid]
        return pt_in, nb_in, route_labs, fid, torch.Tensor(np.array(rc_list))


class TestDataset_PPF(Dataset):
    """Test-time dataset: samples random valid pixels of each frame and builds
    a multi-scale (one per tree level) PPF neighborhood tensor for each."""

    def __init__(self, data_dir, the_list, n_pts_per_frame=opt.n_pts_per_frame, neighbor_da2d=None):
        # the_list: sequence of frame ids.
        super().__init__()
        self.data_dir, self.the_list = data_dir, the_list
        self.n_pts_per_frame = n_pts_per_frame
        self.neighbor_da2d = neighbor_da2d  # list of (n_pts, dim_pt)

    def __len__(self):
        return len(self.the_list)

    def __getitem__(self, idx):
        """Load one frame and build PPF neighborhoods at every tree level for
        randomly sampled valid pixels.

        Returns (pt_in, nb_ms_in, -1, fid, rc_list); -1 stands in for the
        labels that are unavailable at test time.
        """
        fid = self.the_list[idx]
        # load
        depth = cv2.imread('{}/{}'.format(self.data_dir, opt.depth_format.format(fid)), cv2.IMREAD_UNCHANGED) / 1000.0
        color = cv2.imread('{}/{}'.format(self.data_dir, opt.color_format.format(fid)), cv2.IMREAD_UNCHANGED)[:, :, 0:3]
        if np.max(color) > 1:
            color = color / 255. - 0.5
        local = depth2local(depth)
        # downscale everything by 4 via strided indexing
        r_ids, c_ids = list(range(0, image_height, 4)), list(range(0, image_width, 4))
        depth, color, local = depth[r_ids, :][:, c_ids], color[r_ids, :, :][:, c_ids, :], local[r_ids, :, :][:, c_ids, :]
        # normal by 3d neighbor plane fitting.
        normal_path = '{}/frame-{:06d}.scaled.normal.npy'.format(self.data_dir, fid)
        if os.path.exists(normal_path):
            # normal = np.load(normal_path)
            if os.path.getsize(normal_path) > 1:
                normal = np.load(normal_path, encoding='bytes', allow_pickle=True)
            else:
                normal = compute_points_normal(local)
                np.save(normal_path, normal)
        else:
            normal = compute_points_normal(local)
            np.save(normal_path, normal)
        # 9-channel grid: xyz (0:3) + normal (3:6) + rgb (6:9)
        lclnmlclr = np.concatenate((np.concatenate((local, normal), axis=2), color), axis=2)
        # build a patch
        pt_in = torch.zeros((self.n_pts_per_frame, 7, 1))
        nb_ms_in = torch.zeros((self.n_pts_per_frame, opt.tree_height - 1, 7, opt.n_neighb_pts))
        route_labs = torch.zeros((self.n_pts_per_frame, opt.tree_height - 1))
        r_max, c_max = int(image_height / 4 - 1), int(image_width / 4 - 1)
        rc_list = []
        # da2d+3d neighbor
        if not self.neighbor_da2d is None:
            sid, count_crt, count_max = 0, 0, 9999
            mask = np.zeros((r_max, c_max))  # marks pixels already sampled
            while len(rc_list) < self.n_pts_per_frame:
                # avoid infinite loop
                count_crt += 1
                if count_crt > count_max:
                    break
                r, c = np.random.randint(0, r_max), np.random.randint(0, c_max)
                # reject zero-depth, already-sampled or NaN-normal pixels
                if depth[r, c] == 0. or mask[r, c] == 1.:
                    continue
                if np.isnan(lclnmlclr[r, c, 3]):
                    continue
                mask[r, c] = 1.
                rc_list.append([r, c])
                pt_in[sid] = torch.Tensor(
                    np.concatenate((np.array([[0.], [0.], [0.], [0.]]), color[r, c, 0:3][:, np.newaxis]), axis=0))
                for lid in range(opt.tree_height - 1):
                    # level-specific depth-adaptive 2d offsets
                    da2d_list = (np.array(self.neighbor_da2d[lid]) / depth[r, c]).astype(int)
                    # ppf
                    pts9d = shift_pts9d(
                        sample_pts(partial_pts_2d(lclnmlclr, (r, c), da2d_list), opt.n_neighb_pts),
                        lclnmlclr[r, c, :])
                    cen9d = copy.deepcopy(lclnmlclr[r, c, :])
                    cen9d[0:3] = np.zeros(3)
                    ppf7d = make_ppf(pts9d, cen9d)  # (N,9), (9,)
                    ppf7d[np.isnan(ppf7d)] = 0.0
                    nb_ms_in[sid, lid, :, :] = torch.Tensor(ppf7d).transpose(1, 0)
                    # remove background by 3d radius
                    xyz = pts9d[:, 0:3]
                    ids_out_of_bound = np.linalg.norm(xyz, axis=1) > opt.far_radiuses[lid]
                    nb_ms_in[sid, lid, :, ids_out_of_bound] = 0.
                # count
                sid += 1
        return pt_in, nb_ms_in, -1, fid, torch.Tensor(np.array(rc_list))

# # debug
# if __name__ == '__main__':
#     print('done.')
44.159664
122
0.562195
import numpy as np import torch from torch.utils.data import Dataset, DataLoader from torchvision import transforms as tfs from PIL import Image import os, cv2, copy, time from config import * image_height, image_width = opt.image_height, opt.image_width intrinsics = opt.intrinsics close_radius, far_radiuses = 0, opt.far_radiuses n_neighb_pts = opt.n_neighb_pts def isSon(son, fa): for i in range(len(fa)): if son[i] != fa[i]: return False return True def depth2local(depth): cx, cy, fx, fy = intrinsics[0, 2], intrinsics[1, 2], intrinsics[0, 0], intrinsics[1, 1] u_base = np.tile(np.arange(image_width), (image_height, 1)) v_base = np.tile(np.arange(image_height)[:, np.newaxis], (1, image_width)) X = (u_base - cx) * depth / fx Y = (v_base - cy) * depth / fy coord_local = np.stack((X, Y, depth), axis=2) return coord_local def partial_pts(pts_all_in, p, r_min, r_max): pts_all_in) p_mat = p[np.newaxis, 0:3].repeat(pts_all.shape[0], axis=0) norms = np.linalg.norm((p_mat - pts_all[:, 0:3]), axis=1) return pts_all[np.logical_and(norms >= r_min, norms <= r_max)] def sample_pts(pts_in, num): in) while pts.shape[0] < num: pts = np.concatenate((pts, pts), axis=0) rand_ids = np.arange(pts.shape[0]) np.random.shuffle(rand_ids) return pts[rand_ids[0:num], :] def sample_pts_rc(pts_in, rcs_in, num): in) rcs = copy.deepcopy(rcs_in) while pts.shape[0] < num: pts = np.concatenate((pts, pts), axis=0) rand_ids = np.arange(pts.shape[0]) np.random.shuffle(rand_ids) return pts[rand_ids[0:num], :], rcs_in[rand_ids[0:num], :] def sample_pts9d_r3d(pts_in, num, radius): in) thresh = 500 xyz = pts[:, 0:3] pts = pts[np.linalg.norm(xyz, axis=1) <= radius] if pts.shape[0] < thresh: return None while pts.shape[0] < num: pts = np.concatenate((pts, pts), axis=0) rand_ids = np.arange(pts.shape[0]) np.random.shuffle(rand_ids) return pts[rand_ids[0:num], :] def shift_pts(pts_in, cen): in) cen_mat = cen[np.newaxis, :].repeat(pts.shape[0], axis=0) pts[:, 0:3] = pts[:, 0:3] - cen_mat return pts def 
shift_pts6d(pts_in, cen): in) cen_mat = cen[np.newaxis, :].repeat(pts.shape[0], axis=0) pts[:, :] = pts[:, :] - cen_mat return pts def shift_pts9d(pts_in, cen): cpt[3:6] = np.zeros(3) pts = copy.deepcopy(pts_in) cpt_mat = cpt[np.newaxis, :].repeat(pts.shape[0], axis=0) pts[:, :] = pts[:, :] - cpt_mat return pts def make_ppf(pts9d, cen9d): n_pts = pts9d.shape[0] d = pts9d[:, 0:3] n2 = pts9d[:, 3:6] n1 = np.repeat(cen9d[3:6].reshape(1, 3), n_pts, axis=0) dim1 = np.linalg.norm(d, axis=1).reshape(n_pts, 1) d = d / (dim1.reshape(n_pts, 1)) dim2 = np.sum(n1 * d, axis=1).reshape(n_pts, 1) dim3 = np.sum(n2 * d, axis=1).reshape(n_pts, 1) dim4 = np.sum(n1 * n2, axis=1).reshape(n_pts, 1) ppf = np.concatenate((dim1, dim2, dim3, dim4), axis=1) ppf7d = np.concatenate((ppf, pts9d[:, 6:9]), axis=1) return ppf7d def compute_points_normal(pts): raw_shape = pts.shape normal = np.zeros((raw_shape)) t0 = time.time() for r in range(2, raw_shape[0] - 2): for c in range(2, raw_shape[1] - 2): pts_local = pts[r - 2:r + 3, c - 2:c + 3, :] pts_local = pts_local.reshape(-1, 3) pts_local = pts_local[np.linalg.norm(pts_local - pts[r, c, :], axis=1) < 0.1] if pts_local.shape[0] < 4: continue pts_local = pts_local - np.mean(pts_local, axis=0) C = pts_local.T @ pts_local / pts_local.shape[0] e, v = np.linalg.eig(C) d = v[:, np.where(e == np.min(e))[0][0]] n = d / np.linalg.norm(d) if np.dot(n, np.array([0, 0, 1])) > 0: n = -n normal[r, c, :] = n t1 = time.time() print('preprocess data: compute normal cost {:.2f}s'.format(t1 - t0)) return normal def partial_pts_2d(pts_rc, cen_rc, list_drdc): result = None r_max, c_max = int(image_height / 4 - 1), int(image_width / 4 - 1) mat_drdc = (np.array(list_drdc) / 4).astype(int) mat_cen_rc = np.array(cen_rc) mat_targ_rc = cen_rc + mat_drdc mat_targ_rc[mat_targ_rc < 0] = 0 targ_r = mat_targ_rc[:, 0] targ_r[targ_r > r_max] = r_max targ_c = mat_targ_rc[:, 1] targ_c[targ_c > c_max] = c_max result = pts_rc[targ_r, targ_c] return copy.deepcopy(result) def 
partial_pts_2d_rc(pts_rc, cen_rc, list_drdc): result = None r_max, c_max = int(image_height / 4 - 1), int(image_width / 4 - 1) mat_drdc = (np.array(list_drdc) / 4).astype(int) mat_cen_rc = np.array(cen_rc) mat_targ_rc = cen_rc + mat_drdc mat_targ_rc[mat_targ_rc < 0] = 0 targ_r = mat_targ_rc[:, 0] targ_r[targ_r > r_max] = r_max targ_c = mat_targ_rc[:, 1] targ_c[targ_c > c_max] = c_max result = pts_rc[targ_r, targ_c] return copy.deepcopy(result), copy.deepcopy( np.concatenate((targ_r.reshape(targ_r.shape[0], 1), targ_c.reshape(targ_c.shape[0], 1)), axis=1)) def partial_pts_2d_with_label(pts_rc, cen_rc, list_drdc, mask): result = None r_max, c_max = int(image_height / 4 - 1), int(image_width / 4 - 1) mat_drdc = (np.array(list_drdc) / 4).astype(int) mat_cen_rc = np.array(cen_rc) mat_targ_rc = cen_rc + mat_drdc mat_targ_rc[mat_targ_rc < 0] = 0 targ_r = mat_targ_rc[:, 0] targ_r[targ_r > r_max] = r_max targ_c = mat_targ_rc[:, 1] targ_c[targ_c > c_max] = c_max m1 = np.zeros((mask.shape[0], mask.shape[1])) m1[mask == 0] = 1 m2 = np.zeros((mask.shape[0], mask.shape[1])) m2[targ_r, targ_c] = 1 m3 = np.logical_and(m1, m2) result = pts_rc[m3] return copy.deepcopy(result) class LevelDataset_PPF(Dataset): def __init__(self, data_dir, the_list, n_pts_per_frame=opt.n_pts_per_frame, neighbor_da2d=None, far_radius=None, enable_color_aug=True, specified_node=None): super().__init__() self.data_dir, self.the_list = data_dir, the_list self.n_pts_per_frame = n_pts_per_frame self.neighbor_da2d = neighbor_da2d self.far_radius = far_radius self.enable_color_aug = enable_color_aug self.specified_node = specified_node def __len__(self): return len(self.the_list) def __getitem__(self, idx): fid, rc_route = self.the_list[idx] depth = cv2.imread('{}/{}'.format(self.data_dir, opt.depth_format.format(fid)), cv2.IMREAD_UNCHANGED) / 1000.0 color = cv2.imread('{}/{}'.format(self.data_dir, opt.color_format.format(fid)), cv2.IMREAD_UNCHANGED)[:, :, 0:3] if self.enable_color_aug: img = 
Image.fromarray(color) if np.random.rand() < 0.5: img = tfs.ColorJitter(brightness=1.)(img) if np.random.rand() < 0.5: img = tfs.ColorJitter(contrast=1.)(img) if np.random.rand() < 0.5: img = tfs.ColorJitter(saturation=1.)(img) color = np.array(img) if np.max(color) > 1: color = color / 255. - 0.5 local = depth2local(depth) r_ids, c_ids = list(range(0, image_height, 4)), list(range(0, image_width, 4)) depth, color, local = depth[r_ids, :][:, c_ids], color[r_ids, :, :][:, c_ids, :], local[r_ids, :, :][:, c_ids, :] normal_path = '{}/frame-{:06d}.scaled.normal.npy'.format(self.data_dir, fid) if os.path.exists(normal_path): os.path.getsize(normal_path) > 1: normal = np.load(normal_path, encoding='bytes', allow_pickle=True) else: normal = compute_points_normal(local) np.save(normal_path, normal) else: normal = compute_points_normal(local) np.save(normal_path, normal) lclnmlclr = np.concatenate((np.concatenate((local, normal), axis=2), color), axis=2) rand_ids = np.arange(len(rc_route)) np.random.shuffle(rand_ids) selected_ids = rand_ids[0:self.n_pts_per_frame * 2] pt_in = torch.zeros((self.n_pts_per_frame, 7, 1)) nb_in = torch.zeros((self.n_pts_per_frame, 7, opt.n_neighb_pts)) route_labs = torch.zeros((self.n_pts_per_frame, opt.tree_height - 1)).fill_(ary) rc_list = [] if not self.neighbor_da2d is None: sid = 0 for tmp_idx in range(len(selected_ids)): r, c = rc_route[selected_ids[tmp_idx]][0], rc_route[selected_ids[tmp_idx]][1] if np.isnan(lclnmlclr[r, c, 3]): continue if self.specified_node: if not isSon(rc_route[selected_ids[tmp_idx]][2], self.specified_node): continue route_labs[sid] = torch.Tensor(rc_route[selected_ids[tmp_idx]][2]) rc_list.append([r, c]) pt_in[sid] = torch.Tensor( np.concatenate((np.array([[0.], [0.], [0.], [0.]]), color[r, c, 0:3][:, np.newaxis]), axis=0)) da2d_list = (np.array(self.neighbor_da2d) / depth[r, c]).astype(int) pts9d = shift_pts9d(sample_pts(partial_pts_2d(lclnmlclr, (r, c), da2d_list), opt.n_neighb_pts), lclnmlclr[r, c, :]) cen9d = 
copy.deepcopy(lclnmlclr[r, c, :]) cen9d[0:3] = np.zeros(3) ppf7d = make_ppf(pts9d, cen9d) ppf7d[np.isnan(ppf7d)] = 0.0 nb_in[sid] = torch.Tensor(ppf7d).transpose(1, 0) xyz = pts9d[:, 0:3] ids_out_of_bound = np.linalg.norm(xyz, axis=1) > self.far_radius nb_in[sid, :, ids_out_of_bound] = 0. sid += 1 if sid >= self.n_pts_per_frame: break pt_in = pt_in[:sid] nb_in = nb_in[:sid] route_labs = route_labs[:sid] return pt_in, nb_in, route_labs, fid, torch.Tensor(np.array(rc_list)) class TestDataset_PPF(Dataset): def __init__(self, data_dir, the_list, n_pts_per_frame=opt.n_pts_per_frame, neighbor_da2d=None): super().__init__() self.data_dir, self.the_list = data_dir, the_list self.n_pts_per_frame = n_pts_per_frame self.neighbor_da2d = neighbor_da2d def __len__(self): return len(self.the_list) def __getitem__(self, idx): fid = self.the_list[idx] depth = cv2.imread('{}/{}'.format(self.data_dir, opt.depth_format.format(fid)), cv2.IMREAD_UNCHANGED) / 1000.0 color = cv2.imread('{}/{}'.format(self.data_dir, opt.color_format.format(fid)), cv2.IMREAD_UNCHANGED)[:, :, 0:3] if np.max(color) > 1: color = color / 255. 
- 0.5 local = depth2local(depth) r_ids, c_ids = list(range(0, image_height, 4)), list(range(0, image_width, 4)) depth, color, local = depth[r_ids, :][:, c_ids], color[r_ids, :, :][:, c_ids, :], local[r_ids, :, :][:, c_ids, :] normal_path = '{}/frame-{:06d}.scaled.normal.npy'.format(self.data_dir, fid) if os.path.exists(normal_path): if os.path.getsize(normal_path) > 1: normal = np.load(normal_path, encoding='bytes', allow_pickle=True) else: normal = compute_points_normal(local) np.save(normal_path, normal) else: normal = compute_points_normal(local) np.save(normal_path, normal) lclnmlclr = np.concatenate((np.concatenate((local, normal), axis=2), color), axis=2) pt_in = torch.zeros((self.n_pts_per_frame, 7, 1)) nb_ms_in = torch.zeros((self.n_pts_per_frame, opt.tree_height - 1, 7, opt.n_neighb_pts)) route_labs = torch.zeros((self.n_pts_per_frame, opt.tree_height - 1)) r_max, c_max = int(image_height / 4 - 1), int(image_width / 4 - 1) rc_list = [] if not self.neighbor_da2d is None: sid, count_crt, count_max = 0, 0, 9999 mask = np.zeros((r_max, c_max)) while len(rc_list) < self.n_pts_per_frame: count_crt += 1 if count_crt > count_max: break r, c = np.random.randint(0, r_max), np.random.randint(0, c_max) if depth[r, c] == 0. or mask[r, c] == 1.: continue if np.isnan(lclnmlclr[r, c, 3]): continue mask[r, c] = 1. 
rc_list.append([r, c]) pt_in[sid] = torch.Tensor( np.concatenate((np.array([[0.], [0.], [0.], [0.]]), color[r, c, 0:3][:, np.newaxis]), axis=0)) for lid in range(opt.tree_height - 1): da2d_list = (np.array(self.neighbor_da2d[lid]) / depth[r, c]).astype(int) pts9d = shift_pts9d( sample_pts(partial_pts_2d(lclnmlclr, (r, c), da2d_list), opt.n_neighb_pts), lclnmlclr[r, c, :]) cen9d = copy.deepcopy(lclnmlclr[r, c, :]) cen9d[0:3] = np.zeros(3) ppf7d = make_ppf(pts9d, cen9d) ppf7d[np.isnan(ppf7d)] = 0.0 nb_ms_in[sid, lid, :, :] = torch.Tensor(ppf7d).transpose(1, 0) xyz = pts9d[:, 0:3] ids_out_of_bound = np.linalg.norm(xyz, axis=1) > opt.far_radiuses[lid] nb_ms_in[sid, lid, :, ids_out_of_bound] = 0. sid += 1 return pt_in, nb_ms_in, -1, fid, torch.Tensor(np.array(rc_list))
true
true
f723e84d3365845116c25dda9340902ab173b6cd
799
py
Python
core/data/dataloader/__init__.py
HareshKarnan/awesome-semantic-segmentation-pytorch
3c53fc004973abcb88882dcc8be899570c3053cf
[ "Apache-2.0" ]
null
null
null
core/data/dataloader/__init__.py
HareshKarnan/awesome-semantic-segmentation-pytorch
3c53fc004973abcb88882dcc8be899570c3053cf
[ "Apache-2.0" ]
null
null
null
core/data/dataloader/__init__.py
HareshKarnan/awesome-semantic-segmentation-pytorch
3c53fc004973abcb88882dcc8be899570c3053cf
[ "Apache-2.0" ]
null
null
null
""" This module provides data loaders and transformers for popular vision datasets. """ from .mscoco import COCOSegmentation from .cityscapes import CitySegmentation from .ade import ADE20KSegmentation from .pascal_voc import VOCSegmentation from .pascal_aug import VOCAugSegmentation from .sbu_shadow import SBUSegmentation from .ycb import YCBSegmentation from .robocup import RobocupSegmentation datasets = { 'ade20k': ADE20KSegmentation, 'pascal_voc': VOCSegmentation, 'pascal_aug': VOCAugSegmentation, 'coco': COCOSegmentation, 'citys': CitySegmentation, 'sbu': SBUSegmentation, 'ycb': YCBSegmentation, 'robocup': RobocupSegmentation, } def get_segmentation_dataset(name, **kwargs): """Segmentation Datasets""" return datasets[name.lower()](**kwargs)
28.535714
79
0.767209
from .mscoco import COCOSegmentation from .cityscapes import CitySegmentation from .ade import ADE20KSegmentation from .pascal_voc import VOCSegmentation from .pascal_aug import VOCAugSegmentation from .sbu_shadow import SBUSegmentation from .ycb import YCBSegmentation from .robocup import RobocupSegmentation datasets = { 'ade20k': ADE20KSegmentation, 'pascal_voc': VOCSegmentation, 'pascal_aug': VOCAugSegmentation, 'coco': COCOSegmentation, 'citys': CitySegmentation, 'sbu': SBUSegmentation, 'ycb': YCBSegmentation, 'robocup': RobocupSegmentation, } def get_segmentation_dataset(name, **kwargs): return datasets[name.lower()](**kwargs)
true
true
f723e8b52caef4e5fb333357c7a8f97adccc1b2a
11,436
py
Python
src/roslaunch2/package.py
CodeFinder2/roslaunch2
5c2aa58129671647aa8e5cbc0541caf280accffb
[ "BSD-3-Clause" ]
10
2019-11-19T12:35:30.000Z
2022-01-16T15:59:44.000Z
src/roslaunch2/package.py
CodeFinder2/roslaunch2
5c2aa58129671647aa8e5cbc0541caf280accffb
[ "BSD-3-Clause" ]
1
2022-01-11T09:30:36.000Z
2022-02-07T22:03:36.000Z
src/roslaunch2/package.py
CodeFinder2/roslaunch2
5c2aa58129671647aa8e5cbc0541caf280accffb
[ "BSD-3-Clause" ]
3
2019-08-01T08:50:00.000Z
2021-05-02T01:27:47.000Z
#!/usr/bin/env python # -*- coding: utf-8 -*- # # Author: Adrian Böckenkamp # License: BSD (https://opensource.org/licenses/BSD-3-Clause) # Date: 08/06/2020 import rospkg import os import sys import Pyro4 from . import logger class Package: """ Encapsulates a ROS package and its ability to find files in the package directory structure. A caching mechanism is used to speedup `*find()` commands. """ __pkg_cache = {} __dir_cache = {} __find_cache = {} @staticmethod def invalidate_cache(): """ Invalidates the package, directory and file cache for finding packages, enforcing re-lookups. """ Package.__pkg_cache = {} Package.__dir_cache = {} Package.__find_cache = {} @staticmethod def get_paths_to_file(start_dir, file_comp): """ Searches for file_comp in `start_dir` recursively (also using a cache for speedup). :param start_dir: root directory where to start the search :param file_comp: file path component (like some/dir/myfile.xml) name to search for :return: Set of files found (with their full path) """ file_name = os.path.basename(file_comp) dir_comp = os.path.dirname(file_comp) result = [] if start_dir in Package.__dir_cache: # use cached file listing of $start_dir for root, file_set in Package.__dir_cache[start_dir]: for a_file in file_set: if a_file == file_name and root.endswith(dir_comp): result.append(os.path.join(root, a_file)) else: # crawl the file system at $start_dir (and cache for future requests) cache_entry = [] for root, _, file_set in os.walk(start_dir): cache_entry.append((root, file_set)) for a_file in file_set: if a_file == file_name and root.endswith(dir_comp): result.append(os.path.join(root, a_file)) Package.__dir_cache[start_dir] = cache_entry return result @staticmethod def __get_pkg_path_cached(name): """ Tries to find the given package name in the cache. If its not present in the cache, the cache is updated by a (slower) filesystem lookup. 
:param name: Name of ROS package :return: Path to package """ if name not in Package.__pkg_cache: Package.__pkg_cache[name] = rospkg.RosPack().get_path(name) # may throws rospkg.ResourceNotFound return Package.__pkg_cache[name] def __init__(self, name=None, silent=False): """ Initializes the ROS package given its name. The path to the package will automatically be resolved on construction. :param name: Name of ROS package :param silent: True if no exceptions should be thrown if the package was not found """ try: self.set_name(name) except rospkg.ResourceNotFound: if not silent: raise @Pyro4.expose def get_name(self): """ Returns the package name. :return: ROS package name """ return self.__name @Pyro4.expose def set_name(self, name): """ Updates/sets the package name. :param name: ROS package name """ self.__name = name self.__path = Package.__get_pkg_path_cached(name) name = property(get_name, set_name) @Pyro4.expose def get_path(self): """ Retrieves the package path. :return: ROS package path """ return self.__path def _set_path(self, pkg_path): # not exposed to Pyro! if self.__name: self.__path = pkg_path else: self.__path = None def __nonzero__(self): return bool(self.__path) # for Python 2.x def __bool__(self): return self.__nonzero__() # for Python 3.x path = property(get_path, _set_path) def __str__(self): return self.__name @staticmethod def valid(pkg): """ Tests whether pkg is a valid ROS package on the current system. :param pkg: Name of ROS package (type: str) or a valid package.Package object) :return: Path to pkg if valid or None if not found """ try: if type(pkg) is str: name = pkg elif isinstance(pkg, Package): name = pkg.name else: raise ValueError('Cannot process type {}'.format(str(type(pkg)))) return Package.__get_pkg_path_cached(name) except rospkg.ResourceNotFound: return None def has_node(self, node_name, warn=True): """ Tests if a ROS node actually exists. 
This method checks whether a ROS node named $node_name exists in the current ROS package. :param node_name: name of ROS node to test :param warn: True if a warning about the missing node should be emitted :return: True if node exists, False otherwise """ pkg = os.path.join(self.__path, '../..') # Just consider files that are executable: if [f for f in Package.get_paths_to_file(pkg, node_name) if os.access(f, os.X_OK)]: # if len(res) > 1: # log.warning("Found {} executable files named {}, assuming existence." # .format(len(res), node_name, res[0])) return True else: if warn: logger.warning("Node '{}' in package '{}' not found.".format(node_name, self.__name)) return False @staticmethod def include(pkg_name, path_comp, **kwargs): """ Like use() but static for convenience. :param pkg_name: Name of ROS package to be used for search of path_comp :param path_comp: (partial) path or file name to launch module (if it does not end with .pyl, this is added automatically) :param kwargs: optional arguments to be passed to the main() function of the launch module :return: GeneratorBase object as returned by the main() function """ assert type(pkg_name) is str return Package(pkg_name).use(path_comp, **kwargs) def use(self, path_comp, **kwargs): """ Imports (aka uses) the content of a launch module located in the current package (self). 
:param path_comp: (partial) path or file name to launch module (if it does not end with .pyl, this is added automatically) :param kwargs: optional arguments to be passed to the main() function of the launch module :return: GeneratorBase object as returned by the main() function """ if not os.path.splitext(path_comp)[1]: path_comp += '.pyl' mod_path = self.find(path_comp, True) if not mod_path: raise ValueError("Launch module '{:s}' in package '{:s}' not found.".format(path_comp, self.__name)) m = Package.import_launch_module(mod_path) return m.main(**kwargs) @staticmethod def import_launch_module(full_module_path): """ Rather internal helper function for important a Python module (i. e., a roslaunch2 launch module/file). This function handles all various cases related to different versions of Python. :param full_module_path: Full path to module file :return: Handle to imported module (like "foo" in "import bar as foo") """ if sys.version_info < (2, 4): # Python < 2.4 is not supported raise RuntimeError('Must use Python version >= 2.4!') if not os.path.isfile(full_module_path): raise ValueError("Launch module '{:s}' not found.".format(full_module_path)) module_name = os.path.splitext(full_module_path)[0] # Hot-patch PYTHONPATH to find . 
imports: search_path = os.path.dirname(os.path.abspath(module_name)) if search_path not in sys.path: sys.path.append(search_path) if sys.version_info < (3, 3): # Python 2.x and 3.y where x >= 4 and y < 3 import imp return imp.load_source(module_name, full_module_path) elif sys.version_info < (3, 4): # Python 3.3 and 3.4 import importlib.machinery return importlib.machinery.SourceFileLoader(module_name, full_module_path).load_module() elif sys.version_info >= (3, 5): # Python 3.5+ import importlib.util import importlib.machinery # Allow any extenstions (not only .py and .so, and .pyl in particular): importlib.machinery.SOURCE_SUFFIXES.append('') spec = importlib.util.spec_from_file_location(module_name, full_module_path) m = importlib.util.module_from_spec(spec) spec.loader.exec_module(m) return m @Pyro4.expose def find(self, path_comp, silent=False): """ Searches for a file or directory in the current package (self). :param path_comp: (partial) path or file name :param silent: if True return None when nothing is found, otherwise an IOError is raised in case of failure :return: first found file (full path) or None if silent==True and nothing found """ key = ''.join([self.__name, path_comp]) if key in Package.__find_cache: return Package.__find_cache[key] if not path_comp: return self.__path dir_path = os.path.join(self.__path, path_comp if not path_comp.startswith(os.path.sep) else path_comp[1:]) if os.path.isdir(dir_path): Package.__find_cache[key] = dir_path return dir_path f = Package.get_paths_to_file(self.__path, path_comp) if len(f) > 1: logger.log("Found {} files, unique selection impossible (using first).".format(', '.join(f))) if not f: if not silent: raise IOError("No files like '{}' found in '{}'.".format(path_comp, self.__name)) else: return None Package.__find_cache[key] = f[0] return f[0] @Pyro4.expose def selective_find(self, path_comp_options, path_comp_prefix='', silent=False): """ Searches for a set of files or directories in the current package 
(self). Tries to find any path from the path_comp_options list starting at the first element. Once a path is found the search for the remaining paths is canceled and the found path is returned. :param path_comp_options: list of (partial) path or file names :param path_comp_prefix: prefix to each element of path_comp_options :param silent: if True return None when nothing is found, otherwise an IOError is raised in case of failure :return: first found file (full path) or None if silent==True and nothing found """ for path_comp in path_comp_options: path = self.find(path_comp=os.path.join(path_comp_prefix, path_comp), silent=True) if path is not None: return path # Nothing found if not silent: raise IOError("None of the queried files found in '{}'.".format(self.__name)) else: return None
39.030717
119
0.612977
import rospkg import os import sys import Pyro4 from . import logger class Package: __pkg_cache = {} __dir_cache = {} __find_cache = {} @staticmethod def invalidate_cache(): Package.__pkg_cache = {} Package.__dir_cache = {} Package.__find_cache = {} @staticmethod def get_paths_to_file(start_dir, file_comp): file_name = os.path.basename(file_comp) dir_comp = os.path.dirname(file_comp) result = [] if start_dir in Package.__dir_cache: for root, file_set in Package.__dir_cache[start_dir]: for a_file in file_set: if a_file == file_name and root.endswith(dir_comp): result.append(os.path.join(root, a_file)) else: cache_entry = [] for root, _, file_set in os.walk(start_dir): cache_entry.append((root, file_set)) for a_file in file_set: if a_file == file_name and root.endswith(dir_comp): result.append(os.path.join(root, a_file)) Package.__dir_cache[start_dir] = cache_entry return result @staticmethod def __get_pkg_path_cached(name): if name not in Package.__pkg_cache: Package.__pkg_cache[name] = rospkg.RosPack().get_path(name) return Package.__pkg_cache[name] def __init__(self, name=None, silent=False): try: self.set_name(name) except rospkg.ResourceNotFound: if not silent: raise @Pyro4.expose def get_name(self): return self.__name @Pyro4.expose def set_name(self, name): self.__name = name self.__path = Package.__get_pkg_path_cached(name) name = property(get_name, set_name) @Pyro4.expose def get_path(self): return self.__path def _set_path(self, pkg_path): if self.__name: self.__path = pkg_path else: self.__path = None def __nonzero__(self): return bool(self.__path) def __bool__(self): return self.__nonzero__() path = property(get_path, _set_path) def __str__(self): return self.__name @staticmethod def valid(pkg): try: if type(pkg) is str: name = pkg elif isinstance(pkg, Package): name = pkg.name else: raise ValueError('Cannot process type {}'.format(str(type(pkg)))) return Package.__get_pkg_path_cached(name) except rospkg.ResourceNotFound: return None def has_node(self, 
node_name, warn=True): pkg = os.path.join(self.__path, '../..') if [f for f in Package.get_paths_to_file(pkg, node_name) if os.access(f, os.X_OK)]: return True else: if warn: logger.warning("Node '{}' in package '{}' not found.".format(node_name, self.__name)) return False @staticmethod def include(pkg_name, path_comp, **kwargs): assert type(pkg_name) is str return Package(pkg_name).use(path_comp, **kwargs) def use(self, path_comp, **kwargs): if not os.path.splitext(path_comp)[1]: path_comp += '.pyl' mod_path = self.find(path_comp, True) if not mod_path: raise ValueError("Launch module '{:s}' in package '{:s}' not found.".format(path_comp, self.__name)) m = Package.import_launch_module(mod_path) return m.main(**kwargs) @staticmethod def import_launch_module(full_module_path): if sys.version_info < (2, 4): raise RuntimeError('Must use Python version >= 2.4!') if not os.path.isfile(full_module_path): raise ValueError("Launch module '{:s}' not found.".format(full_module_path)) module_name = os.path.splitext(full_module_path)[0] search_path = os.path.dirname(os.path.abspath(module_name)) if search_path not in sys.path: sys.path.append(search_path) if sys.version_info < (3, 3): import imp return imp.load_source(module_name, full_module_path) elif sys.version_info < (3, 4): import importlib.machinery return importlib.machinery.SourceFileLoader(module_name, full_module_path).load_module() elif sys.version_info >= (3, 5): import importlib.util import importlib.machinery importlib.machinery.SOURCE_SUFFIXES.append('') spec = importlib.util.spec_from_file_location(module_name, full_module_path) m = importlib.util.module_from_spec(spec) spec.loader.exec_module(m) return m @Pyro4.expose def find(self, path_comp, silent=False): key = ''.join([self.__name, path_comp]) if key in Package.__find_cache: return Package.__find_cache[key] if not path_comp: return self.__path dir_path = os.path.join(self.__path, path_comp if not path_comp.startswith(os.path.sep) else path_comp[1:]) if 
os.path.isdir(dir_path): Package.__find_cache[key] = dir_path return dir_path f = Package.get_paths_to_file(self.__path, path_comp) if len(f) > 1: logger.log("Found {} files, unique selection impossible (using first).".format(', '.join(f))) if not f: if not silent: raise IOError("No files like '{}' found in '{}'.".format(path_comp, self.__name)) else: return None Package.__find_cache[key] = f[0] return f[0] @Pyro4.expose def selective_find(self, path_comp_options, path_comp_prefix='', silent=False): for path_comp in path_comp_options: path = self.find(path_comp=os.path.join(path_comp_prefix, path_comp), silent=True) if path is not None: return path if not silent: raise IOError("None of the queried files found in '{}'.".format(self.__name)) else: return None
true
true
f723e8f7914d47ae6d2455622f8a55aacd1c6ccf
72
py
Python
cibin/__init__.py
betochimasan/AntibodyCocktailEfficiency
67850eb85d502af80b691b49001803f2e3091a0b
[ "BSD-3-Clause" ]
null
null
null
cibin/__init__.py
betochimasan/AntibodyCocktailEfficiency
67850eb85d502af80b691b49001803f2e3091a0b
[ "BSD-3-Clause" ]
null
null
null
cibin/__init__.py
betochimasan/AntibodyCocktailEfficiency
67850eb85d502af80b691b49001803f2e3091a0b
[ "BSD-3-Clause" ]
null
null
null
""" The Cibin package. """ __version__ = "0.0.1" from .cibin import *
9
21
0.611111
__version__ = "0.0.1" from .cibin import *
true
true
f723e91ac20eac5aead5f13782edc311af79e501
4,293
py
Python
python/cairo/text_align_center.py
jeremiedecock/snippets
4bd4e7f459eee610d5cf19f845299ca942ff4b64
[ "MIT" ]
23
2015-06-08T13:01:00.000Z
2021-12-30T08:20:04.000Z
python/cairo/text_align_center.py
jeremiedecock/snippets
4bd4e7f459eee610d5cf19f845299ca942ff4b64
[ "MIT" ]
1
2020-10-22T02:36:10.000Z
2020-10-22T02:36:10.000Z
python/cairo/text_align_center.py
jeremiedecock/snippets
4bd4e7f459eee610d5cf19f845299ca942ff4b64
[ "MIT" ]
7
2017-10-31T09:48:14.000Z
2022-01-04T15:59:45.000Z
#!/usr/bin/env python3 # -*- coding: utf-8 -*- # Copyright (c) 2014 Jérémie DECOCK (http://www.jdhp.org) # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to deal # in the Software without restriction, including without limitation the rights # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # The above copyright notice and this permission notice shall be included in # all copies or substantial portions of the Software. # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN # THE SOFTWARE. # SEE: # - http://cairographics.org/samples/ # - http://cairographics.org/documentation/pycairo/3/reference/index.html # - http://cairographics.org/pycairo/tutorial/ # - http://www.tortall.net/mu/wiki/CairoTutorial import cairo import math WIDTH, HEIGHT = 400, 400 def main(): """Main function""" # Image surfaces provide the ability to render to memory buffers either # allocated by cairo or by the calling code. # List of supported surfaces: http://www.cairographics.org/manual/cairo-surfaces.html surface = cairo.SVGSurface("text_align_center.svg", WIDTH, HEIGHT) # cairo.Context is the object that you send your drawing commands to. context = cairo.Context(surface) ### DRAW ### # context.set_source_rgb(0., 0., 0.) # context.set_source_rgba(0., 0., 0., 1.) # Sets the source pattern within context to an opaque color. 
This opaque color # will then be used for any subsequent drawing operation until a new source # pattern is set. # The color components are floating point numbers in the range 0 to 1. If # the values passed in are outside that range, they will be clamped. # The default source pattern is opaque black, (that is, it is equivalent to # cairo_set_source_rgb(context, 0.0, 0.0, 0.0)). # Using set_source_rgb(r, g, b) is equivalent to using # set_source_rgba(r, g, b, 1.0), and it sets your source color to use # full opacity. # # context.stroke() # The stroke() operation takes a virtual pen along the current path # according to the current line width, line join, line cap, and dash # settings. After cairo_stroke(), the current path will be cleared from # the cairo context. # See http://www.cairographics.org/manual/cairo-cairo-t.html#cairo-stroke # # context.fill() # A drawing operator that fills the current path according to the current # fill rule, (each sub-path is implicitly closed before being filled). # After cairo_fill(), the current path will be cleared from the cairo # context. # See http://www.cairographics.org/manual/cairo-cairo-t.html#cairo-fill context.set_line_width(0.02) context.set_source_rgb(1, 1, 1) context.rectangle(0, 0, WIDTH, HEIGHT) context.fill() # TEXT context.set_source_rgb(0, 0, 0) context.select_font_face("Sans", cairo.FONT_SLANT_NORMAL, cairo.FONT_WEIGHT_NORMAL) context.set_font_size(52.0) (x, y, width, height, dx, dy) = context.text_extents("Hello") context.move_to(WIDTH/2. - width/2., HEIGHT/2. + height/2.) context.show_text("Hello") # DRAW HELPING LINES context.set_source_rgba(1, 0.2, 0.2, 0.6) context.set_line_width(6.0) context.arc(WIDTH/2. - width/2., HEIGHT/2. + height/2., 5, 0, math.radians(360)) context.fill() context.move_to(WIDTH/2., 0) context.line_to(WIDTH/2, HEIGHT) context.stroke() context.move_to(0, HEIGHT/2.) context.line_to(WIDTH, HEIGHT/2.) 
context.stroke() ### WRITE THE SVG FILE ### surface.finish() if __name__ == '__main__': main()
37.008621
89
0.699278
import cairo import math WIDTH, HEIGHT = 400, 400 def main(): surface = cairo.SVGSurface("text_align_center.svg", WIDTH, HEIGHT) context = cairo.Context(surface) xt.set_line_width(0.02) context.set_source_rgb(1, 1, 1) context.rectangle(0, 0, WIDTH, HEIGHT) context.fill() context.set_source_rgb(0, 0, 0) context.select_font_face("Sans", cairo.FONT_SLANT_NORMAL, cairo.FONT_WEIGHT_NORMAL) context.set_font_size(52.0) (x, y, width, height, dx, dy) = context.text_extents("Hello") context.move_to(WIDTH/2. - width/2., HEIGHT/2. + height/2.) context.show_text("Hello") context.set_source_rgba(1, 0.2, 0.2, 0.6) context.set_line_width(6.0) context.arc(WIDTH/2. - width/2., HEIGHT/2. + height/2., 5, 0, math.radians(360)) context.fill() context.move_to(WIDTH/2., 0) context.line_to(WIDTH/2, HEIGHT) context.stroke() context.move_to(0, HEIGHT/2.) context.line_to(WIDTH, HEIGHT/2.) context.stroke() main()
true
true
f723e929ff2525653af0e5fc4da9eb8c8a0a5165
532
py
Python
deploy.py
oneofthezombies/cpp-enum-class-string-idl
e1188e6a94d5c7c6a4bc6a7d025b12f4d22c53e1
[ "MIT" ]
1
2021-05-08T08:34:09.000Z
2021-05-08T08:34:09.000Z
deploy.py
oneofthezombies/argparse-best-practice
c29bffb014bb66d7694e4e27ad2911cc5abb3eef
[ "MIT" ]
null
null
null
deploy.py
oneofthezombies/argparse-best-practice
c29bffb014bb66d7694e4e27ad2911cc5abb3eef
[ "MIT" ]
null
null
null
from shutil import rmtree from subprocess import run from pathlib import Path from itertools import chain from more_itertools import consume dirnames = ['build', 'dist'] paths = map(lambda path: Path(path), dirnames) outputs = chain(paths, Path().glob('*.egg-info')) exists = filter(lambda path: path.exists(), outputs) deletes = map(lambda path: rmtree(path), exists) consume(deletes) run(['python3', 'setup.py', 'sdist', 'bdist_wheel']).check_returncode() run(['python3', '-m', 'twine', 'upload', 'dist/*']).check_returncode()
31.294118
71
0.721805
from shutil import rmtree from subprocess import run from pathlib import Path from itertools import chain from more_itertools import consume dirnames = ['build', 'dist'] paths = map(lambda path: Path(path), dirnames) outputs = chain(paths, Path().glob('*.egg-info')) exists = filter(lambda path: path.exists(), outputs) deletes = map(lambda path: rmtree(path), exists) consume(deletes) run(['python3', 'setup.py', 'sdist', 'bdist_wheel']).check_returncode() run(['python3', '-m', 'twine', 'upload', 'dist/*']).check_returncode()
true
true
f723ea0c1c6678a854eaf1b914ce06a246ec9ada
32,902
py
Python
api/registrations/serializers.py
mattclarkcos/osf.io
0e5ee8c0ff2a9bc7449061124ac8ce6d00f775ca
[ "Apache-2.0" ]
null
null
null
api/registrations/serializers.py
mattclarkcos/osf.io
0e5ee8c0ff2a9bc7449061124ac8ce6d00f775ca
[ "Apache-2.0" ]
4
2022-02-26T03:28:02.000Z
2022-03-08T23:36:45.000Z
api/registrations/serializers.py
mattclarkcos/osf.io
0e5ee8c0ff2a9bc7449061124ac8ce6d00f775ca
[ "Apache-2.0" ]
null
null
null
import pytz import json from unicodedata import normalize from distutils.version import StrictVersion from django.core.exceptions import ValidationError from rest_framework import serializers as ser from rest_framework import exceptions from api.base.exceptions import Conflict, InvalidModelValueError, JSONAPIException from api.base.serializers import is_anonymized from api.base.utils import absolute_reverse, get_user_auth, is_truthy from api.base.versioning import CREATE_REGISTRATION_FIELD_CHANGE_VERSION from website.project.model import NodeUpdateError from api.files.serializers import OsfStorageFileSerializer from api.nodes.serializers import ( NodeSerializer, NodeStorageProviderSerializer, NodeLicenseRelationshipField, NodeLinksSerializer, update_institutions, NodeLicenseSerializer, NodeContributorsSerializer, RegistrationProviderRelationshipField, get_license_details, ) from api.base.serializers import ( IDField, RelationshipField, LinksField, HideIfWithdrawal, FileRelationshipField, NodeFileHyperLinkField, HideIfRegistration, ShowIfVersion, VersionedDateTimeField, ValuesListField, ) from framework.auth.core import Auth from osf.exceptions import ValidationValueError, NodeStateError from osf.models import Node, AbstractNode from osf.utils.registrations import strip_registered_meta_comments from framework.sentry import log_exception class RegistrationSerializer(NodeSerializer): admin_only_editable_fields = [ 'custom_citation', 'is_pending_retraction', 'is_public', 'withdrawal_justification', ] # Remember to add new RegistrationSerializer fields to this list # if you don't need them to be anonymized non_anonymized_fields = NodeSerializer.non_anonymized_fields + [ 'archiving', 'article_doi', 'date_registered', 'date_withdrawn', 'embargo_end_date', 'embargoed', 'pending_embargo_approval', 'pending_embargo_termination_approval', 'pending_registration_approval', 'pending_withdrawal', 'provider', 'registered_by', 'registered_from', 'registered_meta', 
'registration_responses', 'registration_schema', 'registration_supplement', 'withdrawal_justification', 'withdrawn', ] reviews_state = ser.CharField(source='moderation_state', read_only=True) title = ser.CharField(read_only=True) description = ser.CharField(required=False, allow_blank=True, allow_null=True) category_choices = NodeSerializer.category_choices category_choices_string = NodeSerializer.category_choices_string category = ser.ChoiceField(required=False, choices=category_choices, help_text='Choices: ' + category_choices_string) date_modified = VersionedDateTimeField(source='last_logged', read_only=True) fork = HideIfWithdrawal(ser.BooleanField(read_only=True, source='is_fork')) collection = HideIfWithdrawal(ser.BooleanField(read_only=True, source='is_collection')) access_requests_enabled = HideIfWithdrawal(ser.BooleanField(read_only=True)) node_license = HideIfWithdrawal(NodeLicenseSerializer(required=False, source='license')) tags = HideIfWithdrawal(ValuesListField(attr_name='name', child=ser.CharField(), required=False)) article_doi = ser.CharField(required=False, allow_null=True) public = HideIfWithdrawal(ser.BooleanField( source='is_public', required=False, help_text='Nodes that are made public will give read-only access ' 'to everyone. Private nodes require explicit read ' 'permission. Write and admin access are the same for ' 'public and private nodes. 
Administrators on a parent ' 'node have implicit read permissions for all child nodes', )) current_user_permissions = HideIfWithdrawal(ser.SerializerMethodField( help_text='List of strings representing the permissions ' 'for the current user on this node.', )) pending_embargo_approval = HideIfWithdrawal(ser.BooleanField( read_only=True, source='is_pending_embargo', help_text='The associated Embargo is awaiting approval by project admins.', )) pending_embargo_termination_approval = HideIfWithdrawal(ser.BooleanField( read_only=True, source='is_pending_embargo_termination', help_text='The associated Embargo early termination is awaiting approval by project admins', )) embargoed = HideIfWithdrawal(ser.BooleanField(read_only=True, source='is_embargoed')) pending_registration_approval = HideIfWithdrawal(ser.BooleanField( source='is_pending_registration', read_only=True, help_text='The associated RegistrationApproval is awaiting approval by project admins.', )) archiving = HideIfWithdrawal(ser.BooleanField(read_only=True)) pending_withdrawal = HideIfWithdrawal(ser.BooleanField( source='is_pending_retraction', read_only=True, help_text='The registration is awaiting withdrawal approval by project admins.', )) withdrawn = ser.BooleanField( source='is_retracted', read_only=True, help_text='The registration has been withdrawn.', ) has_project = ser.SerializerMethodField() date_registered = VersionedDateTimeField(source='registered_date', read_only=True, help_text='Date time of registration.') date_withdrawn = VersionedDateTimeField(read_only=True, help_text='Date time of when this registration was retracted.') embargo_end_date = HideIfWithdrawal(ser.SerializerMethodField(help_text='When the embargo on this registration will be lifted.')) custom_citation = HideIfWithdrawal(ser.CharField(allow_blank=True, required=False)) withdrawal_justification = ser.CharField(read_only=True) template_from = HideIfWithdrawal(ser.CharField( read_only=True, allow_blank=False, allow_null=False, 
help_text='Specify a node id for a node you would like to use as a template for the ' 'new node. Templating is like forking, except that you do not copy the ' 'files, only the project structure. Some information is changed on the top ' 'level project by submitting the appropriate fields in the request body, ' 'and some information will not change. By default, the description will ' 'be cleared and the project will be made private.', )) registration_supplement = ser.SerializerMethodField() # Will be deprecated in favor of registration_responses registered_meta = HideIfWithdrawal(ser.SerializerMethodField( help_text='A dictionary with supplemental registration questions and responses.', )) registration_responses = HideIfWithdrawal(ser.SerializerMethodField( help_text='A dictionary with supplemental registration questions and responses.', )) registered_by = HideIfWithdrawal(RelationshipField( related_view='users:user-detail', related_view_kwargs={'user_id': '<registered_user._id>'}, )) registered_from = RelationshipField( related_view='nodes:node-detail', related_view_kwargs={'node_id': '<registered_from._id>'}, ) children = HideIfWithdrawal(RelationshipField( related_view='registrations:registration-children', related_view_kwargs={'node_id': '<_id>'}, related_meta={'count': 'get_node_count'}, )) comments = HideIfWithdrawal(RelationshipField( related_view='registrations:registration-comments', related_view_kwargs={'node_id': '<_id>'}, related_meta={ 'unread': 'get_unread_comments_count', 'count': 'get_total_comments_count', }, filter={'target': '<_id>'}, )) contributors = RelationshipField( related_view='registrations:registration-contributors', related_view_kwargs={'node_id': '<_id>'}, related_meta={'count': 'get_contrib_count'}, ) bibliographic_contributors = RelationshipField( related_view='registrations:registration-bibliographic-contributors', related_view_kwargs={'node_id': '<_id>'}, ) implicit_contributors = RelationshipField( 
related_view='registrations:registration-implicit-contributors', related_view_kwargs={'node_id': '<_id>'}, help_text='This feature is experimental and being tested. It may be deprecated.', ) files = HideIfWithdrawal(RelationshipField( related_view='registrations:registration-storage-providers', related_view_kwargs={'node_id': '<_id>'}, related_meta={'count': 'get_files_count'}, )) wikis = HideIfWithdrawal(RelationshipField( related_view='registrations:registration-wikis', related_view_kwargs={'node_id': '<_id>'}, related_meta={'count': 'get_wiki_page_count'}, )) forked_from = HideIfWithdrawal(RelationshipField( related_view=lambda n: 'registrations:registration-detail' if getattr(n, 'is_registration', False) else 'nodes:node-detail', related_view_kwargs={'node_id': '<forked_from_id>'}, )) template_node = HideIfWithdrawal(RelationshipField( related_view='nodes:node-detail', related_view_kwargs={'node_id': '<template_node._id>'}, )) license = HideIfWithdrawal(NodeLicenseRelationshipField( related_view='licenses:license-detail', related_view_kwargs={'license_id': '<license.node_license._id>'}, read_only=False, )) logs = HideIfWithdrawal(RelationshipField( related_view='registrations:registration-logs', related_view_kwargs={'node_id': '<_id>'}, )) forks = HideIfWithdrawal(RelationshipField( related_view='registrations:registration-forks', related_view_kwargs={'node_id': '<_id>'}, related_meta={'count': 'get_forks_count'}, )) groups = HideIfRegistration(RelationshipField( related_view='nodes:node-groups', related_view_kwargs={'node_id': '<_id>'}, )) node_links = ShowIfVersion( HideIfWithdrawal(RelationshipField( related_view='registrations:registration-pointers', related_view_kwargs={'node_id': '<_id>'}, related_meta={'count': 'get_pointers_count'}, help_text='This feature is deprecated as of version 2.1. 
Use linked_nodes instead.', )), min_version='2.0', max_version='2.0', ) linked_by_nodes = HideIfWithdrawal(RelationshipField( related_view='registrations:registration-linked-by-nodes', related_view_kwargs={'node_id': '<_id>'}, related_meta={'count': 'get_linked_by_nodes_count'}, )) linked_by_registrations = HideIfWithdrawal(RelationshipField( related_view='registrations:registration-linked-by-registrations', related_view_kwargs={'node_id': '<_id>'}, related_meta={'count': 'get_linked_by_registrations_count'}, )) parent = RelationshipField( related_view='registrations:registration-detail', related_view_kwargs={'node_id': '<parent_node._id>'}, filter_key='parent_node', ) root = RelationshipField( related_view='registrations:registration-detail', related_view_kwargs={'node_id': '<root._id>'}, ) region = HideIfWithdrawal(RelationshipField( related_view='regions:region-detail', related_view_kwargs={'region_id': '<osfstorage_region._id>'}, read_only=True, )) affiliated_institutions = RelationshipField( related_view='registrations:registration-institutions', related_view_kwargs={'node_id': '<_id>'}, self_view='registrations:registration-relationships-institutions', self_view_kwargs={'node_id': '<_id>'}, read_only=False, many=True, required=False, ) registration_schema = RelationshipField( related_view='schemas:registration-schema-detail', related_view_kwargs={'schema_id': '<registered_schema_id>'}, ) settings = HideIfRegistration(RelationshipField( related_view='nodes:node-settings', related_view_kwargs={'node_id': '<_id>'}, )) registrations = HideIfRegistration(RelationshipField( related_view='nodes:node-registrations', related_view_kwargs={'node_id': '<_id>'}, )) draft_registrations = HideIfRegistration(RelationshipField( related_view='nodes:node-draft-registrations', related_view_kwargs={'node_id': '<_id>'}, )) preprints = HideIfWithdrawal(HideIfRegistration(RelationshipField( related_view='nodes:node-preprints', related_view_kwargs={'node_id': '<_id>'}, ))) 
identifiers = RelationshipField( related_view='registrations:identifier-list', related_view_kwargs={'node_id': '<_id>'}, ) linked_nodes = HideIfWithdrawal(RelationshipField( related_view='registrations:linked-nodes', related_view_kwargs={'node_id': '<_id>'}, related_meta={'count': 'get_node_links_count'}, self_view='registrations:node-pointer-relationship', self_view_kwargs={'node_id': '<_id>'}, )) linked_registrations = HideIfWithdrawal(RelationshipField( related_view='registrations:linked-registrations', related_view_kwargs={'node_id': '<_id>'}, related_meta={'count': 'get_registration_links_count'}, self_view='registrations:node-registration-pointer-relationship', self_view_kwargs={'node_id': '<_id>'}, )) view_only_links = HideIfWithdrawal(RelationshipField( related_view='registrations:registration-view-only-links', related_view_kwargs={'node_id': '<_id>'}, related_meta={'count': 'get_view_only_links_count'}, )) citation = HideIfWithdrawal(RelationshipField( related_view='registrations:registration-citation', related_view_kwargs={'node_id': '<_id>'}, )) provider = RegistrationProviderRelationshipField( related_view='providers:registration-providers:registration-provider-detail', related_view_kwargs={'provider_id': '<provider._id>'}, read_only=True, ) review_actions = RelationshipField( related_view='registrations:registration-actions-list', related_view_kwargs={'node_id': '<_id>'}, ) requests = HideIfWithdrawal(RelationshipField( related_view='registrations:registration-requests-list', related_view_kwargs={'node_id': '<_id>'}, )) @property def subjects_related_view(self): # Overrides TaxonomizableSerializerMixin return 'registrations:registration-subjects' @property def subjects_self_view(self): # Overrides TaxonomizableSerializerMixin return 'registrations:registration-relationships-subjects' links = LinksField({'html': 'get_absolute_html_url'}) def get_has_project(self, obj): return obj.has_project def get_absolute_url(self, obj): return obj.get_absolute_url() 
def get_registered_meta(self, obj): if obj.registered_meta: meta_values = self.anonymize_registered_meta(obj) try: return json.loads(meta_values) except TypeError: return meta_values except ValueError: return meta_values return None def get_registration_responses(self, obj): if obj.registration_responses: return self.anonymize_registration_responses(obj) return None def get_embargo_end_date(self, obj): if obj.embargo_end_date: return obj.embargo_end_date return None def get_registration_supplement(self, obj): if obj.registered_schema: schema = obj.registered_schema.first() if schema is None: return None return schema.name return None def get_current_user_permissions(self, obj): return NodeSerializer.get_current_user_permissions(self, obj) def get_view_only_links_count(self, obj): return obj.private_links.filter(is_deleted=False).count() def get_total_comments_count(self, obj): return obj.comment_set.filter(page='node', is_deleted=False).count() def get_files_count(self, obj): return obj.files_count or 0 def anonymize_registered_meta(self, obj): """ Looks at every question on every page of the schema, for any titles that have a contributor-input block type. If present, deletes that question's response from meta_values. """ cleaned_registered_meta = strip_registered_meta_comments(list(obj.registered_meta.values())[0]) return self.anonymize_fields(obj, cleaned_registered_meta) def anonymize_registration_responses(self, obj): """ For any questions that have a `contributor-input` block type, delete that question's response from registration_responses. 
We want to make sure author's names that need to be anonymized aren't surfaced when viewed through an anonymous VOL """ return self.anonymize_fields(obj, obj.registration_responses) def anonymize_fields(self, obj, data): """ Consolidates logic to anonymize fields with contributor information on both registered_meta and registration_responses """ if is_anonymized(self.context['request']): anonymous_registration_response_keys = obj.get_contributor_registration_response_keys() for key in anonymous_registration_response_keys: if key in data: del data[key] return data def check_admin_perms(self, registration, user, validated_data): """ While admin/write users can make both make modifications to registrations, most fields are restricted to admin-only edits. You must be an admin contributor on the registration; you cannot have gotten your admin permissions through group membership. Add fields that need admin perms to admin_only_editable_fields """ user_is_admin = registration.is_admin_contributor(user) for field in validated_data: if field in self.admin_only_editable_fields and not user_is_admin: raise exceptions.PermissionDenied() def update_registration_tags(self, registration, validated_data, auth): new_tags = validated_data.pop('tags', []) try: registration.update_tags(new_tags, auth=auth) except NodeStateError as err: raise Conflict(str(err)) def retract_registration(self, registration, validated_data, user): is_pending_retraction = validated_data.pop('is_pending_retraction', None) withdrawal_justification = validated_data.pop('withdrawal_justification', None) if withdrawal_justification and not is_pending_retraction: raise exceptions.ValidationError( 'You cannot provide a withdrawal_justification without a concurrent withdrawal request.', ) if is_truthy(is_pending_retraction): if registration.is_pending_retraction: raise exceptions.ValidationError('This registration is already pending withdrawal.') try: retraction = registration.retract_registration(user, 
withdrawal_justification, save=True) except NodeStateError as err: raise exceptions.ValidationError(str(err)) retraction.ask(registration.get_active_contributors_recursive(unique_users=True)) elif is_pending_retraction is not None: raise exceptions.ValidationError('You cannot set is_pending_withdrawal to False.') def update(self, registration, validated_data): user = self.context['request'].user auth = Auth(user) self.check_admin_perms(registration, user, validated_data) validated_data.pop('_id', None) if 'tags' in validated_data: self.update_registration_tags(registration, validated_data, auth) if 'custom_citation' in validated_data: registration.update_custom_citation(validated_data.pop('custom_citation'), auth) if 'license_type' in validated_data or 'license' in validated_data: license_details = get_license_details(registration, validated_data) validated_data['node_license'] = license_details validated_data.pop('license_type', None) validated_data.pop('license', None) if 'affiliated_institutions' in validated_data: institutions_list = validated_data.pop('affiliated_institutions') new_institutions = [{'_id': institution} for institution in institutions_list] update_institutions(registration, new_institutions, user) registration.save() if 'subjects' in validated_data: subjects = validated_data.pop('subjects', None) self.update_subjects(registration, subjects, auth) if 'withdrawal_justification' in validated_data or 'is_pending_retraction' in validated_data: self.retract_registration(registration, validated_data, user) if 'is_public' in validated_data: if validated_data.get('is_public') is False: raise exceptions.ValidationError('Registrations can only be turned from private to public.') try: registration.update(validated_data, auth=auth) except ValidationError as e: raise InvalidModelValueError(detail=e.messages[0]) except NodeUpdateError as err: raise exceptions.ValidationError(err.reason) except NodeStateError as err: raise exceptions.ValidationError(str(err)) 
return registration class Meta: type_ = 'registrations' class RegistrationCreateSerializer(RegistrationSerializer): """ Overrides RegistrationSerializer to add draft_registration, registration_choice, and lift_embargo fields - """ def expect_cleaner_attributes(self, request): return StrictVersion(getattr(request, 'version', '2.0')) >= StrictVersion(CREATE_REGISTRATION_FIELD_CHANGE_VERSION) def __init__(self, *args, **kwargs): super(RegistrationCreateSerializer, self).__init__(*args, **kwargs) request = kwargs['context']['request'] # required fields defined here for the different versions if self.expect_cleaner_attributes(request): self.fields['draft_registration_id'] = ser.CharField(write_only=True) else: self.fields['draft_registration'] = ser.CharField(write_only=True) # For newer versions embargo_end_date = VersionedDateTimeField(write_only=True, allow_null=True, default=None) included_node_ids = ser.ListField(write_only=True, required=False) # For older versions lift_embargo = VersionedDateTimeField(write_only=True, default=None, input_formats=['%Y-%m-%dT%H:%M:%S']) children = ser.ListField(write_only=True, required=False) registration_choice = ser.ChoiceField(write_only=True, required=False, choices=['immediate', 'embargo']) users = RelationshipField( related_view='users:user-detail', related_view_kwargs={'user_id': '<user._id>'}, always_embed=True, required=False, ) def get_registration_choice_by_version(self, validated_data): """ Old API versions should pass in "immediate" or "embargo" under `registration_choice`. New API versions should pass in an "embargo_end_date" if it should be embargoed, else it will be None """ if self.expect_cleaner_attributes(self.context['request']): if validated_data.get('registration_choice'): raise JSONAPIException( source={'pointer': '/data/attributes/registration_choice'}, detail=f'Deprecated in version {CREATE_REGISTRATION_FIELD_CHANGE_VERSION}. 
Use embargo_end_date instead.', ) return 'embargo' if validated_data.get('embargo_end_date', None) else 'immediate' return validated_data.get('registration_choice', 'immediate') def get_embargo_end_date_by_version(self, validated_data): """ Old API versions should pass in "lift_embargo". New API versions should pass in "embargo_end_date" """ if self.expect_cleaner_attributes(self.context['request']): if validated_data.get('lift_embargo'): raise JSONAPIException( source={'pointer': '/data/attributes/lift_embargo'}, detail=f'Deprecated in version {CREATE_REGISTRATION_FIELD_CHANGE_VERSION}. Use embargo_end_date instead.', ) return validated_data.get('embargo_end_date', None) return validated_data.get('lift_embargo') def get_children_by_version(self, validated_data): """ Old API versions should pass in 'children' New API versions should pass in 'included_node_ids'. """ if self.expect_cleaner_attributes(self.context['request']): return validated_data.get('included_node_ids', []) return validated_data.get('children', []) def create(self, validated_data): auth = get_user_auth(self.context['request']) draft = validated_data.pop('draft', None) registration_choice = self.get_registration_choice_by_version(validated_data) embargo_lifted = self.get_embargo_end_date_by_version(validated_data) children = self.get_children_by_version(validated_data) if children: # First check that all children are valid child_nodes = Node.objects.filter(guids___id__in=children) if child_nodes.count() != len(children): raise exceptions.ValidationError('Some child nodes could not be found.') # Second check that metadata doesn't have files that are not in the child nodes being registered. 
registering = children + [draft.branched_from._id] orphan_files = self._find_orphan_files(registering, draft) if orphan_files: orphan_files_names = [file_data['selectedFileName'] for file_data in orphan_files] raise exceptions.ValidationError('All files attached to this form must be registered to complete the process. ' 'The following file(s) are attached, but are not part of a component being' ' registered: {}'.format(', '.join(orphan_files_names))) try: # Still validating metadata, but whether `registration_responses` or `registration_metadata` were populated # on the draft, the other field was built and populated as well. Both should exist. draft.validate_metadata(metadata=draft.registration_metadata, required_fields=True) except ValidationValueError: log_exception() # Probably indicates a bug on our end, so log to sentry # TODO: Raise an error once our JSON schemas are updated try: registration = draft.register(auth, save=True, child_ids=children) except NodeStateError as err: raise exceptions.ValidationError(err) if registration_choice == 'embargo': if not embargo_lifted: raise exceptions.ValidationError('lift_embargo must be specified.') embargo_end_date = embargo_lifted.replace(tzinfo=pytz.utc) try: registration.embargo_registration(auth.user, embargo_end_date) except ValidationError as err: raise exceptions.ValidationError(err.message) else: try: registration.require_approval(auth.user) except NodeStateError as err: raise exceptions.ValidationError(err) registration.save() return registration def _find_orphan_files(self, registering, draft): from website.archiver.utils import find_selected_files files = find_selected_files(draft.registration_schema, draft.registration_metadata) orphan_files = [] for key, value in files.items(): if 'extra' in value: for file_metadata in value['extra']: if not self._is_attached_file_valid(file_metadata, registering): orphan_files.append(file_metadata) return orphan_files def _is_attached_file_valid(self, file_metadata, 
registering): """ Validation of file information on registration_metadata. Theoretically, the file information on registration_responses does not have to be valid, so we enforce their accuracy here, to ensure file links load properly. Verifying that nodeId in the file_metadata is one of the files we're registering. Verify that selectedFileName is the name of a file on the node. Verify that the sha256 matches a version on that file. :param file_metadata - under "registration_metadata" :param registering - node ids you are registering :return boolean """ node_id = file_metadata.get('nodeId') if node_id not in registering: return False node = AbstractNode.load(node_id) if not node: # node in registration_metadata doesn't exist return False specified_sha = file_metadata.get('sha256', '') file = node.files.filter(name=normalize('NFD', file_metadata.get('selectedFileName', ''))).first() or \ node.files.filter(name=normalize('NFC', file_metadata.get('selectedFileName', ''))).first() if not file: # file with this name does not exist on the node return False match = False for version in file.versions.all(): if specified_sha == version.metadata.get('sha256'): match = True if not match: # Specified sha256 does not match a version on the specified file return False return True class RegistrationDetailSerializer(RegistrationSerializer): """ Overrides RegistrationSerializer make _id required and other fields writeable """ id = IDField(source='_id', required=True) pending_withdrawal = HideIfWithdrawal(ser.BooleanField( source='is_pending_retraction', required=False, help_text='The registration is awaiting withdrawal approval by project admins.', )) withdrawal_justification = ser.CharField(required=False) class RegistrationNodeLinksSerializer(NodeLinksSerializer): def get_absolute_url(self, obj): return absolute_reverse( 'registrations:registration-pointer-detail', kwargs={ 'node_link_id': obj._id, 'node_id': self.context['request'].parser_context['kwargs']['node_id'], 'version': 
self.context['request'].parser_context['kwargs']['version'], }, ) class RegistrationContributorsSerializer(NodeContributorsSerializer): def get_absolute_url(self, obj): return absolute_reverse( 'registrations:registration-contributor-detail', kwargs={ 'user_id': obj.user._id, 'node_id': self.context['request'].parser_context['kwargs']['node_id'], 'version': self.context['request'].parser_context['kwargs']['version'], }, ) class RegistrationFileSerializer(OsfStorageFileSerializer): files = NodeFileHyperLinkField( related_view='registrations:registration-files', related_view_kwargs={'node_id': '<target._id>', 'path': '<path>', 'provider': '<provider>'}, kind='folder', ) comments = FileRelationshipField( related_view='registrations:registration-comments', related_view_kwargs={'node_id': '<target._id>'}, related_meta={'unread': 'get_unread_comments_count'}, filter={'target': 'get_file_guid'}, ) node = RelationshipField( related_view='registrations:registration-detail', related_view_kwargs={'node_id': '<target._id>'}, help_text='The registration that this file belongs to', ) class RegistrationStorageProviderSerializer(NodeStorageProviderSerializer): """ Overrides NodeStorageProviderSerializer to lead to correct registration file links """ files = NodeFileHyperLinkField( related_view='registrations:registration-files', related_view_kwargs={'node_id': '<target._id>', 'path': '<path>', 'provider': '<provider>'}, kind='folder', never_embed=True, )
42.619171
133
0.687891
import pytz import json from unicodedata import normalize from distutils.version import StrictVersion from django.core.exceptions import ValidationError from rest_framework import serializers as ser from rest_framework import exceptions from api.base.exceptions import Conflict, InvalidModelValueError, JSONAPIException from api.base.serializers import is_anonymized from api.base.utils import absolute_reverse, get_user_auth, is_truthy from api.base.versioning import CREATE_REGISTRATION_FIELD_CHANGE_VERSION from website.project.model import NodeUpdateError from api.files.serializers import OsfStorageFileSerializer from api.nodes.serializers import ( NodeSerializer, NodeStorageProviderSerializer, NodeLicenseRelationshipField, NodeLinksSerializer, update_institutions, NodeLicenseSerializer, NodeContributorsSerializer, RegistrationProviderRelationshipField, get_license_details, ) from api.base.serializers import ( IDField, RelationshipField, LinksField, HideIfWithdrawal, FileRelationshipField, NodeFileHyperLinkField, HideIfRegistration, ShowIfVersion, VersionedDateTimeField, ValuesListField, ) from framework.auth.core import Auth from osf.exceptions import ValidationValueError, NodeStateError from osf.models import Node, AbstractNode from osf.utils.registrations import strip_registered_meta_comments from framework.sentry import log_exception class RegistrationSerializer(NodeSerializer): admin_only_editable_fields = [ 'custom_citation', 'is_pending_retraction', 'is_public', 'withdrawal_justification', ] non_anonymized_fields = NodeSerializer.non_anonymized_fields + [ 'archiving', 'article_doi', 'date_registered', 'date_withdrawn', 'embargo_end_date', 'embargoed', 'pending_embargo_approval', 'pending_embargo_termination_approval', 'pending_registration_approval', 'pending_withdrawal', 'provider', 'registered_by', 'registered_from', 'registered_meta', 'registration_responses', 'registration_schema', 'registration_supplement', 'withdrawal_justification', 'withdrawn', ] 
reviews_state = ser.CharField(source='moderation_state', read_only=True) title = ser.CharField(read_only=True) description = ser.CharField(required=False, allow_blank=True, allow_null=True) category_choices = NodeSerializer.category_choices category_choices_string = NodeSerializer.category_choices_string category = ser.ChoiceField(required=False, choices=category_choices, help_text='Choices: ' + category_choices_string) date_modified = VersionedDateTimeField(source='last_logged', read_only=True) fork = HideIfWithdrawal(ser.BooleanField(read_only=True, source='is_fork')) collection = HideIfWithdrawal(ser.BooleanField(read_only=True, source='is_collection')) access_requests_enabled = HideIfWithdrawal(ser.BooleanField(read_only=True)) node_license = HideIfWithdrawal(NodeLicenseSerializer(required=False, source='license')) tags = HideIfWithdrawal(ValuesListField(attr_name='name', child=ser.CharField(), required=False)) article_doi = ser.CharField(required=False, allow_null=True) public = HideIfWithdrawal(ser.BooleanField( source='is_public', required=False, help_text='Nodes that are made public will give read-only access ' 'to everyone. Private nodes require explicit read ' 'permission. Write and admin access are the same for ' 'public and private nodes. 
Administrators on a parent ' 'node have implicit read permissions for all child nodes', )) current_user_permissions = HideIfWithdrawal(ser.SerializerMethodField( help_text='List of strings representing the permissions ' 'for the current user on this node.', )) pending_embargo_approval = HideIfWithdrawal(ser.BooleanField( read_only=True, source='is_pending_embargo', help_text='The associated Embargo is awaiting approval by project admins.', )) pending_embargo_termination_approval = HideIfWithdrawal(ser.BooleanField( read_only=True, source='is_pending_embargo_termination', help_text='The associated Embargo early termination is awaiting approval by project admins', )) embargoed = HideIfWithdrawal(ser.BooleanField(read_only=True, source='is_embargoed')) pending_registration_approval = HideIfWithdrawal(ser.BooleanField( source='is_pending_registration', read_only=True, help_text='The associated RegistrationApproval is awaiting approval by project admins.', )) archiving = HideIfWithdrawal(ser.BooleanField(read_only=True)) pending_withdrawal = HideIfWithdrawal(ser.BooleanField( source='is_pending_retraction', read_only=True, help_text='The registration is awaiting withdrawal approval by project admins.', )) withdrawn = ser.BooleanField( source='is_retracted', read_only=True, help_text='The registration has been withdrawn.', ) has_project = ser.SerializerMethodField() date_registered = VersionedDateTimeField(source='registered_date', read_only=True, help_text='Date time of registration.') date_withdrawn = VersionedDateTimeField(read_only=True, help_text='Date time of when this registration was retracted.') embargo_end_date = HideIfWithdrawal(ser.SerializerMethodField(help_text='When the embargo on this registration will be lifted.')) custom_citation = HideIfWithdrawal(ser.CharField(allow_blank=True, required=False)) withdrawal_justification = ser.CharField(read_only=True) template_from = HideIfWithdrawal(ser.CharField( read_only=True, allow_blank=False, allow_null=False, 
help_text='Specify a node id for a node you would like to use as a template for the ' 'new node. Templating is like forking, except that you do not copy the ' 'files, only the project structure. Some information is changed on the top ' 'level project by submitting the appropriate fields in the request body, ' 'and some information will not change. By default, the description will ' 'be cleared and the project will be made private.', )) registration_supplement = ser.SerializerMethodField() # Will be deprecated in favor of registration_responses registered_meta = HideIfWithdrawal(ser.SerializerMethodField( help_text='A dictionary with supplemental registration questions and responses.', )) registration_responses = HideIfWithdrawal(ser.SerializerMethodField( help_text='A dictionary with supplemental registration questions and responses.', )) registered_by = HideIfWithdrawal(RelationshipField( related_view='users:user-detail', related_view_kwargs={'user_id': '<registered_user._id>'}, )) registered_from = RelationshipField( related_view='nodes:node-detail', related_view_kwargs={'node_id': '<registered_from._id>'}, ) children = HideIfWithdrawal(RelationshipField( related_view='registrations:registration-children', related_view_kwargs={'node_id': '<_id>'}, related_meta={'count': 'get_node_count'}, )) comments = HideIfWithdrawal(RelationshipField( related_view='registrations:registration-comments', related_view_kwargs={'node_id': '<_id>'}, related_meta={ 'unread': 'get_unread_comments_count', 'count': 'get_total_comments_count', }, filter={'target': '<_id>'}, )) contributors = RelationshipField( related_view='registrations:registration-contributors', related_view_kwargs={'node_id': '<_id>'}, related_meta={'count': 'get_contrib_count'}, ) bibliographic_contributors = RelationshipField( related_view='registrations:registration-bibliographic-contributors', related_view_kwargs={'node_id': '<_id>'}, ) implicit_contributors = RelationshipField( 
related_view='registrations:registration-implicit-contributors', related_view_kwargs={'node_id': '<_id>'}, help_text='This feature is experimental and being tested. It may be deprecated.', ) files = HideIfWithdrawal(RelationshipField( related_view='registrations:registration-storage-providers', related_view_kwargs={'node_id': '<_id>'}, related_meta={'count': 'get_files_count'}, )) wikis = HideIfWithdrawal(RelationshipField( related_view='registrations:registration-wikis', related_view_kwargs={'node_id': '<_id>'}, related_meta={'count': 'get_wiki_page_count'}, )) forked_from = HideIfWithdrawal(RelationshipField( related_view=lambda n: 'registrations:registration-detail' if getattr(n, 'is_registration', False) else 'nodes:node-detail', related_view_kwargs={'node_id': '<forked_from_id>'}, )) template_node = HideIfWithdrawal(RelationshipField( related_view='nodes:node-detail', related_view_kwargs={'node_id': '<template_node._id>'}, )) license = HideIfWithdrawal(NodeLicenseRelationshipField( related_view='licenses:license-detail', related_view_kwargs={'license_id': '<license.node_license._id>'}, read_only=False, )) logs = HideIfWithdrawal(RelationshipField( related_view='registrations:registration-logs', related_view_kwargs={'node_id': '<_id>'}, )) forks = HideIfWithdrawal(RelationshipField( related_view='registrations:registration-forks', related_view_kwargs={'node_id': '<_id>'}, related_meta={'count': 'get_forks_count'}, )) groups = HideIfRegistration(RelationshipField( related_view='nodes:node-groups', related_view_kwargs={'node_id': '<_id>'}, )) node_links = ShowIfVersion( HideIfWithdrawal(RelationshipField( related_view='registrations:registration-pointers', related_view_kwargs={'node_id': '<_id>'}, related_meta={'count': 'get_pointers_count'}, help_text='This feature is deprecated as of version 2.1. 
Use linked_nodes instead.', )), min_version='2.0', max_version='2.0', ) linked_by_nodes = HideIfWithdrawal(RelationshipField( related_view='registrations:registration-linked-by-nodes', related_view_kwargs={'node_id': '<_id>'}, related_meta={'count': 'get_linked_by_nodes_count'}, )) linked_by_registrations = HideIfWithdrawal(RelationshipField( related_view='registrations:registration-linked-by-registrations', related_view_kwargs={'node_id': '<_id>'}, related_meta={'count': 'get_linked_by_registrations_count'}, )) parent = RelationshipField( related_view='registrations:registration-detail', related_view_kwargs={'node_id': '<parent_node._id>'}, filter_key='parent_node', ) root = RelationshipField( related_view='registrations:registration-detail', related_view_kwargs={'node_id': '<root._id>'}, ) region = HideIfWithdrawal(RelationshipField( related_view='regions:region-detail', related_view_kwargs={'region_id': '<osfstorage_region._id>'}, read_only=True, )) affiliated_institutions = RelationshipField( related_view='registrations:registration-institutions', related_view_kwargs={'node_id': '<_id>'}, self_view='registrations:registration-relationships-institutions', self_view_kwargs={'node_id': '<_id>'}, read_only=False, many=True, required=False, ) registration_schema = RelationshipField( related_view='schemas:registration-schema-detail', related_view_kwargs={'schema_id': '<registered_schema_id>'}, ) settings = HideIfRegistration(RelationshipField( related_view='nodes:node-settings', related_view_kwargs={'node_id': '<_id>'}, )) registrations = HideIfRegistration(RelationshipField( related_view='nodes:node-registrations', related_view_kwargs={'node_id': '<_id>'}, )) draft_registrations = HideIfRegistration(RelationshipField( related_view='nodes:node-draft-registrations', related_view_kwargs={'node_id': '<_id>'}, )) preprints = HideIfWithdrawal(HideIfRegistration(RelationshipField( related_view='nodes:node-preprints', related_view_kwargs={'node_id': '<_id>'}, ))) 
identifiers = RelationshipField( related_view='registrations:identifier-list', related_view_kwargs={'node_id': '<_id>'}, ) linked_nodes = HideIfWithdrawal(RelationshipField( related_view='registrations:linked-nodes', related_view_kwargs={'node_id': '<_id>'}, related_meta={'count': 'get_node_links_count'}, self_view='registrations:node-pointer-relationship', self_view_kwargs={'node_id': '<_id>'}, )) linked_registrations = HideIfWithdrawal(RelationshipField( related_view='registrations:linked-registrations', related_view_kwargs={'node_id': '<_id>'}, related_meta={'count': 'get_registration_links_count'}, self_view='registrations:node-registration-pointer-relationship', self_view_kwargs={'node_id': '<_id>'}, )) view_only_links = HideIfWithdrawal(RelationshipField( related_view='registrations:registration-view-only-links', related_view_kwargs={'node_id': '<_id>'}, related_meta={'count': 'get_view_only_links_count'}, )) citation = HideIfWithdrawal(RelationshipField( related_view='registrations:registration-citation', related_view_kwargs={'node_id': '<_id>'}, )) provider = RegistrationProviderRelationshipField( related_view='providers:registration-providers:registration-provider-detail', related_view_kwargs={'provider_id': '<provider._id>'}, read_only=True, ) review_actions = RelationshipField( related_view='registrations:registration-actions-list', related_view_kwargs={'node_id': '<_id>'}, ) requests = HideIfWithdrawal(RelationshipField( related_view='registrations:registration-requests-list', related_view_kwargs={'node_id': '<_id>'}, )) @property def subjects_related_view(self): # Overrides TaxonomizableSerializerMixin return 'registrations:registration-subjects' @property def subjects_self_view(self): # Overrides TaxonomizableSerializerMixin return 'registrations:registration-relationships-subjects' links = LinksField({'html': 'get_absolute_html_url'}) def get_has_project(self, obj): return obj.has_project def get_absolute_url(self, obj): return obj.get_absolute_url() 
def get_registered_meta(self, obj): if obj.registered_meta: meta_values = self.anonymize_registered_meta(obj) try: return json.loads(meta_values) except TypeError: return meta_values except ValueError: return meta_values return None def get_registration_responses(self, obj): if obj.registration_responses: return self.anonymize_registration_responses(obj) return None def get_embargo_end_date(self, obj): if obj.embargo_end_date: return obj.embargo_end_date return None def get_registration_supplement(self, obj): if obj.registered_schema: schema = obj.registered_schema.first() if schema is None: return None return schema.name return None def get_current_user_permissions(self, obj): return NodeSerializer.get_current_user_permissions(self, obj) def get_view_only_links_count(self, obj): return obj.private_links.filter(is_deleted=False).count() def get_total_comments_count(self, obj): return obj.comment_set.filter(page='node', is_deleted=False).count() def get_files_count(self, obj): return obj.files_count or 0 def anonymize_registered_meta(self, obj): cleaned_registered_meta = strip_registered_meta_comments(list(obj.registered_meta.values())[0]) return self.anonymize_fields(obj, cleaned_registered_meta) def anonymize_registration_responses(self, obj): return self.anonymize_fields(obj, obj.registration_responses) def anonymize_fields(self, obj, data): if is_anonymized(self.context['request']): anonymous_registration_response_keys = obj.get_contributor_registration_response_keys() for key in anonymous_registration_response_keys: if key in data: del data[key] return data def check_admin_perms(self, registration, user, validated_data): user_is_admin = registration.is_admin_contributor(user) for field in validated_data: if field in self.admin_only_editable_fields and not user_is_admin: raise exceptions.PermissionDenied() def update_registration_tags(self, registration, validated_data, auth): new_tags = validated_data.pop('tags', []) try: registration.update_tags(new_tags, 
auth=auth) except NodeStateError as err: raise Conflict(str(err)) def retract_registration(self, registration, validated_data, user): is_pending_retraction = validated_data.pop('is_pending_retraction', None) withdrawal_justification = validated_data.pop('withdrawal_justification', None) if withdrawal_justification and not is_pending_retraction: raise exceptions.ValidationError( 'You cannot provide a withdrawal_justification without a concurrent withdrawal request.', ) if is_truthy(is_pending_retraction): if registration.is_pending_retraction: raise exceptions.ValidationError('This registration is already pending withdrawal.') try: retraction = registration.retract_registration(user, withdrawal_justification, save=True) except NodeStateError as err: raise exceptions.ValidationError(str(err)) retraction.ask(registration.get_active_contributors_recursive(unique_users=True)) elif is_pending_retraction is not None: raise exceptions.ValidationError('You cannot set is_pending_withdrawal to False.') def update(self, registration, validated_data): user = self.context['request'].user auth = Auth(user) self.check_admin_perms(registration, user, validated_data) validated_data.pop('_id', None) if 'tags' in validated_data: self.update_registration_tags(registration, validated_data, auth) if 'custom_citation' in validated_data: registration.update_custom_citation(validated_data.pop('custom_citation'), auth) if 'license_type' in validated_data or 'license' in validated_data: license_details = get_license_details(registration, validated_data) validated_data['node_license'] = license_details validated_data.pop('license_type', None) validated_data.pop('license', None) if 'affiliated_institutions' in validated_data: institutions_list = validated_data.pop('affiliated_institutions') new_institutions = [{'_id': institution} for institution in institutions_list] update_institutions(registration, new_institutions, user) registration.save() if 'subjects' in validated_data: subjects = 
validated_data.pop('subjects', None) self.update_subjects(registration, subjects, auth) if 'withdrawal_justification' in validated_data or 'is_pending_retraction' in validated_data: self.retract_registration(registration, validated_data, user) if 'is_public' in validated_data: if validated_data.get('is_public') is False: raise exceptions.ValidationError('Registrations can only be turned from private to public.') try: registration.update(validated_data, auth=auth) except ValidationError as e: raise InvalidModelValueError(detail=e.messages[0]) except NodeUpdateError as err: raise exceptions.ValidationError(err.reason) except NodeStateError as err: raise exceptions.ValidationError(str(err)) return registration class Meta: type_ = 'registrations' class RegistrationCreateSerializer(RegistrationSerializer): def expect_cleaner_attributes(self, request): return StrictVersion(getattr(request, 'version', '2.0')) >= StrictVersion(CREATE_REGISTRATION_FIELD_CHANGE_VERSION) def __init__(self, *args, **kwargs): super(RegistrationCreateSerializer, self).__init__(*args, **kwargs) request = kwargs['context']['request'] # required fields defined here for the different versions if self.expect_cleaner_attributes(request): self.fields['draft_registration_id'] = ser.CharField(write_only=True) else: self.fields['draft_registration'] = ser.CharField(write_only=True) # For newer versions embargo_end_date = VersionedDateTimeField(write_only=True, allow_null=True, default=None) included_node_ids = ser.ListField(write_only=True, required=False) # For older versions lift_embargo = VersionedDateTimeField(write_only=True, default=None, input_formats=['%Y-%m-%dT%H:%M:%S']) children = ser.ListField(write_only=True, required=False) registration_choice = ser.ChoiceField(write_only=True, required=False, choices=['immediate', 'embargo']) users = RelationshipField( related_view='users:user-detail', related_view_kwargs={'user_id': '<user._id>'}, always_embed=True, required=False, ) def 
get_registration_choice_by_version(self, validated_data): if self.expect_cleaner_attributes(self.context['request']): if validated_data.get('registration_choice'): raise JSONAPIException( source={'pointer': '/data/attributes/registration_choice'}, detail=f'Deprecated in version {CREATE_REGISTRATION_FIELD_CHANGE_VERSION}. Use embargo_end_date instead.', ) return 'embargo' if validated_data.get('embargo_end_date', None) else 'immediate' return validated_data.get('registration_choice', 'immediate') def get_embargo_end_date_by_version(self, validated_data): if self.expect_cleaner_attributes(self.context['request']): if validated_data.get('lift_embargo'): raise JSONAPIException( source={'pointer': '/data/attributes/lift_embargo'}, detail=f'Deprecated in version {CREATE_REGISTRATION_FIELD_CHANGE_VERSION}. Use embargo_end_date instead.', ) return validated_data.get('embargo_end_date', None) return validated_data.get('lift_embargo') def get_children_by_version(self, validated_data): if self.expect_cleaner_attributes(self.context['request']): return validated_data.get('included_node_ids', []) return validated_data.get('children', []) def create(self, validated_data): auth = get_user_auth(self.context['request']) draft = validated_data.pop('draft', None) registration_choice = self.get_registration_choice_by_version(validated_data) embargo_lifted = self.get_embargo_end_date_by_version(validated_data) children = self.get_children_by_version(validated_data) if children: # First check that all children are valid child_nodes = Node.objects.filter(guids___id__in=children) if child_nodes.count() != len(children): raise exceptions.ValidationError('Some child nodes could not be found.') # Second check that metadata doesn't have files that are not in the child nodes being registered. 
registering = children + [draft.branched_from._id] orphan_files = self._find_orphan_files(registering, draft) if orphan_files: orphan_files_names = [file_data['selectedFileName'] for file_data in orphan_files] raise exceptions.ValidationError('All files attached to this form must be registered to complete the process. ' 'The following file(s) are attached, but are not part of a component being' ' registered: {}'.format(', '.join(orphan_files_names))) try: draft.validate_metadata(metadata=draft.registration_metadata, required_fields=True) except ValidationValueError: log_exception() try: registration = draft.register(auth, save=True, child_ids=children) except NodeStateError as err: raise exceptions.ValidationError(err) if registration_choice == 'embargo': if not embargo_lifted: raise exceptions.ValidationError('lift_embargo must be specified.') embargo_end_date = embargo_lifted.replace(tzinfo=pytz.utc) try: registration.embargo_registration(auth.user, embargo_end_date) except ValidationError as err: raise exceptions.ValidationError(err.message) else: try: registration.require_approval(auth.user) except NodeStateError as err: raise exceptions.ValidationError(err) registration.save() return registration def _find_orphan_files(self, registering, draft): from website.archiver.utils import find_selected_files files = find_selected_files(draft.registration_schema, draft.registration_metadata) orphan_files = [] for key, value in files.items(): if 'extra' in value: for file_metadata in value['extra']: if not self._is_attached_file_valid(file_metadata, registering): orphan_files.append(file_metadata) return orphan_files def _is_attached_file_valid(self, file_metadata, registering): node_id = file_metadata.get('nodeId') if node_id not in registering: return False node = AbstractNode.load(node_id) if not node: return False specified_sha = file_metadata.get('sha256', '') file = node.files.filter(name=normalize('NFD', file_metadata.get('selectedFileName', ''))).first() or \ 
node.files.filter(name=normalize('NFC', file_metadata.get('selectedFileName', ''))).first() if not file: # file with this name does not exist on the node return False match = False for version in file.versions.all(): if specified_sha == version.metadata.get('sha256'): match = True if not match: # Specified sha256 does not match a version on the specified file return False return True class RegistrationDetailSerializer(RegistrationSerializer): id = IDField(source='_id', required=True) pending_withdrawal = HideIfWithdrawal(ser.BooleanField( source='is_pending_retraction', required=False, help_text='The registration is awaiting withdrawal approval by project admins.', )) withdrawal_justification = ser.CharField(required=False) class RegistrationNodeLinksSerializer(NodeLinksSerializer): def get_absolute_url(self, obj): return absolute_reverse( 'registrations:registration-pointer-detail', kwargs={ 'node_link_id': obj._id, 'node_id': self.context['request'].parser_context['kwargs']['node_id'], 'version': self.context['request'].parser_context['kwargs']['version'], }, ) class RegistrationContributorsSerializer(NodeContributorsSerializer): def get_absolute_url(self, obj): return absolute_reverse( 'registrations:registration-contributor-detail', kwargs={ 'user_id': obj.user._id, 'node_id': self.context['request'].parser_context['kwargs']['node_id'], 'version': self.context['request'].parser_context['kwargs']['version'], }, ) class RegistrationFileSerializer(OsfStorageFileSerializer): files = NodeFileHyperLinkField( related_view='registrations:registration-files', related_view_kwargs={'node_id': '<target._id>', 'path': '<path>', 'provider': '<provider>'}, kind='folder', ) comments = FileRelationshipField( related_view='registrations:registration-comments', related_view_kwargs={'node_id': '<target._id>'}, related_meta={'unread': 'get_unread_comments_count'}, filter={'target': 'get_file_guid'}, ) node = RelationshipField( related_view='registrations:registration-detail', 
related_view_kwargs={'node_id': '<target._id>'}, help_text='The registration that this file belongs to', ) class RegistrationStorageProviderSerializer(NodeStorageProviderSerializer): files = NodeFileHyperLinkField( related_view='registrations:registration-files', related_view_kwargs={'node_id': '<target._id>', 'path': '<path>', 'provider': '<provider>'}, kind='folder', never_embed=True, )
true
true
f723eb375382b38526c20e9d0239da24728a5d3d
698
py
Python
tests/nested_foreign_keys/models.py
Yoann-Vie/esgi-hearthstone
115d03426c7e8e80d89883b78ac72114c29bed12
[ "PSF-2.0", "BSD-3-Clause" ]
null
null
null
tests/nested_foreign_keys/models.py
Yoann-Vie/esgi-hearthstone
115d03426c7e8e80d89883b78ac72114c29bed12
[ "PSF-2.0", "BSD-3-Clause" ]
null
null
null
tests/nested_foreign_keys/models.py
Yoann-Vie/esgi-hearthstone
115d03426c7e8e80d89883b78ac72114c29bed12
[ "PSF-2.0", "BSD-3-Clause" ]
null
null
null
from django.db import models class Person(models.Model): name = models.CharField(max_length=200) class Movie(models.Model): title = models.CharField(max_length=200) director = models.ForeignKey(Person, models.CASCADE) class Event(models.Model): pass class Screening(Event): movie = models.ForeignKey(Movie, models.CASCADE) class ScreeningNullFK(Event): movie = models.ForeignKey(Movie, models.SET_NULL, null=True) class Package(models.Model): screening = models.ForeignKey(Screening, models.SET_NULL, null=True) class PackageNullFK(models.Model): screening = models.ForeignKey(ScreeningNullFK, models.SET_NULL, null=True)
22.516129
79
0.717765
from django.db import models class Person(models.Model): name = models.CharField(max_length=200) class Movie(models.Model): title = models.CharField(max_length=200) director = models.ForeignKey(Person, models.CASCADE) class Event(models.Model): pass class Screening(Event): movie = models.ForeignKey(Movie, models.CASCADE) class ScreeningNullFK(Event): movie = models.ForeignKey(Movie, models.SET_NULL, null=True) class Package(models.Model): screening = models.ForeignKey(Screening, models.SET_NULL, null=True) class PackageNullFK(models.Model): screening = models.ForeignKey(ScreeningNullFK, models.SET_NULL, null=True)
true
true
f723eb9b8474a649714e9207663dcbb042ef7f19
14,422
py
Python
scipy/optimize/tests/test_linesearch.py
ischrot/scipy_rmt_bsc
1dd8f7f0ee7ac1311ed1735ca6b6025150524418
[ "BSD-3-Clause" ]
null
null
null
scipy/optimize/tests/test_linesearch.py
ischrot/scipy_rmt_bsc
1dd8f7f0ee7ac1311ed1735ca6b6025150524418
[ "BSD-3-Clause" ]
null
null
null
scipy/optimize/tests/test_linesearch.py
ischrot/scipy_rmt_bsc
1dd8f7f0ee7ac1311ed1735ca6b6025150524418
[ "BSD-3-Clause" ]
null
null
null
""" Tests for line search routines """ from numpy.testing import (assert_, assert_equal, assert_array_almost_equal, assert_array_almost_equal_nulp, assert_warns, suppress_warnings) import scipy.optimize.linesearch as ls import scipy.optimize.nonlin as nl #(LS) from scipy.linalg import norm from scipy.optimize.linesearch import LineSearchWarning import numpy as np from copy import deepcopy # (IS) def assert_wolfe(s, phi, derphi, c1=1e-4, c2=0.9, err_msg=""): """ Check that strong Wolfe conditions apply """ phi1 = phi(s) phi0 = phi(0) derphi0 = derphi(0) derphi1 = derphi(s) msg = "s = %s; phi(0) = %s; phi(s) = %s; phi'(0) = %s; phi'(s) = %s; %s" % ( s, phi0, phi1, derphi0, derphi1, err_msg) assert_(phi1 <= phi0 + c1*s*derphi0, "Wolfe 1 failed: " + msg) assert_(abs(derphi1) <= abs(c2*derphi0), "Wolfe 2 failed: " + msg) def assert_armijo(s, phi, c1=1e-4, err_msg=""): """ Check that Armijo condition applies """ phi1 = phi(s) phi0 = phi(0) msg = "s = %s; phi(0) = %s; phi(s) = %s; %s" % (s, phi0, phi1, err_msg) assert_(phi1 <= (1 - c1*s)*phi0, msg) ###(LS)### def assert_rmt(alpha, dx, F0, Fx_new, jacobian, param, c1=1e-4, err_msg=""): """ Check that RMT condition applies """ parameters = ls.prepare_parameters('rmt',param,jacobian,dx) rmt_eta_upper = parameters['rmt_eta_upper'] rmt_eta_lower = parameters['rmt_eta_lower'] amin = parameters['amin'] #Step 1: Eval t_dx_omega dxbar = jacobian.solve( Fx_new ) dx_diff = dxbar + (1 - alpha) * dx # note that dx = - J(x_k)^(-1)F(x_k) nominator = 2 * norm(dx_diff) denominator = alpha * norm(dx) t_dx_omega = nominator / denominator tester = (rmt_eta_lower <= t_dx_omega and t_dx_omega <= rmt_eta_upper) or (rmt_eta_lower > t_dx_omega and alpha == 1.0) msg = "s = %s; phi(0) = %s; phi(s) = %s; %s" % (alpha, F0, Fx_new, err_msg) assert_(tester or (alpha<amin), msg) def assert_bsc(alpha, x, dx, func, old_jacobian, param, err_msg): parameters = ls.prepare_parameters('bsc',param, old_jacobian, dx) H_lower = parameters['H_lower'] H_upper = 
parameters['H_upper'] amin = parameters['amin'] x_new = x + alpha * dx Fx_new = func(x_new) jacobian = deepcopy(old_jacobian) jacobian.update( x_new.copy(), Fx_new ) dx_next_it = -jacobian.solve( Fx_new ) dx_diff = dx_next_it - dx H_prime = alpha * norm(dx_diff) tester = (H_lower <= H_prime and H_prime <= H_upper) or (H_lower > H_prime and alpha >= 1.0) msg = "s = %s; phi(0) = %s; phi(s) = %s; %s" % (alpha, func(x), Fx_new, err_msg) assert_(tester or (alpha<amin), msg) ###(LS)### def assert_line_wolfe(x, p, s, f, fprime, **kw): assert_wolfe(s, phi=lambda sp: f(x + p*sp), derphi=lambda sp: np.dot(fprime(x + p*sp), p), **kw) def assert_line_armijo(x, p, s, f, **kw): assert_armijo(s, phi=lambda sp: f(x + p*sp), **kw) def assert_fp_equal(x, y, err_msg="", nulp=50): """Assert two arrays are equal, up to some floating-point rounding error""" try: assert_array_almost_equal_nulp(x, y, nulp) except AssertionError as e: raise AssertionError("%s\n%s" % (e, err_msg)) from e class TestLineSearch(object): # -- scalar functions; must have dphi(0.) < 0 def _scalar_func_1(self, s): self.fcount += 1 p = -s - s**3 + s**4 dp = -1 - 3*s**2 + 4*s**3 return p, dp def _scalar_func_2(self, s): self.fcount += 1 p = np.exp(-4*s) + s**2 dp = -4*np.exp(-4*s) + 2*s return p, dp def _scalar_func_3(self, s): self.fcount += 1 p = -np.sin(10*s) dp = -10*np.cos(10*s) return p, dp # -- n-d functions def _line_func_1(self, x): self.fcount += 1 f = np.dot(x, x) df = 2*x return f, df def _line_func_2(self, x): self.fcount += 1 f = np.dot(x, np.dot(self.A, x)) + 1 df = np.dot(self.A + self.A.T, x) return f, df # -- def setup_method(self): self.scalar_funcs = [] self.line_funcs = [] self.N = 20 self.fcount = 0 def bind_index(func, idx): # Remember Python's closure semantics! 
return lambda *a, **kw: func(*a, **kw)[idx] for name in sorted(dir(self)): if name.startswith('_scalar_func_'): value = getattr(self, name) self.scalar_funcs.append( (name, bind_index(value, 0), bind_index(value, 1))) elif name.startswith('_line_func_'): value = getattr(self, name) self.line_funcs.append( (name, bind_index(value, 0), bind_index(value, 1))) np.random.seed(1234) self.A = np.random.randn(self.N, self.N) def scalar_iter(self): for name, phi, derphi in self.scalar_funcs: for old_phi0 in np.random.randn(3): yield name, phi, derphi, old_phi0 def line_iter(self): for name, f, fprime in self.line_funcs: k = 0 while k < 9: x = np.random.randn(self.N) p = np.random.randn(self.N) if np.dot(p, fprime(x)) >= 0: # always pick a descent direction continue k += 1 old_fv = float(np.random.randn()) yield name, f, fprime, x, p, old_fv # -- Generic scalar searches def test_scalar_search_wolfe1(self): c = 0 for name, phi, derphi, old_phi0 in self.scalar_iter(): c += 1 s, phi1, phi0 = ls.scalar_search_wolfe1(phi, derphi, phi(0), old_phi0, derphi(0)) assert_fp_equal(phi0, phi(0), name) assert_fp_equal(phi1, phi(s), name) assert_wolfe(s, phi, derphi, err_msg=name) assert_(c > 3) # check that the iterator really works... def test_scalar_search_wolfe2(self): for name, phi, derphi, old_phi0 in self.scalar_iter(): s, phi1, phi0, derphi1 = ls.scalar_search_wolfe2( phi, derphi, phi(0), old_phi0, derphi(0)) assert_fp_equal(phi0, phi(0), name) assert_fp_equal(phi1, phi(s), name) if derphi1 is not None: assert_fp_equal(derphi1, derphi(s), name) assert_wolfe(s, phi, derphi, err_msg="%s %g" % (name, old_phi0)) def test_scalar_search_wolfe2_with_low_amax(self): def phi(alpha): return (alpha - 5) ** 2 def derphi(alpha): return 2 * (alpha - 5) s, _, _, _ = assert_warns(LineSearchWarning, ls.scalar_search_wolfe2, phi, derphi, amax=0.001) assert_(s is None) def test_scalar_search_wolfe2_regression(self): # Regression test for gh-12157 # This phi has its minimum at alpha=4/3 ~ 1.333. 
def phi(alpha): if alpha < 1: return - 3*np.pi/2 * (alpha - 1) else: return np.cos(3*np.pi/2 * alpha - np.pi) def derphi(alpha): if alpha < 1: return - 3*np.pi/2 else: return - 3*np.pi/2 * np.sin(3*np.pi/2 * alpha - np.pi) s, _, _, _ = ls.scalar_search_wolfe2(phi, derphi) # Without the fix in gh-13073, the scalar_search_wolfe2 # returned s=2.0 instead. assert_(s < 1.5) def test_scalar_search_armijo(self): for name, phi, derphi, old_phi0 in self.scalar_iter(): s, phi1 = ls.scalar_search_armijo(phi, phi(0), derphi(0)) assert_fp_equal(phi1, phi(s), name) assert_armijo(s, phi, err_msg="%s %g" % (name, old_phi0)) ###(LS)### ##RMT not usefull for scalar functions, thus no need for test_scalar_search_rmt? def test_line_search_rmt(self): #There is at least 1 function R^20->R to be tested, but this leads to s=None for name, f, fprime, x, p, old_f in self.line_iter(): jac = lambda x: fprime(x) x0 = nl._as_inexact(x) func = lambda z: nl._as_inexact(f(nl._array_like(z, x0))).flatten() x = x0.flatten() jacobian = nl.asjacobian(jac) jacobian.setup(x.copy(), f(x), func) options = {'jacobian': jacobian, 'jac_tol': min(1e-03,1e-03*norm(f(x))), 'amin':1e-8} #print("1: ",f(x),np.shape(fprime(x))) s, dxbar, f_new = ls.scalar_search_rmt(f, x, fprime(x), parameters=options) #print("2: ",p_new, s) assert_fp_equal(f_new, x+s*fprime(x), name) assert_rmt(s, fprime(x), f(x), f_new, jacobian, options, err_msg="%s %g" % name) def test_line_search_bsc(self): #There is at least 1 function R^20->R to be tested, but this leads to s=None for name, f, fprime, x, p, old_f in self.line_iter(): jac = lambda x: fprime(x) x0 = nl._as_inexact(x) func = lambda z: nl._as_inexact(f(nl._array_like(z, x0))).flatten() x = x0.flatten() jacobian = nl.asjacobian(jac) jacobian.setup(x.copy(), f(x), func) options = {'jacobian': jacobian, 'jac_tol': min(1e-03,1e-03*norm(f(x))), 'amin':1e-8} #print("1: ",f(x),np.shape(dp(x))) s, f_new= ls.scalar_search_bsc(func, x, fprime(x), f(x), parameters=options) #print("2: 
",p_new, s) assert_fp_equal(f_new, x+s*fprime(x), name) assert_bsc(s, x, fprime(x), func, jacobian, options, err_msg="%s %g" % name) ###(LS)### # -- Generic line searches def test_line_search_wolfe1(self): c = 0 smax = 100 for name, f, fprime, x, p, old_f in self.line_iter(): f0 = f(x) g0 = fprime(x) self.fcount = 0 s, fc, gc, fv, ofv, gv = ls.line_search_wolfe1(f, fprime, x, p, g0, f0, old_f, amax=smax) assert_equal(self.fcount, fc+gc) assert_fp_equal(ofv, f(x)) if s is None: continue assert_fp_equal(fv, f(x + s*p)) assert_array_almost_equal(gv, fprime(x + s*p), decimal=14) if s < smax: c += 1 assert_line_wolfe(x, p, s, f, fprime, err_msg=name) assert_(c > 3) # check that the iterator really works... def test_line_search_wolfe2(self): c = 0 smax = 512 for name, f, fprime, x, p, old_f in self.line_iter(): f0 = f(x) g0 = fprime(x) self.fcount = 0 with suppress_warnings() as sup: sup.filter(LineSearchWarning, "The line search algorithm could not find a solution") sup.filter(LineSearchWarning, "The line search algorithm did not converge") s, fc, gc, fv, ofv, gv = ls.line_search_wolfe2(f, fprime, x, p, g0, f0, old_f, amax=smax) assert_equal(self.fcount, fc+gc) assert_fp_equal(ofv, f(x)) assert_fp_equal(fv, f(x + s*p)) if gv is not None: assert_array_almost_equal(gv, fprime(x + s*p), decimal=14) if s < smax: c += 1 assert_line_wolfe(x, p, s, f, fprime, err_msg=name) assert_(c > 3) # check that the iterator really works... 
def test_line_search_wolfe2_bounds(self): # See gh-7475 # For this f and p, starting at a point on axis 0, the strong Wolfe # condition 2 is met if and only if the step length s satisfies # |x + s| <= c2 * |x| f = lambda x: np.dot(x, x) fp = lambda x: 2 * x p = np.array([1, 0]) # Smallest s satisfying strong Wolfe conditions for these arguments is 30 x = -60 * p c2 = 0.5 s, _, _, _, _, _ = ls.line_search_wolfe2(f, fp, x, p, amax=30, c2=c2) assert_line_wolfe(x, p, s, f, fp) s, _, _, _, _, _ = assert_warns(LineSearchWarning, ls.line_search_wolfe2, f, fp, x, p, amax=29, c2=c2) assert_(s is None) # s=30 will only be tried on the 6th iteration, so this won't converge assert_warns(LineSearchWarning, ls.line_search_wolfe2, f, fp, x, p, c2=c2, maxiter=5) def test_line_search_armijo(self): c = 0 for name, f, fprime, x, p, old_f in self.line_iter(): f0 = f(x) g0 = fprime(x) self.fcount = 0 s, fc, fv = ls.line_search_armijo(f, x, p, g0, f0) c += 1 assert_equal(self.fcount, fc) assert_fp_equal(fv, f(x + s*p)) assert_line_armijo(x, p, s, f, err_msg=name) assert_(c >= 9) # -- More specific tests def test_armijo_terminate_1(self): # Armijo should evaluate the function only once if the trial step # is already suitable count = [0] def phi(s): count[0] += 1 return -s + 0.01*s**2 s, phi1 = ls.scalar_search_armijo(phi, phi(0), -1, alpha0=1) assert_equal(s, 1) assert_equal(count[0], 2) assert_armijo(s, phi) def test_wolfe_terminate(self): # wolfe1 and wolfe2 should also evaluate the function only a few # times if the trial step is already suitable def phi(s): count[0] += 1 return -s + 0.05*s**2 def derphi(s): count[0] += 1 return -1 + 0.05*2*s for func in [ls.scalar_search_wolfe1, ls.scalar_search_wolfe2]: count = [0] r = func(phi, derphi, phi(0), None, derphi(0)) assert_(r[0] is not None, (r, func)) assert_(count[0] <= 2 + 2, (count, func)) assert_wolfe(r[0], phi, derphi, err_msg=str(func))
35.348039
123
0.533491
from numpy.testing import (assert_, assert_equal, assert_array_almost_equal, assert_array_almost_equal_nulp, assert_warns, suppress_warnings) import scipy.optimize.linesearch as ls import scipy.optimize.nonlin as nl from scipy.linalg import norm from scipy.optimize.linesearch import LineSearchWarning import numpy as np from copy import deepcopy def assert_wolfe(s, phi, derphi, c1=1e-4, c2=0.9, err_msg=""): phi1 = phi(s) phi0 = phi(0) derphi0 = derphi(0) derphi1 = derphi(s) msg = "s = %s; phi(0) = %s; phi(s) = %s; phi'(0) = %s; phi'(s) = %s; %s" % ( s, phi0, phi1, derphi0, derphi1, err_msg) assert_(phi1 <= phi0 + c1*s*derphi0, "Wolfe 1 failed: " + msg) assert_(abs(derphi1) <= abs(c2*derphi0), "Wolfe 2 failed: " + msg) def assert_armijo(s, phi, c1=1e-4, err_msg=""): phi1 = phi(s) phi0 = phi(0) msg = "s = %s; phi(0) = %s; phi(s) = %s; %s" % (s, phi0, phi1, err_msg) assert_(phi1 <= (1 - c1*s)*phi0, msg) dx, F0, Fx_new, jacobian, param, c1=1e-4, err_msg=""): parameters = ls.prepare_parameters('rmt',param,jacobian,dx) rmt_eta_upper = parameters['rmt_eta_upper'] rmt_eta_lower = parameters['rmt_eta_lower'] amin = parameters['amin'] dxbar = jacobian.solve( Fx_new ) dx_diff = dxbar + (1 - alpha) * dx nominator = 2 * norm(dx_diff) denominator = alpha * norm(dx) t_dx_omega = nominator / denominator tester = (rmt_eta_lower <= t_dx_omega and t_dx_omega <= rmt_eta_upper) or (rmt_eta_lower > t_dx_omega and alpha == 1.0) msg = "s = %s; phi(0) = %s; phi(s) = %s; %s" % (alpha, F0, Fx_new, err_msg) assert_(tester or (alpha<amin), msg) def assert_bsc(alpha, x, dx, func, old_jacobian, param, err_msg): parameters = ls.prepare_parameters('bsc',param, old_jacobian, dx) H_lower = parameters['H_lower'] H_upper = parameters['H_upper'] amin = parameters['amin'] x_new = x + alpha * dx Fx_new = func(x_new) jacobian = deepcopy(old_jacobian) jacobian.update( x_new.copy(), Fx_new ) dx_next_it = -jacobian.solve( Fx_new ) dx_diff = dx_next_it - dx H_prime = alpha * norm(dx_diff) tester = (H_lower <= 
H_prime and H_prime <= H_upper) or (H_lower > H_prime and alpha >= 1.0) msg = "s = %s; phi(0) = %s; phi(s) = %s; %s" % (alpha, func(x), Fx_new, err_msg) assert_(tester or (alpha<amin), msg) (x, p, s, f, fprime, **kw): assert_wolfe(s, phi=lambda sp: f(x + p*sp), derphi=lambda sp: np.dot(fprime(x + p*sp), p), **kw) def assert_line_armijo(x, p, s, f, **kw): assert_armijo(s, phi=lambda sp: f(x + p*sp), **kw) def assert_fp_equal(x, y, err_msg="", nulp=50): try: assert_array_almost_equal_nulp(x, y, nulp) except AssertionError as e: raise AssertionError("%s\n%s" % (e, err_msg)) from e class TestLineSearch(object): def _scalar_func_1(self, s): self.fcount += 1 p = -s - s**3 + s**4 dp = -1 - 3*s**2 + 4*s**3 return p, dp def _scalar_func_2(self, s): self.fcount += 1 p = np.exp(-4*s) + s**2 dp = -4*np.exp(-4*s) + 2*s return p, dp def _scalar_func_3(self, s): self.fcount += 1 p = -np.sin(10*s) dp = -10*np.cos(10*s) return p, dp def _line_func_1(self, x): self.fcount += 1 f = np.dot(x, x) df = 2*x return f, df def _line_func_2(self, x): self.fcount += 1 f = np.dot(x, np.dot(self.A, x)) + 1 df = np.dot(self.A + self.A.T, x) return f, df def setup_method(self): self.scalar_funcs = [] self.line_funcs = [] self.N = 20 self.fcount = 0 def bind_index(func, idx): return lambda *a, **kw: func(*a, **kw)[idx] for name in sorted(dir(self)): if name.startswith('_scalar_func_'): value = getattr(self, name) self.scalar_funcs.append( (name, bind_index(value, 0), bind_index(value, 1))) elif name.startswith('_line_func_'): value = getattr(self, name) self.line_funcs.append( (name, bind_index(value, 0), bind_index(value, 1))) np.random.seed(1234) self.A = np.random.randn(self.N, self.N) def scalar_iter(self): for name, phi, derphi in self.scalar_funcs: for old_phi0 in np.random.randn(3): yield name, phi, derphi, old_phi0 def line_iter(self): for name, f, fprime in self.line_funcs: k = 0 while k < 9: x = np.random.randn(self.N) p = np.random.randn(self.N) if np.dot(p, fprime(x)) >= 0: # always 
pick a descent direction continue k += 1 old_fv = float(np.random.randn()) yield name, f, fprime, x, p, old_fv # -- Generic scalar searches def test_scalar_search_wolfe1(self): c = 0 for name, phi, derphi, old_phi0 in self.scalar_iter(): c += 1 s, phi1, phi0 = ls.scalar_search_wolfe1(phi, derphi, phi(0), old_phi0, derphi(0)) assert_fp_equal(phi0, phi(0), name) assert_fp_equal(phi1, phi(s), name) assert_wolfe(s, phi, derphi, err_msg=name) assert_(c > 3) # check that the iterator really works... def test_scalar_search_wolfe2(self): for name, phi, derphi, old_phi0 in self.scalar_iter(): s, phi1, phi0, derphi1 = ls.scalar_search_wolfe2( phi, derphi, phi(0), old_phi0, derphi(0)) assert_fp_equal(phi0, phi(0), name) assert_fp_equal(phi1, phi(s), name) if derphi1 is not None: assert_fp_equal(derphi1, derphi(s), name) assert_wolfe(s, phi, derphi, err_msg="%s %g" % (name, old_phi0)) def test_scalar_search_wolfe2_with_low_amax(self): def phi(alpha): return (alpha - 5) ** 2 def derphi(alpha): return 2 * (alpha - 5) s, _, _, _ = assert_warns(LineSearchWarning, ls.scalar_search_wolfe2, phi, derphi, amax=0.001) assert_(s is None) def test_scalar_search_wolfe2_regression(self): # Regression test for gh-12157 # This phi has its minimum at alpha=4/3 ~ 1.333. def phi(alpha): if alpha < 1: return - 3*np.pi/2 * (alpha - 1) else: return np.cos(3*np.pi/2 * alpha - np.pi) def derphi(alpha): if alpha < 1: return - 3*np.pi/2 else: return - 3*np.pi/2 * np.sin(3*np.pi/2 * alpha - np.pi) s, _, _, _ = ls.scalar_search_wolfe2(phi, derphi) # Without the fix in gh-13073, the scalar_search_wolfe2 # returned s=2.0 instead. assert_(s < 1.5) def test_scalar_search_armijo(self): for name, phi, derphi, old_phi0 in self.scalar_iter(): s, phi1 = ls.scalar_search_armijo(phi, phi(0), derphi(0)) assert_fp_equal(phi1, phi(s), name) assert_armijo(s, phi, err_msg="%s %g" % (name, old_phi0)) ###(LS)### ##RMT not usefull for scalar functions, thus no need for test_scalar_search_rmt? 
def test_line_search_rmt(self): #There is at least 1 function R^20->R to be tested, but this leads to s=None for name, f, fprime, x, p, old_f in self.line_iter(): jac = lambda x: fprime(x) x0 = nl._as_inexact(x) func = lambda z: nl._as_inexact(f(nl._array_like(z, x0))).flatten() x = x0.flatten() jacobian = nl.asjacobian(jac) jacobian.setup(x.copy(), f(x), func) options = {'jacobian': jacobian, 'jac_tol': min(1e-03,1e-03*norm(f(x))), 'amin':1e-8} #print("1: ",f(x),np.shape(fprime(x))) s, dxbar, f_new = ls.scalar_search_rmt(f, x, fprime(x), parameters=options) #print("2: ",p_new, s) assert_fp_equal(f_new, x+s*fprime(x), name) assert_rmt(s, fprime(x), f(x), f_new, jacobian, options, err_msg="%s %g" % name) def test_line_search_bsc(self): #There is at least 1 function R^20->R to be tested, but this leads to s=None for name, f, fprime, x, p, old_f in self.line_iter(): jac = lambda x: fprime(x) x0 = nl._as_inexact(x) func = lambda z: nl._as_inexact(f(nl._array_like(z, x0))).flatten() x = x0.flatten() jacobian = nl.asjacobian(jac) jacobian.setup(x.copy(), f(x), func) options = {'jacobian': jacobian, 'jac_tol': min(1e-03,1e-03*norm(f(x))), 'amin':1e-8} #print("1: ",f(x),np.shape(dp(x))) s, f_new= ls.scalar_search_bsc(func, x, fprime(x), f(x), parameters=options) #print("2: ",p_new, s) assert_fp_equal(f_new, x+s*fprime(x), name) assert_bsc(s, x, fprime(x), func, jacobian, options, err_msg="%s %g" % name) ###(LS)### # -- Generic line searches def test_line_search_wolfe1(self): c = 0 smax = 100 for name, f, fprime, x, p, old_f in self.line_iter(): f0 = f(x) g0 = fprime(x) self.fcount = 0 s, fc, gc, fv, ofv, gv = ls.line_search_wolfe1(f, fprime, x, p, g0, f0, old_f, amax=smax) assert_equal(self.fcount, fc+gc) assert_fp_equal(ofv, f(x)) if s is None: continue assert_fp_equal(fv, f(x + s*p)) assert_array_almost_equal(gv, fprime(x + s*p), decimal=14) if s < smax: c += 1 assert_line_wolfe(x, p, s, f, fprime, err_msg=name) assert_(c > 3) # check that the iterator really works... 
def test_line_search_wolfe2(self): c = 0 smax = 512 for name, f, fprime, x, p, old_f in self.line_iter(): f0 = f(x) g0 = fprime(x) self.fcount = 0 with suppress_warnings() as sup: sup.filter(LineSearchWarning, "The line search algorithm could not find a solution") sup.filter(LineSearchWarning, "The line search algorithm did not converge") s, fc, gc, fv, ofv, gv = ls.line_search_wolfe2(f, fprime, x, p, g0, f0, old_f, amax=smax) assert_equal(self.fcount, fc+gc) assert_fp_equal(ofv, f(x)) assert_fp_equal(fv, f(x + s*p)) if gv is not None: assert_array_almost_equal(gv, fprime(x + s*p), decimal=14) if s < smax: c += 1 assert_line_wolfe(x, p, s, f, fprime, err_msg=name) assert_(c > 3) # check that the iterator really works... def test_line_search_wolfe2_bounds(self): # See gh-7475 # For this f and p, starting at a point on axis 0, the strong Wolfe # condition 2 is met if and only if the step length s satisfies # |x + s| <= c2 * |x| f = lambda x: np.dot(x, x) fp = lambda x: 2 * x p = np.array([1, 0]) # Smallest s satisfying strong Wolfe conditions for these arguments is 30 x = -60 * p c2 = 0.5 s, _, _, _, _, _ = ls.line_search_wolfe2(f, fp, x, p, amax=30, c2=c2) assert_line_wolfe(x, p, s, f, fp) s, _, _, _, _, _ = assert_warns(LineSearchWarning, ls.line_search_wolfe2, f, fp, x, p, amax=29, c2=c2) assert_(s is None) # s=30 will only be tried on the 6th iteration, so this won't converge assert_warns(LineSearchWarning, ls.line_search_wolfe2, f, fp, x, p, c2=c2, maxiter=5) def test_line_search_armijo(self): c = 0 for name, f, fprime, x, p, old_f in self.line_iter(): f0 = f(x) g0 = fprime(x) self.fcount = 0 s, fc, fv = ls.line_search_armijo(f, x, p, g0, f0) c += 1 assert_equal(self.fcount, fc) assert_fp_equal(fv, f(x + s*p)) assert_line_armijo(x, p, s, f, err_msg=name) assert_(c >= 9) def test_armijo_terminate_1(self): count = [0] def phi(s): count[0] += 1 return -s + 0.01*s**2 s, phi1 = ls.scalar_search_armijo(phi, phi(0), -1, alpha0=1) assert_equal(s, 1) 
assert_equal(count[0], 2) assert_armijo(s, phi) def test_wolfe_terminate(self): def phi(s): count[0] += 1 return -s + 0.05*s**2 def derphi(s): count[0] += 1 return -1 + 0.05*2*s for func in [ls.scalar_search_wolfe1, ls.scalar_search_wolfe2]: count = [0] r = func(phi, derphi, phi(0), None, derphi(0)) assert_(r[0] is not None, (r, func)) assert_(count[0] <= 2 + 2, (count, func)) assert_wolfe(r[0], phi, derphi, err_msg=str(func))
true
true
f723ece8e845f677ad57a09ea90a361f54d50c23
12,408
bzl
Python
apple/internal/apple_toolchains.bzl
wendyliga/rules_apple
ac43c1e467564d9df6b3355ff93fcaf224f2c0f9
[ "Apache-2.0" ]
null
null
null
apple/internal/apple_toolchains.bzl
wendyliga/rules_apple
ac43c1e467564d9df6b3355ff93fcaf224f2c0f9
[ "Apache-2.0" ]
null
null
null
apple/internal/apple_toolchains.bzl
wendyliga/rules_apple
ac43c1e467564d9df6b3355ff93fcaf224f2c0f9
[ "Apache-2.0" ]
null
null
null
# Copyright 2021 The Bazel Authors. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Shared toolchain required for processing Apple bundling rules.""" AppleMacToolsToolchainInfo = provider( doc = """ Propagates information about an Apple toolchain to internal bundling rules that use the toolchain. This provider exists as an internal detail for the rules to reference common, executable tools and files used as script templates for the purposes of executing Apple actions. Defined by the `apple_mac_tools_toolchain` rule. This toolchain is for the tools (and support files) for actions that *must* run on a Mac. """, fields = { "dsym_info_plist_template": """\ A `File` referencing a plist template for dSYM bundles. """, "process_and_sign_template": """\ A `File` referencing a template for a shell script to process and sign. """, "resolved_alticonstool": """\ A `struct` from `ctx.resolve_tools` referencing a tool to insert alternate icons entries in the app bundle's `Info.plist`. """, "resolved_bundletool_experimental": """\ A `struct` from `ctx.resolve_tools` referencing an experimental tool to create an Apple bundle by combining the bundling, post-processing, and signing steps into a single action that eliminates the archiving step. """, "resolved_clangrttool": """\ A `struct` from `ctx.resolve_tools` referencing a tool to find all Clang runtime libs linked to a binary. 
""", "resolved_codesigningtool": """\ A `struct` from `ctx.resolve_tools` referencing a tool to select the appropriate signing identity for Apple apps and Apple executable bundles. """, "resolved_dossier_codesigningtool": """\ A `struct` from `ctx.resolve_tools` referencing a tool to generate codesigning dossiers. """, "resolved_environment_plist_tool": """\ A `struct` from `ctx.resolve_tools` referencing a tool for collecting dev environment values. """, "resolved_imported_dynamic_framework_processor": """\ A `struct` from `ctx.resolve_tools` referencing a tool to process an imported dynamic framework such that the given framework only contains the same slices as the app binary, every file belonging to the dynamic framework is copied to a temporary location, and the dynamic framework is codesigned and zipped as a cacheable artifact. """, "resolved_plisttool": """\ A `struct` from `ctx.resolve_tools` referencing a tool to perform plist operations such as variable substitution, merging, and conversion of plist files to binary format. """, "resolved_provisioning_profile_tool": """\ A `struct` from `ctx.resolve_tools` referencing a tool that extracts entitlements from a provisioning profile. """, "resolved_swift_stdlib_tool": """\ A `struct` from `ctx.resolve_tools` referencing a tool that copies and lipos Swift stdlibs required for the target to run. """, "resolved_xctoolrunner": """\ A `struct` from `ctx.resolve_tools` referencing a tool that acts as a wrapper for xcrun actions. """, }, ) AppleXPlatToolsToolchainInfo = provider( doc = """ Propagates information about an Apple toolchain to internal bundling rules that use the toolchain. This provider exists as an internal detail for the rules to reference common, executable tools and files used as script templates for the purposes of executing Apple actions. Defined by the `apple_xplat_tools_toolchain` rule. This toolchain is for the tools (and support files) for actions that can run on any platform, i.e. 
- they do *not* have to run on a Mac. """, fields = { "resolved_bundletool": """\ A `struct` from `ctx.resolve_tools` referencing a tool to create an Apple bundle by taking a list of files/ZIPs and destinations paths to build the directory structure for those files. """, "resolved_versiontool": """\ A `struct` from `ctx.resolve_tools` referencing a tool that acts as a wrapper for xcrun actions. """, }, ) def _shared_attrs(): """Private attributes on every rule to provide access to bundling tools and other file deps.""" return { "_mac_toolchain": attr.label( default = Label("@build_bazel_rules_apple//apple/internal:mac_tools_toolchain"), providers = [[AppleMacToolsToolchainInfo]], ), "_xplat_toolchain": attr.label( default = Label("@build_bazel_rules_apple//apple/internal:xplat_tools_toolchain"), providers = [[AppleXPlatToolsToolchainInfo]], ), } def _resolve_tools_for_executable(*, rule_ctx, attr_name): """Helper macro to resolve executable runfile dependencies across the rule boundary.""" # TODO(b/111036105) Migrate away from this helper and its outputs once ctx.executable works # across rule boundaries. 
executable = getattr(rule_ctx.executable, attr_name) target = getattr(rule_ctx.attr, attr_name) inputs, input_manifests = rule_ctx.resolve_tools(tools = [target]) return struct( executable = executable, inputs = inputs, input_manifests = input_manifests, ) def _apple_mac_tools_toolchain_impl(ctx): return [ AppleMacToolsToolchainInfo( dsym_info_plist_template = ctx.file.dsym_info_plist_template, process_and_sign_template = ctx.file.process_and_sign_template, resolved_alticonstool = _resolve_tools_for_executable( attr_name = "alticonstool", rule_ctx = ctx, ), resolved_bundletool_experimental = _resolve_tools_for_executable( attr_name = "bundletool_experimental", rule_ctx = ctx, ), resolved_codesigningtool = _resolve_tools_for_executable( attr_name = "codesigningtool", rule_ctx = ctx, ), resolved_dossier_codesigningtool = _resolve_tools_for_executable( attr_name = "dossier_codesigningtool", rule_ctx = ctx, ), resolved_clangrttool = _resolve_tools_for_executable( attr_name = "clangrttool", rule_ctx = ctx, ), resolved_environment_plist_tool = _resolve_tools_for_executable( attr_name = "environment_plist_tool", rule_ctx = ctx, ), resolved_imported_dynamic_framework_processor = _resolve_tools_for_executable( attr_name = "imported_dynamic_framework_processor", rule_ctx = ctx, ), resolved_plisttool = _resolve_tools_for_executable( attr_name = "plisttool", rule_ctx = ctx, ), resolved_provisioning_profile_tool = _resolve_tools_for_executable( attr_name = "provisioning_profile_tool", rule_ctx = ctx, ), resolved_swift_stdlib_tool = _resolve_tools_for_executable( attr_name = "swift_stdlib_tool", rule_ctx = ctx, ), resolved_xctoolrunner = _resolve_tools_for_executable( attr_name = "xctoolrunner", rule_ctx = ctx, ), ), DefaultInfo(), ] apple_mac_tools_toolchain = rule( attrs = { "alticonstool": attr.label( cfg = "exec", executable = True, doc = """ A `File` referencing a tool to insert alternate icons entries in the app bundle's `Info.plist`. 
""", ), "bundletool_experimental": attr.label( cfg = "exec", executable = True, doc = """ A `File` referencing an experimental tool to create an Apple bundle by combining the bundling, post-processing, and signing steps into a single action that eliminates the archiving step. """, ), "clangrttool": attr.label( cfg = "exec", executable = True, doc = "A `File` referencing a tool to find all Clang runtime libs linked to a binary.", ), "codesigningtool": attr.label( cfg = "exec", executable = True, doc = "A `File` referencing a tool to assist in signing bundles.", ), "dossier_codesigningtool": attr.label( cfg = "exec", executable = True, doc = "A `File` referencing a tool to assist in generating signing dossiers.", ), "dsym_info_plist_template": attr.label( cfg = "exec", allow_single_file = True, doc = "A `File` referencing a plist template for dSYM bundles.", ), "environment_plist_tool": attr.label( cfg = "exec", executable = True, doc = """ A `File` referencing a tool to collect data from the development environment to be record into final bundles. """, ), "imported_dynamic_framework_processor": attr.label( cfg = "exec", executable = True, doc = """ A `File` referencing a tool to process an imported dynamic framework such that the given framework only contains the same slices as the app binary, every file belonging to the dynamic framework is copied to a temporary location, and the dynamic framework is codesigned and zipped as a cacheable artifact. """, ), "plisttool": attr.label( cfg = "exec", executable = True, doc = """ A `File` referencing a tool to perform plist operations such as variable substitution, merging, and conversion of plist files to binary format. 
""", ), "process_and_sign_template": attr.label( allow_single_file = True, doc = "A `File` referencing a template for a shell script to process and sign.", ), "provisioning_profile_tool": attr.label( cfg = "exec", executable = True, doc = """ A `File` referencing a tool that extracts entitlements from a provisioning profile. """, ), "swift_stdlib_tool": attr.label( cfg = "exec", executable = True, doc = """ A `File` referencing a tool that copies and lipos Swift stdlibs required for the target to run. """, ), "xctoolrunner": attr.label( cfg = "exec", executable = True, doc = "A `File` referencing a tool that acts as a wrapper for xcrun actions.", ), }, doc = """Represents an Apple support toolchain for tools that must run on a Mac""", implementation = _apple_mac_tools_toolchain_impl, ) def _apple_xplat_tools_toolchain_impl(ctx): return [ AppleXPlatToolsToolchainInfo( resolved_bundletool = _resolve_tools_for_executable( attr_name = "bundletool", rule_ctx = ctx, ), resolved_versiontool = _resolve_tools_for_executable( attr_name = "versiontool", rule_ctx = ctx, ), ), DefaultInfo(), ] apple_xplat_tools_toolchain = rule( attrs = { "bundletool": attr.label( cfg = "exec", executable = True, doc = """ A `File` referencing a tool to create an Apple bundle by taking a list of files/ZIPs and destination paths to build the directory structure for those files. """, ), "versiontool": attr.label( cfg = "exec", executable = True, doc = """ A `File` referencing a tool for extracting version info from builds. """, ), }, doc = """Represents an Apple support toolchain for tools that can run on any platform""", implementation = _apple_xplat_tools_toolchain_impl, ) # Define the loadable module that lists the exported symbols in this file. apple_toolchain_utils = struct( shared_attrs = _shared_attrs, )
39.390476
100
0.651273
AppleMacToolsToolchainInfo = provider( doc = """ Propagates information about an Apple toolchain to internal bundling rules that use the toolchain. This provider exists as an internal detail for the rules to reference common, executable tools and files used as script templates for the purposes of executing Apple actions. Defined by the `apple_mac_tools_toolchain` rule. This toolchain is for the tools (and support files) for actions that *must* run on a Mac. """, fields = { "dsym_info_plist_template": """\ A `File` referencing a plist template for dSYM bundles. """, "process_and_sign_template": """\ A `File` referencing a template for a shell script to process and sign. """, "resolved_alticonstool": """\ A `struct` from `ctx.resolve_tools` referencing a tool to insert alternate icons entries in the app bundle's `Info.plist`. """, "resolved_bundletool_experimental": """\ A `struct` from `ctx.resolve_tools` referencing an experimental tool to create an Apple bundle by combining the bundling, post-processing, and signing steps into a single action that eliminates the archiving step. """, "resolved_clangrttool": """\ A `struct` from `ctx.resolve_tools` referencing a tool to find all Clang runtime libs linked to a binary. """, "resolved_codesigningtool": """\ A `struct` from `ctx.resolve_tools` referencing a tool to select the appropriate signing identity for Apple apps and Apple executable bundles. """, "resolved_dossier_codesigningtool": """\ A `struct` from `ctx.resolve_tools` referencing a tool to generate codesigning dossiers. """, "resolved_environment_plist_tool": """\ A `struct` from `ctx.resolve_tools` referencing a tool for collecting dev environment values. 
""", "resolved_imported_dynamic_framework_processor": """\ A `struct` from `ctx.resolve_tools` referencing a tool to process an imported dynamic framework such that the given framework only contains the same slices as the app binary, every file belonging to the dynamic framework is copied to a temporary location, and the dynamic framework is codesigned and zipped as a cacheable artifact. """, "resolved_plisttool": """\ A `struct` from `ctx.resolve_tools` referencing a tool to perform plist operations such as variable substitution, merging, and conversion of plist files to binary format. """, "resolved_provisioning_profile_tool": """\ A `struct` from `ctx.resolve_tools` referencing a tool that extracts entitlements from a provisioning profile. """, "resolved_swift_stdlib_tool": """\ A `struct` from `ctx.resolve_tools` referencing a tool that copies and lipos Swift stdlibs required for the target to run. """, "resolved_xctoolrunner": """\ A `struct` from `ctx.resolve_tools` referencing a tool that acts as a wrapper for xcrun actions. """, }, ) AppleXPlatToolsToolchainInfo = provider( doc = """ Propagates information about an Apple toolchain to internal bundling rules that use the toolchain. This provider exists as an internal detail for the rules to reference common, executable tools and files used as script templates for the purposes of executing Apple actions. Defined by the `apple_xplat_tools_toolchain` rule. This toolchain is for the tools (and support files) for actions that can run on any platform, i.e. - they do *not* have to run on a Mac. """, fields = { "resolved_bundletool": """\ A `struct` from `ctx.resolve_tools` referencing a tool to create an Apple bundle by taking a list of files/ZIPs and destinations paths to build the directory structure for those files. """, "resolved_versiontool": """\ A `struct` from `ctx.resolve_tools` referencing a tool that acts as a wrapper for xcrun actions. 
""", }, ) def _shared_attrs(): return { "_mac_toolchain": attr.label( default = Label("@build_bazel_rules_apple//apple/internal:mac_tools_toolchain"), providers = [[AppleMacToolsToolchainInfo]], ), "_xplat_toolchain": attr.label( default = Label("@build_bazel_rules_apple//apple/internal:xplat_tools_toolchain"), providers = [[AppleXPlatToolsToolchainInfo]], ), } def _resolve_tools_for_executable(*, rule_ctx, attr_name): # TODO(b/111036105) Migrate away from this helper and its outputs once ctx.executable works # across rule boundaries. executable = getattr(rule_ctx.executable, attr_name) target = getattr(rule_ctx.attr, attr_name) inputs, input_manifests = rule_ctx.resolve_tools(tools = [target]) return struct( executable = executable, inputs = inputs, input_manifests = input_manifests, ) def _apple_mac_tools_toolchain_impl(ctx): return [ AppleMacToolsToolchainInfo( dsym_info_plist_template = ctx.file.dsym_info_plist_template, process_and_sign_template = ctx.file.process_and_sign_template, resolved_alticonstool = _resolve_tools_for_executable( attr_name = "alticonstool", rule_ctx = ctx, ), resolved_bundletool_experimental = _resolve_tools_for_executable( attr_name = "bundletool_experimental", rule_ctx = ctx, ), resolved_codesigningtool = _resolve_tools_for_executable( attr_name = "codesigningtool", rule_ctx = ctx, ), resolved_dossier_codesigningtool = _resolve_tools_for_executable( attr_name = "dossier_codesigningtool", rule_ctx = ctx, ), resolved_clangrttool = _resolve_tools_for_executable( attr_name = "clangrttool", rule_ctx = ctx, ), resolved_environment_plist_tool = _resolve_tools_for_executable( attr_name = "environment_plist_tool", rule_ctx = ctx, ), resolved_imported_dynamic_framework_processor = _resolve_tools_for_executable( attr_name = "imported_dynamic_framework_processor", rule_ctx = ctx, ), resolved_plisttool = _resolve_tools_for_executable( attr_name = "plisttool", rule_ctx = ctx, ), resolved_provisioning_profile_tool = _resolve_tools_for_executable( 
attr_name = "provisioning_profile_tool", rule_ctx = ctx, ), resolved_swift_stdlib_tool = _resolve_tools_for_executable( attr_name = "swift_stdlib_tool", rule_ctx = ctx, ), resolved_xctoolrunner = _resolve_tools_for_executable( attr_name = "xctoolrunner", rule_ctx = ctx, ), ), DefaultInfo(), ] apple_mac_tools_toolchain = rule( attrs = { "alticonstool": attr.label( cfg = "exec", executable = True, doc = """ A `File` referencing a tool to insert alternate icons entries in the app bundle's `Info.plist`. """, ), "bundletool_experimental": attr.label( cfg = "exec", executable = True, doc = """ A `File` referencing an experimental tool to create an Apple bundle by combining the bundling, post-processing, and signing steps into a single action that eliminates the archiving step. """, ), "clangrttool": attr.label( cfg = "exec", executable = True, doc = "A `File` referencing a tool to find all Clang runtime libs linked to a binary.", ), "codesigningtool": attr.label( cfg = "exec", executable = True, doc = "A `File` referencing a tool to assist in signing bundles.", ), "dossier_codesigningtool": attr.label( cfg = "exec", executable = True, doc = "A `File` referencing a tool to assist in generating signing dossiers.", ), "dsym_info_plist_template": attr.label( cfg = "exec", allow_single_file = True, doc = "A `File` referencing a plist template for dSYM bundles.", ), "environment_plist_tool": attr.label( cfg = "exec", executable = True, doc = """ A `File` referencing a tool to collect data from the development environment to be record into final bundles. """, ), "imported_dynamic_framework_processor": attr.label( cfg = "exec", executable = True, doc = """ A `File` referencing a tool to process an imported dynamic framework such that the given framework only contains the same slices as the app binary, every file belonging to the dynamic framework is copied to a temporary location, and the dynamic framework is codesigned and zipped as a cacheable artifact. 
""", ), "plisttool": attr.label( cfg = "exec", executable = True, doc = """ A `File` referencing a tool to perform plist operations such as variable substitution, merging, and conversion of plist files to binary format. """, ), "process_and_sign_template": attr.label( allow_single_file = True, doc = "A `File` referencing a template for a shell script to process and sign.", ), "provisioning_profile_tool": attr.label( cfg = "exec", executable = True, doc = """ A `File` referencing a tool that extracts entitlements from a provisioning profile. """, ), "swift_stdlib_tool": attr.label( cfg = "exec", executable = True, doc = """ A `File` referencing a tool that copies and lipos Swift stdlibs required for the target to run. """, ), "xctoolrunner": attr.label( cfg = "exec", executable = True, doc = "A `File` referencing a tool that acts as a wrapper for xcrun actions.", ), }, doc = """Represents an Apple support toolchain for tools that must run on a Mac""", implementation = _apple_mac_tools_toolchain_impl, ) def _apple_xplat_tools_toolchain_impl(ctx): return [ AppleXPlatToolsToolchainInfo( resolved_bundletool = _resolve_tools_for_executable( attr_name = "bundletool", rule_ctx = ctx, ), resolved_versiontool = _resolve_tools_for_executable( attr_name = "versiontool", rule_ctx = ctx, ), ), DefaultInfo(), ] apple_xplat_tools_toolchain = rule( attrs = { "bundletool": attr.label( cfg = "exec", executable = True, doc = """ A `File` referencing a tool to create an Apple bundle by taking a list of files/ZIPs and destination paths to build the directory structure for those files. """, ), "versiontool": attr.label( cfg = "exec", executable = True, doc = """ A `File` referencing a tool for extracting version info from builds. """, ), }, doc = """Represents an Apple support toolchain for tools that can run on any platform""", implementation = _apple_xplat_tools_toolchain_impl, ) apple_toolchain_utils = struct( shared_attrs = _shared_attrs, )
true
true
f723ee4aca2ba51e913883657260206b3974214b
597
py
Python
psaw/decorators.py
LeartS/PSAW
fd0faac7205e10cc6fcb3654de8e2b23a0d79bf2
[ "MIT" ]
null
null
null
psaw/decorators.py
LeartS/PSAW
fd0faac7205e10cc6fcb3654de8e2b23a0d79bf2
[ "MIT" ]
null
null
null
psaw/decorators.py
LeartS/PSAW
fd0faac7205e10cc6fcb3654de8e2b23a0d79bf2
[ "MIT" ]
null
null
null
from .exceptions import PSAWException def requires_private_key(method): def wrapper(self, *args, **kwargs): if not self.private_key: raise PSAWException( 'The {} method requires a private key'.format(method.__name__)) return method(self, *args, **kwargs) return wrapper def requires_api_key(method): def wrapper(self, *args, **kwargs): if not self.api_key: raise PSAWException( 'The {} method requires an API key'.format(method.__name__)) return method(self, *args, **kwargs) return wrapper
33.166667
79
0.633166
from .exceptions import PSAWException def requires_private_key(method): def wrapper(self, *args, **kwargs): if not self.private_key: raise PSAWException( 'The {} method requires a private key'.format(method.__name__)) return method(self, *args, **kwargs) return wrapper def requires_api_key(method): def wrapper(self, *args, **kwargs): if not self.api_key: raise PSAWException( 'The {} method requires an API key'.format(method.__name__)) return method(self, *args, **kwargs) return wrapper
true
true
f723ee57e5b3ea5abd16c6bfccb377f6f8af7698
532
py
Python
stats/attendance.py
lxchen2019/Python-Baseball
0498830e92c67de8221aac1777651ae141df0ec6
[ "MIT" ]
null
null
null
stats/attendance.py
lxchen2019/Python-Baseball
0498830e92c67de8221aac1777651ae141df0ec6
[ "MIT" ]
null
null
null
stats/attendance.py
lxchen2019/Python-Baseball
0498830e92c67de8221aac1777651ae141df0ec6
[ "MIT" ]
null
null
null
import pandas as pd import matplotlib.pyplot as plt from data import games attendance = games.loc[(games['type'] == 'info') & (games['multi2'] == 'attendance'), ['year', 'multi3']] attendance.columns = ['year', 'attendance'] attendance.loc[:, 'attendance'] = pd.to_numeric(attendance.loc[:, 'attendance']) attendance.plot(x='year', y='attendance', figsize = (15, 7), kind = 'bar') plt.xlabel('Year') plt.ylabel('Attendance') plt.axhline(y=attendance['attendance'].mean(), label='Mean', linestyle='--', color='green') plt.show()
33.25
105
0.680451
import pandas as pd import matplotlib.pyplot as plt from data import games attendance = games.loc[(games['type'] == 'info') & (games['multi2'] == 'attendance'), ['year', 'multi3']] attendance.columns = ['year', 'attendance'] attendance.loc[:, 'attendance'] = pd.to_numeric(attendance.loc[:, 'attendance']) attendance.plot(x='year', y='attendance', figsize = (15, 7), kind = 'bar') plt.xlabel('Year') plt.ylabel('Attendance') plt.axhline(y=attendance['attendance'].mean(), label='Mean', linestyle='--', color='green') plt.show()
true
true
f723ef0ead92ce0867c0219f60d16635c90e2cd6
2,755
py
Python
ch1/recipe4/load_save_model.py
xinglu/Tensorflow-2.0-Computer-Vision-Cookbook
d02c57d566f9df8b5980d58fc51a1194faef442c
[ "MIT" ]
1
2021-11-27T05:44:01.000Z
2021-11-27T05:44:01.000Z
ch1/recipe4/load_save_model.py
ArjunVarma39/Tensorflow-2.0-Computer-Vision-Cookbook
92ea6713f664cff9eccaaccea8ac756f808e2066
[ "MIT" ]
null
null
null
ch1/recipe4/load_save_model.py
ArjunVarma39/Tensorflow-2.0-Computer-Vision-Cookbook
92ea6713f664cff9eccaaccea8ac756f808e2066
[ "MIT" ]
1
2021-01-21T04:36:33.000Z
2021-01-21T04:36:33.000Z
import numpy as np from sklearn.model_selection import train_test_split from sklearn.preprocessing import LabelBinarizer from tensorflow.keras.datasets import mnist from tensorflow.keras.layers import * from tensorflow.keras.models import * def load_data(): (X_train, y_train), (X_test, y_test) = mnist.load_data() # Normalize data. X_train = X_train.astype('float32') / 255.0 X_test = X_test.astype('float32') / 255.0 # Reshape grayscale to include channel dimension. X_train = np.expand_dims(X_train, axis=3) X_test = np.expand_dims(X_test, axis=3) # Process labels. label_binarizer = LabelBinarizer() y_train = label_binarizer.fit_transform(y_train) y_test = label_binarizer.fit_transform(y_test) return X_train, y_train, X_test, y_test def build_network(): input_layer = Input(shape=(28, 28, 1), name='input_layer') convolution_1 = Conv2D(kernel_size=(2, 2), padding='same', strides=(2, 2), filters=32, name='convolution_1')(input_layer) activation_1 = ReLU(name='activation_1')(convolution_1) batch_normalization_1 = BatchNormalization(name='batch_normalization_1')(activation_1) pooling_1 = MaxPooling2D(pool_size=(2, 2), strides=(1, 1), padding='same', name='pooling_1')(batch_normalization_1) dropout = Dropout(rate=0.5, name='dropout')(pooling_1) flatten = Flatten(name='flatten')(dropout) dense_1 = Dense(units=128, name='dense_1')(flatten) activation_2 = ReLU(name='activation_2')(dense_1) dense_2 = Dense(units=10, name='dense_2')(activation_2) output = Softmax(name='output')(dense_2) network = Model(inputs=input_layer, outputs=output, name='my_model') return network def evaluate(model, X_test, y_test): _, accuracy = model.evaluate(X_test, y_test, verbose=0) print(f'Accuracy: {accuracy}') print('Loading and pre-processing data.') X_train, y_train, X_test, y_test = load_data() # Split dataset. X_train, X_valid, y_train, y_valid = train_test_split(X_train, y_train, train_size=0.8) # Build network. model = build_network() # Compile and train model. 
print('Training network...') model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy']) model.fit(X_train, y_train, validation_data=(X_valid, y_valid), epochs=40, batch_size=1024) print('Saving model and weights as HDF5.') model.save('model_and_weights.hdf5') print('Loading model and weights as HDF5.') loaded_model = load_model('model_and_weights.hdf5') print('Evaluating using loaded model.') evaluate(loaded_model, X_test, y_test)
34.012346
91
0.684211
import numpy as np from sklearn.model_selection import train_test_split from sklearn.preprocessing import LabelBinarizer from tensorflow.keras.datasets import mnist from tensorflow.keras.layers import * from tensorflow.keras.models import * def load_data(): (X_train, y_train), (X_test, y_test) = mnist.load_data() X_train = X_train.astype('float32') / 255.0 X_test = X_test.astype('float32') / 255.0 X_train = np.expand_dims(X_train, axis=3) X_test = np.expand_dims(X_test, axis=3) label_binarizer = LabelBinarizer() y_train = label_binarizer.fit_transform(y_train) y_test = label_binarizer.fit_transform(y_test) return X_train, y_train, X_test, y_test def build_network(): input_layer = Input(shape=(28, 28, 1), name='input_layer') convolution_1 = Conv2D(kernel_size=(2, 2), padding='same', strides=(2, 2), filters=32, name='convolution_1')(input_layer) activation_1 = ReLU(name='activation_1')(convolution_1) batch_normalization_1 = BatchNormalization(name='batch_normalization_1')(activation_1) pooling_1 = MaxPooling2D(pool_size=(2, 2), strides=(1, 1), padding='same', name='pooling_1')(batch_normalization_1) dropout = Dropout(rate=0.5, name='dropout')(pooling_1) flatten = Flatten(name='flatten')(dropout) dense_1 = Dense(units=128, name='dense_1')(flatten) activation_2 = ReLU(name='activation_2')(dense_1) dense_2 = Dense(units=10, name='dense_2')(activation_2) output = Softmax(name='output')(dense_2) network = Model(inputs=input_layer, outputs=output, name='my_model') return network def evaluate(model, X_test, y_test): _, accuracy = model.evaluate(X_test, y_test, verbose=0) print(f'Accuracy: {accuracy}') print('Loading and pre-processing data.') X_train, y_train, X_test, y_test = load_data() X_train, X_valid, y_train, y_valid = train_test_split(X_train, y_train, train_size=0.8) model = build_network() print('Training network...') model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy']) model.fit(X_train, y_train, validation_data=(X_valid, 
y_valid), epochs=40, batch_size=1024) print('Saving model and weights as HDF5.') model.save('model_and_weights.hdf5') print('Loading model and weights as HDF5.') loaded_model = load_model('model_and_weights.hdf5') print('Evaluating using loaded model.') evaluate(loaded_model, X_test, y_test)
true
true
f723f0b24682ef7fb196a22b5764bea459ee3007
1,103
py
Python
String-Algorithms/String-Algorithms-master/One Edit Away/oneEditAway.py
SrijaniSom/dsa-code-store
148292c8f963214629f271ec8601e73d3d0e145e
[ "MIT" ]
3
2021-02-19T07:09:46.000Z
2021-10-04T10:12:45.000Z
String-Algorithms/String-Algorithms-master/One Edit Away/oneEditAway.py
SrijaniSom/dsa-code-store
148292c8f963214629f271ec8601e73d3d0e145e
[ "MIT" ]
6
2021-02-21T19:35:18.000Z
2021-05-06T11:51:37.000Z
String-Algorithms/String-Algorithms-master/One Edit Away/oneEditAway.py
SrijaniSom/dsa-code-store
148292c8f963214629f271ec8601e73d3d0e145e
[ "MIT" ]
6
2021-02-21T19:28:03.000Z
2021-10-04T03:35:57.000Z
class solution: def oneEditAwayInsert(self,input1,input2): index1 = 0 index2 = 0 while((index2 < len(input2)) and (index1 < len(input1))): if(input1[index1] != input2[index2]): if(index1 != index2): return False index2+=1 else: index1+=1 index2+=1 return True def oneEditAwayReplace(self,input1,input2): flag = False for i in range(len(input1)): if(input2[i]!=input1[i]): if(flag): return False flag = True return True def oneEditAway(self,input1,input2): if(len(input1)==len(input2)): return self.oneEditAwayReplace(input1,input2) elif(len(input1)+1==len(input2)): return self.oneEditAwayInsert(input1,input2) elif(len(input1)-1==len(input2)): return self.oneEditAwayInsert(input2,input1) return False input1 = input() input2 = input() sol = solution() print(sol.oneEditAway(input1,input2))
29.810811
65
0.537625
class solution: def oneEditAwayInsert(self,input1,input2): index1 = 0 index2 = 0 while((index2 < len(input2)) and (index1 < len(input1))): if(input1[index1] != input2[index2]): if(index1 != index2): return False index2+=1 else: index1+=1 index2+=1 return True def oneEditAwayReplace(self,input1,input2): flag = False for i in range(len(input1)): if(input2[i]!=input1[i]): if(flag): return False flag = True return True def oneEditAway(self,input1,input2): if(len(input1)==len(input2)): return self.oneEditAwayReplace(input1,input2) elif(len(input1)+1==len(input2)): return self.oneEditAwayInsert(input1,input2) elif(len(input1)-1==len(input2)): return self.oneEditAwayInsert(input2,input1) return False input1 = input() input2 = input() sol = solution() print(sol.oneEditAway(input1,input2))
true
true
f723f2da7cb69cc97d7d2508483485aef439d3c6
5,384
py
Python
1-50/p13.py
YiWeiShen/Project-Euler-Hints
a79cacab075dd98d393516f083aaa7ffc6115a06
[ "MIT" ]
1
2019-02-25T13:00:31.000Z
2019-02-25T13:00:31.000Z
1-50/p13.py
YiWeiShen/Project-Euler-Hints
a79cacab075dd98d393516f083aaa7ffc6115a06
[ "MIT" ]
null
null
null
1-50/p13.py
YiWeiShen/Project-Euler-Hints
a79cacab075dd98d393516f083aaa7ffc6115a06
[ "MIT" ]
null
null
null
l = [] a = '37107287533902102798797998220837590246510135740250\ 46376937677490009712648124896970078050417018260538\ 74324986199524741059474233309513058123726617309629\ 91942213363574161572522430563301811072406154908250\ 23067588207539346171171980310421047513778063246676\ 89261670696623633820136378418383684178734361726757\ 28112879812849979408065481931592621691275889832738\ 44274228917432520321923589422876796487670272189318\ 47451445736001306439091167216856844588711603153276\ 70386486105843025439939619828917593665686757934951\ 62176457141856560629502157223196586755079324193331\ 64906352462741904929101432445813822663347944758178\ 92575867718337217661963751590579239728245598838407\ 58203565325359399008402633568948830189458628227828\ 80181199384826282014278194139940567587151170094390\ 35398664372827112653829987240784473053190104293586\ 86515506006295864861532075273371959191420517255829\ 71693888707715466499115593487603532921714970056938\ 54370070576826684624621495650076471787294438377604\ 53282654108756828443191190634694037855217779295145\ 36123272525000296071075082563815656710885258350721\ 45876576172410976447339110607218265236877223636045\ 17423706905851860660448207621209813287860733969412\ 81142660418086830619328460811191061556940512689692\ 51934325451728388641918047049293215058642563049483\ 62467221648435076201727918039944693004732956340691\ 15732444386908125794514089057706229429197107928209\ 55037687525678773091862540744969844508330393682126\ 18336384825330154686196124348767681297534375946515\ 80386287592878490201521685554828717201219257766954\ 78182833757993103614740356856449095527097864797581\ 16726320100436897842553539920931837441497806860984\ 48403098129077791799088218795327364475675590848030\ 87086987551392711854517078544161852424320693150332\ 59959406895756536782107074926966537676326235447210\ 69793950679652694742597709739166693763042633987085\ 41052684708299085211399427365734116182760315001271\ 65378607361501080857009149939512557028198746004375\ 
35829035317434717326932123578154982629742552737307\ 94953759765105305946966067683156574377167401875275\ 88902802571733229619176668713819931811048770190271\ 25267680276078003013678680992525463401061632866526\ 36270218540497705585629946580636237993140746255962\ 24074486908231174977792365466257246923322810917141\ 91430288197103288597806669760892938638285025333403\ 34413065578016127815921815005561868836468420090470\ 23053081172816430487623791969842487255036638784583\ 11487696932154902810424020138335124462181441773470\ 63783299490636259666498587618221225225512486764533\ 67720186971698544312419572409913959008952310058822\ 95548255300263520781532296796249481641953868218774\ 76085327132285723110424803456124867697064507995236\ 37774242535411291684276865538926205024910326572967\ 23701913275725675285653248258265463092207058596522\ 29798860272258331913126375147341994889534765745501\ 18495701454879288984856827726077713721403798879715\ 38298203783031473527721580348144513491373226651381\ 34829543829199918180278916522431027392251122869539\ 40957953066405232632538044100059654939159879593635\ 29746152185502371307642255121183693803580388584903\ 41698116222072977186158236678424689157993532961922\ 62467957194401269043877107275048102390895523597457\ 23189706772547915061505504953922979530901129967519\ 86188088225875314529584099251203829009407770775672\ 11306739708304724483816533873502340845647058077308\ 82959174767140363198008187129011875491310547126581\ 97623331044818386269515456334926366572897563400500\ 42846280183517070527831839425882145521227251250327\ 55121603546981200581762165212827652751691296897789\ 32238195734329339946437501907836945765883352399886\ 75506164965184775180738168837861091527357929701337\ 62177842752192623401942399639168044983993173312731\ 32924185707147349566916674687634660915035914677504\ 99518671430235219628894890102423325116913619626622\ 73267460800591547471830798392868535206946944540724\ 76841822524674417161514036427982273348055556214818\ 
97142617910342598647204516893989422179826088076852\ 87783646182799346313767754307809363333018982642090\ 10848802521674670883215120185883543223812876952786\ 71329612474782464538636993009049310363619763878039\ 62184073572399794223406235393808339651327408011116\ 66627891981488087797941876876144230030984490851411\ 60661826293682836764744779239180335110989069790714\ 85786944089552990653640447425576083659976645795096\ 66024396409905389607120198219976047599490197230297\ 64913982680032973156037120041377903785566085089252\ 16730939319872750275468906903707539413042652315011\ 94809377245048795150954100921645863754710598436791\ 78639167021187492431995700641917969777599028300699\ 15368713711936614952811305876380278410754449733078\ 40789923115535562561142322423255033685442488917353\ 44889911501440648020369068063960672322193204149535\ 41503128880339536053299340368006977710650566631954\ 81234880673210146739058568557934581403627822703280\ 82616570773948327592232845941706525094512325230608\ 22918802058777319719839450180888072429661980811197\ 77158542502016545090413245809786882778948721859617\ 72107838435069186155435662884062257473692284509516\ 20849603980134001723930671666823555245252804609722\ 53503534226472524250874054075591789781264330331690' if __name__ == '__main__': for x in range(0, 5000, 50): l.append(''.join(list(a)[x:x+50])) sum = 0 for x in l: sum += int(x) print(sum)
47.646018
56
0.944279
l = [] a = '37107287533902102798797998220837590246510135740250\ 46376937677490009712648124896970078050417018260538\ 74324986199524741059474233309513058123726617309629\ 91942213363574161572522430563301811072406154908250\ 23067588207539346171171980310421047513778063246676\ 89261670696623633820136378418383684178734361726757\ 28112879812849979408065481931592621691275889832738\ 44274228917432520321923589422876796487670272189318\ 47451445736001306439091167216856844588711603153276\ 70386486105843025439939619828917593665686757934951\ 62176457141856560629502157223196586755079324193331\ 64906352462741904929101432445813822663347944758178\ 92575867718337217661963751590579239728245598838407\ 58203565325359399008402633568948830189458628227828\ 80181199384826282014278194139940567587151170094390\ 35398664372827112653829987240784473053190104293586\ 86515506006295864861532075273371959191420517255829\ 71693888707715466499115593487603532921714970056938\ 54370070576826684624621495650076471787294438377604\ 53282654108756828443191190634694037855217779295145\ 36123272525000296071075082563815656710885258350721\ 45876576172410976447339110607218265236877223636045\ 17423706905851860660448207621209813287860733969412\ 81142660418086830619328460811191061556940512689692\ 51934325451728388641918047049293215058642563049483\ 62467221648435076201727918039944693004732956340691\ 15732444386908125794514089057706229429197107928209\ 55037687525678773091862540744969844508330393682126\ 18336384825330154686196124348767681297534375946515\ 80386287592878490201521685554828717201219257766954\ 78182833757993103614740356856449095527097864797581\ 16726320100436897842553539920931837441497806860984\ 48403098129077791799088218795327364475675590848030\ 87086987551392711854517078544161852424320693150332\ 59959406895756536782107074926966537676326235447210\ 69793950679652694742597709739166693763042633987085\ 41052684708299085211399427365734116182760315001271\ 65378607361501080857009149939512557028198746004375\ 
35829035317434717326932123578154982629742552737307\ 94953759765105305946966067683156574377167401875275\ 88902802571733229619176668713819931811048770190271\ 25267680276078003013678680992525463401061632866526\ 36270218540497705585629946580636237993140746255962\ 24074486908231174977792365466257246923322810917141\ 91430288197103288597806669760892938638285025333403\ 34413065578016127815921815005561868836468420090470\ 23053081172816430487623791969842487255036638784583\ 11487696932154902810424020138335124462181441773470\ 63783299490636259666498587618221225225512486764533\ 67720186971698544312419572409913959008952310058822\ 95548255300263520781532296796249481641953868218774\ 76085327132285723110424803456124867697064507995236\ 37774242535411291684276865538926205024910326572967\ 23701913275725675285653248258265463092207058596522\ 29798860272258331913126375147341994889534765745501\ 18495701454879288984856827726077713721403798879715\ 38298203783031473527721580348144513491373226651381\ 34829543829199918180278916522431027392251122869539\ 40957953066405232632538044100059654939159879593635\ 29746152185502371307642255121183693803580388584903\ 41698116222072977186158236678424689157993532961922\ 62467957194401269043877107275048102390895523597457\ 23189706772547915061505504953922979530901129967519\ 86188088225875314529584099251203829009407770775672\ 11306739708304724483816533873502340845647058077308\ 82959174767140363198008187129011875491310547126581\ 97623331044818386269515456334926366572897563400500\ 42846280183517070527831839425882145521227251250327\ 55121603546981200581762165212827652751691296897789\ 32238195734329339946437501907836945765883352399886\ 75506164965184775180738168837861091527357929701337\ 62177842752192623401942399639168044983993173312731\ 32924185707147349566916674687634660915035914677504\ 99518671430235219628894890102423325116913619626622\ 73267460800591547471830798392868535206946944540724\ 76841822524674417161514036427982273348055556214818\ 
97142617910342598647204516893989422179826088076852\ 87783646182799346313767754307809363333018982642090\ 10848802521674670883215120185883543223812876952786\ 71329612474782464538636993009049310363619763878039\ 62184073572399794223406235393808339651327408011116\ 66627891981488087797941876876144230030984490851411\ 60661826293682836764744779239180335110989069790714\ 85786944089552990653640447425576083659976645795096\ 66024396409905389607120198219976047599490197230297\ 64913982680032973156037120041377903785566085089252\ 16730939319872750275468906903707539413042652315011\ 94809377245048795150954100921645863754710598436791\ 78639167021187492431995700641917969777599028300699\ 15368713711936614952811305876380278410754449733078\ 40789923115535562561142322423255033685442488917353\ 44889911501440648020369068063960672322193204149535\ 41503128880339536053299340368006977710650566631954\ 81234880673210146739058568557934581403627822703280\ 82616570773948327592232845941706525094512325230608\ 22918802058777319719839450180888072429661980811197\ 77158542502016545090413245809786882778948721859617\ 72107838435069186155435662884062257473692284509516\ 20849603980134001723930671666823555245252804609722\ 53503534226472524250874054075591789781264330331690' if __name__ == '__main__': for x in range(0, 5000, 50): l.append(''.join(list(a)[x:x+50])) sum = 0 for x in l: sum += int(x) print(sum)
true
true
f723f3fb8c08361f204ed3b00ea1c886a4107a59
1,457
py
Python
botutils/searchforlinks.py
yashprakash13/Honeysuckle
d8adb83a63318a8c4994d18aea6fd28116b46f4e
[ "MIT" ]
2
2020-08-11T17:23:05.000Z
2021-02-20T04:02:33.000Z
botutils/searchforlinks.py
yashprakash13/Honeysuckle
d8adb83a63318a8c4994d18aea6fd28116b46f4e
[ "MIT" ]
null
null
null
botutils/searchforlinks.py
yashprakash13/Honeysuckle
d8adb83a63318a8c4994d18aea6fd28116b46f4e
[ "MIT" ]
1
2020-08-15T05:29:14.000Z
2020-08-15T05:29:14.000Z
import re import requests from bs4 import BeautifulSoup from botutils.constants import IS_URL_REGEX def get_ffn_url_from_query(query): ffn_list = [] href = [] url = 'https://www.google.com/search?q=' + \ query+"+fanfiction" page = requests.get(url) soup = BeautifulSoup(page.content, 'html.parser') found = soup.findAll('a') for link in found: href.append(link['href']) for i in range(len(href)): if re.search(r"fanfiction.net/s/", href[i]) is not None: ffn_list.append(href[i]) if not ffn_list: return None ffn_url = re.search(IS_URL_REGEX, ffn_list[0]) return ffn_url.group(0) def get_ao3_url_from_query(query): ao3_list = [] href = [] url = 'https://www.google.com/search?q=' + \ query+"+archiveofourown" page = requests.get(url) soup = BeautifulSoup(page.content, 'html.parser') found = soup.findAll('a') for link in found: href.append(link['href']) for i in range(len(href)): # append /works/ first if re.search(r"\barchiveofourown.org/works/\b", href[i]) is not None: ao3_list.append(href[i]) # append /chapters/ next if re.search(r"\barchiveofourown.org/chapters/\b", href[i]) is not None: ao3_list.append(href[i]) if not ao3_list: return None ao3_url = re.search(IS_URL_REGEX, ao3_list[0]) return ao3_url.group(0)
22.075758
80
0.614276
import re import requests from bs4 import BeautifulSoup from botutils.constants import IS_URL_REGEX def get_ffn_url_from_query(query): ffn_list = [] href = [] url = 'https://www.google.com/search?q=' + \ query+"+fanfiction" page = requests.get(url) soup = BeautifulSoup(page.content, 'html.parser') found = soup.findAll('a') for link in found: href.append(link['href']) for i in range(len(href)): if re.search(r"fanfiction.net/s/", href[i]) is not None: ffn_list.append(href[i]) if not ffn_list: return None ffn_url = re.search(IS_URL_REGEX, ffn_list[0]) return ffn_url.group(0) def get_ao3_url_from_query(query): ao3_list = [] href = [] url = 'https://www.google.com/search?q=' + \ query+"+archiveofourown" page = requests.get(url) soup = BeautifulSoup(page.content, 'html.parser') found = soup.findAll('a') for link in found: href.append(link['href']) for i in range(len(href)): if re.search(r"\barchiveofourown.org/works/\b", href[i]) is not None: ao3_list.append(href[i]) if re.search(r"\barchiveofourown.org/chapters/\b", href[i]) is not None: ao3_list.append(href[i]) if not ao3_list: return None ao3_url = re.search(IS_URL_REGEX, ao3_list[0]) return ao3_url.group(0)
true
true
f723f47402f819e72e9efeddd056d1ccea3fb2f6
1,107
py
Python
bach/tests/unit/bach/test_series_json.py
objectiv/objectiv-analytics
86ec1508f71c2d61ea7d67479800e4dc417a46e1
[ "Apache-2.0" ]
23
2021-11-10T21:37:42.000Z
2022-03-30T11:46:19.000Z
bach/tests/unit/bach/test_series_json.py
objectiv/objectiv-analytics
86ec1508f71c2d61ea7d67479800e4dc417a46e1
[ "Apache-2.0" ]
163
2021-11-10T10:11:26.000Z
2022-03-31T16:04:27.000Z
bach/tests/unit/bach/test_series_json.py
objectiv/objectiv-analytics
86ec1508f71c2d61ea7d67479800e4dc417a46e1
[ "Apache-2.0" ]
null
null
null
""" Copyright 2022 Objectiv B.V. """ import pytest from bach.series.series_json import JsonBigQueryAccessorImpl from tests.unit.bach.util import get_fake_df @pytest.mark.skip_postgres def test_bq_get_slice_partial_expr(dialect): # Here we test the _get_slice_partial_expr function of the BigQuery specific JsonBigQueryAccessor. So # skipping all other dialects df = get_fake_df( dialect=dialect, index_names=['i'], data_names=['a'], dtype='json' ) jbqa = JsonBigQueryAccessorImpl(df.a) assert jbqa._get_slice_partial_expr(None, True).to_sql(dialect) == '0' assert jbqa._get_slice_partial_expr(None, False).to_sql(dialect) == '9223372036854775807' assert jbqa._get_slice_partial_expr(5, False).to_sql(dialect) == '5' assert jbqa._get_slice_partial_expr(5, True).to_sql(dialect) == '5' assert jbqa._get_slice_partial_expr(-5, False).to_sql(dialect) == \ '(ARRAY_LENGTH(JSON_QUERY_ARRAY(`a`)) -5)' assert jbqa._get_slice_partial_expr(-5, True).to_sql(dialect) == \ '(ARRAY_LENGTH(JSON_QUERY_ARRAY(`a`)) -5)'
36.9
105
0.712737
import pytest from bach.series.series_json import JsonBigQueryAccessorImpl from tests.unit.bach.util import get_fake_df @pytest.mark.skip_postgres def test_bq_get_slice_partial_expr(dialect): df = get_fake_df( dialect=dialect, index_names=['i'], data_names=['a'], dtype='json' ) jbqa = JsonBigQueryAccessorImpl(df.a) assert jbqa._get_slice_partial_expr(None, True).to_sql(dialect) == '0' assert jbqa._get_slice_partial_expr(None, False).to_sql(dialect) == '9223372036854775807' assert jbqa._get_slice_partial_expr(5, False).to_sql(dialect) == '5' assert jbqa._get_slice_partial_expr(5, True).to_sql(dialect) == '5' assert jbqa._get_slice_partial_expr(-5, False).to_sql(dialect) == \ '(ARRAY_LENGTH(JSON_QUERY_ARRAY(`a`)) -5)' assert jbqa._get_slice_partial_expr(-5, True).to_sql(dialect) == \ '(ARRAY_LENGTH(JSON_QUERY_ARRAY(`a`)) -5)'
true
true
f723f53cdb2ac0facc06438330246f32fcc5c7e7
3,101
py
Python
pupil/models/clustering.py
hadi-gharibi/pupil
9d266572cc1ebf659e87206be6e5f1548959d510
[ "Apache-2.0" ]
2
2022-03-31T23:17:14.000Z
2022-03-31T23:24:58.000Z
pupil/models/clustering.py
hadi-gharibi/pupil
9d266572cc1ebf659e87206be6e5f1548959d510
[ "Apache-2.0" ]
null
null
null
pupil/models/clustering.py
hadi-gharibi/pupil
9d266572cc1ebf659e87206be6e5f1548959d510
[ "Apache-2.0" ]
null
null
null
from abc import ABC, abstractmethod from typing import Dict, Protocol, Tuple import faiss import numpy as np from pupil.types import NDArray2D from sklearn.cluster import AgglomerativeClustering class Clustering(Protocol): n_clusters: int def fit(self, X: NDArray2D): ... def predict(self, X: NDArray2D) -> Tuple[NDArray2D, NDArray2D]: ... def distance_to_cluster_centers(self, X: NDArray2D) -> Tuple[NDArray2D, NDArray2D]: """After having the center of your clusters, you can use this function to see the distance from X and center of all clusters Args: X (NDArray2D): The input to check. Returns: Tuple[NDArray2D, NDArray2D]: Return (Distances, cluster_ids). Shape of each: (#queries, #clusters) """ ... class FaissKMeansClustering: def __init__( self, n_clusters: int, n_init: int = 10, max_iter: int = 100, ) -> None: self.n_clusters = n_clusters self.n_init = n_init self.max_iter = max_iter self.cluster_centers_ = None self.inertia_ = None def fit(self, X: NDArray2D) -> None: self.kmeans = faiss.Kmeans( d=X.shape[1], k=self.n_clusters, niter=self.max_iter, nredo=self.n_init, ) X = X / np.linalg.norm(X) self.kmeans.train(X.astype(np.float32)) self.cluster_centers_ = self.kmeans.centroids self.inertia_ = self.kmeans.obj[-1] def predict(self, X: NDArray2D) -> Tuple[NDArray2D, NDArray2D]: X = X / np.linalg.norm(X) return self.kmeans.index.search(X.astype(np.float32), 1) # type: ignore def distance_to_cluster_centers(self, X: NDArray2D) -> Tuple[NDArray2D, NDArray2D]: X = X / np.linalg.norm(X) D, I = self.kmeans.index.search(X.astype(np.float32), self.n_clusters) # type: ignore return D, I class Splitter(Protocol): def fit(self, X: NDArray2D, clsuter_inds: NDArray2D): ... @property def splits( self, ): ... 
class Distance1DSplitter: def __init__(self, nsplits=3): self.nsplits = nsplits def fit(self, X: NDArray2D, clsuter_inds: NDArray2D) -> None: self.clsuter_inds = clsuter_inds self.alg = AgglomerativeClustering(n_clusters=self.nsplits) self.alg.fit(X.reshape((-1, 1))) self._tag_to_index_dict = self._tag_to_index() def _tag_to_index(self) -> Dict[str, Tuple[int, int]]: tags = ["priority_" + str(i) for i in range(self.nsplits)] inds = np.argwhere(np.diff(self.alg.labels_) != 0).flatten().tolist() inds.insert(0, -1) inds.append(len(self.alg.labels_)) tag_dict = {} for i, end in enumerate(inds[1:]): start = inds[i] + 1 tag_dict[tags[i]] = (start, end + 1) return tag_dict @property def splits(self): res = {} for k, v in self._tag_to_index_dict.items(): res[k] = self.clsuter_inds[0][v[0] : v[1]] return res
28.981308
132
0.603354
from abc import ABC, abstractmethod from typing import Dict, Protocol, Tuple import faiss import numpy as np from pupil.types import NDArray2D from sklearn.cluster import AgglomerativeClustering class Clustering(Protocol): n_clusters: int def fit(self, X: NDArray2D): ... def predict(self, X: NDArray2D) -> Tuple[NDArray2D, NDArray2D]: ... def distance_to_cluster_centers(self, X: NDArray2D) -> Tuple[NDArray2D, NDArray2D]: ... class FaissKMeansClustering: def __init__( self, n_clusters: int, n_init: int = 10, max_iter: int = 100, ) -> None: self.n_clusters = n_clusters self.n_init = n_init self.max_iter = max_iter self.cluster_centers_ = None self.inertia_ = None def fit(self, X: NDArray2D) -> None: self.kmeans = faiss.Kmeans( d=X.shape[1], k=self.n_clusters, niter=self.max_iter, nredo=self.n_init, ) X = X / np.linalg.norm(X) self.kmeans.train(X.astype(np.float32)) self.cluster_centers_ = self.kmeans.centroids self.inertia_ = self.kmeans.obj[-1] def predict(self, X: NDArray2D) -> Tuple[NDArray2D, NDArray2D]: X = X / np.linalg.norm(X) return self.kmeans.index.search(X.astype(np.float32), 1) def distance_to_cluster_centers(self, X: NDArray2D) -> Tuple[NDArray2D, NDArray2D]: X = X / np.linalg.norm(X) D, I = self.kmeans.index.search(X.astype(np.float32), self.n_clusters) return D, I class Splitter(Protocol): def fit(self, X: NDArray2D, clsuter_inds: NDArray2D): ... @property def splits( self, ): ... 
class Distance1DSplitter: def __init__(self, nsplits=3): self.nsplits = nsplits def fit(self, X: NDArray2D, clsuter_inds: NDArray2D) -> None: self.clsuter_inds = clsuter_inds self.alg = AgglomerativeClustering(n_clusters=self.nsplits) self.alg.fit(X.reshape((-1, 1))) self._tag_to_index_dict = self._tag_to_index() def _tag_to_index(self) -> Dict[str, Tuple[int, int]]: tags = ["priority_" + str(i) for i in range(self.nsplits)] inds = np.argwhere(np.diff(self.alg.labels_) != 0).flatten().tolist() inds.insert(0, -1) inds.append(len(self.alg.labels_)) tag_dict = {} for i, end in enumerate(inds[1:]): start = inds[i] + 1 tag_dict[tags[i]] = (start, end + 1) return tag_dict @property def splits(self): res = {} for k, v in self._tag_to_index_dict.items(): res[k] = self.clsuter_inds[0][v[0] : v[1]] return res
true
true
f723f5a286cb6bd6f00ddea9012b0aaff76a5524
6,044
py
Python
sdks/python/apache_beam/testing/benchmarks/nexmark/queries/query3.py
eyal0/beam
9c6922976cc2a5c6a2ef836c1986ff769cda99a5
[ "Apache-2.0" ]
2
2017-12-19T18:34:54.000Z
2019-05-14T21:50:06.000Z
sdks/python/apache_beam/testing/benchmarks/nexmark/queries/query3.py
eyal0/beam
9c6922976cc2a5c6a2ef836c1986ff769cda99a5
[ "Apache-2.0" ]
80
2020-01-16T09:55:09.000Z
2020-10-03T13:43:07.000Z
sdks/python/apache_beam/testing/benchmarks/nexmark/queries/query3.py
eyal0/beam
9c6922976cc2a5c6a2ef836c1986ff769cda99a5
[ "Apache-2.0" ]
1
2020-11-11T18:45:54.000Z
2020-11-11T18:45:54.000Z
# # Licensed to the Apache Software Foundation (ASF) under one or more # contributor license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright ownership. # The ASF licenses this file to You under the Apache License, Version 2.0 # (the "License"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # """ Query 3, 'Local Item Suggestion'. Who is selling in OR, ID or CA in category 10, and for what auction ids? In CQL syntax:: SELECT Istream(P.name, P.city, P.state, A.id) FROM Auction A [ROWS UNBOUNDED], Person P [ROWS UNBOUNDED] WHERE A.seller = P.id AND (P.state = `OR' OR P.state = `ID' OR P.state = `CA') AND A.category = 10; We'll implement this query to allow 'new auction' events to come before the 'new person' events for the auction seller. Those auctions will be stored until the matching person is seen. Then all subsequent auctions for a person will use the stored person record. 
""" from __future__ import absolute_import import logging import apache_beam as beam from apache_beam.testing.benchmarks.nexmark.models import nexmark_model from apache_beam.testing.benchmarks.nexmark.queries import nexmark_query_util from apache_beam.testing.benchmarks.nexmark.queries.nexmark_query_util import ResultNames from apache_beam.transforms import trigger from apache_beam.transforms import userstate from apache_beam.transforms import window from apache_beam.transforms.userstate import on_timer def load(events, metadata=None): num_events_in_pane = 30 windowed_events = ( events | beam.WindowInto( window.GlobalWindows(), trigger=trigger.Repeatedly(trigger.AfterCount(num_events_in_pane)), accumulation_mode=trigger.AccumulationMode.DISCARDING)) auction_by_seller_id = ( windowed_events | nexmark_query_util.JustAuctions() | 'query3_filter_category' >> beam.Filter(lambda auc: auc.category == 10) | 'query3_key_by_seller' >> beam.ParDo( nexmark_query_util.AuctionBySellerFn())) person_by_id = ( windowed_events | nexmark_query_util.JustPerson() | 'query3_filter_region' >> beam.Filter(lambda person: person.state in ['OR', 'ID', 'CA']) | 'query3_key_by_person_id' >> beam.ParDo( nexmark_query_util.PersonByIdFn())) return ({ nexmark_query_util.AUCTION_TAG: auction_by_seller_id, nexmark_query_util.PERSON_TAG: person_by_id, } | beam.CoGroupByKey() | 'query3_join' >> beam.ParDo( JoinFn(metadata.get('max_auction_waiting_time'))) | 'query3_output' >> beam.Map( lambda t: { ResultNames.NAME: t[1].name, ResultNames.CITY: t[1].city, ResultNames.STATE: t[1].state, ResultNames.AUCTION_ID: t[0].id })) class JoinFn(beam.DoFn): """ Join auctions and person by person id and emit their product one pair at a time. We know a person may submit any number of auctions. Thus new person event must have the person record stored in persistent state in order to match future auctions by that person. 
However we know that each auction is associated with at most one person, so only need to store auction records in persistent state until we have seen the corresponding person record. And of course may have already seen that record. """ AUCTIONS = 'auctions_state' PERSON = 'person_state' PERSON_EXPIRING = 'person_state_expiring' auction_spec = userstate.BagStateSpec(AUCTIONS, nexmark_model.Auction.CODER) person_spec = userstate.ReadModifyWriteStateSpec( PERSON, nexmark_model.Person.CODER) person_timer_spec = userstate.TimerSpec( PERSON_EXPIRING, userstate.TimeDomain.WATERMARK) def __init__(self, max_auction_wait_time): self.max_auction_wait_time = max_auction_wait_time def process( self, element, auction_state=beam.DoFn.StateParam(auction_spec), person_state=beam.DoFn.StateParam(person_spec), person_timer=beam.DoFn.TimerParam(person_timer_spec)): # extract group with tags from element tuple _, group = element existing_person = person_state.read() if existing_person: # the person exists in person_state for this person id for auction in group[nexmark_query_util.AUCTION_TAG]: yield auction, existing_person return new_person = None for person in group[nexmark_query_util.PERSON_TAG]: if not new_person: new_person = person else: logging.error( 'two new person wtih same key: %s and %s' % (person, new_person)) continue # read all pending auctions for this person id, output and flush it pending_auctions = auction_state.read() if pending_auctions: for pending_auction in pending_auctions: yield pending_auction, new_person auction_state.clear() # output new auction for this person id for auction in group[nexmark_query_util.AUCTION_TAG]: yield auction, new_person # remember person for max_auction_wait_time seconds for future auctions person_state.write(new_person) person_timer.set(new_person.date_time + self.max_auction_wait_time) # we are done if we have seen a new person if new_person: return # remember auction until we see person for auction in 
group[nexmark_query_util.AUCTION_TAG]: auction_state.add(auction) @on_timer(person_timer_spec) def expiry(self, person_state=beam.DoFn.StateParam(person_spec)): person_state.clear()
38.012579
89
0.720218
from __future__ import absolute_import import logging import apache_beam as beam from apache_beam.testing.benchmarks.nexmark.models import nexmark_model from apache_beam.testing.benchmarks.nexmark.queries import nexmark_query_util from apache_beam.testing.benchmarks.nexmark.queries.nexmark_query_util import ResultNames from apache_beam.transforms import trigger from apache_beam.transforms import userstate from apache_beam.transforms import window from apache_beam.transforms.userstate import on_timer def load(events, metadata=None): num_events_in_pane = 30 windowed_events = ( events | beam.WindowInto( window.GlobalWindows(), trigger=trigger.Repeatedly(trigger.AfterCount(num_events_in_pane)), accumulation_mode=trigger.AccumulationMode.DISCARDING)) auction_by_seller_id = ( windowed_events | nexmark_query_util.JustAuctions() | 'query3_filter_category' >> beam.Filter(lambda auc: auc.category == 10) | 'query3_key_by_seller' >> beam.ParDo( nexmark_query_util.AuctionBySellerFn())) person_by_id = ( windowed_events | nexmark_query_util.JustPerson() | 'query3_filter_region' >> beam.Filter(lambda person: person.state in ['OR', 'ID', 'CA']) | 'query3_key_by_person_id' >> beam.ParDo( nexmark_query_util.PersonByIdFn())) return ({ nexmark_query_util.AUCTION_TAG: auction_by_seller_id, nexmark_query_util.PERSON_TAG: person_by_id, } | beam.CoGroupByKey() | 'query3_join' >> beam.ParDo( JoinFn(metadata.get('max_auction_waiting_time'))) | 'query3_output' >> beam.Map( lambda t: { ResultNames.NAME: t[1].name, ResultNames.CITY: t[1].city, ResultNames.STATE: t[1].state, ResultNames.AUCTION_ID: t[0].id })) class JoinFn(beam.DoFn): AUCTIONS = 'auctions_state' PERSON = 'person_state' PERSON_EXPIRING = 'person_state_expiring' auction_spec = userstate.BagStateSpec(AUCTIONS, nexmark_model.Auction.CODER) person_spec = userstate.ReadModifyWriteStateSpec( PERSON, nexmark_model.Person.CODER) person_timer_spec = userstate.TimerSpec( PERSON_EXPIRING, userstate.TimeDomain.WATERMARK) def __init__(self, 
max_auction_wait_time): self.max_auction_wait_time = max_auction_wait_time def process( self, element, auction_state=beam.DoFn.StateParam(auction_spec), person_state=beam.DoFn.StateParam(person_spec), person_timer=beam.DoFn.TimerParam(person_timer_spec)): _, group = element existing_person = person_state.read() if existing_person: for auction in group[nexmark_query_util.AUCTION_TAG]: yield auction, existing_person return new_person = None for person in group[nexmark_query_util.PERSON_TAG]: if not new_person: new_person = person else: logging.error( 'two new person wtih same key: %s and %s' % (person, new_person)) continue pending_auctions = auction_state.read() if pending_auctions: for pending_auction in pending_auctions: yield pending_auction, new_person auction_state.clear() for auction in group[nexmark_query_util.AUCTION_TAG]: yield auction, new_person person_state.write(new_person) person_timer.set(new_person.date_time + self.max_auction_wait_time) if new_person: return for auction in group[nexmark_query_util.AUCTION_TAG]: auction_state.add(auction) @on_timer(person_timer_spec) def expiry(self, person_state=beam.DoFn.StateParam(person_spec)): person_state.clear()
true
true
f723f5e67a95794ea25c2e636d620a2789ac60ad
48,091
py
Python
core/controllers/admin.py
Ragify/oppia
a530c7e4d5274b646afc7dd7040d13c7ed45b829
[ "Apache-2.0" ]
4
2021-09-16T16:46:53.000Z
2022-02-06T13:00:14.000Z
core/controllers/admin.py
Ragify/oppia
a530c7e4d5274b646afc7dd7040d13c7ed45b829
[ "Apache-2.0" ]
null
null
null
core/controllers/admin.py
Ragify/oppia
a530c7e4d5274b646afc7dd7040d13c7ed45b829
[ "Apache-2.0" ]
null
null
null
# Copyright 2014 The Oppia Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS-IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Controllers for the admin view.""" from __future__ import absolute_import from __future__ import unicode_literals import io import logging import random from core import feconf from core import python_utils from core import utils from core.constants import constants from core.controllers import acl_decorators from core.controllers import base from core.controllers import domain_objects_validator as validation_method from core.domain import auth_services from core.domain import blog_services from core.domain import collection_services from core.domain import config_domain from core.domain import config_services from core.domain import email_manager from core.domain import exp_domain from core.domain import exp_fetchers from core.domain import exp_services from core.domain import opportunity_services from core.domain import platform_feature_services as feature_services from core.domain import platform_parameter_domain as parameter_domain from core.domain import question_domain from core.domain import question_services from core.domain import recommendations_services from core.domain import rights_manager from core.domain import role_services from core.domain import search_services from core.domain import skill_domain from core.domain import skill_services from core.domain import state_domain from core.domain import stats_services from core.domain import story_domain from 
core.domain import story_services from core.domain import subtopic_page_domain from core.domain import subtopic_page_services from core.domain import topic_domain from core.domain import topic_fetchers from core.domain import topic_services from core.domain import user_services from core.domain import wipeout_service class AdminPage(base.BaseHandler): """Admin page shown in the App Engine admin console.""" URL_PATH_ARGS_SCHEMAS = {} HANDLER_ARGS_SCHEMAS = {'GET': {}} @acl_decorators.can_access_admin_page def get(self): """Handles GET requests.""" self.render_template('admin-page.mainpage.html') class AdminHandler(base.BaseHandler): """Handler for the admin page.""" GET_HANDLER_ERROR_RETURN_TYPE = feconf.HANDLER_TYPE_JSON URL_PATH_ARGS_SCHEMAS = {} HANDLER_ARGS_SCHEMAS = { 'GET': {}, 'POST': { 'action': { 'schema': { 'type': 'basestring', 'choices': [ 'reload_exploration', 'reload_collection', 'generate_dummy_explorations', 'clear_search_index', 'generate_dummy_new_structures_data', 'generate_dummy_new_skill_data', 'save_config_properties', 'revert_config_property', 'upload_topic_similarities', 'regenerate_topic_related_opportunities', 'update_feature_flag_rules' ] }, # TODO(#13331): Remove default_value when it is confirmed that, # for clearing the search indices of exploration & collection # 'action' field must be provided in the payload. 
'default_value': None }, 'exploration_id': { 'schema': { 'type': 'basestring' }, 'default_value': None }, 'collection_id': { 'schema': { 'type': 'basestring' }, 'default_value': None }, 'num_dummy_exps_to_generate': { 'schema': { 'type': 'int' }, 'default_value': None }, 'num_dummy_exps_to_publish': { 'schema': { 'type': 'int' }, 'default_value': None }, 'new_config_property_values': { 'schema': { 'type': 'object_dict', 'validation_method': ( validation_method.validate_new_config_property_values) }, 'default_value': None }, 'config_property_id': { 'schema': { 'type': 'basestring' }, 'default_value': None }, 'data': { 'schema': { 'type': 'basestring' }, 'default_value': None }, 'topic_id': { 'schema': { 'type': 'basestring' }, 'default_value': None }, 'feature_name': { 'schema': { 'type': 'basestring' }, 'default_value': None }, 'commit_message': { 'schema': { 'type': 'basestring' }, 'default_value': None }, 'new_rules': { 'schema': { 'type': 'list', 'items': { 'type': 'object_dict', 'object_class': parameter_domain.PlatformParameterRule } }, 'default_value': None } } } @acl_decorators.can_access_admin_page def get(self): """Handles GET requests.""" demo_exploration_ids = list(feconf.DEMO_EXPLORATIONS.keys()) topic_summaries = topic_fetchers.get_all_topic_summaries() topic_summary_dicts = [ summary.to_dict() for summary in topic_summaries] feature_flag_dicts = feature_services.get_all_feature_flag_dicts() config_properties = config_domain.Registry.get_config_property_schemas() # Removes promo-bar related configs as promo-bar is handlded by # release coordinators in /release-coordinator page. del config_properties['promo_bar_enabled'] del config_properties['promo_bar_message'] # Remove blog related configs as they will be handled by 'blog admins' # on blog admin page. 
del config_properties['max_number_of_tags_assigned_to_blog_post'] del config_properties['list_of_default_tags_for_blog_post'] self.render_json({ 'config_properties': config_properties, 'demo_collections': sorted(feconf.DEMO_COLLECTIONS.items()), 'demo_explorations': sorted(feconf.DEMO_EXPLORATIONS.items()), 'demo_exploration_ids': demo_exploration_ids, 'updatable_roles': role_services.UPDATABLE_ROLES, 'viewable_roles': role_services.VIEWABLE_ROLES, 'human_readable_roles': role_services.HUMAN_READABLE_ROLES, 'role_to_actions': role_services.get_role_actions(), 'topic_summaries': topic_summary_dicts, 'feature_flags': feature_flag_dicts, }) @acl_decorators.can_access_admin_page def post(self): """Handles POST requests.""" action = self.normalized_payload.get('action') try: result = {} if action == 'reload_exploration': exploration_id = self.normalized_payload.get('exploration_id') self._reload_exploration(exploration_id) elif action == 'reload_collection': collection_id = self.normalized_payload.get('collection_id') self._reload_collection(collection_id) elif action == 'generate_dummy_explorations': num_dummy_exps_to_generate = self.normalized_payload.get( 'num_dummy_exps_to_generate') num_dummy_exps_to_publish = self.normalized_payload.get( 'num_dummy_exps_to_publish') if num_dummy_exps_to_generate < num_dummy_exps_to_publish: raise self.InvalidInputException( 'Generate count cannot be less than publish count') else: self._generate_dummy_explorations( num_dummy_exps_to_generate, num_dummy_exps_to_publish) elif action == 'clear_search_index': search_services.clear_collection_search_index() search_services.clear_exploration_search_index() elif action == 'generate_dummy_new_structures_data': self._load_dummy_new_structures_data() elif action == 'generate_dummy_new_skill_data': self._generate_dummy_skill_and_questions() elif action == 'save_config_properties': new_config_property_values = self.normalized_payload.get( 'new_config_property_values') logging.info( '[ADMIN] 
%s saved config property values: %s' % (self.user_id, new_config_property_values)) for (name, value) in new_config_property_values.items(): config_services.set_property(self.user_id, name, value) elif action == 'revert_config_property': config_property_id = self.normalized_payload.get( 'config_property_id') logging.info( '[ADMIN] %s reverted config property: %s' % (self.user_id, config_property_id)) config_services.revert_property( self.user_id, config_property_id) elif action == 'upload_topic_similarities': data = self.normalized_payload.get('data') recommendations_services.update_topic_similarities(data) elif action == 'regenerate_topic_related_opportunities': topic_id = self.normalized_payload.get('topic_id') opportunities_count = ( opportunity_services .regenerate_opportunities_related_to_topic( topic_id, delete_existing_opportunities=True)) result = { 'opportunities_count': opportunities_count } elif action == 'update_feature_flag_rules': feature_name = self.normalized_payload.get('feature_name') new_rule_dicts = self.normalized_payload.get('new_rules') commit_message = self.normalized_payload.get('commit_message') try: feature_services.update_feature_flag_rules( feature_name, self.user_id, commit_message, new_rule_dicts) except ( utils.ValidationError, feature_services.FeatureFlagNotFoundException) as e: raise self.InvalidInputException(e) logging.info( '[ADMIN] %s updated feature %s with new rules: ' '%s.' % (self.user_id, feature_name, new_rule_dicts)) self.render_json(result) except Exception as e: logging.exception('[ADMIN] %s', e) self.render_json({'error': python_utils.UNICODE(e)}) python_utils.reraise_exception() def _reload_exploration(self, exploration_id): """Reloads the exploration in dev_mode corresponding to the given exploration id. Args: exploration_id: str. The exploration id. Raises: Exception. Cannot reload an exploration in production. 
""" if constants.DEV_MODE: logging.info( '[ADMIN] %s reloaded exploration %s' % (self.user_id, exploration_id)) exp_services.load_demo(python_utils.UNICODE(exploration_id)) rights_manager.release_ownership_of_exploration( user_services.get_system_user(), python_utils.UNICODE(exploration_id)) else: raise Exception('Cannot reload an exploration in production.') def _create_dummy_question( self, question_id, question_content, linked_skill_ids): """Creates a dummy question object with the given question ID. Args: question_id: str. The ID of the question to be created. question_content: str. The question content. linked_skill_ids: list(str). The IDs of the skills to which the question is linked to. Returns: Question. The dummy question with given values. """ state = state_domain.State.create_default_state( 'ABC', is_initial_state=True) state.update_interaction_id('TextInput') state.update_interaction_customization_args({ 'placeholder': { 'value': { 'content_id': 'ca_placeholder_0', 'unicode_str': '' } }, 'rows': {'value': 1} }) state.update_next_content_id_index(1) state.update_linked_skill_id(None) state.update_content(state_domain.SubtitledHtml('1', question_content)) recorded_voiceovers = state_domain.RecordedVoiceovers({}) written_translations = state_domain.WrittenTranslations({}) recorded_voiceovers.add_content_id_for_voiceover('ca_placeholder_0') recorded_voiceovers.add_content_id_for_voiceover('1') recorded_voiceovers.add_content_id_for_voiceover('default_outcome') written_translations.add_content_id_for_translation('ca_placeholder_0') written_translations.add_content_id_for_translation('1') written_translations.add_content_id_for_translation('default_outcome') state.update_recorded_voiceovers(recorded_voiceovers) state.update_written_translations(written_translations) solution = state_domain.Solution( 'TextInput', False, 'Solution', state_domain.SubtitledHtml( 'solution', '<p>This is a solution.</p>')) hints_list = [ state_domain.Hint( 
state_domain.SubtitledHtml('hint_1', '<p>This is a hint.</p>') ) ] state.update_interaction_solution(solution) state.update_interaction_hints(hints_list) state.update_interaction_default_outcome( state_domain.Outcome( None, state_domain.SubtitledHtml( 'feedback_id', '<p>Dummy Feedback</p>'), True, [], None, None ) ) question = question_domain.Question( question_id, state, feconf.CURRENT_STATE_SCHEMA_VERSION, constants.DEFAULT_LANGUAGE_CODE, 0, linked_skill_ids, []) return question def _create_dummy_skill(self, skill_id, skill_description, explanation): """Creates a dummy skill object with the given values. Args: skill_id: str. The ID of the skill to be created. skill_description: str. The description of the skill. explanation: str. The review material for the skill. Returns: Skill. The dummy skill with given values. """ rubrics = [ skill_domain.Rubric( constants.SKILL_DIFFICULTIES[0], ['Explanation 1']), skill_domain.Rubric( constants.SKILL_DIFFICULTIES[1], ['Explanation 2']), skill_domain.Rubric( constants.SKILL_DIFFICULTIES[2], ['Explanation 3'])] skill = skill_domain.Skill.create_default_skill( skill_id, skill_description, rubrics) skill.update_explanation(state_domain.SubtitledHtml('1', explanation)) return skill def _load_dummy_new_structures_data(self): """Loads the database with two topics (one of which is empty), a story and three skills in the topic (two of them in a subtopic) and a question attached to each skill. Raises: Exception. Cannot load new structures data in production mode. Exception. User does not have enough rights to generate data. 
""" if constants.DEV_MODE: if feconf.ROLE_ID_CURRICULUM_ADMIN not in self.user.roles: raise Exception( 'User does not have enough rights to generate data.') topic_id_1 = topic_fetchers.get_new_topic_id() topic_id_2 = topic_fetchers.get_new_topic_id() story_id = story_services.get_new_story_id() skill_id_1 = skill_services.get_new_skill_id() skill_id_2 = skill_services.get_new_skill_id() skill_id_3 = skill_services.get_new_skill_id() question_id_1 = question_services.get_new_question_id() question_id_2 = question_services.get_new_question_id() question_id_3 = question_services.get_new_question_id() skill_1 = self._create_dummy_skill( skill_id_1, 'Dummy Skill 1', '<p>Dummy Explanation 1</p>') skill_2 = self._create_dummy_skill( skill_id_2, 'Dummy Skill 2', '<p>Dummy Explanation 2</p>') skill_3 = self._create_dummy_skill( skill_id_3, 'Dummy Skill 3', '<p>Dummy Explanation 3</p>') question_1 = self._create_dummy_question( question_id_1, 'Question 1', [skill_id_1]) question_2 = self._create_dummy_question( question_id_2, 'Question 2', [skill_id_2]) question_3 = self._create_dummy_question( question_id_3, 'Question 3', [skill_id_3]) question_services.add_question(self.user_id, question_1) question_services.add_question(self.user_id, question_2) question_services.add_question(self.user_id, question_3) question_services.create_new_question_skill_link( self.user_id, question_id_1, skill_id_1, 0.3) question_services.create_new_question_skill_link( self.user_id, question_id_2, skill_id_2, 0.5) question_services.create_new_question_skill_link( self.user_id, question_id_3, skill_id_3, 0.7) topic_1 = topic_domain.Topic.create_default_topic( topic_id_1, 'Dummy Topic 1', 'dummy-topic-one', 'description') topic_2 = topic_domain.Topic.create_default_topic( topic_id_2, 'Empty Topic', 'empty-topic', 'description') topic_1.add_canonical_story(story_id) topic_1.add_uncategorized_skill_id(skill_id_1) topic_1.add_uncategorized_skill_id(skill_id_2) 
topic_1.add_uncategorized_skill_id(skill_id_3) topic_1.add_subtopic(1, 'Dummy Subtopic Title') topic_1.move_skill_id_to_subtopic(None, 1, skill_id_2) topic_1.move_skill_id_to_subtopic(None, 1, skill_id_3) subtopic_page = ( subtopic_page_domain.SubtopicPage.create_default_subtopic_page( 1, topic_id_1)) # These explorations were chosen since they pass the validations # for published stories. self._reload_exploration('15') self._reload_exploration('25') self._reload_exploration('13') exp_services.update_exploration( self.user_id, '15', [exp_domain.ExplorationChange({ 'cmd': exp_domain.CMD_EDIT_EXPLORATION_PROPERTY, 'property_name': 'correctness_feedback_enabled', 'new_value': True })], 'Changed correctness_feedback_enabled.') exp_services.update_exploration( self.user_id, '25', [exp_domain.ExplorationChange({ 'cmd': exp_domain.CMD_EDIT_EXPLORATION_PROPERTY, 'property_name': 'correctness_feedback_enabled', 'new_value': True })], 'Changed correctness_feedback_enabled.') exp_services.update_exploration( self.user_id, '13', [exp_domain.ExplorationChange({ 'cmd': exp_domain.CMD_EDIT_EXPLORATION_PROPERTY, 'property_name': 'correctness_feedback_enabled', 'new_value': True })], 'Changed correctness_feedback_enabled.') story = story_domain.Story.create_default_story( story_id, 'Help Jaime win the Arcade', 'Description', topic_id_1, 'help-jamie-win-arcade') story_node_dicts = [{ 'exp_id': '15', 'title': 'What are the place values?', 'description': 'Jaime learns the place value of each digit ' + 'in a big number.' }, { 'exp_id': '25', 'title': 'Finding the value of a number', 'description': 'Jaime understands the value of his ' + 'arcade score.' }, { 'exp_id': '13', 'title': 'Comparing Numbers', 'description': 'Jaime learns if a number is smaller or ' + 'greater than another number.' }] def generate_dummy_story_nodes(node_id, exp_id, title, description): """Generates and connects sequential story nodes. Args: node_id: int. The node id. exp_id: str. The exploration id. 
title: str. The title of the story node. description: str. The description of the story node. """ story.add_node( '%s%d' % (story_domain.NODE_ID_PREFIX, node_id), title) story.update_node_description( '%s%d' % (story_domain.NODE_ID_PREFIX, node_id), description) story.update_node_exploration_id( '%s%d' % (story_domain.NODE_ID_PREFIX, node_id), exp_id) if node_id != len(story_node_dicts): story.update_node_destination_node_ids( '%s%d' % (story_domain.NODE_ID_PREFIX, node_id), ['%s%d' % (story_domain.NODE_ID_PREFIX, node_id + 1)]) exp_services.update_exploration( self.user_id, exp_id, [exp_domain.ExplorationChange({ 'cmd': exp_domain.CMD_EDIT_EXPLORATION_PROPERTY, 'property_name': 'category', 'new_value': 'Astronomy' })], 'Change category') for i, story_node_dict in enumerate(story_node_dicts): generate_dummy_story_nodes(i + 1, **story_node_dict) skill_services.save_new_skill(self.user_id, skill_1) skill_services.save_new_skill(self.user_id, skill_2) skill_services.save_new_skill(self.user_id, skill_3) story_services.save_new_story(self.user_id, story) topic_services.save_new_topic(self.user_id, topic_1) topic_services.save_new_topic(self.user_id, topic_2) subtopic_page_services.save_subtopic_page( self.user_id, subtopic_page, 'Added subtopic', [topic_domain.TopicChange({ 'cmd': topic_domain.CMD_ADD_SUBTOPIC, 'subtopic_id': 1, 'title': 'Dummy Subtopic Title' })] ) # Generates translation opportunities for the Contributor Dashboard. exp_ids_in_story = story.story_contents.get_all_linked_exp_ids() opportunity_services.add_new_exploration_opportunities( story_id, exp_ids_in_story) topic_services.publish_story(topic_id_1, story_id, self.user_id) else: raise Exception('Cannot load new structures data in production.') def _generate_dummy_skill_and_questions(self): """Generate and loads the database with a skill and 15 questions linked to the skill. Raises: Exception. Cannot load new structures data in production mode. Exception. 
User does not have enough rights to generate data. """ if constants.DEV_MODE: if feconf.ROLE_ID_CURRICULUM_ADMIN not in self.user.roles: raise Exception( 'User does not have enough rights to generate data.') skill_id = skill_services.get_new_skill_id() skill_name = 'Dummy Skill %s' % python_utils.UNICODE( random.getrandbits(32)) skill = self._create_dummy_skill( skill_id, skill_name, '<p>Dummy Explanation 1</p>') skill_services.save_new_skill(self.user_id, skill) for i in range(15): question_id = question_services.get_new_question_id() question_name = 'Question number %s %s' % ( python_utils.UNICODE(i), skill_name) question = self._create_dummy_question( question_id, question_name, [skill_id]) question_services.add_question(self.user_id, question) question_difficulty = list( constants.SKILL_DIFFICULTY_LABEL_TO_FLOAT.values()) random_difficulty = random.choice(question_difficulty) question_services.create_new_question_skill_link( self.user_id, question_id, skill_id, random_difficulty) else: raise Exception('Cannot generate dummy skills in production.') def _reload_collection(self, collection_id): """Reloads the collection in dev_mode corresponding to the given collection id. Args: collection_id: str. The collection id. Raises: Exception. Cannot reload a collection in production. """ if constants.DEV_MODE: logging.info( '[ADMIN] %s reloaded collection %s' % (self.user_id, collection_id)) collection_services.load_demo(collection_id) rights_manager.release_ownership_of_collection( user_services.get_system_user(), collection_id) else: raise Exception('Cannot reload a collection in production.') def _generate_dummy_explorations( self, num_dummy_exps_to_generate, num_dummy_exps_to_publish): """Generates and publishes the given number of dummy explorations. Args: num_dummy_exps_to_generate: int. Count of dummy explorations to be generated. num_dummy_exps_to_publish: int. Count of explorations to be published. Raises: Exception. Environment is not DEVMODE. 
""" if constants.DEV_MODE: logging.info( '[ADMIN] %s generated %s number of dummy explorations' % (self.user_id, num_dummy_exps_to_generate)) possible_titles = ['Hulk Neuroscience', 'Quantum Starks', 'Wonder Anatomy', 'Elvish, language of "Lord of the Rings', 'The Science of Superheroes'] exploration_ids_to_publish = [] for i in range(num_dummy_exps_to_generate): title = random.choice(possible_titles) category = random.choice(constants.SEARCH_DROPDOWN_CATEGORIES) new_exploration_id = exp_fetchers.get_new_exploration_id() exploration = exp_domain.Exploration.create_default_exploration( new_exploration_id, title=title, category=category, objective='Dummy Objective') exp_services.save_new_exploration(self.user_id, exploration) if i <= num_dummy_exps_to_publish - 1: exploration_ids_to_publish.append(new_exploration_id) rights_manager.publish_exploration( self.user, new_exploration_id) exp_services.index_explorations_given_ids( exploration_ids_to_publish) else: raise Exception('Cannot generate dummy explorations in production.') class AdminRoleHandler(base.BaseHandler): """Handler for roles tab of admin page. 
    Used to view and update roles."""

    GET_HANDLER_ERROR_RETURN_TYPE = feconf.HANDLER_TYPE_JSON
    URL_PATH_ARGS_SCHEMAS = {}
    HANDLER_ARGS_SCHEMAS = {
        'GET': {
            'filter_criterion': {
                'schema': {
                    'type': 'basestring',
                    'choices': [
                        feconf.USER_FILTER_CRITERION_ROLE,
                        feconf.USER_FILTER_CRITERION_USERNAME
                    ]
                }
            },
            # 'role' and 'username' each apply only to their matching
            # filter criterion, hence the None defaults.
            'role': {
                'schema': {
                    'type': 'basestring',
                    'choices': role_services.VIEWABLE_ROLES
                },
                'default_value': None
            },
            'username': {
                'schema': {
                    'type': 'basestring'
                },
                'default_value': None
            }
        },
        'PUT': {
            'role': {
                'schema': {
                    'type': 'basestring',
                    'choices': feconf.ALLOWED_USER_ROLES
                }
            },
            'username': {
                'schema': {
                    'type': 'basestring'
                }
            }
        },
        'DELETE': {
            'role': {
                'schema': {
                    'type': 'basestring',
                    'choices': feconf.ALLOWED_USER_ROLES
                }
            },
            'username': {
                'schema': {
                    'type': 'basestring'
                }
            }
        }
    }

    @acl_decorators.can_access_admin_page
    def get(self):
        """Lists usernames holding a given role, or lists the roles (and
        managed topics / banned status) of a single username, depending on
        the requested filter criterion.
        """
        filter_criterion = self.normalized_request.get(
            'filter_criterion')
        if filter_criterion == feconf.USER_FILTER_CRITERION_ROLE:
            role = self.normalized_request.get(
                feconf.USER_FILTER_CRITERION_ROLE)
            # Role queries are audit-logged.
            role_services.log_role_query(
                self.user_id, feconf.ROLE_ACTION_VIEW_BY_ROLE, role=role)
            self.render_json({
                'usernames': user_services.get_usernames_by_role(role)
            })
        elif filter_criterion == feconf.USER_FILTER_CRITERION_USERNAME:
            username = self.normalized_request.get(
                feconf.USER_FILTER_CRITERION_USERNAME)
            user_id = user_services.get_user_id_from_username(username)
            role_services.log_role_query(
                self.user_id, feconf.ROLE_ACTION_VIEW_BY_USERNAME,
                username=username)
            if user_id is None:
                raise self.InvalidInputException(
                    'User with given username does not exist.')

            user_settings = user_services.get_user_settings(user_id)
            user_roles = user_settings.roles
            managed_topic_ids = []
            if feconf.ROLE_ID_TOPIC_MANAGER in user_roles:
                managed_topic_ids = [
                    rights.id for rights in
                    topic_fetchers.get_topic_rights_with_user(user_id)]
            user_roles_dict = {
                'roles': user_roles,
                'managed_topic_ids': managed_topic_ids,
                'banned': user_settings.banned
            }
            self.render_json(user_roles_dict)

    @acl_decorators.can_access_admin_page
    def put(self):
        """Adds the given role to the user with the given username."""
        # NOTE(review): reads self.payload directly while sibling handlers
        # use self.normalized_payload -- confirm schema validation still
        # applies here.
        username = self.payload.get('username')
        role = self.payload.get('role')
        user_settings = user_services.get_user_settings_from_username(username)

        if user_settings is None:
            raise self.InvalidInputException(
                'User with given username does not exist.')

        if role == feconf.ROLE_ID_TOPIC_MANAGER:
            # The Topic manager role assignment is handled via
            # TopicManagerRoleHandler.
            raise self.InvalidInputException(
                'Unsupported role for this handler.')

        user_services.add_user_role(user_settings.user_id, role)

        self.render_json({})

    @acl_decorators.can_access_admin_page
    def delete(self):
        """Removes the given role from the user with the given username."""
        username = self.request.get('username')
        role = self.request.get('role')

        user_id = user_services.get_user_id_from_username(username)
        if user_id is None:
            raise self.InvalidInputException(
                'User with given username does not exist.')

        if role == feconf.ROLE_ID_TOPIC_MANAGER:
            # A topic manager is detached from every topic they manage
            # before the role itself is removed.
            topic_services.deassign_user_from_all_topics(self.user, user_id)

        user_services.remove_user_role(user_id, role)

        self.render_json({})


class TopicManagerRoleHandler(base.BaseHandler):
    """Handler to assign or deassigning manager to a topic."""

    GET_HANDLER_ERROR_RETURN_TYPE = feconf.HANDLER_TYPE_JSON
    URL_PATH_ARGS_SCHEMAS = {}
    HANDLER_ARGS_SCHEMAS = {
        'PUT': {
            'username': {
                'schema': {
                    'type': 'basestring'
                }
            },
            'action': {
                'schema': {
                    'type': 'basestring',
                    'choices': ['assign', 'deassign']
                }
            },
            'topic_id': {
                'schema': {
                    'type': 'basestring'
                }
            }
        }
    }

    @acl_decorators.can_access_admin_page
    def put(self):
        """Assigns or deassigns the topic-manager role for one topic."""
        username = self.normalized_payload.get('username')
        action = self.normalized_payload.get('action')
        topic_id = self.normalized_payload.get('topic_id')

        user_settings = user_services.get_user_settings_from_username(username)

        if user_settings is None:
            raise self.InvalidInputException(
                'User with given username does not exist.')

        user_id = user_settings.user_id
        if action == 'assign':
            if not feconf.ROLE_ID_TOPIC_MANAGER in user_settings.roles:
                user_services.add_user_role(
                    user_id, feconf.ROLE_ID_TOPIC_MANAGER)

            topic_manager = user_services.get_user_actions_info(user_id)
            topic_services.assign_role(
                user_services.get_system_user(), topic_manager,
                topic_domain.ROLE_MANAGER, topic_id)
        elif action == 'deassign':
            topic_services.deassign_manager_role_from_topic(
                user_services.get_system_user(), user_id, topic_id)

            # When the user no longer manages any topic, drop the
            # topic-manager role entirely.
            if not topic_fetchers.get_topic_rights_with_user(user_id):
                user_services.remove_user_role(
                    user_id, feconf.ROLE_ID_TOPIC_MANAGER)

        self.render_json({})


class BannedUsersHandler(base.BaseHandler):
    """Handler to ban and unban users."""

    GET_HANDLER_ERROR_RETURN_TYPE = feconf.HANDLER_TYPE_JSON
    URL_PATH_ARGS_SCHEMAS = {}
    HANDLER_ARGS_SCHEMAS = {
        'PUT': {
            'username': {
                'schema': {
                    'type': 'basestring'
                }
            }
        },
        'DELETE': {
            'username': {
                'schema': {
                    'type': 'basestring'
                }
            }
        }
    }

    @acl_decorators.can_access_admin_page
    def put(self):
        """Marks the user with the given username as banned."""
        username = self.normalized_payload.get('username')
        user_id = user_services.get_user_id_from_username(username)

        if user_id is None:
            raise self.InvalidInputException(
                'User with given username does not exist.')
        # Banned users are first detached from all topics they manage.
        topic_services.deassign_user_from_all_topics(self.user, user_id)
        user_services.mark_user_banned(user_id)

        self.render_json({})

    @acl_decorators.can_access_admin_page
    def delete(self):
        """Lifts the ban on the user with the given username."""
        username = self.normalized_request.get('username')
        user_id = user_services.get_user_id_from_username(username)

        if user_id is None:
            raise self.InvalidInputException(
                'User with given username does not exist.')
        user_services.unmark_user_banned(user_id)

        self.render_json({})


class AdminSuperAdminPrivilegesHandler(base.BaseHandler):
    """Handler for granting a user super admin privileges."""

    PUT_HANDLER_ERROR_RETURN_TYPE = feconf.HANDLER_TYPE_JSON
    DELETE_HANDLER_ERROR_RETURN_TYPE = feconf.HANDLER_TYPE_JSON
    URL_PATH_ARGS_SCHEMAS = {}
    HANDLER_ARGS_SCHEMAS = {
        'PUT': {
            'username': {
                'schema': {
                    'type': 'basestring'
                }
            }
        },
        'DELETE': {
            'username': {
                'schema': {
                    'type': 'basestring'
                }
            }
        }
    }
    @acl_decorators.can_access_admin_page
    def put(self):
        """Grants super admin privileges to the given username.

        Only callable by the default system admin account.
        """
        if self.email != feconf.ADMIN_EMAIL_ADDRESS:
            raise self.UnauthorizedUserException(
                'Only the default system admin can manage super admins')
        username = self.normalized_payload.get('username')

        user_id = user_services.get_user_id_from_username(username)
        if user_id is None:
            raise self.InvalidInputException('No such user exists')

        auth_services.grant_super_admin_privileges(user_id)
        self.render_json(self.values)

    @acl_decorators.can_access_admin_page
    def delete(self):
        """Revokes super admin privileges from the given username.

        Only callable by the default system admin account, which itself can
        never be demoted.
        """
        if self.email != feconf.ADMIN_EMAIL_ADDRESS:
            raise self.UnauthorizedUserException(
                'Only the default system admin can manage super admins')
        username = self.normalized_request.get('username')

        user_settings = user_services.get_user_settings_from_username(username)
        if user_settings is None:
            raise self.InvalidInputException('No such user exists')

        if user_settings.email == feconf.ADMIN_EMAIL_ADDRESS:
            raise self.InvalidInputException(
                'Cannot revoke privileges from the default super admin account')

        auth_services.revoke_super_admin_privileges(user_settings.user_id)
        self.render_json(self.values)


class AdminTopicsCsvFileDownloader(base.BaseHandler):
    """Retrieves topic similarity data for download."""

    GET_HANDLER_ERROR_RETURN_TYPE = feconf.HANDLER_TYPE_DOWNLOADABLE
    URL_PATH_ARGS_SCHEMAS = {}
    HANDLER_ARGS_SCHEMAS = {'GET': {}}

    @acl_decorators.can_access_admin_page
    def get(self):
        """Serves the topic-similarity matrix as a downloadable CSV file."""
        topic_similarities = (
            recommendations_services.get_topic_similarities_as_csv()
        )
        # Downloadable file accepts only bytes, so we need to encode
        # topic_similarities to bytes.
        self.render_downloadable_file(
            io.BytesIO(topic_similarities.encode('utf-8')),
            'topic_similarities.csv',
            'text/csv'
        )


class DataExtractionQueryHandler(base.BaseHandler):
    """Handler for data extraction query."""

    GET_HANDLER_ERROR_RETURN_TYPE = feconf.HANDLER_TYPE_JSON
    URL_PATH_ARGS_SCHEMAS = {}
    HANDLER_ARGS_SCHEMAS = {
        'GET': {
            'exp_id': {
                'schema': {
                    'type': 'basestring'
                }
            },
            'exp_version': {
                'schema': {
                    'type': 'int'
                }
            },
            'state_name': {
                'schema': {
                    'type': 'basestring'
                }
            },
            'num_answers': {
                'schema': {
                    'type': 'int'
                }
            }
        }
    }

    @acl_decorators.can_access_admin_page
    def get(self):
        """Returns submitted answers for one state of a specific exploration
        version, capped at num_answers entries.
        """
        exp_id = self.normalized_request.get('exp_id')
        exp_version = self.normalized_request.get('exp_version')

        # strict=False: a missing exploration/version yields None instead of
        # raising, so a friendlier error can be reported below.
        exploration = exp_fetchers.get_exploration_by_id(
            exp_id, strict=False, version=exp_version)
        if exploration is None:
            raise self.InvalidInputException(
                'Entity for exploration with id %s and version %s not found.'
                % (exp_id, exp_version))

        state_name = self.normalized_request.get('state_name')
        num_answers = self.normalized_request.get('num_answers')

        if state_name not in exploration.states:
            raise self.InvalidInputException(
                'Exploration \'%s\' does not have \'%s\' state.'
                % (exp_id, state_name))

        state_answers = stats_services.get_state_answers(
            exp_id, exp_version, state_name)
        extracted_answers = state_answers.get_submitted_answer_dict_list()

        # A non-positive num_answers means "return all answers".
        if num_answers > 0:
            extracted_answers = extracted_answers[:num_answers]

        response = {
            'data': extracted_answers
        }
        self.render_json(response)


class SendDummyMailToAdminHandler(base.BaseHandler):
    """This function handles sending test emails."""

    URL_PATH_ARGS_SCHEMAS = {}
    HANDLER_ARGS_SCHEMAS = {'POST': {}}

    @acl_decorators.can_access_admin_page
    def post(self):
        """Sends a test email to the calling admin, if emailing is enabled."""
        username = self.username
        if feconf.CAN_SEND_EMAILS:
            email_manager.send_dummy_mail_to_admin(username)
            self.render_json({})
        else:
            raise self.InvalidInputException('This app cannot send emails.')


class UpdateUsernameHandler(base.BaseHandler):
    """Handler for renaming usernames."""

    URL_PATH_ARGS_SCHEMAS = {}
    HANDLER_ARGS_SCHEMAS = {
        'PUT': {
            'old_username': {
                'schema': {
                    'type': 'basestring'
                }
            },
            'new_username': {
                'schema': {
                    'type': 'basestring',
                    'validators': [{
                        'id': 'has_length_at_most',
                        'max_value': constants.MAX_USERNAME_LENGTH
                    }]
                }
            }
        }
    }

    @acl_decorators.can_access_admin_page
    def put(self):
        """Renames old_username to new_username, recording the change."""
        old_username = self.normalized_payload.get('old_username')
        new_username = self.normalized_payload.get('new_username')

        user_id = user_services.get_user_id_from_username(old_username)
        if user_id is None:
            raise self.InvalidInputException(
                'Invalid username: %s' % old_username)

        if user_services.is_username_taken(new_username):
            raise self.InvalidInputException('Username already taken.')

        user_services.set_username(user_id, new_username)
        # Keep an audit trail of who renamed whom.
        user_services.log_username_change(
            self.user_id, old_username, new_username)
        self.render_json({})


class NumberOfDeletionRequestsHandler(base.BaseHandler):
    """Handler for getting the number of pending deletion requests via admin
    page.
""" GET_HANDLER_ERROR_RETURN_TYPE = feconf.HANDLER_TYPE_JSON URL_PATH_ARGS_SCHEMAS = {} HANDLER_ARGS_SCHEMAS = {'GET': {}} @acl_decorators.can_access_admin_page def get(self): self.render_json({ 'number_of_pending_deletion_models': ( wipeout_service.get_number_of_pending_deletion_requests()) }) class VerifyUserModelsDeletedHandler(base.BaseHandler): """Handler for getting whether any models exist for specific user ID.""" GET_HANDLER_ERROR_RETURN_TYPE = feconf.HANDLER_TYPE_JSON URL_PATH_ARGS_SCHEMAS = {} HANDLER_ARGS_SCHEMAS = { 'GET': { 'user_id': { 'schema': { 'type': 'basestring' } } } } @acl_decorators.can_access_admin_page def get(self): user_id = self.normalized_request.get('user_id') user_is_deleted = wipeout_service.verify_user_deleted( user_id, include_delete_at_end_models=True) self.render_json({'related_models_exist': not user_is_deleted}) class DeleteUserHandler(base.BaseHandler): """Handler for deleting a user with specific ID.""" URL_PATH_ARGS_SCHEMAS = {} HANDLER_ARGS_SCHEMAS = { 'DELETE': { 'user_id': { 'schema': { 'type': 'basestring' } }, 'username': { 'schema': { 'type': 'basestring' } } } } @acl_decorators.can_delete_any_user def delete(self): user_id = self.normalized_request.get('user_id') username = self.normalized_request.get('username') user_id_from_username = ( user_services.get_user_id_from_username(username)) if user_id_from_username is None: raise self.InvalidInputException( 'The username doesn\'t belong to any user' ) if user_id_from_username != user_id: raise self.InvalidInputException( 'The user ID retrieved from the username and ' 'the user ID provided by admin differ.' 
            )
        wipeout_service.pre_delete_user(user_id)
        self.render_json({'success': True})


class UpdateBlogPostHandler(base.BaseHandler):
    """Handler for changing author ids and published on date in blog posts."""

    URL_PATH_ARGS_SCHEMAS = {}
    HANDLER_ARGS_SCHEMAS = {
        'PUT': {
            'blog_post_id': {
                'schema': {
                    'type': 'basestring'
                }
            },
            'author_username': {
                'schema': {
                    'type': 'basestring',
                    'validators': [{
                        'id': 'has_length_at_most',
                        'max_value': constants.MAX_USERNAME_LENGTH
                    }]
                }
            },
            'published_on': {
                'schema': {
                    'type': 'basestring'
                }
            }
        }
    }

    @acl_decorators.can_access_admin_page
    def put(self):
        """Reassigns a blog post's author and its published-on date."""
        blog_post_id = self.normalized_payload.get('blog_post_id')
        author_username = self.normalized_payload.get('author_username')
        published_on = self.normalized_payload.get('published_on')

        author_id = user_services.get_user_id_from_username(author_username)
        if author_id is None:
            raise self.InvalidInputException(
                'Invalid username: %s' % author_username)

        # The new author must themselves be allowed to use the blog
        # dashboard.
        user_actions = user_services.get_user_actions_info(author_id).actions
        if role_services.ACTION_ACCESS_BLOG_DASHBOARD not in user_actions:
            raise self.InvalidInputException(
                'User does not have enough rights to be blog post author.')

        # strict=False: a missing post yields None so a 404 can be raised.
        blog_post = (
            blog_services.get_blog_post_by_id(blog_post_id, strict=False))
        if blog_post is None:
            raise self.PageNotFoundException(
                Exception(
                    'The blog post with the given id or url doesn\'t exist.'))

        blog_services.update_blog_models_author_and_published_on_date(
            blog_post_id, author_id, published_on)
        self.render_json({})
38.814366
80
0.587823
from __future__ import absolute_import from __future__ import unicode_literals import io import logging import random from core import feconf from core import python_utils from core import utils from core.constants import constants from core.controllers import acl_decorators from core.controllers import base from core.controllers import domain_objects_validator as validation_method from core.domain import auth_services from core.domain import blog_services from core.domain import collection_services from core.domain import config_domain from core.domain import config_services from core.domain import email_manager from core.domain import exp_domain from core.domain import exp_fetchers from core.domain import exp_services from core.domain import opportunity_services from core.domain import platform_feature_services as feature_services from core.domain import platform_parameter_domain as parameter_domain from core.domain import question_domain from core.domain import question_services from core.domain import recommendations_services from core.domain import rights_manager from core.domain import role_services from core.domain import search_services from core.domain import skill_domain from core.domain import skill_services from core.domain import state_domain from core.domain import stats_services from core.domain import story_domain from core.domain import story_services from core.domain import subtopic_page_domain from core.domain import subtopic_page_services from core.domain import topic_domain from core.domain import topic_fetchers from core.domain import topic_services from core.domain import user_services from core.domain import wipeout_service class AdminPage(base.BaseHandler): URL_PATH_ARGS_SCHEMAS = {} HANDLER_ARGS_SCHEMAS = {'GET': {}} @acl_decorators.can_access_admin_page def get(self): self.render_template('admin-page.mainpage.html') class AdminHandler(base.BaseHandler): GET_HANDLER_ERROR_RETURN_TYPE = feconf.HANDLER_TYPE_JSON URL_PATH_ARGS_SCHEMAS = {} 
HANDLER_ARGS_SCHEMAS = { 'GET': {}, 'POST': { 'action': { 'schema': { 'type': 'basestring', 'choices': [ 'reload_exploration', 'reload_collection', 'generate_dummy_explorations', 'clear_search_index', 'generate_dummy_new_structures_data', 'generate_dummy_new_skill_data', 'save_config_properties', 'revert_config_property', 'upload_topic_similarities', 'regenerate_topic_related_opportunities', 'update_feature_flag_rules' ] }, ult_value': None }, 'exploration_id': { 'schema': { 'type': 'basestring' }, 'default_value': None }, 'collection_id': { 'schema': { 'type': 'basestring' }, 'default_value': None }, 'num_dummy_exps_to_generate': { 'schema': { 'type': 'int' }, 'default_value': None }, 'num_dummy_exps_to_publish': { 'schema': { 'type': 'int' }, 'default_value': None }, 'new_config_property_values': { 'schema': { 'type': 'object_dict', 'validation_method': ( validation_method.validate_new_config_property_values) }, 'default_value': None }, 'config_property_id': { 'schema': { 'type': 'basestring' }, 'default_value': None }, 'data': { 'schema': { 'type': 'basestring' }, 'default_value': None }, 'topic_id': { 'schema': { 'type': 'basestring' }, 'default_value': None }, 'feature_name': { 'schema': { 'type': 'basestring' }, 'default_value': None }, 'commit_message': { 'schema': { 'type': 'basestring' }, 'default_value': None }, 'new_rules': { 'schema': { 'type': 'list', 'items': { 'type': 'object_dict', 'object_class': parameter_domain.PlatformParameterRule } }, 'default_value': None } } } @acl_decorators.can_access_admin_page def get(self): demo_exploration_ids = list(feconf.DEMO_EXPLORATIONS.keys()) topic_summaries = topic_fetchers.get_all_topic_summaries() topic_summary_dicts = [ summary.to_dict() for summary in topic_summaries] feature_flag_dicts = feature_services.get_all_feature_flag_dicts() config_properties = config_domain.Registry.get_config_property_schemas() del config_properties['promo_bar_enabled'] del config_properties['promo_bar_message'] del 
config_properties['max_number_of_tags_assigned_to_blog_post'] del config_properties['list_of_default_tags_for_blog_post'] self.render_json({ 'config_properties': config_properties, 'demo_collections': sorted(feconf.DEMO_COLLECTIONS.items()), 'demo_explorations': sorted(feconf.DEMO_EXPLORATIONS.items()), 'demo_exploration_ids': demo_exploration_ids, 'updatable_roles': role_services.UPDATABLE_ROLES, 'viewable_roles': role_services.VIEWABLE_ROLES, 'human_readable_roles': role_services.HUMAN_READABLE_ROLES, 'role_to_actions': role_services.get_role_actions(), 'topic_summaries': topic_summary_dicts, 'feature_flags': feature_flag_dicts, }) @acl_decorators.can_access_admin_page def post(self): action = self.normalized_payload.get('action') try: result = {} if action == 'reload_exploration': exploration_id = self.normalized_payload.get('exploration_id') self._reload_exploration(exploration_id) elif action == 'reload_collection': collection_id = self.normalized_payload.get('collection_id') self._reload_collection(collection_id) elif action == 'generate_dummy_explorations': num_dummy_exps_to_generate = self.normalized_payload.get( 'num_dummy_exps_to_generate') num_dummy_exps_to_publish = self.normalized_payload.get( 'num_dummy_exps_to_publish') if num_dummy_exps_to_generate < num_dummy_exps_to_publish: raise self.InvalidInputException( 'Generate count cannot be less than publish count') else: self._generate_dummy_explorations( num_dummy_exps_to_generate, num_dummy_exps_to_publish) elif action == 'clear_search_index': search_services.clear_collection_search_index() search_services.clear_exploration_search_index() elif action == 'generate_dummy_new_structures_data': self._load_dummy_new_structures_data() elif action == 'generate_dummy_new_skill_data': self._generate_dummy_skill_and_questions() elif action == 'save_config_properties': new_config_property_values = self.normalized_payload.get( 'new_config_property_values') logging.info( '[ADMIN] %s saved config property values: 
%s' % (self.user_id, new_config_property_values)) for (name, value) in new_config_property_values.items(): config_services.set_property(self.user_id, name, value) elif action == 'revert_config_property': config_property_id = self.normalized_payload.get( 'config_property_id') logging.info( '[ADMIN] %s reverted config property: %s' % (self.user_id, config_property_id)) config_services.revert_property( self.user_id, config_property_id) elif action == 'upload_topic_similarities': data = self.normalized_payload.get('data') recommendations_services.update_topic_similarities(data) elif action == 'regenerate_topic_related_opportunities': topic_id = self.normalized_payload.get('topic_id') opportunities_count = ( opportunity_services .regenerate_opportunities_related_to_topic( topic_id, delete_existing_opportunities=True)) result = { 'opportunities_count': opportunities_count } elif action == 'update_feature_flag_rules': feature_name = self.normalized_payload.get('feature_name') new_rule_dicts = self.normalized_payload.get('new_rules') commit_message = self.normalized_payload.get('commit_message') try: feature_services.update_feature_flag_rules( feature_name, self.user_id, commit_message, new_rule_dicts) except ( utils.ValidationError, feature_services.FeatureFlagNotFoundException) as e: raise self.InvalidInputException(e) logging.info( '[ADMIN] %s updated feature %s with new rules: ' '%s.' 
% (self.user_id, feature_name, new_rule_dicts)) self.render_json(result) except Exception as e: logging.exception('[ADMIN] %s', e) self.render_json({'error': python_utils.UNICODE(e)}) python_utils.reraise_exception() def _reload_exploration(self, exploration_id): if constants.DEV_MODE: logging.info( '[ADMIN] %s reloaded exploration %s' % (self.user_id, exploration_id)) exp_services.load_demo(python_utils.UNICODE(exploration_id)) rights_manager.release_ownership_of_exploration( user_services.get_system_user(), python_utils.UNICODE(exploration_id)) else: raise Exception('Cannot reload an exploration in production.') def _create_dummy_question( self, question_id, question_content, linked_skill_ids): state = state_domain.State.create_default_state( 'ABC', is_initial_state=True) state.update_interaction_id('TextInput') state.update_interaction_customization_args({ 'placeholder': { 'value': { 'content_id': 'ca_placeholder_0', 'unicode_str': '' } }, 'rows': {'value': 1} }) state.update_next_content_id_index(1) state.update_linked_skill_id(None) state.update_content(state_domain.SubtitledHtml('1', question_content)) recorded_voiceovers = state_domain.RecordedVoiceovers({}) written_translations = state_domain.WrittenTranslations({}) recorded_voiceovers.add_content_id_for_voiceover('ca_placeholder_0') recorded_voiceovers.add_content_id_for_voiceover('1') recorded_voiceovers.add_content_id_for_voiceover('default_outcome') written_translations.add_content_id_for_translation('ca_placeholder_0') written_translations.add_content_id_for_translation('1') written_translations.add_content_id_for_translation('default_outcome') state.update_recorded_voiceovers(recorded_voiceovers) state.update_written_translations(written_translations) solution = state_domain.Solution( 'TextInput', False, 'Solution', state_domain.SubtitledHtml( 'solution', '<p>This is a solution.</p>')) hints_list = [ state_domain.Hint( state_domain.SubtitledHtml('hint_1', '<p>This is a hint.</p>') ) ] 
state.update_interaction_solution(solution) state.update_interaction_hints(hints_list) state.update_interaction_default_outcome( state_domain.Outcome( None, state_domain.SubtitledHtml( 'feedback_id', '<p>Dummy Feedback</p>'), True, [], None, None ) ) question = question_domain.Question( question_id, state, feconf.CURRENT_STATE_SCHEMA_VERSION, constants.DEFAULT_LANGUAGE_CODE, 0, linked_skill_ids, []) return question def _create_dummy_skill(self, skill_id, skill_description, explanation): rubrics = [ skill_domain.Rubric( constants.SKILL_DIFFICULTIES[0], ['Explanation 1']), skill_domain.Rubric( constants.SKILL_DIFFICULTIES[1], ['Explanation 2']), skill_domain.Rubric( constants.SKILL_DIFFICULTIES[2], ['Explanation 3'])] skill = skill_domain.Skill.create_default_skill( skill_id, skill_description, rubrics) skill.update_explanation(state_domain.SubtitledHtml('1', explanation)) return skill def _load_dummy_new_structures_data(self): if constants.DEV_MODE: if feconf.ROLE_ID_CURRICULUM_ADMIN not in self.user.roles: raise Exception( 'User does not have enough rights to generate data.') topic_id_1 = topic_fetchers.get_new_topic_id() topic_id_2 = topic_fetchers.get_new_topic_id() story_id = story_services.get_new_story_id() skill_id_1 = skill_services.get_new_skill_id() skill_id_2 = skill_services.get_new_skill_id() skill_id_3 = skill_services.get_new_skill_id() question_id_1 = question_services.get_new_question_id() question_id_2 = question_services.get_new_question_id() question_id_3 = question_services.get_new_question_id() skill_1 = self._create_dummy_skill( skill_id_1, 'Dummy Skill 1', '<p>Dummy Explanation 1</p>') skill_2 = self._create_dummy_skill( skill_id_2, 'Dummy Skill 2', '<p>Dummy Explanation 2</p>') skill_3 = self._create_dummy_skill( skill_id_3, 'Dummy Skill 3', '<p>Dummy Explanation 3</p>') question_1 = self._create_dummy_question( question_id_1, 'Question 1', [skill_id_1]) question_2 = self._create_dummy_question( question_id_2, 'Question 2', [skill_id_2]) 
question_3 = self._create_dummy_question( question_id_3, 'Question 3', [skill_id_3]) question_services.add_question(self.user_id, question_1) question_services.add_question(self.user_id, question_2) question_services.add_question(self.user_id, question_3) question_services.create_new_question_skill_link( self.user_id, question_id_1, skill_id_1, 0.3) question_services.create_new_question_skill_link( self.user_id, question_id_2, skill_id_2, 0.5) question_services.create_new_question_skill_link( self.user_id, question_id_3, skill_id_3, 0.7) topic_1 = topic_domain.Topic.create_default_topic( topic_id_1, 'Dummy Topic 1', 'dummy-topic-one', 'description') topic_2 = topic_domain.Topic.create_default_topic( topic_id_2, 'Empty Topic', 'empty-topic', 'description') topic_1.add_canonical_story(story_id) topic_1.add_uncategorized_skill_id(skill_id_1) topic_1.add_uncategorized_skill_id(skill_id_2) topic_1.add_uncategorized_skill_id(skill_id_3) topic_1.add_subtopic(1, 'Dummy Subtopic Title') topic_1.move_skill_id_to_subtopic(None, 1, skill_id_2) topic_1.move_skill_id_to_subtopic(None, 1, skill_id_3) subtopic_page = ( subtopic_page_domain.SubtopicPage.create_default_subtopic_page( 1, topic_id_1)) self._reload_exploration('15') self._reload_exploration('25') self._reload_exploration('13') exp_services.update_exploration( self.user_id, '15', [exp_domain.ExplorationChange({ 'cmd': exp_domain.CMD_EDIT_EXPLORATION_PROPERTY, 'property_name': 'correctness_feedback_enabled', 'new_value': True })], 'Changed correctness_feedback_enabled.') exp_services.update_exploration( self.user_id, '25', [exp_domain.ExplorationChange({ 'cmd': exp_domain.CMD_EDIT_EXPLORATION_PROPERTY, 'property_name': 'correctness_feedback_enabled', 'new_value': True })], 'Changed correctness_feedback_enabled.') exp_services.update_exploration( self.user_id, '13', [exp_domain.ExplorationChange({ 'cmd': exp_domain.CMD_EDIT_EXPLORATION_PROPERTY, 'property_name': 'correctness_feedback_enabled', 'new_value': True })], 
'Changed correctness_feedback_enabled.') story = story_domain.Story.create_default_story( story_id, 'Help Jaime win the Arcade', 'Description', topic_id_1, 'help-jamie-win-arcade') story_node_dicts = [{ 'exp_id': '15', 'title': 'What are the place values?', 'description': 'Jaime learns the place value of each digit ' + 'in a big number.' }, { 'exp_id': '25', 'title': 'Finding the value of a number', 'description': 'Jaime understands the value of his ' + 'arcade score.' }, { 'exp_id': '13', 'title': 'Comparing Numbers', 'description': 'Jaime learns if a number is smaller or ' + 'greater than another number.' }] def generate_dummy_story_nodes(node_id, exp_id, title, description): story.add_node( '%s%d' % (story_domain.NODE_ID_PREFIX, node_id), title) story.update_node_description( '%s%d' % (story_domain.NODE_ID_PREFIX, node_id), description) story.update_node_exploration_id( '%s%d' % (story_domain.NODE_ID_PREFIX, node_id), exp_id) if node_id != len(story_node_dicts): story.update_node_destination_node_ids( '%s%d' % (story_domain.NODE_ID_PREFIX, node_id), ['%s%d' % (story_domain.NODE_ID_PREFIX, node_id + 1)]) exp_services.update_exploration( self.user_id, exp_id, [exp_domain.ExplorationChange({ 'cmd': exp_domain.CMD_EDIT_EXPLORATION_PROPERTY, 'property_name': 'category', 'new_value': 'Astronomy' })], 'Change category') for i, story_node_dict in enumerate(story_node_dicts): generate_dummy_story_nodes(i + 1, **story_node_dict) skill_services.save_new_skill(self.user_id, skill_1) skill_services.save_new_skill(self.user_id, skill_2) skill_services.save_new_skill(self.user_id, skill_3) story_services.save_new_story(self.user_id, story) topic_services.save_new_topic(self.user_id, topic_1) topic_services.save_new_topic(self.user_id, topic_2) subtopic_page_services.save_subtopic_page( self.user_id, subtopic_page, 'Added subtopic', [topic_domain.TopicChange({ 'cmd': topic_domain.CMD_ADD_SUBTOPIC, 'subtopic_id': 1, 'title': 'Dummy Subtopic Title' })] ) exp_ids_in_story = 
story.story_contents.get_all_linked_exp_ids() opportunity_services.add_new_exploration_opportunities( story_id, exp_ids_in_story) topic_services.publish_story(topic_id_1, story_id, self.user_id) else: raise Exception('Cannot load new structures data in production.') def _generate_dummy_skill_and_questions(self): if constants.DEV_MODE: if feconf.ROLE_ID_CURRICULUM_ADMIN not in self.user.roles: raise Exception( 'User does not have enough rights to generate data.') skill_id = skill_services.get_new_skill_id() skill_name = 'Dummy Skill %s' % python_utils.UNICODE( random.getrandbits(32)) skill = self._create_dummy_skill( skill_id, skill_name, '<p>Dummy Explanation 1</p>') skill_services.save_new_skill(self.user_id, skill) for i in range(15): question_id = question_services.get_new_question_id() question_name = 'Question number %s %s' % ( python_utils.UNICODE(i), skill_name) question = self._create_dummy_question( question_id, question_name, [skill_id]) question_services.add_question(self.user_id, question) question_difficulty = list( constants.SKILL_DIFFICULTY_LABEL_TO_FLOAT.values()) random_difficulty = random.choice(question_difficulty) question_services.create_new_question_skill_link( self.user_id, question_id, skill_id, random_difficulty) else: raise Exception('Cannot generate dummy skills in production.') def _reload_collection(self, collection_id): if constants.DEV_MODE: logging.info( '[ADMIN] %s reloaded collection %s' % (self.user_id, collection_id)) collection_services.load_demo(collection_id) rights_manager.release_ownership_of_collection( user_services.get_system_user(), collection_id) else: raise Exception('Cannot reload a collection in production.') def _generate_dummy_explorations( self, num_dummy_exps_to_generate, num_dummy_exps_to_publish): if constants.DEV_MODE: logging.info( '[ADMIN] %s generated %s number of dummy explorations' % (self.user_id, num_dummy_exps_to_generate)) possible_titles = ['Hulk Neuroscience', 'Quantum Starks', 'Wonder Anatomy', 
'Elvish, language of "Lord of the Rings', 'The Science of Superheroes'] exploration_ids_to_publish = [] for i in range(num_dummy_exps_to_generate): title = random.choice(possible_titles) category = random.choice(constants.SEARCH_DROPDOWN_CATEGORIES) new_exploration_id = exp_fetchers.get_new_exploration_id() exploration = exp_domain.Exploration.create_default_exploration( new_exploration_id, title=title, category=category, objective='Dummy Objective') exp_services.save_new_exploration(self.user_id, exploration) if i <= num_dummy_exps_to_publish - 1: exploration_ids_to_publish.append(new_exploration_id) rights_manager.publish_exploration( self.user, new_exploration_id) exp_services.index_explorations_given_ids( exploration_ids_to_publish) else: raise Exception('Cannot generate dummy explorations in production.') class AdminRoleHandler(base.BaseHandler): GET_HANDLER_ERROR_RETURN_TYPE = feconf.HANDLER_TYPE_JSON URL_PATH_ARGS_SCHEMAS = {} HANDLER_ARGS_SCHEMAS = { 'GET': { 'filter_criterion': { 'schema': { 'type': 'basestring', 'choices': [ feconf.USER_FILTER_CRITERION_ROLE, feconf.USER_FILTER_CRITERION_USERNAME ] } }, 'role': { 'schema': { 'type': 'basestring', 'choices': role_services.VIEWABLE_ROLES }, 'default_value': None }, 'username': { 'schema': { 'type': 'basestring' }, 'default_value': None } }, 'PUT': { 'role': { 'schema': { 'type': 'basestring', 'choices': feconf.ALLOWED_USER_ROLES } }, 'username': { 'schema': { 'type': 'basestring' } } }, 'DELETE': { 'role': { 'schema': { 'type': 'basestring', 'choices': feconf.ALLOWED_USER_ROLES } }, 'username': { 'schema': { 'type': 'basestring' } } } } @acl_decorators.can_access_admin_page def get(self): filter_criterion = self.normalized_request.get( 'filter_criterion') if filter_criterion == feconf.USER_FILTER_CRITERION_ROLE: role = self.normalized_request.get( feconf.USER_FILTER_CRITERION_ROLE) role_services.log_role_query( self.user_id, feconf.ROLE_ACTION_VIEW_BY_ROLE, role=role) self.render_json({ 'usernames': 
user_services.get_usernames_by_role(role) }) elif filter_criterion == feconf.USER_FILTER_CRITERION_USERNAME: username = self.normalized_request.get( feconf.USER_FILTER_CRITERION_USERNAME) user_id = user_services.get_user_id_from_username(username) role_services.log_role_query( self.user_id, feconf.ROLE_ACTION_VIEW_BY_USERNAME, username=username) if user_id is None: raise self.InvalidInputException( 'User with given username does not exist.') user_settings = user_services.get_user_settings(user_id) user_roles = user_settings.roles managed_topic_ids = [] if feconf.ROLE_ID_TOPIC_MANAGER in user_roles: managed_topic_ids = [ rights.id for rights in topic_fetchers.get_topic_rights_with_user(user_id)] user_roles_dict = { 'roles': user_roles, 'managed_topic_ids': managed_topic_ids, 'banned': user_settings.banned } self.render_json(user_roles_dict) @acl_decorators.can_access_admin_page def put(self): username = self.payload.get('username') role = self.payload.get('role') user_settings = user_services.get_user_settings_from_username(username) if user_settings is None: raise self.InvalidInputException( 'User with given username does not exist.') if role == feconf.ROLE_ID_TOPIC_MANAGER: # The Topic manager role assignment is handled via # TopicManagerRoleHandler. 
raise self.InvalidInputException( 'Unsupported role for this handler.') user_services.add_user_role(user_settings.user_id, role) self.render_json({}) @acl_decorators.can_access_admin_page def delete(self): username = self.request.get('username') role = self.request.get('role') user_id = user_services.get_user_id_from_username(username) if user_id is None: raise self.InvalidInputException( 'User with given username does not exist.') if role == feconf.ROLE_ID_TOPIC_MANAGER: topic_services.deassign_user_from_all_topics(self.user, user_id) user_services.remove_user_role(user_id, role) self.render_json({}) class TopicManagerRoleHandler(base.BaseHandler): GET_HANDLER_ERROR_RETURN_TYPE = feconf.HANDLER_TYPE_JSON URL_PATH_ARGS_SCHEMAS = {} HANDLER_ARGS_SCHEMAS = { 'PUT': { 'username': { 'schema': { 'type': 'basestring' } }, 'action': { 'schema': { 'type': 'basestring', 'choices': ['assign', 'deassign'] } }, 'topic_id': { 'schema': { 'type': 'basestring' } } } } @acl_decorators.can_access_admin_page def put(self): username = self.normalized_payload.get('username') action = self.normalized_payload.get('action') topic_id = self.normalized_payload.get('topic_id') user_settings = user_services.get_user_settings_from_username(username) if user_settings is None: raise self.InvalidInputException( 'User with given username does not exist.') user_id = user_settings.user_id if action == 'assign': if not feconf.ROLE_ID_TOPIC_MANAGER in user_settings.roles: user_services.add_user_role( user_id, feconf.ROLE_ID_TOPIC_MANAGER) topic_manager = user_services.get_user_actions_info(user_id) topic_services.assign_role( user_services.get_system_user(), topic_manager, topic_domain.ROLE_MANAGER, topic_id) elif action == 'deassign': topic_services.deassign_manager_role_from_topic( user_services.get_system_user(), user_id, topic_id) if not topic_fetchers.get_topic_rights_with_user(user_id): user_services.remove_user_role( user_id, feconf.ROLE_ID_TOPIC_MANAGER) self.render_json({}) class 
BannedUsersHandler(base.BaseHandler): GET_HANDLER_ERROR_RETURN_TYPE = feconf.HANDLER_TYPE_JSON URL_PATH_ARGS_SCHEMAS = {} HANDLER_ARGS_SCHEMAS = { 'PUT': { 'username': { 'schema': { 'type': 'basestring' } } }, 'DELETE': { 'username': { 'schema': { 'type': 'basestring' } } } } @acl_decorators.can_access_admin_page def put(self): username = self.normalized_payload.get('username') user_id = user_services.get_user_id_from_username(username) if user_id is None: raise self.InvalidInputException( 'User with given username does not exist.') topic_services.deassign_user_from_all_topics(self.user, user_id) user_services.mark_user_banned(user_id) self.render_json({}) @acl_decorators.can_access_admin_page def delete(self): username = self.normalized_request.get('username') user_id = user_services.get_user_id_from_username(username) if user_id is None: raise self.InvalidInputException( 'User with given username does not exist.') user_services.unmark_user_banned(user_id) self.render_json({}) class AdminSuperAdminPrivilegesHandler(base.BaseHandler): PUT_HANDLER_ERROR_RETURN_TYPE = feconf.HANDLER_TYPE_JSON DELETE_HANDLER_ERROR_RETURN_TYPE = feconf.HANDLER_TYPE_JSON URL_PATH_ARGS_SCHEMAS = {} HANDLER_ARGS_SCHEMAS = { 'PUT': { 'username': { 'schema': { 'type': 'basestring' } } }, 'DELETE': { 'username': { 'schema': { 'type': 'basestring' } } } } @acl_decorators.can_access_admin_page def put(self): if self.email != feconf.ADMIN_EMAIL_ADDRESS: raise self.UnauthorizedUserException( 'Only the default system admin can manage super admins') username = self.normalized_payload.get('username') user_id = user_services.get_user_id_from_username(username) if user_id is None: raise self.InvalidInputException('No such user exists') auth_services.grant_super_admin_privileges(user_id) self.render_json(self.values) @acl_decorators.can_access_admin_page def delete(self): if self.email != feconf.ADMIN_EMAIL_ADDRESS: raise self.UnauthorizedUserException( 'Only the default system admin can manage super 
admins') username = self.normalized_request.get('username') user_settings = user_services.get_user_settings_from_username(username) if user_settings is None: raise self.InvalidInputException('No such user exists') if user_settings.email == feconf.ADMIN_EMAIL_ADDRESS: raise self.InvalidInputException( 'Cannot revoke privileges from the default super admin account') auth_services.revoke_super_admin_privileges(user_settings.user_id) self.render_json(self.values) class AdminTopicsCsvFileDownloader(base.BaseHandler): GET_HANDLER_ERROR_RETURN_TYPE = feconf.HANDLER_TYPE_DOWNLOADABLE URL_PATH_ARGS_SCHEMAS = {} HANDLER_ARGS_SCHEMAS = {'GET': {}} @acl_decorators.can_access_admin_page def get(self): topic_similarities = ( recommendations_services.get_topic_similarities_as_csv() ) # Downloadable file accepts only bytes, so we need to encode # topic_similarities to bytes. self.render_downloadable_file( io.BytesIO(topic_similarities.encode('utf-8')), 'topic_similarities.csv', 'text/csv' ) class DataExtractionQueryHandler(base.BaseHandler): GET_HANDLER_ERROR_RETURN_TYPE = feconf.HANDLER_TYPE_JSON URL_PATH_ARGS_SCHEMAS = {} HANDLER_ARGS_SCHEMAS = { 'GET': { 'exp_id': { 'schema': { 'type': 'basestring' } }, 'exp_version': { 'schema': { 'type': 'int' } }, 'state_name': { 'schema': { 'type': 'basestring' } }, 'num_answers': { 'schema': { 'type': 'int' } } } } @acl_decorators.can_access_admin_page def get(self): exp_id = self.normalized_request.get('exp_id') exp_version = self.normalized_request.get('exp_version') exploration = exp_fetchers.get_exploration_by_id( exp_id, strict=False, version=exp_version) if exploration is None: raise self.InvalidInputException( 'Entity for exploration with id %s and version %s not found.' % (exp_id, exp_version)) state_name = self.normalized_request.get('state_name') num_answers = self.normalized_request.get('num_answers') if state_name not in exploration.states: raise self.InvalidInputException( 'Exploration \'%s\' does not have \'%s\' state.' 
% (exp_id, state_name)) state_answers = stats_services.get_state_answers( exp_id, exp_version, state_name) extracted_answers = state_answers.get_submitted_answer_dict_list() if num_answers > 0: extracted_answers = extracted_answers[:num_answers] response = { 'data': extracted_answers } self.render_json(response) class SendDummyMailToAdminHandler(base.BaseHandler): URL_PATH_ARGS_SCHEMAS = {} HANDLER_ARGS_SCHEMAS = {'POST': {}} @acl_decorators.can_access_admin_page def post(self): username = self.username if feconf.CAN_SEND_EMAILS: email_manager.send_dummy_mail_to_admin(username) self.render_json({}) else: raise self.InvalidInputException('This app cannot send emails.') class UpdateUsernameHandler(base.BaseHandler): URL_PATH_ARGS_SCHEMAS = {} HANDLER_ARGS_SCHEMAS = { 'PUT': { 'old_username': { 'schema': { 'type': 'basestring' } }, 'new_username': { 'schema': { 'type': 'basestring', 'validators': [{ 'id': 'has_length_at_most', 'max_value': constants.MAX_USERNAME_LENGTH }] } } } } @acl_decorators.can_access_admin_page def put(self): old_username = self.normalized_payload.get('old_username') new_username = self.normalized_payload.get('new_username') user_id = user_services.get_user_id_from_username(old_username) if user_id is None: raise self.InvalidInputException( 'Invalid username: %s' % old_username) if user_services.is_username_taken(new_username): raise self.InvalidInputException('Username already taken.') user_services.set_username(user_id, new_username) user_services.log_username_change( self.user_id, old_username, new_username) self.render_json({}) class NumberOfDeletionRequestsHandler(base.BaseHandler): GET_HANDLER_ERROR_RETURN_TYPE = feconf.HANDLER_TYPE_JSON URL_PATH_ARGS_SCHEMAS = {} HANDLER_ARGS_SCHEMAS = {'GET': {}} @acl_decorators.can_access_admin_page def get(self): self.render_json({ 'number_of_pending_deletion_models': ( wipeout_service.get_number_of_pending_deletion_requests()) }) class VerifyUserModelsDeletedHandler(base.BaseHandler): 
GET_HANDLER_ERROR_RETURN_TYPE = feconf.HANDLER_TYPE_JSON URL_PATH_ARGS_SCHEMAS = {} HANDLER_ARGS_SCHEMAS = { 'GET': { 'user_id': { 'schema': { 'type': 'basestring' } } } } @acl_decorators.can_access_admin_page def get(self): user_id = self.normalized_request.get('user_id') user_is_deleted = wipeout_service.verify_user_deleted( user_id, include_delete_at_end_models=True) self.render_json({'related_models_exist': not user_is_deleted}) class DeleteUserHandler(base.BaseHandler): URL_PATH_ARGS_SCHEMAS = {} HANDLER_ARGS_SCHEMAS = { 'DELETE': { 'user_id': { 'schema': { 'type': 'basestring' } }, 'username': { 'schema': { 'type': 'basestring' } } } } @acl_decorators.can_delete_any_user def delete(self): user_id = self.normalized_request.get('user_id') username = self.normalized_request.get('username') user_id_from_username = ( user_services.get_user_id_from_username(username)) if user_id_from_username is None: raise self.InvalidInputException( 'The username doesn\'t belong to any user' ) if user_id_from_username != user_id: raise self.InvalidInputException( 'The user ID retrieved from the username and ' 'the user ID provided by admin differ.' 
) wipeout_service.pre_delete_user(user_id) self.render_json({'success': True}) class UpdateBlogPostHandler(base.BaseHandler): URL_PATH_ARGS_SCHEMAS = {} HANDLER_ARGS_SCHEMAS = { 'PUT': { 'blog_post_id': { 'schema': { 'type': 'basestring' } }, 'author_username': { 'schema': { 'type': 'basestring', 'validators': [{ 'id': 'has_length_at_most', 'max_value': constants.MAX_USERNAME_LENGTH }] } }, 'published_on': { 'schema': { 'type': 'basestring' } } } } @acl_decorators.can_access_admin_page def put(self): blog_post_id = self.normalized_payload.get('blog_post_id') author_username = self.normalized_payload.get('author_username') published_on = self.normalized_payload.get('published_on') author_id = user_services.get_user_id_from_username(author_username) if author_id is None: raise self.InvalidInputException( 'Invalid username: %s' % author_username) user_actions = user_services.get_user_actions_info(author_id).actions if role_services.ACTION_ACCESS_BLOG_DASHBOARD not in user_actions: raise self.InvalidInputException( 'User does not have enough rights to be blog post author.') blog_post = ( blog_services.get_blog_post_by_id(blog_post_id, strict=False)) if blog_post is None: raise self.PageNotFoundException( Exception( 'The blog post with the given id or url doesn\'t exist.')) blog_services.update_blog_models_author_and_published_on_date( blog_post_id, author_id, published_on) self.render_json({})
true
true
f723f6aed494f61892583333010aad58dbc25f9a
1,503
py
Python
test/test_maintenance_configuration_api.py
cvent/octopus-deploy-api-client
0e03e842e1beb29b132776aee077df570b88366a
[ "Apache-2.0" ]
null
null
null
test/test_maintenance_configuration_api.py
cvent/octopus-deploy-api-client
0e03e842e1beb29b132776aee077df570b88366a
[ "Apache-2.0" ]
null
null
null
test/test_maintenance_configuration_api.py
cvent/octopus-deploy-api-client
0e03e842e1beb29b132776aee077df570b88366a
[ "Apache-2.0" ]
null
null
null
# coding: utf-8 """ Octopus Server API No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) # noqa: E501 OpenAPI spec version: 2019.6.7+Branch.tags-2019.6.7.Sha.aa18dc6809953218c66f57eff7d26481d9b23d6a Generated by: https://github.com/swagger-api/swagger-codegen.git """ from __future__ import absolute_import import unittest import octopus_deploy_swagger_client from octopus_deploy_client.maintenance_configuration_api import MaintenanceConfigurationApi # noqa: E501 from octopus_deploy_swagger_client.rest import ApiException class TestMaintenanceConfigurationApi(unittest.TestCase): """MaintenanceConfigurationApi unit test stubs""" def setUp(self): self.api = octopus_deploy_client.maintenance_configuration_api.MaintenanceConfigurationApi() # noqa: E501 def tearDown(self): pass def test_custom_action_response_descriptor_octopus_server_web_api_actions_maintenance_configuration_get_action(self): """Test case for custom_action_response_descriptor_octopus_server_web_api_actions_maintenance_configuration_get_action """ pass def test_custom_action_response_descriptor_octopus_server_web_api_actions_maintenance_configuration_update_action(self): """Test case for custom_action_response_descriptor_octopus_server_web_api_actions_maintenance_configuration_update_action """ pass if __name__ == '__main__': unittest.main()
31.978723
129
0.795742
from __future__ import absolute_import import unittest import octopus_deploy_swagger_client from octopus_deploy_client.maintenance_configuration_api import MaintenanceConfigurationApi from octopus_deploy_swagger_client.rest import ApiException class TestMaintenanceConfigurationApi(unittest.TestCase): def setUp(self): self.api = octopus_deploy_client.maintenance_configuration_api.MaintenanceConfigurationApi() def tearDown(self): pass def test_custom_action_response_descriptor_octopus_server_web_api_actions_maintenance_configuration_get_action(self): pass def test_custom_action_response_descriptor_octopus_server_web_api_actions_maintenance_configuration_update_action(self): pass if __name__ == '__main__': unittest.main()
true
true
f723f6d3146fbf9c9696d5956391430c0797a221
6,843
py
Python
src/azure-cli/setup.py
t-bzhan/azure-cli
d64b25204b661438e9284f261bc5a11f3221c837
[ "MIT" ]
null
null
null
src/azure-cli/setup.py
t-bzhan/azure-cli
d64b25204b661438e9284f261bc5a11f3221c837
[ "MIT" ]
null
null
null
src/azure-cli/setup.py
t-bzhan/azure-cli
d64b25204b661438e9284f261bc5a11f3221c837
[ "MIT" ]
null
null
null
#!/usr/bin/env python # -------------------------------------------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. # Licensed under the MIT License. See License.txt in the project root for license information. # -------------------------------------------------------------------------------------------- from __future__ import print_function from codecs import open from setuptools import setup, find_packages import sys try: from azure_cli_bdist_wheel import cmdclass except ImportError: from distutils import log as logger logger.warn("Wheel is not available, disabling bdist_wheel hook") cmdclass = {} VERSION = "2.20.0" # If we have source, validate that our version numbers match # This should prevent uploading releases with mismatched versions. try: with open('azure/cli/__main__.py', 'r', encoding='utf-8') as f: content = f.read() except OSError: pass else: import re m = re.search(r'__version__\s*=\s*[\'"](.+?)[\'"]', content) if not m: print('Could not find __version__ in azure/cli/__main__.py') sys.exit(1) if m.group(1) != VERSION: print('Expected __version__ = "{}"; found "{}"'.format(VERSION, m.group(1))) sys.exit(1) CLASSIFIERS = [ 'Development Status :: 5 - Production/Stable', 'Intended Audience :: Developers', 'Intended Audience :: System Administrators', 'Programming Language :: Python', 'Programming Language :: Python :: 3', 'Programming Language :: Python :: 3.6', 'Programming Language :: Python :: 3.7', 'Programming Language :: Python :: 3.8', 'License :: OSI Approved :: MIT License', ] DEPENDENCIES = [ 'antlr4-python3-runtime~=4.7.2', 'azure-appconfiguration~=1.1.1', 'azure-batch~=10.0.0', 'azure-cli-core=={}'.format(VERSION), 'azure-cosmos~=3.0,>=3.0.2', 'azure-datalake-store~=0.0.49', 'azure-functions-devops-build~=0.0.22', 'azure-graphrbac~=0.60.0', 'azure-keyvault~=1.1.0', 'azure-keyvault-administration==4.0.0b3', 'azure-mgmt-advisor>=2.0.1,<3.0.0', 'azure-mgmt-apimanagement~=0.2.0', 
'azure-mgmt-applicationinsights~=0.1.1', 'azure-mgmt-appconfiguration~=1.0.1', 'azure-mgmt-authorization~=0.61.0', 'azure-mgmt-batch~=9.0.0', 'azure-mgmt-batchai~=2.0', 'azure-mgmt-billing==1.0.0', 'azure-mgmt-botservice~=0.3.0', 'azure-mgmt-cdn==7.0.0', 'azure-mgmt-cognitiveservices~=6.3.0', 'azure-mgmt-compute~=19.0.0', 'azure-mgmt-consumption~=2.0', 'azure-mgmt-containerinstance~=1.4', 'azure-mgmt-containerregistry==3.0.0rc17', 'azure-mgmt-cosmosdb~=3.0.0', 'azure-mgmt-containerservice~=9.4.0', 'azure-mgmt-databoxedge~=0.2.0', 'azure-mgmt-datalake-analytics~=0.2.1', 'azure-mgmt-datalake-store~=0.5.0', 'azure-mgmt-datamigration~=4.1.0', 'azure-mgmt-deploymentmanager~=0.2.0', 'azure-mgmt-devtestlabs~=4.0', 'azure-mgmt-dns~=2.1', 'azure-mgmt-eventgrid==3.0.0rc7', 'azure-mgmt-eventhub~=4.1.0', 'azure-mgmt-hdinsight~=2.2.0', 'azure-mgmt-imagebuilder~=0.4.0', 'azure-mgmt-iotcentral~=4.1.0', 'azure-mgmt-iothub~=0.12.0', 'azure-mgmt-iothubprovisioningservices~=0.2.0', 'azure-mgmt-keyvault==8.0.0', 'azure-mgmt-kusto~=0.3.0', 'azure-mgmt-loganalytics~=8.0.0', 'azure-mgmt-managedservices~=1.0', 'azure-mgmt-managementgroups~=0.1', 'azure-mgmt-maps~=0.1.0', 'azure-mgmt-marketplaceordering~=0.1', 'azure-mgmt-media~=3.0', 'azure-mgmt-monitor~=2.0.0', 'azure-mgmt-msi~=0.2', 'azure-mgmt-netapp~=0.16.0', 'azure-mgmt-network~=17.1.0', 'azure-mgmt-policyinsights~=0.5.0', 'azure-mgmt-privatedns~=0.1.0', 'azure-mgmt-rdbms~=3.1.0rc1', 'azure-mgmt-recoveryservices~=0.4.0', 'azure-mgmt-recoveryservicesbackup~=0.11.0', 'azure-mgmt-redhatopenshift==0.1.0', 'azure-mgmt-redis~=7.0.0rc1', 'azure-mgmt-relay~=0.1.0', # 'azure-mgmt-reservations~=0.6.0', 'azure-mgmt-reservations==0.6.0', # TODO: Use requirements.txt instead of '==' #9781 'azure-mgmt-resource==12.0.0', 'azure-mgmt-search~=8.0', 'azure-mgmt-security~=0.6.0', 'azure-mgmt-servicebus~=0.6.0', 'azure-mgmt-servicefabric~=0.5.0', 'azure-mgmt-signalr~=0.4.0', 'azure-mgmt-sql~=0.26.0', 'azure-mgmt-sqlvirtualmachine~=0.5.0', 
'azure-mgmt-storage~=17.0.0', 'azure-mgmt-trafficmanager~=0.51.0', 'azure-mgmt-web~=0.48.0', 'azure-mgmt-synapse~=0.6.0', 'azure-multiapi-storage~=0.6.0', 'azure-loganalytics~=0.1.0', 'azure-storage-common~=1.4', 'azure-synapse-accesscontrol~=0.2.0', 'azure-synapse-artifacts~=0.3.0', 'azure-synapse-spark~=0.2.0', 'fabric~=2.4', 'jsmin~=2.2.2', 'pytz==2019.1', 'scp~=0.13.2', 'sshtunnel~=0.1.4', 'urllib3[secure]>=1.25.9,<2.0.0', 'vsts-cd-manager~=1.0.0,>=1.0.2', 'websocket-client~=0.56.0', 'xmltodict~=0.12', 'javaproperties==0.5.1', 'jsondiff==1.2.0', 'semver==2.13.0' ] TESTS_REQUIRE = [ 'mock~=4.0' ] with open('README.rst', 'r', encoding='utf-8') as f: README = f.read() with open('HISTORY.rst', 'r', encoding='utf-8') as f: HISTORY = f.read() setup( name='azure-cli', version=VERSION, description='Microsoft Azure Command-Line Tools', long_description=README + '\n\n' + HISTORY, license='MIT', author='Microsoft Corporation', author_email='azpycli@microsoft.com', url='https://github.com/Azure/azure-cli', zip_safe=False, classifiers=CLASSIFIERS, scripts=[ 'az', 'az.completion.sh', 'az.bat', ], packages=find_packages(exclude=["*.tests", "*.tests.*", "tests.*", "tests", "azure", "azure.cli"]), install_requires=DEPENDENCIES, python_requires='>=3.6.0', package_data={ 'azure.cli.command_modules.acr': ['*.json'], 'azure.cli.command_modules.botservice': ['*.json', '*.config'], 'azure.cli.command_modules.monitor.operations': ['autoscale-parameters-template.json'], 'azure.cli.command_modules.servicefabric': [ 'template/windows/template.json', 'template/windows/parameter.json', 'template/linux/template.json', 'template/linux/parameter.json', 'template/service/template.json', 'template/service/parameter.json' ], 'azure.cli.command_modules.appservice': [ 'resources/WindowsFunctionsStacks.json', 'resources/LinuxFunctionsStacks.json', 'resources/WebappRuntimeStacks.json', 'resources/GenerateRandomAppNames.json' ], 'azure.cli.command_modules.rdbms': [ 'randomname/adjectives.txt', 
'randomname/nouns.txt' ] }, cmdclass=cmdclass )
34.044776
103
0.604998
from __future__ import print_function from codecs import open from setuptools import setup, find_packages import sys try: from azure_cli_bdist_wheel import cmdclass except ImportError: from distutils import log as logger logger.warn("Wheel is not available, disabling bdist_wheel hook") cmdclass = {} VERSION = "2.20.0" try: with open('azure/cli/__main__.py', 'r', encoding='utf-8') as f: content = f.read() except OSError: pass else: import re m = re.search(r'__version__\s*=\s*[\'"](.+?)[\'"]', content) if not m: print('Could not find __version__ in azure/cli/__main__.py') sys.exit(1) if m.group(1) != VERSION: print('Expected __version__ = "{}"; found "{}"'.format(VERSION, m.group(1))) sys.exit(1) CLASSIFIERS = [ 'Development Status :: 5 - Production/Stable', 'Intended Audience :: Developers', 'Intended Audience :: System Administrators', 'Programming Language :: Python', 'Programming Language :: Python :: 3', 'Programming Language :: Python :: 3.6', 'Programming Language :: Python :: 3.7', 'Programming Language :: Python :: 3.8', 'License :: OSI Approved :: MIT License', ] DEPENDENCIES = [ 'antlr4-python3-runtime~=4.7.2', 'azure-appconfiguration~=1.1.1', 'azure-batch~=10.0.0', 'azure-cli-core=={}'.format(VERSION), 'azure-cosmos~=3.0,>=3.0.2', 'azure-datalake-store~=0.0.49', 'azure-functions-devops-build~=0.0.22', 'azure-graphrbac~=0.60.0', 'azure-keyvault~=1.1.0', 'azure-keyvault-administration==4.0.0b3', 'azure-mgmt-advisor>=2.0.1,<3.0.0', 'azure-mgmt-apimanagement~=0.2.0', 'azure-mgmt-applicationinsights~=0.1.1', 'azure-mgmt-appconfiguration~=1.0.1', 'azure-mgmt-authorization~=0.61.0', 'azure-mgmt-batch~=9.0.0', 'azure-mgmt-batchai~=2.0', 'azure-mgmt-billing==1.0.0', 'azure-mgmt-botservice~=0.3.0', 'azure-mgmt-cdn==7.0.0', 'azure-mgmt-cognitiveservices~=6.3.0', 'azure-mgmt-compute~=19.0.0', 'azure-mgmt-consumption~=2.0', 'azure-mgmt-containerinstance~=1.4', 'azure-mgmt-containerregistry==3.0.0rc17', 'azure-mgmt-cosmosdb~=3.0.0', 
'azure-mgmt-containerservice~=9.4.0', 'azure-mgmt-databoxedge~=0.2.0', 'azure-mgmt-datalake-analytics~=0.2.1', 'azure-mgmt-datalake-store~=0.5.0', 'azure-mgmt-datamigration~=4.1.0', 'azure-mgmt-deploymentmanager~=0.2.0', 'azure-mgmt-devtestlabs~=4.0', 'azure-mgmt-dns~=2.1', 'azure-mgmt-eventgrid==3.0.0rc7', 'azure-mgmt-eventhub~=4.1.0', 'azure-mgmt-hdinsight~=2.2.0', 'azure-mgmt-imagebuilder~=0.4.0', 'azure-mgmt-iotcentral~=4.1.0', 'azure-mgmt-iothub~=0.12.0', 'azure-mgmt-iothubprovisioningservices~=0.2.0', 'azure-mgmt-keyvault==8.0.0', 'azure-mgmt-kusto~=0.3.0', 'azure-mgmt-loganalytics~=8.0.0', 'azure-mgmt-managedservices~=1.0', 'azure-mgmt-managementgroups~=0.1', 'azure-mgmt-maps~=0.1.0', 'azure-mgmt-marketplaceordering~=0.1', 'azure-mgmt-media~=3.0', 'azure-mgmt-monitor~=2.0.0', 'azure-mgmt-msi~=0.2', 'azure-mgmt-netapp~=0.16.0', 'azure-mgmt-network~=17.1.0', 'azure-mgmt-policyinsights~=0.5.0', 'azure-mgmt-privatedns~=0.1.0', 'azure-mgmt-rdbms~=3.1.0rc1', 'azure-mgmt-recoveryservices~=0.4.0', 'azure-mgmt-recoveryservicesbackup~=0.11.0', 'azure-mgmt-redhatopenshift==0.1.0', 'azure-mgmt-redis~=7.0.0rc1', 'azure-mgmt-relay~=0.1.0', 'azure-mgmt-reservations==0.6.0', 'azure-mgmt-resource==12.0.0', 'azure-mgmt-search~=8.0', 'azure-mgmt-security~=0.6.0', 'azure-mgmt-servicebus~=0.6.0', 'azure-mgmt-servicefabric~=0.5.0', 'azure-mgmt-signalr~=0.4.0', 'azure-mgmt-sql~=0.26.0', 'azure-mgmt-sqlvirtualmachine~=0.5.0', 'azure-mgmt-storage~=17.0.0', 'azure-mgmt-trafficmanager~=0.51.0', 'azure-mgmt-web~=0.48.0', 'azure-mgmt-synapse~=0.6.0', 'azure-multiapi-storage~=0.6.0', 'azure-loganalytics~=0.1.0', 'azure-storage-common~=1.4', 'azure-synapse-accesscontrol~=0.2.0', 'azure-synapse-artifacts~=0.3.0', 'azure-synapse-spark~=0.2.0', 'fabric~=2.4', 'jsmin~=2.2.2', 'pytz==2019.1', 'scp~=0.13.2', 'sshtunnel~=0.1.4', 'urllib3[secure]>=1.25.9,<2.0.0', 'vsts-cd-manager~=1.0.0,>=1.0.2', 'websocket-client~=0.56.0', 'xmltodict~=0.12', 'javaproperties==0.5.1', 'jsondiff==1.2.0', 
'semver==2.13.0' ] TESTS_REQUIRE = [ 'mock~=4.0' ] with open('README.rst', 'r', encoding='utf-8') as f: README = f.read() with open('HISTORY.rst', 'r', encoding='utf-8') as f: HISTORY = f.read() setup( name='azure-cli', version=VERSION, description='Microsoft Azure Command-Line Tools', long_description=README + '\n\n' + HISTORY, license='MIT', author='Microsoft Corporation', author_email='azpycli@microsoft.com', url='https://github.com/Azure/azure-cli', zip_safe=False, classifiers=CLASSIFIERS, scripts=[ 'az', 'az.completion.sh', 'az.bat', ], packages=find_packages(exclude=["*.tests", "*.tests.*", "tests.*", "tests", "azure", "azure.cli"]), install_requires=DEPENDENCIES, python_requires='>=3.6.0', package_data={ 'azure.cli.command_modules.acr': ['*.json'], 'azure.cli.command_modules.botservice': ['*.json', '*.config'], 'azure.cli.command_modules.monitor.operations': ['autoscale-parameters-template.json'], 'azure.cli.command_modules.servicefabric': [ 'template/windows/template.json', 'template/windows/parameter.json', 'template/linux/template.json', 'template/linux/parameter.json', 'template/service/template.json', 'template/service/parameter.json' ], 'azure.cli.command_modules.appservice': [ 'resources/WindowsFunctionsStacks.json', 'resources/LinuxFunctionsStacks.json', 'resources/WebappRuntimeStacks.json', 'resources/GenerateRandomAppNames.json' ], 'azure.cli.command_modules.rdbms': [ 'randomname/adjectives.txt', 'randomname/nouns.txt' ] }, cmdclass=cmdclass )
true
true
f723f6f2ec0f62ea9f018768408724d303030146
1,583
py
Python
valid_subsequence.py
GerardCod/algoexpert-python
35d7f635f68e0d28eaead815f653bf749aa275cb
[ "Apache-2.0" ]
null
null
null
valid_subsequence.py
GerardCod/algoexpert-python
35d7f635f68e0d28eaead815f653bf749aa275cb
[ "Apache-2.0" ]
null
null
null
valid_subsequence.py
GerardCod/algoexpert-python
35d7f635f68e0d28eaead815f653bf749aa275cb
[ "Apache-2.0" ]
null
null
null
def validateSubSequence(array, sequence): """ ### Description validateSubSequence -> validates if a sequence of elements is a subsequence of a list. ### Parameters - array: the list where it will validate the subsequence. - sequence: the potential subsequence of elements ### Returns - True when the sequence is a valid subsequence of array. - False when the sequence is not a valid subsequence of array. """ arrIdx = 0 seqIdx = 0 while arrIdx < len(array) and seqIdx < len(sequence): if array[arrIdx] == sequence[seqIdx]: seqIdx += 1 arrIdx += 1 return seqIdx == len(sequence) def validateSubSequenceFor(array, sequence): """ ### Description validateSubSequence -> validates if a sequence of elements is a subsequence of a list. ### Parameters array: the list where it will validate the subsequence. sequence: the potential subsequence of elements ### Returns - True when the sequence is a valid subsequence of array. - False when the sequence is not a valid subsequence of array. """ seqIdx = 0 for element in array: if seqIdx == len(sequence): break if element == sequence[seqIdx]: seqIdx += 1 return seqIdx == len(sequence) if __name__ == "__main__": print(validateSubSequence([5, 1, 22, 25, 6, -1, 8, 10], [1, 6, -1, 10])) print(validateSubSequenceFor([5, 1, 22, 25, 6, -1, 8, 10], [1, 6, -1, 10]))
29.867925
94
0.598863
def validateSubSequence(array, sequence): arrIdx = 0 seqIdx = 0 while arrIdx < len(array) and seqIdx < len(sequence): if array[arrIdx] == sequence[seqIdx]: seqIdx += 1 arrIdx += 1 return seqIdx == len(sequence) def validateSubSequenceFor(array, sequence): seqIdx = 0 for element in array: if seqIdx == len(sequence): break if element == sequence[seqIdx]: seqIdx += 1 return seqIdx == len(sequence) if __name__ == "__main__": print(validateSubSequence([5, 1, 22, 25, 6, -1, 8, 10], [1, 6, -1, 10])) print(validateSubSequenceFor([5, 1, 22, 25, 6, -1, 8, 10], [1, 6, -1, 10]))
true
true
f723f81e9a3f4e12dffa9c9a8c28e3e333ab9a3c
3,092
py
Python
fhirbug/Fhir/Resources/address.py
VerdantAI/fhirbug
8a8e2555c0edfeee0a7edbc8d67f2fcb2edd3c2d
[ "MIT" ]
8
2019-01-06T18:11:20.000Z
2022-02-24T02:06:55.000Z
fhirbug/Fhir/Resources/address.py
VerdantAI/fhirbug
8a8e2555c0edfeee0a7edbc8d67f2fcb2edd3c2d
[ "MIT" ]
5
2019-01-25T14:15:35.000Z
2021-06-01T23:22:41.000Z
fhirbug/Fhir/Resources/address.py
VerdantAI/fhirbug
8a8e2555c0edfeee0a7edbc8d67f2fcb2edd3c2d
[ "MIT" ]
3
2020-10-14T23:09:29.000Z
2021-08-09T19:27:31.000Z
#!/usr/bin/env python # -*- coding: utf-8 -*- # # Generated from FHIR 4.0.0-a53ec6ee1b (http://hl7.org/fhir/StructureDefinition/Address) on 2019-01-25. # 2019, SMART Health IT. ## from . import element class Address(element.Element): """ An address expressed using postal conventions (as opposed to GPS or other location definition formats). An address expressed using postal conventions (as opposed to GPS or other location definition formats). This data type may be used to convey addresses for use in delivering mail as well as for visiting locations which might not be valid for mail delivery. There are a variety of postal address formats defined around the world. """ resource_type = "Address" def __init__(self, jsondict=None, strict=True, **kwargs): """ Initialize all valid properties. :raises: FHIRValidationError on validation errors, unless strict is False :param dict jsondict: A JSON dictionary to use for initialization :param bool strict: If True (the default), invalid variables will raise a TypeError """ self.city = None """ Name of city, town etc.. Type `str`. """ self.country = None """ Country (e.g. can be ISO 3166 2 or 3 letter code). Type `str`. """ self.district = None """ District name (aka county). Type `str`. """ self.line = None """ Street name, number, direction & P.O. Box etc.. List of `str` items. """ self.period = None """ Time period when address was/is in use. Type `Period` (represented as `dict` in JSON). """ self.postalCode = None """ Postal code for area. Type `str`. """ self.state = None """ Sub-unit of country (abbreviations ok). Type `str`. """ self.text = None """ Text representation of the address. Type `str`. """ self.type = None """ postal | physical | both. Type `str`. """ self.use = None """ home | work | temp | old | billing - purpose of this address. Type `str`. 
""" super(Address, self).__init__(jsondict=jsondict, strict=strict, **kwargs) def elementProperties(self): js = super(Address, self).elementProperties() js.extend([ ("city", "city", str, False, None, False), ("country", "country", str, False, None, False), ("district", "district", str, False, None, False), ("line", "line", str, True, None, False), ("period", "period", period.Period, False, None, False), ("postalCode", "postalCode", str, False, None, False), ("state", "state", str, False, None, False), ("text", "text", str, False, None, False), ("type", "type", str, False, None, False), ("use", "use", str, False, None, False), ]) return js import sys try: from . import period except ImportError: period = sys.modules[__package__ + '.period']
32.547368
104
0.583441
from . import element class Address(element.Element): resource_type = "Address" def __init__(self, jsondict=None, strict=True, **kwargs): self.city = None self.country = None self.district = None self.line = None self.period = None self.postalCode = None self.state = None self.text = None self.type = None self.use = None super(Address, self).__init__(jsondict=jsondict, strict=strict, **kwargs) def elementProperties(self): js = super(Address, self).elementProperties() js.extend([ ("city", "city", str, False, None, False), ("country", "country", str, False, None, False), ("district", "district", str, False, None, False), ("line", "line", str, True, None, False), ("period", "period", period.Period, False, None, False), ("postalCode", "postalCode", str, False, None, False), ("state", "state", str, False, None, False), ("text", "text", str, False, None, False), ("type", "type", str, False, None, False), ("use", "use", str, False, None, False), ]) return js import sys try: from . import period except ImportError: period = sys.modules[__package__ + '.period']
true
true
f723f8a8d4760f751862af7dd8140ab1cad4f937
755
py
Python
setup.py
salesforce/bite
0619bc6d87b81ec65cf311906da3889043176ead
[ "BSD-3-Clause" ]
6
2020-12-09T01:57:13.000Z
2021-10-09T01:50:21.000Z
setup.py
salesforce/bite
0619bc6d87b81ec65cf311906da3889043176ead
[ "BSD-3-Clause" ]
1
2021-02-16T14:50:09.000Z
2021-02-23T07:29:15.000Z
setup.py
salesforce/bite
0619bc6d87b81ec65cf311906da3889043176ead
[ "BSD-3-Clause" ]
null
null
null
from setuptools import setup with open("README.md", "r") as fh: long_description = fh.read() setuptools.setup( name="bite", version="0.1", author="Samson Tan", author_email="samson.tan@salesforce.com", description="A tokenizer that splits words into bases and inflections.", long_description=long_description, long_description_content_type="text/markdown", url="https://github.com/salesforce/bite", package_dir={"": "src"}, packages=setuptools.find_packages("src"), classifiers=[ "Programming Language :: Python :: 3", #"License :: OSI Approved :: BSD License", "Development Status :: 3 - Alpha", "Operating System :: OS Independent", ], python_requires='>=3.6', )
30.2
76
0.65298
from setuptools import setup with open("README.md", "r") as fh: long_description = fh.read() setuptools.setup( name="bite", version="0.1", author="Samson Tan", author_email="samson.tan@salesforce.com", description="A tokenizer that splits words into bases and inflections.", long_description=long_description, long_description_content_type="text/markdown", url="https://github.com/salesforce/bite", package_dir={"": "src"}, packages=setuptools.find_packages("src"), classifiers=[ "Programming Language :: Python :: 3", "Development Status :: 3 - Alpha", "Operating System :: OS Independent", ], python_requires='>=3.6', )
true
true
f723f8c5703335b1a0fa5a181861df8ecc2a13f8
6,240
py
Python
python/istio_api/mixer/v1/config/client/service_pb2.py
mt-inside/api
3197d4dee332beb55f830899f37091c9899833f9
[ "Apache-2.0" ]
3
2020-11-30T15:35:37.000Z
2022-01-06T14:17:18.000Z
python/istio_api/mixer/v1/config/client/service_pb2.py
mt-inside/api
3197d4dee332beb55f830899f37091c9899833f9
[ "Apache-2.0" ]
54
2020-06-23T17:34:04.000Z
2022-03-31T02:04:06.000Z
python/istio_api/mixer/v1/config/client/service_pb2.py
mt-inside/api
3197d4dee332beb55f830899f37091c9899833f9
[ "Apache-2.0" ]
12
2020-07-14T23:59:57.000Z
2022-03-22T09:59:18.000Z
# -*- coding: utf-8 -*- # Generated by the protocol buffer compiler. DO NOT EDIT! # source: mixer/v1/config/client/service.proto import sys _b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1')) from google.protobuf import descriptor as _descriptor from google.protobuf import message as _message from google.protobuf import reflection as _reflection from google.protobuf import symbol_database as _symbol_database # @@protoc_insertion_point(imports) _sym_db = _symbol_database.Default() from gogoproto import gogo_pb2 as gogoproto_dot_gogo__pb2 DESCRIPTOR = _descriptor.FileDescriptor( name='mixer/v1/config/client/service.proto', package='istio.mixer.v1.config.client', syntax='proto3', serialized_options=_b('Z#istio.io/api/mixer/v1/config/client\310\341\036\000\250\342\036\000\360\341\036\000\330\342\036\001'), serialized_pb=_b('\n$mixer/v1/config/client/service.proto\x12\x1cistio.mixer.v1.config.client\x1a\x14gogoproto/gogo.proto\"\xc7\x01\n\x0cIstioService\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x11\n\tnamespace\x18\x02 \x01(\t\x12\x0e\n\x06\x64omain\x18\x03 \x01(\t\x12\x0f\n\x07service\x18\x04 \x01(\t\x12\x46\n\x06labels\x18\x05 \x03(\x0b\x32\x36.istio.mixer.v1.config.client.IstioService.LabelsEntry\x1a-\n\x0bLabelsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01\x42\x35Z#istio.io/api/mixer/v1/config/client\xc8\xe1\x1e\x00\xa8\xe2\x1e\x00\xf0\xe1\x1e\x00\xd8\xe2\x1e\x01\x62\x06proto3') , dependencies=[gogoproto_dot_gogo__pb2.DESCRIPTOR,]) _ISTIOSERVICE_LABELSENTRY = _descriptor.Descriptor( name='LabelsEntry', full_name='istio.mixer.v1.config.client.IstioService.LabelsEntry', filename=None, file=DESCRIPTOR, containing_type=None, fields=[ _descriptor.FieldDescriptor( name='key', full_name='istio.mixer.v1.config.client.IstioService.LabelsEntry.key', index=0, number=1, type=9, cpp_type=9, label=1, has_default_value=False, default_value=_b("").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, 
is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='value', full_name='istio.mixer.v1.config.client.IstioService.LabelsEntry.value', index=1, number=2, type=9, cpp_type=9, label=1, has_default_value=False, default_value=_b("").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR), ], extensions=[ ], nested_types=[], enum_types=[ ], serialized_options=_b('8\001'), is_extendable=False, syntax='proto3', extension_ranges=[], oneofs=[ ], serialized_start=247, serialized_end=292, ) _ISTIOSERVICE = _descriptor.Descriptor( name='IstioService', full_name='istio.mixer.v1.config.client.IstioService', filename=None, file=DESCRIPTOR, containing_type=None, fields=[ _descriptor.FieldDescriptor( name='name', full_name='istio.mixer.v1.config.client.IstioService.name', index=0, number=1, type=9, cpp_type=9, label=1, has_default_value=False, default_value=_b("").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='namespace', full_name='istio.mixer.v1.config.client.IstioService.namespace', index=1, number=2, type=9, cpp_type=9, label=1, has_default_value=False, default_value=_b("").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='domain', full_name='istio.mixer.v1.config.client.IstioService.domain', index=2, number=3, type=9, cpp_type=9, label=1, has_default_value=False, default_value=_b("").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='service', 
full_name='istio.mixer.v1.config.client.IstioService.service', index=3, number=4, type=9, cpp_type=9, label=1, has_default_value=False, default_value=_b("").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='labels', full_name='istio.mixer.v1.config.client.IstioService.labels', index=4, number=5, type=11, cpp_type=10, label=3, has_default_value=False, default_value=[], message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR), ], extensions=[ ], nested_types=[_ISTIOSERVICE_LABELSENTRY, ], enum_types=[ ], serialized_options=None, is_extendable=False, syntax='proto3', extension_ranges=[], oneofs=[ ], serialized_start=93, serialized_end=292, ) _ISTIOSERVICE_LABELSENTRY.containing_type = _ISTIOSERVICE _ISTIOSERVICE.fields_by_name['labels'].message_type = _ISTIOSERVICE_LABELSENTRY DESCRIPTOR.message_types_by_name['IstioService'] = _ISTIOSERVICE _sym_db.RegisterFileDescriptor(DESCRIPTOR) IstioService = _reflection.GeneratedProtocolMessageType('IstioService', (_message.Message,), { 'LabelsEntry' : _reflection.GeneratedProtocolMessageType('LabelsEntry', (_message.Message,), { 'DESCRIPTOR' : _ISTIOSERVICE_LABELSENTRY, '__module__' : 'mixer.v1.config.client.service_pb2' # @@protoc_insertion_point(class_scope:istio.mixer.v1.config.client.IstioService.LabelsEntry) }) , 'DESCRIPTOR' : _ISTIOSERVICE, '__module__' : 'mixer.v1.config.client.service_pb2' # @@protoc_insertion_point(class_scope:istio.mixer.v1.config.client.IstioService) }) _sym_db.RegisterMessage(IstioService) _sym_db.RegisterMessage(IstioService.LabelsEntry) DESCRIPTOR._options = None _ISTIOSERVICE_LABELSENTRY._options = None # @@protoc_insertion_point(module_scope)
41.6
624
0.747276
import sys _b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1')) from google.protobuf import descriptor as _descriptor from google.protobuf import message as _message from google.protobuf import reflection as _reflection from google.protobuf import symbol_database as _symbol_database _sym_db = _symbol_database.Default() from gogoproto import gogo_pb2 as gogoproto_dot_gogo__pb2 DESCRIPTOR = _descriptor.FileDescriptor( name='mixer/v1/config/client/service.proto', package='istio.mixer.v1.config.client', syntax='proto3', serialized_options=_b('Z#istio.io/api/mixer/v1/config/client\310\341\036\000\250\342\036\000\360\341\036\000\330\342\036\001'), serialized_pb=_b('\n$mixer/v1/config/client/service.proto\x12\x1cistio.mixer.v1.config.client\x1a\x14gogoproto/gogo.proto\"\xc7\x01\n\x0cIstioService\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x11\n\tnamespace\x18\x02 \x01(\t\x12\x0e\n\x06\x64omain\x18\x03 \x01(\t\x12\x0f\n\x07service\x18\x04 \x01(\t\x12\x46\n\x06labels\x18\x05 \x03(\x0b\x32\x36.istio.mixer.v1.config.client.IstioService.LabelsEntry\x1a-\n\x0bLabelsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01\x42\x35Z#istio.io/api/mixer/v1/config/client\xc8\xe1\x1e\x00\xa8\xe2\x1e\x00\xf0\xe1\x1e\x00\xd8\xe2\x1e\x01\x62\x06proto3') , dependencies=[gogoproto_dot_gogo__pb2.DESCRIPTOR,]) _ISTIOSERVICE_LABELSENTRY = _descriptor.Descriptor( name='LabelsEntry', full_name='istio.mixer.v1.config.client.IstioService.LabelsEntry', filename=None, file=DESCRIPTOR, containing_type=None, fields=[ _descriptor.FieldDescriptor( name='key', full_name='istio.mixer.v1.config.client.IstioService.LabelsEntry.key', index=0, number=1, type=9, cpp_type=9, label=1, has_default_value=False, default_value=_b("").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='value', 
full_name='istio.mixer.v1.config.client.IstioService.LabelsEntry.value', index=1, number=2, type=9, cpp_type=9, label=1, has_default_value=False, default_value=_b("").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR), ], extensions=[ ], nested_types=[], enum_types=[ ], serialized_options=_b('8\001'), is_extendable=False, syntax='proto3', extension_ranges=[], oneofs=[ ], serialized_start=247, serialized_end=292, ) _ISTIOSERVICE = _descriptor.Descriptor( name='IstioService', full_name='istio.mixer.v1.config.client.IstioService', filename=None, file=DESCRIPTOR, containing_type=None, fields=[ _descriptor.FieldDescriptor( name='name', full_name='istio.mixer.v1.config.client.IstioService.name', index=0, number=1, type=9, cpp_type=9, label=1, has_default_value=False, default_value=_b("").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='namespace', full_name='istio.mixer.v1.config.client.IstioService.namespace', index=1, number=2, type=9, cpp_type=9, label=1, has_default_value=False, default_value=_b("").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='domain', full_name='istio.mixer.v1.config.client.IstioService.domain', index=2, number=3, type=9, cpp_type=9, label=1, has_default_value=False, default_value=_b("").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='service', full_name='istio.mixer.v1.config.client.IstioService.service', index=3, number=4, type=9, cpp_type=9, label=1, has_default_value=False, 
default_value=_b("").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='labels', full_name='istio.mixer.v1.config.client.IstioService.labels', index=4, number=5, type=11, cpp_type=10, label=3, has_default_value=False, default_value=[], message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR), ], extensions=[ ], nested_types=[_ISTIOSERVICE_LABELSENTRY, ], enum_types=[ ], serialized_options=None, is_extendable=False, syntax='proto3', extension_ranges=[], oneofs=[ ], serialized_start=93, serialized_end=292, ) _ISTIOSERVICE_LABELSENTRY.containing_type = _ISTIOSERVICE _ISTIOSERVICE.fields_by_name['labels'].message_type = _ISTIOSERVICE_LABELSENTRY DESCRIPTOR.message_types_by_name['IstioService'] = _ISTIOSERVICE _sym_db.RegisterFileDescriptor(DESCRIPTOR) IstioService = _reflection.GeneratedProtocolMessageType('IstioService', (_message.Message,), { 'LabelsEntry' : _reflection.GeneratedProtocolMessageType('LabelsEntry', (_message.Message,), { 'DESCRIPTOR' : _ISTIOSERVICE_LABELSENTRY, '__module__' : 'mixer.v1.config.client.service_pb2' # @@protoc_insertion_point(class_scope:istio.mixer.v1.config.client.IstioService.LabelsEntry) }) , 'DESCRIPTOR' : _ISTIOSERVICE, '__module__' : 'mixer.v1.config.client.service_pb2' # @@protoc_insertion_point(class_scope:istio.mixer.v1.config.client.IstioService) }) _sym_db.RegisterMessage(IstioService) _sym_db.RegisterMessage(IstioService.LabelsEntry) DESCRIPTOR._options = None _ISTIOSERVICE_LABELSENTRY._options = None # @@protoc_insertion_point(module_scope)
true
true
f723fa96b4b622359c95f9d0ad4cdd27364d7401
3,267
py
Python
tests/test_paper_collection.py
h1-the-swan/paper_collection
f07ad5cd8c40ddd75df2031b15c49eee60f1d914
[ "MIT" ]
null
null
null
tests/test_paper_collection.py
h1-the-swan/paper_collection
f07ad5cd8c40ddd75df2031b15c49eee60f1d914
[ "MIT" ]
2
2020-03-31T11:20:29.000Z
2020-03-31T15:20:21.000Z
tests/test_paper_collection.py
h1-the-swan/paper_collection
f07ad5cd8c40ddd75df2031b15c49eee60f1d914
[ "MIT" ]
null
null
null
#!/usr/bin/env python """Tests for `paper_collection` package.""" import unittest from paper_collection import paper_collection import pandas as pd import numpy as np class TestPaper_collection(unittest.TestCase): """Tests for `paper_collection` package.""" def setUp(self): """Set up test fixtures, if any.""" self.df_papers = pd.read_csv('tests/jw_papers_mag2019.tsv', sep='\t') self.df_papers.drop_duplicates(subset=['PaperId'], inplace=True) self.num_papers = len(self.df_papers) self.df_citations = pd.read_csv('tests/jw_citations_mag2019.tsv', sep='\t') self.num_citations = len(self.df_citations) self.df_authors = pd.read_csv('tests/jw_PaperAuthorAffiliations_mag2019.tsv', sep='\t') self.authors_by_paper = self.get_authors_by_paper(self.df_authors) def tearDown(self): """Tear down test fixtures, if any.""" def get_authors_by_paper(self, df_authors): """Get a dictionary mapping paper_id to author data """ author_data = {} for paper_id, group in df_authors.groupby('PaperId'): group = group.sort_values('AuthorSequenceNumber') this_authors = [] for _, row in group.iterrows(): this_authors.append({'name': row.OriginalAuthor, 'author_id': row.AuthorId}) author_data[paper_id] = this_authors return author_data def load_paper(self, prow): paper_id = prow.PaperId authors = self.authors_by_paper[paper_id] return paper_collection.Paper(dataset='mag', dataset_version='mag-2019-11-22', paper_id=paper_id, title=prow.PaperTitle, display_title=prow.OriginalTitle, doi=prow.Doi, pub_date=prow.Date, year=prow.Year, venue=prow.OriginalVenue, authors=authors, node_rank=prow.flow) def test_000_single_paper(self): """Load a single paper""" prow = self.df_papers.iloc[0] p = self.load_paper(prow) assert p.display_title is not None assert len(p.display_title) def test_001_collection(self): """Load a collection""" coll = paper_collection.PaperCollection(description="Paper Collection") for _, prow in self.df_papers.iterrows(): p = self.load_paper(prow) coll.papers.append(p) assert len(coll) == 
self.num_papers def test_002_graph(self): """Construct graph""" coll = paper_collection.PaperCollection(description="Paper Collection") for _, prow in self.df_papers.iterrows(): p = self.load_paper(prow) coll.papers.append(p) for _, row in self.df_citations.iterrows(): coll.citations.append((row.PaperId, row.PaperReferenceId)) G = coll.construct_graph() assert G.number_of_nodes() == self.num_papers assert G.number_of_edges() == self.num_citations
38.435294
95
0.585859
import unittest from paper_collection import paper_collection import pandas as pd import numpy as np class TestPaper_collection(unittest.TestCase): def setUp(self): self.df_papers = pd.read_csv('tests/jw_papers_mag2019.tsv', sep='\t') self.df_papers.drop_duplicates(subset=['PaperId'], inplace=True) self.num_papers = len(self.df_papers) self.df_citations = pd.read_csv('tests/jw_citations_mag2019.tsv', sep='\t') self.num_citations = len(self.df_citations) self.df_authors = pd.read_csv('tests/jw_PaperAuthorAffiliations_mag2019.tsv', sep='\t') self.authors_by_paper = self.get_authors_by_paper(self.df_authors) def tearDown(self): def get_authors_by_paper(self, df_authors): author_data = {} for paper_id, group in df_authors.groupby('PaperId'): group = group.sort_values('AuthorSequenceNumber') this_authors = [] for _, row in group.iterrows(): this_authors.append({'name': row.OriginalAuthor, 'author_id': row.AuthorId}) author_data[paper_id] = this_authors return author_data def load_paper(self, prow): paper_id = prow.PaperId authors = self.authors_by_paper[paper_id] return paper_collection.Paper(dataset='mag', dataset_version='mag-2019-11-22', paper_id=paper_id, title=prow.PaperTitle, display_title=prow.OriginalTitle, doi=prow.Doi, pub_date=prow.Date, year=prow.Year, venue=prow.OriginalVenue, authors=authors, node_rank=prow.flow) def test_000_single_paper(self): prow = self.df_papers.iloc[0] p = self.load_paper(prow) assert p.display_title is not None assert len(p.display_title) def test_001_collection(self): coll = paper_collection.PaperCollection(description="Paper Collection") for _, prow in self.df_papers.iterrows(): p = self.load_paper(prow) coll.papers.append(p) assert len(coll) == self.num_papers def test_002_graph(self): coll = paper_collection.PaperCollection(description="Paper Collection") for _, prow in self.df_papers.iterrows(): p = self.load_paper(prow) coll.papers.append(p) for _, row in self.df_citations.iterrows(): coll.citations.append((row.PaperId, 
row.PaperReferenceId)) G = coll.construct_graph() assert G.number_of_nodes() == self.num_papers assert G.number_of_edges() == self.num_citations
true
true
f723faadd5d49357ab40cc0e11d377652ed79a23
541
py
Python
scripts/idc/sortsymbols.py
camden314/CacaoSDK
57b7e0654595eb7a432ef1faec9b239a3854cf45
[ "MIT" ]
4
2021-01-26T10:00:43.000Z
2021-08-06T21:35:15.000Z
scripts/idc/sortsymbols.py
camden314/CacaoSDK
57b7e0654595eb7a432ef1faec9b239a3854cf45
[ "MIT" ]
2
2021-07-26T01:55:43.000Z
2021-07-26T17:42:20.000Z
scripts/idc/sortsymbols.py
camden314/CacaoSDK
57b7e0654595eb7a432ef1faec9b239a3854cf45
[ "MIT" ]
1
2021-06-01T17:40:01.000Z
2021-06-01T17:40:01.000Z
import re l = [] with open("functionsthunk.txt", "r") as f: s = f.readlines() for k, m in zip(s[0::2], s[1::2]): if 'non-virtual' not in k: l.append((k, m)) def sfun(a): m = re.search(r"(?:non-virtual thunk to )?(.+?\(.*\)(?: const)?\n.+\n\n)", a) print(a) return m.group(1) with open("functions.txt", 'w') as f: def sfun(a): m = re.search(r"(?:non-virtual thunk to )?(.+?)\(.*\)(?: const)?", a[0]) print(a) return m.group(1) l.sort(key=sfun) f.write("".join([k + m for k, m in l]))
24.590909
81
0.502773
import re l = [] with open("functionsthunk.txt", "r") as f: s = f.readlines() for k, m in zip(s[0::2], s[1::2]): if 'non-virtual' not in k: l.append((k, m)) def sfun(a): m = re.search(r"(?:non-virtual thunk to )?(.+?\(.*\)(?: const)?\n.+\n\n)", a) print(a) return m.group(1) with open("functions.txt", 'w') as f: def sfun(a): m = re.search(r"(?:non-virtual thunk to )?(.+?)\(.*\)(?: const)?", a[0]) print(a) return m.group(1) l.sort(key=sfun) f.write("".join([k + m for k, m in l]))
true
true
f723fbc0745a9cea8325676af39c388286cf6b75
10,985
py
Python
search/skip_list_search.py
AstiaSun/Search-Engine
2c04031f1c21d7ea78e6ce61a53349d538905cbe
[ "MIT" ]
null
null
null
search/skip_list_search.py
AstiaSun/Search-Engine
2c04031f1c21d7ea78e6ce61a53349d538905cbe
[ "MIT" ]
null
null
null
search/skip_list_search.py
AstiaSun/Search-Engine
2c04031f1c21d7ea78e6ce61a53349d538905cbe
[ "MIT" ]
null
null
null
from dataclasses import dataclass from enum import Enum from typing import Optional from common.constants import PATH_TO_LIST_OF_FILES, SPLIT OPERATION_CODES = Enum('OPERATION_CODES', 'AND OR NOT') ALL = '*' @dataclass class DocumentNode: """ Data structure used to implement skip list. Contains a reference to next node. If the next None is None - the next node is the following in the list. Structure: <document_id, reference_to_next_node> """ id: int # For memory optimisation None is used. # if field is None the next index is considered to be the # following item in the list next_id_index: Optional[int] def __eq__(self, other): return self.id == other.id def __lt__(self, other): return self.id < other.id def __le__(self, other): return self.id <= other.id def __gt__(self, other): return self.id > other.id def __ge__(self, other): return self.id >= other.id def __str__(self): return f'<{self.id, self.next_id_index}>' class DocumentSkipList: """ Proposed structure of skip list: 4 7 ___________________________ _____________________ | | | | | V | V token->(doc_id1->doc_id2->doc_id3->doc_id4->doc_id5->doc_id6->doc_id7->doc_id8) This data structure accelerates the search of common documents in the dictionary. """ def __init__(self, doc_ids=None): self.doc_ids = list() # every 4 of 5 items in list are skipped self.skip_number = 5 self.last_not_skipped_node = 0 self.add_list(doc_ids) def add_list(self, doc_ids: list) -> None: """ Append a list of items to the skip list :param doc_ids: array of document ids """ if doc_ids is None: return for doc_id in doc_ids: self.add(int(doc_id)) def add(self, doc_id: int) -> None: """ Appends the document id to the list of documents. if a new node is N-th skipped one, then the last node which has been marked as not skipped is assigned with a reference to the new node. The new node is marked as last not skipped. 
:param doc_id: id of the document """ next_index = len(self.doc_ids) if self.last_not_skipped_node + self.skip_number == next_index: self.doc_ids[self.last_not_skipped_node].next_id_index = next_index self.last_not_skipped_node = next_index self.doc_ids.append(DocumentNode(doc_id, None)) def skip_until_ge(self, index: int, other_value) -> int: """ Skips nodes in skip list until the value of node is greater then or equal to the provided value [other_value] :param index: current index of list :param other_value: value to compare with :return: index of node in list which value is greater then of equal to the provided value [other_value] """ while index < len(self) and self[index] < other_value: next_index = self[index].next_id_index \ if self[index].next_id_index else index + 1 while next_index < len(self) and self[next_index] < other_value: next_index = self[next_index].next_id_index \ if self[next_index].next_id_index else next_index + 1 index = next_index return index def to_str(self) -> str: """ returns string representation of list with documents ids only """ return ','.join([str(node.id) for node in self.doc_ids]) def to_list(self) -> list: return [node.id for node in self.doc_ids] def __iter__(self): return iter(self.doc_ids) def __len__(self): return len(self.doc_ids) def __getitem__(self, item): return self.doc_ids[item] def __setitem__(self, key, value): self.doc_ids[key] = value def __str__(self): return str(self.doc_ids) class SearchDictionary: def __init__(self, inverted_index: dict, file_dictionary: str = PATH_TO_LIST_OF_FILES): def get_all_file_ids() -> DocumentSkipList: with open(file_dictionary) as file: result = [int(line.split(SPLIT)[1].strip()) for line in file] return DocumentSkipList(result) self.inverted_index = inverted_index assert ALL not in self.inverted_index self.inverted_index[ALL] = get_all_file_ids() @staticmethod def _intersect(t1: DocumentSkipList, t2: DocumentSkipList ) -> DocumentSkipList: """ AND operation Algorithm: While the end 
of one of the document lists is not found: 1. while t1[i] == t2[j] -> append to results 2. while t1[i] > t2[j] -> i = succ(i) 3. while t1[i] < t2[j] -> j = succ(j) :param t1: list of document ids where the first token is present :param t2: list of document ids where the second token is present :return: list of documents where both tokens are present """ def is_not_end() -> bool: return i < len(t1) and j < len(t2) result = DocumentSkipList() i, j = 0, 0 while is_not_end(): while is_not_end() and t1[i] == t2[j]: result.add(t1[i].id) i += 1 j += 1 if j < len(t2): i = t1.skip_until_ge(i, t2[j]) if i < len(t1): j = t2.skip_until_ge(j, t1[i]) return result @staticmethod def _concatenate(t1: DocumentSkipList, t2: DocumentSkipList ) -> DocumentSkipList: """ OR operation Algorithm: While the end of one of the document lists is not found: 1. while t1[i] == t2[j] -> append to results 2. while t1[i] > t2[j] -> i = succ(i) 3. append to result list values in t1 which are less then current value in t2 4. while t1[i] < t2[j] -> j = succ(j) 5. 
append to result list values in t2 which are less then a current value in t1 :param t1: list of document ids where the first token is present :param t2: list of document ids where the second token is present :return: list of documents where either one of tokens is present present """ def is_not_end() -> bool: return i < len(t1) and j < len(t2) result = DocumentSkipList() i, j = 0, 0 while is_not_end(): while is_not_end() and t1[i] == t2[j]: result.add(t1[i].id) i += 1 j += 1 if j < len(t2): next_i = t1.skip_until_ge(i, t2[j]) result.add_list([node.id for node in t1[i:next_i]]) i = next_i if i < len(t1): next_j = t2.skip_until_ge(j, t1[i]) result.add_list([node.id for node in t2[j:next_j]]) j = next_j return result def exclude(self, document_list: DocumentSkipList, args ) -> DocumentSkipList: """ Find documents where the provided token is not met :param document_list: list of documents where the token is met :param args: spike solution :return: list of documents where the token is not present """ documents_to_exclude = [node.id for node in document_list] result = DocumentSkipList() for doc_id in self.inverted_index[ALL]: if doc_id.id not in documents_to_exclude: result.add(doc_id.id) return result # idea: improve search in inverted index, current complexity - O(n) def get_ids(self, token) -> Optional[DocumentSkipList]: """ :param token: token is represented as a ley in the inverted index :return: list of documents where the provided token is met """ try: return self.inverted_index[token] except KeyError: pass def process_operation(self, operator: str, t1: DocumentSkipList, t2: DocumentSkipList = None) -> DocumentSkipList: """ :param operator: operation between two lists or an operation done on a single list :param t1: first list of documents :param t2: second list of documents. If None, extraction is done. 
:return: result of operation between the lists of documents """ try: options = { OPERATION_CODES.AND: self._intersect, OPERATION_CODES.OR: self._concatenate, OPERATION_CODES.NOT: self.exclude } return options[operator](t1, t2) except KeyError: raise NotImplementedError( f'Operator "{operator}" is not supported') except TypeError as e: print(operator, t1, t2) raise TypeError(e) def _search_not_null_query(self, notation: list): def pop_last_result() -> DocumentSkipList: """ if an item on the top of the stack is a token, find a list of :return: skip list of document ids """ last_token = stack.pop() if isinstance(last_token, str): last_token = self.get_ids(last_token) return last_token def process_operation_and_put_result_to_stack(): if token == OPERATION_CODES.NOT: last_result = pop_last_result() stack.append(self.process_operation(token, last_result)) else: t1, t2 = pop_last_result(), pop_last_result() if t1 and t2: stack.append(self.process_operation(token, t1, t2)) elif t1: stack.append(t1) elif t2: stack.append(t2) stack = list() for token in notation: if isinstance(token, OPERATION_CODES): process_operation_and_put_result_to_stack() else: stack.append(token) if len(stack) > 1: raise AttributeError(f'"{notation}" is incorrect or there is ' f'a bug in the algorythm implementation.\n' f'Stack is not empty at the end: f{stack}') return pop_last_result().to_list() def search(self, notation: list) -> list: if len(notation) == 0 or notation is None: return self.inverted_index[ALL].to_list() return self._search_not_null_query(notation)
35.665584
83
0.570505
from dataclasses import dataclass from enum import Enum from typing import Optional from common.constants import PATH_TO_LIST_OF_FILES, SPLIT OPERATION_CODES = Enum('OPERATION_CODES', 'AND OR NOT') ALL = '*' @dataclass class DocumentNode: id: int next_id_index: Optional[int] def __eq__(self, other): return self.id == other.id def __lt__(self, other): return self.id < other.id def __le__(self, other): return self.id <= other.id def __gt__(self, other): return self.id > other.id def __ge__(self, other): return self.id >= other.id def __str__(self): return f'<{self.id, self.next_id_index}>' class DocumentSkipList: def __init__(self, doc_ids=None): self.doc_ids = list() self.skip_number = 5 self.last_not_skipped_node = 0 self.add_list(doc_ids) def add_list(self, doc_ids: list) -> None: if doc_ids is None: return for doc_id in doc_ids: self.add(int(doc_id)) def add(self, doc_id: int) -> None: next_index = len(self.doc_ids) if self.last_not_skipped_node + self.skip_number == next_index: self.doc_ids[self.last_not_skipped_node].next_id_index = next_index self.last_not_skipped_node = next_index self.doc_ids.append(DocumentNode(doc_id, None)) def skip_until_ge(self, index: int, other_value) -> int: while index < len(self) and self[index] < other_value: next_index = self[index].next_id_index \ if self[index].next_id_index else index + 1 while next_index < len(self) and self[next_index] < other_value: next_index = self[next_index].next_id_index \ if self[next_index].next_id_index else next_index + 1 index = next_index return index def to_str(self) -> str: return ','.join([str(node.id) for node in self.doc_ids]) def to_list(self) -> list: return [node.id for node in self.doc_ids] def __iter__(self): return iter(self.doc_ids) def __len__(self): return len(self.doc_ids) def __getitem__(self, item): return self.doc_ids[item] def __setitem__(self, key, value): self.doc_ids[key] = value def __str__(self): return str(self.doc_ids) class SearchDictionary: def __init__(self, 
inverted_index: dict, file_dictionary: str = PATH_TO_LIST_OF_FILES): def get_all_file_ids() -> DocumentSkipList: with open(file_dictionary) as file: result = [int(line.split(SPLIT)[1].strip()) for line in file] return DocumentSkipList(result) self.inverted_index = inverted_index assert ALL not in self.inverted_index self.inverted_index[ALL] = get_all_file_ids() @staticmethod def _intersect(t1: DocumentSkipList, t2: DocumentSkipList ) -> DocumentSkipList: def is_not_end() -> bool: return i < len(t1) and j < len(t2) result = DocumentSkipList() i, j = 0, 0 while is_not_end(): while is_not_end() and t1[i] == t2[j]: result.add(t1[i].id) i += 1 j += 1 if j < len(t2): i = t1.skip_until_ge(i, t2[j]) if i < len(t1): j = t2.skip_until_ge(j, t1[i]) return result @staticmethod def _concatenate(t1: DocumentSkipList, t2: DocumentSkipList ) -> DocumentSkipList: def is_not_end() -> bool: return i < len(t1) and j < len(t2) result = DocumentSkipList() i, j = 0, 0 while is_not_end(): while is_not_end() and t1[i] == t2[j]: result.add(t1[i].id) i += 1 j += 1 if j < len(t2): next_i = t1.skip_until_ge(i, t2[j]) result.add_list([node.id for node in t1[i:next_i]]) i = next_i if i < len(t1): next_j = t2.skip_until_ge(j, t1[i]) result.add_list([node.id for node in t2[j:next_j]]) j = next_j return result def exclude(self, document_list: DocumentSkipList, args ) -> DocumentSkipList: documents_to_exclude = [node.id for node in document_list] result = DocumentSkipList() for doc_id in self.inverted_index[ALL]: if doc_id.id not in documents_to_exclude: result.add(doc_id.id) return result def get_ids(self, token) -> Optional[DocumentSkipList]: try: return self.inverted_index[token] except KeyError: pass def process_operation(self, operator: str, t1: DocumentSkipList, t2: DocumentSkipList = None) -> DocumentSkipList: try: options = { OPERATION_CODES.AND: self._intersect, OPERATION_CODES.OR: self._concatenate, OPERATION_CODES.NOT: self.exclude } return options[operator](t1, t2) except KeyError: raise 
NotImplementedError( f'Operator "{operator}" is not supported') except TypeError as e: print(operator, t1, t2) raise TypeError(e) def _search_not_null_query(self, notation: list): def pop_last_result() -> DocumentSkipList: last_token = stack.pop() if isinstance(last_token, str): last_token = self.get_ids(last_token) return last_token def process_operation_and_put_result_to_stack(): if token == OPERATION_CODES.NOT: last_result = pop_last_result() stack.append(self.process_operation(token, last_result)) else: t1, t2 = pop_last_result(), pop_last_result() if t1 and t2: stack.append(self.process_operation(token, t1, t2)) elif t1: stack.append(t1) elif t2: stack.append(t2) stack = list() for token in notation: if isinstance(token, OPERATION_CODES): process_operation_and_put_result_to_stack() else: stack.append(token) if len(stack) > 1: raise AttributeError(f'"{notation}" is incorrect or there is ' f'a bug in the algorythm implementation.\n' f'Stack is not empty at the end: f{stack}') return pop_last_result().to_list() def search(self, notation: list) -> list: if len(notation) == 0 or notation is None: return self.inverted_index[ALL].to_list() return self._search_not_null_query(notation)
true
true
f723fc45ed2a21c26e65c590692824efb0148cf6
395
py
Python
backend/main.py
SwiftWinds/LAHacks
2bfeb7e073e6624ab5bd91ce9feceb3eab7965e7
[ "MIT" ]
1
2020-03-29T06:10:04.000Z
2020-03-29T06:10:04.000Z
backend/main.py
SwiftWinds/LAHacks
2bfeb7e073e6624ab5bd91ce9feceb3eab7965e7
[ "MIT" ]
8
2020-03-29T01:47:47.000Z
2022-02-27T12:43:08.000Z
backend/main.py
SwiftWinds/LAHacks
2bfeb7e073e6624ab5bd91ce9feceb3eab7965e7
[ "MIT" ]
null
null
null
from flask import Flask, session, request app = Flask(__name__) @app.route('/upload', methods=['GET', 'POST']) def hello_world(): if request.method == 'POST': session['audio_data'] = request.form['audio_data'] print(session['audio_data']) # abc = vars(request) # for i in abc: # print(i) return "Uploaded Audio" return 'Hello, World!'
30.384615
58
0.594937
from flask import Flask, session, request app = Flask(__name__) @app.route('/upload', methods=['GET', 'POST']) def hello_world(): if request.method == 'POST': session['audio_data'] = request.form['audio_data'] print(session['audio_data']) return "Uploaded Audio" return 'Hello, World!'
true
true
f723fcc14afc0f2cce14de94db3fff3d2351f6e1
1,122
py
Python
var/spack/repos/builtin/packages/chrony/package.py
robertodr/spack
9b809e01b47d48f01b3d257912fe1b752943cd3d
[ "ECL-2.0", "Apache-2.0", "MIT-0", "MIT" ]
9
2018-04-18T07:51:40.000Z
2021-09-10T03:56:57.000Z
var/spack/repos/builtin/packages/chrony/package.py
robertodr/spack
9b809e01b47d48f01b3d257912fe1b752943cd3d
[ "ECL-2.0", "Apache-2.0", "MIT-0", "MIT" ]
907
2018-04-18T11:17:57.000Z
2022-03-31T13:20:25.000Z
var/spack/repos/builtin/packages/chrony/package.py
robertodr/spack
9b809e01b47d48f01b3d257912fe1b752943cd3d
[ "ECL-2.0", "Apache-2.0", "MIT-0", "MIT" ]
29
2018-11-05T16:14:23.000Z
2022-02-03T16:07:09.000Z
# Copyright 2013-2020 Lawrence Livermore National Security, LLC and other # Spack Project Developers. See the top-level COPYRIGHT file for details. # # SPDX-License-Identifier: (Apache-2.0 OR MIT) from spack import * class Chrony(AutotoolsPackage): """chrony is a versatile implementation of the Network Time Protocol (NTP). It can synchronise the system clock with NTP servers, reference clocks(e.g. GPS receiver), and manual input using wristwatch and keyboard.""" homepage = "https://chrony.tuxfamily.org/" url = "https://github.com/mlichvar/chrony/archive/3.5.1.tar.gz" version('3.5.1', sha256='881085b944a14853402e1c5cff4de5d815ff104ec6e12eea51c12e42f32f71bd') version('3.5', sha256='145a270fe4df42931f175e37dd3771a7e714122ae361921a4b93082e648a08c5') version('3.4', sha256='85fbe433f5a3ee961a20c47a72367760b074448587a9e2d3a6788a95750ed77e') version('3.3', sha256='0dd7323b5ed9e3208236c1b39fcabf2ad03469fa07ac516ba9c682206133f66d') depends_on('ruby-asciidoctor') def setup_run_environment(self, env): env.prepend_path('PATH', self.prefix.sbin)
41.555556
95
0.757576
from spack import * class Chrony(AutotoolsPackage): homepage = "https://chrony.tuxfamily.org/" url = "https://github.com/mlichvar/chrony/archive/3.5.1.tar.gz" version('3.5.1', sha256='881085b944a14853402e1c5cff4de5d815ff104ec6e12eea51c12e42f32f71bd') version('3.5', sha256='145a270fe4df42931f175e37dd3771a7e714122ae361921a4b93082e648a08c5') version('3.4', sha256='85fbe433f5a3ee961a20c47a72367760b074448587a9e2d3a6788a95750ed77e') version('3.3', sha256='0dd7323b5ed9e3208236c1b39fcabf2ad03469fa07ac516ba9c682206133f66d') depends_on('ruby-asciidoctor') def setup_run_environment(self, env): env.prepend_path('PATH', self.prefix.sbin)
true
true
f723fdf48d7b23a6b7b5b3707c12e799fe8951e0
45
py
Python
sbody/alignment/mesh_distance/__init__.py
Rubikplayer/flame-fitting
db3d622547b83ab158dceb46a5d049781e6e6c3a
[ "AAL" ]
394
2017-11-25T13:26:27.000Z
2022-03-28T07:19:05.000Z
sbody/alignment/mesh_distance/__init__.py
Rubikplayer/flame-fitting
db3d622547b83ab158dceb46a5d049781e6e6c3a
[ "AAL" ]
25
2017-11-30T04:26:32.000Z
2022-03-10T10:27:54.000Z
sbody/alignment/mesh_distance/__init__.py
Rubikplayer/flame-fitting
db3d622547b83ab158dceb46a5d049781e6e6c3a
[ "AAL" ]
84
2017-11-27T05:42:43.000Z
2022-03-30T02:56:48.000Z
from sbody.alignment.mesh_distance import *
22.5
44
0.822222
from sbody.alignment.mesh_distance import *
true
true
f723ff274099a624e2dce58b9a204bbe4255d1c3
67,751
py
Python
mindspore/nn/optim/thor.py
mindspore-ai/mindspore
a9fbb25530a2874166ff0045ddcdfc73207bf5eb
[ "Apache-2.0" ]
3,200
2020-02-17T12:45:41.000Z
2022-03-31T20:21:16.000Z
mindspore/nn/optim/thor.py
mindspore-ai/mindspore
a9fbb25530a2874166ff0045ddcdfc73207bf5eb
[ "Apache-2.0" ]
176
2020-02-12T02:52:11.000Z
2022-03-28T22:15:55.000Z
mindspore/nn/optim/thor.py
mindspore-ai/mindspore
a9fbb25530a2874166ff0045ddcdfc73207bf5eb
[ "Apache-2.0" ]
621
2020-03-09T01:31:41.000Z
2022-03-30T03:43:19.000Z
# Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""thor: second-order (THOR) optimizer and its graph-mode helper functions."""
import numpy as np
from mindspore.ops import functional as F, composite as C, operations as P
from mindspore.common.initializer import initializer
from mindspore.common.parameter import Parameter, ParameterTuple
from mindspore.common.tensor import Tensor
import mindspore.nn as nn
import mindspore.common.dtype as mstype
import mindspore.log as logger
from mindspore._checkparam import Validator
from mindspore.nn.optim.optimizer import Optimizer
from mindspore.parallel._utils import _get_device_num, _get_gradients_mean
from mindspore import context
from mindspore.context import ParallelMode
from mindspore.nn.layer import DenseThor, Conv2dThor, EmbeddingThor, EmbeddingLookupThor
from mindspore.nn.wrap import DistributedGradReducer
from mindspore.train.train_thor.convert_utils import ConvertNetUtils
from mindspore.parallel._auto_parallel_context import auto_parallel_context

# Enumerates types of Layer.
# Integer tags used throughout this module to select per-layer THOR handling
# (see get_net_layertype_mask / get_layer_counter below).
Other = -1
Conv = 1
FC = 2
Embedding = 3
LayerNorm = 4
BatchNorm = 5

op_add = P.AddN()
apply_decay = C.MultitypeFuncGraph("apply_decay")
_momentum_opt = C.MultitypeFuncGraph("momentum_opt")


@apply_decay.register("Number", "Bool", "Tensor", "Tensor")
def _tensor_apply_decay(weight_decay, if_apply, weight, gradient):
    """Get grad with weight_decay."""
    # Graph-mode function: adds the L2-penalty term (weight * weight_decay)
    # to the gradient only when `if_apply` is set for this parameter.
    if if_apply:
        return op_add((weight * weight_decay, gradient))
    return gradient


@_momentum_opt.register("Function", "Tensor", "Tensor", "Tensor", "Tensor", "Tensor")
def _tensor_run_opt_ext(opt, momentum, learning_rate, gradient, weight, moment):
    """Apply momentum optimizer to the weight parameter using Tensor."""
    # F.depend keeps the ApplyMomentum side effect ordered in the compiled
    # graph; the returned flag itself is always True.
    success = True
    success = F.depend(success, opt(weight, moment, learning_rate, gradient, momentum))
    return success


IS_ENABLE_GLOBAL_NORM = False
GRADIENT_CLIP_TYPE = 1
GRADIENT_CLIP_VALUE = 1.0
clip_grad = C.MultitypeFuncGraph("clip_grad")
hyper_map_op = C.HyperMap()


@clip_grad.register("Number", "Number", "Tensor")
def _clip_grad(clip_type, clip_value, grad):
    """
    Clip gradients.

    Inputs:
        clip_type (int): The way to clip, 0 for 'value', 1 for 'norm'.
        clip_value (float): Specifies how much to clip.
        grad (tuple[Tensor]): Gradients.

    Outputs:
        tuple[Tensor], clipped gradients.
    """
    # Any other clip_type is treated as "no clipping".
    if clip_type not in [0, 1]:
        return grad
    dt = F.dtype(grad)
    if clip_type == 0:
        new_grad = C.clip_by_value(grad, F.cast(F.tuple_to_array((-clip_value,)), dt),
                                   F.cast(F.tuple_to_array((clip_value,)), dt))
    else:
        new_grad = nn.ClipByNorm()(grad, F.cast(F.tuple_to_array((clip_value,)), dt))
    return new_grad


def clip_gradient(enable_clip_grad, gradients):
    """Clip `gradients` when `enable_clip_grad` is set; otherwise return them unchanged.

    Uses global-norm clipping when IS_ENABLE_GLOBAL_NORM is True, else maps the
    registered `clip_grad` graph (type/value taken from the module constants)
    over every gradient tensor.
    """
    if enable_clip_grad:
        if IS_ENABLE_GLOBAL_NORM:
            gradients = C.clip_by_global_norm(gradients, GRADIENT_CLIP_VALUE, None)
        else:
            gradients = hyper_map_op(F.partial(clip_grad, GRADIENT_CLIP_TYPE, GRADIENT_CLIP_VALUE), gradients)
    return gradients


# NOTE(review): presumably the 16-element cube/block alignment unit used when
# padding channel dims for device matrix shapes (see caculate_device_shape) —
# confirm against the Ascend data-format documentation.
C0 = 16


def _check_param(momentum, frequency, lr, cls_name):
    """Check param: momentum must be a float >= 0.0, frequency an int >= 2, lr a Tensor."""
    Validator.check_value_type("momentum", momentum, [float], cls_name)
    if isinstance(momentum, float) and momentum < 0.0:
        raise ValueError("momentum should be at least 0.0, but got momentum {}".format(momentum))
    Validator.check_value_type("frequency", frequency, [int], cls_name)
    if isinstance(frequency, int) and frequency < 2:
        raise ValueError("frequency should be at least 2, but got frequency {}".format(frequency))
    Validator.check_value_type("learning rate",
                               lr, [Tensor], cls_name)


def caculate_device_shape(matrix_dim, channel, is_a):
    """Map a (square) matrix dim to its 4-D device block shape ((n, n, C0, C0), dim).

    For an "A" (input-side) matrix whose channel count is below C0, the dim is
    first scaled up so the channel axis is padded to one full C0 block.
    """
    if is_a:
        # channel // C0 == 0 means channel < C0: pad channels up to C0.
        if channel // C0 == 0:
            matrix_dim = (matrix_dim / channel) * C0
    ll = (int(matrix_dim // C0), int(matrix_dim // C0), C0, C0), int(matrix_dim)
    return ll


def is_conv_matmul_support_shape(matrix_a_shape, matrix_g_shape):
    """is conv layer matmul support shape"""
    # Whitelist of (G-shape, A-shape) pairs the fused device matmul supports;
    # anything else falls back to the non-fused path.
    temp = (matrix_g_shape, matrix_a_shape)
    support_shape = [((4, 4, 16, 16), (49, 49, 16, 16)),
                     ((4, 4, 16, 16), (4, 4, 16, 16)),
                     ((4, 4, 16, 16), (36, 36, 16, 16)),
                     ((16, 16, 16, 16), (4, 4, 16, 16)),
                     ((4, 4, 16, 16), (16, 16, 16, 16)),
                     ((8, 8, 16, 16), (16, 16, 16, 16)),
                     ((8, 8, 16, 16), (72, 72, 16, 16)),
                     ((32, 32, 16, 16), (8, 8, 16, 16)),
                     ((32, 32, 16, 16), (16, 16, 16, 16)),
                     ((8, 8, 16, 16), (32, 32, 16, 16)),
                     ((16, 16, 16, 16), (32, 32, 16, 16)),
                     ((16, 16, 16, 16), (144, 144, 16, 16)),
                     ((64, 64, 16, 16), (16, 16, 16, 16)),
                     ((64, 64, 16, 16), (32, 32, 16, 16)),
                     ((16, 16, 16, 16), (64, 64, 16, 16)),
                     ((32, 32, 16, 16), (64, 64, 16, 16)),
                     ((32, 32, 16, 16), (288, 288, 16, 16)),
                     ((128, 128, 16, 16), (32, 32, 16, 16)),
                     ((128, 128, 16, 16), (64, 64, 16, 16)),
                     ((32, 32, 16, 16), (128, 128, 16, 16))]
    if temp in support_shape:
        return True
    return False


def caculate_matmul_shape(matrix_a_dim, matrix_g_dim, split_dim):
    """get matmul shape

    Splits the A/G dims into `split_dim`-sized tiles; a dim smaller than
    `split_dim` becomes a single tile of its own size. Returns the batched
    tile shapes ((batch_h, batch_w, dima, dima), (batch_h, dimg, dimg)).
    """
    split_dima = split_dim
    split_dimg = split_dim
    if matrix_a_dim % split_dim == 0:
        batch_w = matrix_a_dim // split_dim
    else:
        if matrix_a_dim < split_dim:
            batch_w = 1
            split_dima = matrix_a_dim
        else:
            batch_w = matrix_a_dim // split_dim + 1
    if matrix_g_dim % split_dim == 0:
        batch_h = matrix_g_dim // split_dim
    else:
        if matrix_g_dim < split_dim:
            batch_h = 1
            split_dimg = matrix_g_dim
        else:
            batch_h = matrix_g_dim // split_dim + 1
    matrix_a_shape = (batch_h, batch_w, split_dima, split_dima)
    matrix_g_shape = (batch_h, split_dimg, split_dimg)
    return matrix_a_shape, matrix_g_shape


def get_layer_type_for_dense_and_conv(subcell, prefix, layertype_map):
    """get layer type for dense layer and conv layer"""
    # Plain (non-THOR) Dense/Conv2d layers are tagged Other; frozen layers and
    # most rpn_convs_list heads (all but index 0) are skipped entirely.
    if subcell.weight.requires_grad:
        if "rpn_with_loss.rpn_convs_list." not in prefix.lower() \
                or "rpn_with_loss.rpn_convs_list.0." in prefix.lower():
            layertype_map.append(Other)


def find_net_layertype_recur(net, layertype_map):
    """get net layer type recursively."""
    cells = net.name_cells()
    for name in cells:
        subcell = cells[name]
        prefix = subcell.param_prefix
        if subcell == net:
            continue
        elif isinstance(subcell, Conv2dThor):
            layertype_map.append(Conv)
        elif isinstance(subcell, DenseThor):
            layertype_map.append(FC)
        elif isinstance(subcell, (EmbeddingThor, EmbeddingLookupThor)):
            layertype_map.append(Embedding)
        elif isinstance(subcell, nn.LayerNorm):
            layertype_map.append(LayerNorm)
        elif isinstance(subcell, nn.BatchNorm2d):
            # Frozen BatchNorm2d (gamma not trainable) is deliberately skipped.
            if subcell.gamma.requires_grad:
                layertype_map.append(BatchNorm)
        elif isinstance(subcell, (nn.Conv2d, nn.Dense, nn.Embedding, nn.Conv2dTranspose, nn.Conv1d,
                                  nn.Conv1dTranspose, nn.BatchNorm1d, nn.GroupNorm, nn.GlobalBatchNorm)):
            if isinstance(subcell, (nn.Dense, nn.Conv2d)):
                get_layer_type_for_dense_and_conv(subcell, prefix, layertype_map)
            else:
                layertype_map.append(Other)
        else:
            # Container cell: recurse into its children.
            find_net_layertype_recur(subcell, layertype_map)


def get_net_layertype_mask(net):
    """Return the flat list of layer-type tags for every leaf layer of `net`."""
    layertype_map = []
    find_net_layertype_recur(net, layertype_map)
    return layertype_map


def get_layer_counter(layer_type, layer_counter, params, idx):
    """get layer counter

    Advances the layer index while walking the flat parameter list: a layer is
    "finished" after its last parameter (bias/beta when present, otherwise the
    weight), pairing weight+bias (or gamma+beta) into a single layer.
    """
    if layer_type in [Conv, FC]:
        if "bias" in params[idx].name.lower():
            layer_counter = layer_counter + 1
        else:
            # Weight with no following bias: this weight ends the layer.
            if idx < len(params) - 1 and "bias" not in params[idx + 1].name.lower():
                layer_counter = layer_counter + 1
    elif layer_type in [LayerNorm, BatchNorm]:
        # gamma then beta: beta closes the layer.
        if "beta" in params[idx].name.lower():
            layer_counter = layer_counter + 1
    else:
        if "bias" in params[idx].name.lower():
            layer_counter = layer_counter + 1
        elif "weight" in params[idx].name.lower():
            if idx < len(params) - 1 and "bias" not in params[idx + 1].name.lower():
                layer_counter = layer_counter + 1
        else:
            # Single standalone parameter counts as its own layer.
            layer_counter = layer_counter + 1
    return layer_counter


def thor(net,
         learning_rate, damping, momentum, weight_decay=0.0, loss_scale=1.0, batch_size=32,
         use_nesterov=False, decay_filter=lambda x: x.name not in [], split_indices=None,
         enable_clip_grad=False, frequency=100):
    r"""
    Updates gradients by second-order algorithm--THOR.

    Trace-based Hardware-driven layer-ORiented Natural Gradient Descent Computation (THOR) algorithm is proposed in:

    `THOR: Trace-based Hardware-driven layer-ORiented Natural Gradient Descent Computation
    <https://www.aaai.org/AAAI21Papers/AAAI-6611.ChenM.pdf>`_

    The updating formulas are as follows,

    .. math::
        \begin{array}{ll} \\
            A_i = a_i{a_i}^T \\
            G_i = D_{s_i}{ D_{s_i}}^T \\
            m_i = \beta * m_i + ({G_i^{(k)}}+\lambda I)^{-1} g_i ({\overline A_{i-1}^{(k)}}+\lambda I)^{-1} \\
            w_i = w_i - \alpha * m_i \\
        \end{array}

    :math:`D_{s_i}` represents the derivative of the loss function of the output of the i-th layer,
    :math:`a_{i-1}` represents the input of the i-th layer, which is the activations of the previous layer,
    :math:`\beta` represents momentum, :math:`I` represents the identity matrix,
    :math:`\overline A` represents the transpose of matrix A,
    :math:`\lambda` represents 'damping', :math:`g_i` represents gradients of the i-th layer,
    :math:`\otimes` represents Kronecker product, :math:`\alpha` represents 'learning rate'

    Note:
        When separating parameter groups, the weight decay in each group will be applied on the parameters if the
        weight decay is positive. When not separating parameter groups, the `weight_decay` in the API will be applied
        on the parameters without 'beta' or 'gamma' in their names if `weight_decay` is positive.

        When separating parameter groups, if you want to centralize the gradient, set grad_centralization to True,
        but the gradient centralization can only be applied to the parameters of the convolution layer.
        If the parameters of the non convolution layer are set to True, an error will be reported.

        To improve parameter groups performance, the customized order of parameters can be supported.

    Args:
        net (Cell): The training network.
        learning_rate (Tensor): A value for the learning rate.
        damping (Tensor): A value for the damping.
        momentum (float): Hyper-parameter of type float, means momentum for the moving average. It must be at least
            0.0.
        weight_decay (int, float): Weight decay (L2 penalty). It must be equal to or greater than 0.0. Default: 0.0.
        loss_scale (float): A value for the loss scale. It must be greater than 0.0. In general, use the
            default value. Default: 1.0.
        batch_size (int): The size of a batch. Default: 32
        use_nesterov (bool): Enable Nesterov momentum. Default: False.
        decay_filter (function): A function to determine which layers the weight decay applied to. And it
            only works when the weight_decay > 0. Default: lambda x: x.name not in []
        split_indices (list): Set allreduce fusion strategy by A/G layer indices. Only works when distributed
            computing. ResNet50 as an example, there are 54 layers of A/G respectively, when split_indices is set
            to [26, 53], it means A/G is divided into two groups to allreduce, one is 0~26 layer, and the other
            is 27~53. Default: None
        enable_clip_grad (bool): Whether to clip the gradients. Default: False
        frequency(int): The update interval of A/G and $A^{-1}/G^{-1}$. When frequency equals N (N is greater than 1),
            A/G and $A^{-1}/G^{-1}$ will be updated every N steps, and other steps will use the stale A/G and
            $A^{-1}/G^{-1}$ to update weights. Default: 100.

    Inputs:
        - **gradients** (tuple[Tensor]) - The gradients of `params`, the shape is the same as `params`.

    Outputs:
        tuple[bool], all elements are True.

    Raises:
        TypeError: If `learning_rate` is not Tensor.
        TypeError: If `loss_scale` or `momentum` is not a float.
        TypeError: If `weight_decay` is neither float nor int.
        TypeError: If `use_nesterov` is not a bool.
        ValueError: If `loss_scale` is less than or equal to 0.
        ValueError: If `weight_decay` or `momentum` is less than 0.
        ValueError: If `frequency` is not int.
        ValueError: If `frequency` is less than 2.

    Supported Platforms:
        ``Ascend`` ``GPU``

    Examples:
        >>> from mindspore.nn import thor
        >>> from mindspore import Model
        >>> from mindspore import FixedLossScaleManager
        >>> from mindspore.train.callback import LossMonitor
        >>> from mindspore.train.train_thor import ConvertModelUtils
        >>> from mindspore import nn
        >>> from mindspore import Tensor
        >>>
        >>> net = Net()
        >>> dataset = create_dataset()
        >>> temp = Tensor([4e-4, 1e-4, 1e-5, 1e-5], mstype.float32)
        >>> optim = thor(net, learning_rate=temp, damping=temp, momentum=0.9, loss_scale=128, frequency=4)
        >>> loss = nn.SoftmaxCrossEntropyWithLogits(sparse=True, reduction='mean')
        >>> loss_scale = FixedLossScaleManager(128, drop_overflow_update=False)
        >>> model = Model(net, loss_fn=loss, optimizer=optim, loss_scale_manager=loss_scale, metrics={'acc'},
        ...               amp_level="O2", keep_batchnorm_fp32=False)
        >>> model = ConvertModelUtils.convert_to_thor_model(model=model, network=net, loss_fn=loss, optimizer=optim,
        ...                                                 loss_scale_manager=loss_scale, metrics={'acc'},
        ...                                                 amp_level="O2", keep_batchnorm_fp32=False)
        >>> loss_cb = LossMonitor()
        >>> model.train(1, dataset, callbacks=loss_cb, sink_size=4, dataset_sink_mode=True)
    """
    # The converted THOR net nests deeply; raise the graph call-depth limit first.
    context.set_context(max_call_depth=10000)
    # Rewrites supported layers of `net` in place into their *Thor variants
    # (Conv2dThor/DenseThor/...) so covariance statistics get collected.
    ConvertNetUtils().convert_to_thor_net(net)
    # Dispatch to the backend-specific optimizer implementation.
    if context.get_context("device_target") == "Ascend":
        return ThorAscend(net, learning_rate, damping, momentum, weight_decay, loss_scale, batch_size,
                          decay_filter, split_indices=split_indices, enable_clip_grad=enable_clip_grad,
                          frequency=frequency)
    return ThorGpu(net, learning_rate, damping, momentum, weight_decay, loss_scale, batch_size,
                   use_nesterov, decay_filter, split_indices=split_indices,
                   enable_clip_grad=enable_clip_grad, frequency=frequency)


class ThorGpu(Optimizer):
    """
    ThorGpu: GPU implementation of the THOR second-order optimizer.

    Maintains per-layer A/G covariance inverses (collected by the converted
    THOR layers) and preconditions gradients with them before a momentum step.
    """
    def __init__(self, net, learning_rate, damping, momentum, weight_decay=0.0, loss_scale=1.0,
                 batch_size=32, use_nesterov=False, decay_filter=lambda x: x.name not in [],
                 split_indices=None, enable_clip_grad=False, frequency=100):
        params = filter(lambda x: x.requires_grad, net.get_parameters())
        super(ThorGpu, self).__init__(learning_rate, params, weight_decay, loss_scale)
        _check_param(momentum, frequency, learning_rate, self.__class__.__name__)
        self.momentum = Parameter(Tensor(momentum, mstype.float32), name="momentum")
        self.params = self.parameters
        self.use_nesterov = Validator.check_bool(use_nesterov)
        self.moments = self.params.clone(prefix="moments", init='zeros')
        self.hyper_map = C.HyperMap()
        self.opt = P.ApplyMomentum(use_nesterov=self.use_nesterov)
        self.net = net
        # Covariance/normalizer parameters registered by the converted THOR
        # layers, matched by name.
        self.matrix_a_cov = ParameterTuple(filter(lambda x: 'matrix_a' in x.name, net.get_parameters()))
        self.matrix_g_cov = ParameterTuple(filter(lambda x: 'matrix_g' in x.name, net.get_parameters()))
        self.a_normalizer = ParameterTuple(filter(lambda x: 'a_normalizer' in x.name, net.get_parameters()))
        self.g_normalizer = ParameterTuple(filter(lambda x: 'g_normalizer' in x.name, net.get_parameters()))
        self.batch_size = Tensor(batch_size, mstype.float32)
        # 1/loss_scale**2: undoes the loss scaling on the (squared) G statistics.
        self.loss_scale = Tensor(1 /
(loss_scale * loss_scale), mstype.float32) self.batch_size_scale = Tensor(batch_size * batch_size, mstype.float32) self.damping = damping self._define_gpu_operator() logger.info("matrix_a_cov len is {}".format(len(self.matrix_a_cov))) self.thor = True self.matrix_a = () self.matrix_g = () self.matrix_a_shape = () self.thor_layer_count = 0 self.conv_layer_count = 0 self.weight_fim_idx_map = () self.weight_conv_idx_map = () self.weight_layertype_idx_map = () self._process_matrix_init_and_weight_idx_map(self.net) self.matrix_a = ParameterTuple(self.matrix_a) self.matrix_g = ParameterTuple(self.matrix_g) self.weight_decay = weight_decay self.decay_flags = tuple(decay_filter(x) for x in self.parameters) self.update_gradient = P.UpdateThorGradient(split_dim=self.split_dim) self.enable_clip_grad = enable_clip_grad self.frequency = frequency self._define_gpu_reducer(split_indices) def get_frequency(self): """get thor frequency""" return self.frequency def _define_gpu_operator(self): """define gpu operator""" self.transpose = P.Transpose() self.shape = P.Shape() self.reshape = P.Reshape() self.matmul = P.MatMul() self.assign = P.Assign() self.mul = P.Mul() self.gather = P.GatherV2() self.one = Tensor(1, mstype.int32) self.feature_map = Tensor(1.0, mstype.float32) self.axis = 0 self.cov_step = Parameter(initializer(0, [1], mstype.int32), name="cov_step", requires_grad=False) self.cast = P.Cast() self.sqrt = P.Sqrt() self.eye = P.Eye() self.split_dim = 128 self.embedding_cholesky = P.CholeskyTrsm() self.cholesky = P.CholeskyTrsm(split_dim=self.split_dim) self.vector_matmul = P.BatchMatMul(transpose_a=True) self.reduce_sum = P.ReduceSum(keep_dims=False) self.inv = P.Reciprocal() self.square = P.Square() self.expand = P.ExpandDims() def _define_gpu_reducer(self, split_indices): """define gpu reducer""" self.parallel_mode = context.get_auto_parallel_context("parallel_mode") self.is_distributed = (self.parallel_mode != ParallelMode.STAND_ALONE) if self.is_distributed: mean = 
_get_gradients_mean() degree = _get_device_num() if not split_indices: self.split_indices = [len(self.matrix_a_cov) - 1] else: self.split_indices = split_indices auto_parallel_context().set_all_reduce_fusion_split_indices(self.split_indices, "hccl_world_groupsum6") auto_parallel_context().set_all_reduce_fusion_split_indices(self.split_indices, "hccl_world_groupsum8") self.grad_reducer_a = DistributedGradReducer(self.matrix_a_cov, mean, degree, fusion_type=6) self.grad_reducer_g = DistributedGradReducer(self.matrix_a_cov, mean, degree, fusion_type=8) def _process_matrix_init_and_weight_idx_map(self, net): """for GPU, process matrix init shape, and get weight idx map""" layer_type_map = get_net_layertype_mask(net) layer_counter = 0 for idx in range(len(self.params)): layer_type = layer_type_map[layer_counter] weight = self.params[idx] weight_shape = self.shape(weight) if layer_type in [Conv, FC] and "bias" not in self.params[idx].name.lower(): in_channels = weight_shape[1] out_channels = weight_shape[0] matrix_a_dim = in_channels if layer_type == Conv: matrix_a_dim = in_channels * weight_shape[2] * weight_shape[3] matrix_g_dim = out_channels matrix_a_shape, matrix_g_shape = caculate_matmul_shape(matrix_a_dim, matrix_g_dim, self.split_dim) matrix_a_inv = Parameter(np.zeros(matrix_a_shape).astype(np.float32), name='matrix_a_inv_' + str(self.thor_layer_count), requires_grad=False) matrix_g_inv = Parameter(np.zeros(matrix_g_shape).astype(np.float32), name="matrix_g_inv_" + str(self.thor_layer_count), requires_grad=False) self.matrix_a = self.matrix_a + (matrix_a_inv,) self.matrix_g = self.matrix_g + (matrix_g_inv,) self.matrix_a_shape = self.matrix_a_shape + (matrix_a_shape,) elif layer_type == Embedding: vocab_size = weight_shape[0] embedding_size = weight_shape[1] matrix_a_inv = Parameter(Tensor(np.zeros([vocab_size]).astype(np.float32)), name='matrix_a_inv_' + str(self.thor_layer_count), requires_grad=False) matrix_g_inv = Parameter(Tensor(np.zeros([embedding_size, 
embedding_size]).astype(np.float32)), name="matrix_g_inv_" + str(self.thor_layer_count), requires_grad=False) self.matrix_a = self.matrix_a + (matrix_a_inv,) self.matrix_g = self.matrix_g + (matrix_g_inv,) self.matrix_a_shape = self.matrix_a_shape + ((vocab_size,),) if layer_type in [Conv, FC, Embedding] and "bias" not in self.params[idx].name.lower(): self.weight_fim_idx_map = self.weight_fim_idx_map + (self.thor_layer_count,) self.thor_layer_count = self.thor_layer_count + 1 self.weight_layertype_idx_map = self.weight_layertype_idx_map + (layer_type,) if layer_type == Conv: self.weight_conv_idx_map = self.weight_conv_idx_map + (self.conv_layer_count,) self.conv_layer_count = self.conv_layer_count + 1 else: self.weight_conv_idx_map = self.weight_conv_idx_map + (-1,) else: self.weight_conv_idx_map = self.weight_conv_idx_map + (-1,) self.weight_fim_idx_map = self.weight_fim_idx_map + (-1,) if layer_type == LayerNorm: self.weight_layertype_idx_map = self.weight_layertype_idx_map + (LayerNorm,) else: self.weight_layertype_idx_map = self.weight_layertype_idx_map + (Other,) # bert.cls1.output_bias: not a network layer, only a trainable param if "output_bias" not in self.params[idx].name.lower(): layer_counter = get_layer_counter(layer_type, layer_counter, self.params, idx) def _get_ainv_ginv_list(self, gradients, damping_step, matrix_a_allreduce, matrix_g_allreduce): """get matrixA inverse list and matrix G inverse list""" for i in range(len(self.params)): thor_layer_count = self.weight_fim_idx_map[i] conv_layer_count = self.weight_conv_idx_map[i] layer_type = self.weight_layertype_idx_map[i] if layer_type in [Conv, FC, Embedding]: g = gradients[i] matrix_a = self.matrix_a_cov[thor_layer_count] matrix_g = self.matrix_g_cov[thor_layer_count] matrix_a = F.depend(matrix_a, g) matrix_g = F.depend(matrix_g, g) damping_a = damping_step damping_g = damping_step feature_map = self.feature_map if layer_type == Conv: a_normalizer = self.a_normalizer[conv_layer_count] g_normalizer 
= self.g_normalizer[conv_layer_count] a_normalizer = F.depend(a_normalizer, g) g_normalizer = F.depend(g_normalizer, g) damping_a = self.mul(damping_step, 1.0 / a_normalizer) damping_g = self.mul(damping_step, 1.0 / g_normalizer) feature_map = self.sqrt(1.0 / a_normalizer) a_shape = self.shape(matrix_a) a_eye = self.eye(a_shape[0], a_shape[0], mstype.float32) damping_a = self.sqrt(damping_a) damping_g = self.sqrt(damping_g) g_shape = self.shape(matrix_g) g_eye = self.eye(g_shape[0], g_shape[1], mstype.float32) matrix_g = self.mul(matrix_g, self.loss_scale) matrix_g = self.mul(matrix_g, self.batch_size_scale) matrix_g = matrix_g + damping_g * g_eye if layer_type == Embedding: a_eye = P.OnesLike()(matrix_a) matrix_a = self.mul(matrix_a, 1.0 / self.batch_size) matrix_a = matrix_a + damping_a * a_eye matrix_a = self.inv(matrix_a) matrix_g = self.embedding_cholesky(matrix_g) matrix_g = self.matmul(matrix_g, matrix_g) else: matrix_a = matrix_a + damping_a * a_eye matrix_a = self.cholesky(matrix_a) matrix_a = self.vector_matmul(matrix_a, matrix_a) matrix_a = P.BroadcastTo(self.matrix_a_shape[thor_layer_count])(matrix_a) matrix_g = self.cholesky(matrix_g) matrix_g = self.vector_matmul(matrix_g, matrix_g) matrix_a = self.mul(matrix_a, feature_map) matrix_g = self.mul(matrix_g, feature_map) matrix_a_allreduce = matrix_a_allreduce + (matrix_a,) matrix_g_allreduce = matrix_g_allreduce + (matrix_g,) return matrix_a_allreduce, matrix_g_allreduce def _process_layernorm(self, damping_step, gradient): """process layernorm""" damping = self.sqrt(damping_step) normalizer = self.batch_size normalizer = self.cast(normalizer, mstype.float32) fim_cov = self.square(gradient) fim_cov = self.mul(fim_cov, 1.0 / normalizer) fim_cov = fim_cov + damping fim_inv = self.inv(fim_cov) gradient = self.mul(fim_inv, gradient) return gradient def _reshape_gradient(self, conv_layer_count, g, g_shape): """reshape gradient""" if conv_layer_count != -1: g = self.reshape(g, g_shape) return g def 
construct(self, gradients): params = self.params moments = self.moments gradients = self.scale_grad(gradients) damping_step = self.gather(self.damping, self.cov_step, self.axis) damping_step = self.cast(damping_step, mstype.float32) new_grads = () if self.thor: matrix_ainv_list = () matrix_ginv_list = () matrix_a_allreduce, matrix_g_allreduce = self._get_ainv_ginv_list(gradients, damping_step, matrix_ainv_list, matrix_ginv_list) if self.is_distributed: matrix_a_allreduce = self.grad_reducer_a(matrix_a_allreduce) matrix_g_allreduce = self.grad_reducer_g(matrix_g_allreduce) for i in range(len(self.params)): g = gradients[i] thor_layer_count = self.weight_fim_idx_map[i] conv_layer_count = self.weight_conv_idx_map[i] layer_type = self.weight_layertype_idx_map[i] if layer_type in [Conv, FC]: g_shape = self.shape(g) g = self.reshape(g, (g_shape[0], -1)) matrix_a = matrix_a_allreduce[thor_layer_count] matrix_g = matrix_g_allreduce[thor_layer_count] g = self.update_gradient(matrix_g, g, matrix_a) self.assign(self.matrix_a[thor_layer_count], matrix_a) self.assign(self.matrix_g[thor_layer_count], matrix_g) g = self._reshape_gradient(conv_layer_count, g, g_shape) elif layer_type == Embedding: matrix_a = matrix_a_allreduce[thor_layer_count] matrix_g = matrix_g_allreduce[thor_layer_count] self.assign(self.matrix_a[thor_layer_count], matrix_a) self.assign(self.matrix_g[thor_layer_count], matrix_g) temp_a = self.expand(matrix_a, 1) g = self.mul(temp_a, g) g = self.matmul(g, matrix_g) elif layer_type == LayerNorm: g = self._process_layernorm(damping_step, g) new_grads = new_grads + (g,) else: for j in range(len(self.params)): g = gradients[j] thor_layer_count = self.weight_fim_idx_map[j] conv_layer_count = self.weight_conv_idx_map[j] layer_type = self.weight_layertype_idx_map[j] if layer_type in [Conv, FC]: g_shape = self.shape(g) g = self.reshape(g, (g_shape[0], -1)) matrix_a = self.matrix_a[thor_layer_count] matrix_g = self.matrix_g[thor_layer_count] g = 
self.update_gradient(matrix_g, g, matrix_a) g = self._reshape_gradient(conv_layer_count, g, g_shape) elif layer_type == Embedding: matrix_a = self.matrix_a[thor_layer_count] matrix_g = self.matrix_g[thor_layer_count] g = gradients[j] temp_a = self.expand(matrix_a, 1) g = self.mul(temp_a, g) g = self.matmul(g, matrix_g) elif layer_type == LayerNorm: g = self._process_layernorm(damping_step, g) new_grads = new_grads + (g,) gradients = new_grads self.cov_step = self.cov_step + self.one if self.weight_decay > 0: gradients = self.hyper_map(F.partial(apply_decay, self.weight_decay), self.decay_flags, params, gradients) gradients = clip_gradient(self.enable_clip_grad, gradients) lr = self.get_lr() success = self.hyper_map(F.partial(_momentum_opt, self.opt, self.momentum, lr), gradients, params, moments) return success class ThorAscend(Optimizer): """ThorAscend""" def __init__(self, net, learning_rate, damping, momentum, weight_decay=0.0, loss_scale=1.0, batch_size=32, decay_filter=lambda x: x.name not in [], split_indices=None, enable_clip_grad=False, frequency=100): params = filter(lambda x: x.requires_grad, net.get_parameters()) super(ThorAscend, self).__init__(learning_rate, params, weight_decay, loss_scale) _check_param(momentum, frequency, learning_rate, self.__class__.__name__) self.momentum = Parameter(Tensor(momentum, mstype.float32), name="momentum") self.params = self.parameters self.moments = self.params.clone(prefix="moments", init='zeros') self.hyper_map = C.HyperMap() self.opt = P.ApplyMomentum() self.net = net self.matrix_a_cov = ParameterTuple(filter(lambda x: 'matrix_a' in x.name, net.get_parameters())) self.matrix_g_cov = ParameterTuple(filter(lambda x: 'matrix_g' in x.name, net.get_parameters())) self.a_normalizer = ParameterTuple(filter(lambda x: 'a_normalizer' in x.name, net.get_parameters())) self.g_normalizer = ParameterTuple(filter(lambda x: 'g_normalizer' in x.name, net.get_parameters())) logger.info("matrix_a_cov len is 
{}".format(len(self.matrix_a_cov))) self._define_ascend_operator() self.C0 = 16 self.device_shape_pad_flag = () self.diag_block_dim = 128 self.matrix_a = () self.matrix_g = () self.thor_layer_count = 0 self.conv_layer_count = 0 self.weight_conv_idx_map = () self.weight_fim_idx_map = () self.weight_layertype_idx_map = () self.a_split_pad_dim_map = () self.g_split_pad_dim_map = () self.conv_matmul_support_map = () self.batch_matmul_support_list = [1, 2, 4, 5, 6, 8, 9, 16, 18, 24, 32, 36] self.abs_max_support_list = [1, 2, 4, 8, 16, 5, 9, 18, 36, 32] self._process_matrix_init_and_weight_idx_map(self.net) self.matrix_a = ParameterTuple(self.matrix_a) self.matrix_g = ParameterTuple(self.matrix_g) self.matrix_max_inv = () for i in range(len(self.matrix_a)): self.matrix_max_inv = self.matrix_max_inv + ( Parameter(initializer(1, [1], mstype.float32), name="matrix_max" + str(i), requires_grad=False),) self.matrix_max_inv = ParameterTuple(self.matrix_max_inv) self.thor = True self.weight_decay = weight_decay self.decay_flags = tuple(decay_filter(x) for x in self.parameters) self.damping = damping self.batch_size = Tensor(batch_size, mstype.float32) self.loss_scale = Tensor(1 / (loss_scale * loss_scale), mstype.float32) self.batch_size_scale = Tensor(batch_size * batch_size, mstype.float32) self.enable_clip_grad = enable_clip_grad self.frequency = frequency self._define_ascend_reducer(split_indices) def get_frequency(self): """get thor frequency""" return self.frequency def _get_pad_dim(self, matrix_dim): """get diag split pad dim """ split_pad_dim = 0 if matrix_dim == 64: return split_pad_dim res = matrix_dim % self.diag_block_dim if res != 0: split_pad_dim = self.diag_block_dim - res return split_pad_dim def _define_ascend_operator(self): """define ascend operator""" self.cube_matmul_left = P.CusMatMulCubeFraczLeftCast() self.cube_matmul_left_fc = P.CusMatMulCubeDenseLeft() self.cube_matmul_right_fc = P.CusMatMulCubeDenseRight() self.cube_matmul_right_mul = 
P.CusMatMulCubeFraczRightMul() self.transpose = P.Transpose() self.shape = P.Shape() self.reshape = P.Reshape() self.mul = P.Mul() self.log = P.Log() self.exp = P.Exp() self.sqrt = P.Sqrt() self.gather = P.GatherV2() self.assign = P.Assign() self.cast = P.Cast() self.eye = P.Eye() self.concat = P.Concat(0) self.cholesky = P.CusCholeskyTrsm() self.vector_matmul = P.CusBatchMatMul() self.tbe_batch_matmul = P.BatchMatMul(transpose_a=True) self.fused_abs_max2 = P.CusFusedAbsMax1() self.matrix_combine = P.CusMatrixCombine() self.slice = P.Slice() self.expand = P.ExpandDims() self.reduce_sum = P.ReduceSum(keep_dims=False) self.square = P.Square() self.inv = P.Inv() self.matmul = P.MatMul() self.axis = 0 self.one = Tensor(1, mstype.int32) self.cov_step = Parameter(initializer(0, [1], mstype.int32), name="cov_step", requires_grad=False) def _define_ascend_reducer(self, split_indices): """define ascend reducer""" self.parallel_mode = context.get_auto_parallel_context("parallel_mode") self.is_distributed = (self.parallel_mode != ParallelMode.STAND_ALONE) if self.is_distributed: mean = _get_gradients_mean() degree = _get_device_num() if not split_indices: self.split_indices = [len(self.matrix_a_cov) - 1] else: self.split_indices = split_indices if self.conv_layer_count > 0: auto_parallel_context().set_all_reduce_fusion_split_indices(self.split_indices, "hccl_world_groupsum2") auto_parallel_context().set_all_reduce_fusion_split_indices(self.split_indices, "hccl_world_groupsum4") self.grad_reducer_amax = DistributedGradReducer(self.matrix_a_cov, mean, degree, fusion_type=2) self.grad_reducer_gmax = DistributedGradReducer(self.matrix_a_cov, mean, degree, fusion_type=4) auto_parallel_context().set_all_reduce_fusion_split_indices(self.split_indices, "hccl_world_groupsum6") auto_parallel_context().set_all_reduce_fusion_split_indices(self.split_indices, "hccl_world_groupsum8") self.grad_reducer_a = DistributedGradReducer(self.matrix_a_cov, mean, degree, fusion_type=6) 
self.grad_reducer_g = DistributedGradReducer(self.matrix_a_cov, mean, degree, fusion_type=8) def _get_weight_idx_map(self, layer_type, idx, weight_shape): """for Ascend, get weight idx map""" if layer_type in [Conv, FC, Embedding] and "bias" not in self.params[idx].name.lower(): self.weight_fim_idx_map = self.weight_fim_idx_map + (self.thor_layer_count,) self.weight_layertype_idx_map = self.weight_layertype_idx_map + (layer_type,) if layer_type == Embedding: a_pad_dim = 0 g_pad_dim = 0 self.a_split_pad_dim_map = self.a_split_pad_dim_map + (a_pad_dim,) self.g_split_pad_dim_map = self.g_split_pad_dim_map + (g_pad_dim,) else: out_channels = weight_shape[0] g_pad_dim = self._get_pad_dim(out_channels) self.g_split_pad_dim_map = self.g_split_pad_dim_map + (g_pad_dim,) matrix_a_dim = weight_shape[1] if layer_type == Conv: matrix_a_dim = weight_shape[1] * weight_shape[2] * weight_shape[3] a_pad_dim = self._get_pad_dim(matrix_a_dim) self.a_split_pad_dim_map = self.a_split_pad_dim_map + (a_pad_dim,) self.thor_layer_count = self.thor_layer_count + 1 if layer_type == Conv: self.weight_conv_idx_map = self.weight_conv_idx_map + (self.conv_layer_count,) self.conv_layer_count = self.conv_layer_count + 1 else: self.weight_conv_idx_map = self.weight_conv_idx_map + (-1,) else: self.weight_fim_idx_map = self.weight_fim_idx_map + (-1,) self.weight_conv_idx_map = self.weight_conv_idx_map + (-1,) if layer_type == LayerNorm: self.weight_layertype_idx_map = self.weight_layertype_idx_map + (LayerNorm,) else: self.weight_layertype_idx_map = self.weight_layertype_idx_map + (Other,) def _get_fc_matrix(self, weight_shape): """for Ascend, get fc matrix_a and matrix_g""" out_channels = weight_shape[0] in_channels = weight_shape[1] if self.conv_layer_count > 0: if out_channels == 1001: fc_matrix_a = Parameter(Tensor(np.zeros([128, 128, 16, 16]).astype(np.float16)), name='matrix_a_inv_' + str(self.thor_layer_count), requires_grad=False) fc_matrix_g = Parameter(Tensor(np.zeros([63, 63, 16, 
16]).astype(np.float16)), name="matrix_g_inv_" + str(self.thor_layer_count), requires_grad=False) else: fc_matrix_a = Parameter(Tensor(np.eye(in_channels).astype(np.float16)), name='matrix_a_inv_' + str(self.thor_layer_count), requires_grad=False) fc_matrix_g = Parameter(Tensor(np.eye(out_channels).astype(np.float16)), name="matrix_g_inv_" + str(self.thor_layer_count), requires_grad=False) self.matrix_a = self.matrix_a + (fc_matrix_a,) self.matrix_g = self.matrix_g + (fc_matrix_g,) def _process_matrix_init_and_weight_idx_map(self, net): """for Ascend, process matrix init shape, and get weight idx map""" layer_counter = 0 layer_type_map = get_net_layertype_mask(net) for idx in range(len(self.params)): layer_type = layer_type_map[layer_counter] weight = self.params[idx] weight_shape = self.shape(weight) if layer_type == Conv and "bias" not in self.params[idx].name.lower(): in_channels = weight_shape[1] out_channels = weight_shape[0] matrix_a_dim = in_channels * weight_shape[2] * weight_shape[3] matrix_g_dim = out_channels matrix_a_device_shape, matrix_a_device_dim = caculate_device_shape(matrix_a_dim, in_channels, True) matrix_g_device_shape, matrix_g_device_dim = caculate_device_shape(matrix_g_dim, in_channels, False) ret = is_conv_matmul_support_shape(matrix_a_device_shape, matrix_g_device_shape) if ret: matrix_a_inv = Parameter( Tensor(np.reshape(np.identity(matrix_a_device_dim).astype(np.float16), matrix_a_device_shape)), name='matrix_a_inv_' + str(self.thor_layer_count), requires_grad=False) matrix_g_inv = Parameter( Tensor(np.reshape(np.identity(matrix_g_device_dim).astype(np.float16), matrix_g_device_shape)), name="matrix_g_inv_" + str(self.thor_layer_count), requires_grad=False) self.conv_matmul_support_map = self.conv_matmul_support_map + (1,) else: matrix_a_inv = Parameter(Tensor(np.eye(matrix_a_dim).astype(np.float16)), name='matrix_a_inv_' + str(self.thor_layer_count), requires_grad=False) matrix_g_inv = 
Parameter(Tensor(np.eye(matrix_g_dim).astype(np.float16)), name="matrix_g_inv_" + str(self.thor_layer_count), requires_grad=False) self.conv_matmul_support_map = self.conv_matmul_support_map + (0,) self.matrix_a = self.matrix_a + (matrix_a_inv,) self.matrix_g = self.matrix_g + (matrix_g_inv,) device_shape_pad_flag = False if matrix_a_dim != matrix_a_device_dim: device_shape_pad_flag = True self.device_shape_pad_flag = self.device_shape_pad_flag + (device_shape_pad_flag,) elif layer_type == FC and "bias" not in self.params[idx].name.lower(): self._get_fc_matrix(weight_shape) self._get_weight_idx_map(layer_type, idx, weight_shape) # bert.cls1.output_bias: not a network layer, only a trainable param if "output_bias" not in self.params[idx].name.lower(): layer_counter = get_layer_counter(layer_type, layer_counter, self.params, idx) def _process_batch_matmul(self, input_matrix): """process batch matmul""" input_matrix_shape = self.shape(input_matrix) if input_matrix_shape[0] in self.batch_matmul_support_list: input_matrix = self.vector_matmul(input_matrix, input_matrix) else: input_matrix = self.tbe_batch_matmul(input_matrix, input_matrix) return input_matrix def _process_cholesky_pad(self, pad_dim, input_matrix, matrix_shape0): """process cholesky pad""" if pad_dim > 0: matrix_sup = self.eye(pad_dim, pad_dim, mstype.float32) matrix_sup = P.Pad(((0, 0), (matrix_shape0, 0)))(matrix_sup) input_matrix = P.Pad(((0, 0), (0, pad_dim)))(input_matrix) input_matrix = self.concat((input_matrix, matrix_sup)) return input_matrix def _get_abs_max(self, matrix_inv, origin_dim): """get matrix abs max""" cholesky_shape = self.shape(matrix_inv) if cholesky_shape[0] in self.abs_max_support_list: matrix_inv_max = P.CusFusedAbsMax1([origin_dim, origin_dim])(matrix_inv) matrix_max = self.fused_abs_max2(matrix_inv_max) matrix_inv = self.matrix_combine(matrix_inv) else: matrix_inv = self.matrix_combine(matrix_inv) matrix_abs = P.Abs()(matrix_inv) matrix_max = 
P.ReduceMax(keep_dims=False)(matrix_abs) return matrix_max, matrix_inv def _get_fc_ainv_ginv(self, index, damping_step, gradients, matrix_a_allreduce, matrix_g_allreduce, matrix_a_max_allreduce, matrix_g_max_allreduce): """get fc layer ainv and ginv""" thor_layer_count = self.weight_fim_idx_map[index] g = gradients[index] matrix_a = self.matrix_a_cov[thor_layer_count] matrix_g = self.matrix_g_cov[thor_layer_count] matrix_a = F.depend(matrix_a, g) matrix_g = F.depend(matrix_g, g) a_shape = self.shape(matrix_a) a_eye = self.eye(a_shape[0], a_shape[0], mstype.float32) g_shape = self.shape(matrix_g) g_eye = self.eye(g_shape[0], g_shape[0], mstype.float32) damping = self.sqrt(damping_step) matrix_a = matrix_a + damping * a_eye a_pad_dim = self.a_split_pad_dim_map[thor_layer_count] matrix_a = self._process_cholesky_pad(a_pad_dim, matrix_a, a_shape[0]) matrix_a_inv = self.cholesky(matrix_a) matrix_a_inv = self._process_batch_matmul(matrix_a_inv) weight_shape = self.shape(self.params[index]) out_channels = weight_shape[0] in_channels = weight_shape[1] if out_channels == 2: matrix_a_inv = self.matrix_combine(matrix_a_inv) matrix_g_inv = g_eye else: matrix_g = self.mul(matrix_g, self.loss_scale) matrix_g = self.mul(matrix_g, self.batch_size_scale) matrix_g = matrix_g + damping * g_eye g_pad_dim = self.g_split_pad_dim_map[thor_layer_count] matrix_g = self._process_cholesky_pad(g_pad_dim, matrix_g, g_shape[0]) matrix_g_inv = self.cholesky(matrix_g) matrix_g_inv = self._process_batch_matmul(matrix_g_inv) if self.conv_layer_count > 0: a_max, matrix_a_inv = self._get_abs_max(matrix_a_inv, in_channels) g_max, matrix_g_inv = self._get_abs_max(matrix_g_inv, out_channels) a_max = F.depend(a_max, g) g_max = F.depend(g_max, g) matrix_a_max_allreduce = matrix_a_max_allreduce + (a_max,) matrix_g_max_allreduce = matrix_g_max_allreduce + (g_max,) else: matrix_a_inv = self.matrix_combine(matrix_a_inv) matrix_g_inv = self.matrix_combine(matrix_g_inv) if a_pad_dim > 0: matrix_a_inv = 
self.slice(matrix_a_inv, (0, 0), (in_channels, in_channels)) if g_pad_dim > 0: matrix_g_inv = self.slice(matrix_g_inv, (0, 0), (out_channels, out_channels)) matrix_a_inv_shape = self.shape(matrix_a_inv) matrix_g_combine_shape = self.shape(matrix_g_inv) if matrix_a_inv_shape[0] == 2048 and matrix_g_combine_shape[0] == 1001: matrix_a_inv = self.reshape(matrix_a_inv, (matrix_a_inv_shape[0] / 16, 16, matrix_a_inv_shape[0] / 16, 16)) matrix_a_inv = self.transpose(matrix_a_inv, (2, 0, 1, 3)) matrix_g_inv = P.Pad(((0, 7), (0, 7)))(matrix_g_inv) matrix_g_inv_shape = self.shape(matrix_g_inv) matrix_g_inv = self.reshape(matrix_g_inv, (matrix_g_inv_shape[0] / 16, 16, matrix_g_inv_shape[0] / 16, 16)) matrix_g_inv = self.transpose(matrix_g_inv, (2, 0, 1, 3)) matrix_a_allreduce = matrix_a_allreduce + (matrix_a_inv,) matrix_g_allreduce = matrix_g_allreduce + (matrix_g_inv,) return matrix_a_allreduce, matrix_g_allreduce, matrix_a_max_allreduce, matrix_g_max_allreduce def _process_conv_matmul_device_pad(self, conv_layer_count, weight_shape, matrix_a_inv): """process conv matmul device pad""" if self.device_shape_pad_flag[conv_layer_count]: kernel_hw = weight_shape[2] * weight_shape[3] in_channels = weight_shape[1] matrix_a_inv = self.reshape(matrix_a_inv, (kernel_hw, in_channels, kernel_hw, in_channels)) matrix_a_inv = P.Pad(((0, 0), (0, self.C0 - in_channels), (0, 0), (0, self.C0 - in_channels)))(matrix_a_inv) return matrix_a_inv def _get_ainv_ginv_amax_gmax_list(self, gradients, damping_step, matrix_a_allreduce, matrix_g_allreduce, matrix_a_max_allreduce, matrix_g_max_allreduce): """get matrixA inverse list, matrixG inverse list, matrixA_max list, matrixG_max list""" for i in range(len(self.params)): thor_layer_count = self.weight_fim_idx_map[i] conv_layer_count = self.weight_conv_idx_map[i] layer_type = self.weight_layertype_idx_map[i] weight_shape = self.shape(self.params[i]) out_channels = weight_shape[0] if layer_type == Conv: g = gradients[i] matrix_a_dim = weight_shape[1] * 
weight_shape[2] * weight_shape[3] matmul_support_flag = self.conv_matmul_support_map[conv_layer_count] matrix_a = self.matrix_a_cov[thor_layer_count] matrix_g = self.matrix_g_cov[thor_layer_count] matrix_a = F.depend(matrix_a, g) matrix_g = F.depend(matrix_g, g) a_shape = self.shape(matrix_a) a_eye = self.eye(a_shape[0], a_shape[0], mstype.float32) g_shape = self.shape(matrix_g) g_eye = self.eye(g_shape[0], g_shape[0], mstype.float32) a_normalizer = self.a_normalizer[conv_layer_count] g_normalizer = self.g_normalizer[conv_layer_count] a_normalizer = F.depend(a_normalizer, g) g_normalizer = F.depend(g_normalizer, g) damping_a = self.mul(damping_step, self.batch_size / a_normalizer) damping_g = self.mul(damping_step, self.batch_size / g_normalizer) damping_a = self.sqrt(damping_a) matrix_a = matrix_a + damping_a * a_eye a_pad_dim = self.a_split_pad_dim_map[thor_layer_count] matrix_a = self._process_cholesky_pad(a_pad_dim, matrix_a, a_shape[0]) matrix_a_inv = self.cholesky(matrix_a) matrix_a_inv = self._process_batch_matmul(matrix_a_inv) a_max, matrix_a_inv = self._get_abs_max(matrix_a_inv, matrix_a_dim) damping_g = self.sqrt(damping_g) matrix_g = self.mul(matrix_g, self.loss_scale) matrix_g = self.mul(matrix_g, self.batch_size_scale) matrix_g = matrix_g + damping_g * g_eye g_pad_dim = self.g_split_pad_dim_map[thor_layer_count] matrix_g = self._process_cholesky_pad(g_pad_dim, matrix_g, g_shape[0]) matrix_g_inv = self.cholesky(matrix_g) matrix_g_inv = self._process_batch_matmul(matrix_g_inv) g_max, matrix_g_inv = self._get_abs_max(matrix_g_inv, out_channels) if a_pad_dim > 0: matrix_a_inv = self.slice(matrix_a_inv, (0, 0), (matrix_a_dim, matrix_a_dim)) if g_pad_dim > 0: matrix_g_inv = self.slice(matrix_g_inv, (0, 0), (out_channels, out_channels)) if matmul_support_flag == 1: matrix_a_inv = self._process_conv_matmul_device_pad(conv_layer_count, weight_shape, matrix_a_inv) matrix_a_inv_shape = self.shape(self.matrix_a[thor_layer_count]) matrix_a_device_temp_shape = 
(matrix_a_inv_shape[0], matrix_a_inv_shape[2], matrix_a_inv_shape[1], matrix_a_inv_shape[3]) matrix_a_inv = self.reshape(matrix_a_inv, matrix_a_device_temp_shape) matrix_a_inv = self.transpose(matrix_a_inv, (2, 0, 1, 3)) matrix_g_inv_shape = self.shape(self.matrix_g[thor_layer_count]) matrix_g_device_temp_shape = (matrix_g_inv_shape[0], matrix_g_inv_shape[2], matrix_g_inv_shape[1], matrix_g_inv_shape[3]) matrix_g_inv = self.reshape(matrix_g_inv, matrix_g_device_temp_shape) matrix_g_inv = self.transpose(matrix_g_inv, (2, 0, 1, 3)) a_max = F.depend(a_max, g) g_max = F.depend(g_max, g) matrix_a_allreduce = matrix_a_allreduce + (matrix_a_inv,) matrix_g_allreduce = matrix_g_allreduce + (matrix_g_inv,) matrix_a_max_allreduce = matrix_a_max_allreduce + (a_max,) matrix_g_max_allreduce = matrix_g_max_allreduce + (g_max,) elif layer_type == FC: matrix_a_allreduce, matrix_g_allreduce, matrix_a_max_allreduce, matrix_g_max_allreduce = \ self._get_fc_ainv_ginv(i, damping_step, gradients, matrix_a_allreduce, matrix_g_allreduce, matrix_a_max_allreduce, matrix_g_max_allreduce) elif layer_type == Embedding: g = gradients[i] matrix_a = self.matrix_a_cov[thor_layer_count] matrix_g = self.matrix_g_cov[thor_layer_count] matrix_a = F.depend(matrix_a, g) matrix_g = F.depend(matrix_g, g) g_shape = self.shape(matrix_g) g_eye = self.eye(g_shape[0], g_shape[0], mstype.float32) damping = self.sqrt(damping_step) a_eye = P.OnesLike()(matrix_a) matrix_a = self.mul(matrix_a, 1.0 / self.batch_size) matrix_a = matrix_a + damping * a_eye matrix_a_inv = self.inv(matrix_a) matrix_g = self.mul(matrix_g, self.loss_scale) matrix_g = self.mul(matrix_g, self.batch_size_scale) matrix_g = matrix_g + damping * g_eye matrix_g_inv = self.cholesky(matrix_g) matrix_g_inv = self._process_batch_matmul(matrix_g_inv) matrix_g_inv = self.matrix_combine(matrix_g_inv) matrix_a_allreduce = matrix_a_allreduce + (matrix_a_inv,) matrix_g_allreduce = matrix_g_allreduce + (matrix_g_inv,) return matrix_a_allreduce, 
matrix_g_allreduce, matrix_a_max_allreduce, matrix_g_max_allreduce def _process_layernorm(self, damping_step, gradient): """process layernorm layer for thor""" damping = self.sqrt(damping_step) normalizer = self.cast(self.batch_size, mstype.float32) fim_cov = self.square(gradient) fim_cov = self.mul(fim_cov, 1.0 / normalizer) fim_cov = fim_cov + damping fim_inv = self.inv(fim_cov) gradient = self.mul(fim_inv, gradient) return gradient def _process_thor_fc(self, thor_layer_count, matrix_a_allreduce, matrix_g_allreduce, g): """process thor graph fc layer""" temp_a = matrix_a_allreduce[thor_layer_count] temp_g = matrix_g_allreduce[thor_layer_count] self.assign(self.matrix_a_cov[thor_layer_count], temp_a) self.assign(self.matrix_g_cov[thor_layer_count], temp_g) temp_a = self.cast(temp_a, mstype.float16) temp_g = self.cast(temp_g, mstype.float16) g = self.cast(g, mstype.float16) g = self.matmul(temp_g, g) g = self.matmul(g, temp_a) g = self.cast(g, mstype.float32) return g def _get_second_gradients_one(self, params_len, gradients, new_grads): """get second gradients one""" for i in range(params_len): g = gradients[i] thor_layer_count = self.weight_fim_idx_map[i] conv_layer_count = self.weight_conv_idx_map[i] layer_type = self.weight_layertype_idx_map[i] matrix_a = self.matrix_a[thor_layer_count] matrix_g = self.matrix_g[thor_layer_count] matrix_max = self.matrix_max_inv[thor_layer_count] grad_shape = self.shape(g) if layer_type == FC: if grad_shape[0] == 1001: g = self.cube_matmul_left_fc(matrix_g, g) g = self.cube_matmul_right_fc(g, matrix_a, matrix_max) else: temp_a = self.cast(matrix_a, mstype.float16) temp_g = self.cast(matrix_g, mstype.float16) g = self.cast(g, mstype.float16) g = self.matmul(temp_g, g) g = self.matmul(g, temp_a) g = self.cast(g, mstype.float32) g = self.mul(g, matrix_max) elif layer_type == Conv: matmul_support_flag = self.conv_matmul_support_map[conv_layer_count] if matmul_support_flag == 1: g = self.cube_matmul_left(matrix_g, g) g = 
self.cube_matmul_right_mul(g, matrix_a, matrix_max) else: g = self.reshape(g, (grad_shape[0], grad_shape[1] * grad_shape[2] * grad_shape[3])) temp_a = self.cast(matrix_a, mstype.float16) temp_g = self.cast(matrix_g, mstype.float16) g = self.cast(g, mstype.float16) g = self.matmul(temp_g, g) g = self.matmul(g, temp_a) g = self.cast(g, mstype.float32) g = self.mul(g, matrix_max) g = self.reshape(g, grad_shape) new_grads = new_grads + (g,) return new_grads def _get_second_gradients(self, new_grads, damping_step, gradients): """get second gradients for thor""" params_len = len(self.params) if self.conv_layer_count > 0: new_grads = self._get_second_gradients_one(params_len, gradients, new_grads) else: for i in range(params_len): g = gradients[i] thor_layer_count = self.weight_fim_idx_map[i] layer_type = self.weight_layertype_idx_map[i] if layer_type == Embedding: temp_a_ori = self.matrix_a_cov[thor_layer_count] temp_g = self.matrix_g_cov[thor_layer_count] temp_a = self.expand(temp_a_ori, 1) g = self.mul(temp_a, g) temp_g = self.cast(temp_g, mstype.float16) g = self.cast(g, mstype.float16) g = self.matmul(g, temp_g) g = self.cast(g, mstype.float32) elif layer_type == FC: temp_a = self.matrix_a_cov[thor_layer_count] temp_g = self.matrix_g_cov[thor_layer_count] temp_a = self.cast(temp_a, mstype.float16) temp_g = self.cast(temp_g, mstype.float16) g = self.cast(g, mstype.float16) g = self.matmul(temp_g, g) g = self.matmul(g, temp_a) g = self.cast(g, mstype.float32) elif layer_type == LayerNorm: g = self._process_layernorm(damping_step, g) new_grads = new_grads + (g,) return new_grads def _get_second_grad_by_matmul(self, index, temp_a, temp_g, g, temp_max): """get second gradient by matmul""" conv_layer_count = self.weight_conv_idx_map[index] layer_type = self.weight_layertype_idx_map[index] grad_shape = self.shape(g) if layer_type == FC: if grad_shape[0] == 1001: g = self.cube_matmul_left_fc(temp_g, g) g = self.cube_matmul_right_fc(g, temp_a, temp_max) else: temp_a = 
self.cast(temp_a, mstype.float16) temp_g = self.cast(temp_g, mstype.float16) g = self.cast(g, mstype.float16) g = self.matmul(temp_g, g) g = self.matmul(g, temp_a) g = self.cast(g, mstype.float32) g = self.mul(g, temp_max) elif layer_type == Conv: a_normalizer = self.a_normalizer[conv_layer_count] a_normalizer = F.depend(a_normalizer, g) temp_max = self.mul(temp_max, self.batch_size / a_normalizer) matmul_support_flag = self.conv_matmul_support_map[conv_layer_count] if matmul_support_flag == 1: g = self.cube_matmul_left(temp_g, g) g = self.cube_matmul_right_mul(g, temp_a, temp_max) else: g = self.reshape(g, (grad_shape[0], grad_shape[1] * grad_shape[2] * grad_shape[3])) temp_a = self.cast(temp_a, mstype.float16) temp_g = self.cast(temp_g, mstype.float16) g = self.cast(g, mstype.float16) g = self.matmul(temp_g, g) g = self.matmul(g, temp_a) g = self.cast(g, mstype.float32) g = self.mul(g, temp_max) g = self.reshape(g, grad_shape) return g, temp_max def _get_second_grad_by_layertype(self, index, matrix_a_allreduce, matrix_g_allreduce, g, damping_step): """get second gradient by layertype""" thor_layer_count = self.weight_fim_idx_map[index] layer_type = self.weight_layertype_idx_map[index] if layer_type == Embedding: temp_a_ori = matrix_a_allreduce[thor_layer_count] temp_g = matrix_g_allreduce[thor_layer_count] self.assign(self.matrix_a_cov[thor_layer_count], temp_a_ori) self.assign(self.matrix_g_cov[thor_layer_count], temp_g) temp_a = self.expand(temp_a_ori, 1) g = self.mul(temp_a, g) temp_g = self.cast(temp_g, mstype.float16) g = self.cast(g, mstype.float16) g = self.matmul(g, temp_g) g = self.cast(g, mstype.float32) elif layer_type == FC: g = self._process_thor_fc(thor_layer_count, matrix_a_allreduce, matrix_g_allreduce, g) elif layer_type == LayerNorm: g = self._process_layernorm(damping_step, g) return g def construct(self, gradients): params = self.params moments = self.moments gradients = self.scale_grad(gradients) damping_step = self.gather(self.damping, 
self.cov_step, self.axis) damping_step = self.cast(damping_step, mstype.float32) if self.thor: matrix_a_allreduce = () matrix_g_allreduce = () matrix_a_max_allreduce = () matrix_g_max_allreduce = () matrix_a_allreduce, matrix_g_allreduce, matrix_a_max_allreduce, matrix_g_max_allreduce = \ self._get_ainv_ginv_amax_gmax_list(gradients, damping_step, matrix_a_allreduce, matrix_g_allreduce, matrix_a_max_allreduce, matrix_g_max_allreduce) if self.is_distributed: matrix_a_allreduce = self.grad_reducer_a(matrix_a_allreduce) matrix_g_allreduce = self.grad_reducer_g(matrix_g_allreduce) if self.conv_layer_count > 0: matrix_a_max_allreduce = self.grad_reducer_amax(matrix_a_max_allreduce) matrix_g_max_allreduce = self.grad_reducer_gmax(matrix_g_max_allreduce) new_grads = () if self.conv_layer_count > 0: for i in range(len(self.params)): g = gradients[i] thor_layer_count = self.weight_fim_idx_map[i] temp_a = matrix_a_allreduce[thor_layer_count] temp_g = matrix_g_allreduce[thor_layer_count] matrix_a_inv_max = self.log(matrix_a_max_allreduce[thor_layer_count]) matrix_a_inv_max = self.mul(matrix_a_inv_max, -1) matrix_a_inv_max = self.exp(matrix_a_inv_max) temp_a = self.mul(temp_a, matrix_a_inv_max) matrix_g_inv_max = self.log(matrix_g_max_allreduce[thor_layer_count]) matrix_g_inv_max = self.mul(matrix_g_inv_max, -1) matrix_g_inv_max = self.exp(matrix_g_inv_max) temp_g = self.mul(temp_g, matrix_g_inv_max) temp_max = self.mul(matrix_g_max_allreduce[thor_layer_count], matrix_g_max_allreduce[thor_layer_count]) temp_a = self.cast(temp_a, mstype.float16) temp_g = self.cast(temp_g, mstype.float16) g, temp_max = self._get_second_grad_by_matmul(i, temp_a, temp_g, g, temp_max) self.assign(self.matrix_a[thor_layer_count], temp_a) self.assign(self.matrix_g[thor_layer_count], temp_g) self.assign(self.matrix_max_inv[thor_layer_count], temp_max) new_grads = new_grads + (g,) gradients = new_grads else: for i in range(len(self.params)): g = gradients[i] g = self._get_second_grad_by_layertype(i, 
matrix_a_allreduce, matrix_g_allreduce, g, damping_step) new_grads = new_grads + (g,) gradients = new_grads else: new_grads = () gradients = self._get_second_gradients(new_grads, damping_step, gradients) self.cov_step = self.cov_step + self.one if self.weight_decay > 0: gradients = self.hyper_map(F.partial(apply_decay, self.weight_decay), self.decay_flags, params, gradients) gradients = clip_gradient(self.enable_clip_grad, gradients) lr = self.get_lr() success = self.hyper_map(F.partial(_momentum_opt, self.opt, self.momentum, lr), gradients, params, moments) return success
51.678871
120
0.615061
import numpy as np from mindspore.ops import functional as F, composite as C, operations as P from mindspore.common.initializer import initializer from mindspore.common.parameter import Parameter, ParameterTuple from mindspore.common.tensor import Tensor import mindspore.nn as nn import mindspore.common.dtype as mstype import mindspore.log as logger from mindspore._checkparam import Validator from mindspore.nn.optim.optimizer import Optimizer from mindspore.parallel._utils import _get_device_num, _get_gradients_mean from mindspore import context from mindspore.context import ParallelMode from mindspore.nn.layer import DenseThor, Conv2dThor, EmbeddingThor, EmbeddingLookupThor from mindspore.nn.wrap import DistributedGradReducer from mindspore.train.train_thor.convert_utils import ConvertNetUtils from mindspore.parallel._auto_parallel_context import auto_parallel_context Other = -1 Conv = 1 FC = 2 Embedding = 3 LayerNorm = 4 BatchNorm = 5 op_add = P.AddN() apply_decay = C.MultitypeFuncGraph("apply_decay") _momentum_opt = C.MultitypeFuncGraph("momentum_opt") @apply_decay.register("Number", "Bool", "Tensor", "Tensor") def _tensor_apply_decay(weight_decay, if_apply, weight, gradient): if if_apply: return op_add((weight * weight_decay, gradient)) return gradient @_momentum_opt.register("Function", "Tensor", "Tensor", "Tensor", "Tensor", "Tensor") def _tensor_run_opt_ext(opt, momentum, learning_rate, gradient, weight, moment): success = True success = F.depend(success, opt(weight, moment, learning_rate, gradient, momentum)) return success IS_ENABLE_GLOBAL_NORM = False GRADIENT_CLIP_TYPE = 1 GRADIENT_CLIP_VALUE = 1.0 clip_grad = C.MultitypeFuncGraph("clip_grad") hyper_map_op = C.HyperMap() @clip_grad.register("Number", "Number", "Tensor") def _clip_grad(clip_type, clip_value, grad): if clip_type not in [0, 1]: return grad dt = F.dtype(grad) if clip_type == 0: new_grad = C.clip_by_value(grad, F.cast(F.tuple_to_array((-clip_value,)), dt), 
F.cast(F.tuple_to_array((clip_value,)), dt)) else: new_grad = nn.ClipByNorm()(grad, F.cast(F.tuple_to_array((clip_value,)), dt)) return new_grad def clip_gradient(enable_clip_grad, gradients): if enable_clip_grad: if IS_ENABLE_GLOBAL_NORM: gradients = C.clip_by_global_norm(gradients, GRADIENT_CLIP_VALUE, None) else: gradients = hyper_map_op(F.partial(clip_grad, GRADIENT_CLIP_TYPE, GRADIENT_CLIP_VALUE), gradients) return gradients C0 = 16 def _check_param(momentum, frequency, lr, cls_name): Validator.check_value_type("momentum", momentum, [float], cls_name) if isinstance(momentum, float) and momentum < 0.0: raise ValueError("momentum should be at least 0.0, but got momentum {}".format(momentum)) Validator.check_value_type("frequency", frequency, [int], cls_name) if isinstance(frequency, int) and frequency < 2: raise ValueError("frequency should be at least 2, but got frequency {}".format(frequency)) Validator.check_value_type("learning rate", lr, [Tensor], cls_name) def caculate_device_shape(matrix_dim, channel, is_a): if is_a: if channel // C0 == 0: matrix_dim = (matrix_dim / channel) * C0 ll = (int(matrix_dim // C0), int(matrix_dim // C0), C0, C0), int(matrix_dim) return ll def is_conv_matmul_support_shape(matrix_a_shape, matrix_g_shape): temp = (matrix_g_shape, matrix_a_shape) support_shape = [((4, 4, 16, 16), (49, 49, 16, 16)), ((4, 4, 16, 16), (4, 4, 16, 16)), ((4, 4, 16, 16), (36, 36, 16, 16)), ((16, 16, 16, 16), (4, 4, 16, 16)), ((4, 4, 16, 16), (16, 16, 16, 16)), ((8, 8, 16, 16), (16, 16, 16, 16)), ((8, 8, 16, 16), (72, 72, 16, 16)), ((32, 32, 16, 16), (8, 8, 16, 16)), ((32, 32, 16, 16), (16, 16, 16, 16)), ((8, 8, 16, 16), (32, 32, 16, 16)), ((16, 16, 16, 16), (32, 32, 16, 16)), ((16, 16, 16, 16), (144, 144, 16, 16)), ((64, 64, 16, 16), (16, 16, 16, 16)), ((64, 64, 16, 16), (32, 32, 16, 16)), ((16, 16, 16, 16), (64, 64, 16, 16)), ((32, 32, 16, 16), (64, 64, 16, 16)), ((32, 32, 16, 16), (288, 288, 16, 16)), ((128, 128, 16, 16), (32, 32, 16, 16)), ((128, 128, 
16, 16), (64, 64, 16, 16)), ((32, 32, 16, 16), (128, 128, 16, 16))] if temp in support_shape: return True return False def caculate_matmul_shape(matrix_a_dim, matrix_g_dim, split_dim): split_dima = split_dim split_dimg = split_dim if matrix_a_dim % split_dim == 0: batch_w = matrix_a_dim // split_dim else: if matrix_a_dim < split_dim: batch_w = 1 split_dima = matrix_a_dim else: batch_w = matrix_a_dim // split_dim + 1 if matrix_g_dim % split_dim == 0: batch_h = matrix_g_dim // split_dim else: if matrix_g_dim < split_dim: batch_h = 1 split_dimg = matrix_g_dim else: batch_h = matrix_g_dim // split_dim + 1 matrix_a_shape = (batch_h, batch_w, split_dima, split_dima) matrix_g_shape = (batch_h, split_dimg, split_dimg) return matrix_a_shape, matrix_g_shape def get_layer_type_for_dense_and_conv(subcell, prefix, layertype_map): if subcell.weight.requires_grad: if "rpn_with_loss.rpn_convs_list." not in prefix.lower() \ or "rpn_with_loss.rpn_convs_list.0." in prefix.lower(): layertype_map.append(Other) def find_net_layertype_recur(net, layertype_map): cells = net.name_cells() for name in cells: subcell = cells[name] prefix = subcell.param_prefix if subcell == net: continue elif isinstance(subcell, Conv2dThor): layertype_map.append(Conv) elif isinstance(subcell, DenseThor): layertype_map.append(FC) elif isinstance(subcell, (EmbeddingThor, EmbeddingLookupThor)): layertype_map.append(Embedding) elif isinstance(subcell, nn.LayerNorm): layertype_map.append(LayerNorm) elif isinstance(subcell, nn.BatchNorm2d): if subcell.gamma.requires_grad: layertype_map.append(BatchNorm) elif isinstance(subcell, (nn.Conv2d, nn.Dense, nn.Embedding, nn.Conv2dTranspose, nn.Conv1d, nn.Conv1dTranspose, nn.BatchNorm1d, nn.GroupNorm, nn.GlobalBatchNorm)): if isinstance(subcell, (nn.Dense, nn.Conv2d)): get_layer_type_for_dense_and_conv(subcell, prefix, layertype_map) else: layertype_map.append(Other) else: find_net_layertype_recur(subcell, layertype_map) def get_net_layertype_mask(net): layertype_map = [] 
find_net_layertype_recur(net, layertype_map) return layertype_map def get_layer_counter(layer_type, layer_counter, params, idx): if layer_type in [Conv, FC]: if "bias" in params[idx].name.lower(): layer_counter = layer_counter + 1 else: if idx < len(params) - 1 and "bias" not in params[idx + 1].name.lower(): layer_counter = layer_counter + 1 elif layer_type in [LayerNorm, BatchNorm]: if "beta" in params[idx].name.lower(): layer_counter = layer_counter + 1 else: if "bias" in params[idx].name.lower(): layer_counter = layer_counter + 1 elif "weight" in params[idx].name.lower(): if idx < len(params) - 1 and "bias" not in params[idx + 1].name.lower(): layer_counter = layer_counter + 1 else: layer_counter = layer_counter + 1 return layer_counter def thor(net, learning_rate, damping, momentum, weight_decay=0.0, loss_scale=1.0, batch_size=32, use_nesterov=False, decay_filter=lambda x: x.name not in [], split_indices=None, enable_clip_grad=False, frequency=100): context.set_context(max_call_depth=10000) ConvertNetUtils().convert_to_thor_net(net) if context.get_context("device_target") == "Ascend": return ThorAscend(net, learning_rate, damping, momentum, weight_decay, loss_scale, batch_size, decay_filter, split_indices=split_indices, enable_clip_grad=enable_clip_grad, frequency=frequency) return ThorGpu(net, learning_rate, damping, momentum, weight_decay, loss_scale, batch_size, use_nesterov, decay_filter, split_indices=split_indices, enable_clip_grad=enable_clip_grad, frequency=frequency) class ThorGpu(Optimizer): def __init__(self, net, learning_rate, damping, momentum, weight_decay=0.0, loss_scale=1.0, batch_size=32, use_nesterov=False, decay_filter=lambda x: x.name not in [], split_indices=None, enable_clip_grad=False, frequency=100): params = filter(lambda x: x.requires_grad, net.get_parameters()) super(ThorGpu, self).__init__(learning_rate, params, weight_decay, loss_scale) _check_param(momentum, frequency, learning_rate, self.__class__.__name__) self.momentum = 
Parameter(Tensor(momentum, mstype.float32), name="momentum") self.params = self.parameters self.use_nesterov = Validator.check_bool(use_nesterov) self.moments = self.params.clone(prefix="moments", init='zeros') self.hyper_map = C.HyperMap() self.opt = P.ApplyMomentum(use_nesterov=self.use_nesterov) self.net = net self.matrix_a_cov = ParameterTuple(filter(lambda x: 'matrix_a' in x.name, net.get_parameters())) self.matrix_g_cov = ParameterTuple(filter(lambda x: 'matrix_g' in x.name, net.get_parameters())) self.a_normalizer = ParameterTuple(filter(lambda x: 'a_normalizer' in x.name, net.get_parameters())) self.g_normalizer = ParameterTuple(filter(lambda x: 'g_normalizer' in x.name, net.get_parameters())) self.batch_size = Tensor(batch_size, mstype.float32) self.loss_scale = Tensor(1 / (loss_scale * loss_scale), mstype.float32) self.batch_size_scale = Tensor(batch_size * batch_size, mstype.float32) self.damping = damping self._define_gpu_operator() logger.info("matrix_a_cov len is {}".format(len(self.matrix_a_cov))) self.thor = True self.matrix_a = () self.matrix_g = () self.matrix_a_shape = () self.thor_layer_count = 0 self.conv_layer_count = 0 self.weight_fim_idx_map = () self.weight_conv_idx_map = () self.weight_layertype_idx_map = () self._process_matrix_init_and_weight_idx_map(self.net) self.matrix_a = ParameterTuple(self.matrix_a) self.matrix_g = ParameterTuple(self.matrix_g) self.weight_decay = weight_decay self.decay_flags = tuple(decay_filter(x) for x in self.parameters) self.update_gradient = P.UpdateThorGradient(split_dim=self.split_dim) self.enable_clip_grad = enable_clip_grad self.frequency = frequency self._define_gpu_reducer(split_indices) def get_frequency(self): return self.frequency def _define_gpu_operator(self): self.transpose = P.Transpose() self.shape = P.Shape() self.reshape = P.Reshape() self.matmul = P.MatMul() self.assign = P.Assign() self.mul = P.Mul() self.gather = P.GatherV2() self.one = Tensor(1, mstype.int32) self.feature_map = 
Tensor(1.0, mstype.float32) self.axis = 0 self.cov_step = Parameter(initializer(0, [1], mstype.int32), name="cov_step", requires_grad=False) self.cast = P.Cast() self.sqrt = P.Sqrt() self.eye = P.Eye() self.split_dim = 128 self.embedding_cholesky = P.CholeskyTrsm() self.cholesky = P.CholeskyTrsm(split_dim=self.split_dim) self.vector_matmul = P.BatchMatMul(transpose_a=True) self.reduce_sum = P.ReduceSum(keep_dims=False) self.inv = P.Reciprocal() self.square = P.Square() self.expand = P.ExpandDims() def _define_gpu_reducer(self, split_indices): self.parallel_mode = context.get_auto_parallel_context("parallel_mode") self.is_distributed = (self.parallel_mode != ParallelMode.STAND_ALONE) if self.is_distributed: mean = _get_gradients_mean() degree = _get_device_num() if not split_indices: self.split_indices = [len(self.matrix_a_cov) - 1] else: self.split_indices = split_indices auto_parallel_context().set_all_reduce_fusion_split_indices(self.split_indices, "hccl_world_groupsum6") auto_parallel_context().set_all_reduce_fusion_split_indices(self.split_indices, "hccl_world_groupsum8") self.grad_reducer_a = DistributedGradReducer(self.matrix_a_cov, mean, degree, fusion_type=6) self.grad_reducer_g = DistributedGradReducer(self.matrix_a_cov, mean, degree, fusion_type=8) def _process_matrix_init_and_weight_idx_map(self, net): layer_type_map = get_net_layertype_mask(net) layer_counter = 0 for idx in range(len(self.params)): layer_type = layer_type_map[layer_counter] weight = self.params[idx] weight_shape = self.shape(weight) if layer_type in [Conv, FC] and "bias" not in self.params[idx].name.lower(): in_channels = weight_shape[1] out_channels = weight_shape[0] matrix_a_dim = in_channels if layer_type == Conv: matrix_a_dim = in_channels * weight_shape[2] * weight_shape[3] matrix_g_dim = out_channels matrix_a_shape, matrix_g_shape = caculate_matmul_shape(matrix_a_dim, matrix_g_dim, self.split_dim) matrix_a_inv = Parameter(np.zeros(matrix_a_shape).astype(np.float32), 
name='matrix_a_inv_' + str(self.thor_layer_count), requires_grad=False) matrix_g_inv = Parameter(np.zeros(matrix_g_shape).astype(np.float32), name="matrix_g_inv_" + str(self.thor_layer_count), requires_grad=False) self.matrix_a = self.matrix_a + (matrix_a_inv,) self.matrix_g = self.matrix_g + (matrix_g_inv,) self.matrix_a_shape = self.matrix_a_shape + (matrix_a_shape,) elif layer_type == Embedding: vocab_size = weight_shape[0] embedding_size = weight_shape[1] matrix_a_inv = Parameter(Tensor(np.zeros([vocab_size]).astype(np.float32)), name='matrix_a_inv_' + str(self.thor_layer_count), requires_grad=False) matrix_g_inv = Parameter(Tensor(np.zeros([embedding_size, embedding_size]).astype(np.float32)), name="matrix_g_inv_" + str(self.thor_layer_count), requires_grad=False) self.matrix_a = self.matrix_a + (matrix_a_inv,) self.matrix_g = self.matrix_g + (matrix_g_inv,) self.matrix_a_shape = self.matrix_a_shape + ((vocab_size,),) if layer_type in [Conv, FC, Embedding] and "bias" not in self.params[idx].name.lower(): self.weight_fim_idx_map = self.weight_fim_idx_map + (self.thor_layer_count,) self.thor_layer_count = self.thor_layer_count + 1 self.weight_layertype_idx_map = self.weight_layertype_idx_map + (layer_type,) if layer_type == Conv: self.weight_conv_idx_map = self.weight_conv_idx_map + (self.conv_layer_count,) self.conv_layer_count = self.conv_layer_count + 1 else: self.weight_conv_idx_map = self.weight_conv_idx_map + (-1,) else: self.weight_conv_idx_map = self.weight_conv_idx_map + (-1,) self.weight_fim_idx_map = self.weight_fim_idx_map + (-1,) if layer_type == LayerNorm: self.weight_layertype_idx_map = self.weight_layertype_idx_map + (LayerNorm,) else: self.weight_layertype_idx_map = self.weight_layertype_idx_map + (Other,) if "output_bias" not in self.params[idx].name.lower(): layer_counter = get_layer_counter(layer_type, layer_counter, self.params, idx) def _get_ainv_ginv_list(self, gradients, damping_step, matrix_a_allreduce, matrix_g_allreduce): for i in 
range(len(self.params)): thor_layer_count = self.weight_fim_idx_map[i] conv_layer_count = self.weight_conv_idx_map[i] layer_type = self.weight_layertype_idx_map[i] if layer_type in [Conv, FC, Embedding]: g = gradients[i] matrix_a = self.matrix_a_cov[thor_layer_count] matrix_g = self.matrix_g_cov[thor_layer_count] matrix_a = F.depend(matrix_a, g) matrix_g = F.depend(matrix_g, g) damping_a = damping_step damping_g = damping_step feature_map = self.feature_map if layer_type == Conv: a_normalizer = self.a_normalizer[conv_layer_count] g_normalizer = self.g_normalizer[conv_layer_count] a_normalizer = F.depend(a_normalizer, g) g_normalizer = F.depend(g_normalizer, g) damping_a = self.mul(damping_step, 1.0 / a_normalizer) damping_g = self.mul(damping_step, 1.0 / g_normalizer) feature_map = self.sqrt(1.0 / a_normalizer) a_shape = self.shape(matrix_a) a_eye = self.eye(a_shape[0], a_shape[0], mstype.float32) damping_a = self.sqrt(damping_a) damping_g = self.sqrt(damping_g) g_shape = self.shape(matrix_g) g_eye = self.eye(g_shape[0], g_shape[1], mstype.float32) matrix_g = self.mul(matrix_g, self.loss_scale) matrix_g = self.mul(matrix_g, self.batch_size_scale) matrix_g = matrix_g + damping_g * g_eye if layer_type == Embedding: a_eye = P.OnesLike()(matrix_a) matrix_a = self.mul(matrix_a, 1.0 / self.batch_size) matrix_a = matrix_a + damping_a * a_eye matrix_a = self.inv(matrix_a) matrix_g = self.embedding_cholesky(matrix_g) matrix_g = self.matmul(matrix_g, matrix_g) else: matrix_a = matrix_a + damping_a * a_eye matrix_a = self.cholesky(matrix_a) matrix_a = self.vector_matmul(matrix_a, matrix_a) matrix_a = P.BroadcastTo(self.matrix_a_shape[thor_layer_count])(matrix_a) matrix_g = self.cholesky(matrix_g) matrix_g = self.vector_matmul(matrix_g, matrix_g) matrix_a = self.mul(matrix_a, feature_map) matrix_g = self.mul(matrix_g, feature_map) matrix_a_allreduce = matrix_a_allreduce + (matrix_a,) matrix_g_allreduce = matrix_g_allreduce + (matrix_g,) return matrix_a_allreduce, 
matrix_g_allreduce def _process_layernorm(self, damping_step, gradient): damping = self.sqrt(damping_step) normalizer = self.batch_size normalizer = self.cast(normalizer, mstype.float32) fim_cov = self.square(gradient) fim_cov = self.mul(fim_cov, 1.0 / normalizer) fim_cov = fim_cov + damping fim_inv = self.inv(fim_cov) gradient = self.mul(fim_inv, gradient) return gradient def _reshape_gradient(self, conv_layer_count, g, g_shape): if conv_layer_count != -1: g = self.reshape(g, g_shape) return g def construct(self, gradients): params = self.params moments = self.moments gradients = self.scale_grad(gradients) damping_step = self.gather(self.damping, self.cov_step, self.axis) damping_step = self.cast(damping_step, mstype.float32) new_grads = () if self.thor: matrix_ainv_list = () matrix_ginv_list = () matrix_a_allreduce, matrix_g_allreduce = self._get_ainv_ginv_list(gradients, damping_step, matrix_ainv_list, matrix_ginv_list) if self.is_distributed: matrix_a_allreduce = self.grad_reducer_a(matrix_a_allreduce) matrix_g_allreduce = self.grad_reducer_g(matrix_g_allreduce) for i in range(len(self.params)): g = gradients[i] thor_layer_count = self.weight_fim_idx_map[i] conv_layer_count = self.weight_conv_idx_map[i] layer_type = self.weight_layertype_idx_map[i] if layer_type in [Conv, FC]: g_shape = self.shape(g) g = self.reshape(g, (g_shape[0], -1)) matrix_a = matrix_a_allreduce[thor_layer_count] matrix_g = matrix_g_allreduce[thor_layer_count] g = self.update_gradient(matrix_g, g, matrix_a) self.assign(self.matrix_a[thor_layer_count], matrix_a) self.assign(self.matrix_g[thor_layer_count], matrix_g) g = self._reshape_gradient(conv_layer_count, g, g_shape) elif layer_type == Embedding: matrix_a = matrix_a_allreduce[thor_layer_count] matrix_g = matrix_g_allreduce[thor_layer_count] self.assign(self.matrix_a[thor_layer_count], matrix_a) self.assign(self.matrix_g[thor_layer_count], matrix_g) temp_a = self.expand(matrix_a, 1) g = self.mul(temp_a, g) g = self.matmul(g, matrix_g) 
elif layer_type == LayerNorm: g = self._process_layernorm(damping_step, g) new_grads = new_grads + (g,) else: for j in range(len(self.params)): g = gradients[j] thor_layer_count = self.weight_fim_idx_map[j] conv_layer_count = self.weight_conv_idx_map[j] layer_type = self.weight_layertype_idx_map[j] if layer_type in [Conv, FC]: g_shape = self.shape(g) g = self.reshape(g, (g_shape[0], -1)) matrix_a = self.matrix_a[thor_layer_count] matrix_g = self.matrix_g[thor_layer_count] g = self.update_gradient(matrix_g, g, matrix_a) g = self._reshape_gradient(conv_layer_count, g, g_shape) elif layer_type == Embedding: matrix_a = self.matrix_a[thor_layer_count] matrix_g = self.matrix_g[thor_layer_count] g = gradients[j] temp_a = self.expand(matrix_a, 1) g = self.mul(temp_a, g) g = self.matmul(g, matrix_g) elif layer_type == LayerNorm: g = self._process_layernorm(damping_step, g) new_grads = new_grads + (g,) gradients = new_grads self.cov_step = self.cov_step + self.one if self.weight_decay > 0: gradients = self.hyper_map(F.partial(apply_decay, self.weight_decay), self.decay_flags, params, gradients) gradients = clip_gradient(self.enable_clip_grad, gradients) lr = self.get_lr() success = self.hyper_map(F.partial(_momentum_opt, self.opt, self.momentum, lr), gradients, params, moments) return success class ThorAscend(Optimizer): def __init__(self, net, learning_rate, damping, momentum, weight_decay=0.0, loss_scale=1.0, batch_size=32, decay_filter=lambda x: x.name not in [], split_indices=None, enable_clip_grad=False, frequency=100): params = filter(lambda x: x.requires_grad, net.get_parameters()) super(ThorAscend, self).__init__(learning_rate, params, weight_decay, loss_scale) _check_param(momentum, frequency, learning_rate, self.__class__.__name__) self.momentum = Parameter(Tensor(momentum, mstype.float32), name="momentum") self.params = self.parameters self.moments = self.params.clone(prefix="moments", init='zeros') self.hyper_map = C.HyperMap() self.opt = P.ApplyMomentum() 
self.net = net self.matrix_a_cov = ParameterTuple(filter(lambda x: 'matrix_a' in x.name, net.get_parameters())) self.matrix_g_cov = ParameterTuple(filter(lambda x: 'matrix_g' in x.name, net.get_parameters())) self.a_normalizer = ParameterTuple(filter(lambda x: 'a_normalizer' in x.name, net.get_parameters())) self.g_normalizer = ParameterTuple(filter(lambda x: 'g_normalizer' in x.name, net.get_parameters())) logger.info("matrix_a_cov len is {}".format(len(self.matrix_a_cov))) self._define_ascend_operator() self.C0 = 16 self.device_shape_pad_flag = () self.diag_block_dim = 128 self.matrix_a = () self.matrix_g = () self.thor_layer_count = 0 self.conv_layer_count = 0 self.weight_conv_idx_map = () self.weight_fim_idx_map = () self.weight_layertype_idx_map = () self.a_split_pad_dim_map = () self.g_split_pad_dim_map = () self.conv_matmul_support_map = () self.batch_matmul_support_list = [1, 2, 4, 5, 6, 8, 9, 16, 18, 24, 32, 36] self.abs_max_support_list = [1, 2, 4, 8, 16, 5, 9, 18, 36, 32] self._process_matrix_init_and_weight_idx_map(self.net) self.matrix_a = ParameterTuple(self.matrix_a) self.matrix_g = ParameterTuple(self.matrix_g) self.matrix_max_inv = () for i in range(len(self.matrix_a)): self.matrix_max_inv = self.matrix_max_inv + ( Parameter(initializer(1, [1], mstype.float32), name="matrix_max" + str(i), requires_grad=False),) self.matrix_max_inv = ParameterTuple(self.matrix_max_inv) self.thor = True self.weight_decay = weight_decay self.decay_flags = tuple(decay_filter(x) for x in self.parameters) self.damping = damping self.batch_size = Tensor(batch_size, mstype.float32) self.loss_scale = Tensor(1 / (loss_scale * loss_scale), mstype.float32) self.batch_size_scale = Tensor(batch_size * batch_size, mstype.float32) self.enable_clip_grad = enable_clip_grad self.frequency = frequency self._define_ascend_reducer(split_indices) def get_frequency(self): return self.frequency def _get_pad_dim(self, matrix_dim): split_pad_dim = 0 if matrix_dim == 64: return split_pad_dim 
res = matrix_dim % self.diag_block_dim if res != 0: split_pad_dim = self.diag_block_dim - res return split_pad_dim def _define_ascend_operator(self): self.cube_matmul_left = P.CusMatMulCubeFraczLeftCast() self.cube_matmul_left_fc = P.CusMatMulCubeDenseLeft() self.cube_matmul_right_fc = P.CusMatMulCubeDenseRight() self.cube_matmul_right_mul = P.CusMatMulCubeFraczRightMul() self.transpose = P.Transpose() self.shape = P.Shape() self.reshape = P.Reshape() self.mul = P.Mul() self.log = P.Log() self.exp = P.Exp() self.sqrt = P.Sqrt() self.gather = P.GatherV2() self.assign = P.Assign() self.cast = P.Cast() self.eye = P.Eye() self.concat = P.Concat(0) self.cholesky = P.CusCholeskyTrsm() self.vector_matmul = P.CusBatchMatMul() self.tbe_batch_matmul = P.BatchMatMul(transpose_a=True) self.fused_abs_max2 = P.CusFusedAbsMax1() self.matrix_combine = P.CusMatrixCombine() self.slice = P.Slice() self.expand = P.ExpandDims() self.reduce_sum = P.ReduceSum(keep_dims=False) self.square = P.Square() self.inv = P.Inv() self.matmul = P.MatMul() self.axis = 0 self.one = Tensor(1, mstype.int32) self.cov_step = Parameter(initializer(0, [1], mstype.int32), name="cov_step", requires_grad=False) def _define_ascend_reducer(self, split_indices): self.parallel_mode = context.get_auto_parallel_context("parallel_mode") self.is_distributed = (self.parallel_mode != ParallelMode.STAND_ALONE) if self.is_distributed: mean = _get_gradients_mean() degree = _get_device_num() if not split_indices: self.split_indices = [len(self.matrix_a_cov) - 1] else: self.split_indices = split_indices if self.conv_layer_count > 0: auto_parallel_context().set_all_reduce_fusion_split_indices(self.split_indices, "hccl_world_groupsum2") auto_parallel_context().set_all_reduce_fusion_split_indices(self.split_indices, "hccl_world_groupsum4") self.grad_reducer_amax = DistributedGradReducer(self.matrix_a_cov, mean, degree, fusion_type=2) self.grad_reducer_gmax = DistributedGradReducer(self.matrix_a_cov, mean, degree, fusion_type=4) 
auto_parallel_context().set_all_reduce_fusion_split_indices(self.split_indices, "hccl_world_groupsum6") auto_parallel_context().set_all_reduce_fusion_split_indices(self.split_indices, "hccl_world_groupsum8") self.grad_reducer_a = DistributedGradReducer(self.matrix_a_cov, mean, degree, fusion_type=6) self.grad_reducer_g = DistributedGradReducer(self.matrix_a_cov, mean, degree, fusion_type=8) def _get_weight_idx_map(self, layer_type, idx, weight_shape): if layer_type in [Conv, FC, Embedding] and "bias" not in self.params[idx].name.lower(): self.weight_fim_idx_map = self.weight_fim_idx_map + (self.thor_layer_count,) self.weight_layertype_idx_map = self.weight_layertype_idx_map + (layer_type,) if layer_type == Embedding: a_pad_dim = 0 g_pad_dim = 0 self.a_split_pad_dim_map = self.a_split_pad_dim_map + (a_pad_dim,) self.g_split_pad_dim_map = self.g_split_pad_dim_map + (g_pad_dim,) else: out_channels = weight_shape[0] g_pad_dim = self._get_pad_dim(out_channels) self.g_split_pad_dim_map = self.g_split_pad_dim_map + (g_pad_dim,) matrix_a_dim = weight_shape[1] if layer_type == Conv: matrix_a_dim = weight_shape[1] * weight_shape[2] * weight_shape[3] a_pad_dim = self._get_pad_dim(matrix_a_dim) self.a_split_pad_dim_map = self.a_split_pad_dim_map + (a_pad_dim,) self.thor_layer_count = self.thor_layer_count + 1 if layer_type == Conv: self.weight_conv_idx_map = self.weight_conv_idx_map + (self.conv_layer_count,) self.conv_layer_count = self.conv_layer_count + 1 else: self.weight_conv_idx_map = self.weight_conv_idx_map + (-1,) else: self.weight_fim_idx_map = self.weight_fim_idx_map + (-1,) self.weight_conv_idx_map = self.weight_conv_idx_map + (-1,) if layer_type == LayerNorm: self.weight_layertype_idx_map = self.weight_layertype_idx_map + (LayerNorm,) else: self.weight_layertype_idx_map = self.weight_layertype_idx_map + (Other,) def _get_fc_matrix(self, weight_shape): out_channels = weight_shape[0] in_channels = weight_shape[1] if self.conv_layer_count > 0: if out_channels == 
1001: fc_matrix_a = Parameter(Tensor(np.zeros([128, 128, 16, 16]).astype(np.float16)), name='matrix_a_inv_' + str(self.thor_layer_count), requires_grad=False) fc_matrix_g = Parameter(Tensor(np.zeros([63, 63, 16, 16]).astype(np.float16)), name="matrix_g_inv_" + str(self.thor_layer_count), requires_grad=False) else: fc_matrix_a = Parameter(Tensor(np.eye(in_channels).astype(np.float16)), name='matrix_a_inv_' + str(self.thor_layer_count), requires_grad=False) fc_matrix_g = Parameter(Tensor(np.eye(out_channels).astype(np.float16)), name="matrix_g_inv_" + str(self.thor_layer_count), requires_grad=False) self.matrix_a = self.matrix_a + (fc_matrix_a,) self.matrix_g = self.matrix_g + (fc_matrix_g,) def _process_matrix_init_and_weight_idx_map(self, net): layer_counter = 0 layer_type_map = get_net_layertype_mask(net) for idx in range(len(self.params)): layer_type = layer_type_map[layer_counter] weight = self.params[idx] weight_shape = self.shape(weight) if layer_type == Conv and "bias" not in self.params[idx].name.lower(): in_channels = weight_shape[1] out_channels = weight_shape[0] matrix_a_dim = in_channels * weight_shape[2] * weight_shape[3] matrix_g_dim = out_channels matrix_a_device_shape, matrix_a_device_dim = caculate_device_shape(matrix_a_dim, in_channels, True) matrix_g_device_shape, matrix_g_device_dim = caculate_device_shape(matrix_g_dim, in_channels, False) ret = is_conv_matmul_support_shape(matrix_a_device_shape, matrix_g_device_shape) if ret: matrix_a_inv = Parameter( Tensor(np.reshape(np.identity(matrix_a_device_dim).astype(np.float16), matrix_a_device_shape)), name='matrix_a_inv_' + str(self.thor_layer_count), requires_grad=False) matrix_g_inv = Parameter( Tensor(np.reshape(np.identity(matrix_g_device_dim).astype(np.float16), matrix_g_device_shape)), name="matrix_g_inv_" + str(self.thor_layer_count), requires_grad=False) self.conv_matmul_support_map = self.conv_matmul_support_map + (1,) else: matrix_a_inv = 
Parameter(Tensor(np.eye(matrix_a_dim).astype(np.float16)), name='matrix_a_inv_' + str(self.thor_layer_count), requires_grad=False) matrix_g_inv = Parameter(Tensor(np.eye(matrix_g_dim).astype(np.float16)), name="matrix_g_inv_" + str(self.thor_layer_count), requires_grad=False) self.conv_matmul_support_map = self.conv_matmul_support_map + (0,) self.matrix_a = self.matrix_a + (matrix_a_inv,) self.matrix_g = self.matrix_g + (matrix_g_inv,) device_shape_pad_flag = False if matrix_a_dim != matrix_a_device_dim: device_shape_pad_flag = True self.device_shape_pad_flag = self.device_shape_pad_flag + (device_shape_pad_flag,) elif layer_type == FC and "bias" not in self.params[idx].name.lower(): self._get_fc_matrix(weight_shape) self._get_weight_idx_map(layer_type, idx, weight_shape) if "output_bias" not in self.params[idx].name.lower(): layer_counter = get_layer_counter(layer_type, layer_counter, self.params, idx) def _process_batch_matmul(self, input_matrix): input_matrix_shape = self.shape(input_matrix) if input_matrix_shape[0] in self.batch_matmul_support_list: input_matrix = self.vector_matmul(input_matrix, input_matrix) else: input_matrix = self.tbe_batch_matmul(input_matrix, input_matrix) return input_matrix def _process_cholesky_pad(self, pad_dim, input_matrix, matrix_shape0): if pad_dim > 0: matrix_sup = self.eye(pad_dim, pad_dim, mstype.float32) matrix_sup = P.Pad(((0, 0), (matrix_shape0, 0)))(matrix_sup) input_matrix = P.Pad(((0, 0), (0, pad_dim)))(input_matrix) input_matrix = self.concat((input_matrix, matrix_sup)) return input_matrix def _get_abs_max(self, matrix_inv, origin_dim): cholesky_shape = self.shape(matrix_inv) if cholesky_shape[0] in self.abs_max_support_list: matrix_inv_max = P.CusFusedAbsMax1([origin_dim, origin_dim])(matrix_inv) matrix_max = self.fused_abs_max2(matrix_inv_max) matrix_inv = self.matrix_combine(matrix_inv) else: matrix_inv = self.matrix_combine(matrix_inv) matrix_abs = P.Abs()(matrix_inv) matrix_max = 
P.ReduceMax(keep_dims=False)(matrix_abs) return matrix_max, matrix_inv def _get_fc_ainv_ginv(self, index, damping_step, gradients, matrix_a_allreduce, matrix_g_allreduce, matrix_a_max_allreduce, matrix_g_max_allreduce): thor_layer_count = self.weight_fim_idx_map[index] g = gradients[index] matrix_a = self.matrix_a_cov[thor_layer_count] matrix_g = self.matrix_g_cov[thor_layer_count] matrix_a = F.depend(matrix_a, g) matrix_g = F.depend(matrix_g, g) a_shape = self.shape(matrix_a) a_eye = self.eye(a_shape[0], a_shape[0], mstype.float32) g_shape = self.shape(matrix_g) g_eye = self.eye(g_shape[0], g_shape[0], mstype.float32) damping = self.sqrt(damping_step) matrix_a = matrix_a + damping * a_eye a_pad_dim = self.a_split_pad_dim_map[thor_layer_count] matrix_a = self._process_cholesky_pad(a_pad_dim, matrix_a, a_shape[0]) matrix_a_inv = self.cholesky(matrix_a) matrix_a_inv = self._process_batch_matmul(matrix_a_inv) weight_shape = self.shape(self.params[index]) out_channels = weight_shape[0] in_channels = weight_shape[1] if out_channels == 2: matrix_a_inv = self.matrix_combine(matrix_a_inv) matrix_g_inv = g_eye else: matrix_g = self.mul(matrix_g, self.loss_scale) matrix_g = self.mul(matrix_g, self.batch_size_scale) matrix_g = matrix_g + damping * g_eye g_pad_dim = self.g_split_pad_dim_map[thor_layer_count] matrix_g = self._process_cholesky_pad(g_pad_dim, matrix_g, g_shape[0]) matrix_g_inv = self.cholesky(matrix_g) matrix_g_inv = self._process_batch_matmul(matrix_g_inv) if self.conv_layer_count > 0: a_max, matrix_a_inv = self._get_abs_max(matrix_a_inv, in_channels) g_max, matrix_g_inv = self._get_abs_max(matrix_g_inv, out_channels) a_max = F.depend(a_max, g) g_max = F.depend(g_max, g) matrix_a_max_allreduce = matrix_a_max_allreduce + (a_max,) matrix_g_max_allreduce = matrix_g_max_allreduce + (g_max,) else: matrix_a_inv = self.matrix_combine(matrix_a_inv) matrix_g_inv = self.matrix_combine(matrix_g_inv) if a_pad_dim > 0: matrix_a_inv = self.slice(matrix_a_inv, (0, 0), 
                                            (in_channels, in_channels))
        if g_pad_dim > 0:
            matrix_g_inv = self.slice(matrix_g_inv, (0, 0), (out_channels, out_channels))
        matrix_a_inv_shape = self.shape(matrix_a_inv)
        matrix_g_combine_shape = self.shape(matrix_g_inv)
        # Special-case layout for the ResNet-50/ImageNet head (2048 -> 1001):
        # tile both inverses into 16x16 fractal blocks for the Ascend cube unit.
        if matrix_a_inv_shape[0] == 2048 and matrix_g_combine_shape[0] == 1001:
            matrix_a_inv = self.reshape(matrix_a_inv,
                                        (matrix_a_inv_shape[0] / 16, 16,
                                         matrix_a_inv_shape[0] / 16, 16))
            matrix_a_inv = self.transpose(matrix_a_inv, (2, 0, 1, 3))
            # Pad 1001 -> 1008 so the G inverse is divisible into 16x16 tiles.
            matrix_g_inv = P.Pad(((0, 7), (0, 7)))(matrix_g_inv)
            matrix_g_inv_shape = self.shape(matrix_g_inv)
            matrix_g_inv = self.reshape(matrix_g_inv,
                                        (matrix_g_inv_shape[0] / 16, 16,
                                         matrix_g_inv_shape[0] / 16, 16))
            matrix_g_inv = self.transpose(matrix_g_inv, (2, 0, 1, 3))
        matrix_a_allreduce = matrix_a_allreduce + (matrix_a_inv,)
        matrix_g_allreduce = matrix_g_allreduce + (matrix_g_inv,)
        return matrix_a_allreduce, matrix_g_allreduce, matrix_a_max_allreduce, matrix_g_max_allreduce

    def _process_conv_matmul_device_pad(self, conv_layer_count, weight_shape, matrix_a_inv):
        """Pad the input-channel axes of a conv layer's A inverse up to C0
        (device channel tile) when the device-shape-pad flag is set for this layer."""
        if self.device_shape_pad_flag[conv_layer_count]:
            kernel_hw = weight_shape[2] * weight_shape[3]
            in_channels = weight_shape[1]
            matrix_a_inv = self.reshape(matrix_a_inv, (kernel_hw, in_channels, kernel_hw, in_channels))
            matrix_a_inv = P.Pad(((0, 0), (0, self.C0 - in_channels),
                                  (0, 0), (0, self.C0 - in_channels)))(matrix_a_inv)
        return matrix_a_inv

    def _get_ainv_ginv_amax_gmax_list(self, gradients, damping_step, matrix_a_allreduce, matrix_g_allreduce,
                                      matrix_a_max_allreduce, matrix_g_max_allreduce):
        """Compute, per parameter, the damped inverses of the A (input) and G (output)
        covariance factors plus their abs-max scalars, appending each to the given
        tuples.  Conv layers take the cholesky/fractal path; FC layers are delegated
        to _get_fc_ainv_ginv; Embedding layers use an elementwise (diagonal) A inverse."""
        for i in range(len(self.params)):
            thor_layer_count = self.weight_fim_idx_map[i]
            conv_layer_count = self.weight_conv_idx_map[i]
            layer_type = self.weight_layertype_idx_map[i]
            weight_shape = self.shape(self.params[i])
            out_channels = weight_shape[0]
            if layer_type == Conv:
                g = gradients[i]
                matrix_a_dim = weight_shape[1] * weight_shape[2] * weight_shape[3]
                matmul_support_flag = self.conv_matmul_support_map[conv_layer_count]
                matrix_a = self.matrix_a_cov[thor_layer_count]
                matrix_g = self.matrix_g_cov[thor_layer_count]
                # F.depend: force the covariance reads to order after the gradient
                # computation in the graph.
                matrix_a = F.depend(matrix_a, g)
                matrix_g = F.depend(matrix_g, g)
                a_shape = self.shape(matrix_a)
                a_eye = self.eye(a_shape[0], a_shape[0], mstype.float32)
                g_shape = self.shape(matrix_g)
                g_eye = self.eye(g_shape[0], g_shape[0], mstype.float32)
                a_normalizer = self.a_normalizer[conv_layer_count]
                g_normalizer = self.g_normalizer[conv_layer_count]
                a_normalizer = F.depend(a_normalizer, g)
                g_normalizer = F.depend(g_normalizer, g)
                # Per-layer damping, scaled by batch size over the layer normalizer.
                damping_a = self.mul(damping_step, self.batch_size / a_normalizer)
                damping_g = self.mul(damping_step, self.batch_size / g_normalizer)
                damping_a = self.sqrt(damping_a)
                matrix_a = matrix_a + damping_a * a_eye
                a_pad_dim = self.a_split_pad_dim_map[thor_layer_count]
                matrix_a = self._process_cholesky_pad(a_pad_dim, matrix_a, a_shape[0])
                matrix_a_inv = self.cholesky(matrix_a)
                matrix_a_inv = self._process_batch_matmul(matrix_a_inv)
                a_max, matrix_a_inv = self._get_abs_max(matrix_a_inv, matrix_a_dim)
                damping_g = self.sqrt(damping_g)
                matrix_g = self.mul(matrix_g, self.loss_scale)
                matrix_g = self.mul(matrix_g, self.batch_size_scale)
                matrix_g = matrix_g + damping_g * g_eye
                g_pad_dim = self.g_split_pad_dim_map[thor_layer_count]
                matrix_g = self._process_cholesky_pad(g_pad_dim, matrix_g, g_shape[0])
                matrix_g_inv = self.cholesky(matrix_g)
                matrix_g_inv = self._process_batch_matmul(matrix_g_inv)
                g_max, matrix_g_inv = self._get_abs_max(matrix_g_inv, out_channels)
                # Strip the cholesky split padding back off.
                if a_pad_dim > 0:
                    matrix_a_inv = self.slice(matrix_a_inv, (0, 0), (matrix_a_dim, matrix_a_dim))
                if g_pad_dim > 0:
                    matrix_g_inv = self.slice(matrix_g_inv, (0, 0), (out_channels, out_channels))
                if matmul_support_flag == 1:
                    # Re-tile into the device (fractal) layout expected by the cube matmuls.
                    matrix_a_inv = self._process_conv_matmul_device_pad(conv_layer_count, weight_shape,
                                                                        matrix_a_inv)
                    matrix_a_inv_shape = self.shape(self.matrix_a[thor_layer_count])
                    matrix_a_device_temp_shape = (matrix_a_inv_shape[0], matrix_a_inv_shape[2],
                                                  matrix_a_inv_shape[1], matrix_a_inv_shape[3])
                    matrix_a_inv = self.reshape(matrix_a_inv, matrix_a_device_temp_shape)
                    matrix_a_inv = self.transpose(matrix_a_inv, (2, 0, 1, 3))
                    matrix_g_inv_shape = self.shape(self.matrix_g[thor_layer_count])
                    matrix_g_device_temp_shape = (matrix_g_inv_shape[0], matrix_g_inv_shape[2],
                                                  matrix_g_inv_shape[1], matrix_g_inv_shape[3])
                    matrix_g_inv = self.reshape(matrix_g_inv, matrix_g_device_temp_shape)
                    matrix_g_inv = self.transpose(matrix_g_inv, (2, 0, 1, 3))
                a_max = F.depend(a_max, g)
                g_max = F.depend(g_max, g)
                matrix_a_allreduce = matrix_a_allreduce + (matrix_a_inv,)
                matrix_g_allreduce = matrix_g_allreduce + (matrix_g_inv,)
                matrix_a_max_allreduce = matrix_a_max_allreduce + (a_max,)
                matrix_g_max_allreduce = matrix_g_max_allreduce + (g_max,)
            elif layer_type == FC:
                matrix_a_allreduce, matrix_g_allreduce, matrix_a_max_allreduce, matrix_g_max_allreduce = \
                    self._get_fc_ainv_ginv(i, damping_step, gradients, matrix_a_allreduce, matrix_g_allreduce,
                                           matrix_a_max_allreduce, matrix_g_max_allreduce)
            elif layer_type == Embedding:
                g = gradients[i]
                matrix_a = self.matrix_a_cov[thor_layer_count]
                matrix_g = self.matrix_g_cov[thor_layer_count]
                matrix_a = F.depend(matrix_a, g)
                matrix_g = F.depend(matrix_g, g)
                g_shape = self.shape(matrix_g)
                g_eye = self.eye(g_shape[0], g_shape[0], mstype.float32)
                damping = self.sqrt(damping_step)
                # Embedding A factor is treated as diagonal: elementwise inverse.
                a_eye = P.OnesLike()(matrix_a)
                matrix_a = self.mul(matrix_a, 1.0 / self.batch_size)
                matrix_a = matrix_a + damping * a_eye
                matrix_a_inv = self.inv(matrix_a)
                matrix_g = self.mul(matrix_g, self.loss_scale)
                matrix_g = self.mul(matrix_g, self.batch_size_scale)
                matrix_g = matrix_g + damping * g_eye
                matrix_g_inv = self.cholesky(matrix_g)
                matrix_g_inv = self._process_batch_matmul(matrix_g_inv)
                matrix_g_inv = self.matrix_combine(matrix_g_inv)
                matrix_a_allreduce = matrix_a_allreduce + (matrix_a_inv,)
                matrix_g_allreduce = matrix_g_allreduce + (matrix_g_inv,)
        return matrix_a_allreduce, matrix_g_allreduce, matrix_a_max_allreduce, matrix_g_max_allreduce

    def _process_layernorm(self, damping_step, gradient):
        """Precondition a LayerNorm gradient with a damped diagonal Fisher estimate
        (elementwise squared gradient over batch size)."""
        damping = self.sqrt(damping_step)
        normalizer = self.cast(self.batch_size, mstype.float32)
        fim_cov = self.square(gradient)
        fim_cov = self.mul(fim_cov, 1.0 / normalizer)
        fim_cov = fim_cov + damping
        fim_inv = self.inv(fim_cov)
        gradient = self.mul(fim_inv, gradient)
        return gradient

    def _process_thor_fc(self, thor_layer_count, matrix_a_allreduce, matrix_g_allreduce, g):
        """Apply the FC second-order update g <- G^-1 @ g @ A^-1 (fp16 matmuls),
        caching the fresh inverses into the covariance buffers for reuse."""
        temp_a = matrix_a_allreduce[thor_layer_count]
        temp_g = matrix_g_allreduce[thor_layer_count]
        self.assign(self.matrix_a_cov[thor_layer_count], temp_a)
        self.assign(self.matrix_g_cov[thor_layer_count], temp_g)
        temp_a = self.cast(temp_a, mstype.float16)
        temp_g = self.cast(temp_g, mstype.float16)
        g = self.cast(g, mstype.float16)
        g = self.matmul(temp_g, g)
        g = self.matmul(g, temp_a)
        g = self.cast(g, mstype.float32)
        return g

    def _get_second_gradients_one(self, params_len, gradients, new_grads):
        """Precondition every gradient with the cached device-layout inverses
        (non-THOR steps on a net that contains conv layers)."""
        for i in range(params_len):
            g = gradients[i]
            thor_layer_count = self.weight_fim_idx_map[i]
            conv_layer_count = self.weight_conv_idx_map[i]
            layer_type = self.weight_layertype_idx_map[i]
            matrix_a = self.matrix_a[thor_layer_count]
            matrix_g = self.matrix_g[thor_layer_count]
            matrix_max = self.matrix_max_inv[thor_layer_count]
            grad_shape = self.shape(g)
            if layer_type == FC:
                if grad_shape[0] == 1001:
                    # ImageNet classifier head: use the fused cube kernels.
                    g = self.cube_matmul_left_fc(matrix_g, g)
                    g = self.cube_matmul_right_fc(g, matrix_a, matrix_max)
                else:
                    temp_a = self.cast(matrix_a, mstype.float16)
                    temp_g = self.cast(matrix_g, mstype.float16)
                    g = self.cast(g, mstype.float16)
                    g = self.matmul(temp_g, g)
                    g = self.matmul(g, temp_a)
                    g = self.cast(g, mstype.float32)
                    g = self.mul(g, matrix_max)
            elif layer_type == Conv:
                matmul_support_flag = self.conv_matmul_support_map[conv_layer_count]
                if matmul_support_flag == 1:
                    g = self.cube_matmul_left(matrix_g, g)
                    g = self.cube_matmul_right_mul(g, matrix_a, matrix_max)
                else:
                    # Flatten the conv kernel to 2-D, precondition, then restore shape.
                    g = self.reshape(g, (grad_shape[0], grad_shape[1] * grad_shape[2] * grad_shape[3]))
                    temp_a = self.cast(matrix_a, mstype.float16)
                    temp_g = self.cast(matrix_g, mstype.float16)
                    g = self.cast(g, mstype.float16)
                    g = self.matmul(temp_g, g)
                    g = self.matmul(g, temp_a)
                    g = self.cast(g, mstype.float32)
                    g = self.mul(g, matrix_max)
                    g = self.reshape(g, grad_shape)
            new_grads = new_grads + (g,)
        return new_grads

    def _get_second_gradients(self, new_grads, damping_step, gradients):
        """Precondition gradients on non-THOR (reuse) steps.  With conv layers present
        the device-layout path is used; otherwise Embedding/FC/LayerNorm are handled
        with the cached host-layout inverses."""
        params_len = len(self.params)
        if self.conv_layer_count > 0:
            new_grads = self._get_second_gradients_one(params_len, gradients, new_grads)
        else:
            for i in range(params_len):
                g = gradients[i]
                thor_layer_count = self.weight_fim_idx_map[i]
                layer_type = self.weight_layertype_idx_map[i]
                if layer_type == Embedding:
                    temp_a_ori = self.matrix_a_cov[thor_layer_count]
                    temp_g = self.matrix_g_cov[thor_layer_count]
                    # Diagonal A inverse: broadcast-multiply rows instead of a matmul.
                    temp_a = self.expand(temp_a_ori, 1)
                    g = self.mul(temp_a, g)
                    temp_g = self.cast(temp_g, mstype.float16)
                    g = self.cast(g, mstype.float16)
                    g = self.matmul(g, temp_g)
                    g = self.cast(g, mstype.float32)
                elif layer_type == FC:
                    temp_a = self.matrix_a_cov[thor_layer_count]
                    temp_g = self.matrix_g_cov[thor_layer_count]
                    temp_a = self.cast(temp_a, mstype.float16)
                    temp_g = self.cast(temp_g, mstype.float16)
                    g = self.cast(g, mstype.float16)
                    g = self.matmul(temp_g, g)
                    g = self.matmul(g, temp_a)
                    g = self.cast(g, mstype.float32)
                elif layer_type == LayerNorm:
                    g = self._process_layernorm(damping_step, g)
                new_grads = new_grads + (g,)
        return new_grads

    def _get_second_grad_by_matmul(self, index, temp_a, temp_g, g, temp_max):
        """Precondition one gradient with freshly normalized inverses (THOR step,
        conv-containing network).  Returns the updated gradient and the (possibly
        conv-rescaled) temp_max used to undo the abs-max normalization."""
        conv_layer_count = self.weight_conv_idx_map[index]
        layer_type = self.weight_layertype_idx_map[index]
        grad_shape = self.shape(g)
        if layer_type == FC:
            if grad_shape[0] == 1001:
                g = self.cube_matmul_left_fc(temp_g, g)
                g = self.cube_matmul_right_fc(g, temp_a, temp_max)
            else:
                temp_a = self.cast(temp_a, mstype.float16)
                temp_g = self.cast(temp_g, mstype.float16)
                g = self.cast(g, mstype.float16)
                g = self.matmul(temp_g, g)
                g = self.matmul(g, temp_a)
                g = self.cast(g, mstype.float32)
                g = self.mul(g, temp_max)
        elif layer_type == Conv:
            a_normalizer = self.a_normalizer[conv_layer_count]
            a_normalizer = F.depend(a_normalizer, g)
            # Fold the conv covariance normalizer into the max-rescale factor.
            temp_max = self.mul(temp_max, self.batch_size / a_normalizer)
            matmul_support_flag = self.conv_matmul_support_map[conv_layer_count]
            if matmul_support_flag == 1:
                g = self.cube_matmul_left(temp_g, g)
                g = self.cube_matmul_right_mul(g, temp_a, temp_max)
            else:
                g = self.reshape(g, (grad_shape[0], grad_shape[1] * grad_shape[2] * grad_shape[3]))
                temp_a = self.cast(temp_a, mstype.float16)
                temp_g = self.cast(temp_g, mstype.float16)
                g = self.cast(g, mstype.float16)
                g = self.matmul(temp_g, g)
                g = self.matmul(g, temp_a)
                g = self.cast(g, mstype.float32)
                g = self.mul(g, temp_max)
                g = self.reshape(g, grad_shape)
        return g, temp_max

    def _get_second_grad_by_layertype(self, index, matrix_a_allreduce, matrix_g_allreduce, g, damping_step):
        """Precondition one gradient (THOR step, conv-free network), caching the
        fresh inverses into the covariance buffers for later reuse steps."""
        thor_layer_count = self.weight_fim_idx_map[index]
        layer_type = self.weight_layertype_idx_map[index]
        if layer_type == Embedding:
            temp_a_ori = matrix_a_allreduce[thor_layer_count]
            temp_g = matrix_g_allreduce[thor_layer_count]
            self.assign(self.matrix_a_cov[thor_layer_count], temp_a_ori)
            self.assign(self.matrix_g_cov[thor_layer_count], temp_g)
            temp_a = self.expand(temp_a_ori, 1)
            g = self.mul(temp_a, g)
            temp_g = self.cast(temp_g, mstype.float16)
            g = self.cast(g, mstype.float16)
            g = self.matmul(g, temp_g)
            g = self.cast(g, mstype.float32)
        elif layer_type == FC:
            g = self._process_thor_fc(thor_layer_count, matrix_a_allreduce, matrix_g_allreduce, g)
        elif layer_type == LayerNorm:
            g = self._process_layernorm(damping_step, g)
        return g

    def construct(self, gradients):
        """One optimizer step: on THOR steps recompute the factor inverses
        (all-reducing them when distributed) and precondition the gradients; on
        reuse steps precondition with the cached inverses; then apply weight decay,
        gradient clipping, and the momentum update."""
        params = self.params
        moments = self.moments
        gradients = self.scale_grad(gradients)
        damping_step = self.gather(self.damping, self.cov_step, self.axis)
        damping_step = self.cast(damping_step, mstype.float32)
        if self.thor:
            matrix_a_allreduce = ()
            matrix_g_allreduce = ()
            matrix_a_max_allreduce = ()
            matrix_g_max_allreduce = ()
            matrix_a_allreduce, matrix_g_allreduce, matrix_a_max_allreduce, matrix_g_max_allreduce = \
                self._get_ainv_ginv_amax_gmax_list(gradients, damping_step, matrix_a_allreduce,
                                                   matrix_g_allreduce, matrix_a_max_allreduce,
                                                   matrix_g_max_allreduce)
            if self.is_distributed:
                matrix_a_allreduce = self.grad_reducer_a(matrix_a_allreduce)
                matrix_g_allreduce = self.grad_reducer_g(matrix_g_allreduce)
                if self.conv_layer_count > 0:
                    matrix_a_max_allreduce = self.grad_reducer_amax(matrix_a_max_allreduce)
                    matrix_g_max_allreduce = self.grad_reducer_gmax(matrix_g_max_allreduce)
            new_grads = ()
            if self.conv_layer_count > 0:
                for i in range(len(self.params)):
                    g = gradients[i]
                    thor_layer_count = self.weight_fim_idx_map[i]
                    temp_a = matrix_a_allreduce[thor_layer_count]
                    temp_g = matrix_g_allreduce[thor_layer_count]
                    # Normalize each inverse by its abs-max via exp(-log(max)).
                    matrix_a_inv_max = self.log(matrix_a_max_allreduce[thor_layer_count])
                    matrix_a_inv_max = self.mul(matrix_a_inv_max, -1)
                    matrix_a_inv_max = self.exp(matrix_a_inv_max)
                    temp_a = self.mul(temp_a, matrix_a_inv_max)
                    matrix_g_inv_max = self.log(matrix_g_max_allreduce[thor_layer_count])
                    matrix_g_inv_max = self.mul(matrix_g_inv_max, -1)
                    matrix_g_inv_max = self.exp(matrix_g_inv_max)
                    temp_g = self.mul(temp_g, matrix_g_inv_max)
                    # NOTE(review): temp_a was normalized by a_max and temp_g by g_max,
                    # yet temp_max multiplies g_max by itself; restoring both
                    # normalizations would need a_max * g_max — confirm upstream.
                    temp_max = self.mul(matrix_g_max_allreduce[thor_layer_count],
                                        matrix_g_max_allreduce[thor_layer_count])
                    temp_a = self.cast(temp_a, mstype.float16)
                    temp_g = self.cast(temp_g, mstype.float16)
                    g, temp_max = self._get_second_grad_by_matmul(i, temp_a, temp_g, g, temp_max)
                    # Cache for the reuse (non-THOR) steps.
                    self.assign(self.matrix_a[thor_layer_count], temp_a)
                    self.assign(self.matrix_g[thor_layer_count], temp_g)
                    self.assign(self.matrix_max_inv[thor_layer_count], temp_max)
                    new_grads = new_grads + (g,)
                gradients = new_grads
            else:
                for i in range(len(self.params)):
                    g = gradients[i]
                    g = self._get_second_grad_by_layertype(i, matrix_a_allreduce, matrix_g_allreduce,
                                                           g, damping_step)
                    new_grads = new_grads + (g,)
                gradients = new_grads
        else:
            new_grads = ()
            gradients = self._get_second_gradients(new_grads, damping_step, gradients)
        self.cov_step = self.cov_step + self.one
        if self.weight_decay > 0:
            gradients = self.hyper_map(F.partial(apply_decay, self.weight_decay), self.decay_flags,
                                       params, gradients)
        gradients = clip_gradient(self.enable_clip_grad, gradients)
        lr = self.get_lr()
        success = self.hyper_map(F.partial(_momentum_opt, self.opt, self.momentum, lr), gradients,
                                 params, moments)
        return success
true
true
f724002fa49e096c91bf4ad7fbf5255ed93e8e91
10,946
py
Python
packages/fetchai/protocols/register/message.py
marcofavorito/agents-aea
e520f2f5d076a193514e194d94aa76c6423ac5bc
[ "Apache-2.0" ]
null
null
null
packages/fetchai/protocols/register/message.py
marcofavorito/agents-aea
e520f2f5d076a193514e194d94aa76c6423ac5bc
[ "Apache-2.0" ]
null
null
null
packages/fetchai/protocols/register/message.py
marcofavorito/agents-aea
e520f2f5d076a193514e194d94aa76c6423ac5bc
[ "Apache-2.0" ]
null
null
null
# -*- coding: utf-8 -*-
# ------------------------------------------------------------------------------
#
#   Copyright 2021 fetchai
#
#   Licensed under the Apache License, Version 2.0 (the "License");
#   you may not use this file except in compliance with the License.
#   You may obtain a copy of the License at
#
#       http://www.apache.org/licenses/LICENSE-2.0
#
#   Unless required by applicable law or agreed to in writing, software
#   distributed under the License is distributed on an "AS IS" BASIS,
#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#   See the License for the specific language governing permissions and
#   limitations under the License.
#
# ------------------------------------------------------------------------------

"""This module contains register's message definition."""

import logging
from typing import Dict, Set, Tuple, cast

from aea.configurations.base import PublicId
from aea.exceptions import AEAEnforceError, enforce
from aea.protocols.base import Message

_default_logger = logging.getLogger("aea.packages.fetchai.protocols.register.message")

# Number of framework-managed fields in every message body
# (dialogue_reference, message_id, target, performative).
DEFAULT_BODY_SIZE = 4


class RegisterMessage(Message):
    """A protocol for communication between two AEAs for registration."""

    protocol_id = PublicId.from_str("fetchai/register:0.4.0")

    class Performative(Message.Performative):
        """Performatives for the register protocol."""

        ERROR = "error"
        REGISTER = "register"
        SUCCESS = "success"

        def __str__(self):
            """Get the string representation."""
            return str(self.value)

    _performatives = {"error", "register", "success"}
    __slots__: Tuple[str, ...] = tuple()

    class _SlotsCls:
        # All field names a register message may carry (framework + content).
        __slots__ = (
            "dialogue_reference",
            "error_code",
            "error_msg",
            "info",
            "message_id",
            "performative",
            "target",
        )

    def __init__(
        self,
        performative: Performative,
        dialogue_reference: Tuple[str, str] = ("", ""),
        message_id: int = 1,
        target: int = 0,
        **kwargs,
    ):
        """
        Initialise an instance of RegisterMessage.

        :param message_id: the message id.
        :param dialogue_reference: the dialogue reference.
        :param target: the message target.
        :param performative: the message performative.
        """
        super().__init__(
            dialogue_reference=dialogue_reference,
            message_id=message_id,
            target=target,
            performative=RegisterMessage.Performative(performative),
            **kwargs,
        )

    @property
    def valid_performatives(self) -> Set[str]:
        """Get valid performatives."""
        return self._performatives

    @property
    def dialogue_reference(self) -> Tuple[str, str]:
        """Get the dialogue_reference of the message."""
        enforce(self.is_set("dialogue_reference"), "dialogue_reference is not set.")
        return cast(Tuple[str, str], self.get("dialogue_reference"))

    @property
    def message_id(self) -> int:
        """Get the message_id of the message."""
        enforce(self.is_set("message_id"), "message_id is not set.")
        return cast(int, self.get("message_id"))

    @property
    def performative(self) -> Performative:  # type: ignore # noqa: F821
        """Get the performative of the message."""
        enforce(self.is_set("performative"), "performative is not set.")
        return cast(RegisterMessage.Performative, self.get("performative"))

    @property
    def target(self) -> int:
        """Get the target of the message."""
        enforce(self.is_set("target"), "target is not set.")
        return cast(int, self.get("target"))

    @property
    def error_code(self) -> int:
        """Get the 'error_code' content from the message."""
        enforce(self.is_set("error_code"), "'error_code' content is not set.")
        return cast(int, self.get("error_code"))

    @property
    def error_msg(self) -> str:
        """Get the 'error_msg' content from the message."""
        enforce(self.is_set("error_msg"), "'error_msg' content is not set.")
        return cast(str, self.get("error_msg"))

    @property
    def info(self) -> Dict[str, str]:
        """Get the 'info' content from the message."""
        enforce(self.is_set("info"), "'info' content is not set.")
        return cast(Dict[str, str], self.get("info"))

    def _is_consistent(self) -> bool:
        """Check that the message follows the register protocol."""
        try:
            # Framework (envelope) fields must be well-typed.
            enforce(
                type(self.dialogue_reference) == tuple,
                "Invalid type for 'dialogue_reference'. Expected 'tuple'. Found '{}'.".format(
                    type(self.dialogue_reference)
                ),
            )
            enforce(
                type(self.dialogue_reference[0]) == str,
                "Invalid type for 'dialogue_reference[0]'. Expected 'str'. Found '{}'.".format(
                    type(self.dialogue_reference[0])
                ),
            )
            enforce(
                type(self.dialogue_reference[1]) == str,
                "Invalid type for 'dialogue_reference[1]'. Expected 'str'. Found '{}'.".format(
                    type(self.dialogue_reference[1])
                ),
            )
            enforce(
                type(self.message_id) == int,
                "Invalid type for 'message_id'. Expected 'int'. Found '{}'.".format(
                    type(self.message_id)
                ),
            )
            enforce(
                type(self.target) == int,
                "Invalid type for 'target'. Expected 'int'. Found '{}'.".format(
                    type(self.target)
                ),
            )

            # Light Protocol Rule 2
            # Check correct performative
            enforce(
                type(self.performative) == RegisterMessage.Performative,
                "Invalid 'performative'. Expected either of '{}'. Found '{}'.".format(
                    self.valid_performatives, self.performative
                ),
            )

            # Check correct contents
            actual_nb_of_contents = len(self._body) - DEFAULT_BODY_SIZE
            expected_nb_of_contents = 0
            if self.performative == RegisterMessage.Performative.REGISTER:
                expected_nb_of_contents = 1
                enforce(
                    type(self.info) == dict,
                    "Invalid type for content 'info'. Expected 'dict'. Found '{}'.".format(
                        type(self.info)
                    ),
                )
                for key_of_info, value_of_info in self.info.items():
                    enforce(
                        type(key_of_info) == str,
                        "Invalid type for dictionary keys in content 'info'. Expected 'str'. Found '{}'.".format(
                            type(key_of_info)
                        ),
                    )
                    enforce(
                        type(value_of_info) == str,
                        "Invalid type for dictionary values in content 'info'. Expected 'str'. Found '{}'.".format(
                            type(value_of_info)
                        ),
                    )
            elif self.performative == RegisterMessage.Performative.SUCCESS:
                expected_nb_of_contents = 1
                enforce(
                    type(self.info) == dict,
                    "Invalid type for content 'info'. Expected 'dict'. Found '{}'.".format(
                        type(self.info)
                    ),
                )
                for key_of_info, value_of_info in self.info.items():
                    enforce(
                        type(key_of_info) == str,
                        "Invalid type for dictionary keys in content 'info'. Expected 'str'. Found '{}'.".format(
                            type(key_of_info)
                        ),
                    )
                    enforce(
                        type(value_of_info) == str,
                        "Invalid type for dictionary values in content 'info'. Expected 'str'. Found '{}'.".format(
                            type(value_of_info)
                        ),
                    )
            elif self.performative == RegisterMessage.Performative.ERROR:
                expected_nb_of_contents = 3
                enforce(
                    type(self.error_code) == int,
                    "Invalid type for content 'error_code'. Expected 'int'. Found '{}'.".format(
                        type(self.error_code)
                    ),
                )
                enforce(
                    type(self.error_msg) == str,
                    "Invalid type for content 'error_msg'. Expected 'str'. Found '{}'.".format(
                        type(self.error_msg)
                    ),
                )
                enforce(
                    type(self.info) == dict,
                    "Invalid type for content 'info'. Expected 'dict'. Found '{}'.".format(
                        type(self.info)
                    ),
                )
                for key_of_info, value_of_info in self.info.items():
                    enforce(
                        type(key_of_info) == str,
                        "Invalid type for dictionary keys in content 'info'. Expected 'str'. Found '{}'.".format(
                            type(key_of_info)
                        ),
                    )
                    enforce(
                        type(value_of_info) == str,
                        "Invalid type for dictionary values in content 'info'. Expected 'str'. Found '{}'.".format(
                            type(value_of_info)
                        ),
                    )

            # Check correct content count
            enforce(
                expected_nb_of_contents == actual_nb_of_contents,
                "Incorrect number of contents. Expected {}. Found {}".format(
                    expected_nb_of_contents, actual_nb_of_contents
                ),
            )

            # Light Protocol Rule 3
            if self.message_id == 1:
                enforce(
                    self.target == 0,
                    "Invalid 'target'. Expected 0 (because 'message_id' is 1). Found {}.".format(
                        self.target
                    ),
                )
            else:
                enforce(
                    0 < self.target < self.message_id,
                    "Invalid 'target'. Expected an integer between 1 and {} inclusive. Found {}.".format(
                        self.message_id - 1,
                        self.target,
                    ),
                )
        except (AEAEnforceError, ValueError, KeyError) as e:
            _default_logger.error(str(e))
            return False

        return True
38.272727
115
0.504659
import logging from typing import Dict, Set, Tuple, cast from aea.configurations.base import PublicId from aea.exceptions import AEAEnforceError, enforce from aea.protocols.base import Message _default_logger = logging.getLogger("aea.packages.fetchai.protocols.register.message") DEFAULT_BODY_SIZE = 4 class RegisterMessage(Message): protocol_id = PublicId.from_str("fetchai/register:0.4.0") class Performative(Message.Performative): ERROR = "error" REGISTER = "register" SUCCESS = "success" def __str__(self): return str(self.value) _performatives = {"error", "register", "success"} __slots__: Tuple[str, ...] = tuple() class _SlotsCls: __slots__ = ( "dialogue_reference", "error_code", "error_msg", "info", "message_id", "performative", "target", ) def __init__( self, performative: Performative, dialogue_reference: Tuple[str, str] = ("", ""), message_id: int = 1, target: int = 0, **kwargs, ): super().__init__( dialogue_reference=dialogue_reference, message_id=message_id, target=target, performative=RegisterMessage.Performative(performative), **kwargs, ) @property def valid_performatives(self) -> Set[str]: return self._performatives @property def dialogue_reference(self) -> Tuple[str, str]: enforce(self.is_set("dialogue_reference"), "dialogue_reference is not set.") return cast(Tuple[str, str], self.get("dialogue_reference")) @property def message_id(self) -> int: enforce(self.is_set("message_id"), "message_id is not set.") return cast(int, self.get("message_id")) @property def performative(self) -> Performative: orce(self.is_set("performative"), "performative is not set.") return cast(RegisterMessage.Performative, self.get("performative")) @property def target(self) -> int: enforce(self.is_set("target"), "target is not set.") return cast(int, self.get("target")) @property def error_code(self) -> int: enforce(self.is_set("error_code"), "'error_code' content is not set.") return cast(int, self.get("error_code")) @property def error_msg(self) -> str: 
enforce(self.is_set("error_msg"), "'error_msg' content is not set.") return cast(str, self.get("error_msg")) @property def info(self) -> Dict[str, str]: enforce(self.is_set("info"), "'info' content is not set.") return cast(Dict[str, str], self.get("info")) def _is_consistent(self) -> bool: try: enforce( type(self.dialogue_reference) == tuple, "Invalid type for 'dialogue_reference'. Expected 'tuple'. Found '{}'.".format( type(self.dialogue_reference) ), ) enforce( type(self.dialogue_reference[0]) == str, "Invalid type for 'dialogue_reference[0]'. Expected 'str'. Found '{}'.".format( type(self.dialogue_reference[0]) ), ) enforce( type(self.dialogue_reference[1]) == str, "Invalid type for 'dialogue_reference[1]'. Expected 'str'. Found '{}'.".format( type(self.dialogue_reference[1]) ), ) enforce( type(self.message_id) == int, "Invalid type for 'message_id'. Expected 'int'. Found '{}'.".format( type(self.message_id) ), ) enforce( type(self.target) == int, "Invalid type for 'target'. Expected 'int'. Found '{}'.".format( type(self.target) ), ) enforce( type(self.performative) == RegisterMessage.Performative, "Invalid 'performative'. Expected either of '{}'. Found '{}'.".format( self.valid_performatives, self.performative ), ) actual_nb_of_contents = len(self._body) - DEFAULT_BODY_SIZE expected_nb_of_contents = 0 if self.performative == RegisterMessage.Performative.REGISTER: expected_nb_of_contents = 1 enforce( type(self.info) == dict, "Invalid type for content 'info'. Expected 'dict'. Found '{}'.".format( type(self.info) ), ) for key_of_info, value_of_info in self.info.items(): enforce( type(key_of_info) == str, "Invalid type for dictionary keys in content 'info'. Expected 'str'. Found '{}'.".format( type(key_of_info) ), ) enforce( type(value_of_info) == str, "Invalid type for dictionary values in content 'info'. Expected 'str'. 
Found '{}'.".format( type(value_of_info) ), ) elif self.performative == RegisterMessage.Performative.SUCCESS: expected_nb_of_contents = 1 enforce( type(self.info) == dict, "Invalid type for content 'info'. Expected 'dict'. Found '{}'.".format( type(self.info) ), ) for key_of_info, value_of_info in self.info.items(): enforce( type(key_of_info) == str, "Invalid type for dictionary keys in content 'info'. Expected 'str'. Found '{}'.".format( type(key_of_info) ), ) enforce( type(value_of_info) == str, "Invalid type for dictionary values in content 'info'. Expected 'str'. Found '{}'.".format( type(value_of_info) ), ) elif self.performative == RegisterMessage.Performative.ERROR: expected_nb_of_contents = 3 enforce( type(self.error_code) == int, "Invalid type for content 'error_code'. Expected 'int'. Found '{}'.".format( type(self.error_code) ), ) enforce( type(self.error_msg) == str, "Invalid type for content 'error_msg'. Expected 'str'. Found '{}'.".format( type(self.error_msg) ), ) enforce( type(self.info) == dict, "Invalid type for content 'info'. Expected 'dict'. Found '{}'.".format( type(self.info) ), ) for key_of_info, value_of_info in self.info.items(): enforce( type(key_of_info) == str, "Invalid type for dictionary keys in content 'info'. Expected 'str'. Found '{}'.".format( type(key_of_info) ), ) enforce( type(value_of_info) == str, "Invalid type for dictionary values in content 'info'. Expected 'str'. Found '{}'.".format( type(value_of_info) ), ) enforce( expected_nb_of_contents == actual_nb_of_contents, "Incorrect number of contents. Expected {}. Found {}".format( expected_nb_of_contents, actual_nb_of_contents ), ) if self.message_id == 1: enforce( self.target == 0, "Invalid 'target'. Expected 0 (because 'message_id' is 1). Found {}.".format( self.target ), ) else: enforce( 0 < self.target < self.message_id, "Invalid 'target'. Expected an integer between 1 and {} inclusive. 
Found {}.".format( self.message_id - 1, self.target, ), ) except (AEAEnforceError, ValueError, KeyError) as e: _default_logger.error(str(e)) return False return True
true
true
f72400b1078101ea758b0e655b5076476f1d0bd0
1,995
py
Python
dbops_venv/lib/python3.5/site-packages/alembic/templates/generic/env.py
fractal520/dbops
20c6b6b5669e09b43cd19e6f3fa0448bc7d5eaac
[ "MIT" ]
36
2015-01-03T07:57:07.000Z
2019-12-09T10:48:08.000Z
dbops_venv/lib/python3.5/site-packages/alembic/templates/generic/env.py
fractal520/dbops
20c6b6b5669e09b43cd19e6f3fa0448bc7d5eaac
[ "MIT" ]
4
2015-01-08T20:18:49.000Z
2015-08-12T18:09:21.000Z
dbops_venv/lib/python3.5/site-packages/alembic/templates/generic/env.py
fractal520/dbops
20c6b6b5669e09b43cd19e6f3fa0448bc7d5eaac
[ "MIT" ]
11
2015-02-07T01:03:23.000Z
2021-09-24T07:13:38.000Z
from __future__ import with_statement from alembic import context from sqlalchemy import engine_from_config, pool from logging.config import fileConfig # this is the Alembic Config object, which provides # access to the values within the .ini file in use. config = context.config # Interpret the config file for Python logging. # This line sets up loggers basically. fileConfig(config.config_file_name) # add your model's MetaData object here # for 'autogenerate' support # from myapp import mymodel # target_metadata = mymodel.Base.metadata target_metadata = None # other values from the config, defined by the needs of env.py, # can be acquired: # my_important_option = config.get_main_option("my_important_option") # ... etc. def run_migrations_offline(): """Run migrations in 'offline' mode. This configures the context with just a URL and not an Engine, though an Engine is acceptable here as well. By skipping the Engine creation we don't even need a DBAPI to be available. Calls to context.execute() here emit the given string to the script output. """ url = config.get_main_option("sqlalchemy.url") context.configure(url=url) with context.begin_transaction(): context.run_migrations() def run_migrations_online(): """Run migrations in 'online' mode. In this scenario we need to create an Engine and associate a connection with the context. """ engine = engine_from_config( config.get_section(config.config_ini_section), prefix='sqlalchemy.', poolclass=pool.NullPool) connection = engine.connect() context.configure( connection=connection, target_metadata=target_metadata ) try: with context.begin_transaction(): context.run_migrations() finally: connection.close() if context.is_offline_mode(): run_migrations_offline() else: run_migrations_online()
27.708333
69
0.704261
from __future__ import with_statement from alembic import context from sqlalchemy import engine_from_config, pool from logging.config import fileConfig config = context.config fileConfig(config.config_file_name) # for 'autogenerate' support # from myapp import mymodel # target_metadata = mymodel.Base.metadata target_metadata = None # other values from the config, defined by the needs of env.py, # can be acquired: # my_important_option = config.get_main_option("my_important_option") # ... etc. def run_migrations_offline(): url = config.get_main_option("sqlalchemy.url") context.configure(url=url) with context.begin_transaction(): context.run_migrations() def run_migrations_online(): engine = engine_from_config( config.get_section(config.config_ini_section), prefix='sqlalchemy.', poolclass=pool.NullPool) connection = engine.connect() context.configure( connection=connection, target_metadata=target_metadata ) try: with context.begin_transaction(): context.run_migrations() finally: connection.close() if context.is_offline_mode(): run_migrations_offline() else: run_migrations_online()
true
true
f72400dd434d64329eb589b199f10dc4c9c14be1
14,518
py
Python
shapSD/pysubgroup/nominal_target.py
XiaoqiMa/shapSD
545f61c9e8329c7271051f22f99ba32508ba74a1
[ "MIT" ]
2
2019-06-26T21:31:03.000Z
2019-06-27T16:59:58.000Z
shapSD/pysubgroup/nominal_target.py
XiaoqiMa/shapSD
545f61c9e8329c7271051f22f99ba32508ba74a1
[ "MIT" ]
null
null
null
shapSD/pysubgroup/nominal_target.py
XiaoqiMa/shapSD
545f61c9e8329c7271051f22f99ba32508ba74a1
[ "MIT" ]
null
null
null
'''
Created on 29.09.2017

@author: lemmerfn
'''
import numpy as np
import scipy.stats

from functools import total_ordering
from .measures import AbstractInterestingnessMeasure, BoundedInterestingnessMeasure
from .utils import effective_sample_size, powerset
from .subgroup import SubgroupDescription, Subgroup, NominalSelector


@total_ordering
class NominalTarget(object):
    """Target for the boolean model class (classic subgroup discovery):
    wraps a NominalSelector that defines the positive class."""

    def __init__(self, target_attribute=None, target_value=None, target_selector=None):
        """
        Creates a new target for the boolean model class (classic subgroup discovery).
        If target_attribute and target_value are given, the target_selector is computed
        using attribute and value.

        :raises BaseException: if both a selector and an attribute/value pair are given,
            or if neither is given.
        """
        if target_attribute is not None and target_value is not None:
            if target_selector is not None:
                raise BaseException(
                    "NominalTarget is to be constructed EITHER by a selector OR by attribute/value pair")
            target_selector = NominalSelector(target_attribute, target_value)
        if target_selector is None:
            raise BaseException("No target selector given")
        self.target_selector = target_selector

    def __repr__(self):
        return "T: " + str(self.target_selector)

    def __eq__(self, other):
        return self.__dict__ == other.__dict__

    def __lt__(self, other):
        return str(self) < str(other)

    def covers(self, instance):
        """Boolean mask of instances that belong to the positive class."""
        return self.target_selector.covers(instance)

    def get_attributes(self):
        """Names of the attributes this target depends on."""
        return [self.target_selector.get_attribute_name()]

    @staticmethod
    def get_base_statistics(data, subgroup, weighting_attribute=None):
        """
        Compute the four basic counts for a subgroup.

        :param data: the dataset (indexable by column, with len()).
        :param subgroup: object exposing subgroup_description and target.
        :param weighting_attribute: optional column of instance weights; when given,
            all counts become weighted sums.
        :return: (instances_dataset, positives_dataset, instances_subgroup,
            positives_subgroup)
        """
        sg_instances = subgroup.subgroup_description.covers(data)
        positives = subgroup.target.covers(data)
        if weighting_attribute is None:
            instances_subgroup = np.sum(sg_instances)
            positives_dataset = np.sum(positives)
            instances_dataset = len(data)
            positives_subgroup = np.sum(np.logical_and(sg_instances, positives))
            return instances_dataset, positives_dataset, instances_subgroup, positives_subgroup
        weights = data[weighting_attribute]
        instances_dataset = np.sum(weights)
        instances_subgroup = np.sum(np.dot(sg_instances, weights))
        positives_dataset = np.sum(np.dot(positives, weights))
        positives_subgroup = np.sum(np.dot(np.logical_and(sg_instances, positives), weights))
        return instances_dataset, positives_dataset, instances_subgroup, positives_subgroup

    @staticmethod
    def calculate_statistics(subgroup, data, weighting_attribute=None):
        """Fill subgroup.statistics with size/coverage/target-share/lift figures,
        plus '*_weighted' variants when a weighting attribute is given."""
        (instances_dataset, positives_dataset, instances_subgroup, positives_subgroup) = \
            NominalTarget.get_base_statistics(data, subgroup, weighting_attribute)
        subgroup.statistics['size_sg'] = instances_subgroup
        subgroup.statistics['size_dataset'] = instances_dataset
        subgroup.statistics['positives_sg'] = positives_subgroup
        subgroup.statistics['positives_dataset'] = positives_dataset

        subgroup.statistics['size_complement'] = instances_dataset - instances_subgroup
        subgroup.statistics['relative_size_sg'] = instances_subgroup / instances_dataset
        subgroup.statistics['relative_size_complement'] = \
            (instances_dataset - instances_subgroup) / instances_dataset
        subgroup.statistics['coverage_sg'] = positives_subgroup / positives_dataset
        subgroup.statistics['coverage_complement'] = \
            (positives_dataset - positives_subgroup) / positives_dataset
        subgroup.statistics['target_share_sg'] = positives_subgroup / instances_subgroup
        subgroup.statistics['target_share_complement'] = (positives_dataset - positives_subgroup) / (
            instances_dataset - instances_subgroup)
        subgroup.statistics['target_share_dataset'] = positives_dataset / instances_dataset
        subgroup.statistics['lift'] = (positives_subgroup / instances_subgroup) / (
            positives_dataset / instances_dataset)

        if weighting_attribute is not None:
            # FIX: the original passed (subgroup, data, ...) here, swapping the first
            # two arguments relative to get_base_statistics(data, subgroup, ...).
            (instances_dataset, positives_dataset, instances_subgroup, positives_subgroup) = \
                NominalTarget.get_base_statistics(data, subgroup, weighting_attribute)
        subgroup.statistics['size_sg_weighted'] = instances_subgroup
        subgroup.statistics['size_dataset_weighted'] = instances_dataset
        subgroup.statistics['positives_sg_weighted'] = positives_subgroup
        subgroup.statistics['positives_dataset_weighted'] = positives_dataset

        subgroup.statistics['size_complement_weighted'] = instances_dataset - instances_subgroup
        subgroup.statistics['relative_size_sg_weighted'] = instances_subgroup / instances_dataset
        subgroup.statistics['relative_size_complement_weighted'] = \
            (instances_dataset - instances_subgroup) / instances_dataset
        subgroup.statistics['coverage_sg_weighted'] = positives_subgroup / positives_dataset
        subgroup.statistics['coverage_complement_weighted'] = (
            positives_dataset - positives_subgroup) / positives_dataset
        subgroup.statistics['target_share_sg_weighted'] = positives_subgroup / instances_subgroup
        subgroup.statistics['target_share_complement_weighted'] = (positives_dataset - positives_subgroup) / (
            instances_dataset - instances_subgroup)
        subgroup.statistics['target_share_dataset_weighted'] = positives_dataset / instances_dataset
        subgroup.statistics['lift_weighted'] = (positives_subgroup / instances_subgroup) / (
            positives_dataset / instances_dataset)


class ChiSquaredQF(AbstractInterestingnessMeasure):
    @staticmethod
    def chi_squared_qf(instances_dataset, positives_dataset, instances_subgroup, positives_subgroup,
                       min_instances=5, bidirect=True, direction_positive=True):
        # Too small a subgroup (or complement) gives an unreliable statistic.
        if (instances_subgroup < min_instances) or ((instances_dataset - instances_subgroup) < min_instances):
            return float("-inf")
        p_subgroup = positives_subgroup / instances_subgroup
        p_dataset = positives_dataset / instances_dataset
        positives_complement = positives_dataset - positives_subgroup

        # instancesComplement = instancesDataset - instancesSubgroup
        negatives_subgroup = instances_subgroup - positives_subgroup
        negatives_dataset = instances_dataset - positives_dataset
        negatives_complement = negatives_dataset - negatives_subgroup

        # observed = [positivesSubgroup, positives_complement,negatives_subgroup, negatives_complement]
        #
        # if round(positivesSubgroup) < 0 or
        # round(positives_complement) < 0 or
        # round(negatives_subgroup) <0 or
# round (negatives_complement) < 0: # print ("XXXXX") val = scipy.stats.chi2_contingency([[round(positives_subgroup), round(positives_complement)], [round(negatives_subgroup), round(negatives_complement)]], correction=False)[0] if bidirect: return val elif direction_positive and p_subgroup > p_dataset: return val elif not direction_positive and p_subgroup < p_dataset: return val return -val @staticmethod def chi_squared_qf_weighted(subgroup, data, weighting_attribute, effective_sample_size=0, min_instances=5, ): (instancesDataset, positivesDataset, instancesSubgroup, positivesSubgroup) = subgroup.get_base_statistics(data, weighting_attribute) if (instancesSubgroup < min_instances) or ((instancesDataset - instancesSubgroup) < 5): return float("inf") if effective_sample_size == 0: effective_sample_size = effective_sample_size(data[weighting_attribute]) # p_subgroup = positivesSubgroup / instancesSubgroup # p_dataset = positivesDataset / instancesDataset negatives_subgroup = instancesSubgroup - positivesSubgroup negatives_dataset = instancesDataset - positivesDataset positives_complement = positivesDataset - positivesSubgroup negatives_complement = negatives_dataset - negatives_subgroup val = scipy.stats.chi2_contingency([[positivesSubgroup, positives_complement], [negatives_subgroup, negatives_complement]], correction=True)[0] return scipy.stats.chi2.sf(val * effective_sample_size / instancesDataset, 1) def __init__(self, direction='bidirect', min_instances=5): if direction == 'bidirect': self.bidirect = True self.direction_positive = True if direction == 'positive': self.bidirect = False self.direction_positive = True if direction == 'negative': self.bidirect = False self.direction_positive = False self.min_instances = min_instances def evaluate_from_dataset(self, data, subgroup, weighting_attribute=None): if not self.is_applicable(subgroup): raise BaseException("Quality measure cannot be used for this target class") if weighting_attribute is None: result = 
self.evaluate_from_statistics(*subgroup.get_base_statistics(data)) else: (instancesDataset, positivesDataset, instancesSubgroup, positivesSubgroup) = subgroup.get_base_statistics( data, weighting_attribute) weights = data[weighting_attribute] base = self.evaluate_from_statistics(instancesDataset, positivesDataset, instancesSubgroup, positivesSubgroup) result = base * effective_sample_size(weights) / instancesDataset return result def evaluate_from_statistics(self, instances_dataset, positives_dataset, instances_subgroup, positives_subgroup): return ChiSquaredQF.chi_squared_qf(instances_dataset, positives_dataset, instances_subgroup, positives_subgroup, self.min_instances, self.bidirect, self.direction_positive) def supports_weights(self): return True def is_applicable(self, subgroup): return isinstance(subgroup.target, NominalTarget) class StandardQF(AbstractInterestingnessMeasure, BoundedInterestingnessMeasure): @staticmethod def standard_qf(a, instances_dataset, positives_dataset, instances_subgroup, positives_subgroup): if instances_subgroup == 0: return 0 p_subgroup = positives_subgroup / instances_subgroup p_dataset = positives_dataset / instances_dataset return (instances_subgroup / instances_dataset) ** a * (p_subgroup - p_dataset) def __init__(self, a): self.a = a def evaluate_from_dataset(self, data, subgroup, weighting_attribute=None): if not self.is_applicable(subgroup): raise BaseException("Quality measure cannot be used for this target class") return self.evaluate_from_statistics(*subgroup.get_base_statistics(data, weighting_attribute)) def optimistic_estimate_from_dataset(self, data, subgroup, weighting_attribute=None): if not self.is_applicable(subgroup): raise BaseException("Quality measure cannot be used for this target class") return self.optimistic_estimate_from_statistics(*subgroup.get_base_statistics(data, weighting_attribute)) def evaluate_from_statistics(self, instances_dataset, positives_dataset, instances_subgroup, positives_subgroup): 
return StandardQF.standard_qf(self.a, instances_dataset, positives_dataset, instances_subgroup, positives_subgroup) def optimistic_estimate_from_statistics(self, instances_dataset, positives_dataset, instances_subgroup, positives_subgroup): return StandardQF.standard_qf(self.a, instances_dataset, positives_dataset, positives_subgroup, positives_subgroup) def supports_weights(self): return True def is_applicable(self, subgroup): return isinstance(subgroup.target, NominalTarget) class WRAccQF(StandardQF): def __init__(self, a): super().__init__(a) self.a = 1.0 class LiftQF(StandardQF): def __init__(self, a): super().__init__(a) self.a = 0.0 class SimpleBinomial(StandardQF): def __init__(self, a): super().__init__(a) self.a = 0.5 ##### # GeneralizationAware Interestingness Measures ##### class GAStandardQF(AbstractInterestingnessMeasure): def __init__(self, a): self.a = a def evaluate_from_dataset(self, data, subgroup, weighting_attribute=None): (instances_dataset, _, instances_subgroup, positives_subgroup) = subgroup.get_base_statistics(data, weighting_attribute) if (instances_subgroup == 0) or (instances_dataset == instances_subgroup): return 0 p_subgroup = positives_subgroup / instances_subgroup max_target_share = get_max_generalization_target_share(data, subgroup, weighting_attribute) relative_size = (instances_subgroup / instances_dataset) return relative_size ** self.a * (p_subgroup - max_target_share) def supports_weights(self): return True def is_applicable(self, subgroup): return isinstance(subgroup.target, NominalTarget) def get_max_generalization_target_share(data, subgroup, weighting_attribute=None): selectors = subgroup.subgroup_description.selectors generalizations = powerset(selectors) max_target_share = 0 for sels in generalizations: sgd = SubgroupDescription(list(sels)) sg = Subgroup(subgroup.target, sgd) (_, _, instances_subgroup, positives_subgroup) = sg.get_base_statistics(data, weighting_attribute) target_share = positives_subgroup / 
instances_subgroup max_target_share = max(max_target_share, target_share) return max_target_share
49.549488
134
0.694655
import numpy as np import scipy.stats from functools import total_ordering from .measures import AbstractInterestingnessMeasure, BoundedInterestingnessMeasure from .utils import effective_sample_size, powerset from .subgroup import SubgroupDescription, Subgroup, NominalSelector @total_ordering class NominalTarget(object): def __init__(self, target_attribute=None, target_value=None, target_selector=None): if target_attribute is not None and target_value is not None: if target_selector is not None: raise BaseException( "NominalTarget is to be constructed EITHER by a selector OR by attribute/value pair") target_selector = NominalSelector(target_attribute, target_value) if target_selector is None: raise BaseException("No target selector given") self.target_selector = target_selector def __repr__(self): return "T: " + str(self.target_selector) def __eq__(self, other): return self.__dict__ == other.__dict__ def __lt__(self, other): return str(self) < str(other) def covers(self, instance): return self.target_selector.covers(instance) def get_attributes(self): return [self.target_selector.get_attribute_name()] @staticmethod def get_base_statistics(data, subgroup, weighting_attribute=None): if weighting_attribute is None: sg_instances = subgroup.subgroup_description.covers(data) positives = subgroup.target.covers(data) instances_subgroup = np.sum(sg_instances) positives_dataset = np.sum(positives) instances_dataset = len(data) positives_subgroup = np.sum(np.logical_and(sg_instances, positives)) return instances_dataset, positives_dataset, instances_subgroup, positives_subgroup else: weights = data[weighting_attribute] sg_instances = subgroup.subgroup_description.covers(data) positives = subgroup.target.covers(data) instances_dataset = np.sum(weights) instances_subgroup = np.sum(np.dot(sg_instances, weights)) positives_dataset = np.sum(np.dot(positives, weights)) positives_subgroup = np.sum(np.dot(np.logical_and(sg_instances, positives), weights)) return instances_dataset, 
positives_dataset, instances_subgroup, positives_subgroup @staticmethod def calculate_statistics(subgroup, data, weighting_attribute=None): (instances_dataset, positives_dataset, instances_subgroup, positives_subgroup) = \ NominalTarget.get_base_statistics(data, subgroup, weighting_attribute) subgroup.statistics['size_sg'] = instances_subgroup subgroup.statistics['size_dataset'] = instances_dataset subgroup.statistics['positives_sg'] = positives_subgroup subgroup.statistics['positives_dataset'] = positives_dataset subgroup.statistics['size_complement'] = instances_dataset - instances_subgroup subgroup.statistics['relative_size_sg'] = instances_subgroup / instances_dataset subgroup.statistics['relative_size_complement'] = (instances_dataset - instances_subgroup) / instances_dataset subgroup.statistics['coverage_sg'] = positives_subgroup / positives_dataset subgroup.statistics['coverage_complement'] = (positives_dataset - positives_subgroup) / positives_dataset subgroup.statistics['target_share_sg'] = positives_subgroup / instances_subgroup subgroup.statistics['target_share_complement'] = (positives_dataset - positives_subgroup) / ( instances_dataset - instances_subgroup) subgroup.statistics['target_share_dataset'] = positives_dataset / instances_dataset subgroup.statistics['lift'] = (positives_subgroup / instances_subgroup) / ( positives_dataset / instances_dataset) if weighting_attribute is not None: (instances_dataset, positives_dataset, instances_subgroup, positives_subgroup) = \ NominalTarget.get_base_statistics(subgroup, data, weighting_attribute) subgroup.statistics['size_sg_weighted'] = instances_subgroup subgroup.statistics['size_dataset_weighted'] = instances_dataset subgroup.statistics['positives_sg_weighted'] = positives_subgroup subgroup.statistics['positives_dataset_weighted'] = positives_dataset subgroup.statistics['size_complement_weighted'] = instances_dataset - instances_subgroup subgroup.statistics['relative_size_sg_weighted'] = instances_subgroup 
/ instances_dataset subgroup.statistics['relative_size_complement_weighted'] = \ (instances_dataset - instances_subgroup) / instances_dataset subgroup.statistics['coverage_sg_weighted'] = positives_subgroup / positives_dataset subgroup.statistics['coverage_complement_weighted'] = ( positives_dataset - positives_subgroup) / positives_dataset subgroup.statistics['target_share_sg_weighted'] = positives_subgroup / instances_subgroup subgroup.statistics['target_share_complement_weighted'] = (positives_dataset - positives_subgroup) / ( instances_dataset - instances_subgroup) subgroup.statistics['target_share_dataset_weighted'] = positives_dataset / instances_dataset subgroup.statistics['lift_weighted'] = (positives_subgroup / instances_subgroup) / ( positives_dataset / instances_dataset) class ChiSquaredQF(AbstractInterestingnessMeasure): @staticmethod def chi_squared_qf(instances_dataset, positives_dataset, instances_subgroup, positives_subgroup, min_instances=5, bidirect=True, direction_positive=True): if (instances_subgroup < min_instances) or ((instances_dataset - instances_subgroup) < min_instances): return float("-inf") p_subgroup = positives_subgroup / instances_subgroup p_dataset = positives_dataset / instances_dataset positives_complement = positives_dataset - positives_subgroup negatives_subgroup = instances_subgroup - positives_subgroup negatives_dataset = instances_dataset - positives_dataset negatives_complement = negatives_dataset - negatives_subgroup val = scipy.stats.chi2_contingency([[round(positives_subgroup), round(positives_complement)], [round(negatives_subgroup), round(negatives_complement)]], correction=False)[0] if bidirect: return val elif direction_positive and p_subgroup > p_dataset: return val elif not direction_positive and p_subgroup < p_dataset: return val return -val @staticmethod def chi_squared_qf_weighted(subgroup, data, weighting_attribute, effective_sample_size=0, min_instances=5, ): (instancesDataset, positivesDataset, 
instancesSubgroup, positivesSubgroup) = subgroup.get_base_statistics(data, weighting_attribute) if (instancesSubgroup < min_instances) or ((instancesDataset - instancesSubgroup) < 5): return float("inf") if effective_sample_size == 0: effective_sample_size = effective_sample_size(data[weighting_attribute]) negatives_subgroup = instancesSubgroup - positivesSubgroup negatives_dataset = instancesDataset - positivesDataset positives_complement = positivesDataset - positivesSubgroup negatives_complement = negatives_dataset - negatives_subgroup val = scipy.stats.chi2_contingency([[positivesSubgroup, positives_complement], [negatives_subgroup, negatives_complement]], correction=True)[0] return scipy.stats.chi2.sf(val * effective_sample_size / instancesDataset, 1) def __init__(self, direction='bidirect', min_instances=5): if direction == 'bidirect': self.bidirect = True self.direction_positive = True if direction == 'positive': self.bidirect = False self.direction_positive = True if direction == 'negative': self.bidirect = False self.direction_positive = False self.min_instances = min_instances def evaluate_from_dataset(self, data, subgroup, weighting_attribute=None): if not self.is_applicable(subgroup): raise BaseException("Quality measure cannot be used for this target class") if weighting_attribute is None: result = self.evaluate_from_statistics(*subgroup.get_base_statistics(data)) else: (instancesDataset, positivesDataset, instancesSubgroup, positivesSubgroup) = subgroup.get_base_statistics( data, weighting_attribute) weights = data[weighting_attribute] base = self.evaluate_from_statistics(instancesDataset, positivesDataset, instancesSubgroup, positivesSubgroup) result = base * effective_sample_size(weights) / instancesDataset return result def evaluate_from_statistics(self, instances_dataset, positives_dataset, instances_subgroup, positives_subgroup): return ChiSquaredQF.chi_squared_qf(instances_dataset, positives_dataset, instances_subgroup, positives_subgroup, 
self.min_instances, self.bidirect, self.direction_positive) def supports_weights(self): return True def is_applicable(self, subgroup): return isinstance(subgroup.target, NominalTarget) class StandardQF(AbstractInterestingnessMeasure, BoundedInterestingnessMeasure): @staticmethod def standard_qf(a, instances_dataset, positives_dataset, instances_subgroup, positives_subgroup): if instances_subgroup == 0: return 0 p_subgroup = positives_subgroup / instances_subgroup p_dataset = positives_dataset / instances_dataset return (instances_subgroup / instances_dataset) ** a * (p_subgroup - p_dataset) def __init__(self, a): self.a = a def evaluate_from_dataset(self, data, subgroup, weighting_attribute=None): if not self.is_applicable(subgroup): raise BaseException("Quality measure cannot be used for this target class") return self.evaluate_from_statistics(*subgroup.get_base_statistics(data, weighting_attribute)) def optimistic_estimate_from_dataset(self, data, subgroup, weighting_attribute=None): if not self.is_applicable(subgroup): raise BaseException("Quality measure cannot be used for this target class") return self.optimistic_estimate_from_statistics(*subgroup.get_base_statistics(data, weighting_attribute)) def evaluate_from_statistics(self, instances_dataset, positives_dataset, instances_subgroup, positives_subgroup): return StandardQF.standard_qf(self.a, instances_dataset, positives_dataset, instances_subgroup, positives_subgroup) def optimistic_estimate_from_statistics(self, instances_dataset, positives_dataset, instances_subgroup, positives_subgroup): return StandardQF.standard_qf(self.a, instances_dataset, positives_dataset, positives_subgroup, positives_subgroup) def supports_weights(self): return True def is_applicable(self, subgroup): return isinstance(subgroup.target, NominalTarget) class WRAccQF(StandardQF): def __init__(self, a): super().__init__(a) self.a = 1.0 class LiftQF(StandardQF): def __init__(self, a): super().__init__(a) self.a = 0.0 class 
SimpleBinomial(StandardQF): def __init__(self, a): super().__init__(a) self.a = 0.5 F(AbstractInterestingnessMeasure): def __init__(self, a): self.a = a def evaluate_from_dataset(self, data, subgroup, weighting_attribute=None): (instances_dataset, _, instances_subgroup, positives_subgroup) = subgroup.get_base_statistics(data, weighting_attribute) if (instances_subgroup == 0) or (instances_dataset == instances_subgroup): return 0 p_subgroup = positives_subgroup / instances_subgroup max_target_share = get_max_generalization_target_share(data, subgroup, weighting_attribute) relative_size = (instances_subgroup / instances_dataset) return relative_size ** self.a * (p_subgroup - max_target_share) def supports_weights(self): return True def is_applicable(self, subgroup): return isinstance(subgroup.target, NominalTarget) def get_max_generalization_target_share(data, subgroup, weighting_attribute=None): selectors = subgroup.subgroup_description.selectors generalizations = powerset(selectors) max_target_share = 0 for sels in generalizations: sgd = SubgroupDescription(list(sels)) sg = Subgroup(subgroup.target, sgd) (_, _, instances_subgroup, positives_subgroup) = sg.get_base_statistics(data, weighting_attribute) target_share = positives_subgroup / instances_subgroup max_target_share = max(max_target_share, target_share) return max_target_share
true
true
f724016dd1c040c5547f313a4d9c94947563f287
37
py
Python
cushead/generator/__init__.py
mrsantos321/customhead
bb70867afc34abe7a2ef76bb0f92c08d7ff38214
[ "MIT" ]
3
2019-10-08T06:02:23.000Z
2020-01-22T09:14:35.000Z
cushead/generator/__init__.py
mrsantos321/cushead
bb70867afc34abe7a2ef76bb0f92c08d7ff38214
[ "MIT" ]
297
2019-08-22T19:45:23.000Z
2022-03-26T02:30:25.000Z
cushead/generator/__init__.py
mrsantos321/cushead
bb70867afc34abe7a2ef76bb0f92c08d7ff38214
[ "MIT" ]
5
2019-09-25T02:35:04.000Z
2021-03-31T04:23:47.000Z
""" Handle the files generation. """
9.25
28
0.648649
true
true
f724019f2879e58a1cc31e046087322927173612
21,423
py
Python
train.py
designer357/MSLSTM
923f29f5a274ae41dbfe79d99e1ea28bb0cf5109
[ "MIT" ]
14
2017-07-21T18:31:21.000Z
2022-01-21T11:39:45.000Z
train.py
designer357/MSLSTM
923f29f5a274ae41dbfe79d99e1ea28bb0cf5109
[ "MIT" ]
3
2019-06-02T13:00:58.000Z
2020-04-24T14:40:50.000Z
train.py
designer357/MSLSTM
923f29f5a274ae41dbfe79d99e1ea28bb0cf5109
[ "MIT" ]
6
2018-02-22T08:26:11.000Z
2022-03-08T23:32:06.000Z
# -*- coding:utf-8 -*- """ mincheng:mc.cheng@my.cityu.edu.hk """ from __future__ import division import sys import printlog import datetime import os import time import sklearn from sklearn.metrics import confusion_matrix from baselines import sclearn import evaluation from collections import defaultdict import tensorflow as tf import mslstm import config import loaddata import numpy as np import visualize from sklearn.metrics import accuracy_score from baselines import nnkeras,sclearn import matplotlib.pyplot as plt flags = tf.app.flags FLAGS = flags.FLAGS def iterate_minibatches(inputs, targets, batchsize, shuffle=False): assert inputs.shape[0] == targets.shape[0] if shuffle: indices = np.arange(inputs.shape[0]) np.random.shuffle(indices) for start_idx in range(0, inputs.shape[0] - batchsize + 1, batchsize): if shuffle: excerpt = indices[start_idx:start_idx + batchsize] else: excerpt = slice(start_idx, start_idx + batchsize) yield inputs[excerpt], targets[excerpt] def pprint(msg,method=''): #if not 'Warning' in msg: if 1<0: sys.stdout = printlog.PyLogger('',method+'_'+str(FLAGS.num_neurons1)) print(msg) try: sys.stderr.write(msg+'\n') except: pass #sys.stdout.flush() else: print(msg) #def sess_run(commander,data,label): #global sess, data_x, data_y #return sess.run(commander, {data_x: data, data_y: label}) def train_lstm(method,filename_train_list,filename_test,trigger_flag,evalua_flag,is_binary_class,result_list_dict,evaluation_list): global tempstdout FLAGS.option = method dropout = 0.8 x_train, y_train, x_val, y_val, x_test, y_test = loaddata.get_data(FLAGS.pooling_type, FLAGS.is_add_noise, FLAGS.noise_ratio, FLAGS.data_dir, filename_test, FLAGS.sequence_window, trigger_flag,is_binary_class, multiScale=FLAGS.is_multi_scale, waveScale=FLAGS.scale_levels, waveType=FLAGS.wave_type) """ if filename_test == 'HB_AS_Leak.txt': filename_train = 'HB_C_N_S.txt' elif filename_test == 'HB_Code_Red_I.txt': filename_train = 'HB_A_N_S.txt' elif filename_test == 
'HB_Nimda.txt': filename_train = 'HB_A_C_S.txt' elif filename_test == 'HB_Slammer.txt': filename_train = 'HB_A_C_N.txt' print(filename_test) #x_train, y_train, x_val, y_val = loaddata.get_trainData(FLAGS.pooling_type, FLAGS.is_add_noise, FLAGS.noise_ratio, FLAGS.data_dir, # filename_train, FLAGS.sequence_window, trigger_flag,is_binary_class, # multiScale=FLAGS.is_multi_scale, waveScale=FLAGS.scale_levels, # waveType=FLAGS.wave_type) #x_test, y_test = loaddata.get_testData(FLAGS.pooling_type, FLAGS.is_add_noise, FLAGS.noise_ratio, FLAGS.data_dir, # filename_test, FLAGS.sequence_window, trigger_flag,is_binary_class, # multiScale=FLAGS.is_multi_scale, waveScale=FLAGS.scale_levels, # waveType=FLAGS.wave_type) """ #loaddata.Multi_Scale_Plotting_2(x_train) if FLAGS.is_multi_scale: FLAGS.scale_levels = x_train.shape[1] FLAGS.input_dim = x_train.shape[-1] FLAGS.number_class = y_train.shape[1] if "Nimda" in filename_test: FLAGS.batch_size = int(int(x_train.shape[0])/5) else: FLAGS.batch_size = int(x_train.shape[0]) else: FLAGS.input_dim = x_train.shape[-1] FLAGS.number_class = y_train.shape[1] if "Nimda" in filename_test: FLAGS.batch_size = int(int(x_train.shape[0])/5) else: FLAGS.batch_size = int(x_train.shape[0]) #g = tf.Graph() with tf.Graph().as_default(): #config = tf.ConfigProto() config = tf.ConfigProto(device_count={'/gpu': 0}) #turn GPU on and off #config = tf.ConfigProto(log_device_placement=True) #config.gpu_options.per_process_gpu_memory_fraction = 0.2 #with tf.variable_scope("middle")as scope: tf.set_random_seed(1337) #global_step = tf.Variable(0,name="global_step",trainable=False) data_x,data_y = mslstm.inputs(FLAGS.option) #output_u_w,prediction, label = mslstm.inference(data_x,data_y,FLAGS.option) is_training = tf.placeholder(tf.bool) prediction, label,output_last = mslstm.inference(data_x,data_y,FLAGS.option,is_training) loss = mslstm.loss_(prediction, label) tran_op,optimizer = mslstm.train(loss) minimize = optimizer.minimize(loss) correct_pred = 
tf.equal(tf.argmax(prediction, 1), tf.argmax(label, 1)) accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32)) #summary_op = tf.merge_all_summaries() weights = tf.Variable(tf.constant(0.1, shape=[len(y_test)*FLAGS.sequence_window, 1, FLAGS.scale_levels]), name="weights123") init_op = tf.global_variables_initializer() #init_op = tf.initialize_all_variables() sess = tf.Session(config=config) sess.run(init_op) #summary_writer = tf.train.SummaryWriter(FLAGS.log_dir, sess.graph) #saver = tf.train.Saver() saver = tf.train.Saver({"my_weights": weights}) epoch_training_loss_list = [] epoch_training_acc_list = [] epoch_val_loss_list = [] epoch_val_acc_list = [] early_stopping = 10 no_of_batches = int(len(x_train) / FLAGS.batch_size) #visualize.curve_plotting_withWindow(x_train, y_train, 0, "Train_"+'_'+FLAGS.option) #visualize.curve_plotting_withWindow(x_test, y_test, 2, "Test_"+'_'+FLAGS.option) total_iteration = 0 for i in range(FLAGS.max_epochs): if early_stopping > 0: pass else: break j_iteration = 0 for j_batch in iterate_minibatches(x_train,y_train,FLAGS.batch_size,shuffle=False): j_iteration += 1 total_iteration += 1 inp, out = j_batch sess.run(minimize, {data_x: inp, data_y: out, is_training:True}) training_acc, training_loss = sess.run((accuracy, loss), {data_x: inp, data_y: out,is_training:True}) #sys.stdout = tempstdout val_acc, val_loss = sess.run((accuracy, loss), {data_x:x_val, data_y:y_val,is_training:True}) pprint( FLAGS.option + "_Epoch%s" % (str(i + 1)) + ">" * 3 +'_Titer-'+str(total_iteration) +'_iter-'+str(j_iteration)+ str(FLAGS.wave_type) + '-' + str(FLAGS.scale_levels) + '-' + str(FLAGS.learning_rate)+'-'+str(FLAGS.num_neurons1)+'-'+str(FLAGS.num_neurons2)+ ">>>=" + "train_accuracy: %s, train_loss: %s" % ( str(training_acc), str(training_loss)) \ + ",\tval_accuracy: %s, val_loss: %s" % (str(val_acc), str(val_loss)), method) epoch_training_loss_list.append(training_loss) epoch_training_acc_list.append(training_acc) 
epoch_val_loss_list.append(val_loss) epoch_val_acc_list.append(val_acc) try: max_val_acc = epoch_val_acc_list[-2] except: max_val_acc = 0 if epoch_val_acc_list[-1] < max_val_acc: early_stopping -= 1 elif epoch_val_acc_list[-1] >= max_val_acc: early_stopping = 10 if val_loss > 10 or val_loss == np.nan: break if 1<0: #pprint("PPP") weights_results = sess.run(output_last, {data_x:x_test, data_y: y_test}) #print(weights_results) #sys.stdout = tempstdout visualize.curve_plotting(weights_results,y_test,filename_test,FLAGS.option) #pprint("QQQ") with open(filename_test+"_EA.txt",'w')as fout: fout.write(weights_results) #sess.run(weights.assign(weights_results)) else: pass #weights = output_u_w.eval(session=sess) #weights = saver.restore(sess, "./tf_tmp/model.ckpt") #pprint(weights) #weight_list = return_max_index(weights) result = sess.run(prediction, {data_x:x_test, data_y: y_test}) #print(result) #pprint(result) #print("LLL") saver.save(sess, "./tf_tmp/model.ckpt") sess.close() #results = evaluation.evaluation(y_test, result)#Computing ACCURACY, F1-Score, .., etc if is_binary_class == True: #sys.stdout = tempstdout results = evaluation.evaluation(y_test, result, trigger_flag, evalua_flag) # Computing ACCURACY,F1-score,..,etc y_test = loaddata.reverse_one_hot(y_test) result = loaddata.reverse_one_hot(result) else: symbol_list = [0, 1, 2, 3, 4] sys.stdout = tempstdout print(y_test) print(result) y_test = loaddata.reverse_one_hot(y_test) result = loaddata.reverse_one_hot(result) confmat = confusion_matrix(y_test, result, labels=symbol_list) visualize.plotConfusionMatrix(confmat) #accuracy = sklearn.metrics.accuracy_score(y_test, result) symbol_list2 = [0] y_ = [] for symbol in symbol_list2: for tab in range(len(y_test)): if y_test[tab] == symbol and y_test[tab] == result[tab]: y_.append(symbol) # print(y_test[0:10]) # rint(result[0:10]) # print("Accuracy is :"+str(accuracy)) accuracy = float(len(y_)) / (list(result).count(symbol)) print("Accuracy of " + str(symbol) + " is 
:" + str(accuracy)) print("True is ") # print(y_test) print("The 0 of True is " + str(list(y_test).count(0))) print("The 1 of True is " + str(list(y_test).count(1))) print("The 2 of True is " + str(list(y_test).count(2))) print("The 3 of True is " + str(list(y_test).count(3))) print("The 4 of True is " + str(list(y_test).count(4))) # print(len(y_test)) print("Predict is ") # print(result) print("The 0 of Predict is " + str(list(result).count(0))) print("The 1 of Predict is " + str(list(result).count(1))) print("The 2 of Predict is " + str(list(result).count(2))) print("The 3 of Predict is " + str(list(result).count(3))) print("The 4 of Predict is " + str(list(result).count(4))) print("Accuracy is :" + str(accuracy)) f1_score = sklearn.metrics.f1_score(y_test, result,average="macro") print("F-score is :" + str(f1_score)) results = {'ACCURACY': accuracy, 'F1_SCORE': f1_score, 'AUC': 9999, 'G_MEAN': 9999} sys.stdout = tempstdout #print(weights_results.shape) #print("215") y_test2 = np.array(y_test) result2 = np.array(result) #results = accuracy_score(y_test2, result2) #print(y_test2) #print(result2) #print(results) with open(os.path.join(os.path.join(os.getcwd(),'stat'),"StatFalseAlarm_" + filename_test + "_True.txt"), "w") as fout: for tab in range(len(y_test2)): fout.write(str(int(y_test2[tab])) + '\n') with open(os.path.join(os.path.join(os.getcwd(),'stat'),"StatFalseAlarm_" + filename_test + "_" + method + "_" + "_Predict.txt"), "w") as fout: for tab in range(len(result2)): fout.write(str(int(result2[tab])) + '\n') #eval_list = ["AUC", "G_MEAN","ACCURACY","F1_SCORE"] for each_eval in evaluation_list: result_list_dict[each_eval].append(results[each_eval]) if evalua_flag: with open(os.path.join(FLAGS.output, "TensorFlow_Log" + filename_test + ".txt"), "a")as fout: if not FLAGS.is_multi_scale: outfileline = FLAGS.option + "_epoch:" + str(FLAGS.max_epochs) + ",_lr:" + str(FLAGS.learning_rate) + ",_multi_scale:" + str(FLAGS.is_multi_scale) + ",hidden_nodes: 
"+str(FLAGS.num_neurons1)+"/"+str(FLAGS.num_neurons2) + "\n" else: outfileline = FLAGS.option + "_epoch:" + str(FLAGS.max_epochs) + ",_wavelet:"+str(FLAGS.wave_type) + ",_lr:" + str(FLAGS.learning_rate) + ",_multi_scale:" + str(FLAGS.is_multi_scale) + ",_train_set_using_level:" + str(FLAGS.scale_levels) + ",hidden_nodes: "+str(FLAGS.num_neurons1)+"/"+str(FLAGS.num_neurons2) + "\n" fout.write(outfileline) for each_eval in evaluation_list: #for eachk, eachv in result_list_dict.items(): fout.write(each_eval + ": " + str(round(np.mean(result_list_dict[each_eval]), 3)) + ",\t") fout.write('\n') return epoch_training_acc_list,epoch_val_acc_list,epoch_training_loss_list,epoch_val_loss_list else: return results def train_classic(method,filename_train,filename_test, trigger_flag,evalua_flag,is_binary_class,evaluation_list): return sclearn.Basemodel(method,filename_train,filename_test,trigger_flag,evalua_flag,evaluation_list) def train(method,filename_train,filename_test,trigger_flag,evalua_flag,is_binary_class,evaluation_list,wave_type='db1'): global data_x, data_y result_list_dict = defaultdict(list) #evaluation_list = ["ACCURACY", "F1_SCORE", "AUC", "G_MEAN"] for each in evaluation_list: result_list_dict[each] = [] if 'L' in method or 'RNN' in method: sys.stdout = tempstdout if method == '1L' or method == '2L' or method == '3L' \ or method == '4L' or method == '5L' or method == 'RNN': #FLAGS.learning_rate = 0.01 FLAGS.is_multi_scale = False elif 'AL' == method: #FLAGS.learning_rate = 0.01 FLAGS.is_multi_scale = False else: #FLAGS.learning_rate = 0.05 FLAGS.is_multi_scale = True FLAGS.wave_type = wave_type return train_lstm(method,filename_train,filename_test,trigger_flag,evalua_flag,is_binary_class,result_list_dict,evaluation_list) else: sys.stdout = tempstdout return train_classic(method,filename_train,filename_test,trigger_flag,evalua_flag,is_binary_class,result_list_dict,evaluation_list) def main(unused_argv): global tempstdout #main function #wave_type_list 
=['db1','db2','haar','coif1','db1','db2','haar','coif1','db1','db2'] wave_type_list = ['haar'] multi_scale_value_list = [2,3,4,5,6,10] case_label = {'SVM':'SVM','NB':'NB','DT':'DT','Ada.Boost':'Ada.Boost','RF':'RF','1NN':'1NN','1NN-DTW':'DTW', 'SVMF':'SVMF','SVMW':'SVMW','MLP':'MLP','RNN':'RNN','1L':'LSTM','2L':'2-LSTM','3L':'3-LSTM',\ 'AL':'ALSTM','HL':'MSLSTM','HAL':'MSLSTM'} trigger_flag = 1 evalua_flag = True is_binary_class = True single_layer = True if is_binary_class: filename_list = ["HB_AS_Leak.txt","HB_Code_Red_I.txt","HB_Nimda.txt","HB_Slammer.txt"] #filename_list = ["HB_Slammer.txt"] # HB_Code_Red_I.txt # HB_Nimda.txt # HB_Slammer.txt else: filename_list = ["HB_ALL.txt"] if trigger_flag == 1 : if single_layer: #case = ['AL'] #case = ['1L','3L','AL'] case = ['MLP','RNN','1L','2L','3L','AL'] else: case = ['HL','HAL'] #case = ['HL','HAL'] else: case = ["1NN"] #case = ["RF","SVM","SVMF","SVMW","NB","DT","Ada.Boost","1NN"] #case = ["NB","1NN","Ada.Boost","RF"] if evalua_flag: evaluation_list = ["AUC", "G_MEAN", "ACCURACY", "F1_SCORE"] else: evaluation_list = ["FPR", "TPR","AUC","G_MEAN"] wave_type = wave_type_list[0] hidden_unit1_list = [8,16,32,64,128,256] #hidden_unit1_list = [16] hidden_unit2_list = [8,16,32,64,128] #hidden_unit2_list = [8] #combination_list = [(16,8),(16,32),(16,64),(32,64),(128,16)] #combination_list = [(8,8),(8,32),(16,8),(16,64),(128,16),(128,64)] #learning_rate_list = [0.001, 0.01, 0.05, 0.1] learning_rate_list = [0.1,0.05,0.01,0.001] for tab in range(len(filename_list)): case_list = [] train_acc_list = [] val_acc_list = [] train_loss_list = [] val_loss_list = [] if single_layer: combination_list = hidden_unit1_list else: combination_list = [] for each1 in hidden_unit1_list: for each2 in hidden_unit2_list: combination_list.append((each1, each2)) """ if filename_list[tab] == "HB_AS_Leak.txt": combination_list = [(32, 64), (32, 128), (64, 64)] elif filename_list[tab] == "HB_Code_Red_I.txt": combination_list = [(32, 32), (16, 8), (16, 
64), (32, 64)] elif filename_list[tab] == "HB_Nimda.txt": combination_list = [(8, 32), (32, 64)] elif filename_list[tab] == "HB_Slammer.txt": combination_list = [(16, 8), (16, 32), (16, 64)] """ results = {} for each_case in case: if 1>0: case_list.append(case_label[each_case]) if trigger_flag: # sys.stdout = tempstdout if each_case == 'MLP': if evalua_flag: nnkeras.Basemodel(each_case, filename_list[tab],trigger_flag,evalua_flag,is_binary_class,evaluation_list) else: results[case_label[each_case]] = nnkeras.Basemodel(each_case, filename_list[tab],trigger_flag,evalua_flag,is_binary_class,evaluation_list) else: if evalua_flag: for learning_rate in learning_rate_list: FLAGS.learning_rate = learning_rate for each_comb in combination_list: if not 'H' in each_case: FLAGS.num_neurons1 = each_comb #FLAGS.num_neurons1 = 16 #FLAGS.learning_rate = 0.001 else: #if each_case == 'HAL': #FLAGS.num_neurons1, FLAGS.num_neurons2 = (100,64) #elif each_case == 'HL': #FLAGS.num_neurons1, FLAGS.num_neurons2 = (16,8) FLAGS.num_neurons1, FLAGS.num_neurons2 = each_comb train_acc,val_acc,train_loss,val_loss = train(each_case,filename_list, filename_list[tab],trigger_flag,evalua_flag,is_binary_class,evaluation_list,wave_type) train_acc_list.append(train_acc) val_acc_list.append(val_acc) train_loss_list.append(train_loss) val_loss_list.append(val_loss) #visualize.epoch_acc_plotting(filename_list[tab],case_list,FLAGS.sequence_window,FLAGS.learning_rate,train_acc_list,val_acc_list) #visualize.epoch_loss_plotting(filename_list[tab], case_list,FLAGS.sequence_window, FLAGS.learning_rate,train_loss_list, val_loss_list) else: results[case_label[each_case]] = train(each_case,filename_list, filename_list[tab],trigger_flag,evalua_flag,is_binary_class,evaluation_list,wave_type) else: sys.stdout = tempstdout if evalua_flag: sclearn.Basemodel(each_case, filename_list[tab], trigger_flag, evalua_flag,is_binary_class,evaluation_list) else: results[case_label[each_case]] = sclearn.Basemodel(each_case, 
filename_list[tab],trigger_flag,evalua_flag,is_binary_class,evaluation_list) else: pass if not evalua_flag: visualize.plotAUC(results,case_list,filename_list[tab]) else: if trigger_flag: try: print() #visualize.epoch_acc_plotting(filename_list[tab], case_list, FLAGS.sequence_window,FLAGS.learning_rate, train_acc_list, val_acc_list) #visualize.epoch_loss_plotting(filename_list[tab], case_list, FLAGS.sequence_window,FLAGS.learning_rate, train_loss_list, val_loss_list) except: pass end = time.time() pprint("The time elapsed : " + str(end - start) + ' seconds.\n') if __name__ == "__main__": global tempstdout tempstdout = sys.stdout pprint("------------------------------------------------"+str(datetime.datetime.now())+"--------------------------------------------") start = time.time() tf.app.run()
46.170259
333
0.586986
from __future__ import division import sys import printlog import datetime import os import time import sklearn from sklearn.metrics import confusion_matrix from baselines import sclearn import evaluation from collections import defaultdict import tensorflow as tf import mslstm import config import loaddata import numpy as np import visualize from sklearn.metrics import accuracy_score from baselines import nnkeras,sclearn import matplotlib.pyplot as plt flags = tf.app.flags FLAGS = flags.FLAGS def iterate_minibatches(inputs, targets, batchsize, shuffle=False): assert inputs.shape[0] == targets.shape[0] if shuffle: indices = np.arange(inputs.shape[0]) np.random.shuffle(indices) for start_idx in range(0, inputs.shape[0] - batchsize + 1, batchsize): if shuffle: excerpt = indices[start_idx:start_idx + batchsize] else: excerpt = slice(start_idx, start_idx + batchsize) yield inputs[excerpt], targets[excerpt] def pprint(msg,method=''): if 1<0: sys.stdout = printlog.PyLogger('',method+'_'+str(FLAGS.num_neurons1)) print(msg) try: sys.stderr.write(msg+'\n') except: pass else: print(msg) def train_lstm(method,filename_train_list,filename_test,trigger_flag,evalua_flag,is_binary_class,result_list_dict,evaluation_list): global tempstdout FLAGS.option = method dropout = 0.8 x_train, y_train, x_val, y_val, x_test, y_test = loaddata.get_data(FLAGS.pooling_type, FLAGS.is_add_noise, FLAGS.noise_ratio, FLAGS.data_dir, filename_test, FLAGS.sequence_window, trigger_flag,is_binary_class, multiScale=FLAGS.is_multi_scale, waveScale=FLAGS.scale_levels, waveType=FLAGS.wave_type) if FLAGS.is_multi_scale: FLAGS.scale_levels = x_train.shape[1] FLAGS.input_dim = x_train.shape[-1] FLAGS.number_class = y_train.shape[1] if "Nimda" in filename_test: FLAGS.batch_size = int(int(x_train.shape[0])/5) else: FLAGS.batch_size = int(x_train.shape[0]) else: FLAGS.input_dim = x_train.shape[-1] FLAGS.number_class = y_train.shape[1] if "Nimda" in filename_test: FLAGS.batch_size = int(int(x_train.shape[0])/5) 
else: FLAGS.batch_size = int(x_train.shape[0]) with tf.Graph().as_default(): config = tf.ConfigProto(device_count={'/gpu': 0}) tf.set_random_seed(1337) data_x,data_y = mslstm.inputs(FLAGS.option) is_training = tf.placeholder(tf.bool) prediction, label,output_last = mslstm.inference(data_x,data_y,FLAGS.option,is_training) loss = mslstm.loss_(prediction, label) tran_op,optimizer = mslstm.train(loss) minimize = optimizer.minimize(loss) correct_pred = tf.equal(tf.argmax(prediction, 1), tf.argmax(label, 1)) accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32)) weights = tf.Variable(tf.constant(0.1, shape=[len(y_test)*FLAGS.sequence_window, 1, FLAGS.scale_levels]), name="weights123") init_op = tf.global_variables_initializer() sess = tf.Session(config=config) sess.run(init_op) saver = tf.train.Saver({"my_weights": weights}) epoch_training_loss_list = [] epoch_training_acc_list = [] epoch_val_loss_list = [] epoch_val_acc_list = [] early_stopping = 10 no_of_batches = int(len(x_train) / FLAGS.batch_size) total_iteration = 0 for i in range(FLAGS.max_epochs): if early_stopping > 0: pass else: break j_iteration = 0 for j_batch in iterate_minibatches(x_train,y_train,FLAGS.batch_size,shuffle=False): j_iteration += 1 total_iteration += 1 inp, out = j_batch sess.run(minimize, {data_x: inp, data_y: out, is_training:True}) training_acc, training_loss = sess.run((accuracy, loss), {data_x: inp, data_y: out,is_training:True}) val_acc, val_loss = sess.run((accuracy, loss), {data_x:x_val, data_y:y_val,is_training:True}) pprint( FLAGS.option + "_Epoch%s" % (str(i + 1)) + ">" * 3 +'_Titer-'+str(total_iteration) +'_iter-'+str(j_iteration)+ str(FLAGS.wave_type) + '-' + str(FLAGS.scale_levels) + '-' + str(FLAGS.learning_rate)+'-'+str(FLAGS.num_neurons1)+'-'+str(FLAGS.num_neurons2)+ ">>>=" + "train_accuracy: %s, train_loss: %s" % ( str(training_acc), str(training_loss)) \ + ",\tval_accuracy: %s, val_loss: %s" % (str(val_acc), str(val_loss)), method) 
epoch_training_loss_list.append(training_loss) epoch_training_acc_list.append(training_acc) epoch_val_loss_list.append(val_loss) epoch_val_acc_list.append(val_acc) try: max_val_acc = epoch_val_acc_list[-2] except: max_val_acc = 0 if epoch_val_acc_list[-1] < max_val_acc: early_stopping -= 1 elif epoch_val_acc_list[-1] >= max_val_acc: early_stopping = 10 if val_loss > 10 or val_loss == np.nan: break if 1<0: weights_results = sess.run(output_last, {data_x:x_test, data_y: y_test}) visualize.curve_plotting(weights_results,y_test,filename_test,FLAGS.option) with open(filename_test+"_EA.txt",'w')as fout: fout.write(weights_results) else: pass result = sess.run(prediction, {data_x:x_test, data_y: y_test}) saver.save(sess, "./tf_tmp/model.ckpt") sess.close() results = evaluation.evaluation(y_test, result, trigger_flag, evalua_flag) y_test = loaddata.reverse_one_hot(y_test) result = loaddata.reverse_one_hot(result) else: symbol_list = [0, 1, 2, 3, 4] sys.stdout = tempstdout print(y_test) print(result) y_test = loaddata.reverse_one_hot(y_test) result = loaddata.reverse_one_hot(result) confmat = confusion_matrix(y_test, result, labels=symbol_list) visualize.plotConfusionMatrix(confmat) symbol_list2 = [0] y_ = [] for symbol in symbol_list2: for tab in range(len(y_test)): if y_test[tab] == symbol and y_test[tab] == result[tab]: y_.append(symbol) accuracy = float(len(y_)) / (list(result).count(symbol)) print("Accuracy of " + str(symbol) + " is :" + str(accuracy)) print("True is ") print("The 0 of True is " + str(list(y_test).count(0))) print("The 1 of True is " + str(list(y_test).count(1))) print("The 2 of True is " + str(list(y_test).count(2))) print("The 3 of True is " + str(list(y_test).count(3))) print("The 4 of True is " + str(list(y_test).count(4))) print("Predict is ") print("The 0 of Predict is " + str(list(result).count(0))) print("The 1 of Predict is " + str(list(result).count(1))) print("The 2 of Predict is " + str(list(result).count(2))) print("The 3 of Predict is " + 
str(list(result).count(3))) print("The 4 of Predict is " + str(list(result).count(4))) print("Accuracy is :" + str(accuracy)) f1_score = sklearn.metrics.f1_score(y_test, result,average="macro") print("F-score is :" + str(f1_score)) results = {'ACCURACY': accuracy, 'F1_SCORE': f1_score, 'AUC': 9999, 'G_MEAN': 9999} sys.stdout = tempstdout y_test2 = np.array(y_test) result2 = np.array(result) with open(os.path.join(os.path.join(os.getcwd(),'stat'),"StatFalseAlarm_" + filename_test + "_True.txt"), "w") as fout: for tab in range(len(y_test2)): fout.write(str(int(y_test2[tab])) + '\n') with open(os.path.join(os.path.join(os.getcwd(),'stat'),"StatFalseAlarm_" + filename_test + "_" + method + "_" + "_Predict.txt"), "w") as fout: for tab in range(len(result2)): fout.write(str(int(result2[tab])) + '\n') for each_eval in evaluation_list: result_list_dict[each_eval].append(results[each_eval]) if evalua_flag: with open(os.path.join(FLAGS.output, "TensorFlow_Log" + filename_test + ".txt"), "a")as fout: if not FLAGS.is_multi_scale: outfileline = FLAGS.option + "_epoch:" + str(FLAGS.max_epochs) + ",_lr:" + str(FLAGS.learning_rate) + ",_multi_scale:" + str(FLAGS.is_multi_scale) + ",hidden_nodes: "+str(FLAGS.num_neurons1)+"/"+str(FLAGS.num_neurons2) + "\n" else: outfileline = FLAGS.option + "_epoch:" + str(FLAGS.max_epochs) + ",_wavelet:"+str(FLAGS.wave_type) + ",_lr:" + str(FLAGS.learning_rate) + ",_multi_scale:" + str(FLAGS.is_multi_scale) + ",_train_set_using_level:" + str(FLAGS.scale_levels) + ",hidden_nodes: "+str(FLAGS.num_neurons1)+"/"+str(FLAGS.num_neurons2) + "\n" fout.write(outfileline) for each_eval in evaluation_list: fout.write(each_eval + ": " + str(round(np.mean(result_list_dict[each_eval]), 3)) + ",\t") fout.write('\n') return epoch_training_acc_list,epoch_val_acc_list,epoch_training_loss_list,epoch_val_loss_list else: return results def train_classic(method,filename_train,filename_test, trigger_flag,evalua_flag,is_binary_class,evaluation_list): return 
sclearn.Basemodel(method,filename_train,filename_test,trigger_flag,evalua_flag,evaluation_list) def train(method,filename_train,filename_test,trigger_flag,evalua_flag,is_binary_class,evaluation_list,wave_type='db1'): global data_x, data_y result_list_dict = defaultdict(list) for each in evaluation_list: result_list_dict[each] = [] if 'L' in method or 'RNN' in method: sys.stdout = tempstdout if method == '1L' or method == '2L' or method == '3L' \ or method == '4L' or method == '5L' or method == 'RNN': FLAGS.is_multi_scale = False elif 'AL' == method: FLAGS.is_multi_scale = False else: FLAGS.is_multi_scale = True FLAGS.wave_type = wave_type return train_lstm(method,filename_train,filename_test,trigger_flag,evalua_flag,is_binary_class,result_list_dict,evaluation_list) else: sys.stdout = tempstdout return train_classic(method,filename_train,filename_test,trigger_flag,evalua_flag,is_binary_class,result_list_dict,evaluation_list) def main(unused_argv): global tempstdout wave_type_list = ['haar'] multi_scale_value_list = [2,3,4,5,6,10] case_label = {'SVM':'SVM','NB':'NB','DT':'DT','Ada.Boost':'Ada.Boost','RF':'RF','1NN':'1NN','1NN-DTW':'DTW', 'SVMF':'SVMF','SVMW':'SVMW','MLP':'MLP','RNN':'RNN','1L':'LSTM','2L':'2-LSTM','3L':'3-LSTM',\ 'AL':'ALSTM','HL':'MSLSTM','HAL':'MSLSTM'} trigger_flag = 1 evalua_flag = True is_binary_class = True single_layer = True if is_binary_class: filename_list = ["HB_AS_Leak.txt","HB_Code_Red_I.txt","HB_Nimda.txt","HB_Slammer.txt"] else: filename_list = ["HB_ALL.txt"] if trigger_flag == 1 : if single_layer: case = ['MLP','RNN','1L','2L','3L','AL'] else: case = ['HL','HAL'] else: case = ["1NN"] if evalua_flag: evaluation_list = ["AUC", "G_MEAN", "ACCURACY", "F1_SCORE"] else: evaluation_list = ["FPR", "TPR","AUC","G_MEAN"] wave_type = wave_type_list[0] hidden_unit1_list = [8,16,32,64,128,256] hidden_unit2_list = [8,16,32,64,128] learning_rate_list = [0.1,0.05,0.01,0.001] for tab in range(len(filename_list)): case_list = [] train_acc_list = [] 
val_acc_list = [] train_loss_list = [] val_loss_list = [] if single_layer: combination_list = hidden_unit1_list else: combination_list = [] for each1 in hidden_unit1_list: for each2 in hidden_unit2_list: combination_list.append((each1, each2)) """ if filename_list[tab] == "HB_AS_Leak.txt": combination_list = [(32, 64), (32, 128), (64, 64)] elif filename_list[tab] == "HB_Code_Red_I.txt": combination_list = [(32, 32), (16, 8), (16, 64), (32, 64)] elif filename_list[tab] == "HB_Nimda.txt": combination_list = [(8, 32), (32, 64)] elif filename_list[tab] == "HB_Slammer.txt": combination_list = [(16, 8), (16, 32), (16, 64)] """ results = {} for each_case in case: if 1>0: case_list.append(case_label[each_case]) if trigger_flag: sys.stdout = tempstdout if each_case == 'MLP': if evalua_flag: nnkeras.Basemodel(each_case, filename_list[tab],trigger_flag,evalua_flag,is_binary_class,evaluation_list) else: results[case_label[each_case]] = nnkeras.Basemodel(each_case, filename_list[tab],trigger_flag,evalua_flag,is_binary_class,evaluation_list) else: if evalua_flag: for learning_rate in learning_rate_list: FLAGS.learning_rate = learning_rate for each_comb in combination_list: if not 'H' in each_case: FLAGS.num_neurons1 = each_comb else: FLAGS.num_neurons1, FLAGS.num_neurons2 = each_comb train_acc,val_acc,train_loss,val_loss = train(each_case,filename_list, filename_list[tab],trigger_flag,evalua_flag,is_binary_class,evaluation_list,wave_type) train_acc_list.append(train_acc) val_acc_list.append(val_acc) train_loss_list.append(train_loss) val_loss_list.append(val_loss) else: results[case_label[each_case]] = train(each_case,filename_list, filename_list[tab],trigger_flag,evalua_flag,is_binary_class,evaluation_list,wave_type) else: sys.stdout = tempstdout if evalua_flag: sclearn.Basemodel(each_case, filename_list[tab], trigger_flag, evalua_flag,is_binary_class,evaluation_list) else: results[case_label[each_case]] = sclearn.Basemodel(each_case, 
filename_list[tab],trigger_flag,evalua_flag,is_binary_class,evaluation_list) else: pass if not evalua_flag: visualize.plotAUC(results,case_list,filename_list[tab]) else: if trigger_flag: try: print() except: pass end = time.time() pprint("The time elapsed : " + str(end - start) + ' seconds.\n') if __name__ == "__main__": global tempstdout tempstdout = sys.stdout pprint("------------------------------------------------"+str(datetime.datetime.now())+"--------------------------------------------") start = time.time() tf.app.run()
true
true
f72402f269dd0023b44f39c7340a56de50e067b1
1,835
py
Python
bot.py
HeitorDJAk47Gamer/B0TPYbeta
e2c6707bcd74161bfb8b75a044833d23e66ed26b
[ "MIT" ]
null
null
null
bot.py
HeitorDJAk47Gamer/B0TPYbeta
e2c6707bcd74161bfb8b75a044833d23e66ed26b
[ "MIT" ]
null
null
null
bot.py
HeitorDJAk47Gamer/B0TPYbeta
e2c6707bcd74161bfb8b75a044833d23e66ed26b
[ "MIT" ]
null
null
null
import json import os import discord import asyncio import datetime from discord.ext import commands, tasks with open('config.json') as e: infos = json.load(e) token = infos['token'] prefix = infos['prefix'] lara = commands.Bot(command_prefix=prefix, case_insensitive=True, intents=discord.Intents.all()) @lara.event async def on_ready(): calc = lara.latency * 1000 pong = round(calc) stats.start() print(f'Nome: {lara.user} ID: {lara.user.id}') print(f'Membros Globais: {len(lara.users)}') print(f'Servidores Globais: {len(lara.guilds)}') print(f'Ping {pong} ms') @tasks.loop(minutes=10) async def stats(): await lara.change_presence(activity=discord.Activity(type=discord.ActivityType.listening, name=f'{len(lara.users)} Membros')) await asyncio.sleep(5 * 60) await lara.change_presence(activity=discord.Activity(type=discord.ActivityType.watching, name=f'{len(lara.guilds)} Server')) @lara.event async def on_message(message): if message.author == lara.user: return elif lara.user.mentioned_in(message): await message.channel.send(f'Meu prefixo é: **-**') await lara.process_commands(message) for filename in os.listdir('./cogs'): if filename.endswith('.py'): lara.load_extension(f'cogs.{filename[:-3]}') print(f'{filename[:-3]} carregado!') @lara.command() @commands.is_owner() async def hab(ctx, *, command): command = lara.get_command(command) if command is None: await ctx.send('insira o comando!') elif ctx.command == command: ctx.send('Não pode desabilitar esse comando!') else: command.enabled = not command.enabled com = 'ativado' if command.enabled else 'desativado' await ctx.send(f'Eu tenho {com} {command.qualified_name} para você!') lara.run(token)
29.126984
129
0.681744
import json import os import discord import asyncio import datetime from discord.ext import commands, tasks with open('config.json') as e: infos = json.load(e) token = infos['token'] prefix = infos['prefix'] lara = commands.Bot(command_prefix=prefix, case_insensitive=True, intents=discord.Intents.all()) @lara.event async def on_ready(): calc = lara.latency * 1000 pong = round(calc) stats.start() print(f'Nome: {lara.user} ID: {lara.user.id}') print(f'Membros Globais: {len(lara.users)}') print(f'Servidores Globais: {len(lara.guilds)}') print(f'Ping {pong} ms') @tasks.loop(minutes=10) async def stats(): await lara.change_presence(activity=discord.Activity(type=discord.ActivityType.listening, name=f'{len(lara.users)} Membros')) await asyncio.sleep(5 * 60) await lara.change_presence(activity=discord.Activity(type=discord.ActivityType.watching, name=f'{len(lara.guilds)} Server')) @lara.event async def on_message(message): if message.author == lara.user: return elif lara.user.mentioned_in(message): await message.channel.send(f'Meu prefixo é: **-**') await lara.process_commands(message) for filename in os.listdir('./cogs'): if filename.endswith('.py'): lara.load_extension(f'cogs.{filename[:-3]}') print(f'{filename[:-3]} carregado!') @lara.command() @commands.is_owner() async def hab(ctx, *, command): command = lara.get_command(command) if command is None: await ctx.send('insira o comando!') elif ctx.command == command: ctx.send('Não pode desabilitar esse comando!') else: command.enabled = not command.enabled com = 'ativado' if command.enabled else 'desativado' await ctx.send(f'Eu tenho {com} {command.qualified_name} para você!') lara.run(token)
true
true
f72403e3633318d3eef4d9f59835f9dbf420aea9
3,080
py
Python
addon/resources/community/livetvcaptcha.py
mar9a7louwa/marga
f20d815e14190d6ec95b24d52cf97ef4173079ef
[ "MIT" ]
null
null
null
addon/resources/community/livetvcaptcha.py
mar9a7louwa/marga
f20d815e14190d6ec95b24d52cf97ef4173079ef
[ "MIT" ]
null
null
null
addon/resources/community/livetvcaptcha.py
mar9a7louwa/marga
f20d815e14190d6ec95b24d52cf97ef4173079ef
[ "MIT" ]
null
null
null
from PIL import Image import hashlib import time import os import xbmcaddon addonPath = xbmcaddon.Addon().getAddonInfo("path") communityStreamPath = os.path.join(addonPath,'resources') communityStreamPath = os.path.join(communityStreamPath,'community') #print 'path is ',communityStreamPath import math class VectorCompare: def magnitude(self,concordance): total = 0 for word,count in concordance.iteritems(): total += count ** 2 return math.sqrt(total) def relation(self,concordance1, concordance2): relevance = 0 topvalue = 0 for word, count in concordance1.iteritems(): if concordance2.has_key(word): topvalue += count * concordance2[word] return topvalue / (self.magnitude(concordance1) * self.magnitude(concordance2)) def buildvector(im): d1 = {} count = 0 for i in im.getdata(): d1[count] = i count += 1 return d1 def getString(imgpath): v = VectorCompare() #iconset = ['0','1','2','3','4','5','6','7','8','9','0','a','b','c','d','e','f','g','h','i','j','k','l','m','n','o','p','q','r','s','t','u','v','w','x','y','z'] iconset = ['0','1','2','3','4','5','6','7','8','9'] imageset = [] #print 'communityStreamPath',communityStreamPath,os.path.join( communityStreamPath,'/captchaiconset/%s/'%('0')) for letter in iconset: for img in os.listdir(communityStreamPath+'/captchaiconset/%s/'%(letter)): temp = [] if img != "Thumbs.db": # windows check... 
temp.append(buildvector(Image.open( communityStreamPath+"/captchaiconset/%s/%s"%(letter,img)))) imageset.append({letter:temp}) #print img im = Image.open(imgpath) im2 = Image.new("P",im.size,255) im = im.convert("P") temp = {} for x in range(im.size[1]): for y in range(im.size[0]): pix = im.getpixel((y,x)) #print 'imgpix',pix temp[pix] = pix if pix==0:#pix == 220 or pix == 224: # these are the numbers to get im2.putpixel((y,x),0) #aa=im2.convert('RGB') #aa.save(imgpath + ".jpg", "JPEG") #print 'temp',temp inletter = False foundletter=False start = 0 end = 0 letters = [] for y in range(im2.size[0]): # slice across for x in range(im2.size[1]): # slice down pix = im2.getpixel((y,x)) #print 'pix',pix if pix != 255: inletter = True if foundletter == False and inletter == True: foundletter = True start = y if foundletter == True and inletter == False: foundletter = False end = y letters.append((start,end)) inletter=False count = 0 #print 'letters',letters retval="" for letter in letters: m = hashlib.md5() im3 = im2.crop(( letter[0] , 0, letter[1],im2.size[1] )) #aa=im3.convert('RGB') #aa.save(imgpath + str(count)+".gif", "GIF") guess = [] for image in imageset: for x,y in image.iteritems(): if len(y) != 0: guess.append( ( v.relation(y[0],buildvector(im3)),x) ) guess.sort(reverse=True) retval+=guess[0][1] #print "",guess[0] count += 1 return retval
25.245902
162
0.599351
from PIL import Image import hashlib import time import os import xbmcaddon addonPath = xbmcaddon.Addon().getAddonInfo("path") communityStreamPath = os.path.join(addonPath,'resources') communityStreamPath = os.path.join(communityStreamPath,'community') import math class VectorCompare: def magnitude(self,concordance): total = 0 for word,count in concordance.iteritems(): total += count ** 2 return math.sqrt(total) def relation(self,concordance1, concordance2): relevance = 0 topvalue = 0 for word, count in concordance1.iteritems(): if concordance2.has_key(word): topvalue += count * concordance2[word] return topvalue / (self.magnitude(concordance1) * self.magnitude(concordance2)) def buildvector(im): d1 = {} count = 0 for i in im.getdata(): d1[count] = i count += 1 return d1 def getString(imgpath): v = VectorCompare() iconset = ['0','1','2','3','4','5','6','7','8','9'] imageset = [] for letter in iconset: for img in os.listdir(communityStreamPath+'/captchaiconset/%s/'%(letter)): temp = [] if img != "Thumbs.db": temp.append(buildvector(Image.open( communityStreamPath+"/captchaiconset/%s/%s"%(letter,img)))) imageset.append({letter:temp}) im = Image.open(imgpath) im2 = Image.new("P",im.size,255) im = im.convert("P") temp = {} for x in range(im.size[1]): for y in range(im.size[0]): pix = im.getpixel((y,x)) temp[pix] = pix if pix==0: inletter = False foundletter=False start = 0 end = 0 letters = [] for y in range(im2.size[0]): for x in range(im2.size[1]): pix = im2.getpixel((y,x)) if pix != 255: inletter = True if foundletter == False and inletter == True: foundletter = True start = y if foundletter == True and inletter == False: foundletter = False end = y letters.append((start,end)) inletter=False count = 0 retval="" for letter in letters: m = hashlib.md5() im3 = im2.crop(( letter[0] , 0, letter[1],im2.size[1] )) guess = [] for image in imageset: for x,y in image.iteritems(): if len(y) != 0: guess.append( ( v.relation(y[0],buildvector(im3)),x) ) guess.sort(reverse=True) 
retval+=guess[0][1] count += 1 return retval
true
true
f7240418d759063b2c79292315a9b4fb2bce2595
1,854
py
Python
leetcode/0-250/175-15. 3Sum.py
palash24/algorithms-and-data-structures
164be7d1a501a21af808673888964bbab36243a1
[ "MIT" ]
23
2018-11-06T03:54:00.000Z
2022-03-14T13:30:40.000Z
leetcode/0-250/175-15. 3Sum.py
palash24/algorithms-and-data-structures
164be7d1a501a21af808673888964bbab36243a1
[ "MIT" ]
null
null
null
leetcode/0-250/175-15. 3Sum.py
palash24/algorithms-and-data-structures
164be7d1a501a21af808673888964bbab36243a1
[ "MIT" ]
5
2019-05-24T16:56:45.000Z
2022-03-10T17:29:10.000Z
# 15. 3Sum from collections import defaultdict class Solution: # TLE at test # 312 out of 313 def threeSum(self, nums): n = len(nums) dic = defaultdict(set) nums.sort() mini = nums[0] # d = {} for i in range(n): dic[nums[i]].add(i) # d.setdefault(nums[i], []).append(i) ans = [] table = set() for i in range(n): for j in range(i+1, n): k = -nums[i]-nums[j] if k < mini: break if k in dic: s = dic.get(k).copy() s.add(i) s.add(j) arr = [k, nums[i], nums[j]] arr.sort() key = ''.join(map(str, arr)) if len(s) >= 3 and key not in table: ans.append(arr) table.add(key) return ans def threeSum2(self, nums): ans, n = [], len(nums) nums.sort() for i in range(n-2): if nums[i] > 0: break if i > 0 and nums[i] == nums[i-1]: continue l, r = i + 1, n - 1 while l < r: s = nums[i] + nums[l] + nums[r] if s > 0: r -= 1 elif s < 0: l += 1 else: ans.append([nums[i], nums[l], nums[r]]) while l < r and nums[l] == nums[l+1]: l += 1 while l < r and nums[r] == nums[r-1]: r -= 1 l += 1 r -= 1 return ans sol = Solution() print(sol.threeSum([-1, 0, 1, 2, -1, -4])) print(sol.threeSum([0,0,0]))
30.393443
59
0.342503
from collections import defaultdict class Solution: m(self, nums): n = len(nums) dic = defaultdict(set) nums.sort() mini = nums[0] for i in range(n): dic[nums[i]].add(i) ans = [] table = set() for i in range(n): for j in range(i+1, n): k = -nums[i]-nums[j] if k < mini: break if k in dic: s = dic.get(k).copy() s.add(i) s.add(j) arr = [k, nums[i], nums[j]] arr.sort() key = ''.join(map(str, arr)) if len(s) >= 3 and key not in table: ans.append(arr) table.add(key) return ans def threeSum2(self, nums): ans, n = [], len(nums) nums.sort() for i in range(n-2): if nums[i] > 0: break if i > 0 and nums[i] == nums[i-1]: continue l, r = i + 1, n - 1 while l < r: s = nums[i] + nums[l] + nums[r] if s > 0: r -= 1 elif s < 0: l += 1 else: ans.append([nums[i], nums[l], nums[r]]) while l < r and nums[l] == nums[l+1]: l += 1 while l < r and nums[r] == nums[r-1]: r -= 1 l += 1 r -= 1 return ans sol = Solution() print(sol.threeSum([-1, 0, 1, 2, -1, -4])) print(sol.threeSum([0,0,0]))
true
true
f724041b5d2e83411bc60f733f4a4170bc7d026e
7,763
py
Python
ObjectDetNet/retinanet/dataloaders/custom_transforms.py
erinfolami/ZazuML
8dbe934c06612dd7917f38090701e3ead0337fb8
[ "MIT" ]
1
2020-12-31T18:02:41.000Z
2020-12-31T18:02:41.000Z
ObjectDetNet/retinanet/dataloaders/custom_transforms.py
PatiMohit/ZazuML
c5247859353cacf0e4a58f9c530a07038d9e12cf
[ "MIT" ]
null
null
null
ObjectDetNet/retinanet/dataloaders/custom_transforms.py
PatiMohit/ZazuML
c5247859353cacf0e4a58f9c530a07038d9e12cf
[ "MIT" ]
1
2020-08-09T09:23:25.000Z
2020-08-09T09:23:25.000Z
import math import torch import random import numpy as np import torch.nn as nn from numpy import int64 as int64 import torchvision.transforms as transforms from PIL import Image, ImageOps, ImageFilter class Normalize(object): """Normalize a tensor image with mean and standard deviation. Args: mean (tuple): means for each channel. std (tuple): standard deviations for each channel. """ def __init__(self, mean=(0., 0., 0.), std=(1., 1., 1.)): self.mean = mean self.std = std def __call__(self, sample): img = sample['image'] mask = sample['label'] img = np.array(img).astype(np.float32) mask = np.array(mask).astype(np.float32) img /= 255.0 img -= self.mean img /= self.std return {'image': img, 'label': mask} class ToTensor(object): """Convert ndarrays in sample to Tensors.""" def __call__(self, sample): # swap color axis because # numpy image: H x W x C # torch image: C X H X W img = sample['image'] mask = sample['label'] img = np.array(img).astype(np.float32).transpose((2, 0, 1)) mask = np.array(mask).astype(np.float32) img = torch.from_numpy(img).float() mask = torch.from_numpy(mask).float() return {'image': img, 'label': mask} class RandomHorizontalFlip(object): def __call__(self, sample): img = sample['image'] mask = sample['label'] if random.random() < 0.5: img = img.transpose(Image.FLIP_LEFT_RIGHT) mask = mask.transpose(Image.FLIP_LEFT_RIGHT) return {'image': img, 'label': mask} class RandomRotate(object): def __init__(self, degree): self.degree = degree def __call__(self, sample): img = sample['image'] mask = sample['label'] rotate_degree = random.uniform(-1 * self.degree, self.degree) img = img.rotate(rotate_degree, Image.BILINEAR) mask = mask.rotate(rotate_degree, Image.NEAREST) return {'image': img, 'label': mask} class RandomGaussianBlur(object): def __call__(self, sample): img = sample['image'] mask = sample['label'] if random.random() < 0.5: img = img.filter(ImageFilter.GaussianBlur( radius=random.random())) return {'image': img, 'label': mask} class 
RandomScaleCrop(object): def __init__(self, base_size, crop_size, fill=0): self.base_size = base_size self.crop_size = crop_size self.fill = fill def __call__(self, sample): img = sample['image'] mask = sample['label'] # random scale (short edge) short_size = random.randint(int(self.base_size * 0.5), int(self.base_size * 2.0)) w, h = img.size if h > w: ow = short_size oh = int(1.0 * h * ow / w) else: oh = short_size ow = int(1.0 * w * oh / h) img = img.resize((ow, oh), Image.BILINEAR) mask = mask.resize((ow, oh), Image.NEAREST) # pad crop if short_size < self.crop_size: padh = self.crop_size - oh if oh < self.crop_size else 0 padw = self.crop_size - ow if ow < self.crop_size else 0 img = ImageOps.expand(img, border=(0, 0, padw, padh), fill=0) mask = ImageOps.expand(mask, border=(0, 0, padw, padh), fill=self.fill) # random crop crop_size w, h = img.size x1 = random.randint(0, w - self.crop_size) y1 = random.randint(0, h - self.crop_size) img = img.crop((x1, y1, x1 + self.crop_size, y1 + self.crop_size)) mask = mask.crop((x1, y1, x1 + self.crop_size, y1 + self.crop_size)) return {'image': img, 'label': mask} class FixScaleCrop(object): def __init__(self, crop_size): self.crop_size = crop_size def __call__(self, sample): img = sample['image'] mask = sample['label'] w, h = img.size if w > h: oh = self.crop_size ow = int(1.0 * w * oh / h) else: ow = self.crop_size oh = int(1.0 * h * ow / w) img = img.resize((ow, oh), Image.BILINEAR) mask = mask.resize((ow, oh), Image.NEAREST) # center crop w, h = img.size x1 = int(round((w - self.crop_size) / 2.)) y1 = int(round((h - self.crop_size) / 2.)) img = img.crop((x1, y1, x1 + self.crop_size, y1 + self.crop_size)) mask = mask.crop((x1, y1, x1 + self.crop_size, y1 + self.crop_size)) return {'image': img, 'label': mask} # resize to 512*1024 class FixedResize(object): """change the short edge length to size""" def __init__(self, resize=512): self.size1 = resize # size= 512 def __call__(self, sample): img = sample['image'] mask = 
sample['label'] assert img.size == mask.size w, h = img.size if w > h: oh = self.size1 ow = int(1.0 * w * oh / h) else: ow = self.size1 oh = int(1.0 * h * ow / w) img = img.resize((ow, oh), Image.BILINEAR) mask = mask.resize((ow, oh), Image.NEAREST) return {'image': img, 'label': mask} # random crop 321*321 class RandomCrop(object): def __init__(self, crop_size=320): self.crop_size = crop_size def __call__(self, sample): img = sample['image'] mask = sample['label'] w, h = img.size x1 = random.randint(0, w - self.crop_size) y1 = random.randint(0, h - self.crop_size) img = img.crop((x1, y1, x1 + self.crop_size, y1 + self.crop_size)) mask = mask.crop((x1, y1, x1 + self.crop_size, y1 + self.crop_size)) return {'image': img, 'label': mask} class RandomScale(object): def __init__(self, scales=(1,)): self.scales = scales def __call__(self, sample): img = sample['image'] mask = sample['label'] w, h = img.size scale = random.choice(self.scales) w, h = int(w * scale), int(h * scale) return {'image': img, 'label': mask} class TransformTr(object): def __init__(self, resize, multi_scale=None): if multi_scale is None: self.composed_transforms = transforms.Compose([ FixedResize(resize=resize), # RandomCrop(crop_size=args.crop_size), # tr.RandomScaleCrop(base_size=self.args.base_size, crop_size=self.args.crop_size, fill=255), # tr.RandomGaussianBlur(), # Normalize(mean, std), # ToTensor() ]) else: self.composed_transforms = transforms.Compose([ FixedResize(resize=args.resize), RandomScale(scales=args.multi_scale), RandomCrop(crop_size=args.crop_size), # tr.RandomScaleCrop(base_size=self.args.base_size, crop_size=self.args.crop_size, fill=255), # tr.RandomGaussianBlur(), Normalize(mean, std), ToTensor()]) def __call__(self, sample): return self.composed_transforms(sample) class TransformVal(object): def __init__(self, args, mean, std): self.composed_transforms = transforms.Compose([ FixedResize(resize=args.resize), FixScaleCrop(crop_size=args.crop_size), # TODO:CHECK THIS 
Normalize(mean, std), ToTensor()]) def __call__(self, sample): return self.composed_transforms(sample)
31.176707
109
0.556744
import math import torch import random import numpy as np import torch.nn as nn from numpy import int64 as int64 import torchvision.transforms as transforms from PIL import Image, ImageOps, ImageFilter class Normalize(object): def __init__(self, mean=(0., 0., 0.), std=(1., 1., 1.)): self.mean = mean self.std = std def __call__(self, sample): img = sample['image'] mask = sample['label'] img = np.array(img).astype(np.float32) mask = np.array(mask).astype(np.float32) img /= 255.0 img -= self.mean img /= self.std return {'image': img, 'label': mask} class ToTensor(object): def __call__(self, sample): img = sample['image'] mask = sample['label'] img = np.array(img).astype(np.float32).transpose((2, 0, 1)) mask = np.array(mask).astype(np.float32) img = torch.from_numpy(img).float() mask = torch.from_numpy(mask).float() return {'image': img, 'label': mask} class RandomHorizontalFlip(object): def __call__(self, sample): img = sample['image'] mask = sample['label'] if random.random() < 0.5: img = img.transpose(Image.FLIP_LEFT_RIGHT) mask = mask.transpose(Image.FLIP_LEFT_RIGHT) return {'image': img, 'label': mask} class RandomRotate(object): def __init__(self, degree): self.degree = degree def __call__(self, sample): img = sample['image'] mask = sample['label'] rotate_degree = random.uniform(-1 * self.degree, self.degree) img = img.rotate(rotate_degree, Image.BILINEAR) mask = mask.rotate(rotate_degree, Image.NEAREST) return {'image': img, 'label': mask} class RandomGaussianBlur(object): def __call__(self, sample): img = sample['image'] mask = sample['label'] if random.random() < 0.5: img = img.filter(ImageFilter.GaussianBlur( radius=random.random())) return {'image': img, 'label': mask} class RandomScaleCrop(object): def __init__(self, base_size, crop_size, fill=0): self.base_size = base_size self.crop_size = crop_size self.fill = fill def __call__(self, sample): img = sample['image'] mask = sample['label'] short_size = random.randint(int(self.base_size * 0.5), 
int(self.base_size * 2.0)) w, h = img.size if h > w: ow = short_size oh = int(1.0 * h * ow / w) else: oh = short_size ow = int(1.0 * w * oh / h) img = img.resize((ow, oh), Image.BILINEAR) mask = mask.resize((ow, oh), Image.NEAREST) if short_size < self.crop_size: padh = self.crop_size - oh if oh < self.crop_size else 0 padw = self.crop_size - ow if ow < self.crop_size else 0 img = ImageOps.expand(img, border=(0, 0, padw, padh), fill=0) mask = ImageOps.expand(mask, border=(0, 0, padw, padh), fill=self.fill) w, h = img.size x1 = random.randint(0, w - self.crop_size) y1 = random.randint(0, h - self.crop_size) img = img.crop((x1, y1, x1 + self.crop_size, y1 + self.crop_size)) mask = mask.crop((x1, y1, x1 + self.crop_size, y1 + self.crop_size)) return {'image': img, 'label': mask} class FixScaleCrop(object): def __init__(self, crop_size): self.crop_size = crop_size def __call__(self, sample): img = sample['image'] mask = sample['label'] w, h = img.size if w > h: oh = self.crop_size ow = int(1.0 * w * oh / h) else: ow = self.crop_size oh = int(1.0 * h * ow / w) img = img.resize((ow, oh), Image.BILINEAR) mask = mask.resize((ow, oh), Image.NEAREST) w, h = img.size x1 = int(round((w - self.crop_size) / 2.)) y1 = int(round((h - self.crop_size) / 2.)) img = img.crop((x1, y1, x1 + self.crop_size, y1 + self.crop_size)) mask = mask.crop((x1, y1, x1 + self.crop_size, y1 + self.crop_size)) return {'image': img, 'label': mask} class FixedResize(object): def __init__(self, resize=512): self.size1 = resize def __call__(self, sample): img = sample['image'] mask = sample['label'] assert img.size == mask.size w, h = img.size if w > h: oh = self.size1 ow = int(1.0 * w * oh / h) else: ow = self.size1 oh = int(1.0 * h * ow / w) img = img.resize((ow, oh), Image.BILINEAR) mask = mask.resize((ow, oh), Image.NEAREST) return {'image': img, 'label': mask} class RandomCrop(object): def __init__(self, crop_size=320): self.crop_size = crop_size def __call__(self, sample): img = sample['image'] mask 
= sample['label'] w, h = img.size x1 = random.randint(0, w - self.crop_size) y1 = random.randint(0, h - self.crop_size) img = img.crop((x1, y1, x1 + self.crop_size, y1 + self.crop_size)) mask = mask.crop((x1, y1, x1 + self.crop_size, y1 + self.crop_size)) return {'image': img, 'label': mask} class RandomScale(object): def __init__(self, scales=(1,)): self.scales = scales def __call__(self, sample): img = sample['image'] mask = sample['label'] w, h = img.size scale = random.choice(self.scales) w, h = int(w * scale), int(h * scale) return {'image': img, 'label': mask} class TransformTr(object): def __init__(self, resize, multi_scale=None): if multi_scale is None: self.composed_transforms = transforms.Compose([ FixedResize(resize=resize), ]) else: self.composed_transforms = transforms.Compose([ FixedResize(resize=args.resize), RandomScale(scales=args.multi_scale), RandomCrop(crop_size=args.crop_size), Normalize(mean, std), ToTensor()]) def __call__(self, sample): return self.composed_transforms(sample) class TransformVal(object): def __init__(self, args, mean, std): self.composed_transforms = transforms.Compose([ FixedResize(resize=args.resize), FixScaleCrop(crop_size=args.crop_size), Normalize(mean, std), ToTensor()]) def __call__(self, sample): return self.composed_transforms(sample)
true
true
f7240423341243823327a6a3db2d9cd1d055c4df
4,407
py
Python
models/resnet.py
DwaraknathT/sparsify
bbe3b6e492c2bc8fdd9dd37d87ffc5f51f520792
[ "MIT" ]
null
null
null
models/resnet.py
DwaraknathT/sparsify
bbe3b6e492c2bc8fdd9dd37d87ffc5f51f520792
[ "MIT" ]
null
null
null
models/resnet.py
DwaraknathT/sparsify
bbe3b6e492c2bc8fdd9dd37d87ffc5f51f520792
[ "MIT" ]
null
null
null
''' Properly implemented ResNet-s for CIFAR10 as described in paper [1]. The implementation and structure of this file is hugely influenced by [2] which is implemented for ImageNet and doesn't have option A for identity. Moreover, most of the implementations on the web is copy-paste from torchvision's resnet and has wrong number of params. Proper ResNet-s for CIFAR10 (for fair comparision and etc.) has following number of layers and parameters: name | layers | params ResNet20 | 20 | 0.27M ResNet32 | 32 | 0.46M ResNet44 | 44 | 0.66M ResNet56 | 56 | 0.85M ResNet110 | 110 | 1.7M ResNet1202| 1202 | 19.4m which this implementation indeed has. Reference: [1] Kaiming He, Xiangyu Zhang, Shaoqing Ren, Jian Sun Deep Residual Learning for Image Recognition. arXiv:1512.03385 [2] https://github.com/pytorch/vision/blob/master/torchvision/models/resnet.py If you use this implementation in you work, please don't forget to mention the author, Yerlan Idelbayev. ''' import torch.nn as nn import torch.nn.functional as F import torch.nn.init as init from layers.layers import MaskedConv __all__ = ['ResNet', 'resnet20', 'resnet32', 'resnet44', 'resnet56', 'resnet110', 'resnet1202'] def _weights_init(m): classname = m.__class__.__name__ # print(classname) if isinstance(m, nn.Linear) or isinstance(m, MaskedConv): init.xavier_normal_(m.weight) _AFFINE = True class LambdaLayer(nn.Module): def __init__(self, lambd): super(LambdaLayer, self).__init__() self.lambd = lambd def forward(self, x): return self.lambd(x) class BasicBlock(nn.Module): expansion = 1 def __init__(self, in_planes, planes, stride=1): super(BasicBlock, self).__init__() self.conv1 = MaskedConv(in_planes, planes, kernel_size=3, stride=stride, padding=1, bias=False) self.bn1 = nn.BatchNorm2d(planes, affine=_AFFINE) self.conv2 = MaskedConv(planes, planes, kernel_size=3, stride=1, padding=1, bias=False) self.bn2 = nn.BatchNorm2d(planes, affine=_AFFINE) self.downsample = None self.bn3 = None if stride != 1 or in_planes != planes: 
self.downsample = nn.Sequential( MaskedConv(in_planes, self.expansion * planes, kernel_size=1, stride=stride, bias=False)) self.bn3 = nn.BatchNorm2d(self.expansion * planes, affine=_AFFINE) def forward(self, x): # x: batch_size * in_c * h * w residual = x out = F.relu(self.bn1(self.conv1(x))) out = self.bn2(self.conv2(out)) if self.downsample is not None: residual = self.bn3(self.downsample(x)) out += residual out = F.relu(out) return out class ResNet(nn.Module): def __init__(self, block, num_blocks, num_classes=10): super(ResNet, self).__init__() _outputs = [32, 64, 128] self.in_planes = _outputs[0] self.conv1 = MaskedConv(3, _outputs[0], kernel_size=3, stride=1, padding=1, bias=False) self.bn = nn.BatchNorm2d(_outputs[0], affine=_AFFINE) self.layer1 = self._make_layer(block, _outputs[0], num_blocks[0], stride=1) self.layer2 = self._make_layer(block, _outputs[1], num_blocks[1], stride=2) self.layer3 = self._make_layer(block, _outputs[2], num_blocks[2], stride=2) self.linear = nn.Linear(_outputs[2], num_classes) self.apply(_weights_init) def _make_layer(self, block, planes, num_blocks, stride): strides = [stride] + [1] * (num_blocks - 1) layers = [] for stride in strides: layers.append(block(self.in_planes, planes, stride)) self.in_planes = planes * block.expansion return nn.Sequential(*layers) def forward(self, x): out = F.relu(self.bn(self.conv1(x))) out = self.layer1(out) out = self.layer2(out) out = self.layer3(out) out = F.avg_pool2d(out, out.size()[3]) out = out.view(out.size(0), -1) out = self.linear(out) return out def resnet20(num_classes): return ResNet(BasicBlock, [3, 3, 3], num_classes=num_classes) def resnet32(num_classes): return ResNet(BasicBlock, [5, 5, 5], num_classes=num_classes) def resnet44(num_classes): return ResNet(BasicBlock, [7, 7, 7], num_classes=num_classes) def resnet56(num_classes): return ResNet(BasicBlock, [9, 9, 9], num_classes=num_classes) def resnet110(num_classes): return ResNet(BasicBlock, [18, 18, 18], num_classes=num_classes) 
def resnet1202(num_classes): return ResNet(BasicBlock, [200, 200, 200], num_clases=num_classes)
30.604167
99
0.701157
import torch.nn as nn import torch.nn.functional as F import torch.nn.init as init from layers.layers import MaskedConv __all__ = ['ResNet', 'resnet20', 'resnet32', 'resnet44', 'resnet56', 'resnet110', 'resnet1202'] def _weights_init(m): classname = m.__class__.__name__ if isinstance(m, nn.Linear) or isinstance(m, MaskedConv): init.xavier_normal_(m.weight) _AFFINE = True class LambdaLayer(nn.Module): def __init__(self, lambd): super(LambdaLayer, self).__init__() self.lambd = lambd def forward(self, x): return self.lambd(x) class BasicBlock(nn.Module): expansion = 1 def __init__(self, in_planes, planes, stride=1): super(BasicBlock, self).__init__() self.conv1 = MaskedConv(in_planes, planes, kernel_size=3, stride=stride, padding=1, bias=False) self.bn1 = nn.BatchNorm2d(planes, affine=_AFFINE) self.conv2 = MaskedConv(planes, planes, kernel_size=3, stride=1, padding=1, bias=False) self.bn2 = nn.BatchNorm2d(planes, affine=_AFFINE) self.downsample = None self.bn3 = None if stride != 1 or in_planes != planes: self.downsample = nn.Sequential( MaskedConv(in_planes, self.expansion * planes, kernel_size=1, stride=stride, bias=False)) self.bn3 = nn.BatchNorm2d(self.expansion * planes, affine=_AFFINE) def forward(self, x): residual = x out = F.relu(self.bn1(self.conv1(x))) out = self.bn2(self.conv2(out)) if self.downsample is not None: residual = self.bn3(self.downsample(x)) out += residual out = F.relu(out) return out class ResNet(nn.Module): def __init__(self, block, num_blocks, num_classes=10): super(ResNet, self).__init__() _outputs = [32, 64, 128] self.in_planes = _outputs[0] self.conv1 = MaskedConv(3, _outputs[0], kernel_size=3, stride=1, padding=1, bias=False) self.bn = nn.BatchNorm2d(_outputs[0], affine=_AFFINE) self.layer1 = self._make_layer(block, _outputs[0], num_blocks[0], stride=1) self.layer2 = self._make_layer(block, _outputs[1], num_blocks[1], stride=2) self.layer3 = self._make_layer(block, _outputs[2], num_blocks[2], stride=2) self.linear = 
nn.Linear(_outputs[2], num_classes) self.apply(_weights_init) def _make_layer(self, block, planes, num_blocks, stride): strides = [stride] + [1] * (num_blocks - 1) layers = [] for stride in strides: layers.append(block(self.in_planes, planes, stride)) self.in_planes = planes * block.expansion return nn.Sequential(*layers) def forward(self, x): out = F.relu(self.bn(self.conv1(x))) out = self.layer1(out) out = self.layer2(out) out = self.layer3(out) out = F.avg_pool2d(out, out.size()[3]) out = out.view(out.size(0), -1) out = self.linear(out) return out def resnet20(num_classes): return ResNet(BasicBlock, [3, 3, 3], num_classes=num_classes) def resnet32(num_classes): return ResNet(BasicBlock, [5, 5, 5], num_classes=num_classes) def resnet44(num_classes): return ResNet(BasicBlock, [7, 7, 7], num_classes=num_classes) def resnet56(num_classes): return ResNet(BasicBlock, [9, 9, 9], num_classes=num_classes) def resnet110(num_classes): return ResNet(BasicBlock, [18, 18, 18], num_classes=num_classes) def resnet1202(num_classes): return ResNet(BasicBlock, [200, 200, 200], num_clases=num_classes)
true
true
f72404dd1d78fb904526c04c15ecc01bbd1e1401
1,392
py
Python
api/kiveapi/endpoint_manager.py
dmacmillan/Kive
76bc8f289f66fb133f78cb6d5689568b7d015915
[ "BSD-3-Clause" ]
1
2021-12-22T06:10:01.000Z
2021-12-22T06:10:01.000Z
api/kiveapi/endpoint_manager.py
dmacmillan/Kive
76bc8f289f66fb133f78cb6d5689568b7d015915
[ "BSD-3-Clause" ]
null
null
null
api/kiveapi/endpoint_manager.py
dmacmillan/Kive
76bc8f289f66fb133f78cb6d5689568b7d015915
[ "BSD-3-Clause" ]
null
null
null
import re class EndpointManager(object): def __init__(self, session): self.session = session def __getattr__(self, name): return SessionContext(self.session, name) class SessionContext(object): def __init__(self, session, name): self.session = session self.prefix = '/api/{}/'.format(name) def adjust_args(self, args): new_args = list(args) if new_args: url = str(new_args.pop(0)) if re.match(r'^\d+$', url): # Make it easier to post to an id, if trailing slash required. url += '/' else: url = '' new_args.insert(0, self.prefix + url) return new_args def get(self, *args, **kwargs): return self.session.get(*(self.adjust_args(args)), **kwargs).json() def head(self, *args, **kwargs): return self.session.head(*(self.adjust_args(args)), **kwargs).headers def filter(self, *args, **kwargs): return self.session.filter(self.prefix, *args, **kwargs).json() def post(self, *args, **kwargs): return self.session.post(*(self.adjust_args(args)), **kwargs).json() def patch(self, *args, **kwargs): return self.session.patch(*(self.adjust_args(args)), **kwargs).json() def delete(self, *args, **kwargs): self.session.delete(*(self.adjust_args(args)), **kwargs)
30.26087
78
0.591954
import re class EndpointManager(object): def __init__(self, session): self.session = session def __getattr__(self, name): return SessionContext(self.session, name) class SessionContext(object): def __init__(self, session, name): self.session = session self.prefix = '/api/{}/'.format(name) def adjust_args(self, args): new_args = list(args) if new_args: url = str(new_args.pop(0)) if re.match(r'^\d+$', url): url += '/' else: url = '' new_args.insert(0, self.prefix + url) return new_args def get(self, *args, **kwargs): return self.session.get(*(self.adjust_args(args)), **kwargs).json() def head(self, *args, **kwargs): return self.session.head(*(self.adjust_args(args)), **kwargs).headers def filter(self, *args, **kwargs): return self.session.filter(self.prefix, *args, **kwargs).json() def post(self, *args, **kwargs): return self.session.post(*(self.adjust_args(args)), **kwargs).json() def patch(self, *args, **kwargs): return self.session.patch(*(self.adjust_args(args)), **kwargs).json() def delete(self, *args, **kwargs): self.session.delete(*(self.adjust_args(args)), **kwargs)
true
true