hexsha
string
size
int64
ext
string
lang
string
max_stars_repo_path
string
max_stars_repo_name
string
max_stars_repo_head_hexsha
string
max_stars_repo_licenses
list
max_stars_count
int64
max_stars_repo_stars_event_min_datetime
string
max_stars_repo_stars_event_max_datetime
string
max_issues_repo_path
string
max_issues_repo_name
string
max_issues_repo_head_hexsha
string
max_issues_repo_licenses
list
max_issues_count
int64
max_issues_repo_issues_event_min_datetime
string
max_issues_repo_issues_event_max_datetime
string
max_forks_repo_path
string
max_forks_repo_name
string
max_forks_repo_head_hexsha
string
max_forks_repo_licenses
list
max_forks_count
int64
max_forks_repo_forks_event_min_datetime
string
max_forks_repo_forks_event_max_datetime
string
content
string
avg_line_length
float64
max_line_length
int64
alphanum_fraction
float64
qsc_code_num_words_quality_signal
int64
qsc_code_num_chars_quality_signal
float64
qsc_code_mean_word_length_quality_signal
float64
qsc_code_frac_words_unique_quality_signal
float64
qsc_code_frac_chars_top_2grams_quality_signal
float64
qsc_code_frac_chars_top_3grams_quality_signal
float64
qsc_code_frac_chars_top_4grams_quality_signal
float64
qsc_code_frac_chars_dupe_5grams_quality_signal
float64
qsc_code_frac_chars_dupe_6grams_quality_signal
float64
qsc_code_frac_chars_dupe_7grams_quality_signal
float64
qsc_code_frac_chars_dupe_8grams_quality_signal
float64
qsc_code_frac_chars_dupe_9grams_quality_signal
float64
qsc_code_frac_chars_dupe_10grams_quality_signal
float64
qsc_code_frac_chars_replacement_symbols_quality_signal
float64
qsc_code_frac_chars_digital_quality_signal
float64
qsc_code_frac_chars_whitespace_quality_signal
float64
qsc_code_size_file_byte_quality_signal
float64
qsc_code_num_lines_quality_signal
float64
qsc_code_num_chars_line_max_quality_signal
float64
qsc_code_num_chars_line_mean_quality_signal
float64
qsc_code_frac_chars_alphabet_quality_signal
float64
qsc_code_frac_chars_comments_quality_signal
float64
qsc_code_cate_xml_start_quality_signal
float64
qsc_code_frac_lines_dupe_lines_quality_signal
float64
qsc_code_cate_autogen_quality_signal
float64
qsc_code_frac_lines_long_string_quality_signal
float64
qsc_code_frac_chars_string_length_quality_signal
float64
qsc_code_frac_chars_long_word_length_quality_signal
float64
qsc_code_frac_lines_string_concat_quality_signal
float64
qsc_code_cate_encoded_data_quality_signal
float64
qsc_code_frac_chars_hex_words_quality_signal
float64
qsc_code_frac_lines_prompt_comments_quality_signal
float64
qsc_code_frac_lines_assert_quality_signal
float64
qsc_codepython_cate_ast_quality_signal
float64
qsc_codepython_frac_lines_func_ratio_quality_signal
float64
qsc_codepython_cate_var_zero_quality_signal
bool
qsc_codepython_frac_lines_pass_quality_signal
float64
qsc_codepython_frac_lines_import_quality_signal
float64
qsc_codepython_frac_lines_simplefunc_quality_signal
float64
qsc_codepython_score_lines_no_logic_quality_signal
float64
qsc_codepython_frac_lines_print_quality_signal
float64
qsc_code_num_words
int64
qsc_code_num_chars
int64
qsc_code_mean_word_length
int64
qsc_code_frac_words_unique
null
qsc_code_frac_chars_top_2grams
int64
qsc_code_frac_chars_top_3grams
int64
qsc_code_frac_chars_top_4grams
int64
qsc_code_frac_chars_dupe_5grams
int64
qsc_code_frac_chars_dupe_6grams
int64
qsc_code_frac_chars_dupe_7grams
int64
qsc_code_frac_chars_dupe_8grams
int64
qsc_code_frac_chars_dupe_9grams
int64
qsc_code_frac_chars_dupe_10grams
int64
qsc_code_frac_chars_replacement_symbols
int64
qsc_code_frac_chars_digital
int64
qsc_code_frac_chars_whitespace
int64
qsc_code_size_file_byte
int64
qsc_code_num_lines
int64
qsc_code_num_chars_line_max
int64
qsc_code_num_chars_line_mean
int64
qsc_code_frac_chars_alphabet
int64
qsc_code_frac_chars_comments
int64
qsc_code_cate_xml_start
int64
qsc_code_frac_lines_dupe_lines
int64
qsc_code_cate_autogen
int64
qsc_code_frac_lines_long_string
int64
qsc_code_frac_chars_string_length
int64
qsc_code_frac_chars_long_word_length
int64
qsc_code_frac_lines_string_concat
null
qsc_code_cate_encoded_data
int64
qsc_code_frac_chars_hex_words
int64
qsc_code_frac_lines_prompt_comments
int64
qsc_code_frac_lines_assert
int64
qsc_codepython_cate_ast
int64
qsc_codepython_frac_lines_func_ratio
int64
qsc_codepython_cate_var_zero
int64
qsc_codepython_frac_lines_pass
int64
qsc_codepython_frac_lines_import
int64
qsc_codepython_frac_lines_simplefunc
int64
qsc_codepython_score_lines_no_logic
int64
qsc_codepython_frac_lines_print
int64
effective
string
hits
int64
e51dd8ba98ca66dd0a99c7beb9b1e193b3de372f
132
py
Python
moai/supervision/homoscedastic.py
tzole1155/moai
d1afb3aaf8ddcd7a1c98b84d6365afb846ae3180
[ "Apache-2.0" ]
10
2021-04-02T11:21:33.000Z
2022-01-18T18:32:32.000Z
moai/supervision/homoscedastic.py
tzole1155/moai
d1afb3aaf8ddcd7a1c98b84d6365afb846ae3180
[ "Apache-2.0" ]
1
2022-03-22T20:10:55.000Z
2022-03-24T13:11:02.000Z
moai/supervision/homoscedastic.py
tzole1155/moai
d1afb3aaf8ddcd7a1c98b84d6365afb846ae3180
[ "Apache-2.0" ]
3
2021-05-16T20:47:40.000Z
2021-12-01T21:15:36.000Z
#TODO #NOTE: https://arxiv.org/pdf/1705.07115.pdf #NOTE: https://paperswithcode.com/paper/multi-task-learning-using-uncertainty-to
44
81
0.772727
20
132
5.1
0.85
0.176471
0
0
0
0
0
0
0
0
0
0.071429
0.045455
132
3
81
44
0.738095
0.954545
0
null
0
null
0
0
null
0
0
0.333333
null
1
null
true
0
0
null
null
null
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
1
0
0
0
0
0
0
null
0
0
1
0
0
0
1
0
0
0
0
0
0
5
e543816ce06982d262d10b48ac723322feb99ca8
115
py
Python
socialcasting/__init__.py
ihuston/socialcasting
9ce16722f2ac87356ab63422f708ea3b5be8483e
[ "MIT" ]
3
2015-12-11T14:31:17.000Z
2016-06-27T08:35:48.000Z
socialcasting/__init__.py
ihuston/socialcasting
9ce16722f2ac87356ab63422f708ea3b5be8483e
[ "MIT" ]
null
null
null
socialcasting/__init__.py
ihuston/socialcasting
9ce16722f2ac87356ab63422f708ea3b5be8483e
[ "MIT" ]
null
null
null
""" Socialcasting - a simple Python wrapper for the Socialcast API """ from .api import * from .analysis import *
19.166667
66
0.721739
15
115
5.533333
0.8
0
0
0
0
0
0
0
0
0
0
0
0.182609
115
5
67
23
0.882979
0.53913
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
5
e54e91add9e8651af826a73861d389b649a2febc
25
py
Python
python/submodules/foo/__init__.py
robotlightsyou/test
015f13943fc402d8ce86c5f6d2f5a7d032b3340a
[ "MIT" ]
2
2019-05-26T15:09:34.000Z
2021-09-12T08:01:23.000Z
python/submodules/foo/__init__.py
robotlightsyou/test
015f13943fc402d8ce86c5f6d2f5a7d032b3340a
[ "MIT" ]
null
null
null
python/submodules/foo/__init__.py
robotlightsyou/test
015f13943fc402d8ce86c5f6d2f5a7d032b3340a
[ "MIT" ]
1
2021-04-11T20:28:21.000Z
2021-04-11T20:28:21.000Z
print('foo/__init__.py')
12.5
24
0.72
4
25
3.5
1
0
0
0
0
0
0
0
0
0
0
0
0.04
25
1
25
25
0.583333
0
0
0
0
0
0.6
0
0
0
0
0
0
1
0
true
0
0
0
0
1
1
1
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
0
0
0
1
0
5
e5ac46f5d0cad826eace41c0b40d18007550805b
20,281
py
Python
src/anchorpy/clientgen/common.py
kevinheavey/anchorpy
d4cc28365c6adaeaec7f5001fa6b8a3e719b41ad
[ "MIT" ]
87
2021-09-26T18:14:07.000Z
2022-03-28T08:22:24.000Z
src/anchorpy/clientgen/common.py
kevinheavey/anchorpy
d4cc28365c6adaeaec7f5001fa6b8a3e719b41ad
[ "MIT" ]
15
2021-10-07T16:12:23.000Z
2022-03-20T21:04:40.000Z
src/anchorpy/clientgen/common.py
kevinheavey/anchorpy
d4cc28365c6adaeaec7f5001fa6b8a3e719b41ad
[ "MIT" ]
16
2021-10-16T04:40:28.000Z
2022-03-18T16:49:40.000Z
"""Code generation utilities.""" from typing import Optional from pyheck import snake from anchorpy.idl import ( Idl, _IdlType, _IdlTypeVec, _IdlTypeOption, _IdlTypeCOption, _IdlTypeDefined, _IdlTypeDefTyStruct, _IdlTypeArray, _IdlField, ) _DEFAULT_DEFINED_TYPES_PREFIX = "types." INT_TYPES = {"u8", "i8", "u16", "i16", "u32", "i32", "u64", "i64", "u128", "i128"} FLOAT_TYPES = {"f32", "f64"} NUMBER_TYPES = INT_TYPES | FLOAT_TYPES def _fields_interface_name(type_name: str) -> str: return f"{type_name}Fields" def _value_interface_name(type_name: str) -> str: return f"{type_name}Value" def _kind_interface_name(type_name: str) -> str: return f"{type_name}Kind" def _json_interface_name(type_name: str) -> str: return f"{type_name}JSON" def _py_type_from_idl( idl: Idl, ty: _IdlType, types_relative_imports: bool, use_fields_interface_for_struct: bool, ) -> str: if isinstance(ty, _IdlTypeVec): inner_type = _py_type_from_idl( idl=idl, ty=ty.vec, types_relative_imports=types_relative_imports, use_fields_interface_for_struct=use_fields_interface_for_struct, ) return f"list[{inner_type}]" if isinstance(ty, _IdlTypeOption): inner_type = _py_type_from_idl( idl=idl, ty=ty.option, types_relative_imports=types_relative_imports, use_fields_interface_for_struct=use_fields_interface_for_struct, ) return f"typing.Optional[{inner_type}]" if isinstance(ty, _IdlTypeCOption): inner_type = _py_type_from_idl( idl=idl, ty=ty.coption, types_relative_imports=types_relative_imports, use_fields_interface_for_struct=use_fields_interface_for_struct, ) return f"typing.Optional[{inner_type}]" if isinstance(ty, _IdlTypeDefined): defined = ty.defined filtered = [t for t in idl.types if t.name == defined] defined_types_prefix = ( "" if types_relative_imports else _DEFAULT_DEFINED_TYPES_PREFIX ) if len(filtered) != 1: raise ValueError(f"Type not found {defined}") typedef_type = filtered[0].type module = snake(ty.defined) if isinstance(typedef_type, _IdlTypeDefTyStruct): name = ( 
_fields_interface_name(ty.defined) if use_fields_interface_for_struct else ty.defined ) else: # enum name = _kind_interface_name(ty.defined) return f"{defined_types_prefix}{module}.{name}" if isinstance(ty, _IdlTypeArray): inner_type = _py_type_from_idl( idl=idl, ty=ty.array[0], types_relative_imports=types_relative_imports, use_fields_interface_for_struct=use_fields_interface_for_struct, ) return f"list[{inner_type}]" if ty in {"bool", "bytes"}: return ty if ty in INT_TYPES: return "int" if ty in FLOAT_TYPES: return "float" if ty == "string": return "str" if ty == "publicKey": return "PublicKey" raise ValueError(f"Unrecognized type: {ty}") def _layout_for_type( idl: Idl, ty: _IdlType, types_relative_imports: bool, name: Optional[str] = None, ) -> str: if ty == "bool": inner = "borsh.Bool" elif ty == "u8": inner = "borsh.U8" elif ty == "i8": inner = "borsh.I8" elif ty == "u16": inner = "borsh.U16" elif ty == "i16": inner = "borsh.I16" elif ty == "u32": inner = "borsh.U32" elif ty == "f32": inner = "borsh.F32" elif ty == "i32": inner = "borsh.I32" elif ty == "u64": inner = "borsh.U64" elif ty == "i64": inner = "borsh.I64" elif ty == "f64": inner = "borsh.F64" elif ty == "u128": inner = "borsh.U128" elif ty == "i128": inner = "borsh.I128" elif ty == "bytes": inner = "borsh.Bytes" elif ty == "string": inner = "borsh.String" elif ty == "publicKey": inner = "BorshPubkey" elif isinstance(ty, _IdlTypeVec): layout = _layout_for_type( idl=idl, ty=ty.vec, types_relative_imports=types_relative_imports ) cast_layout = f"typing.cast(Construct, {layout})" inner = f"borsh.Vec({cast_layout})" elif isinstance(ty, _IdlTypeOption): layout = _layout_for_type( idl=idl, ty=ty.option, types_relative_imports=types_relative_imports ) inner = f"borsh.Option({layout})" elif isinstance(ty, _IdlTypeCOption): layout = _layout_for_type( idl=idl, ty=ty.coption, types_relative_imports=types_relative_imports ) inner = f"COption({layout})" elif isinstance(ty, _IdlTypeDefined): defined = ty.defined 
filtered = [t for t in idl.types if t.name == defined] typedef_type = filtered[0].type defined_types_prefix = ( "" if types_relative_imports else _DEFAULT_DEFINED_TYPES_PREFIX ) module = snake(defined) inner = ( f"{defined_types_prefix}{module}.{defined}.layout" if isinstance(typedef_type, _IdlTypeDefTyStruct) else f"{defined_types_prefix}{module}.layout" ) elif isinstance(ty, _IdlTypeArray): layout = _layout_for_type( idl=idl, ty=ty.array[0], types_relative_imports=types_relative_imports ) inner = f"{layout}[{ty.array[1]}]" else: raise ValueError(f"Unrecognized type: {ty}") if name is None: return inner return f'"{name}" / {inner}' def _maybe_none(to_check: str, if_not_none: str) -> str: return f"(None if {to_check} is None else {if_not_none})" def _field_to_encodable( idl: Idl, ty: _IdlField, types_relative_imports: bool, val_prefix: str = "", val_suffix: str = "", ) -> str: ty_type = ty.type if isinstance(ty_type, _IdlTypeVec): map_body = _field_to_encodable( idl=idl, ty=_IdlField("item", ty_type.vec), val_prefix="", types_relative_imports=types_relative_imports, val_suffix="", ) # skip mapping when not needed if map_body == "item": return f"{val_prefix}{ty.name}{val_suffix}" return f"list(map(lambda item: {map_body}, {val_prefix}{ty.name}{val_suffix}))" if isinstance(ty_type, _IdlTypeOption): encodable = _field_to_encodable( idl=idl, ty=_IdlField(ty.name, ty_type.option), val_prefix=val_prefix, types_relative_imports=types_relative_imports, val_suffix=val_suffix, ) if encodable == f"{val_prefix}{ty.name}{val_suffix}": return encodable return _maybe_none(f"{val_prefix}{ty.name}{val_suffix}", encodable) if isinstance(ty_type, _IdlTypeCOption): raise NotImplementedError("COption not implemented.") if isinstance(ty_type, _IdlTypeDefined): defined = ty_type.defined filtered = [t for t in idl.types if t.name == defined] if len(filtered) != 1: raise ValueError(f"Type not found {defined}") typedef_type = filtered[0].type if isinstance(typedef_type, 
_IdlTypeDefTyStruct): val_full_name = f"{val_prefix}{ty.name}{val_suffix}" return f"{val_full_name}.to_encodable()" return f"{val_prefix}{ty.name}{val_suffix}.to_encodable()" if isinstance(ty_type, _IdlTypeArray): map_body = _field_to_encodable( idl=idl, ty=_IdlField("item", ty_type.array[0]), val_prefix="", types_relative_imports=types_relative_imports, val_suffix="", ) # skip mapping when not needed if map_body == "item": return f"{val_prefix}{ty.name}{val_suffix}" return f"list(map(lambda item: {map_body}, {val_prefix}{ty.name}{val_suffix}))" if ty_type in { "bool", *NUMBER_TYPES, "string", "publicKey", "bytes", }: return f"{val_prefix}{ty.name}{val_suffix}" raise ValueError(f"Unrecognized type: {ty_type}") def _field_from_decoded( idl: Idl, ty: _IdlField, types_relative_imports: bool, val_prefix: str = "" ) -> str: ty_type = ty.type if isinstance(ty_type, _IdlTypeVec): map_body = _field_from_decoded( idl=idl, ty=_IdlField("item", ty_type.vec), val_prefix="", types_relative_imports=types_relative_imports, ) # skip mapping when not needed if map_body == "item": return f"{val_prefix}{ty.name}" return f"list(map(lambda item: {map_body}, {val_prefix}{ty.name}))" if isinstance(ty_type, _IdlTypeOption): decoded = _field_from_decoded( idl=idl, ty=_IdlField(ty.name, ty_type.option), types_relative_imports=types_relative_imports, val_prefix=val_prefix, ) # skip coercion when not needed if decoded == f"{val_prefix}{ty.name}": return decoded return _maybe_none(f"{val_prefix}{ty.name}", decoded) if isinstance(ty_type, _IdlTypeCOption): raise NotImplementedError("COption not implemented.") if isinstance(ty_type, _IdlTypeDefined): defined = ty_type.defined filtered = [t for t in idl.types if t.name == defined] if len(filtered) != 1: raise ValueError(f"Type not found {defined}") typedef_type = filtered[0].type from_decoded_func_path = ( f"{snake(defined)}.{defined}" if isinstance(typedef_type, _IdlTypeDefTyStruct) else f"{snake(defined)}" ) defined_types_prefix = ( "" if 
types_relative_imports else _DEFAULT_DEFINED_TYPES_PREFIX ) full_func_path = f"{defined_types_prefix}{from_decoded_func_path}" from_decoded_arg = f"{val_prefix}{ty.name}" return f"{full_func_path}.from_decoded({from_decoded_arg})" if isinstance(ty_type, _IdlTypeArray): map_body = _field_from_decoded( idl=idl, ty=_IdlField("item", ty_type.array[0]), val_prefix="", types_relative_imports=types_relative_imports, ) # skip mapping when not needed if map_body == "item": return f"{val_prefix}{ty.name}" return f"list(map(lambda item: {map_body}, {val_prefix}{ty.name}))" if ty_type in { "bool", *NUMBER_TYPES, "string", "publicKey", "bytes", }: return f"{val_prefix}{ty.name}" raise ValueError(f"Unrecognized type: {ty_type}") def _struct_field_initializer( idl: Idl, field: _IdlField, types_relative_imports: bool, prefix: str = 'fields["', suffix: str = '"]', ) -> str: field_type = field.type if isinstance(field_type, _IdlTypeDefined): defined = field_type.defined filtered = [t for t in idl.types if t.name == defined] if len(filtered) != 1: raise ValueError(f"Type not found {defined}") typedef_type = filtered[0].type if isinstance(typedef_type, _IdlTypeDefTyStruct): module = snake(defined) defined_types_prefix = ( "" if types_relative_imports else _DEFAULT_DEFINED_TYPES_PREFIX ) obj_name = f"{defined_types_prefix}{module}.{defined}" return f"{obj_name}(**{prefix}{field.name}{suffix})" return f"{prefix}{field.name}{suffix}" if isinstance(field_type, _IdlTypeOption): initializer = _struct_field_initializer( idl=idl, field=_IdlField(field.name, field_type.option), prefix=prefix, suffix=suffix, types_relative_imports=types_relative_imports, ) # skip coercion when not needed if initializer == f"{prefix}{field.name}{suffix}": return initializer return _maybe_none(f"{prefix}{field.name}{suffix}", initializer) if isinstance(field_type, _IdlTypeCOption): initializer = _struct_field_initializer( idl=idl, field=_IdlField(field.name, field_type.coption), prefix=prefix, suffix=suffix, 
types_relative_imports=types_relative_imports, ) # skip coercion when not needed if initializer == f"{prefix}{field.name}{suffix}": return initializer return _maybe_none(f"{prefix}{field.name}", initializer) if isinstance(field_type, _IdlTypeArray): map_body = _struct_field_initializer( idl=idl, field=_IdlField("item", field_type.array[0]), prefix="", suffix="", types_relative_imports=types_relative_imports, ) # skip mapping when not needed if map_body == "item": return f"{prefix}{field.name}{suffix}" return f"list(map(lambda item: {map_body}, {prefix}{field.name}{suffix}))" if isinstance(field_type, _IdlTypeVec): map_body = _struct_field_initializer( idl=idl, field=_IdlField("item", field_type.vec), prefix="", suffix="", types_relative_imports=types_relative_imports, ) # skip mapping when not needed if map_body == "item": return f"{prefix}{field.name}{suffix}" return f"list(map(lambda item: {map_body}, {prefix}{field.name}{suffix}))" if field_type in { "bool", *NUMBER_TYPES, "string", "publicKey", "bytes", }: return f"{prefix}{field.name}{suffix}" raise ValueError(f"Unrecognized type: {field_type}") def _field_to_json( idl: Idl, ty: _IdlField, val_prefix: str = "", val_suffix: str = "" ) -> str: ty_type = ty.type var_name = f"{val_prefix}{ty.name}{val_suffix}" if ty_type == "publicKey": return f"str({var_name})" if isinstance(ty_type, _IdlTypeVec): map_body = _field_to_json(idl, _IdlField("item", ty_type.vec)) # skip mapping when not needed if map_body == "item": return var_name return f"list(map(lambda item: {map_body}, {var_name}))" if isinstance(ty_type, _IdlTypeArray): map_body = _field_to_json(idl, _IdlField("item", ty_type.array[0])) # skip mapping when not needed if map_body == "item": return var_name return f"list(map(lambda item: {map_body}, {var_name}))" if isinstance(ty_type, _IdlTypeOption): value = _field_to_json( idl, _IdlField(ty.name, ty_type.option), val_prefix, val_suffix ) # skip coercion when not needed if value == var_name: return value return 
_maybe_none(var_name, value) if isinstance(ty_type, _IdlTypeCOption): value = _field_to_json( idl, _IdlField(ty.name, ty_type.coption), val_prefix, val_suffix ) # skip coercion when not needed if value == var_name: return value return _maybe_none(var_name, value) if isinstance(ty_type, _IdlTypeDefined): defined = ty_type.defined filtered = [t for t in idl.types if t.name == defined] if len(filtered) != 1: raise ValueError(f"Type not found {defined}") return f"{var_name}.to_json()" if ty_type == "bytes": return f"list({var_name})" if ty_type in { "bool", *NUMBER_TYPES, "string", }: return var_name raise ValueError(f"Unrecognized type: {ty_type}") def _idl_type_to_json_type(ty: _IdlType, types_relative_imports: bool) -> str: if isinstance(ty, _IdlTypeVec): inner = _idl_type_to_json_type( ty=ty.vec, types_relative_imports=types_relative_imports ) return f"list[{inner}]" if isinstance(ty, _IdlTypeArray): inner = _idl_type_to_json_type( ty=ty.array[0], types_relative_imports=types_relative_imports ) return f"list[{inner}]" if isinstance(ty, _IdlTypeOption): inner = _idl_type_to_json_type( ty=ty.option, types_relative_imports=types_relative_imports ) return f"typing.Optional[{inner}]" if isinstance(ty, _IdlTypeCOption): inner = _idl_type_to_json_type( ty=ty.coption, types_relative_imports=types_relative_imports ) return f"typing.Optional[{inner}]" if isinstance(ty, _IdlTypeDefined): defined_types_prefix = ( "" if types_relative_imports else _DEFAULT_DEFINED_TYPES_PREFIX ) module = snake(ty.defined) return f"{defined_types_prefix}{module}.{_json_interface_name(ty.defined)}" if ty == "bool": return "bool" if ty in INT_TYPES: return "int" if ty in FLOAT_TYPES: return "float" if ty == "bytes": return "list[int]" if ty in {"string", "publicKey"}: return "str" raise ValueError(f"Unrecognized type: {ty}") def _field_from_json( idl: Idl, ty: _IdlField, types_relative_imports: bool, param_prefix: str = 'obj["', param_suffix: str = '"]', ) -> str: ty_type = ty.type var_name = 
f"{param_prefix}{ty.name}{param_suffix}" if ty_type == "publicKey": return f"PublicKey({var_name})" if isinstance(ty_type, _IdlTypeVec): map_body = _field_from_json( idl=idl, ty=_IdlField("item", ty_type.vec), param_prefix="", param_suffix="", types_relative_imports=types_relative_imports, ) # skip mapping when not needed if map_body == "item": return var_name return f"list(map(lambda item: {map_body}, {var_name}))" if isinstance(ty_type, _IdlTypeArray): map_body = _field_from_json( idl=idl, ty=_IdlField("item", ty_type.array[0]), param_prefix="", param_suffix="", types_relative_imports=types_relative_imports, ) # skip mapping when not needed if map_body == "item": return var_name return f"list(map(lambda item: {map_body}, {var_name}))" if isinstance(ty_type, _IdlTypeOption): inner = _field_from_json( idl=idl, ty=_IdlField(ty.name, ty_type.option), param_prefix=param_prefix, param_suffix=param_suffix, types_relative_imports=types_relative_imports, ) # skip coercion when not needed if inner == var_name: return inner return _maybe_none(var_name, inner) if isinstance(ty_type, _IdlTypeCOption): inner = _field_from_json( idl=idl, ty=_IdlField(ty.name, ty_type.coption), param_prefix=param_prefix, param_suffix=param_suffix, types_relative_imports=types_relative_imports, ) # skip coercion when not needed if inner == var_name: return inner return _maybe_none(var_name, inner) if isinstance(ty_type, _IdlTypeDefined): from_json_arg = var_name defined = ty_type.defined filtered = [t for t in idl.types if t.name == defined] typedef_type = filtered[0].type from_json_func_path = ( f"{snake(defined)}.{defined}" if isinstance(typedef_type, _IdlTypeDefTyStruct) else f"{snake(defined)}" ) defined_types_prefix = ( "" if types_relative_imports else _DEFAULT_DEFINED_TYPES_PREFIX ) full_func_path = f"{defined_types_prefix}{from_json_func_path}" return f"{full_func_path}.from_json({from_json_arg})" if ty_type == "bytes": return f"bytes({var_name})" if ty_type in { "bool", *NUMBER_TYPES, 
"string", }: return var_name raise ValueError(f"Unrecognized type: {ty_type}")
34.787307
87
0.604359
2,419
20,281
4.757338
0.04878
0.073427
0.112965
0.056482
0.840806
0.801703
0.76147
0.723062
0.669013
0.643726
0
0.006361
0.28682
20,281
582
88
34.847079
0.78927
0.026231
0
0.61236
0
0
0.163794
0.079236
0
0
0
0
0
1
0.024345
false
0
0.078652
0.009363
0.250936
0
0
0
0
null
0
0
0
1
1
1
1
0
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
5
e5cbcc43308fe794d5969cf86dafca193ea595a7
187
py
Python
camelcase_method.py
Kunalpod/codewars
8dc1af2f3c70e209471045118fd88b3ea1e627e5
[ "MIT" ]
null
null
null
camelcase_method.py
Kunalpod/codewars
8dc1af2f3c70e209471045118fd88b3ea1e627e5
[ "MIT" ]
null
null
null
camelcase_method.py
Kunalpod/codewars
8dc1af2f3c70e209471045118fd88b3ea1e627e5
[ "MIT" ]
null
null
null
#Kunal Gautam #Codewars : @Kunalpod #Problem name: CamelCase Method #Problem level: 6 kyu def camel_case(string): return ''.join([x[0].upper()+x[1:] for x in string.lower().split()])
26.714286
95
0.68984
29
187
4.413793
0.862069
0
0
0
0
0
0
0
0
0
0
0.018519
0.13369
187
6
96
31.166667
0.771605
0.438503
0
0
0
0
0
0
0
0
0
0
0
1
1
false
0
0
1
1
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
0
0
1
1
0
0
5
f905842a9e82e2c365922b771c02b9a07430838d
13,500
py
Python
tests/datatypes/test_bytearray.py
amaarahj/voc
c1e690d8160d8982163f49e538b1c3e7c73841db
[ "BSD-3-Clause" ]
null
null
null
tests/datatypes/test_bytearray.py
amaarahj/voc
c1e690d8160d8982163f49e538b1c3e7c73841db
[ "BSD-3-Clause" ]
null
null
null
tests/datatypes/test_bytearray.py
amaarahj/voc
c1e690d8160d8982163f49e538b1c3e7c73841db
[ "BSD-3-Clause" ]
null
null
null
from .. utils import TranspileTestCase, UnaryOperationTestCase, BinaryOperationTestCase, InplaceOperationTestCase class BytearrayTests(TranspileTestCase): def test_setattr(self): self.assertCodeExecution(""" x = bytearray([1,2,3]) try: x.attr = 42 except AttributeError as err: print(err) """) def test_getattr(self): self.assertCodeExecution(""" x = bytearray([1,2,3]) try: print(x.attr) except AttributeError as err: print(err) """) def test_contains(self): self.assertCodeExecution(""" print(bytearray([1,2,3]) in bytearray([1,2])) print(bytearray([1,2]) in bytearray([1,2,3])) print(bytearray([1,2,4]) in bytearray([1,2,3])) print(bytearray([8,9,0,1]) in bytearray([1,2,3])) print(101 in bytearray([1,2,3])) print(101 in bytearray([1,2,3,101])) print(b'pybee' in bytearray([1,2])) print(bytearray([1,2]) in b'pybee') """) self.assertCodeExecution(""" try: print(300 in bytearray([1,2,3])) print("No error raised") except ValueError: print("Raised a ValueError") """) self.assertCodeExecution(""" try: print(['b', 'e'] in bytearray([1,2,3])) print("No error raised") except TypeError: print("Raised a TypeError") """) def test_capitalize(self): self.assertCodeExecution(""" print(bytearray(b'abc').capitalize()) print(bytearray().capitalize()) """) def test_islower(self): # TODO: add this test when adding support for literal hex bytes # print(b'\xf0'.islower()) self.assertCodeExecution(""" print(bytearray(b'abc').islower()) print(bytearray(b'').islower()) print(bytearray(b'Abccc').islower()) print(bytearray(b'HELLO WORD').islower()) print(bytearray(b'@#$%!').islower()) print(bytearray(b'hello world').islower()) print(bytearray(b'hello world ').islower()) """) def test_isspace(self): self.assertCodeExecution(""" print(bytearray(b'testupper').isspace()) print(bytearray(b'test isspace').isspace()) print(bytearray(b' ').isspace()) print(bytearray(b'').isspace()) print(bytearray(b'\x46').isspace()) print(bytearray(b' \t\t').isspace()) print(bytearray(b' \x0b').isspace()) 
print(bytearray(b' \f').isspace()) print(bytearray(b' \\n').isspace()) print(bytearray(b' \\r').isspace()) """) def test_upper(self): # TODO: add this test when adding support for literal hex bytes # print(bytearray(b'\xf0').upper()) self.assertCodeExecution(""" print(bytearray(b'abc').upper()) print(bytearray(b'').upper()) print(bytearray(b'Abccc').upper()) print(bytearray(b'HELLO WORD').upper()) print(bytearray(b'@#$%!').upper()) print(bytearray(b'hello world').upper()) print(bytearray(b'hello world ').upper()) """) def test_ljust(self): self.assertCodeExecution(""" print(bytearray(b'testMoreThanWidth').ljust(5)) print(bytearray(b'testEqualWidth').ljust(14)) print(bytearray(b'testLessThanWidth').ljust(20)) print(bytearray(b'testMoreWithFill').ljust(2, b'x')) print(bytearray(b'testEqualWithFill').ljust(17, b'x')) print(bytearray(b'testLessWithFill').ljust(25, b'x')) print(bytearray(b'testNegative').ljust(-20)) print(bytearray(b'').ljust(5)) print(bytearray(b'testNoChangeWidthOne').ljust(True, b'x')) print(bytearray(b'testBArraySecondArg').ljust(True, bytearray(b'x'))) try: print(bytearray(b'testStrArgError').ljust('5')) except Exception as e: print(str(e)) try: print(bytearray(b'testMoreLengthError').ljust(12, b'as')) except Exception as e: print(str(e)) try: print(bytearray(b'testStrFillingChar').ljust(12, 'a')) except Exception as e: print(str(e)) """) def test_rjust(self): self.assertCodeExecution(""" print(bytearray(b'testMoreThanWidth').rjust(5)) print(bytearray(b'testEqualWidth').rjust(14)) print(bytearray(b'testLessThanWidth').rjust(20)) print(bytearray(b'testMoreWithFill').rjust(2, b'x')) print(bytearray(b'testEqualWithFill').rjust(17, b'x')) print(bytearray(b'testLessWithFill').rjust(25, b'x')) print(bytearray(b'testNegative').rjust(-20)) print(bytearray(b'').rjust(5)) print(bytearray(b'testNoChangeWidthOne').rjust(True, b'x')) print(bytearray(b'testBArraySecondArg').rjust(True, bytearray(b'x'))) try: print(bytearray(b'testStrArgError').rjust('5')) except 
Exception as e: print(str(e)) try: print(bytearray(b'testMoreLengthError').rjust(12, b'as')) except Exception as e: print(str(e)) try: print(bytearray(b'testStrFillingChar').rjust(12, 'a')) except Exception as e: print(str(e)) """) def test_isalpha(self): # TODO: add this test when adding support for literal hex bytes # print(bytearray(b'\xf0').isalpha()) self.assertCodeExecution(""" print(bytearray(b'abc').isalpha()) print(bytearray(b'').isalpha()) print(bytearray(b'Abccc').isalpha()) print(bytearray(b'HELLO WORD').isalpha()) print(bytearray(b'@#$%!').isalpha()) print(bytearray(b'hello world').isalpha()) print(bytearray(b'hello world ').isalpha()) """) def test_isupper(self): self.assertCodeExecution(""" print(bytearray(b'abc').isupper()) print(bytearray(b'ABC').isupper()) print(bytearray(b'').isupper()) print(bytearray(b'Abccc').isupper()) print(bytearray(b'HELLO WORD').isupper()) print(bytearray(b'@#$%!').isupper()) print(bytearray(b'hello world').isupper()) print(bytearray(b'hello world ').isupper()) """) def test_lower(self): self.assertCodeExecution(""" print(bytearray(b"abc").lower()) print(bytearray(b"HELLO WORLD!").lower()) print(bytearray(b"hElLO wOrLd").lower()) print(bytearray(b"[Hello] World").lower()) """) def test_count(self): self.assertCodeExecution(""" print(bytearray(b'abcabca').count(97)) print(bytearray(b'abcabca').count(b'abc')) print(bytearray(b'qqq').count(b'q')) print(bytearray(b'qqq').count(b'qq')) print(bytearray(b'qqq').count(b'qqq')) print(bytearray(b'qqq').count(b'qqqq')) print(bytearray(b'abcdefgh').count(b'bc',-7, -5)) print(bytearray(b'abcdefgh').count(b'bc',1, -5)) print(bytearray(b'abcdefgh').count(b'bc',0, 3)) print(bytearray(b'abcdefgh').count(b'bc',-7, 500)) print(bytearray(b'qqaqqbqqqcqqqdqqqqeqqqqf').count(b'qq'),1) print(bytearray(b'').count(b'q'),0) """) def test_find(self): self.assertCodeExecution(""" print(bytearray(b'').find(b'a')) print(bytearray(b'abcd').find(b'')) print(bytearray(b'abcd').find(b'...')) 
print(bytearray(b'abcd').find(b'a')) print(bytearray(b'abcd').find(b'b')) print(bytearray(b'abcd').find(b'c')) print(bytearray(b'abcd').find(b'd')) print(bytearray(b'abcd').find(bytearray(b'ab'))) print(bytearray(b'abcd').find(b'bc')) print(bytearray(b'abcd').find(b'cd')) print(bytearray(b'abcd').find(b'cd', 2)) print(bytearray(b'abcd').find(bytearray(b'ab'), 3)) print(bytearray(b'abcd').find(b'cd', 2, 3)) print(bytearray(b'abcd').find(bytearray(b'ab'), 3, 4)) """) def test_center(self): self.assertCodeExecution(""" print(bytearray(b'pybee').center(12)) print(bytearray(b'pybee').center(13)) print(bytearray(b'pybee').center(2)) print(bytearray(b'pybee').center(2, b'a')) print(bytearray(b'pybee').center(12, b'a')) print(bytearray(b'pybee').center(13, b'a')) print(bytearray(b'pybee').center(-5)) print(bytearray(b'').center(5)) print(bytearray(b'pybee').center(True, b'a')) print(bytearray(b'pybee').center(True, bytearray(b'a'))) """) def test_title(self): self.assertCodeExecution(r""" print(bytearray(b"").title()) print(bytearray(b"abcd").title()) print(bytearray(b"NOT").title()) print(bytearray(b"coca cola").title()) print(bytearray(b"they are from UK, are they not?").title()) print(bytearray(b'/@.').title()) print(bytearray(b'\x46\x55\x43\x4B').title()) print(bytearray(b"py.bee").title()) """) def test_istitle(self): self.assertCodeExecution(r""" print(bytearray(b"").istitle()) print(bytearray(b"abcd").istitle()) print(bytearray(b"NOT").istitle()) print(bytearray(b"coca cola").istitle()) print(bytearray(b"they are from UK, are they not?").istitle()) print(bytearray(b'/@.').istitle()) print(bytearray(b'\x46\x55\x43\x4B').istitle()) print(bytearray(b"py.bee").title()) """) def test_repr(self): self.assertCodeExecution(r""" print(repr(bytearray(b'\xc8'))) print(repr(bytearray(b'abcdef \xc8 abcdef'))) print(repr(bytearray(b'abcdef \xc8 abcdef\n\r\t'))) print(bytearray(b'abcdef \xc8 abcdef\n\r\t')) for b in range(0, 256, 16): print(repr(bytearray(range(b, b+16)))) for b 
in range(0, 256, 16): print(bytearray(range(b, b+16))) """) def test_endswith(self): self.assertCodeExecution(r""" print(bytearray(b'banana').endswith(b'ana')) print(bytearray(b'banana').endswith(b'')) print(bytearray(b'').endswith(b'ana')) print(bytearray(b'').endswith(b'')) """) def test_startswith(self): self.assertCodeExecution(r""" print(bytearray(b'banana').startswith(b'ana')) print(bytearray(b'banana').startswith(b'')) print(bytearray(b'').startswith(b'ana')) print(bytearray(b'').startswith(b'')) """) def test_isalnum(self): self.assertCodeExecution(""" print(bytearray(b'0').isalnum()) print(bytearray(b'9').isalnum()) print(bytearray(b'1234567890').isalnum()) print(bytearray(b'89A23gM23z').isalnum()) print(bytearray(b':923').isalnum()) print(bytearray(b'\\923').isalnum()) print(bytearray(b' jdf fhd 33').isalnum()) print(bytearray(b'@#$%^&*').isalnum()) print(bytearray(b'"478\t47ads:').isalnum()) print(bytearray(b'AbZ').isalnum()) """) def test_isdigit(self): self.assertCodeExecution(""" print(bytearray(b'0').isdigit()) print(bytearray(b'9').isdigit()) print(bytearray(b'1234567890').isdigit()) print(bytearray(b'8923g23823').isdigit()) print(bytearray(b'923').isdigit()) print(bytearray(b'\\923').isdigit()) print(bytearray(b'000').isdigit()) print(bytearray(b'@#$%^&*').isdigit()) print(bytearray(b'"478\t47ads:').isdigit()) print(bytearray(b'AbZ').isdigit()) """) def test_join(self): self.assertCodeExecution(""" b = bytearray(b'.') print(b.join([b'12', b'dh'])) print(b.join([bytearray(b'12'), bytearray(b'dh')])) b = bytearray(b' ') print(b.join([b'd', bytearray(b'l22-'), b'=ej*'])) print(b.join([bytearray(b'31'), b'`', b'^'])) print(b.join([bytearray(b'dh')])) b = bytearray(b'%#@!') print(b.join([b'1',b'd',b'<'])) print(b.join([b'12'])) """) class UnaryBytearrayOperationTests(UnaryOperationTestCase, TranspileTestCase): data_type = 'bytearray' class BinaryBytearrayOperationTests(BinaryOperationTestCase, TranspileTestCase): data_type = 'bytearray' 
not_implemented_versions = { 'test_modulo_complex': (3.4, ), } class InplaceBytearrayOperationTests(InplaceOperationTestCase, TranspileTestCase): data_type = 'bytearray' not_implemented_versions = { 'test_modulo_complex': (3.4, ), }
39.244186
113
0.537481
1,470
13,500
4.912925
0.114286
0.235392
0.317779
0.076849
0.765577
0.646774
0.545832
0.365688
0.225561
0.157574
0
0.025769
0.287111
13,500
343
114
39.358601
0.724647
0.020741
0
0.334437
0
0.003311
0.824113
0.433664
0
0
0
0.002915
0.082781
1
0.076159
false
0
0.003311
0
0.109272
0.619205
0
0
0
null
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
1
1
null
0
0
0
0
0
0
0
0
0
0
0
1
0
5
00ce93578cc7bb07558a17ffd2233cf15a772008
213
py
Python
libs/shell/__init__.py
Pierre-Sassoulas/shell
94b65c4a417173b1aa6995b1b27d1b810d1200f4
[ "MIT" ]
null
null
null
libs/shell/__init__.py
Pierre-Sassoulas/shell
94b65c4a417173b1aa6995b1b27d1b810d1200f4
[ "MIT" ]
null
null
null
libs/shell/__init__.py
Pierre-Sassoulas/shell
94b65c4a417173b1aa6995b1b27d1b810d1200f4
[ "MIT" ]
null
null
null
# -*- coding: utf-8 -*- from libs.shell.shell_color import ShellColor, print_color from libs.shell.color import Color from libs.shell.style import Style __all__ = ['ShellColor', 'Color', 'Style', "print_color"]
26.625
58
0.737089
30
213
5
0.4
0.16
0.26
0.24
0
0
0
0
0
0
0
0.005348
0.122066
213
7
59
30.428571
0.796791
0.098592
0
0
0
0
0.163158
0
0
0
0
0
0
1
0
false
0
0.75
0
0.75
0.5
0
0
0
null
0
1
1
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
1
0
0
1
0
5
00e0ba526c29f4f38c1be62c9ae3fe75f7176e04
1,844
py
Python
feature_computations/queen_castling.py
emanuel1025/Elo-Ratings-Match-Forecasting
75428675a29049294a449736ac0340d7e5cc37d1
[ "Apache-2.0" ]
null
null
null
feature_computations/queen_castling.py
emanuel1025/Elo-Ratings-Match-Forecasting
75428675a29049294a449736ac0340d7e5cc37d1
[ "Apache-2.0" ]
null
null
null
feature_computations/queen_castling.py
emanuel1025/Elo-Ratings-Match-Forecasting
75428675a29049294a449736ac0340d7e5cc37d1
[ "Apache-2.0" ]
null
null
null
''' def find( element, list): for i, j in enumerate( list): if( j == element): return i; return -1 data_path = "../data/data_uci.pgn" fd = open( data_path) ''' def find( element, list): for i, j in enumerate( list): if( j[0:2] == element): return i; return -1 data_path = "../data/data_uci.pgn" fd = open( data_path) ''' fd_white = open( "results/castle_white.fea", "w") fd_black = open( "results/castle_black.fea", "w") fd_castle = open( "results/castle.fea", "w") for row in fd: if row[0] != '\n' and row[0] != '[': moves = row.split( ' ') white_c = find( "e1g1", moves) black_c = find( "e8g8", moves) if( white_c == -1): white_c = find( "e1c1", moves) if( white_c == -1): w = 1 else: w = white_c / float( len(moves)) if( black_c == -1): black_c = find( "e8c8", moves) if( black_c == -1): b = 1 else: b = white_c / float( len(moves)) fd_white.write( str( w) + "\n") fd_black.write( str( b) + "\n") fd_castle.write( str( (w+b) * 0.5) + "\n") fd.close() fd_white.close() fd_castle.close() fd_black.close() ''' #Queen First Move fd_white = open( "results/queen_white.fea", "w") fd_black = open( "results/queen_black.fea", "w") fd_queen = open( "results/queen.fea", "w") for row in fd: if row[0] != '\n' and row[0] != '[': moves = row.split( ' ') white_c = find( "d1", moves) black_c = find( "d8", moves) #if( white_c == -1): # white_c = find( "e1c1", moves) if( white_c == -1): w = 1 else: w = white_c / float( len(moves)) #if( black_c == -1): # black_c = find( "e8c8", moves) if( black_c == -1): b = 1 else: b = white_c / float( len(moves)) fd_white.write( str( w) + "\n") fd_black.write( str( b) + "\n") fd_queen.write( str( (w+b) * 0.5) + "\n") fd.close() fd_white.close() fd_queen.close() fd_black.close()
17.233645
49
0.563991
305
1,844
3.252459
0.160656
0.072581
0.024194
0.052419
0.760081
0.760081
0.760081
0.705645
0.705645
0.705645
0
0.026536
0.223427
1,844
107
50
17.233645
0.666201
0.178416
0
0.066667
0
0
0.124069
0.057072
0
0
0
0
0
1
0.033333
false
0
0
0
0.1
0
0
0
0
null
0
0
0
0
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
5
dac6ecc6f64c8379476cec2348e0414a426cbf7b
49
py
Python
tests/__init__.py
georgercarder/calculus
13b729aefe383a5156defc4b55f3748afa8ba427
[ "MIT" ]
null
null
null
tests/__init__.py
georgercarder/calculus
13b729aefe383a5156defc4b55f3748afa8ba427
[ "MIT" ]
null
null
null
tests/__init__.py
georgercarder/calculus
13b729aefe383a5156defc4b55f3748afa8ba427
[ "MIT" ]
null
null
null
# init tests from .test_1 import test1 test1()
8.166667
25
0.714286
8
49
4.25
0.875
0
0
0
0
0
0
0
0
0
0
0.076923
0.204082
49
5
26
9.8
0.794872
0.204082
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
0.5
0
0.5
0
1
1
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
0
0
0
5
dae9ae2cde1753dac92c6ca2e27cd01341adc9dc
52,088
py
Python
lfs/catalog/migrations/0001_initial.py
restless/django-lfs
4058f9d45b416ef2e8c28a87856ea0f1550b523d
[ "BSD-3-Clause" ]
1
2020-02-26T03:07:39.000Z
2020-02-26T03:07:39.000Z
lfs/catalog/migrations/0001_initial.py
mxins/django-lfs
bf42ed80ce0e1ec96db6ab985adcc614ea79dfc8
[ "BSD-3-Clause" ]
null
null
null
lfs/catalog/migrations/0001_initial.py
mxins/django-lfs
bf42ed80ce0e1ec96db6ab985adcc614ea79dfc8
[ "BSD-3-Clause" ]
null
null
null
# -*- coding: utf-8 -*- import datetime from south.db import db from south.v2 import SchemaMigration from django.db import models class Migration(SchemaMigration): depends_on = ( ("supplier", "0001_initial"), ("tax", "0001_initial"), ("manufacturer", "0001_initial"), ) def forwards(self, orm): # Adding model 'Category' db.create_table('catalog_category', ( ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)), ('name', self.gf('django.db.models.fields.CharField')(max_length=50)), ('slug', self.gf('django.db.models.fields.SlugField')(unique=True, max_length=50)), ('parent', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['catalog.Category'], null=True, blank=True)), ('show_all_products', self.gf('django.db.models.fields.BooleanField')(default=True)), ('short_description', self.gf('django.db.models.fields.TextField')(blank=True)), ('description', self.gf('django.db.models.fields.TextField')(blank=True)), ('image', self.gf('lfs.core.fields.thumbs.ImageWithThumbsField')(blank=True, max_length=100, null=True, sizes=((60, 60), (100, 100), (200, 200), (400, 400)))), ('position', self.gf('django.db.models.fields.IntegerField')(default=1000)), ('exclude_from_navigation', self.gf('django.db.models.fields.BooleanField')(default=False)), ('static_block', self.gf('django.db.models.fields.related.ForeignKey')(blank=True, related_name='categories', null=True, to=orm['catalog.StaticBlock'])), ('template', self.gf('django.db.models.fields.PositiveSmallIntegerField')(max_length=400, null=True, blank=True)), ('active_formats', self.gf('django.db.models.fields.BooleanField')(default=False)), ('product_rows', self.gf('django.db.models.fields.IntegerField')(default=3)), ('product_cols', self.gf('django.db.models.fields.IntegerField')(default=3)), ('category_cols', self.gf('django.db.models.fields.IntegerField')(default=3)), ('meta_title', self.gf('django.db.models.fields.CharField')(default='<name>', max_length=100)), ('meta_keywords', 
self.gf('django.db.models.fields.TextField')(blank=True)), ('meta_description', self.gf('django.db.models.fields.TextField')(blank=True)), ('level', self.gf('django.db.models.fields.PositiveSmallIntegerField')(default=1)), ('uid', self.gf('django.db.models.fields.CharField')(default='b870c3fb-0cf0-480b-ae58-670316ce281b', unique=True, max_length=50)), )) db.send_create_signal('catalog', ['Category']) # Adding M2M table for field products on 'Category' db.create_table('catalog_category_products', ( ('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)), ('category', models.ForeignKey(orm['catalog.category'], null=False)), ('product', models.ForeignKey(orm['catalog.product'], null=False)) )) db.create_unique('catalog_category_products', ['category_id', 'product_id']) # Adding model 'Product' db.create_table('catalog_product', ( ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)), ('name', self.gf('django.db.models.fields.CharField')(max_length=80, blank=True)), ('slug', self.gf('django.db.models.fields.SlugField')(unique=True, max_length=80)), ('sku', self.gf('django.db.models.fields.CharField')(max_length=30, blank=True)), ('price', self.gf('django.db.models.fields.FloatField')(default=0.0)), ('price_calculator', self.gf('django.db.models.fields.CharField')(max_length=255, null=True, blank=True)), ('effective_price', self.gf('django.db.models.fields.FloatField')(blank=True)), ('price_unit', self.gf('django.db.models.fields.CharField')(max_length=20, blank=True)), ('unit', self.gf('django.db.models.fields.CharField')(max_length=20, blank=True)), ('short_description', self.gf('django.db.models.fields.TextField')(blank=True)), ('description', self.gf('django.db.models.fields.TextField')(blank=True)), ('meta_title', self.gf('django.db.models.fields.CharField')(default='<name>', max_length=80, blank=True)), ('meta_keywords', self.gf('django.db.models.fields.TextField')(blank=True)), ('meta_description', 
self.gf('django.db.models.fields.TextField')(blank=True)), ('for_sale', self.gf('django.db.models.fields.BooleanField')(default=False)), ('for_sale_price', self.gf('django.db.models.fields.FloatField')(default=0.0)), ('active', self.gf('django.db.models.fields.BooleanField')(default=False)), ('creation_date', self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, blank=True)), ('supplier', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['supplier.Supplier'], null=True, blank=True)), ('deliverable', self.gf('django.db.models.fields.BooleanField')(default=True)), ('manual_delivery_time', self.gf('django.db.models.fields.BooleanField')(default=False)), ('delivery_time', self.gf('django.db.models.fields.related.ForeignKey')(blank=True, related_name='products_delivery_time', null=True, to=orm['catalog.DeliveryTime'])), ('order_time', self.gf('django.db.models.fields.related.ForeignKey')(blank=True, related_name='products_order_time', null=True, to=orm['catalog.DeliveryTime'])), ('ordered_at', self.gf('django.db.models.fields.DateField')(null=True, blank=True)), ('manage_stock_amount', self.gf('django.db.models.fields.BooleanField')(default=False)), ('stock_amount', self.gf('django.db.models.fields.FloatField')(default=0)), ('active_packing_unit', self.gf('django.db.models.fields.PositiveSmallIntegerField')(default=0)), ('packing_unit', self.gf('django.db.models.fields.FloatField')(null=True, blank=True)), ('packing_unit_unit', self.gf('django.db.models.fields.CharField')(max_length=30, blank=True)), ('static_block', self.gf('django.db.models.fields.related.ForeignKey')(blank=True, related_name='products', null=True, to=orm['catalog.StaticBlock'])), ('weight', self.gf('django.db.models.fields.FloatField')(default=0.0)), ('height', self.gf('django.db.models.fields.FloatField')(default=0.0)), ('length', self.gf('django.db.models.fields.FloatField')(default=0.0)), ('width', self.gf('django.db.models.fields.FloatField')(default=0.0)), ('tax', 
self.gf('django.db.models.fields.related.ForeignKey')(to=orm['tax.Tax'], null=True, blank=True)), ('sub_type', self.gf('django.db.models.fields.CharField')(default='0', max_length=10)), ('default_variant', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['catalog.Product'], null=True, blank=True)), ('category_variant', self.gf('django.db.models.fields.SmallIntegerField')(null=True, blank=True)), ('variants_display_type', self.gf('django.db.models.fields.IntegerField')(default=0)), ('variant_position', self.gf('django.db.models.fields.IntegerField')(default=999)), ('parent', self.gf('django.db.models.fields.related.ForeignKey')(blank=True, related_name='variants', null=True, to=orm['catalog.Product'])), ('active_name', self.gf('django.db.models.fields.BooleanField')(default=False)), ('active_sku', self.gf('django.db.models.fields.BooleanField')(default=False)), ('active_short_description', self.gf('django.db.models.fields.BooleanField')(default=False)), ('active_static_block', self.gf('django.db.models.fields.BooleanField')(default=False)), ('active_description', self.gf('django.db.models.fields.BooleanField')(default=False)), ('active_price', self.gf('django.db.models.fields.BooleanField')(default=False)), ('active_for_sale', self.gf('django.db.models.fields.PositiveSmallIntegerField')(default=0)), ('active_for_sale_price', self.gf('django.db.models.fields.BooleanField')(default=False)), ('active_images', self.gf('django.db.models.fields.BooleanField')(default=False)), ('active_related_products', self.gf('django.db.models.fields.BooleanField')(default=False)), ('active_accessories', self.gf('django.db.models.fields.BooleanField')(default=False)), ('active_meta_title', self.gf('django.db.models.fields.BooleanField')(default=False)), ('active_meta_description', self.gf('django.db.models.fields.BooleanField')(default=False)), ('active_meta_keywords', self.gf('django.db.models.fields.BooleanField')(default=False)), ('active_dimensions', 
self.gf('django.db.models.fields.BooleanField')(default=False)), ('template', self.gf('django.db.models.fields.PositiveSmallIntegerField')(null=True, blank=True)), ('active_price_calculation', self.gf('django.db.models.fields.BooleanField')(default=False)), ('price_calculation', self.gf('django.db.models.fields.CharField')(max_length=100, blank=True)), ('active_base_price', self.gf('django.db.models.fields.PositiveSmallIntegerField')(default=0)), ('base_price_unit', self.gf('django.db.models.fields.CharField')(max_length=30, blank=True)), ('base_price_amount', self.gf('django.db.models.fields.FloatField')(default=0.0, null=True, blank=True)), ('sku_manufacturer', self.gf('django.db.models.fields.CharField')(max_length=100, blank=True)), ('manufacturer', self.gf('django.db.models.fields.related.ForeignKey')(blank=True, related_name='products', null=True, to=orm['manufacturer.Manufacturer'])), ('type_of_quantity_field', self.gf('django.db.models.fields.PositiveSmallIntegerField')(null=True, blank=True)), ('uid', self.gf('django.db.models.fields.CharField')(default='cf3cfe03-8587-42b7-b539-373b820046e4', unique=True, max_length=50)), )) db.send_create_signal('catalog', ['Product']) # Adding M2M table for field related_products on 'Product' db.create_table('catalog_product_related_products', ( ('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)), ('from_product', models.ForeignKey(orm['catalog.product'], null=False)), ('to_product', models.ForeignKey(orm['catalog.product'], null=False)) )) db.create_unique('catalog_product_related_products', ['from_product_id', 'to_product_id']) # Adding model 'ProductAccessories' db.create_table('catalog_productaccessories', ( ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)), ('product', self.gf('django.db.models.fields.related.ForeignKey')(related_name='productaccessories_product', to=orm['catalog.Product'])), ('accessory', 
self.gf('django.db.models.fields.related.ForeignKey')(related_name='productaccessories_accessory', to=orm['catalog.Product'])), ('position', self.gf('django.db.models.fields.IntegerField')(default=999)), ('quantity', self.gf('django.db.models.fields.FloatField')(default=1)), )) db.send_create_signal('catalog', ['ProductAccessories']) # Adding model 'PropertyGroup' db.create_table('catalog_propertygroup', ( ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)), ('name', self.gf('django.db.models.fields.CharField')(max_length=50, blank=True)), )) db.send_create_signal('catalog', ['PropertyGroup']) # Adding M2M table for field products on 'PropertyGroup' db.create_table('catalog_propertygroup_products', ( ('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)), ('propertygroup', models.ForeignKey(orm['catalog.propertygroup'], null=False)), ('product', models.ForeignKey(orm['catalog.product'], null=False)) )) db.create_unique('catalog_propertygroup_products', ['propertygroup_id', 'product_id']) # Adding model 'Property' db.create_table('catalog_property', ( ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)), ('name', self.gf('django.db.models.fields.CharField')(max_length=100)), ('title', self.gf('django.db.models.fields.CharField')(max_length=100)), ('position', self.gf('django.db.models.fields.IntegerField')(null=True, blank=True)), ('unit', self.gf('django.db.models.fields.CharField')(max_length=15, blank=True)), ('display_on_product', self.gf('django.db.models.fields.BooleanField')(default=True)), ('local', self.gf('django.db.models.fields.BooleanField')(default=False)), ('filterable', self.gf('django.db.models.fields.BooleanField')(default=True)), ('display_no_results', self.gf('django.db.models.fields.BooleanField')(default=False)), ('configurable', self.gf('django.db.models.fields.BooleanField')(default=False)), ('type', self.gf('django.db.models.fields.PositiveSmallIntegerField')(default=2)), 
('price', self.gf('django.db.models.fields.FloatField')(null=True, blank=True)), ('display_price', self.gf('django.db.models.fields.BooleanField')(default=True)), ('add_price', self.gf('django.db.models.fields.BooleanField')(default=True)), ('unit_min', self.gf('django.db.models.fields.FloatField')(null=True, blank=True)), ('unit_max', self.gf('django.db.models.fields.FloatField')(null=True, blank=True)), ('unit_step', self.gf('django.db.models.fields.FloatField')(null=True, blank=True)), ('decimal_places', self.gf('django.db.models.fields.PositiveSmallIntegerField')(default=0)), ('required', self.gf('django.db.models.fields.BooleanField')(default=False)), ('step_type', self.gf('django.db.models.fields.PositiveSmallIntegerField')(default=1)), ('step', self.gf('django.db.models.fields.IntegerField')(null=True, blank=True)), ('uid', self.gf('django.db.models.fields.CharField')(default='78ef0456-a083-40d9-8a36-cc16ba6360a5', unique=True, max_length=50)), )) db.send_create_signal('catalog', ['Property']) # Adding model 'FilterStep' db.create_table('catalog_filterstep', ( ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)), ('property', self.gf('django.db.models.fields.related.ForeignKey')(related_name='steps', to=orm['catalog.Property'])), ('start', self.gf('django.db.models.fields.FloatField')()), )) db.send_create_signal('catalog', ['FilterStep']) # Adding model 'GroupsPropertiesRelation' db.create_table('catalog_groupspropertiesrelation', ( ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)), ('group', self.gf('django.db.models.fields.related.ForeignKey')(related_name='groupproperties', to=orm['catalog.PropertyGroup'])), ('property', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['catalog.Property'])), ('position', self.gf('django.db.models.fields.IntegerField')(default=999)), )) db.send_create_signal('catalog', ['GroupsPropertiesRelation']) # Adding unique constraint on 'GroupsPropertiesRelation', fields ['group', 
'property'] db.create_unique('catalog_groupspropertiesrelation', ['group_id', 'property_id']) # Adding model 'ProductsPropertiesRelation' db.create_table('catalog_productspropertiesrelation', ( ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)), ('product', self.gf('django.db.models.fields.related.ForeignKey')(related_name='productsproperties', to=orm['catalog.Product'])), ('property', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['catalog.Property'])), ('position', self.gf('django.db.models.fields.IntegerField')(default=999)), )) db.send_create_signal('catalog', ['ProductsPropertiesRelation']) # Adding unique constraint on 'ProductsPropertiesRelation', fields ['product', 'property'] db.create_unique('catalog_productspropertiesrelation', ['product_id', 'property_id']) # Adding model 'PropertyOption' db.create_table('catalog_propertyoption', ( ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)), ('property', self.gf('django.db.models.fields.related.ForeignKey')(related_name='options', to=orm['catalog.Property'])), ('name', self.gf('django.db.models.fields.CharField')(max_length=100)), ('price', self.gf('django.db.models.fields.FloatField')(default=0.0, null=True, blank=True)), ('position', self.gf('django.db.models.fields.IntegerField')(default=99)), ('uid', self.gf('django.db.models.fields.CharField')(default='04c97a37-e155-4740-9934-74d6b1907eb5', unique=True, max_length=50)), )) db.send_create_signal('catalog', ['PropertyOption']) # Adding model 'ProductPropertyValue' db.create_table('catalog_productpropertyvalue', ( ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)), ('product', self.gf('django.db.models.fields.related.ForeignKey')(related_name='property_values', to=orm['catalog.Product'])), ('parent_id', self.gf('django.db.models.fields.IntegerField')(null=True, blank=True)), ('property', self.gf('django.db.models.fields.related.ForeignKey')(related_name='property_values', 
to=orm['catalog.Property'])), ('value', self.gf('django.db.models.fields.CharField')(max_length=100, blank=True)), ('value_as_float', self.gf('django.db.models.fields.FloatField')(null=True, blank=True)), ('type', self.gf('django.db.models.fields.PositiveSmallIntegerField')()), )) db.send_create_signal('catalog', ['ProductPropertyValue']) # Adding unique constraint on 'ProductPropertyValue', fields ['product', 'property', 'value', 'type'] db.create_unique('catalog_productpropertyvalue', ['product_id', 'property_id', 'value', 'type']) # Adding model 'Image' db.create_table('catalog_image', ( ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)), ('content_type', self.gf('django.db.models.fields.related.ForeignKey')(blank=True, related_name='image', null=True, to=orm['contenttypes.ContentType'])), ('content_id', self.gf('django.db.models.fields.PositiveIntegerField')(null=True, blank=True)), ('title', self.gf('django.db.models.fields.CharField')(max_length=100, blank=True)), ('image', self.gf('lfs.core.fields.thumbs.ImageWithThumbsField')(blank=True, max_length=100, null=True, sizes=((60, 60), (100, 100), (200, 200), (300, 300), (400, 400)))), ('position', self.gf('django.db.models.fields.PositiveSmallIntegerField')(default=999)), )) db.send_create_signal('catalog', ['Image']) # Adding model 'File' db.create_table('catalog_file', ( ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)), ('title', self.gf('django.db.models.fields.CharField')(max_length=100, blank=True)), ('slug', self.gf('django.db.models.fields.SlugField')(max_length=50)), ('content_type', self.gf('django.db.models.fields.related.ForeignKey')(blank=True, related_name='files', null=True, to=orm['contenttypes.ContentType'])), ('content_id', self.gf('django.db.models.fields.PositiveIntegerField')(null=True, blank=True)), ('position', self.gf('django.db.models.fields.SmallIntegerField')(default=999)), ('description', 
self.gf('django.db.models.fields.CharField')(max_length=100, blank=True)), ('file', self.gf('django.db.models.fields.files.FileField')(max_length=100)), )) db.send_create_signal('catalog', ['File']) # Adding model 'StaticBlock' db.create_table('catalog_staticblock', ( ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)), ('name', self.gf('django.db.models.fields.CharField')(max_length=30)), ('display_files', self.gf('django.db.models.fields.BooleanField')(default=True)), ('html', self.gf('django.db.models.fields.TextField')(blank=True)), ('position', self.gf('django.db.models.fields.SmallIntegerField')(default=1000)), )) db.send_create_signal('catalog', ['StaticBlock']) # Adding model 'DeliveryTime' db.create_table('catalog_deliverytime', ( ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)), ('min', self.gf('django.db.models.fields.FloatField')()), ('max', self.gf('django.db.models.fields.FloatField')()), ('unit', self.gf('django.db.models.fields.PositiveSmallIntegerField')(default=2)), ('description', self.gf('django.db.models.fields.TextField')(blank=True)), )) db.send_create_signal('catalog', ['DeliveryTime']) # Adding model 'ProductAttachment' db.create_table('catalog_productattachment', ( ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)), ('title', self.gf('django.db.models.fields.CharField')(max_length=50)), ('description', self.gf('django.db.models.fields.TextField')(blank=True)), ('file', self.gf('django.db.models.fields.files.FileField')(max_length=100)), ('product', self.gf('django.db.models.fields.related.ForeignKey')(related_name='attachments', to=orm['catalog.Product'])), ('position', self.gf('django.db.models.fields.IntegerField')(default=1)), )) db.send_create_signal('catalog', ['ProductAttachment']) def backwards(self, orm): # Removing unique constraint on 'ProductPropertyValue', fields ['product', 'property', 'value', 'type'] db.delete_unique('catalog_productpropertyvalue', ['product_id', 
'property_id', 'value', 'type']) # Removing unique constraint on 'ProductsPropertiesRelation', fields ['product', 'property'] db.delete_unique('catalog_productspropertiesrelation', ['product_id', 'property_id']) # Removing unique constraint on 'GroupsPropertiesRelation', fields ['group', 'property'] db.delete_unique('catalog_groupspropertiesrelation', ['group_id', 'property_id']) # Deleting model 'Category' db.delete_table('catalog_category') # Removing M2M table for field products on 'Category' db.delete_table('catalog_category_products') # Deleting model 'Product' db.delete_table('catalog_product') # Removing M2M table for field related_products on 'Product' db.delete_table('catalog_product_related_products') # Deleting model 'ProductAccessories' db.delete_table('catalog_productaccessories') # Deleting model 'PropertyGroup' db.delete_table('catalog_propertygroup') # Removing M2M table for field products on 'PropertyGroup' db.delete_table('catalog_propertygroup_products') # Deleting model 'Property' db.delete_table('catalog_property') # Deleting model 'FilterStep' db.delete_table('catalog_filterstep') # Deleting model 'GroupsPropertiesRelation' db.delete_table('catalog_groupspropertiesrelation') # Deleting model 'ProductsPropertiesRelation' db.delete_table('catalog_productspropertiesrelation') # Deleting model 'PropertyOption' db.delete_table('catalog_propertyoption') # Deleting model 'ProductPropertyValue' db.delete_table('catalog_productpropertyvalue') # Deleting model 'Image' db.delete_table('catalog_image') # Deleting model 'File' db.delete_table('catalog_file') # Deleting model 'StaticBlock' db.delete_table('catalog_staticblock') # Deleting model 'DeliveryTime' db.delete_table('catalog_deliverytime') # Deleting model 'ProductAttachment' db.delete_table('catalog_productattachment') models = { 'auth.group': { 'Meta': {'object_name': 'Group'}, 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', 
[], {'unique': 'True', 'max_length': '80'}), 'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}) }, 'auth.permission': { 'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'}, 'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}), 'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '50'}) }, 'auth.user': { 'Meta': {'object_name': 'User'}, 'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}), 'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}), 'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}), 'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}), 'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}), 'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}), 'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}), 'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}), 'username': ('django.db.models.fields.CharField', [], 
{'unique': 'True', 'max_length': '30'}) }, 'catalog.category': { 'Meta': {'ordering': "('position',)", 'object_name': 'Category'}, 'active_formats': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'category_cols': ('django.db.models.fields.IntegerField', [], {'default': '3'}), 'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}), 'exclude_from_navigation': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'image': ('lfs.core.fields.thumbs.ImageWithThumbsField', [], {'blank': 'True', 'max_length': '100', 'null': 'True', 'sizes': '((60, 60), (100, 100), (200, 200), (400, 400))'}), 'level': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '1'}), 'meta_description': ('django.db.models.fields.TextField', [], {'blank': 'True'}), 'meta_keywords': ('django.db.models.fields.TextField', [], {'blank': 'True'}), 'meta_title': ('django.db.models.fields.CharField', [], {'default': "'<name>'", 'max_length': '100'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '50'}), 'parent': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalog.Category']", 'null': 'True', 'blank': 'True'}), 'position': ('django.db.models.fields.IntegerField', [], {'default': '1000'}), 'product_cols': ('django.db.models.fields.IntegerField', [], {'default': '3'}), 'product_rows': ('django.db.models.fields.IntegerField', [], {'default': '3'}), 'products': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'categories'", 'blank': 'True', 'to': "orm['catalog.Product']"}), 'short_description': ('django.db.models.fields.TextField', [], {'blank': 'True'}), 'show_all_products': ('django.db.models.fields.BooleanField', [], {'default': 'True'}), 'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '50'}), 'static_block': 
('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'categories'", 'null': 'True', 'to': "orm['catalog.StaticBlock']"}), 'template': ('django.db.models.fields.PositiveSmallIntegerField', [], {'max_length': '400', 'null': 'True', 'blank': 'True'}), 'uid': ('django.db.models.fields.CharField', [], {'default': "'0efb7bd1-afaf-4a05-8aa1-f0660388a53f'", 'unique': 'True', 'max_length': '50'}) }, 'catalog.deliverytime': { 'Meta': {'ordering': "('min',)", 'object_name': 'DeliveryTime'}, 'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'max': ('django.db.models.fields.FloatField', [], {}), 'min': ('django.db.models.fields.FloatField', [], {}), 'unit': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '2'}) }, 'catalog.file': { 'Meta': {'ordering': "('position',)", 'object_name': 'File'}, 'content_id': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True', 'blank': 'True'}), 'content_type': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'files'", 'null': 'True', 'to': "orm['contenttypes.ContentType']"}), 'description': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}), 'file': ('django.db.models.fields.files.FileField', [], {'max_length': '100'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'position': ('django.db.models.fields.SmallIntegerField', [], {'default': '999'}), 'slug': ('django.db.models.fields.SlugField', [], {'max_length': '50'}), 'title': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}) }, 'catalog.filterstep': { 'Meta': {'ordering': "['start']", 'object_name': 'FilterStep'}, 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'property': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'steps'", 'to': 
"orm['catalog.Property']"}), 'start': ('django.db.models.fields.FloatField', [], {}) }, 'catalog.groupspropertiesrelation': { 'Meta': {'ordering': "('position',)", 'unique_together': "(('group', 'property'),)", 'object_name': 'GroupsPropertiesRelation'}, 'group': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'groupproperties'", 'to': "orm['catalog.PropertyGroup']"}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'position': ('django.db.models.fields.IntegerField', [], {'default': '999'}), 'property': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalog.Property']"}) }, 'catalog.image': { 'Meta': {'ordering': "('position',)", 'object_name': 'Image'}, 'content_id': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True', 'blank': 'True'}), 'content_type': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'image'", 'null': 'True', 'to': "orm['contenttypes.ContentType']"}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'image': ('lfs.core.fields.thumbs.ImageWithThumbsField', [], {'blank': 'True', 'max_length': '100', 'null': 'True', 'sizes': '((60, 60), (100, 100), (200, 200), (300, 300), (400, 400))'}), 'position': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '999'}), 'title': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}) }, 'catalog.product': { 'Meta': {'ordering': "('name',)", 'object_name': 'Product'}, 'accessories': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'reverse_accessories'", 'to': "orm['catalog.Product']", 'through': "orm['catalog.ProductAccessories']", 'blank': 'True', 'symmetrical': 'False', 'null': 'True'}), 'active': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'active_accessories': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'active_base_price': 
('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '0'}), 'active_description': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'active_dimensions': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'active_for_sale': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '0'}), 'active_for_sale_price': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'active_images': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'active_meta_description': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'active_meta_keywords': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'active_meta_title': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'active_name': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'active_packing_unit': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '0'}), 'active_price': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'active_price_calculation': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'active_related_products': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'active_short_description': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'active_sku': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'active_static_block': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'base_price_amount': ('django.db.models.fields.FloatField', [], {'default': '0.0', 'null': 'True', 'blank': 'True'}), 'base_price_unit': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}), 'category_variant': ('django.db.models.fields.SmallIntegerField', [], {'null': 'True', 'blank': 'True'}), 'creation_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}), 'default_variant': 
('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalog.Product']", 'null': 'True', 'blank': 'True'}), 'deliverable': ('django.db.models.fields.BooleanField', [], {'default': 'True'}), 'delivery_time': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'products_delivery_time'", 'null': 'True', 'to': "orm['catalog.DeliveryTime']"}), 'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}), 'effective_price': ('django.db.models.fields.FloatField', [], {'blank': 'True'}), 'for_sale': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'for_sale_price': ('django.db.models.fields.FloatField', [], {'default': '0.0'}), 'height': ('django.db.models.fields.FloatField', [], {'default': '0.0'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'length': ('django.db.models.fields.FloatField', [], {'default': '0.0'}), 'manage_stock_amount': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'manual_delivery_time': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'manufacturer': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'products'", 'null': 'True', 'to': "orm['manufacturer.Manufacturer']"}), 'meta_description': ('django.db.models.fields.TextField', [], {'blank': 'True'}), 'meta_keywords': ('django.db.models.fields.TextField', [], {'blank': 'True'}), 'meta_title': ('django.db.models.fields.CharField', [], {'default': "'<name>'", 'max_length': '80', 'blank': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '80', 'blank': 'True'}), 'order_time': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'products_order_time'", 'null': 'True', 'to': "orm['catalog.DeliveryTime']"}), 'ordered_at': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}), 'packing_unit': ('django.db.models.fields.FloatField', [], {'null': 
'True', 'blank': 'True'}), 'packing_unit_unit': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}), 'parent': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'variants'", 'null': 'True', 'to': "orm['catalog.Product']"}), 'price': ('django.db.models.fields.FloatField', [], {'default': '0.0'}), 'price_calculation': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}), 'price_calculator': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}), 'price_unit': ('django.db.models.fields.CharField', [], {'max_length': '20', 'blank': 'True'}), 'related_products': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'reverse_related_products'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['catalog.Product']"}), 'short_description': ('django.db.models.fields.TextField', [], {'blank': 'True'}), 'sku': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}), 'sku_manufacturer': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}), 'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '80'}), 'static_block': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'products'", 'null': 'True', 'to': "orm['catalog.StaticBlock']"}), 'stock_amount': ('django.db.models.fields.FloatField', [], {'default': '0'}), 'sub_type': ('django.db.models.fields.CharField', [], {'default': "'0'", 'max_length': '10'}), 'supplier': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['supplier.Supplier']", 'null': 'True', 'blank': 'True'}), 'tax': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['tax.Tax']", 'null': 'True', 'blank': 'True'}), 'template': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True', 'blank': 'True'}), 'type_of_quantity_field': 
('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True', 'blank': 'True'}), 'uid': ('django.db.models.fields.CharField', [], {'default': "'c3c4f61d-7698-4881-b253-8886ea142650'", 'unique': 'True', 'max_length': '50'}), 'unit': ('django.db.models.fields.CharField', [], {'max_length': '20', 'blank': 'True'}), 'variant_position': ('django.db.models.fields.IntegerField', [], {'default': '999'}), 'variants_display_type': ('django.db.models.fields.IntegerField', [], {'default': '0'}), 'weight': ('django.db.models.fields.FloatField', [], {'default': '0.0'}), 'width': ('django.db.models.fields.FloatField', [], {'default': '0.0'}) }, 'catalog.productaccessories': { 'Meta': {'ordering': "('position',)", 'object_name': 'ProductAccessories'}, 'accessory': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'productaccessories_accessory'", 'to': "orm['catalog.Product']"}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'position': ('django.db.models.fields.IntegerField', [], {'default': '999'}), 'product': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'productaccessories_product'", 'to': "orm['catalog.Product']"}), 'quantity': ('django.db.models.fields.FloatField', [], {'default': '1'}) }, 'catalog.productattachment': { 'Meta': {'ordering': "('position',)", 'object_name': 'ProductAttachment'}, 'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}), 'file': ('django.db.models.fields.files.FileField', [], {'max_length': '100'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'position': ('django.db.models.fields.IntegerField', [], {'default': '1'}), 'product': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'attachments'", 'to': "orm['catalog.Product']"}), 'title': ('django.db.models.fields.CharField', [], {'max_length': '50'}) }, 'catalog.productpropertyvalue': { 'Meta': {'unique_together': "(('product', 'property', 'value', 
'type'),)", 'object_name': 'ProductPropertyValue'}, 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'parent_id': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}), 'product': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'property_values'", 'to': "orm['catalog.Product']"}), 'property': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'property_values'", 'to': "orm['catalog.Property']"}), 'type': ('django.db.models.fields.PositiveSmallIntegerField', [], {}), 'value': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}), 'value_as_float': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}) }, 'catalog.productspropertiesrelation': { 'Meta': {'ordering': "('position',)", 'unique_together': "(('product', 'property'),)", 'object_name': 'ProductsPropertiesRelation'}, 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'position': ('django.db.models.fields.IntegerField', [], {'default': '999'}), 'product': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'productsproperties'", 'to': "orm['catalog.Product']"}), 'property': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalog.Property']"}) }, 'catalog.property': { 'Meta': {'ordering': "['position']", 'object_name': 'Property'}, 'add_price': ('django.db.models.fields.BooleanField', [], {'default': 'True'}), 'configurable': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'decimal_places': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '0'}), 'display_no_results': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'display_on_product': ('django.db.models.fields.BooleanField', [], {'default': 'True'}), 'display_price': ('django.db.models.fields.BooleanField', [], {'default': 'True'}), 'filterable': ('django.db.models.fields.BooleanField', [], {'default': 
'True'}), 'groups': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'properties'", 'to': "orm['catalog.PropertyGroup']", 'through': "orm['catalog.GroupsPropertiesRelation']", 'blank': 'True', 'symmetrical': 'False', 'null': 'True'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'local': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}), 'position': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}), 'price': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}), 'products': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'properties'", 'to': "orm['catalog.Product']", 'through': "orm['catalog.ProductsPropertiesRelation']", 'blank': 'True', 'symmetrical': 'False', 'null': 'True'}), 'required': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'step': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}), 'step_type': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '1'}), 'title': ('django.db.models.fields.CharField', [], {'max_length': '100'}), 'type': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '2'}), 'uid': ('django.db.models.fields.CharField', [], {'default': "'7f8d5f20-eccf-47e7-80a0-3b316bcea88b'", 'unique': 'True', 'max_length': '50'}), 'unit': ('django.db.models.fields.CharField', [], {'max_length': '15', 'blank': 'True'}), 'unit_max': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}), 'unit_min': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}), 'unit_step': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}) }, 'catalog.propertygroup': { 'Meta': {'ordering': "('name',)", 'object_name': 'PropertyGroup'}, 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 
'True'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '50', 'blank': 'True'}), 'products': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'property_groups'", 'symmetrical': 'False', 'to': "orm['catalog.Product']"}) }, 'catalog.propertyoption': { 'Meta': {'ordering': "['position']", 'object_name': 'PropertyOption'}, 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}), 'position': ('django.db.models.fields.IntegerField', [], {'default': '99'}), 'price': ('django.db.models.fields.FloatField', [], {'default': '0.0', 'null': 'True', 'blank': 'True'}), 'property': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'options'", 'to': "orm['catalog.Property']"}), 'uid': ('django.db.models.fields.CharField', [], {'default': "'e4f4854e-4b74-49e0-a4b1-2d230e1ce28f'", 'unique': 'True', 'max_length': '50'}) }, 'catalog.staticblock': { 'Meta': {'ordering': "('position',)", 'object_name': 'StaticBlock'}, 'display_files': ('django.db.models.fields.BooleanField', [], {'default': 'True'}), 'html': ('django.db.models.fields.TextField', [], {'blank': 'True'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '30'}), 'position': ('django.db.models.fields.SmallIntegerField', [], {'default': '1000'}) }, 'contenttypes.contenttype': { 'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"}, 'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}) }, 'manufacturer.manufacturer': { 'Meta': {'ordering': "('name',)", 
'object_name': 'Manufacturer'}, 'active_formats': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'image': ('lfs.core.fields.thumbs.ImageWithThumbsField', [], {'blank': 'True', 'max_length': '100', 'null': 'True', 'sizes': '((60, 60), (100, 100), (200, 200), (400, 400))'}), 'meta_description': ('django.db.models.fields.TextField', [], {'blank': 'True'}), 'meta_keywords': ('django.db.models.fields.TextField', [], {'blank': 'True'}), 'meta_title': ('django.db.models.fields.CharField', [], {'default': "'<name>'", 'max_length': '100'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '50'}), 'position': ('django.db.models.fields.IntegerField', [], {'default': '1000'}), 'product_cols': ('django.db.models.fields.IntegerField', [], {'default': '3'}), 'product_rows': ('django.db.models.fields.IntegerField', [], {'default': '3'}), 'short_description': ('django.db.models.fields.TextField', [], {'blank': 'True'}), 'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '50'}) }, 'supplier.supplier': { 'Meta': {'object_name': 'Supplier'}, 'active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}), 'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '80'}), 'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"}) }, 'tax.tax': { 'Meta': {'object_name': 'Tax'}, 'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'rate': ('django.db.models.fields.FloatField', [], {'default': '0'}) } } complete_apps = ['catalog']
80.135385
263
0.610333
5,505
52,088
5.662852
0.049046
0.099314
0.17335
0.247642
0.856258
0.830949
0.803907
0.741932
0.67104
0.564798
0
0.015211
0.172017
52,088
650
264
80.135385
0.707615
0.03473
0
0.22898
0
0.001789
0.545446
0.349232
0
0
0
0
0
1
0.003578
false
0.001789
0.007156
0
0.017889
0
0
0
0
null
0
0
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
5
daeb3ecf37696bdade34503fdf94186ec32f41ff
158
py
Python
src/realtime/exceptions.py
olirice/realtime
dd1ee23d4079d1639a662f4b688c5baa21db7c83
[ "MIT" ]
1
2021-05-26T18:37:54.000Z
2021-05-26T18:37:54.000Z
src/realtime/exceptions.py
olirice/realtime
dd1ee23d4079d1639a662f4b688c5baa21db7c83
[ "MIT" ]
null
null
null
src/realtime/exceptions.py
olirice/realtime
dd1ee23d4079d1639a662f4b688c5baa21db7c83
[ "MIT" ]
null
null
null
class RealtimeException(Exception): pass class ParseFailureException(Exception): """Failure to parse a logical replication test_decoding message"""
22.571429
70
0.778481
16
158
7.625
0.875
0
0
0
0
0
0
0
0
0
0
0
0.14557
158
6
71
26.333333
0.903704
0.379747
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0.333333
0
0
0.666667
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
1
0
0
1
0
0
5
daec7cbaf48700d7ffafa67adf1dc7ca12f7bc1e
972
py
Python
SimG4Core/PrintGeomInfo/python/testTotemGeometryXML_cfi.py
ckamtsikis/cmssw
ea19fe642bb7537cbf58451dcf73aa5fd1b66250
[ "Apache-2.0" ]
852
2015-01-11T21:03:51.000Z
2022-03-25T21:14:00.000Z
SimG4Core/PrintGeomInfo/python/testTotemGeometryXML_cfi.py
ckamtsikis/cmssw
ea19fe642bb7537cbf58451dcf73aa5fd1b66250
[ "Apache-2.0" ]
30,371
2015-01-02T00:14:40.000Z
2022-03-31T23:26:05.000Z
SimG4Core/PrintGeomInfo/python/testTotemGeometryXML_cfi.py
ckamtsikis/cmssw
ea19fe642bb7537cbf58451dcf73aa5fd1b66250
[ "Apache-2.0" ]
3,240
2015-01-02T05:53:18.000Z
2022-03-31T17:24:21.000Z
import FWCore.ParameterSet.Config as cms XMLIdealGeometryESSource = cms.ESSource("XMLIdealGeometryESSource", geomXMLFiles = cms.vstring('Geometry/CMSCommonData/data/materials.xml', 'Geometry/CMSCommonData/data/rotations.xml', 'Geometry/CMSCommonData/data/extend/cmsextent.xml', 'Geometry/CMSCommonData/data/cms.xml', 'Geometry/CMSCommonData/data/cmsMother.xml', 'Geometry/ForwardCommonData/data/forward.xml', 'Geometry/ForwardCommonData/data/totemMaterials.xml', 'Geometry/ForwardCommonData/data/totemRotations.xml', 'Geometry/ForwardCommonData/data/totemt1.xml', 'Geometry/ForwardCommonData/data/totemt2.xml', 'Geometry/ForwardCommonData/data/ionpump.xml', 'Geometry/ForwardSimData/data/totemsensT1.xml', 'Geometry/ForwardSimData/data/totemsensT2.xml', 'Geometry/CMSCommonData/data/FieldParameters.xml'), rootNodeName = cms.string('cms:OCMS') )
48.6
75
0.720165
89
972
7.865169
0.370787
0.204286
0.214286
0.274286
0
0
0
0
0
0
0
0.004884
0.157407
972
19
76
51.157895
0.849817
0
0
0
0
0
0.66358
0.65535
0
0
0
0
0
1
0
false
0
0.055556
0
0.055556
0
0
0
0
null
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
1
1
null
0
0
0
0
0
0
0
0
0
0
0
0
0
5
dafb197e24fe4b996fed2a6c2cdbc15e45a46dae
254
py
Python
python/cuxfilter/charts/core/aggregate/__init__.py
rnyak/cuxfilter
626e45af3b8a0f2e37bc5cdbe6d2da618141f995
[ "Apache-2.0" ]
null
null
null
python/cuxfilter/charts/core/aggregate/__init__.py
rnyak/cuxfilter
626e45af3b8a0f2e37bc5cdbe6d2da618141f995
[ "Apache-2.0" ]
null
null
null
python/cuxfilter/charts/core/aggregate/__init__.py
rnyak/cuxfilter
626e45af3b8a0f2e37bc5cdbe6d2da618141f995
[ "Apache-2.0" ]
null
null
null
from .core_aggregate_bar import BaseBar from .core_aggregate_choropleth import BaseChoropleth from .core_aggregate_3d_choropleth import Base3dChoropleth from .core_aggregate_line import BaseLine from .core_datasize_indicator import BaseDataSizeIndicator
42.333333
58
0.901575
31
254
7.032258
0.483871
0.183486
0.311927
0
0
0
0
0
0
0
0
0.008547
0.07874
254
5
59
50.8
0.923077
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
0
0
0
null
0
1
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
5
9727d8a559b3a34f1b6e5b949e1895b2124635eb
226
py
Python
genesis/utils/pipeline/viz/__init__.py
leifdenby/genesis
3e4942eac74fb9c69d9b3feedfce5aa745e3bf9c
[ "BSD-3-Clause" ]
2
2019-12-18T15:39:06.000Z
2020-07-16T14:44:38.000Z
genesis/utils/pipeline/viz/__init__.py
leifdenby/genesis
3e4942eac74fb9c69d9b3feedfce5aa745e3bf9c
[ "BSD-3-Clause" ]
2
2019-12-26T11:23:11.000Z
2020-07-22T10:04:45.000Z
genesis/utils/pipeline/viz/__init__.py
leifdenby/genesis
3e4942eac74fb9c69d9b3feedfce5aa745e3bf9c
[ "BSD-3-Clause" ]
1
2019-12-18T16:48:39.000Z
2019-12-18T16:48:39.000Z
from . import bulk, comparison, objects # noqa from .all import CrossSection, HorizontalMeanProfile # noqa from .cumulants_2d import CumulantScalesProfile, CumulantSlices # noqa from .plot_utils import PlotJoinTask # noqa
45.2
71
0.80531
25
226
7.2
0.64
0.133333
0
0
0
0
0
0
0
0
0
0.005155
0.141593
226
4
72
56.5
0.92268
0.084071
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
5
97a2763aaab3784064df9d4550c3e77ed9b918c1
150
py
Python
tests/test_client/auth_backends.py
jpmallarino/django
659d2421c7adbbcd205604002d521d82d6b0b465
[ "BSD-3-Clause", "0BSD" ]
61,676
2015-01-01T00:05:13.000Z
2022-03-31T20:37:54.000Z
tests/test_client/auth_backends.py
jpmallarino/django
659d2421c7adbbcd205604002d521d82d6b0b465
[ "BSD-3-Clause", "0BSD" ]
8,884
2015-01-01T00:12:05.000Z
2022-03-31T19:53:11.000Z
tests/test_client/auth_backends.py
jpmallarino/django
659d2421c7adbbcd205604002d521d82d6b0b465
[ "BSD-3-Clause", "0BSD" ]
33,143
2015-01-01T02:04:52.000Z
2022-03-31T19:42:46.000Z
from django.contrib.auth.backends import ModelBackend class TestClientBackend(ModelBackend): pass class BackendWithoutGetUserMethod: pass
15
53
0.806667
14
150
8.642857
0.785714
0
0
0
0
0
0
0
0
0
0
0
0.146667
150
9
54
16.666667
0.945313
0
0
0.4
0
0
0
0
0
0
0
0
0
1
0
true
0.4
0.2
0
0.6
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
1
0
0
1
0
0
5
c102454e495dbfec7259efbd8d967ed3acab6c8c
70
py
Python
__init__.py
gtsueng/covid_figshare
760096ba41870109ce6015456137054443f922cd
[ "MIT" ]
null
null
null
__init__.py
gtsueng/covid_figshare
760096ba41870109ce6015456137054443f922cd
[ "MIT" ]
null
null
null
__init__.py
gtsueng/covid_figshare
760096ba41870109ce6015456137054443f922cd
[ "MIT" ]
1
2021-09-24T02:51:45.000Z
2021-09-24T02:51:45.000Z
from .dump import FigshareDumper from .upload import FigshareUploader
23.333333
36
0.857143
8
70
7.5
0.75
0
0
0
0
0
0
0
0
0
0
0
0.114286
70
2
37
35
0.967742
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
5
c18784ee3d3585b7a75c69c8a02685c4b6f63864
213
py
Python
code/database/base.py
ahillbs/minimum_scan_cover
e41718e5a8e0e3039d161800da70e56bd50a1b97
[ "MIT" ]
null
null
null
code/database/base.py
ahillbs/minimum_scan_cover
e41718e5a8e0e3039d161800da70e56bd50a1b97
[ "MIT" ]
null
null
null
code/database/base.py
ahillbs/minimum_scan_cover
e41718e5a8e0e3039d161800da70e56bd50a1b97
[ "MIT" ]
null
null
null
from sqlalchemy.ext.declarative import declarative_base, DeclarativeMeta from abc import ABCMeta class DeclarativeABCMeta(ABCMeta, DeclarativeMeta): pass Base = declarative_base(metaclass=DeclarativeABCMeta)
30.428571
72
0.849765
22
213
8.136364
0.590909
0.167598
0
0
0
0
0
0
0
0
0
0
0.098592
213
7
73
30.428571
0.932292
0
0
0
0
0
0
0
0
0
0
0
0
1
0
false
0.2
0.4
0
0.6
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
1
1
0
1
0
0
5
c19ee0a23c714fc93504e3c356406dc5d8afff70
488
py
Python
Hacker_Rank/Sock_Merchant.py
Jai-kishan/Practice-Questions
cf3a3eb5c2e930fcfcb762d822430060bb5deb2d
[ "Apache-2.0" ]
1
2019-05-04T09:21:00.000Z
2019-05-04T09:21:00.000Z
Hacker_Rank/Sock_Merchant.py
Jai-kishan/Practice-Questions
cf3a3eb5c2e930fcfcb762d822430060bb5deb2d
[ "Apache-2.0" ]
null
null
null
Hacker_Rank/Sock_Merchant.py
Jai-kishan/Practice-Questions
cf3a3eb5c2e930fcfcb762d822430060bb5deb2d
[ "Apache-2.0" ]
null
null
null
# n=int(input()) # n2=input() # ar=str(n2) # ar=ar.split() # pair={} # total=0 # for i in ar: # if i not in pair: # pair[i]=1 # else: # pair[i]+=1 # for j in pair: # store=pair[j]//2 # total+=store # print (total) n=int(input("number of socks :")) ar=input("colors of socks :").split() pair={} total=0 for i in ar: if i not in pair: pair[i]=1 else: pair[i]+=1 for j in pair: store=pair[j]//2 total+=store print (total)
14.352941
37
0.518443
85
488
2.976471
0.294118
0.094862
0.094862
0.118577
0.727273
0.727273
0.727273
0.727273
0.727273
0.727273
0
0.028736
0.286885
488
33
38
14.787879
0.698276
0.440574
0
0
0
0
0.132813
0
0
0
0
0
0
1
0
false
0
0
0
0
0.076923
0
0
0
null
0
0
0
0
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
5
c1a5742649e2b5470555da81b9531ef14d6b8894
112
py
Python
profiler/torchmodules/torchsummary/__init__.py
vibhatha/pipedream
af6b811f5d01a68e9eb91065e5242fc1a075f279
[ "MIT" ]
null
null
null
profiler/torchmodules/torchsummary/__init__.py
vibhatha/pipedream
af6b811f5d01a68e9eb91065e5242fc1a075f279
[ "MIT" ]
null
null
null
profiler/torchmodules/torchsummary/__init__.py
vibhatha/pipedream
af6b811f5d01a68e9eb91065e5242fc1a075f279
[ "MIT" ]
null
null
null
# Copyright (c) Microsoft Corporation. # Licensed under the MIT license. from .torchsummary import summary
22.4
39
0.758929
13
112
6.538462
1
0
0
0
0
0
0
0
0
0
0
0
0.178571
112
4
40
28
0.923913
0.607143
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
5
c1bf224104871dddff3d08d37c85dd295c67d5f7
287
py
Python
test/itest_close.py
tnakagawa/lit
57c63ed5cc9584bff083047c8fc0b5be1c4fde2f
[ "MIT" ]
560
2016-11-16T02:10:02.000Z
2022-03-26T16:28:58.000Z
test/itest_close.py
tnakagawa/lit
57c63ed5cc9584bff083047c8fc0b5be1c4fde2f
[ "MIT" ]
374
2016-11-29T21:42:49.000Z
2021-02-16T13:30:44.000Z
test/itest_close.py
tnakagawa/lit
57c63ed5cc9584bff083047c8fc0b5be1c4fde2f
[ "MIT" ]
126
2016-12-15T21:26:19.000Z
2022-02-22T21:23:03.000Z
import testlib import test_combinators def forward(env): lit1 = env.lits[0] lit2 = env.lits[1] test_combinators.run_close_test(env, lit1, lit2, lit1) def reverse(env): lit1 = env.lits[0] lit2 = env.lits[1] test_combinators.run_close_test(env, lit1, lit2, lit1)
22.076923
58
0.69338
45
287
4.266667
0.333333
0.145833
0.104167
0.145833
0.71875
0.71875
0.71875
0.71875
0.71875
0.71875
0
0.060086
0.188153
287
12
59
23.916667
0.763949
0
0
0.6
0
0
0
0
0
0
0
0
0
1
0.2
false
0
0.2
0
0.4
0
0
0
0
null
0
0
0
0
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
5
c1bfd1397c392c8bea50de3701d4ced1bb266f6b
128
py
Python
examples/research_projects/movement-pruning/emmental/modules/__init__.py
liminghao1630/transformers
207594be81b8e5a8589c8b11c3b236924555d806
[ "Apache-2.0" ]
50,404
2019-09-26T09:55:55.000Z
2022-03-31T23:07:49.000Z
examples/research_projects/movement-pruning/emmental/modules/__init__.py
liminghao1630/transformers
207594be81b8e5a8589c8b11c3b236924555d806
[ "Apache-2.0" ]
13,179
2019-09-26T10:10:57.000Z
2022-03-31T23:17:08.000Z
examples/research_projects/movement-pruning/emmental/modules/__init__.py
liminghao1630/transformers
207594be81b8e5a8589c8b11c3b236924555d806
[ "Apache-2.0" ]
13,337
2019-09-26T10:49:38.000Z
2022-03-31T23:06:17.000Z
# flake8: noqa from .binarizer import MagnitudeBinarizer, ThresholdBinarizer, TopKBinarizer from .masked_nn import MaskedLinear
32
76
0.851563
13
128
8.307692
0.846154
0
0
0
0
0
0
0
0
0
0
0.008696
0.101563
128
3
77
42.666667
0.930435
0.09375
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
5
c1fccd74394f42a186e46da08cd699c24f6bee25
294
py
Python
ex074corrigido.py
jgabriel1607/Python
d6b75519eb8f0d4fef944e1690ba8914d81a5d16
[ "MIT" ]
null
null
null
ex074corrigido.py
jgabriel1607/Python
d6b75519eb8f0d4fef944e1690ba8914d81a5d16
[ "MIT" ]
null
null
null
ex074corrigido.py
jgabriel1607/Python
d6b75519eb8f0d4fef944e1690ba8914d81a5d16
[ "MIT" ]
null
null
null
from random import randint numeros = (randint(0, 9), randint(0, 9), randint(0, 9), randint(0, 9), randint(0, 9)) print('Os valores digitados foram: ', end='') for n in numeros: print(f'{n}', end=' ') print(f'\nO maior número foi {max(numeros)}') print(f'O menor número foi {min(numeros)}')
36.75
85
0.653061
50
294
3.84
0.5
0.208333
0.234375
0.333333
0.234375
0.234375
0.234375
0.234375
0.234375
0.234375
0
0.039841
0.146259
294
7
86
42
0.7251
0
0
0
0
0
0.340136
0
0
0
0
0
0
1
0
false
0
0.142857
0
0.142857
0.571429
0
0
0
null
1
1
1
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
1
0
5
a9e86b1aea29c51c0a32f2f6490231ffa6b05fc4
26
py
Python
news/__init__.py
Untesler/New-s
bdc7f98e6abe783b3b304c351204a13432b3d287
[ "Apache-2.0" ]
null
null
null
news/__init__.py
Untesler/New-s
bdc7f98e6abe783b3b304c351204a13432b3d287
[ "Apache-2.0" ]
4
2020-03-16T05:18:42.000Z
2021-12-13T20:40:36.000Z
news/__init__.py
Untesler/New-s
bdc7f98e6abe783b3b304c351204a13432b3d287
[ "Apache-2.0" ]
1
2020-05-26T16:01:58.000Z
2020-05-26T16:01:58.000Z
from news.News import News
26
26
0.846154
5
26
4.4
0.6
0
0
0
0
0
0
0
0
0
0
0
0.115385
26
1
26
26
0.956522
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
1
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
0
0
0
5
a9ec3695c2438351bf0ee3df57ccd60f961a61ef
219
py
Python
main/views.py
falcondai/flask-html5boilerplate
e01c75ea63cc6c63145de65d102b8d4b6fbada2a
[ "MIT" ]
1
2018-01-08T03:19:34.000Z
2018-01-08T03:19:34.000Z
main/views.py
falcondai/flask-html5boilerplate
e01c75ea63cc6c63145de65d102b8d4b6fbada2a
[ "MIT" ]
null
null
null
main/views.py
falcondai/flask-html5boilerplate
e01c75ea63cc6c63145de65d102b8d4b6fbada2a
[ "MIT" ]
null
null
null
from flask import render_template from main import app @app.route('/') def index(): return render_template('index.html') @app.errorhandler(404) def page_not_found(e): return render_template('404.html'), 404
18.25
43
0.730594
32
219
4.84375
0.5625
0.270968
0.258065
0
0
0
0
0
0
0
0
0.047872
0.141553
219
11
44
19.909091
0.776596
0
0
0
0
0
0.086758
0
0
0
0
0
0
1
0.25
false
0
0.25
0.25
0.75
0
0
0
0
null
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
0
0
1
1
0
0
5
e703ea119314f5f8e08430f12a06fae2cd2cd4de
11
py
Python
data/studio21_generated/introductory/4718/starter_code.py
vijaykumawat256/Prompt-Summarization
614f5911e2acd2933440d909de2b4f86653dc214
[ "Apache-2.0" ]
null
null
null
data/studio21_generated/introductory/4718/starter_code.py
vijaykumawat256/Prompt-Summarization
614f5911e2acd2933440d909de2b4f86653dc214
[ "Apache-2.0" ]
null
null
null
data/studio21_generated/introductory/4718/starter_code.py
vijaykumawat256/Prompt-Summarization
614f5911e2acd2933440d909de2b4f86653dc214
[ "Apache-2.0" ]
null
null
null
def x(n):
5.5
9
0.454545
3
11
1.666667
1
0
0
0
0
0
0
0
0
0
0
0
0.272727
11
2
10
5.5
0.625
0
0
0
0
0
0
0
0
0
0
0
0
0
null
null
0
0
null
null
0
1
1
1
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
1
0
0
0
0
0
0
0
0
5
e782afe078eece835704068bf79ecfc7fa793182
26,845
py
Python
test/bnn_inference_tests.py
swagnercarena/ovejero
4f5d518bfa0806f86c7a7d187712e0e1362dc26a
[ "MIT" ]
4
2020-10-28T01:10:55.000Z
2020-10-28T11:44:32.000Z
test/bnn_inference_tests.py
swagnercarena/ovejero
4f5d518bfa0806f86c7a7d187712e0e1362dc26a
[ "MIT" ]
2
2020-10-28T04:28:17.000Z
2020-10-28T04:28:39.000Z
test/bnn_inference_tests.py
swagnercarena/ovejero
4f5d518bfa0806f86c7a7d187712e0e1362dc26a
[ "MIT" ]
2
2021-04-21T01:54:52.000Z
2021-06-18T09:35:34.000Z
import unittest, os, json from ovejero import bnn_inference, data_tools, bnn_alexnet, model_trainer import numpy as np import pandas as pd import tensorflow as tf import matplotlib.pyplot as plt import gc # Eliminate TF warning in tests os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2' class BNNInferenceTest(unittest.TestCase): def setUp(self): # Open up the config file. self.root_path = os.path.dirname(os.path.abspath(__file__))+'/test_data/' with open(self.root_path+'test.json','r') as json_f: self.cfg = json.load(json_f) self.batch_size = self.cfg['training_params']['batch_size'] self.normalized_param_path = self.root_path + 'normed_metadata.csv' self.normalization_constants_path = self.root_path + 'norm.csv' self.lens_params_path = self.root_path + 'metadata.csv' self.lens_params = ['external_shear_gamma_ext','external_shear_psi_ext', 'lens_mass_center_x','lens_mass_center_y', 'lens_mass_e1','lens_mass_e2', 'lens_mass_gamma','lens_mass_theta_E'] self.num_params = len(self.lens_params) self.cfg['dataset_params']['normalization_constants_path'] = 'norm.csv' self.cfg['training_params']['final_params'] = self.lens_params self.cfg['training_params']['bnn_type'] = 'diag' self.tf_record_path = self.root_path+self.cfg['validation_params'][ 'tf_record_path'] # Simulate training self.final_params = ['external_shear_g1','external_shear_g2', 'lens_mass_center_x','lens_mass_center_y','lens_mass_e1', 'lens_mass_e2','lens_mass_gamma','lens_mass_theta_E_log'] model_trainer.prepare_tf_record(self.cfg, self.root_path, self.tf_record_path,self.final_params,'train') os.remove(self.tf_record_path) np.random.seed(2) tf.random.set_seed(2) def tearDown(self): # Cleanup for memory self.cfg = None tf.keras.backend.clear_session() gc.collect() def test_fix_flip_pairs(self): # Check that fix_flip_pairs always selects the best possible configuration # to return. 
self.infer_class = bnn_inference.InferenceClass(self.cfg, lite_class=True) # Delete the tf record file made during the initialization of the # inference class. os.remove(self.root_path+'tf_record_test_val') os.remove(self.root_path+'new_metadata.csv') # Get rid of the normalization file. os.remove(self.normalization_constants_path) # Get the set of all flip pairs we want to check flip_pairs = self.cfg['training_params']['flip_pairs'] flip_set = set() for flip_pair in flip_pairs: flip_set.update(flip_pair) y_test = np.ones((self.batch_size,self.num_params)) predict_samps = np.ones((10,self.batch_size,self.num_params)) pi = 0 for flip_index in flip_set: predict_samps[pi,:,flip_index] = -1 # Flip pairs of points. self.infer_class.fix_flip_pairs(predict_samps,y_test,self.batch_size) self.assertEqual(np.sum(np.abs(predict_samps-y_test)),0) dont_flip_set = set(range(self.num_params)) dont_flip_set=dont_flip_set.difference(flip_set) pi = 0 for flip_index in dont_flip_set: predict_samps[pi,:,flip_index] = -1 # Flip pairs of points. self.infer_class.fix_flip_pairs(predict_samps,y_test,self.batch_size) self.assertEqual(np.sum(np.abs(predict_samps-y_test)), 2*self.batch_size*len(dont_flip_set)) def test_undo_param_norm(self): # Test if normalizing the lens parameters works correctly. self.infer_class = bnn_inference.InferenceClass(self.cfg, lite_class=True) # Delete the tf record file made during the initialization of the # inference class. os.remove(self.root_path+'tf_record_test_val') os.remove(self.root_path+'new_metadata.csv') # Get rid of the normalization file. 
os.remove(self.normalization_constants_path) train_or_test='train' data_tools.normalize_lens_parameters(self.lens_params, self.lens_params_path,self.normalized_param_path, self.normalization_constants_path,train_or_test=train_or_test) lens_params_csv = pd.read_csv(self.lens_params_path, index_col=None) norm_params_csv = pd.read_csv(self.normalized_param_path, index_col=None) # Pull lens parameters out of the csv files. lens_params_numpy = [] norms_params_numpy = [] for lens_param in self.lens_params: lens_params_numpy.append(lens_params_csv[lens_param]) norms_params_numpy.append(norm_params_csv[lens_param]) lens_params_numpy = np.array(lens_params_numpy).T norms_params_numpy = np.array(norms_params_numpy).T predict_samps = np.tile(norms_params_numpy,(3,1,1)) # TODO: write a good test for al_samps! al_samps = np.ones((3,3,self.num_params,self.num_params)) # Try to denormalize everything self.infer_class.undo_param_norm(predict_samps,norms_params_numpy, al_samps) self.assertAlmostEqual(np.mean(np.abs(norms_params_numpy- lens_params_numpy)),0) self.assertAlmostEqual(np.mean(np.abs(predict_samps- lens_params_numpy)),0) # Clean up the file now that we're done os.remove(self.normalized_param_path) os.remove(self.normalization_constants_path) def test_gen_samples_diag(self): self.infer_class = bnn_inference.InferenceClass(self.cfg) # Delete the tf record file made during the initialization of the # inference class. os.remove(self.root_path+'tf_record_test_val') os.remove(self.root_path+'new_metadata.csv') # Get rid of the normalization file. os.remove(self.normalization_constants_path) # First we have to make a fake model whose statistics are very well # defined. class ToyModel(): def __init__(self,mean,covariance,batch_size,al_std): # We want to make sure our performance is consistent for a # test np.random.seed(4) self.mean=mean self.covariance = covariance self.batch_size = batch_size self.al_std = al_std def predict(self,image): # We won't actually be using the image. 
We just want it for # testing. return tf.constant(np.concatenate([np.random.multivariate_normal( self.mean,self.covariance,self.batch_size),np.zeros(( self.batch_size,len(self.mean)))+self.al_std],axis=-1), tf.float32) # Start with a simple covariance matrix example. mean = np.ones(self.num_params)*2 covariance = np.diag(np.ones(self.num_params)) al_std = -1000 diag_model = ToyModel(mean,covariance,self.batch_size,al_std) # We don't want any flipping going on self.infer_class.flip_mat_list = [np.diag(np.ones(self.num_params))] # Create tf record. This won't be used, but it has to be there for # the function to be able to pull some images. # Make fake norms data fake_norms = {} for lens_param in self.lens_params: fake_norms[lens_param] = np.array([0.0,1.0]) fake_norms = pd.DataFrame(data=fake_norms) fake_norms.to_csv(self.normalization_constants_path,index=False) data_tools.generate_tf_record(self.root_path,self.lens_params, self.lens_params_path,self.tf_record_path) # Replace the real model with our fake model and generate samples self.infer_class.model = diag_model self.infer_class.gen_samples(10000) # Make sure these samples follow the required statistics. 
self.assertAlmostEqual(np.mean(np.abs(self.infer_class.y_pred-mean)),0, places=1) self.assertAlmostEqual(np.mean(np.abs(self.infer_class.y_std-np.diag( covariance))),0,places=1) self.assertAlmostEqual(np.mean(np.abs(self.infer_class.y_cov-covariance)), 0,places=1) self.assertTupleEqual(self.infer_class.al_cov.shape,(self.batch_size, self.num_params,self.num_params)) self.assertAlmostEqual(np.mean(np.abs(self.infer_class.al_cov)),0) # Repeat this process again with a new covariance matrix and means mean = np.random.rand(self.num_params) covariance = np.random.rand(self.num_params,self.num_params) al_std = 0 # Make sure covariance is positive semidefinite covariance = np.dot(covariance,covariance.T) diag_model = ToyModel(mean,covariance,self.batch_size,al_std) self.infer_class.model = diag_model self.infer_class.gen_samples(10000) self.assertAlmostEqual(np.mean(np.abs(self.infer_class.y_pred-mean)),0, places=1) # Covariance is the sum of two random variables covariance = covariance+np.eye(self.num_params) self.assertAlmostEqual(np.mean(np.abs(self.infer_class.y_std-np.sqrt( np.diag(covariance)))),0,places=1) self.assertAlmostEqual(np.mean(np.abs(self.infer_class.y_cov-covariance)), 0,places=1) self.assertTupleEqual(self.infer_class.al_cov.shape,(self.batch_size, self.num_params,self.num_params)) self.assertAlmostEqual(np.mean(np.abs(self.infer_class.al_cov- np.eye(self.num_params))),0) # Make sure our test probes things well. 
wrong_mean = np.random.randn(self.num_params) wrong_covariance = np.random.rand(self.num_params,self.num_params) al_std = -1000 # Make sure covariance is positive semidefinite wrong_covariance = np.dot(wrong_covariance,wrong_covariance.T) diag_model = ToyModel(wrong_mean,wrong_covariance,self.batch_size, al_std) self.infer_class.model = diag_model self.infer_class.gen_samples(10000) self.assertGreater(np.mean(np.abs(self.infer_class.y_pred-mean)),0.05) self.assertGreater(np.mean(np.abs(self.infer_class.y_std-np.sqrt( np.diag(covariance)))),0.05) self.assertGreater(np.mean(np.abs(self.infer_class.y_cov-covariance)), 0.05) self.assertTupleEqual(self.infer_class.al_cov.shape,(self.batch_size, self.num_params,self.num_params)) self.assertAlmostEqual(np.mean(np.abs(self.infer_class.al_cov)),0) # Clean up the files we generated os.remove(self.normalization_constants_path) os.remove(self.tf_record_path) def test_gen_samples_full(self): self.infer_class = bnn_inference.InferenceClass(self.cfg) # Delete the tf record file made during the initialization of the # inference class. os.remove(self.root_path+'tf_record_test_val') os.remove(self.root_path+'new_metadata.csv') # Get rid of the normalization file. os.remove(self.normalization_constants_path) # First we have to make a fake model whose statistics are very well # defined. class ToyModel(): def __init__(self,mean,covariance,batch_size,L_elements): # We want to make sure our performance is consistent for a # test np.random.seed(6) self.mean=mean self.num_params = len(mean) self.covariance = covariance self.batch_size = batch_size self.L_elements = L_elements self.L_elements_len = int(self.num_params*(self.num_params+1)/2) def predict(self,image): # We won't actually be using the image. We just want it for # testing. 
return tf.constant(np.concatenate([np.zeros(( self.batch_size,self.num_params))+self.mean,np.zeros(( self.batch_size,self.L_elements_len))+self.L_elements], axis=-1),tf.float32) # Start with a simple covariance matrix example. mean = np.ones(self.num_params)*2 covariance = np.diag(np.ones(self.num_params)*0.000001) L_elements = np.array([np.log(1)]*self.num_params+[0]*int( self.num_params*(self.num_params-1)/2)) full_model = ToyModel(mean,covariance,self.batch_size,L_elements) # We don't want any flipping going on self.infer_class.flip_mat_list = [np.diag(np.ones(self.num_params))] # Create tf record. This won't be used, but it has to be there for # the function to be able to pull some images. # Make fake norms data fake_norms = {} for lens_param in self.lens_params: fake_norms[lens_param] = np.array([0.0,1.0]) fake_norms = pd.DataFrame(data=fake_norms) fake_norms.to_csv(self.normalization_constants_path,index=False) data_tools.generate_tf_record(self.root_path,self.lens_params, self.lens_params_path,self.tf_record_path) # Replace the real model with our fake model and generate samples self.infer_class.model = full_model self.infer_class.bnn_type = 'full' # self.infer_class.gen_samples(1000) # # Make sure these samples follow the required statistics. 
# self.assertAlmostEqual(np.mean(np.abs(self.infer_class.y_pred-mean)), # 0,places=1) # self.assertAlmostEqual(np.mean(np.abs(self.infer_class.y_std-1)),0, # places=1) # self.assertAlmostEqual(np.mean(np.abs(self.infer_class.y_cov-np.eye( # self.num_params))),0,places=1) # self.assertTupleEqual(self.infer_class.al_cov.shape,(self.batch_size, # self.num_params,self.num_params)) # self.assertAlmostEqual(np.mean(np.abs(self.infer_class.al_cov-np.eye( # self.num_params))),0) mean = np.zeros(self.num_params) loss_class = bnn_alexnet.LensingLossFunctions([],self.num_params) L_elements = np.ones((1,len(L_elements)))*0.2 full_model = ToyModel(mean,covariance,self.batch_size,L_elements) self.infer_class.model = full_model self.infer_class.gen_samples(1000) # Calculate the corresponding covariance matrix _, _, L_mat = loss_class.construct_precision_matrix( tf.constant(L_elements)) L_mat = np.linalg.inv(L_mat.numpy()[0].T) cov_mat = np.dot(L_mat,L_mat.T) # Make sure these samples follow the required statistics. self.assertAlmostEqual(np.mean(np.abs(self.infer_class.y_pred-mean)),0, places=1) self.assertAlmostEqual(np.mean(np.abs(self.infer_class.y_std-np.sqrt( np.diag(cov_mat)))),0,places=1) self.assertAlmostEqual(np.mean(np.abs((self.infer_class.y_cov-cov_mat))), 0,places=1) self.assertTupleEqual(self.infer_class.al_cov.shape,(self.batch_size, self.num_params,self.num_params)) self.assertAlmostEqual(np.mean(np.abs(self.infer_class.al_cov-cov_mat)), 0) # Clean up the files we generated os.remove(self.normalization_constants_path) os.remove(self.tf_record_path) def test_gen_samples_gmm(self): self.infer_class = bnn_inference.InferenceClass(self.cfg) # Delete the tf record file made during the initialization of the # inference class. os.remove(self.root_path+'tf_record_test_val') os.remove(self.root_path+'new_metadata.csv') # Get rid of the normalization file. 
os.remove(self.normalization_constants_path) # First we have to make a fake model whose statistics are very well # defined. class ToyModel(): def __init__(self,mean1,covariance1,mean2,covariance2,batch_size, L_elements1,L_elements2,pi_logit): # We want to make sure our performance is consistent for a # test np.random.seed(6) self.mean1=mean1 self.mean2=mean2 self.covariance1=covariance1 self.covariance2=covariance2 self.num_params = len(mean1) self.batch_size = batch_size self.L_elements1 = L_elements1 self.L_elements2 = L_elements2 self.pi_logit = pi_logit self.L_elements_len = int(self.num_params*(self.num_params+1)/2) def predict(self,image): # We won't actually be using the image. We just want it for # testing. return tf.constant(np.concatenate([ np.random.multivariate_normal(self.mean1,self.covariance1, self.batch_size), np.zeros(( self.batch_size,self.L_elements_len))+self.L_elements1, np.random.multivariate_normal(self.mean2,self.covariance2, self.batch_size), np.zeros(( self.batch_size,self.L_elements_len))+self.L_elements2, np.zeros( (self.batch_size,1))+self.pi_logit],axis=-1),tf.float32) # Start with a simple covariance matrix example where both gmms # are the same. This is just checking the base case. mean1 = np.ones(self.num_params)*2 mean2 = np.ones(self.num_params)*2 covariance1 = np.diag(np.ones(self.num_params)*0.000001) covariance2 = np.diag(np.ones(self.num_params)*0.000001) L_elements1 = np.array([np.log(1)]*self.num_params+[0]*int( self.num_params*(self.num_params-1)/2)) L_elements2 = np.array([np.log(1)]*self.num_params+[0]*int( self.num_params*(self.num_params-1)/2)) pi_logit = 0 gmm_model = ToyModel(mean1,covariance1,mean2,covariance2, self.batch_size,L_elements1,L_elements2,pi_logit) # We don't want any flipping going on self.infer_class.flip_mat_list = [np.diag(np.ones(self.num_params))] # Create tf record. This won't be used, but it has to be there for # the function to be able to pull some images. 
# Make fake norms data fake_norms = {} for lens_param in self.lens_params: fake_norms[lens_param] = np.array([0.0,1.0]) fake_norms = pd.DataFrame(data=fake_norms) fake_norms.to_csv(self.normalization_constants_path,index=False) data_tools.generate_tf_record(self.root_path,self.lens_params, self.lens_params_path,self.tf_record_path) # Replace the real model with our fake model and generate samples self.infer_class.model = gmm_model self.infer_class.bnn_type = 'gmm' self.infer_class.gen_samples(1000) # Make sure these samples follow the required statistics. self.assertAlmostEqual(np.mean(np.abs(self.infer_class.y_pred-mean1)), 0,places=1) self.assertAlmostEqual(np.mean(np.abs(self.infer_class.y_std-1)),0, places=1) self.assertAlmostEqual(np.mean(np.abs(self.infer_class.y_cov-np.eye( self.num_params))),0,places=1) self.assertTupleEqual(self.infer_class.al_cov.shape,(self.batch_size, self.num_params,self.num_params)) self.assertAlmostEqual(np.mean(np.abs(self.infer_class.al_cov-np.eye( self.num_params))),0) # Now we try and example where all the samples should be drawn from one # of the two gmms because of the logit. mean1 = np.ones(self.num_params)*2 mean2 = np.ones(self.num_params)*200 covariance1 = np.diag(np.ones(self.num_params)*0.000001) covariance2 = np.diag(np.ones(self.num_params)*0.000001) L_elements1 = np.array([np.log(1)]*self.num_params+[0]*int( self.num_params*(self.num_params-1)/2)) L_elements2 = np.array([np.log(10)]*self.num_params+[0]*int( self.num_params*(self.num_params-1)/2)) pi_logit = np.log(0.99999)-np.log(0.00001) gmm_model = ToyModel(mean1,covariance1,mean2,covariance2, self.batch_size,L_elements1,L_elements2,pi_logit) self.infer_class.model = gmm_model self.infer_class.gen_samples(1000) # Make sure these samples follow the required statistics. 
self.assertAlmostEqual(np.mean(np.abs(self.infer_class.y_pred-mean1)), 0,places=1) self.assertAlmostEqual(np.mean(np.abs(self.infer_class.y_std-1)),0, places=1) self.assertAlmostEqual(np.mean(np.abs(self.infer_class.y_cov-np.eye( self.num_params))),0,places=1) self.assertTupleEqual(self.infer_class.al_cov.shape,(self.batch_size, self.num_params,self.num_params)) self.assertAlmostEqual(np.mean(np.abs(self.infer_class.al_cov-np.eye( self.num_params))),0) # Now test that it takes a combination of them correctly mean1 = np.ones(self.num_params)*2 mean2 = np.ones(self.num_params)*6 covariance1 = np.diag(np.ones(self.num_params)*0.000001) covariance2 = np.diag(np.ones(self.num_params)*0.000001) L_elements1 = np.array([np.log(10)]*self.num_params+[0]*int( self.num_params*(self.num_params-1)/2)) L_elements2 = np.array([np.log(1)]*self.num_params+[0]*int( self.num_params*(self.num_params-1)/2)) pi_logit = np.log(0.0001)-np.log(0.9999) gmm_model = ToyModel(mean1,covariance1,mean2,covariance2, self.batch_size,L_elements1,L_elements2,pi_logit) self.infer_class.model = gmm_model self.infer_class.gen_samples(2000) # Make sure these samples follow the required statistics. self.assertAlmostEqual(np.mean(np.abs(self.infer_class.y_pred-4)), 0,places=1) self.assertAlmostEqual(np.mean(np.abs(self.infer_class.y_std-np.sqrt(5))), 0,places=0) self.assertTupleEqual(self.infer_class.al_cov.shape,(self.batch_size, self.num_params,self.num_params)) # The first Gaussian is always favored in the current parameterization, # so we can't test the scenario where the second is favored. # Clean up the files we generated os.remove(self.normalization_constants_path) os.remove(self.tf_record_path) def test_gen_samples_save(self): self.infer_class = bnn_inference.InferenceClass(self.cfg) # Delete the tf record file made during the initialization of the # inference class. os.remove(self.root_path+'tf_record_test_val') os.remove(self.root_path+'new_metadata.csv') # Get rid of the normalization file. 
os.remove(self.normalization_constants_path) # First we have to make a fake model whose statistics are very well # defined. class ToyModel(): def __init__(self,mean,covariance,batch_size,al_std): # We want to make sure our performance is consistent for a # test np.random.seed(4) self.mean=mean self.covariance = covariance self.batch_size = batch_size self.al_std = al_std def predict(self,image): # We won't actually be using the image. We just want it for # testing. return tf.constant(np.concatenate([np.random.multivariate_normal( self.mean,self.covariance,self.batch_size),np.zeros(( self.batch_size,len(self.mean)))+self.al_std],axis=-1), tf.float32) # Start with a simple covariance matrix example. mean = np.ones(self.num_params)*2 covariance = np.diag(np.ones(self.num_params)) al_std = -1000 diag_model = ToyModel(mean,covariance,self.batch_size,al_std) # We don't want any flipping going on self.infer_class.flip_mat_list = [np.diag(np.ones(self.num_params))] # Create tf record. This won't be used, but it has to be there for # the function to be able to pull some images. 
# Make fake norms data fake_norms = {} for lens_param in self.lens_params: fake_norms[lens_param] = np.array([0.0,1.0]) fake_norms = pd.DataFrame(data=fake_norms) fake_norms.to_csv(self.normalization_constants_path,index=False) data_tools.generate_tf_record(self.root_path,self.lens_params, self.lens_params_path,self.tf_record_path) # Replace the real model with our fake model and generate samples self.infer_class.model = diag_model # Provide a save path to then check that we get the same data save_path = self.root_path + 'test_gen_samps/' self.infer_class.gen_samples(10000,save_path) pred_1 = np.copy(self.infer_class.predict_samps) # Generate again and make sure they are equivalent self.infer_class.gen_samples(10000,save_path) np.testing.assert_almost_equal(pred_1,self.infer_class.predict_samps) # Test that none of the plotting routines break self.infer_class.gen_coverage_plots(block=False) plt.close('all') self.infer_class.report_stats() self.infer_class.plot_posterior_contours(1,block=False) plt.close('all') plt.close('all') self.infer_class.comp_al_ep_unc(block=False) plt.close('all') self.infer_class.comp_al_ep_unc(block=False,norm_diagonal=False) plt.close('all') self.infer_class.plot_calibration(block=False,title='test') plt.close('all') # Clean up the files we generated os.remove(self.normalization_constants_path) os.remove(self.tf_record_path) os.remove(save_path+'pred.npy') os.remove(save_path+'al_cov.npy') os.remove(save_path+'images.npy') os.remove(save_path+'y_test.npy') os.rmdir(save_path) def test_calc_p_dlt(self): self.infer_class = bnn_inference.InferenceClass(self.cfg, lite_class=True) # Delete the tf record file made during the initialization of the # inference class. os.remove(self.root_path+'tf_record_test_val') os.remove(self.root_path+'new_metadata.csv') # Get rid of the normalization file. 
os.remove(self.normalization_constants_path) # Test that the calc_p_dlt returns the correct percentages for some # toy examples # Check a simple case size = int(1e6) self.infer_class.predict_samps = np.random.normal(size=size*2).reshape( (size//10,10,2)) self.infer_class.predict_samps[:,:,1]=0 self.infer_class.y_pred = np.mean(self.infer_class.predict_samps,axis=0) self.infer_class.y_test = np.array([[1,2,3,4,5,6,7,8,9,10], [0,0,0,0,0,0,0,0,0,0]],dtype=np.float32).T self.infer_class.calc_p_dlt(cov_emp=np.diag(np.ones(2))) percentages = [0.682689,0.954499,0.997300,0.999936,0.999999]+[1.0]*5 for p_i in range(len(percentages)): self.assertAlmostEqual(percentages[p_i],self.infer_class.p_dlt[p_i], places=2) # Shift the mean size = int(1e6) self.infer_class.predict_samps = np.random.normal(loc=2, size=size*2).reshape((size//10,10,2)) self.infer_class.predict_samps[:,:,1]=0 self.infer_class.y_pred = np.mean(self.infer_class.predict_samps,axis=0) self.infer_class.y_test = np.array([[1,2,3,4,5,6,7,8,9,10], [0,0,0,0,0,0,0,0,0,0]],dtype=np.float32).T self.infer_class.calc_p_dlt(cov_emp=np.diag(np.ones(2))) percentages = [0.682689,0,0.682689,0.954499,0.997300,0.999936]+[1.0]*4 for p_i in range(len(percentages)): self.assertAlmostEqual(percentages[p_i],self.infer_class.p_dlt[p_i], places=2) # Expand to higher dimensions size = int(1e6) self.infer_class.predict_samps = np.random.normal(loc=0, size=size*2).reshape((size//10,10,2)) self.infer_class.predict_samps /= np.sqrt(np.sum(np.square( self.infer_class.predict_samps),axis=-1,keepdims=True)) self.infer_class.predict_samps *= np.random.random(size=size).reshape(( size//10,10,1))*5 self.infer_class.y_pred = np.mean(self.infer_class.predict_samps,axis=0) self.infer_class.y_test = np.array([[1,2,3,4,5,6,7,8,9,10],[0]*10]).T self.infer_class.calc_p_dlt(cov_emp=np.diag(np.ones(2))) percentages = [1/5,2/5,3/5,4/5,1,1]+[1.0]*4 for p_i in range(len(percentages)): self.assertAlmostEqual(percentages[p_i],self.infer_class.p_dlt[p_i], 
places=2) # Expand to higher dimensions size = int(1e6) self.infer_class.predict_samps = np.random.normal(loc=0, size=size*2).reshape((size//2,2,2))*5 self.infer_class.predict_samps[:,:,1]=0 self.infer_class.y_pred = np.mean(self.infer_class.predict_samps,axis=0) self.infer_class.y_test = np.array([[0,np.sqrt(2)],[0]*2]).T self.infer_class.calc_p_dlt() percentages = [0,0.223356] for p_i in range(len(percentages)): self.assertAlmostEqual(percentages[p_i],self.infer_class.p_dlt[p_i], places=2) def test_specify_test_set_path(self): # Pass a specific test_set_path to the inference class and make sure # it behaves as expected. test_set_path = self.root_path # Check that the file doesn't already exist. self.assertFalse(os.path.isfile(test_set_path+'tf_record_test_val')) # We will again have to simulate training so that the desired # normalization path exists. model_trainer.prepare_tf_record(self.cfg, self.root_path, self.tf_record_path,self.final_params,'train') os.remove(self.tf_record_path) _ = bnn_inference.InferenceClass(self.cfg, test_set_path=test_set_path,lite_class=True) # Check that a new tf_record was generated self.assertTrue(os.path.isfile(test_set_path+'tf_record_test_val')) # Check that passing a fake test_set_path raises an error. fake_test_path = self.root_path+'fake_data' os.mkdir(fake_test_path) with self.assertRaises(FileNotFoundError): _ = bnn_inference.InferenceClass(self.cfg, test_set_path=fake_test_path,lite_class=True) # Test cleanup os.rmdir(fake_test_path) os.remove(test_set_path+'tf_record_test_val') os.remove(self.root_path+'new_metadata.csv') os.remove(self.normalization_constants_path)
38.905797
76
0.750494
4,448
26,845
4.310926
0.092176
0.052099
0.081043
0.018357
0.779609
0.756767
0.732308
0.72219
0.704302
0.697679
0
0.024896
0.127696
26,845
689
77
38.962264
0.793953
0.203986
0
0.594771
0
0
0.044645
0.004474
0
0
0
0.001451
0.098039
1
0.039216
false
0
0.015251
0.008715
0.074074
0
0
0
0
null
0
0
0
0
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
5
e79374449dd8b4f58aa0a50c2d3d3aa9c54fe352
44
py
Python
python/testData/intentions/PyAnnotateVariableTypeIntentionTest/annotationEmptyTupleType_after.py
truthiswill/intellij-community
fff88cfb0dc168eea18ecb745d3e5b93f57b0b95
[ "Apache-2.0" ]
2
2019-04-28T07:48:50.000Z
2020-12-11T14:18:08.000Z
python/testData/intentions/PyAnnotateVariableTypeIntentionTest/annotationEmptyTupleType_after.py
truthiswill/intellij-community
fff88cfb0dc168eea18ecb745d3e5b93f57b0b95
[ "Apache-2.0" ]
173
2018-07-05T13:59:39.000Z
2018-08-09T01:12:03.000Z
python/testData/intentions/PyAnnotateVariableTypeIntentionTest/annotationEmptyTupleType_after.py
truthiswill/intellij-community
fff88cfb0dc168eea18ecb745d3e5b93f57b0b95
[ "Apache-2.0" ]
2
2020-03-15T08:57:37.000Z
2020-04-07T04:48:14.000Z
from typing import Tuple var: [Tuple] = ()
11
24
0.659091
6
44
4.833333
0.833333
0
0
0
0
0
0
0
0
0
0
0
0.204545
44
3
25
14.666667
0.828571
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
0.5
0
0.5
0
1
1
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
0
0
0
5
e7b9c46eb8c4dcfde5398209806dab8c997c3ebb
94
py
Python
papers/errors.py
Squirtle692/Oh-my-papers
dba279ff4fcb22028b5f4290eb437dd4a87d4a2f
[ "MIT" ]
null
null
null
papers/errors.py
Squirtle692/Oh-my-papers
dba279ff4fcb22028b5f4290eb437dd4a87d4a2f
[ "MIT" ]
null
null
null
papers/errors.py
Squirtle692/Oh-my-papers
dba279ff4fcb22028b5f4290eb437dd4a87d4a2f
[ "MIT" ]
null
null
null
class RequestFailedError(Exception): pass class DOIFormatIncorrect(Exception): pass
13.428571
36
0.765957
8
94
9
0.625
0.361111
0
0
0
0
0
0
0
0
0
0
0.170213
94
6
37
15.666667
0.923077
0
0
0.5
0
0
0
0
0
0
0
0
0
1
0
true
0.5
0
0
0.5
0
1
0
0
null
1
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
1
0
0
0
0
0
5
e7d028e85c5953b52a42766cdf0e2f21b0e0f897
167
py
Python
gitkit/util/cli.py
akx/git-kit
54948b57f201adecc810c4895b6712c1c8265cf3
[ "MIT" ]
3
2017-02-16T09:04:09.000Z
2021-05-03T08:25:52.000Z
gitkit/util/cli.py
akx/git-kit
54948b57f201adecc810c4895b6712c1c8265cf3
[ "MIT" ]
2
2017-02-16T08:54:15.000Z
2017-02-16T09:09:41.000Z
gitkit/util/cli.py
akx/git-kit
54948b57f201adecc810c4895b6712c1c8265cf3
[ "MIT" ]
1
2022-02-07T09:07:39.000Z
2022-02-07T09:07:39.000Z
import sys import click def yorn(prompt, default=False): return click.confirm(prompt, default=default) def croak(message): print(message) sys.exit(1)
12.846154
49
0.706587
23
167
5.130435
0.652174
0.220339
0
0
0
0
0
0
0
0
0
0.007353
0.185629
167
12
50
13.916667
0.860294
0
0
0
0
0
0
0
0
0
0
0
0
1
0.285714
false
0
0.285714
0.142857
0.714286
0.142857
1
0
0
null
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
0
0
1
1
0
0
5
99f99e78a5fab4dc6fc3687c3e7b1d2ffbd2c8c5
19
py
Python
Class04/test3.py
BinHan-Code/PythonNetClass
c63e89c74407e4f1706e163c90e9d117149561c9
[ "Apache-2.0" ]
null
null
null
Class04/test3.py
BinHan-Code/PythonNetClass
c63e89c74407e4f1706e163c90e9d117149561c9
[ "Apache-2.0" ]
null
null
null
Class04/test3.py
BinHan-Code/PythonNetClass
c63e89c74407e4f1706e163c90e9d117149561c9
[ "Apache-2.0" ]
null
null
null
print (__name__)
4.75
16
0.684211
2
19
4.5
1
0
0
0
0
0
0
0
0
0
0
0
0.210526
19
3
17
6.333333
0.6
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
0
0
0
1
1
1
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
0
0
0
1
0
5
822fefbdd32714c0ae7b67ca39be8b52f9954de5
37
py
Python
src/ilp/data/__init__.py
mali-git/hyper_relational_ilp
6db58acc3efa410bc1860f601b0e294ab555579a
[ "MIT" ]
4
2021-07-08T13:13:11.000Z
2021-10-02T20:34:58.000Z
src/ilp/data/__init__.py
mali-git/hyper_relational_ilp
6db58acc3efa410bc1860f601b0e294ab555579a
[ "MIT" ]
1
2021-12-10T10:40:16.000Z
2021-12-10T10:41:32.000Z
src/ilp/data/__init__.py
mali-git/hyper_relational_ilp
6db58acc3efa410bc1860f601b0e294ab555579a
[ "MIT" ]
3
2021-12-03T00:17:27.000Z
2022-03-08T09:10:13.000Z
"""Data loading and data methods."""
18.5
36
0.675676
5
37
5
0.8
0
0
0
0
0
0
0
0
0
0
0
0.135135
37
1
37
37
0.78125
0.810811
0
null
0
null
0
0
null
0
0
0
null
1
null
true
0
0
null
null
null
1
1
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
1
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
0
0
0
0
0
5
4146227ecff2fe5369980d7699bfe819fdb65a96
160
py
Python
src/backlogapiprocessmodule/__init__.py
tys-hiroshi/BacklogProcessing
3ca95242045fb867295cfc2f363ee6a980bd8dc9
[ "MIT" ]
null
null
null
src/backlogapiprocessmodule/__init__.py
tys-hiroshi/BacklogProcessing
3ca95242045fb867295cfc2f363ee6a980bd8dc9
[ "MIT" ]
null
null
null
src/backlogapiprocessmodule/__init__.py
tys-hiroshi/BacklogProcessing
3ca95242045fb867295cfc2f363ee6a980bd8dc9
[ "MIT" ]
null
null
null
import azure.functions as func from backlogapiprocessmodule import backlogapiprocess def main(mytimer: func.TimerRequest) -> None: backlogapiprocess.run()
26.666667
53
0.8125
17
160
7.647059
0.823529
0
0
0
0
0
0
0
0
0
0
0
0.11875
160
5
54
32
0.921986
0
0
0
0
0
0
0
0
0
0
0
0
1
0.25
false
0
0.5
0
0.75
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
0
1
0
1
0
0
5
419481a1f6b92fe608264a653db6c4d0567af333
11,498
py
Python
tests/test_validation.py
codingedward/book-a-meal-api
36756abc225bf7e8306330f2c3e223dc32af7869
[ "MIT" ]
null
null
null
tests/test_validation.py
codingedward/book-a-meal-api
36756abc225bf7e8306330f2c3e223dc32af7869
[ "MIT" ]
null
null
null
tests/test_validation.py
codingedward/book-a-meal-api
36756abc225bf7e8306330f2c3e223dc32af7869
[ "MIT" ]
2
2018-10-01T17:45:19.000Z
2020-12-07T13:48:25.000Z
import json import unittest from app.validation.validator import Validator class TestValidator(unittest.TestCase): def setUp(self): self.V = Validator() def test_accepted(self): V = self.V V.set_rules({'field': 'accepted'}) V.set_request({'field': '0'}) self.assertTrue(V.fails()) err_str = str(V.errors()) self.assertIn('accepted', err_str) for val in [1, '1', True, 'true', 'yes']: V.set_request({'field': val}) self.assertTrue(V.passes()) def test_after(self): V = self.V V.set_rules({'field': 'after:2008-01-10'}) V.set_request({'field': '2002-02-10'}) self.assertTrue(V.fails()) err_str = str(V.errors()) self.assertIn('after', err_str) V.set_request({'field': '2009-01-10'}) self.assertTrue(V.passes()) def test_alpha(self): V = self.V V.set_rules({'field': 'alpha'}) V.set_request({'field': '123 abc'}) self.assertTrue(V.fails()) err_str = str(V.errors()) self.assertIn('letters', err_str) V.set_request({'field': 'abc def'}) self.assertTrue(V.passes()) def test_alpha_dash(self): V = self.V V.set_rules({'field': 'alpha_dash'}) V.set_request({'field': '123 abc --- ###'}) self.assertTrue(V.fails()) err_str = str(V.errors()) self.assertIn('dashes', err_str) self.assertIn('letters', err_str) V.set_request({'field': '123 abc ---'}) self.assertTrue(V.passes()) def test_alpha_num(self): V = self.V V.set_rules({'field': 'alpha_num'}) V.set_request({'field': '1234 abc --'}) self.assertTrue(V.fails()) err_str = str(V.errors()) self.assertIn('numbers', err_str) self.assertIn('letters', err_str) V.set_request({'field': '1234 hi there'}) self.assertTrue(V.passes()) def test_before(self): V = self.V V.set_rules({'field': 'before:2008-01-10'}) V.set_request({'field': '2012-02-10'}) self.assertTrue(V.fails()) err_str = str(V.errors()) self.assertIn('before', err_str) V.set_request({'field': '2007-01-10'}) self.assertTrue(V.passes()) def test_between_numeric(self): V = self.V V.set_rules({'field': 'between_numeric:0,100'}) V.set_request({'field': 123}) self.assertTrue(V.fails()) err_str = 
str(V.errors()) self.assertIn('between', err_str) V.set_request({'field': 50}) self.assertTrue(V.passes()) def test_between_string(self): V = self.V V.set_rules({'field': 'between_string:0,10'}) V.set_request({'field': 'xxxxxxxxxxxxx'}) self.assertTrue(V.fails()) err_str = str(V.errors()) self.assertIn('between', err_str) self.assertIn('characters', err_str) V.set_request({'field': 'xxxx'}) self.assertTrue(V.passes()) def test_boolean(self): V = self.V V.set_rules({'field': 'boolean'}) V.set_request({'field': 'hi'}) self.assertTrue(V.fails()) err_str = str(V.errors()) self.assertIn('true or false', err_str) for val in [1, '1', True, 'true', 0, '0', False, 'false' ]: V.set_request({'field': val}) self.assertTrue(V.passes()) def test_confirmed(self): V = self.V V.set_rules({'field': 'confirmed'}) V.set_request({'field': 'hi'}) self.assertTrue(V.fails()) err_str = str(V.errors()) self.assertIn('confirmation', err_str) V.set_request({'field': 'hi', 'field_confirmation': 'hi there'}) self.assertTrue(V.fails()) err_str = str(V.errors()) self.assertIn('confirmation', err_str) V.set_request({'field': 'hi', 'field_confirmation': 'hi'}) self.assertTrue(V.passes()) def test_date(self): V = self.V V.set_rules({'field': 'date'}) V.set_request({'field': 'hi'}) self.assertTrue(V.fails()) err_str = str(V.errors()) self.assertIn('date', err_str) V.set_request({'field': '2018-02-12'}) self.assertTrue(V.passes()) def test_different(self): V = self.V V.set_rules({'field': 'different:field2'}) V.set_request({'field': 'hi', 'field2': 'hi'}) self.assertTrue(V.fails()) err_str = str(V.errors()) self.assertIn('different', err_str) V.set_request({'field': 'hi', 'field2': 'there'}) self.assertTrue(V.passes()) def test_digits(self): V = self.V V.set_rules({'field': 'digits:5'}) V.set_request({'field': 1.032}) self.assertTrue(V.fails()) err_str = str(V.errors()) self.assertIn('digits', err_str) V.set_request({'field': 1.0245}) self.assertTrue(V.passes()) def test_email(self): V = self.V 
V.set_rules({'field': 'email'}) V.set_request({'field': 'user@mail'}) self.assertTrue(V.fails()) err_str = str(V.errors()) self.assertIn('email', err_str) V.set_request({'field': 'user@mail.com'}) self.assertTrue(V.passes()) def test_found_in(self): V = self.V V.set_rules({'field': 'found_in:male,female'}) V.set_request({'field': 'hi'}) self.assertTrue(V.fails()) err_str = str(V.errors()) self.assertIn('invalid', err_str) V.set_request({'field': 'male'}) self.assertTrue(V.passes()) def test_integer(self): V = self.V V.set_rules({'field': 'integer'}) V.set_request({'field': 'hi'}) self.assertTrue(V.fails()) err_str = str(V.errors()) self.assertIn('integer', err_str) V.set_request({'field': 10}) self.assertTrue(V.passes()) def test_json(self): V = self.V V.set_rules({'field': 'json'}) V.set_request({'field': '{hi man}'}) self.assertTrue(V.fails()) err_str = str(V.errors()) self.assertIn('json', err_str) V.set_request({'field': json.dumps({'hi': 'there'})}) self.assertTrue(V.passes()) def test_most_numeric(self): V = self.V V.set_rules({'field': 'most_numeric:30'}) V.set_request({'field': 309}) self.assertTrue(V.fails()) err_str = str(V.errors()) self.assertIn('not be greater than', err_str) V.set_request({'field': 20}) self.assertTrue(V.passes()) def test_most_string(self): V = self.V V.set_rules({'field': 'most_string:10'}) V.set_request({'field': 'xxxxxxxxxxxxx'}) self.assertTrue(V.fails()) err_str = str(V.errors()) self.assertIn('not be greater than', err_str) V.set_request({'field': 'xxxx'}) self.assertTrue(V.passes()) def test_least_numeric(self): V = self.V V.set_rules({'field': 'least_numeric:30'}) V.set_request({'field': 20}) self.assertTrue(V.fails()) err_str = str(V.errors()) self.assertIn('least', err_str) V.set_request({'field': 200}) self.assertTrue(V.passes()) def test_least_string(self): V = self.V V.set_rules({'field': 'least_string:10'}) V.set_request({'field': 'xxx'}) self.assertTrue(V.fails()) err_str = str(V.errors()) self.assertIn('least', 
err_str) V.set_request({'field': 'xxxxxxxxxxxx'}) self.assertTrue(V.passes()) def test_numeric(self): V = self.V V.set_rules({'field': 'numeric'}) V.set_request({'field': 'hi'}) self.assertTrue(V.fails()) err_str = str(V.errors()) self.assertIn('number', err_str) V.set_request({'field': 10.0}) self.assertTrue(V.passes()) V.set_request({'field': -10}) self.assertTrue(V.passes()) def test_not_in(self): V = self.V V.set_rules({'field': 'not_in:xyz,abc'}) V.set_request({'field': 'abc'}) self.assertTrue(V.fails()) err_str = str(V.errors()) self.assertIn('invalid', err_str) V.set_request({'field': 'def'}) self.assertTrue(V.passes()) def test_regex(self): V = self.V V.set_rules({'field': 'regex:^\d+'}) V.set_request({'field': 'test'}) self.assertTrue(V.fails()) err_str = str(V.errors()) self.assertIn('format', err_str) V.set_request({'field': '123'}) self.assertTrue(V.passes()) def test_required(self): V = self.V V.set_rules({'field2': 'required'}) V.set_request({'field': 'test'}) self.assertTrue(V.fails()) err_str = str(V.errors()) self.assertIn('required', err_str) V.set_request({'field2': '123'}) self.assertTrue(V.passes()) def test_required_with(self): V = self.V V.set_rules({'field2': 'required_with:field1'}) V.set_request({'field2': 'test'}) self.assertTrue(V.fails()) err_str = str(V.errors()) self.assertIn('required when', err_str) V.set_request({'field2': '123', 'field1': 'hi'}) self.assertTrue(V.passes()) def test_required_without(self): V = self.V V.set_rules({'field2': 'required_without:field1'}) V.set_request({'field3': 'test'}) self.assertTrue(V.fails()) err_str = str(V.errors()) self.assertIn('required when', err_str) V.set_request({'field1': '123'}) self.assertTrue(V.passes()) V.set_request({'field2': '123'}) self.assertTrue(V.passes()) def test_same(self): V = self.V V.set_rules({'field2': 'same:field1'}) V.set_request({'field1': 'test', 'field2': 'different'}) self.assertTrue(V.fails()) err_str = str(V.errors()) self.assertIn('match', err_str) 
V.set_request({'field1': '123', 'field2': '123'}) self.assertTrue(V.passes()) def test_size_numeric(self): V = self.V V.set_rules({'field': 'size_numeric:30'}) V.set_request({'field': 309}) self.assertTrue(V.fails()) err_str = str(V.errors()) self.assertIn('must be', err_str) V.set_request({'field': 30}) self.assertTrue(V.passes()) def test_size_string(self): V = self.V V.set_rules({'field': 'size_string:5'}) V.set_request({'field': 'xxx'}) self.assertTrue(V.fails()) err_str = str(V.errors()) self.assertIn('must be', err_str) V.set_request({'field': 'xxxxx'}) self.assertTrue(V.passes()) def test_string(self): V = self.V V.set_rules({'field': 'string'}) V.set_request({'field': 10}) self.assertTrue(V.fails()) err_str = str(V.errors()) self.assertIn('string', err_str) V.set_request({'field': 'xxxxx'}) self.assertTrue(V.passes()) def test_url(self): V = self.V V.set_rules({'field': 'url'}) V.set_request({'field': 'hi there'}) self.assertTrue(V.fails()) err_str = str(V.errors()) self.assertIn('format is invalid', err_str) V.set_request({'field': 'http://www.google.com'}) self.assertTrue(V.passes())
27.840194
72
0.549574
1,487
11,498
4.094822
0.081372
0.065035
0.121038
0.155034
0.878962
0.858597
0.797339
0.706191
0.60092
0.489079
0
0.021282
0.268481
11,498
412
73
27.907767
0.702651
0
0
0.570033
0
0
0.139863
0.003827
0
0
0
0
0.335505
1
0.107492
false
0.110749
0.009772
0
0.120521
0
0
0
0
null
0
0
0
1
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
1
0
0
0
0
0
5
41acb6c171914cf082ffd2009cfa119241c52e1f
21,021
py
Python
examples/GISANS_problem.py
reflectometry/osrefl
ddf55d542f2eab2a29fd6ffc862379820a06d5c7
[ "BSD-3-Clause" ]
2
2015-05-21T15:16:46.000Z
2015-10-23T17:47:36.000Z
examples/GISANS_problem.py
reflectometry/osrefl
ddf55d542f2eab2a29fd6ffc862379820a06d5c7
[ "BSD-3-Clause" ]
null
null
null
examples/GISANS_problem.py
reflectometry/osrefl
ddf55d542f2eab2a29fd6ffc862379820a06d5c7
[ "BSD-3-Clause" ]
null
null
null
#from greens_thm_form import greens_form_line, greens_form_shape from greens_thm_form import div_form_shape as greens_form_shape from numpy import arange, linspace, float64, indices, zeros_like, ones_like, pi, sin, complex128, array, exp, newaxis, cumsum, sum, cos, sin, log, log10, zeros, sqrt, ones from osrefl.theory.DWBAGISANS import dwbaWavefunction from gaussian_envelope import FWHM_to_sigma, normgauss class Shape(object): name = "Shape" def __init__(self, name=None, points=None, sld=0.0, sldi=0.0): if name is not None: self.name = name if points is not None: self.points = points else: self.points = [] self.sld = sld self.sldi = sldi class rectangle(Shape): name = "rectangle" def __init__(self, x0, y0, dx, dy, sld=0.0, sldi=0.0): points = [[x0,y0], [x0+dx, y0], [x0+dx, y0+dy], [x0, y0+dy]] Shape.__init__(self, points=points, sld=sld, sldi=sldi) self.x0 = x0 self.y0 = y0 self.dx = dx self.dy = dy self.area = dx * dy def reference_integral(self,qx,qy): x0=self.x0 y0=self.y0 dx=self.dx dy=self.dy result = -1.0/(qx * qy) * (exp(1j*qx*(x0+dx)) - exp(1j*qx*x0)) * (exp(1j*qy*(y0+dy)) - exp(1j*qy*y0)) return result class GISANS_problem(object): def __init__(self, sublayers, matrix, front_sld, front_sldi, back_sld, back_sldi, wavelength, qx, qy, qz, Lx,Ly, autoFT=True, name='grazing_incidence'): self.name = name self.sublayers = sublayers self.matrix = matrix self.Lx = Lx self.Ly = Ly self.front_sld = front_sld self.front_sldi = front_sldi self.back_sld = back_sld self.back_sldi = back_sldi self.wavelength = wavelength self._qx = qx self._qy = qy self._qz = qz self.update_SLDArray() self.alpha_in = None self.FTs = [] if autoFT == True: self.update_FTs() def get_qx(self): return self._qx def set_qx(self, value): self._qx = value self.update_FTs() def del_qx(self): del self._qx qx = property(get_qx, set_qx, del_qx, "I'm the qx property.") def get_qy(self): return self._qy def set_qy(self, value): self._qy = value self.update_FTs() def del_qy(self): del self._qy qy = 
property(get_qy, set_qy, del_qy, "I'm the qy property.") def get_qz(self): return self._qz def set_qz(self, value): self._qz = value self.update_FTs() def del_qz(self): del self._qz qz = property(get_qz, set_qz, del_qz, "I'm the qz property.") def update_SLDArray(self): SLDArray = [ [self.front_sld, 0, self.front_sldi] ] # [sld.real, thickness, sld.imag] for sl in self.sublayers: SLDArray.append([sl[1], sl[3], sl[2]]) SLDArray.append([self.back_sld, 0, self.back_sldi]) self.SLDArray = array(SLDArray) def update_sublayers(self, sublayers): self.sublayers = sublayers self.update_SLDArray() def update_Qs(self, alpha_in=None): if alpha_in is not None: self.alpha_in = alpha_in k0 = 2*pi/self.wavelength kz_in = array([[[k0 * sin(self.alpha_in * pi/180.0)]]], dtype=complex128) kx_in = array([k0 * cos(self.alpha_in * pi/180.0)], dtype=complex128) kz_out = kz_in - self.qz ky_out = -self.qy kx_out = sqrt(k0**2 - kz_out**2 - ky_out**2) self.kz_in = kz_in self.kz_out = kz_out self.qx = kx_in - kx_out def update_FTs(self): dFTs = [] # differential = SLD - (avg. SLD) FTs = [] for sl in self.sublayers: dFT = zeros((self.qx.shape[0],self.qy.shape[1]), dtype=complex128) #FT = zeros((self.qx.shape[0], self.qy.shape[0]), dtype=complex128) qx = self.qx[:,:,0] qy = self.qy[:,:,0] shapes = sl[0] for shape in shapes: dFT += greens_form_shape(shape.points, qx, qy) * (shape.sld) dFT += greens_form_shape(self.matrix.points, qx, qy) * (self.matrix.sld) FT = dFT.copy() FTs.append(FT) # do this before subtracting avg. 
SLD dFT += greens_form_shape(self.matrix.points, qx, qy) * (-sl[1]) # subtract FT of average SLD dFTs.append(dFT) self.FTs = FTs self.dFTs = dFTs def calc_overlap(self): wf_in = dwbaWavefunction(self.kz_in, self.SLDArray) wf_out = dwbaWavefunction(-self.kz_out, self.SLDArray) # solve 1d equation for time-reversed state self.wf_in = wf_in self.wf_out = wf_out kz_in_l = wf_in.kz_l # inside the layers kz_out_l = -wf_out.kz_l # inside the layers dz = self.SLDArray[1:-1,1][:,newaxis,newaxis,newaxis] zs = cumsum(self.SLDArray[1:-1,1]) - self.SLDArray[1,1] # start at zero with first layer z_array = array(zs)[:,newaxis,newaxis,newaxis] thickness = sum(self.SLDArray[1:-1,1]) qrt_inside = -kz_in_l[1:-1] - kz_out_l[1:-1] qtt_inside = -kz_in_l[1:-1] + kz_out_l[1:-1] qtr_inside = +kz_in_l[1:-1] + kz_out_l[1:-1] qrr_inside = +kz_in_l[1:-1] - kz_out_l[1:-1] # the overlap is the forward-moving amplitude c in psi_in multiplied by # the forward-moving amplitude in the time-reversed psi_out, which # ends up being the backward-moving amplitude d in the non-time-reversed psi_out # (which is calculated by the wavefunction calculator) # ... 
and vice-verso for d and c in psi_in and psi_out overlap = wf_out.c[1:-1] * wf_in.c[1:-1] / (1j * qtt_inside) * (exp(1j * qtt_inside * dz) - 1.0)*exp(1j*qtt_inside*z_array) overlap += wf_out.d[1:-1] * wf_in.d[1:-1] / (1j * qrr_inside) * (exp(1j * qrr_inside * dz) - 1.0)*exp(1j*qrr_inside*z_array) overlap += wf_out.c[1:-1] * wf_in.d[1:-1] / (1j * qtr_inside) * (exp(1j * qtr_inside * dz) - 1.0)*exp(1j*qtr_inside*z_array) overlap += wf_out.d[1:-1] * wf_in.c[1:-1] / (1j * qrt_inside) * (exp(1j * qrt_inside * dz) - 1.0)*exp(1j*qrt_inside*z_array) self.overlap = overlap return overlap def calc_overlap_BA(self): dz = self.SLDArray[1:-1,1][:,newaxis,newaxis,newaxis] zs = cumsum(self.SLDArray[1:-1,1]) - self.SLDArray[1,1] # start at zero with first layer z_array = array(zs)[:,newaxis,newaxis,newaxis] overlap_BA = 1.0 / (1j * self.qz) * (exp(1j * self.qz * dz) - 1.0) * exp(1j*self.qz*z_array) self.overlap_BA = overlap_BA return overlap_BA def calc_gisans(self, alpha_in=None, show_plot=True, add_specular=False): if alpha_in is not None: self.update_Qs(alpha_in) overlap = self.calc_overlap() gisans = sum(sum(overlap * array(self.dFTs)[:,:,:,newaxis], axis=0), axis=0) # first over layers, then Qx # now if you want to add specular back in... 
if add_specular == True: specular = ones((self.qx.shape[0], self.qy.shape[1], self.qz.shape[2]), dtype=complex128) specular *= complex128(2)*pi/self.Lx * normgauss(self.qx, FWHM_to_sigma(2.0*pi/self.Lx), x0=0.0) specular *= complex128(2)*pi/self.Ly * normgauss(self.qy, FWHM_to_sigma(2.0*pi/self.Ly), x0=0.0) specular *= 2.0*1j*self.kz_in*self.wf_in.r*self.Lx*self.Ly specular = sum(specular, axis=0)/self.qx.shape[0] # sum over Qx, taking average self.specular = specular gisans += specular self.gisans = gisans if show_plot == True: self.plot_gisans() def calc_gisans_BA(self, show_plot=True): overlap_BA = self.calc_overlap_BA() gisans_BA = sum(sum(overlap_BA * array(self.FTs)[:,:,:,newaxis], axis=0), axis=0) self.gisans_BA = gisans_BA if show_plot == True: self.plot_gisans_BA() def calc_both(self, show_plot=True, add_specular=False): self.calc_gisans(show_plot=False, add_specular=add_specular) self.calc_gisans_BA(show_plot=False) if show_plot == True: self.plot_both() def plot_gisans(self, vmax=None, vmin=None): from pylab import imshow, figure, colorbar extent = [self.qy.min(), self.qy.max(), self.qz.min(), self.qz.max()] figure() imshow(log10(abs(self.gisans)**2).T, origin='lower', extent=extent, aspect='auto', vmax=vmax, vmin=vmin) colorbar() def plot_gisans_BA(self, vmax=None, vmin=None): from pylab import imshow, figure, colorbar extent = [self.qy.min(), self.qy.max(), self.qz.min(), self.qz.max()] figure() imshow(log10(abs(self.gisans_BA)**2).T, origin='lower', extent=extent, aspect='auto', vmax=vmax, vmin=vmin) colorbar() def plot_both(self): vmax = max(log10(abs(self.gisans)**2).max(), log10(abs(self.gisans_BA)**2).max()) vmin = min(log10(abs(self.gisans)**2).min(), log10(abs(self.gisans_BA)**2).min()) self.plot_gisans(vmax=vmax, vmin=vmin) self.plot_gisans_BA(vmax=vmax, vmin=vmin) class GISANS_angle_problem(GISANS_problem): def __init__(self, sublayers, matrix, front_sld, front_sldi, back_sld, back_sldi, wavelength, angle_in, angle_out, inplane_angle, Lx,Ly, 
autoFT=True, name='grazing_incidence'): self.name = name self._qx = self._qy = self._qz = None self.sublayers = sublayers self.matrix = matrix self.Lx = Lx self.Ly = Ly self.front_sld = front_sld self.front_sldi = front_sldi self.back_sld = back_sld self.back_sldi = back_sldi self.wavelength = wavelength self._angle_in = angle_in self._angle_out = angle_out self._inplane_angle = inplane_angle self.update_SLDArray() self.alpha_in = None self.FTs = [] if autoFT == True: self.update_Qs() self.update_FTs() def get_angle_in(self): return self._angle_in def set_angle_in(self, value): self._angle_in = value self.update_Qs() self.update_FTs() def del_angle_in(self): del self._angle_in angle_in = property(get_angle_in, set_angle_in, del_angle_in, "I'm the angle_in property.") def get_angle_out(self): return self._angle_out def set_angle_out(self, value): self._angle_out = value self.update_Qs() self.update_FTs() def del_angle_out(self): del self._angle_out angle_out = property(get_angle_out, set_angle_out, del_angle_out, "I'm the angle_out property.") def get_inplane_angle(self): return self._inplane_angle def set_inplane_angle(self, value): self._inplane_angle = value self.update_Qs() self.update_FTs() def del_inplane_angle(self): del self._inplane_angle inplane_angle = property(get_inplane_angle, set_inplane_angle, del_inplane_angle, "I'm the inplane_angle property.") def update_Qs(self): wavelength = self.wavelength # convert angle to radians angle_in = self.angle_in * pi / 180. angle_out = self.angle_out * pi/180. iptheta = self.inplane_angle * pi/180. 
# determine wave vector (k) kvec = 2.0*pi/wavelength kz_out = kvec * sin( -angle_out )[newaxis,newaxis,:] kx_out = kvec * cos( -angle_out )[newaxis,newaxis,:] ky_out = -kvec * cos( -angle_out ) * sin( iptheta )[newaxis,:,newaxis] kz_in = kvec * sin( angle_in ) kx_in = kvec * cos( angle_in ) ky_in = zeros_like( ky_out ) self._qx = kx_in - kx_out self._qy = ky_in - ky_out self._qz = kz_in - kz_out self.kz_in = kz_in self.kz_out = kz_out self.update_FTs() def plot_gisans(self, vmax=None, vmin=None): from pylab import imshow, figure, colorbar, xlabel, ylabel, title extent = [self.inplane_angle.min(), self.inplane_angle.max(), self.angle_out.min(), self.angle_out.max()] figure() imshow(log10(abs(self.gisans)**2).T, origin='lower', extent=extent, aspect='auto', vmax=vmax, vmin=vmin) title('%s GISANS, angle_in = %g degrees' % (self.name, self.angle_in)) colorbar() def plot_gisans_BA(self, vmax=None, vmin=None): from pylab import imshow, figure, colorbar, xlabel, ylabel, title extent = [self.inplane_angle.min(), self.inplane_angle.max(), self.angle_out.min(), self.angle_out.max()] figure() imshow(log10(abs(self.gisans_BA)**2).T, origin='lower', extent=extent, aspect='auto', vmax=vmax, vmin=vmin) title('%s GISANS (Born Approximation), angle_in = %g degrees' % (self.name, self.angle_in)) colorbar() class GISANS_problem_old(object): def __init__(self, sublayers, matrix, front_sld, front_sldi, back_sld, back_sldi, wavelength, qx, qy, qz, Lx,Ly, autoFT=True): self.sublayers = sublayers self.matrix = matrix self.Lx = Lx self.Ly = Ly self.front_sld = front_sld self.front_sldi = front_sldi self.back_sld = back_sld self.back_sldi = back_sldi self.wavelength = wavelength self._qx = qx self._qy = qy self._qz = qz self.update_SLDArray() self.alpha_in = None self.FTs = [] if autoFT == True: self.update_FTs() def get_qx(self): return self._qx def set_qx(self, value): self._qx = value self.update_FTs() def del_qx(self): del self._qx qx = property(get_qx, set_qx, del_qx, "I'm the qx 
property.") def get_qy(self): return self._qy def set_qy(self, value): self._qy = value self.update_FTs() def del_qy(self): del self._qy qy = property(get_qy, set_qy, del_qy, "I'm the qy property.") def get_qz(self): return self._qz def set_qz(self, value): self._qz = value self.update_FTs() def del_qz(self): del self._qz qz = property(get_qz, set_qz, del_qz, "I'm the qz property.") def update_SLDArray(self): SLDArray = [ [self.front_sld, 0, self.front_sldi] ] # [sld.real, thickness, sld.imag] for sl in self.sublayers: SLDArray.append([sl[1], sl[3], sl[2]]) SLDArray.append([self.back_sld, 0, self.back_sldi]) self.SLDArray = array(SLDArray) def update_sublayers(self, sublayers): self.sublayers = sublayers self.update_SLDArray() def update_Qs(self, alpha_in=None): if alpha_in is not None: self.alpha_in = alpha_in k0 = 2*pi/self.wavelength kz_in = array([k0 * sin(self.alpha_in * pi/180.0)], dtype=complex128) kx_in = array([k0 * cos(self.alpha_in * pi/180.0)], dtype=complex128) kz_out = kz_in - self.qz ky_out = -self.qy kx_out = sqrt(k0**2 - kz_out[newaxis,newaxis,:]**2 - ky_out[newaxis,:,newaxis]**2) self.kz_in = kz_in self.kz_out = kz_out self.qx = kx_in - kx_out def update_FTs(self): dFTs = [] # differential = SLD - (avg. SLD) FTs = [] for sl in self.sublayers: dFT = zeros((self.qx.shape[0], self.qy.shape[1]), dtype=complex128) #FT = zeros((self.qx.shape[0], self.qy.shape[0]), dtype=complex128) qx = self.qx[:,:,0] qy = self.qy[:,:,0] shapes = sl[0] for shape in shapes: dFT += greens_form_shape(shape.points, qx, qy) * (shape.sld) dFT += greens_form_shape(self.matrix.points, qx, qy) * (self.matrix.sld) FT = dFT.copy() FTs.append(FT) # do this before subtracting avg. 
SLD dFT += greens_form_shape(self.matrix.points, qx, qy) * (-sl[1]) # subtract FT of average SLD dFTs.append(dFT) self.FTs = FTs self.dFTs = dFTs def calc_gisans(self, alpha_in, show_plot=True, add_specular=False): k0 = 2*pi/self.wavelength kz_in_0 = array([k0 * sin(alpha_in * pi/180.0)], dtype=complex128) kx_in_0 = array([k0 * cos(alpha_in * pi/180.0)], dtype=complex128) kz_out_0 = kz_in_0 - self.qz self.kz_out_0 = kz_out_0 ky_out_0 = -self.qy kx_out_0 = sqrt(k0**2 - kz_out_0[newaxis,newaxis,:]**2 - ky_out_0[newaxis,:,newaxis]**2) qx = kx_in_0 - kx_out_0 self.qx_derived = qx kz_out_neg = kz_out_0 < 0 kz_in_neg = kz_in_0 < 0 wf_in = dwbaWavefunction((kz_in_0), self.SLDArray) wf_out = dwbaWavefunction((-kz_out_0), self.SLDArray) # solve 1d equation for time-reversed state self.wf_in = wf_in self.wf_out = wf_out kz_in_l = wf_in.kz_l # inside the layers #kz_in_l[:, kz_in_neg] *= -1.0 kz_in_p_l = -kz_in_l # prime kz_out_l = -wf_out.kz_l # inside the layers #kz_out_l[:, kz_out_neg] *= -1.0 kz_out_p_l = -kz_out_l # kz_f_prime in the Sinha paper notation dz = self.SLDArray[1:-1,1][:,newaxis] zs = cumsum(self.SLDArray[1:-1,1]) - self.SLDArray[1,1] # start at zero with first layer z_array = array(zs)[:,newaxis] thickness = sum(self.SLDArray[1:-1,1]) qrt_inside = -kz_in_l[1:-1] - kz_out_l[1:-1] qtt_inside = -kz_in_l[1:-1] + kz_out_l[1:-1] qtr_inside = +kz_in_l[1:-1] + kz_out_l[1:-1] qrr_inside = +kz_in_l[1:-1] - kz_out_l[1:-1] # the overlap is the forward-moving amplitude c in psi_in multiplied by # the forward-moving amplitude in the time-reversed psi_out, which # ends up being the backward-moving amplitude d in the non-time-reversed psi_out # (which is calculated by the wavefunction calculator) # ... 
and vice-verso for d and c in psi_in and psi_out overlap = wf_out.c[1:-1] * wf_in.c[1:-1] / (1j * qtt_inside) * (exp(1j * qtt_inside * dz) - 1.0)*exp(1j*qtt_inside*z_array) overlap += wf_out.d[1:-1] * wf_in.d[1:-1] / (1j * qrr_inside) * (exp(1j * qrr_inside * dz) - 1.0)*exp(1j*qrr_inside*z_array) overlap += wf_out.c[1:-1] * wf_in.d[1:-1] / (1j * qtr_inside) * (exp(1j * qtr_inside * dz) - 1.0)*exp(1j*qtr_inside*z_array) overlap += wf_out.d[1:-1] * wf_in.c[1:-1] / (1j * qrt_inside) * (exp(1j * qrt_inside * dz) - 1.0)*exp(1j*qrt_inside*z_array) self.overlap = overlap overlap_BA = 1.0 / (1j * self.qz) * (exp(1j * self.qz * dz) - 1.0) * exp(1j*self.qz*z_array) self.overlap_BA = overlap_BA gisans = sum(sum(overlap * array(self.dFTs)[:,:,:,newaxis], axis=0), axis=0) # first over layers, then Qx # now if you want to add specular back in... if add_specular == True: specular = complex128(2)*pi/self.Lx * normgauss(qx, FWHM_to_sigma(2.0*pi/self.Lx), x0=0.0) specular *= complex128(2)*pi/self.Ly * normgauss(self.qy[newaxis,:,newaxis], FWHM_to_sigma(2.0*pi/self.Ly), x0=0.0) specular *= 2.0*1j*kz_in_0*wf_in.r[newaxis,newaxis,:]*self.Lx*self.Ly specular = sum(specular, axis=0)/self.qx.shape[0] # sum over Qx, taking average self.specular = specular gisans += specular gisans_BA = sum(sum(overlap_BA * array(self.FTs)[:,:,:,newaxis], axis=0), axis=0) extent = [self.qy.min(), self.qy.max(), self.qz.min(), self.qz.max()] self.alpha_in = alpha_in self.gisans = gisans self.gisans_BA = gisans_BA if show_plot == True: from pylab import imshow, figure, colorbar zmax = max(log10(abs(gisans)**2).max(), log10(abs(gisans_BA)**2).max()) zmin = min(log10(abs(gisans)**2).min(), log10(abs(gisans_BA)**2).min()) figure() imshow(log10(abs(gisans)**2).T, origin='lower', extent=extent, aspect='auto', vmax=zmax, vmin=zmin) colorbar() figure() imshow(log10(abs(gisans_BA)**2).T, origin='lower', extent=extent, aspect='auto', vmax=zmax, vmin=zmin) colorbar()
41.461538
171
0.578041
3,089
21,021
3.727096
0.074134
0.00886
0.004169
0.018067
0.788066
0.743768
0.722401
0.704682
0.702076
0.687397
0
0.029111
0.292422
21,021
506
172
41.543478
0.744924
0.080729
0
0.692488
0
0
0.020278
0
0
0
0
0
0
1
0.124413
false
0
0.021127
0.021127
0.211268
0
0
0
0
null
0
0
0
0
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
5
41c13be9af9eb6a0dd8ffeb8cf7db811c0abd147
131
py
Python
frontend_helpers/init.py
PatchyVideo/PatchyVideo
cafbdfa34591d7292090d5e67bb633b974447b64
[ "MIT" ]
13
2020-06-04T00:25:24.000Z
2022-03-31T13:12:17.000Z
frontend_helpers/init.py
PatchyVideo/PatchyVideo
cafbdfa34591d7292090d5e67bb633b974447b64
[ "MIT" ]
1
2021-01-03T04:17:45.000Z
2021-02-07T14:19:04.000Z
scraper/init.py
PatchyVideo/PatchyVideo
cafbdfa34591d7292090d5e67bb633b974447b64
[ "MIT" ]
null
null
null
from aiohttp import web from aiohttp import ClientSession app = web.Application() routes = web.RouteTableDef() init_funcs = []
13.1
33
0.755725
16
131
6.125
0.6875
0.22449
0.346939
0
0
0
0
0
0
0
0
0
0.160305
131
9
34
14.555556
0.890909
0
0
0
0
0
0
0
0
0
0
0
0
1
0
false
0
0.4
0
0.4
0
1
0
0
null
1
1
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
1
0
0
0
0
5
68c377f24469eacae17df4fcd25c52c80e79144e
56
py
Python
rpipes/__init__.py
Numerlor/rpipes
ee81669760ed06cdf08f130509ddff5db5cacb59
[ "MIT" ]
3
2021-07-19T21:41:37.000Z
2022-01-18T18:48:55.000Z
rpipes/__init__.py
Numerlor/rpipes
ee81669760ed06cdf08f130509ddff5db5cacb59
[ "MIT" ]
1
2021-07-17T17:04:50.000Z
2021-07-17T17:04:50.000Z
rpipes/__init__.py
Numerlor/rpipes
ee81669760ed06cdf08f130509ddff5db5cacb59
[ "MIT" ]
1
2021-07-23T20:02:52.000Z
2021-07-23T20:02:52.000Z
from blessed import Terminal terminal: Terminal = None
14
28
0.803571
7
56
6.428571
0.714286
0.711111
0
0
0
0
0
0
0
0
0
0
0.160714
56
3
29
18.666667
0.957447
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
0.5
0
0.5
0
1
0
0
null
1
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
0
0
0
5
68ea05d2aae8cb7ae0937d059e6c8e75da1576f1
36
py
Python
strongr/clouddomain/query/listdeployfailedvms.py
bigr-erasmusmc/StrongR
48573e170771a251f629f2d13dba7173f010a38c
[ "Apache-2.0" ]
null
null
null
strongr/clouddomain/query/listdeployfailedvms.py
bigr-erasmusmc/StrongR
48573e170771a251f629f2d13dba7173f010a38c
[ "Apache-2.0" ]
null
null
null
strongr/clouddomain/query/listdeployfailedvms.py
bigr-erasmusmc/StrongR
48573e170771a251f629f2d13dba7173f010a38c
[ "Apache-2.0" ]
null
null
null
class ListDeployFailedVms: pass
12
26
0.777778
3
36
9.333333
1
0
0
0
0
0
0
0
0
0
0
0
0.194444
36
2
27
18
0.965517
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0.5
0
0
0.5
0
1
1
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
1
0
0
0
0
0
5
ec3f92919027b70bfd0a659c9edc1a61f25bb074
634
py
Python
crypto/Day 2/Examples/rsa/hastad/solve/solve.py
b01lers/bootcamp-training-2020
5efa86f45df66541b1e4ca7340c689aeda95b99d
[ "MIT" ]
9
2020-10-07T11:21:30.000Z
2022-02-04T05:08:46.000Z
crypto/Day 2/Examples/rsa/hastad/solve/solve.py
b01lers/bootcamp-training-2020
5efa86f45df66541b1e4ca7340c689aeda95b99d
[ "MIT" ]
1
2020-10-04T22:19:53.000Z
2020-10-04T22:19:53.000Z
crypto/Day 2/Examples/rsa/hastad/solve/solve.py
b01lers/bootcamp-training-2020
5efa86f45df66541b1e4ca7340c689aeda95b99d
[ "MIT" ]
5
2020-10-02T04:18:58.000Z
2021-06-11T16:18:26.000Z
from sage.all import * from Crypto.Util.number import long_to_bytes as ltb ciphertexts = [6816192635244433032171632550443449557145278339704533135253318051343869682485, 29458333613251083477279181027991958647486339164210273946108733843048288771798, 52008835028241149739773168099431219570798566543042976440748722008163085793792] moduli = [81432338653519942865405641552095057076423594628943058525534293705394967595179, 71978431351050052696487194220659622019786217862770403524979900613826263964339, 53561730229599407697626373473399340929187312544407455817435153279814343543237] x = CRT(ciphertexts,moduli) root = x.nth_root(3) ltb(root)
70.444444
250
0.916404
33
634
17.515152
0.757576
0
0
0
0
0
0
0
0
0
0
0.763636
0.045741
634
8
251
79.25
0.191736
0
0
0
0
0
0
0
0
1
0
0
0
1
0
false
0
0.285714
0
0.285714
0
0
0
1
null
0
0
0
0
0
0
0
0
0
0
1
0
0
1
0
0
1
0
0
0
0
0
0
0
null
1
0
0
0
0
0
0
0
0
0
0
0
0
5
ec41a8edbd362039f37a90d7e97d10b2141f4221
69
py
Python
modelnetc_utils/modelnetc_utils/__init__.py
jiawei-ren/ModelNet-C
1187b20954e955c340b545c2ae9a055351b0242f
[ "Apache-2.0" ]
31
2022-02-08T02:49:01.000Z
2022-03-31T05:39:15.000Z
modelnetc_utils/modelnetc_utils/__init__.py
jiawei-ren/modelnetc
1187b20954e955c340b545c2ae9a055351b0242f
[ "Apache-2.0" ]
1
2022-02-08T18:34:24.000Z
2022-02-08T18:34:41.000Z
modelnetc_utils/modelnetc_utils/__init__.py
jiawei-ren/modelnetc
1187b20954e955c340b545c2ae9a055351b0242f
[ "Apache-2.0" ]
2
2022-02-08T05:41:21.000Z
2022-02-24T13:33:34.000Z
from .dataset import ModelNetC from .eval import eval_corrupt_wrapper
34.5
38
0.869565
10
69
5.8
0.7
0
0
0
0
0
0
0
0
0
0
0
0.101449
69
2
38
34.5
0.935484
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
5
ec4afa9e197e5a144c5da3e30882aa06ac02bf28
225
py
Python
pymom/PyMomRecoverableError.py
patrickmay/pymom
756f4dc7c6b86797f61c7903eeefd1696144bda9
[ "Apache-2.0" ]
null
null
null
pymom/PyMomRecoverableError.py
patrickmay/pymom
756f4dc7c6b86797f61c7903eeefd1696144bda9
[ "Apache-2.0" ]
null
null
null
pymom/PyMomRecoverableError.py
patrickmay/pymom
756f4dc7c6b86797f61c7903eeefd1696144bda9
[ "Apache-2.0" ]
null
null
null
#!/usr/bin/env python3 import sys class PyMomRecoverableError(Exception): """ A specialized exception for the PyMom framework that indicates a recoverable error occured in an on_message method. """ pass
20.454545
68
0.72
28
225
5.75
0.928571
0
0
0
0
0
0
0
0
0
0
0.00565
0.213333
225
10
69
22.5
0.903955
0.608889
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0.333333
0.333333
0
0.666667
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
1
1
0
1
0
0
5
6b61f671a962ea84d25bbd1da5ac54a502b9312b
2,646
py
Python
app/tests/domain/test_purchasedomain.py
wlsouza/cashbackgb
c5cffe782eb0f8c2ec0303405820e49c494d04a3
[ "MIT" ]
null
null
null
app/tests/domain/test_purchasedomain.py
wlsouza/cashbackgb
c5cffe782eb0f8c2ec0303405820e49c494d04a3
[ "MIT" ]
null
null
null
app/tests/domain/test_purchasedomain.py
wlsouza/cashbackgb
c5cffe782eb0f8c2ec0303405820e49c494d04a3
[ "MIT" ]
1
2022-02-10T04:15:19.000Z
2022-02-10T04:15:19.000Z
from decimal import Decimal from random import randrange from unittest import mock import pytest from app import crud, domain, schemas def test_calculate_cashback_with_value_lower_than_1000_must_return_10_percent(): purchase_value = Decimal(randrange(100001)) / 100 expected_value = purchase_value * Decimal(0.1) cashback_value = domain.purchase.calculate_cashback( purchase_value=purchase_value ) assert cashback_value == expected_value def test_calculate_cashback_with_value_between_1000_and_1500_must_return_15_percent(): purchase_value = Decimal(randrange(100001, 150001)) / 100 expected_value = purchase_value * Decimal(0.15) cashback_value = domain.purchase.calculate_cashback( purchase_value=purchase_value ) assert cashback_value == expected_value def test_calculate_cashback_with_value_upper_then_15000_must_return_15_percent(): purchase_value = Decimal(randrange(150001, 500000)) / 100 expected_value = purchase_value * Decimal(0.20) cashback_value = domain.purchase.calculate_cashback( purchase_value=purchase_value ) assert cashback_value == expected_value @pytest.mark.asyncio @mock.patch.object(crud.user, "get_by_id") @mock.patch.object(crud.purchase_status, "get_by_name") async def test_get_default_purchase_status_id_when_cpf_is_15350946056_must_return_the_id_of_approved_purchase_status( mocked_purchase_status_get_by_name, mocked_user_get_by_id ): # Mocking the cpf returned by crud.user.get_by_id mocked_user_get_by_id.return_value.cpf = "15350946056" arg_mock = mock.Mock() await domain.purchase.get_default_purchase_status_id( db=arg_mock, purchase_user_id=arg_mock ) # asserting if the method was called with "Approved" mocked_purchase_status_get_by_name.assert_awaited_with( db=arg_mock, name=schemas.statusEnum.APPROVED ) @pytest.mark.asyncio @mock.patch.object(crud.user, "get_by_id") @mock.patch.object(crud.purchase_status, "get_by_name") async def test_get_default_purchase_status_id_when_cpf_is_not_15350946056_must_return_the_id_of_in_validation_purchase_status( 
mocked_purchase_status_get_by_name, mocked_user_get_by_id ): # Mocking the cpf returned by crud.user.get_by_id mocked_user_get_by_id.return_value.cpf = "99999999999" arg_mock = mock.Mock() await domain.purchase.get_default_purchase_status_id( db=arg_mock, purchase_user_id=arg_mock ) # asserting if the method was called with "In validation" mocked_purchase_status_get_by_name.assert_awaited_with( db=arg_mock, name=schemas.statusEnum.IN_VALIDATION )
37.267606
126
0.792139
378
2,646
5.084656
0.206349
0.03642
0.037461
0.045786
0.855359
0.855359
0.784079
0.726327
0.676379
0.676379
0
0.050088
0.139834
2,646
70
127
37.8
0.794376
0.076342
0
0.5
0
0
0.02542
0
0
0
0
0
0.092593
1
0.055556
false
0
0.092593
0
0.148148
0
0
0
0
null
0
0
0
1
1
1
1
0
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
5
6b9500b653b87101f2761d6940c2480200f9e335
184
py
Python
bin/4_to_numpy.py
MichaelKreil/reverse_engineer_botometer
c93a4242e3896e2d482f22b669147f307819cedf
[ "MIT" ]
1
2021-05-02T14:02:56.000Z
2021-05-02T14:02:56.000Z
bin/4_to_numpy.py
MichaelKreil/reverse_engineer_botometer
c93a4242e3896e2d482f22b669147f307819cedf
[ "MIT" ]
1
2021-05-10T20:12:04.000Z
2021-05-10T20:12:04.000Z
bin/4_to_numpy.py
MichaelKreil/reverse_engineering_botometer
c93a4242e3896e2d482f22b669147f307819cedf
[ "MIT" ]
null
null
null
import numpy as np print('load vectors') data = np.loadtxt('../data/all_users_normalized.tsv') print(data.shape) print('save npy') np.save('../data/all_users_normalized.npy', data)
18.4
53
0.728261
29
184
4.482759
0.551724
0.107692
0.184615
0.338462
0
0
0
0
0
0
0
0
0.092391
184
9
54
20.444444
0.778443
0
0
0
0
0
0.456522
0.347826
0
0
0
0
0
1
0
false
0
0.166667
0
0.166667
0.5
1
0
0
null
0
1
1
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
1
0
5
6b963277f48ac22772778435989c8c6d37b3581b
167
py
Python
novosparc/io/__init__.py
mgmoshes/novosparc
dabd076692af104580bc80355cd95136a211e762
[ "BSD-3-Clause" ]
100
2019-02-04T21:54:51.000Z
2022-02-17T16:10:55.000Z
novosparc/io/__init__.py
mgmoshes/novosparc
dabd076692af104580bc80355cd95136a211e762
[ "BSD-3-Clause" ]
46
2019-07-04T12:41:26.000Z
2022-01-24T11:25:20.000Z
novosparc/io/__init__.py
mgmoshes/novosparc
dabd076692af104580bc80355cd95136a211e762
[ "BSD-3-Clause" ]
43
2019-03-25T18:11:16.000Z
2021-11-12T13:17:38.000Z
from ._saving import write_sdge_to_disk, save_gene_pattern_plots, save_spatially_informative_gene_pattern_plots from ._data_loading import load_data, load_target_space
83.5
111
0.91018
26
167
5.192308
0.692308
0.162963
0.237037
0
0
0
0
0
0
0
0
0
0.05988
167
2
112
83.5
0.859873
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
0
0
null
0
1
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
0
0
0
5
6ba25854b99612b99dab0eb88f933f4e57454eae
193
py
Python
examples/sms/send-sms.py
instasent/instasent-python-lib
bebf8de5f0bd5c3f676fdc88012cd39e4a8a4477
[ "MIT" ]
null
null
null
examples/sms/send-sms.py
instasent/instasent-python-lib
bebf8de5f0bd5c3f676fdc88012cd39e4a8a4477
[ "MIT" ]
null
null
null
examples/sms/send-sms.py
instasent/instasent-python-lib
bebf8de5f0bd5c3f676fdc88012cd39e4a8a4477
[ "MIT" ]
null
null
null
import instasent client = instasent.Client('my-token') response = client.send_sms('My company', '+34666666666', 'test message') print response['response_code'] print response['response_body']
27.571429
72
0.766839
24
193
6.041667
0.625
0.206897
0.289655
0
0
0
0
0
0
0
0
0.0625
0.088083
193
7
73
27.571429
0.761364
0
0
0
0
0
0.350515
0
0
0
0
0
0
0
null
null
0
0.2
null
null
0.4
1
0
0
null
1
1
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
1
0
0
0
0
0
0
0
0
5
6ba4918b78b800d05ad2a02339e234d7437f8903
264
py
Python
generated-libraries/python/netapp/snapmirror_policy/sm_restart_enum.py
radekg/netapp-ontap-lib-get
6445ebb071ec147ea82a486fbe9f094c56c5c40d
[ "MIT" ]
2
2017-03-28T15:31:26.000Z
2018-08-16T22:15:18.000Z
generated-libraries/python/netapp/snapmirror_policy/sm_restart_enum.py
radekg/netapp-ontap-lib-get
6445ebb071ec147ea82a486fbe9f094c56c5c40d
[ "MIT" ]
null
null
null
generated-libraries/python/netapp/snapmirror_policy/sm_restart_enum.py
radekg/netapp-ontap-lib-get
6445ebb071ec147ea82a486fbe9f094c56c5c40d
[ "MIT" ]
null
null
null
class SmRestartEnum(basestring): """ always|never|default Possible values: <ul> <li> "always" , <li> "never" , <li> "default" </ul> """ @staticmethod def get_api_name(): return "sm-restart-enum"
16.5
34
0.507576
25
264
5.28
0.76
0
0
0
0
0
0
0
0
0
0
0
0.344697
264
15
35
17.6
0.763006
0.375
0
0
0
0
0.12
0
0
0
0
0
0
1
0.25
true
0
0
0.25
0.75
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
1
0
0
1
1
0
0
5
6ba579632ac0753ea7880323ec21af668891e038
199
py
Python
tests/dtd/test_flatten_leaves.py
alexgorji/music_score
b4176da52295361f3436826903485c5cb8054c5e
[ "MIT" ]
2
2020-06-22T13:33:28.000Z
2020-12-30T15:09:00.000Z
tests/dtd/test_flatten_leaves.py
alexgorji/music_score
b4176da52295361f3436826903485c5cb8054c5e
[ "MIT" ]
37
2020-02-18T12:15:00.000Z
2021-12-13T20:01:14.000Z
tests/dtd/test_flatten_leaves.py
alexgorji/music_score
b4176da52295361f3436826903485c5cb8054c5e
[ "MIT" ]
null
null
null
from unittest import TestCase from musicscore.musicxml.elements.note import Note # class Test(TestCase): # def test(self): # dtd = Note()._DTD # print(dtd.get_flatten_leaves())
22.111111
50
0.678392
25
199
5.28
0.68
0
0
0
0
0
0
0
0
0
0
0
0.211055
199
9
51
22.111111
0.840764
0.537688
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
5
6bb73c603cc2551c3da3d43873e03a7500ed5907
126
py
Python
lib/modeling/semantic_seg_head/__init__.py
vinbigdata-medical/endocv2020-seg
91675391911a3d70a09c51edb0eeb73b1081b037
[ "Apache-2.0" ]
6
2021-02-13T18:41:59.000Z
2021-06-01T09:29:06.000Z
lib/modeling/semantic_seg_head/__init__.py
VinBDI-MedicalImagingTeam/endocv2020-seg
91675391911a3d70a09c51edb0eeb73b1081b037
[ "Apache-2.0" ]
1
2020-11-24T03:25:21.000Z
2020-11-24T03:25:21.000Z
lib/modeling/semantic_seg_head/__init__.py
vinbigdata-medical/endocv2020-seg
91675391911a3d70a09c51edb0eeb73b1081b037
[ "Apache-2.0" ]
1
2022-03-18T10:28:19.000Z
2022-03-18T10:28:19.000Z
from .build import build_sem_seg_head, SEM_SEG_HEAD_REGISTRY from .fpn import FPNHead from .unet import UNetDecoder, UNetHead
31.5
60
0.849206
20
126
5.05
0.6
0.118812
0.19802
0
0
0
0
0
0
0
0
0
0.111111
126
4
61
31.5
0.901786
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
0
0
null
0
1
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
0
0
0
5
6bb770dcf58ff571fbc8adc6f0670f9894694ca2
112
py
Python
wns/__init__.py
Neetuj/python-wns
3e3d87d7453f522bf4b558a7a64963e522b90a82
[ "BSD-3-Clause" ]
8
2015-06-21T15:29:19.000Z
2017-02-28T12:27:28.000Z
wns/__init__.py
Neetuj/python-wns
3e3d87d7453f522bf4b558a7a64963e522b90a82
[ "BSD-3-Clause" ]
10
2015-06-21T07:46:09.000Z
2021-08-14T18:54:38.000Z
wns/__init__.py
Neetuj/python-wns
3e3d87d7453f522bf4b558a7a64963e522b90a82
[ "BSD-3-Clause" ]
8
2015-07-02T12:51:43.000Z
2017-01-11T07:50:40.000Z
from __future__ import absolute_import from .wnslib import WNSClient, WNSException, WNSInvalidPushTypeException
37.333333
72
0.883929
11
112
8.545455
0.727273
0
0
0
0
0
0
0
0
0
0
0
0.089286
112
2
73
56
0.921569
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
5
6bc09c979a3ef26272819ef41dc7a29401cdd824
196
py
Python
bot_scheduler/scheduling/core.py
simoncrowe/bot-scheduler
0fba5a28620c68971e53bb65ee9b72ac4d920f10
[ "MIT" ]
null
null
null
bot_scheduler/scheduling/core.py
simoncrowe/bot-scheduler
0fba5a28620c68971e53bb65ee9b72ac4d920f10
[ "MIT" ]
null
null
null
bot_scheduler/scheduling/core.py
simoncrowe/bot-scheduler
0fba5a28620c68971e53bb65ee9b72ac4d920f10
[ "MIT" ]
null
null
null
from abc import ABC, abstractmethod from datetime import datetime class ScheduleOccurrence(ABC): @abstractmethod def occurs(self, time: datetime, interval: float) -> bool: pass
19.6
62
0.72449
22
196
6.454545
0.681818
0.239437
0
0
0
0
0
0
0
0
0
0
0.204082
196
9
63
21.777778
0.910256
0
0
0
0
0
0
0
0
0
0
0
0
1
0.166667
false
0.166667
0.333333
0
0.666667
0
1
0
0
null
1
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
1
1
0
0
0
0
5
6bef46c8f1aebe16ad841abb410ceded9c8090fe
196
py
Python
tests/functions/expr.py
Slater-Victoroff/pyjaco
89c4e3c46399c5023b0e160005d855a01241c58a
[ "MIT" ]
38
2015-01-01T18:08:59.000Z
2022-02-18T08:57:27.000Z
tests/functions/expr.py
dusty-phillips/pyjaco
066895ae38d1828498e529c1875cb88df6cbc54d
[ "MIT" ]
1
2020-07-15T13:30:32.000Z
2020-07-15T13:30:32.000Z
tests/functions/expr.py
Slater-Victoroff/pyjaco
89c4e3c46399c5023b0e160005d855a01241c58a
[ "MIT" ]
12
2016-03-07T09:30:49.000Z
2021-09-05T20:38:47.000Z
from __future__ import division a = 244 b = 23 print a print a + 4 print a - 2 print a << 4 print a >> 2 print a | 234324 print a & 213213 print a ^ 2312 print a // 324 print a / 2 print b ** 3
11.529412
31
0.642857
40
196
3.05
0.4
0.491803
0.172131
0.295082
0.278689
0.278689
0.278689
0.278689
0
0
0
0.211268
0.27551
196
16
32
12.25
0.647887
0
0
0
0
0
0
0
0
0
0
0
0
0
null
null
0
0.071429
null
null
0.785714
0
0
0
null
1
0
1
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
1
0
0
0
0
0
0
1
0
5
d42db6d4750d8763f47d114261a210fa35938713
52
py
Python
scatterbrane/__init__.py
krosenfeld/scatterbrane
1a7b1c357433f93e3380eb33431611288d13b462
[ "MIT" ]
2
2015-12-22T03:41:47.000Z
2019-12-10T05:03:42.000Z
scatterbrane/__init__.py
krosenfeld/scatterbrane
1a7b1c357433f93e3380eb33431611288d13b462
[ "MIT" ]
1
2016-03-03T13:25:17.000Z
2016-03-06T08:56:35.000Z
scatterbrane/__init__.py
krosenfeld/scatterbrane
1a7b1c357433f93e3380eb33431611288d13b462
[ "MIT" ]
3
2016-01-19T21:27:45.000Z
2022-03-10T01:56:27.000Z
from .brane import Brane from .tracks import Target
17.333333
26
0.807692
8
52
5.25
0.625
0
0
0
0
0
0
0
0
0
0
0
0.153846
52
2
27
26
0.954545
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
5
d46be57f307af97ad5aff34d7df5011084470892
65
py
Python
gem/src/keys_server/GMO/utils/__init__.py
Martynaslin/Workshop2019
b0edebc4c09a4778f2afa5fcd1a84e97300e15fb
[ "BSD-2-Clause" ]
1
2019-07-11T13:07:42.000Z
2019-07-11T13:07:42.000Z
gem/src/keys_server/GMO/utils/__init__.py
Martynaslin/Workshop2019
b0edebc4c09a4778f2afa5fcd1a84e97300e15fb
[ "BSD-2-Clause" ]
3
2019-07-02T17:04:39.000Z
2019-07-18T10:21:17.000Z
gem/src/keys_server/GMO/utils/__init__.py
Martynaslin/Workshop2019
b0edebc4c09a4778f2afa5fcd1a84e97300e15fb
[ "BSD-2-Clause" ]
6
2019-07-01T21:19:49.000Z
2021-02-10T13:34:51.000Z
from .AreaPerilLookup import * from .VulnerabilityLookup import *
32.5
34
0.830769
6
65
9
0.666667
0
0
0
0
0
0
0
0
0
0
0
0.107692
65
2
34
32.5
0.931034
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
5
d472c23275cbe54edb0e0b95e24713ba0dd27ae0
627
py
Python
horizon/views.py
patrickn699/django_tutorial
6a2d527d3800f342d0e4b740f116dffe08753a5a
[ "MIT" ]
null
null
null
horizon/views.py
patrickn699/django_tutorial
6a2d527d3800f342d0e4b740f116dffe08753a5a
[ "MIT" ]
null
null
null
horizon/views.py
patrickn699/django_tutorial
6a2d527d3800f342d0e4b740f116dffe08753a5a
[ "MIT" ]
null
null
null
from django.shortcuts import render from django.http import HttpResponse # Create your views here. def hello(request): #return HttpResponse('Hello World') #return render(request,'base.html', context={'name': 'PN'}) pass def root(request): return render(request, 'index.html',context={'user':request.user}) def squar(request): #num = request.GET['numb'] #num = int(num) #result = num**2 return render(request, 'base.html', context={'op': 'result'}) def square(request): num = request.POST['numb'] num = int(num) result = num**2 return render(request, 'op.html', context={'op': result})
28.5
70
0.668262
84
627
4.988095
0.416667
0.114558
0.181384
0.109785
0.317422
0.317422
0.200477
0.200477
0.200477
0.200477
0
0.00381
0.162679
627
22
71
28.5
0.794286
0.269537
0
0
0
0
0.097345
0
0
0
0
0
0
1
0.307692
false
0.076923
0.153846
0.153846
0.692308
0
0
0
0
null
0
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
1
0
1
1
0
0
5
2e66df94c47774a3b5e02877220b29d6acb7b776
53
py
Python
lib/video_utils.py
bcarroll/PiControl
a9afe0d42922fe33de3e44344a5997e0fa406cdf
[ "Apache-2.0" ]
2
2020-07-28T22:13:37.000Z
2022-01-05T19:09:36.000Z
lib/video_utils.py
bcarroll/PiControl
a9afe0d42922fe33de3e44344a5997e0fa406cdf
[ "Apache-2.0" ]
2
2020-07-28T22:11:28.000Z
2020-07-28T22:13:00.000Z
lib/video_utils.py
bcarroll/PiControl
a9afe0d42922fe33de3e44344a5997e0fa406cdf
[ "Apache-2.0" ]
1
2018-01-03T16:02:11.000Z
2018-01-03T16:02:11.000Z
# coding=utf8 import os from flask import jsonify
13.25
26
0.754717
8
53
5
0.875
0
0
0
0
0
0
0
0
0
0
0.02381
0.207547
53
3
27
17.666667
0.928571
0.207547
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
5
2e8be4df4e504ca0af6992dcec6668253f6932ea
239
py
Python
rio/graph/errors.py
soasme/rio
e6b89634db8d3ad75ac7f7b25ddec5b19d4f66e2
[ "MIT" ]
null
null
null
rio/graph/errors.py
soasme/rio
e6b89634db8d3ad75ac7f7b25ddec5b19d4f66e2
[ "MIT" ]
14
2016-04-14T04:18:41.000Z
2016-05-12T03:46:37.000Z
rio/graph/errors.py
soasme/rio
e6b89634db8d3ad75ac7f7b25ddec5b19d4f66e2
[ "MIT" ]
1
2016-04-06T08:54:20.000Z
2016-04-06T08:54:20.000Z
# -*- coding: utf-8 -*- class MissingSender(Exception): pass class WrongSenderSecret(Exception): pass class NotAllowed(Exception): pass class MissingProject(Exception): pass class MissingAction(Exception): pass
11.95
35
0.698745
23
239
7.26087
0.478261
0.389222
0.431138
0
0
0
0
0
0
0
0
0.005236
0.200837
239
19
36
12.578947
0.86911
0.087866
0
0.5
0
0
0
0
0
0
0
0
0
1
0
true
0.5
0
0
0.5
0
1
0
0
null
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
1
0
0
0
0
0
5
5cf5d37c9327df4e61eccb9b1ea0b534d4576434
208
py
Python
generate.py
shivamswarnkar/Image-Generator
55b6d066c84c615403e48c27e77ee017cf260955
[ "MIT" ]
8
2019-11-07T19:55:37.000Z
2021-11-11T06:53:50.000Z
generate.py
shivamswarnkar/Image-Generator
55b6d066c84c615403e48c27e77ee017cf260955
[ "MIT" ]
1
2021-07-02T23:44:22.000Z
2021-07-10T08:00:12.000Z
generate.py
shivamswarnkar/Image-Generator
55b6d066c84c615403e48c27e77ee017cf260955
[ "MIT" ]
2
2019-11-07T19:31:21.000Z
2019-11-21T12:02:12.000Z
from utils.args import get_generate_args from DCGAN.generate import generate_images if __name__ == '__main__': # read arguments from terminal args = get_generate_args() # train gan generate_images(args)
23.111111
42
0.793269
29
208
5.206897
0.551724
0.145695
0.198676
0
0
0
0
0
0
0
0
0
0.139423
208
9
43
23.111111
0.843575
0.182692
0
0
1
0
0.047619
0
0
0
0
0
0
1
0
false
0
0.4
0
0.4
0
1
0
0
null
0
1
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
1
0
0
0
null
0
0
0
0
0
0
0
0
1
0
0
0
0
5
cf010f0b7ff32f3147121341e91b94a194f1a907
300
py
Python
src/garage/torch/policies/__init__.py
Maltimore/garage
a3f44b37eeddca37d157766a9a72e8772f104bcd
[ "MIT" ]
1
2020-02-19T00:01:29.000Z
2020-02-19T00:01:29.000Z
src/garage/torch/policies/__init__.py
Maltimore/garage
a3f44b37eeddca37d157766a9a72e8772f104bcd
[ "MIT" ]
null
null
null
src/garage/torch/policies/__init__.py
Maltimore/garage
a3f44b37eeddca37d157766a9a72e8772f104bcd
[ "MIT" ]
1
2020-02-13T12:05:35.000Z
2020-02-13T12:05:35.000Z
"""PyTorch Policies.""" from garage.torch.policies.base import Policy from garage.torch.policies.deterministic_mlp_policy import ( DeterministicMLPPolicy) from garage.torch.policies.gaussian_mlp_policy import GaussianMLPPolicy __all__ = ['DeterministicMLPPolicy', 'GaussianMLPPolicy', 'Policy']
37.5
71
0.82
31
300
7.677419
0.451613
0.12605
0.189076
0.289916
0
0
0
0
0
0
0
0
0.083333
300
7
72
42.857143
0.865455
0.056667
0
0
0
0
0.162455
0.079422
0
0
0
0
0
1
0
false
0
0.6
0
0.6
0
0
0
0
null
0
1
1
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
1
0
1
0
0
5
cf10abcff8245b1c567666986ff370e163232a1b
80
py
Python
whatsapp.py
bhatiaharshit07/automate-whatsapp-insta
8eb7779e54e05ef7934bd7b24d685a86e9116dca
[ "Apache-2.0" ]
null
null
null
whatsapp.py
bhatiaharshit07/automate-whatsapp-insta
8eb7779e54e05ef7934bd7b24d685a86e9116dca
[ "Apache-2.0" ]
null
null
null
whatsapp.py
bhatiaharshit07/automate-whatsapp-insta
8eb7779e54e05ef7934bd7b24d685a86e9116dca
[ "Apache-2.0" ]
null
null
null
import pywhatkit pywhatkit.sendwhatmsg('+919313152973', 'test message', 16, 44)
26.666667
62
0.775
9
80
6.888889
0.888889
0
0
0
0
0
0
0
0
0
0
0.219178
0.0875
80
2
63
40
0.630137
0
0
0
0
0
0.3125
0
0
0
0
0
0
1
0
true
0
0.5
0
0.5
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
1
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
0
0
0
5
cf15412fe9b44f24408a1a6ad77545e5ccb9c23f
197
py
Python
similarity.py
Peter-Devine/text_finder
b09ae796511dc1d000b07c12996d25576566e012
[ "MIT" ]
null
null
null
similarity.py
Peter-Devine/text_finder
b09ae796511dc1d000b07c12996d25576566e012
[ "MIT" ]
null
null
null
similarity.py
Peter-Devine/text_finder
b09ae796511dc1d000b07c12996d25576566e012
[ "MIT" ]
null
null
null
from scipy import spatial # Find the distance between each embedding def get_pairwise_dist(embeddings): return spatial.distance.squareform(spatial.distance.pdist(embeddings, metric="cosine"))
32.833333
91
0.812183
25
197
6.32
0.8
0.189873
0
0
0
0
0
0
0
0
0
0
0.106599
197
5
92
39.4
0.897727
0.203046
0
0
0
0
0.03871
0
0
0
0
0
0
1
0.333333
false
0
0.333333
0.333333
1
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
0
1
1
0
0
0
5
cf197c9423cff95f882f9e53463708ad94867d60
76
py
Python
contests_atcoder/abc185/abc185c.py
takelifetime/competitive-programming
e7cf8ef923ccefad39a1727ca94c610d650fcb76
[ "BSD-2-Clause" ]
null
null
null
contests_atcoder/abc185/abc185c.py
takelifetime/competitive-programming
e7cf8ef923ccefad39a1727ca94c610d650fcb76
[ "BSD-2-Clause" ]
1
2021-01-02T06:36:51.000Z
2021-01-02T06:36:51.000Z
contests_atcoder/abc185/abc185c.py
takelifetime/competitive-programming
e7cf8ef923ccefad39a1727ca94c610d650fcb76
[ "BSD-2-Clause" ]
null
null
null
from scipy.special import comb print(comb(int(input()) - 1, 11, exact=True))
38
45
0.723684
13
76
4.230769
0.923077
0
0
0
0
0
0
0
0
0
0
0.044118
0.105263
76
2
45
38
0.764706
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
0.5
0
0.5
0.5
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
0
1
0
5
cf4b87ae6d64cdc4e146ac75e5dd924690cbdbbd
148
py
Python
hubcare/metrics/community_metrics/code_of_conduct/admin.py
aleronupe/2019.1-hubcare-api
3f031eac9559a10fdcf70a88ee4c548cf93e4ac2
[ "MIT" ]
7
2019-03-31T17:58:45.000Z
2020-02-29T22:44:27.000Z
hubcare/metrics/community_metrics/code_of_conduct/admin.py
aleronupe/2019.1-hubcare-api
3f031eac9559a10fdcf70a88ee4c548cf93e4ac2
[ "MIT" ]
90
2019-03-26T01:14:54.000Z
2021-06-10T21:30:25.000Z
hubcare/metrics/community_metrics/code_of_conduct/admin.py
aleronupe/2019.1-hubcare-api
3f031eac9559a10fdcf70a88ee4c548cf93e4ac2
[ "MIT" ]
null
null
null
from django.contrib import admin from code_of_conduct.models import CodeOfConduct # Register your models here. admin.site.register(CodeOfConduct)
21.142857
48
0.837838
20
148
6.1
0.7
0
0
0
0
0
0
0
0
0
0
0
0.108108
148
6
49
24.666667
0.924242
0.175676
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
0.666667
0
0.666667
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
5
cf86ef9d454ea3240d1c9b7769d0317bb52038b3
196
py
Python
app.py
botul/app01
53539554017b0f136f2bc21656dcecf3c41b622e
[ "MIT" ]
1
2022-03-14T21:16:06.000Z
2022-03-14T21:16:06.000Z
app.py
botul/app01
53539554017b0f136f2bc21656dcecf3c41b622e
[ "MIT" ]
null
null
null
app.py
botul/app01
53539554017b0f136f2bc21656dcecf3c41b622e
[ "MIT" ]
1
2022-03-09T23:03:40.000Z
2022-03-09T23:03:40.000Z
from crypt import methods from flask import Flask, render_template, url_for app = Flask(__name__) @app.route("/", methods=['GET', 'POST']) def main(): return render_template('index.html')
17.818182
49
0.709184
27
196
4.888889
0.703704
0.212121
0
0
0
0
0
0
0
0
0
0
0.142857
196
10
50
19.6
0.785714
0
0
0
0
0
0.092784
0
0
0
0
0
0
1
0.166667
false
0
0.333333
0.166667
0.666667
0
1
0
0
null
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
1
1
1
0
0
5
d882f839b906598f90bdf75181a786a5ada76afb
62
py
Python
tests/bento_service_examples/local_dependencies/local_module/__init__.py
co42/BentoML
b14c748c9a2841731c6b7694ccd61125324661ec
[ "Apache-2.0" ]
3,451
2019-04-02T01:47:42.000Z
2022-03-31T16:20:49.000Z
tests/bento_service_examples/local_dependencies/local_module/__init__.py
co42/BentoML
b14c748c9a2841731c6b7694ccd61125324661ec
[ "Apache-2.0" ]
1,925
2019-04-03T00:19:05.000Z
2022-03-31T22:41:54.000Z
tests/bento_service_examples/local_dependencies/local_module/__init__.py
co42/BentoML
b14c748c9a2841731c6b7694ccd61125324661ec
[ "Apache-2.0" ]
451
2019-04-02T01:53:41.000Z
2022-03-29T08:49:06.000Z
def dependency_in_local_module_directory(foo): return foo
20.666667
46
0.822581
9
62
5.222222
0.888889
0
0
0
0
0
0
0
0
0
0
0
0.129032
62
2
47
31
0.87037
0
0
0
0
0
0
0
0
0
0
0
0
1
0.5
false
0
0
0.5
1
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
0
0
1
1
0
0
5
d8980e387126bb048452011b8b0d6b4987e97605
38
py
Python
application/__init__.py
JuiceFV/RamblerTask
1aa57fefcd96059ac63391d6d178ea7cfa49e1d0
[ "MIT" ]
1
2020-03-18T12:29:34.000Z
2020-03-18T12:29:34.000Z
application/__init__.py
JuiceFV/RamblerTask
1aa57fefcd96059ac63391d6d178ea7cfa49e1d0
[ "MIT" ]
11
2020-03-06T18:21:17.000Z
2022-03-12T00:34:37.000Z
application/__init__.py
JuiceFV/RamblerTask
1aa57fefcd96059ac63391d6d178ea7cfa49e1d0
[ "MIT" ]
null
null
null
"""The basic application's module. """
19
34
0.684211
5
38
5.2
1
0
0
0
0
0
0
0
0
0
0
0
0.105263
38
2
35
19
0.764706
0.815789
0
null
0
null
0
0
null
0
0
0
null
1
null
true
0
0
null
null
null
1
1
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
1
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
0
0
0
0
0
5
d898df74717a3dc949234d5a9a0df5ff534980b6
241
py
Python
MyPythonDemos2018/SimpleDemos/a11_isinstance.py
zcatt/MyDemos2018
a332fdf94170663ba7a530cedc28418159a1c29a
[ "MIT" ]
null
null
null
MyPythonDemos2018/SimpleDemos/a11_isinstance.py
zcatt/MyDemos2018
a332fdf94170663ba7a530cedc28418159a1c29a
[ "MIT" ]
null
null
null
MyPythonDemos2018/SimpleDemos/a11_isinstance.py
zcatt/MyDemos2018
a332fdf94170663ba7a530cedc28418159a1c29a
[ "MIT" ]
null
null
null
#!usr/bin/env python #coding:utf-8 """ isinstance(object, classinfo) """ class A: pass class B(A): pass c=2 print(isinstance(c, int)) print(isinstance(c, (str, int, list))) print(isinstance(A(), A)) print(isinstance(B(), A))
10.954545
38
0.630705
38
241
4.026316
0.552632
0.392157
0.20915
0
0
0
0
0
0
0
0
0.009901
0.161826
241
21
39
11.47619
0.742574
0.128631
0
0.222222
0
0
0
0
0
0
0
0
0
0
null
null
0.222222
0
null
null
0.444444
0
0
0
null
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
1
0
0
1
0
0
0
1
0
5
d8c2407e457e8d6363497223fcab842464725aff
330
py
Python
lambda_processors/newssite-scraper-processors/jq_scripts.py
EmilLaursen/art_docker
5a4b33690637e5c8e7b53f67e2ecd3b168b4436a
[ "MIT" ]
null
null
null
lambda_processors/newssite-scraper-processors/jq_scripts.py
EmilLaursen/art_docker
5a4b33690637e5c8e7b53f67e2ecd3b168b4436a
[ "MIT" ]
4
2021-02-10T01:54:55.000Z
2022-03-02T14:59:51.000Z
lambda_processors/newssite-scraper-processors/jq_scripts.py
EmilLaursen/art_docker
5a4b33690637e5c8e7b53f67e2ecd3b168b4436a
[ "MIT" ]
null
null
null
NON_EMPTY_FILEKEYS = """ .Contents[] | select(.Size > 0) | .Key | select(endswith(".jsonl")) """ EMPTY_FILEKEYS = """ .Contents[] | select(.Size == 0) | .Key | select(endswith(".jsonl")) """ DELETE_FILTER = """ .ResponseMetadata | {HTTPStatusCode, RetryAttempts} """
18.333333
56
0.515152
26
330
6.384615
0.576923
0.156627
0.253012
0.325301
0.650602
0.650602
0.650602
0.650602
0.650602
0.650602
0
0.008584
0.293939
330
17
57
19.411765
0.703863
0
0
0.6
0
0
0.766667
0.157576
0
0
0
0
0
1
0
false
0
0
0
0
0
1
0
0
null
0
1
1
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
5
2b40a6069c7edfdedd87b842f10dca4966cba07e
49
py
Python
test/samples-python/simple.py
voltek62/vscode-ipe
8056154f2cd2c8ba4d743bd206938cd09ad199f9
[ "MIT" ]
227
2018-07-24T08:55:17.000Z
2018-11-07T15:45:38.000Z
test/samples-python/simple.py
voltek62/vscode-ipe
8056154f2cd2c8ba4d743bd206938cd09ad199f9
[ "MIT" ]
22
2018-10-23T14:25:11.000Z
2021-06-11T09:30:54.000Z
test/samples-python/simple.py
voltek62/vscode-ipe
8056154f2cd2c8ba4d743bd206938cd09ad199f9
[ "MIT" ]
24
2018-11-08T10:41:44.000Z
2022-01-15T20:16:42.000Z
print('hello') print('how are you') print(2+4*2)
12.25
20
0.653061
10
49
3.2
0.7
0
0
0
0
0
0
0
0
0
0
0.068182
0.102041
49
3
21
16.333333
0.659091
0
0
0
0
0
0.326531
0
0
0
0
0
0
1
0
true
0
0
0
0
1
1
1
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
0
0
0
1
0
5
2b4612e5e95e6607c23797e89a69e33c999907f3
60
py
Python
_solved/_solutions/pandas_09_data_cleaning8.py
jorisvandenbossche/ICES-python-data
63864947657f37cb26cb4e2dcd67ff106dffe9cd
[ "BSD-3-Clause" ]
1
2022-03-02T17:41:46.000Z
2022-03-02T17:41:46.000Z
_solved/_solutions/pandas_09_data_cleaning8.py
jorisvandenbossche/ICES-python-data
63864947657f37cb26cb4e2dcd67ff106dffe9cd
[ "BSD-3-Clause" ]
1
2022-03-14T15:15:53.000Z
2022-03-14T15:15:53.000Z
_solved/_solutions/pandas_09_data_cleaning8.py
jorisvandenbossche/ICES-python-data
63864947657f37cb26cb4e2dcd67ff106dffe9cd
[ "BSD-3-Clause" ]
null
null
null
casualties["DT_HOUR"] = casualties["DT_HOUR"].replace(99, 6)
60
60
0.733333
9
60
4.666667
0.666667
0.571429
0.761905
0
0
0
0
0
0
0
0
0.052632
0.05
60
1
60
60
0.684211
0
0
0
0
0
0.229508
0
0
0
0
0
0
1
0
true
0
0
0
0
0
1
0
0
null
1
1
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
0
0
0
0
0
5
9937d5890ed042ba0409db30120ad943acc483bf
143
py
Python
voxel_globe/websockets/admin.py
ngageoint/voxel-globe
91f386de652b704942165889c10468b2c4cf4eec
[ "MIT" ]
28
2015-07-27T23:57:24.000Z
2020-04-05T15:10:52.000Z
voxel_globe/websockets/admin.py
VisionSystemsInc/voxel_globe
6eb3fca5586726428e9d914f7b730ca164c64a52
[ "MIT" ]
50
2016-02-11T15:50:22.000Z
2016-10-27T22:38:27.000Z
voxel_globe/websockets/admin.py
ngageoint/voxel-globe
91f386de652b704942165889c10468b2c4cf4eec
[ "MIT" ]
8
2015-07-27T19:22:03.000Z
2021-01-04T09:44:48.000Z
from django.contrib import admin from voxel_globe.websockets import models # Register your models here. admin.site.register(models.LogMessage)
28.6
41
0.839161
20
143
5.95
0.7
0
0
0
0
0
0
0
0
0
0
0
0.097902
143
5
42
28.6
0.922481
0.181818
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
0.666667
0
0.666667
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
5
994970b20049714bfc1d25cfe3519e0f2179f93f
279
py
Python
tests/lnttool/test_importreport.py
llvm/lnt
77e0a25f996a5363e23f701c0d995525a5c6484a
[ "Apache-2.0" ]
19
2019-01-15T03:04:00.000Z
2021-12-08T00:09:01.000Z
tests/lnttool/test_importreport.py
llvm/lnt
77e0a25f996a5363e23f701c0d995525a5c6484a
[ "Apache-2.0" ]
5
2019-04-11T06:22:18.000Z
2021-09-13T17:41:14.000Z
tests/lnttool/test_importreport.py
llvm/lnt
77e0a25f996a5363e23f701c0d995525a5c6484a
[ "Apache-2.0" ]
21
2019-02-10T02:47:55.000Z
2022-03-31T14:16:36.000Z
# Testing text importing. # # RUN: echo "foo.exec 10" > input # RUN: echo "bar.exec 20" >> input # RUN: echo "foo.hash d7" >> input # RUN: echo "bar.profile Xz6/" >> input # RUN: lnt importreport --testsuite nts --order 123 --machine foo input output.json # RUN: cat output.json
31
83
0.670251
43
279
4.348837
0.581395
0.149733
0.192513
0.160428
0
0
0
0
0
0
0
0.03913
0.175627
279
8
84
34.875
0.773913
0.939068
0
null
0
null
0
0
null
0
0
0
null
1
null
true
0
0
null
null
null
0
0
0
null
0
1
1
0
0
0
0
0
0
0
0
0
0
1
0
0
0
1
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
0
0
0
0
0
5
99930df0a669a56ee2c952346d0c338a1a0f3f34
150
py
Python
lib/#bin/__init__.py
dalbertweiss/DataPreprocessing
84d4cc73f8c34801ec68203a1be69bdb5cfcba3e
[ "MIT" ]
null
null
null
lib/#bin/__init__.py
dalbertweiss/DataPreprocessing
84d4cc73f8c34801ec68203a1be69bdb5cfcba3e
[ "MIT" ]
null
null
null
lib/#bin/__init__.py
dalbertweiss/DataPreprocessing
84d4cc73f8c34801ec68203a1be69bdb5cfcba3e
[ "MIT" ]
null
null
null
# -*- coding: utf-8 -*- """ Created on Thu Feb 24 13:45:29 2022 @author: D.Albert-Weiss """ from .preprocessing import * from .augmentation import *
16.666667
35
0.666667
22
150
4.545455
0.909091
0
0
0
0
0
0
0
0
0
0
0.104
0.166667
150
9
36
16.666667
0.696
0.553333
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
5
512596a30496e4ff197f8b42cd0a26b159c671c6
78
py
Python
src/sqlalchemy_declarative/schema/__init__.py
DanCardin/sqlalchemy-declarative
e82da0a03235edfbc2348cf65d3d9e1c944ef0d2
[ "Apache-2.0" ]
null
null
null
src/sqlalchemy_declarative/schema/__init__.py
DanCardin/sqlalchemy-declarative
e82da0a03235edfbc2348cf65d3d9e1c944ef0d2
[ "Apache-2.0" ]
null
null
null
src/sqlalchemy_declarative/schema/__init__.py
DanCardin/sqlalchemy-declarative
e82da0a03235edfbc2348cf65d3d9e1c944ef0d2
[ "Apache-2.0" ]
null
null
null
# flake8: noqa from sqlalchemy_declarative.schema.base import Schema, Schemas
26
62
0.833333
10
78
6.4
0.9
0
0
0
0
0
0
0
0
0
0
0.014286
0.102564
78
2
63
39
0.9
0.153846
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
5
51597838a6aa218175cdf305489d932b6db05f5c
300
py
Python
src/nlp_datasets/sentence_classification/__init__.py
TeaKatz/NLP_Datasets
6eeacd0d120ce8d7d1e3da2b40af94006ee1cdf6
[ "MIT" ]
null
null
null
src/nlp_datasets/sentence_classification/__init__.py
TeaKatz/NLP_Datasets
6eeacd0d120ce8d7d1e3da2b40af94006ee1cdf6
[ "MIT" ]
null
null
null
src/nlp_datasets/sentence_classification/__init__.py
TeaKatz/NLP_Datasets
6eeacd0d120ce8d7d1e3da2b40af94006ee1cdf6
[ "MIT" ]
null
null
null
from .AmazonDataset import AmazonDataset from .YahooDataset import YahooDataset from .STSDataset import STSDataset from .SNLIDataset import SNLIDataset, RefinedSNLIDataset from .MNLIDataset import MNLIDataset, RefinedMNLIDataset from .NLIDataset import NLIDataset, RefinedNLIDataset, SimcseNLIDataset
50
71
0.876667
28
300
9.392857
0.428571
0
0
0
0
0
0
0
0
0
0
0
0.09
300
6
71
50
0.96337
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
5
515a21b5b12c6f6199001752dbc699e45e9dbb8f
2,969
py
Python
mysite_env/mysite/blog/views.py
Hongyil1/Django-learning-project
13d4a5731f81a538e91d8fc7fad0587245056aea
[ "Apache-2.0" ]
null
null
null
mysite_env/mysite/blog/views.py
Hongyil1/Django-learning-project
13d4a5731f81a538e91d8fc7fad0587245056aea
[ "Apache-2.0" ]
null
null
null
mysite_env/mysite/blog/views.py
Hongyil1/Django-learning-project
13d4a5731f81a538e91d8fc7fad0587245056aea
[ "Apache-2.0" ]
null
null
null
from django.shortcuts import render_to_response, get_object_or_404 from django.core.paginator import Paginator from .models import Blog, BlogType from django.conf import settings # Create your views here. # 返回列表 def blog_list(request): page_num = request.GET.get('page', 1) # 获取页码参数(GET请求) blogs_all_list = Blog.objects.all() paginator = Paginator(blogs_all_list, settings.EACH_PAGE_BLOGS_NUMBER) # 10 articles per page page_of_blogs = paginator.get_page(page_num) # return 1 when invalid input current_page_num = page_of_blogs.number # 当前页码 page_range = list(range(max(current_page_num - 2, 1), current_page_num)) + \ list(range(current_page_num, min(current_page_num + 2, paginator.num_pages) + 1)) # Plus 省略标记 if page_range[0] - 1 >=2: page_range.insert(0, "...") if paginator.num_pages - page_range[-1] >=2: page_range.append("...") # Plus the first and last page if page_range[0] != 1: page_range.insert(0, 1) if page_range[-1] != paginator.num_pages: page_range.append(paginator.num_pages) context = {} context['blogs'] = page_of_blogs.object_list context['page_of_blogs'] = page_of_blogs context['page_range'] = page_range context['blog_types'] = BlogType.objects.all() context['blogs_count'] = Blog.objects.all().count() return render_to_response('blog/blog_list.html', context=context) def blog_detail(request, blog_pk): context = {} context['blog'] = get_object_or_404(Blog, pk=blog_pk) return render_to_response('blog/blog_detail.html', context=context) def blogs_with_type(request, blog_type_pk): context = {} blog_type = get_object_or_404(BlogType, pk=blog_type_pk) blogs_all_list = Blog.objects.filter(blog_type=blog_type) page_num = request.GET.get('page', 1) # 获取页码参数(GET请求) paginator = Paginator(blogs_all_list, settings.EACH_PAGE_BLOGS_NUMBER) # 10 articles per page page_of_blogs = paginator.get_page(page_num) # return 1 when invalid input current_page_num = page_of_blogs.number # 当前页码 page_range = list(range(max(current_page_num - 2, 1), current_page_num)) + \ 
list(range(current_page_num, min(current_page_num + 2, paginator.num_pages) + 1)) # Plus 省略标记 if page_range[0] - 1 >= 2: page_range.insert(0, "...") if paginator.num_pages - page_range[-1] >= 2: page_range.append("...") # Plus the first and last page if page_range[0] != 1: page_range.insert(0, 1) if page_range[-1] != paginator.num_pages: page_range.append(paginator.num_pages) context['blogs'] = page_of_blogs.object_list context['blog_type'] = blog_type context['page_of_blogs'] = page_of_blogs context['page_range'] = page_range context['blog_types'] = BlogType.objects.all() # context['blogs_count'] = Blog.objects.all().count() return render_to_response('blog/blogs_with_type.html', context=context)
41.236111
98
0.697204
436
2,969
4.444954
0.16055
0.102167
0.05676
0.03096
0.762642
0.738906
0.721362
0.721362
0.687307
0.650155
0
0.019334
0.181206
2,969
71
99
41.816901
0.777869
0.099023
0
0.709091
0
0
0.069575
0.0173
0
0
0
0
0
1
0.054545
false
0
0.072727
0
0.181818
0
0
0
0
null
0
0
0
0
1
1
1
0
1
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
5
8510b5c92b049bacb7037c7a0d30fa69388aaf93
283
py
Python
ambra_sdk/service/entrypoints/webhook.py
dicomgrid/sdk-python
bb12eed311bad73dfb863917df4dc5cbcd91a447
[ "Apache-2.0" ]
9
2020-04-20T23:45:44.000Z
2021-04-18T11:22:17.000Z
ambra_sdk/service/entrypoints/webhook.py
dicomgrid/sdk-python
bb12eed311bad73dfb863917df4dc5cbcd91a447
[ "Apache-2.0" ]
13
2020-02-08T16:15:05.000Z
2021-09-13T22:55:28.000Z
ambra_sdk/service/entrypoints/webhook.py
dicomgrid/sdk-python
bb12eed311bad73dfb863917df4dc5cbcd91a447
[ "Apache-2.0" ]
6
2020-03-25T17:47:45.000Z
2021-04-18T11:22:19.000Z
from ambra_sdk.service.entrypoints.generated.webhook import \ AsyncWebhook as GAsyncWebhook from ambra_sdk.service.entrypoints.generated.webhook import Webhook as GWebhook class Webhook(GWebhook): """Webhook.""" class AsyncWebhook(GAsyncWebhook): """AsyncWebhook."""
23.583333
79
0.773852
30
283
7.233333
0.433333
0.082949
0.110599
0.175115
0.479263
0.479263
0.479263
0.479263
0
0
0
0
0.123675
283
11
80
25.727273
0.875
0.077739
0
0
1
0
0
0
0
0
0
0
0
1
0
true
0
0.4
0
0.8
0
0
0
0
null
0
0
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
5
8512beec6e68db826b699c765d5c25252d0755b2
150
py
Python
ferris/core/oauth2/__init__.py
palladius/gae-ferris-ricc
e6d9d8d4aadeae10eb258b94b6fe5912c8630b36
[ "MIT" ]
2
2015-03-04T07:05:57.000Z
2015-03-04T07:06:00.000Z
ferris/core/oauth2/__init__.py
palladius/gae-ferris-ricc
e6d9d8d4aadeae10eb258b94b6fe5912c8630b36
[ "MIT" ]
null
null
null
ferris/core/oauth2/__init__.py
palladius/gae-ferris-ricc
e6d9d8d4aadeae10eb258b94b6fe5912c8630b36
[ "MIT" ]
null
null
null
from .user_credentials import UserCredentials, find_credentials from .service_account import build_credentials from .util import credentials_to_token
37.5
63
0.886667
19
150
6.684211
0.631579
0.23622
0
0
0
0
0
0
0
0
0
0
0.086667
150
3
64
50
0.927007
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
0
0
null
1
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
0
0
0
5
518763986478203a9906ddc45b26c7e702dea034
305
py
Python
licos/json/element/lobby/JsonGetSettings.py
tk-tam/LiCOS-JSON4Python
d1d5e1362f8eca93f4e66e4a1759ecca5e68003e
[ "Apache-2.0" ]
null
null
null
licos/json/element/lobby/JsonGetSettings.py
tk-tam/LiCOS-JSON4Python
d1d5e1362f8eca93f4e66e4a1759ecca5e68003e
[ "Apache-2.0" ]
null
null
null
licos/json/element/lobby/JsonGetSettings.py
tk-tam/LiCOS-JSON4Python
d1d5e1362f8eca93f4e66e4a1759ecca5e68003e
[ "Apache-2.0" ]
null
null
null
import licos.json.element.lobby import jsons from typing import List from dataclasses import dataclass @dataclass class JsonGetSettings( type: str, TypeSystem(type)): def _validType -> str: return JsonGetSettings.type class JsonGetSettings: type: str = "getSettings"
16.052632
35
0.711475
33
305
6.545455
0.606061
0.263889
0.222222
0.25
0
0
0
0
0
0
0
0
0.222951
305
18
36
16.944444
0.911392
0
0
0
0
0
0.036066
0
0
0
0
0
0
0
null
null
0
0.333333
null
null
0
0
0
0
null
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
1
0
0
0
1
0
0
0
0
5
51e91e9cbf8b499f641eb83712e1636fefe8bb03
195
py
Python
PYTHON/2_desktop/1_env.py
sj-jw/python
4d1d6f86fddbd99f0efb18da60ff08c57ce3718e
[ "MIT" ]
null
null
null
PYTHON/2_desktop/1_env.py
sj-jw/python
4d1d6f86fddbd99f0efb18da60ff08c57ce3718e
[ "MIT" ]
null
null
null
PYTHON/2_desktop/1_env.py
sj-jw/python
4d1d6f86fddbd99f0efb18da60ff08c57ce3718e
[ "MIT" ]
null
null
null
import pyautogui size = pyautogui.size() #현재 화면의스크린 사이즈를 가져옴 print(size) import pyautogui size = pyautogui.size() #현재화면의 스크린사이즈 가져옴 print(size) #가로 세로 크기 #size[0] = width #size[1] = hight
12.1875
43
0.702564
30
195
4.566667
0.566667
0.379562
0.277372
0.408759
0.467153
0
0
0
0
0
0
0.0125
0.179487
195
15
44
13
0.84375
0.369231
0
1
0
0
0
0
0
0
0
0
0
1
0
false
0
0.333333
0
0.333333
0.333333
0
0
0
null
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
null
0
0
0
0
0
0
0
0
1
0
0
0
0
5
51f916e5ea8439629cd441139d2c59fc8fbbaa19
152
py
Python
hyperg/__init__.py
weleen/MGH.pytorch
69f2830f6bd60fe3b33c80c04540c0c800d26de1
[ "Apache-2.0" ]
4
2021-10-06T15:57:29.000Z
2021-12-21T12:46:19.000Z
hyperg/__init__.py
weleen/MGH.pytorch
69f2830f6bd60fe3b33c80c04540c0c800d26de1
[ "Apache-2.0" ]
1
2022-02-14T06:36:19.000Z
2022-02-24T08:18:39.000Z
hyperg/__init__.py
weleen/MGH.pytorch
69f2830f6bd60fe3b33c80c04540c0c800d26de1
[ "Apache-2.0" ]
null
null
null
from .spectral_clustering import spectral_hg_partitioning from .gen_hg import gen_knn_hg, gen_clustering_hg, concat_multi_hg from .hyperg import HyperG
38
66
0.875
24
152
5.125
0.458333
0
0
0
0
0
0
0
0
0
0
0
0.092105
152
3
67
50.666667
0.891304
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
5
cf97de0eda39fd87d7eedbd780bed68a3efccc0b
149
py
Python
Tests Nose/programa.py
txtbits/daw-python
5dde1207e2791e90aa5e9ce2b6afc4116129efab
[ "MIT" ]
null
null
null
Tests Nose/programa.py
txtbits/daw-python
5dde1207e2791e90aa5e9ce2b6afc4116129efab
[ "MIT" ]
null
null
null
Tests Nose/programa.py
txtbits/daw-python
5dde1207e2791e90aa5e9ce2b6afc4116129efab
[ "MIT" ]
null
null
null
''' Created on 10/02/2012 @author: Alumno ''' def suma(x, y): return x+y def multiplica(x, y): return x*y def cuadrado(x): return x*x
10.642857
21
0.597315
27
149
3.296296
0.518519
0.089888
0.179775
0.202247
0.292135
0.292135
0
0
0
0
0
0.070796
0.241611
149
14
22
10.642857
0.716814
0.255034
0
0
0
0
0
0
0
0
0
0
0
1
0.5
false
0
0
0.5
1
0
1
0
0
null
0
0
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
0
0
1
1
0
0
5
cfc0a82dc9bd932b99b9cc4c746664c2f7f007be
53
py
Python
bencode/__init__.py
Martynas-P/bencode
d4b2a406e07aa828bfc02eb1ed3bd68efbe1c6cb
[ "Apache-2.0" ]
null
null
null
bencode/__init__.py
Martynas-P/bencode
d4b2a406e07aa828bfc02eb1ed3bd68efbe1c6cb
[ "Apache-2.0" ]
null
null
null
bencode/__init__.py
Martynas-P/bencode
d4b2a406e07aa828bfc02eb1ed3bd68efbe1c6cb
[ "Apache-2.0" ]
null
null
null
from .encode import encode from .decode import decode
26.5
26
0.830189
8
53
5.5
0.5
0
0
0
0
0
0
0
0
0
0
0
0.132075
53
2
27
26.5
0.956522
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
5
cfd129e7689ac29ad5a748f88435938a7eaefcd9
482
py
Python
algorithms/calculator/reverse_polish_notation/__init__.py
kirkirey/programming-for-linguists
d97c59738713fab725073e9c88c7321119a648fc
[ "Apache-2.0" ]
null
null
null
algorithms/calculator/reverse_polish_notation/__init__.py
kirkirey/programming-for-linguists
d97c59738713fab725073e9c88c7321119a648fc
[ "Apache-2.0" ]
null
null
null
algorithms/calculator/reverse_polish_notation/__init__.py
kirkirey/programming-for-linguists
d97c59738713fab725073e9c88c7321119a648fc
[ "Apache-2.0" ]
4
2021-02-09T12:00:34.000Z
2021-05-21T18:59:38.000Z
""" Programming for linguists ReversePolishNotation module """ from algorithms.calculator.reverse_polish_notation.binary_op import BinaryOp from algorithms.calculator.reverse_polish_notation.digit import Digit from algorithms.calculator.reverse_polish_notation.op import Op, OpFactory from algorithms.calculator.reverse_polish_notation.reverse_polish_notation import ReversePolishNotation from algorithms.calculator.reverse_polish_notation.bracket import CloseBracket, OpenBracket
43.818182
103
0.890041
55
482
7.563636
0.363636
0.1875
0.302885
0.372596
0.540865
0.540865
0
0
0
0
0
0
0.062241
482
10
104
48.2
0.920354
0.114108
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
0
0
0
null
0
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
5
cfdf2d2b71e1b04e16c209cf4ec4e963013f4883
140
py
Python
test_main.py
ledzep2/musicxml_to_jianpu
818a4c8a27cc0c0b12346d8defe0e2ea77387a59
[ "MIT" ]
17
2019-04-22T23:07:48.000Z
2021-07-20T07:11:20.000Z
test_main.py
ledzep2/musicxml_to_jianpu
818a4c8a27cc0c0b12346d8defe0e2ea77387a59
[ "MIT" ]
null
null
null
test_main.py
ledzep2/musicxml_to_jianpu
818a4c8a27cc0c0b12346d8defe0e2ea77387a59
[ "MIT" ]
6
2020-01-18T03:49:38.000Z
2022-03-30T02:51:53.000Z
#!/usr/bin/env python3 import unittest from test_reader import * from test_writer import * if __name__ == "__main__": unittest.main()
15.555556
26
0.728571
19
140
4.842105
0.684211
0.173913
0
0
0
0
0
0
0
0
0
0.008547
0.164286
140
8
27
17.5
0.777778
0.15
0
0
0
0
0.067797
0
0
0
0
0
0
1
0
true
0
0.6
0
0.6
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
5
3207a6789c43bad79b040f6a1a9b189a72778379
143
py
Python
scripts/__init__.py
owid/co2-data
c3e17d2842f9f1a5efadc583ae665d91c2483a3a
[ "CC-BY-4.0" ]
245
2020-08-20T18:24:26.000Z
2022-03-29T16:01:31.000Z
scripts/__init__.py
yuzhangnju/co2-data
c3e17d2842f9f1a5efadc583ae665d91c2483a3a
[ "CC-BY-4.0" ]
19
2020-11-25T19:29:02.000Z
2022-02-28T10:26:27.000Z
scripts/__init__.py
yuzhangnju/co2-data
c3e17d2842f9f1a5efadc583ae665d91c2483a3a
[ "CC-BY-4.0" ]
105
2020-08-28T11:12:10.000Z
2022-03-27T02:30:55.000Z
import os CURRENT_DIR = os.path.dirname(__file__) INPUT_DIR = os.path.join(CURRENT_DIR, "input") OUTPUT_DIR = os.path.join(CURRENT_DIR, "..")
23.833333
46
0.741259
23
143
4.217391
0.434783
0.309278
0.278351
0.268041
0.474227
0.474227
0
0
0
0
0
0
0.097902
143
5
47
28.6
0.751938
0
0
0
0
0
0.048951
0
0
0
0
0
0
1
0
false
0
0.25
0
0.25
0
1
0
0
null
1
1
1
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
5
321fc8c8cf68644ac239a9df5959a034e2d3d8f6
2,739
py
Python
example/run_FindPathMany_minimal.py
zehuilu/DrMaMP-Distributed-Real-time-Multi-agent-Mission-Planning-Algorithm
894875ebddf7d1f6bbf7a47ce82f05d7be2bafdc
[ "Apache-2.0" ]
4
2022-02-22T05:12:18.000Z
2022-03-29T01:56:37.000Z
example/run_FindPathMany_minimal.py
zehuilu/DrMaMP-Distributed-Real-time-Multi-agent-Mission-Planning-Algorithm
894875ebddf7d1f6bbf7a47ce82f05d7be2bafdc
[ "Apache-2.0" ]
null
null
null
example/run_FindPathMany_minimal.py
zehuilu/DrMaMP-Distributed-Real-time-Multi-agent-Mission-Planning-Algorithm
894875ebddf7d1f6bbf7a47ce82f05d7be2bafdc
[ "Apache-2.0" ]
3
2022-02-23T03:14:56.000Z
2022-03-14T12:22:05.000Z
#!/usr/bin/env python3 import time import pathmagic with pathmagic.context(): import DrMaMP if __name__ == "__main__": # define the world map map_width = 20 map_height = 20 world_map = [ # 00 01 02 03 04 05 06 07 08 09 10 11 12 13 14 15 16 17 18 19 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1, # 00 1,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,1, # 01 1,9,9,1,1,9,9,9,1,9,1,9,1,9,1,9,9,9,1,1, # 02 1,9,9,1,1,9,9,9,1,9,1,9,1,9,1,9,9,9,1,1, # 03 1,9,1,1,1,1,9,9,1,9,1,9,1,1,1,1,9,9,1,1, # 04 1,9,1,1,9,1,1,1,1,9,1,1,1,1,9,1,1,1,1,1, # 05 1,9,9,9,9,1,1,1,1,1,1,9,9,9,9,1,1,1,1,1, # 06 1,9,9,9,9,9,9,9,9,1,1,1,9,9,9,9,9,9,9,1, # 07 1,9,1,1,1,1,1,1,1,1,1,9,1,1,1,1,1,1,1,1, # 08 1,9,1,9,9,9,9,9,9,9,1,1,9,9,9,9,9,9,9,1, # 09 1,9,1,1,1,1,9,1,1,9,1,1,1,1,1,1,1,1,1,1, # 10 1,9,9,9,9,9,1,9,1,9,1,9,9,9,9,9,1,1,1,1, # 11 1,9,1,9,1,9,9,9,1,9,1,9,1,9,1,9,9,9,1,1, # 12 1,9,1,9,1,9,9,9,1,9,1,9,1,9,1,9,9,9,1,1, # 13 1,9,1,1,1,1,9,9,1,9,1,9,1,1,1,1,9,9,1,1, # 14 1,9,1,1,9,1,1,1,1,9,1,1,1,1,9,1,1,1,1,1, # 15 1,9,9,9,9,1,1,1,1,1,1,9,9,9,9,1,1,1,1,1, # 16 1,1,9,9,9,9,9,9,9,1,1,1,9,9,9,1,9,9,9,9, # 17 1,9,1,1,1,1,1,1,1,1,1,9,1,1,1,1,1,1,1,1, # 18 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1 # 19 ] # for LazyThetaStarPython, 0 for no obstacles; 255 for obstacles for idx in range(len(world_map)): if world_map[idx] == 9: world_map[idx] = 255 else: world_map[idx] = 0 # This is for a single start and goal start = [0, 0] end = [15, 10] t0 = time.time() # solve it path, distance = DrMaMP.FindPath(start, end, world_map, map_width, map_height) t1 = time.time() print("This is the path. 
Time used [sec]:" + str(t1 - t0)) print("Total distance: " + str(distance)) for idx in range(0,len(path),2): str_print = str(path[idx]) + ', ' + str(path[idx+1]) print(str_print) # This is for an agent and a set of targets agent_position = [0, 0] targets_position = [15,10, 19,19, 13,10] t0 = time.time() # solve it path_many, distances_many = DrMaMP.FindPathMany(agent_position, targets_position, world_map, map_width, map_height) t1 = time.time() print("These are all the paths. Time used [sec]:" + str(t1 - t0)) for i in range(0,len(path_many),1): print("This is a path.") print("Total distance: " + str(distances_many[i])) for j in range(0,len(path_many[i]),2): str_print = str(path_many[i][j]) + ', ' + str(path_many[i][j+1]) print(str_print)
34.670886
119
0.512961
673
2,739
2.034175
0.132244
0.219138
0.245435
0.2542
0.486486
0.438276
0.382031
0.344777
0.338934
0.322863
0
0.260955
0.258489
2,739
78
120
35.115385
0.413097
0.117196
0
0.321429
0
0
0.056067
0
0
0
0
0
0
1
0
false
0
0.053571
0
0.053571
0.160714
0
0
0
null
1
1
1
0
0
0
0
0
0
0
1
0
0
0
0
0
1
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
5
5c671faa0d2d1713ea133bd3ab5d9e99df115864
81
py
Python
supersuit/aec_vector/__init__.py
mimoralea/SuperSuit
b30160468add83591a606b43809d3474b67f2c21
[ "MIT" ]
null
null
null
supersuit/aec_vector/__init__.py
mimoralea/SuperSuit
b30160468add83591a606b43809d3474b67f2c21
[ "MIT" ]
null
null
null
supersuit/aec_vector/__init__.py
mimoralea/SuperSuit
b30160468add83591a606b43809d3474b67f2c21
[ "MIT" ]
null
null
null
from .base_aec_vec_env import VectorAECEnv from .create import vectorize_aec_env
27
42
0.876543
13
81
5.076923
0.692308
0
0
0
0
0
0
0
0
0
0
0
0.098765
81
2
43
40.5
0.90411
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
5
5c7cbe7d260969d074b5bebf045b705e3140c73f
85
py
Python
ahegao/exceptions.py
AhegaoTeam/AhegaoAPIPython
da7933f9565fa3e4bbe9cfec7054b4bac0ba9fc6
[ "MIT" ]
null
null
null
ahegao/exceptions.py
AhegaoTeam/AhegaoAPIPython
da7933f9565fa3e4bbe9cfec7054b4bac0ba9fc6
[ "MIT" ]
null
null
null
ahegao/exceptions.py
AhegaoTeam/AhegaoAPIPython
da7933f9565fa3e4bbe9cfec7054b4bac0ba9fc6
[ "MIT" ]
null
null
null
class ApiError(Exception): pass class AuthorizationFailed(Exception): pass
12.142857
37
0.741176
8
85
7.875
0.625
0.412698
0
0
0
0
0
0
0
0
0
0
0.188235
85
6
38
14.166667
0.913043
0
0
0.5
0
0
0
0
0
0
0
0
0
1
0
true
0.5
0
0
0.5
0
1
0
0
null
1
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
1
0
0
0
0
0
5
5c8302b57422beb7ec46b246e74f0c6b24754e1b
90
py
Python
main/send_data/send.py
anonymous203030/Tele_Source
cb8a591bdca2d3d73690ba8c277816b7f38447d7
[ "MIT" ]
null
null
null
main/send_data/send.py
anonymous203030/Tele_Source
cb8a591bdca2d3d73690ba8c277816b7f38447d7
[ "MIT" ]
null
null
null
main/send_data/send.py
anonymous203030/Tele_Source
cb8a591bdca2d3d73690ba8c277816b7f38447d7
[ "MIT" ]
null
null
null
# TODO: create functions for data sending async def send_data(event, buttons): pass
15
41
0.733333
13
90
5
0.923077
0
0
0
0
0
0
0
0
0
0
0
0.2
90
5
42
18
0.902778
0.433333
0
0
0
0
0
0
0
0
0
0.2
0
1
0
true
0.5
0
0
0
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
1
0
0
0
1
1
0
0
0
0
0
5
5cb1c3f2222680c95018fcd16f3f37fb14725937
67
py
Python
nuggt/__init__.py
healthonrails/nuggt
7a5e624a5931c115916a19174100d305265f21f1
[ "MIT" ]
5
2020-06-11T08:24:17.000Z
2021-07-06T00:20:16.000Z
nuggt/__init__.py
healthonrails/nuggt
7a5e624a5931c115916a19174100d305265f21f1
[ "MIT" ]
6
2018-05-01T16:52:26.000Z
2021-10-15T20:42:11.000Z
nuggt/__init__.py
healthonrails/nuggt
7a5e624a5931c115916a19174100d305265f21f1
[ "MIT" ]
5
2019-07-15T15:28:07.000Z
2021-01-12T16:42:48.000Z
from .brain_regions import BrainRegions from .utils import ngutils
22.333333
39
0.850746
9
67
6.222222
0.777778
0
0
0
0
0
0
0
0
0
0
0
0.119403
67
2
40
33.5
0.949153
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
5
5cb4a72da561ea8ff6bc3ca5f3c83c4edbe2e978
281
py
Python
examples/pos_tagging/src/data/__init__.py
obss/trapper
40e6fc25a2d8c1ece8bf006c362a9cb163c4355c
[ "MIT" ]
36
2021-11-01T19:29:31.000Z
2022-02-25T15:19:08.000Z
examples/pos_tagging/src/data/__init__.py
obss/trapper
40e6fc25a2d8c1ece8bf006c362a9cb163c4355c
[ "MIT" ]
7
2021-11-01T14:33:21.000Z
2022-03-22T09:01:36.000Z
examples/pos_tagging/src/data/__init__.py
obss/trapper
40e6fc25a2d8c1ece8bf006c362a9cb163c4355c
[ "MIT" ]
4
2021-11-30T00:34:20.000Z
2022-03-31T21:06:30.000Z
from src.data.data_adapter import ExampleDataAdapterForPosTagging from src.data.data_processor import ExampleConll2003PosTaggingDataProcessor from src.data.label_mapper import ExampleLabelMapperForPosTagging from src.data.tokenizer_wrapper import ExamplePosTaggingTokenizerWrapper
56.2
75
0.914591
28
281
9.035714
0.5
0.110672
0.173913
0.118577
0
0
0
0
0
0
0
0.015094
0.05694
281
4
76
70.25
0.939623
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
5
7a7e6f46826ba7a4757111d6fa38e4c3973a71bc
203
py
Python
My_Blog_Project/views.py
mannnD/Myblog
2426a6c59ebc747eb574fa4cf645708adbd78ec8
[ "BSD-3-Clause" ]
null
null
null
My_Blog_Project/views.py
mannnD/Myblog
2426a6c59ebc747eb574fa4cf645708adbd78ec8
[ "BSD-3-Clause" ]
null
null
null
My_Blog_Project/views.py
mannnD/Myblog
2426a6c59ebc747eb574fa4cf645708adbd78ec8
[ "BSD-3-Clause" ]
null
null
null
from django.http import HttpResponse from django.shortcuts import HttpResponseRedirect from django.urls import reverse def index(request): return HttpResponseRedirect(reverse('App_Blog:blog_list'))
29
62
0.832512
25
203
6.68
0.64
0.179641
0
0
0
0
0
0
0
0
0
0
0.103448
203
6
63
33.833333
0.917582
0
0
0
0
0
0.08867
0
0
0
0
0
0
1
0.2
false
0
0.6
0.2
1
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
1
1
1
0
0
5
7aa51bcd5ce38cd2c21189dca6369236a27faace
2,916
py
Python
datahub/omis/quote/test/test_managers.py
Staberinde/data-hub-api
3d0467dbceaf62a47158eea412a3dba827073300
[ "MIT" ]
6
2019-12-02T16:11:24.000Z
2022-03-18T10:02:02.000Z
datahub/omis/quote/test/test_managers.py
Staberinde/data-hub-api
3d0467dbceaf62a47158eea412a3dba827073300
[ "MIT" ]
1,696
2019-10-31T14:08:37.000Z
2022-03-29T12:35:57.000Z
datahub/omis/quote/test/test_managers.py
Staberinde/data-hub-api
3d0467dbceaf62a47158eea412a3dba827073300
[ "MIT" ]
9
2019-11-22T12:42:03.000Z
2021-09-03T14:25:05.000Z
from unittest import mock import pytest from dateutil.parser import parse as dateutil_parse from datahub.company.test.factories import AdviserFactory from datahub.omis.quote.models import Quote, TermsAndConditions # mark the whole module for db use pytestmark = pytest.mark.django_db class TestQuoteManager: """Tests for the Quote Manager.""" @mock.patch('datahub.omis.quote.managers.calculate_quote_expiry_date') @mock.patch('datahub.omis.quote.managers.generate_quote_reference') @mock.patch('datahub.omis.quote.managers.generate_quote_content') def test_create_from_order_commit_true( self, mocked_generate_quote_content, mocked_generate_quote_reference, mocked_calculate_quote_expiry_date, ): """ Test that Quote.objects.create_from_order creates a quote and commits the changes. """ expiry_date = dateutil_parse('2030-01-01').date() mocked_generate_quote_content.return_value = 'Quote content' mocked_generate_quote_reference.return_value = 'ABC123' mocked_calculate_quote_expiry_date.return_value = expiry_date by = AdviserFactory() quote = Quote.objects.create_from_order( order=mock.MagicMock(), by=by, commit=True, ) quote.refresh_from_db() assert quote.reference == 'ABC123' assert quote.content == 'Quote content' assert quote.created_by == by assert quote.expires_on == expiry_date assert quote.terms_and_conditions == TermsAndConditions.objects.first() @mock.patch('datahub.omis.quote.managers.calculate_quote_expiry_date') @mock.patch('datahub.omis.quote.managers.generate_quote_reference') @mock.patch('datahub.omis.quote.managers.generate_quote_content') def test_create_from_order_commit_false( self, mocked_generate_quote_content, mocked_generate_quote_reference, mocked_calculate_quote_expiry_date, ): """ Test that Quote.objects.create_from_order with commit=False builds a quote but doesn't commit the changes. 
""" expiry_date = dateutil_parse('2030-01-01').date() mocked_generate_quote_content.return_value = 'Quote content' mocked_generate_quote_reference.return_value = 'ABC123' mocked_calculate_quote_expiry_date.return_value = expiry_date quote = Quote.objects.create_from_order( order=mock.MagicMock(), by=AdviserFactory(), commit=False, ) assert quote.reference == 'ABC123' assert quote.content == 'Quote content' assert not quote.created_by assert quote.expires_on == expiry_date assert quote.terms_and_conditions == TermsAndConditions.objects.first() with pytest.raises(Quote.DoesNotExist): quote.refresh_from_db()
35.13253
82
0.695816
340
2,916
5.673529
0.226471
0.062208
0.078797
0.062208
0.73717
0.73717
0.73717
0.73717
0.73717
0.73717
0
0.012329
0.221193
2,916
82
83
35.560976
0.837076
0.08642
0
0.666667
0
0
0.157935
0.120955
0
0
0
0
0.175439
1
0.035088
false
0
0.087719
0
0.140351
0
0
0
0
null
0
0
0
0
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
5
8f87ea48d13f7312812eee69176d703fcd384f7f
175
py
Python
code/python/tests-workshop/app.py
jsphwllng/aws-cdk-intro-workshop
49f0fa48ad408b8b9d65071fee5136e6592de9dc
[ "MIT-0" ]
null
null
null
code/python/tests-workshop/app.py
jsphwllng/aws-cdk-intro-workshop
49f0fa48ad408b8b9d65071fee5136e6592de9dc
[ "MIT-0" ]
59
2022-01-10T06:14:45.000Z
2022-03-28T06:15:52.000Z
code/python/tests-workshop/app.py
jsphwllng/aws-cdk-intro-workshop
49f0fa48ad408b8b9d65071fee5136e6592de9dc
[ "MIT-0" ]
null
null
null
#!/usr/bin/env python3 import aws_cdk as cdk from cdk_workshop.cdk_workshop_stack import CdkWorkshopStack app = cdk.App() CdkWorkshopStack(app, "cdk-workshop") app.synth()
17.5
60
0.782857
26
175
5.115385
0.538462
0.24812
0.330827
0
0
0
0
0
0
0
0
0.00641
0.108571
175
9
61
19.444444
0.846154
0.12
0
0
0
0
0.078431
0
0
0
0
0
0
1
0
false
0
0.4
0
0.4
0
1
0
0
null
1
1
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
1
0
0
0
0
5
8f8dfa35fcf160d7f63d536ccf2c38633befc844
11,484
py
Python
test/test_request.py
aws-greengrass/aws-greengrass-cloudwatch-metrics
b428cb0ca78ff1f67e346d6d4cce7994f6462860
[ "Apache-2.0" ]
1
2022-03-23T16:25:48.000Z
2022-03-23T16:25:48.000Z
test/test_request.py
aws-greengrass/aws-greengrass-cloudwatch-metrics
b428cb0ca78ff1f67e346d6d4cce7994f6462860
[ "Apache-2.0" ]
1
2022-03-23T16:35:49.000Z
2022-03-23T20:23:32.000Z
test/test_request.py
aws-greengrass/aws-greengrass-cloudwatch-metrics
b428cb0ca78ff1f67e346d6d4cce7994f6462860
[ "Apache-2.0" ]
null
null
null
# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. # SPDX-License-Identifier: Apache-2.0 import pytest from src.request import * DEFAULT_NAMESPACE = 'Greengrass' DEFAULT_METRIC_NAME = 'Count' DEFAULT_METRIC_VALUE = 12.0 DEFAULT_METRIC_UNITS = 'Seconds' DEFAULT_DIMENSION_NAME = 'hostname' DEFAULT_DIMENSION_VALUE = 'test_hostname' class TestPutMetricRequest(object): def assert_default_metric_values(self, metric_datum): assert metric_datum['Value'] == DEFAULT_METRIC_VALUE assert metric_datum['MetricName'] == DEFAULT_METRIC_NAME assert metric_datum['Unit'] == DEFAULT_METRIC_UNITS assert metric_datum['Dimensions'][0]['Name'] == DEFAULT_DIMENSION_NAME assert metric_datum['Dimensions'][0]['Value'] == DEFAULT_DIMENSION_VALUE def test_successful_parse_request(self): event = self.create_valid_request_with_all_fields() put_request = PutMetricRequest(event) assert put_request.namespace == DEFAULT_NAMESPACE self.assert_default_metric_values(put_request.metric_datum) def test_parse_fails_with_empty_input(self): with pytest.raises(Exception) as error: PutMetricRequest("") assert 'input is empty' in str(error.value) def test_parse_fails_with_no_request_field(self): with pytest.raises(Exception) as error: PutMetricRequest({'Random': 'test'}) assert 'mandatory field ({}) is absent in the input'.format( FIELD_REQUEST) in str(error.value) def test_parse_fails_with_request_field_is_not_dict(self): with pytest.raises(Exception) as error: PutMetricRequest("test") assert 'mandatory field ({}) is not a dict in the input'.format( FIELD_REQUEST) in str(error.value) def test_parse_request_fails_when_timestamp_is_not_number(self): event = self.create_valid_request_with_all_fields() event['request']['metricData']['timestamp'] = 'Wed, 24 Jun 2020 14:09:19 UTC' with pytest.raises(Exception) as error: PutMetricRequest(event) assert 'field ({}) is not a number, must be in (milliseconds)'.format(FIELD_METRIC_TIMESTAMP) in str( error.value) def test_add_dimension(self): event = 
    # NOTE(review): the next two statements are the truncated tail of a test
    # method whose head lies outside this chunk — original presumably read
    # `event = self.create_valid_request_with_all_fields()`; confirm upstream.
    self.create_valid_request_with_all_fields()
    put_request = PutMetricRequest(event)
    put_request.add_dimension('TestName', 'TestValue')
    assert put_request.namespace == DEFAULT_NAMESPACE
    # appended dimension lands at index 1, after the default dimension
    assert put_request.metric_datum['Dimensions'][1]['Name'] == 'TestName'
    assert put_request.metric_datum['Dimensions'][1]['Value'] == 'TestValue'
    # clear out dimension array
    del event['request']['metricData']['dimensions'][:]
    put_request = PutMetricRequest(event)
    put_request.add_dimension('TestName', 'TestValue')
    assert put_request.namespace == DEFAULT_NAMESPACE
    # with no pre-existing dimensions the added one is at index 0
    assert put_request.metric_datum['Dimensions'][0]['Name'] == 'TestName'
    assert put_request.metric_datum['Dimensions'][0]['Value'] == 'TestValue'
    # remove the dimension array
    del event['request']['metricData']['dimensions']
    put_request = PutMetricRequest(event)
    put_request.add_dimension('TestName', 'TestValue')
    assert put_request.namespace == DEFAULT_NAMESPACE
    assert put_request.metric_datum['Dimensions'][0]['Name'] == 'TestName'
    assert put_request.metric_datum['Dimensions'][0]['Value'] == 'TestValue'

    def test_parse_request_success_when_dimensions_empty(self):
        """An explicitly empty `dimensions` list parses to zero Dimensions."""
        event = self.create_valid_request_with_all_fields()
        # clear out the dimension array
        del event['request']['metricData']['dimensions'][:]
        print(event)
        put_request = PutMetricRequest(event)
        assert put_request.namespace == DEFAULT_NAMESPACE
        assert len(put_request.metric_datum['Dimensions']) == 0

    def test_parse_request_success_when_dimensions_absent(self):
        """A missing `dimensions` key also parses to zero Dimensions."""
        event = self.create_valid_request_with_all_fields()
        # remove the dimension array
        del event['request']['metricData']['dimensions']
        print(event)
        put_request = PutMetricRequest(event)
        assert put_request.namespace == DEFAULT_NAMESPACE
        assert len(put_request.metric_datum['Dimensions']) == 0

    def test_successful_parse_request_with_multiple_dimensions(self):
        """Extra dimensions are preserved in order after the default one."""
        event = self.create_valid_request_with_all_fields()
        new_dimension = {'name': 'new_name', 'value': 'new_value'}
        event['request']['metricData']['dimensions'].append(new_dimension)
        put_request = PutMetricRequest(event)
        put_request_metric_datum = put_request.metric_datum
        self.assert_default_metric_values(put_request_metric_datum)
        # second entry (index 1) is the dimension appended above
        assert put_request_metric_datum['Dimensions'][1].get(
            'Name') == 'new_name'
        assert put_request_metric_datum['Dimensions'][1].get(
            'Value') == 'new_value'

    def test_parse_request_fails_when_dimensions_exceed_limit(self):
        """Exceeding MAX_DIMENSIONS_PER_METRIC raises with a limit message."""
        event = self.create_valid_request_with_all_fields()
        new_dimension = {'name': 'new_name', 'value': 'new_value'}
        # 10 extra dimensions on top of the default one pushes past the cap
        event['request']['metricData']['dimensions'].extend([new_dimension,
                                                             new_dimension,
                                                             new_dimension,
                                                             new_dimension,
                                                             new_dimension,
                                                             new_dimension,
                                                             new_dimension,
                                                             new_dimension,
                                                             new_dimension,
                                                             new_dimension])
        with pytest.raises(Exception) as error:
            PutMetricRequest(event)
        assert 'More than ({}) entries present in field (dimensions)'.format(MAX_DIMENSIONS_PER_METRIC) in str(
            error.value)

    def test_parse_request_fails_when_dimensions_name_absent(self):
        """A dimension entry without its `name` field is rejected."""
        event = self.create_valid_request_with_all_fields()
        del event['request']['metricData']['dimensions'][0]['name']
        with pytest.raises(Exception) as error:
            PutMetricRequest(event)
        assert 'mandatory field ({}) is absent in the dimension'.format(
            FIELD_DIMENSION_NAME) in str(error.value)

    def test_parse_request_fails_when_dimensions_is_not_dict(self):
        """`dimensions` must be a list; dict and str values are rejected."""
        event = self.create_valid_request_with_all_fields()
        event['request']['metricData']['dimensions'] = {}
        with pytest.raises(Exception) as error:
            PutMetricRequest(event)
        assert 'field ({}) is not of type list in the input'.format(
            FIELD_DIMENSIONS) in str(error.value)
        # same rejection for a plain string value
        event['request']['metricData']['dimensions'] = "string"
        with pytest.raises(Exception) as error:
            PutMetricRequest(event)
        assert 'field ({}) is not of type list in the input'.format(
            FIELD_DIMENSIONS) in str(error.value)

    def test_parse_request_fails_when_dimensions_value_absent(self):
        """A dimension entry without its `value` field is rejected."""
        event = self.create_valid_request_with_all_fields()
        del event['request']['metricData']['dimensions'][0]['value']
        with pytest.raises(Exception) as error:
            PutMetricRequest(event)
        assert 'mandatory field ({}) is absent in the dimension'.format(
            FIELD_DIMENSION_VALUE) in str(error.value)

    def test_parse_request_succeeds_when_value_is_int(self):
        """Negative integer metric values are accepted as-is."""
        event = self.create_valid_request_with_all_fields()
        event['request']['metricData']['value'] = -1
        put_request = PutMetricRequest(event)
        assert put_request.metric_datum['Value'] == -1

    def test_parse_request_fails_when_value_is_not_number(self):
        """A non-numeric metric value is rejected."""
        event = self.create_valid_request_with_all_fields()
        event['request']['metricData']['value'] = 'test'
        with pytest.raises(Exception) as error:
            PutMetricRequest(event)
        assert 'mandatory field ({}) is not a number'.format(
            FIELD_METRIC_VALUE) in str(error.value)

    def test_parse_request_fails_when_value_is_absent(self):
        """A missing metric value is rejected."""
        event = self.create_valid_request_with_all_fields()
        del event['request']['metricData']['value']
        with pytest.raises(Exception) as error:
            PutMetricRequest(event)
        assert 'mandatory field ({}) is absent in the input'.format(
            FIELD_METRIC_VALUE) in str(error.value)

    def test_parse_request_fails_when_unit_is_not_valid(self):
        """A unit outside VALID_UNIT_VALUES is rejected."""
        event = self.create_valid_request_with_all_fields()
        event['request']['metricData']['unit'] = 'random_value'
        with pytest.raises(Exception) as error:
            PutMetricRequest(event)
        assert 'field ({}) is not a valid value, must be in ({})'.format(FIELD_METRIC_UNIT, VALID_UNIT_VALUES) in str(
            error.value)

    def test_parse_request_fails_when_namespace_absent(self):
        """A missing namespace is rejected."""
        event = self.create_valid_request_with_all_fields()
        del event['request']['namespace']
        with pytest.raises(Exception) as error:
            PutMetricRequest(event)
        assert 'mandatory field ({}) is absent in the input'.format(
            FIELD_NAMESPACE) in str(error.value)

    def test_parse_request_fails_when_metricdata_absent(self):
        """A missing metricData dict is rejected.

        NOTE(review): unlike the sibling tests, this one asserts only that an
        exception is raised, not its message — confirm whether that is
        intentional.
        """
        event = self.create_valid_request_with_all_fields()
        del event['request']['metricData']
        with pytest.raises(Exception) as error:
            PutMetricRequest(event)

    def test_parse_request_fails_when_metricdata_is_not_dict(self):
        """`metricData` must be a dict; list and str values are rejected."""
        event = self.create_valid_request_with_all_fields()
        event['request']['metricData'] = []
        with pytest.raises(Exception) as error:
            PutMetricRequest(event)
        assert 'Incorrect payload format, field ({}) is not a dict'.format(
            FIELD_METRIC_DATA) in str(error.value)
        # same rejection for a plain string value
        event['request']['metricData'] = "string"
        with pytest.raises(Exception) as error:
            PutMetricRequest(event)
        assert 'Incorrect payload format, field ({}) is not a dict'.format(
            FIELD_METRIC_DATA) in str(error.value)

    def test_parse_request_fails_when_metricname_absent(self):
        """A missing metricName is rejected."""
        event = self.create_valid_request_with_all_fields()
        del event['request']['metricData']['metricName']
        with pytest.raises(Exception) as error:
            PutMetricRequest(event)
        assert 'mandatory field ({}) is absent in the input'.format(
            FIELD_METRIC_NAME) in str(error.value)

    def test_parse_request_succeeds_when_all_optional_fields_absent(self):
        """Only namespace, metricName and value are mandatory."""
        event = self.create_valid_request_with_all_fields()
        # dimensions and unit are optional
        del event['request']['metricData']['dimensions']
        del event['request']['metricData']['unit']
        put_request = PutMetricRequest(event)
        assert put_request.namespace == DEFAULT_NAMESPACE
        assert put_request.metric_datum['MetricName'] == DEFAULT_METRIC_NAME
        assert put_request.metric_datum['Value'] == DEFAULT_METRIC_VALUE

    def create_valid_request_with_all_fields(self):
        """Build a fully-populated, valid request payload fixture."""
        return {
            "request": {
                "namespace": DEFAULT_NAMESPACE,
                "metricData": {
                    "metricName": DEFAULT_METRIC_NAME,
                    "dimensions": [
                        {
                            "name": DEFAULT_DIMENSION_NAME,
                            "value": DEFAULT_DIMENSION_VALUE
                        }
                    ],
                    "value": DEFAULT_METRIC_VALUE,
                    "unit": DEFAULT_METRIC_UNITS
                }
            }
        }
40.013937
118
0.661268
1,301
11,484
5.521906
0.09608
0.050111
0.06431
0.061247
0.833241
0.809438
0.778536
0.738029
0.686526
0.648107
0
0.003768
0.237374
11,484
286
119
40.153846
0.816511
0.01846
0
0.534314
0
0
0.15881
0
0
0
0
0
0.215686
1
0.117647
false
0
0.009804
0.004902
0.137255
0.009804
0
0
0
null
0
0
0
1
1
1
1
0
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
5
8fcdfc60f01c81065c0109bf0e16d4f673472ed3
160
py
Python
pyvims/errors.py
seignovert/pyvims
a70b5b9b8bc5c37fa43b7db4d15407f312a31849
[ "BSD-3-Clause" ]
4
2019-09-16T15:50:22.000Z
2021-04-08T15:32:48.000Z
pyvims/errors.py
seignovert/pyvims
a70b5b9b8bc5c37fa43b7db4d15407f312a31849
[ "BSD-3-Clause" ]
3
2018-05-04T09:28:24.000Z
2018-12-03T09:00:31.000Z
pyvims/errors.py
seignovert/pyvims
a70b5b9b8bc5c37fa43b7db4d15407f312a31849
[ "BSD-3-Clause" ]
1
2020-10-12T15:14:17.000Z
2020-10-12T15:14:17.000Z
"""VIMS generic errors.""" class VIMSError(Exception): """Generic VIMS error.""" class VIMSCameraError(VIMSError): """Generic VIMS Camera error."""
16
36
0.66875
16
160
6.6875
0.5625
0.205607
0
0
0
0
0
0
0
0
0
0
0.1625
160
9
37
17.777778
0.798507
0.41875
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
0
0
1
0
1
0
0
null
1
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
0
0
1
0
0
5
89127a9bc32a7d034401f48b4ba6cbcccd1bc178
28
py
Python
metapipe/__main__.py
TorkamaniLab/metapipe
15592e5b0c217afb00ac03503f8d0d7453d4baf4
[ "MIT" ]
11
2016-01-26T06:47:05.000Z
2022-02-23T19:12:00.000Z
metapipe/__main__.py
TorkamaniLab/metapipe
15592e5b0c217afb00ac03503f8d0d7453d4baf4
[ "MIT" ]
44
2016-01-08T00:46:47.000Z
2016-04-13T00:46:47.000Z
metapipe/__main__.py
TorkamaniLab/metapipe
15592e5b0c217afb00ac03503f8d0d7453d4baf4
[ "MIT" ]
4
2015-10-30T19:24:13.000Z
2020-01-25T02:56:53.000Z
"""Package entry point: delegate to the application's ``main()``.

Fix: the original called ``main()`` unconditionally at module import time,
so merely importing this module would launch the application. Guarding with
``__name__ == "__main__"`` keeps ``python -m <package>`` working while making
the import side-effect free.
"""
from app import main

if __name__ == "__main__":
    main()
9.333333
20
0.75
5
28
4.2
0.8
0
0
0
0
0
0
0
0
0
0
0
0.178571
28
2
21
14
0.913043
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
0.5
0
0.5
0
1
1
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
0
0
0
5
56b8c0b107e398c693e7b5f35b8827501bb3a501
1,951
py
Python
sanic/application/logo.py
Varriount/sanic
55c36e0240dfeb03deccdeb5a53ca7fcfa728bff
[ "MIT" ]
1,883
2021-01-05T11:28:36.000Z
2022-03-31T19:24:26.000Z
sanic/application/logo.py
Varriount/sanic
55c36e0240dfeb03deccdeb5a53ca7fcfa728bff
[ "MIT" ]
451
2021-01-05T12:19:49.000Z
2022-03-31T11:06:23.000Z
sanic/application/logo.py
Varriount/sanic
55c36e0240dfeb03deccdeb5a53ca7fcfa728bff
[ "MIT" ]
271
2021-01-06T03:46:44.000Z
2022-03-28T14:35:41.000Z
"""Sanic startup-banner logos and the logic that picks one."""
# NOTE(review): in this view the newlines inside the triple-quoted logo
# strings appear collapsed to spaces — the assignments below are kept exactly
# as shown; restore the original multi-line art from upstream before relying
# on the exact string contents.
import re
import sys
from os import environ

# Plain-text fallback banner (used when stdout is not a terminal).
BASE_LOGO = """ Sanic Build Fast. Run Fast. """
# ANSI 24-bit colour art variants (the \033[...m sequences are escape codes).
COFFEE_LOGO = """\033[48;2;255;13;104m \033[0m \033[38;2;255;255;255;48;2;255;13;104m ▄████████▄ \033[0m \033[38;2;255;255;255;48;2;255;13;104m ██ ██▀▀▄ \033[0m \033[38;2;255;255;255;48;2;255;13;104m ███████████ █ \033[0m \033[38;2;255;255;255;48;2;255;13;104m ███████████▄▄▀ \033[0m \033[38;2;255;255;255;48;2;255;13;104m ▀███████▀ \033[0m \033[48;2;255;13;104m \033[0m Dark roast. No sugar."""
COLOR_LOGO = """\033[48;2;255;13;104m \033[0m \033[38;2;255;255;255;48;2;255;13;104m ▄███ █████ ██ \033[0m \033[38;2;255;255;255;48;2;255;13;104m ██ \033[0m \033[38;2;255;255;255;48;2;255;13;104m ▀███████ ███▄ \033[0m \033[38;2;255;255;255;48;2;255;13;104m ██ \033[0m \033[38;2;255;255;255;48;2;255;13;104m ████ ████████▀ \033[0m \033[48;2;255;13;104m \033[0m Build Fast. Run Fast."""
FULL_COLOR_LOGO = """ \033[38;2;255;13;104m ▄███ █████ ██ \033[0m ▄█▄ ██ █ █ ▄██████████ \033[38;2;255;13;104m ██ \033[0m █ █ █ ██ █ █ ██ \033[38;2;255;13;104m ▀███████ ███▄ \033[0m ▀ █ █ ██ ▄ █ ██ \033[38;2;255;13;104m ██\033[0m █████████ █ ██ █ █ ▄▄ \033[38;2;255;13;104m ████ ████████▀ \033[0m █ █ █ ██ █ ▀██ ███████ """  # noqa

# Matches ANSI escape sequences (CSI and two-byte escapes) for stripping.
ansi_pattern = re.compile(r"\x1B(?:[@-Z\\-_]|\[[0-?]*[ -/]*[@-~])")


def get_logo(full=False, coffee=False):
    """Return the banner string appropriate for the current terminal.

    :param full: prefer the full-colour logo (takes precedence over coffee).
    :param coffee: prefer the coffee logo when ``full`` is falsy.
    :returns: one of the module-level logo strings; the plain ``BASE_LOGO``
        when stdout is not a TTY.
    """
    logo = (
        (FULL_COLOR_LOGO if full else (COFFEE_LOGO if coffee else COLOR_LOGO))
        if sys.stdout.isatty()
        else BASE_LOGO
    )
    # On macOS Apple Terminal, strip the ANSI escapes entirely — presumably
    # because that terminal renders these 24-bit sequences poorly (confirm).
    if (
        sys.platform == "darwin"
        and environ.get("TERM_PROGRAM") == "Apple_Terminal"
    ):
        logo = ansi_pattern.sub("", logo)
    return logo
33.637931
86
0.451051
344
1,951
3.093023
0.209302
0.109023
0.107143
0.178571
0.571429
0.571429
0.56203
0.550752
0.465226
0.390977
0
0.332855
0.285495
1,951
57
87
34.22807
0.286944
0.00205
0
0.139535
0
0.348837
0.743445
0.305913
0
0
0
0
0
1
0.023256
false
0
0.069767
0
0.116279
0
0
0
0
null
0
0
1
0
0
0
0
0
0
0
1
0
0
0
0
0
1
0
0
0
0
1
1
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
5
854e6ce6a445e50dea0c03cbdf9e3f6a5aa17132
33
py
Python
src/fingerflow/extractor/__init__.py
jakubarendac/fingerflow
a0a53259ec575704d19ae0ae770335536e567583
[ "MIT" ]
327
2017-12-28T10:49:09.000Z
2022-01-31T14:12:55.000Z
MultiSourceDataFeeds/Providers/GDELT/extractor/__init__.py
Esri/ArcGIS-Solutions-for-Business
306b778bb6246f13766ce14245c6ba2aab42ba08
[ "Apache-2.0" ]
25
2017-12-14T13:13:54.000Z
2022-02-09T23:26:52.000Z
MultiSourceDataFeeds/Providers/GDELT/extractor/__init__.py
Esri/ArcGIS-Solutions-for-Business
306b778bb6246f13766ce14245c6ba2aab42ba08
[ "Apache-2.0" ]
66
2017-12-28T23:00:07.000Z
2021-11-09T15:40:16.000Z
from .extractor import Extractor
16.5
32
0.848485
4
33
7
0.75
0
0
0
0
0
0
0
0
0
0
0
0.121212
33
1
33
33
0.965517
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
1
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
0
0
0
5
8554273993ca3825f4ec5d7b64aa4c28a9363da4
154
py
Python
projectpy/__init__.py
DumbMachine/ProjectPy
470ab51a7f81a16303178c933bc87933128c04f5
[ "BSD-2-Clause" ]
null
null
null
projectpy/__init__.py
DumbMachine/ProjectPy
470ab51a7f81a16303178c933bc87933128c04f5
[ "BSD-2-Clause" ]
1
2019-06-01T08:45:53.000Z
2019-06-01T08:45:53.000Z
projectpy/__init__.py
DumbMachine/ProjectPy
470ab51a7f81a16303178c933bc87933128c04f5
[ "BSD-2-Clause" ]
null
null
null
"""Package init: prints the PROJECTPY banner on import."""
# NOTE(review): platform, Path and os appear unused in this module's visible
# code — confirm against the rest of the package before removing them.
import platform
from pathlib import Path
import os

from . import utils

# Banner printed as an import side effect; `normal=True` semantics are
# defined by utils.cprint (not visible here).
utils.cprint("PROJECTPY", ": A Python CLI to create packages", "", normal=True)
17.111111
79
0.74026
22
154
5.181818
0.772727
0
0
0
0
0
0
0
0
0
0
0
0.162338
154
8
80
19.25
0.883721
0
0
0
0
0
0.276316
0
0
0
0
0
0
1
0
true
0
0.8
0
0.8
0.2
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
5
858fbf187f1648621937fce18345fc063895a8eb
46
py
Python
w2v/__init__.py
searobbersduck/my_nlp_corpus
e31779de0d37b718d98c1285a483d2626982f7b9
[ "MIT" ]
null
null
null
w2v/__init__.py
searobbersduck/my_nlp_corpus
e31779de0d37b718d98c1285a483d2626982f7b9
[ "MIT" ]
null
null
null
w2v/__init__.py
searobbersduck/my_nlp_corpus
e31779de0d37b718d98c1285a483d2626982f7b9
[ "MIT" ]
null
null
null
from .vocab import * from .gen_pieces import *
23
25
0.76087
7
46
4.857143
0.714286
0
0
0
0
0
0
0
0
0
0
0
0.152174
46
2
25
23
0.871795
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
1
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
0
0
0
5