hexsha
string
size
int64
ext
string
lang
string
max_stars_repo_path
string
max_stars_repo_name
string
max_stars_repo_head_hexsha
string
max_stars_repo_licenses
list
max_stars_count
int64
max_stars_repo_stars_event_min_datetime
string
max_stars_repo_stars_event_max_datetime
string
max_issues_repo_path
string
max_issues_repo_name
string
max_issues_repo_head_hexsha
string
max_issues_repo_licenses
list
max_issues_count
int64
max_issues_repo_issues_event_min_datetime
string
max_issues_repo_issues_event_max_datetime
string
max_forks_repo_path
string
max_forks_repo_name
string
max_forks_repo_head_hexsha
string
max_forks_repo_licenses
list
max_forks_count
int64
max_forks_repo_forks_event_min_datetime
string
max_forks_repo_forks_event_max_datetime
string
content
string
avg_line_length
float64
max_line_length
int64
alphanum_fraction
float64
qsc_code_num_words_quality_signal
int64
qsc_code_num_chars_quality_signal
float64
qsc_code_mean_word_length_quality_signal
float64
qsc_code_frac_words_unique_quality_signal
float64
qsc_code_frac_chars_top_2grams_quality_signal
float64
qsc_code_frac_chars_top_3grams_quality_signal
float64
qsc_code_frac_chars_top_4grams_quality_signal
float64
qsc_code_frac_chars_dupe_5grams_quality_signal
float64
qsc_code_frac_chars_dupe_6grams_quality_signal
float64
qsc_code_frac_chars_dupe_7grams_quality_signal
float64
qsc_code_frac_chars_dupe_8grams_quality_signal
float64
qsc_code_frac_chars_dupe_9grams_quality_signal
float64
qsc_code_frac_chars_dupe_10grams_quality_signal
float64
qsc_code_frac_chars_replacement_symbols_quality_signal
float64
qsc_code_frac_chars_digital_quality_signal
float64
qsc_code_frac_chars_whitespace_quality_signal
float64
qsc_code_size_file_byte_quality_signal
float64
qsc_code_num_lines_quality_signal
float64
qsc_code_num_chars_line_max_quality_signal
float64
qsc_code_num_chars_line_mean_quality_signal
float64
qsc_code_frac_chars_alphabet_quality_signal
float64
qsc_code_frac_chars_comments_quality_signal
float64
qsc_code_cate_xml_start_quality_signal
float64
qsc_code_frac_lines_dupe_lines_quality_signal
float64
qsc_code_cate_autogen_quality_signal
float64
qsc_code_frac_lines_long_string_quality_signal
float64
qsc_code_frac_chars_string_length_quality_signal
float64
qsc_code_frac_chars_long_word_length_quality_signal
float64
qsc_code_frac_lines_string_concat_quality_signal
float64
qsc_code_cate_encoded_data_quality_signal
float64
qsc_code_frac_chars_hex_words_quality_signal
float64
qsc_code_frac_lines_prompt_comments_quality_signal
float64
qsc_code_frac_lines_assert_quality_signal
float64
qsc_codepython_cate_ast_quality_signal
float64
qsc_codepython_frac_lines_func_ratio_quality_signal
float64
qsc_codepython_cate_var_zero_quality_signal
bool
qsc_codepython_frac_lines_pass_quality_signal
float64
qsc_codepython_frac_lines_import_quality_signal
float64
qsc_codepython_frac_lines_simplefunc_quality_signal
float64
qsc_codepython_score_lines_no_logic_quality_signal
float64
qsc_codepython_frac_lines_print_quality_signal
float64
qsc_code_num_words
int64
qsc_code_num_chars
int64
qsc_code_mean_word_length
int64
qsc_code_frac_words_unique
null
qsc_code_frac_chars_top_2grams
int64
qsc_code_frac_chars_top_3grams
int64
qsc_code_frac_chars_top_4grams
int64
qsc_code_frac_chars_dupe_5grams
int64
qsc_code_frac_chars_dupe_6grams
int64
qsc_code_frac_chars_dupe_7grams
int64
qsc_code_frac_chars_dupe_8grams
int64
qsc_code_frac_chars_dupe_9grams
int64
qsc_code_frac_chars_dupe_10grams
int64
qsc_code_frac_chars_replacement_symbols
int64
qsc_code_frac_chars_digital
int64
qsc_code_frac_chars_whitespace
int64
qsc_code_size_file_byte
int64
qsc_code_num_lines
int64
qsc_code_num_chars_line_max
int64
qsc_code_num_chars_line_mean
int64
qsc_code_frac_chars_alphabet
int64
qsc_code_frac_chars_comments
int64
qsc_code_cate_xml_start
int64
qsc_code_frac_lines_dupe_lines
int64
qsc_code_cate_autogen
int64
qsc_code_frac_lines_long_string
int64
qsc_code_frac_chars_string_length
int64
qsc_code_frac_chars_long_word_length
int64
qsc_code_frac_lines_string_concat
null
qsc_code_cate_encoded_data
int64
qsc_code_frac_chars_hex_words
int64
qsc_code_frac_lines_prompt_comments
int64
qsc_code_frac_lines_assert
int64
qsc_codepython_cate_ast
int64
qsc_codepython_frac_lines_func_ratio
int64
qsc_codepython_cate_var_zero
int64
qsc_codepython_frac_lines_pass
int64
qsc_codepython_frac_lines_import
int64
qsc_codepython_frac_lines_simplefunc
int64
qsc_codepython_score_lines_no_logic
int64
qsc_codepython_frac_lines_print
int64
effective
string
hits
int64
eebba2a69a46887f14eab916fbeb93540243cd82
1,246
py
Python
server/grips/middleware/auth.py
yizhang7210/Syllable
0536763a21db9532fc73cd32d03a7732d73f4ab8
[ "MIT" ]
null
null
null
server/grips/middleware/auth.py
yizhang7210/Syllable
0536763a21db9532fc73cd32d03a7732d73f4ab8
[ "MIT" ]
13
2018-09-29T21:34:25.000Z
2018-12-15T18:54:52.000Z
server/grips/middleware/auth.py
yizhang7210/Syllable
0536763a21db9532fc73cd32d03a7732d73f4ab8
[ "MIT" ]
null
null
null
from rest_framework import authentication from rest_framework import exceptions from users.middleware.auth import ApiUserAuth from grips.services import grips as grips_service class ApiGripWriteAuth(authentication.BaseAuthentication): api_auth = ApiUserAuth() def authenticate(self, request): user, _ = self.api_auth.authenticate(request) grip_id = request.path.split('/')[-1] grip = grips_service.get_by_id(grip_id) if grip is None: raise exceptions.AuthenticationFailed('Grip does not exist') if grips_service.is_editable_by(grip, user.email): return (user, grip) raise exceptions.AuthenticationFailed('User not authorized') class ApiGripReadAuth(authentication.BaseAuthentication): api_auth = ApiUserAuth() def authenticate(self, request): user, _ = self.api_auth.authenticate(request) grip_id = request.path.split('/')[-2] grip = grips_service.get_by_id(grip_id) if grip is None: raise exceptions.AuthenticationFailed('Grip does not exist') if grips_service.is_readable(grip, user.email): return (user, grip) raise exceptions.AuthenticationFailed('User not authorized')
31.948718
72
0.702247
143
1,246
5.951049
0.314685
0.070505
0.164512
0.054054
0.749706
0.749706
0.749706
0.749706
0.749706
0.749706
0
0.002037
0.211878
1,246
38
73
32.789474
0.864562
0
0
0.615385
0
0
0.0626
0
0
0
0
0
0
1
0.076923
false
0
0.153846
0
0.461538
0
0
0
0
null
0
0
0
0
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
5
eec2600d4b15ed51fd928b1856849d33bb95ba46
2,204
py
Python
Pcolors/_functions/preview.py
rafalou38/Pcolors
a4dc57c57d6a142a23a8ce8bc422581028fc8abd
[ "MIT" ]
1
2020-08-31T09:45:28.000Z
2020-08-31T09:45:28.000Z
Pcolors/_functions/preview.py
rafalou38/Pcolors
a4dc57c57d6a142a23a8ce8bc422581028fc8abd
[ "MIT" ]
null
null
null
Pcolors/_functions/preview.py
rafalou38/Pcolors
a4dc57c57d6a142a23a8ce8bc422581028fc8abd
[ "MIT" ]
null
null
null
import colorama colorama.init() def preview(): print(""" Test of the colors: foreground: light: \033[90m black\033[0m \033[94m blue \033[0m \033[91m red\033[0m \033[95m magenta\033[0m \033[92m green\033[0m \033[96m cyan\033[0m \033[93m yellow\033[0m \033[97m white\033[0m dark: \033[30m lblack\033[0m\033[34m lblue \033[0m \033[31m lred\033[0m \033[35m lmagenta\033[0m \033[32m lgreen\033[0m \033[36m lcyan\033[0m \033[33m lyellow\033[0m \033[37m lwhite\033[0m background: light: \033[100m black \033[0m \033[104mblue \033[0m \033[101m red \033[0m \033[105mmagenta \033[0m \033[102m green \033[0m \033[106mcyan \033[0m \033[103m yellow \033[0m \033[107mwhite \033[0m dark: \033[40m lblack \033[0m \033[44mlblue \033[0m \033[41m lred \033[0m \033[45mlmagenta \033[0m \033[42m lgreen \033[0m \033[46mlcyan \033[0m \033[43m lyellow\033[0m \033[47mlwhite \033[0m format: \033[0m normal \033[0m \033[1mbold \033[0m \033[2m faint \033[0m \033[4munderline \033[0m \033[5m slow_blink \033[0m \033[9mcrossed \033[0m \033[6m rapid_blink \033[0m \033[21munderline_bold \033[0m \033[7m reverse \033[0m \033[51mframed \033[0m \033[8m hidden \033[0m \033[52mrounded \033[0m \033[3m italic \033[0m combo: \033[42;31;1;51m background lgreen, foreground red, bold, framed \033[0m \033[42;31;51m background lgreen, foreground red, framed \033[0m \033[42;30;51m background lgreen, foreground black, framed \033[0m \033[42;30;51;2m background lgreen, foreground black, framed, faint \033[0m \033[42;36;7m background lgreen, foreground cyan, reverse \033[0m \033[51;1;3;4;9m framed, bold, italic, underline, crossed \033[0m \033[90;1;21ma\033[91mm\033[92ma\033[93mz\033[94mi\033[95mn\033[96mg \033[90m& \033[91ma \033[92mlo\033[93mt \033[94mmo\033[95mre\033[96m t\033[97mo \033[90mdi\033[91msc\033[92mov\033[93mer\033[0m \033[31;1m!\033[0m\033[90;2;3m the result can be diferent depending on the terminal you use and the theme\033[0m \033[31;1m¡\033[0m """)
40.814815
197
0.647005
379
2,204
3.757256
0.350923
0.196629
0.275281
0.02809
0.146067
0.025281
0
0
0
0
0
0.352234
0.207804
2,204
53
198
41.584906
0.462772
0
0
0.088889
0
0.111111
0.970962
0.098004
0
0
0
0
0
1
0.022222
true
0
0.022222
0
0.044444
0.022222
0
0
0
null
0
1
0
0
0
0
0
0
0
0
1
0
0
0
0
0
1
0
0
0
0
0
1
0
null
0
0
0
0
0
0
1
0
0
0
0
0
0
5
010807a21e4f84a4045d136e0affddd3af910d02
6,621
py
Python
skill_sdk/requests.py
stranac/voice-skill-sdk
8bfbbedf36ed4e4b2ff865deffe4dee804d57031
[ "MIT" ]
18
2020-11-25T12:58:36.000Z
2022-01-06T21:13:52.000Z
skill_sdk/requests.py
stranac/voice-skill-sdk
8bfbbedf36ed4e4b2ff865deffe4dee804d57031
[ "MIT" ]
28
2020-11-27T08:45:57.000Z
2022-03-31T09:01:48.000Z
skill_sdk/requests.py
stranac/voice-skill-sdk
8bfbbedf36ed4e4b2ff865deffe4dee804d57031
[ "MIT" ]
15
2020-11-30T08:19:44.000Z
2022-03-10T13:07:05.000Z
# # voice-skill-sdk # # (C) 2021, Deutsche Telekom AG # # This file is distributed under the terms of the MIT license. # For details see the file LICENSE in the top directory. # """HTTP sync/async clients with circuit breaker""" from typing import Callable, Iterable, List, Union import logging from warnings import warn import httpx from httpx import codes, HTTPError, Response # noqa from aiobreaker import ( CircuitBreaker, CircuitBreakerState, # noqa ) from skill_sdk.config import settings from skill_sdk.log import tracing_headers logger = logging.getLogger(__name__) DEFAULT_REQUESTS_TIMEOUT = settings.REQUESTS_TIMEOUT class Client(httpx.Client): """ Sync HTTP client with a circuit breaker """ def __init__( self, *, internal: bool = False, circuit_breaker: CircuitBreaker = None, timeout: Union[int, float] = None, exclude: Iterable[codes] = None, response_hook: Callable[[httpx.Response], None] = None, **kwargs, ) -> None: """ Construct sync client :param internal: identifies a request to an internal service propagate the tracing headers, if the request is internal :param circuit_breaker: optional circuit breaker, DEFAULT_CIRCUIT_BREAKER if not set :param timeout: optional timeout for a request :param exclude: list of HTTP status codes that are treated as "normal" (no exception is raised) :param response_hook: function to be executed after a response is received (with response as argument) :param kwargs: keyword arguments passed over to request """ self.internal = internal # If no custom circuit breaker supplied, we'll create a new instance self.circuit_breaker = circuit_breaker or CircuitBreaker() self.exclude = tuple(exclude) if exclude else () super().__init__(timeout=timeout or DEFAULT_REQUESTS_TIMEOUT, **kwargs) if response_hook: self.event_hooks = dict(response=[response_hook]) def request( self, *args, exclude: Iterable[codes] = None, **kwargs, ): exclude = exclude or self.exclude # Propagate tracing headers if request is created as "internal" if 
self.internal: logger.debug("Internal service, adding tracing headers.") kwargs["headers"] = { **(kwargs.get("headers", None) or {}), **tracing_headers(), } @self.circuit_breaker def _inner_call(*a, **kw): """Wraps Client.request""" _r = super(Client, self).request(*a, **kw) if _r.status_code in exclude: logger.debug("Status code %s is excluded", _r.status_code) else: _r.raise_for_status() return _r try: result = _inner_call(*args, **kwargs) logger.debug("HTTP completed with status code: %d", result.status_code) except HTTPError as e: logger.error( "HTTP request [%s, %s] failed with error: %s", repr(args), repr(kwargs), repr(e), ) raise return result class AsyncClient(httpx.AsyncClient): """ Async client with a circuit breaker """ def __init__( self, *, internal: bool = False, circuit_breaker: CircuitBreaker = None, timeout: Union[int, float] = None, exclude: List[codes] = None, response_hook: Callable[[httpx.Response], None] = None, **kwargs, ) -> None: """ Construct sync client :param internal: identifies a request to an internal service propagate the tracing headers, if the request is internal :param circuit_breaker: optional circuit breaker, DEFAULT_CIRCUIT_BREAKER if not set :param timeout: optional timeout for a request :param exclude: list of HTTP status codes that are treated as "normal" (no exception is raised) :param response_hook: function to be executed after a response is received (with response as argument) :param kwargs: keyword arguments passed over to request """ self.internal = internal # If no custom circuit breaker supplied, we'll create a new instance self.circuit_breaker = circuit_breaker or CircuitBreaker() self.exclude = tuple(exclude) if exclude else () super().__init__(timeout=timeout or DEFAULT_REQUESTS_TIMEOUT, **kwargs) if response_hook: self.event_hooks = dict(response=[response_hook]) async def request( self, *args, exclude: Iterable[codes] = None, **kwargs, ): exclude = exclude or self.exclude # Propagate tracing headers if request 
is created as "internal" if self.internal: logger.debug("Internal service, adding tracing headers.") kwargs["headers"] = { **(kwargs.get("headers", None) or {}), **tracing_headers(), } @self.circuit_breaker async def _inner_call(*a, **kw): """Wraps Client.request""" _r = await super(AsyncClient, self).request(*a, **kw) if _r.status_code in exclude: logger.debug("Status code %s is excluded", _r.status_code) else: _r.raise_for_status() return _r try: result = await _inner_call(*args, **kwargs) logger.debug("HTTP completed with status code: %d", result.status_code) except HTTPError as e: logger.error( "HTTP request [%s, %s] failed with error: %s", repr(args), repr(kwargs), repr(e), ) raise return result class CircuitBreakerSession(Client): """**DEPRECATED**: HTTP(s) session with a circuit breaker. Renamed to `skill_sdk.requests.Client` The name is left for backward compatibility """ def __init__(self, *args, **kwargs) -> None: warn( '"requests.CircuitBreakerSession" is deprecated.\n' 'Please use "requests.Client" if you need a circuit breaker or "httpx.Client" if not.', DeprecationWarning, stacklevel=2, ) super().__init__(*args, **kwargs)
31.679426
112
0.590998
728
6,621
5.247253
0.221154
0.076963
0.015707
0.014921
0.756545
0.756545
0.756545
0.756545
0.756545
0.738743
0
0.001113
0.321251
6,621
208
113
31.831731
0.84891
0.285153
0
0.696721
0
0.008197
0.10076
0.007149
0
0
0
0
0
1
0.040984
false
0
0.065574
0
0.163934
0
0
0
0
null
0
0
0
0
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
5
01430d49710534c231499dbb63a50fd9e7bed5b4
62
py
Python
sabueso/_private_tools/molecular_system/__init__.py
dprada/sabueso
14843cf3522b5b89db5b61c1541a7015f114dd53
[ "MIT" ]
null
null
null
sabueso/_private_tools/molecular_system/__init__.py
dprada/sabueso
14843cf3522b5b89db5b61c1541a7015f114dd53
[ "MIT" ]
2
2022-01-31T21:22:17.000Z
2022-02-04T20:20:12.000Z
sabueso/_private_tools/molecular_system/__init__.py
dprada/sabueso
14843cf3522b5b89db5b61c1541a7015f114dd53
[ "MIT" ]
1
2021-07-20T15:01:14.000Z
2021-07-20T15:01:14.000Z
from .is_file import is_file from .is_string import is_string
20.666667
32
0.83871
12
62
4
0.416667
0.25
0
0
0
0
0
0
0
0
0
0
0.129032
62
2
33
31
0.888889
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
0
0
null
1
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
0
0
0
5
0154b2e083a1a027e45489bd851f11bc1fea0b69
139
py
Python
lbc/models/__init__.py
cgeller/WorldOnRails
d8aa9f7ae67a6b7b71a2fc5ba86bb2a44f221bef
[ "MIT" ]
108
2021-05-04T02:13:04.000Z
2022-03-24T02:11:55.000Z
lbc/models/__init__.py
cgeller/WorldOnRails
d8aa9f7ae67a6b7b71a2fc5ba86bb2a44f221bef
[ "MIT" ]
45
2021-05-10T13:32:51.000Z
2022-03-23T07:23:19.000Z
lbc/models/__init__.py
cgeller/WorldOnRails
d8aa9f7ae67a6b7b71a2fc5ba86bb2a44f221bef
[ "MIT" ]
22
2021-05-04T16:38:17.000Z
2022-03-25T16:40:00.000Z
from .point_model import PointModel, RGBPointModel from .converter import Converter __all__ = ['PointModel', 'RGBPointModel', 'Converter']
34.75
54
0.798561
14
139
7.571429
0.571429
0.433962
0
0
0
0
0
0
0
0
0
0
0.100719
139
4
54
34.75
0.848
0
0
0
0
0
0.228571
0
0
0
0
0
0
1
0
false
0
0.666667
0
0.666667
0
1
0
0
null
1
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
1
0
1
0
0
5
01690dcdce2a3606e89e463e21f9223568a9106f
60
py
Python
src/oscar/utils/deprecation.py
QueoLda/django-oscar
8dd992d82e31d26c929b3caa0e08b57e9701d097
[ "BSD-3-Clause" ]
4,639
2015-01-01T00:42:33.000Z
2022-03-29T18:32:12.000Z
src/oscar/utils/deprecation.py
QueoLda/django-oscar
8dd992d82e31d26c929b3caa0e08b57e9701d097
[ "BSD-3-Clause" ]
2,215
2015-01-02T22:32:51.000Z
2022-03-29T12:16:23.000Z
src/oscar/utils/deprecation.py
QueoLda/django-oscar
8dd992d82e31d26c929b3caa0e08b57e9701d097
[ "BSD-3-Clause" ]
2,187
2015-01-02T06:33:31.000Z
2022-03-31T15:32:36.000Z
class RemovedInOscar32Warning(DeprecationWarning): pass
20
50
0.833333
4
60
12.5
1
0
0
0
0
0
0
0
0
0
0
0.037736
0.116667
60
2
51
30
0.90566
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0.5
0
0
0.5
0
1
0
1
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
1
0
0
0
0
0
5
0174d5ba014bd7a65927edb82b986392fc21807f
86
py
Python
filters/__init__.py
Ramesh-X/Image-Enhancer
4eb98c4652f99ecc49f966c1c0d7cb133b6f76a4
[ "MIT" ]
4
2018-12-24T17:12:37.000Z
2019-07-29T09:21:22.000Z
filters/__init__.py
Ramesh-X/Image-Enhancer
4eb98c4652f99ecc49f966c1c0d7cb133b6f76a4
[ "MIT" ]
null
null
null
filters/__init__.py
Ramesh-X/Image-Enhancer
4eb98c4652f99ecc49f966c1c0d7cb133b6f76a4
[ "MIT" ]
1
2022-03-08T07:44:56.000Z
2022-03-08T07:44:56.000Z
from .abstract_filter import AbstractFilter from .filter_wrapper import FilterWrapper
28.666667
43
0.883721
10
86
7.4
0.7
0
0
0
0
0
0
0
0
0
0
0
0.093023
86
2
44
43
0.948718
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
5
0174e13ecb4205f5048510c266504c68edf36aa4
72
py
Python
src/apification/utils/http.py
Quantify-world/Apification
5fc0bd056d0bf430645a2e2c5d7d9435328b9a4a
[ "MIT" ]
5
2016-10-29T14:23:22.000Z
2017-07-15T08:39:58.000Z
src/apification/utils/http.py
Quantify-world/Apification
5fc0bd056d0bf430645a2e2c5d7d9435328b9a4a
[ "MIT" ]
32
2016-10-23T19:18:26.000Z
2017-02-27T18:33:36.000Z
src/apification/utils/http.py
Quantify-world/apification
5fc0bd056d0bf430645a2e2c5d7d9435328b9a4a
[ "MIT" ]
null
null
null
def parse_http_accept(header): # TODO return 'application/json'
18
30
0.708333
9
72
5.444444
1
0
0
0
0
0
0
0
0
0
0
0
0.194444
72
3
31
24
0.844828
0.055556
0
0
0
0
0.242424
0
0
0
0
0.333333
0
1
0.5
false
0
0
0.5
1
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
1
0
0
1
0
0
0
1
0
0
0
5
6d66da06ec832978f610137ebaa8912de21fb60c
11,840
py
Python
tests/testTrajectoriesSaveLoad.py
ningtangla/trainCentrolControl
66874876a035ad1814b472f03aa9e701f3bdcab7
[ "MIT" ]
null
null
null
tests/testTrajectoriesSaveLoad.py
ningtangla/trainCentrolControl
66874876a035ad1814b472f03aa9e701f3bdcab7
[ "MIT" ]
null
null
null
tests/testTrajectoriesSaveLoad.py
ningtangla/trainCentrolControl
66874876a035ad1814b472f03aa9e701f3bdcab7
[ "MIT" ]
null
null
null
import sys import os DIRNAME = os.path.dirname(__file__) sys.path.append(os.path.join(DIRNAME, '..')) import unittest from ddt import ddt, data, unpack import numpy as np import pandas as pd from exec.trajectoriesSaveLoad import ConvertTrajectoryToStateDf, GetAgentCoordinateFromTrajectoryAndStateDf, \ conditionDfFromParametersDict, GetSavePath, LoadTrajectories, readParametersFromDf, loadFromPickle, GenerateAllSampleIndexSavePaths @ddt class TestTrajectoriesSaveLoad(unittest.TestCase): def setUp(self): self.dataDirectory = 'testData' self.stateIndex = 0 self.getRangeNumAgentsFromTrajectory = lambda trajectory: list(range(np.shape(trajectory[0][self.stateIndex])[0])) self.getRangeTrajectoryLength = lambda trajectory: list(range(len(trajectory))) self.getAllLevelValuesRange = {'timeStep': self.getRangeTrajectoryLength, 'agentId': self.getRangeNumAgentsFromTrajectory} self.getAgentPosXCoord = GetAgentCoordinateFromTrajectoryAndStateDf(self.stateIndex, 2) self.getAgentPosYCoord = GetAgentCoordinateFromTrajectoryAndStateDf(self.stateIndex, 3) self.getAgentVelXCoord = GetAgentCoordinateFromTrajectoryAndStateDf(self.stateIndex, 4) self.getAgentVelYCoord = GetAgentCoordinateFromTrajectoryAndStateDf(self.stateIndex, 5) self.extractColumnValues = {'xPos': self.getAgentPosXCoord, 'yPos': self.getAgentPosYCoord, 'xVel': self.getAgentVelXCoord, 'yVel': self.getAgentVelYCoord} self.convertTrajectoryToStateDf = ConvertTrajectoryToStateDf(self.getAllLevelValuesRange, conditionDfFromParametersDict, self.extractColumnValues) # @data(([(np.asarray([[1, 2, 1, 2, 3, 4], [5, 6, 5, 6, 7, 8]]), [np.asarray((0, 10)), np.asarray((7, 7))]), # (np.asarray([[-1, -2, -1, -2, -3, -4], [-5, -6, -5, -6, -7, -8]]), [np.asarray((0, 10)), np.asarray((7, 7))])], # pd.DataFrame([(1, 2, 3, 4), (5, 6, 7, 8), (-1, -2, -3, -4),(-5, -6, -7, -8)], # index = pd.MultiIndex.from_product([[0, 1], [0, 1]], names=['timeStep', 'agentId']), # columns = ('xPos', 'yPos', 'xVel', 'yVel')))) # @unpack # def 
testConvertTrajectoryToStateDf(self, trajectory, groundTruthDf): # df = self.convertTrajectoryToStateDf(trajectory) # print(df) # truthValue = groundTruthDf.equals(df) # self.assertTrue(truthValue) # @data(('..', '.txt', {'qPosInit': (1, 2, 3, 4), 'numSimulations': 12}, {'numTrials': 23, 'trainSteps': 2}, # '../numSimulations=12_numTrials=23_qPosInit=(1,2,3,4)_trainSteps=2.txt'), # ('', '.pickle', {'qPosInit': [1, 2, 3, 4], 'numSimulations': 12}, {'numTrials': 23, 'trainSteps': 2}, # 'numSimulations=12_numTrials=23_qPosInit=[1,2,3,4]_trainSteps=2.pickle')) # @unpack # def testGetSavePathWithFixedParameters(self, dataDirectory, extension, fixedParameters, parameters, groundTruthPath): # getSavePath = GetSavePath(dataDirectory, extension, fixedParameters) # path = getSavePath(parameters) # self.assertEqual(path, groundTruthPath) # # # @data(('..', '.txt', {'numTrials': 23, 'trainSteps': 2}, '../numTrials=23_trainSteps=2.txt'), # ('', '.pickle', {'numTrials': 23, 'trainSteps': 2}, 'numTrials=23_trainSteps=2.pickle')) # @unpack # def testGetSavePathWithoutFixedParameters(self, dataDirectory, extension, parameters, groundTruthPath): # getSavePath = GetSavePath(dataDirectory, extension) # path = getSavePath(parameters) # self.assertEqual(path, groundTruthPath) # # # @data(({'numTrials': 100, 'qPosInit': (-4, 0, 4, 0)}, {'maxRunningSteps': 15, 'numSimulations': 200, # 'sheepPolicyName': 'MCTS'})) # @unpack # def testLoadTrajectoriesNumTrials(self, parameters, fixedParameters): # getSavePath = GetSavePath('testData', '.pickle', fixedParameters) # # loadTrajectories = LoadTrajectories(getSavePath, loadFromPickle) # loadedTrajectories = loadTrajectories(parameters) # numTrials = len(loadedTrajectories) # # groundTruthNumTrials = parameters['numTrials'] # self.assertEqual(numTrials, groundTruthNumTrials) # # # @data(({'numTrials': 100, 'qPosInit': (-4, 0, 4, 0)}, {'maxRunningSteps': 15, 'numSimulations': 200, # 'sheepPolicyName': 'MCTS'})) # @unpack # def 
testLoadTrajectoriesQPosInit(self, parameters, fixedParameters): # getSavePath = GetSavePath('testData', '.pickle', fixedParameters) # # loadTrajectories = LoadTrajectories(getSavePath, loadFromPickle) # loadedTrajectories = loadTrajectories(parameters) # initTimeStep = 0 # stateIndex = 0 # qPosIndex = 0 # numQPosEachAgent = 2 # allInitStates = [trajectory[initTimeStep][stateIndex] for trajectory in loadedTrajectories] # allQPosInit = [initState[:, qPosIndex:qPosIndex+numQPosEachAgent].flatten() for initState in allInitStates] # # groundTruthQPosInit = parameters['qPosInit'] # # allTruthValues = np.asarray([np.all(qPosInit == groundTruthQPosInit) for qPosInit in allQPosInit]) # self.assertTrue(np.all(allTruthValues)) # # # @data((pd.DataFrame(index=pd.MultiIndex.from_tuples([(100, (-4, 0, 4, 0))], names=['numTrials', 'qPosInit'])), # {'numTrials': 100, 'qPosInit': (-4, 0, 4, 0)})) # @unpack # def testReadParametersFromDf(self, df, groundTruthParameters): # parameters = readParametersFromDf(df) # self.assertEqual(parameters, groundTruthParameters) @data((GetSavePath('testData', '.pickle', {'maxRunningSteps': 15, 'numTrials': 100, 'qPosInit': (-4,0,4,0), 'sheepPolicyName': 'MCTS'}), 3, {'numSimulations': 200}, {0: os.path.join('testData', 'maxRunningSteps=15_numSimulations=200_numTrials=100_qPosInit=(-4,0,4,0)_sampleIndex=100_sheepPolicyName=MCTS.pickle'), 1: os.path.join('testData', 'maxRunningSteps=15_numSimulations=200_numTrials=100_qPosInit=(-4,0,4,0)_sampleIndex=101_sheepPolicyName=MCTS.pickle'), 2: os.path.join('testData', 'maxRunningSteps=15_numSimulations=200_numTrials=100_qPosInit=(-4,0,4,0)_sampleIndex=102_sheepPolicyName=MCTS.pickle')})) @unpack def testGenerateAllSampleIndexSavePaths(self, getSavePath, numSamples, pathParameters, groundTruthAllPaths): generateAllSampleIndexSavePaths = GenerateAllSampleIndexSavePaths(getSavePath) allPaths = generateAllSampleIndexSavePaths(numSamples, pathParameters) print("ALL PATHS", allPaths) self.assertEqual(allPaths, 
groundTruthAllPaths) @data(('..', '.txt', {'qPosInit': (1, 2, 3, 4), 'numSimulations': 12}, {'numTrials': 23, 'trainSteps': 2}, '../numSimulations=12_numTrials=23_qPosInit=(1,2,3,4)_trainSteps=2.txt'), ('', '.pickle', {'qPosInit': (1, 2, 3, 4), 'numSimulations': 12}, {'numTrials': 23, 'trainSteps': 2}, 'numSimulations=12_numTrials=23_qPosInit=(1,2,3,4)_trainSteps=2.pickle')) @unpack def testGetSavePathWithFixedParameters(self, dataDirectory, extension, fixedParameters, parameters, groundTruthPath): getSavePath = GetSavePath(dataDirectory, extension, fixedParameters) path = getSavePath(parameters) self.assertEqual(path, groundTruthPath) @data(('..', '.txt', {'numTrials': 23, 'trainSteps': 2}, '../numTrials=23_trainSteps=2.txt'), ('', '.pickle', {'numTrials': 23, 'trainSteps': 2}, 'numTrials=23_trainSteps=2.pickle')) @unpack def testGetSavePathWithoutFixedParameters(self, dataDirectory, extension, parameters, groundTruthPath): getSavePath = GetSavePath(dataDirectory, extension) path = getSavePath(parameters) self.assertEqual(path, groundTruthPath) @data((pd.DataFrame(index=pd.MultiIndex.from_tuples([(100, (-4, 0, 4, 0))], names=['numTrials', 'qPosInit'])), {'numTrials': 100, 'qPosInit': (-4, 0, 4, 0)})) @unpack def testReadParametersFromDf(self, df, groundTruthParameters): parameters = readParametersFromDf(df) self.assertEqual(parameters, groundTruthParameters) @data(({'numTrials': 100, 'qPosInit': (-4, 0, 4, 0)}, {'maxRunningSteps': 15, 'numSimulations': 200, 'sheepPolicyName': 'MCTS'}, ['sampleIndex'])) @unpack def testLoadMultipleTrajectoriesFromMultipleFiles(self, parameters, fixedParameters, fuzzySearchParameterNames): getSavePath = GetSavePath('testData', '.pickle', fixedParameters) loadTrajectories = LoadTrajectories(getSavePath, loadFromPickle, fuzzySearchParameterNames) loadedTrajectories = loadTrajectories(parameters) numTrials = len(loadedTrajectories) groundTruthNumTrials = parameters['numTrials'] self.assertEqual(numTrials, groundTruthNumTrials) 
@data(({'numTrials': 50, 'qPosInit': (0, 0, 0, 0)}, {'maxRunningSteps': 2, 'numSimulations': 800}, ['sampleIndex']), ({'numTrials': 100, 'qPosInit': (0, 0, 0, 0)}, {'maxRunningSteps': 2, 'numSimulations': 800}, ['sampleIndex'])) @unpack def testLoadMultipleTrajectoriesFromOneFile(self, parameters, fixedParameters, fuzzySearchParameterNames): getSavePath = GetSavePath('testData', '.pickle', fixedParameters) loadTrajectories = LoadTrajectories(getSavePath, loadFromPickle, fuzzySearchParameterNames) loadedTrajectories = loadTrajectories(parameters) numTrials = len(loadedTrajectories) groundTruthNumTrials = parameters['numTrials'] self.assertEqual(numTrials, groundTruthNumTrials) @data(({'numTrials': 100, 'qPosInit': (-4, 0, 4, 0)}, {'maxRunningSteps': 15, 'numSimulations': 200, 'sheepPolicyName': 'MCTS'}, ['sampleIndex'])) @unpack def testLoadTrajectoriesQPosInit(self, parameters, fixedParameters, fuzzySearchParameterNames): getSavePath = GetSavePath('testData', '.pickle', fixedParameters) loadTrajectories = LoadTrajectories(getSavePath, loadFromPickle, fuzzySearchParameterNames) loadedTrajectories = loadTrajectories(parameters) initTimeStep = 0 stateIndex = 0 qPosIndex = 0 numQPosEachAgent = 2 allInitStates = [trajectory[initTimeStep][stateIndex] for trajectory in loadedTrajectories] allQPosInit = [initState[:, qPosIndex:qPosIndex+numQPosEachAgent].flatten() for initState in allInitStates] groundTruthQPosInit = parameters['qPosInit'] allTruthValues = np.asarray([np.all(qPosInit == groundTruthQPosInit) for qPosInit in allQPosInit]) self.assertTrue(np.all(allTruthValues)) # @data((GetSavePath('..', '.pickle', {'numTrials': 25, 'maxRunningSteps': 3}), 3, {'numSimulations': 20}, # {0: os.path.join('..', 'maxRunningSteps=3_numSimulations=20_numTrials=25_sampleIndex=0.pickle'), # 1: os.path.join('..', 'maxRunningSteps=3_numSimulations=20_numTrials=25_sampleIndex=1.pickle'), # 2: os.path.join('..', 'maxRunningSteps=3_numSimulations=20_numTrials=25_sampleIndex=2.pickle')})) 
# @unpack # def testGenerateAllSampleIndexSavePaths(self, getSavePath, numSamples, pathParameters, groundTruthAllPaths): # generateAllSampleIndexSavePaths = GenerateAllSampleIndexSavePaths(getSavePath) # allPaths = generateAllSampleIndexSavePaths(numSamples, pathParameters) # self.assertEqual(allPaths, groundTruthAllPaths) if __name__ == '__main__': unittest.main()
57.475728
168
0.664189
1,000
11,840
7.809
0.14
0.006147
0.00461
0.006147
0.739915
0.73633
0.729159
0.726085
0.724036
0.724036
0
0.036769
0.19603
11,840
205
169
57.756098
0.783591
0.374409
0
0.309278
0
0.030928
0.167372
0.074676
0
0
0
0
0.072165
1
0.082474
false
0
0.072165
0
0.164948
0.010309
0
0
0
null
0
0
0
0
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
5
6d9371936b74432fdad9489c745f4c11d19e0015
45
py
Python
sqlsite/__init__.py
j4mie/sqlsite
f2dadb8db5ed7880f8872b6591d8cb1487f777ea
[ "Unlicense" ]
192
2020-01-13T17:12:07.000Z
2021-08-13T21:44:38.000Z
sqlsite/__init__.py
j4mie/sqlsite
f2dadb8db5ed7880f8872b6591d8cb1487f777ea
[ "Unlicense" ]
6
2020-01-12T15:55:32.000Z
2020-02-08T03:29:26.000Z
sqlsite/__init__.py
j4mie/sqlsite
f2dadb8db5ed7880f8872b6591d8cb1487f777ea
[ "Unlicense" ]
3
2020-01-16T18:42:33.000Z
2022-02-06T23:21:38.000Z
from .wsgi import make_app app = make_app()
11.25
26
0.733333
8
45
3.875
0.625
0.451613
0
0
0
0
0
0
0
0
0
0
0.177778
45
3
27
15
0.837838
0
0
0
0
0
0
0
0
0
0
0
0
1
0
false
0
0.5
0
0.5
0
1
1
0
null
1
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
1
0
0
0
0
5
6da4fe2b828fc3a75f9e667ad0f5818530be86cc
356
py
Python
CodingInterviews/Q1-30/Q1.py
BennyJane/algorithm_mad
4173a4cc60d0f4f87b0cb7f6bc87d1eefbaff937
[ "Apache-2.0" ]
null
null
null
CodingInterviews/Q1-30/Q1.py
BennyJane/algorithm_mad
4173a4cc60d0f4f87b0cb7f6bc87d1eefbaff937
[ "Apache-2.0" ]
null
null
null
CodingInterviews/Q1-30/Q1.py
BennyJane/algorithm_mad
4173a4cc60d0f4f87b0cb7f6bc87d1eefbaff937
[ "Apache-2.0" ]
null
null
null
# !/usr/bin/env python # -*-coding:utf-8 -*- # Warning :The Hard Way Is Easier """ ================================================================================================ 两个等长整数列表a,b无序,允许交换a,b列表中的数据位置,使得a列表中数值的和与b列表中数据和的差最小 ================================================================================================ """ """ way1.0: """
25.428571
96
0.289326
20
356
5.15
1
0
0
0
0
0
0
0
0
0
0
0.009119
0.075843
356
14
97
25.428571
0.303951
0.907303
0
null
0
null
0
0
null
1
0
0
null
1
null
true
0
0
null
null
null
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
1
1
0
0
0
0
0
0
null
1
0
0
0
0
0
1
0
0
0
0
0
0
5
6db9f4b35b3bb41c3743bef2b9015510a4de0c14
21
py
Python
Howdy.py
llabuschagne/cruft-simple-2
5a78ae8509d6e5633012f5ef2f178f3e007ba2ce
[ "MIT" ]
null
null
null
Howdy.py
llabuschagne/cruft-simple-2
5a78ae8509d6e5633012f5ef2f178f3e007ba2ce
[ "MIT" ]
null
null
null
Howdy.py
llabuschagne/cruft-simple-2
5a78ae8509d6e5633012f5ef2f178f3e007ba2ce
[ "MIT" ]
null
null
null
print("Hello, Jane!")
21
21
0.666667
3
21
4.666667
1
0
0
0
0
0
0
0
0
0
0
0
0.047619
21
1
21
21
0.7
0
0
0
0
0
0.545455
0
0
0
0
0
0
1
0
true
0
0
0
0
1
1
1
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
0
0
0
1
0
5
6dc7e94bc1e3434137d21f288865a18b796f5bf6
37
py
Python
ansible/env/vagrant/vagrant.py
domdom82/docker-autoscaler
b6a9ce7895ae96eeffbd52f13cebb02d1944f511
[ "Apache-2.0" ]
null
null
null
ansible/env/vagrant/vagrant.py
domdom82/docker-autoscaler
b6a9ce7895ae96eeffbd52f13cebb02d1944f511
[ "Apache-2.0" ]
null
null
null
ansible/env/vagrant/vagrant.py
domdom82/docker-autoscaler
b6a9ce7895ae96eeffbd52f13cebb02d1944f511
[ "Apache-2.0" ]
null
null
null
# tbd script that queries vagrant vms
37
37
0.810811
6
37
5
1
0
0
0
0
0
0
0
0
0
0
0
0.162162
37
1
37
37
0.967742
0.945946
0
null
0
null
0
0
null
0
0
0
null
1
null
true
0
0
null
null
null
1
1
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
1
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
0
0
0
0
0
5
6ddcaa6d93fd9a23c89df7ea3469210b6d4b4add
153
py
Python
control/__init__.py
audriusb/midiangel
c786b5d222c845222729f4d9ff9202c75c575354
[ "Apache-2.0" ]
null
null
null
control/__init__.py
audriusb/midiangel
c786b5d222c845222729f4d9ff9202c75c575354
[ "Apache-2.0" ]
null
null
null
control/__init__.py
audriusb/midiangel
c786b5d222c845222729f4d9ff9202c75c575354
[ "Apache-2.0" ]
null
null
null
from .midi_control import MIDIControl from .sdrangel_api import SDRAngelAPI from .sdr_control import SDRController from .midi_parse import midi_translate
38.25
38
0.875817
21
153
6.142857
0.571429
0.124031
0
0
0
0
0
0
0
0
0
0
0.098039
153
4
39
38.25
0.934783
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
5
6df55c2d29586ca71feacba5c24a0d362647310e
61
py
Python
gym_multiagent/envs/__init__.py
helgehatt/gym-multiagent
4d4438d93dc6d947ad1b12bfb28606cd658afc07
[ "MIT" ]
1
2021-03-17T13:17:48.000Z
2021-03-17T13:17:48.000Z
gym_multiagent/envs/__init__.py
helgehatt/gym-multiagent
4d4438d93dc6d947ad1b12bfb28606cd658afc07
[ "MIT" ]
null
null
null
gym_multiagent/envs/__init__.py
helgehatt/gym-multiagent
4d4438d93dc6d947ad1b12bfb28606cd658afc07
[ "MIT" ]
null
null
null
from gym_multiagent.envs.multiagent_env import MultiAgentEnv
30.5
60
0.901639
8
61
6.625
0.875
0
0
0
0
0
0
0
0
0
0
0
0.065574
61
1
61
61
0.929825
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
5
6df7639ed66021aeaf17e351a9380f8921f28825
219
py
Python
src/dbxdeploy/string/RandomStringGenerator.py
DataSentics/dbx-deploy
c019d80a2b947b4f180071e97c3981daa3a2214a
[ "MIT" ]
null
null
null
src/dbxdeploy/string/RandomStringGenerator.py
DataSentics/dbx-deploy
c019d80a2b947b4f180071e97c3981daa3a2214a
[ "MIT" ]
null
null
null
src/dbxdeploy/string/RandomStringGenerator.py
DataSentics/dbx-deploy
c019d80a2b947b4f180071e97c3981daa3a2214a
[ "MIT" ]
null
null
null
import random import string class RandomStringGenerator: def generate(self, stringLength: int): letters = string.ascii_lowercase return ''.join(random.choice(letters) for i in range(stringLength))
24.333333
75
0.730594
25
219
6.36
0.8
0
0
0
0
0
0
0
0
0
0
0
0.187215
219
8
76
27.375
0.893258
0
0
0
1
0
0
0
0
0
0
0
0
1
0.166667
false
0
0.333333
0
0.833333
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
1
0
0
0
null
0
0
0
0
0
0
0
0
1
0
1
0
0
5
0968216e59bd1827e3be5d7257b77d68ed95754a
4,785
py
Python
tests/integrationv2/test_compatibility_with_oqs_openssl.py
glaubitz/s2n
d0b2c5ef9b5909cf078cb5a3e8acc156aee8cb4f
[ "Apache-2.0" ]
4
2020-10-01T21:55:38.000Z
2021-02-05T09:47:03.000Z
tests/integrationv2/test_compatibility_with_oqs_openssl.py
glaubitz/s2n
d0b2c5ef9b5909cf078cb5a3e8acc156aee8cb4f
[ "Apache-2.0" ]
null
null
null
tests/integrationv2/test_compatibility_with_oqs_openssl.py
glaubitz/s2n
d0b2c5ef9b5909cf078cb5a3e8acc156aee8cb4f
[ "Apache-2.0" ]
3
2020-06-24T18:36:11.000Z
2021-12-09T18:20:37.000Z
import copy import pytest import subprocess, os from common import Certificates, Ciphers, Curves, Protocols, AvailablePorts from configuration import available_ports, PROVIDERS, PROTOCOLS from common import Ciphers, ProviderOptions, Protocols, data_bytes from fixtures import managed_process from providers import Provider, S2N, OpenSSL from utils import get_expected_s2n_version from pip._internal.cli.cmdoptions import cert oqs_as_server_test_vectors = [ {"client_ciphers": Ciphers.ECDHE_RSA_AES256_GCM_SHA384, "server_ciphers": Ciphers.ECDHE_RSA_AES256_GCM_SHA384, "expected_cipher": "ECDHE-RSA-AES256-GCM-SHA384", "expected_kem": "NONE" }, ] oqs_as_client_test_vectors = [ {"client_ciphers": Ciphers.ECDHE_RSA_AES256_GCM_SHA384, "server_ciphers": Ciphers.ECDHE_RSA_AES256_GCM_SHA384, "expected_cipher": "ECDHE-RSA-AES256-GCM-SHA384", "expected_kem": "NONE" }, ] def get_oqs_openssl_override_env_vars(): oqs_openssl_install_dir = os.environ["OQS_OPENSSL_1_1_1_INSTALL_DIR"] override_env_vars = dict() override_env_vars["PATH"] = oqs_openssl_install_dir + "/bin" override_env_vars["LD_LIBRARY_PATH"] = oqs_openssl_install_dir + "/lib" return override_env_vars @pytest.mark.parametrize("vector", oqs_as_server_test_vectors) def test_oqs_openssl_as_server(managed_process, vector): host = "localhost" port = next(available_ports) # We are manually passing the cipher flag to s2nc and s2nd. # This is because PQ ciphers are specific to S2N at this point # in time. 
client_options = ProviderOptions( mode=Provider.ClientMode, host=host, port=port, insecure=True, cipher=vector['client_ciphers'], protocol=Protocols.TLS12) server_options = ProviderOptions( mode = Provider.ServerMode, host=host, port=port, cipher=vector['server_ciphers'], protocol=Protocols.TLS12, cert=Certificates.RSA_4096_SHA512.cert, key=Certificates.RSA_4096_SHA512.key, env_overrides=get_oqs_openssl_override_env_vars()) server = managed_process(OpenSSL, server_options, timeout=5) client = managed_process(S2N, client_options, timeout=5) # OQS OpenSSL is Server, so just check that it had a valid exit code for results in server.get_results(): assert results.exception is None assert results.exit_code == 0 expected_version = get_expected_s2n_version(Protocols.TLS12, S2N) # Validate S2N Client results were what was expected for results in client.get_results(): assert results.exception is None assert results.exit_code == 0 assert bytes("Actual protocol version: {}".format(expected_version).encode('utf-8')) in results.stdout assert bytes("KEM: {}".format(vector['expected_kem']).encode('utf-8')) in results.stdout assert bytes("Cipher negotiated: {}".format(vector['expected_cipher']).encode('utf-8')) in results.stdout @pytest.mark.parametrize("vector", oqs_as_client_test_vectors) def test_oqs_openssl_as_client(managed_process, vector): host = "localhost" port = next(available_ports) # We are manually passing the cipher flag to s2nc and s2nd. # This is because PQ ciphers are specific to S2N at this point # in time. 
client_options = ProviderOptions( mode=Provider.ClientMode, host=host, port=port, insecure=True, cipher=vector['client_ciphers'], protocol=Protocols.TLS12, env_overrides=get_oqs_openssl_override_env_vars()) server_options = ProviderOptions( mode = Provider.ServerMode, host=host, port=port, cipher=vector['server_ciphers'], protocol=Protocols.TLS12, cert=Certificates.RSA_4096_SHA512.cert, key=Certificates.RSA_4096_SHA512.key) server = managed_process(S2N, server_options, timeout=5) client = managed_process(OpenSSL, client_options, timeout=5) # OQS OpenSSL is Client, so just check that it had a valid exit code for results in client.get_results(): assert results.exception is None assert results.exit_code == 0 expected_version = get_expected_s2n_version(Protocols.TLS12, S2N) # Validate S2N Server results were what was expected for results in server.get_results(): assert results.exception is None assert results.exit_code == 0 assert bytes("Actual protocol version: {}".format(expected_version).encode('utf-8')) in results.stdout assert bytes("KEM: {}".format(vector['expected_kem']).encode('utf-8')) in results.stdout assert bytes("Cipher negotiated: {}".format(vector['expected_cipher']).encode('utf-8')) in results.stdout
38.58871
190
0.713271
615
4,785
5.310569
0.209756
0.03368
0.032149
0.031231
0.819963
0.794856
0.766687
0.703001
0.687079
0.660747
0
0.028268
0.194148
4,785
123
191
38.902439
0.818724
0.102612
0
0.639535
0
0
0.118608
0.019379
0
0
0
0
0.162791
1
0.034884
false
0
0.116279
0
0.162791
0
0
0
0
null
0
0
0
1
1
1
1
0
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
5
098ef6b1f3c27a11d65baa5d5a3426a65d3b0a21
1,093
py
Python
test/units/misc/test_datefix.py
bronxc/refinery
9448facf48a0008f27861dd1a5ee8f5218e6bb86
[ "BSD-3-Clause" ]
1
2022-02-13T20:57:15.000Z
2022-02-13T20:57:15.000Z
test/units/misc/test_datefix.py
bronxc/refinery
9448facf48a0008f27861dd1a5ee8f5218e6bb86
[ "BSD-3-Clause" ]
null
null
null
test/units/misc/test_datefix.py
bronxc/refinery
9448facf48a0008f27861dd1a5ee8f5218e6bb86
[ "BSD-3-Clause" ]
null
null
null
#!/usr/bin/env python3 # -*- coding: utf-8 -*- from .. import TestUnitBase class TestDateFix(TestUnitBase): def test_dates(self): unit = self.load() self.assertEqual(b'2016-03-15 04:35:35', unit(b'1458016535')) self.assertEqual(b'2016-03-15 04:35:35', unit(b'1458016535000')) self.assertEqual(b'2010-03-15 06:27:50', unit(b'2010-03-15T06:27:50')) self.assertEqual(b'2017-09-11 21:47:22', unit(b'2017:09:11 23:47:22+02:00')) self.assertEqual(b'2017-10-22 05:51:44', unit(b'20171022055144Z')) self.assertEqual(b'2011-10-20 19:37:27', unit(b'20111020193727')) self.assertEqual(b'2010-03-15 06:27:50', unit(b'2010-03-15T06:27:50.000000')) self.assertEqual(b'2010-03-15 06:27:50', unit(b'2010-03-15 06:27:50')) self.assertEqual(b'2014-04-24 19:32:21', unit(b'Thu Apr 24 2014 12:32:21 GMT-0700 (PDT)')) def test_dates_dos(self): unit = self.load(dos=True) self.assertEqual(b'2019-04-02 10:58:44', unit(b'1317164886')) self.assertEqual(b'2019-04-02 10:58:44', unit(b'1317164886000'))
45.541667
98
0.641354
192
1,093
3.635417
0.348958
0.23639
0.252149
0.051576
0.459885
0.41404
0.41404
0.402579
0.402579
0.402579
0
0.341731
0.164684
1,093
23
99
47.521739
0.422782
0.039341
0
0
0
0
0.39313
0.024809
0
0
0
0
0.647059
1
0.117647
false
0
0.058824
0
0.235294
0
0
0
0
null
1
1
0
0
0
0
0
0
0
0
1
0
0
0
0
0
1
0
0
0
0
0
0
0
null
0
0
0
1
0
0
0
0
0
0
0
0
0
5
0992e062a7fc736b95e8faa5a469e71d8a4909d4
7,296
py
Python
pyaz/keyvault/secret/__init__.py
py-az-cli/py-az-cli
9a7dc44e360c096a5a2f15595353e9dad88a9792
[ "MIT" ]
null
null
null
pyaz/keyvault/secret/__init__.py
py-az-cli/py-az-cli
9a7dc44e360c096a5a2f15595353e9dad88a9792
[ "MIT" ]
null
null
null
pyaz/keyvault/secret/__init__.py
py-az-cli/py-az-cli
9a7dc44e360c096a5a2f15595353e9dad88a9792
[ "MIT" ]
1
2022-02-03T09:12:01.000Z
2022-02-03T09:12:01.000Z
''' Manage secrets. ''' from ... pyaz_utils import _call_az def list(id=None, include_managed=None, maxresults=None, vault_name=None): ''' Optional Parameters: - id -- Full URI of the Vault or HSM. If specified all other 'Id' arguments should be omitted. - include_managed -- Include managed secrets. Default: false - maxresults -- Maximum number of results to return in a page. If not specified, the service will return up to 25 results. - vault_name -- Name of the Key Vault. Required if --id is not specified. ''' return _call_az("az keyvault secret list", locals()) def list_versions(id=None, maxresults=None, name=None, vault_name=None): ''' Optional Parameters: - id -- Id of the secret. If specified all other 'Id' arguments should be omitted. - maxresults -- Maximum number of results to return in a page. If not specified, the service will return up to 25 results. - name -- Name of the secret. Required if --id is not specified. - vault_name -- Name of the Key Vault. Required if --id is not specified. ''' return _call_az("az keyvault secret list-versions", locals()) def list_deleted(vault_name, id=None, maxresults=None): ''' Required Parameters: - vault_name -- Name of the Vault. Optional Parameters: - id -- Full URI of the Vault. If specified all other 'Id' arguments should be omitted. - maxresults -- Maximum number of results to return in a page. If not specified the service will return up to 25 results. ''' return _call_az("az keyvault secret list-deleted", locals()) def set(name, vault_name, description=None, disabled=None, encoding=None, expires=None, file=None, not_before=None, secret_attributes=None, tags=None, value=None): ''' Create a secret (if one doesn't exist) or update a secret in a KeyVault. Required Parameters: - name -- Name of the secret. - vault_name -- Name of the Vault. Optional Parameters: - description -- Description of the secret contents (e.g. password, connection string, etc) - disabled -- Create secret in disabled state. 
- encoding -- Source file encoding. The value is saved as a tag (`file-encoding=<val>`) and used during download to automatically encode the resulting file. - expires -- Expiration UTC datetime (Y-m-d'T'H:M:S'Z'). - file -- Source file for secret. Use in conjunction with '--encoding' - not_before -- Key not usable before the provided UTC datetime (Y-m-d'T'H:M:S'Z'). - secret_attributes -- ==SUPPRESS== - tags -- space-separated tags: key[=value] [key[=value] ...]. Use '' to clear existing tags. - value -- Plain text secret value. Cannot be used with '--file' or '--encoding' ''' return _call_az("az keyvault secret set", locals()) def set_attributes(content_type=None, enabled=None, expires=None, id=None, name=None, not_before=None, secret_attributes=None, tags=None, vault_name=None, version=None): ''' Optional Parameters: - content_type -- Type of the secret value such as a password. - enabled -- Enable the secret. - expires -- Expiration UTC datetime (Y-m-d'T'H:M:S'Z'). - id -- Id of the secret. If specified all other 'Id' arguments should be omitted. - name -- Name of the secret. Required if --id is not specified. - not_before -- Key not usable before the provided UTC datetime (Y-m-d'T'H:M:S'Z'). - secret_attributes -- ==SUPPRESS== - tags -- space-separated tags: key[=value] [key[=value] ...]. Use '' to clear existing tags. - vault_name -- Name of the Key Vault. Required if --id is not specified. - version -- The secret version. If omitted, uses the latest version. ''' return _call_az("az keyvault secret set-attributes", locals()) def show(id=None, name=None, vault_name=None, version=None): ''' Optional Parameters: - id -- Id of the secret. If specified all other 'Id' arguments should be omitted. - name -- Name of the secret. Required if --id is not specified. - vault_name -- Name of the Key Vault. Required if --id is not specified. - version -- The secret version. If omitted, uses the latest version. 
''' return _call_az("az keyvault secret show", locals()) def show_deleted(id=None, name=None, vault_name=None): ''' Optional Parameters: - id -- The recovery id of the secret. If specified all other 'Id' arguments should be omitted. - name -- Name of the secret. Required if --id is not specified. - vault_name -- Name of the Vault. Required if --id is not specified. ''' return _call_az("az keyvault secret show-deleted", locals()) def delete(id=None, name=None, vault_name=None): ''' Optional Parameters: - id -- Id of the secret. If specified all other 'Id' arguments should be omitted. - name -- Name of the secret. Required if --id is not specified. - vault_name -- Name of the Key Vault. Required if --id is not specified. ''' return _call_az("az keyvault secret delete", locals()) def purge(id=None, name=None, vault_name=None): ''' Optional Parameters: - id -- The recovery id of the secret. If specified all other 'Id' arguments should be omitted. - name -- Name of the secret. Required if --id is not specified. - vault_name -- Name of the Vault. Required if --id is not specified. ''' return _call_az("az keyvault secret purge", locals()) def recover(id=None, name=None, vault_name=None): ''' Optional Parameters: - id -- The recovery id of the secret. If specified all other 'Id' arguments should be omitted. - name -- Name of the secret. Required if --id is not specified. - vault_name -- Name of the Vault. Required if --id is not specified. ''' return _call_az("az keyvault secret recover", locals()) def download(file, encoding=None, id=None, name=None, vault_name=None, version=None): ''' Required Parameters: - file -- File to receive the secret contents. Optional Parameters: - encoding -- Encoding of the secret. By default, will look for the 'file-encoding' tag on the secret. Otherwise will assume 'utf-8'. - id -- Id of the secret. If specified all other 'Id' arguments should be omitted. - name -- Name of the secret. Required if --id is not specified. 
- vault_name -- Name of the Key Vault. Required if --id is not specified. - version -- The secret version. If omitted, uses the latest version. ''' return _call_az("az keyvault secret download", locals()) def backup(file, id=None, name=None, vault_name=None): ''' Required Parameters: - file -- File to receive the secret contents. Optional Parameters: - id -- Id of the secret. If specified all other 'Id' arguments should be omitted. - name -- Name of the secret. Required if --id is not specified. - vault_name -- Name of the Key Vault. Required if --id is not specified. ''' return _call_az("az keyvault secret backup", locals()) def restore(file, vault_name): ''' Required Parameters: - file -- File to receive the secret contents. - vault_name -- Name of the Vault. ''' return _call_az("az keyvault secret restore", locals())
38.4
169
0.671053
1,054
7,296
4.577799
0.13093
0.038342
0.047668
0.061969
0.755233
0.747565
0.736995
0.713368
0.676062
0.623212
0
0.001229
0.219572
7,296
189
170
38.603175
0.846154
0.667489
0
0
0
0
0.184127
0
0
0
0
0
0
1
0.481481
false
0
0.037037
0
1
0
0
0
0
null
0
0
0
0
1
1
1
0
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
0
0
0
0
0
0
5
09b70a8daa547b7bb9b9a2bf0f98ef47deb6783e
584
py
Python
roboticstoolbox/backends/Swift/__init__.py
looch-huang/robotics-toolbox-python
38d58a16dc1131c4d30c129b0e9a640ed3c4d3cf
[ "MIT" ]
1
2021-07-02T09:08:06.000Z
2021-07-02T09:08:06.000Z
roboticstoolbox/backends/Swift/__init__.py
looch-huang/robotics-toolbox-python
38d58a16dc1131c4d30c129b0e9a640ed3c4d3cf
[ "MIT" ]
null
null
null
roboticstoolbox/backends/Swift/__init__.py
looch-huang/robotics-toolbox-python
38d58a16dc1131c4d30c129b0e9a640ed3c4d3cf
[ "MIT" ]
1
2021-07-02T09:08:04.000Z
2021-07-02T09:08:04.000Z
from roboticstoolbox.backends.Swift.Swift import Swift from roboticstoolbox.backends.Swift.Swift import Slider from roboticstoolbox.backends.Swift.Swift import SwiftElement from roboticstoolbox.backends.Swift.Swift import Label from roboticstoolbox.backends.Swift.Swift import Select from roboticstoolbox.backends.Swift.Swift import Button from roboticstoolbox.backends.Swift.Swift import Checkbox from roboticstoolbox.backends.Swift.Swift import Radio __all__ = [ 'Swift', 'Slider', 'SwiftElement', 'Label', 'Select', 'Button', 'Checkbox', 'Radio' ]
29.2
61
0.782534
65
584
6.969231
0.2
0.335541
0.476821
0.565121
0.759382
0.759382
0
0
0
0
0
0
0.131849
584
19
62
30.736842
0.893491
0
0
0
0
0
0.090753
0
0
0
0
0
0
1
0
false
0
0.444444
0
0.444444
0
0
0
0
null
1
1
1
0
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
1
0
0
0
0
5
09bb0d2da6fc62e8700c66817701712fc6f876a9
105
py
Python
draw_image_lidar_fusion.py
JHZ-2326/3d_detection_kit
25a0d7db1441862bc7d7b6b8616562d0b8cd6247
[ "MIT" ]
23
2019-03-26T14:26:05.000Z
2022-03-25T03:57:41.000Z
draw_image_lidar_fusion.py
JHZ-2326/3d_detection_kit
25a0d7db1441862bc7d7b6b8616562d0b8cd6247
[ "MIT" ]
1
2021-11-21T03:03:18.000Z
2021-11-21T03:03:18.000Z
draw_image_lidar_fusion.py
JHZ-2326/3d_detection_kit
25a0d7db1441862bc7d7b6b8616562d0b8cd6247
[ "MIT" ]
6
2019-10-28T12:09:22.000Z
2021-07-08T16:53:02.000Z
""" this file shows drawing 3D bouding box on lidar point cloud using calibration file """ import cv2
11.666667
59
0.742857
16
105
4.875
0.9375
0
0
0
0
0
0
0
0
0
0
0.02381
0.2
105
8
60
13.125
0.904762
0.780952
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
5
09dc90247adfd10d93375175dffd0c0082a4350d
6,575
py
Python
tests/tests_f12020/test_telemetry.py
f1laps/f1laps-telemetry
0c264f9300d58397fe2f8b3018cd2e9151e28d08
[ "MIT" ]
3
2021-02-23T22:06:13.000Z
2022-02-06T15:05:56.000Z
tests/tests_f12020/test_telemetry.py
f1laps/f1laps-telemetry
0c264f9300d58397fe2f8b3018cd2e9151e28d08
[ "MIT" ]
null
null
null
tests/tests_f12020/test_telemetry.py
f1laps/f1laps-telemetry
0c264f9300d58397fe2f8b3018cd2e9151e28d08
[ "MIT" ]
null
null
null
from unittest import TestCase from receiver.f12020.telemetry import Telemetry from receiver.telemetry_base import KEY_INDEX_MAP class TelemetryTests(TestCase): def test_start_telemetry(self): # initialize telemetry = Telemetry() self.assertEqual(telemetry.current_lap, None) self.assertEqual(telemetry.current_lap_number, None) self.assertEqual(telemetry.lap_dict, {}) # start new lap telemetry.start_new_lap(1) self.assertEqual(telemetry.current_lap.number, 1) self.assertEqual(telemetry.current_lap_number, 1) self.assertEqual(telemetry.lap_dict[1], telemetry.current_lap) self.assertEqual(telemetry.current_lap.frame_dict, {}) # start same lap again - no problem telemetry.start_new_lap(1) self.assertEqual(telemetry.current_lap.number, 1) self.assertEqual(telemetry.current_lap.frame_dict, {}) # start new lap telemetry.start_new_lap(2) self.assertEqual(telemetry.current_lap.number, 2) self.assertEqual(telemetry.current_lap_number, 2) self.assertEqual(telemetry.lap_dict[2], telemetry.current_lap) self.assertEqual(telemetry.lap_dict[1].number, 1) self.assertEqual(telemetry.current_lap.frame_dict, {}) # a third lap deletes lap 1 telemetry.start_new_lap(3) self.assertEqual(telemetry.current_lap.number, 3) self.assertEqual(telemetry.lap_dict[3], telemetry.current_lap) self.assertEqual(telemetry.lap_dict[2].number, 2) self.assertEqual(1 in telemetry.lap_dict, False) self.assertEqual(2 in telemetry.lap_dict, True) def test_set_frame_value(self): telemetry = Telemetry() # test on empty value - should not raise but log warning telemetry.set(1000, speed=200) self.assertEqual(telemetry.current_lap, None) # start lap telemetry.start_new_lap(1) telemetry.set(1000, speed=200) self.assertEqual(telemetry.current_lap.frame_dict[1000][KEY_INDEX_MAP["speed"]], 200) telemetry.set(1000, speed=300, brake=0.05) self.assertEqual(telemetry.current_lap.frame_dict[1000][KEY_INDEX_MAP["speed"]], 300) self.assertEqual(telemetry.current_lap.frame_dict[1000][KEY_INDEX_MAP["brake"]], 0.05) # a new frame 
sets telemetry.set(1001, speed=300, brake=0.04) self.assertEqual(telemetry.current_lap.frame_dict[1001][KEY_INDEX_MAP["speed"]], 300) self.assertEqual(telemetry.current_lap.frame_dict[1001][KEY_INDEX_MAP["brake"]], 0.04) # new lap, new values telemetry.start_new_lap(2) telemetry.set(1000, speed=300, brake=0.01) self.assertEqual(telemetry.current_lap.frame_dict[1000][KEY_INDEX_MAP["speed"]], 300) self.assertEqual(telemetry.current_lap.frame_dict[1000][KEY_INDEX_MAP["brake"]], 0.01) def test_get_telemetry_api_dict(self): telemetry = Telemetry() telemetry.start_new_lap(1) telemetry.set(1000, speed=200, lap_distance=10) frame_dict = telemetry.get_telemetry_api_dict(1) self.assertEqual(frame_dict, {1000: [10, None, 200, None, None, None, None, None]}) frame_dict = telemetry.get_telemetry_api_dict(2) self.assertEqual(frame_dict, None) class TelemetryLapTests(TestCase): def test_clean_frame_flashback(self): telemetry = Telemetry() telemetry.start_new_lap(1) tl = telemetry.current_lap telemetry.set(1000, speed=300, lap_distance=50) telemetry.set(1001, speed=301, lap_distance=51) telemetry.set(1002, speed=302, lap_distance=52) self.assertEqual(tl.frame_dict, { 1000: [50, None, 300, None, None, None, None, None], 1001: [51, None, 301, None, None, None, None, None], 1002: [52, None, 302, None, None, None, None, None], }) telemetry.set(1003, speed=303, lap_distance=51) self.assertEqual(tl.frame_dict, { 1000: [50, None, 300, None, None, None, None, None], 1003: [51, None, 303, None, None, None, None, None], }) telemetry.set(1004, speed=304, lap_distance=52) telemetry.set(1005, speed=305, lap_distance=53) self.assertEqual(tl.frame_dict, { 1000: [50, None, 300, None, None, None, None, None], 1003: [51, None, 303, None, None, None, None, None], 1004: [52, None, 304, None, None, None, None, None], 1005: [53, None, 305, None, None, None, None, None], }) def test_clean_frame_new_lap(self): telemetry = Telemetry() telemetry.start_new_lap(1) tl = telemetry.current_lap 
telemetry.set(1000, speed=300, lap_distance=4400) telemetry.set(1001, speed=301, lap_distance=4401) telemetry.set(1002, speed=302, lap_distance=4402) self.assertEqual(tl.frame_dict, { 1000: [4400, None, 300, None, None, None, None, None], 1001: [4401, None, 301, None, None, None, None, None], 1002: [4402, None, 302, None, None, None, None, None], }) telemetry.set(1003, speed=303, lap_distance=13) self.assertEqual(tl.frame_dict, { 1003: [13, None, 303, None, None, None, None, None], }) telemetry.set(1004, speed=304, lap_distance=14) telemetry.set(1005, speed=305, lap_distance=15) self.assertEqual(tl.frame_dict, { 1003: [13, None, 303, None, None, None, None, None], 1004: [14, None, 304, None, None, None, None, None], 1005: [15, None, 305, None, None, None, None, None], }) def test_clean_frame_pre_first_line(self): telemetry = Telemetry() telemetry.start_new_lap(1) tl = telemetry.current_lap telemetry.set(1000, speed=300, lap_distance=-100) telemetry.set(1001, speed=301, lap_distance=-99) telemetry.set(1002, speed=302, lap_distance=-98) self.assertEqual(tl.frame_dict, {}) telemetry.set(1003, speed=303, lap_distance=13) self.assertEqual(tl.frame_dict, { 1003: [13, None, 303, None, None, None, None, None], }) telemetry.set(1004, speed=304, lap_distance=14) telemetry.set(1005, speed=305, lap_distance=15) self.assertEqual(tl.frame_dict, { 1003: [13, None, 303, None, None, None, None, None], 1004: [14, None, 304, None, None, None, None, None], 1005: [15, None, 305, None, None, None, None, None], })
45.659722
94
0.64289
861
6,575
4.744483
0.109175
0.164504
0.185067
0.164504
0.819829
0.78776
0.740759
0.64798
0.562791
0.548348
0
0.099165
0.234677
6,575
143
95
45.979021
0.712639
0.030418
0
0.52459
0
0
0.0055
0
0
0
0
0
0.311475
1
0.04918
false
0
0.02459
0
0.090164
0
0
0
0
null
0
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
5
09f345446615b556df7c18aeae31c4f6afe8ee07
78
py
Python
custom/openlmis/models.py
dslowikowski/commcare-hq
ad8885cf8dab69dc85cb64f37aeaf06106124797
[ "BSD-3-Clause" ]
1
2015-02-10T23:26:39.000Z
2015-02-10T23:26:39.000Z
custom/openlmis/models.py
SEL-Columbia/commcare-hq
992ee34a679c37f063f86200e6df5a197d5e3ff6
[ "BSD-3-Clause" ]
1
2022-03-12T01:03:25.000Z
2022-03-12T01:03:25.000Z
custom/openlmis/models.py
johan--/commcare-hq
86ee99c54f55ee94e4c8f2f6f30fc44e10e69ebd
[ "BSD-3-Clause" ]
null
null
null
# ensure our signals get loaded at django bootstrap time from . import signals
39
56
0.807692
12
78
5.25
0.916667
0
0
0
0
0
0
0
0
0
0
0
0.166667
78
2
57
39
0.969231
0.692308
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
5
61e1edc232bf558d2e4ab2bfb63ab8b2179d582d
24,975
py
Python
cross_loss_influence/data/scripts/mat2vec_tokenize_test.py
CORE-Robotics-Lab/Cross_Loss_Influence_Functions
6f0fa45f8896cd6c238c143eca6ddebef97b642c
[ "MIT" ]
1
2022-03-08T05:59:17.000Z
2022-03-08T05:59:17.000Z
cross_loss_influence/data/scripts/mat2vec_tokenize_test.py
CORE-Robotics-Lab/Cross_Loss_Influence_Functions
6f0fa45f8896cd6c238c143eca6ddebef97b642c
[ "MIT" ]
null
null
null
cross_loss_influence/data/scripts/mat2vec_tokenize_test.py
CORE-Robotics-Lab/Cross_Loss_Influence_Functions
6f0fa45f8896cd6c238c143eca6ddebef97b642c
[ "MIT" ]
null
null
null
import pandas as pd
import regex
from cross_loss_influence.config import PROJECT_HOME, DATA_DIR
import logging
import string
import unidecode
from chemdataextractor.doc import Paragraph
from gensim.models.phrases import Phraser
import copy
import os

logger = logging.getLogger(__name__)

"""
Modified version of the MaterialsTextProcessor, publicly available in the mat2vec github repo:
https://github.com/materialsintelligence/mat2vec under the MIT License
Originally authored by Vahe Tshitoyan and credited to John Dagdelen, Leigh Weston, Anubhav Jain
Revisions and updates by Andrew Silva
"""


def _mark_sentence_boundaries(processed):
    """Replace every '.' token with an '<EOS>', '<SOS>' pair, prepend a leading
    '<SOS>', and drop a dangling trailing '<SOS>'. Mutates and returns the list."""
    while '.' in processed:
        eos_ind = processed.index('.')
        processed[eos_ind:eos_ind + 1] = ['<EOS>', '<SOS>']
    processed[:0] = ['<SOS>']
    if processed[-1] == '<SOS>':
        processed = processed[:-1]
    return processed


def _convert_number_token(tokens, i, tok):
    """Return '<NUM>' for a numeric token unless it is a crystal direction such
    as "(111)" or "〈111〉".

    NOTE(review): at i == 0 the ``tokens[i - 1]`` lookup wraps to the last
    token (Python negative indexing) — preserved from the original code.
    """
    try:
        if tokens[i - 1] == "(" and tokens[i + 1] == ")" \
                or tokens[i - 1] == "〈" and tokens[i + 1] == "〉":
            return tok
        return "<NUM>"
    except IndexError:
        return "<NUM>"


class MedicalTextProcessor:
    """Tokenizer/normalizer for medical abstracts: synonym replacement from
    CSV lookup tables, chemdataextractor tokenization, optional number
    normalization, accent stripping and phrase generation."""

    def __init__(self, phraser_path=os.path.join(PROJECT_HOME, 'models', 'checkpoints', 'phraser.pkl'),
                 BERT=False):
        self.punctuation = list(string.punctuation) + ["\"", "“", "”", "≥", "≤", "×"]
        self.phraser = Phraser.load(phraser_path)
        self.BERT = BERT
        if self.BERT:
            # NOTE(review): BertTokenizer is never imported in this module, so
            # BERT=True raises NameError at construction — confirm the intended
            # import before enabling this path.
            self.toker = BertTokenizer(vocab_file=os.path.join(PROJECT_HOME, 'data', 'objects', 'itos.txt'))
        # Matches plain numbers, optionally with a parenthesized uncertainty, e.g. "1.23(4)".
        self.NR_BASIC = regex.compile(r"^[+-]?\d*\.?\d+\(?\d*\)?+$", regex.DOTALL)
        self.lookup_dict = self.load_replacements()
        self.stop_words = ['ourselves', 'hers', 'between', 'yourself', 'but', 'again', 'there', 'about', 'once',
                           'during', 'out', 'very', 'having', 'with', 'they', 'own', 'an', 'be', 'some', 'for',
                           'do', 'its', 'yours', 'such', 'into', 'of', 'most', 'itself', 'other', 'off', 'is',
                           's', 'am', 'or', 'who', 'as', 'from', 'him', 'each', 'the', 'themselves', 'until',
                           'below', 'are', 'we', 'these', 'your', 'his', 'through', 'don', 'nor', 'me', 'were',
                           'her', 'more', 'himself', 'this', 'down', 'should', 'our', 'their', 'while', 'above',
                           'both', 'up', 'to', 'ours', 'had', 'she', 'all', 'no', 'when', 'at', 'any', 'before',
                           'them', 'same', 'and', 'been', 'have', 'in', 'will', 'on', 'does', 'yourselves',
                           'then', 'that', 'because', 'what', 'over', 'why', 'so', 'can', 'did', 'not', 'now',
                           'under', 'he', 'you', 'herself', 'has', 'just', 'where', 'too', 'only', 'myself',
                           'which', 'those', 'i', 'after', 'few', 'whom', 't', 'being', 'if', 'theirs', 'my',
                           'against', 'a', 'by', 'doing', 'it', 'how', 'further', 'was', 'here', 'than',
                           'î', 'ó', 'ô', 'đ', 'š', 'ƒ', 'ƞ', 'ɤ', 'ώ', 'в', 'и', 'с', '⁶', '₁', '₂', 'ⅴ', 'ⅻ',
                           '↕', '≡', '⋙', '△', '☆', '✕', '-', '>', '}(n)', '}118', '}56', '~10', '~106', '~116',
                           '~145,000', '~195', '~210', '~28', '~29', '~30', '~300', '~31', '~32', '~49', '~505',
                           '~80', '~90', 'è', 'ê', 'ϕ', '℃', 'ⅸ']

    @staticmethod
    def replace_from_lookup(lowercase_tokens, replacement_dict):
        """
        swap out synonyms (set multiple_sclerosis = ms = multiple sclerosis)
        :param lowercase_tokens: raw altered strings
        :param replacement_dict: dict of form {token to keep: [list of synonyms to replace],
                                 token2_to_keep: [list2], etc}
        :return: text input
        """
        for key, val in replacement_dict.items():
            for token in val:
                # Repeat the replacement so overlapping occurrences left behind
                # by the previous pass are also caught.
                for _ in range(3):
                    lowercase_tokens = lowercase_tokens.replace(token, key)
        return lowercase_tokens

    @staticmethod
    def load_replacements():
        """
        create a dictionary of {token_to_keep: [list of replacements]} with hard coded lookups
        :return: dictionary
        """
        disease_analogy_gt = pd.read_csv(os.path.join(DATA_DIR, 'disease_similarities.csv'))
        molecular_analogy_gt = pd.read_csv(os.path.join(DATA_DIR, 'molecular_similarities.csv'))
        master_dict = {}
        # Rows generated from "X is a number between" templates are appended
        # here and processed after the two source frames.
        replacement_number_df = pd.DataFrame(columns=molecular_analogy_gt.columns)
        for df in [disease_analogy_gt, molecular_analogy_gt, replacement_number_df]:
            for _, row in df.iterrows():
                row = row.dropna().tolist()
                if ' ' in row[0]:
                    # Multi-word canonical name: keep the spaced form as a
                    # synonym and make the underscored form the canonical token.
                    row.append(row[0])
                    row[0] = row[0].replace(' ', '_')
                skip_this = False
                for entry in row:
                    if 'X is a number between' in entry:
                        # Template row: expand into one concrete row per subscript
                        # value and skip the template itself.
                        skip_this = True
                        start = 1
                        end = int(entry.split()[-1])
                        for subscript in range(start, end + 1):
                            new_row = copy.deepcopy(row)
                            new_row = new_row[:-1]  # drop the "X is a number between" sentinel
                            if row[0] == 'CDX' and subscript in [31, 40]:
                                # weird hard-coded rule to avoid cluster of differentiation 31 and 40
                                continue
                            new_data = {"Molecule": new_row[0].replace('X', str(subscript))}
                            for col in range(1, 15):
                                new_data[f"A{col}"] = None
                            for entry_index in range(1, len(new_row)):
                                new_data[f"A{entry_index}"] = new_row[entry_index].replace('X', str(subscript))
                            replacement_number_df.loc[len(replacement_number_df)] = new_data
                if skip_this:
                    continue
                # Pad with spaces so replacement only hits whole tokens.
                row = [' ' + r.lower() + ' ' for r in row]
                list_of_replacements = row[1:]
                # Longest synonyms first, so substrings are not replaced prematurely.
                list_of_replacements.sort(key=len)
                list_of_replacements = list_of_replacements[::-1]
                list_of_replacements.append(" " + row[0].strip() + " " + row[0].strip() + " ")
                master_dict[row[0]] = list_of_replacements
        return master_dict

    def token_acceptable(self, text):
        """
        clean tokens of stopwords or urls or whatever else
        :param text: token to clean
        :return: True if token is valid, False otherwise
        """
        stripped = text.strip().lower()
        if not stripped:
            return False
        if 'www.' in stripped:
            return False
        if stripped in self.stop_words:
            return False
        return True

    def tokenize(self, text, keep_sentences=True):
        """Converts a string to a list tokens (words) using a modified chemdataextractor tokenizer.

        Args:
            text: input text as a string
            keep_sentences: if False, will disregard the sentence structure and return
                tokens as a single list of strings. Otherwise returns a list of lists,
                each sentence separately.

        Returns:
            A list of strings if keep_sentences is False, otherwise a list of lists of
            strings, each inner list corresponding to a single sentence.
        """
        if not self.BERT:
            cde_p = Paragraph(text)
            tokens = cde_p.tokens
        else:
            tokens = self.toker._tokenize(text)
        toks = []
        for sentence in tokens:
            if keep_sentences:
                toks.append([])
                for tok in sentence:
                    if not self.BERT:
                        tok = tok.text  # chemdataextractor tokens carry .text
                    if self.token_acceptable(tok):
                        toks[-1] += [tok]
            else:
                for tok in sentence:
                    if not self.BERT:
                        tok = tok.text
                    if self.token_acceptable(tok):
                        toks += [tok]
        return toks

    def _process(self, tokens, exclude_punct, convert_num, remove_accents, make_phrases, punct_set):
        """Shared pipeline for process_for_vocab/process_for_model: lowercase,
        synonym lookup, tokenize, filter/normalize each token, optional
        phrasing, then sentence-boundary markers. ``punct_set`` is the set of
        punctuation tokens dropped when exclude_punct is True."""
        if not isinstance(tokens, str):
            tokens = " ".join(tok for tok in tokens)
        tokens = tokens.lower()
        tokens = self.replace_from_lookup(tokens, self.lookup_dict)
        if not isinstance(tokens, list):  # If it's a string.
            tokens = self.tokenize(tokens, keep_sentences=False)
        processed = []
        for i, tok in enumerate(tokens):
            if exclude_punct and tok in punct_set:  # Punctuation.
                continue
            elif convert_num and self.is_number(tok):  # Number.
                tok = _convert_number_token(tokens, i, tok)
            if remove_accents:
                tok = self.remove_accent(tok)
            processed.append(tok)
        if make_phrases:
            processed = self.make_phrases(processed, reps=2)
        return _mark_sentence_boundaries(processed)

    def process_for_vocab(self, tokens, exclude_punct=False, convert_num=False, remove_accents=True,
                          make_phrases=False):
        """Processes a pre-tokenized list of strings or a string for vocabulary building.

        Args:
            tokens: A list of strings or a string. A string is tokenized first.
            exclude_punct: Bool flag to exclude all punctuation (including '.').
            convert_num: Bool flag to convert numbers (selectively) to <NUM>.
            remove_accents: Bool flag to remove accents, e.g. Néel -> Neel.
            make_phrases: Bool flag to convert single tokens to common phrases.

        Returns:
            A list of strings.
        """
        return self._process(tokens, exclude_punct, convert_num, remove_accents, make_phrases,
                             self.punctuation)

    def process_for_model(self, tokens, exclude_punct=False, convert_num=False, remove_accents=True,
                          make_phrases=False):
        """Same as process_for_vocab, except '.' is retained even when
        exclude_punct is True so sentence boundaries survive and become
        <EOS>/<SOS> markers.

        Returns:
            A list of strings.
        """
        punct_checklist = copy.deepcopy(self.punctuation)
        punct_checklist.remove('.')
        return self._process(tokens, exclude_punct, convert_num, remove_accents, make_phrases,
                             punct_checklist)

    def make_phrases(self, sentence, reps=2):
        """Generates phrases from a sentence of words.

        Args:
            sentence: A list of tokens (strings).
            reps: How many times to combine the words.

        Returns:
            A list of strings where tokens may be joined into phrases with "_".
        """
        while reps > 0:
            sentence = self.phraser[sentence]
            reps -= 1
        return sentence

    def is_number(self, s):
        """Determines if the supplied string is a number.

        Args:
            s: The input string.

        Returns:
            True if the supplied string is a number (both . and , accepted).
        """
        return self.NR_BASIC.match(s.replace(",", "")) is not None

    @staticmethod
    def remove_accent(txt):
        """Removes accents from a string.

        Args:
            txt: The input string.

        Returns:
            The de-accented string.
        """
        # There is a problem with angstrom sometimes, so ignoring length 1 strings.
        return unidecode.unidecode(txt) if len(txt) > 1 else txt


class SciFiTextProcessor:
    """Tokenizer/normalizer for sci-fi text: plain whitespace tokenization,
    no synonym lookup, no phrase generation, empty stop-word list."""

    def __init__(self, phraser_path=os.path.join(PROJECT_HOME, 'models', 'checkpoints', 'phraser.pkl'),
                 BERT=False):
        # ``phraser_path`` is accepted only for signature parity with
        # MedicalTextProcessor; no phraser is loaded here.
        self.punctuation = list(string.punctuation) + ["\"", "“", "”", "≥", "≤", "×"]
        self.BERT = BERT
        self.NR_BASIC = regex.compile(r"^[+-]?\d*\.?\d+\(?\d*\)?+$", regex.DOTALL)
        # Stop-word filtering is intentionally disabled for sci-fi text.
        self.stop_words = []

    def token_acceptable(self, text):
        """
        clean tokens of stopwords or urls or whatever else
        :param text: token to clean
        :return: True if token is valid, False otherwise
        """
        stripped = text.strip().lower()
        if not stripped:
            return False
        if 'www.' in stripped:
            return False
        if stripped in self.stop_words:
            return False
        return True

    def tokenize(self, text, keep_sentences=True):
        """Converts a string to a list of tokens by splitting on spaces.

        Args:
            text: input text as a string
            keep_sentences: if False, returns tokens as a single flat list.

        Returns:
            A list of strings if keep_sentences is False; otherwise a list of
            lists. NOTE(review): in the keep_sentences branch a new sublist is
            appended per *word* (behavior preserved from the original) —
            confirm this is intended before relying on the nested shape.
        """
        tokens = text.split(' ')
        toks = []
        for word in tokens:
            if keep_sentences:
                toks.append([])
                if self.token_acceptable(word):
                    toks[-1] += [word]
            else:
                if self.token_acceptable(word):
                    toks += [word]
        return toks

    def _process(self, tokens, exclude_punct, convert_num, remove_accents, punct_set):
        """Shared pipeline for process_for_vocab/process_for_model: lowercase,
        tokenize, filter/normalize each token, then sentence-boundary markers."""
        if not isinstance(tokens, str):
            tokens = " ".join(tok for tok in tokens)
        tokens = tokens.lower()
        if not isinstance(tokens, list):  # If it's a string.
            tokens = self.tokenize(tokens, keep_sentences=False)
        processed = []
        for i, tok in enumerate(tokens):
            if exclude_punct and tok in punct_set:  # Punctuation.
                continue
            elif convert_num and self.is_number(tok):  # Number.
                tok = _convert_number_token(tokens, i, tok)
            if remove_accents:
                tok = self.remove_accent(tok)
            processed.append(tok)
        return _mark_sentence_boundaries(processed)

    def process_for_vocab(self, tokens, exclude_punct=False, convert_num=True, remove_accents=True,
                          make_phrases=False):
        """Processes a pre-tokenized list of strings or a string for vocabulary building.

        Args:
            tokens: A list of strings or a string. A string is tokenized first.
            exclude_punct: Bool flag to exclude all punctuation (including '.').
            convert_num: Bool flag to convert numbers (selectively) to <NUM>.
            remove_accents: Bool flag to remove accents, e.g. Néel -> Neel.
            make_phrases: Accepted for interface parity; unused in this class.

        Returns:
            A list of strings.
        """
        return self._process(tokens, exclude_punct, convert_num, remove_accents, self.punctuation)

    def process_for_model(self, tokens, exclude_punct=False, convert_num=True, remove_accents=True,
                          make_phrases=False):
        """Same as process_for_vocab, except '.' is retained even when
        exclude_punct is True so sentence boundaries become <EOS>/<SOS> markers.

        Returns:
            A list of strings.
        """
        punct_checklist = copy.deepcopy(self.punctuation)
        punct_checklist.remove('.')
        return self._process(tokens, exclude_punct, convert_num, remove_accents, punct_checklist)

    @staticmethod
    def remove_accent(txt):
        """Removes accents from a string.

        Args:
            txt: The input string.

        Returns:
            The de-accented string.
        """
        # There is a problem with angstrom sometimes, so ignoring length 1 strings.
        return unidecode.unidecode(txt) if len(txt) > 1 else txt

    def is_number(self, s):
        """Determines if the supplied string is a number.

        Args:
            s: The input string.

        Returns:
            True if the supplied string is a number (both . and , accepted).
        """
        return self.NR_BASIC.match(s.replace(",", "")) is not None


def preprocess_data_for_vocab(corpus_in, processor, lower=True):
    """
    Pass in a set of abstracts from a pickle file, and this will preprocess them to remove numbers,
    punctuation, casing and replace synonyms
    :param corpus_in: loaded pickle file
    :param processor: text processor providing process_for_vocab
    :param lower: unused here — lowercasing happens inside the processor
    :return: list of token lists
    """
    num_errors = 0
    all_tokens = []
    for index, abstract in enumerate(corpus_in):
        try:
            tokens = processor.process_for_vocab(abstract, exclude_punct=True)
        except TypeError as e:
            # Non-string entries are counted and skipped.
            print(e)
            num_errors += 1
            continue
        except Exception as e:
            # Any other failure aborts the whole run.
            print(e)
            print(f"Failed on abstract with index {index} and text: {abstract}")
            print(f"Dying...")
            break
        all_tokens.append(tokens)
    logger.info(f"Completed preprocessing with {num_errors} errors")
    return all_tokens


def preprocess_data_for_model(corpus_in, lower=True):
    """
    Pass in a set of abstracts from a pickle file, and this will preprocess them to remove numbers,
    punctuation, casing and replace synonyms
    :param corpus_in: loaded pickle file
    :param lower: unused here — lowercasing happens inside the processor
    :return: list of token lists
    """
    num_errors = 0
    text_processor = MedicalTextProcessor(BERT=False)
    all_tokens = []
    for index, abstract in enumerate(corpus_in):
        try:
            tokens = text_processor.process_for_model(abstract, exclude_punct=True)
        except TypeError as e:
            print(e)
            num_errors += 1
            continue
        except Exception as e:
            print(e)
            print(f"Failed on abstract with index {index} and text: {abstract}")
            print(f"Dying...")
            break
        all_tokens.append(tokens)
    logger.info(f"Completed preprocessing with {num_errors} errors")
    return all_tokens


def preprocess_single_abstract_for_model(abstract_in, lower=True):
    """
    Preprocess a single abstract to remove numbers, punctuation, casing and replace synonyms
    :param abstract_in: loaded abstract text
    :param lower: unused here — lowercasing happens inside the processor
    :return: list of tokens, or False on failure
    """
    text_processor = MedicalTextProcessor(BERT=False)
    try:
        tokens = text_processor.process_for_model(abstract_in, exclude_punct=True)
    except TypeError as e:
        print(e)
        return False
    except Exception as e:
        print(e)
        print(f"Dying...")
        return False
    return tokens
42.838765
144
0.530851
2,923
24,975
4.457065
0.169689
0.01658
0.010746
0.011821
0.784464
0.771185
0.766733
0.761667
0.753761
0.750691
0
0.012811
0.340541
24,975
582
145
42.912371
0.775653
0.31952
0
0.694611
0
0.056886
0.081841
0.006522
0
0
0
0
0
1
0.05988
false
0.011976
0.02994
0
0.173653
0.032934
0
0
0
null
0
0
0
0
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
5
111fc5e446ee2b247f4b6a8816b7d08a63261700
2,995
py
Python
ranks.py
jcorvino/wows-ranked
7bb4d95b9df1e3d4199ac4eaaa0a4ab76bee10e1
[ "MIT" ]
1
2020-07-19T13:42:15.000Z
2020-07-19T13:42:15.000Z
ranks.py
jcorvino/wows-ranked
7bb4d95b9df1e3d4199ac4eaaa0a4ab76bee10e1
[ "MIT" ]
3
2020-07-19T13:39:26.000Z
2020-07-27T00:36:02.000Z
ranks.py
jcorvino/wows-ranked
7bb4d95b9df1e3d4199ac4eaaa0a4ab76bee10e1
[ "MIT" ]
null
null
null
# Season 16 rank information
# Assumes rank 18-11 give a free star same as season 15 https://worldofwarships.com/en/news/general-news/ranked-15/
# TODO: Fix rank 17 logic since stars can't be lost (see https://worldofwarships.com/en/news/general-news/ranked-15/).


def _rank_entry(stars, irrevocable, free_star):
    """Build one rank record in the shape the rest of the code expects."""
    return {'stars': stars, 'irrevocable': irrevocable, 'free-star': free_star}


regular_ranks = {
    18: _rank_entry(1, True, False),
    17: _rank_entry(2, True, True),
    16: _rank_entry(2, True, True),
    15: _rank_entry(2, True, True),
    14: _rank_entry(2, False, True),
    13: _rank_entry(2, False, True),
    12: _rank_entry(2, True, True),
    11: _rank_entry(2, False, True),
    10: _rank_entry(4, False, False),
    9: _rank_entry(4, False, False),
    8: _rank_entry(4, False, False),
    7: _rank_entry(4, False, False),
    6: _rank_entry(4, False, False),
    5: _rank_entry(5, False, False),
    4: _rank_entry(5, False, False),
    3: _rank_entry(5, False, False),
    2: _rank_entry(5, False, False),
    1: _rank_entry(1, True, True),
}

# Ranked sprint. Based on season 5.
# See https://worldofwarships.com/en/news/general-news/ranked-sprint-5/
sprint_ranks = {
    10: _rank_entry(1, True, False),
    9: _rank_entry(2, True, True),
    8: _rank_entry(2, True, True),
    7: _rank_entry(2, False, True),
    6: _rank_entry(2, False, True),
    5: _rank_entry(3, True, True),
    4: _rank_entry(3, False, True),
    3: _rank_entry(3, True, True),
    2: _rank_entry(3, False, True),
    1: _rank_entry(1, True, True),
}
19.834437
118
0.433389
291
2,995
4.453608
0.182131
0.179012
0.157407
0.296296
0.85108
0.849537
0.849537
0.166667
0.114198
0
0
0.045632
0.4
2,995
150
119
19.966667
0.67557
0.120534
0
0.722222
0
0
0.266362
0
0
0
0
0.006667
0
1
0
false
0
0
0
0
0.006944
0
0
0
null
0
0
1
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
5
1160dee8d431e6c562a350794b047cae125212d9
63
py
Python
invtorch/nn/__init__.py
xmodar/invtorch
74b80be3b4126925e583282b6f78171b99788b37
[ "Apache-2.0" ]
14
2021-11-18T11:26:11.000Z
2022-01-20T13:29:52.000Z
invtorch/nn/__init__.py
xmodar/invtorch
74b80be3b4126925e583282b6f78171b99788b37
[ "Apache-2.0" ]
null
null
null
invtorch/nn/__init__.py
xmodar/invtorch
74b80be3b4126925e583282b6f78171b99788b37
[ "Apache-2.0" ]
null
null
null
"""Invertible Neural Network Modules""" from .modules import *
21
39
0.746032
7
63
6.714286
0.857143
0
0
0
0
0
0
0
0
0
0
0
0.126984
63
2
40
31.5
0.854545
0.52381
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
5
116979a115da92cf6bb0813ff555c4c6fa55743c
47
py
Python
python/testData/mover/oneStatementInFunction_afterUp.py
jnthn/intellij-community
8fa7c8a3ace62400c838e0d5926a7be106aa8557
[ "Apache-2.0" ]
2
2018-12-29T09:53:39.000Z
2018-12-29T09:53:42.000Z
python/testData/mover/oneStatementInFunction_afterUp.py
Cyril-lamirand/intellij-community
60ab6c61b82fc761dd68363eca7d9d69663cfa39
[ "Apache-2.0" ]
173
2018-07-05T13:59:39.000Z
2018-08-09T01:12:03.000Z
python/testData/mover/oneStatementInFunction_afterUp.py
Cyril-lamirand/intellij-community
60ab6c61b82fc761dd68363eca7d9d69663cfa39
[ "Apache-2.0" ]
2
2020-03-15T08:57:37.000Z
2020-04-07T04:48:14.000Z
# IDE refactoring fixture: appears to be the expected file state after a
# "move statement up" action (the single statement formerly inside ``foo``
# moved above the def) — confirm against the paired "_beforeUp" fixture.
test2()
test()
def foo():
    pass
test1()
7.833333
11
0.510638
6
47
4
1
0
0
0
0
0
0
0
0
0
0
0.060606
0.297872
47
5
12
9.4
0.666667
0
0
0
0
0
0
0
0
0
0
0
0
1
0.2
true
0.2
0
0
0.2
0
1
1
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
1
0
0
0
0
0
5
fed111cbb709ab27bfdb7f2ef11f7cf199d1112a
216
py
Python
dyftc/feedings/tests/test_models.py
sokotim/dyftc
55749a80a579a70c248521b1112a0136fb35b639
[ "MIT" ]
2
2020-04-02T14:40:49.000Z
2020-04-02T14:42:50.000Z
dyftc/feedings/tests/test_models.py
sokotim/dyftc
55749a80a579a70c248521b1112a0136fb35b639
[ "MIT" ]
null
null
null
dyftc/feedings/tests/test_models.py
sokotim/dyftc
55749a80a579a70c248521b1112a0136fb35b639
[ "MIT" ]
null
null
null
import pytest

from dyftc.feedings.models import Feeding

# Every test in this module needs database access.
pytestmark = pytest.mark.django_db


def test_feeding_get_absolute_url(feeding: Feeding):
    """The canonical URL of a feeding is /feedings/<pk>/."""
    expected = f"/feedings/{feeding.pk}/"
    assert feeding.get_absolute_url() == expected
21.6
67
0.787037
30
216
5.433333
0.633333
0.122699
0.220859
0.257669
0
0
0
0
0
0
0
0
0.111111
216
9
68
24
0.848958
0
0
0
0
0
0.106481
0.106481
0
0
0
0
0.2
1
0.2
false
0
0.4
0
0.6
0
0
0
0
null
0
1
1
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
1
0
1
0
0
5
fee8d7e15858d7d90fca1c58c3453628b54d5c5d
89
py
Python
silkyy/__init__.py
kevenli/silkyy
105069e26a1ac04f170e55d732e030e68a4fdb86
[ "Apache-2.0" ]
null
null
null
silkyy/__init__.py
kevenli/silkyy
105069e26a1ac04f170e55d732e030e68a4fdb86
[ "Apache-2.0" ]
null
null
null
silkyy/__init__.py
kevenli/silkyy
105069e26a1ac04f170e55d732e030e68a4fdb86
[ "Apache-2.0" ]
1
2021-05-07T01:28:49.000Z
2021-05-07T01:28:49.000Z
import os
from pkgutil import get_data

# pkgutil.get_data returns the raw resource as ``bytes`` (or ``None`` when the
# resource cannot be located). Decode and strip so ``__version__`` is a clean
# str, as version consumers (setuptools, logging, f-strings) expect.
_raw_version = get_data(__package__, 'VERSION')
__version__ = _raw_version.decode('utf-8').strip() if _raw_version else 'unknown'
22.25
46
0.786517
12
89
5
0.666667
0.233333
0
0
0
0
0
0
0
0
0
0
0.146067
89
4
46
22.25
0.789474
0
0
0
0
0
0.08046
0
0
0
0
0
0
1
0
false
0
0.666667
0
0.666667
0
1
0
0
null
1
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
1
0
1
0
0
5
3a07a4cc1bd59435a95e1f034c157246e9984ff2
760
py
Python
old_version/chess_helper.py
tanishbafna/wizard-chess
eee5f0c8412d134c431c867b23c5de06ed44efc8
[ "MIT" ]
null
null
null
old_version/chess_helper.py
tanishbafna/wizard-chess
eee5f0c8412d134c431c867b23c5de06ed44efc8
[ "MIT" ]
null
null
null
old_version/chess_helper.py
tanishbafna/wizard-chess
eee5f0c8412d134c431c867b23c5de06ed44efc8
[ "MIT" ]
null
null
null
import re

# ---------------------
# Pre-compiled pattern for a UCI-style move: from-square + to-square, e.g. "e2e4".
# Compiled once at module level instead of on every call.
_MOVE_REGEX = re.compile(r'[a-h][1-8][a-h][1-8]')


def _read_move(file_name):
    """Read, strip, and lowercase the current contents of the move file."""
    with open(file_name, 'r') as f:
        return f.read().strip().lower()


def _is_valid_move(move):
    """Return True when ``move`` is exactly four characters matching the move pattern."""
    return len(move) == 4 and _MOVE_REGEX.match(move) is not None


# ---------------------
def getFirstMove(file_name):
    """Poll ``file_name`` until it contains a valid first move; return it lowercased.

    NOTE: this busy-waits by re-reading the file — it is file-based IPC with
    whatever process writes the move.
    """
    moves_in = _read_move(file_name)
    while not _is_valid_move(moves_in):
        moves_in = _read_move(file_name)
    return moves_in


# ---------------------
def getMove(lastmove, file_name):
    """Poll ``file_name`` until it contains a valid move different from
    ``lastmove`` (and non-empty); return it lowercased."""
    moves_in = lastmove
    while moves_in in [lastmove, ''] or not _is_valid_move(moves_in):
        moves_in = _read_move(file_name)
    return moves_in
# ---------------------
23.75
99
0.530263
112
760
3.455357
0.285714
0.198966
0.031008
0.041344
0.739018
0.739018
0.739018
0.739018
0.739018
0.739018
0
0.017007
0.226316
760
31
100
24.516129
0.641156
0.082895
0
0.625
0
0
0.06196
0
0
0
0
0
0
1
0.125
false
0
0.0625
0
0.3125
0
0
0
0
null
0
0
0
0
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
5
3a48600ae0f4b9713cd0099ba175645d79bb662a
2,023
py
Python
tests/unit/commands/logs/test_command.py
torresxb1/aws-sam-cli
d307f2eb6e1a91a476a5e2ca6070f974b0c913f1
[ "BSD-2-Clause", "Apache-2.0" ]
1
2021-07-10T14:19:00.000Z
2021-07-10T14:19:00.000Z
tests/unit/commands/logs/test_command.py
torresxb1/aws-sam-cli
d307f2eb6e1a91a476a5e2ca6070f974b0c913f1
[ "BSD-2-Clause", "Apache-2.0" ]
1
2021-07-29T20:56:48.000Z
2021-07-29T20:56:48.000Z
tests/unit/commands/logs/test_command.py
torresxb1/aws-sam-cli
d307f2eb6e1a91a476a5e2ca6070f974b0c913f1
[ "BSD-2-Clause", "Apache-2.0" ]
1
2021-11-26T13:12:50.000Z
2021-11-26T13:12:50.000Z
from unittest import TestCase
from unittest.mock import Mock, patch

from samcli.commands.logs.command import do_cli


class TestLogsCliCommand(TestCase):
    """Unit tests for the ``sam logs`` CLI entry point (``do_cli``).

    The two tests differed only in the ``tailing`` flag and the fetcher method
    asserted, so the shared setup/assertion is factored into ``_run_do_cli``.
    """

    def setUp(self):
        self.function_name = "name"
        self.stack_name = "stack name"
        self.filter_pattern = "filter"
        self.start_time = "start"
        self.end_time = "end"

    def _run_do_cli(self, logs_command_context_mock, tailing):
        """Invoke do_cli with the fixture arguments, assert the logs context
        was constructed with them, and return the mocked context manager value
        for fetcher assertions."""
        context_mock = Mock()
        logs_command_context_mock.return_value.__enter__.return_value = context_mock

        do_cli(self.function_name, self.stack_name, self.filter_pattern, tailing,
               self.start_time, self.end_time)

        logs_command_context_mock.assert_called_with(
            self.function_name,
            stack_name=self.stack_name,
            filter_pattern=self.filter_pattern,
            start_time=self.start_time,
            end_time=self.end_time,
        )
        return context_mock

    @patch("samcli.commands.logs.logs_context.LogsCommandContext")
    def test_without_tail(self, logs_command_context_mock):
        context_mock = self._run_do_cli(logs_command_context_mock, tailing=False)
        context_mock.fetcher.load_time_period.assert_called_with(
            filter_pattern=context_mock.filter_pattern,
            start_time=context_mock.start_time,
            end_time=context_mock.end_time,
        )

    @patch("samcli.commands.logs.logs_context.LogsCommandContext")
    def test_with_tailing(self, logs_command_context_mock):
        context_mock = self._run_do_cli(logs_command_context_mock, tailing=True)
        # NOTE: tail is asserted without end_time — tailing follows the stream.
        context_mock.fetcher.tail.assert_called_with(
            filter_pattern=context_mock.filter_pattern,
            start_time=context_mock.start_time,
        )
34.288136
113
0.694019
251
2,023
5.183267
0.167331
0.143736
0.083013
0.10146
0.780938
0.760953
0.710223
0.710223
0.710223
0.619523
0
0
0.229362
2,023
58
114
34.87931
0.834509
0
0
0.454545
0
0
0.06525
0.051409
0
0
0
0
0.090909
1
0.068182
false
0
0.068182
0
0.159091
0
0
0
0
null
0
0
0
0
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
5
28b0e61e69c28f39518a59e96af11f6d788d7b9b
166
py
Python
nomo/apps.py
guettli/nomo
f1ea89fa404ae13f74975e8ac018c2f0af17c4e1
[ "Apache-2.0" ]
1
2020-10-28T16:49:36.000Z
2020-10-28T16:49:36.000Z
nomo/apps.py
guettli/nomo
f1ea89fa404ae13f74975e8ac018c2f0af17c4e1
[ "Apache-2.0" ]
null
null
null
nomo/apps.py
guettli/nomo
f1ea89fa404ae13f74975e8ac018c2f0af17c4e1
[ "Apache-2.0" ]
null
null
null
from django.apps import AppConfig
from django.db.models.signals import post_migrate
from django.urls import reverse


class NoMoConfig(AppConfig):
    """Django AppConfig for the ``nomo`` application."""

    # NOTE(review): ``post_migrate`` and ``reverse`` are imported but unused in
    # this block — presumably intended for a ``ready()`` hook; confirm before
    # removing.
    name = 'nomo'
18.444444
49
0.789157
23
166
5.652174
0.695652
0.230769
0
0
0
0
0
0
0
0
0
0
0.144578
166
8
50
20.75
0.915493
0
0
0
0
0
0.024242
0
0
0
0
0
0
1
0
false
0
0.6
0
1
0
1
0
0
null
1
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
1
0
1
0
0
5
28cf1c49eb624cc07fa193b92dd0befd5b59be92
138
py
Python
display4D/fieldsGUI/__init__.py
seVenVo1d/General-Relativity-Tensorial-Calculations
6c07823f74840352253c235af2e4dbe60044941a
[ "MIT" ]
1
2021-06-16T07:29:30.000Z
2021-06-16T07:29:30.000Z
display4D/fieldsGUI/__init__.py
seVenVo1d/General-Relativity-Tensorial-Calculations
6c07823f74840352253c235af2e4dbe60044941a
[ "MIT" ]
null
null
null
display4D/fieldsGUI/__init__.py
seVenVo1d/General-Relativity-Tensorial-Calculations
6c07823f74840352253c235af2e4dbe60044941a
[ "MIT" ]
1
2021-12-02T15:11:06.000Z
2021-12-02T15:11:06.000Z
from .scalarfieldGUI import scalarfield_gui4d from .vectorfieldGUI import vectorfield_gui4d from .tensorfieldGUI import tensorfield_gui4d
34.5
45
0.891304
15
138
8
0.6
0.15
0
0
0
0
0
0
0
0
0
0.02381
0.086957
138
3
46
46
0.928571
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
5
28f4c047d92354ea0d049625ee69addad01cc0f4
43
py
Python
haiku_trainer/__init__.py
NTT123/haiku_trainer
b065249faf0dd4b1808a662f1c75d4e1fe35d76f
[ "MIT" ]
null
null
null
haiku_trainer/__init__.py
NTT123/haiku_trainer
b065249faf0dd4b1808a662f1c75d4e1fe35d76f
[ "MIT" ]
null
null
null
haiku_trainer/__init__.py
NTT123/haiku_trainer
b065249faf0dd4b1808a662f1c75d4e1fe35d76f
[ "MIT" ]
null
null
null
from .trainer import Trainer, TrainerState
21.5
42
0.837209
5
43
7.2
0.8
0
0
0
0
0
0
0
0
0
0
0
0.116279
43
1
43
43
0.947368
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
1
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
0
0
0
5
e929bf304de6b18e0fbaf2653938e78ec43fb12a
155
py
Python
whyis/blueprint/sparql/__init__.py
tolulomo/whyis
eb50ab3301eb7efd27a1a3f6fb2305dedd910397
[ "Apache-2.0" ]
31
2018-05-30T02:41:23.000Z
2021-10-17T01:25:20.000Z
whyis/blueprint/sparql/__init__.py
tolulomo/whyis
eb50ab3301eb7efd27a1a3f6fb2305dedd910397
[ "Apache-2.0" ]
115
2018-04-07T00:59:11.000Z
2022-03-02T03:06:45.000Z
whyis/blueprint/sparql/__init__.py
tolulomo/whyis
eb50ab3301eb7efd27a1a3f6fb2305dedd910397
[ "Apache-2.0" ]
25
2018-04-07T00:49:55.000Z
2021-09-28T14:29:18.000Z
from .sparql_blueprint import sparql_blueprint from .sparql_form import sparql_form as __sparql_form from .sparql_view import sparql_view as __sparql_view
38.75
53
0.877419
24
155
5.166667
0.291667
0.241935
0
0
0
0
0
0
0
0
0
0
0.103226
155
3
54
51.666667
0.892086
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0.333333
1
0
0
null
1
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
0
0
0
5
e93c8185a5a900a7fdca427b6da07e97dc098008
165
py
Python
Task00/hello.py
apilatau/pythonSandBox
cb7a78dbf383190bcb08788685ea8d468f13742b
[ "MIT" ]
null
null
null
Task00/hello.py
apilatau/pythonSandBox
cb7a78dbf383190bcb08788685ea8d468f13742b
[ "MIT" ]
1
2021-10-01T11:50:40.000Z
2021-10-01T11:50:40.000Z
Task00/hello.py
apilatau/pythonSandBox
cb7a78dbf383190bcb08788685ea8d468f13742b
[ "MIT" ]
null
null
null
import os,sys sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))) def main(): print("Hello world") if __name__=="__main__": main()
20.625
76
0.69697
25
165
4.12
0.6
0.174757
0.252427
0.291262
0.31068
0
0
0
0
0
0
0
0.115152
165
8
77
20.625
0.705479
0
0
0
0
0
0.114458
0
0
0
0
0
0
1
0.166667
true
0
0.166667
0
0.333333
0.166667
1
0
0
null
0
1
1
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
0
0
0
0
0
5
3a6f991d2aee352ff15d2b5873a65967c1048365
177
py
Python
Beecrowd/Python/1097 - Sequence IJ 3.py
nazmul629/OJ-Problem_Solution
cf5e01ab8cf062441bfe901e12d98cbaa1d727f9
[ "MIT" ]
null
null
null
Beecrowd/Python/1097 - Sequence IJ 3.py
nazmul629/OJ-Problem_Solution
cf5e01ab8cf062441bfe901e12d98cbaa1d727f9
[ "MIT" ]
null
null
null
Beecrowd/Python/1097 - Sequence IJ 3.py
nazmul629/OJ-Problem_Solution
cf5e01ab8cf062441bfe901e12d98cbaa1d727f9
[ "MIT" ]
null
null
null
j= 7 for i in range(9+1): if i%2 ==1: print(f"I={i} J={j}") print(f"I={i} J={j-1}") print(f"I={i} J={j-2}") j =j+2
16.090909
31
0.310734
35
177
1.571429
0.342857
0.145455
0.381818
0.436364
0.581818
0.581818
0.4
0
0
0
0
0.081633
0.446328
177
11
32
16.090909
0.479592
0
0
0
0
0
0.207865
0
0
0
0
0
0
1
0
false
0
0
0
0
0.428571
0
0
1
null
0
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
1
0
5
3a87c63ee57a93781ff4ef532719ad7d4bcf84e6
62
py
Python
enthought/pyface/ui/qt4/action/action_item.py
enthought/etsproxy
4aafd628611ebf7fe8311c9d1a0abcf7f7bb5347
[ "BSD-3-Clause" ]
3
2016-12-09T06:05:18.000Z
2018-03-01T13:00:29.000Z
enthought/pyface/ui/qt4/action/action_item.py
enthought/etsproxy
4aafd628611ebf7fe8311c9d1a0abcf7f7bb5347
[ "BSD-3-Clause" ]
1
2020-12-02T00:51:32.000Z
2020-12-02T08:48:55.000Z
enthought/pyface/ui/qt4/action/action_item.py
enthought/etsproxy
4aafd628611ebf7fe8311c9d1a0abcf7f7bb5347
[ "BSD-3-Clause" ]
null
null
null
# proxy module from pyface.ui.qt4.action.action_item import *
20.666667
46
0.790323
10
62
4.8
0.9
0
0
0
0
0
0
0
0
0
0
0.018182
0.112903
62
2
47
31
0.854545
0.193548
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
5
3aa3173d8d2299381880afe37010325c7bc9fd95
91
py
Python
tests/data-files/plugins/noop_utils.py
carderne/raster-vision
915fbcd3263d8f2193e65c2cd0eb53e050a47a01
[ "Apache-2.0" ]
4
2019-03-11T12:38:15.000Z
2021-04-06T14:57:52.000Z
tests/data-files/plugins/noop_utils.py
carderne/raster-vision
915fbcd3263d8f2193e65c2cd0eb53e050a47a01
[ "Apache-2.0" ]
null
null
null
tests/data-files/plugins/noop_utils.py
carderne/raster-vision
915fbcd3263d8f2193e65c2cd0eb53e050a47a01
[ "Apache-2.0" ]
1
2021-02-25T18:23:27.000Z
2021-02-25T18:23:27.000Z
# Used to test import statements for the NoopAugmentor plugin. def noop(x): return x
15.166667
62
0.725275
14
91
4.714286
0.928571
0
0
0
0
0
0
0
0
0
0
0
0.21978
91
5
63
18.2
0.929577
0.659341
0
0
0
0
0
0
0
0
0
0
0
1
0.5
false
0
0
0.5
1
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
0
0
1
1
0
0
5
3ac0fca8c6c1d1c78bc7aa2775006d8a06e9dde9
187
py
Python
nosql_schema/db/__init__.py
steven266/nosql_schema
a00e5049cadf1f4c83c7e151677e680c36383b6e
[ "MIT" ]
null
null
null
nosql_schema/db/__init__.py
steven266/nosql_schema
a00e5049cadf1f4c83c7e151677e680c36383b6e
[ "MIT" ]
2
2017-03-30T16:40:32.000Z
2018-01-09T11:24:36.000Z
nosql_schema/db/__init__.py
steven266/nosql_schema
a00e5049cadf1f4c83c7e151677e680c36383b6e
[ "MIT" ]
null
null
null
from .abstract_collection_handler import AbstractCollectionHandler from .abstract_database_handler import AbstractDatabaseHandler from .database import create_handler, get_default_handler
62.333333
66
0.914439
20
187
8.2
0.55
0.146341
0
0
0
0
0
0
0
0
0
0
0.064171
187
3
67
62.333333
0.937143
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
5
aaf1a7c562f43ebdb9336decabf1fffca19f3678
149
py
Python
explainn/jaspar/__init__.py
oriolfornes/JASPAR-motif-discovery
4c3fe91e9345df0aa06a52e94a9bb02ff2748965
[ "MIT" ]
null
null
null
explainn/jaspar/__init__.py
oriolfornes/JASPAR-motif-discovery
4c3fe91e9345df0aa06a52e94a9bb02ff2748965
[ "MIT" ]
null
null
null
explainn/jaspar/__init__.py
oriolfornes/JASPAR-motif-discovery
4c3fe91e9345df0aa06a52e94a9bb02ff2748965
[ "MIT" ]
null
null
null
from .jaspar2logo import get_figure, _get_figure from .jaspar2others import reformat_motif __all__ = ["get_figure", "reformat_motif", "_get_figure"]
37.25
57
0.812081
19
149
5.736842
0.473684
0.330275
0
0
0
0
0
0
0
0
0
0.014815
0.09396
149
4
57
37.25
0.792593
0
0
0
0
0
0.233333
0
0
0
0
0
0
1
0
false
0
0.666667
0
0.666667
0
1
0
0
null
1
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
1
0
1
0
0
5
c94644fbd1f1f0f0e01c4abe7b0819f3fd606b25
98
py
Python
tilenol/gadgets/__init__.py
paulie-g/tilenol
6e52c558d1c698ef8320e6ef32771af2864c1c49
[ "MIT" ]
42
2015-01-19T15:43:16.000Z
2021-09-19T15:20:58.000Z
tilenol/gadgets/__init__.py
paulie-g/tilenol
6e52c558d1c698ef8320e6ef32771af2864c1c49
[ "MIT" ]
2
2015-05-30T03:15:17.000Z
2019-02-28T16:37:02.000Z
tilenol/gadgets/__init__.py
tailhook/tilenol
3b71f6600d437a4e5f167315683e7f0137cd3788
[ "MIT" ]
11
2015-10-04T06:01:02.000Z
2022-03-26T18:46:46.000Z
from .menu import SelectExecutable, SelectLayout, FindWindow, RenameWindow from .tabs import Tabs
32.666667
74
0.836735
11
98
7.454545
0.727273
0
0
0
0
0
0
0
0
0
0
0
0.112245
98
2
75
49
0.942529
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
5
a3602e4a3313cb0727e95c3f3e7617e7ca4cacb1
194
py
Python
phantom/predicates/base.py
sobolevn/phantom-types
f9a5fe6ad2cb340a812d644f1c0eb8953deb7bce
[ "MIT" ]
null
null
null
phantom/predicates/base.py
sobolevn/phantom-types
f9a5fe6ad2cb340a812d644f1c0eb8953deb7bce
[ "MIT" ]
null
null
null
phantom/predicates/base.py
sobolevn/phantom-types
f9a5fe6ad2cb340a812d644f1c0eb8953deb7bce
[ "MIT" ]
null
null
null
from typing import Protocol from typing import TypeVar T = TypeVar("T", bound=object, contravariant=True) class Predicate(Protocol[T]): def __call__(self, arg: T, /) -> bool: ...
19.4
50
0.670103
25
194
5.04
0.68
0.15873
0.253968
0
0
0
0
0
0
0
0
0
0.195876
194
9
51
21.555556
0.807692
0
0
0
0
0
0.005155
0
0
0
0
0
0
1
0.166667
false
0
0.333333
0
0.666667
0
1
0
0
null
0
1
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
1
0
1
0
0
5
a36090c3faeceff694283fa09952f8ba4eba08ca
112
py
Python
newsltd_etl/projects/tribune/datasets/date_dim/__init__.py
telia-oss/birgitta-example-etl
8bb32aac94486b4edc1fee3964cf7d2dcf095020
[ "MIT" ]
8
2019-11-25T16:39:33.000Z
2022-03-31T12:48:54.000Z
newsltd_etl/projects/tribune/datasets/date_dim/__init__.py
telia-oss/birgitta-example-etl
8bb32aac94486b4edc1fee3964cf7d2dcf095020
[ "MIT" ]
218
2019-09-09T11:11:59.000Z
2022-03-08T05:16:40.000Z
newsltd_etl/projects/tribune/datasets/date_dim/__init__.py
telia-oss/birgitta-example-etl
8bb32aac94486b4edc1fee3964cf7d2dcf095020
[ "MIT" ]
4
2020-07-21T15:33:40.000Z
2021-12-22T11:32:45.000Z
from birgitta.dataset.dataset import Dataset from .schema import schema dataset = Dataset("date_dim", schema)
18.666667
44
0.794643
15
112
5.866667
0.466667
0.318182
0
0
0
0
0
0
0
0
0
0
0.125
112
5
45
22.4
0.897959
0
0
0
0
0
0.071429
0
0
0
0
0
0
1
0
false
0
0.666667
0
0.666667
0
1
0
0
null
1
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
1
0
1
0
0
5
6e6cad59faeaa5656162b1788675128391265252
147
py
Python
cpcctool/__init__.py
l2m2/cpcc-tool
48404e1c228f06edfde697069641d722823955f3
[ "MIT" ]
1
2021-06-15T10:16:01.000Z
2021-06-15T10:16:01.000Z
cpcctool/__init__.py
l2m2/cpcc-tool
48404e1c228f06edfde697069641d722823955f3
[ "MIT" ]
null
null
null
cpcctool/__init__.py
l2m2/cpcc-tool
48404e1c228f06edfde697069641d722823955f3
[ "MIT" ]
null
null
null
from .cpcc_code_docx import gen_code_docx, docx_first_n_pages, docx_last_n_pages, docx_sandwich from .cpcc_count_code_lines import count_code_lines
73.5
95
0.897959
27
147
4.296296
0.481481
0.137931
0.172414
0
0
0
0
0
0
0
0
0
0.068027
147
2
96
73.5
0.846715
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
5
6e89e186781b9364c1c92ad2edd34e16dcb3ad96
135
py
Python
ginjinn/test_pytest.py
AGOberprieler/GinJinn_development
e2424ccad4a1ab05f9c21c26cd177aaa8d69b2c7
[ "Apache-2.0" ]
17
2021-07-24T20:55:58.000Z
2022-02-09T05:15:04.000Z
ginjinn/test_pytest.py
AGOberprieler/GinJinn_development
e2424ccad4a1ab05f9c21c26cd177aaa8d69b2c7
[ "Apache-2.0" ]
1
2021-12-21T06:33:56.000Z
2022-02-05T13:57:53.000Z
ginjinn/test_pytest.py
AGOberprieler/GinJinn2
527feac125f476165e332277823c11016565f99d
[ "Apache-2.0" ]
null
null
null
''' A simple test ''' def test_pytest(): '''test_pytest Summary ------- Dummy test function. ''' assert True
11.25
24
0.518519
14
135
4.857143
0.714286
0.294118
0
0
0
0
0
0
0
0
0
0
0.311111
135
11
25
12.272727
0.731183
0.466667
0
0
0
0
0
0
0
0
0
0
0.5
1
0.5
true
0
0
0
0.5
0
1
0
0
null
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
1
0
1
1
0
0
0
0
0
0
5
6e91ebf8d5c3c61871a37cb00f10482f74464930
261
py
Python
salesforce/exceptions.py
lycanthropystudios/salesforce-python
f3cc49cd08380536aa45f46ae8536e5f36780476
[ "MIT" ]
5
2018-03-16T19:59:31.000Z
2020-04-28T13:47:34.000Z
salesforce/exceptions.py
lycanthropystudios/salesforce-python
f3cc49cd08380536aa45f46ae8536e5f36780476
[ "MIT" ]
null
null
null
salesforce/exceptions.py
lycanthropystudios/salesforce-python
f3cc49cd08380536aa45f46ae8536e5f36780476
[ "MIT" ]
3
2020-03-31T20:11:49.000Z
2022-02-25T14:26:22.000Z
class BaseError(Exception): pass class UnknownError(BaseError): pass class AccessTokenRequired(BaseError): pass class BadOAuthTokenError(BaseError): pass class BadRequestError(BaseError): pass class TokenError(BaseError): pass
11.347826
37
0.731801
24
261
7.958333
0.375
0.235602
0.376963
0
0
0
0
0
0
0
0
0
0.199234
261
22
38
11.863636
0.913876
0
0
0.5
0
0
0
0
0
0
0
0
0
1
0
true
0.5
0
0
0.5
0
1
0
0
null
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
1
0
0
0
0
0
5
6ec48974a2ec9dbc59f22ac4822bb757a1da3a40
22,205
py
Python
weight.py
ggoh29/Checkers-AI
b3543a8464aff983c6cecb442fe8bab036426579
[ "MIT" ]
1
2021-03-20T15:09:08.000Z
2021-03-20T15:09:08.000Z
weight.py
ggoh29/Checkers-AI
b3543a8464aff983c6cecb442fe8bab036426579
[ "MIT" ]
null
null
null
weight.py
ggoh29/Checkers-AI
b3543a8464aff983c6cecb442fe8bab036426579
[ "MIT" ]
null
null
null
weights = [[[-233.10916985130171, -132.02809178499916, -133.97809820826203, -6.439078484448004, 91.78401106281984, 203.35283098941278, 221.1208647487615, 186.57900165803895, 142.15480658486774, 94.13831558666118, 76.33931569006049, 65.23096543325813, 56.55695882555967, 20.29009953465111, 14.078977818934154, 3.1704887735561105, 13.2862161364332, 6.663354377121673, -17.521523206193113, 5.540600922674115, -5.639476693265324, -4.8090600050858265, -2.6484576147532857, -0.6965175004570358, 0.058453590147003354, 0.08494166129179126, 0.5822403611772298, -0.3732670319266005, 0.2351893694803331, -0.002116054736276407, -0.40820020357435927, 0.1395627588740911, 59.190274067208705], [-233.16787359719103, -133.09129004235572, -133.90718891224336, -6.164164595570928, 90.18398489757116, 203.822591748199, 220.224142834925, 186.94096818319505, 142.14337004368065, 95.2954964522241, 75.52273270716913, 64.3136355212682, 57.00315591344283, 20.10908931492667, 13.39681707262878, 3.234193816442745, 11.610688497355241, 7.76424310204873, -18.667387028279833, 5.724472017698351, -4.089840152029699, -4.737287527721938, -1.8204752805941018, -0.6367966163163106, -0.3514423207091917, -0.8360637295741231, -0.9877634747955004, -0.5417953403377151, 0.08029479644601611, 0.3764254171911583, -0.355064599759203, -0.36333364433088033, 60.9269976247728], [-232.80205296337004, -132.0188423805417, -134.8298283012035, -5.815301072926971, 92.08268831112936, 203.98748247885786, 221.0814835487129, 187.79214465157455, 143.62281332579954, 95.06051975610535, 75.4141482418702, 63.9271645976061, 56.563320861597674, 21.216115009780445, 14.164516228539867, 4.38246753333746, 12.8829348865244, 6.773418890262926, -17.487325436075235, 5.794796770492681, -5.246976877744146, -5.261450785920299, -2.6646492451493846, -0.14059356372545387, 0.8801642300934731, 0.16368443370543218, 0.0735031203947818, 0.6857682341322378, 0.8631643069554003, 0.9175470238278542, -0.17578558376016562, 0.320033548995067, 59.75792455965109], 
[-231.954917814056, -132.0053873595635, -134.02395739695535, -6.334891177867392, 91.10302200426594, 202.85203560597557, 220.2014037478839, 186.82368824031454, 143.75885677518784, 95.17255778799645, 76.45653192945232, 64.59933064734412, 55.91898665148465, 20.02977529130542, 15.137841392598316, 3.5209899606931696, 11.473484615387974, 7.041999401739565, -18.181652991927702, 6.02296498511157, -5.22636648745358, -3.8156953123209427, -2.6006443389248908, 0.3529146519576236, 0.30618892262070396, 0.18055234774840612, 0.895120888179125, 0.7341901732094842, 0.5746357262984108, -0.897221676080536, -0.9297747727671575, 0.26313372097258525, 59.93102918199867], [-231.8597595923692, -133.10929488102357, -134.020438733329, -6.508223605392641, 90.26736021500518, 202.54985310675795, 221.31471912928447, 186.983427962754, 142.06703522710717, 94.74825074119377, 76.33314891404153, 63.650212441560136, 56.09471680325403, 21.01825237749394, 14.861956460915229, 3.300684767267371, 11.784863240930356, 7.5302712229979285, -18.52549279006796, 5.960733625056645, -5.224115674544993, -3.468794380761772, -2.805650794180831, -0.07859617116663282, 0.8783230953465326, 0.1722898160982107, -0.20402782231789085, 0.41387932773516267, -0.43567639733085484, 0.3753554983755596, 0.2954407999857587, -0.11489467425347888, 60.2422272292799], [-232.89230106752171, -132.99858911850507, -134.65737938687803, -6.9792735022978, 90.44800532780994, 202.56829409100482, 219.56225289541973, 186.06687243268203, 142.39266103764814, 93.97707339111447, 75.44464141087697, 63.66032625716021, 56.988222141122144, 19.76551409316881, 15.194896879723796, 4.1952173328124305, 11.85532833790084, 8.054669376630887, -18.04071743630943, 4.623109196141839, -4.668568712295471, -4.5637318613414415, -2.0152250731549506, -0.9587066896753456, 0.32866037442111895, -0.42589372736443254, -0.20246576118240878, -0.917024713431398, -0.16431183701079988, 0.3686928138385046, 0.9144676707692851, -0.8349014500294449, 60.87700251834675], 
[-231.97480892726338, -131.92322143551613, -134.01631863269742, -6.287406255269449, 91.57155923707562, 204.10486398862975, 220.51047896388772, 187.79489545320632, 142.43582722287792, 93.88683423925605, 76.42746435747904, 63.61686162119816, 56.99253684207629, 20.763266674028802, 14.769401974093965, 2.9410303941769915, 13.046838420311817, 6.443127059016261, -17.05854436747508, 4.364499685171414, -4.100115755902353, -3.7152542345082598, -2.0345466766968374, 0.3438943764567899, 0.734429257264464, 0.9917679149911065, 0.5068897956291052, 0.4774146411366613, -0.671641849966186, 0.6894618362015548, -0.8646534856146182, 0.7045072710282634, 60.86288232068027], [-231.9391289834192, -133.81444391265467, -135.20378392454188, -6.462163665218318, 90.72639800089726, 203.2024806072659, 219.74317351761823, 187.6779132245019, 142.36981880803017, 93.55920813286238, 75.74965253913534, 64.58816055237384, 55.684046926988366, 20.395142365648738, 15.07939935874713, 3.020552166711393, 12.696361345062645, 7.198806277966366, -17.695205040071997, 5.767487736413037, -4.6698180340756394, -4.64451208740007, -1.1683491067240006, -0.6761588433512153, 0.364120183280656, 0.729181164428081, 0.24368107043527143, 0.2238437300974565, -0.9792647247992738, -0.6306594157884564, 0.13407077641219223, -0.3816103249799121, 60.073271618258104], [-232.24735777958227, -133.04410723478742, -134.81979710776352, -5.222712135060507, 91.55923053344391, 202.23206911998307, 221.05492652877504, 186.16200137959603, 142.71412925015647, 94.84119482290723, 76.73411732427346, 63.70866624830421, 57.08982598106749, 20.699264597723545, 13.303875412270742, 3.111053307940842, 11.70942234169212, 6.322932706867692, -17.460146687720123, 5.521473276777217, -4.650754330652705, -4.479167976055895, -2.0563146855613836, -0.9501869800332177, -0.9432908577251067, 0.9491355801689527, 0.3278308001697523, -0.4537595393103524, 0.4173894319974931, 0.29547569949765373, 0.8069929238594409, 0.22879700268382197, 60.59722068765607], 
[-232.16232547530393, -132.2481760449397, -135.02659905893898, -5.375019275988348, 90.80707754574769, 203.72092162328755, 219.95299625587182, 187.5514311648871, 143.00847827327993, 93.94014259894146, 75.78454377184097, 63.97293265957152, 56.175625861603514, 20.692202910986662, 13.401434933596573, 3.2078130677982153, 11.861685560658138, 8.050463822185367, -17.626052534359673, 4.935285772655176, -5.43079387823135, -4.841020378999994, -1.5246956010677173, -1.298073344566385, -0.5171802266523364, 0.7785725409915663, -0.5575765278884275, -0.48471248012327783, 0.9665635986081715, -0.9584301604536691, 0.8647533083750492, 0.8397571490174294, 59.339260416726205], [-232.6516463560318, -132.00973925024653, -133.90022802295658, -6.575590351978772, 91.16023187010651, 203.45872877368876, 221.03611393314455, 187.18884955133285, 142.23664028959882, 94.07907894170691, 76.2386772231212, 64.50072602164656, 56.07332037874612, 21.16208674849551, 13.78489926143584, 3.8597633280742722, 11.955558825351801, 6.157465019740756, -17.987300448686437, 4.737489537149486, -4.936553498258474, -4.513724538106248, -1.1707266975846422, -0.2831014848185272, -0.8101211149043506, -0.40419455392257375, -0.5607667907549911, 0.47773696142935296, -0.5015663769195036, -0.9155349903560208, -0.04278173041181499, -0.06963364057532062, 60.27905808420323], [-232.1528176848866, -133.79165528952907, -134.59627276218535, -6.007961165541039, 90.89047233940549, 202.95504350333644, 220.28487910714313, 186.2024140650376, 143.12089035001472, 95.42499173770558, 75.43644124815422, 65.0736144625866, 56.12028451066975, 19.690112337148804, 14.481358090511604, 3.838792931209995, 12.622351704605286, 6.979043718787793, -17.93962063377137, 5.003627409541229, -3.965523175225973, -4.14928577671612, -2.267164325535858, -0.7258486557469308, -0.009540549582181868, 0.4467512521864354, -0.31219147054224594, -0.6841527645269949, 0.42603600475165426, -0.6190694342166492, -0.16027730628045966, 0.8326571910166629, 60.716292977421354], 
[-231.81377038794517, -131.97724552881252, -134.5773923259052, -5.599995080639609, 91.36502421062238, 203.595037806577, 219.36743974795183, 187.29496453191268, 142.5076855670836, 95.3851945651247, 75.94424451997203, 64.71577058441846, 57.145848996442744, 19.560573623339483, 14.889495575586702, 3.2197796324326298, 11.83093404428152, 7.030585217044809, -16.965008291689692, 6.124976903647905, -5.648538363498476, -3.7705515017289244, -1.580375166609995, -1.426464207626078, 0.40795518984054624, 0.8540185960082427, -0.569340976743395, 0.9824419543283558, -0.5478610450061152, 0.5161721315416992, -0.024384417043693984, 0.18039644620114448, 60.30015700290158], [-231.72255529644787, -132.00439072372055, -134.59639390082515, -6.580104644480388, 91.13142433991351, 203.52455718819314, 219.8692476528666, 186.84093164274427, 142.95352093814034, 94.95934984138546, 75.92497462953683, 65.28578673924765, 56.67815974116934, 20.96318086880223, 13.431196050112163, 2.809528346764319, 13.178140652724505, 6.383085483898295, -17.775518269427245, 5.81391607339621, -5.015734404436914, -3.9146369990616865, -1.7603806528672947, -1.3293300356691036, -0.8567881364939753, -0.14154227908744743, -0.17384480200652352, -0.5659738866285309, 0.3481625426608552, -0.963923083007882, 0.2261496256919282, 0.3665419128139955, 60.949002377544346], [-232.5710878817451, -133.84887517632387, -134.41512869923656, -5.160853036902851, 91.50340973261125, 202.7776936021117, 219.95180640719227, 186.64442827303458, 143.55117371423896, 95.51464245244675, 77.24606653978383, 65.30792185491957, 55.2455076955429, 21.26530054302859, 13.98048405820591, 4.039621415512451, 12.221188067573108, 7.706672456602185, -18.139808017600938, 4.592795090832404, -4.974658445696455, -5.080768396010687, -2.002903082613226, -0.2991105101510782, 0.974203173416563, 0.6637535535016741, 0.4298599660772189, -0.09940924610282664, -0.9491341341030972, -0.7461389989598062, 0.16719542719057157, 0.1784311937853369, 60.037210896801], 
[-232.57486211780906, -132.4500286644238, -135.3123259634692, -5.936829636531599, 91.01308498564478, 203.70872328781792, 220.2435376988547, 186.7857021677109, 142.46353880903078, 93.80942158804326, 77.22463427422423, 63.94045296185665, 55.526442774125016, 20.363519171584933, 14.136914644531982, 3.491390420264243, 12.938743310582582, 8.003797470868124, -18.594704756253233, 4.79318418855792, -4.685270955364445, -4.077545240583116, -2.1155149130212356, -0.49784440785222217, -0.5928265516319331, 0.27711323945355537, -0.15369343859115414, -0.9562074235432754, 0.19057366977390378, 0.4441923763649014, -0.7715180044565937, -0.5812946248850306, 60.76522476116767], [-232.48558143498462, -133.17685786285858, -134.63870520571416, -5.9416415510111955, 90.7214349376741, 202.38557012953996, 219.69937514963948, 186.80126951504283, 143.77357576149075, 94.01871136011357, 75.4446535667474, 63.79561310107341, 56.470857964182784, 19.77721493444961, 14.359504211063811, 3.1748837461851775, 12.493482789837703, 7.687321248835975, -17.511273446131487, 4.5638899360086, -5.71910599190446, -4.762698417107377, -1.7058599698451635, -0.6644072869468365, -0.90842855978706, -0.755277218359337, -0.18424423373123444, 0.43430527217642534, -0.328452557611997, 0.7881469227942393, 0.5676583785314133, -0.03365460112965413, 59.48422175103757], [-233.05525510048443, -133.15868876337782, -135.0708135886053, -6.350260900068003, 90.85767462945279, 204.0663586932144, 219.81196424973172, 186.07997339433905, 143.70379651370183, 93.6441978631294, 75.96385941382816, 64.96721235100846, 55.60674054480123, 20.462993881861152, 13.756336701977311, 3.9756686431782664, 11.692035001037798, 6.718029395623615, -18.279888868431698, 4.436342248627565, -4.222298000929883, -3.56181758199416, -1.2437463567796978, -1.3080291913271627, 0.6129484259563547, 0.867777135926576, 0.22224002917078933, 0.6300084912199664, 0.049824565073450566, -0.08253504571869197, -0.9532353784224517, 0.1810192126043053, 61.06895150661504], 
[-231.8155576287429, -133.79341044758124, -134.40863409926695, -5.034551374953913, 92.07771312949961, 203.15159269846626, 219.66754332254854, 187.62209385484957, 143.05321878395196, 94.31371801820147, 76.1196483196131, 64.98567797697066, 57.046605617593414, 19.45962098634421, 14.694844162695869, 3.0130563699151875, 13.240053958935732, 7.166622426992684, -17.6660171447017, 5.250079561826078, -5.948033872273418, -5.195607172017671, -2.320110090798635, -0.9606082997163488, -0.7402613915381357, 0.12783410796479489, -0.4677402246524762, 0.7147102487313499, 0.5211450298626645, 0.5911397298637051, -0.45653637966299043, -0.46525902701821154, 60.73681308070019], [-231.58297216884574, -132.23194856912906, -134.51885994532344, -6.221003766110028, 90.3704478674923, 203.02274977590466, 220.39550821515562, 186.13057934927693, 143.5752863836879, 95.24160402210757, 75.97094136941908, 64.3108630012821, 56.476507987244545, 20.989651426404183, 14.778611866388646, 2.881713619655113, 12.201828880188076, 6.2473407061705535, -17.201269628194108, 5.858008955345509, -5.769983248551941, -5.173797985137102, -1.5348720940187102, 0.0774550013913317, 0.510764088574768, 0.06300689393685488, -0.14324367639080382, 0.8072185451994769, -0.2281146240389884, 0.5112434458452098, 0.6131604669966462, 0.48147693914556133, 61.16783543352658]], [[-903.1277531518255, -2990.68399585911, -777.9457339937718, -777.1585083047183, -1812.8156204094826, -2166.608986547458, -834.2317297515583, -2918.9920182358355, -3003.8635996522703, -915.5422780440715, -1390.4950463136297, -859.9010528276904, -1771.3340442007375, -797.966909353001, -2788.8590203756435, -2530.4813272662427, -3002.0625282174833, -2735.0576764013285, -3002.204561287641, 0.023887298609412744, 826.3534037420492], [-902.6917790015752, -2991.1685734525418, -776.7938873716453, -777.7651477893429, -1812.6653961424463, -2164.840710704428, -834.1893274510336, -2917.759669749714, -3003.600050973843, -916.6264465470545, -1391.004320669632, -860.507248927163, 
-1772.1273112076246, -796.6612657420641, -2788.837835710327, -2530.152554720534, -3001.6614016990693, -2733.150433047165, -3003.093567630315, -0.720985456710963, 826.1207878598701], [-902.8413187855907, -2990.5406087524943, -778.0893305696625, -777.7301028712128, -1812.8952400471462, -2165.7629043029624, -834.7790709730463, -2919.1373367457986, -3005.095531142112, -916.9074812366739, -1389.2793015702653, -860.3976181542555, -1772.321622613335, -798.1702770649098, -2789.406389727169, -2531.263903528873, -3002.626757725746, -2734.744841691187, -3002.8156823386726, -0.6238548298438886, 826.7527970903953], [-903.6734910487983, -2991.259552033195, -777.0786998987983, -777.881832159402, -1814.0140081255613, -2165.37266448738, -835.1035034796997, -2919.4198132962265, -3004.89948490781, -916.6026054406171, -1390.3788886687744, -860.806928244454, -1770.9714035320658, -796.6083985155353, -2788.3446437304215, -2529.516866153162, -3002.956177804002, -2734.9591228981426, -3002.2122056870885, -0.7602396103312854, 825.9244553905545], [-902.6995602348275, -2991.117806248337, -776.2921815965515, -777.7404127860627, -1813.9670339052504, -2166.3536395107662, -835.2048659777806, -2917.6268258298583, -3003.9296014016923, -915.5081448855645, -1391.1409707504877, -860.6205167573578, -1771.2737996195033, -797.5275589853857, -2788.787710959876, -2531.2634504272014, -3001.893647617647, -2733.746851072249, -3001.2924379224237, 0.1845395475204492, 826.1445049368526], [-901.8771969325956, -2990.5616769788408, -778.0139370678461, -777.4283709665779, -1813.3886881149597, -2165.235097541457, -834.2758114218456, -2917.807927580935, -3005.2946087229343, -915.4480968263124, -1390.3720643063948, -859.4502735226863, -1772.010037129527, -797.77992663031, -2789.1593778239558, -2529.913731678135, -3003.2757221467778, -2734.67752563355, -3001.693952473216, -0.45153890899802085, 825.4210014682595], [-902.1444657993007, -2991.6384573520754, -778.0087112439663, -778.0297766841286, -1812.626673740523, 
-2166.4154659079254, -835.0161067651891, -2919.406314139391, -3004.045448696704, -915.5081513799547, -1390.9007459843097, -860.6310530334746, -1771.4185638916665, -796.5756158168781, -2788.4906299368718, -2529.9827696414077, -3002.023734853292, -2734.354586532639, -3002.5233368341856, 0.6601379398106606, 826.8586194870044], [-903.174816943633, -2990.7207608526783, -777.341808878863, -778.6450050456996, -1812.527844378999, -2165.89183341944, -835.4903545250468, -2917.8642602349637, -3003.7071774432015, -915.1269667585669, -1390.3275592653733, -859.588735916691, -1771.241343519036, -797.1151473767877, -2788.629304049787, -2531.387377960357, -3001.904128555147, -2733.126916418797, -3002.4850994853246, 0.5985474307907135, 825.4919523752753], [-902.4973568580643, -2990.4730012174055, -777.078298231686, -776.9104856095545, -1813.3839429846967, -2165.9465447460025, -835.562200214589, -2917.593490540285, -3004.330429401575, -916.9301976328902, -1390.1737287415622, -859.2886006401807, -1770.5785320024665, -796.9852871309677, -2788.693237720537, -2529.7002242090857, -3001.937366767393, -2733.7104220837628, -3002.0317456783587, -0.7559295720822683, 825.5640921038433], [-903.0368977764542, -2990.9700198780297, -776.6774368829306, -778.5093720484849, -1812.3144514012229, -2165.061303423271, -834.9374853988182, -2918.2585859269816, -3004.781899573351, -915.5415489241327, -1390.5665781624311, -860.6239819778241, -1771.1730146680125, -797.6696894372561, -2788.2641066519623, -2531.413359782658, -3002.9766083677086, -2733.872877282008, -3002.6356922682426, -0.29984508683805466, 826.644693173932], [-902.8327071188238, -2990.310629098028, -776.6264120825268, -778.78885778984, -1812.8369864424312, -2165.215347801856, -835.3080581873501, -2918.5355633801955, -3004.787151858761, -915.1195447898325, -1390.3158537133754, -861.0894932214401, -1771.8073034925105, -797.6963517481382, -2788.058413502732, -2529.500363869317, -3002.6704484107577, -2734.203548283407, -3002.70268599713, 
-0.48972530909854317, 825.406684490541], [-902.1322700326759, -2991.1241486182375, -777.8041340423388, -777.4402952662562, -1813.359793403953, -2166.720928755894, -835.2779588460196, -2917.748604520745, -3004.0194193895945, -916.9598958048877, -1390.9334161760833, -859.927090116274, -1770.6512072629084, -796.9080536621399, -2787.6482963672875, -2531.2567090544753, -3003.3177812383237, -2734.8124859564405, -3001.520571183128, 0.04449843107655038, 825.9715153872586], [-901.9095904809401, -2990.748620782321, -777.3931955500702, -777.6831383289871, -1813.5879985808847, -2165.069003190135, -834.3970870582275, -2917.598033191443, -3005.230430973756, -916.3713494436054, -1390.086015521689, -860.4404435801824, -1772.4353020689496, -798.046015733266, -2788.4600735828, -2530.318644662441, -3002.6005560817152, -2733.4145332463913, -3002.2357496704008, -0.3109484528344273, 826.5486413313223], [-902.3941931516991, -2991.911315598101, -777.2811374515401, -777.9314246412637, -1814.0227654201171, -2165.165267140742, -834.2509812081247, -2919.2607732070996, -3004.0498848809557, -915.4614692018584, -1389.8628369627086, -860.9255122518224, -1771.0568053504599, -797.9793972319287, -2787.8224597213098, -2531.4590113149966, -3003.1105021222515, -2734.0669859404134, -3002.231106952699, -0.487807700858238, 825.7376410619074], [-903.0512732159887, -2990.376508973811, -776.4669036412055, -777.0437230857308, -1812.3805920380369, -2166.6043999067815, -834.7611514618567, -2918.597870405584, -3003.586821390666, -916.2891204893411, -1390.1240966187454, -860.6310909217774, -1772.1089083458105, -798.092027723811, -2788.482902371498, -2530.8826650663777, -3002.8200379308714, -2733.179685357399, -3001.307316854071, 0.48699133409162876, 826.5314300760122], [-902.8468737596454, -2990.533240593817, -776.2518882206263, -778.1352578663199, -1813.69980937337, -2165.1103455548614, -835.824049345663, -2917.9923676150047, -3004.7632366073594, -915.3886996768931, -1390.192968046708, -860.5103657882618, 
-1771.142501941249, -797.2452572444447, -2788.153698155081, -2529.559557027152, -3003.5097823836554, -2733.220300086705, -3003.177914000658, 0.7751415094340857, 825.48785871025], [-901.99329286305, -2990.4638839378877, -777.655446285665, -777.941715525546, -1812.2368135949123, -2164.938570483982, -835.1929183902336, -2917.85549361474, -3004.8341249253986, -915.247094826252, -1390.3133149946866, -859.747335337158, -1771.537757498077, -797.683279003666, -2788.115771855312, -2531.2210232618827, -3002.5734000663983, -2733.1939524070476, -3001.6268439192736, 0.8870111380983536, 825.1842377011728], [-902.0763103911371, -2991.280102278835, -777.7416877262804, -777.4332993395869, -1813.8491346793353, -2164.746972900244, -835.8781683811508, -2918.1153707839653, -3003.6441658897334, -915.5023064897504, -1390.9344685172305, -859.5225032294134, -1771.1631917750497, -797.3559726688833, -2787.57273328797, -2531.445134856756, -3002.0730157596645, -2733.9972722786156, -3002.590429851526, 0.5858724163946829, 826.323894469941], [-902.3923986632725, -2990.051138807298, -778.0570398326249, -778.5180685654692, -1812.4813142793423, -2164.842994009234, -835.0598963672765, -2918.7717371485696, -3003.6661290358993, -915.177272534355, -1391.052568181105, -860.631801850075, -1771.388474204241, -797.7906413501161, -2787.820735973969, -2529.505333758031, -3002.317033699569, -2733.2973256704813, -3001.5429976653395, 0.7098412265753469, 825.9035801327841], [-901.7877019766174, -2991.4573723954877, -777.8867556132162, -778.0358392732355, -1813.0136154215847, -2166.2369765789626, -834.4466517669853, -2918.356291694467, -3004.3056431300106, -917.0826089102958, -1390.8456986945096, -860.3588134990972, -1770.6092347455196, -797.5674828247488, -2789.2639467074537, -2531.3448476047633, -3003.053598077848, -2734.8248548772403, -3001.5423190994406, 0.9191770178517782, 826.6678574457168]], [[-0.687144045711034, 0.05153153779929576, -0.8609233822024864, 0.2994344270624978, -0.2613752129147173, 
-0.3597841548120122, -0.22937513061758108, 0.3575231308549849, -0.1312555351834208, 0.4154247927948076, 0.942424910446394, -0.2538053374934961, -0.5335339257786111, -0.5141713950775648, -0.2333413875822694, 0.37446909502764303, -0.5360084286027411, 0.9620481217729815, 0.42695696162997465, 0.18614173090030062, -13.736356440639007]]]
7,401.666667
13,234
0.817699
2,203
22,205
8.241943
0.548343
0
0
0
0
0
0
0
0
0
0
0.86023
0.049809
22,205
3
13,235
7,401.666667
0.000332
0
0
0
0
0
0
0
0
0
0
0
0
1
0
false
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
1
0
0
1
1
1
1
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
5
6ec9ef49ca3ed6c0ee3215a82bab7fbf67cab6e0
3,258
py
Python
user_activities/serializers.py
chopdgd/django-user-activities
fbd950e3d72675f5bedf26a95541907f89c6759a
[ "MIT" ]
1
2018-03-07T09:35:47.000Z
2018-03-07T09:35:47.000Z
user_activities/serializers.py
chopdgd/django-user-activities
fbd950e3d72675f5bedf26a95541907f89c6759a
[ "MIT" ]
200
2017-12-29T15:02:16.000Z
2022-03-28T14:48:19.000Z
user_activities/serializers.py
chopdgd/django-user-activities
fbd950e3d72675f5bedf26a95541907f89c6759a
[ "MIT" ]
1
2021-05-26T12:00:13.000Z
2021-05-26T12:00:13.000Z
# -*- coding: utf-8 -*- from django.contrib.auth import get_user_model from django.contrib.contenttypes.models import ContentType from genomix.fields import ContentRelatedField, DisplayChoiceField, UserRelatedField from rest_framework import serializers from . import choices, fields, models class ActivitySerializer(serializers.ModelSerializer): """Serializer for list and detail requests for User Activities.""" user = UserRelatedField(queryset=get_user_model().objects.all()) activity_type = DisplayChoiceField(choices=choices.ACTIVITY_TYPES) class Meta: model = models.Activity fields = ( 'id', 'activity_type', 'active', 'user', 'created', 'modified', ) class ActivitySerializerCreateOrEdit(serializers.ModelSerializer): """Serializer for POST, PUT, PATCH requests for User Activities.""" user = UserRelatedField(queryset=get_user_model().objects.all()) activity_type = DisplayChoiceField(choices=choices.ACTIVITY_TYPES) content_type = ContentRelatedField(queryset=ContentType.objects.all()) class Meta: model = models.Activity fields = ( 'id', 'activity_type', 'active', 'content_type', 'object_id', 'user', 'created', 'modified', ) class CommentSerializer(serializers.ModelSerializer): """Serializer for list and detail requests for User Comments.""" user = UserRelatedField(queryset=get_user_model().objects.all()) tags = serializers.StringRelatedField(many=True) class Meta: model = models.Comment fields = ( 'id', 'text', 'active', 'tags', 'user', 'created', 'modified', ) class CommentSerializerCreateOrEdit(serializers.ModelSerializer): """Serializer for POST, PUT, PATCH requests for User Comments.""" user = UserRelatedField(queryset=get_user_model().objects.all()) content_type = ContentRelatedField(queryset=ContentType.objects.all()) class Meta: model = models.Comment fields = ( 'id', 'text', 'active', 'content_type', 'object_id', 'user', 'created', 'modified', ) class ReviewSerializer(serializers.ModelSerializer): """Serializer for list and detail requests for User Reviews.""" 
user = UserRelatedField(queryset=get_user_model().objects.all()) tags = serializers.StringRelatedField(many=True) rating = fields.RatingRelatedField(queryset=models.Rating.objects.all()) class Meta: model = models.Comment fields = ( 'id', 'text', 'active', 'rating', 'tags', 'user', 'created', 'modified', ) class ReviewSerializerCreateOrEdit(serializers.ModelSerializer): """Serializer for POST, PUT, PATCH requests for User Reviews.""" user = UserRelatedField(queryset=get_user_model().objects.all()) content_type = ContentRelatedField(queryset=ContentType.objects.all()) rating = fields.RatingRelatedField(queryset=models.Rating.objects.all()) class Meta: model = models.Review fields = ( 'id', 'text', 'rating', 'active', 'content_type', 'object_id', 'user', 'created', 'modified', )
32.58
84
0.66237
312
3,258
6.820513
0.208333
0.051692
0.039474
0.109962
0.794643
0.768327
0.768327
0.768327
0.74765
0.68609
0
0.000392
0.217925
3,258
99
85
32.909091
0.834772
0.116943
0
0.681818
0
0
0.100843
0
0
0
0
0
0
1
0
false
0
0.075758
0
0.484848
0
0
0
0
null
0
0
0
0
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
5
6edcf3dd7b103431b5d15e5f8f8593ad9853f95b
155
py
Python
src/kgmk/ds/gen/models/base.py
kagemeka/python
486ce39d97360b61029527bacf00a87fdbcf552c
[ "MIT" ]
null
null
null
src/kgmk/ds/gen/models/base.py
kagemeka/python
486ce39d97360b61029527bacf00a87fdbcf552c
[ "MIT" ]
null
null
null
src/kgmk/ds/gen/models/base.py
kagemeka/python
486ce39d97360b61029527bacf00a87fdbcf552c
[ "MIT" ]
null
null
null
from abc import ABCMeta, abstractmethod import dataclasses @dataclasses.dataclass class Base(metaclass=ABCMeta): ... # print(BaseModel.__name__)
15.5
39
0.76129
16
155
7.125
0.8125
0
0
0
0
0
0
0
0
0
0
0
0.148387
155
9
40
17.222222
0.863636
0.16129
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
0.4
0
0.6
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
5
6eec89724f3254cfc4cfbaa7d7e5dde541687a24
34
py
Python
jupyterlabpymolpysnips/Selection/selectName.py
MooersLab/pymolpysnips
50a89c85adf8006d85c1d6cd3f8aad7e440a0b92
[ "MIT" ]
null
null
null
jupyterlabpymolpysnips/Selection/selectName.py
MooersLab/pymolpysnips
50a89c85adf8006d85c1d6cd3f8aad7e440a0b92
[ "MIT" ]
null
null
null
jupyterlabpymolpysnips/Selection/selectName.py
MooersLab/pymolpysnips
50a89c85adf8006d85c1d6cd3f8aad7e440a0b92
[ "MIT" ]
null
null
null
cmd.do('select oxygen2, name O2')
17
33
0.705882
6
34
4
1
0
0
0
0
0
0
0
0
0
0
0.066667
0.117647
34
1
34
34
0.733333
0
0
0
0
0
0.676471
0
0
0
0
0
0
1
0
true
0
0
0
0
0
1
1
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
1
0
null
0
0
0
0
0
0
1
0
0
0
0
0
0
5
6eee451f586b7af3cbe1368c6ae1d0e74e19842f
87
py
Python
pytorch/projects/datasets/__init__.py
enpelonio/O-CNN
eb3484d2d708c5111c67cfe9d5759db0b95c4eb7
[ "MIT" ]
299
2019-05-27T02:18:56.000Z
2022-03-31T15:29:20.000Z
pytorch/projects/datasets/__init__.py
enpelonio/O-CNN
eb3484d2d708c5111c67cfe9d5759db0b95c4eb7
[ "MIT" ]
100
2019-05-07T03:17:01.000Z
2022-03-30T09:02:04.000Z
pytorch/projects/datasets/__init__.py
enpelonio/O-CNN
eb3484d2d708c5111c67cfe9d5759db0b95c4eb7
[ "MIT" ]
84
2019-05-17T17:44:06.000Z
2022-02-14T04:32:02.000Z
from .scannet import get_scannet_dataset from .completion import get_completion_dataset
43.5
46
0.896552
12
87
6.166667
0.5
0.243243
0
0
0
0
0
0
0
0
0
0
0.08046
87
2
46
43.5
0.925
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
0
0
null
1
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
0
0
0
5
42d309ecb8fd3b57548ba98df10afffa4299f639
384
py
Python
tests/test_builder.py
cyraxjoe/pypfop
7cb716f33a591878825ab8f2757f3bebd24ebc08
[ "Apache-2.0" ]
9
2015-03-11T07:42:50.000Z
2021-12-08T12:32:39.000Z
tests/test_builder.py
cyraxjoe/pypfop
7cb716f33a591878825ab8f2757f3bebd24ebc08
[ "Apache-2.0" ]
null
null
null
tests/test_builder.py
cyraxjoe/pypfop
7cb716f33a591878825ab8f2757f3bebd24ebc08
[ "Apache-2.0" ]
5
2019-06-05T17:22:28.000Z
2021-11-12T01:45:19.000Z
import unittest class TestBaseBuilder(unittest.TestCase): def test_get_timefile(self): pass def test_not_implemented_call(self): pass class TestSubProcessBuilder(unittest.TestCase): def test_call(self): pass class TestFopsBuilder(unittest.TestCase): def test_no_auth_call(self): pass def test_auth_call(self): pass
16
47
0.690104
45
384
5.644444
0.4
0.137795
0.188976
0.271654
0
0
0
0
0
0
0
0
0.239583
384
23
48
16.695652
0.869863
0
0
0.357143
0
0
0
0
0
0
0
0
0
1
0.357143
false
0.357143
0.071429
0
0.642857
0
0
0
0
null
0
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
1
0
0
1
0
0
5
42e49748c4eb13021958218609f5a83326dfb773
5,342
py
Python
azblobexplorer/base.py
akshaybabloo/azure-blob-explorer-python
ef28e8ac44984629276001803b763855ffd9209e
[ "MIT" ]
null
null
null
azblobexplorer/base.py
akshaybabloo/azure-blob-explorer-python
ef28e8ac44984629276001803b763855ffd9209e
[ "MIT" ]
23
2019-05-06T07:10:27.000Z
2021-06-25T15:19:06.000Z
azblobexplorer/base.py
akshaybabloo/azure-blob-explorer-python
ef28e8ac44984629276001803b763855ffd9209e
[ "MIT" ]
1
2019-06-10T23:56:47.000Z
2019-06-10T23:56:47.000Z
from datetime import timedelta, datetime from azure.storage.blob import BlobServiceClient, BlobSasPermissions, generate_blob_sas class BlobBase: """ .. versionadded:: 2.0 """ def __init__(self, account_name: str, account_key: str, container_name: str): """ :param str account_name: Azure storage account name. :param str account_key: Azure storage key. :param str container_name: Azure storage container name, URL will be added automatically. """ self.account_name = account_name self.account_key = account_key self.container_name = container_name block_blob_service = BlobServiceClient.from_connection_string( f"DefaultEndpointsProtocol=https;AccountName={self.account_name};AccountKey={self.account_key};EndpointSuffix=core.windows.net") self.container_client = block_blob_service.get_container_client(self.container_name) def generate_url(self, blob_name: str, read: bool = True, add: bool = False, create: bool = False, write: bool = False, delete: bool = False, sas: bool = False, access_time: int = 1) -> str: """ Generate's blob URL. It can also generate Shared Access Signature (SAS) if ``sas=True``. :param bool write: Write access .. versionadded:: 2.0 :param bool create: Create access .. versionadded:: 2.0 :param bool add: Add access .. versionadded:: 2.0 :param bool read: Read access .. versionadded:: 2.0 :param bool delete: Delete access .. 
versionadded:: 2.0 :param int access_time: Time till the URL is valid :param str blob_name: Name of the blob, this could be a path :param bool sas: Set ``True`` to generate SAS key :return: Blob URL **Example without ``sas``** >>> import os >>> from azblobexplorer import AzureBlobDownload >>> az = AzureBlobDownload('account name', 'account key', 'container name') >>> az.generate_url("filename.txt") https://containername.blob.core.windows.net/blobname/filename.txt **Example with ``upload_to`` and ``sas``** >>> import os >>> from azblobexplorer import AzureBlobDownload >>> az = AzureBlobDownload('account name', 'account key', 'container name') >>> az.generate_url("filename.txt", sas=True) https://containername.blob.core.windows.net/blobname/filename.txt?se=2019-11-05T16%3A33%3A46Z&sp=w&sv=2019-02-02&sr=b&sig=t%2BpUG2C2FQKp/Hb8SdCsmaZCZxbYXHUedwsquItGx%2BM%3D """ blob = self.container_client.get_blob_client(blob_name) if sas: sas_token = generate_blob_sas( blob.account_name, blob.container_name, blob.blob_name, account_key=blob.credential.account_key, permission=BlobSasPermissions(read, add, create, write, delete), expiry=datetime.utcnow() + timedelta(hours=access_time) ) return blob.url + '?' + sas_token else: return blob.url def generate_url_mime(self, blob_name: str, mime_type: str, sas: bool = False, read: bool = True, add: bool = False, create: bool = False, write: bool = False, delete: bool = False, access_time: int = 1) -> str: """ Generate's blob URL with MIME type. It can also generate Shared Access Signature (SAS) if ``sas=True``. :param bool write: Write access .. versionadded:: 2.0 :param bool create: Create access .. versionadded:: 2.0 :param bool add: Add access .. versionadded:: 2.0 :param bool read: Read access .. versionadded:: 2.0 :param bool delete: Delete access .. 
versionadded:: 2.0 :param int access_time: Time till the URL is valid :param str blob_name: Name of the blob :param int access_time: Time till the URL is valid :param str mime_type: MIME type of the application :param bool sas: Set ``True`` to generate SAS key :return: Blob URL >>> import os >>> from azblobexplorer import AzureBlobDownload >>> az = AzureBlobDownload('account name', 'account key', 'container name') >>> az.generate_url_mime("filename.zip", sas=True, mime_type="application/zip") https://containername.blob.core.windows.net/blobname/filename.zip?se=2019-11-05T16%3A33%3A46Z&sp=w&sv=2019-02-02&sr=b&sig=t%2BpUG2C2FQKp/Hb8SdCsmaZCZxbYXHUedwsquItGx%2BM%3D """ blob = self.container_client.get_blob_client(blob_name) if sas: sas_token = generate_blob_sas( blob.account_name, blob.container_name, blob.blob_name, account_key=blob.credential.account_key, permission=BlobSasPermissions(read, add, create, write, delete), expiry=datetime.utcnow() + timedelta(hours=access_time), content_type=mime_type ) return blob.url + '?' + sas_token else: return blob.url
36.340136
180
0.606702
624
5,342
5.065705
0.190705
0.037963
0.048719
0.063271
0.712749
0.712749
0.712749
0.712749
0.696299
0.637457
0
0.022228
0.292587
5,342
146
181
36.589041
0.814237
0.471172
0
0.511628
1
0.023256
0.054264
0.053402
0
0
0
0
0
1
0.069767
false
0
0.046512
0
0.232558
0
0
0
0
null
0
0
0
0
1
1
1
0
1
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
5
42fe723f0c87e0f37d604eb8a80f15671c29d4fc
179
py
Python
www/landing/views.py
mattvenn/cursivedata
43e43263bef6f01698166d87bcff00b246957277
[ "CC-BY-4.0" ]
1
2015-12-31T01:46:09.000Z
2015-12-31T01:46:09.000Z
www/landing/views.py
mattvenn/cursivedata
43e43263bef6f01698166d87bcff00b246957277
[ "CC-BY-4.0" ]
5
2015-04-06T19:15:15.000Z
2015-07-25T21:45:16.000Z
www/landing/views.py
mattvenn/cursivedata
43e43263bef6f01698166d87bcff00b246957277
[ "CC-BY-4.0" ]
null
null
null
from django.contrib.auth.decorators import login_required from django.shortcuts import redirect @login_required def landing(requests): return redirect('cursivedata:index')
19.888889
57
0.815642
22
179
6.545455
0.727273
0.138889
0
0
0
0
0
0
0
0
0
0
0.111732
179
8
58
22.375
0.90566
0
0
0
0
0
0.095506
0
0
0
0
0
0
1
0.2
false
0
0.4
0.2
0.8
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
1
1
1
0
0
5
6e0697bfcd6b6e12f244a2dc31b34b81d44a95c9
99
py
Python
hyp3lib/util.py
lidar-nevermore/hyp3-lib
bfb2487b6cbc0029eb689bc059890e71b7f60d3c
[ "BSD-3-Clause" ]
4
2020-07-16T18:29:57.000Z
2021-11-23T00:35:50.000Z
hyp3lib/util.py
lidar-nevermore/hyp3-lib
bfb2487b6cbc0029eb689bc059890e71b7f60d3c
[ "BSD-3-Clause" ]
16
2020-07-13T23:55:09.000Z
2022-03-23T08:05:10.000Z
hyp3lib/util.py
lidar-nevermore/hyp3-lib
bfb2487b6cbc0029eb689bc059890e71b7f60d3c
[ "BSD-3-Clause" ]
7
2020-07-13T23:28:43.000Z
2022-03-19T13:39:14.000Z
"""Small utility functions""" def string_is_true(s: str) -> bool: return s.lower() == 'true'
16.5
35
0.626263
14
99
4.285714
0.857143
0
0
0
0
0
0
0
0
0
0
0
0.181818
99
5
36
19.8
0.740741
0.232323
0
0
0
0
0.057143
0
0
0
0
0
0
1
0.5
false
0
0
0.5
1
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
0
0
1
1
0
0
5
6e0bebb54e4327e9e6434bf685f59bb33749f95e
688
py
Python
utils/errors.py
YangWanjun/areaparking
b08bc9b8f8d5f602d823115263b9d040edb9f245
[ "Apache-2.0" ]
1
2018-08-02T04:00:44.000Z
2018-08-02T04:00:44.000Z
utils/errors.py
YangWanjun/areaparking
b08bc9b8f8d5f602d823115263b9d040edb9f245
[ "Apache-2.0" ]
null
null
null
utils/errors.py
YangWanjun/areaparking
b08bc9b8f8d5f602d823115263b9d040edb9f245
[ "Apache-2.0" ]
null
null
null
# coding: UTF-8 class MyBaseException(Exception): def __init__(self, message): self.message = message class FileNotExistException(MyBaseException): def __init__(self, message=""): MyBaseException.__init__(self, message) class SettingException(MyBaseException): """ 設定はしてない場合、または設定間違った場合発生する例外 """ def __init__(self, message=""): MyBaseException.__init__(self, message) class CustomException(MyBaseException): def __init__(self, message=""): MyBaseException.__init__(self, message) class OperationFinishedException(MyBaseException): def __init__(self, message=""): MyBaseException.__init__(self, message)
21.5
50
0.707849
59
688
7.644068
0.271186
0.243902
0.299335
0.199557
0.558758
0.558758
0.558758
0.558758
0.558758
0.301552
0
0.001779
0.18314
688
31
51
22.193548
0.800712
0.061047
0
0.533333
0
0
0
0
0
0
0
0
0
1
0.333333
false
0
0
0
0.666667
0
0
0
0
null
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
0
0
0
1
0
0
5
6e14545eefdd4041a01a56f9630738dc4622e845
40
py
Python
astoria/managers/__init__.py
sedders123/astoria
d1e9603b10d765aed4c1237e79b5ab48a9af1d83
[ "MIT" ]
1
2021-02-03T02:54:54.000Z
2021-02-03T02:54:54.000Z
astoria/managers/__init__.py
sedders123/astoria
d1e9603b10d765aed4c1237e79b5ab48a9af1d83
[ "MIT" ]
72
2020-12-15T18:29:18.000Z
2022-03-08T09:42:53.000Z
astoria/managers/__init__.py
sedders123/astoria
d1e9603b10d765aed4c1237e79b5ab48a9af1d83
[ "MIT" ]
2
2022-02-05T23:00:51.000Z
2022-03-09T21:40:49.000Z
"""Executable components of Astoria."""
20
39
0.725
4
40
7.25
1
0
0
0
0
0
0
0
0
0
0
0
0.1
40
1
40
40
0.805556
0.825
0
null
0
null
0
0
null
0
0
0
null
1
null
true
0
0
null
null
null
1
1
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
1
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
0
0
0
0
0
5
6e360284353bd2a6d21e27991043a052889b2ae7
97
py
Python
app/auth/__init__.py
NarayanAdithya/Portfolio2.0
691acbac1ad4220cb67c5e07a80bd401421f00d3
[ "MIT" ]
null
null
null
app/auth/__init__.py
NarayanAdithya/Portfolio2.0
691acbac1ad4220cb67c5e07a80bd401421f00d3
[ "MIT" ]
null
null
null
app/auth/__init__.py
NarayanAdithya/Portfolio2.0
691acbac1ad4220cb67c5e07a80bd401421f00d3
[ "MIT" ]
null
null
null
from flask import Blueprint auth = Blueprint('auth', __name__) from . import routes, models
12.125
34
0.731959
12
97
5.583333
0.666667
0.38806
0
0
0
0
0
0
0
0
0
0
0.185567
97
7
35
13.857143
0.848101
0
0
0
0
0
0.042105
0
0
0
0
0
0
1
0
false
0
0.666667
0
0.666667
0.666667
1
0
0
null
1
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
1
0
0
1
0
5
28103959ca12a6b1be45c0272c4bb6596d0e6bdd
439
py
Python
openpecha/core/ids.py
ta4tsering/openpecha-toolkit
ff24b4813fb8146a4327e746e4024890b6807bea
[ "Apache-2.0" ]
1
2021-12-08T04:47:40.000Z
2021-12-08T04:47:40.000Z
openpecha/core/ids.py
ta4tsering/openpecha-toolkit
ff24b4813fb8146a4327e746e4024890b6807bea
[ "Apache-2.0" ]
38
2019-11-12T10:49:25.000Z
2021-04-07T12:10:24.000Z
openpecha/core/ids.py
ta4tsering/openpecha-toolkit
ff24b4813fb8146a4327e746e4024890b6807bea
[ "Apache-2.0" ]
6
2019-11-14T12:30:35.000Z
2020-05-12T01:50:13.000Z
import random from uuid import uuid4 def get_uuid(): return uuid4().hex def get_id(prefix, length): return prefix + "".join(random.choices(uuid4().hex, k=length)).upper() def get_pecha_id(): return get_id(prefix="P", length=8) def get_work_id(): return get_id(prefix="W", length=8) def get_alignment_id(): return get_id(prefix="A", length=8) def get_collection_id(): return get_id(prefix="C", length=8)
16.259259
74
0.678815
71
439
4
0.352113
0.126761
0.193662
0.183099
0.267606
0
0
0
0
0
0
0.019178
0.168565
439
26
75
16.884615
0.758904
0
0
0
0
0
0.009112
0
0
0
0
0
0
1
0.428571
false
0
0.142857
0.428571
1
0
0
0
0
null
0
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
0
0
1
1
0
0
5
2815f413e2c49f703e6d4bc1cbc80f06f3f4c9e9
7,488
py
Python
savings_rules.py
davidjohnoliver/IncomeForecast
f638a16a3bccb576f7977f9ea3fc08047c96ecce
[ "MIT" ]
null
null
null
savings_rules.py
davidjohnoliver/IncomeForecast
f638a16a3bccb576f7977f9ea3fc08047c96ecce
[ "MIT" ]
null
null
null
savings_rules.py
davidjohnoliver/IncomeForecast
f638a16a3bccb576f7977f9ea3fc08047c96ecce
[ "MIT" ]
null
null
null
import model from typing import Callable def get_simple_linear(initial_rrsp: float, final_rrsp: float, initial_year: int, career_length_yrs: int): """ Sets the split between RRSP and TFSA as a linear function of time. s[y] = a + b*(y - y_0), where s = RRSP allotment (normalized), a = initial_rrsp (normalized value), b = (final_rrsp - initial_rrsp) / career_length_yrs, y_0 = initial_year, y = current year """ return get_simple_linear_func(lambda: initial_rrsp, lambda: final_rrsp, initial_year, career_length_yrs, None) def get_simple_linear_func(initial_rrsp_func: Callable[[], float], final_rrsp_func: Callable[[], float], initial_year: int, career_length_yrs: int, fail_func: Callable[[], None]): """ Sets the split between RRSP and TFSA as a linear function of time. Takes generator functions for initial_rrsp and final_rrsp to facilitate optimization. s[y] = a + b*(y - y_0), where s = RRSP allotment (normalized), initial_rrsp = initial_rrsp_func(), final_rrsp = final_rrsp_func(), a = initial_rrsp (normalized value), b = (final_rrsp - initial_rrsp) / career_length_yrs, y_0 = initial_year, y = current year """ def simple_linear(deltas: model.deltas_state, previous_funds: model.funds_state, previous_deltas: model.deltas_state): initial_rrsp = initial_rrsp_func() final_rrsp = final_rrsp_func() if not (0 <= initial_rrsp <= 1): if fail_func != None: fail_func("savings_rules.simple_linear: initial_rrsp must be between 0 and 1") else: raise ValueError("initial_rrsp must be between 0 and 1") if not (0 <= final_rrsp <= 1): if fail_func != None: fail_func("savings_rules.simple_linear: final_rrsp must be between 0 and 1") else: raise ValueError("final_rrsp must be between 0 and 1") slope = (final_rrsp - initial_rrsp) / career_length_yrs years_elapsed = deltas.year - initial_year if not (0 <= years_elapsed <= career_length_yrs): raise ValueError(f"{deltas.year} lies outside the allowed range of years for the rule (initial year={initial_year}, career length={career_length_yrs})") 
rrsp_norm = initial_rrsp + slope * years_elapsed is_in_bounds = 0 <= rrsp_norm <= 1 if fail_func != None and not is_in_bounds: fail_func("savings_rules.simple_linear: interpolated RRSP must be between 0 and 1") else: assert is_in_bounds tfsa_norm = 1 - rrsp_norm output = deltas.update_rrsp(deltas.undifferentiated_savings * rrsp_norm) output = output.update_tfsa(deltas.undifferentiated_savings * tfsa_norm) return output return simple_linear def get_simple_retirement_deduction(retirement_year: int, year_of_death: int): """ Deduct from savings to cover retirement income. The split between RRSP and TFSA is made by a simple heuristic which tries to keep a constant level of RRSP withdrawals, to minimize marginal tax. """ def simple_retirement_deduction(deltas: model.deltas_state, previous_funds: model.funds_state, previous_deltas: model.deltas_state): years_elapsed = deltas.year - retirement_year years_remaining = year_of_death - deltas.year if (years_elapsed < 0 or deltas.year > year_of_death): raise ValueError(f"{deltas.year} lies outside the allowed range of years for the rule (initial year={retirement_year}, final year={year_of_death})") # spending = -deltas.undifferentiated_savings # We expect undifferentiated_savings to be a negative value, with contributions from # spending (retirement income) + tax owed on last year's RRSP withdrawal remaining_rrsp = previous_funds.rrsp_savings rrsp_allotment = remaining_rrsp / (years_remaining + 1) # Try to distribute RRSP withdrawals evenly to minimize marginal tax rrsp_withdrawal = max(min(spending, rrsp_allotment), 0) # Don't let the RRSP go below 0. This is mainly to try to cut down on weird edge # cases; if final savings is below 0 for any given run we don't care that much, the outer simulation will simply discard that run. 
tfsa_withdrawal = spending - rrsp_withdrawal output = deltas.update_rrsp(-rrsp_withdrawal) output = output.update_tfsa(-tfsa_withdrawal) return output return simple_retirement_deduction def get__linear_retirement_deduction_func(initial_rrsp_func: Callable[[], float], final_rrsp_func: Callable[[], float], initial_year: int, retirement_length_yrs: int, fail_func: Callable[[], None]): """ Deduct from savings to cover retirement income. The split between RRSP and TFSA is made as a linear function of time. Takes generator functions for initial_rrsp and final_rrsp to facilitate optimization. """ inner_rule = get_simple_linear_func(initial_rrsp_func, final_rrsp_func, initial_year, retirement_length_yrs, fail_func) def checked_rule(deltas: model.deltas_state, previous_funds: model.funds_state, previous_deltas: model.deltas_state): output = inner_rule(deltas, previous_funds, previous_deltas) if (previous_funds.rrsp_savings + deltas.rrsp < 0): # fail_func("savings_rules.linear_retirement_deduction: RRSP must not go below 0") return output return checked_rule def get_adjusted_heuristic_retirement_deduction(retirement_year: int, year_of_death: int, rrsp_adjustment_func: Callable[[], float]): """ Deduct from savings to cover retirement income. The split between RRSP and TFSA is made by a simple heuristic which tries to keep a constant level of RRSP withdrawals, to minimize marginal tax, adjusted by an optimizable constant proportional offset. 
""" def simple_retirement_deduction(deltas: model.deltas_state, previous_funds: model.funds_state, previous_deltas: model.deltas_state): years_elapsed = deltas.year - retirement_year years_remaining = year_of_death - deltas.year if (years_elapsed < 0 or deltas.year > year_of_death): raise ValueError(f"{deltas.year} lies outside the allowed range of years for the rule (initial year={retirement_year}, final year={year_of_death})") # spending = -deltas.undifferentiated_savings # We expect undifferentiated_savings to be a negative value, with contributions from # spending (retirement income) + tax owed on last year's RRSP withdrawal remaining_rrsp = previous_funds.rrsp_savings rrsp_allotment = remaining_rrsp / (years_remaining + 1) # Try to distribute RRSP withdrawals evenly to minimize marginal tax rrsp_proportional_adjustment = rrsp_adjustment_func() * spending rrsp_allotment += rrsp_proportional_adjustment # Apply the adjustment, we clamp to spending in the next line rrsp_withdrawal = max(min(spending, rrsp_allotment), 0) # Don't let the RRSP go below 0. This is mainly to try to cut down on weird edge # cases; if final savings is below 0 for any given run we don't care that much, the outer simulation will simply discard that run. tfsa_withdrawal = spending - rrsp_withdrawal output = deltas.update_rrsp(-rrsp_withdrawal) output = output.update_tfsa(-tfsa_withdrawal) return output return simple_retirement_deduction
54.26087
198
0.713942
1,027
7,488
4.963973
0.148004
0.043154
0.023539
0.034523
0.781483
0.776775
0.770498
0.743821
0.714594
0.694978
0
0.005785
0.215144
7,488
137
199
54.656934
0.861664
0.301816
0
0.479452
0
0.041096
0.140873
0.047153
0
0
0
0
0.013699
1
0.123288
false
0
0.027397
0
0.273973
0
0
0
0
null
0
0
0
0
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
5
2825fd3e57f5c2a7e5902ace5215131e649760b5
129
py
Python
AutoWatch/home/forms.py
openInfra-project/AutoWatch-Manage-Server
b33af2edb673e3607a57209211e910a1055c21fb
[ "MIT" ]
null
null
null
AutoWatch/home/forms.py
openInfra-project/AutoWatch-Manage-Server
b33af2edb673e3607a57209211e910a1055c21fb
[ "MIT" ]
2
2021-07-05T13:10:26.000Z
2021-07-05T13:46:43.000Z
AutoWatch/home/forms.py
openInfra-project/AutoWatch-Mange-Server
b33af2edb673e3607a57209211e910a1055c21fb
[ "MIT" ]
null
null
null
from django import forms class Login(forms.Form): username = forms.CharField(max_length=32) password = forms.CharField()
25.8
45
0.744186
17
129
5.588235
0.764706
0.294737
0
0
0
0
0
0
0
0
0
0.018349
0.155039
129
5
46
25.8
0.853211
0
0
0
0
0
0
0
0
0
0
0
0
1
0
false
0.25
0.25
0
1
0
1
0
0
null
1
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
1
0
0
1
0
0
5
28990948c87d032adcfe09cf8753b67bbc36ab1f
179
py
Python
indextest.py
title848/ALPR
001d13ce552b4bdaf7773a2948bba91b05d9cd71
[ "Unlicense" ]
1
2022-02-03T16:29:16.000Z
2022-02-03T16:29:16.000Z
indextest.py
title848/ALPR
001d13ce552b4bdaf7773a2948bba91b05d9cd71
[ "Unlicense" ]
null
null
null
indextest.py
title848/ALPR
001d13ce552b4bdaf7773a2948bba91b05d9cd71
[ "Unlicense" ]
null
null
null
a = [100,2,1,3,5,200,300,400,2,3,0,1,23,24,56,30,500,20,1000,3000] #print(a.index(max(a))) b = [] for i in range(0,9): b.append(a.index(max(a))) a[a.index(max(a))] = 0 print(b)
25.571429
66
0.592179
47
179
2.255319
0.595745
0.169811
0.254717
0.283019
0
0
0
0
0
0
0
0.273292
0.100559
179
7
67
25.571429
0.385093
0.122905
0
0
0
0
0
0
0
0
0
0
0
1
0
false
0
0
0
0
0.166667
0
0
0
null
0
1
1
0
0
0
0
0
0
0
1
0
0
1
0
0
1
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
5
958411bf1e36a57f4010dd6f5504ebf12ee2f498
79
py
Python
aiortp/__init__.py
vodik/aiortp
7dcce55dfe2d86735a46d071e2059483957a85cc
[ "Apache-2.0" ]
6
2018-06-13T14:32:25.000Z
2021-07-08T22:46:39.000Z
aiortp/__init__.py
vodik/aiortp
7dcce55dfe2d86735a46d071e2059483957a85cc
[ "Apache-2.0" ]
3
2017-08-25T19:41:27.000Z
2019-11-20T22:31:54.000Z
aiortp/__init__.py
vodik/aiortp
7dcce55dfe2d86735a46d071e2059483957a85cc
[ "Apache-2.0" ]
5
2017-08-25T19:32:13.000Z
2020-09-04T10:33:44.000Z
from .scheduler import RTPScheduler from .sources import AudioFile, DTMF, Tone
26.333333
42
0.822785
10
79
6.5
0.8
0
0
0
0
0
0
0
0
0
0
0
0.126582
79
2
43
39.5
0.942029
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
5
9586931d00c7587a10771baa1f1242158fe50406
52
py
Python
CondCore/ESSources/python/CondDBESSource_condDBv2_cfi.py
gputtley/cmssw
c1ef8454804e4ebea8b65f59c4a952a6c94fde3b
[ "Apache-2.0" ]
6
2017-09-08T14:12:56.000Z
2022-03-09T23:57:01.000Z
CondCore/ESSources/python/CondDBESSource_condDBv2_cfi.py
gputtley/cmssw
c1ef8454804e4ebea8b65f59c4a952a6c94fde3b
[ "Apache-2.0" ]
545
2017-09-19T17:10:19.000Z
2022-03-07T16:55:27.000Z
CondCore/ESSources/python/CondDBESSource_condDBv2_cfi.py
gputtley/cmssw
c1ef8454804e4ebea8b65f59c4a952a6c94fde3b
[ "Apache-2.0" ]
14
2017-10-04T09:47:21.000Z
2019-10-23T18:04:45.000Z
from CondCore.ESSources.CondDBESSource_cfi import *
26
51
0.865385
6
52
7.333333
1
0
0
0
0
0
0
0
0
0
0
0
0.076923
52
1
52
52
0.916667
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
5
9592f49a86b27d73ffe468d2c60aba1409816a3c
203
py
Python
API/apis/institutions.py
CASDON-MYSTERY/studentapp
0fd942e963a10a02a6c9f358dd362cfd646eecc3
[ "MIT" ]
null
null
null
API/apis/institutions.py
CASDON-MYSTERY/studentapp
0fd942e963a10a02a6c9f358dd362cfd646eecc3
[ "MIT" ]
null
null
null
API/apis/institutions.py
CASDON-MYSTERY/studentapp
0fd942e963a10a02a6c9f358dd362cfd646eecc3
[ "MIT" ]
null
null
null
from institutions.serializers import Institution_Serializer from institutions.models import Institution from rest_framework import permissions,generics,status from rest_framework.response import Response
50.75
59
0.901478
24
203
7.5
0.541667
0.177778
0.188889
0
0
0
0
0
0
0
0
0
0.073892
203
4
60
50.75
0.957447
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
0
0
null
0
1
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
0
0
0
5
9599c4f2f00b812c99c43f927279898069757c15
2,328
py
Python
redes/views.py
arthurcordeiro/redes-sociais-cd
1764fb34a71ef25b8c4d0859ec9208e7ab07a6ff
[ "MIT" ]
null
null
null
redes/views.py
arthurcordeiro/redes-sociais-cd
1764fb34a71ef25b8c4d0859ec9208e7ab07a6ff
[ "MIT" ]
null
null
null
redes/views.py
arthurcordeiro/redes-sociais-cd
1764fb34a71ef25b8c4d0859ec9208e7ab07a6ff
[ "MIT" ]
null
null
null
from django.shortcuts import render from django.http import HttpResponse,JsonResponse import csv import json from django.views.generic import TemplateView class TwitterView(TemplateView): template_name = "pages/twitter.html" def get_context_data(self, **kwargs): final_array = [] with open('experimento_twitter/static/_csv/twitter.csv') as csvfile: reader = csv.DictReader(csvfile) for row in reader: final_array.append( { "id do Tweet": row['id do Tweet'], "link permanente do Tweet": row['link permanente do Tweet'], "texto do Tweet": row['texto do Tweet'].replace("'",'').replace('"', ''), "impressões": row['impressões'], "horário": row['horário'], "interações": row['interações'], "respostas": row['respostas'], "taxa de envolvimento": float(row['interações']) / float(row['impressões']), } ) return {'content':(str(final_array).replace("'", '"'))} class FacebookView(TemplateView): template_name = "pages/facebook.html" def get_context_data(self, **kwargs): final_array = [] with open('experimento_twitter/static/_csv/twitter.csv') as csvfile: reader = csv.DictReader(csvfile) for row in reader: final_array.append( { "id do Tweet": row['id do Tweet'], "link permanente do Tweet": row['link permanente do Tweet'], "texto do Tweet": row['texto do Tweet'].replace("'",'').replace('"', ''), "impressões": row['impressões'], "horário": row['horário'], "interações": row['interações'], "respostas": row['respostas'], "taxa de envolvimento": float(row['interações']) / float(row['impressões']), } ) return {'content':(str(final_array).replace("'", '"'))}
45.647059
104
0.478522
199
2,328
5.517588
0.301508
0.076503
0.054645
0.076503
0.781421
0.781421
0.781421
0.781421
0.781421
0.781421
0
0
0.3939
2,328
51
105
45.647059
0.778172
0
0
0.666667
0
0
0.242593
0.036926
0
0
0
0
0
1
0.044444
false
0
0.111111
0
0.288889
0
0
0
0
null
0
0
0
0
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
5
95f916a0d2f23768a5613ddf564dc02ba2c599ac
478
py
Python
ggit_platform/admin.py
girlsgoit/GirlsGoIT
447cd15c44ebee4af9e942a079d681be8683239f
[ "MIT" ]
1
2019-02-27T21:20:54.000Z
2019-02-27T21:20:54.000Z
ggit_platform/admin.py
girlsgoit/GirlsGoIT
447cd15c44ebee4af9e942a079d681be8683239f
[ "MIT" ]
null
null
null
ggit_platform/admin.py
girlsgoit/GirlsGoIT
447cd15c44ebee4af9e942a079d681be8683239f
[ "MIT" ]
null
null
null
from django.contrib import admin from markdownx.admin import MarkdownxModelAdmin from .models import Event from .models import Member from .models import MemberRole from .models import Region from .models import Story from .models import Track admin.site.register(Track, MarkdownxModelAdmin) admin.site.register(Region) admin.site.register(Member) admin.site.register(MemberRole) admin.site.register(Event, MarkdownxModelAdmin) admin.site.register(Story, MarkdownxModelAdmin)
28.117647
47
0.83682
61
478
6.557377
0.262295
0.15
0.24
0.18
0
0
0
0
0
0
0
0
0.089958
478
16
48
29.875
0.91954
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
0.571429
0
0.571429
0
0
0
0
null
0
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
5
250ea4e03b8b1268b9a90d4402788970e95adcac
92
py
Python
models/segmentation/unet_pre_trained_contrastive/__init__.py
neurips2021vat/Variance-Aware-Training
2dcd017ef06e81e299448bdd9da65fa682835127
[ "BSD-2-Clause" ]
null
null
null
models/segmentation/unet_pre_trained_contrastive/__init__.py
neurips2021vat/Variance-Aware-Training
2dcd017ef06e81e299448bdd9da65fa682835127
[ "BSD-2-Clause" ]
null
null
null
models/segmentation/unet_pre_trained_contrastive/__init__.py
neurips2021vat/Variance-Aware-Training
2dcd017ef06e81e299448bdd9da65fa682835127
[ "BSD-2-Clause" ]
null
null
null
from models.segmentation.unet_pre_trained_contrastive.model import Model # pyflakes.ignore
46
91
0.869565
12
92
6.416667
0.916667
0
0
0
0
0
0
0
0
0
0
0
0.076087
92
1
92
92
0.905882
0.163043
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
5
25124e86c291383341b9cd07b20408f005edb9b6
35,719
py
Python
tests/test_op_env.py
apiology/op-env
8707f26c65e98c1739a97a0e358574d77144594a
[ "MIT" ]
11
2021-02-03T14:50:47.000Z
2021-11-09T15:23:40.000Z
tests/test_op_env.py
apiology/op-env
8707f26c65e98c1739a97a0e358574d77144594a
[ "MIT" ]
2
2021-12-14T19:06:56.000Z
2022-01-15T16:35:30.000Z
tests/test_op_env.py
apiology/op_env
84269db5ca58801bff3ea4f0ab56b6ac74132806
[ "MIT" ]
null
null
null
#!/usr/bin/env python """Tests for `op_env` package.""" import argparse import io import json import os import subprocess import sys import tempfile from typing import Dict from unittest.mock import ANY, call, patch import pytest import yaml import op_env from op_env._cli import Arguments, main, parse_argv, process_args from op_env.op import ( _do_env_lookups, _do_title_lookups, _fields_from_title, _op_fields_to_try, _op_pluck_correct_field, EnvVarName, FieldName, FieldValue, InvalidTagOPLookupError, NoEntriesOPLookupError, NoFieldValueOPLookupError, Title, TooManyEntriesOPLookupError, ) @pytest.fixture def list_of_number_yaml_file(): with tempfile.NamedTemporaryFile(mode="w+t") as yaml_file: contents = [123, 456] yaml.dump(contents, yaml_file) yaml_file.flush() yield yaml_file.name @pytest.fixture def number_yaml_file(): with tempfile.NamedTemporaryFile(mode="w+t") as yaml_file: contents = 123 yaml.dump(contents, yaml_file) yaml_file.flush() yield yaml_file.name @pytest.fixture def string_yaml_file(): with tempfile.NamedTemporaryFile(mode="w+t") as yaml_file: contents = 'foo' yaml.dump(contents, yaml_file) yaml_file.flush() yield yaml_file.name @pytest.fixture def object_yaml_file(): with tempfile.NamedTemporaryFile(mode="w+t") as yaml_file: contents = {'foo': 'bar'} yaml.dump(contents, yaml_file) yaml_file.flush() yield yaml_file.name @pytest.fixture def invalid_yaml_file(): with tempfile.NamedTemporaryFile(mode="w+t") as yaml_file: yaml_file.write('"') yaml_file.flush() yield yaml_file.name @pytest.fixture def empty_file(): with tempfile.NamedTemporaryFile(mode="w+t") as yaml_file: yield yaml_file.name @pytest.fixture def one_item_yaml_file(): with tempfile.NamedTemporaryFile(mode="w+t") as yaml_file: contents = ['VARA'] yaml.dump(contents, yaml_file) yaml_file.flush() yield yaml_file.name @pytest.fixture def two_item_yaml_file(): with tempfile.NamedTemporaryFile(mode="w+t") as yaml_file: contents = ['VAR1', 'VAR2'] yaml.dump(contents, yaml_file) 
yaml_file.flush() yield yaml_file.name @pytest.fixture def two_item_text_file(): with tempfile.NamedTemporaryFile(mode="w+t") as text_file: contents = ['TVAR1', 'TVAR2'] for item in contents: text_file.write(item) text_file.write("\n") text_file.flush() yield text_file.name @patch('op_env.op.subprocess', autospec=op_env.op.subprocess) def test_fields_from_title(subprocess) -> None: output = { 'overview': { 'tags': [ 'A1', 'B1' ] }, 'details': { 'fields': [ { 'name': 'a1', 'value': 'a1val', 'designation': 'password', 'type': 'P', } ], 'sections': [ { }, { 'fields': [ { 't': 'b1', 'v': 'b1val', } ] } ] } } output_json = json.dumps(output) subprocess.check_output.return_value = output_json.encode('utf-8') out = _fields_from_title(Title('title')) assert out == {'A1': 'a1val', 'B1': 'b1val'} subprocess.check_output.assert_called_with(['op', 'get', 'item', 'title']) @patch('op_env.op.subprocess', autospec=op_env.op.subprocess) @patch('op_env.op._fields_from_title', autospec=op_env.op._fields_from_title) def test_do_title_lookups_both_titles_not_found(_fields_from_title, subprocess): _fields_from_title.return_value = {} out = _do_title_lookups(['abc', 'def']) assert out == {} _fields_from_title.assert_has_calls([call('abc'), call('def')]) @patch('op_env.op.subprocess', autospec=op_env.op.subprocess) @patch('op_env.op._fields_from_title', autospec=op_env.op._fields_from_title) def test_do_title_lookups_one_title_not_found(_fields_from_title, subprocess): _fields_from_title.side_effect = [{'A1': 'a1val'}, {}] out = _do_title_lookups(['abc', 'def']) assert out == {'A1': 'a1val'} _fields_from_title.assert_has_calls([call('abc'), call('def')]) @patch('op_env.op.subprocess', autospec=op_env.op.subprocess) @patch('op_env.op._fields_from_title', autospec=op_env.op._fields_from_title) def test_do_title_lookups_one_title_one_env_var(_fields_from_title, subprocess): _fields_from_title.return_value = {'A1': 'a1val'} out = _do_title_lookups(['abc']) assert out == {'A1': 'a1val'} 
_fields_from_title.assert_called_once_with('abc') @patch('op_env.op.subprocess', autospec=op_env.op.subprocess) @patch('op_env.op._fields_from_title', autospec=op_env.op._fields_from_title) def test_do_title_lookups_two_titles_no_env_vars(_fields_from_title, subprocess): _fields_from_title.return_value = {} out = _do_title_lookups(['abc', 'def']) assert out == {} _fields_from_title.assert_has_calls([call('abc'), call('def')]) @patch('op_env.op.subprocess', autospec=op_env.op.subprocess) @patch('op_env.op._fields_from_title', autospec=op_env.op._fields_from_title) def test_do_title_lookups_one_title_returns_no_env_vars(_fields_from_title, subprocess): _fields_from_title.return_value = {} out = _do_title_lookups(['abc']) assert out == {} _fields_from_title.assert_called_once_with('abc') @patch('op_env.op.subprocess', autospec=op_env.op.subprocess) @patch('op_env.op._fields_from_title', autospec=op_env.op._fields_from_title) def test_do_title_lookups_no_titles(_fields_from_title, subprocess): out = _do_title_lookups([]) assert out == {} _fields_from_title.assert_not_called() @patch('op_env.op._op_list_items', autospec=op_env.op._op_list_items) @patch('op_env.op._op_consolidated_fields', autospec=op_env.op._op_consolidated_fields) @patch('op_env.op._fields_from_list_output', autospec=op_env.op._fields_from_list_output) @patch('op_env.op._op_pluck_correct_field', autospec=op_env.op._op_pluck_correct_field) @patch('sys.stdout', new_callable=io.StringIO) def test_process_args_shows_json_with_simple_env(stdout_stringio, op_pluck_correct_field, op_get_item, op_consolidated_fields, op_list_items) -> None: list_items_output = op_list_items.return_value all_fields_to_seek = op_consolidated_fields.return_value Dict[EnvVarName, Dict[FieldName, FieldValue]] retval: Dict[EnvVarName, Dict[FieldName, FieldValue]] = { EnvVarName('a'): { FieldName('password'): FieldValue('1'), }} op_get_item.return_value = retval env_var_names = [EnvVarName('a')] args: Arguments = { 'operation': 
'json', 'environment': env_var_names, 'title': [], 'command': [] } op_pluck_correct_field.return_value = '1' process_args(args) assert stdout_stringio.getvalue() == '{"a": "1"}\n' op_list_items.assert_called_with(env_var_names) op_consolidated_fields.assert_called_with(env_var_names) op_pluck_correct_field.assert_called_with('a', {'password': '1'}) op_get_item.assert_called_with(list_items_output, env_var_names, all_fields_to_seek) @patch.dict(os.environ, {'ORIGINAL_ENV': 'TRUE'}, clear=True) @patch('op_env._cli.do_lookups', autospec=op_env._cli.do_lookups) @patch('op_env._cli.subprocess', autospec=op_env._cli.subprocess) def test_process_args_runs_simple_command_with_simple_env(subprocess, do_lookups): command = ['env'] args = {'operation': 'run', 'command': command, 'environment': ['a'], 'title': []} do_lookups.return_value = {'a': '1'} process_args(args) do_lookups.assert_called_with(['a'], []) subprocess.check_call.assert_called_with(command, env={'a': '1', 'ORIGINAL_ENV': 'TRUE'}) @patch.dict(os.environ, {'ORIGINAL_ENV': 'TRUE'}, clear=True) @patch('op_env._cli.do_lookups', autospec=op_env._cli.do_lookups) @patch('sys.stdout', new_callable=io.StringIO) def test_process_args_shows_env_with_variables_needing_escape(stdout_stringio, do_lookups): args = {'operation': 'sh', 'environment': ['a', 'c'], 'title': []} do_lookups.return_value = {'a': "'", 'c': 'd'} process_args(args) assert stdout_stringio.getvalue() == 'a=\'\'"\'"\'\'; export a\nc=d; export c\n' @patch.dict(os.environ, {'ORIGINAL_ENV': 'TRUE'}, clear=True) @patch('op_env._cli.do_lookups', autospec=op_env._cli.do_lookups) @patch('sys.stdout', new_callable=io.StringIO) def test_process_args_shows_env_with_multiple_variables(stdout_stringio, do_lookups): def fake_op_smart_lookup(k): return { 'a': 'b', 'c': 'd', }[k] do_lookups.return_value = {'a': 'b', 'c': 'd'} args = {'operation': 'sh', 'environment': ['a', 'c'], 'title': []} process_args(args) assert stdout_stringio.getvalue() == 'a=b; export a\nc=d; 
export c\n' @patch.dict(os.environ, {'ORIGINAL_ENV': 'TRUE'}, clear=True) @patch('op_env._cli.do_lookups', autospec=op_env._cli.do_lookups) @patch('sys.stdout', new_callable=io.StringIO) def test_process_args_shows_env_with_simple_env(stdout_stringio, do_lookups): do_lookups.return_value = {'a': 'b'} args = {'operation': 'sh', 'environment': ['a'], 'title': []} process_args(args) assert stdout_stringio.getvalue() == 'a=b; export a\n' @pytest.mark.skip(reason="need to mock op binary in test PATH") @patch.dict(os.environ, {'ORIGINAL_ENV': 'TRUE'}, clear=True) @patch('op_env.op.subprocess', autospec=op_env.op.subprocess) def test_process_args_runs_simple_command(subprocess): command = ['env'] args = {'operation': 'run', 'command': command, 'environment': [], 'title': []} process_args(args) subprocess.check_call.assert_called_with(command, env={ 'ORIGINAL_ENV': 'TRUE', }) @patch('op_env.op.subprocess', autospec=op_env.op.subprocess) def test_process_args_rejects_non_run(subprocess): with pytest.raises(ValueError): args = {'operation': 'definitely-not-run'} process_args(args) @patch('op_env.op.subprocess', autospec=op_env.op.subprocess) def test_fields_to_try_breaks_on_double_underscore_and_underscore(subprocess): out = _op_fields_to_try('ABC__FLOOGLE_BAR') assert out == ['abc__floogle_bar', 'floogle_bar', 'bar'] @patch('op_env.op.subprocess', autospec=op_env.op.subprocess) def test_fields_to_try_breaks_on_double_underscore(subprocess): out = _op_fields_to_try('ABC__FLOOGLE') assert out == ['abc__floogle', 'floogle'] @patch('op_env.op.subprocess', autospec=op_env.op.subprocess) def test_fields_to_try_conversion_username(subprocess): out = _op_fields_to_try('ABC_USER') assert out == ['abc_user', 'user', 'username'] @patch('op_env.op.subprocess', autospec=op_env.op.subprocess) def test_fields_to_try_multiple_words_inc_password_password(subprocess): out = _op_fields_to_try('ABC_PASSWORD') assert out == ['abc_password', 'password'] @patch('op_env.op.subprocess', 
autospec=op_env.op.subprocess) def test_fields_to_try_multiple_words_inc_password_passwd(subprocess): out = _op_fields_to_try('ABC_PASSWD') assert out == ['abc_passwd', 'passwd', 'password'] @patch('op_env.op.subprocess', autospec=op_env.op.subprocess) def test_fields_to_try_multiple_words_inc_password_pass(subprocess): out = _op_fields_to_try('ABC_PASS') assert out == ['abc_pass', 'pass', 'password'] @patch('op_env.op.subprocess', autospec=op_env.op.subprocess) def test_fields_to_try_multiple_words(subprocess): out = _op_fields_to_try('ABC_FLOOGLE') assert out == ['abc_floogle', 'floogle'] @patch('op_env.op.subprocess', autospec=op_env.op.subprocess) def test_fields_to_try_simple(subprocess): out = _op_fields_to_try('ABC') assert out == ['abc'] @patch('op_env.op.subprocess', autospec=op_env.op.subprocess) def test_op_do_env_lookups_multiple_entries(subprocess): list_output_data = [ { "uuid": "dummy", "trashed": "N", "itemVersion": 2, "vaultUuid": "dummy", "overview": { "tags": ["ANY_TEST_VALUE"] } }, { "uuid": "dummy", "trashed": "N", "itemVersion": 2, "vaultUuid": "dummy", "overview": { "tags": ["ANOTHER_TEST_VALUE"] } } ] list_output = json.dumps(list_output_data).encode('utf-8') get_output_data = [ { "any_test_value": "something", "another_test_value": "something else", "value": "" }, { "any_test_value": "", "another_test_value": "another", "value": "" } ] get_output = "\n".join([ json.dumps(get_output_item) for get_output_item in get_output_data ]).encode('utf-8') subprocess.check_output.side_effect = [ list_output, get_output, ] out = _do_env_lookups(['ANY_TEST_VALUE', 'ANOTHER_TEST_VALUE']) subprocess.check_output.\ assert_has_calls([call(['op', 'list', 'items', '--tags', 'ANY_TEST_VALUE,ANOTHER_TEST_VALUE']), call(['op', 'get', 'item', '-', '--fields', 'another_test_value,any_test_value,value'], input=ANY)]) kwargs = subprocess.check_output.call_args[1] get_item_input = kwargs['input'] assert json.loads(get_item_input) == list_output_data assert out == { 
'ANY_TEST_VALUE': 'something', 'ANOTHER_TEST_VALUE': 'another' } @patch('op_env.op.subprocess', autospec=op_env.op.subprocess) def test_do_env_lookups_no_tags(subprocess): assert {} == _do_env_lookups([]) subprocess.check_output.assert_not_called() @patch('op_env.op.subprocess', autospec=op_env.op.subprocess) def test_do_env_lookups_no_field_value(subprocess): list_output_data = [ { "overview": { "tags": ["ANY_TEST_VALUE"] }, "trashed": "N", "vaultUuid": "dummy", "itemVersion": 2, "uuid": "dummy", } ] list_output = json.dumps(list_output_data).encode('utf-8') get_output = b'{"password":""}\n' subprocess.check_output.side_effect = [ list_output, get_output, ] with pytest.raises(NoFieldValueOPLookupError, match=('1Passsword entry with tag ANY_TEST_VALUE ' 'has no value for the fields tried: ' 'any_test_value, value. ' 'Please populate one of these fields in 1Password.')): _do_env_lookups(['ANY_TEST_VALUE']) subprocess.check_output.\ assert_has_calls([call(['op', 'list', 'items', '--tags', 'ANY_TEST_VALUE']), call(['op', 'get', 'item', '-', '--fields', 'any_test_value,value'], input=ANY)]) kwargs = subprocess.check_output.call_args[1] get_item_input = kwargs['input'] assert json.loads(get_item_input) == list_output_data @patch('op_env.op.subprocess', autospec=op_env.op.subprocess) def test_do_env_lookups_too_few_entries(subprocess): list_output = b"[]" subprocess.check_output.return_value = list_output with pytest.raises(NoEntriesOPLookupError, match='No 1Password entries with tag ANY_TEST_VALUE found'): _do_env_lookups(['ANY_TEST_VALUE']) subprocess.check_output.\ assert_called_with(['op', 'list', 'items', '--tags', 'ANY_TEST_VALUE']) @patch('op_env.op.subprocess', autospec=op_env.op.subprocess) def test_do_env_lookups_too_many_entries(subprocess): list_output_data = [ { "uuid": "dummy", "trashed": "N", "itemVersion": 2, "vaultUuid": "dummy", "overview": { "tags": ["ANY_TEST_VALUE"] } }, { "uuid": "dummy", "trashed": "N", "itemVersion": 2, "vaultUuid": "dummy", 
"overview": { "tags": ["ANY_TEST_VALUE"] } } ] list_output = json.dumps(list_output_data).encode('utf-8') subprocess.check_output.return_value = list_output with pytest.raises(TooManyEntriesOPLookupError, match='Too many 1Password entries with tag ANY_TEST_VALUE'): _do_env_lookups(['ANY_TEST_VALUE']) subprocess.check_output.\ assert_called_with(['op', 'list', 'items', '--tags', 'ANY_TEST_VALUE']) @patch('op_env.op.subprocess', autospec=op_env.op.subprocess) def test_op_do_env_lookups_comma_in_env(subprocess): list_output = b'[{"overview": {"tags": ["ANY_TEST_VALUE"]}}]' get_output = b'{"any_test_value":"","value":""}\n' subprocess.check_output.side_effect = [ list_output, get_output, ] with pytest.raises(InvalidTagOPLookupError, match='1Password does not support tags with commas'): _do_env_lookups(['ENV_WITH_,_IN_IT']) subprocess.check_output.assert_not_called() @patch('op_env.op.subprocess', autospec=op_env.op.subprocess) def test_op_do_env_lookups_one_var(subprocess): list_output_data = [ { "uuid": "dummy", "trashed": "N", "itemVersion": 2, "vaultUuid": "dummy", "overview": { "tags": ["ANY_TEST_VALUE"] } } ] list_output = json.dumps(list_output_data).encode('utf-8') get_output = b'{"any_test_value":"v1","value":""}\n' subprocess.check_output.side_effect = [ list_output, get_output, ] out = _do_env_lookups(['ANY_TEST_VALUE']) subprocess.check_output.\ assert_has_calls([call(['op', 'list', 'items', '--tags', 'ANY_TEST_VALUE']), call(['op', 'get', 'item', '-', '--fields', 'any_test_value,value'], input=ANY)]) kwargs = subprocess.check_output.call_args[1] get_item_input = kwargs['input'] assert json.loads(get_item_input) == list_output_data assert out == {'ANY_TEST_VALUE': 'v1'} @patch('op_env.op._op_fields_to_try', autospec=_op_fields_to_try) def test_op_pluck_correct_field_multiple_fields(op_fields_to_try): op_fields_to_try.return_value = ['floogle', 'blah'] ret = _op_pluck_correct_field('ENVVARNAME', {'blah': '', 'floogle': 'result value'}) 
op_fields_to_try.assert_called_with('ENVVARNAME') assert ret == 'result value' @patch('op_env.op._op_fields_to_try', autospec=_op_fields_to_try) def test_op_pluck_correct_field_multiple_fields_all_errors(op_fields_to_try): op_fields_to_try.return_value = ['floogle', 'blah'] with pytest.raises(NoFieldValueOPLookupError, match=('1Passsword entry with tag ' 'ENVVARNAME has no value for ' 'the fields tried: ' "floogle, blah. Please populate " 'one of these fields in 1Password.')): _op_pluck_correct_field('ENVVARNAME', {'floogle': '', 'blah': ''}) op_fields_to_try.assert_called_with('ENVVARNAME') @patch('op_env.op._op_fields_to_try', autospec=_op_fields_to_try) def test_op_pluck_correct_field_single_field_with_error(op_fields_to_try): op_fields_to_try.return_value = ['floogle'] with pytest.raises(NoFieldValueOPLookupError): _op_pluck_correct_field('ENVVARNAME', {'floogle': ''}) op_fields_to_try.assert_called_with('ENVVARNAME') @patch('op_env.op._op_fields_to_try', autospec=_op_fields_to_try) def test_op_pluck_correct_field_multiple_fields_chooses_second(op_fields_to_try): op_fields_to_try.return_value = ['floogle', 'blah'] ret = _op_pluck_correct_field('ENVVARNAME', {'floogle': '', 'blah': 'result value'}) op_fields_to_try.assert_called_with('ENVVARNAME') assert ret == 'result value' @patch('op_env.op._op_fields_to_try', autospec=_op_fields_to_try) def test_op_pluck_correct_field_chooses_first(op_fields_to_try): op_fields_to_try.return_value = ['floogle'] ret = _op_pluck_correct_field('ENVVARNAME', {'floogle': 'myvalue'}) op_fields_to_try.assert_called_with('ENVVARNAME') assert ret == 'myvalue' def test_parse_args_json_operation_no_env_variables(): argv = ['op-env', 'json'] args = parse_argv(argv) assert args == {'environment': [], 'title': [], 'operation': 'json'} def test_parse_args_run_operation_with_long_name_specified(): argv = ['op-env', 'run', '--title', 'foo:bar', 'mycmd'] args = parse_argv(argv) assert args == {'command': ['mycmd'], 'environment': [], 'title': 
['foo:bar'], 'operation': 'run'} def test_parse_args_run_operation_with_multiple_name_specified(): argv = ['op-env', 'run', '-t', 'foo: bar', '-t' 'bing: baz', 'mycmd'] args = parse_argv(argv) assert args == {'command': ['mycmd'], 'title': ['foo: bar', 'bing: baz'], 'environment': [], 'operation': 'run'} def test_parse_args_run_operation_with_name_specified(): argv = ['op-env', 'run', '-t', 'foo: bar', 'mycmd'] args = parse_argv(argv) assert args == {'command': ['mycmd'], 'title': ['foo: bar'], 'environment': [], 'operation': 'run'} def test_parse_args_run_operation_with_long_env_variables(): argv = ['op-env', 'run', '-e', 'DUMMY', '--environment', 'DUMMY2', 'mycmd'] args = parse_argv(argv) assert args == {'command': ['mycmd'], 'environment': ['DUMMY', 'DUMMY2'], 'title': [], 'operation': 'run'} def test_parse_args_run_operation_no_env_variables(): argv = ['op-env', 'run', 'mycmd'] args = parse_argv(argv) assert args == {'command': ['mycmd'], 'environment': [], 'title': [], 'operation': 'run'} def test_parse_args_run_operation_with_multiple_environment_arguments(): argv = ['op-env', 'run', '-e', 'DUMMY', '-e', 'DUMMY2', 'mycmd'] args = parse_argv(argv) assert args == {'command': ['mycmd'], 'environment': ['DUMMY', 'DUMMY2'], 'title': [], 'operation': 'run'} def test_parse_args_run_operation_with_environment_arguments(): argv = ['op-env', 'run', '-e', 'DUMMY', 'mycmd', '1', '2', '3'] args = parse_argv(argv) assert args == {'command': ['mycmd', '1', '2', '3'], 'environment': ['DUMMY'], 'title': [], 'operation': 'run'} def test_parse_args_run_operation_with_multiple_yaml_and_environment_arguments(one_item_yaml_file, two_item_yaml_file): argv = ['op-env', 'run', '-e', 'VAR_1', '-e', 'VAR0', '-y', two_item_yaml_file, '-y', one_item_yaml_file, 'mycmd', '1', '2', '3'] args = parse_argv(argv) assert args == {'command': ['mycmd', '1', '2', '3'], 'environment': ['VAR_1', 'VAR0', 'VAR1', 'VAR2', 'VARA'], 'title': [], 'operation': 'run'} def 
test_parse_args_run_operation_with_yaml_arguments_and_environment_arguments(two_item_yaml_file): argv = ['op-env', 'run', '-e', 'VAR0', '-y', two_item_yaml_file, 'mycmd', '1', '2', '3'] args = parse_argv(argv) assert args == {'command': ['mycmd', '1', '2', '3'], 'environment': ['VAR0', 'VAR1', 'VAR2'], 'title': [], 'operation': 'run'} def test_parse_args_run_operation_with_yaml_arguments_and_text_environment_arguments( two_item_yaml_file, two_item_text_file ): argv = ['op-env', 'run', '-e', 'VAR0', '-y', two_item_yaml_file, '-f', two_item_text_file, 'mycmd', '1', '2', '3'] args = parse_argv(argv) assert args == {'command': ['mycmd', '1', '2', '3'], 'environment': ['VAR0', 'VAR1', 'VAR2', 'TVAR1', 'TVAR2'], 'title': [], 'operation': 'run'} def test_parse_args_run_operation_with_text_arguments_and_environment_arguments(two_item_text_file): argv = ['op-env', 'run', '-e', 'VAR0', '-f', two_item_text_file, 'mycmd', '1', '2', '3'] args = parse_argv(argv) assert args == {'command': ['mycmd', '1', '2', '3'], 'environment': ['VAR0', 'TVAR1', 'TVAR2'], 'title': [], 'operation': 'run'} def test_list_of_numbers_yaml_argument(list_of_number_yaml_file): argv = ['op-env', 'run', '-y', list_of_number_yaml_file, 'mycmd', '1', '2', '3'] with pytest.raises(argparse.ArgumentTypeError, match='YAML file must contain a list of strings'): parse_argv(argv) def test_parse_args_run_operation_with_number_file_yaml_argument(number_yaml_file): argv = ['op-env', 'run', '-y', number_yaml_file, 'mycmd', '1', '2', '3'] with pytest.raises(argparse.ArgumentTypeError, match='YAML file must be a list; found'): parse_argv(argv) def test_parse_args_run_operation_with_string_file_yaml_argument(string_yaml_file): argv = ['op-env', 'run', '-y', string_yaml_file, 'mycmd', '1', '2', '3'] with pytest.raises(argparse.ArgumentTypeError, match='YAML file must be a list; found'): parse_argv(argv) def test_parse_args_run_operation_with_object_file_yaml_argument(object_yaml_file): argv = ['op-env', 'run', '-y', 
object_yaml_file, 'mycmd', '1', '2', '3'] with pytest.raises(argparse.ArgumentTypeError, match='YAML file must be a list; found'): parse_argv(argv) def test_parse_args_run_operation_with_invalid_file_yaml_argument(invalid_yaml_file): argv = ['op-env', 'run', '-y', invalid_yaml_file, 'mycmd', '1', '2', '3'] with pytest.raises(yaml.scanner.ScannerError): parse_argv(argv) def test_parse_args_run_operation_with_empty_file_yaml_argument(empty_file): argv = ['op-env', 'run', '-y', empty_file, 'mycmd', '1', '2', '3'] args = parse_argv(argv) assert args == {'command': ['mycmd', '1', '2', '3'], 'environment': [], 'title': [], 'operation': 'run'} def test_parse_args_run_operation_with_empty_file_text_argument(empty_file): argv = ['op-env', 'run', '-f', empty_file, 'mycmd', '1', '2', '3'] args = parse_argv(argv) assert args == {'command': ['mycmd', '1', '2', '3'], 'environment': [], 'title': [], 'operation': 'run'} def test_parse_args_run_operation_with_text_argument(two_item_text_file): argv = ['op-env', 'run', '-f', two_item_text_file, 'mycmd', '1', '2', '3'] args = parse_argv(argv) assert args == {'command': ['mycmd', '1', '2', '3'], 'environment': ['TVAR1', 'TVAR2'], 'title': [], 'operation': 'run'} def test_parse_args_run_operation_with_yaml_argument(two_item_yaml_file): argv = ['op-env', 'run', '-y', two_item_yaml_file, 'mycmd', '1', '2', '3'] args = parse_argv(argv) assert args == {'command': ['mycmd', '1', '2', '3'], 'environment': ['VAR1', 'VAR2'], 'title': [], 'operation': 'run'} def test_parse_args_run_simple(): argv = ['op-env', 'run', '-e', 'DUMMY', 'mycmd'] args = parse_argv(argv) assert args == {'command': ['mycmd'], 'environment': ['DUMMY'], 'operation': 'run', 'title': []} def test_parse_args_sh_simple(): argv = ['op-env', 'sh', '-e', 'DUMMY'] args = parse_argv(argv) assert args == {'environment': ['DUMMY'], 'operation': 'sh', 'title': []} @pytest.mark.skip(reason="need to mock op binary in test PATH") def test_cli_run(): argv = ['op-env', 'run', '-e', 
'DUMMY', 'env'] expected_envvar = 'DUMMY=dummyvalue' actual_output = subprocess.check_output(argv).decode('utf-8') assert expected_envvar in actual_output def test_cli_help_run(): request_long_lines = {'COLUMNS': '999', 'LINES': '25'} env = {} env.update(os.environ) env.update(request_long_lines) expected_help = """usage: op-env run [-h] [--title TITLE] [--environment ENVVAR] [--yaml-environment YAMLENV] \ [--file-environment FILEENV] command [command ...] Run the specified command with the given environment variables positional arguments: command Command to run with the environment set from 1Password options: -h, --help show this help message and exit --title TITLE, -t TITLE title of 1Password item from which all tagged environment variable names \ will be set --environment ENVVAR, -e ENVVAR environment variable name to set, based on item with same tag in 1Password --yaml-environment YAMLENV, -y YAMLENV YAML config specifying a list of environment variable names to set --file-environment FILEENV, -f FILEENV Text config specifying environment variable names to set, one on each line """ if sys.version_info <= (3, 10): # 3.10 changed the wording a bit expected_help = expected_help.replace('options:', 'optional arguments:') # older python versions show arguments like this: actual_help = subprocess.check_output(['op-env', 'run', '--help'], env=env).decode('utf-8') assert actual_help == expected_help def test_cli_help_json(): request_long_lines = {'COLUMNS': '999', 'LINES': '25'} env = {} env.update(os.environ) env.update(request_long_lines) expected_help = """usage: op-env json [-h] [--title TITLE] [--environment ENVVAR] [--yaml-environment YAMLENV] \ [--file-environment FILEENV] Produce simple JSON on stdout mapping requested env variables to values options: -h, --help show this help message and exit --title TITLE, -t TITLE title of 1Password item from which all tagged environment variable names \ will be set --environment ENVVAR, -e ENVVAR environment variable name 
to set, based on item with same tag in 1Password --yaml-environment YAMLENV, -y YAMLENV YAML config specifying a list of environment variable names to set --file-environment FILEENV, -f FILEENV Text config specifying environment variable names to set, one on each line """ if sys.version_info <= (3, 10): # 3.10 changed the wording a bit expected_help = expected_help.replace('options:', 'optional arguments:') actual_help = subprocess.check_output(['op-env', 'json', '--help'], env=env).decode('utf-8') assert actual_help == expected_help def test_cli_help_sh(): request_long_lines = {'COLUMNS': '999', 'LINES': '25'} env = {} env.update(os.environ) env.update(request_long_lines) expected_help = """usage: op-env sh [-h] [--title TITLE] [--environment ENVVAR] [--yaml-environment YAMLENV] \ [--file-environment FILEENV] Produce commands on stdout that can be 'eval'ed to set variables in current shell options: -h, --help show this help message and exit --title TITLE, -t TITLE title of 1Password item from which all tagged environment variable names \ will be set --environment ENVVAR, -e ENVVAR environment variable name to set, based on item with same tag in 1Password --yaml-environment YAMLENV, -y YAMLENV YAML config specifying a list of environment variable names to set --file-environment FILEENV, -f FILEENV Text config specifying environment variable names to set, one on each line """ if sys.version_info <= (3, 10): # 3.10 changed the wording a bit expected_help = expected_help.replace('options:', 'optional arguments:') # older python versions show arguments like this: actual_help = subprocess.check_output(['op-env', 'sh', '--help'], env=env).decode('utf-8') assert actual_help == expected_help def test_cli_no_args(): expected_help = """usage: op-env [-h] {run,json,sh} ... 
op-env: error: the following arguments are required: operation """ request_long_lines = {'COLUMNS': '999', 'LINES': '25'} env = {} env.update(os.environ) env.update(request_long_lines) # older python versions show arguments like this: completed_process = subprocess.run(['op-env'], env=env, stderr=subprocess.PIPE) actual_help = completed_process.stderr.decode('utf-8') assert actual_help == expected_help assert completed_process.returncode == 2 def test_cli_help(): request_long_lines = {'COLUMNS': '999', 'LINES': '25'} env = {} env.update(os.environ) env.update(request_long_lines) expected_help = """usage: op-env [-h] {run,json,sh} ... positional arguments: {run,json,sh} run Run the specified command with the given environment variables json Produce simple JSON on stdout mapping requested env variables to values sh Produce commands on stdout that can be 'eval'ed to set variables in current shell options: -h, --help show this help message and exit """ if sys.version_info <= (3, 10): # 3.10 changed the wording a bit expected_help = expected_help.replace('options:', 'optional arguments:') # older python versions show arguments like this: actual_help = subprocess.check_output(['op-env', '--help'], env=env).decode('utf-8') assert actual_help == expected_help @patch('op_env._cli.parse_argv', autospec=parse_argv) @patch('op_env._cli.process_args', autospec=process_args) def test_main(process_args, parse_argv): argv = object() args = parse_argv.return_value assert process_args.return_value == main(argv) process_args.assert_called_with(args)
37.207292
116
0.60906
4,338
35,719
4.685108
0.073306
0.030506
0.025487
0.04015
0.81298
0.795808
0.771797
0.711917
0.672407
0.644361
0
0.008277
0.252443
35,719
959
117
37.24609
0.752865
0.010191
0
0.512821
0
0.030769
0.250672
0.021109
0
0
0
0
0.103846
1
0.094872
false
0.034615
0.017949
0.001282
0.114103
0
0
0
0
null
0
0
0
1
1
1
1
0
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
5
2524be1909041d3ceab5fccae4740147d447c279
897
py
Python
yqml/query.py
maztohir/YQML
6e5eb06f2869a7309614120cc56ac04f5a689c9f
[ "MIT" ]
2
2021-03-11T16:44:02.000Z
2021-04-17T17:57:38.000Z
yqml/query.py
maztohir/yqml
6e5eb06f2869a7309614120cc56ac04f5a689c9f
[ "MIT" ]
null
null
null
yqml/query.py
maztohir/yqml
6e5eb06f2869a7309614120cc56ac04f5a689c9f
[ "MIT" ]
null
null
null
from .keys import Keys from .select import Select # from .cte import Cte class Query: def is_key_exist(self, key): return self._content.get(key) != None def is_key_not_exist(self, key): return self._content.get(key) == None def __init__(self, dict): self._content = dict def is_cte(self): return self.is_key_exist(Keys.CTE) def is_simple_selection(self): return self.is_key_not_exist(Keys.CTE) and self.is_key_exist(Keys.SELECT) def get_raw_sql(self): if self.is_simple_selection(): return Select(self._content).to_sql() # if self.is_cte(): # return Cte(self._content) #TODO: def is_scripting(self): pass def is_merge(self): pass def is_insert(self): pass def is_update(self): pass def is_delete(self): pass
20.386364
81
0.60981
127
897
4.031496
0.259843
0.087891
0.085938
0.101563
0.291016
0.164063
0.164063
0.164063
0.164063
0.164063
0
0
0.292085
897
44
82
20.386364
0.806299
0.081382
0
0.192308
0
0
0
0
0
0
0
0.022727
0
1
0.423077
false
0.192308
0.076923
0.153846
0.730769
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
1
0
0
1
0
1
0
1
1
0
0
5
255ea5e43e5739e9c05b188a138f55d580b44bf6
159
py
Python
pubclouds/boxdotnet/__init__.py
mk-fg/tahoe-lafs-public-clouds
84e61e1742db08f1868e09a6ec77b762d41f85c2
[ "WTFPL" ]
21
2015-01-23T04:39:54.000Z
2020-04-07T17:39:55.000Z
pubclouds/boxdotnet/__init__.py
mk-fg/tahoe-lafs-public-clouds
84e61e1742db08f1868e09a6ec77b762d41f85c2
[ "WTFPL" ]
null
null
null
pubclouds/boxdotnet/__init__.py
mk-fg/tahoe-lafs-public-clouds
84e61e1742db08f1868e09a6ec77b762d41f85c2
[ "WTFPL" ]
2
2020-06-29T15:56:51.000Z
2021-08-21T07:28:37.000Z
from allmydata.storage.backends.cloud.boxdotnet.boxdotnet_container import configure_boxdotnet_container configure_container = configure_boxdotnet_container
31.8
104
0.90566
17
159
8.117647
0.529412
0.391304
0.391304
0
0
0
0
0
0
0
0
0
0.056604
159
4
105
39.75
0.92
0
0
0
0
0
0
0
0
0
0
0
0
1
0
false
0
0.5
0
0.5
0
1
0
0
null
1
1
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
1
0
0
0
0
5
25701013945952de391bb3dfde43c5dc5c3eb57d
16
py
Python
yusheng_project/hongpeng.py
YoungRainy/HY_coorprate
89661ad737863fd2b65cb8731f5c613e0ed13b99
[ "MIT" ]
null
null
null
yusheng_project/hongpeng.py
YoungRainy/HY_coorprate
89661ad737863fd2b65cb8731f5c613e0ed13b99
[ "MIT" ]
null
null
null
yusheng_project/hongpeng.py
YoungRainy/HY_coorprate
89661ad737863fd2b65cb8731f5c613e0ed13b99
[ "MIT" ]
null
null
null
print('change')
8
15
0.6875
2
16
5.5
1
0
0
0
0
0
0
0
0
0
0
0
0.0625
16
1
16
16
0.733333
0
0
0
0
0
0.375
0
0
0
0
0
0
1
0
true
0
0
0
0
1
1
1
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
0
0
0
1
0
5
c280616465d95168642cbfd7c62f5977ca85e5ca
139
py
Python
Chapter 01/args1_3_4.py
bpbpublications/Advance-Core-Python-Programming
8902ceb270f55c04c12e818032f90d641c14d7b1
[ "MIT" ]
null
null
null
Chapter 01/args1_3_4.py
bpbpublications/Advance-Core-Python-Programming
8902ceb270f55c04c12e818032f90d641c14d7b1
[ "MIT" ]
null
null
null
Chapter 01/args1_3_4.py
bpbpublications/Advance-Core-Python-Programming
8902ceb270f55c04c12e818032f90d641c14d7b1
[ "MIT" ]
null
null
null
def sum_func(a, *args): s = a+sum(args) print(s) sum_func(10) sum_func(10,20) sum_func(10,20,30) sum_func(10, 20, 30, 40)
15.444444
25
0.597122
29
139
2.689655
0.37931
0.448718
0.461538
0.423077
0.333333
0
0
0
0
0
0
0.185185
0.223022
139
8
26
17.375
0.537037
0
0
0
0
0
0
0
0
0
0
0
0
1
0.142857
false
0
0
0
0.142857
0.142857
1
0
0
null
1
1
1
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
5
c2a7b17ddb340f97a6871c4b2be062094d2f54ea
87
py
Python
sms_verification_android/__init__.py
atbaker/sms-verification-android-python
a88212611b6c2956a6869b7b1152813e2c5c0eca
[ "MIT" ]
2
2019-02-04T05:01:37.000Z
2019-12-04T11:36:44.000Z
sms_verification_android/__init__.py
atbaker/sms-verification-android-python
a88212611b6c2956a6869b7b1152813e2c5c0eca
[ "MIT" ]
67
2019-08-22T07:37:48.000Z
2021-12-31T18:30:10.000Z
sms_verification_android/__init__.py
atbaker/sms-verification-android-python
a88212611b6c2956a6869b7b1152813e2c5c0eca
[ "MIT" ]
null
null
null
from .sms_verification_android import app, setup_app from .sms_verify import SMSVerify
29
52
0.862069
13
87
5.461538
0.692308
0.197183
0
0
0
0
0
0
0
0
0
0
0.103448
87
2
53
43.5
0.910256
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
5
c2c932aff6edc3d0ed6f69b6736cca7e2f4cd32c
46
py
Python
app/load_report_service/load_report_exception.py
harvard-lts/drs-translation-service
8b448422c460c735860d3a64cf2ddc01f6e16eb3
[ "Apache-2.0" ]
null
null
null
app/load_report_service/load_report_exception.py
harvard-lts/drs-translation-service
8b448422c460c735860d3a64cf2ddc01f6e16eb3
[ "Apache-2.0" ]
3
2022-03-21T23:40:33.000Z
2022-03-28T01:35:01.000Z
app/load_report_service/load_report_exception.py
harvard-lts/drs-translation-service
8b448422c460c735860d3a64cf2ddc01f6e16eb3
[ "Apache-2.0" ]
null
null
null
class LoadReportException(Exception): pass
23
37
0.804348
4
46
9.25
1
0
0
0
0
0
0
0
0
0
0
0
0.130435
46
2
38
23
0.925
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0.5
0
0
0.5
0
1
1
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
1
0
0
0
0
0
5
6c4ec598bc27b4ba8689ce406bd6e1dadb1cb380
111
py
Python
myproject/__init__.py
charleyjoo/myproject
e0b04156e0148cf8c86768c73f66fa6b5795c0d4
[ "BSD-2-Clause" ]
null
null
null
myproject/__init__.py
charleyjoo/myproject
e0b04156e0148cf8c86768c73f66fa6b5795c0d4
[ "BSD-2-Clause" ]
null
null
null
myproject/__init__.py
charleyjoo/myproject
e0b04156e0148cf8c86768c73f66fa6b5795c0d4
[ "BSD-2-Clause" ]
null
null
null
print("Hello from __init__.py") from . import pronto_utils from . import basic_utils from .constant import pi
18.5
31
0.783784
17
111
4.764706
0.647059
0.246914
0
0
0
0
0
0
0
0
0
0
0.144144
111
5
32
22.2
0.852632
0
0
0
0
0
0.198198
0
0
0
0
0
0
1
0
true
0
0.75
0
0.75
0.25
1
0
0
null
1
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
0
0
0
5
6682a3366c2e60c1b79a821bbc17b957edb533cb
3,102
py
Python
tests/unit/crs/test_cr_wait_container_build.py
guvenbz/amazon-s3-find-and-forget
398f7d86d38068c8a9d77ddc9183758946c9dbe4
[ "Apache-2.0" ]
165
2020-05-29T08:12:17.000Z
2022-03-30T22:35:57.000Z
tests/unit/crs/test_cr_wait_container_build.py
guvenbz/amazon-s3-find-and-forget
398f7d86d38068c8a9d77ddc9183758946c9dbe4
[ "Apache-2.0" ]
101
2020-06-24T12:59:49.000Z
2022-03-28T13:32:15.000Z
tests/unit/crs/test_cr_wait_container_build.py
guvenbz/amazon-s3-find-and-forget
398f7d86d38068c8a9d77ddc9183758946c9dbe4
[ "Apache-2.0" ]
23
2020-06-18T10:53:49.000Z
2022-03-29T03:38:04.000Z
from datetime import datetime, timezone import boto3 import pytest from mock import patch, MagicMock, Mock from backend.lambdas.custom_resources.wait_container_build import create, poll, handler pytestmark = [pytest.mark.unit, pytest.mark.task] @patch("backend.lambdas.custom_resources.wait_container_build.s3_client") @patch("backend.lambdas.custom_resources.wait_container_build.ecr_client") def test_it_signal_readiness_when_image_ready(mock_ecr_client, mock_s3_client): event = { "ResourceProperties": { "ArtefactName": "build/s3f2.zip", "CodeBuildArtefactBucket": "codebuild-bucket", "ECRRepository": "ecr-repo", } } mock_ecr_client.describe_images.return_value = { "imageDetails": [ {"imagePushedAt": datetime(2020, 1, 6, 16, 12, 57, tzinfo=timezone.utc)} ] } mock_object = MagicMock() mock_s3_client.Object.return_value = mock_object mock_object.last_modified = datetime(2020, 1, 6, 16, 8, 51, tzinfo=timezone.utc) resp = poll(event, MagicMock()) assert resp @patch("backend.lambdas.custom_resources.wait_container_build.s3_client") @patch("backend.lambdas.custom_resources.wait_container_build.ecr_client") def test_it_keeps_polling_when_image_not_ready(mock_ecr_client, mock_s3_client): event = { "ResourceProperties": { "ArtefactName": "build/s3f2.zip", "CodeBuildArtefactBucket": "codebuild-bucket", "ECRRepository": "ecr-repo", } } mock_ecr_client.describe_images.return_value = { "imageDetails": [ {"imagePushedAt": datetime(2020, 1, 6, 14, 0, 13, tzinfo=timezone.utc)} ] } mock_object = MagicMock() mock_s3_client.Object.return_value = mock_object mock_object.last_modified = datetime(2020, 1, 6, 16, 8, 51, tzinfo=timezone.utc) resp = poll(event, MagicMock()) assert not resp @patch("backend.lambdas.custom_resources.wait_container_build.s3_client") @patch("backend.lambdas.custom_resources.wait_container_build.ecr_client") def test_it_keeps_polling_when_no_latest_image_found(mock_ecr_client, mock_s3_client): event = { "ResourceProperties": { "ArtefactName": "build/s3f2.zip", 
"CodeBuildArtefactBucket": "codebuild-bucket", "ECRRepository": "ecr-repo", } } e = boto3.client("ecr").exceptions.ImageNotFoundException mock_ecr_client.exceptions.ImageNotFoundException = e mock_ecr_client.describe_images.side_effect = Mock( side_effect=e({}, "ImageNotFoundException") ) mock_object = MagicMock() mock_s3_client.Object.return_value = mock_object mock_object.last_modified = datetime(2020, 1, 6, 16, 8, 51, tzinfo=timezone.utc) resp = poll(event, MagicMock()) assert not resp @patch("backend.lambdas.custom_resources.wait_container_build.helper") def test_it_delegates_to_cr_helper(cr_helper): handler(1, 2) cr_helper.assert_called_with(1, 2) def test_it_does_nothing_on_create(): assert create({}, MagicMock()) is None
33
87
0.704707
372
3,102
5.572581
0.244624
0.043415
0.077183
0.111915
0.76604
0.75205
0.75205
0.729378
0.729378
0.729378
0
0.030399
0.18343
3,102
93
88
33.354839
0.787998
0
0
0.541667
0
0
0.266925
0.171502
0
0
0
0
0.069444
1
0.069444
false
0
0.069444
0
0.138889
0
0
0
0
null
0
0
0
0
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
5
66956591f8800d8835ff18b4715a29cc8392c9ca
52
py
Python
tracker/models/recipient/__init__.py
jneuendorf/price_tracker
9cb6878613e7af52f049ddd80a7a5ae2ae21028b
[ "MIT" ]
null
null
null
tracker/models/recipient/__init__.py
jneuendorf/price_tracker
9cb6878613e7af52f049ddd80a7a5ae2ae21028b
[ "MIT" ]
null
null
null
tracker/models/recipient/__init__.py
jneuendorf/price_tracker
9cb6878613e7af52f049ddd80a7a5ae2ae21028b
[ "MIT" ]
null
null
null
from .call_me_bot import CallMeBotRecipient # NOQA
26
51
0.826923
7
52
5.857143
1
0
0
0
0
0
0
0
0
0
0
0
0.134615
52
1
52
52
0.911111
0.076923
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
5
669f35783bb32849e8cd2d980f4a041da007c9a4
7,123
py
Python
py_libgit/tests/unit/test_index.py
tony-yang/e-libgit
589e08717ef58317b044852185b4f0748b600d96
[ "Apache-2.0" ]
null
null
null
py_libgit/tests/unit/test_index.py
tony-yang/e-libgit
589e08717ef58317b044852185b4f0748b600d96
[ "Apache-2.0" ]
null
null
null
py_libgit/tests/unit/test_index.py
tony-yang/e-libgit
589e08717ef58317b044852185b4f0748b600d96
[ "Apache-2.0" ]
null
null
null
import unittest
from unittest.mock import MagicMock

import os
import shutil

from py_libgit.core.index import Index
from py_libgit.core.index_entry import IndexEntry
from py_libgit.core.repo import Repo
import py_libgit.settings_tests


class TestIndex(unittest.TestCase):
    """Tests for Index: on-disk index-file updates and pathname normalization."""

    # Fixture values shared by the index-update tests.
    PATHNAME = 'helloworld'
    CURRENT_SHA1 = '0' * 40          # all-zero sha used for fresh entries
    NEW_SHA1 = '0123456789' * 4
    UNIX_MODE = '10000644'

    def setUp(self):
        """Create a throwaway repo under /tmp and an Index bound to it."""
        self.repo_name = 'repotest'
        self.repo_dir = os.path.join('/tmp', self.repo_name)
        self.git_repo_root = os.path.join(self.repo_dir, '.git')
        # exist_ok guards against leftovers from a previous run that crashed
        # before tearDown could remove the directory.
        os.makedirs(self.git_repo_root, exist_ok=True)
        os.chdir(self.repo_dir)

        repo = MagicMock()
        repo.get_repo_root = MagicMock(return_value=self.git_repo_root)
        self.index = Index(repo)
        self.index_file = os.path.join(self.git_repo_root, 'index')

    def tearDown(self):
        shutil.rmtree(self.repo_dir, ignore_errors=True)

    def _read_index(self):
        """Return the full text of the on-disk index file."""
        with open(self.index_file, 'r') as f:
            return f.read()

    @classmethod
    def _entry_line(cls, pathname, current_sha1, new_sha1, unix_mode=None):
        """Render one index line exactly as Index serializes it."""
        if unix_mode is None:
            unix_mode = cls.UNIX_MODE
        return '{} {} {} {}\n'.format(pathname, current_sha1, new_sha1, unix_mode)

    def test_index_updated_correctly_when_no_entry(self):
        staging_content = [
            IndexEntry(self.PATHNAME, current_sha1=self.CURRENT_SHA1,
                       new_sha1=self.NEW_SHA1)
        ]
        self.index.update_index(staging_content)

        self.assertTrue(os.path.exists(self.index_file))
        self.assertEqual(
            self._read_index(),
            self._entry_line(self.PATHNAME, self.CURRENT_SHA1, self.NEW_SHA1),
            'The index content did not record the proper pathname')

    def test_duplicate_index_update_should_change_nothing(self):
        # current_sha1 is omitted on purpose: a fresh entry defaults to the
        # all-zero sha, which is what the expected line asserts below.
        staging_content = [IndexEntry(self.PATHNAME, new_sha1=self.NEW_SHA1)]
        self.index.update_index(staging_content)
        self.index.update_index(staging_content)

        self.assertTrue(os.path.exists(self.index_file))
        self.assertEqual(
            self._read_index(),
            self._entry_line(self.PATHNAME, self.CURRENT_SHA1, self.NEW_SHA1),
            'The index content should only contain one entry of the pathname')

    def test_adding_different_index_entry(self):
        pathname_2 = 'helloworld2'
        new_sha1_2 = '9876543210' * 4
        staging_content = [
            IndexEntry(self.PATHNAME, new_sha1=self.NEW_SHA1),
            IndexEntry(pathname_2, new_sha1=new_sha1_2),
        ]
        self.index.update_index(staging_content)

        expected = (
            self._entry_line(self.PATHNAME, self.CURRENT_SHA1, self.NEW_SHA1)
            + self._entry_line(pathname_2, self.CURRENT_SHA1, new_sha1_2)
        )
        self.assertEqual(self._read_index(), expected,
                         'The index content did not record both pathnames')

    # NOTE(review): "updaing" is a typo for "updating" in the two names below,
    # but they are kept as-is — renaming would change the published test IDs.
    def test_updaing_index_entry_new_sha1_should_work(self):
        new_sha1_2 = '9876543210' * 4
        self.index.update_index(
            [IndexEntry(self.PATHNAME, current_sha1=self.CURRENT_SHA1,
                        new_sha1=self.NEW_SHA1)])
        self.index.update_index(
            [IndexEntry(self.PATHNAME, current_sha1=self.CURRENT_SHA1,
                        new_sha1=new_sha1_2)])

        self.assertEqual(
            self._read_index(),
            self._entry_line(self.PATHNAME, self.CURRENT_SHA1, new_sha1_2),
            'The index content did not update the new_sha1 attributes properly')

    def test_updaing_index_entry_current_sha_should_work(self):
        current_sha1_new = '0123456789' * 4
        self.index.update_index(
            [IndexEntry(self.PATHNAME, current_sha1=self.CURRENT_SHA1,
                        new_sha1=self.NEW_SHA1)])
        self.index.update_index(
            [IndexEntry(self.PATHNAME, current_sha1=current_sha1_new,
                        new_sha1=self.NEW_SHA1)])

        self.assertEqual(
            self._read_index(),
            self._entry_line(self.PATHNAME, current_sha1_new, self.NEW_SHA1),
            'The index content did not update the current_sha1 attributes properly')

    def _assert_normalized(self, raw_suffix, expected_suffix, msg):
        """Normalize repo_dir/raw_suffix and compare to repo_name/expected_suffix."""
        normalized = self.index.normalize_pathname(
            os.path.join(self.repo_dir, raw_suffix))
        self.assertEqual(
            normalized, os.path.join(self.repo_name, expected_suffix), msg)

    def test_normalize_pathname_removes_single_dot_path(self):
        self._assert_normalized(
            'abc/./test', 'abc/test',
            'The single dot in the normalized path is not properly handled')

    def test_normalize_pathname_removes_double_dots_with_same_path(self):
        self._assert_normalized(
            'abc/test/../test', 'abc/test',
            'The double dot in the normalized path is not properly handled')

    def test_normalize_pathname_removes_double_dots_with_different_path(self):
        self._assert_normalized(
            'abc/test1/../test2', 'abc/test2',
            'The double dot in the normalized path is not properly handled')

    def test_normalize_pathname_removes_multiple_double_dots(self):
        self._assert_normalized(
            'abc/test1/test2/../../test3', 'abc/test3',
            'The double dot in the normalized path is not properly handled')

    def test_normalize_pathname_removes_multiple_double_dots_separately(self):
        self._assert_normalized(
            'abc/test1/../test2/../test3', 'abc/test3',
            'The double dot in the normalized path is not properly handled')

    def test_build_tracked_index_should_return_empty_dictionary_if_index_not_exists(self):
        self.assertEqual(
            self.index.build_tracked_index(), {},
            'Returned the incorrect tracked_index when index file not exists')
48.455782
166
0.706865
931
7,123
5.104189
0.131042
0.045665
0.037037
0.035354
0.791667
0.738847
0.734428
0.734428
0.719697
0.699705
0
0.040717
0.193177
7,123
146
167
48.787671
0.786149
0
0
0.517241
0
0
0.154991
0.007581
0
0
0
0
0.112069
1
0.112069
false
0
0.060345
0
0.181034
0
0
0
0
null
0
0
0
0
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
5
66cde30b39505ab4b029d63a010f61217ad4b68c
61
py
Python
src/python/zquantum/core/wip/circuit/conversions2/pyquil_conversions.py
kottmanj/z-quantum-core
21752e92e79aafedbfeb6e7ae196bdc2fd5803e4
[ "Apache-2.0" ]
null
null
null
src/python/zquantum/core/wip/circuit/conversions2/pyquil_conversions.py
kottmanj/z-quantum-core
21752e92e79aafedbfeb6e7ae196bdc2fd5803e4
[ "Apache-2.0" ]
null
null
null
src/python/zquantum/core/wip/circuit/conversions2/pyquil_conversions.py
kottmanj/z-quantum-core
21752e92e79aafedbfeb6e7ae196bdc2fd5803e4
[ "Apache-2.0" ]
null
null
null
# TODO: implement zquantum<->pyquil circuit conversions here
30.5
60
0.803279
7
61
7
1
0
0
0
0
0
0
0
0
0
0
0
0.114754
61
1
61
61
0.907407
0.95082
0
null
0
null
0
0
null
0
0
1
null
1
null
true
0
0
null
null
null
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
1
0
0
0
0
0
0
null
0
0
1
0
0
0
1
0
0
0
0
0
0
5
66d191301df195d6cfbd9af36834ee23a7cc3b94
81
py
Python
lighttext/__init__.py
smilelight/lightTEXT
b015d0e3524722fb5a8ee5ea83b7fbbd7408f797
[ "Apache-2.0" ]
12
2020-01-26T09:16:21.000Z
2021-12-06T06:44:37.000Z
lighttext/__init__.py
smilelight/lightTEXT
b015d0e3524722fb5a8ee5ea83b7fbbd7408f797
[ "Apache-2.0" ]
null
null
null
lighttext/__init__.py
smilelight/lightTEXT
b015d0e3524722fb5a8ee5ea83b7fbbd7408f797
[ "Apache-2.0" ]
7
2020-04-30T00:37:32.000Z
2021-07-07T06:32:40.000Z
from .component import * from .text_mining import * from .string_search import *
20.25
28
0.777778
11
81
5.545455
0.636364
0.327869
0
0
0
0
0
0
0
0
0
0
0.148148
81
3
29
27
0.884058
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
0
0
null
1
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
0
0
0
5
dd73c5c77e1b70c89971edb931c187169b864e39
36
py
Python
app/__init__.py
Raman325/broadlink-rm-rest-server
daca67d0eab9171f2e4b60b017f12d902e942389
[ "MIT" ]
9
2018-12-06T09:15:49.000Z
2021-04-02T08:35:17.000Z
app/__init__.py
Raman325/broadlink-rm-rest-server
daca67d0eab9171f2e4b60b017f12d902e942389
[ "MIT" ]
6
2020-09-18T18:07:45.000Z
2021-10-18T23:18:04.000Z
app/__init__.py
Raman325/broadlink-rm-rest-server
daca67d0eab9171f2e4b60b017f12d902e942389
[ "MIT" ]
6
2019-02-10T11:15:52.000Z
2021-06-16T05:16:24.000Z
"""Broadlink RM REST Server app."""
18
35
0.666667
5
36
4.8
1
0
0
0
0
0
0
0
0
0
0
0
0.138889
36
1
36
36
0.774194
0.805556
0
null
0
null
0
0
null
0
0
0
null
1
null
true
0
0
null
null
null
1
1
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
1
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
0
0
0
0
0
5
06c38a770028e34cda5ce1ce4344d52aaa732b99
126
py
Python
hiduu/_init_.py
patrickgagnon/HIDUU
601669207fe4da62e0ddbffb5799197d8296267c
[ "MIT" ]
1
2020-04-28T19:42:13.000Z
2020-04-28T19:42:13.000Z
hiduu/_init_.py
patrickgagnon/HIDUU
601669207fe4da62e0ddbffb5799197d8296267c
[ "MIT" ]
null
null
null
hiduu/_init_.py
patrickgagnon/HIDUU
601669207fe4da62e0ddbffb5799197d8296267c
[ "MIT" ]
null
null
null
"""This package is a wrapper for HealtheIntent Data Upload Utlity""" from .hiduu import( hiduu_upload, hiduu_upload_win )
25.2
68
0.761905
18
126
5.166667
0.777778
0.236559
0
0
0
0
0
0
0
0
0
0
0.15873
126
5
69
25.2
0.877358
0.492063
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
0.333333
0
0.333333
0
1
0
0
null
1
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
0
0
0
5
66098498b514a881b3efeef76b2012d7f0bb8e51
200
py
Python
takeyourmeds/settings/__init__.py
takeyourmeds/takeyourmeds-web
edf24188f26948902cfb69793b4d5aa3cf8b6dea
[ "MIT" ]
11
2015-06-01T16:31:42.000Z
2022-03-01T01:20:58.000Z
takeyourmeds/settings/__init__.py
takeyourmeds/takeyourmeds-web
edf24188f26948902cfb69793b4d5aa3cf8b6dea
[ "MIT" ]
111
2015-07-20T13:23:16.000Z
2017-09-08T08:17:10.000Z
takeyourmeds/settings/__init__.py
takeyourmeds/takeyourmeds-web
edf24188f26948902cfb69793b4d5aa3cf8b6dea
[ "MIT" ]
6
2015-07-15T08:08:12.000Z
2018-06-23T00:13:13.000Z
import sys from defaults import * if sys.argv[1:2] == ['test']: from roles.test import * else: from role import * try: from custom import * except ImportError: pass
14.285714
29
0.595
26
200
4.576923
0.653846
0
0
0
0
0
0
0
0
0
0
0.014493
0.31
200
13
30
15.384615
0.847826
0
0
0
0
0
0.02
0
0
0
0
0
0
1
0
true
0.1
0.6
0
0.6
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
1
1
0
1
0
0
5
b09b116a64df70a6261ccdbe23b5b76f42534c3d
23
py
Python
vuejspython/static/__init__.py
twitwi/vuejs-python
18ae4a8583748cbaf39e63aa5e7d97c9904257d5
[ "MIT" ]
18
2019-02-14T16:17:38.000Z
2022-03-07T21:02:53.000Z
vuejspython/static/__init__.py
louispuyo/vuejs-python
193672ef38dc243a8f755b123e1af0e087ae8af4
[ "MIT" ]
null
null
null
vuejspython/static/__init__.py
louispuyo/vuejs-python
193672ef38dc243a8f755b123e1af0e087ae8af4
[ "MIT" ]
4
2019-12-16T11:12:29.000Z
2022-01-19T10:55:57.000Z
# to help the packager
11.5
22
0.73913
4
23
4.25
1
0
0
0
0
0
0
0
0
0
0
0
0.217391
23
1
23
23
0.944444
0.869565
0
null
0
null
0
0
null
0
0
0
null
1
null
true
0
0
null
null
null
1
1
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
1
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
0
0
0
0
0
5
b0e5f7e71a2d9bf6b125e45af168757f8e4818f3
607
py
Python
magpie/randoms/__init__.py
knaidoo29/magpie
efab3c2666aab2c928ca12a631758bc1b43c149c
[ "MIT" ]
null
null
null
magpie/randoms/__init__.py
knaidoo29/magpie
efab3c2666aab2c928ca12a631758bc1b43c149c
[ "MIT" ]
null
null
null
magpie/randoms/__init__.py
knaidoo29/magpie
efab3c2666aab2c928ca12a631758bc1b43c149c
[ "MIT" ]
null
null
null
from .cart import randoms_1d from .cart import randoms_2d from .cart import randoms_3d from .pdf import pdf2cdf from .pdf import randoms_cdf from .pdf import randoms_pdf from .polar import randoms_polar from .sphere import randoms_sphere_r from .sphere import randoms_sphere from .subsample import shuffle from .subsample import random_draw from .subsample import random_prob_draw from .subsample import stochastic_integer_weights from .subsample import stochastic_binary_weights from .usphere import randoms_usphere from .usphere import _randoms_healpix_xy from .usphere import randoms_healpix_pixel
25.291667
49
0.850082
89
607
5.550562
0.292135
0.289474
0.192308
0.12753
0.242915
0
0
0
0
0
0
0.007505
0.121911
607
23
50
26.391304
0.919325
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
0
0
0
null
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
5
9fd357ba5c6697a5f4058050966abfa847bceb3a
256
py
Python
motzkin/__init__.py
PermutaTriangle/motzkin
43e01cd09cd476ffd2c87ba649cb2a27fe66772e
[ "BSD-3-Clause" ]
1
2021-09-24T21:51:04.000Z
2021-09-24T21:51:04.000Z
motzkin/__init__.py
PermutaTriangle/motzkin
43e01cd09cd476ffd2c87ba649cb2a27fe66772e
[ "BSD-3-Clause" ]
null
null
null
motzkin/__init__.py
PermutaTriangle/motzkin
43e01cd09cd476ffd2c87ba649cb2a27fe66772e
[ "BSD-3-Clause" ]
null
null
null
from .motzkinpaths import ( MotzkinPaths, MotzkinPathsStartingWithH, MotzkinPathsStartingWithU, ) from .motzkinpatterns import CrossingPattern, MotzkinPath from .motzkinspec import MotzkinSpecificationFinder from .strategies import MotzkinPack
28.444444
57
0.832031
19
256
11.210526
0.631579
0
0
0
0
0
0
0
0
0
0
0
0.128906
256
8
58
32
0.955157
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
0.5
0
0.5
0
1
0
1
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
0
0
0
5
c6647028cdd98bab07d061ddea2886f804081d17
18,672
py
Python
binance.py
lampshade9909/BinanceAPI
8080203573232ffbaf9cbeeefa63ddedce093222
[ "MIT" ]
3
2018-01-04T06:11:43.000Z
2018-04-10T13:06:51.000Z
binance.py
lampshade9909/BinanceAPI
8080203573232ffbaf9cbeeefa63ddedce093222
[ "MIT" ]
null
null
null
binance.py
lampshade9909/BinanceAPI
8080203573232ffbaf9cbeeefa63ddedce093222
[ "MIT" ]
null
null
null
#!/usr/bin/env python __author__ = "Joey Zacherl" __license__ = "MIT" __version__ = "1.0.0" __description__ = "Binance API integration" __email__ = "Joey.Zacherl@gmail.com" from loggingConfig import InitLogging, PrintAndLogError, PrintAndLog import requests import sys, traceback import json import hmac import hashlib import time APIKey_Binance = None APISecret_Binance = None URL_Binance_Base = "https://www.binance.com/api/" RequestTimeout_seconds = 15 def SetAPIKey(key): global APIKey_Binance APIKey_Binance = key def SetAPISecret(secret): global APISecret_Binance APISecret_Binance = secret def IsAPIKeySecretSet(): global APIKey_Binance global APISecret_Binance if APIKey_Binance and APISecret_Binance: return True else: return False def GetBinanceSignature(totalParams): global APISecret_Binance # print "APISecret_Binance = ", APISecret_Binance signature = hmac.new(APISecret_Binance, totalParams, hashlib.sha256).hexdigest() return signature def GetBinanceHeader(): global APIKey_Binance # print "APIKey_Binance = ", APIKey_Binance headers_Binance = {'content-type': 'application/json', 'X-MBX-APIKEY': APIKey_Binance} # PrintAndLog("headers_Binance = " + str(headers_Binance)) return headers_Binance def API_Get_Ping(): url = URL_Binance_Base + "v1/ping" PrintAndLog("url = " + url) response = requests.get(url, timeout=RequestTimeout_seconds) if response.ok: responseData = response.content jData = json.loads(responseData) PrintAndLog("API_Get_Ping_Binance jData = " + str(jData)) return jData else: # If response code is not ok (200), print the resulting http error code with description response.raise_for_status() def ValidateAPIKey(): if not IsAPIKeySecretSet(): raise Exception('API Key or Secret is not set. 
Set that before making this call.') def API_Get_Time(): url = URL_Binance_Base + "v1/time" # PrintAndLog("url = " + url) response = requests.get(url, timeout=RequestTimeout_seconds) if response.ok: responseData = response.content jData = json.loads(responseData) # PrintAndLog("API_Get_Time_Binance jData = " + str(jData)) return jData else: # If response code is not ok (200), print the resulting http error code with description response.raise_for_status() def API_Get_TopNOrders(symbol, n, side): if side.lower() != "bids" and side.lower() != "asks" and side.lower() != "both": raise ValueError('side must be either bids, asks, or both') else: ordersJData = API_Get_Orders(symbol) # PrintAndLog("type(ordersJData) = " + str(type(ordersJData))) # PrintAndLog("type(ordersJData['bids']) = " + str(type(ordersJData['bids']))) # PrintAndLog("type(ordersJData['asks']) = " + str(type(ordersJData['asks']))) if side.lower() == "bids": returnList = ordersJData['bids'][:n] elif side.lower() == "asks": returnList = ordersJData['asks'][:n] elif side.lower() == "both": returnList = (ordersJData['bids'][:n], ordersJData['asks'][:n]) # PrintAndLog("API_Get_TopNOrders_Binance returnList = " + str(returnList)) return returnList def API_Get_Orders(symbol): url = URL_Binance_Base + "v1/depth?symbol=" + str(symbol) PrintAndLog("url = " + url) # headers_local = {'content-type': 'application/json'} # response = requests.get(url, headers=headers_local, timeout=RequestTimeout_seconds) response = requests.get(url, timeout=RequestTimeout_seconds) if response.ok: responseData = response.content jData = json.loads(responseData) # PrintAndLog("API_Get_Orders_Binance jData = " + str(jData)) return jData else: # If response code is not ok (200), print the resulting http error code with description response.raise_for_status() def API_Get_Price(symbol): marketsJData = API_Get_Markets() for market in marketsJData: firstFew = market['symbol'][:len(symbol)] # PrintAndLog("market = " + str(market) + " and 
firstFew = " + firstFew) if firstFew.lower() == symbol.lower(): return market def API_Get_Markets(): url = URL_Binance_Base + "v1/ticker/allPrices" PrintAndLog("url = " + url) # headers_local = {'content-type': 'application/json'} # response = requests.get(url, headers=headers_local, timeout=RequestTimeout_seconds) response = requests.get(url, timeout=RequestTimeout_seconds) if response.ok: responseData = response.content jData = json.loads(responseData) # PrintAndLog("API_Get_Markets_Binance jData = " + str(jData)) return jData else: # If response code is not ok (200), print the resulting http error code with description response.raise_for_status() def API_Get_TradeHistory(symbol, recvWindow=5000): ValidateAPIKey() timeStamp = GetTimeStamp() totalParams = "symbol=" + symbol.upper() + "&recvWindow=" + str(recvWindow) + "&timestamp=" + timeStamp # PrintAndLog("totalParams = " + totalParams) signature = GetBinanceSignature(totalParams) # PrintAndLog("signature = " + signature) url = URL_Binance_Base + "v3/myTrades?" 
+ totalParams + "&signature=" + signature PrintAndLog("url = " + url) response = requests.get(url, headers=GetBinanceHeader(), timeout=RequestTimeout_seconds) if response.ok: responseData = response.content jData = json.loads(responseData) PrintAndLog("API_Get_TradeHistory_Binance jData = " + str(jData)) return jData else: # If response code is not ok (200), print the resulting http error code with description PrintAndLog("response = " + str(response.content)) response.raise_for_status() def API_Get_24TickerPriceChange(symbol): url = URL_Binance_Base + "v1/ticker/24hr?symbol=" + str(symbol) PrintAndLog("url = " + url) response = requests.get(url, timeout=RequestTimeout_seconds) if response.ok: responseData = response.content jData = json.loads(responseData) PrintAndLog("API_Get_24TickerPriceChange_Binance jData = " + str(jData)) return jData else: # If response code is not ok (200), print the resulting http error code with description PrintAndLog("response = " + str(response.content)) response.raise_for_status() def API_Get_KlineCandlestick(symbol, interval): url = URL_Binance_Base + "v1/klines?symbol=" + str(symbol) + "&interval=" + str(interval) PrintAndLog("url = " + url) response = requests.get(url, timeout=RequestTimeout_seconds) if response.ok: responseData = response.content jData = json.loads(responseData) PrintAndLog("API_Get_KlineCandlestick_Binance jData = " + str(jData)) return jData else: # If response code is not ok (200), print the resulting http error code with description PrintAndLog("response = " + str(response.content)) response.raise_for_status() def API_Get_AggregateTrades(symbol, recvWindow=5000): url = URL_Binance_Base + "v1/aggTrades?symbol=" + str(symbol) PrintAndLog("url = " + url) response = requests.get(url, timeout=RequestTimeout_seconds) if response.ok: responseData = response.content jData = json.loads(responseData) PrintAndLog("API_Get_AggregateTrades_Binance jData = " + str(jData)) return jData else: # If response code is not ok 
(200), print the resulting http error code with description PrintAndLog("response = " + str(response.content)) response.raise_for_status() def GetTimeStamp(): try: # Try getting the server time from the API return str(API_Get_Time()['serverTime']) except: # Get the time locally (note sometimes the binance API doesn't like this time because they re-route you somewhere that doesn't match this time...) timeStamp = str(long(round(time.time() * 1000))) return timeStamp def API_Post_BuyLimitOrder(symbol, quantity, price, icebergQty=None, stopPrice=None, recvWindow=5000): return API_Post_LimitOrder(symbol, "buy", quantity, price, icebergQty, stopPrice, recvWindow) def API_Post_SellLimitOrder(symbol, quantity, price, icebergQty=None, stopPrice=None, recvWindow=5000): return API_Post_LimitOrder(symbol, "sell", quantity, price, icebergQty, stopPrice, recvWindow) def API_Post_LimitOrder(symbol, side, quantity, priceString, icebergQty, stopPrice, recvWindow): ValidateAPIKey() timeStamp = GetTimeStamp() if side.lower() == "buy" or side.lower() == "sell": side = side.upper() else: raise ValueError('side must be either buy or sell') totalParams = "symbol=" + symbol.upper() + "&side=" + side + "&type=LIMIT&timeInForce=GTC&quantity=" + str(quantity) + "&price=" + priceString + "&recvWindow=" + str(recvWindow) + "&timestamp=" + timeStamp if icebergQty: totalParams += "&icebergQty=" + str(icebergQty) if stopPrice: totalParams += "&stopPrice=" + str(stopPrice) # PrintAndLog("totalParams = " + totalParams) signature = GetBinanceSignature(totalParams) # PrintAndLog("signature = " + signature) url = URL_Binance_Base + "v3/order?" 
+ totalParams + "&signature=" + signature PrintAndLog("url = " + url) response = requests.post(url, headers=GetBinanceHeader(), timeout=RequestTimeout_seconds) if response.ok: responseData = response.content jData = json.loads(responseData) PrintAndLog("API_Post_LimitOrder_Binance jData = " + str(jData)) return jData else: # If response code is not ok (200), print the resulting http error code with description PrintAndLog("response = " + str(response.content)) response.raise_for_status() def API_Post_BuyMarketOrder(symbol, quantity, icebergQty=None, stopPrice=None, recvWindow=5000): return API_Post_MarketOrder(symbol, "buy", quantity, icebergQty, stopPrice, recvWindow) def API_Post_SellMarketOrder(symbol, quantity, icebergQty=None, stopPrice=None, recvWindow=5000): return API_Post_MarketOrder(symbol, "sell", quantity, icebergQty, stopPrice, recvWindow) def API_Post_MarketOrder(symbol, side, quantity, icebergQty=None, stopPrice=None, recvWindow=5000): ValidateAPIKey() timeStamp = GetTimeStamp() if side.lower() == "buy" or side.lower() == "sell": side = side.upper() else: raise ValueError('side must be either buy or sell') # quantity must be rounded to two decimal places quantity = str(round(float(quantity), 0)) totalParams = "symbol=" + symbol.upper() + "&side=" + side + "&type=MARKET&quantity=" + str(quantity) + "&recvWindow=" + str(recvWindow) + "&timestamp=" + timeStamp if icebergQty: totalParams += "&icebergQty=" + str(icebergQty) if stopPrice: totalParams += "&stopPrice=" + str(stopPrice) # PrintAndLog("totalParams = " + totalParams) signature = GetBinanceSignature(totalParams) # PrintAndLog("signature = " + signature) url = URL_Binance_Base + "v3/order?" 
+ totalParams + "&signature=" + signature PrintAndLog("url = " + url) response = requests.post(url, headers=GetBinanceHeader(), timeout=RequestTimeout_seconds) if response.ok: responseData = response.content jData = json.loads(responseData) PrintAndLog("API_Post_MarketOrder_Binance jData = " + str(jData)) return jData else: # If response code is not ok (200), print the resulting http error code with description PrintAndLog("response = " + str(response.content)) response.raise_for_status() def API_Get_OrderStatus(symbol, orderId, recvWindow=5000): ValidateAPIKey() timeStamp = GetTimeStamp() totalParams = "symbol=" + symbol.upper() + "&orderId=" + str(orderId) + "&recvWindow=" + str(recvWindow) + "&timestamp=" + timeStamp # PrintAndLog("totalParams = " + totalParams) signature = GetBinanceSignature(totalParams) # PrintAndLog("signature = " + signature) url = URL_Binance_Base + "v3/order?" + totalParams + "&signature=" + signature PrintAndLog("url = " + url) response = requests.get(url, headers=GetBinanceHeader(), timeout=RequestTimeout_seconds) if response.ok: responseData = response.content jData = json.loads(responseData) PrintAndLog("API_Get_OrderStatus_Binance jData = " + str(jData)) return jData else: # If response code is not ok (200), print the resulting http error code with description PrintAndLog("response = " + str(response.content)) response.raise_for_status() def API_Delete_Order(symbol, orderId, recvWindow=5000): ValidateAPIKey() timeStamp = GetTimeStamp() totalParams = "symbol=" + symbol.upper() + "&orderId=" + str(orderId) + "&recvWindow=" + str(recvWindow) + "&timestamp=" + timeStamp # PrintAndLog("totalParams = " + totalParams) signature = GetBinanceSignature(totalParams) # PrintAndLog("signature = " + signature) url = URL_Binance_Base + "v3/order?" 
+ totalParams + "&signature=" + signature PrintAndLog("url = " + url) response = requests.delete(url, headers=GetBinanceHeader(), timeout=RequestTimeout_seconds) if response.ok: responseData = response.content jData = json.loads(responseData) PrintAndLog("API_Delete_Order_Binance jData = " + str(jData)) return jData else: # If response code is not ok (200), print the resulting http error code with description PrintAndLog("response = " + str(response.content)) response.raise_for_status() def API_Get_OpenBuyOrders(symbol, recvWindow=5000): ordersJData = API_Get_OpenOrders(symbol, recvWindow) returnList = [] for order in ordersJData: if order['side'].lower() == "buy": returnList.append(order) return returnList def API_Get_OpenSellOrders(symbol, recvWindow=5000): ordersJData = API_Get_OpenOrders(symbol, recvWindow) returnList = [] for order in ordersJData: if order['side'].lower() == "sell": returnList.append(order) return returnList def API_Get_OpenOrders(symbol, recvWindow=5000): ValidateAPIKey() timeStamp = GetTimeStamp() totalParams = "symbol=" + symbol.upper() + "&recvWindow=" + str(recvWindow) + "&timestamp=" + timeStamp # PrintAndLog("totalParams = " + totalParams) signature = GetBinanceSignature(totalParams) # PrintAndLog("signature = " + signature) url = URL_Binance_Base + "v3/openOrders?" 
+ totalParams + "&signature=" + signature PrintAndLog("url = " + url) response = requests.get(url, headers=GetBinanceHeader(), timeout=RequestTimeout_seconds) if response.ok: responseData = response.content jData = json.loads(responseData) PrintAndLog("API_Get_OpenOrders_Binance jData = " + str(jData)) return jData else: # If response code is not ok (200), print the resulting http error code with description PrintAndLog("response = " + str(response.content)) response.raise_for_status() def API_Get_Balance(currency): accountJData = API_Get_AccountInfo() for balance in accountJData['balances']: if currency.lower() == balance['asset'].lower(): return balance def API_Get_AccountInfo(recvWindow=5000): ValidateAPIKey() timeStamp = GetTimeStamp() # totalParams = "recvWindow=" + str(recvWindow) + "&timestamp=" + timeStamp totalParams = "timestamp=" + timeStamp # PrintAndLog("totalParams = " + totalParams) signature = GetBinanceSignature(totalParams) # PrintAndLog("signature = " + signature) url = URL_Binance_Base + "v3/account?" + totalParams + "&signature=" + signature PrintAndLog("url = " + url) response = requests.get(url, headers=GetBinanceHeader(), timeout=RequestTimeout_seconds) if response.ok: responseData = response.content jData = json.loads(responseData) # PrintAndLog("API_Get_AccountInfo_Binance jData = " + str(jData)) return jData else: # If response code is not ok (200), print the resulting http error code with description PrintAndLog("response = " + str(response.content)) response.raise_for_status() def API_Post_Withdraw(asset, address, amount, recvWindow=5000): ValidateAPIKey() timeStamp = GetTimeStamp() totalParams = "asset=" + asset.upper() + "&address=" + address + "&amount=" + str(amount) + "&recvWindow=" + str(recvWindow) + "&timestamp=" + timeStamp # PrintAndLog("totalParams = " + totalParams) signature = GetBinanceSignature(totalParams) # PrintAndLog("signature = " + signature) url = "https://www.binance.com/wapi/v1/withdraw.html?" 
+ totalParams + "&signature=" + signature # returns me weird stuff PrintAndLog("url = " + url) response = requests.post(url, headers=GetBinanceHeader(), timeout=RequestTimeout_seconds) if response.ok: responseData = response.content jData = json.loads(responseData) PrintAndLog("API_Post_Withdraw jData = " + str(jData)) return jData else: # If response code is not ok (200), print the resulting http error code with description PrintAndLog("response = " + str(response.content)) response.raise_for_status() def API_Post_GetWithdrawHistory(recvWindow=5000): ValidateAPIKey() timeStamp = GetTimeStamp() totalParams = "recvWindow=" + str(recvWindow) + "&timestamp=" + timeStamp # PrintAndLog("totalParams = " + totalParams) signature = GetBinanceSignature(totalParams) # PrintAndLog("signature = " + signature) url = "https://www.binance.com/wapi/v1/getWithdrawHistory.html?" + totalParams + "&signature=" + signature # returns me weird stuff PrintAndLog("url = " + url) response = requests.post(url, headers=GetBinanceHeader(), timeout=RequestTimeout_seconds) if response.ok: responseData = response.content jData = json.loads(responseData) PrintAndLog("API_Post_GetWithdrawHistory jData = " + str(jData)) return jData else: # If response code is not ok (200), print the resulting http error code with description PrintAndLog("response = " + str(response.content)) response.raise_for_status()
34.135283
209
0.677324
1,983
18,672
6.253656
0.1059
0.01645
0.040642
0.038707
0.759697
0.750746
0.735666
0.717684
0.701879
0.701879
0
0.009694
0.20994
18,672
546
210
34.197802
0.830938
0.191463
0
0.636086
0
0
0.123213
0.0258
0
0
0
0
0
1
0.097859
false
0
0.021407
0.012232
0.214067
0
0
0
0
null
0
0
0
0
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
5
c664813d57327b21a2bc101b3d73b202cd8e5783
74
py
Python
cmdline/createservice.py
Esri/arcpy-server-util-rest
198c9526f41ab742da36d0eff467475bca7da2fe
[ "Apache-2.0" ]
10
2015-04-24T15:34:39.000Z
2021-11-28T21:22:58.000Z
cmdline/createservice.py
Esri/arcpy-server-util-rest
198c9526f41ab742da36d0eff467475bca7da2fe
[ "Apache-2.0" ]
1
2016-09-08T09:02:07.000Z
2016-09-08T17:17:41.000Z
cmdline/createservice.py
Esri/arcpy-server-util-rest
198c9526f41ab742da36d0eff467475bca7da2fe
[ "Apache-2.0" ]
5
2017-11-03T21:00:48.000Z
2022-01-06T10:10:04.000Z
#! python import arcrest.admin arcrest.admin.cmdline.createservice()
14.8
38
0.756757
8
74
7
0.75
0.428571
0
0
0
0
0
0
0
0
0
0
0.135135
74
4
39
18.5
0.875
0.108108
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
0.5
0
0.5
0
1
0
0
null
1
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
0
0
0
5
c68e5d86540c5fc1f4c0a82a2c423121aed26117
41
py
Python
macro/__init__.py
KyleADOlson/advanced-microkeys
d5aa2f7acffd872893c859a49e1a43ea53974b12
[ "MIT" ]
null
null
null
macro/__init__.py
KyleADOlson/advanced-microkeys
d5aa2f7acffd872893c859a49e1a43ea53974b12
[ "MIT" ]
null
null
null
macro/__init__.py
KyleADOlson/advanced-microkeys
d5aa2f7acffd872893c859a49e1a43ea53974b12
[ "MIT" ]
1
2022-02-25T20:14:19.000Z
2022-02-25T20:14:19.000Z
"""MicroKeys Python macro extensions"""
20.5
40
0.731707
4
41
7.5
1
0
0
0
0
0
0
0
0
0
0
0
0.121951
41
1
41
41
0.833333
0.804878
0
null
0
null
0
0
null
0
0
0
null
1
null
true
0
0
null
null
null
1
1
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
1
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
0
0
0
0
0
5
c6c2674b458cc4049596f362efc8be0bf8893719
197
py
Python
Reader/reader/compressed/__init__.py
sarvex/DjangoKoans
a4ba1e787dc508d1706897f26d3e91219ad5d982
[ "0BSD" ]
null
null
null
Reader/reader/compressed/__init__.py
sarvex/DjangoKoans
a4ba1e787dc508d1706897f26d3e91219ad5d982
[ "0BSD" ]
null
null
null
Reader/reader/compressed/__init__.py
sarvex/DjangoKoans
a4ba1e787dc508d1706897f26d3e91219ad5d982
[ "0BSD" ]
null
null
null
__author__ = 'Sarvex' from reader.reader.compressed.bzipped import opener as bz2_opener from reader.reader.compressed.gzipped import opener as gzip_opener __all__ = ['bz2_opener', 'gzip_opener']
28.142857
66
0.807107
27
197
5.444444
0.481481
0.136054
0.217687
0.353742
0
0
0
0
0
0
0
0.011364
0.106599
197
6
67
32.833333
0.823864
0
0
0
0
0
0.137056
0
0
0
0
0
0
1
0
false
0
0.5
0
0.5
0
1
0
0
null
0
1
1
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
1
0
0
0
0
5
c6e458612c3780e17e64daea67f57c5eb8fd108e
133
py
Python
prawdziweZadania/cw3.py
Wiktor-Wewe/zadaniaDoKolosaWewe
69519edae6b582bd81b5011871ce38f1e6a2447f
[ "MIT" ]
null
null
null
prawdziweZadania/cw3.py
Wiktor-Wewe/zadaniaDoKolosaWewe
69519edae6b582bd81b5011871ce38f1e6a2447f
[ "MIT" ]
null
null
null
prawdziweZadania/cw3.py
Wiktor-Wewe/zadaniaDoKolosaWewe
69519edae6b582bd81b5011871ce38f1e6a2447f
[ "MIT" ]
null
null
null
""" znajdz największy palindrom utworzony poprzez przemnożenie przez siebie dwóch liczby 3-cyfrowych odp:906609 """ print("906609")
19
96
0.789474
16
133
6.5625
0.9375
0
0
0
0
0
0
0
0
0
0
0.111111
0.120301
133
6
97
22.166667
0.786325
0.804511
0
0
0
0
0.333333
0
0
0
0
0
0
1
0
true
0
0
0
0
1
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
1
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
0
0
0
1
0
5
059df84000bd2b57d655aa086d0db34e145f5ae9
116
py
Python
lenders_sentry_utils/__init__.py
Lenders-Cooperative/sentry-utils
20932cd767ccb52747ba6be9465fedc002991e75
[ "BSD-3-Clause" ]
null
null
null
lenders_sentry_utils/__init__.py
Lenders-Cooperative/sentry-utils
20932cd767ccb52747ba6be9465fedc002991e75
[ "BSD-3-Clause" ]
null
null
null
lenders_sentry_utils/__init__.py
Lenders-Cooperative/sentry-utils
20932cd767ccb52747ba6be9465fedc002991e75
[ "BSD-3-Clause" ]
null
null
null
from .utils import sentry_init, protect_body, capture_exception from .transport import TrafficSplittingHttpTransport
58
63
0.887931
13
116
7.692308
0.846154
0
0
0
0
0
0
0
0
0
0
0
0.077586
116
2
64
58
0.934579
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
5
05b062e984797af5f4bafa56f8247c3385377b79
99
py
Python
20. comprension de listas.py
JSNavas/CursoPython2.7
d1f9170dbf897b6eb729f9696a208880e33c550b
[ "MIT" ]
null
null
null
20. comprension de listas.py
JSNavas/CursoPython2.7
d1f9170dbf897b6eb729f9696a208880e33c550b
[ "MIT" ]
null
null
null
20. comprension de listas.py
JSNavas/CursoPython2.7
d1f9170dbf897b6eb729f9696a208880e33c550b
[ "MIT" ]
null
null
null
lista = [1,2,3,4,5] lista2 = [i * 2 for i in lista] print "Lista: ", lista print "Lista2:",lista2
16.5
31
0.616162
19
99
3.210526
0.578947
0.327869
0
0
0
0
0
0
0
0
0
0.1125
0.191919
99
6
32
16.5
0.65
0
0
0
0
0
0.14
0
0
0
0
0
0
0
null
null
0
0
null
null
0.5
1
0
0
null
1
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
1
0
0
0
0
0
0
1
0
5
af1bb53d43ed273b710eaec8c230a47424c252a2
507
py
Python
ns-allinone-3.27/pybindgen-0.17.0.post58+ngcf00cc0/pybindgen/__init__.py
zack-braun/4607_NS
43c8fb772e5552fb44bd7cd34173e73e3fb66537
[ "MIT" ]
93
2019-04-21T08:22:26.000Z
2022-03-30T04:26:29.000Z
ns-allinone-3.27/pybindgen-0.17.0.post58+ngcf00cc0/pybindgen/__init__.py
zack-braun/4607_NS
43c8fb772e5552fb44bd7cd34173e73e3fb66537
[ "MIT" ]
12
2019-04-19T16:39:58.000Z
2021-06-22T13:18:32.000Z
ns-allinone-3.27/pybindgen-0.17.0.post58+ngcf00cc0/pybindgen/__init__.py
zack-braun/4607_NS
43c8fb772e5552fb44bd7cd34173e73e3fb66537
[ "MIT" ]
21
2019-05-27T19:36:12.000Z
2021-07-26T02:37:41.000Z
from pybindgen.typehandlers.base import ReturnValue, Parameter from pybindgen.module import Module from pybindgen.function import Function from pybindgen.typehandlers.codesink import CodeSink, FileCodeSink from pybindgen.cppclass import CppMethod, CppClass, CppConstructor from pybindgen.enum import Enum from pybindgen.utils import write_preamble, param, retval try: from pybindgen.version import version as __version__ except ImportError: # the version.py file is generated and may not exist pass
42.25
72
0.838264
65
507
6.461538
0.538462
0.247619
0.119048
0
0
0
0
0
0
0
0
0
0.122288
507
11
73
46.090909
0.94382
0.098619
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0.090909
0.818182
0
0.818182
0
0
0
0
null
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
1
1
0
1
0
0
5
af2379585c35d46e0b6d395c4dc3f1197a156038
245
py
Python
server/apps/api/auth.py
IvanDubrowin/simple-shop
7eb3b3a82863ee382692498812954a2f4530dafe
[ "MIT" ]
null
null
null
server/apps/api/auth.py
IvanDubrowin/simple-shop
7eb3b3a82863ee382692498812954a2f4530dafe
[ "MIT" ]
8
2021-03-30T13:03:15.000Z
2022-03-12T00:22:52.000Z
server/apps/api/auth.py
IvanDubrowin/simple-shop
7eb3b3a82863ee382692498812954a2f4530dafe
[ "MIT" ]
null
null
null
from rest_framework.authentication import SessionAuthentication from rest_framework.request import Request class CsrfExemptSessionAuthentication(SessionAuthentication): def enforce_csrf(self, request: Request) -> None: return None
30.625
63
0.820408
24
245
8.25
0.625
0.080808
0.171717
0
0
0
0
0
0
0
0
0
0.130612
245
7
64
35
0.929577
0
0
0
0
0
0
0
0
0
0
0
0
1
0.2
false
0
0.4
0.2
1
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
1
1
1
0
0
5
af301c4130c4cb8e6e324343719ad2657e733882
263
py
Python
tests/unittests/api/basic_app_with_cqrs/application/CreateUser/CreateUserCommand.py
ahmetcagriakca/pdip
c4c16d5666a740154cabdc6762cd44d98b7bdde8
[ "MIT" ]
2
2021-12-09T21:07:46.000Z
2021-12-11T22:18:01.000Z
tests/unittests/api/basic_app_with_cqrs/application/CreateUser/CreateUserCommand.py
fmuyilmaz/pdip
f7e30b0c04d9e85ef46b0b7094fafd3ce18bccab
[ "MIT" ]
null
null
null
tests/unittests/api/basic_app_with_cqrs/application/CreateUser/CreateUserCommand.py
fmuyilmaz/pdip
f7e30b0c04d9e85ef46b0b7094fafd3ce18bccab
[ "MIT" ]
3
2021-11-15T00:47:00.000Z
2021-12-17T11:35:45.000Z
from dataclasses import dataclass from pdip.cqrs import ICommand from tests.unittests.api.basic_app_with_cqrs.application.CreateUser.CreateUserRequest import CreateUserRequest @dataclass class CreateUserCommand(ICommand): request: CreateUserRequest = None
26.3
110
0.851711
29
263
7.62069
0.689655
0
0
0
0
0
0
0
0
0
0
0
0.098859
263
9
111
29.222222
0.932489
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
0.5
0
0.833333
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
5
af3cb2d5142527b78f20912a2291c5cc1d42d545
220
py
Python
GNetLMM/pycore/mtSet/gp/__init__.py
PMBio/GNetLMM
103d6433ff6d4a13b5787c116032fda268dc4302
[ "Apache-2.0" ]
4
2016-02-25T18:40:36.000Z
2019-05-06T06:15:47.000Z
GNetLMM/pycore/mtSet/gp/__init__.py
PMBio/GNetLMM
103d6433ff6d4a13b5787c116032fda268dc4302
[ "Apache-2.0" ]
6
2016-03-29T02:55:17.000Z
2017-11-27T19:30:04.000Z
GNetLMM/pycore/mtSet/gp/__init__.py
PMBio/GNetLMM
103d6433ff6d4a13b5787c116032fda268dc4302
[ "Apache-2.0" ]
2
2017-05-09T05:23:50.000Z
2019-07-27T13:19:22.000Z
import sys sys.path.append('./..') from gp2kronSum import gp2kronSum #from gp2kronSumSvd import gp2kronSumSvd from gp3kronSum import gp3kronSum from gp3kronSumLR import gp3kronSumLR from gp2kronSumLR import gp2kronSumLR
27.5
40
0.845455
25
220
7.44
0.4
0
0
0
0
0
0
0
0
0
0
0.050761
0.104545
220
7
41
31.428571
0.893401
0.177273
0
0
0
0
0.022222
0
0
0
0
0
0
1
0
true
0
0.833333
0
0.833333
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
5
afcbff96ad8c4fb92abaae854c9b35f8ff2afb4e
202
py
Python
home/views.py
BrenoFroes/ProjetoFinal-Django
724f7bb2935661a2858e11cf7a666901f92f7a94
[ "MIT" ]
null
null
null
home/views.py
BrenoFroes/ProjetoFinal-Django
724f7bb2935661a2858e11cf7a666901f92f7a94
[ "MIT" ]
null
null
null
home/views.py
BrenoFroes/ProjetoFinal-Django
724f7bb2935661a2858e11cf7a666901f92f7a94
[ "MIT" ]
null
null
null
from django.shortcuts import render, redirect, get_object_or_404 def home(request): return render(request, 'home.html') def my_logout(request): my_logout(request) return redirect('home')
20.2
64
0.742574
28
202
5.178571
0.607143
0.17931
0.206897
0
0
0
0
0
0
0
0
0.017544
0.153465
202
9
65
22.444444
0.830409
0
0
0
0
0
0.064677
0
0
0
0
0
0
1
0.333333
false
0
0.166667
0.166667
0.833333
0
1
0
0
null
0
1
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
0
0
1
0
0
0
5