# -*- coding: utf-8 -*-
from __future__ import absolute_import
from unittest import main, TestCase
from tempfile import mkdtemp
from os.path import join, exists, dirname, isdir, abspath, sep
from urlparse import urlparse, urljoin
from os import environ, mkdir
from shutil import rmtree, copytree
from re import search, sub
import random
from datetime import date, timedelta, datetime
import sys
from chime.repo_functions import ChimeRepo
from slugify import slugify
from multiprocessing import Process
import time
import logging
import tempfile
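# silence all logging (CRITICAL and below) for the duration of the tests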
logging.disable(logging.CRITICAL)
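# make the repository root importable before pulling in the chime modules below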
repo_root = abspath(join(dirname(__file__), '..'))
sys.path.insert(0, repo_root)
from box.util.rotunicode import RotUnicode
from httmock import response, HTTMock
from mock import MagicMock, patch
from bs4 import Comment, BeautifulSoup
from chime import (
create_app, repo_functions, google_api_functions, view_functions,
publish, errors)
from chime import constants
from chime import chime_activity
from unit.chime_test_client import ChimeTestClient
import codecs
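# registering RotUnicode makes the 'rotunicode' codec available for the non-ASCII tests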
codecs.register(RotUnicode.search_function)
# these patterns help us search the HTML of a response to determine if the expected page loaded
PATTERN_BRANCH_COMMENT = u'<!-- branch: {} -->'
PATTERN_AUTHOR_COMMENT = u'<!-- author: {} -->'
PATTERN_TASK_COMMENT = u'<!-- task: {} -->'
PATTERN_TEMPLATE_COMMENT = u'<!-- template name: {} -->'
PATTERN_FILE_COMMENT = u'<!-- file type: {file_type}, file name: {file_name}, file title: {file_title} -->'
PATTERN_OVERVIEW_ITEM_CREATED = u'<p>The "{created_name}" {created_type} was created by {author_email}.</p>'
PATTERN_OVERVIEW_ACTIVITY_STARTED = u'<p>The "{activity_name}" activity was started by {author_email}.</p>'
PATTERN_OVERVIEW_COMMENT_BODY = u'<div class="comment__body">{comment_body}</div>'
PATTERN_OVERVIEW_ITEM_DELETED = u'<p>The "{deleted_name}" {deleted_type} {deleted_also}was deleted by {author_email}.</p>'
PATTERN_FLASH_TASK_DELETED = u'You deleted the "{description}" activity!'
PATTERN_FLASH_CREATED_CATEGORY = u'Created a new topic named {title}! Remember to submit this change for feedback when you\'re ready to go live.'
PATTERN_FLASH_SAVED_CATEGORY = u'Saved changes to the {title} topic! Remember to submit this change for feedback when you\'re ready to go live.'
PATTERN_FLASH_CREATED_ARTICLE = u'Created a new article named {title}! Remember to submit this change for feedback when you\'re ready to go live.'
PATTERN_FLASH_SAVED_ARTICLE = u'Saved changes to the {title} article! Remember to submit this change for feedback when you\'re ready to go live.'
PATTERN_FLASH_DELETED_CATEGORY = u'The "{title}" topic {containing}was deleted! Remember to submit this change for feedback when you\'re ready to go live.'
PATTERN_FLASH_DELETED_ARTICLE = u'The "{title}" article was deleted! Remember to submit this change for feedback when you\'re ready to go live.'
PATTERN_FORM_CATEGORY_TITLE = u'<input name="en-title" type="text" value="{title}" class="directory-modify__name" placeholder="Crime Statistics and Maps">'
PATTERN_FORM_CATEGORY_DESCRIPTION = u'<textarea name="en-description" class="directory-modify__description" placeholder="Crime statistics and reports by district and map">{description}</textarea>'
# patterns for the review workflow (unreviewed, feedback requested, ready to publish)
PATTERN_UNREVIEWED_EDITS_LINK = u'<a href="/tree/{branch_name}/edit/">'
PATTERN_FEEDBACK_REQUESTED_LINK = u'<a href="/tree/{branch_name}/" class="toolbar__item button">Feedback requested</a>'
PATTERN_READY_TO_PUBLISH_LINK = u'<a href="/tree/{branch_name}/" class="toolbar__item button">Ready to publish</a>'
class TestAppConfig (TestCase):
# in TestAppConfig
def test_missing_values(self):
self.assertRaises(KeyError, lambda: create_app({}))
# in TestAppConfig
def test_present_values(self):
create_app_environ = {}
create_app_environ['RUNNING_STATE_DIR'] = 'Yo'
create_app_environ['GA_CLIENT_ID'] = 'Yo'
create_app_environ['GA_CLIENT_SECRET'] = 'Yo'
create_app_environ['LIVE_SITE_URL'] = 'Hey'
create_app_environ['BROWSERID_URL'] = 'Hey'
create_app(create_app_environ)
# in TestAppConfig
def test_error_template_args(self):
''' Default error template args are generated as expected
'''
create_app_environ = {}
create_app_environ['RUNNING_STATE_DIR'] = 'Yo'
create_app_environ['GA_CLIENT_ID'] = 'Yo'
create_app_environ['GA_CLIENT_SECRET'] = 'Yo'
create_app_environ['BROWSERID_URL'] = 'Hey'
create_app_environ['LIVE_SITE_URL'] = 'Hey'
fake_support_email = u'support@example.com'
fake_support_phone_number = u'(123) 456-7890'
create_app_environ['SUPPORT_EMAIL_ADDRESS'] = fake_support_email
create_app_environ['SUPPORT_PHONE_NUMBER'] = fake_support_phone_number
app = create_app(create_app_environ)
template_args = errors.common_error_template_args(app.config)
self.assertEqual(len(template_args), 3)
self.assertTrue('activities_path' in template_args)
self.assertTrue('support_email' in template_args)
self.assertTrue('support_phone_number' in template_args)
self.assertEqual(template_args['support_email'], fake_support_email)
self.assertEqual(template_args['support_phone_number'], fake_support_phone_number)
# in TestAppConfig
def test_for_constant_name_conflicts(self):
''' None of the constant names defined in constants.py conflict with reserved config variable names
'''
flask_reserved_config_names = ['DEBUG', 'TESTING', 'PROPAGATE_EXCEPTIONS', 'PRESERVE_CONTEXT_ON_EXCEPTION', 'SECRET_KEY', 'SESSION_COOKIE_NAME', 'SESSION_COOKIE_DOMAIN', 'SESSION_COOKIE_PATH', 'SESSION_COOKIE_HTTPONLY', 'SESSION_COOKIE_SECURE', 'PERMANENT_SESSION_LIFETIME', 'USE_X_SENDFILE', 'LOGGER_NAME', 'SERVER_NAME', 'APPLICATION_ROOT', 'MAX_CONTENT_LENGTH', 'SEND_FILE_MAX_AGE_DEFAULT', 'TRAP_HTTP_EXCEPTIONS', 'TRAP_BAD_REQUEST_ERRORS', 'PREFERRED_URL_SCHEME', 'JSON_AS_ASCII', 'JSON_SORT_KEYS', 'JSONIFY_PRETTYPRINT_REGULAR']
chime_reserved_config_names = ['RUNNING_STATE_DIR', 'REPO_PATH', 'WORK_PATH', 'AUTH_DATA_HREF', 'BROWSERID_URL', 'GA_CLIENT_ID', 'GA_CLIENT_SECRET', 'GA_REDIRECT_URI', 'SUPPORT_EMAIL_ADDRESS', 'SUPPORT_PHONE_NUMBER', 'GDOCS_CLIENT_ID', 'GDOCS_CLIENT_SECRET', 'GITHUB_CLIENT_ID', 'GITHUB_CLIENT_SECRET', 'LIVE_SITE_URL', 'PUBLISH_SERVICE_URL']
check_names = flask_reserved_config_names + chime_reserved_config_names
for reserved_name in check_names:
self.assertFalse(hasattr(constants, reserved_name), u'The reserved config variable name {} is present in constants!'.format(reserved_name))
class TestApp (TestCase):
def setUp(self):
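# point the tempfile module at a per-test directory so tearDown can wipe everything at once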
self.old_tempdir, tempfile.tempdir = tempfile.tempdir, mkdtemp(prefix='chime-TestApp-')
self.work_path = mkdtemp(prefix='chime-repo-clones-')
self.publish_path = mkdtemp(prefix='chime-publish-path-')
repo_path = dirname(abspath(__file__)) + '/../test-app.git'
temp_repo_dir = mkdtemp(prefix='chime-root')
temp_repo_path = temp_repo_dir + '/test-app.git'
copytree(repo_path, temp_repo_path)
self.origin = ChimeRepo(temp_repo_path)
repo_functions.ignore_task_metadata_on_merge(self.origin)
self.clone1 = self.origin.clone(mkdtemp(prefix='chime-'))
repo_functions.ignore_task_metadata_on_merge(self.clone1)
fake_author_email = u'erica@example.com'
self.session = dict(email=fake_author_email)
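# set the git identity for test commits: the name fields are left blank, emails come from the fake session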
environ['GIT_AUTHOR_NAME'] = ' '
environ['GIT_COMMITTER_NAME'] = ' '
environ['GIT_AUTHOR_EMAIL'] = self.session['email']
environ['GIT_COMMITTER_EMAIL'] = self.session['email']
create_app_environ = {}
create_app_environ['SINGLE_USER'] = 'Yes'
create_app_environ['GA_CLIENT_ID'] = 'client_id'
create_app_environ['GA_CLIENT_SECRET'] = 'meow_secret'
self.ga_config_dir = mkdtemp(prefix='chime-config-')
create_app_environ['RUNNING_STATE_DIR'] = self.ga_config_dir
create_app_environ['WORK_PATH'] = self.work_path
create_app_environ['REPO_PATH'] = temp_repo_path
create_app_environ['AUTH_DATA_HREF'] = 'http://example.com/auth.csv'
create_app_environ['BROWSERID_URL'] = 'http://localhost'
create_app_environ['LIVE_SITE_URL'] = 'http://example.org/'
create_app_environ['PUBLISH_PATH'] = self.publish_path
create_app_environ['SUPPORT_EMAIL_ADDRESS'] = u'support@example.com'
create_app_environ['SUPPORT_PHONE_NUMBER'] = u'(123) 456-7890'
self.app = create_app(create_app_environ)
# write a tmp config file
config_values = {
"access_token": "meowser_token",
"refresh_token": "refresh_meows",
"profile_id": "12345678",
"project_domain": ""
}
with self.app.app_context():
google_api_functions.write_ga_config(config_values, self.app.config['RUNNING_STATE_DIR'])
random.choice = MagicMock(return_value="P")
self.test_client = self.app.test_client()
def tearDown(self):
rmtree(tempfile.tempdir)
tempfile.tempdir = self.old_tempdir
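# HTTMock handlers: each receives (url, request) and returns a canned response for URLs it recognizes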
def auth_csv_example_disallowed(self, url, request):
if url.geturl() == 'http://example.com/auth.csv':
return response(200, '''Email domain,Organization\n''')
raise Exception('Asked for unknown URL ' + url.geturl())
def auth_csv_example_allowed(self, url, request):
if url.geturl() == 'http://example.com/auth.csv':
return response(200, '''Email domain,Organization\nexample.com,Example Org\n*,Anyone''')
raise Exception('Asked for unknown URL ' + url.geturl())
def mock_persona_verify_erica(self, url, request):
if url.geturl() == 'https://verifier.login.persona.org/verify':
return response(200, '''{"status": "okay", "email": "erica@example.com"}''', headers=dict(Link='<https://api.github.com/user/337792/repos?page=1>; rel="prev", <https://api.github.com/user/337792/repos?page=1>; rel="first"'))
else:
return self.auth_csv_example_allowed(url, request)
def mock_persona_verify_non_roman(self, url, request):
if url.geturl() == 'https://verifier.login.persona.org/verify':
return response(200, '''{"status": "okay", "email": "੯ूᵕू ໒꒱ƶƵ@快速狐狸.com"}''', headers=dict(Link='<https://api.github.com/user/337792/repos?page=1>; rel="prev", <https://api.github.com/user/337792/repos?page=1>; rel="first"'))
else:
return self.auth_csv_example_allowed(url, request)
def mock_persona_verify_frances(self, url, request):
if url.geturl() == 'https://verifier.login.persona.org/verify':
return response(200, '''{"status": "okay", "email": "frances@example.com"}''', headers=dict(Link='<https://api.github.com/user/337792/repos?page=1>; rel="prev", <https://api.github.com/user/337792/repos?page=1>; rel="first"'))
else:
return self.auth_csv_example_allowed(url, request)
def mock_persona_verify_william(self, url, request):
if url.geturl() == 'https://verifier.login.persona.org/verify':
return response(200, '''{"status": "okay", "email": "william@example.org"}''', headers=dict(Link='<https://api.github.com/user/337792/repos?page=1>; rel="prev", <https://api.github.com/user/337792/repos?page=1>; rel="first"'))
else:
return self.auth_csv_example_allowed(url, request)
def mock_google_authorization(self, url, request):
if 'https://accounts.google.com/o/oauth2/auth' in url.geturl():
return response(200, '''{"access_token": "meowser_token", "token_type": "meowser_type", "refresh_token": "refresh_meows", "expires_in": 3920}''')
else:
return self.auth_csv_example_allowed(url, request)
def mock_successful_google_callback(self, url, request):
if google_api_functions.GOOGLE_ANALYTICS_TOKENS_URL in url.geturl():
return response(200, '''{"access_token": "meowser_token", "token_type": "meowser_type", "refresh_token": "refresh_meows", "expires_in": 3920}''')
elif google_api_functions.GOOGLE_PLUS_WHOAMI_URL in url.geturl():
return response(200, '''{"displayName": "Jane Doe", "emails": [{"type": "account", "value": "erica@example.com"}]}''')
elif google_api_functions.GOOGLE_ANALYTICS_PROPERTIES_URL in url.geturl():
return response(200, '''{"items": [{"defaultProfileId": "12345678", "name": "Property One", "websiteUrl": "http://propertyone.example.com"}, {"defaultProfileId": "87654321", "name": "Property Two", "websiteUrl": "http://propertytwo.example.com"}]}''')
else:
return self.auth_csv_example_allowed(url, request)
def mock_failed_google_callback(self, url, request):
if google_api_functions.GOOGLE_ANALYTICS_TOKENS_URL in url.geturl():
return response(500, '''{}''')
elif google_api_functions.GOOGLE_PLUS_WHOAMI_URL in url.geturl():
return response(200, '''{"displayName": "Jane Doe", "emails": [{"type": "account", "value": "erica@example.com"}]}''')
elif google_api_functions.GOOGLE_ANALYTICS_PROPERTIES_URL in url.geturl():
return response(200, '''{"items": [{"defaultProfileId": "12345678", "name": "Property One", "websiteUrl": "http://propertyone.example.com"}, {"defaultProfileId": "87654321", "name": "Property Two", "websiteUrl": "http://propertytwo.example.com"}]}''')
else:
return self.auth_csv_example_allowed(url, request)
def mock_google_invalid_credentials_response(self, url, request):
if 'https://www.googleapis.com/analytics/' in url.geturl() or google_api_functions.GOOGLE_ANALYTICS_PROPERTIES_URL in url.geturl():
return response(401, '''{"error": {"code": 401, "message": "Invalid Credentials", "errors": [{"locationType": "header", "domain": "global", "message": "Invalid Credentials", "reason": "authError", "location": "Authorization"}]}}''')
elif google_api_functions.GOOGLE_PLUS_WHOAMI_URL in url.geturl():
return response(403, '''{"error": {"code": 403, "message": "Access Not Configured. The API (Google+ API) is not enabled for your project. Please use the Google Developers Console to update your configuration.", "errors": [{"domain": "usageLimits", "message": "Access Not Configured. The API (Google+ API) is not enabled for your project. Please use the Google Developers Console to update your configuration.", "reason": "accessNotConfigured", "extendedHelp": "https://console.developers.google.com"}]}}''')
else:
return self.auth_csv_example_allowed(url, request)
def mock_google_no_properties_response(self, url, request):
if google_api_functions.GOOGLE_ANALYTICS_PROPERTIES_URL in url.geturl():
return response(200, '''{"kind": "analytics#webproperties", "username": "erica@example.com", "totalResults": 0, "startIndex": 1, "itemsPerPage": 1000, "items": []}''')
elif google_api_functions.GOOGLE_PLUS_WHOAMI_URL in url.geturl():
return response(200, '''{"displayName": "Jane Doe", "emails": [{"type": "account", "value": "erica@example.com"}]}''')
else:
return self.auth_csv_example_allowed(url, request)
def mock_google_analytics(self, url, request):
start_date = (date.today() - timedelta(days=7)).isoformat()
end_date = date.today().isoformat()
url_string = url.geturl()
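# the URL-encoded filter below decodes to ga:pagePath=~(hello.html|hello)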
if 'ids=ga%3A12345678' in url_string and 'end-date=' + end_date in url_string and 'start-date=' + start_date in url_string and 'filters=ga%3ApagePath%3D~%28hello.html%7Chello%29' in url_string:
return response(200, '''{"ga:previousPagePath": "/about/", "ga:pagePath": "/lib/", "ga:pageViews": "12", "ga:avgTimeOnPage": "56.17", "ga:exiteRate": "43.75", "totalsForAllResults": {"ga:pageViews": "24", "ga:avgTimeOnPage": "67.36363636363636"}}''')
else:
return self.auth_csv_example_allowed(url, request)
def mock_internal_server_error(self, url, request):
from flask import abort
abort(500)
def mock_exception(self, url, request):
raise Exception(u'This is a generic exception.')
# in TestApp
def test_no_cache_headers(self):
''' The expected no-cache headers are in the server response.
'''
with HTTMock(self.auth_csv_example_allowed):
with HTTMock(self.mock_persona_verify_erica):
erica = ChimeTestClient(self.test_client, self)
erica.sign_in(email='erica@example.com')
erica.open_link(constants.ROUTE_ACTIVITY)
# The static no-cache headers are as expected
self.assertEqual(erica.headers['Cache-Control'], 'no-store, no-cache, must-revalidate, post-check=0, pre-check=0, max-age=0')
self.assertEqual(erica.headers['Pragma'], 'no-cache')
self.assertEqual(erica.headers['Expires'], '-1')
# The last modified date is within 10 seconds of now
last_modified = datetime.strptime(erica.headers['Last-Modified'], '%Y-%m-%d %H:%M:%S.%f')
delta = datetime.now() - last_modified
self.assertTrue(delta.seconds < 10)
# in TestApp
def test_bad_login(self):
''' A user whose email isn't allowed by the auth CSV can sign in but can't start activities.
'''
response = self.test_client.get(constants.ROUTE_ACTIVITY)
self.assertFalse('erica@example.com' in response.data)
with HTTMock(self.mock_persona_verify_erica):
response = self.test_client.post('/sign-in', data={'assertion': 'erica@example.com'})
self.assertEqual(response.status_code, 200)
with HTTMock(self.auth_csv_example_disallowed):
response = self.test_client.get(constants.ROUTE_ACTIVITY)
self.assertFalse('Create' in response.data)
# in TestApp
def test_login(self):
''' Check basic log in / log out flow without talking to Persona.
'''
response = self.test_client.get(constants.ROUTE_ACTIVITY)
self.assertFalse('Start' in response.data)
with HTTMock(self.mock_persona_verify_erica):
response = self.test_client.post('/sign-in', data={'assertion': 'erica@example.com'})
self.assertEqual(response.status_code, 200)
with HTTMock(self.auth_csv_example_allowed):
response = self.test_client.get(constants.ROUTE_ACTIVITY)
self.assertTrue('Start' in response.data)
self.assertTrue('http://example.org' in response.data, 'Should see LIVE_SITE_URL in response')
response = self.test_client.post('/sign-out')
self.assertEqual(response.status_code, 200)
response = self.test_client.get(constants.ROUTE_ACTIVITY)
self.assertFalse('Start' in response.data)
# in TestApp
def test_login_splat(self):
''' The auth CSV's wildcard (*) row lets users from any email domain sign in.
'''
response = self.test_client.get(constants.ROUTE_ACTIVITY)
self.assertFalse('Start' in response.data)
with HTTMock(self.mock_persona_verify_william):
response = self.test_client.post('/sign-in', data={'assertion': 'william@example.org'})
self.assertEqual(response.status_code, 200)
with HTTMock(self.auth_csv_example_allowed):
response = self.test_client.get(constants.ROUTE_ACTIVITY)
self.assertTrue('Start' in response.data)
# in TestApp
def test_default_auth_href_warning(self):
''' A warning is displayed when AUTH_DATA_HREF is left at its default value.
'''
with patch('chime.view_functions.AUTH_DATA_HREF_DEFAULT', new='http://example.com/auth.csv'):
response = self.test_client.get('/not-allowed')
expected = 'Your Chime <code>AUTH_DATA_HREF</code> is set to default value.'
self.assertTrue(expected in response.data, 'Should see a warning')
# in TestApp
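# shrink the auth re-check lifespan to one second so the timeout path can be exercised quickly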
@patch('chime.view_functions.AUTH_CHECK_LIFESPAN', new=1.0)
def test_login_timeout(self):
''' Check basic log in / log out flow with auth check lifespan.
'''
response = self.test_client.get(constants.ROUTE_ACTIVITY)
self.assertFalse('Start' in response.data)
with HTTMock(self.mock_persona_verify_erica):
response = self.test_client.post('/sign-in', data={'assertion': 'erica@example.com'})
self.assertEqual(response.status_code, 200)
with HTTMock(self.auth_csv_example_allowed):
response = self.test_client.get(constants.ROUTE_ACTIVITY)
self.assertTrue('Start' in response.data)
with patch('chime.view_functions.get_auth_data_file') as get_auth_data_file:
# Show that checking email status does not require a call to the auth CSV.
response = self.test_client.get(constants.ROUTE_ACTIVITY)
self.assertEqual(response.status_code, 200, 'Should have worked')
self.assertEqual(get_auth_data_file.call_count, 0, 'Should not have called get_auth_data_file()')
# Show that the auth CSV is re-fetched once the timeout period has passed.
time.sleep(1.1)
response = self.test_client.get(constants.ROUTE_ACTIVITY)
self.assertEqual(get_auth_data_file.call_count, 1, 'Should have called get_auth_data_file()')
with HTTMock(self.auth_csv_example_allowed):
# Show that email status was correctly updated with a call to the CSV.
response = self.test_client.get(constants.ROUTE_ACTIVITY)
self.assertEqual(response.status_code, 200, 'Should have worked')
response = self.test_client.post('/sign-out')
self.assertEqual(response.status_code, 200)
response = self.test_client.get(constants.ROUTE_ACTIVITY)
self.assertFalse('Start' in response.data)
# in TestApp
def test_need_description_to_start_activity(self):
''' You need a description to start a new activity
'''
with HTTMock(self.auth_csv_example_allowed):
with HTTMock(self.mock_persona_verify_erica):
erica = ChimeTestClient(self.test_client, self)
erica.sign_in(email='erica@example.com')
pattern_template_comment_stripped = sub(ur'<!--|-->', u'', PATTERN_TEMPLATE_COMMENT)
flash_message_text = u'Please describe what you\'re doing when you start a new activity!'
# start a new task without a description
erica.open_link(constants.ROUTE_ACTIVITY)
erica.start_task(description=u'')
# the activities-list template reloaded
comments = erica.soup.findAll(text=lambda text: isinstance(text, Comment))
self.assertTrue(pattern_template_comment_stripped.format(u'activities-list') in comments)
# verify that there's a flash message warning about submitting an empty description
self.assertEqual(flash_message_text, erica.soup.find('li', class_='flash').text)
# in TestApp
def test_whitespace_stripped_from_description(self):
''' Carriage returns, tabs, and spaces are stripped from task descriptions before they're saved.
'''
with HTTMock(self.auth_csv_example_allowed):
with HTTMock(self.mock_persona_verify_erica):
erica = ChimeTestClient(self.test_client, self)
erica.sign_in(email='erica@example.com')
# start a new task with a lot of random whitespace
erica.open_link(constants.ROUTE_ACTIVITY)
task_description = u'I think\n\r\n\rI am so \t\t\t coool!!\n\n\nYeah.\n\nOK\n\rERWEREW dkkdk'
task_description_stripped = u'I think I am so coool!! Yeah. OK ERWEREW dkkdk'
erica.start_task(description=task_description)
# the stripped comment is in the HTML
pattern_task_comment_stripped = sub(ur'<!--|-->', u'', PATTERN_TASK_COMMENT)
comments = erica.soup.findAll(text=lambda text: isinstance(text, Comment))
self.assertTrue(pattern_task_comment_stripped.format(task_description_stripped) in comments)
# the stripped comment is in the task metadata
repo = view_functions.get_repo(repo_path=self.app.config['REPO_PATH'], work_path=self.app.config['WORK_PATH'], email='erica@example.com')
task_metadata = repo_functions.get_task_metadata_for_branch(repo, erica.get_branch_name())
self.assertEqual(task_description_stripped, task_metadata['task_description'])
# in TestApp
def test_notification_on_create_category(self):
''' You get a flash notification when you create a category
'''
with HTTMock(self.auth_csv_example_allowed):
with HTTMock(self.mock_persona_verify_erica):
erica = ChimeTestClient(self.app.test_client(), self)
erica.sign_in('erica@example.com')
# Start a new task
erica.open_link(constants.ROUTE_ACTIVITY)
erica.start_task(description=u'Lick Water Droplets From Leaves for Leopard Geckos')
# Get the branch name
branch_name = erica.get_branch_name()
# Enter the "other" folder
other_slug = u'other'
erica.follow_link(href='/tree/{}/edit/{}/'.format(branch_name, other_slug))
# Create a category
category_name = u'Rubber Plants'
category_slug = slugify(category_name)
erica.add_category(category_name=category_name)
# the category is correctly represented on the page
self.assertIsNotNone(erica.soup.find(lambda tag: bool(tag.name == 'a' and category_name in tag.text)))
self.assertIsNotNone(erica.soup.find(lambda tag: bool(tag.name == 'a' and category_slug in tag['href'])))
# a flash message appeared
self.assertEqual(PATTERN_FLASH_CREATED_CATEGORY.format(title=category_name), erica.soup.find('li', class_='flash').text)
# in TestApp
def test_notifications_on_create_edit_and_delete_article(self):
''' You get flash notifications when you create, edit, and delete an article
'''
with HTTMock(self.auth_csv_example_allowed):
with HTTMock(self.mock_persona_verify_erica):
erica = ChimeTestClient(self.app.test_client(), self)
erica.sign_in('erica@example.com')
# Start a new task
erica.open_link(constants.ROUTE_ACTIVITY)
erica.start_task(description=u'Lick Water Droplets From Leaves for Leopard Geckos')
# Get the branch name
branch_name = erica.get_branch_name()
# Enter the "other" folder
other_slug = u'other'
erica.follow_link(href='/tree/{}/edit/{}/'.format(branch_name, other_slug))
# Create a category and sub-category
category_name = u'Rubber Plants'
subcategory_name = u'Leaves'
erica.add_category(category_name=category_name)
erica.add_subcategory(subcategory_name=subcategory_name)
subcategory_path = erica.path
# Create an article
article_name = u'Water Droplets'
erica.add_article(article_name=article_name)
# a flash message appeared
self.assertEqual(PATTERN_FLASH_CREATED_ARTICLE.format(title=article_name), erica.soup.find('li', class_='flash').text)
# edit the article
erica.edit_article(title_str=article_name, body_str=u'Watch out for poisonous insects.')
# a flash message appeared
self.assertEqual(PATTERN_FLASH_SAVED_ARTICLE.format(title=article_name), erica.soup.find('li', class_='flash').text)
# delete the article
erica.open_link(subcategory_path)
erica.delete_article(article_name)
# a flash message appeared
self.assertEqual(PATTERN_FLASH_DELETED_ARTICLE.format(title=article_name), erica.soup.find('li', class_='flash').text)
# in TestApp
def test_branches(self):
''' Check basic branching functionality.
'''
fake_task_description = u'do things for somebody else'
fake_author_email = u'erica@example.com'
fake_endorser_email = u'frances@example.com'
fake_page_slug = u'hello'
fake_page_path = u'{}/index.{}'.format(fake_page_slug, constants.CONTENT_FILE_EXTENSION)
fake_page_content = u'People of earth we salute you.'
with HTTMock(self.mock_persona_verify_erica):
self.test_client.post('/sign-in', data={'assertion': fake_author_email})
with HTTMock(self.auth_csv_example_allowed):
# create a new branch
response = self.test_client.post('/start', data={'task_description': fake_task_description}, follow_redirects=True)
self.assertTrue(PATTERN_TEMPLATE_COMMENT.format('articles-list') in response.data)
self.assertTrue(PATTERN_TASK_COMMENT.format(fake_task_description) in response.data)
self.assertTrue(PATTERN_AUTHOR_COMMENT.format(fake_author_email) in response.data)
# extract the generated branch name from the returned HTML
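# note: the tripled braces format to a fixed-width regex, r'(.{N})' with N = BRANCH_NAME_LENGTH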
generated_branch_search = search(r'<!-- branch: (.{{{}}}) -->'.format(repo_functions.BRANCH_NAME_LENGTH), response.data)
self.assertIsNotNone(generated_branch_search)
try:
generated_branch_name = generated_branch_search.group(1)
except AttributeError:
raise Exception('No match for generated branch name.')
with HTTMock(self.mock_google_analytics):
# create a new file
response = self.test_client.post('/tree/{}/edit/'.format(generated_branch_name),
data={'action': 'create', 'create_what': constants.ARTICLE_LAYOUT, 'request_path': fake_page_slug},
follow_redirects=True)
self.assertEqual(response.status_code, 200)
self.assertTrue(fake_page_path in response.data)
# get the index page for the branch and verify that the new file is listed
response = self.test_client.get('/tree/{}/edit/'.format(generated_branch_name), follow_redirects=True)
self.assertEqual(response.status_code, 200)
self.assertTrue(PATTERN_BRANCH_COMMENT.format(generated_branch_name) in response.data)
self.assertTrue(PATTERN_FILE_COMMENT.format(**{"file_name": fake_page_slug, "file_title": fake_page_slug, "file_type": constants.ARTICLE_LAYOUT}) in response.data)
# get the edit page for the new file and extract the hexsha value
response = self.test_client.get('/tree/{}/edit/{}'.format(generated_branch_name, fake_page_path))
self.assertEqual(response.status_code, 200)
self.assertTrue(fake_page_path in response.data)
hexsha = search(r'<input name="hexsha" value="(\w+)"', response.data).group(1)
# now save the file with new content
response = self.test_client.post('/tree/{}/save/{}'.format(generated_branch_name, fake_page_path),
data={'layout': constants.ARTICLE_LAYOUT, 'hexsha': hexsha,
'en-title': 'Greetings',
'en-body': u'{}\n'.format(fake_page_content),
'fr-title': '', 'fr-body': '',
'url-slug': u'{}/index'.format(fake_page_slug)},
follow_redirects=True)
self.assertEqual(response.status_code, 200)
self.assertTrue(fake_page_path in response.data)
self.assertTrue(fake_page_content in response.data)
# Request feedback on the change
with HTTMock(self.auth_csv_example_allowed):
response = self.test_client.post('/tree/{}/'.format(generated_branch_name), data={'comment_text': u'', 'request_feedback': u'Request Feedback'}, follow_redirects=True)
self.assertEqual(response.status_code, 200)
self.assertTrue(u'{} {}'.format(fake_author_email, repo_functions.ACTIVITY_FEEDBACK_MESSAGE) in response.data)
#
#
# Log in as a different person
with HTTMock(self.mock_persona_verify_frances):
self.test_client.post('/sign-in', data={'assertion': fake_endorser_email})
# Endorse the change
with HTTMock(self.auth_csv_example_allowed):
response = self.test_client.post('/tree/{}/'.format(generated_branch_name), data={'comment_text': u'', 'endorse_edits': 'Endorse Edits'}, follow_redirects=True)
self.assertEqual(response.status_code, 200)
self.assertTrue(u'{} {}'.format(fake_endorser_email, repo_functions.ACTIVITY_ENDORSED_MESSAGE) in response.data)
# And publish the change!
with HTTMock(self.auth_csv_example_allowed):
response = self.test_client.post('/tree/{}/'.format(generated_branch_name), data={'comment_text': u'', 'merge': 'Publish'}, follow_redirects=True)
self.assertEqual(response.status_code, 200)
# should've been redirected to the front page
self.assertTrue(PATTERN_TEMPLATE_COMMENT.format('activities-list') in response.data)
# the activity we just published should be listed under 'recently published activities'
self.assertTrue(generated_branch_name in response.data)
self.assertTrue(response.data.find(generated_branch_name) > response.data.find(u'Recently Published Activities'))
# Look in the published directory and see if the words are there.
with open(join(self.publish_path, fake_page_slug, 'index.html')) as file:
self.assertTrue(fake_page_content in file.read())
# in TestApp
def test_delete_strange_tasks(self):
''' Delete a task that you can see on the activity list but haven't viewed or edited.
'''
fake_author_email = u'erica@example.com'
with HTTMock(self.mock_persona_verify_erica):
self.test_client.post('/sign-in', data={'assertion': fake_author_email})
with HTTMock(self.auth_csv_example_allowed):
# start a new branch via the http interface
# invokes view_functions/get_repo which creates a clone
disposable_task_description = u'unimportant task for unimportant person'
response = self.test_client.post('/start', data={'task_description': disposable_task_description}, follow_redirects=True)
self.assertEqual(response.status_code, 200)
self.assertTrue(PATTERN_TEMPLATE_COMMENT.format('articles-list') in response.data)
self.assertTrue(PATTERN_TASK_COMMENT.format(disposable_task_description) in response.data)
# create a branch programmatically on our pre-made clone
check_task_description = u'Creating a Star Child for Ancient Aliens'
check_branch = repo_functions.get_start_branch(self.clone1, 'master', check_task_description, fake_author_email)
self.assertTrue(check_branch.name in self.clone1.branches)
self.assertTrue(check_branch.name in self.origin.branches)
# verify that the branch doesn't exist in our new clone
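# fake a signed-in session inside a test request context so get_repo can build a per-user clone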
with self.app.app_context():
with self.app.test_request_context():
from flask import session
session['email'] = fake_author_email
new_clone = view_functions.get_repo(flask_app=self.app)
self.assertFalse(check_branch.name in new_clone.branches)
# load the activity list and verify that the branch is visible there
response = self.test_client.get(constants.ROUTE_ACTIVITY, follow_redirects=True)
self.assertEqual(response.status_code, 200)
self.assertTrue(check_branch.name in response.data)
# Delete the activity
response = self.test_client.post('/update', data={'abandon': 'Delete', 'branch': '{}'.format(check_branch.name)}, follow_redirects=True)
self.assertEqual(response.status_code, 200)
self.assertFalse(check_branch.name in response.data)
# in TestApp
def test_review_process(self):
''' Check the review process
'''
fake_task_description = u'groom pets for pet owners'
fake_author_email = u'erica@example.com'
fake_endorser_email = u'frances@example.com'
fake_page_slug = u'hello'
# log in
with HTTMock(self.mock_persona_verify_erica):
self.test_client.post('/sign-in', data={'assertion': fake_author_email})
with HTTMock(self.auth_csv_example_allowed):
# create a new branch
response = self.test_client.post('/start', data={'task_description': fake_task_description}, follow_redirects=True)
self.assertEqual(response.status_code, 200)
self.assertTrue(PATTERN_TEMPLATE_COMMENT.format('articles-list') in response.data)
# extract the generated branch name from the returned HTML
generated_branch_search = search(r'<!-- branch: (.{{{}}}) -->'.format(repo_functions.BRANCH_NAME_LENGTH), response.data)
self.assertIsNotNone(generated_branch_search)
try:
generated_branch_name = generated_branch_search.group(1)
except AttributeError:
raise Exception('No match for generated branch name.')
# get the activity list page
response = self.test_client.get(constants.ROUTE_ACTIVITY, follow_redirects=True)
self.assertEqual(response.status_code, 200)
# verify that the project is listed in the edited column
soup = BeautifulSoup(response.data)
pub_ul = soup.select("#activity-list-edited")[0]
# there should be an HTML comment with the branch name
comments = pub_ul.findAll(text=lambda text: isinstance(text, Comment))
found = False
for comment in comments:
if generated_branch_name in comment:
found = True
pub_li = comment.find_parent('li')
# and the activity title wrapped in an a tag
self.assertIsNotNone(pub_li.find('a', text=fake_task_description))
self.assertEqual(True, found)
# create a new file
response = self.test_client.post('/tree/{}/edit/'.format(generated_branch_name),
data={'action': 'create', 'create_what': constants.ARTICLE_LAYOUT, 'request_path': fake_page_slug},
follow_redirects=True)
self.assertEqual(response.status_code, 200)
# get the edit page for the branch
response = self.test_client.get('/tree/{}/edit/'.format(generated_branch_name), follow_redirects=True)
self.assertEqual(response.status_code, 200)
# verify that there's a 'request feedback' button
soup = BeautifulSoup(response.data)
self.assertIsNotNone(soup.find("button", {"data-test-id": "request-feedback-button"}))
# get the overview page for the branch
response = self.test_client.get('/tree/{}/'.format(generated_branch_name), follow_redirects=True)
self.assertEqual(response.status_code, 200)
# verify that there's a 'request feedback' button
soup = BeautifulSoup(response.data)
self.assertIsNotNone(soup.find("button", {"data-test-id": "request-feedback-button"}))
# get the activity list page
response = self.test_client.get(constants.ROUTE_ACTIVITY, follow_redirects=True)
self.assertEqual(response.status_code, 200)
# verify that the project is listed in the edited column
soup = BeautifulSoup(response.data)
pub_ul = soup.select("#activity-list-edited")[0]
# there should be an HTML comment with the branch name
comments = pub_ul.findAll(text=lambda text: isinstance(text, Comment))
found = False
for comment in comments:
if generated_branch_name in comment:
found = True
pub_li = comment.find_parent('li')
# and the activity title wrapped in an a tag
self.assertIsNotNone(pub_li.find('a', text=fake_task_description))
self.assertEqual(True, found)
# Request feedback on the change
with HTTMock(self.auth_csv_example_allowed):
response = self.test_client.post('/tree/{}/'.format(generated_branch_name), data={'comment_text': u'', 'request_feedback': u'Request Feedback'}, follow_redirects=True)
self.assertEqual(response.status_code, 200)
self.assertTrue(u'{} {}'.format(fake_author_email, repo_functions.ACTIVITY_FEEDBACK_MESSAGE) in response.data)
#
#
# Log in as a different person
with HTTMock(self.mock_persona_verify_frances):
self.test_client.post('/sign-in', data={'assertion': fake_endorser_email})
with HTTMock(self.auth_csv_example_allowed):
# get the edit page for the branch
response = self.test_client.get('/tree/{}/edit/'.format(generated_branch_name), follow_redirects=True)
self.assertEqual(response.status_code, 200)
# verify that there's an 'Endorse Edits' button
soup = BeautifulSoup(response.data)
self.assertIsNotNone(soup.find("button", {"data-test-id": "endorse-edits-button"}))
# get the overview page for the branch
response = self.test_client.get('/tree/{}/'.format(generated_branch_name), follow_redirects=True)
self.assertEqual(response.status_code, 200)
# verify that there's an 'Endorse Edits' button
soup = BeautifulSoup(response.data)
self.assertIsNotNone(soup.find("button", {"data-test-id": "endorse-edits-button"}))
# get the activity list page
response = self.test_client.get(constants.ROUTE_ACTIVITY, follow_redirects=True)
self.assertEqual(response.status_code, 200)
# verify that the project is listed in the feedback needed column
soup = BeautifulSoup(response.data)
pub_ul = soup.select("#activity-list-feedback")[0]
# there should be an HTML comment with the branch name
comment = pub_ul.findAll(text=lambda text: isinstance(text, Comment))[0]
self.assertTrue(generated_branch_name in comment)
pub_li = comment.find_parent('li')
# and the activity title wrapped in an a tag
self.assertIsNotNone(pub_li.find('a', text=fake_task_description))
# Endorse the change
with HTTMock(self.auth_csv_example_allowed):
response = self.test_client.post('/tree/{}/'.format(generated_branch_name), data={'comment_text': u'', 'endorse_edits': 'Endorse Edits'}, follow_redirects=True)
self.assertEqual(response.status_code, 200)
self.assertTrue(u'{} {}'.format(fake_endorser_email, repo_functions.ACTIVITY_ENDORSED_MESSAGE) in response.data)
# log back in as the original editor
with HTTMock(self.mock_persona_verify_erica):
self.test_client.post('/sign-in', data={'assertion': fake_author_email})
with HTTMock(self.auth_csv_example_allowed):
# get the edit page for the branch
response = self.test_client.get('/tree/{}/edit/'.format(generated_branch_name), follow_redirects=True)
self.assertEqual(response.status_code, 200)
# verify that there's a 'publish' button
soup = BeautifulSoup(response.data)
self.assertIsNotNone(soup.find("button", {"data-test-id": "publish-button"}))
# get the overview page for the branch
response = self.test_client.get('/tree/{}/'.format(generated_branch_name), follow_redirects=True)
self.assertEqual(response.status_code, 200)
# verify that there's a 'publish' button
soup = BeautifulSoup(response.data)
self.assertIsNotNone(soup.find("button", {"data-test-id": "publish-button"}))
# get the activity list page
response = self.test_client.get(constants.ROUTE_ACTIVITY, follow_redirects=True)
self.assertEqual(response.status_code, 200)
# verify that the project is listed in the ready to publish column
soup = BeautifulSoup(response.data)
pub_ul = soup.select("#activity-list-endorsed")[0]
# there should be an HTML comment with the branch name
comment = pub_ul.findAll(text=lambda text: isinstance(text, Comment))[0]
self.assertTrue(generated_branch_name in comment)
pub_li = comment.find_parent('li')
# and the activity title wrapped in an a tag
self.assertIsNotNone(pub_li.find('a', text=fake_task_description))
# And publish the change!
with HTTMock(self.auth_csv_example_allowed):
response = self.test_client.post('/tree/{}/'.format(generated_branch_name), data={'comment_text': u'', 'merge': 'Publish'}, follow_redirects=True)
self.assertEqual(response.status_code, 200)
# should've been redirected to the front page
self.assertTrue(PATTERN_TEMPLATE_COMMENT.format('activities-list') in response.data)
# verify that the project is listed in the recently published column
soup = BeautifulSoup(response.data)
pub_ul = soup.select("#activity-list-published")[0]
# there should be an HTML comment with the branch name
comment = pub_ul.findAll(text=lambda text: isinstance(text, Comment))[0]
self.assertTrue(generated_branch_name in comment)
pub_li = comment.find_parent('li')
# and the activity title wrapped in an a tag
self.assertIsNotNone(pub_li.find('a', text=fake_task_description))
# in TestApp
def test_get_request_does_not_create_branch(self):
''' Navigating to a made-up URL should not create a branch
'''
with HTTMock(self.mock_persona_verify_erica):
self.test_client.post('/sign-in', data={'assertion': 'erica@example.com'})
with HTTMock(self.auth_csv_example_allowed):
fake_branch_name = 'this-should-not-create-a-branch'
#
# edit
#
response = self.test_client.get('/tree/{}/edit/'.format(fake_branch_name), follow_redirects=True)
self.assertEqual(response.status_code, 404)
self.assertTrue(view_functions.MESSAGE_ACTIVITY_DELETED in response.data)
# the branch path should not be in the returned HTML
self.assertFalse(PATTERN_BRANCH_COMMENT.format(fake_branch_name) in response.data)
# the branch name should not be in the origin's branches list
self.assertFalse(fake_branch_name in self.origin.branches)
#
# history
#
response = self.test_client.get('/tree/{}/history/'.format(fake_branch_name), follow_redirects=True)
self.assertEqual(response.status_code, 404)
self.assertTrue(view_functions.MESSAGE_ACTIVITY_DELETED in response.data)
# the branch path should not be in the returned HTML
self.assertFalse(PATTERN_BRANCH_COMMENT.format(fake_branch_name) in response.data)
# the branch name should not be in the origin's branches list
self.assertFalse(fake_branch_name in self.origin.branches)
#
# view
#
response = self.test_client.get('/tree/{}/view/'.format(fake_branch_name), follow_redirects=True)
self.assertEqual(response.status_code, 404)
self.assertTrue(view_functions.MESSAGE_ACTIVITY_DELETED in response.data)
# the branch path should not be in the returned HTML
self.assertFalse(PATTERN_BRANCH_COMMENT.format(fake_branch_name) in response.data)
# the branch name should not be in the origin's branches list
self.assertFalse(fake_branch_name in self.origin.branches)
# in TestApp
def test_post_request_does_not_create_branch(self):
''' Certain POSTs to a made-up URL should not create a branch
'''
fake_page_slug = u'hello'
fake_page_path = u'{}/index.{}'.format(fake_page_slug, constants.CONTENT_FILE_EXTENSION)
with HTTMock(self.mock_persona_verify_erica):
self.test_client.post('/sign-in', data={'assertion': 'erica@example.com'})
with HTTMock(self.auth_csv_example_allowed):
#
# try creating an article in a non-existent branch
#
fake_branch_name = repo_functions.make_branch_name()
response = self.test_client.post('/tree/{}/edit/'.format(fake_branch_name), data={'action': 'create', 'create_what': constants.ARTICLE_LAYOUT, 'request_path': fake_page_slug}, follow_redirects=True)
self.assertEqual(response.status_code, 404)
self.assertTrue(view_functions.MESSAGE_ACTIVITY_DELETED in response.data)
# the branch name should not be in the origin's branches list
self.assertFalse(fake_branch_name in self.origin.branches)
#
# create a branch, then delete it right before POSTing a save command
#
fake_task_description = u'Doing fake stuff for Nobody'
response = self.test_client.post('/start', data={'task_description': fake_task_description}, follow_redirects=True)
# we should be on the new task's edit page
self.assertEqual(response.status_code, 200)
self.assertTrue(PATTERN_TEMPLATE_COMMENT.format('articles-list') in response.data)
self.assertTrue(PATTERN_TASK_COMMENT.format(fake_task_description) in response.data)
# extract the generated branch name from the returned HTML
generated_branch_search = search(r'<!-- branch: (.{{{}}}) -->'.format(repo_functions.BRANCH_NAME_LENGTH), response.data)
self.assertIsNotNone(generated_branch_search)
try:
generated_branch_name = generated_branch_search.group(1)
except AttributeError:
raise Exception('No match for generated branch name.')
# create a new article
response = self.test_client.post('/tree/{}/edit/'.format(generated_branch_name), data={'action': 'create', 'create_what': constants.ARTICLE_LAYOUT, 'request_path': fake_page_slug}, follow_redirects=True)
self.assertEqual(response.status_code, 200)
self.assertTrue(PATTERN_TEMPLATE_COMMENT.format('article-edit') in response.data)
# load the article list and verify that the new article is listed
response = self.test_client.get('/tree/{}/edit/'.format(generated_branch_name), follow_redirects=True)
self.assertEqual(response.status_code, 200)
self.assertTrue(PATTERN_TEMPLATE_COMMENT.format('articles-list') in response.data)
self.assertTrue(PATTERN_BRANCH_COMMENT.format(generated_branch_name) in response.data)
self.assertTrue(PATTERN_FILE_COMMENT.format(**{"file_name": fake_page_slug, "file_title": fake_page_slug, "file_type": constants.ARTICLE_LAYOUT}) in response.data)
# load the article edit page and grab the hexsha from the form
response = self.test_client.get('/tree/{}/edit/{}'.format(generated_branch_name, fake_page_path))
self.assertEqual(response.status_code, 200)
hexsha = search(r'<input name="hexsha" value="(\w+)"', response.data).group(1)
# delete the branch
response = self.test_client.post('/update', data={'abandon': 'Delete', 'branch': '{}'.format(generated_branch_name)}, follow_redirects=True)
self.assertEqual(response.status_code, 200)
self.assertFalse(generated_branch_name in response.data)
# try submitting a change to the article
response = self.test_client.post('/tree/{}/save/{}'.format(generated_branch_name, fake_page_path), data={'layout': constants.ARTICLE_LAYOUT, 'hexsha': hexsha, 'en-title': 'Greetings', 'en-body': 'Hello world.\n', 'fr-title': '', 'fr-body': '', 'url-slug': 'hello'}, follow_redirects=True)
self.assertEqual(response.status_code, 404)
self.assertTrue(view_functions.MESSAGE_ACTIVITY_DELETED in response.data)
# the task name should not be in the returned HTML
self.assertFalse(PATTERN_BRANCH_COMMENT.format(fake_task_description) in response.data)
# the branch name should not be in the origin's branches list
self.assertFalse('{}'.format(generated_branch_name) in self.origin.branches)
# in TestApp
def test_accessing_local_branch_fetches_remote(self):
''' GETting or POSTing to a URL that indicates a branch that exists remotely but not locally
fetches the remote branch and allows access
'''
fake_author_email = u'erica@example.com'
with HTTMock(self.mock_persona_verify_erica):
self.test_client.post('/sign-in', data={'assertion': fake_author_email})
with HTTMock(self.auth_csv_example_allowed):
# start a new branch via the http interface
# invokes view_functions/get_repo which creates a clone
disposable_task_description = u'unimportant task for unimportant person'
response = self.test_client.post('/start', data={'task_description': disposable_task_description}, follow_redirects=True)
self.assertEqual(response.status_code, 200)
self.assertTrue(PATTERN_TEMPLATE_COMMENT.format('articles-list') in response.data)
self.assertTrue(PATTERN_TASK_COMMENT.format(disposable_task_description) in response.data)
# create a branch programmatically on our pre-made clone
check_task_description = u'the branch we are checking for for just me'
check_branch = repo_functions.get_start_branch(self.clone1, 'master', check_task_description, fake_author_email)
self.assertTrue(check_branch.name in self.clone1.branches)
self.assertTrue(check_branch.name in self.origin.branches)
# verify that the branch doesn't exist in our new clone
with self.app.app_context():
with self.app.test_request_context():
from flask import session
session['email'] = fake_author_email
new_clone = view_functions.get_repo(flask_app=self.app)
self.assertFalse(check_branch.name in new_clone.branches)
# request an edit page for the check branch through the http interface
response = self.test_client.get('/tree/{}/edit/'.format(check_branch.name), follow_redirects=True)
self.assertEqual(response.status_code, 200)
# the task description should be in the returned HTML
self.assertTrue(PATTERN_TEMPLATE_COMMENT.format('articles-list') in response.data)
self.assertTrue(PATTERN_TASK_COMMENT.format(check_task_description) in response.data)
# the branch name should now be in the original repo's branches list
self.assertTrue(check_branch.name in new_clone.branches)
# in TestApp
def test_git_merge_strategy_implemented(self):
''' The Git merge strategy has been implemented for a new clone.
'''
fake_author_email = u'erica@example.com'
with HTTMock(self.mock_persona_verify_erica):
self.test_client.post('/sign-in', data={'assertion': fake_author_email})
with HTTMock(self.auth_csv_example_allowed):
# create a new clone via get_repo
with self.app.app_context():
with self.app.test_request_context():
from flask import session
session['email'] = fake_author_email
new_clone = view_functions.get_repo(flask_app=self.app)
# check for the config setting
self.assertEqual(new_clone.config_reader().get_value('merge "ignored"', 'driver'), True)
# check for the attributes setting
attributes_path = join(new_clone.git_dir, 'info/attributes')
self.assertTrue(exists(attributes_path))
with open(attributes_path, 'r') as file:
content = file.read().decode("utf-8")
self.assertEqual(content, u'{} merge=ignored'.format(repo_functions.TASK_METADATA_FILENAME))
# in TestApp
def test_task_metadata_should_exist(self):
''' The edit page still works when the task metadata file is unexpectedly missing
'''
fake_author_email = u'erica@example.com'
with HTTMock(self.mock_persona_verify_erica):
self.test_client.post('/sign-in', data={'assertion': fake_author_email})
fake_task_description = u'unimportant task for unimportant person'
branch1 = repo_functions.get_start_branch(self.clone1, 'master', fake_task_description, fake_author_email)
branch1_name = branch1.name
branch1.checkout()
# verify that the most recent commit on the new branch is for starting the activity
self.assertTrue(repo_functions.ACTIVITY_CREATED_MESSAGE in branch1.commit.message)
# validate the existence of the task metadata file
self.assertTrue(repo_functions.verify_file_exists_in_branch(self.clone1, repo_functions.TASK_METADATA_FILENAME, branch1_name))
# now delete it
repo_functions.delete_task_metadata_for_branch(self.clone1, 'master')
self.assertFalse(repo_functions.verify_file_exists_in_branch(self.clone1, repo_functions.TASK_METADATA_FILENAME, branch1_name))
# verify that we can load a functional edit page for the branch
with HTTMock(self.auth_csv_example_allowed):
# request an edit page for the check branch through the http interface
response = self.test_client.get('/tree/{}/edit/'.format(branch1_name), follow_redirects=True)
# it's a good response
self.assertEqual(response.status_code, 200)
# the branch name should be in the returned HTML
self.assertTrue(PATTERN_BRANCH_COMMENT.format(branch1_name) in response.data)
# the 'Started by' author should be reported as 'unknown' since the metadata file is gone
self.assertTrue(PATTERN_AUTHOR_COMMENT.format(u'unknown') in response.data)
# in TestApp
def test_google_callback_is_successful(self):
''' Ensure we get a successful page load on callback from Google authentication
'''
with HTTMock(self.mock_persona_verify_erica):
self.test_client.post('/sign-in', data={'assertion': 'erica@example.com'})
with HTTMock(self.mock_google_authorization):
self.test_client.post('/authorize')
with HTTMock(self.mock_successful_google_callback):
response = self.test_client.get('/callback?state=PPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPP&code=code')
with self.app.app_context():
ga_config = google_api_functions.read_ga_config(self.app.config['RUNNING_STATE_DIR'])
self.assertEqual(ga_config['access_token'], 'meowser_token')
self.assertEqual(ga_config['refresh_token'], 'refresh_meows')
self.assertTrue('/setup' in response.location)
# in TestApp
def test_analytics_setup_is_successful(self):
with HTTMock(self.mock_persona_verify_erica):
self.test_client.post('/sign-in', data={'assertion': 'erica@example.com'})
with HTTMock(self.mock_google_authorization):
self.test_client.post('/authorize')
# mock-post the form in authorize.html to authorization-complete.html with some dummy values and check the results
response = self.test_client.post('/authorization-complete', data={'email': 'erica@example.com', 'name': 'Jane Doe', 'google_email': 'erica@example.com', 'return_link': 'http://example.com', 'property': '12345678', '12345678-domain': 'http://propertyone.example.com', '12345678-name': 'Property One'})
self.assertEqual(u'200 OK', response.status)
with self.app.app_context():
ga_config = google_api_functions.read_ga_config(self.app.config['RUNNING_STATE_DIR'])
# views.authorization_complete() strips the 'http://' from the domain
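# e.g. u'http://propertyone.example.com' -> u'propertyone.example.com'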
self.assertEqual(ga_config['project_domain'], 'propertyone.example.com')
self.assertEqual(ga_config['profile_id'], '12345678')
# in TestApp
def test_handle_bad_analytics_response(self):
''' Verify that an unauthorized analytics response is handled correctly
'''
with HTTMock(self.mock_google_invalid_credentials_response):
with self.app.app_context():
analytics_dict = google_api_functions.fetch_google_analytics_for_page(self.app.config, u'index.html', 'meowser_token')
self.assertEqual(analytics_dict, {})
# in TestApp
def test_google_callback_fails(self):
''' Ensure that we get an appropriate error flashed when we fail to auth with google
'''
with HTTMock(self.mock_persona_verify_erica):
response = self.test_client.post('/sign-in', data={'assertion': 'erica@example.com'})
with HTTMock(self.mock_google_authorization):
response = self.test_client.post('/authorize')
with HTTMock(self.mock_failed_google_callback):
response = self.test_client.get('/callback?state=PPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPP&code=code', follow_redirects=True)
self.assertEqual(response.status_code, 200)
# find the flashed error message in the returned HTML
self.assertTrue('Google rejected authorization request' in response.data)
# in TestApp
def test_invalid_access_token(self):
''' Ensure that we get an appropriate error flashed when we have an invalid access token
'''
with HTTMock(self.mock_persona_verify_erica):
response = self.test_client.post('/sign-in', data={'assertion': 'erica@example.com'})
self.assertEqual(response.status_code, 200)
with HTTMock(self.mock_google_invalid_credentials_response):
response = self.test_client.get('/setup', follow_redirects=True)
self.assertEqual(response.status_code, 200)
# find the flashed error message in the returned HTML
self.assertTrue('Invalid Credentials' in response.data)
# in TestApp
def test_no_properties_found(self):
''' Ensure that we get an appropriate error flashed when no analytics properties are
associated with the authorized Google account
'''
with HTTMock(self.mock_persona_verify_erica):
response = self.test_client.post('/sign-in', data={'assertion': 'erica@example.com'})
self.assertEqual(response.status_code, 200)
with HTTMock(self.mock_google_no_properties_response):
response = self.test_client.get('/setup', follow_redirects=True)
self.assertEqual(response.status_code, 200)
# find the flashed error message in the returned HTML
self.assertTrue('Your Google Account is not associated with any Google Analytics properties' in response.data)
# in TestApp
def test_redirect(self):
''' Check redirect to BROWSERID_URL.
'''
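# a request whose Host doesn't match the configured BROWSERID_URL should
# bounce to the same path on that URL rather than being served directly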
with HTTMock(self.mock_persona_verify_erica):
response = self.test_client.get('/not-allowed', headers={'Host': 'wrong.local'})
expected_url = urljoin(self.app.config['BROWSERID_URL'], '/not-allowed')
self.assertEqual(response.status_code, 302)
self.assertEqual(response.headers['Location'], expected_url)
# in TestApp
def test_create_category(self):
''' Creating a new category creates a directory with an appropriate index file inside.
'''
fake_author_email = u'erica@example.com'
with HTTMock(self.mock_persona_verify_erica):
self.test_client.post('/sign-in', data={'assertion': fake_author_email})
with HTTMock(self.auth_csv_example_allowed):
# start a new branch via the http interface
# invokes view_functions/get_repo which creates a clone
task_description = u'force a clam shell open for starfish'
working_branch = repo_functions.get_start_branch(self.clone1, 'master', task_description, fake_author_email)
self.assertTrue(working_branch.name in self.clone1.branches)
self.assertTrue(working_branch.name in self.origin.branches)
working_branch_name = working_branch.name
working_branch.checkout()
# create a new category
page_slug = u'hello'
response = self.test_client.post('/tree/{}/edit/'.format(working_branch_name),
data={'action': 'create', 'create_what': constants.CATEGORY_LAYOUT, 'request_path': page_slug},
follow_redirects=True)
self.assertEqual(response.status_code, 200)
# pull the changes
self.clone1.git.pull('origin', working_branch_name)
# a directory was created
dir_location = join(self.clone1.working_dir, page_slug)
idx_location = u'{}/index.{}'.format(dir_location, constants.CONTENT_FILE_EXTENSION)
self.assertTrue(exists(dir_location) and isdir(dir_location))
# an index page was created inside
self.assertTrue(exists(idx_location))
# the directory and index page pass the category test
self.assertTrue(view_functions.is_category_dir(dir_location))
# in TestApp
def test_period_in_category_name(self):
''' Putting a period in a category or subcategory name doesn't truncate it.
'''
with HTTMock(self.auth_csv_example_allowed):
with HTTMock(self.mock_persona_verify_erica):
erica = ChimeTestClient(self.test_client, self)
erica.sign_in(email='erica@example.com')
# Start a new task
erica.open_link(constants.ROUTE_ACTIVITY)
erica.start_task(description=u'Be Shot Hundreds Of Feet Into The Air for A Geyser Of Highly Pressurized Water')
# Get the branch name
branch_name = erica.get_branch_name()
# Enter the "other" folder
other_slug = u'other'
erica.follow_link(href='/tree/{}/edit/{}/'.format(branch_name, other_slug))
# Create a category that has a period in its name
category_name = u'Mt. Splashmore'
category_slug = slugify(category_name)
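# e.g. slugify(u'Mt. Splashmore') is expected to drop the period and yield
# something like u'mt-splashmore' (assumed), not a name cut off at the period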
erica.add_category(category_name=category_name)
# the category is correctly represented on the page
self.assertIsNotNone(erica.soup.find(lambda tag: bool(tag.name == 'a' and category_name in tag.text)))
self.assertIsNotNone(erica.soup.find(lambda tag: bool(tag.name == 'a' and category_slug in tag['href'])))
# the category is correctly represented on disk
repo = view_functions.get_repo(repo_path=self.app.config['REPO_PATH'], work_path=self.app.config['WORK_PATH'], email='erica@example.com')
cat_location = join(repo.working_dir, u'{}/{}'.format(other_slug, category_slug))
self.assertTrue(exists(cat_location))
self.assertTrue(view_functions.is_category_dir(cat_location))
# in TestApp
def test_empty_category_or_article_name(self):
''' Submitting an empty category or article name reloads with a warning.
'''
with HTTMock(self.auth_csv_example_allowed):
with HTTMock(self.mock_persona_verify_erica):
erica = ChimeTestClient(self.test_client, self)
erica.sign_in(email='erica@example.com')
pattern_template_comment_stripped = sub(ur'<!--|-->', u'', PATTERN_TEMPLATE_COMMENT)
# Start a new task
erica.open_link(constants.ROUTE_ACTIVITY)
erica.start_task(description=u'Deep-Fry a Buffalo in Forty Seconds for Moe')
# Get the branch name
branch_name = erica.get_branch_name()
# Enter the "other" folder
other_slug = u'other'
erica.follow_link(href='/tree/{}/edit/{}/'.format(branch_name, other_slug))
# Try to create a category with no name
category_name = u''
erica.add_category(category_name=category_name)
# the articles-list template reloaded
comments = erica.soup.findAll(text=lambda text: isinstance(text, Comment))
self.assertTrue(pattern_template_comment_stripped.format(u'articles-list') in comments)
# verify that there's a flash message warning about submitting an empty description
self.assertEqual(u'Please enter a name to create a topic!', erica.soup.find('li', class_='flash').text)
# Try to create a category with a name that slugifies to an empty string
category_name = u'(╯□)╯︵ ┻━┻'
self.assertEqual(u'', slugify(category_name))
erica.add_category(category_name=category_name)
# the articles-list template reloaded
comments = erica.soup.findAll(text=lambda text: isinstance(text, Comment))
self.assertTrue(pattern_template_comment_stripped.format(u'articles-list') in comments)
# verify that there's a flash message warning about submitting an empty description
self.assertEqual(u'{} is not an acceptable topic name!'.format(category_name), erica.soup.find('li', class_='flash').text)
# Create a category and sub-category
category_name = u'Mammals'
subcategory_name = u'Bison'
erica.add_category(category_name=category_name)
erica.add_subcategory(subcategory_name=subcategory_name)
# Try to create an article with no name
article_name = u''
erica.add_article(article_name=article_name)
# the articles-list template reloaded
comments = erica.soup.findAll(text=lambda text: isinstance(text, Comment))
self.assertTrue(pattern_template_comment_stripped.format(u'articles-list') in comments)
# verify that there's a flash message warning about submitting an empty description
self.assertEqual(u'Please enter a name to create an article!', erica.soup.find('li', class_='flash').text)
# Try to create an article with a name that slugifies to an empty string
article_name = u'(╯□)╯︵ ┻━┻'
self.assertEqual(u'', slugify(article_name))
erica.add_article(article_name=article_name)
# the articles-list template reloaded
comments = erica.soup.findAll(text=lambda text: isinstance(text, Comment))
self.assertTrue(pattern_template_comment_stripped.format(u'articles-list') in comments)
# verify that there's a flash message warning about submitting an empty description
self.assertEqual(u'{} is not an acceptable article name!'.format(article_name), erica.soup.find('li', class_='flash').text)
# in TestApp
def test_create_duplicate_category(self):
''' If we ask to create a category that exists, let's not and say we did.
'''
fake_author_email = u'erica@example.com'
with HTTMock(self.mock_persona_verify_erica):
self.test_client.post('/sign-in', data={'assertion': fake_author_email})
with HTTMock(self.auth_csv_example_allowed):
# start a new branch via the http interface
# invokes view_functions/get_repo which creates a clone
working_branch = repo_functions.get_start_branch(self.clone1, 'master', u'force a clam shell open for starfish', fake_author_email)
working_branch.checkout()
# create a new category
request_data = {'action': 'create', 'create_what': constants.CATEGORY_LAYOUT, 'request_path': u'hello'}
response = self.test_client.post('/tree/{}/edit/'.format(working_branch.name),
data=request_data,
follow_redirects=True)
self.assertEqual(response.status_code, 200)
# now do it again
response = self.test_client.post('/tree/{}/edit/'.format(working_branch.name),
data=request_data,
follow_redirects=True)
self.assertEqual(response.status_code, 200)
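# the template HTML-encodes double quotes as &#34;, so normalize them
# back before matching the flash message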
response_data = sub('&#34;', '"', response.data.decode('utf-8'))
self.assertTrue(u'Topic "hello" already exists' in response_data)
# pull the changes
self.clone1.git.pull('origin', working_branch.name)
# everything looks good
dir_location = join(self.clone1.working_dir, u'hello')
idx_location = u'{}/index.{}'.format(dir_location, constants.CONTENT_FILE_EXTENSION)
self.assertTrue(exists(dir_location) and isdir(dir_location))
# an index page was created inside
self.assertTrue(exists(idx_location))
# the directory and index page pass the category test
self.assertTrue(view_functions.is_category_dir(dir_location))
# in TestApp
def test_delete_categories_and_articles(self):
''' Non-empty categories and articles can be deleted
'''
fake_author_email = u'erica@example.com'
with HTTMock(self.mock_persona_verify_erica):
self.test_client.post('/sign-in', data={'assertion': fake_author_email})
with HTTMock(self.auth_csv_example_allowed):
# start a new branch via the http interface
# invokes view_functions/get_repo which creates a clone
task_description = u'vomit digestive fluid onto rotting flesh for flies'
working_branch = repo_functions.get_start_branch(self.clone1, 'master', task_description, fake_author_email)
self.assertTrue(working_branch.name in self.clone1.branches)
self.assertTrue(working_branch.name in self.origin.branches)
working_branch_name = working_branch.name
working_branch.checkout()
# create a categories directory
categories_slug = u'categories'
response = self.test_client.post('/tree/{}/edit/'.format(working_branch_name),
data={'action': 'create', 'create_what': constants.CATEGORY_LAYOUT, 'request_path': categories_slug},
follow_redirects=True)
# and put a new category inside it
cata_title = u'Mouth Parts'
cata_slug = slugify(cata_title)
response = self.test_client.post('/tree/{}/edit/{}'.format(working_branch_name, categories_slug),
data={'action': 'create', 'create_what': constants.CATEGORY_LAYOUT, 'request_path': cata_title},
follow_redirects=True)
self.assertEqual(response.status_code, 200)
# put another category inside that
catb_title = u'Esophagus'
catb_slug = slugify(catb_title)
response = self.test_client.post('/tree/{}/edit/{}'.format(working_branch_name, join(categories_slug, cata_slug)),
data={'action': 'create', 'create_what': constants.CATEGORY_LAYOUT, 'request_path': catb_title},
follow_redirects=True)
self.assertEqual(response.status_code, 200)
# and an article inside that
art_title = u'Stomach'
art_slug = slugify(art_title)
response = self.test_client.post('/tree/{}/edit/{}'.format(working_branch_name, join(categories_slug, cata_slug, catb_slug)),
data={'action': 'create', 'create_what': constants.ARTICLE_LAYOUT, 'request_path': art_title},
follow_redirects=True)
self.assertEqual(response.status_code, 200)
# pull the changes
self.clone1.git.pull('origin', working_branch_name)
# verify that the categories and article exist
art_location = join(self.clone1.working_dir, categories_slug, cata_slug, catb_slug, art_slug)
catb_location = join(self.clone1.working_dir, categories_slug, cata_slug, catb_slug)
cata_location = join(self.clone1.working_dir, categories_slug, cata_slug)
self.assertTrue(exists(art_location))
self.assertTrue(view_functions.is_article_dir(art_location))
# delete category a
response = self.test_client.post('/tree/{}/edit/{}'.format(working_branch_name, join(categories_slug, cata_slug)),
data={'action': 'delete_category'},
follow_redirects=True)
self.assertEqual(response.status_code, 200)
# pull the changes
self.clone1.git.pull('origin', working_branch_name)
# verify that the deleted category and article no longer exist
self.assertFalse(exists(art_location))
self.assertFalse(exists(catb_location))
self.assertFalse(exists(cata_location))
# in TestApp
def test_delete_commit_accuracy(self):
''' The record of a delete in the corresponding commit is accurate.
'''
with HTTMock(self.auth_csv_example_allowed):
erica_email = u'erica@example.com'
with HTTMock(self.mock_persona_verify_erica):
erica = ChimeTestClient(self.test_client, self)
erica.sign_in(email=erica_email)
# Start a new task
erica.open_link(constants.ROUTE_ACTIVITY)
erica.start_task(description=u'Ferment Tuber Fibres Using Symbiotic Bacteria in the Intestines for Naked Mole Rats')
# Get the branch name
branch_name = erica.get_branch_name()
# Enter the "other" folder
erica.follow_link(href='/tree/{}/edit/other/'.format(branch_name))
# Create a category and fill it with some subcategories and articles
category_names = [u'Indigestible Cellulose']
subcategory_names = [u'Volatile Fatty Acids', u'Non-Reproducing Females', u'Arid African Deserts']
article_names = [u'Eusocial Exhibition', u'Old Enough to Eat Solid Food', u'Contributing to Extension of Tunnels', u'Foraging and Nest Building']
erica.add_category(category_name=category_names[0])
category_path = erica.path
erica.add_subcategory(subcategory_name=subcategory_names[0])
erica.open_link(category_path)
erica.add_subcategory(subcategory_name=subcategory_names[1])
erica.open_link(category_path)
erica.add_subcategory(subcategory_name=subcategory_names[2])
subcategory_path = erica.path
erica.add_article(article_name=article_names[0])
erica.open_link(subcategory_path)
erica.add_article(article_name=article_names[1])
erica.open_link(subcategory_path)
erica.add_article(article_name=article_names[2])
erica.open_link(subcategory_path)
erica.add_article(article_name=article_names[3])
# Delete the all-containing category
erica.open_link(category_path)
erica.follow_modify_category_link(category_names[0])
erica.delete_category()
# get and check the history
repo = view_functions.get_repo(repo_path=self.app.config['REPO_PATH'], work_path=self.app.config['WORK_PATH'], email='erica@example.com')
activity = chime_activity.ChimeActivity(repo=repo, branch_name=branch_name, default_branch_name='master', actor_email=erica_email)
activity_history = activity.history
delete_history = activity_history[0]['actions']
for item in delete_history:
self.assertEqual(item['action'], u'delete')
if item['title'] in category_names:
self.assertEqual(item['display_type'], constants.CATEGORY_LAYOUT)
category_names.remove(item['title'])
elif item['title'] in subcategory_names:
self.assertEqual(item['display_type'], constants.CATEGORY_LAYOUT)
subcategory_names.remove(item['title'])
elif item['title'] in article_names:
self.assertEqual(item['display_type'], constants.ARTICLE_LAYOUT)
article_names.remove(item['title'])
# every category, subcategory, and article name should have been matched and removed
self.assertEqual(len(category_names), 0)
self.assertEqual(len(subcategory_names), 0)
self.assertEqual(len(article_names), 0)
# in TestApp
def test_delete_article(self):
''' An article can be deleted
'''
fake_author_email = u'erica@example.com'
with HTTMock(self.mock_persona_verify_erica):
self.test_client.post('/sign-in', data={'assertion': fake_author_email})
with HTTMock(self.auth_csv_example_allowed):
# start a new branch via the http interface
# invokes view_functions/get_repo which creates a clone
task_description = u'Remove Small Organic Particles From Seawater Passing Over Outspread Tentacles for Sea Anemones'
working_branch = repo_functions.get_start_branch(self.clone1, 'master', task_description, fake_author_email)
self.assertTrue(working_branch.name in self.clone1.branches)
self.assertTrue(working_branch.name in self.origin.branches)
working_branch_name = working_branch.name
working_branch.checkout()
# create an article
art_title = u'Zooplankters'
art_slug = slugify(art_title)
response = self.test_client.post('/tree/{}/edit/'.format(working_branch_name),
data={'action': 'create', 'create_what': constants.ARTICLE_LAYOUT, 'request_path': art_title},
follow_redirects=True)
self.assertEqual(response.status_code, 200)
# pull the changes
self.clone1.git.pull('origin', working_branch_name)
# verify that the article exists
art_location = join(self.clone1.working_dir, art_slug)
self.assertTrue(exists(art_location))
self.assertTrue(view_functions.is_article_dir(art_location))
# delete the article
response = self.test_client.post('/tree/{}/edit/{}'.format(working_branch_name, art_slug),
data={'action': 'delete_article', 'request_path': art_slug},
follow_redirects=True)
self.assertEqual(response.status_code, 200)
# pull the changes
self.clone1.git.pull('origin', working_branch_name)
# verify that the deleted category and article no longer exist
self.assertFalse(exists(art_location))
# in TestApp
def test_article_creation_with_unicode_via_web_interface(self):
''' An article with unicode in its title is created as expected.
'''
fake_author_email = u'erica@example.com'
with HTTMock(self.mock_persona_verify_erica):
self.test_client.post('/sign-in', data={'assertion': fake_author_email})
with HTTMock(self.auth_csv_example_allowed):
# start a new branch via the http interface
# invokes view_functions/get_repo which creates a clone
task_description = u'eviscerate a salmon for baby grizzly bears'
working_branch = repo_functions.get_start_branch(self.clone1, 'master', task_description, fake_author_email)
self.assertTrue(working_branch.name in self.clone1.branches)
self.assertTrue(working_branch.name in self.origin.branches)
working_branch_name = working_branch.name
working_branch.checkout()
# create a new article
art_title = u'快速狐狸'
art_slug = slugify(art_title)
response = self.test_client.post('/tree/{}/edit/'.format(working_branch_name), data={'action': 'create', 'create_what': constants.ARTICLE_LAYOUT, 'request_path': art_title}, follow_redirects=True)
self.assertEqual(response.status_code, 200)
self.assertTrue(PATTERN_TEMPLATE_COMMENT.format(u'article-edit') in response.data.decode('utf-8'))
# pull the changes
self.clone1.git.pull('origin', working_branch_name)
# a directory was created
dir_location = join(self.clone1.working_dir, art_slug)
idx_location = u'{}/index.{}'.format(dir_location, constants.CONTENT_FILE_EXTENSION)
self.assertTrue(exists(dir_location) and isdir(dir_location))
# an index page was created inside
self.assertTrue(exists(idx_location))
# the directory and index page pass the article test
self.assertTrue(view_functions.is_article_dir(dir_location))
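# a sketch of the index file's expected shape, assuming Jekyll-style
# YAML front matter (the format get_value_from_front_matter appears to read):
#   ---
#   title: 快速狐狸
#   ---
#   <article body>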
# the title saved in the index front matter is the same text that was used to create the article
self.assertEqual(view_functions.get_value_from_front_matter('title', idx_location), art_title)
# the title saved in the index front matter is displayed on the article list page
response = self.test_client.get('/tree/{}/edit/'.format(working_branch_name), follow_redirects=True)
self.assertEqual(response.status_code, 200)
self.assertTrue(PATTERN_TEMPLATE_COMMENT.format(u'articles-list') in response.data.decode('utf-8'))
self.assertTrue(PATTERN_BRANCH_COMMENT.format(working_branch_name) in response.data.decode('utf-8'))
self.assertTrue(PATTERN_FILE_COMMENT.format(**{"file_name": art_slug, "file_title": art_title, "file_type": constants.ARTICLE_LAYOUT}) in response.data.decode('utf-8'))
# in TestApp
def test_save_non_roman_characters_to_article(self):
''' Adding non-roman characters to an article's title and body raises no unicode errors.
'''
with HTTMock(self.auth_csv_example_allowed):
with HTTMock(self.mock_persona_verify_erica):
erica = ChimeTestClient(self.app.test_client(), self)
erica.sign_in('erica@example.com')
# Start a new task, topic, subtopic, article
erica.open_link(constants.ROUTE_ACTIVITY)
args = 'Mermithergate for Ant Worker', 'Enoplia Nematode', 'Genus Mermis', 'Cephalotes Atratus'
erica.quick_activity_setup(*args)
# Edit the new article and give it a non-roman character title
erica.edit_article(u'快速狐狸', u'Myrmeconema ੯ूᵕू ໒꒱ƶƵ Neotropicum')
# in TestApp
def test_sign_in_with_email_containing_non_roman_characters(self):
''' Adding non-roman characters to the sign-in email raises no errors.
'''
with HTTMock(self.auth_csv_example_allowed):
with HTTMock(self.mock_persona_verify_non_roman):
erica = ChimeTestClient(self.app.test_client(), self)
erica.sign_in('੯ूᵕू ໒꒱ƶƵ@快速狐狸.com')
# in TestApp
def test_new_item_has_name_and_title(self):
''' A slugified directory name and display title are created when a new category or article is created.
'''
fake_author_email = u'erica@example.com'
with HTTMock(self.mock_persona_verify_erica):
self.test_client.post('/sign-in', data={'assertion': fake_author_email})
with HTTMock(self.auth_csv_example_allowed):
# start a new branch via the http interface
# invokes view_functions/get_repo which creates a clone
task_description = u'eviscerate a salmon for baby grizzly bears'
working_branch = repo_functions.get_start_branch(self.clone1, 'master', task_description, fake_author_email)
self.assertTrue(working_branch.name in self.clone1.branches)
self.assertTrue(working_branch.name in self.origin.branches)
working_branch_name = working_branch.name
working_branch.checkout()
# create a new category
cat_title = u'grrowl!! Yeah'
cat_slug = slugify(cat_title)
response = self.test_client.post('/tree/{}/edit/'.format(working_branch_name),
data={'action': 'create', 'create_what': constants.CATEGORY_LAYOUT, 'request_path': cat_title},
follow_redirects=True)
self.assertEqual(response.status_code, 200)
# pull the changes
self.clone1.git.pull('origin', working_branch_name)
# a directory was created
dir_location = join(self.clone1.working_dir, cat_slug)
idx_location = u'{}/index.{}'.format(dir_location, constants.CONTENT_FILE_EXTENSION)
self.assertTrue(exists(dir_location) and isdir(dir_location))
# an index page was created inside
self.assertTrue(exists(idx_location))
# the directory and index page pass the category test
self.assertTrue(view_functions.is_category_dir(dir_location))
# the title saved in the index front matter is the same text that was used to create the category
self.assertEqual(view_functions.get_value_from_front_matter('title', idx_location), cat_title)
# the title saved in the index front matter is displayed on the article list page
response = self.test_client.get('/tree/{}/edit/'.format(working_branch_name), follow_redirects=True)
self.assertTrue(PATTERN_FILE_COMMENT.format(**{"file_name": cat_slug, "file_title": cat_title, "file_type": constants.CATEGORY_LAYOUT}) in response.data)
# create a new article
art_title = u'快速狐狸'
art_slug = slugify(art_title)
response = self.test_client.post('/tree/{}/edit/'.format(working_branch_name), data={'action': 'create', 'create_what': constants.ARTICLE_LAYOUT, 'request_path': art_title}, follow_redirects=True)
self.assertEqual(response.status_code, 200)
self.assertTrue(PATTERN_TEMPLATE_COMMENT.format(u'article-edit') in response.data.decode('utf-8'))
# pull the changes
self.clone1.git.pull('origin', working_branch_name)
# a directory was created
dir_location = join(self.clone1.working_dir, art_slug)
idx_location = u'{}/index.{}'.format(dir_location, constants.CONTENT_FILE_EXTENSION)
self.assertTrue(exists(dir_location) and isdir(dir_location))
# an index page was created inside
self.assertTrue(exists(idx_location))
# the directory and index page pass the article test
self.assertTrue(view_functions.is_article_dir(dir_location))
# the title saved in the index front matter is the same text that was used to create the article
self.assertEqual(view_functions.get_value_from_front_matter('title', idx_location), art_title)
# the title saved in the index front matter is displayed on the article list page
response = self.test_client.get('/tree/{}/edit/'.format(working_branch_name), follow_redirects=True)
self.assertEqual(response.status_code, 200)
self.assertTrue(PATTERN_TEMPLATE_COMMENT.format(u'articles-list') in response.data.decode('utf-8'))
self.assertTrue(PATTERN_BRANCH_COMMENT.format(working_branch_name) in response.data.decode('utf-8'))
self.assertTrue(PATTERN_FILE_COMMENT.format(**{"file_name": art_slug, "file_title": art_title, "file_type": constants.ARTICLE_LAYOUT}) in response.data.decode('utf-8'))
# in TestApp
def test_edit_category_title_and_description(self):
''' A category's title and description can be edited.
'''
fake_author_email = u'erica@example.com'
with HTTMock(self.mock_persona_verify_erica):
self.test_client.post('/sign-in', data={'assertion': fake_author_email})
with HTTMock(self.auth_csv_example_allowed):
# start a new branch via the http interface
# invokes view_functions/get_repo which creates a clone
task_description = u'rapidly discharge black ink into the mantle cavity for squids'
working_branch = repo_functions.get_start_branch(self.clone1, 'master', task_description, fake_author_email)
self.assertTrue(working_branch.name in self.clone1.branches)
self.assertTrue(working_branch.name in self.origin.branches)
working_branch_name = working_branch.name
working_branch.checkout()
# create a categories directory
categories_slug = u'categories'
response = self.test_client.post('/tree/{}/edit/'.format(working_branch_name),
data={'action': 'create', 'create_what': constants.CATEGORY_LAYOUT, 'request_path': categories_slug},
follow_redirects=True)
# and put a new category inside it
cat_title = u'Bolus'
cat_slug = slugify(cat_title)
response = self.test_client.post('/tree/{}/edit/{}'.format(working_branch_name, categories_slug),
data={'action': 'create', 'create_what': constants.CATEGORY_LAYOUT, 'request_path': cat_title},
follow_redirects=True)
self.assertEqual(response.status_code, 200)
# pull the changes
self.clone1.git.pull('origin', working_branch_name)
# get the hexsha
hexsha = self.clone1.commit().hexsha
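# the edit form posts this hexsha back with the save, presumably so the
# server can detect whether the branch moved underneath the editor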
# get the modify page and verify that the form renders with the correct values
cat_path = join(categories_slug, cat_slug, u'index.{}'.format(constants.CONTENT_FILE_EXTENSION))
response = self.test_client.get('/tree/{}/edit/{}'.format(working_branch_name, cat_path), follow_redirects=True)
self.assertEqual(response.status_code, 200)
self.assertTrue(PATTERN_FORM_CATEGORY_TITLE.format(title=cat_title) in response.data)
self.assertTrue(PATTERN_FORM_CATEGORY_DESCRIPTION.format(description=u'') in response.data)
# now save a new title and description for the category
new_cat_title = u'Caecum'
cat_description = u'An intraperitoneal pouch, that is considered to be the beginning of the large intestine.'
response = self.test_client.post('/tree/{}/edit/{}'.format(working_branch_name, cat_path),
data={'layout': constants.CATEGORY_LAYOUT, 'hexsha': hexsha, 'url-slug': u'{}/{}/'.format(categories_slug, cat_slug),
'en-title': new_cat_title, 'en-description': cat_description, 'order': u'0', 'action': u'save_category'},
follow_redirects=True)
self.assertEqual(response.status_code, 200)
# check the returned HTML for the description and title values (format will change as pages are designed)
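# single quotes come back HTML-encoded as &#39;; normalize them first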
response_data = sub('&#39;', '\'', response.data.decode('utf-8'))
self.assertTrue(PATTERN_FLASH_SAVED_CATEGORY.format(title=new_cat_title) in response_data)
self.assertTrue(PATTERN_FORM_CATEGORY_DESCRIPTION.format(description=cat_description) in response_data)
self.assertTrue(PATTERN_FORM_CATEGORY_TITLE.format(title=new_cat_title) in response_data)
# pull the changes
self.clone1.git.pull('origin', working_branch_name)
# a directory was created
dir_location = join(self.clone1.working_dir, categories_slug, cat_slug)
idx_location = u'{}/index.{}'.format(dir_location, constants.CONTENT_FILE_EXTENSION)
self.assertTrue(exists(dir_location) and isdir(dir_location))
# an index page was created inside
self.assertTrue(exists(idx_location))
# the directory and index page pass the category test
self.assertTrue(view_functions.is_category_dir(dir_location))
# the title and description saved in the index front matter is the same text that was used to create the category
self.assertEqual(view_functions.get_value_from_front_matter('title', idx_location), new_cat_title)
self.assertEqual(view_functions.get_value_from_front_matter('description', idx_location), cat_description)
# the title saved in the index front matter is displayed on the article list page
response = self.test_client.get('/tree/{}/edit/{}'.format(working_branch_name, categories_slug), follow_redirects=True)
self.assertTrue(PATTERN_FILE_COMMENT.format(**{"file_name": cat_slug, "file_title": new_cat_title, "file_type": constants.CATEGORY_LAYOUT}) in response.data)
# in TestApp
def test_delete_category(self):
''' A category can be deleted
'''
fake_author_email = u'erica@example.com'
with HTTMock(self.mock_persona_verify_erica):
self.test_client.post('/sign-in', data={'assertion': fake_author_email})
with HTTMock(self.auth_csv_example_allowed):
# start a new branch via the http interface
# invokes view_functions/get_repo which creates a clone
task_description = u'clasp with front legs and draw up the hind end for geometridae'
working_branch = repo_functions.get_start_branch(self.clone1, 'master', task_description, fake_author_email)
self.assertTrue(working_branch.name in self.clone1.branches)
self.assertTrue(working_branch.name in self.origin.branches)
working_branch_name = working_branch.name
working_branch.checkout()
# create a categories directory
categories_slug = u'categories'
response = self.test_client.post('/tree/{}/edit/'.format(working_branch_name),
data={'action': 'create', 'create_what': constants.CATEGORY_LAYOUT, 'request_path': categories_slug},
follow_redirects=True)
# and put a new category inside it
cat_title = u'Soybean Looper'
cat_slug = slugify(cat_title)
response = self.test_client.post('/tree/{}/edit/{}'.format(working_branch_name, categories_slug),
data={'action': 'create', 'create_what': constants.CATEGORY_LAYOUT, 'request_path': cat_title},
follow_redirects=True)
self.assertEqual(response.status_code, 200)
# pull the changes
self.clone1.git.pull('origin', working_branch_name)
# get the hexsha
hexsha = self.clone1.commit().hexsha
# now delete the category
cat_description = u''
url_slug = u'{}/{}/'.format(categories_slug, cat_slug)
response = self.test_client.post('/tree/{}/edit/{}'.format(working_branch_name, url_slug.rstrip('/')),
data={'layout': constants.CATEGORY_LAYOUT, 'hexsha': hexsha, 'url-slug': url_slug,
'en-title': cat_title, 'en-description': cat_description, 'order': u'0',
'action': u'delete_category'},
follow_redirects=True)
self.assertEqual(response.status_code, 200)
# check the returned HTML for the description and title values (format will change as pages are designed)
soup = BeautifulSoup(response.data)
self.assertEqual(PATTERN_FLASH_DELETED_CATEGORY.format(title=cat_title, containing=u''), soup.find('li', class_='flash').text)
# pull the changes
self.clone1.git.pull('origin', working_branch_name)
# the directory was deleted
dir_location = join(self.clone1.working_dir, categories_slug, cat_slug)
self.assertFalse(exists(dir_location) and isdir(dir_location))
# the title is not displayed on the article list page
response = self.test_client.get('/tree/{}/edit/{}'.format(working_branch_name, categories_slug), follow_redirects=True)
self.assertFalse(PATTERN_FILE_COMMENT.format(file_name=cat_slug, file_title=cat_title, file_type=constants.CATEGORY_LAYOUT) in response.data)
# in TestApp
def test_set_and_retrieve_order_and_description(self):
''' Order and description can be set to and retrieved from an article's or category's front matter.
'''
fake_author_email = u'erica@example.com'
with HTTMock(self.mock_persona_verify_erica):
self.test_client.post('/sign-in', data={'assertion': fake_author_email})
with HTTMock(self.auth_csv_example_allowed):
# start a new branch via the http interface
# invokes view_functions/get_repo which creates a clone
task_description = u'regurgitate partially digested worms and grubs for baby birds'
working_branch = repo_functions.get_start_branch(self.clone1, 'master', task_description, fake_author_email)
self.assertTrue(working_branch.name in self.clone1.branches)
self.assertTrue(working_branch.name in self.origin.branches)
working_branch_name = working_branch.name
working_branch.checkout()
# create a categories directory
categories_slug = u'categories'
response = self.test_client.post('/tree/{}/edit/'.format(working_branch_name),
data={'action': 'create', 'create_what': constants.CATEGORY_LAYOUT, 'request_path': categories_slug},
follow_redirects=True)
# and put a new category inside it
cat_title = u'Small Intestine'
cat_slug = slugify(cat_title)
response = self.test_client.post('/tree/{}/edit/{}'.format(working_branch_name, categories_slug),
data={'action': 'create', 'create_what': constants.CATEGORY_LAYOUT, 'request_path': cat_title},
follow_redirects=True)
self.assertEqual(response.status_code, 200)
# pull the changes
self.clone1.git.pull('origin', working_branch_name)
# get the hexsha
hexsha = self.clone1.commit().hexsha
# now save some values into the category's index page's front matter
new_cat_title = u'The Small Intestine'
cat_description = u'The part of the GI tract following the stomach and followed by the large intestine where much of the digestion and absorption of food takes place.'
cat_order = 3
cat_path = join(categories_slug, cat_slug, u'index.{}'.format(constants.CONTENT_FILE_EXTENSION))
response = self.test_client.post('/tree/{}/edit/{}'.format(working_branch_name, cat_path),
data={'layout': constants.CATEGORY_LAYOUT, 'hexsha': hexsha,
'en-title': new_cat_title, 'en-description': cat_description,
'order': cat_order, 'action': u'save_category'},
follow_redirects=True)
self.assertEqual(response.status_code, 200)
# check the returned HTML for the description and order values (format will change as pages are designed)
soup = BeautifulSoup(response.data)
self.assertEqual(soup.find('textarea', {'name': 'en-description'}).text, cat_description)
self.assertEqual(int(soup.find('input', {'name': 'order'})['value']), cat_order)
# pull the changes
self.clone1.git.pull('origin', working_branch_name)
# a directory was created
dir_location = join(self.clone1.working_dir, categories_slug, cat_slug)
idx_location = u'{}/index.{}'.format(dir_location, constants.CONTENT_FILE_EXTENSION)
self.assertTrue(exists(dir_location) and isdir(dir_location))
# an index page was created inside
self.assertTrue(exists(idx_location))
# the directory and index page pass the category test
self.assertTrue(view_functions.is_category_dir(dir_location))
# the title saved in the index front matter is the same text that was used to create the category
self.assertEqual(view_functions.get_value_from_front_matter('title', idx_location), new_cat_title)
# check order and description
self.assertEqual(view_functions.get_value_from_front_matter('order', idx_location), cat_order)
self.assertEqual(view_functions.get_value_from_front_matter('description', idx_location), cat_description)
# the title saved in the index front matter is displayed on the article list page
response = self.test_client.get('/tree/{}/edit/{}'.format(working_branch_name, categories_slug), follow_redirects=True)
self.assertTrue(PATTERN_FILE_COMMENT.format(**{"file_name": cat_slug, "file_title": new_cat_title, "file_type": constants.CATEGORY_LAYOUT}) in response.data)
# in TestApp
def test_column_navigation_structure(self):
''' The column navigation structure matches the structure of the site.
'''
fake_author_email = u'erica@example.com'
with HTTMock(self.mock_persona_verify_erica):
self.test_client.post('/sign-in', data={'assertion': fake_author_email})
with HTTMock(self.auth_csv_example_allowed):
# start a new branch via the http interface
# invokes view_functions/get_repo which creates a clone
task_description = u'force a clam shell open for starfish'
working_branch = repo_functions.get_start_branch(self.clone1, 'master', task_description, fake_author_email)
self.assertTrue(working_branch.name in self.clone1.branches)
self.assertTrue(working_branch.name in self.origin.branches)
working_branch_name = working_branch.name
working_branch.checkout()
# create some nested categories
slug_hello = u'hello'
response = self.test_client.post('/tree/{}/edit/'.format(working_branch_name),
data={'action': 'create', 'create_what': constants.CATEGORY_LAYOUT, 'request_path': slug_hello},
follow_redirects=True)
self.assertEqual(response.status_code, 200)
slug_world = u'world'
response = self.test_client.post('/tree/{}/edit/{}'.format(working_branch_name, slug_hello),
data={'action': 'create', 'create_what': constants.CATEGORY_LAYOUT, 'request_path': slug_world},
follow_redirects=True)
self.assertEqual(response.status_code, 200)
slug_how = u'how'
response = self.test_client.post('/tree/{}/edit/{}'.format(working_branch_name, sep.join([slug_hello, slug_world])),
data={'action': 'create', 'create_what': constants.CATEGORY_LAYOUT, 'request_path': slug_how},
follow_redirects=True)
self.assertEqual(response.status_code, 200)
slug_are = u'are'
response = self.test_client.post('/tree/{}/edit/{}'.format(working_branch_name, sep.join([slug_hello, slug_world, slug_how])),
data={'action': 'create', 'create_what': constants.CATEGORY_LAYOUT, 'request_path': slug_are},
follow_redirects=True)
self.assertEqual(response.status_code, 200)
# pull the changes
self.clone1.git.pull('origin', working_branch_name)
# get the columns
dir_columns = view_functions.make_directory_columns(self.clone1, working_branch_name, sep.join([slug_hello, slug_world, slug_how, slug_are]))
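# expect one column per level of the edit path: the root listing plus
# the contents of hello, world and how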
# test that the contents match our expectations
self.assertEqual(len(dir_columns), 4)
self.assertEqual(len(dir_columns[0]['files']), 7)
expected = {'hello': u'category', 'img': u'folder', 'index.md': u'file', 'other': u'folder', 'other.md': u'file', 'sub': u'folder', 'test-articles': u'folder'}
for item in dir_columns[0]['files']:
self.assertTrue(item['name'] in expected)
self.assertTrue(expected[item['name']] == item['display_type'])
self.assertTrue(dir_columns[1]['files'][0]['name'] == slug_world)
self.assertTrue(dir_columns[2]['files'][0]['name'] == slug_how)
self.assertTrue(dir_columns[3]['files'][0]['name'] == slug_are)
# in TestApp
def test_activity_overview_page_is_accurate(self):
''' The activity history page accurately displays the activity history
'''
fake_author_email = u'erica@example.com'
with HTTMock(self.mock_persona_verify_erica):
self.test_client.post('/sign-in', data={'assertion': fake_author_email})
with HTTMock(self.auth_csv_example_allowed):
# start a new branch via the http interface
# invokes view_functions/get_repo which creates a clone
task_description = u'deposit eggs in a syconium for fig wasp larvae'
working_branch = repo_functions.get_start_branch(self.clone1, 'master', task_description, fake_author_email)
self.assertTrue(working_branch.name in self.clone1.branches)
self.assertTrue(working_branch.name in self.origin.branches)
working_branch_name = working_branch.name
working_branch.checkout()
title_fig_zh = u'无花果'
slug_fig_zh = u'wu-hua-guo'
title_syconium = u'Syconium'
slug_syconium = u'syconium'
title_ostiole = u'Ostiole'
title_fig_en = u'Fig'
title_fig_bn = u'Dumur'
create_details = [
(u'', title_fig_zh, constants.CATEGORY_LAYOUT),
(slug_fig_zh, title_syconium, constants.CATEGORY_LAYOUT),
(u'{}/{}'.format(slug_fig_zh, slug_syconium), title_ostiole, constants.ARTICLE_LAYOUT),
(u'', title_fig_en, constants.CATEGORY_LAYOUT),
(u'', title_fig_bn, constants.CATEGORY_LAYOUT)
]
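# each tuple above is (containing path, title, layout), consumed in order below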
for detail in create_details:
response = self.test_client.post('/tree/{}/edit/{}'.format(working_branch_name, detail[0]),
data={'action': 'create', 'create_what': detail[2], 'request_path': detail[1]},
follow_redirects=True)
self.assertEqual(response.status_code, 200)
# add a comment
comment_text = u'The flowers provide a safe haven and nourishment for the next generation of wasps. ᙙᙖ'
response = self.test_client.post('/tree/{}/'.format(working_branch_name),
data={'comment': 'Comment', 'comment_text': comment_text},
follow_redirects=True)
self.assertEqual(response.status_code, 200)
# delete a topic
response = self.test_client.post('/tree/{}/edit/{}'.format(working_branch_name, slug_fig_zh),
data={'action': 'delete_category'},
follow_redirects=True)
self.assertEqual(response.status_code, 200)
# get the activity history page
response = self.test_client.get('/tree/{}/'.format(working_branch_name), follow_redirects=True)
# TODO: for some reason (encoding?) my double-quotes are being replaced by &#34; in the returned HTML
response_data = sub('&#34;', '"', response.data.decode('utf-8'))
# make sure everything we did above is shown on the activity page
self.assertTrue(PATTERN_TEMPLATE_COMMENT.format('activity-overview') in response_data)
self.assertTrue(PATTERN_OVERVIEW_ACTIVITY_STARTED.format(activity_name=task_description, author_email=fake_author_email) in response_data)
self.assertTrue(PATTERN_OVERVIEW_COMMENT_BODY.format(comment_body=comment_text) in response_data)
self.assertTrue(PATTERN_OVERVIEW_ITEM_DELETED.format(deleted_name=title_fig_zh, deleted_type=view_functions.file_display_name(constants.CATEGORY_LAYOUT), deleted_also=u'(containing 1 topic and 1 article) ', author_email=fake_author_email) in response_data)
for detail in create_details:
self.assertTrue(PATTERN_OVERVIEW_ITEM_CREATED.format(created_name=detail[1], created_type=detail[2], author_email=fake_author_email) in response_data)
# in TestApp
def test_activity_history_summary_accuracy(self):
''' The summary of an activity's history is displayed as expected.
'''
with HTTMock(self.auth_csv_example_allowed):
with HTTMock(self.mock_persona_verify_erica):
erica = ChimeTestClient(self.test_client, self)
erica.sign_in(email='erica@example.com')
# Start a new task
erica.open_link(constants.ROUTE_ACTIVITY)
erica.start_task(description=u'Parasitize with Ichneumonidae for Moth Larvae')
# Get the branch name
branch_name = erica.get_branch_name()
# Load the "other" folder
erica.open_link(url='/tree/{}/edit/other/'.format(branch_name))
# Create a category, sub-category, article
category_name = u'Antennae Segments'
subcategory_name = u'Short Ovipositors'
article_names = [u'Inject Eggs Directly Into a Host Body', u'A Technique Of Celestial Navigation Called Transverse Orientation']
erica.add_category(category_name=category_name)
erica.add_subcategory(subcategory_name=subcategory_name)
subcategory_path = erica.path
erica.add_article(article_name=article_names[0])
# edit the article
erica.edit_article(title_str=article_names[0], body_str=u'Inject venom along with the egg')
# create another article and delete it
erica.open_link(subcategory_path)
erica.add_article(article_name=article_names[1])
erica.open_link(subcategory_path)
erica.delete_article(article_names[1])
# Load the activity overview page
erica.open_link(url='/tree/{}/'.format(branch_name))
# there is a summary
summary_div = erica.soup.find("div", {"data-test-id": "summary-div"})
self.assertIsNotNone(summary_div)
# it's right about what's changed
self.assertIsNotNone(summary_div.find(lambda tag: bool(tag.name == 'p' and '2 articles and 2 topics' in tag.text)))
# grab all the list items
check_rows = summary_div.find_all('li')
# the link to create a new change
change_row = check_rows.pop()
self.assertIsNotNone(change_row.find("a", {"data-test-id": "change-link"}))
self.assertEqual(change_row.find("a", {"data-test-id": "change-link"}).text, constants.TEXT_ADD_CHANGE)
# make sure the list items match what we did above
category_row = check_rows.pop()
self.assertIsNotNone(category_row.find("a", {"data-test-id": "change-link"}))
self.assertEqual(category_row.find('h3', {"data-test-id": "change-title"}).text, category_name)
self.assertEqual(category_row.find('div', {"data-test-id": "change-display-type"}).text, constants.LAYOUT_DISPLAY_LOOKUP[constants.CATEGORY_LAYOUT].title())
self.assertEqual(category_row.find('p', {"data-test-id": "change-actions"}).text, u'Created')
subcategory_row = check_rows.pop()
self.assertIsNotNone(subcategory_row.find("a", {"data-test-id": "change-link"}))
self.assertEqual(subcategory_row.find('h3', {"data-test-id": "change-title"}).text, subcategory_name)
self.assertEqual(subcategory_row.find('div', {"data-test-id": "change-display-type"}).text, constants.LAYOUT_DISPLAY_LOOKUP[constants.CATEGORY_LAYOUT].title())
self.assertEqual(subcategory_row.find('p', {"data-test-id": "change-actions"}).text, u'Created')
article_1_row = check_rows.pop()
self.assertIsNotNone(article_1_row.find("a", {"data-test-id": "change-link"}))
self.assertEqual(article_1_row.find('h3', {"data-test-id": "change-title"}).text, article_names[0])
self.assertEqual(article_1_row.find('div', {"data-test-id": "change-display-type"}).text, constants.LAYOUT_DISPLAY_LOOKUP[constants.ARTICLE_LAYOUT].title())
self.assertEqual(article_1_row.find('p', {"data-test-id": "change-actions"}).text, u'Created, Edited')
article_2_row = check_rows.pop()
self.assertIsNone(article_2_row.find("a", {"data-test-id": "change-link"}))
self.assertIsNone(article_2_row.find('h3', {"data-test-id": "change-title"}).find('a'))
self.assertEqual(article_2_row.find('h3', {"data-test-id": "change-title"}).text, article_names[1])
self.assertEqual(article_2_row.find('div', {"data-test-id": "change-display-type"}).text, constants.LAYOUT_DISPLAY_LOOKUP[constants.ARTICLE_LAYOUT].title())
self.assertEqual(article_2_row.find('p', {"data-test-id": "change-actions"}).text, u'Created, Deleted')
# no rows left
self.assertEqual(len(check_rows), 0)
# in TestApp
def test_create_page_creates_directory_containing_index(self):
''' Creating a new page creates a directory with an editable index file inside.
'''
fake_author_email = u'erica@example.com'
with HTTMock(self.mock_persona_verify_erica):
self.test_client.post('/sign-in', data={'assertion': fake_author_email})
with HTTMock(self.auth_csv_example_allowed):
# start a new branch via the http interface
# invokes view_functions/get_repo which creates a clone
task_description = u'filter plankton from sea water for humpback whales'
working_branch = repo_functions.get_start_branch(self.clone1, 'master', task_description, fake_author_email)
self.assertTrue(working_branch.name in self.clone1.branches)
self.assertTrue(working_branch.name in self.origin.branches)
working_branch_name = working_branch.name
working_branch.checkout()
# create a new page
page_slug = u'hello'
page_path = u'{}/index.{}'.format(page_slug, constants.CONTENT_FILE_EXTENSION)
response = self.test_client.post('/tree/{}/edit/'.format(working_branch_name),
data={'action': 'create', 'create_what': constants.ARTICLE_LAYOUT, 'request_path': page_slug},
follow_redirects=True)
self.assertEqual(response.status_code, 200)
self.assertTrue(page_path in response.data)
# pull the changes
self.clone1.git.pull('origin', working_branch_name)
# a directory was created
dir_location = join(self.clone1.working_dir, page_slug)
idx_location = u'{}/index.{}'.format(dir_location, constants.CONTENT_FILE_EXTENSION)
self.assertTrue(exists(dir_location) and isdir(dir_location))
# an index page was created inside
self.assertTrue(exists(idx_location))
# the directory and index page pass the article test
self.assertTrue(view_functions.is_article_dir(dir_location))
# in TestApp
def test_can_rename_editable_directories(self):
''' Can rename an editable directory.
'''
fake_author_email = u'erica@example.com'
with HTTMock(self.mock_persona_verify_erica):
self.test_client.post('/sign-in', data={'assertion': fake_author_email})
with HTTMock(self.auth_csv_example_allowed):
# start a new branch via the http interface
# invokes view_functions/get_repo which creates a clone
task_description = u'filter plankton from sea water for humpback whales'
working_branch = repo_functions.get_start_branch(self.clone1, 'master', task_description, fake_author_email)
self.assertTrue(working_branch.name in self.clone1.branches)
self.assertTrue(working_branch.name in self.origin.branches)
working_branch_name = working_branch.name
working_branch.checkout()
# create a new page
page_slug = u'hello'
page_path = u'{}/index.{}'.format(page_slug, constants.CONTENT_FILE_EXTENSION)
response = self.test_client.post('/tree/{}/edit/'.format(working_branch_name),
data={'action': 'create', 'create_what': constants.ARTICLE_LAYOUT, 'request_path': page_slug},
follow_redirects=True)
self.assertEqual(response.status_code, 200)
self.assertTrue(page_path in response.data)
hexsha = search(r'<input name="hexsha" value="(\w+)"', response.data).group(1)
# now save the file with a new url-slug to rename its directory
new_page_slug = u'goodbye'
new_page_path = u'{}/index.{}'.format(new_page_slug, constants.CONTENT_FILE_EXTENSION)
response = self.test_client.post('/tree/{}/save/{}'.format(working_branch_name, page_path),
data={'layout': constants.ARTICLE_LAYOUT, 'hexsha': hexsha,
'en-title': u'',
'en-body': u'',
'fr-title': u'', 'fr-body': u'',
'url-slug': u'{}'.format(new_page_slug)},
follow_redirects=True)
self.assertEqual(response.status_code, 200)
self.assertTrue(new_page_path in response.data)
# pull the changes
self.clone1.git.pull('origin', working_branch_name)
# the old directory is gone
old_dir_location = join(self.clone1.working_dir, page_slug)
self.assertFalse(exists(old_dir_location))
# the new directory exists and is properly structured
new_dir_location = join(self.clone1.working_dir, new_page_slug)
self.assertTrue(exists(new_dir_location) and isdir(new_dir_location))
# an index page is inside
idx_location = u'{}/index.{}'.format(new_dir_location, constants.CONTENT_FILE_EXTENSION)
self.assertTrue(exists(idx_location))
# the directory and index page pass the editable test
self.assertTrue(view_functions.is_article_dir(new_dir_location))
# in TestApp
def test_cannot_move_a_directory_inside_itself(self):
''' Can't rename an editable directory in a way which moves it inside itself
'''
fake_author_email = u'erica@example.com'
with HTTMock(self.mock_persona_verify_erica):
self.test_client.post('/sign-in', data={'assertion': fake_author_email})
with HTTMock(self.auth_csv_example_allowed):
# start a new branch via the http interface
# invokes view_functions/get_repo which creates a clone
task_description = u'filter plankton from sea water for humpback whales'
working_branch = repo_functions.get_start_branch(self.clone1, 'master', task_description, fake_author_email)
self.assertTrue(working_branch.name in self.clone1.branches)
self.assertTrue(working_branch.name in self.origin.branches)
working_branch_name = working_branch.name
working_branch.checkout()
# create a new page
page_slug = u'hello'
page_path = u'{}/index.{}'.format(page_slug, constants.CONTENT_FILE_EXTENSION)
response = self.test_client.post('/tree/{}/edit/'.format(working_branch_name),
data={'action': 'create', 'create_what': constants.ARTICLE_LAYOUT, 'request_path': page_slug},
follow_redirects=True)
self.assertEqual(response.status_code, 200)
self.assertTrue(page_path in response.data)
hexsha = search(r'<input name="hexsha" value="(\w+)"', response.data).group(1)
# now try to save the file with a url-slug nested inside its own path
new_page_slug = u'hello/is/better/than/goodbye'
new_page_path = u'{}/index.{}'.format(new_page_slug, constants.CONTENT_FILE_EXTENSION)
response = self.test_client.post('/tree/{}/save/{}'.format(working_branch_name, page_path),
data={'layout': constants.ARTICLE_LAYOUT, 'hexsha': hexsha,
'en-title': u'',
'en-body': u'',
'fr-title': u'', 'fr-body': u'',
'url-slug': u'{}'.format(new_page_slug)},
follow_redirects=True)
self.assertEqual(response.status_code, 200)
# the new page shouldn't have been created
self.assertFalse(new_page_path in response.data)
# there should be a flashed error message
self.assertTrue(u'I cannot move a directory inside itself!' in response.data)
# pull the changes
self.clone1.git.pull('origin', working_branch_name)
# the old directory is not gone
old_dir_location = join(self.clone1.working_dir, page_slug)
self.assertTrue(exists(old_dir_location))
# the new directory doesn't exist
new_dir_location = join(self.clone1.working_dir, new_page_slug)
self.assertFalse(exists(new_dir_location) and isdir(new_dir_location))
# in TestApp
def test_editable_directories_are_shown_as_articles(self):
''' Editable directories (directories containing only an editable index file) are displayed as articles.
'''
fake_author_email = u'erica@example.com'
with HTTMock(self.mock_persona_verify_erica):
self.test_client.post('/sign-in', data={'assertion': fake_author_email})
with HTTMock(self.auth_csv_example_allowed):
# start a new branch via the http interface
# invokes view_functions/get_repo which creates a clone
task_description = u'filter plankton from sea water for humpback whales'
working_branch = repo_functions.get_start_branch(self.clone1, 'master', task_description, fake_author_email)
self.assertTrue(working_branch.name in self.clone1.branches)
self.assertTrue(working_branch.name in self.origin.branches)
working_branch_name = working_branch.name
working_branch.checkout()
# create a new page
page_slug = u'hello'
page_path = u'{}/index.{}'.format(page_slug, constants.CONTENT_FILE_EXTENSION)
response = self.test_client.post('/tree/{}/edit/'.format(working_branch_name),
data={'action': 'create', 'create_what': constants.ARTICLE_LAYOUT, 'request_path': page_slug},
follow_redirects=True)
self.assertEqual(response.status_code, 200)
self.assertTrue(page_path in response.data)
# load the index page
response = self.test_client.get('/tree/{}/edit/'.format(working_branch_name), follow_redirects=True)
self.assertEqual(response.status_code, 200)
# verify that the new folder is represented as a file in the HTML
self.assertTrue(PATTERN_BRANCH_COMMENT.format(working_branch_name) in response.data)
self.assertTrue(PATTERN_FILE_COMMENT.format(**{"file_name": page_slug, "file_title": page_slug, "file_type": constants.ARTICLE_LAYOUT}) in response.data)
# in TestApp
def test_page_not_found_error(self):
''' A 404 page is generated when we get an address that doesn't exist
'''
fake_author_email = u'erica@example.com'
with HTTMock(self.mock_persona_verify_erica):
self.test_client.post('/sign-in', data={'assertion': fake_author_email})
with HTTMock(self.auth_csv_example_allowed):
# start a new branch via the http interface
# invokes view_functions/get_repo which creates a clone
task_description = u'drink quinine for mosquitos'
working_branch = repo_functions.get_start_branch(self.clone1, 'master', task_description, fake_author_email)
self.assertTrue(working_branch.name in self.clone1.branches)
self.assertTrue(working_branch.name in self.origin.branches)
working_branch_name = working_branch.name
working_branch.checkout()
# get a non-existent page
response = self.test_client.get('/tree/{}/malaria'.format(working_branch_name), follow_redirects=True)
self.assertTrue(PATTERN_TEMPLATE_COMMENT.format('error-404') in response.data)
# these values are set in setUp() above
self.assertTrue(u'support@example.com' in response.data)
self.assertTrue(u'(123) 456-7890' in response.data)
# in TestApp
def test_garbage_edit_url_raises_page_not_found(self):
''' A 404 page is generated when we get an edit address that doesn't exist
'''
with HTTMock(self.auth_csv_example_allowed):
with HTTMock(self.mock_persona_verify_erica):
erica = ChimeTestClient(self.app.test_client(), self)
erica.sign_in('erica@example.com')
# Start a new task
erica.open_link(constants.ROUTE_ACTIVITY)
erica.start_task(description=u'Take Malarone for People Susceptible to Malaria')
# Get the branch name
branch_name = erica.get_branch_name()
# Enter the "other" folder
other_slug = u'other'
erica.follow_link(href='/tree/{}/edit/{}/'.format(branch_name, other_slug))
# Create a category
category_name = u'Rubber Plants'
category_slug = slugify(category_name)
erica.add_category(category_name=category_name)
# Try to load a non-existent page within the category
erica.open_link(url='/tree/{}/edit/{}/malaria'.format(branch_name, category_slug), expected_status_code=404)
# in TestApp
def test_garbage_view_url_raises_page_not_found(self):
''' A 404 page is generated when we get a view address that doesn't exist
'''
with HTTMock(self.auth_csv_example_allowed):
with HTTMock(self.mock_persona_verify_erica):
erica = ChimeTestClient(self.app.test_client(), self)
erica.sign_in('erica@example.com')
# Start a new task
erica.open_link(constants.ROUTE_ACTIVITY)
erica.start_task(description=u'Chew Mulberry Leaves for Silkworms')
# Get the branch name
branch_name = erica.get_branch_name()
# Enter the "other" folder
other_slug = u'other'
erica.follow_link(href='/tree/{}/edit/{}/'.format(branch_name, other_slug))
# Create a category
category_name = u'Bombyx Mori'
category_slug = slugify(category_name)
erica.add_category(category_name=category_name)
# Try to load a non-existent asset within the other folder
erica.open_link(url='/tree/{}/view/{}/{}/missing.jpg'.format(branch_name, other_slug, category_slug), expected_status_code=404)
# in TestApp
def test_internal_server_error(self):
''' A 500 page is generated when we provoke a server error
'''
with HTTMock(self.mock_persona_verify_erica):
response = self.test_client.post('/sign-in', data={'assertion': 'erica@example.com'})
with HTTMock(self.mock_internal_server_error):
response = self.test_client.get(constants.ROUTE_ACTIVITY, follow_redirects=True)
self.assertTrue(PATTERN_TEMPLATE_COMMENT.format('error-500') in response.data)
# these values are set in setUp() above
self.assertTrue(u'support@example.com' in response.data)
self.assertTrue(u'(123) 456-7890' in response.data)
# in TestApp
def test_exception_error(self):
''' A 500 page is generated when we provoke an uncaught exception
'''
with HTTMock(self.mock_persona_verify_erica):
response = self.test_client.post('/sign-in', data={'assertion': 'erica@example.com'})
with HTTMock(self.mock_exception):
response = self.test_client.get(constants.ROUTE_ACTIVITY, follow_redirects=True)
self.assertTrue(PATTERN_TEMPLATE_COMMENT.format('error-500') in response.data)
# these values are set in setUp() above
self.assertTrue(u'support@example.com' in response.data)
self.assertTrue(u'(123) 456-7890' in response.data)
# in TestApp
def test_merge_conflict_error(self):
''' We get a merge conflict error page when there's a merge conflict
'''
fake_task_description_1 = u'do things for somebody else'
fake_task_description_2 = u'do other things for somebody even else'
fake_email_1 = u'erica@example.com'
fake_email_2 = u'frances@example.com'
fake_page_slug = u'hello'
fake_page_path = u'{}/index.{}'.format(fake_page_slug, constants.CONTENT_FILE_EXTENSION)
fake_page_content_1 = u'Hello world.'
fake_page_content_2 = u'Hello moon.'
#
#
# Log in as person 1
with HTTMock(self.mock_persona_verify_erica):
self.test_client.post('/sign-in', data={'assertion': fake_email_1})
with HTTMock(self.auth_csv_example_allowed):
# create a new branch
response = self.test_client.post('/start', data={'task_description': fake_task_description_1}, follow_redirects=True)
# extract the generated branch name from the returned HTML
generated_branch_search = search(r'<!-- branch: (.{{{}}}) -->'.format(repo_functions.BRANCH_NAME_LENGTH), response.data)
self.assertIsNotNone(generated_branch_search)
try:
generated_branch_name_1 = generated_branch_search.group(1)
except AttributeError:
raise Exception('No match for generated branch name.')
with HTTMock(self.mock_google_analytics):
# create a new file
response = self.test_client.post('/tree/{}/edit/'.format(generated_branch_name_1),
data={'action': 'create', 'create_what': constants.ARTICLE_LAYOUT, 'request_path': fake_page_slug},
follow_redirects=True)
# get the edit page for the new file and extract the hexsha value
response = self.test_client.get('/tree/{}/edit/{}'.format(generated_branch_name_1, fake_page_path))
hexsha = search(r'<input name="hexsha" value="(\w+)"', response.data).group(1)
# now save the file with new content
response = self.test_client.post('/tree/{}/save/{}'.format(generated_branch_name_1, fake_page_path),
data={'layout': constants.ARTICLE_LAYOUT, 'hexsha': hexsha,
'en-title': 'Greetings',
'en-body': u'{}\n'.format(fake_page_content_1),
'url-slug': u'{}/index'.format(fake_page_slug)},
follow_redirects=True)
# Request feedback on person 1's change
with HTTMock(self.auth_csv_example_allowed):
response = self.test_client.post('/tree/{}/'.format(generated_branch_name_1), data={'comment_text': u'', 'request_feedback': u'Request Feedback'}, follow_redirects=True)
#
#
# Log in as person 2
with HTTMock(self.mock_persona_verify_frances):
self.test_client.post('/sign-in', data={'assertion': fake_email_2})
with HTTMock(self.auth_csv_example_allowed):
# create a new branch
response = self.test_client.post('/start', data={'task_description': fake_task_description_2}, follow_redirects=True)
# extract the generated branch name from the returned HTML
generated_branch_search = search(r'<!-- branch: (.{{{}}}) -->'.format(repo_functions.BRANCH_NAME_LENGTH), response.data)
try:
generated_branch_name_2 = generated_branch_search.group(1)
except AttributeError:
raise Exception('No match for generated branch name.')
with HTTMock(self.mock_google_analytics):
# create a new file
response = self.test_client.post('/tree/{}/edit/'.format(generated_branch_name_2),
data={'action': 'create', 'create_what': constants.ARTICLE_LAYOUT, 'request_path': fake_page_slug},
follow_redirects=True)
# get the edit page for the new file and extract the hexsha value
response = self.test_client.get('/tree/{}/edit/{}'.format(generated_branch_name_2, fake_page_path))
hexsha = search(r'<input name="hexsha" value="(\w+)"', response.data).group(1)
# now save the file with new content
fake_new_title = u'Bloople'
response = self.test_client.post('/tree/{}/save/{}'.format(generated_branch_name_2, fake_page_path),
data={'layout': constants.ARTICLE_LAYOUT, 'hexsha': hexsha,
'en-title': fake_new_title,
'en-body': u'{}\n'.format(fake_page_content_2),
'url-slug': u'{}/index'.format(fake_page_slug)},
follow_redirects=True)
# Request feedback on person 2's change
with HTTMock(self.auth_csv_example_allowed):
response = self.test_client.post('/tree/{}/'.format(generated_branch_name_2), data={'comment_text': u'', 'request_feedback': u'Request Feedback'}, follow_redirects=True)
# Endorse person 1's change
with HTTMock(self.auth_csv_example_allowed):
response = self.test_client.post('/tree/{}/'.format(generated_branch_name_1), data={'comment_text': u'', 'endorse_edits': 'Endorse Edits'}, follow_redirects=True)
# And publish person 1's change!
with HTTMock(self.auth_csv_example_allowed):
response = self.test_client.post('/tree/{}/'.format(generated_branch_name_1), data={'comment_text': u'', 'merge': 'Publish'}, follow_redirects=True)
#
#
# Log in as person 1
with HTTMock(self.mock_persona_verify_erica):
self.test_client.post('/sign-in', data={'assertion': fake_email_1})
# Endorse person 2's change
with HTTMock(self.auth_csv_example_allowed):
response = self.test_client.post('/tree/{}/'.format(generated_branch_name_2), data={'comment_text': u'', 'endorse_edits': 'Endorse Edits'}, follow_redirects=True)
# And publish person 2's change!
with HTTMock(self.auth_csv_example_allowed):
response = self.test_client.post('/tree/{}/'.format(generated_branch_name_2), data={'comment_text': u'', 'merge': 'Publish'}, follow_redirects=True)
# verify that we got an error page about the merge conflict
self.assertTrue(PATTERN_TEMPLATE_COMMENT.format('error-500') in response.data)
self.assertTrue(u'MergeConflict' in response.data)
self.assertTrue(u'{}/index.{}'.format(fake_page_slug, constants.CONTENT_FILE_EXTENSION) in response.data)
self.assertTrue(u'<td><a href="/tree/{}/edit/{}/">{}</a></td>'.format(generated_branch_name_2, fake_page_slug, fake_new_title) in response.data)
self.assertTrue(u'<td>Article</td>' in response.data)
self.assertTrue(u'<td>Edited</td>' in response.data)
# these values are set in setUp() above
self.assertTrue(u'support@example.com' in response.data)
self.assertTrue(u'(123) 456-7890' in response.data)
# in TestApp
def test_redirect_into_solo_folder(self):
''' Loading a folder whose only child is a directory that is neither an article nor a category redirects into that directory.
'''
with HTTMock(self.auth_csv_example_allowed):
with HTTMock(self.mock_persona_verify_erica):
erica = ChimeTestClient(self.app.test_client(), self)
erica.sign_in('erica@example.com')
# Start a new task
erica.open_link(constants.ROUTE_ACTIVITY)
erica.start_task(description=u'Be Shot Hundreds Of Feet Into The Air for A Geyser Of Highly Pressurized Water')
# Get the branch name
branch_name = erica.get_branch_name()
# create a directory containing only another directory
repo = view_functions.get_repo(repo_path=self.app.config['REPO_PATH'], work_path=self.app.config['WORK_PATH'], email='erica@example.com')
testing_slug = u'testing'
categories_slug = u'categories'
mkdir(join(repo.working_dir, testing_slug))
mkdir(join(repo.working_dir, testing_slug, categories_slug))
# open the top level directory
erica.open_link(url='/tree/{}/edit/'.format(branch_name))
# enter the 'testing' directory
erica.follow_link(href='/tree/{}/edit/{}/'.format(branch_name, testing_slug))
# we should've automatically been redirected into the 'categories' directory
self.assertEqual(erica.path, '/tree/{}/edit/{}/'.format(branch_name, join(testing_slug, categories_slug)))
# in TestApp
def test_article_preview(self):
''' Check edit process with a user previewing their article.
'''
with HTTMock(self.auth_csv_example_allowed):
with HTTMock(self.mock_persona_verify_frances):
frances = ChimeTestClient(self.app.test_client(), self)
frances.sign_in('frances@example.com')
# Start a new task, "Diving for Dollars".
frances.open_link(constants.ROUTE_ACTIVITY)
frances.start_task(description=u'Diving for Dollars')
branch_name = frances.get_branch_name()
# Look for an "other" link that we know about - is it a category?
frances.follow_link('/tree/{}/edit/other/'.format(branch_name))
# Create a new category "Ninjas", subcategory "Flipping Out", and article "So Awesome".
frances.add_category('Ninjas')
frances.add_subcategory('Flipping Out')
frances.add_article('So Awesome')
edit_path = frances.path
# Preview the new article.
frances.preview_article('So, So Awesome', 'It was the best of times.')
expected_path = '/tree/{}/view/other/ninjas/flipping-out/so-awesome'.format(branch_name)
self.assertTrue(frances.path.startswith(expected_path), 'Should be on a preview path')
self.assertTrue('best of times' in str(frances.soup), 'Should see current content there')
# Look back at the edit form.
frances.open_link(edit_path)
self.assertTrue('best of times' in str(frances.soup), 'Should see current content there, too')
# in TestApp
def test_alpha_sort_in_admin(self):
''' Make sure items are sorted alphabetically in the Chime admin interface
'''
with HTTMock(self.auth_csv_example_allowed):
with HTTMock(self.mock_persona_verify_frances):
frances = ChimeTestClient(self.app.test_client(), self)
frances.sign_in('frances@example.com')
# Start a new task
frances.open_link(constants.ROUTE_ACTIVITY)
frances.start_task(description=u'Crunching Beetles for Trap-Door Spiders')
branch_name = frances.get_branch_name()
# Look for an "other" link that we know about - is it a category?
frances.follow_link('/tree/{}/edit/other/'.format(branch_name))
# Create a bunch of new categories
frances.add_categories(['Anthicidae', 'Scydmaenidae', 'Paussinae', 'Bostrychidae', 'Scolytidae', 'Anobiidae', 'Meloidae', 'Dermestidae', 'Silphidae'])
# The categories should be sorted by title on the page
rendered_categories = [tag.text for tag in frances.soup.find_all('a', class_='category')]
sorted_categories = sorted(rendered_categories)
self.assertEqual(rendered_categories, sorted_categories)
# in TestApp
def test_overload_front_page(self):
''' Try to overload the front page with multiple simultaneous requests.
'''
with HTTMock(self.auth_csv_example_allowed):
with HTTMock(self.mock_persona_verify_frances):
frances = ChimeTestClient(self.app.test_client(), self)
frances.sign_in('frances@example.com')
# Start a new task
frances.open_link(constants.ROUTE_ACTIVITY)
frances.start_task(description=u'Beating Crunches for Door-Spider Traps')
# hit the front page a bunch of times
times = 20
pros = []
for blip in range(times):
process = Process(target=frances.open_link, kwargs=dict(url='/', expected_status_code=303))
process.start()
pros.append(process)
# wait until the processes are done
for process in pros:
process.join()
# raise if any errors were raised
for process in pros:
self.assertEqual(0, process.exitcode, u'A process that was trying to load the front page failed!')
# in TestApp
def test_published_activities_displayed(self):
''' Published activities are displayed on the activities list page.
'''
with HTTMock(self.auth_csv_example_allowed):
erica_email = u'erica@example.com'
frances_email = u'frances@example.com'
with HTTMock(self.mock_persona_verify_erica):
erica = ChimeTestClient(self.app.test_client(), self)
erica.sign_in(erica_email)
with HTTMock(self.mock_persona_verify_frances):
frances = ChimeTestClient(self.app.test_client(), self)
frances.sign_in(frances_email)
# Start a new task and create a topic, subtopic and article
erica.open_link(constants.ROUTE_ACTIVITY)
activity_title = u'Flicking Ants Off My Laptop'
args = activity_title, u'Flying', u'Through The Air', u'Goodbye'
branch_name = erica.quick_activity_setup(*args)
# Ask for feedback
erica.follow_link(href='/tree/{}/'.format(branch_name))
erica.request_feedback()
#
# Switch users and publish the article.
#
frances.open_link(url=erica.path)
frances.approve_activity()
frances.publish_activity()
#
# Load the front page and make sure the activity is listed as published
#
erica.open_link(constants.ROUTE_ACTIVITY)
pub_ul = erica.soup.select("#activity-list-published")[0]
# there should be an HTML comment with the branch name
comment = pub_ul.findAll(text=lambda text: isinstance(text, Comment))[0]
self.assertTrue(branch_name in comment)
pub_li = comment.find_parent('li')
# and the activity title wrapped in an a tag
self.assertIsNotNone(pub_li.find('a', text=activity_title))
# in TestApp
def test_renaming_activity(self):
''' We can rename an activity
'''
with HTTMock(self.auth_csv_example_allowed):
with HTTMock(self.mock_persona_verify_erica):
erica = ChimeTestClient(self.app.test_client(), self)
erica.sign_in('erica@example.com')
# Start a new task
erica.open_link(constants.ROUTE_ACTIVITY)
erica.start_task('Ingest Wolffish, Capelin, Skate Eggs And Sometimes Rocks')
branch_name = erica.get_branch_name()
# rename the task
new_description = u'Eat Greenland Halibut, Polar And Arctic Cod, Cuttlefish, Shrimp And Armhook Squid'
erica.follow_link('/tree/{}/'.format(branch_name))
erica.rename_activity(task_description=new_description)
# the new name is on the page
self.assertIsNotNone(erica.soup.find(lambda tag: new_description in tag.text))
# the new name is in the task metadata
repo = view_functions.get_repo(repo_path=self.app.config['REPO_PATH'], work_path=self.app.config['WORK_PATH'], email='erica@example.com')
task_metadata = repo_functions.get_task_metadata_for_branch(repo, branch_name)
self.assertEqual(task_metadata['task_description'], new_description)
# in TestApp
def test_renaming_activity_doesnt_affect_review_state(self):
''' Renaming the activity shouldn't reset the review state.
'''
with HTTMock(self.auth_csv_example_allowed):
with HTTMock(self.mock_persona_verify_erica):
erica = ChimeTestClient(self.app.test_client(), self)
erica.sign_in('erica@example.com')
# Start a new task and create a topic
erica.open_link(constants.ROUTE_ACTIVITY)
args = u'Their Diets Consist Of Almost Any Creature They Are Capable Of Overpowering', u'When Living Near Water, They Will Eat Other Aquatic Animals'
branch_name = erica.quick_activity_setup(*args)
# request feedback for the task
erica.request_feedback()
# verify the feedback state
repo = view_functions.get_repo(repo_path=self.app.config['REPO_PATH'], work_path=self.app.config['WORK_PATH'], email='erica@example.com')
state, _ = repo_functions.get_review_state_and_author_email(repo, 'master', branch_name)
self.assertEqual(state, constants.REVIEW_STATE_FEEDBACK)
# change the activity description
new_description = u'Food is swallowed whole'
erica.follow_link('/tree/{}/'.format(branch_name))
erica.rename_activity(task_description=new_description)
# the new name is in the task metadata
task_metadata = repo_functions.get_task_metadata_for_branch(repo, branch_name)
self.assertEqual(task_metadata['task_description'], new_description)
# the state hasn't changed
state, _ = repo_functions.get_review_state_and_author_email(repo, 'master', branch_name)
self.assertEqual(state, constants.REVIEW_STATE_FEEDBACK)
# in TestApp
def test_request_feedback_with_activity_rename(self):
''' We can rename an activity by submitting a new name via the request feedback form
'''
with HTTMock(self.auth_csv_example_allowed):
erica_email = u'erica@example.com'
with HTTMock(self.mock_persona_verify_erica):
erica = ChimeTestClient(self.app.test_client(), self)
erica.sign_in(erica_email)
# Start a new task and create a topic
erica.open_link(constants.ROUTE_ACTIVITY)
args = u'Skates are cartilaginous fish', u'The Two Subfamilies Are Rajinae And Arhynchobatinae'
branch_name = erica.quick_activity_setup(*args)
# request feedback for the task with a new activity description
new_description = u'Skates Are Oviparous, That Is They Lay Eggs'
erica.request_feedback(task_description=new_description)
# the 'requested feedback' message is on the page
self.assertIsNotNone(erica.soup.find(text=u'{} {}'.format(erica_email, repo_functions.ACTIVITY_FEEDBACK_MESSAGE)))
# the new description is on the page
self.assertIsNotNone(erica.soup.find(lambda tag: new_description in tag.text))
# the new description is in the task metadata
repo = view_functions.get_repo(repo_path=self.app.config['REPO_PATH'], work_path=self.app.config['WORK_PATH'], email='erica@example.com')
task_metadata = repo_functions.get_task_metadata_for_branch(repo, branch_name)
self.assertEqual(task_metadata['task_description'], new_description)
# in TestApp
def test_save_unchanged_article(self):
''' Saving an unchanged article doesn't raise any errors.
'''
with HTTMock(self.auth_csv_example_allowed):
erica_email = u'erica@example.com'
with HTTMock(self.mock_persona_verify_erica):
erica = ChimeTestClient(self.app.test_client(), self)
erica.sign_in(erica_email)
# Start a new task and create a topic, subtopic and article
erica.open_link(constants.ROUTE_ACTIVITY)
article_title = u'Open-Ocean'
args = u'The Eggs Are Spherical And Buoyant', u'The Fry Are Tiny', u'Pelagic', article_title
erica.quick_activity_setup(*args)
# Edit the article
article_text = u'Although most puffers are drab, many have bright colors and distinctive markings.'
erica.edit_article(article_title, article_text)
# Edit the article again with the same variables
erica.edit_article(article_title, article_text)
# in TestApp
def test_browse_is_default_view(self):
''' Loading root redirects to browsing the live site.
'''
with HTTMock(self.auth_csv_example_allowed):
erica_email = u'erica@example.com'
with HTTMock(self.mock_persona_verify_erica):
erica = ChimeTestClient(self.app.test_client(), self)
erica.sign_in(erica_email)
erica.open_link('/', expected_status_code=303)
# it's the right url
self.assertEqual(erica.path, '/browse/')
# the test client can't derive a branch name
self.assertRaises(AssertionError, lambda: erica.get_branch_name())
# it's the right template
pattern_template_comment_stripped = sub(ur'<!--|-->', u'', PATTERN_TEMPLATE_COMMENT)
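# BeautifulSoup returns Comment nodes without their <!-- --> delimiters,
# so strip the delimiters from the pattern before comparing.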
comments = erica.soup.findAll(text=lambda text: isinstance(text, Comment))
self.assertTrue(pattern_template_comment_stripped.format(u'articles-list') in comments)
# in TestApp
def test_no_activity_bar_when_browsing(self):
''' There's no activity bar when you're browsing the live site.
'''
with HTTMock(self.auth_csv_example_allowed):
erica_email = u'erica@example.com'
with HTTMock(self.mock_persona_verify_erica):
erica = ChimeTestClient(self.app.test_client(), self)
erica.sign_in(erica_email)
erica.open_link('/', expected_status_code=303)
# there's no activity bar
self.assertIsNone(erica.soup.find("div", {"data-test-id": "activity-bar"}))
# in TestApp
def test_new_category_in_browse_starts_activity(self):
''' Starting a new category from browse view starts a new activity.
'''
with HTTMock(self.auth_csv_example_allowed):
erica_email = u'erica@example.com'
with HTTMock(self.mock_persona_verify_erica):
erica = ChimeTestClient(self.app.test_client(), self)
erica.sign_in(erica_email)
repo = view_functions.get_repo(repo_path=self.app.config['REPO_PATH'], work_path=self.app.config['WORK_PATH'], email=erica_email)
# Enter the "other" folder
articles_slug = u'test-articles'
erica.open_link(url='/browse/{}/'.format(articles_slug))
# there's only the master branch
self.assertEqual(len(repo.branches), 1)
self.assertTrue('master' in repo.branches)
# create a category
category_name = u'Confuse The Predator\'s Visual Acuity'
erica.add_category(category_name=category_name)
# there is a branch name
branch_name = erica.get_branch_name()
# verify that the branch exists in the repo
self.assertEqual(len(repo.branches), 2)
self.assertTrue(branch_name in repo.branches)
# the branch name and the new category name slug are in the path
self.assertTrue(branch_name in erica.path)
self.assertTrue(slugify(category_name) in erica.path)
# a flash about the topic's creation is on the page
self.assertEqual(PATTERN_FLASH_CREATED_CATEGORY.format(title=category_name), erica.soup.find('li', class_='flash').text)
# in TestApp
def test_new_subcategory_in_browse_starts_activity(self):
''' Starting a new subcategory from browse view starts a new activity.
'''
with HTTMock(self.auth_csv_example_allowed):
erica_email = u'erica@example.com'
with HTTMock(self.mock_persona_verify_erica):
erica = ChimeTestClient(self.app.test_client(), self)
erica.sign_in(erica_email)
repo = view_functions.get_repo(repo_path=self.app.config['REPO_PATH'], work_path=self.app.config['WORK_PATH'], email=erica_email)
# Enter the category folder in browse mode
articles_slug = u'test-articles'
topic_slug = u'test-topic'
erica.open_link(url='/browse/{}/'.format(join(articles_slug, topic_slug)))
# there's only the master branch
self.assertEqual(len(repo.branches), 1)
self.assertTrue('master' in repo.branches)
# create a subcategory
subcategory_name = u'Rolling Into A Spiny Ball'
erica.add_subcategory(subcategory_name=subcategory_name)
# there is a branch name
branch_name = erica.get_branch_name()
# verify that the branch exists in the repo
self.assertEqual(len(repo.branches), 2)
self.assertTrue(branch_name in repo.branches)
# the branch name and the new subcategory name slug are in the path
self.assertTrue(branch_name in erica.path)
self.assertTrue(slugify(subcategory_name) in erica.path)
# a flash about the subcategory's creation is on the page
self.assertEqual(PATTERN_FLASH_CREATED_CATEGORY.format(title=subcategory_name), erica.soup.find('li', class_='flash').text)
# in TestApp
def test_new_article_in_browse_starts_activity(self):
''' Starting a new article from browse view starts a new activity.
'''
with HTTMock(self.auth_csv_example_allowed):
erica_email = u'erica@example.com'
with HTTMock(self.mock_persona_verify_erica):
erica = ChimeTestClient(self.app.test_client(), self)
erica.sign_in(erica_email)
repo = view_functions.get_repo(repo_path=self.app.config['REPO_PATH'], work_path=self.app.config['WORK_PATH'], email=erica_email)
# Enter the category folder in browse mode
articles_slug = u'test-articles'
topic_slug = u'test-topic'
subtopic_slug = u'test-subtopic'
erica.open_link(url='/browse/{}/'.format(join(articles_slug, topic_slug, subtopic_slug)))
# there's only the master branch
self.assertEqual(len(repo.branches), 1)
self.assertTrue('master' in repo.branches)
# create an article
article_name = u'Grunts, Snuffles And Squeals'
erica.add_article(article_name=article_name)
# there is a branch name
branch_name = erica.get_branch_name()
# verify that the branch exists in the repo
self.assertEqual(len(repo.branches), 2)
self.assertTrue(branch_name in repo.branches)
# the branch name and the new article name slug are in the path
self.assertTrue(branch_name in erica.path)
self.assertTrue(slugify(article_name) in erica.path)
# a flash about the article's creation is on the page
self.assertEqual(PATTERN_FLASH_CREATED_ARTICLE.format(title=article_name), erica.soup.find('li', class_='flash').text)
# in TestApp
def test_delete_category_in_browse_starts_activity(self):
''' Deleting a category from browse view starts a new activity.
'''
with HTTMock(self.auth_csv_example_allowed):
erica_email = u'erica@example.com'
with HTTMock(self.mock_persona_verify_erica):
erica = ChimeTestClient(self.app.test_client(), self)
erica.sign_in(erica_email)
repo = view_functions.get_repo(repo_path=self.app.config['REPO_PATH'], work_path=self.app.config['WORK_PATH'], email=erica_email)
# Enter the category folder in browse mode
articles_slug = u'test-articles'
erica.open_link(url='/browse/{}/'.format(articles_slug))
# there's only the master branch
self.assertEqual(len(repo.branches), 1)
self.assertTrue('master' in repo.branches)
# delete a category
topic_title = u'Test Topic'
erica.follow_modify_category_link(topic_title)
erica.delete_category()
# there is a branch name
branch_name = erica.get_branch_name()
# verify that the branch exists in the repo
self.assertEqual(len(repo.branches), 2)
self.assertTrue(branch_name in repo.branches)
# the branch name is in the path
self.assertTrue(branch_name in erica.path)
# a flash about the topic's deletion is on the page
self.assertEqual(PATTERN_FLASH_DELETED_CATEGORY.format(title=topic_title, containing=u'(containing 1 topic and 1 article) '), erica.soup.find('li', class_='flash').text)
# in TestApp
def test_delete_article_in_browse_starts_activity(self):
''' Deleting an article from browse view starts a new activity.
'''
with HTTMock(self.auth_csv_example_allowed):
erica_email = u'erica@example.com'
with HTTMock(self.mock_persona_verify_erica):
erica = ChimeTestClient(self.app.test_client(), self)
erica.sign_in(erica_email)
repo = view_functions.get_repo(repo_path=self.app.config['REPO_PATH'], work_path=self.app.config['WORK_PATH'], email=erica_email)
# Enter the category folder in browse mode
articles_slug = u'test-articles'
topic_slug = u'test-topic'
subtopic_slug = u'test-subtopic'
erica.open_link(url='/browse/{}/'.format(join(articles_slug, topic_slug, subtopic_slug)))
# there's only the master branch
self.assertEqual(len(repo.branches), 1)
self.assertTrue('master' in repo.branches)
# delete the article
article_title = u'Test Article'
erica.delete_article(article_title)
# there is a branch name
branch_name = erica.get_branch_name()
# verify that the branch exists in the repo
self.assertEqual(len(repo.branches), 2)
self.assertTrue(branch_name in repo.branches)
# the branch name is in the path
self.assertTrue(branch_name in erica.path)
# a flash about the article's deletion is on the page
self.assertEqual(PATTERN_FLASH_DELETED_ARTICLE.format(title=article_title), erica.soup.find('li', class_='flash').text)
# in TestApp
def test_modify_category_in_browse_starts_activity(self):
''' Modifying a category from browse view starts a new activity.
'''
with HTTMock(self.auth_csv_example_allowed):
erica_email = u'erica@example.com'
with HTTMock(self.mock_persona_verify_erica):
erica = ChimeTestClient(self.app.test_client(), self)
erica.sign_in(erica_email)
repo = view_functions.get_repo(repo_path=self.app.config['REPO_PATH'], work_path=self.app.config['WORK_PATH'], email=erica_email)
# Enter the category folder in browse mode
articles_slug = u'test-articles'
erica.open_link(url='/browse/{}/'.format(articles_slug))
# there's only the master branch
self.assertEqual(len(repo.branches), 1)
self.assertTrue('master' in repo.branches)
# edit a category
topic_title = u'Test Topic'
erica.follow_modify_category_link(topic_title)
# make a change
new_title = u'A Fluffy Tail That Stabilizes In Flight'
erica.edit_category(title_str=new_title, description_str=u'The tail acts as an adjunct airfoil, working as an air brake before landing on a tree trunk.')
# there is a branch name
branch_name = erica.get_branch_name()
# verify that the branch exists in the repo
self.assertEqual(len(repo.branches), 2)
self.assertTrue(branch_name in repo.branches)
# the branch name is in the path
self.assertTrue(branch_name in erica.path)
# a flash about the topic's edit is on the page
self.assertEqual(PATTERN_FLASH_SAVED_CATEGORY.format(title=new_title), erica.soup.find('li', class_='flash').text)
# in TestApp
def test_edit_article_in_browse_starts_activity(self):
''' Editing an article from browse view starts a new activity.
'''
with HTTMock(self.auth_csv_example_allowed):
erica_email = u'erica@example.com'
with HTTMock(self.mock_persona_verify_erica):
erica = ChimeTestClient(self.app.test_client(), self)
erica.sign_in(erica_email)
repo = view_functions.get_repo(repo_path=self.app.config['REPO_PATH'], work_path=self.app.config['WORK_PATH'], email=erica_email)
# Enter the test article edit page in browse mode
articles_slug = u'test-articles'
topic_slug = u'test-topic'
subtopic_slug = u'test-subtopic'
article_slug = u'test-article'
article_url = '/browse/{}'.format(join(articles_slug, topic_slug, subtopic_slug, article_slug, u'index.{}'.format(constants.CONTENT_FILE_EXTENSION)))
erica.open_link(url=article_url)
# there's only the master branch
self.assertEqual(len(repo.branches), 1)
self.assertTrue('master' in repo.branches)
# edit the article
new_title = u'Mostly Hairless, Apart From Their Whiskers'
new_body = u'Their internal organs are visible through the skin.'
erica.edit_article(title_str=new_title, body_str=new_body)
# there is a branch name
branch_name = erica.get_branch_name()
# verify that the branch exists in the repo
self.assertEqual(len(repo.branches), 2)
self.assertTrue(branch_name in repo.branches)
# the branch name is in the path
self.assertTrue(branch_name in erica.path)
# a flash about the article's edit is on the page
self.assertEqual(PATTERN_FLASH_SAVED_ARTICLE.format(title=new_title), erica.soup.find('li', class_='flash').text)
class TestPublishApp (TestCase):
def setUp(self):
self.old_tempdir, tempfile.tempdir = tempfile.tempdir, mkdtemp(prefix='chime-TestPublishApp-')
self.work_path = mkdtemp(prefix='chime-publish-app-')
app_args = {}
self.app = publish.create_app(app_args)
self.client = self.app.test_client()
def tearDown(self):
rmtree(tempfile.tempdir)
tempfile.tempdir = self.old_tempdir
def mock_github_request(self, url, request):
''' Mock the GitHub requests needed to fetch the chime-starter archive.
'''
_, host, path, _, _, _ = urlparse(url.geturl())
if (host, path) == ('github.com', '/chimecms/chime-starter/archive/93250f1308daef66c5809fe87fc242d092e61db7.zip'):
return response(302, '', headers={'Location': 'https://codeload.github.com/chimecms/chime-starter/tar.gz/93250f1308daef66c5809fe87fc242d092e61db7'})
if (host, path) == ('codeload.github.com', '/chimecms/chime-starter/tar.gz/93250f1308daef66c5809fe87fc242d092e61db7'):
with open(join(dirname(__file__), '93250f1308daef66c5809fe87fc242d092e61db7.zip'), 'rb') as file:
return response(200, file.read(), headers={'Content-Type': 'application/zip'})
raise Exception('Unknown URL {}'.format(url.geturl()))
# in TestPublishApp
def test_webhook_post(self):
''' Check basic webhook flow.
'''
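# A trimmed-down, GitHub push-style payload; only the fields relevant to
# the webhook flow under test are included here.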
payload = '''
{
"head": "93250f1308daef66c5809fe87fc242d092e61db7",
"ref": "refs/heads/master",
"size": 1,
"commits": [
{
"sha": "93250f1308daef66c5809fe87fc242d092e61db7",
"message": "Clean up braces",
"author": {
"name": "Frances Berriman",
"email": "phae@example.com"
},
"url": "https://github.com/chimecms/chime-starter/commit/93250f1308daef66c5809fe87fc242d092e61db7",
"distinct": true
}
]
}
'''
with HTTMock(self.mock_github_request):
response = self.client.post('/', data=payload)
self.assertTrue(response.status_code in range(200, 300))
# in TestPublishApp
def test_load(self):
''' Make sure the publish module loads properly
'''
from chime import publish
self.assertIsNotNone(publish.logger)
if __name__ == '__main__':
main()
| 54.967374 | 542 | 0.64746 | 21,104 | 173,532 | 5.10562 | 0.059136 | 0.031833 | 0.022608 | 0.026952 | 0.78526 | 0.762773 | 0.739905 | 0.72138 | 0.706447 | 0.687839 | 0 | 0.008327 | 0.251861 | 173,532 | 3,156 | 543 | 54.984791 | 0.821446 | 0.108597 | 0 | 0.631552 | 0 | 0.040477 | 0.160256 | 0.012996 | 0 | 0 | 0 | 0.000317 | 0.261546 | 0 | null | null | 0.000519 | 0.017644 | null | null | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 6 |
4b31d741f851b2e6ae96bf6c27748303029617b8 | 2,388 | py | Python | highton/models/contact.py | seibert-media/Highton | 1519e4fb105f62882c2e7bc81065d994649558d8 | [
"Apache-2.0"
] | 18 | 2015-06-24T02:33:12.000Z | 2022-02-11T10:33:58.000Z | highton/models/contact.py | seibert-media/Highton | 1519e4fb105f62882c2e7bc81065d994649558d8 | [
"Apache-2.0"
] | 13 | 2016-01-14T19:11:24.000Z | 2020-04-21T08:53:27.000Z | highton/models/contact.py | seibert-media/Highton | 1519e4fb105f62882c2e7bc81065d994649558d8 | [
"Apache-2.0"
] | 15 | 2015-04-15T15:08:31.000Z | 2022-02-11T15:34:19.000Z | from highton import fields
from highton.models import HightonModel
from highton.highton_constants import HightonConstants
class Contact(
HightonModel,
):
"""
:ivar id: fields.IntegerField(name=HightonConstants.ID)
:ivar author_id: fields.IntegerField(name=HightonConstants.AUTHOR_ID)
:ivar background: fields.StringField(name=HightonConstants.BACKGROUND)
:ivar created_at: fields.DatetimeField(name=HightonConstants.CREATED_AT)
:ivar group_id: fields.IntegerField(name=HightonConstants.GROUP_ID)
:ivar owner_id: fields.IntegerField(name=HightonConstants.OWNER_ID)
:ivar updated_at: fields.DatetimeField(name=HightonConstants.UPDATED_AT)
:ivar visible_to: fields.StringField(name=HightonConstants.VISIBLE_TO)
:ivar linkedin_url: fields.StringField(name=HightonConstants.LINKEDIN_URL)
:ivar avatar_url: fields.StringField(name=HightonConstants.AVATAR_URL)
:ivar tags: fields.ListField(name=HightonConstants.TAGS, init_class=Tag)
:ivar contact_data: fields.ObjectField(name=HightonConstants.CONTACT_DATA, init_class=ContactData)
:ivar subject_datas: fields.ListField(name=HightonConstants.SUBJECT_DATAS, init_class=SubjectData)
"""
def __init__(self, **kwargs):
from highton.models import (
Tag,
ContactData,
SubjectData,
)
self.author_id = fields.IntegerField(name=HightonConstants.AUTHOR_ID)
self.background = fields.StringField(name=HightonConstants.BACKGROUND)
self.created_at = fields.DatetimeField(name=HightonConstants.CREATED_AT)
self.group_id = fields.IntegerField(name=HightonConstants.GROUP_ID)
self.owner_id = fields.IntegerField(name=HightonConstants.OWNER_ID)
self.updated_at = fields.DatetimeField(name=HightonConstants.UPDATED_AT)
self.visible_to = fields.StringField(name=HightonConstants.VISIBLE_TO)
self.linkedin_url = fields.StringField(name=HightonConstants.LINKEDIN_URL)
self.avatar_url = fields.StringField(name=HightonConstants.AVATAR_URL)
self.tags = fields.ListField(name=HightonConstants.TAGS, init_class=Tag)
self.contact_data = fields.ObjectField(name=HightonConstants.CONTACT_DATA, init_class=ContactData)
self.subject_datas = fields.ListField(name=HightonConstants.SUBJECT_DATAS, init_class=SubjectData)
super().__init__(**kwargs)
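# NOTE: the ``id`` ivar documented above is not assigned here; it is
# presumably set by the HightonModel base class.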
| 51.913043 | 106 | 0.766332 | 265 | 2,388 | 6.698113 | 0.158491 | 0.28169 | 0.094648 | 0.166761 | 0.823662 | 0.801127 | 0.736901 | 0.736901 | 0.24 | 0.178028 | 0 | 0 | 0.142379 | 2,388 | 45 | 107 | 53.066667 | 0.866699 | 0.404523 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.04 | false | 0 | 0.16 | 0 | 0.24 | 0 | 0 | 0 | 0 | null | 1 | 0 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 6 |
4b8c9a2b95c7ad4f5a0ef4aded0871e14dc25a80 | 34 | py | Python | neighbors/KNeighborsClassifier.py | CollinHeck/MachineLearningVisualized | 0086184dfbe7004bf3a5803fd27b71627608b1a6 | [
"MIT"
] | null | null | null | neighbors/KNeighborsClassifier.py | CollinHeck/MachineLearningVisualized | 0086184dfbe7004bf3a5803fd27b71627608b1a6 | [
"MIT"
] | 3 | 2020-11-29T10:04:08.000Z | 2020-11-29T10:23:56.000Z | neighbors/KNeighborsClassifier.py | CollinHeck/MachineLearningVisualized | 0086184dfbe7004bf3a5803fd27b71627608b1a6 | [
"MIT"
] | null | null | null | # TODO: Implement a KNN Classifier | 34 | 34 | 0.794118 | 5 | 34 | 5.4 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.147059 | 34 | 1 | 34 | 34 | 0.931034 | 0.941176 | 0 | null | 0 | null | 0 | 0 | null | 0 | 0 | 1 | null | 1 | null | true | 0 | 0 | null | null | null | 1 | 1 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 1 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 6 |
4bbbecfe55e42f31cd189fe604408269536343db | 1,531 | py | Python | util/compute_luts.py | jjt20/scripts | ec4a001b3082ba4079191ca8aae37be8e790aac2 | [
"MIT"
] | 12 | 2017-03-04T22:06:06.000Z | 2022-01-30T11:40:34.000Z | util/compute_luts.py | jjt20/scripts | ec4a001b3082ba4079191ca8aae37be8e790aac2 | [
"MIT"
] | 29 | 2017-08-20T15:22:03.000Z | 2020-09-17T06:06:17.000Z | util/compute_luts.py | jjt20/scripts | ec4a001b3082ba4079191ca8aae37be8e790aac2 | [
"MIT"
] | 6 | 2017-08-22T19:16:01.000Z | 2021-05-31T14:43:18.000Z | import os
import numpy as np
"""compute lut_in and lut_out for labelconvert from reference_table """
PRD = os.environ['PRD']
PARCEL = os.environ['PARCEL']
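# Assumed layout of reference_table_<PARCEL>.csv (FreeSurfer-style LUT):
# col 0 = input label value, col 1 = region name, cols 2-4 = R/G/B,
# col 5 = output label value, col 8 = alpha.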
lut_in_names = np.loadtxt(open(os.path.join('share', 'reference_table_' + PARCEL + ".csv"), "r"), delimiter=",", skiprows=1, usecols=(1,), dtype='str')
lut_in_vals = np.loadtxt(open(os.path.join('share', 'reference_table_' + PARCEL + ".csv"), "r"), delimiter=",", skiprows=1, usecols=(0, 2, 3, 4, 8), dtype='int')
f = open(os.path.join(PRD, 'connectivity/lut_in.txt'), 'w')
for i, row in enumerate(lut_in_vals):
f.write(str(row[0]) + ' ')
f.write(lut_in_names[i]+' ')
for j in range(1,5):
f.write(str(row[j]) + ' ')
f.write('\n')
f.close()
lut_out_names = np.loadtxt(open(os.path.join('share', 'reference_table_' + PARCEL + ".csv"), "r"), delimiter=",", skiprows=1, usecols=(1,), dtype='str')
lut_out_vals = np.loadtxt(open(os.path.join('share', 'reference_table_' + PARCEL + ".csv"), "r"), delimiter=",", skiprows=1, usecols=(5, 2, 3, 4, 8), dtype='int')
f = open(os.path.join(PRD, 'connectivity/lut_out.txt'), 'w')
for i, row in enumerate(lut_out_vals):
f.write(str(row[0]) + ' ')
f.write(lut_out_names[i]+' ')
for j in range(1,5):
f.write(str(row[j]) + ' ')
f.write('\n')
f.close()
#lut_out = np.loadtxt(open(os.path.join('share', 'reference_table_' + PARCEL + ".csv"), "r"), delimiter=",", skiprows=1, usecols=(0, 5), dtype='int')
#np.savetxt(os.path.join(PRD, 'connectivity', 'lut_out.txt'), lut_out, fmt='%d %d')
| 41.378378 | 162 | 0.626388 | 250 | 1,531 | 3.7 | 0.22 | 0.058378 | 0.086486 | 0.105946 | 0.821622 | 0.821622 | 0.821622 | 0.821622 | 0.727568 | 0.671351 | 0 | 0.018925 | 0.137165 | 1,531 | 36 | 163 | 42.527778 | 0.681302 | 0.150229 | 0 | 0.416667 | 0 | 0 | 0.153094 | 0.038274 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.083333 | 0 | 0.083333 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 6 |
299c2ec91046f2867af771171c8138da661e60ec | 457 | py | Python | helga_umb/signals/__init__.py | ktdreyer/helga-umb | f0c6858745d90205e74eec0eb5ebaafa655b2336 | [
"MIT"
] | null | null | null | helga_umb/signals/__init__.py | ktdreyer/helga-umb | f0c6858745d90205e74eec0eb5ebaafa655b2336 | [
"MIT"
] | 2 | 2018-04-27T15:37:10.000Z | 2018-08-22T21:00:40.000Z | helga_umb/signals/__init__.py | ktdreyer/helga-umb | f0c6858745d90205e74eec0eb5ebaafa655b2336 | [
"MIT"
] | null | null | null | import helga_umb.signals.distgit # NOQA: F401
import helga_umb.signals.errata # NOQA: F401
import helga_umb.signals.freshmaker # NOQA: F401
import helga_umb.signals.jenkins # NOQA: F401
import helga_umb.signals.pagure # NOQA: F401
import helga_umb.signals.polarion # NOQA: F401
import helga_umb.signals.resultsdb # NOQA: F401
import helga_umb.signals.robosignatory # NOQA: F401
"""
These modules register smokesignal callbacks for UMB events.
"""
| 30.466667 | 60 | 0.78337 | 64 | 457 | 5.46875 | 0.328125 | 0.251429 | 0.32 | 0.48 | 0.58 | 0.58 | 0 | 0 | 0 | 0 | 0 | 0.060453 | 0.131291 | 457 | 14 | 61 | 32.642857 | 0.821159 | 0.190372 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 0 | 0 | 0 | null | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 6 |
29f504b2a2f77817971a3dcec405c6425c0db276 | 32 | py | Python | rank/analysis/sentiment.py | zaibacu/masters | a7c73b3b10dc9d39559ada9a27cbd8e586bb01fd | [
"MIT"
] | 1 | 2017-09-10T17:09:09.000Z | 2017-09-10T17:09:09.000Z | rank/analysis/sentiment.py | zaibacu/masters | a7c73b3b10dc9d39559ada9a27cbd8e586bb01fd | [
"MIT"
] | null | null | null | rank/analysis/sentiment.py | zaibacu/masters | a7c73b3b10dc9d39559ada9a27cbd8e586bb01fd | [
"MIT"
] | null | null | null | def get_mood(_in):
return 0
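# get_mood() above is a stub that always reports neutral. Below is a hedged
# sketch of a simple lexicon-based alternative; the word sets are illustrative
# placeholders, not a validated sentiment lexicon.
_POSITIVE = {"good", "great", "excellent", "happy", "love"}
_NEGATIVE = {"bad", "terrible", "awful", "sad", "hate"}

def lexicon_mood(text):
    # Score = (# positive words) - (# negative words); 0 means neutral.
    words = text.lower().split()
    return sum(w in _POSITIVE for w in words) - sum(w in _NEGATIVE for w in words)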
| 10.666667 | 18 | 0.65625 | 6 | 32 | 3.166667 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.041667 | 0.25 | 32 | 2 | 19 | 16 | 0.75 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.5 | false | 0 | 0 | 0.5 | 1 | 0 | 1 | 1 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 1 | 1 | 0 | 0 | 6 |
29fa79b74fd6802ffac629f6683bc6059983d5e2 | 42 | py | Python | shadow/unit/__init__.py | LiGhT1EsS/shadowChain | 475dbf31a0678cb2282eb978893b1cccfd48a780 | [
"Apache-2.0"
] | null | null | null | shadow/unit/__init__.py | LiGhT1EsS/shadowChain | 475dbf31a0678cb2282eb978893b1cccfd48a780 | [
"Apache-2.0"
] | null | null | null | shadow/unit/__init__.py | LiGhT1EsS/shadowChain | 475dbf31a0678cb2282eb978893b1cccfd48a780 | [
"Apache-2.0"
] | null | null | null | from .config_decode import load_conf_file
| 21 | 41 | 0.880952 | 7 | 42 | 4.857143 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.095238 | 42 | 1 | 42 | 42 | 0.894737 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 1 | 1 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 6 |
4b15ff57374a08caf7af3ece267f11e3f9c6d0d7 | 37 | py | Python | asynchronous_qiwi/call/API/QIWITerminals/ttg_groups/__init__.py | LexLuthorReal/asynchronous_qiwi | 5847a8d4008493656e973e5283888a4e57234962 | [
"MIT"
] | 3 | 2021-05-20T02:36:30.000Z | 2021-11-28T16:00:15.000Z | asynchronous_qiwi/call/API/QIWITerminals/ttg_groups/__init__.py | LexLuthorReal/asynchronous_qiwi | 5847a8d4008493656e973e5283888a4e57234962 | [
"MIT"
] | null | null | null | asynchronous_qiwi/call/API/QIWITerminals/ttg_groups/__init__.py | LexLuthorReal/asynchronous_qiwi | 5847a8d4008493656e973e5283888a4e57234962 | [
"MIT"
] | 1 | 2021-11-28T16:00:20.000Z | 2021-11-28T16:00:20.000Z | from .ttp_groups import TTPGroupsAPI
| 18.5 | 36 | 0.864865 | 5 | 37 | 6.2 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.108108 | 37 | 1 | 37 | 37 | 0.939394 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 1 | 1 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 6 |
d99c12691871c9f6dcbb92f20fdf74987d92ef35 | 18 | py | Python | NNeighbor/__init__.py | L-F-A/Machine-Learning | b9472544e06fc91606c0d1a609c23e22ba30cf18 | [
"MIT"
] | null | null | null | NNeighbor/__init__.py | L-F-A/Machine-Learning | b9472544e06fc91606c0d1a609c23e22ba30cf18 | [
"MIT"
] | null | null | null | NNeighbor/__init__.py | L-F-A/Machine-Learning | b9472544e06fc91606c0d1a609c23e22ba30cf18 | [
"MIT"
] | null | null | null | from .NN import *
| 9 | 17 | 0.666667 | 3 | 18 | 4 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.222222 | 18 | 1 | 18 | 18 | 0.857143 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 1 | 1 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 6 |
d9b835dbb9b7aed39e1c31376662dff9ae1c6cb6 | 53 | py | Python | example/layers/some/some_module.py | mkaraev/lof | 19be33d1283842069af0dd0776027b24676aac5e | [
"MIT"
] | 6 | 2021-07-19T07:32:30.000Z | 2021-09-21T16:10:55.000Z | example/layers/some/some_module.py | mkaraev/lof | 19be33d1283842069af0dd0776027b24676aac5e | [
"MIT"
] | null | null | null | example/layers/some/some_module.py | mkaraev/lof | 19be33d1283842069af0dd0776027b24676aac5e | [
"MIT"
] | 1 | 2021-07-25T07:00:12.000Z | 2021-07-25T07:00:12.000Z | def custom_function_from_layer():
return "bingo"
| 17.666667 | 33 | 0.754717 | 7 | 53 | 5.285714 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.150943 | 53 | 2 | 34 | 26.5 | 0.822222 | 0 | 0 | 0 | 0 | 0 | 0.09434 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.5 | true | 0 | 0 | 0.5 | 1 | 0 | 1 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 1 | 1 | 0 | 0 | 1 | 1 | 0 | 0 | 6 |
8a3f1ce19a306924585448cf650fa17b63f7339e | 44 | py | Python | bolinette/utils/__init__.py | bolinette/bolinette | b35a7d828c7d9617da6a8d7ac066e3b675a65252 | [
"MIT"
] | 4 | 2020-11-02T15:16:32.000Z | 2022-01-11T11:19:24.000Z | bolinette/utils/__init__.py | bolinette/bolinette | b35a7d828c7d9617da6a8d7ac066e3b675a65252 | [
"MIT"
] | 14 | 2021-01-04T11:06:59.000Z | 2022-03-23T17:01:49.000Z | bolinette/utils/__init__.py | bolinette/bolinette | b35a7d828c7d9617da6a8d7ac066e3b675a65252 | [
"MIT"
] | null | null | null | from bolinette.utils.proxy import InitProxy
| 22 | 43 | 0.863636 | 6 | 44 | 6.333333 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.090909 | 44 | 1 | 44 | 44 | 0.95 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 1 | 1 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 6 |
8a8053c07b741453458624af513b78c5c4da5273 | 195 | py | Python | test_bash.py | chapman-cs510-2016f/cw-02-northeast_corner | 49fdfa2847aed6771de7a71bccf7590705d8d3bb | [
"MIT"
] | null | null | null | test_bash.py | chapman-cs510-2016f/cw-02-northeast_corner | 49fdfa2847aed6771de7a71bccf7590705d8d3bb | [
"MIT"
] | null | null | null | test_bash.py | chapman-cs510-2016f/cw-02-northeast_corner | 49fdfa2847aed6771de7a71bccf7590705d8d3bb | [
"MIT"
] | null | null | null | import subprocess as sp
def test_helloworld():
assert sp.check_output("./helloworld.sh") == "Hello world.\n"
def test_countup():
assert sp.check_output(["./countup.sh","5"]) == "1 2 3 4 5\n"
| 24.375 | 62 | 0.676923 | 32 | 195 | 4 | 0.625 | 0.109375 | 0.203125 | 0.296875 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.035294 | 0.128205 | 195 | 7 | 63 | 27.857143 | 0.717647 | 0 | 0 | 0 | 0 | 0 | 0.271795 | 0 | 0 | 0 | 0 | 0 | 0.4 | 1 | 0.4 | true | 0 | 0.2 | 0 | 0.6 | 0 | 0 | 0 | 0 | null | 0 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 1 | 1 | 0 | 0 | 0 | 1 | 0 | 0 | 6 |
8aad74f0ed26ba2f3e9dd7380a0b4a297632ca8f | 1,130 | py | Python | MillerArrays/changeMtzColumns.py | MooersLab/jupyterlabcctbxsnipsplus | 80a380046adcc9b16581ed1681884017514edbb7 | [
"MIT"
] | null | null | null | MillerArrays/changeMtzColumns.py | MooersLab/jupyterlabcctbxsnipsplus | 80a380046adcc9b16581ed1681884017514edbb7 | [
"MIT"
] | null | null | null | MillerArrays/changeMtzColumns.py | MooersLab/jupyterlabcctbxsnipsplus | 80a380046adcc9b16581ed1681884017514edbb7 | [
"MIT"
] | null | null | null | # Description: Read in mtz file and write out with fewer columns.
# Source: NA
"""
from iotbx.reflection_file_reader import any_reflection_file
hkl_in = any_reflection_file("${1:/Users/blaine/manuscripts/RETkinaseLoxo/ret_blu.mtz}")
miller_arrays = hkl_in.as_miller_arrays()
i_obs = miller_arrays[3]
r_free_flags = miller_arrays[0]
f_obs = i_obs.f_sq_as_f()
mtz_dataset = i_obs.as_mtz_dataset(column_root_label="I")
mtz_dataset.add_miller_array(f_obs, column_root_label="F")
mtz_dataset.add_miller_array(r_free_flags,column_root_label="${2:FreeR_flag}")
mtz_dataset.mtz_object().write("${3:loxodata.mtz}")
"""
from iotbx.reflection_file_reader import any_reflection_file
hkl_in = any_reflection_file("/Users/blaine/manuscripts/RETkinaseLoxo/ret_blu.mtz")
miller_arrays = hkl_in.as_miller_arrays()
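# NOTE: the array indices below (3 for the intensities, 0 for the R-free
# flags) are specific to the column order of this particular MTZ file.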
i_obs = miller_arrays[3]
r_free_flags = miller_arrays[0]
f_obs = i_obs.f_sq_as_f()
mtz_dataset = i_obs.as_mtz_dataset(column_root_label="I")
mtz_dataset.add_miller_array(f_obs, column_root_label="F")
mtz_dataset.add_miller_array(r_free_flags,column_root_label="FreeR_flag")
mtz_dataset.mtz_object().write("loxodata.mtz")
| 34.242424 | 88 | 0.80885 | 194 | 1,130 | 4.257732 | 0.257732 | 0.121065 | 0.108959 | 0.09201 | 0.898305 | 0.898305 | 0.898305 | 0.818402 | 0.818402 | 0.818402 | 0 | 0.006673 | 0.071681 | 1,130 | 32 | 89 | 35.3125 | 0.780744 | 0.534513 | 0 | 0 | 0 | 0 | 0.145068 | 0.098646 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.1 | 0 | 0.1 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 6 |
8ab5186d259fcadba43f76294de78d2578ff644e | 7,026 | py | Python | tests/pytests/unit/beacons/test_smartos_vmadm.py | tomdoherty/salt | f87d5d7abbf9777773c4d91fdafecb8b1a728e76 | [
"Apache-2.0"
] | 9,425 | 2015-01-01T05:59:24.000Z | 2022-03-31T20:44:05.000Z | tests/pytests/unit/beacons/test_smartos_vmadm.py | tomdoherty/salt | f87d5d7abbf9777773c4d91fdafecb8b1a728e76 | [
"Apache-2.0"
] | 33,507 | 2015-01-01T00:19:56.000Z | 2022-03-31T23:48:20.000Z | tests/pytests/unit/beacons/test_smartos_vmadm.py | tomdoherty/salt | f87d5d7abbf9777773c4d91fdafecb8b1a728e76 | [
"Apache-2.0"
] | 5,810 | 2015-01-01T19:11:45.000Z | 2022-03-31T02:37:20.000Z | # Python libs
import pytest
# Salt libs
import salt.beacons.smartos_vmadm as vmadm
from tests.support.mock import MagicMock, patch
@pytest.fixture
def configure_loader_modules():
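# Inject empty __context__/__salt__ dunders so the beacon module can be
# exercised outside a running Salt minion.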
return {vmadm: {"__context__": {}, "__salt__": {}}}
@pytest.fixture
def mock_clean_state():
return {"first_run": True, "vms": []}
@pytest.fixture
def mock_vm_none():
return {}
@pytest.fixture
def mock_vm_one():
return {
"00000000-0000-0000-0000-000000000001": {
"state": "running",
"alias": "vm1",
"hostname": "vm1",
"dns_domain": "example.org",
},
}
@pytest.fixture
def mock_vm_two_stopped():
return {
"00000000-0000-0000-0000-000000000001": {
"state": "running",
"alias": "vm1",
"hostname": "vm1",
"dns_domain": "example.org",
},
"00000000-0000-0000-0000-000000000002": {
"state": "stopped",
"alias": "vm2",
"hostname": "vm2",
"dns_domain": "example.org",
},
}
@pytest.fixture
def mock_vm_two_started():
return {
"00000000-0000-0000-0000-000000000001": {
"state": "running",
"alias": "vm1",
"hostname": "vm1",
"dns_domain": "example.org",
},
"00000000-0000-0000-0000-000000000002": {
"state": "running",
"alias": "vm2",
"hostname": "vm2",
"dns_domain": "example.org",
},
}
def test_non_list_config():
"""
We only have minimal validation so we test that here
"""
config = {}
ret = vmadm.validate(config)
assert ret == (False, "Configuration for vmadm beacon must be a list!")
def test_created_startup(mock_clean_state, mock_vm_one):
"""
Test with one vm and startup_create_event
"""
# NOTE: this should yield 1 created event + one state event
with patch.dict(vmadm.VMADM_STATE, mock_clean_state), patch.dict(
vmadm.__salt__, {"vmadm.list": MagicMock(return_value=mock_vm_one)}
):
config = [{"startup_create_event": True}]
ret = vmadm.validate(config)
assert ret == (True, "Valid beacon configuration")
ret = vmadm.beacon(config)
res = [
{
"alias": "vm1",
"tag": "created/00000000-0000-0000-0000-000000000001",
"hostname": "vm1",
"dns_domain": "example.org",
},
{
"alias": "vm1",
"tag": "running/00000000-0000-0000-0000-000000000001",
"hostname": "vm1",
"dns_domain": "example.org",
},
]
assert ret == res
def test_created_nostartup(mock_clean_state, mock_vm_one):
"""
Test with one vm and startup_create_event unset/false
"""
# NOTE: this should yield 0 created events + one state event
with patch.dict(vmadm.VMADM_STATE, mock_clean_state), patch.dict(
vmadm.__salt__, {"vmadm.list": MagicMock(return_value=mock_vm_one)}
):
config = []
ret = vmadm.validate(config)
assert ret == (True, "Valid beacon configuration")
ret = vmadm.beacon(config)
res = [
{
"alias": "vm1",
"tag": "running/00000000-0000-0000-0000-000000000001",
"hostname": "vm1",
"dns_domain": "example.org",
}
]
assert ret == res
def test_created(mock_clean_state, mock_vm_one, mock_vm_two_started):
"""
Test with one vm, create a 2nd one
"""
# NOTE: this should yield 1 created event + state event
with patch.dict(vmadm.VMADM_STATE, mock_clean_state), patch.dict(
vmadm.__salt__,
{"vmadm.list": MagicMock(side_effect=[mock_vm_one, mock_vm_two_started])},
):
config = []
ret = vmadm.validate(config)
assert ret == (True, "Valid beacon configuration")
# Initial pass (Initialized state and do not yield created events at startup)
ret = vmadm.beacon(config)
# Second pass (After create a new vm)
ret = vmadm.beacon(config)
res = [
{
"alias": "vm2",
"tag": "created/00000000-0000-0000-0000-000000000002",
"hostname": "vm2",
"dns_domain": "example.org",
},
{
"alias": "vm2",
"tag": "running/00000000-0000-0000-0000-000000000002",
"hostname": "vm2",
"dns_domain": "example.org",
},
]
assert ret == res
def test_deleted(mock_clean_state, mock_vm_two_stopped, mock_vm_one):
"""
Test with two vms and one gets destroyed
"""
# NOTE: this should yield 1 deleted event
with patch.dict(vmadm.VMADM_STATE, mock_clean_state), patch.dict(
vmadm.__salt__,
{"vmadm.list": MagicMock(side_effect=[mock_vm_two_stopped, mock_vm_one])},
):
config = []
ret = vmadm.validate(config)
assert ret == (True, "Valid beacon configuration")
# Initial pass (Initialized state and do not yield created events at startup)
ret = vmadm.beacon(config)
# Second pass (Destroying one vm)
ret = vmadm.beacon(config)
res = [
{
"alias": "vm2",
"tag": "deleted/00000000-0000-0000-0000-000000000002",
"hostname": "vm2",
"dns_domain": "example.org",
}
]
assert ret == res
def test_complex(
mock_clean_state, mock_vm_one, mock_vm_two_started, mock_vm_two_stopped
):
"""
Test with two vms, stop one, delete one
"""
# NOTE: this should yield one stopped event, then one deleted event
with patch.dict(vmadm.VMADM_STATE, mock_clean_state), patch.dict(
vmadm.__salt__,
{
"vmadm.list": MagicMock(
side_effect=[mock_vm_two_started, mock_vm_two_stopped, mock_vm_one]
)
},
):
config = []
ret = vmadm.validate(config)
assert ret == (True, "Valid beacon configuration")
# Initial pass (Initialized state and do not yield created events at startup)
ret = vmadm.beacon(config)
# Second pass (Stop one vm)
ret = vmadm.beacon(config)
res = [
{
"alias": "vm2",
"tag": "stopped/00000000-0000-0000-0000-000000000002",
"hostname": "vm2",
"dns_domain": "example.org",
}
]
assert ret == res
# Third pass (Delete one vm)
ret = vmadm.beacon(config)
res = [
{
"alias": "vm2",
"tag": "deleted/00000000-0000-0000-0000-000000000002",
"hostname": "vm2",
"dns_domain": "example.org",
}
]
assert ret == res
| 26.91954 | 85 | 0.538286 | 746 | 7,026 | 4.871314 | 0.148794 | 0.057237 | 0.057237 | 0.071547 | 0.81508 | 0.790039 | 0.77628 | 0.739681 | 0.706935 | 0.678316 | 0 | 0.09629 | 0.336322 | 7,026 | 260 | 86 | 27.023077 | 0.683037 | 0.128238 | 0 | 0.638418 | 0 | 0 | 0.234679 | 0.088357 | 0 | 0 | 0 | 0 | 0.067797 | 1 | 0.067797 | false | 0 | 0.016949 | 0.033898 | 0.118644 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 6 |
76ef3b224ce214e8d47517634b8be9efbbd0fbcb | 176 | py | Python | python/utils/manager.py | NegriLuca/pigasus | d5057b771f81cfa05bb08ea4b0fd99088150cd7a | [
"MIT"
] | 1 | 2021-10-21T17:15:26.000Z | 2021-10-21T17:15:26.000Z | python/utils/manager.py | NegriLuca/pigasus | d5057b771f81cfa05bb08ea4b0fd99088150cd7a | [
"MIT"
] | null | null | null | python/utils/manager.py | NegriLuca/pigasus | d5057b771f81cfa05bb08ea4b0fd99088150cd7a | [
"MIT"
] | null | null | null | # -*- coding: UTF-8 -*-
class context(object):
    """No-op context manager: entering and exiting do nothing."""

    def __init__(self):
        pass

    def __enter__(self):
        pass

    def __exit__(self, exc_type, exc_value, tb):
        # Returning None (falsy) lets any exception from the with-block propagate.
        pass
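# Minimal usage sketch (do_work is a hypothetical stand-in for any body):
#     with context():
#         do_work()
# behaves exactly like calling do_work() directly, since __enter__/__exit__ are
# no-ops and any exception raised inside the block propagates unchanged.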
| 19.555556 | 40 | 0.551136 | 21 | 176 | 4.047619 | 0.714286 | 0.188235 | 0.258824 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.008264 | 0.3125 | 176 | 8 | 41 | 22 | 0.694215 | 0.119318 | 0 | 0.428571 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.428571 | false | 0.428571 | 0 | 0 | 0.571429 | 0 | 1 | 0 | 0 | null | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 0 | 1 | 0 | 0 | 6 |
76f3a32c405796ae2a6fa54baab3f3523745b32f | 10,609 | py | Python | test_fakesmtpd/commands.py | srittau/FakeSMTPd | d33c57fbc4053a48ec27e8a42cfa262eb3ecbff0 | [
"MIT"
] | 4 | 2018-03-21T13:17:14.000Z | 2021-04-15T10:15:30.000Z | test_fakesmtpd/commands.py | srittau/FakeSMTPd | d33c57fbc4053a48ec27e8a42cfa262eb3ecbff0 | [
"MIT"
] | 27 | 2017-06-20T17:58:45.000Z | 2022-03-14T08:37:59.000Z | test_fakesmtpd/commands.py | srittau/FakeSMTPd | d33c57fbc4053a48ec27e8a42cfa262eb3ecbff0 | [
"MIT"
] | null | null | null | from unittest.mock import Mock
import pytest
from pytest_mock import MockerFixture
from fakesmtpd.commands import (
handle_ehlo,
handle_helo,
handle_mail,
handle_rcpt,
)
from fakesmtpd.smtp import (
SMTP_DOMAIN_LIMIT,
SMTP_LOCAL_PART_LIMIT,
SMTP_PATH_LIMIT,
SMTPStatus,
)
from fakesmtpd.state import State
@pytest.fixture(autouse=True)
def getfqdn(mocker: MockerFixture) -> Mock:
return mocker.patch(
"fakesmtpd.commands.getfqdn",
return_value="smtp.example.com",
)
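# With autouse=True the fixture above patches fakesmtpd.commands.getfqdn for every
# test in this module, giving the handlers a deterministic server FQDN; individual
# tests can still override it per-case through the returned mock's return_value.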
class TestEHLO:
def test_domain(self, getfqdn: Mock) -> None:
state = State()
state.greeted = False
getfqdn.return_value = "smtp.example.org"
code, message = handle_ehlo(state, "example.com")
assert code == SMTPStatus.OK
assert message == "smtp.example.org Hello example.com"
assert state.greeted
def test_address_literal(self, getfqdn: Mock) -> None:
state = State()
state.greeted = False
getfqdn.return_value = "smtp.example.org"
code, message = handle_ehlo(state, "[192.168.99.22]")
assert code == SMTPStatus.OK
assert message == "smtp.example.org Hello [192.168.99.22]"
assert state.greeted
def test_empty_argument(self) -> None:
code, message = handle_ehlo(State(), "")
assert code == SMTPStatus.SYNTAX_ERROR_IN_PARAMETERS
assert message == "Missing arguments"
def test_invalid_argument(self) -> None:
code, message = handle_ehlo(State(), "*")
assert code == SMTPStatus.SYNTAX_ERROR_IN_PARAMETERS
assert message == "Syntax error in arguments"
class TestHELO:
def test_set_greeted(self) -> None:
state = State()
state.greeted = False
handle_helo(state, "example.com")
assert state.greeted
def test_response(self, getfqdn: Mock) -> None:
getfqdn.return_value = "smtp.example.org"
code, message = handle_helo(State(), "example.com")
assert code == SMTPStatus.OK
assert message == "smtp.example.org Hello example.com"
def test_no_argument(self) -> None:
code, message = handle_helo(State(), "")
assert code == SMTPStatus.SYNTAX_ERROR_IN_PARAMETERS
assert message == "Missing arguments"
def test_invalid_domain(self) -> None:
code, message = handle_helo(State(), "*")
assert code == SMTPStatus.SYNTAX_ERROR_IN_PARAMETERS
assert message == "Syntax error in arguments"
class TestMAIL:
def test_with_mailbox(self) -> None:
state = State()
state.greeted = True
code, message = handle_mail(state, "FROM:<foo@example.com>")
assert code == SMTPStatus.OK
assert message == "Sender OK"
assert state.reverse_path == "foo@example.com"
def test_empty_path(self) -> None:
state = State()
state.greeted = True
code, message = handle_mail(state, "FROM:<>")
assert code == SMTPStatus.OK
assert message == "Sender OK"
assert state.reverse_path == ""
def test_with_arguments(self) -> None:
state = State()
state.greeted = True
code, message = handle_mail(
state, "FROM:<foo@example.com> foo=bar abc"
)
assert code == SMTPStatus.OK
assert message == "Sender OK"
assert state.reverse_path == "foo@example.com"
def test_with_arguments_and_quoted_local_part(self) -> None:
state = State()
state.greeted = True
code, message = handle_mail(
state, 'FROM:<"foo bar"@example.com> foo=bar'
)
assert code == SMTPStatus.OK
assert message == "Sender OK"
assert state.reverse_path == '"foo bar"@example.com'
def test_empty(self) -> None:
code, message = handle_mail(State(), "")
assert code == SMTPStatus.SYNTAX_ERROR_IN_PARAMETERS
assert message == "Syntax error in arguments"
def test_invalid_path(self) -> None:
code, message = handle_mail(State(), "FROM:INVALID")
assert code == SMTPStatus.SYNTAX_ERROR_IN_PARAMETERS
assert message == "Syntax error in arguments"
def test_path_too_long(self) -> None:
code, message = handle_mail(
State(), f"FROM:<{'a' * 60}@{'a' * (SMTP_PATH_LIMIT - 61)}>"
)
assert code == SMTPStatus.SYNTAX_ERROR_IN_PARAMETERS
assert message == "Path too long"
def test_local_part_too_long(self) -> None:
code, message = handle_mail(
State(), f"FROM:<{'a' * (SMTP_LOCAL_PART_LIMIT + 1)}@example.com>"
)
assert code == SMTPStatus.SYNTAX_ERROR_IN_PARAMETERS
assert message == "Path too long"
def test_invalid_mailbox(self) -> None:
code, message = handle_mail(State(), "FROM:<INVALID>")
assert code == SMTPStatus.SYNTAX_ERROR_IN_PARAMETERS
assert message == "Syntax error in arguments"
def test_path_with_trailing_chars(self) -> None:
code, message = handle_mail(State(), "FROM:<foo@example.com>foo=bar")
assert code == SMTPStatus.SYNTAX_ERROR_IN_PARAMETERS
assert message == "Syntax error in arguments"
def test_invalid_argument(self) -> None:
state = State()
state.greeted = True
code, message = handle_mail(state, "FROM:<foo@example.com> -foo=bar")
assert code == SMTPStatus.SYNTAX_ERROR_IN_PARAMETERS
assert message == "Syntax error in arguments"
def test_not_greeted(self) -> None:
state = State()
state.greeted = False
code, message = handle_mail(state, "FROM:<foo@example.com>")
assert code == SMTPStatus.BAD_SEQUENCE
assert message == "No EHLO sent"
def test_has_reverse_path(self) -> None:
state = State()
state.greeted = True
state.reverse_path = "bar@example.org"
code, message = handle_mail(state, "FROM:<foo@example.com>")
assert code == SMTPStatus.BAD_SEQUENCE
assert message == "Bad command sequence"
def test_has_forward_path(self) -> None:
state = State()
state.greeted = True
state.forward_path = ["bar@example.org"]
code, message = handle_mail(state, "FROM:<foo@example.com>")
assert code == SMTPStatus.BAD_SEQUENCE
assert message == "Bad command sequence"
def test_has_mail_data(self) -> None:
state = State()
state.greeted = True
state.mail_data = ""
code, message = handle_mail(state, "FROM:<foo@example.com>")
assert code == SMTPStatus.BAD_SEQUENCE
assert message == "Bad command sequence"
class TestRCPT:
def test_response(self) -> None:
state = State()
state.greeted = True
state.reverse_path = "bar@example.org"
code, message = handle_rcpt(state, "TO:<foo@example.com>")
assert code == SMTPStatus.OK
assert message == "Receiver OK"
def test_forward_paths_added(self) -> None:
state = State()
state.greeted = True
state.reverse_path = "bar@example.org"
handle_rcpt(state, "TO:<foo1@example.com>")
handle_rcpt(state, "TO:<foo2@example.com>")
assert state.forward_path == ["foo1@example.com", "foo2@example.com"]
def test_postmaster(self) -> None:
state = State()
state.greeted = True
state.reverse_path = "bar@example.org"
code, message = handle_rcpt(state, "TO:<postMaster> foo")
assert code == SMTPStatus.OK
assert message == "Receiver OK"
assert state.forward_path == ["postMaster"]
def test_with_arguments(self) -> None:
state = State()
state.greeted = True
state.reverse_path = "bar@example.org"
code, message = handle_rcpt(state, "TO:<foo@example.com> foo=bar baz")
assert code == SMTPStatus.OK
assert message == "Receiver OK"
assert state.forward_path == ["foo@example.com"]
def test_empty_argument(self) -> None:
code, message = handle_rcpt(State(), "")
assert code == SMTPStatus.SYNTAX_ERROR_IN_PARAMETERS
assert message == "Syntax error in arguments"
def test_empty_path(self) -> None:
code, message = handle_rcpt(State(), "TO:<>")
assert code == SMTPStatus.SYNTAX_ERROR_IN_PARAMETERS
assert message == "Syntax error in arguments"
def test_path_too_long(self) -> None:
code, message = handle_rcpt(
State(), f"TO:<{'a' * 60}@{'a' * (SMTP_PATH_LIMIT - 61)}>"
)
assert code == SMTPStatus.SYNTAX_ERROR_IN_PARAMETERS
assert message == "Path too long"
def test_local_part_too_long(self) -> None:
code, message = handle_rcpt(
State(), f"TO:<{'a' * (SMTP_LOCAL_PART_LIMIT + 1)}@example.com>"
)
assert code == SMTPStatus.SYNTAX_ERROR_IN_PARAMETERS
assert message == "Path too long"
def test_domain_too_long(self) -> None:
code, message = handle_rcpt(
State(), f"TO:<foo@{'a' * (SMTP_DOMAIN_LIMIT + 1)}>"
)
assert code == SMTPStatus.SYNTAX_ERROR_IN_PARAMETERS
assert message == "Path too long"
def test_path_with_trailing_chars(self) -> None:
code, message = handle_rcpt(State(), "TO:<foo@example.com>foo=bar")
assert code == SMTPStatus.SYNTAX_ERROR_IN_PARAMETERS
assert message == "Syntax error in arguments"
def test_invalid_argument(self) -> None:
code, message = handle_rcpt(State(), "TO:<foo@example.com> -foo")
assert code == SMTPStatus.SYNTAX_ERROR_IN_PARAMETERS
assert message == "Syntax error in arguments"
def test_not_greeted(self) -> None:
state = State()
state.greeted = False
state.reverse_path = "bar@example.org"
code, message = handle_rcpt(state, "TO:<foo@example.com>")
assert code == SMTPStatus.BAD_SEQUENCE
assert message == "Bad command sequence"
def test_no_reverse_path(self) -> None:
state = State()
state.greeted = True
state.reverse_path = None
code, message = handle_rcpt(state, "TO:<foo@example.com>")
assert code == SMTPStatus.BAD_SEQUENCE
assert message == "Bad command sequence"
def test_mail_data(self) -> None:
state = State()
state.greeted = True
state.reverse_path = "bar@example.org"
state.mail_data = ""
code, message = handle_rcpt(state, "TO:<foo@example.com>")
assert code == SMTPStatus.BAD_SEQUENCE
assert message == "Bad command sequence"
| 36.208191 | 78 | 0.628146 | 1,269 | 10,609 | 5.066982 | 0.078802 | 0.059098 | 0.092535 | 0.056143 | 0.877294 | 0.863453 | 0.853966 | 0.834837 | 0.819907 | 0.794401 | 0 | 0.00444 | 0.256952 | 10,609 | 292 | 79 | 36.332192 | 0.811239 | 0 | 0 | 0.631579 | 0 | 0 | 0.168913 | 0.030352 | 0 | 0 | 0 | 0 | 0.323887 | 1 | 0.153846 | false | 0 | 0.024292 | 0.004049 | 0.198381 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 6 |
0a0f6ed3cc9b4b193c07ca6927bd44637f5443a0 | 25,990 | py | Python | tests/mock_data/annotation/metadata/convention/valid_array_v2_1_2.py | broadinstitute/scp-ingest-service | 1a63a27061b53a5f7909c72d59808f9af71456a6 | [
"BSD-3-Clause"
] | null | null | null | tests/mock_data/annotation/metadata/convention/valid_array_v2_1_2.py | broadinstitute/scp-ingest-service | 1a63a27061b53a5f7909c72d59808f9af71456a6 | [
"BSD-3-Clause"
] | null | null | null | tests/mock_data/annotation/metadata/convention/valid_array_v2_1_2.py | broadinstitute/scp-ingest-service | 1a63a27061b53a5f7909c72d59808f9af71456a6 | [
"BSD-3-Clause"
] | null | null | null | from bson.objectid import ObjectId
valid_array_v2_1_2_models = {
"cell_metadata_models": {
"NAME": {
"name": "NAME",
"annotation_type": "TYPE",
"values": [],
"study_file_id": ObjectId("600f42bdb067340e777b1385"),
"study_id": ObjectId("5ea08bb17b2f150f29f4d952"),
},
"disease__time_since_onset": {
"name": "disease__time_since_onset",
"annotation_type": "group",
"values": ['12|2', '1', '24|2', '36|3|1', '0'],
"study_file_id": ObjectId("600f42bdb067340e777b1385"),
"study_id": ObjectId("5ea08bb17b2f150f29f4d952"),
},
"disease__time_since_onset__unit": {
"name": "disease__time_since_onset__unit",
"annotation_type": "group",
"values": ["UO_0000035"],
"study_file_id": ObjectId("600f42bdb067340e777b1385"),
"study_id": ObjectId("5ea08bb17b2f150f29f4d952"),
},
"preservation_method": {
"name": "preservation_method",
"annotation_type": "group",
"values": ["Fresh"],
"study_file_id": ObjectId("600f42bdb067340e777b1385"),
"study_id": ObjectId("5ea08bb17b2f150f29f4d952"),
},
"organ_region": {
"name": "organ_region",
"annotation_type": "group",
"values": [
"MBA:000000944",
"MBA:000000302|MBA:000000294|MBA:000000795",
"MBA:000000714|MBA:000000972",
"MBA:000001041",
"MBA:000000909|MBA:000000502",
],
"study_file_id": ObjectId("600f42bdb067340e777b1385"),
"study_id": ObjectId("5ea08bb17b2f150f29f4d952"),
},
"organ_region__ontology_label": {
"name": "organ_region__ontology_label",
"annotation_type": "group",
"values": [
"Folium-tuber vermis (VII)",
"Superior colliculus, sensory related|Superior colliculus, motor related|Periaqueductal gray",
"",
"Paraflocculus",
"Entorhinal area|Subiculum",
],
"study_file_id": ObjectId("600f42bdb067340e777b1385"),
"study_id": ObjectId("5ea08bb17b2f150f29f4d952"),
},
"annotation_type": {
"name": "donor",
"annotation_type": "group",
"values": ["BM01"],
"study_file_id": ObjectId("600f42bdb067340e777b1385"),
"study_id": ObjectId("5ea08bb17b2f150f29f4d952"),
},
"disease__treated": {
"name": "disease__treated",
"annotation_type": "group",
"values": ["False|False", "FALSE", "True|False", "True|False|False"],
"study_file_id": ObjectId("600f42bdb067340e777b1385"),
"study_id": ObjectId("5ea08bb17b2f150f29f4d952"),
},
"species": {
"name": "species",
"annotation_type": "group",
"values": ["NCBITaxon_9606"],
"study_file_id": ObjectId("600f42bdb067340e777b1385"),
"study_id": ObjectId("5ea08bb17b2f150f29f4d952"),
},
"biosample_id": {
"name": "biosample_id",
"annotation_type": "group",
"values": ["BM01_16dpp_r3"],
"study_file_id": ObjectId("600f42bdb067340e777b1385"),
"study_id": ObjectId("5ea08bb17b2f150f29f4d952"),
},
"biosample_type": {
"name": "biosample_type",
"annotation_type": "group",
"values": ["PrimaryBioSample_BodyFluid"],
"study_file_id": ObjectId("600f42bdb067340e777b1385"),
"study_id": ObjectId("5ea08bb17b2f150f29f4d952"),
},
"donor": {
"name": "donor",
"annotation_type": "group",
"values": ["BM01"],
"study_file_id": ObjectId("600f42bdb067340e777b1385"),
"study_id": ObjectId("5ea08bb17b2f150f29f4d952"),
},
"donor_id": {
"name": "donor_id",
"annotation_type": "group",
"values": ["BM01"],
"study_file_id": ObjectId("600f42bdb067340e777b1385"),
"study_id": ObjectId("5ea08bb17b2f150f29f4d952"),
},
"species__ontology_label": {
"name": "species__ontology_label",
"annotation_type": "group",
"values": ["Homo sapiens"],
"study_file_id": ObjectId("600f42bdb067340e777b1385"),
"study_id": ObjectId("5ea08bb17b2f150f29f4d952"),
},
"geographical_region": {
"name": "geographical_region",
"annotation_type": "group",
"values": ["GAZ_00003181"],
"study_file_id": ObjectId("600f42bdb067340e777b1385"),
"study_id": ObjectId("5ea08bb17b2f150f29f4d952"),
},
"geographical_region__ontology_label": {
"name": "geographical_region__ontology_label",
"annotation_type": "group",
"values": ["Boston"],
"study_file_id": ObjectId("600f42bdb067340e777b1385"),
"study_id": ObjectId("5ea08bb17b2f150f29f4d952"),
},
"library_preparation_protocol": {
"name": "library_preparation_protocol",
"annotation_type": "group",
"values": ["EFO_0008919"],
"study_file_id": ObjectId("600f42bdb067340e777b1385"),
"study_id": ObjectId("5ea08bb17b2f150f29f4d952"),
},
"library_preparation_protocol__ontology_label": {
"name": "library_preparation_protocol__ontology_label",
"annotation_type": "group",
"values": ["Seq-Well"],
"study_file_id": ObjectId("600f42bdb067340e777b1385"),
"study_id": ObjectId("5ea08bb17b2f150f29f4d952"),
},
"annotation_type": {
"name": "organ",
"annotation_type": "group",
"values": ["UBERON_0001913"],
"study_file_id": ObjectId("600f42bdb067340e777b1385"),
"study_id": ObjectId("5ea08bb17b2f150f29f4d952"),
},
"organ__ontology_label": {
"name": "organ__ontology_label",
"annotation_type": "group",
"values": ["milk"],
"study_file_id": ObjectId("600f42bdb067340e777b1385"),
"study_id": ObjectId("5ea08bb17b2f150f29f4d952"),
},
"sex": {
"name": "sex",
"annotation_type": "group",
"values": ["female"],
"study_file_id": ObjectId("600f42bdb067340e777b1385"),
"study_id": ObjectId("5ea08bb17b2f150f29f4d952"),
},
"is_living": {
"name": "is_living",
"annotation_type": "group",
"values": ["yes"],
"study_file_id": ObjectId("600f42bdb067340e777b1385"),
"study_id": ObjectId("5ea08bb17b2f150f29f4d952"),
},
"organism_age__unit": {
"name": "organism_age__unit",
"annotation_type": "group",
"values": ["UO_0000036"],
"study_file_id": ObjectId("600f42bdb067340e777b1385"),
"study_id": ObjectId("5ea08bb17b2f150f29f4d952"),
},
"organism_age__unit_label": {
"name": "organism_age__unit_label",
"annotation_type": "group",
"values": ["year"],
"study_file_id": ObjectId("600f42bdb067340e777b1385"),
"study_id": ObjectId("5ea08bb17b2f150f29f4d952"),
},
"ethnicity__ontology_label": {
"name": "ethnicity__ontology_label",
"annotation_type": "group",
"values": ["European", "European|British", ""],
"study_file_id": ObjectId("600f42bdb067340e777b1385"),
"study_id": ObjectId("5ea08bb17b2f150f29f4d952"),
},
"ethnicity": {
"name": "ethnicity",
"annotation_type": "group",
"values": ["HANCESTRO_0005", "HANCESTRO_0005|HANCESTRO_0462"],
"study_file_id": ObjectId("600f42bdb067340e777b1385"),
"study_id": ObjectId("5ea08bb17b2f150f29f4d952"),
},
"organism_age__unit": {
"name": "organism_age__unit",
"annotation_type": "group",
"values": ["UO_0000036"],
"study_file_id": ObjectId("600f42bdb067340e777b1385"),
"study_id": ObjectId("5ea08bb17b2f150f29f4d952"),
},
"organ": {
"name": "organ",
"annotation_type": "group",
"values": ["UBERON_0001913"],
"study_file_id": ObjectId("600f42bdb067340e777b1385"),
"study_id": ObjectId("5ea08bb17b2f150f29f4d952"),
},
"disease": {
"name": "disease",
"annotation_type": "group",
"values": [
"MONDO_0005015|MONDO_0006849",
"MONDO_0005709",
"MONDO_0005015|MONDO_0005709",
"MONDO_0005015|MONDO_0006849|MONDO_0005709",
"MONDO_0000001",
],
"study_file_id": ObjectId("600f42bdb067340e777b1385"),
"study_id": ObjectId("5ea08bb17b2f150f29f4d952"),
},
"organism_age": {
"name": "organism_age",
"annotation_type": "numeric",
"values": [],
"study_file_id": ObjectId("600f42bdb067340e777b1385"),
"study_id": ObjectId("5ea08bb17b2f150f29f4d952"),
},
"ethnicity": {
"name": "ethnicity",
"annotation_type": "group",
"values": ["HANCESTRO_0005", "HANCESTRO_0005|HANCESTRO_0462"],
"study_file_id": ObjectId("600f42bdb067340e777b1385"),
"study_id": ObjectId("5ea08bb17b2f150f29f4d952"),
},
"disease__ontology_label": {
"name": "disease__ontology_label",
"annotation_type": "group",
"values": [
"diabetes mellitus|mastitis",
"common cold",
"diabetes mellitus|common cold",
"diabetes mellitus|mastitis|common cold",
"disease or disorder",
],
"study_file_id": ObjectId("600f42bdb067340e777b1385"),
"study_id": ObjectId("5ea08bb17b2f150f29f4d952"),
},
"cell_type": {
"name": "cell_type",
"annotation_type": "group",
"values": ["CL_0000066"],
"study_file_id": ObjectId("600f42bdb067340e777b1385"),
"study_id": ObjectId("5ea08bb17b2f150f29f4d952"),
},
"cell_type__ontology_label": {
"name": "cell_type__ontology_label",
"annotation_type": "group",
"values": ["epithelial cell"],
"study_file_id": ObjectId("600f42bdb067340e777b1385"),
"study_id": ObjectId("5ea08bb17b2f150f29f4d952"),
},
},
"data_arrays": {
"All Cells": {
"_id": ObjectId("600f4325e164652b111111a5"),
"name": "All Cells",
"cluster_name": "valid_array_v2.1.2.csv",
"array_type": "cells",
"array_index": 0,
"values": [
"BM01_16dpp_AAGCAGTGGTAT",
"BM01_16dpp_TAAGCAGTGGTA",
"BM01_16dpp_CTAAGCAGTGGT",
"BM01_16dpp_CGGTAAACCATT",
"BM01_16dpp_CCGAATTCACCG",
],
"subsample_threshold": None,
"subsample_annotation": None,
"linear_data_type": "Study",
"linear_data_id": ObjectId("5ea08bb17b2f150f29f4d952"),
"study_id": ObjectId("5ea08bb17b2f150f29f4d952"),
"study_file_id": ObjectId("600f42bdb067340e777b1385"),
},
"disease__time_since_onset": {
"_id": ObjectId("600f4325e164652b111111a7"),
"name": "disease__time_since_onset",
"cluster_name": "valid_array_v2.1.2.csv",
"array_type": "annotations",
"array_index": 0,
"values": ["12|2", "1", "24|2", "36|3|1", "0"],
"subsample_threshold": None,
"subsample_annotation": None,
"linear_data_type": "CellMetadatum",
"linear_data_id": ObjectId("600f4325e164652b111111a6"),
"study_id": ObjectId("5ea08bb17b2f150f29f4d952"),
"study_file_id": ObjectId("600f42bdb067340e777b1385"),
},
"disease__time_since_onset__unit": {
"_id": ObjectId("600f4325e164652b111111a9"),
"name": "disease__time_since_onset__unit",
"cluster_name": "valid_array_v2.1.2.csv",
"array_type": "annotations",
"array_index": 0,
"values": [
"UO_0000035",
"UO_0000035",
"UO_0000035",
"UO_0000035",
"UO_0000035",
],
"subsample_threshold": None,
"subsample_annotation": None,
"linear_data_type": "CellMetadatum",
"linear_data_id": ObjectId("600f4325e164652b111111a8"),
"study_id": ObjectId("5ea08bb17b2f150f29f4d952"),
"study_file_id": ObjectId("600f42bdb067340e777b1385"),
},
"organ_region": {
"_id": ObjectId("600f4325e164652b111111ab"),
"name": "organ_region",
"cluster_name": "valid_array_v2.1.2.csv",
"array_type": "annotations",
"array_index": 0,
"values": [
"MBA:000000944",
"MBA:000000302|MBA:000000294|MBA:000000795",
"MBA:000000714|MBA:000000972",
"MBA:000001041",
"MBA:000000909|MBA:000000502",
],
"subsample_threshold": None,
"subsample_annotation": None,
"linear_data_type": "CellMetadatum",
"linear_data_id": ObjectId("600f4325e164652b111111aa"),
"study_id": ObjectId("5ea08bb17b2f150f29f4d952"),
"study_file_id": ObjectId("600f42bdb067340e777b1385"),
},
"organ_region__ontology_label": {
"_id": ObjectId("600f4325e164652b111111ad"),
"name": "organ_region__ontology_label",
"cluster_name": "valid_array_v2.1.2.csv",
"array_type": "annotations",
"array_index": 0,
"values": [
"Folium-tuber vermis (VII)",
"Superior colliculus, sensory related|Superior colliculus, motor related|Periaqueductal gray",
"",
"Paraflocculus",
"Entorhinal area|Subiculum",
],
"subsample_threshold": None,
"subsample_annotation": None,
"linear_data_type": "CellMetadatum",
"linear_data_id": ObjectId("600f4325e164652b111111ac"),
"study_id": ObjectId("5ea08bb17b2f150f29f4d952"),
"study_file_id": ObjectId("600f42bdb067340e777b1385"),
},
"donor": {
"_id": ObjectId("600f4325e164652b111111af"),
"name": "donor",
"cluster_name": "valid_array_v2.1.2.csv",
"array_type": "annotations",
"array_index": 0,
"values": ["BM01", "BM01", "BM01", "BM01", "BM01"],
"subsample_threshold": None,
"subsample_annotation": None,
"linear_data_type": "CellMetadatum",
"linear_data_id": ObjectId("600f4325e164652b111111ae"),
"study_id": ObjectId("5ea08bb17b2f150f29f4d952"),
"study_file_id": ObjectId("600f42bdb067340e777b1385"),
},
"disease__treated": {
"_id": ObjectId("600f4325e164652b111111b1"),
"name": "disease__treated",
"cluster_name": "valid_array_v2.1.2.csv",
"array_type": "annotations",
"array_index": 0,
"values": [
"False|False",
"FALSE",
"True|False",
"True|False|False",
"FALSE",
],
"subsample_threshold": None,
"subsample_annotation": None,
"linear_data_type": "CellMetadatum",
"linear_data_id": ObjectId("600f4325e164652b111111b0"),
"study_id": ObjectId("5ea08bb17b2f150f29f4d952"),
"study_file_id": ObjectId("600f42bdb067340e777b1385"),
},
"species": {
"_id": ObjectId("600f4325e164652b111111b3"),
"name": "species",
"cluster_name": "valid_array_v2.1.2.csv",
"array_type": "annotations",
"array_index": 0,
"values": [
"NCBITaxon_9606",
"NCBITaxon_9606",
"NCBITaxon_9606",
"NCBITaxon_9606",
"NCBITaxon_9606",
],
"subsample_threshold": None,
"subsample_annotation": None,
"linear_data_type": "CellMetadatum",
"linear_data_id": ObjectId("600f4325e164652b111111b2"),
"study_id": ObjectId("5ea08bb17b2f150f29f4d952"),
"study_file_id": ObjectId("600f42bdb067340e777b1385"),
},
"species__ontology_label": {
"_id": ObjectId("600f4325e164652b111111b5"),
"name": "species__ontology_label",
"cluster_name": "valid_array_v2.1.2.csv",
"array_type": "annotations",
"array_index": 0,
"values": [
"Homo sapiens",
"Homo sapiens",
"Homo sapiens",
"Homo sapiens",
"Homo sapiens",
],
"subsample_threshold": None,
"subsample_annotation": None,
"linear_data_type": "CellMetadatum",
"linear_data_id": ObjectId("600f4325e164652b111111b4"),
"study_id": ObjectId("5ea08bb17b2f150f29f4d952"),
"study_file_id": ObjectId("600f42bdb067340e777b1385"),
},
"geographical_region": {
"_id": ObjectId("600f4325e164652b111111b7"),
"name": "geographical_region",
"cluster_name": "valid_array_v2.1.2.csv",
"array_type": "annotations",
"array_index": 0,
"values": [
"GAZ_00003181",
"GAZ_00003181",
"GAZ_00003181",
"GAZ_00003181",
"GAZ_00003181",
],
"subsample_threshold": None,
"subsample_annotation": None,
"linear_data_type": "CellMetadatum",
"linear_data_id": ObjectId("600f4325e164652b111111b6"),
"study_id": ObjectId("5ea08bb17b2f150f29f4d952"),
"study_file_id": ObjectId("600f42bdb067340e777b1385"),
},
"geographical_region__ontology_label": {
"_id": ObjectId("600f4325e164652b111111b9"),
"name": "geographical_region__ontology_label",
"cluster_name": "valid_array_v2.1.2.csv",
"array_type": "annotations",
"array_index": 0,
"values": ["Boston", "Boston", "Boston", "Boston", "Boston"],
"subsample_threshold": None,
"subsample_annotation": None,
"linear_data_type": "CellMetadatum",
"linear_data_id": ObjectId("600f4325e164652b111111b8"),
"study_id": ObjectId("5ea08bb17b2f150f29f4d952"),
"study_file_id": ObjectId("600f42bdb067340e777b1385"),
},
"library_preparation_protocol": {
"_id": ObjectId("600f4325e164652b111111bb"),
"name": "library_preparation_protocol",
"cluster_name": "valid_array_v2.1.2.csv",
"array_type": "annotations",
"array_index": 0,
"values": [
"EFO_0008919",
"EFO_0008919",
"EFO_0008919",
"EFO_0008919",
"EFO_0008919",
],
"subsample_threshold": None,
"subsample_annotation": None,
"linear_data_type": "CellMetadatum",
"linear_data_id": ObjectId("600f4325e164652b111111ba"),
"study_id": ObjectId("5ea08bb17b2f150f29f4d952"),
"study_file_id": ObjectId("600f42bdb067340e777b1385"),
},
"library_preparation_protocol__ontology_label": {
"_id": ObjectId("600f4325e164652b111111bd"),
"name": "library_preparation_protocol__ontology_label",
"cluster_name": "valid_array_v2.1.2.csv",
"array_type": "annotations",
"array_index": 0,
"values": ["Seq-Well", "Seq-Well", "Seq-Well", "Seq-Well", "Seq-Well"],
"subsample_threshold": None,
"subsample_annotation": None,
"linear_data_type": "CellMetadatum",
"linear_data_id": ObjectId("600f4325e164652b111111bc"),
"study_id": ObjectId("5ea08bb17b2f150f29f4d952"),
"study_file_id": ObjectId("600f42bdb067340e777b1385"),
},
"organ": {
"_id": ObjectId("600f4325e164652b111111bf"),
"name": "organ",
"cluster_name": "valid_array_v2.1.2.csv",
"array_type": "annotations",
"array_index": 0,
"values": [
"UBERON_0001913",
"UBERON_0001913",
"UBERON_0001913",
"UBERON_0001913",
"UBERON_0001913",
],
"subsample_threshold": None,
"subsample_annotation": None,
"linear_data_type": "CellMetadatum",
"linear_data_id": ObjectId("600f4325e164652b111111be"),
"study_id": ObjectId("5ea08bb17b2f150f29f4d952"),
"study_file_id": ObjectId("600f42bdb067340e777b1385"),
},
"organ__ontology_label": {
"_id": ObjectId("600f4325e164652b111111c1"),
"name": "organ__ontology_label",
"cluster_name": "valid_array_v2.1.2.csv",
"array_type": "annotations",
"array_index": 0,
"values": ["milk", "milk", "milk", "milk", "milk"],
"subsample_threshold": None,
"subsample_annotation": None,
"linear_data_type": "CellMetadatum",
"linear_data_id": ObjectId("600f4325e164652b111111c0"),
"study_id": ObjectId("5ea08bb17b2f150f29f4d952"),
"study_file_id": ObjectId("600f42bdb067340e777b1385"),
},
"sex": {
"_id": ObjectId("600f4325e164652b111111c3"),
"name": "sex",
"cluster_name": "valid_array_v2.1.2.csv",
"array_type": "annotations",
"array_index": 0,
"values": ["female", "female", "female", "female", "female"],
"subsample_threshold": None,
"subsample_annotation": None,
"linear_data_type": "CellMetadatum",
"linear_data_id": ObjectId("600f4325e164652b111111c2"),
"study_id": ObjectId("5ea08bb17b2f150f29f4d952"),
"study_file_id": ObjectId("600f42bdb067340e777b1385"),
},
"is_living": {
"_id": ObjectId("600f4325e164652b111111c5"),
"name": "is_living",
"cluster_name": "valid_array_v2.1.2.csv",
"array_type": "annotations",
"array_index": 0,
"values": ["yes", "yes", "yes", "yes", "yes"],
"subsample_threshold": None,
"subsample_annotation": None,
"linear_data_type": "CellMetadatum",
"linear_data_id": ObjectId("600f4325e164652b111111c4"),
"study_id": ObjectId("5ea08bb17b2f150f29f4d952"),
"study_file_id": ObjectId("600f42bdb067340e777b1385"),
},
"organism_age__unit": {
"_id": ObjectId("600f4325e164652b111111c7"),
"name": "organism_age__unit",
"cluster_name": "valid_array_v2.1.2.csv",
"array_type": "annotations",
"array_index": 0,
"values": [
"UO_0000036",
"UO_0000036",
"UO_0000036",
"UO_0000036",
"UO_0000036",
],
"subsample_threshold": None,
"subsample_annotation": None,
"linear_data_type": "CellMetadatum",
"linear_data_id": ObjectId("600f4325e164652b111111c6"),
"study_id": ObjectId("5ea08bb17b2f150f29f4d952"),
"study_file_id": ObjectId("600f42bdb067340e777b1385"),
},
"organism_age__unit_label": {
"_id": ObjectId("600f4325e164652b111111c9"),
"name": "organism_age__unit_label",
"cluster_name": "valid_array_v2.1.2.csv",
"array_type": "annotations",
"array_index": 0,
"values": ["year", "year", "year", "year", "year"],
"subsample_threshold": None,
"subsample_annotation": None,
"linear_data_type": "CellMetadatum",
"linear_data_id": ObjectId("600f4325e164652b111111c8"),
"study_id": ObjectId("5ea08bb17b2f150f29f4d952"),
"study_file_id": ObjectId("600f42bdb067340e777b1385"),
},
"ethnicity__ontology_label": {
"_id": ObjectId("600f4325e164652b111111cb"),
"name": "ethnicity__ontology_label",
"cluster_name": "valid_array_v2.1.2.csv",
"array_type": "annotations",
"array_index": 0,
"values": ["European", "European", "European|British", "", "European"],
"subsample_threshold": None,
"subsample_annotation": None,
"linear_data_type": "CellMetadatum",
"linear_data_id": ObjectId("600f4325e164652b111111ca"),
"study_id": ObjectId("5ea08bb17b2f150f29f4d952"),
"study_file_id": ObjectId("600f42bdb067340e777b1385"),
},
},
}
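# NOTE: in this mock data, pipe-delimited strings (e.g. "European|British") encode
# multi-valued array-convention entries for a single cell, and empty strings mark
# cells where the array field was left blank.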
| 42.123177 | 110 | 0.542016 | 1,951 | 25,990 | 6.846745 | 0.093798 | 0.110795 | 0.139991 | 0.076808 | 0.803938 | 0.767405 | 0.737311 | 0.725034 | 0.694266 | 0.628462 | 0 | 0.182607 | 0.321316 | 25,990 | 616 | 111 | 42.191558 | 0.574692 | 0 | 0 | 0.718699 | 0 | 0 | 0.494075 | 0.222239 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.001626 | 0 | 0.001626 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 6 |
0a174e5e50bb10c04155d6639813256f9418a7fe | 75 | py | Python | fairseq_src/examples/summeration_rdrop/summeration_rdrop_src/__init__.py | truebluejason/R-Drop | 4e027c1f58a9b5e7ca9330de78bfdc5ee4af408e | [
"MIT"
] | null | null | null | fairseq_src/examples/summeration_rdrop/summeration_rdrop_src/__init__.py | truebluejason/R-Drop | 4e027c1f58a9b5e7ca9330de78bfdc5ee4af408e | [
"MIT"
] | null | null | null | fairseq_src/examples/summeration_rdrop/summeration_rdrop_src/__init__.py | truebluejason/R-Drop | 4e027c1f58a9b5e7ca9330de78bfdc5ee4af408e | [
"MIT"
] | null | null | null | from . import rdrop_translation
from .loss import rdrop_cross_entropy_loss
| 25 | 42 | 0.866667 | 11 | 75 | 5.545455 | 0.636364 | 0.360656 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.106667 | 75 | 2 | 43 | 37.5 | 0.910448 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | null | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 6 |
0a1d0cc4d67e6c13e89dd74f0d389c63a8cdc490 | 13,068 | py | Python | suites/API/DatabaseApi/AuthorityValidation/GetRequiredSignatures.py | echoprotocol/pytests | 5dce698558c2ba703aea03aab79906af1437da5d | [
"MIT"
] | 1 | 2021-03-12T05:17:02.000Z | 2021-03-12T05:17:02.000Z | suites/API/DatabaseApi/AuthorityValidation/GetRequiredSignatures.py | echoprotocol/pytests | 5dce698558c2ba703aea03aab79906af1437da5d | [
"MIT"
] | 1 | 2019-11-19T12:10:59.000Z | 2019-11-19T12:10:59.000Z | suites/API/DatabaseApi/AuthorityValidation/GetRequiredSignatures.py | echoprotocol/pytests | 5dce698558c2ba703aea03aab79906af1437da5d | [
"MIT"
] | 2 | 2019-04-29T10:46:48.000Z | 2019-10-29T10:01:03.000Z | # -*- coding: utf-8 -*-
from common.base_test import BaseTest
import lemoncheesecake.api as lcc
from lemoncheesecake.matching import equal_to, has_item, require_that
SUITE = {
"description": "Method 'get_required_signatures'"
}
@lcc.prop("main", "type")
@lcc.prop("positive", "type")
@lcc.tags("api", "database_api", "database_api_authority_validation", "get_required_signatures")
@lcc.suite("Check work of method 'get_required_signatures'", rank=1)
class GetRequiredSignatures(BaseTest):
def __init__(self):
super().__init__()
self.__database_api_identifier = None
self.__registration_api_identifier = None
self.echo_acc0 = None
self.echo_acc1 = None
def get_account_info(self, account_id):
response_id = self.send_request(
self.get_request("get_accounts", [[account_id]]), self.__database_api_identifier
)
return self.get_response(response_id)["result"][0]
def setup_suite(self):
super().setup_suite()
self._connect_to_echopy_lib()
lcc.set_step("Setup for {}".format(self.__class__.__name__))
self.__database_api_identifier = self.get_identifier("database")
self.__registration_api_identifier = self.get_identifier("registration")
lcc.log_info(
"API identifiers are: database='{}', registration='{}'".format(
self.__database_api_identifier, self.__registration_api_identifier
)
)
self.echo_acc0 = self.get_account_id(
self.accounts[0], self.__database_api_identifier, self.__registration_api_identifier
)
self.echo_acc1 = self.get_account_id(
self.accounts[1], self.__database_api_identifier, self.__registration_api_identifier
)
lcc.log_info("Echo accounts are: #1='{}', #2='{}'".format(self.echo_acc0, self.echo_acc1))
def teardown_suite(self):
self._disconnect_to_echopy_lib()
super().teardown_suite()
@lcc.test("Simple work of method 'get_required_signatures'")
def method_main_check(self):
lcc.set_step("Get account active keys")
account_info = self.get_account_info(self.echo_acc0)
lcc.log_info("Active keys of account {} were taken".format(self.echo_acc0))
lcc.set_step("Build transfer transaction")
transfer_operation = self.echo_ops.get_transfer_operation(
echo=self.echo, from_account_id=self.echo_acc0, to_account_id=self.echo_acc1
)
collected_operation = self.collect_operations(transfer_operation, self.__database_api_identifier)
signed_tx = self.echo_ops.broadcast(echo=self.echo, list_operations=collected_operation, no_broadcast=True)
del signed_tx["signatures"]
lcc.log_info("Transaction was built")
expected_keys = [account_info['active']["key_auths"][0][0]]
lcc.set_step("Get potential signatures for built transaction")
response_id = self.send_request(
self.get_request("get_potential_signatures", [signed_tx]), self.__database_api_identifier
)
response = self.get_response(response_id)
lcc.log_info("Call 'get_potential_signatures' method for built transaction")
lcc.set_step("Check 'get_potential_signatures' method result")
require_that("potential keys", response["result"], equal_to(expected_keys), quiet=True)
lcc.set_step("Get required signatures for bulded transaction")
response_id = self.send_request(
self.get_request("get_required_signatures", [signed_tx, expected_keys]), self.__database_api_identifier
)
response = self.get_response(response_id)
lcc.log_info(
"Call 'get_required_signatures' method for built transaction and "
"keys from 'get_potential_signatures' method"
)
lcc.set_step("Check 'get_required_signatures' method result")
require_that("required keys", response["result"], equal_to(expected_keys), quiet=True)
@lcc.prop("positive", "type")
@lcc.tags("api", "database_api", "database_api_authority_validation", "get_required_signatures")
@lcc.suite("Positive testing of method 'get_required_signatures'", rank=2)
class PositiveTesting(BaseTest):
def __init__(self):
super().__init__()
self.__database_api_identifier = None
self.__registration_api_identifier = None
self.echo_acc0 = None
self.echo_acc5 = None
self.echo_acc6 = None
self.reserved_public_key = None
def get_account_info(self, account_id):
response_id = self.send_request(
self.get_request("get_accounts", [[account_id]]), self.__database_api_identifier
)
return self.get_response(response_id)["result"][0]
def setup_suite(self):
super().setup_suite()
self._connect_to_echopy_lib()
lcc.set_step("Setup for {}".format(self.__class__.__name__))
self.__database_api_identifier = self.get_identifier("database")
self.__registration_api_identifier = self.get_identifier("registration")
lcc.log_info(
"API identifiers are: database='{}', registration='{}'".format(
self.__database_api_identifier, self.__registration_api_identifier
)
)
self.echo_acc0 = self.get_account_id(
self.accounts[0], self.__database_api_identifier, self.__registration_api_identifier
)
self.echo_acc5 = self.get_account_id(
self.accounts[5], self.__database_api_identifier, self.__registration_api_identifier
)
self.echo_acc6 = self.get_account_id(
self.accounts[6], self.__database_api_identifier, self.__registration_api_identifier
)
self.echo_acc7 = self.get_account_id(
self.accounts[7], self.__database_api_identifier, self.__registration_api_identifier
)
lcc.log_info(
"Echo accounts are: #1='{}', #2='{}', #3='{}', #4='{}'".format(
self.echo_acc0, self.echo_acc5, self.echo_acc6, self.echo_acc7
)
)
self.reserved_public_key = self.get_reserved_public_key()
lcc.log_info("Reserved public key: {}".format(self.reserved_public_key))
def teardown_suite(self):
self._disconnect_to_echopy_lib()
super().teardown_suite()
@lcc.test("Add additional account_auths and change weight_threshold to account and get required signatures for it")
@lcc.depends_on("API.DatabaseApi.AuthorityValidation.GetRequiredSignatures.GetRequiredSignatures.method_main_check")
def get_potential_signatures_of_accounts_with_additional_account_auths(self):
lcc.set_step("Get account active keys")
account_info_1 = self.get_account_info(self.echo_acc5)
account_active_keys_1 = account_info_1["active"]
lcc.log_info("Active keys of account {} were taken".format(self.echo_acc5))
lcc.set_step("Get account active keys")
account_info_2 = self.get_account_info(self.echo_acc6)
account_active_keys_2 = account_info_2["active"]
lcc.log_info("Active keys of account {} were taken".format(self.echo_acc6))
lcc.set_step("Update info of '{}' account (add account_auths)".format(self.echo_acc6))
account_auths = [account_auth[0] for account_auth in account_active_keys_2["account_auths"]]
account_auths_new_item = [self.echo_acc5, 2]
if self.echo_acc5 not in account_auths:
new_active_keys = account_active_keys_2.copy()
new_active_keys["account_auths"].extend([account_auths_new_item])
new_active_keys["weight_threshold"] = 2
account_info_2["active"] = new_active_keys
self.utils.perform_account_update_operation(
self, self.echo_acc6, account_info_2, self.__database_api_identifier
)
lcc.log_info("'account_auths' of '{}' account was updated".format(self.echo_acc6))
lcc.set_step("Get active keys info about account")
actual_account_info_2 = self.get_account_info(self.echo_acc6)
actual_account_active_keys_2 = actual_account_info_2["active"]
require_that(
"new keys", actual_account_active_keys_2["account_auths"], has_item(account_auths_new_item), quiet=True
)
lcc.set_step("Build transfer transaction")
transfer_operation = self.echo_ops.get_transfer_operation(
echo=self.echo, from_account_id=self.echo_acc6, to_account_id=self.echo_acc5
)
collected_operation = self.collect_operations(transfer_operation, self.__database_api_identifier)
signed_tx = self.echo_ops.broadcast(echo=self.echo, list_operations=collected_operation, no_broadcast=True)
del signed_tx["signatures"]
lcc.log_info("Transaction was built")
expected_keys = [account_active_keys_1["key_auths"][0][0]]
lcc.set_step("Get potential signatures for builded transaction")
response_id = self.send_request(
self.get_request("get_potential_signatures", [signed_tx]), self.__database_api_identifier
)
potential_keys = self.get_response(response_id)["result"]
lcc.log_info("Call 'get_potential_signatures' method for builded transaction")
lcc.set_step("Get required signatures for builded transaction with pontential keys")
response_id = self.send_request(
self.get_request("get_required_signatures", [signed_tx, potential_keys]), self.__database_api_identifier
)
response = self.get_response(response_id)
lcc.log_info("Call 'get_required_signatures' method for builded transaction with potential keys")
lcc.set_step("Check 'get_required_signatures' method result")
require_that("required keys", response["result"], equal_to(expected_keys), quiet=True)
@lcc.test("Add additional key_auths and change weight_threshold to account and get required signatures for it")
@lcc.depends_on("API.DatabaseApi.AuthorityValidation.GetRequiredSignatures.GetRequiredSignatures.method_main_check")
def get_potential_signatures_of_accounts_with_additional_key_auths(self):
lcc.set_step("Get account active keys")
account_info = self.get_account_info(self.echo_acc7)
account_active_keys = account_info["active"]
lcc.log_info("Active keys of account {} were taken".format(self.echo_acc7))
lcc.set_step("Update info of '{}' account (add key_auths)".format(self.echo_acc7))
key_auths = [key_auth[0] for key_auth in account_active_keys["key_auths"]]
key_auths_new_item = [self.reserved_public_key, 2]
if self.reserved_public_key not in key_auths:
new_active_keys = account_active_keys.copy()
new_active_keys["key_auths"].extend([key_auths_new_item])
new_active_keys["weight_threshold"] = 2
account_info["active"] = new_active_keys
self.utils.perform_account_update_operation(
self, self.echo_acc7, account_info, self.__database_api_identifier
)
lcc.log_info("'key_auths' of '{}' account was updated".format(self.echo_acc7))
lcc.set_step("Get active keys info about account")
actual_account_info = self.get_account_info(self.echo_acc7)
actual_account_active_keys = actual_account_info["active"]
require_that("new keys", actual_account_active_keys["key_auths"], has_item(key_auths_new_item), quiet=True)
lcc.set_step("Build transfer transaction")
transfer_operation = self.echo_ops.get_transfer_operation(
echo=self.echo, from_account_id=self.echo_acc7, to_account_id=self.echo_acc0
)
collected_operation = self.collect_operations(transfer_operation, self.__database_api_identifier)
signed_tx = self.echo_ops.broadcast(echo=self.echo, list_operations=collected_operation, no_broadcast=True)
del signed_tx["signatures"]
lcc.log_info("Transaction was built")
expected_keys = [self.reserved_public_key]
lcc.set_step("Get potential signatures for builded transaction")
response_id = self.send_request(
self.get_request("get_potential_signatures", [signed_tx]), self.__database_api_identifier
)
potential_keys = self.get_response(response_id)["result"]
lcc.log_info("Call 'get_potential_signatures' method for builded transaction")
lcc.set_step("Get required signatures for builded transaction with pontential keys")
response_id = self.send_request(
self.get_request("get_required_signatures", [signed_tx, potential_keys]), self.__database_api_identifier
)
response = self.get_response(response_id)
lcc.log_info("Call 'get_required_signatures' method for builded transaction with potential keys")
lcc.set_step("Check 'get_required_signatures' method result")
require_that("required keys", response["result"], equal_to(expected_keys), quiet=True)
| 49.313208 | 120 | 0.70225 | 1,641 | 13,068 | 5.182206 | 0.087751 | 0.049859 | 0.044097 | 0.073495 | 0.86477 | 0.832314 | 0.792803 | 0.770814 | 0.754351 | 0.732479 | 0 | 0.007918 | 0.197888 | 13,068 | 264 | 121 | 49.5 | 0.803377 | 0.001607 | 0 | 0.504464 | 0 | 0 | 0.236719 | 0.063396 | 0 | 0 | 0 | 0 | 0 | 1 | 0.049107 | false | 0 | 0.013393 | 0 | 0.080357 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 6 |
6abf21d096909ca0f3e4b9b41072836660109e7b | 107 | py | Python | bqskit/bqskit/synthesis.py | BQSKit/qfast | 06df0c7439ae096af2d1fa3e97b44512618f5e4a | [
"BSD-3-Clause-LBNL"
] | 12 | 2020-09-23T17:43:17.000Z | 2022-01-17T18:23:11.000Z | bqskit/bqskit/synthesis.py | edyounis/qfast | 06df0c7439ae096af2d1fa3e97b44512618f5e4a | [
"BSD-3-Clause-LBNL"
] | 3 | 2020-09-26T00:46:55.000Z | 2021-03-15T17:52:54.000Z | bqskit/bqskit/synthesis.py | BQSKit/qfast | 06df0c7439ae096af2d1fa3e97b44512618f5e4a | [
"BSD-3-Clause-LBNL"
] | 2 | 2021-05-31T05:29:20.000Z | 2021-12-06T13:18:22.000Z | import qfast
def synthesize_for_qiskit(utry, **kwargs):
    """Synthesize the unitary ``utry`` with QFAST, forwarding all options."""
    return qfast.synthesize(utry, **kwargs)
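# Usage sketch (illustrative, not part of the original module): `utry` is expected
# to be a unitary matrix such as a (2**n, 2**n) numpy array; the return value is
# whatever qfast.synthesize produces, presumably a circuit representation that can
# be consumed by Qiskit.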
| 17.833333 | 45 | 0.71028 | 13 | 107 | 5.692308 | 0.692308 | 0.27027 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.17757 | 107 | 5 | 46 | 21.4 | 0.840909 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.333333 | false | 0 | 0.333333 | 0.333333 | 1 | 0 | 1 | 0 | 0 | null | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 1 | 1 | 0 | 0 | 0 | 6 |
6acc40101a27b52b175bff903b9d372eec1a1ca9 | 40 | py | Python | pomaex/scrappers/__init__.py | rdlu/pomaex | 82aa746ad2d7078b2268da2d871631584df73a64 | [
"MIT"
] | null | null | null | pomaex/scrappers/__init__.py | rdlu/pomaex | 82aa746ad2d7078b2268da2d871631584df73a64 | [
"MIT"
] | null | null | null | pomaex/scrappers/__init__.py | rdlu/pomaex | 82aa746ad2d7078b2268da2d871631584df73a64 | [
"MIT"
] | null | null | null | from .alpha_vantage import AlphaVantage
| 20 | 39 | 0.875 | 5 | 40 | 6.8 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.1 | 40 | 1 | 40 | 40 | 0.944444 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 1 | 1 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 6 |
0a8d8e4dc9c06ea2c8a3391905ab97fbebc260ad | 174 | py | Python | office365/sharepoint/clientsidecomponent/storage_entity.py | theodoriss/Office365-REST-Python-Client | 3bd7a62dadcd3f0a0aceeaff7584fff3fd44886e | [
"MIT"
] | null | null | null | office365/sharepoint/clientsidecomponent/storage_entity.py | theodoriss/Office365-REST-Python-Client | 3bd7a62dadcd3f0a0aceeaff7584fff3fd44886e | [
"MIT"
] | null | null | null | office365/sharepoint/clientsidecomponent/storage_entity.py | theodoriss/Office365-REST-Python-Client | 3bd7a62dadcd3f0a0aceeaff7584fff3fd44886e | [
"MIT"
] | null | null | null | from office365.sharepoint.base_entity import BaseEntity
class StorageEntity(BaseEntity):
"""Storage entities which are available across app catalog scopes."""
pass
| 24.857143 | 73 | 0.781609 | 20 | 174 | 6.75 | 0.95 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.02027 | 0.149425 | 174 | 6 | 74 | 29 | 0.891892 | 0.362069 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0.333333 | 0.333333 | 0 | 0.666667 | 0 | 1 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 1 | 1 | 0 | 1 | 0 | 0 | 6 |
0a9e2cee41a9cfcf32a0023cf32d04dd55026e53 | 232,249 | py | Python | ibeis/other/detectfuncs.py | brmscheiner/ibeis | 9bb93a6cd74ac47921e734c80917a38609dfe661 | [
"Apache-2.0"
] | null | null | null | ibeis/other/detectfuncs.py | brmscheiner/ibeis | 9bb93a6cd74ac47921e734c80917a38609dfe661 | [
"Apache-2.0"
] | null | null | null | ibeis/other/detectfuncs.py | brmscheiner/ibeis | 9bb93a6cd74ac47921e734c80917a38609dfe661 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
"""Developer convenience functions for ibs (detections).
TODO: need to split up into sub modules:
consistency_checks
feasibility_fixes
move the export stuff to dbio
then there are also convineience functions that need to be ordered at least
within this file
"""
from __future__ import absolute_import, division, print_function, unicode_literals
from six.moves import zip, range
from os.path import expanduser, join, abspath
import numpy as np
import vtool as vt
import utool as ut
import cv2
from ibeis.control import controller_inject
import tqdm
# Inject utool functions
(print, rrr, profile) = ut.inject2(__name__, '[other.detectfuncs]')
SAMPLES = 1000
AP_SAMPLE_POINTS = [_ / float(SAMPLES) for _ in range(0, SAMPLES + 1)]
# Must import class before injection
CLASS_INJECT_KEY, register_ibs_method = (
controller_inject.make_ibs_register_decorator(__name__))
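# Functions decorated with register_ibs_method are injected as methods on the
# IBEISController class, making them callable as ibs.<func_name>(...).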
def _resize(image, t_width=None, t_height=None, verbose=False):
if verbose:
print('RESIZING WITH t_width = %r and t_height = %r' % (t_width, t_height, ))
height, width = image.shape[:2]
if t_width is None and t_height is None:
return image
elif t_width is not None and t_height is not None:
pass
elif t_width is None:
t_width = (width / height) * float(t_height)
elif t_height is None:
t_height = (height / width) * float(t_width)
t_width, t_height = float(t_width), float(t_height)
t_width, t_height = int(np.around(t_width)), int(np.around(t_height))
assert t_width > 0 and t_height > 0, 'target size too small'
assert t_width <= width * 10 and t_height <= height * 10, 'target size too large (capped at 1000%)'
# interpolation = cv2.INTER_LANCZOS4
interpolation = cv2.INTER_LINEAR
return cv2.resize(image, (t_width, t_height), interpolation=interpolation)
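# Worked example: for a 400x200 input, _resize(image, t_width=200) derives
# t_height = (height / width) * t_width = (200 / 400) * 200 = 100 and returns a
# 200x100 result, preserving the aspect ratio.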
def simple_code(label):
from ibeis.constants import YAWALIAS, SPECIES_MAPPING
if label == 'ignore':
return 'IGNORE'
for key in SPECIES_MAPPING:
if key in label:
species_code, species_nice = SPECIES_MAPPING[key]
while species_code is None:
species_code, species_nice = SPECIES_MAPPING[species_nice]
assert species_code is not None
label = label.replace(key, species_code)
for key in sorted(YAWALIAS.keys(), key=len, reverse=True):
value = YAWALIAS[key]
label = label.replace(key, value)
return label
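# Example (hypothetical mapping): assuming SPECIES_MAPPING turns 'zebra_plains'
# into the code 'PZ' and YAWALIAS maps 'left' to 'L', simple_code of
# 'zebra_plains:left' would yield 'PZ:L'.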
##########################################################################################
def general_precision_recall_algo(ibs, label_list, confidence_list, category='positive', samples=SAMPLES, **kwargs):
def errors(zipped, conf, category):
tp, tn, fp, fn = 0.0, 0.0, 0.0, 0.0
for index, (label, confidence) in enumerate(zipped):
if label == category:
if conf <= confidence:
tp += 1
else:
fn += 1
else:
if conf <= confidence:
fp += 1
else:
tn += 1
return tp, tn, fp, fn
zipped = list(zip(label_list, confidence_list))
    conf_list = [_ / float(samples) for _ in range(0, int(samples) + 1)]
conf_dict = {}
for conf in conf_list:
conf_dict[conf] = errors(zipped, conf, category)
conf_list_ = [-1.0, -1.0]
pr_list = [1.0, 0.0]
re_list = [0.0, 1.0]
tpr_list = [0.0, 1.0]
fpr_list = [0.0, 1.0]
# conf_list_ = []
# pr_list = []
# re_list = []
# tpr_list = []
# fpr_list = []
for conf in sorted(conf_dict.keys(), reverse=True):
error_list = conf_dict[conf]
tp, tn, fp, fn = error_list
try:
pr = tp / (tp + fp)
re = tp / (tp + fn)
tpr = tp / (tp + fn)
fpr = fp / (fp + tn)
conf_list_.append(conf)
pr_list.append(pr)
re_list.append(re)
tpr_list.append(tpr)
fpr_list.append(fpr)
except ZeroDivisionError:
print('Zero division error (%r) - tp: %r tn: %r fp: %r fn: %r' % (conf, tp, tn, fp, fn, ))
return conf_list_, pr_list, re_list, tpr_list, fpr_list
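# The sweep above evaluates the confusion counts at samples + 1 evenly spaced
# thresholds (conf = 0.000, 0.001, ..., 1.000 for samples=1000) and derives
# pr = tp / (tp + fp), re = tpr = tp / (tp + fn), and fpr = fp / (fp + tn),
# skipping any threshold whose denominator is zero.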
def general_interpolate_precision_recall(conf_list, re_list, pr_list):
conf_list_, re_list_, pr_list_ = [], [], []
zipped = zip(re_list, conf_list, pr_list)
zipped = sorted(zipped, reverse=True)
max_pr = None
for re, conf, pr in zipped:
if max_pr is None or pr > max_pr:
if max_pr is not None:
conf_list_.append(np.nan)
re_list_.append(re)
pr_list_.append(max_pr)
max_pr = pr
if pr < max_pr:
pr = max_pr
conf_list_.append(conf)
re_list_.append(re)
pr_list_.append(pr)
return conf_list_, re_list_, pr_list_
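# This is the standard "interpolated precision" used for average precision:
# walking from high recall to low, precision at each point is replaced by the
# maximum precision seen at any recall >= that level, which makes the PR curve
# monotonically non-increasing in recall.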
def general_identify_operating_point(conf_list, x_list, y_list, target=(1.0, 1.0)):
best_length = np.inf
best_conf_list = []
best_x_list = []
best_y_list = []
tx, ty = target
for conf, x, y in sorted(zip(conf_list, x_list, y_list)):
x_ = x
y_ = y
x_ = (x_ - tx)
y_ = (y_ - ty)
length = np.sqrt(x_ * x_ + y_ * y_)
if length < best_length:
best_length = length
best_conf_list = [conf]
best_x_list = [x]
best_y_list = [y]
elif length == best_length:
flag_list = [
abs(best_conf - conf) > 0.01
for best_conf in best_conf_list
]
if False in flag_list:
continue
best_conf_list.append(conf)
best_x_list.append(x)
best_y_list.append(y)
return best_conf_list, best_x_list, best_y_list, best_length
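# The operating point is the confidence whose (x, y) lies closest in Euclidean
# distance to `target` (by default the ideal corner (1.0, 1.0)); tied thresholds
# are reported only when more than 0.01 in confidence away from every best found
# so far.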
def general_area_best_conf(conf_list, x_list, y_list, label='Unknown', color='b',
marker='o', plot_point=True, interpolate=True,
target=(1.0, 1.0), target_recall=None, **kwargs):
import matplotlib.pyplot as plt
zipped = list(sorted(zip(x_list, y_list, conf_list)))
x_list = [_[0] for _ in zipped]
y_list = [_[1] for _ in zipped]
conf_list = [_[2] for _ in zipped]
if interpolate:
conf_list, x_list, y_list = general_interpolate_precision_recall(
conf_list,
x_list,
y_list
)
if interpolate:
ap_list = []
for AP_POINT in AP_SAMPLE_POINTS:
for re, pr in sorted(zip(x_list, y_list)):
if AP_POINT <= re:
ap_list.append(pr)
break
ap = sum(ap_list) / len(ap_list)
else:
ap = np.trapz(y_list, x=x_list)
tup1 = general_identify_operating_point(conf_list, x_list, y_list, target=target)
best_conf_list, best_x_list, best_y_list, best_length = tup1
tup2 = None
if target_recall is not None:
for x, y, conf in sorted(zip(x_list, y_list, conf_list)):
if target_recall <= x and not np.isnan(conf):
tup2 = [conf], [x], [y], None
break
if len(best_conf_list) > 1:
print('WARNING: Multiple best operating points found %r' % (best_conf_list, ))
assert len(best_conf_list) > 0
best_conf = best_conf_list[0]
if interpolate:
# label = '%s [AP = %0.02f, OP = %0.02f]' % (label, ap * 100.0, best_conf)
label = '%s [AP = %0.02f]' % (label, ap * 100.0)
else:
label = '%s [AUC = %0.02f]' % (label, ap * 100.0, )
linestyle = '--' if kwargs.get('line_dotted', False) else '-'
plt.plot(x_list, y_list, color=color, linestyle=linestyle, label=label)
if plot_point:
plt.plot(best_x_list, best_y_list, color=color, marker=marker)
return ap, best_conf, tup1, tup2
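# When interpolate=True, AP is approximated by sampling the interpolated PR curve
# at the fixed recall points in AP_SAMPLE_POINTS and averaging the precisions (a
# denser variant of the PASCAL VOC 11-point metric); otherwise the raw area under
# the curve is integrated with the trapezoid rule (np.trapz).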
def general_confusion_matrix_algo(label_correct_list, label_predict_list,
category_list, category_mapping,
fig_, axes_, fuzzy_dict=None, conf=None,
conf_list=None, size=10, **kwargs):
# import matplotlib.colors as colors
import matplotlib.pyplot as plt
suppressed_label = 'SUP'
if conf is not None:
assert conf_list is not None
category_list.append(suppressed_label)
index = len(category_list) - 1
category_mapping[suppressed_label] = index
if fuzzy_dict is not None:
fuzzy_dict[index] = set([])
if category_mapping is not None:
index_list = [category_mapping[category] for category in category_list]
zipped = list(sorted(zip(index_list, category_list)))
category_list = [_[1] for _ in zipped]
# Get the number of categories
num_categories = len(category_list)
# Build the confusion matrix
confusion_matrix = np.zeros((num_categories, num_categories))
zipped = zip(label_correct_list, label_predict_list)
suppressed = 0.0
suppressed_correct = 0.0
suppressed_fuzzy = 0.0
for index, (label_correct, label_predict) in enumerate(zipped):
if conf is not None:
conf_ = conf_list[index]
if conf_ < conf:
if label_correct != label_predict:
suppressed_correct += 1
if fuzzy_dict is not None:
x = category_mapping[label_correct]
y = category_mapping[label_predict]
if not (y in fuzzy_dict[x] or x in fuzzy_dict[y]):
suppressed_fuzzy += 1
label_predict = suppressed_label
suppressed += 1
# Perform any mapping that needs to be done
correct_ = category_mapping[label_correct]
predict_ = category_mapping[label_predict]
        # Add to the confusion matrix
confusion_matrix[correct_][predict_] += 1
# Normalize the confusion matrix using the rows
row_normalizer = np.sum(confusion_matrix, axis=1)
confusion_normalized = np.array((confusion_matrix.T / row_normalizer).T)
# Draw the confusion matrix
res = axes_.imshow(confusion_normalized, cmap=plt.cm.jet,
interpolation='nearest')
    # Suppressed wrong predictions count as correct (the suppression did its job)
    correct = suppressed_correct
    fuzzy = suppressed_fuzzy
total = 0.0
for x in range(num_categories):
for y in range(num_categories):
number = int(confusion_matrix[x][y])
if x == y:
correct += number
if fuzzy_dict is not None and (y in fuzzy_dict[x] or x in fuzzy_dict[y]):
fuzzy += number
total += number
axes_.annotate(
str(number), xy=(y, x),
horizontalalignment='center',
verticalalignment='center',
size=size,
)
cb = fig_.colorbar(res) # NOQA
cb.set_clim(0.0, 1.0)
plt.xticks(np.arange(num_categories), category_list, rotation=90)
plt.yticks(np.arange(num_categories), category_list)
margin_small = 0.1
margin_large = 0.9
plt.subplots_adjust(
left=margin_small,
right=margin_large,
bottom=margin_small,
top=margin_large
)
correct_rate = correct / total
fuzzy_rate = fuzzy / total
return correct_rate, fuzzy_rate
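# Illustrative sketch (not part of the original module): the row normalization used
# in general_confusion_matrix_algo, shown on a tiny 2x2 matrix. Each row is divided
# by its own total so every row of the result sums to 1.0 (a row with zero total
# would produce NaNs).
def _example_row_normalize_confusion():
    confusion = np.array([[8.0, 2.0],
                          [3.0, 7.0]])
    row_totals = np.sum(confusion, axis=1)
    normalized = (confusion.T / row_totals).T
    return normalized  # [[0.8, 0.2], [0.3, 0.7]]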
def general_intersection_over_union(bbox1, bbox2):
intersection_xtl = max(bbox1['xtl'], bbox2['xtl'])
intersection_ytl = max(bbox1['ytl'], bbox2['ytl'])
intersection_xbr = min(bbox1['xbr'], bbox2['xbr'])
intersection_ybr = min(bbox1['ybr'], bbox2['ybr'])
intersection_w = intersection_xbr - intersection_xtl
intersection_h = intersection_ybr - intersection_ytl
if intersection_w <= 0 or intersection_h <= 0:
return 0.0
intersection = intersection_w * intersection_h
union = (bbox1['width'] * bbox1['height']) + (bbox2['width'] * bbox2['height']) - intersection
return intersection / union
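# Illustrative sketch (not part of the original module): a hand-checkable call to
# general_intersection_over_union. Both boxes are unit squares offset by half a
# side, so intersection = 0.25, union = 1.75, and IoU = 1/7.
def _example_intersection_over_union():
    bbox1 = {'xtl': 0.0, 'ytl': 0.0, 'xbr': 1.0, 'ybr': 1.0, 'width': 1.0, 'height': 1.0}
    bbox2 = {'xtl': 0.5, 'ytl': 0.5, 'xbr': 1.5, 'ybr': 1.5, 'width': 1.0, 'height': 1.0}
    return general_intersection_over_union(bbox1, bbox2)  # 0.25 / 1.75 ~= 0.1429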
def general_overlap(gt_list, pred_list):
overlap = np.zeros((len(gt_list), len(pred_list)), dtype=np.float32)
for i, gt in enumerate(gt_list):
for j, pred in enumerate(pred_list):
overlap[i, j] = general_intersection_over_union(gt, pred)
return overlap
def general_tp_fp_fn(gt_list, pred_list, min_overlap, **kwargs):
overlap = general_overlap(gt_list, pred_list)
num_gt, num_pred = overlap.shape
if num_gt == 0:
tp = 0.0
fp = num_pred
fn = 0.0
elif num_pred == 0:
tp = 0.0
fp = 0.0
fn = num_gt
else:
pred_index_list = range(num_pred)
gt_index_list = np.argmax(overlap, axis=0)
max_overlap_list = np.max(overlap, axis=0)
confidence_list = [
pred.get('confidence', None)
for pred in pred_list
]
assert None not in confidence_list
zipped = zip(
confidence_list,
max_overlap_list,
pred_index_list,
gt_index_list
)
pred_conf_list = [
(
confidence,
max_overlap,
pred_index,
gt_index,
)
for confidence, max_overlap, pred_index, gt_index in zipped
]
pred_conf_list = sorted(pred_conf_list, reverse=True)
assignment_dict = {}
for pred_conf, max_overlap, pred_index, gt_index in pred_conf_list:
if max_overlap > min_overlap:
if gt_index not in assignment_dict:
assignment_dict[gt_index] = pred_index
tp = len(assignment_dict.keys())
fp = num_pred - tp
fn = num_gt - tp
assert tp >= 0
assert fp >= 0
assert fn >= 0
return tp, fp, fn
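# Illustrative sketch (not part of the original module): general_tp_fp_fn on one
# ground-truth box and two predictions. The well-aligned prediction becomes the
# single TP, the stray one an FP, and no ground-truth is left unmatched.
def _example_tp_fp_fn():
    gt = {'xtl': 0.0, 'ytl': 0.0, 'xbr': 1.0, 'ybr': 1.0, 'width': 1.0, 'height': 1.0}
    pred_good = dict(gt, confidence=0.9)
    pred_bad = {'xtl': 5.0, 'ytl': 5.0, 'xbr': 6.0, 'ybr': 6.0,
                'width': 1.0, 'height': 1.0, 'confidence': 0.8}
    return general_tp_fp_fn([gt], [pred_good, pred_bad], min_overlap=0.5)  # (1, 1, 0)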
def general_get_imageset_gids(ibs, imageset_text, unique=True, **kwargs):
imageset_id = ibs.get_imageset_imgsetids_from_text(imageset_text)
test_gid_list = ibs.get_imageset_gids(imageset_id)
if unique:
test_gid_list = list(set(test_gid_list))
return test_gid_list
def general_parse_gt_annots(ibs, aid_list, include_parts=True, species_mapping={},
**kwargs):
gid_list = ibs.get_annot_gids(aid_list)
species_set = set([])
gt_list = []
for gid, aid in zip(gid_list, aid_list):
width, height = ibs.get_image_sizes(gid)
bbox = ibs.get_annot_bboxes(aid)
theta = ibs.get_annot_thetas(aid)
# Transformation matrix
R = vt.rotation_around_bbox_mat3x3(theta, bbox)
        # Get vertices of the annotation polygon
verts = vt.verts_from_bbox(bbox, close=True)
# Rotate and transform vertices
xyz_pts = vt.add_homogenous_coordinate(np.array(verts).T)
trans_pts = vt.remove_homogenous_coordinate(R.dot(xyz_pts))
        new_verts = np.round(trans_pts).astype(int).T.tolist()
x_points = [pt[0] for pt in new_verts]
y_points = [pt[1] for pt in new_verts]
xtl = int(min(x_points))
xbr = int(max(x_points))
ytl = int(min(y_points))
ybr = int(max(y_points))
bbox = (xtl, ytl, xbr - xtl, ybr - ytl)
species = ibs.get_annot_species_texts(aid)
viewpoint = ibs.get_annot_viewpoints(aid)
interest = ibs.get_annot_interest(aid)
temp = {
'gid' : gid,
'aid' : aid,
'xtl' : bbox[0] / width,
'ytl' : bbox[1] / height,
'xbr' : (bbox[0] + bbox[2]) / width,
'ybr' : (bbox[1] + bbox[3]) / height,
'width' : bbox[2] / width,
'height' : bbox[3] / height,
'class' : species_mapping.get(species, species),
'viewpoint' : viewpoint,
'interest' : interest,
'confidence' : 1.0,
}
species_set.add(temp['class'])
gt_list.append(temp)
part_rowid_list = ibs.get_annot_part_rowids(aid)
if include_parts:
for part_rowid in part_rowid_list:
bbox = ibs.get_part_bboxes(part_rowid)
theta = ibs.get_part_thetas(part_rowid)
# Transformation matrix
R = vt.rotation_around_bbox_mat3x3(theta, bbox)
                # Get vertices of the annotation polygon
verts = vt.verts_from_bbox(bbox, close=True)
# Rotate and transform vertices
xyz_pts = vt.add_homogenous_coordinate(np.array(verts).T)
trans_pts = vt.remove_homogenous_coordinate(R.dot(xyz_pts))
                new_verts = np.round(trans_pts).astype(int).T.tolist()
x_points = [pt[0] for pt in new_verts]
y_points = [pt[1] for pt in new_verts]
xtl = int(min(x_points))
xbr = int(max(x_points))
ytl = int(min(y_points))
ybr = int(max(y_points))
bbox = (xtl, ytl, xbr - xtl, ybr - ytl)
tag = ibs.get_part_tag_text(part_rowid)
if tag is None:
tag = species
else:
tag = '%s+%s' % (species, tag, )
temp = {
'gid' : gid,
'aid' : aid,
'part_id' : part_rowid,
'xtl' : bbox[0] / width,
'ytl' : bbox[1] / height,
'xbr' : (bbox[0] + bbox[2]) / width,
'ybr' : (bbox[1] + bbox[3]) / height,
'width' : bbox[2] / width,
'height' : bbox[3] / height,
'class' : tag,
'viewpoint' : viewpoint,
'interest' : interest,
'confidence' : 1.0,
}
species_set.add(temp['class'])
gt_list.append(temp)
return gt_list, species_set
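# Illustrative sketch (not part of the original module): a plain-numpy version of
# the rotate-then-rebox step performed above via the vt.* helpers. A (x, y, w, h)
# box is rotated by theta about its center and re-boxed by the min/max of its
# corners; the default arguments here are arbitrary demo values.
def _example_rotated_bbox_bounds(bbox=(10, 20, 40, 30), theta=np.pi / 6):
    x, y, w, h = bbox
    cx, cy = x + w / 2.0, y + h / 2.0
    corners = np.array([[x, y], [x + w, y], [x + w, y + h], [x, y + h]], dtype=float)
    cos_t, sin_t = np.cos(theta), np.sin(theta)
    rotation = np.array([[cos_t, -sin_t], [sin_t, cos_t]])
    # Rotate each corner about the box center, then take the axis-aligned bounds
    rotated = (corners - [cx, cy]).dot(rotation.T) + [cx, cy]
    xtl, ytl = rotated.min(axis=0)
    xbr, ybr = rotated.max(axis=0)
    return xtl, ytl, xbr - xtl, ybr - ytl  # new axis-aligned (x, y, w, h)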
def general_parse_gt(ibs, test_gid_list=None, **kwargs):
if test_gid_list is None:
test_gid_list = general_get_imageset_gids(ibs, 'TEST_SET', **kwargs)
uuid_list = ibs.get_image_uuids(test_gid_list)
gid_list = ibs.get_image_gids_from_uuid(uuid_list)
species_set = set([])
gt_dict = {}
for gid, uuid in zip(gid_list, uuid_list):
aid_list = ibs.get_image_aids(gid)
        gt_list, species_set_ = general_parse_gt_annots(ibs, aid_list, **kwargs)
        species_set = species_set | species_set_
gt_dict[uuid] = gt_list
# print('General Parse GT species_set = %r' % (species_set, ))
return gt_dict
##########################################################################################
def localizer_parse_pred(ibs, test_gid_list=None, species_mapping={}, **kwargs):
depc = ibs.depc_image
if 'feature2_algo' not in kwargs:
kwargs['feature2_algo'] = 'resnet'
if test_gid_list is None:
test_gid_list = general_get_imageset_gids(ibs, 'TEST_SET', **kwargs)
uuid_list = ibs.get_image_uuids(test_gid_list)
size_list = ibs.get_image_sizes(test_gid_list)
    # The first call appears to warm the dependency cache (suspected lazy-loading
    # bug in depc), so the property is intentionally fetched twice
    bboxes_list = depc.get_property('localizations', test_gid_list, 'bboxes', config=kwargs)
    # Get the actual data
bboxes_list = depc.get_property('localizations', test_gid_list, 'bboxes', config=kwargs)
thetas_list = depc.get_property('localizations', test_gid_list, 'thetas', config=kwargs)
confss_list = depc.get_property('localizations', test_gid_list, 'confs', config=kwargs)
classs_list = depc.get_property('localizations', test_gid_list, 'classes', config=kwargs)
length_list = [ len(bbox_list) for bbox_list in bboxes_list ]
# Establish primitives
test_gids_list = [ [test_gid] * length for test_gid, length in zip(test_gid_list, length_list) ]
sizes_list = [ [size] * length for size, length in zip(size_list, length_list) ]
keeps_list = [ [True] * length for length in length_list ]
features_list = [ [None] * length for length in length_list ]
features_lazy_list = [ [None] * length for length in length_list ]
viewpoints_list = [ [None] * length for length in length_list ]
interests_list = [ [None] * length for length in length_list ]
# Get features
if kwargs.get('features', False):
features_list = depc.get_property('localizations_features', test_gid_list,
'vector', config=kwargs)
if kwargs.get('features_lazy', False):
from functools import partial
def features_lazy_func(gid, offset):
vector_list = depc.get_property('localizations_features', gid,
'vector', config=kwargs)
vector = vector_list[offset]
return vector
features_lazy_list = [
[
partial(features_lazy_func, test_gid, test_offset)
for test_offset in range(length)
]
for test_gid, length in zip(test_gid_list, length_list)
]
# Get species and viewpoints labels
if kwargs.get('labels', False):
classs_list = depc.get_property('localizations_labeler', test_gid_list,
'species', config=kwargs)
viewpoints_list = depc.get_property('localizations_labeler', test_gid_list,
'viewpoint', config=kwargs)
# Get updated confidences for boxes
if kwargs.get('classify', False):
print('Using alternate classifications')
# depc.delete_property('localizations_classifier', test_gid_list, config=kwargs)
confss_list = depc.get_property('localizations_classifier', test_gid_list,
'score', config=kwargs)
    # Get updated AoI (annotation-of-interest) flags for the boxes
    # NOTE: this reads the same 'score' column as the classifier branch above
if kwargs.get('interest', False):
print('Using alternate AoI interest flags')
interests_list = depc.get_property('localizations_classifier', test_gid_list,
'score', config=kwargs)
# Reformat results for json
zipped_list_list = zip(
keeps_list,
test_gids_list,
sizes_list,
bboxes_list,
thetas_list,
confss_list,
classs_list,
viewpoints_list,
interests_list,
features_list,
features_lazy_list,
)
results_list = [
[
{
'gid' : test_gid,
'xtl' : bbox[0] / width,
'ytl' : bbox[1] / height,
'xbr' : (bbox[0] + bbox[2]) / width,
'ybr' : (bbox[1] + bbox[3]) / height,
'width' : bbox[2] / width,
'height' : bbox[3] / height,
'theta' : theta,
'confidence' : conf,
'class' : species_mapping.get(class_, class_),
'viewpoint' : viewpoint,
                'interest'     : None if interest is None else interest >= 0.84,  # fixed AoI score threshold
'feature' : feature,
'feature_lazy' : feature_lazy,
}
for keep_, test_gid, (width, height), bbox, theta, conf, class_, viewpoint, interest, feature, feature_lazy in zip(*zipped_list)
if keep_
]
for zipped_list in zipped_list_list
]
pred_dict = {
uuid_ : result_list
for uuid_, result_list in zip(uuid_list, results_list)
}
return pred_dict
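# Illustrative sketch (not part of the original module): the pixel-to-relative
# conversion used when building each prediction dict above. A 50x50 box at
# (100, 200) inside a 1000x500 image becomes fractions of the image size.
def _example_normalized_bbox(bbox=(100, 200, 50, 50), width=1000, height=500):
    return {
        'xtl'    : bbox[0] / width,               # 0.10
        'ytl'    : bbox[1] / height,              # 0.40
        'xbr'    : (bbox[0] + bbox[2]) / width,   # 0.15
        'ybr'    : (bbox[1] + bbox[3]) / height,  # 0.50
        'width'  : bbox[2] / width,               # 0.05
        'height' : bbox[3] / height,              # 0.10
    }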
def localizer_precision_recall_algo(ibs, samples=SAMPLES, test_gid_list=None,
**kwargs):
if test_gid_list is None:
test_gid_list = general_get_imageset_gids(ibs, 'TEST_SET', **kwargs)
test_uuid_list = ibs.get_image_uuids(test_gid_list)
print('\tGather Ground-Truth')
gt_dict = general_parse_gt(ibs, test_gid_list=test_gid_list, **kwargs)
print('\tGather Predictions')
pred_dict = localizer_parse_pred(ibs, test_gid_list=test_gid_list, **kwargs)
species_set = kwargs.get('species_set', None)
if species_set is not None:
        # Strip the '!' prefix (used to mark interest-only species) before filtering
species_set_ = set([ species.lstrip('!') for species in species_set ])
dict_list = [
(gt_dict, 'Ground-Truth'),
(pred_dict, 'Predictions'),
]
for dict_, dict_tag in dict_list:
for image_uuid in dict_:
dict_[image_uuid] = [
val
for val in dict_[image_uuid]
if val.get('class', None) in species_set_
]
values = localizer_tp_fp(test_uuid_list, gt_dict, pred_dict, **kwargs)
conf_list, tp_list, fp_list, total = values
    # Seed the curve with its two ideal endpoints, (recall 0, precision 1) and
    # (recall 1, precision 0), tagged with sentinel confidences
    conf_list_ = [-1.0, -1.0]
    pr_list = [1.0, 0.0]
    re_list = [0.0, 1.0]
for conf, tp, fp in zip(conf_list, tp_list, fp_list):
try:
pr = tp / (tp + fp)
re = tp / total
except ZeroDivisionError:
continue
conf_list_.append(conf)
pr_list.append(pr)
re_list.append(re)
return conf_list_, pr_list, re_list
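# Illustrative sketch (not part of the original module): turning the cumulative
# tp/fp counters produced by localizer_tp_fp into the precision/recall pairs
# consumed above, with 4 ground-truth boxes in total (toy numbers).
def _example_counts_to_pr(total=4.0):
    conf_list = [0.9, 0.8, 0.7]   # descending confidences
    tp_list   = [1, 1, 2]         # cumulative true positives
    fp_list   = [0, 1, 1]         # cumulative false positives
    pr_list, re_list = [], []
    for tp, fp in zip(tp_list, fp_list):
        pr_list.append(tp / (tp + fp))  # precision at this cutoff
        re_list.append(tp / total)      # recall at this cutoff
    return conf_list, pr_list, re_list  # pr = [1.0, 0.5, 2/3], re = [0.25, 0.25, 0.5]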
def localizer_assign(gt_list, pred, min_overlap):
    # Greedily find the best same-class ground-truth with overlap >= min_overlap
    best_overlap = min_overlap
    best_index = None
for index, gt in enumerate(gt_list):
if gt['class'] != pred['class']:
continue
overlap = general_intersection_over_union(gt, pred)
if overlap < best_overlap:
continue
best_overlap = overlap
best_index = index
if best_index is None:
best_overlap = None
return best_index, best_overlap
def localizer_assignments(pred_list, gt_list, gt_list_=None, min_overlap=0.5):
    # Avoid a shared mutable default argument
    if gt_list_ is None:
        gt_list_ = []
    # Consider predictions greedily, from highest to lowest confidence
    pred_list = sorted(pred_list, key=lambda pred: pred['confidence'], reverse=True)
    match_list = []
for pred in pred_list:
flag = False
match_index, best_overlap = localizer_assign(gt_list, pred, min_overlap)
match_index_, best_overlap_ = localizer_assign(gt_list_, pred, min_overlap)
        if match_index is not None:
            # True positive: consume the matched ground-truth so it cannot match again
            flag = True
            del gt_list[match_index]
        elif match_index_ is not None:
            # Matched an ignored (not-of-interest) ground-truth: exclude from scoring
            flag = None
        if flag is not None:
            match_list += [
                (pred['confidence'], flag, match_index, best_overlap)
            ]
return match_list
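# Illustrative sketch (not part of the original module): localizer_assignments with
# one scored and one ignored ground-truth. The ignored match is dropped entirely
# (flag None), the scored match is recorded as a true positive, and gt_list is
# consumed in place.
def _example_localizer_assignments():
    gt = {'class': 'zebra', 'xtl': 0.0, 'ytl': 0.0, 'xbr': 1.0, 'ybr': 1.0,
          'width': 1.0, 'height': 1.0}
    gt_ignored = {'class': 'zebra', 'xtl': 5.0, 'ytl': 5.0, 'xbr': 6.0, 'ybr': 6.0,
                  'width': 1.0, 'height': 1.0}
    pred_a = dict(gt, confidence=0.9)
    pred_b = dict(gt_ignored, confidence=0.8)
    gt_list = [gt]
    matches = localizer_assignments([pred_a, pred_b], gt_list, [gt_ignored])
    return matches, gt_list  # one (0.9, True, ...) match; gt_list is now empty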
def localizer_tp_fp(uuid_list, gt_dict, pred_dict, min_overlap=0.5, **kwargs):
total = 0.0
    interest_species_set = set([])
    species_set = kwargs.get('species_set', None)
    if species_set is not None:
        # Species listed with a '!' prefix are scored on annotations of interest only
        for species in species_set:
            if species.startswith('!'):
                species = species.lstrip('!')
                interest_species_set.add(species)
match_list = []
for image_uuid in uuid_list:
gt_list = []
gt_list_ = []
pred_list = pred_dict[image_uuid]
for gt in gt_dict[image_uuid]:
species = gt['class']
interest = gt['interest']
if species in interest_species_set and not interest:
gt_list_.append(gt)
else:
gt_list.append(gt)
total += len(gt_list)
# Match predictions
match_list_ = localizer_assignments(pred_list, gt_list, gt_list_, min_overlap)
for match_ in match_list_:
match_list.append(match_)
# sort matches by confidence from high to low
match_list = sorted(match_list, key=lambda match: match[0], reverse=True)
conf_list = []
tp_list = []
fp_list = []
tp_counter = 0
fp_counter = 0
for conf, flag, index, overlap in match_list:
if flag:
tp_counter += 1
else:
fp_counter += 1
conf_list.append(conf)
tp_list.append(tp_counter)
fp_list.append(fp_counter)
# print('\t tps [:10] : %r' % (tp_list[:10], ))
# print('\t fps [:10] : %r' % (fp_list[:10], ))
# print('\t con [:10] : %r' % (conf_list[:10], ))
# print('\t tps [-10:] : %r' % (tp_list[-10:], ))
# print('\t fps [-10:] : %r' % (fp_list[-10:], ))
# print('\t con [-10:] : %r' % (conf_list[-10:], ))
# print('\t num_annotations: %r' % (total, ))
return conf_list, tp_list, fp_list, total
def localizer_precision_recall_algo_plot(ibs, **kwargs):
label = kwargs['label']
print('Processing Precision-Recall for: %r' % (label, ))
conf_list, pr_list, re_list = localizer_precision_recall_algo(ibs, **kwargs)
return general_area_best_conf(conf_list, re_list, pr_list, **kwargs)
def _ignore_filter_identity_func(*args, **kwargs):
    # Default ignore filter: never ignores anything
    return False
def localizer_iou_recall_algo(ibs, samples=100, test_gid_list=None,
ignore_filter_func=None, **kwargs):
assert 'min_overlap' not in kwargs
if test_gid_list is None:
test_gid_list = general_get_imageset_gids(ibs, 'TEST_SET', **kwargs)
test_uuid_list = ibs.get_image_uuids(test_gid_list)
if ignore_filter_func is None:
ignore_filter_func = _ignore_filter_identity_func
print('\tGather Ground-Truth')
gt_dict = general_parse_gt(ibs, test_gid_list=test_gid_list, **kwargs)
print('\tGather Predictions')
pred_dict = localizer_parse_pred(ibs, test_gid_list=test_gid_list, **kwargs)
species_set = kwargs.get('species_set', None)
if species_set is not None:
        # Strip the '!' prefix (used to mark interest-only species) before filtering
species_set_ = set([ species.lstrip('!') for species in species_set ])
dict_list = [
(gt_dict, 'Ground-Truth'),
(pred_dict, 'Predictions'),
]
for dict_, dict_tag in dict_list:
for image_uuid in dict_:
temp = []
for val in dict_[image_uuid]:
if val.get('class', None) not in species_set_:
continue
if ignore_filter_func(ibs, val):
continue
temp.append(val)
dict_[image_uuid] = temp
target = (1.0, 1.0)
iou_list = [ _ / float(samples) for _ in range(0, int(samples) + 1) ]
    conf_list_ = []
    iou_list_ = []
    recall_list = []
    for iou in tqdm.tqdm(iou_list):
        values = localizer_tp_fp(test_uuid_list, gt_dict, pred_dict, min_overlap=iou, **kwargs)
        conf_list, tp_list, fp_list, total = values
        # Per-IoU temporaries, kept separate from the accumulators above so the
        # returned lists stay aligned (one entry per IoU value)
        conf_list_temp = []
        pr_list = []
        re_list = []
        for conf, tp, fp in zip(conf_list, tp_list, fp_list):
            try:
                pr = tp / (tp + fp)
                re = tp / total
            except ZeroDivisionError:
                continue
            conf_list_temp.append(conf)
            pr_list.append(pr)
            re_list.append(re)
        # Use the filtered confidences so they stay aligned with re_list / pr_list
        best_tup = general_identify_operating_point(conf_list_temp, re_list, pr_list, target=target)
best_conf_list, best_re_list, best_pr_list, best_length = best_tup
if len(best_conf_list) > 1:
print('WARNING: Multiple best operating points found %r' % (best_conf_list, ))
assert len(best_conf_list) > 0
best_re_index = np.argmax(best_re_list)
best_re = best_re_list[best_re_index]
best_conf = best_conf_list[best_re_index]
conf_list_.append(best_conf)
iou_list_.append(iou)
recall_list.append(best_re)
return conf_list_, iou_list_, recall_list
def localizer_iou_recall_algo_plot(ibs, **kwargs):
label = kwargs['label']
print('Processing IoU-Recall for: %r' % (label, ))
conf_list, iou_list, recall_list = localizer_iou_recall_algo(ibs, **kwargs)
return general_area_best_conf(conf_list, iou_list, recall_list,
interpolate=False, **kwargs)
# def localizer_iou_precision_algo_plot(ibs, **kwargs):
# label = kwargs['label']
# print('Processing Precision-Recall for: %r' % (label, ))
# conf_list, iou_list, pr_list, re_list = localizer_iou_precision_recall_algo(ibs, **kwargs)
# return general_area_best_conf(conf_list, iou_list, re_list, **kwargs)
def localizer_confusion_matrix_algo_plot(ibs, label=None, target_conf=None,
test_gid_list=None, **kwargs):
if test_gid_list is None:
test_gid_list = general_get_imageset_gids(ibs, 'TEST_SET', **kwargs)
test_uuid_list = ibs.get_image_uuids(test_gid_list)
print('\tGather Ground-Truth')
gt_dict = general_parse_gt(ibs, test_gid_list=test_gid_list, **kwargs)
print('\tGather Predictions')
pred_dict = localizer_parse_pred(ibs, test_gid_list=test_gid_list, **kwargs)
species_set = kwargs.get('species_set', None)
if species_set is not None:
        # Strip the '!' prefix (used to mark interest-only species) before filtering
species_set_ = set([ species.lstrip('!') for species in species_set ])
dict_list = [
(gt_dict, 'Ground-Truth'),
(pred_dict, 'Predictions'),
]
for dict_, dict_tag in dict_list:
for image_uuid in dict_:
dict_[image_uuid] = [
val
for val in dict_[image_uuid]
if val.get('class', None) in species_set_
]
values = localizer_tp_fp(test_uuid_list, gt_dict, pred_dict, **kwargs)
conf_list, tp_list, fp_list, total = values
best_conf = None
best_accuracy = None
best_args = None
for conf, tp, fp in sorted(zip(conf_list, tp_list, fp_list)):
fn = total - tp
accuracy = tp / (tp + fp + fn)
if target_conf is None:
if best_accuracy is None or accuracy > best_accuracy:
best_conf = conf
best_accuracy = accuracy
best_args = (tp, fp, fn)
else:
if target_conf <= conf:
best_conf = conf
best_accuracy = accuracy
best_args = (tp, fp, fn)
break
    try:
        assert None not in [best_conf, best_accuracy, best_args]
    except AssertionError:
        # Drop into an interactive debugger, then bail out with NaN sentinels
        ut.embed()
        return np.nan, (np.nan, None)
print('Processing Confusion Matrix for: %r (Conf = %0.02f, Accuracy = %0.02f)' % (label, best_conf, best_accuracy, ))
tp, fp, fn = best_args
label_list = []
prediction_list = []
for _ in range(int(tp)):
label_list.append('positive')
prediction_list.append('positive')
for _ in range(int(fp)):
label_list.append('negative')
prediction_list.append('positive')
for _ in range(int(fn)):
label_list.append('positive')
prediction_list.append('negative')
category_list = ['positive', 'negative']
category_mapping = {
'positive': 0,
'negative': 1,
}
values = general_confusion_matrix_algo(label_list, prediction_list, category_list,
category_mapping, size=20, **kwargs)
return best_conf, values
@register_ibs_method
def localizer_precision_recall(ibs, config_dict=None, output_path=None,
test_gid_list=None, **kwargs):
if config_dict is None:
if test_gid_list is not None:
print('Using %d test gids' % (len(test_gid_list), ))
# species_mapping = { # NOQA
# 'giraffe_masai' : 'giraffe',
# 'giraffe_reticulated' : 'giraffe',
# 'zebra_grevys' : 'zebra',
# 'zebra_plains' : 'zebra',
# }
config_dict = {
# 'seaturtle': (
# [
# {'label': 'Sea Turtle', 'grid' : False, 'algo': 'lightnet', 'config_filepath' : 'seaturtle', 'weight_filepath' : 'seaturtle', 'nms': True, 'nms_thresh': 0.2, 'species_set' : set(['turtle_green', 'turtle_hawksbill'])},
# {'label': 'Sea Turtle Heads', 'grid' : False, 'algo': 'lightnet', 'config_filepath' : 'seaturtle', 'weight_filepath' : 'seaturtle', 'nms': True, 'nms_thresh': 0.2, 'species_set' : set(['turtle_green+head', 'turtle_hawksbill+head'])},
# {'label': 'Green', 'grid' : False, 'algo': 'lightnet', 'config_filepath' : 'seaturtle', 'weight_filepath' : 'seaturtle', 'nms': True, 'nms_thresh': 0.2, 'species_set' : set(['turtle_green'])},
# {'label': 'Green Heads', 'grid' : False, 'algo': 'lightnet', 'config_filepath' : 'seaturtle', 'weight_filepath' : 'seaturtle', 'nms': True, 'nms_thresh': 0.2, 'species_set' : set(['turtle_green+head'])},
# {'label': 'Hawksbill', 'grid' : False, 'algo': 'lightnet', 'config_filepath' : 'seaturtle', 'weight_filepath' : 'seaturtle', 'nms': True, 'nms_thresh': 0.2, 'species_set' : set(['turtle_hawksbill'])},
# {'label': 'Hawksbill Heads', 'grid' : False, 'algo': 'lightnet', 'config_filepath' : 'seaturtle', 'weight_filepath' : 'seaturtle', 'nms': True, 'nms_thresh': 0.2, 'species_set' : set(['turtle_hawksbill+head'])},
# ],
# {'BEST_INDEX': 0},
# ),
# '!seaturtle': (
# [
# {'label': '! Sea Turtle', 'grid' : False, 'algo': 'lightnet', 'config_filepath' : 'seaturtle', 'weight_filepath' : 'seaturtle', 'nms': True, 'nms_thresh': 0.2, 'species_set' : set(['!turtle_green', '!turtle_hawksbill'])},
# {'label': '! Sea Turtle Heads', 'grid' : False, 'algo': 'lightnet', 'config_filepath' : 'seaturtle', 'weight_filepath' : 'seaturtle', 'nms': True, 'nms_thresh': 0.2, 'species_set' : set(['!turtle_green+head', '!turtle_hawksbill+head'])},
# {'label': '! Green', 'grid' : False, 'algo': 'lightnet', 'config_filepath' : 'seaturtle', 'weight_filepath' : 'seaturtle', 'nms': True, 'nms_thresh': 0.2, 'species_set' : set(['!turtle_green'])},
# {'label': '! Green Heads', 'grid' : False, 'algo': 'lightnet', 'config_filepath' : 'seaturtle', 'weight_filepath' : 'seaturtle', 'nms': True, 'nms_thresh': 0.2, 'species_set' : set(['!turtle_green+head'])},
# {'label': '! Hawksbill', 'grid' : False, 'algo': 'lightnet', 'config_filepath' : 'seaturtle', 'weight_filepath' : 'seaturtle', 'nms': True, 'nms_thresh': 0.2, 'species_set' : set(['!turtle_hawksbill'])},
# {'label': '! Hawksbill Heads', 'grid' : False, 'algo': 'lightnet', 'config_filepath' : 'seaturtle', 'weight_filepath' : 'seaturtle', 'nms': True, 'nms_thresh': 0.2, 'species_set' : set(['!turtle_hawksbill+head'])},
# ],
# {'BEST_INDEX': 0},
# ),
# 'hawksbills': (
# [
# {'label': 'Hawksbill NMS 0%', 'grid' : False, 'algo': 'lightnet', 'config_filepath' : 'seaturtle', 'weight_filepath' : 'seaturtle', 'nms': True, 'nms_thresh': 0.00, 'species_set' : set(['turtle_hawksbill'])},
# {'label': 'Hawksbill NMS 10%', 'grid' : False, 'algo': 'lightnet', 'config_filepath' : 'seaturtle', 'weight_filepath' : 'seaturtle', 'nms': True, 'nms_thresh': 0.10, 'species_set' : set(['turtle_hawksbill'])},
# {'label': 'Hawksbill NMS 20%', 'grid' : False, 'algo': 'lightnet', 'config_filepath' : 'seaturtle', 'weight_filepath' : 'seaturtle', 'nms': True, 'nms_thresh': 0.20, 'species_set' : set(['turtle_hawksbill'])},
# {'label': 'Hawksbill NMS 30%', 'grid' : False, 'algo': 'lightnet', 'config_filepath' : 'seaturtle', 'weight_filepath' : 'seaturtle', 'nms': True, 'nms_thresh': 0.30, 'species_set' : set(['turtle_hawksbill'])},
# {'label': 'Hawksbill NMS 40%', 'grid' : False, 'algo': 'lightnet', 'config_filepath' : 'seaturtle', 'weight_filepath' : 'seaturtle', 'nms': True, 'nms_thresh': 0.40, 'species_set' : set(['turtle_hawksbill'])},
# {'label': 'Hawksbill NMS 50%', 'grid' : False, 'algo': 'lightnet', 'config_filepath' : 'seaturtle', 'weight_filepath' : 'seaturtle', 'nms': True, 'nms_thresh': 0.50, 'species_set' : set(['turtle_hawksbill'])},
# {'label': 'Hawksbill NMS 60%', 'grid' : False, 'algo': 'lightnet', 'config_filepath' : 'seaturtle', 'weight_filepath' : 'seaturtle', 'nms': True, 'nms_thresh': 0.60, 'species_set' : set(['turtle_hawksbill'])},
# {'label': 'Hawksbill NMS 70%', 'grid' : False, 'algo': 'lightnet', 'config_filepath' : 'seaturtle', 'weight_filepath' : 'seaturtle', 'nms': True, 'nms_thresh': 0.70, 'species_set' : set(['turtle_hawksbill'])},
# {'label': 'Hawksbill NMS 80%', 'grid' : False, 'algo': 'lightnet', 'config_filepath' : 'seaturtle', 'weight_filepath' : 'seaturtle', 'nms': True, 'nms_thresh': 0.80, 'species_set' : set(['turtle_hawksbill'])},
# {'label': 'Hawksbill NMS 90%', 'grid' : False, 'algo': 'lightnet', 'config_filepath' : 'seaturtle', 'weight_filepath' : 'seaturtle', 'nms': True, 'nms_thresh': 0.90, 'species_set' : set(['turtle_hawksbill'])},
# {'label': 'Hawksbill NMS 100%', 'grid' : False, 'algo': 'lightnet', 'config_filepath' : 'seaturtle', 'weight_filepath' : 'seaturtle', 'nms': True, 'nms_thresh': 1.00, 'species_set' : set(['turtle_hawksbill'])},
# ],
# {},
# ),
# 'hawsbills+heads': (
# [
# {'label': 'Hawksbill Head NMS 0%', 'grid' : False, 'algo': 'lightnet', 'config_filepath' : 'seaturtle', 'weight_filepath' : 'seaturtle', 'nms': True, 'nms_thresh': 0.00, 'species_set' : set(['turtle_hawksbill+head'])},
# {'label': 'Hawksbill Head NMS 10%', 'grid' : False, 'algo': 'lightnet', 'config_filepath' : 'seaturtle', 'weight_filepath' : 'seaturtle', 'nms': True, 'nms_thresh': 0.10, 'species_set' : set(['turtle_hawksbill+head'])},
# {'label': 'Hawksbill Head NMS 20%', 'grid' : False, 'algo': 'lightnet', 'config_filepath' : 'seaturtle', 'weight_filepath' : 'seaturtle', 'nms': True, 'nms_thresh': 0.20, 'species_set' : set(['turtle_hawksbill+head'])},
# {'label': 'Hawksbill Head NMS 30%', 'grid' : False, 'algo': 'lightnet', 'config_filepath' : 'seaturtle', 'weight_filepath' : 'seaturtle', 'nms': True, 'nms_thresh': 0.30, 'species_set' : set(['turtle_hawksbill+head'])},
# {'label': 'Hawksbill Head NMS 40%', 'grid' : False, 'algo': 'lightnet', 'config_filepath' : 'seaturtle', 'weight_filepath' : 'seaturtle', 'nms': True, 'nms_thresh': 0.40, 'species_set' : set(['turtle_hawksbill+head'])},
# {'label': 'Hawksbill Head NMS 50%', 'grid' : False, 'algo': 'lightnet', 'config_filepath' : 'seaturtle', 'weight_filepath' : 'seaturtle', 'nms': True, 'nms_thresh': 0.50, 'species_set' : set(['turtle_hawksbill+head'])},
# {'label': 'Hawksbill Head NMS 60%', 'grid' : False, 'algo': 'lightnet', 'config_filepath' : 'seaturtle', 'weight_filepath' : 'seaturtle', 'nms': True, 'nms_thresh': 0.60, 'species_set' : set(['turtle_hawksbill+head'])},
# {'label': 'Hawksbill Head NMS 70%', 'grid' : False, 'algo': 'lightnet', 'config_filepath' : 'seaturtle', 'weight_filepath' : 'seaturtle', 'nms': True, 'nms_thresh': 0.70, 'species_set' : set(['turtle_hawksbill+head'])},
# {'label': 'Hawksbill Head NMS 80%', 'grid' : False, 'algo': 'lightnet', 'config_filepath' : 'seaturtle', 'weight_filepath' : 'seaturtle', 'nms': True, 'nms_thresh': 0.80, 'species_set' : set(['turtle_hawksbill+head'])},
# {'label': 'Hawksbill Head NMS 90%', 'grid' : False, 'algo': 'lightnet', 'config_filepath' : 'seaturtle', 'weight_filepath' : 'seaturtle', 'nms': True, 'nms_thresh': 0.90, 'species_set' : set(['turtle_hawksbill+head'])},
# {'label': 'Hawksbill Head NMS 100%', 'grid' : False, 'algo': 'lightnet', 'config_filepath' : 'seaturtle', 'weight_filepath' : 'seaturtle', 'nms': True, 'nms_thresh': 1.00, 'species_set' : set(['turtle_hawksbill+head'])},
# ],
# {},
# ),
# 'hammerhead': (
# [
# {'label': 'Hammerhead NMS 0%', 'grid' : False, 'algo': 'lightnet', 'config_filepath' : 'hammerhead', 'weight_filepath' : 'hammerhead', 'nms': True, 'nms_thresh': 0.00, 'species_set' : set(['shark_hammerhead'])},
# {'label': 'Hammerhead NMS 10%', 'grid' : False, 'algo': 'lightnet', 'config_filepath' : 'hammerhead', 'weight_filepath' : 'hammerhead', 'nms': True, 'nms_thresh': 0.10, 'species_set' : set(['shark_hammerhead'])},
# {'label': 'Hammerhead NMS 20%', 'grid' : False, 'algo': 'lightnet', 'config_filepath' : 'hammerhead', 'weight_filepath' : 'hammerhead', 'nms': True, 'nms_thresh': 0.20, 'species_set' : set(['shark_hammerhead'])},
# {'label': 'Hammerhead NMS 30%', 'grid' : False, 'algo': 'lightnet', 'config_filepath' : 'hammerhead', 'weight_filepath' : 'hammerhead', 'nms': True, 'nms_thresh': 0.30, 'species_set' : set(['shark_hammerhead'])},
# {'label': 'Hammerhead NMS 40%', 'grid' : False, 'algo': 'lightnet', 'config_filepath' : 'hammerhead', 'weight_filepath' : 'hammerhead', 'nms': True, 'nms_thresh': 0.40, 'species_set' : set(['shark_hammerhead'])},
# {'label': 'Hammerhead NMS 50%', 'grid' : False, 'algo': 'lightnet', 'config_filepath' : 'hammerhead', 'weight_filepath' : 'hammerhead', 'nms': True, 'nms_thresh': 0.50, 'species_set' : set(['shark_hammerhead'])},
# {'label': 'Hammerhead NMS 60%', 'grid' : False, 'algo': 'lightnet', 'config_filepath' : 'hammerhead', 'weight_filepath' : 'hammerhead', 'nms': True, 'nms_thresh': 0.60, 'species_set' : set(['shark_hammerhead'])},
# {'label': 'Hammerhead NMS 70%', 'grid' : False, 'algo': 'lightnet', 'config_filepath' : 'hammerhead', 'weight_filepath' : 'hammerhead', 'nms': True, 'nms_thresh': 0.70, 'species_set' : set(['shark_hammerhead'])},
# {'label': 'Hammerhead NMS 80%', 'grid' : False, 'algo': 'lightnet', 'config_filepath' : 'hammerhead', 'weight_filepath' : 'hammerhead', 'nms': True, 'nms_thresh': 0.80, 'species_set' : set(['shark_hammerhead'])},
# {'label': 'Hammerhead NMS 90%', 'grid' : False, 'algo': 'lightnet', 'config_filepath' : 'hammerhead', 'weight_filepath' : 'hammerhead', 'nms': True, 'nms_thresh': 0.90, 'species_set' : set(['shark_hammerhead'])},
# {'label': 'Hammerhead NMS 100%', 'grid' : False, 'algo': 'lightnet', 'config_filepath' : 'hammerhead', 'weight_filepath' : 'hammerhead', 'nms': True, 'nms_thresh': 1.00, 'species_set' : set(['shark_hammerhead'])},
# ],
# {},
# ),
# '!hammerhead': (
# [
# {'label': 'Hammerhead NMS 40%', 'grid' : False, 'algo': 'lightnet', 'config_filepath' : 'hammerhead', 'weight_filepath' : 'hammerhead', 'nms': True, 'nms_thresh': 0.40, 'species_set' : set(['shark_hammerhead'])},
# {'label': 'Hammerhead ! NMS 0%', 'grid' : False, 'algo': 'lightnet', 'config_filepath' : 'hammerhead', 'weight_filepath' : 'hammerhead', 'nms': True, 'nms_thresh': 0.00, 'species_set' : set(['!shark_hammerhead'])},
# {'label': 'Hammerhead ! NMS 10%', 'grid' : False, 'algo': 'lightnet', 'config_filepath' : 'hammerhead', 'weight_filepath' : 'hammerhead', 'nms': True, 'nms_thresh': 0.10, 'species_set' : set(['!shark_hammerhead'])},
# {'label': 'Hammerhead ! NMS 20%', 'grid' : False, 'algo': 'lightnet', 'config_filepath' : 'hammerhead', 'weight_filepath' : 'hammerhead', 'nms': True, 'nms_thresh': 0.20, 'species_set' : set(['!shark_hammerhead'])},
# {'label': 'Hammerhead ! NMS 30%', 'grid' : False, 'algo': 'lightnet', 'config_filepath' : 'hammerhead', 'weight_filepath' : 'hammerhead', 'nms': True, 'nms_thresh': 0.30, 'species_set' : set(['!shark_hammerhead'])},
# {'label': 'Hammerhead ! NMS 40%', 'grid' : False, 'algo': 'lightnet', 'config_filepath' : 'hammerhead', 'weight_filepath' : 'hammerhead', 'nms': True, 'nms_thresh': 0.40, 'species_set' : set(['!shark_hammerhead'])},
# {'label': 'Hammerhead ! NMS 50%', 'grid' : False, 'algo': 'lightnet', 'config_filepath' : 'hammerhead', 'weight_filepath' : 'hammerhead', 'nms': True, 'nms_thresh': 0.50, 'species_set' : set(['!shark_hammerhead'])},
# {'label': 'Hammerhead ! NMS 60%', 'grid' : False, 'algo': 'lightnet', 'config_filepath' : 'hammerhead', 'weight_filepath' : 'hammerhead', 'nms': True, 'nms_thresh': 0.60, 'species_set' : set(['!shark_hammerhead'])},
# {'label': 'Hammerhead ! NMS 70%', 'grid' : False, 'algo': 'lightnet', 'config_filepath' : 'hammerhead', 'weight_filepath' : 'hammerhead', 'nms': True, 'nms_thresh': 0.70, 'species_set' : set(['!shark_hammerhead'])},
# {'label': 'Hammerhead ! NMS 80%', 'grid' : False, 'algo': 'lightnet', 'config_filepath' : 'hammerhead', 'weight_filepath' : 'hammerhead', 'nms': True, 'nms_thresh': 0.80, 'species_set' : set(['!shark_hammerhead'])},
# {'label': 'Hammerhead ! NMS 90%', 'grid' : False, 'algo': 'lightnet', 'config_filepath' : 'hammerhead', 'weight_filepath' : 'hammerhead', 'nms': True, 'nms_thresh': 0.90, 'species_set' : set(['!shark_hammerhead'])},
# {'label': 'Hammerhead ! NMS 100%', 'grid' : False, 'algo': 'lightnet', 'config_filepath' : 'hammerhead', 'weight_filepath' : 'hammerhead', 'nms': True, 'nms_thresh': 1.00, 'species_set' : set(['!shark_hammerhead'])},
# ],
# {'offset_color': 1},
# ),
# 'ggr2-giraffe-lightnet': (
# [
# {'label': 'Giraffe NMS 0%', 'grid' : False, 'algo': 'lightnet', 'config_filepath' : 'ggr2', 'weight_filepath' : 'ggr2', 'nms': True, 'nms_thresh': 0.00, 'test_gid_list': test_gid_list, 'species_mapping': species_mapping, 'species_set': set(['giraffe'])},
# {'label': 'Giraffe NMS 10%', 'grid' : False, 'algo': 'lightnet', 'config_filepath' : 'ggr2', 'weight_filepath' : 'ggr2', 'nms': True, 'nms_thresh': 0.10, 'test_gid_list': test_gid_list, 'species_mapping': species_mapping, 'species_set': set(['giraffe'])},
# {'label': 'Giraffe NMS 20%', 'grid' : False, 'algo': 'lightnet', 'config_filepath' : 'ggr2', 'weight_filepath' : 'ggr2', 'nms': True, 'nms_thresh': 0.20, 'test_gid_list': test_gid_list, 'species_mapping': species_mapping, 'species_set': set(['giraffe'])},
# {'label': 'Giraffe NMS 30%', 'grid' : False, 'algo': 'lightnet', 'config_filepath' : 'ggr2', 'weight_filepath' : 'ggr2', 'nms': True, 'nms_thresh': 0.30, 'test_gid_list': test_gid_list, 'species_mapping': species_mapping, 'species_set': set(['giraffe'])},
# {'label': 'Giraffe NMS 40%', 'grid' : False, 'algo': 'lightnet', 'config_filepath' : 'ggr2', 'weight_filepath' : 'ggr2', 'nms': True, 'nms_thresh': 0.40, 'test_gid_list': test_gid_list, 'species_mapping': species_mapping, 'species_set': set(['giraffe'])},
# {'label': 'Giraffe NMS 50%', 'grid' : False, 'algo': 'lightnet', 'config_filepath' : 'ggr2', 'weight_filepath' : 'ggr2', 'nms': True, 'nms_thresh': 0.50, 'test_gid_list': test_gid_list, 'species_mapping': species_mapping, 'species_set': set(['giraffe'])},
# {'label': 'Giraffe NMS 60%', 'grid' : False, 'algo': 'lightnet', 'config_filepath' : 'ggr2', 'weight_filepath' : 'ggr2', 'nms': True, 'nms_thresh': 0.60, 'test_gid_list': test_gid_list, 'species_mapping': species_mapping, 'species_set': set(['giraffe'])},
# {'label': 'Giraffe NMS 70%', 'grid' : False, 'algo': 'lightnet', 'config_filepath' : 'ggr2', 'weight_filepath' : 'ggr2', 'nms': True, 'nms_thresh': 0.70, 'test_gid_list': test_gid_list, 'species_mapping': species_mapping, 'species_set': set(['giraffe'])},
# {'label': 'Giraffe NMS 80%', 'grid' : False, 'algo': 'lightnet', 'config_filepath' : 'ggr2', 'weight_filepath' : 'ggr2', 'nms': True, 'nms_thresh': 0.80, 'test_gid_list': test_gid_list, 'species_mapping': species_mapping, 'species_set': set(['giraffe'])},
# {'label': 'Giraffe NMS 90%', 'grid' : False, 'algo': 'lightnet', 'config_filepath' : 'ggr2', 'weight_filepath' : 'ggr2', 'nms': True, 'nms_thresh': 0.90, 'test_gid_list': test_gid_list, 'species_mapping': species_mapping, 'species_set': set(['giraffe'])},
# {'label': 'Giraffe NMS 100%', 'grid' : False, 'algo': 'lightnet', 'config_filepath' : 'ggr2', 'weight_filepath' : 'ggr2', 'nms': True, 'nms_thresh': 1.00, 'test_gid_list': test_gid_list, 'species_mapping': species_mapping, 'species_set': set(['giraffe'])},
# ],
# {},
# ),
# 'ggr2-zebra-lightnet': (
# [
# {'label': 'Zebra NMS 0%', 'grid' : False, 'algo': 'lightnet', 'config_filepath' : 'ggr2', 'weight_filepath' : 'ggr2', 'nms': True, 'nms_thresh': 0.00, 'test_gid_list': test_gid_list, 'species_mapping': species_mapping, 'species_set': set(['zebra'])},
# {'label': 'Zebra NMS 10%', 'grid' : False, 'algo': 'lightnet', 'config_filepath' : 'ggr2', 'weight_filepath' : 'ggr2', 'nms': True, 'nms_thresh': 0.10, 'test_gid_list': test_gid_list, 'species_mapping': species_mapping, 'species_set': set(['zebra'])},
# {'label': 'Zebra NMS 20%', 'grid' : False, 'algo': 'lightnet', 'config_filepath' : 'ggr2', 'weight_filepath' : 'ggr2', 'nms': True, 'nms_thresh': 0.20, 'test_gid_list': test_gid_list, 'species_mapping': species_mapping, 'species_set': set(['zebra'])},
# {'label': 'Zebra NMS 30%', 'grid' : False, 'algo': 'lightnet', 'config_filepath' : 'ggr2', 'weight_filepath' : 'ggr2', 'nms': True, 'nms_thresh': 0.30, 'test_gid_list': test_gid_list, 'species_mapping': species_mapping, 'species_set': set(['zebra'])},
# {'label': 'Zebra NMS 40%', 'grid' : False, 'algo': 'lightnet', 'config_filepath' : 'ggr2', 'weight_filepath' : 'ggr2', 'nms': True, 'nms_thresh': 0.40, 'test_gid_list': test_gid_list, 'species_mapping': species_mapping, 'species_set': set(['zebra'])},
# {'label': 'Zebra NMS 50%', 'grid' : False, 'algo': 'lightnet', 'config_filepath' : 'ggr2', 'weight_filepath' : 'ggr2', 'nms': True, 'nms_thresh': 0.50, 'test_gid_list': test_gid_list, 'species_mapping': species_mapping, 'species_set': set(['zebra'])},
# {'label': 'Zebra NMS 60%', 'grid' : False, 'algo': 'lightnet', 'config_filepath' : 'ggr2', 'weight_filepath' : 'ggr2', 'nms': True, 'nms_thresh': 0.60, 'test_gid_list': test_gid_list, 'species_mapping': species_mapping, 'species_set': set(['zebra'])},
# {'label': 'Zebra NMS 70%', 'grid' : False, 'algo': 'lightnet', 'config_filepath' : 'ggr2', 'weight_filepath' : 'ggr2', 'nms': True, 'nms_thresh': 0.70, 'test_gid_list': test_gid_list, 'species_mapping': species_mapping, 'species_set': set(['zebra'])},
# {'label': 'Zebra NMS 80%', 'grid' : False, 'algo': 'lightnet', 'config_filepath' : 'ggr2', 'weight_filepath' : 'ggr2', 'nms': True, 'nms_thresh': 0.80, 'test_gid_list': test_gid_list, 'species_mapping': species_mapping, 'species_set': set(['zebra'])},
# {'label': 'Zebra NMS 90%', 'grid' : False, 'algo': 'lightnet', 'config_filepath' : 'ggr2', 'weight_filepath' : 'ggr2', 'nms': True, 'nms_thresh': 0.90, 'test_gid_list': test_gid_list, 'species_mapping': species_mapping, 'species_set': set(['zebra'])},
# {'label': 'Zebra NMS 100%', 'grid' : False, 'algo': 'lightnet', 'config_filepath' : 'ggr2', 'weight_filepath' : 'ggr2', 'nms': True, 'nms_thresh': 1.00, 'test_gid_list': test_gid_list, 'species_mapping': species_mapping, 'species_set': set(['zebra'])},
# ],
# {},
# ),
# 'ggr2-!giraffe-lightnet': (
# [
# {'label': 'Giraffe ! NMS 0%', 'grid' : False, 'algo': 'lightnet', 'config_filepath' : 'ggr2', 'weight_filepath' : 'ggr2', 'nms': True, 'nms_thresh': 0.00, 'test_gid_list': test_gid_list, 'species_mapping': species_mapping, 'species_set': set(['!giraffe'])},
# {'label': 'Giraffe ! NMS 10%', 'grid' : False, 'algo': 'lightnet', 'config_filepath' : 'ggr2', 'weight_filepath' : 'ggr2', 'nms': True, 'nms_thresh': 0.10, 'test_gid_list': test_gid_list, 'species_mapping': species_mapping, 'species_set': set(['!giraffe'])},
# {'label': 'Giraffe ! NMS 20%', 'grid' : False, 'algo': 'lightnet', 'config_filepath' : 'ggr2', 'weight_filepath' : 'ggr2', 'nms': True, 'nms_thresh': 0.20, 'test_gid_list': test_gid_list, 'species_mapping': species_mapping, 'species_set': set(['!giraffe'])},
# {'label': 'Giraffe ! NMS 30%', 'grid' : False, 'algo': 'lightnet', 'config_filepath' : 'ggr2', 'weight_filepath' : 'ggr2', 'nms': True, 'nms_thresh': 0.30, 'test_gid_list': test_gid_list, 'species_mapping': species_mapping, 'species_set': set(['!giraffe'])},
# {'label': 'Giraffe ! NMS 40%', 'grid' : False, 'algo': 'lightnet', 'config_filepath' : 'ggr2', 'weight_filepath' : 'ggr2', 'nms': True, 'nms_thresh': 0.40, 'test_gid_list': test_gid_list, 'species_mapping': species_mapping, 'species_set': set(['!giraffe'])},
# {'label': 'Giraffe ! NMS 50%', 'grid' : False, 'algo': 'lightnet', 'config_filepath' : 'ggr2', 'weight_filepath' : 'ggr2', 'nms': True, 'nms_thresh': 0.50, 'test_gid_list': test_gid_list, 'species_mapping': species_mapping, 'species_set': set(['!giraffe'])},
# {'label': 'Giraffe ! NMS 60%', 'grid' : False, 'algo': 'lightnet', 'config_filepath' : 'ggr2', 'weight_filepath' : 'ggr2', 'nms': True, 'nms_thresh': 0.60, 'test_gid_list': test_gid_list, 'species_mapping': species_mapping, 'species_set': set(['!giraffe'])},
# {'label': 'Giraffe ! NMS 70%', 'grid' : False, 'algo': 'lightnet', 'config_filepath' : 'ggr2', 'weight_filepath' : 'ggr2', 'nms': True, 'nms_thresh': 0.70, 'test_gid_list': test_gid_list, 'species_mapping': species_mapping, 'species_set': set(['!giraffe'])},
# {'label': 'Giraffe ! NMS 80%', 'grid' : False, 'algo': 'lightnet', 'config_filepath' : 'ggr2', 'weight_filepath' : 'ggr2', 'nms': True, 'nms_thresh': 0.80, 'test_gid_list': test_gid_list, 'species_mapping': species_mapping, 'species_set': set(['!giraffe'])},
# {'label': 'Giraffe ! NMS 90%', 'grid' : False, 'algo': 'lightnet', 'config_filepath' : 'ggr2', 'weight_filepath' : 'ggr2', 'nms': True, 'nms_thresh': 0.90, 'test_gid_list': test_gid_list, 'species_mapping': species_mapping, 'species_set': set(['!giraffe'])},
# {'label': 'Giraffe ! NMS 100%', 'grid' : False, 'algo': 'lightnet', 'config_filepath' : 'ggr2', 'weight_filepath' : 'ggr2', 'nms': True, 'nms_thresh': 1.00, 'test_gid_list': test_gid_list, 'species_mapping': species_mapping, 'species_set': set(['!giraffe'])},
# ],
# {},
# ),
# 'ggr2-!zebra-lightnet': (
# [
# {'label': 'Zebra ! NMS 0%', 'grid' : False, 'algo': 'lightnet', 'config_filepath' : 'ggr2', 'weight_filepath' : 'ggr2', 'nms': True, 'nms_thresh': 0.00, 'test_gid_list': test_gid_list, 'species_mapping': species_mapping, 'species_set': set(['!zebra'])},
# {'label': 'Zebra ! NMS 10%', 'grid' : False, 'algo': 'lightnet', 'config_filepath' : 'ggr2', 'weight_filepath' : 'ggr2', 'nms': True, 'nms_thresh': 0.10, 'test_gid_list': test_gid_list, 'species_mapping': species_mapping, 'species_set': set(['!zebra'])},
# {'label': 'Zebra ! NMS 20%', 'grid' : False, 'algo': 'lightnet', 'config_filepath' : 'ggr2', 'weight_filepath' : 'ggr2', 'nms': True, 'nms_thresh': 0.20, 'test_gid_list': test_gid_list, 'species_mapping': species_mapping, 'species_set': set(['!zebra'])},
# {'label': 'Zebra ! NMS 30%', 'grid' : False, 'algo': 'lightnet', 'config_filepath' : 'ggr2', 'weight_filepath' : 'ggr2', 'nms': True, 'nms_thresh': 0.30, 'test_gid_list': test_gid_list, 'species_mapping': species_mapping, 'species_set': set(['!zebra'])},
# {'label': 'Zebra ! NMS 40%', 'grid' : False, 'algo': 'lightnet', 'config_filepath' : 'ggr2', 'weight_filepath' : 'ggr2', 'nms': True, 'nms_thresh': 0.40, 'test_gid_list': test_gid_list, 'species_mapping': species_mapping, 'species_set': set(['!zebra'])},
# {'label': 'Zebra ! NMS 50%', 'grid' : False, 'algo': 'lightnet', 'config_filepath' : 'ggr2', 'weight_filepath' : 'ggr2', 'nms': True, 'nms_thresh': 0.50, 'test_gid_list': test_gid_list, 'species_mapping': species_mapping, 'species_set': set(['!zebra'])},
# {'label': 'Zebra ! NMS 60%', 'grid' : False, 'algo': 'lightnet', 'config_filepath' : 'ggr2', 'weight_filepath' : 'ggr2', 'nms': True, 'nms_thresh': 0.60, 'test_gid_list': test_gid_list, 'species_mapping': species_mapping, 'species_set': set(['!zebra'])},
# {'label': 'Zebra ! NMS 70%', 'grid' : False, 'algo': 'lightnet', 'config_filepath' : 'ggr2', 'weight_filepath' : 'ggr2', 'nms': True, 'nms_thresh': 0.70, 'test_gid_list': test_gid_list, 'species_mapping': species_mapping, 'species_set': set(['!zebra'])},
# {'label': 'Zebra ! NMS 80%', 'grid' : False, 'algo': 'lightnet', 'config_filepath' : 'ggr2', 'weight_filepath' : 'ggr2', 'nms': True, 'nms_thresh': 0.80, 'test_gid_list': test_gid_list, 'species_mapping': species_mapping, 'species_set': set(['!zebra'])},
# {'label': 'Zebra ! NMS 90%', 'grid' : False, 'algo': 'lightnet', 'config_filepath' : 'ggr2', 'weight_filepath' : 'ggr2', 'nms': True, 'nms_thresh': 0.90, 'test_gid_list': test_gid_list, 'species_mapping': species_mapping, 'species_set': set(['!zebra'])},
# {'label': 'Zebra ! NMS 100%', 'grid' : False, 'algo': 'lightnet', 'config_filepath' : 'ggr2', 'weight_filepath' : 'ggr2', 'nms': True, 'nms_thresh': 1.00, 'test_gid_list': test_gid_list, 'species_mapping': species_mapping, 'species_set': set(['!zebra'])},
# ],
# {},
# ),
# 'ggr2-giraffe-azure': (
# [
# {'label': 'Giraffe NMS 0%', 'grid' : False, 'algo': 'azure', 'config_filepath' : None, 'weight_filepath' : None, 'nms': True, 'nms_thresh': 0.00, 'test_gid_list': test_gid_list, 'species_mapping': species_mapping, 'species_set': set(['giraffe'])},
# {'label': 'Giraffe NMS 10%', 'grid' : False, 'algo': 'azure', 'config_filepath' : None, 'weight_filepath' : None, 'nms': True, 'nms_thresh': 0.10, 'test_gid_list': test_gid_list, 'species_mapping': species_mapping, 'species_set': set(['giraffe'])},
# {'label': 'Giraffe NMS 20%', 'grid' : False, 'algo': 'azure', 'config_filepath' : None, 'weight_filepath' : None, 'nms': True, 'nms_thresh': 0.20, 'test_gid_list': test_gid_list, 'species_mapping': species_mapping, 'species_set': set(['giraffe'])},
# {'label': 'Giraffe NMS 30%', 'grid' : False, 'algo': 'azure', 'config_filepath' : None, 'weight_filepath' : None, 'nms': True, 'nms_thresh': 0.30, 'test_gid_list': test_gid_list, 'species_mapping': species_mapping, 'species_set': set(['giraffe'])},
# {'label': 'Giraffe NMS 40%', 'grid' : False, 'algo': 'azure', 'config_filepath' : None, 'weight_filepath' : None, 'nms': True, 'nms_thresh': 0.40, 'test_gid_list': test_gid_list, 'species_mapping': species_mapping, 'species_set': set(['giraffe'])},
# {'label': 'Giraffe NMS 50%', 'grid' : False, 'algo': 'azure', 'config_filepath' : None, 'weight_filepath' : None, 'nms': True, 'nms_thresh': 0.50, 'test_gid_list': test_gid_list, 'species_mapping': species_mapping, 'species_set': set(['giraffe'])},
# {'label': 'Giraffe NMS 60%', 'grid' : False, 'algo': 'azure', 'config_filepath' : None, 'weight_filepath' : None, 'nms': True, 'nms_thresh': 0.60, 'test_gid_list': test_gid_list, 'species_mapping': species_mapping, 'species_set': set(['giraffe'])},
# {'label': 'Giraffe NMS 70%', 'grid' : False, 'algo': 'azure', 'config_filepath' : None, 'weight_filepath' : None, 'nms': True, 'nms_thresh': 0.70, 'test_gid_list': test_gid_list, 'species_mapping': species_mapping, 'species_set': set(['giraffe'])},
# {'label': 'Giraffe NMS 80%', 'grid' : False, 'algo': 'azure', 'config_filepath' : None, 'weight_filepath' : None, 'nms': True, 'nms_thresh': 0.80, 'test_gid_list': test_gid_list, 'species_mapping': species_mapping, 'species_set': set(['giraffe'])},
# {'label': 'Giraffe NMS 90%', 'grid' : False, 'algo': 'azure', 'config_filepath' : None, 'weight_filepath' : None, 'nms': True, 'nms_thresh': 0.90, 'test_gid_list': test_gid_list, 'species_mapping': species_mapping, 'species_set': set(['giraffe'])},
# {'label': 'Giraffe NMS 100%', 'grid' : False, 'algo': 'azure', 'config_filepath' : None, 'weight_filepath' : None, 'nms': True, 'nms_thresh': 1.00, 'test_gid_list': test_gid_list, 'species_mapping': species_mapping, 'species_set': set(['giraffe'])},
# ],
# {},
# ),
# 'ggr2-zebra-azure': (
# [
# {'label': 'Zebra NMS 0%', 'grid' : False, 'algo': 'azure', 'config_filepath' : None, 'weight_filepath' : None, 'nms': True, 'nms_thresh': 0.00, 'test_gid_list': test_gid_list, 'species_mapping': species_mapping, 'species_set': set(['zebra'])},
# {'label': 'Zebra NMS 10%', 'grid' : False, 'algo': 'azure', 'config_filepath' : None, 'weight_filepath' : None, 'nms': True, 'nms_thresh': 0.10, 'test_gid_list': test_gid_list, 'species_mapping': species_mapping, 'species_set': set(['zebra'])},
# {'label': 'Zebra NMS 20%', 'grid' : False, 'algo': 'azure', 'config_filepath' : None, 'weight_filepath' : None, 'nms': True, 'nms_thresh': 0.20, 'test_gid_list': test_gid_list, 'species_mapping': species_mapping, 'species_set': set(['zebra'])},
# {'label': 'Zebra NMS 30%', 'grid' : False, 'algo': 'azure', 'config_filepath' : None, 'weight_filepath' : None, 'nms': True, 'nms_thresh': 0.30, 'test_gid_list': test_gid_list, 'species_mapping': species_mapping, 'species_set': set(['zebra'])},
# {'label': 'Zebra NMS 40%', 'grid' : False, 'algo': 'azure', 'config_filepath' : None, 'weight_filepath' : None, 'nms': True, 'nms_thresh': 0.40, 'test_gid_list': test_gid_list, 'species_mapping': species_mapping, 'species_set': set(['zebra'])},
# {'label': 'Zebra NMS 50%', 'grid' : False, 'algo': 'azure', 'config_filepath' : None, 'weight_filepath' : None, 'nms': True, 'nms_thresh': 0.50, 'test_gid_list': test_gid_list, 'species_mapping': species_mapping, 'species_set': set(['zebra'])},
# {'label': 'Zebra NMS 60%', 'grid' : False, 'algo': 'azure', 'config_filepath' : None, 'weight_filepath' : None, 'nms': True, 'nms_thresh': 0.60, 'test_gid_list': test_gid_list, 'species_mapping': species_mapping, 'species_set': set(['zebra'])},
# {'label': 'Zebra NMS 70%', 'grid' : False, 'algo': 'azure', 'config_filepath' : None, 'weight_filepath' : None, 'nms': True, 'nms_thresh': 0.70, 'test_gid_list': test_gid_list, 'species_mapping': species_mapping, 'species_set': set(['zebra'])},
# {'label': 'Zebra NMS 80%', 'grid' : False, 'algo': 'azure', 'config_filepath' : None, 'weight_filepath' : None, 'nms': True, 'nms_thresh': 0.80, 'test_gid_list': test_gid_list, 'species_mapping': species_mapping, 'species_set': set(['zebra'])},
# {'label': 'Zebra NMS 90%', 'grid' : False, 'algo': 'azure', 'config_filepath' : None, 'weight_filepath' : None, 'nms': True, 'nms_thresh': 0.90, 'test_gid_list': test_gid_list, 'species_mapping': species_mapping, 'species_set': set(['zebra'])},
# {'label': 'Zebra NMS 100%', 'grid' : False, 'algo': 'azure', 'config_filepath' : None, 'weight_filepath' : None, 'nms': True, 'nms_thresh': 1.00, 'test_gid_list': test_gid_list, 'species_mapping': species_mapping, 'species_set': set(['zebra'])},
# ],
# {},
# ),
# 'ggr2-!giraffe-azure': (
# [
# {'label': 'Giraffe ! NMS 0%', 'grid' : False, 'algo': 'azure', 'config_filepath' : None, 'weight_filepath' : None, 'nms': True, 'nms_thresh': 0.00, 'test_gid_list': test_gid_list, 'species_mapping': species_mapping, 'species_set': set(['!giraffe'])},
# {'label': 'Giraffe ! NMS 10%', 'grid' : False, 'algo': 'azure', 'config_filepath' : None, 'weight_filepath' : None, 'nms': True, 'nms_thresh': 0.10, 'test_gid_list': test_gid_list, 'species_mapping': species_mapping, 'species_set': set(['!giraffe'])},
# {'label': 'Giraffe ! NMS 20%', 'grid' : False, 'algo': 'azure', 'config_filepath' : None, 'weight_filepath' : None, 'nms': True, 'nms_thresh': 0.20, 'test_gid_list': test_gid_list, 'species_mapping': species_mapping, 'species_set': set(['!giraffe'])},
# {'label': 'Giraffe ! NMS 30%', 'grid' : False, 'algo': 'azure', 'config_filepath' : None, 'weight_filepath' : None, 'nms': True, 'nms_thresh': 0.30, 'test_gid_list': test_gid_list, 'species_mapping': species_mapping, 'species_set': set(['!giraffe'])},
# {'label': 'Giraffe ! NMS 40%', 'grid' : False, 'algo': 'azure', 'config_filepath' : None, 'weight_filepath' : None, 'nms': True, 'nms_thresh': 0.40, 'test_gid_list': test_gid_list, 'species_mapping': species_mapping, 'species_set': set(['!giraffe'])},
# {'label': 'Giraffe ! NMS 50%', 'grid' : False, 'algo': 'azure', 'config_filepath' : None, 'weight_filepath' : None, 'nms': True, 'nms_thresh': 0.50, 'test_gid_list': test_gid_list, 'species_mapping': species_mapping, 'species_set': set(['!giraffe'])},
# {'label': 'Giraffe ! NMS 60%', 'grid' : False, 'algo': 'azure', 'config_filepath' : None, 'weight_filepath' : None, 'nms': True, 'nms_thresh': 0.60, 'test_gid_list': test_gid_list, 'species_mapping': species_mapping, 'species_set': set(['!giraffe'])},
# {'label': 'Giraffe ! NMS 70%', 'grid' : False, 'algo': 'azure', 'config_filepath' : None, 'weight_filepath' : None, 'nms': True, 'nms_thresh': 0.70, 'test_gid_list': test_gid_list, 'species_mapping': species_mapping, 'species_set': set(['!giraffe'])},
# {'label': 'Giraffe ! NMS 80%', 'grid' : False, 'algo': 'azure', 'config_filepath' : None, 'weight_filepath' : None, 'nms': True, 'nms_thresh': 0.80, 'test_gid_list': test_gid_list, 'species_mapping': species_mapping, 'species_set': set(['!giraffe'])},
# {'label': 'Giraffe ! NMS 90%', 'grid' : False, 'algo': 'azure', 'config_filepath' : None, 'weight_filepath' : None, 'nms': True, 'nms_thresh': 0.90, 'test_gid_list': test_gid_list, 'species_mapping': species_mapping, 'species_set': set(['!giraffe'])},
# {'label': 'Giraffe ! NMS 100%', 'grid' : False, 'algo': 'azure', 'config_filepath' : None, 'weight_filepath' : None, 'nms': True, 'nms_thresh': 1.00, 'test_gid_list': test_gid_list, 'species_mapping': species_mapping, 'species_set': set(['!giraffe'])},
# ],
# {},
# ),
# 'ggr2-!zebra-azure': (
# [
# {'label': 'Zebra ! NMS 0%', 'grid' : False, 'algo': 'azure', 'config_filepath' : None, 'weight_filepath' : None, 'nms': True, 'nms_thresh': 0.00, 'test_gid_list': test_gid_list, 'species_mapping': species_mapping, 'species_set': set(['!zebra'])},
# {'label': 'Zebra ! NMS 10%', 'grid' : False, 'algo': 'azure', 'config_filepath' : None, 'weight_filepath' : None, 'nms': True, 'nms_thresh': 0.10, 'test_gid_list': test_gid_list, 'species_mapping': species_mapping, 'species_set': set(['!zebra'])},
# {'label': 'Zebra ! NMS 20%', 'grid' : False, 'algo': 'azure', 'config_filepath' : None, 'weight_filepath' : None, 'nms': True, 'nms_thresh': 0.20, 'test_gid_list': test_gid_list, 'species_mapping': species_mapping, 'species_set': set(['!zebra'])},
# {'label': 'Zebra ! NMS 30%', 'grid' : False, 'algo': 'azure', 'config_filepath' : None, 'weight_filepath' : None, 'nms': True, 'nms_thresh': 0.30, 'test_gid_list': test_gid_list, 'species_mapping': species_mapping, 'species_set': set(['!zebra'])},
# {'label': 'Zebra ! NMS 40%', 'grid' : False, 'algo': 'azure', 'config_filepath' : None, 'weight_filepath' : None, 'nms': True, 'nms_thresh': 0.40, 'test_gid_list': test_gid_list, 'species_mapping': species_mapping, 'species_set': set(['!zebra'])},
# {'label': 'Zebra ! NMS 50%', 'grid' : False, 'algo': 'azure', 'config_filepath' : None, 'weight_filepath' : None, 'nms': True, 'nms_thresh': 0.50, 'test_gid_list': test_gid_list, 'species_mapping': species_mapping, 'species_set': set(['!zebra'])},
# {'label': 'Zebra ! NMS 60%', 'grid' : False, 'algo': 'azure', 'config_filepath' : None, 'weight_filepath' : None, 'nms': True, 'nms_thresh': 0.60, 'test_gid_list': test_gid_list, 'species_mapping': species_mapping, 'species_set': set(['!zebra'])},
# {'label': 'Zebra ! NMS 70%', 'grid' : False, 'algo': 'azure', 'config_filepath' : None, 'weight_filepath' : None, 'nms': True, 'nms_thresh': 0.70, 'test_gid_list': test_gid_list, 'species_mapping': species_mapping, 'species_set': set(['!zebra'])},
# {'label': 'Zebra ! NMS 80%', 'grid' : False, 'algo': 'azure', 'config_filepath' : None, 'weight_filepath' : None, 'nms': True, 'nms_thresh': 0.80, 'test_gid_list': test_gid_list, 'species_mapping': species_mapping, 'species_set': set(['!zebra'])},
# {'label': 'Zebra ! NMS 90%', 'grid' : False, 'algo': 'azure', 'config_filepath' : None, 'weight_filepath' : None, 'nms': True, 'nms_thresh': 0.90, 'test_gid_list': test_gid_list, 'species_mapping': species_mapping, 'species_set': set(['!zebra'])},
# {'label': 'Zebra ! NMS 100%', 'grid' : False, 'algo': 'azure', 'config_filepath' : None, 'weight_filepath' : None, 'nms': True, 'nms_thresh': 1.00, 'test_gid_list': test_gid_list, 'species_mapping': species_mapping, 'species_set': set(['!zebra'])},
# ],
# {},
# ),
# 'lynx': (
# [
# {'label': 'Lynx NMS 0%', 'grid' : False, 'algo': 'lightnet', 'config_filepath' : 'lynx', 'weight_filepath' : 'lynx', 'nms': True, 'nms_thresh': 0.00, 'species_set' : set(['lynx'])},
# {'label': 'Lynx NMS 10%', 'grid' : False, 'algo': 'lightnet', 'config_filepath' : 'lynx', 'weight_filepath' : 'lynx', 'nms': True, 'nms_thresh': 0.10, 'species_set' : set(['lynx'])},
# {'label': 'Lynx NMS 20%', 'grid' : False, 'algo': 'lightnet', 'config_filepath' : 'lynx', 'weight_filepath' : 'lynx', 'nms': True, 'nms_thresh': 0.20, 'species_set' : set(['lynx'])},
# {'label': 'Lynx NMS 30%', 'grid' : False, 'algo': 'lightnet', 'config_filepath' : 'lynx', 'weight_filepath' : 'lynx', 'nms': True, 'nms_thresh': 0.30, 'species_set' : set(['lynx'])},
# {'label': 'Lynx NMS 40%', 'grid' : False, 'algo': 'lightnet', 'config_filepath' : 'lynx', 'weight_filepath' : 'lynx', 'nms': True, 'nms_thresh': 0.40, 'species_set' : set(['lynx'])},
# {'label': 'Lynx NMS 50%', 'grid' : False, 'algo': 'lightnet', 'config_filepath' : 'lynx', 'weight_filepath' : 'lynx', 'nms': True, 'nms_thresh': 0.50, 'species_set' : set(['lynx'])},
# {'label': 'Lynx NMS 60%', 'grid' : False, 'algo': 'lightnet', 'config_filepath' : 'lynx', 'weight_filepath' : 'lynx', 'nms': True, 'nms_thresh': 0.60, 'species_set' : set(['lynx'])},
# {'label': 'Lynx NMS 70%', 'grid' : False, 'algo': 'lightnet', 'config_filepath' : 'lynx', 'weight_filepath' : 'lynx', 'nms': True, 'nms_thresh': 0.70, 'species_set' : set(['lynx'])},
# {'label': 'Lynx NMS 80%', 'grid' : False, 'algo': 'lightnet', 'config_filepath' : 'lynx', 'weight_filepath' : 'lynx', 'nms': True, 'nms_thresh': 0.80, 'species_set' : set(['lynx'])},
# {'label': 'Lynx NMS 90%', 'grid' : False, 'algo': 'lightnet', 'config_filepath' : 'lynx', 'weight_filepath' : 'lynx', 'nms': True, 'nms_thresh': 0.90, 'species_set' : set(['lynx'])},
# {'label': 'Lynx NMS 100%', 'grid' : False, 'algo': 'lightnet', 'config_filepath' : 'lynx', 'weight_filepath' : 'lynx', 'nms': True, 'nms_thresh': 1.00, 'species_set' : set(['lynx'])},
# ],
# {},
# ),
# 'jaguar': (
# [
# {'label': 'Jaguar NMS 0%', 'grid' : False, 'algo': 'lightnet', 'config_filepath' : 'jaguar_v2', 'weight_filepath' : 'jaguar_v2', 'nms': True, 'nms_thresh': 0.00, 'species_set' : set(['jaguar'])},
# {'label': 'Jaguar NMS 10%', 'grid' : False, 'algo': 'lightnet', 'config_filepath' : 'jaguar_v2', 'weight_filepath' : 'jaguar_v2', 'nms': True, 'nms_thresh': 0.10, 'species_set' : set(['jaguar'])},
# {'label': 'Jaguar NMS 20%', 'grid' : False, 'algo': 'lightnet', 'config_filepath' : 'jaguar_v2', 'weight_filepath' : 'jaguar_v2', 'nms': True, 'nms_thresh': 0.20, 'species_set' : set(['jaguar'])},
# {'label': 'Jaguar NMS 30%', 'grid' : False, 'algo': 'lightnet', 'config_filepath' : 'jaguar_v2', 'weight_filepath' : 'jaguar_v2', 'nms': True, 'nms_thresh': 0.30, 'species_set' : set(['jaguar'])},
# {'label': 'Jaguar NMS 40%', 'grid' : False, 'algo': 'lightnet', 'config_filepath' : 'jaguar_v2', 'weight_filepath' : 'jaguar_v2', 'nms': True, 'nms_thresh': 0.40, 'species_set' : set(['jaguar'])},
# {'label': 'Jaguar NMS 50%', 'grid' : False, 'algo': 'lightnet', 'config_filepath' : 'jaguar_v2', 'weight_filepath' : 'jaguar_v2', 'nms': True, 'nms_thresh': 0.50, 'species_set' : set(['jaguar'])},
# {'label': 'Jaguar NMS 60%', 'grid' : False, 'algo': 'lightnet', 'config_filepath' : 'jaguar_v2', 'weight_filepath' : 'jaguar_v2', 'nms': True, 'nms_thresh': 0.60, 'species_set' : set(['jaguar'])},
# {'label': 'Jaguar NMS 70%', 'grid' : False, 'algo': 'lightnet', 'config_filepath' : 'jaguar_v2', 'weight_filepath' : 'jaguar_v2', 'nms': True, 'nms_thresh': 0.70, 'species_set' : set(['jaguar'])},
# {'label': 'Jaguar NMS 80%', 'grid' : False, 'algo': 'lightnet', 'config_filepath' : 'jaguar_v2', 'weight_filepath' : 'jaguar_v2', 'nms': True, 'nms_thresh': 0.80, 'species_set' : set(['jaguar'])},
# {'label': 'Jaguar NMS 90%', 'grid' : False, 'algo': 'lightnet', 'config_filepath' : 'jaguar_v2', 'weight_filepath' : 'jaguar_v2', 'nms': True, 'nms_thresh': 0.90, 'species_set' : set(['jaguar'])},
# {'label': 'Jaguar NMS 100%', 'grid' : False, 'algo': 'lightnet', 'config_filepath' : 'jaguar_v2', 'weight_filepath' : 'jaguar_v2', 'nms': True, 'nms_thresh': 1.00, 'species_set' : set(['jaguar'])},
# ],
# {},
# ),
# '!jaguar': (
# [
# {'label': 'Jaguar NMS 0%', 'grid' : False, 'algo': 'lightnet', 'config_filepath' : 'jaguar_v2', 'weight_filepath' : 'jaguar_v2', 'nms': True, 'nms_thresh': 0.00, 'species_set' : set(['!jaguar'])},
# {'label': 'Jaguar NMS 10%', 'grid' : False, 'algo': 'lightnet', 'config_filepath' : 'jaguar_v2', 'weight_filepath' : 'jaguar_v2', 'nms': True, 'nms_thresh': 0.10, 'species_set' : set(['!jaguar'])},
# {'label': 'Jaguar NMS 20%', 'grid' : False, 'algo': 'lightnet', 'config_filepath' : 'jaguar_v2', 'weight_filepath' : 'jaguar_v2', 'nms': True, 'nms_thresh': 0.20, 'species_set' : set(['!jaguar'])},
# {'label': 'Jaguar NMS 30%', 'grid' : False, 'algo': 'lightnet', 'config_filepath' : 'jaguar_v2', 'weight_filepath' : 'jaguar_v2', 'nms': True, 'nms_thresh': 0.30, 'species_set' : set(['!jaguar'])},
# {'label': 'Jaguar NMS 40%', 'grid' : False, 'algo': 'lightnet', 'config_filepath' : 'jaguar_v2', 'weight_filepath' : 'jaguar_v2', 'nms': True, 'nms_thresh': 0.40, 'species_set' : set(['!jaguar'])},
# {'label': 'Jaguar NMS 50%', 'grid' : False, 'algo': 'lightnet', 'config_filepath' : 'jaguar_v2', 'weight_filepath' : 'jaguar_v2', 'nms': True, 'nms_thresh': 0.50, 'species_set' : set(['!jaguar'])},
# {'label': 'Jaguar NMS 60%', 'grid' : False, 'algo': 'lightnet', 'config_filepath' : 'jaguar_v2', 'weight_filepath' : 'jaguar_v2', 'nms': True, 'nms_thresh': 0.60, 'species_set' : set(['!jaguar'])},
# {'label': 'Jaguar NMS 70%', 'grid' : False, 'algo': 'lightnet', 'config_filepath' : 'jaguar_v2', 'weight_filepath' : 'jaguar_v2', 'nms': True, 'nms_thresh': 0.70, 'species_set' : set(['!jaguar'])},
# {'label': 'Jaguar NMS 80%', 'grid' : False, 'algo': 'lightnet', 'config_filepath' : 'jaguar_v2', 'weight_filepath' : 'jaguar_v2', 'nms': True, 'nms_thresh': 0.80, 'species_set' : set(['!jaguar'])},
# {'label': 'Jaguar NMS 90%', 'grid' : False, 'algo': 'lightnet', 'config_filepath' : 'jaguar_v2', 'weight_filepath' : 'jaguar_v2', 'nms': True, 'nms_thresh': 0.90, 'species_set' : set(['!jaguar'])},
# {'label': 'Jaguar NMS 100%', 'grid' : False, 'algo': 'lightnet', 'config_filepath' : 'jaguar_v2', 'weight_filepath' : 'jaguar_v2', 'nms': True, 'nms_thresh': 1.00, 'species_set' : set(['!jaguar'])},
# ],
# {},
# ),
# 'manta': (
# [
# {'label': 'Manta NMS 0%', 'grid' : False, 'algo': 'lightnet', 'config_filepath' : 'manta', 'weight_filepath' : 'manta', 'nms': True, 'nms_thresh': 0.00, 'species_set' : set(['manta_ray_giant'])},
# {'label': 'Manta NMS 10%', 'grid' : False, 'algo': 'lightnet', 'config_filepath' : 'manta', 'weight_filepath' : 'manta', 'nms': True, 'nms_thresh': 0.10, 'species_set' : set(['manta_ray_giant'])},
# {'label': 'Manta NMS 20%', 'grid' : False, 'algo': 'lightnet', 'config_filepath' : 'manta', 'weight_filepath' : 'manta', 'nms': True, 'nms_thresh': 0.20, 'species_set' : set(['manta_ray_giant'])},
# {'label': 'Manta NMS 30%', 'grid' : False, 'algo': 'lightnet', 'config_filepath' : 'manta', 'weight_filepath' : 'manta', 'nms': True, 'nms_thresh': 0.30, 'species_set' : set(['manta_ray_giant'])},
# {'label': 'Manta NMS 40%', 'grid' : False, 'algo': 'lightnet', 'config_filepath' : 'manta', 'weight_filepath' : 'manta', 'nms': True, 'nms_thresh': 0.40, 'species_set' : set(['manta_ray_giant'])},
# {'label': 'Manta NMS 50%', 'grid' : False, 'algo': 'lightnet', 'config_filepath' : 'manta', 'weight_filepath' : 'manta', 'nms': True, 'nms_thresh': 0.50, 'species_set' : set(['manta_ray_giant'])},
# {'label': 'Manta NMS 60%', 'grid' : False, 'algo': 'lightnet', 'config_filepath' : 'manta', 'weight_filepath' : 'manta', 'nms': True, 'nms_thresh': 0.60, 'species_set' : set(['manta_ray_giant'])},
# {'label': 'Manta NMS 70%', 'grid' : False, 'algo': 'lightnet', 'config_filepath' : 'manta', 'weight_filepath' : 'manta', 'nms': True, 'nms_thresh': 0.70, 'species_set' : set(['manta_ray_giant'])},
# {'label': 'Manta NMS 80%', 'grid' : False, 'algo': 'lightnet', 'config_filepath' : 'manta', 'weight_filepath' : 'manta', 'nms': True, 'nms_thresh': 0.80, 'species_set' : set(['manta_ray_giant'])},
# {'label': 'Manta NMS 90%', 'grid' : False, 'algo': 'lightnet', 'config_filepath' : 'manta', 'weight_filepath' : 'manta', 'nms': True, 'nms_thresh': 0.90, 'species_set' : set(['manta_ray_giant'])},
# {'label': 'Manta NMS 100%', 'grid' : False, 'algo': 'lightnet', 'config_filepath' : 'manta', 'weight_filepath' : 'manta', 'nms': True, 'nms_thresh': 1.00, 'species_set' : set(['manta_ray_giant'])},
# ],
# {},
# ),
# '!manta': (
# [
# {'label': 'Manta NMS 0%', 'grid' : False, 'algo': 'lightnet', 'config_filepath' : 'manta', 'weight_filepath' : 'manta', 'nms': True, 'nms_thresh': 0.00, 'species_set' : set(['!manta_ray_giant'])},
# {'label': 'Manta NMS 10%', 'grid' : False, 'algo': 'lightnet', 'config_filepath' : 'manta', 'weight_filepath' : 'manta', 'nms': True, 'nms_thresh': 0.10, 'species_set' : set(['!manta_ray_giant'])},
# {'label': 'Manta NMS 20%', 'grid' : False, 'algo': 'lightnet', 'config_filepath' : 'manta', 'weight_filepath' : 'manta', 'nms': True, 'nms_thresh': 0.20, 'species_set' : set(['!manta_ray_giant'])},
# {'label': 'Manta NMS 30%', 'grid' : False, 'algo': 'lightnet', 'config_filepath' : 'manta', 'weight_filepath' : 'manta', 'nms': True, 'nms_thresh': 0.30, 'species_set' : set(['!manta_ray_giant'])},
# {'label': 'Manta NMS 40%', 'grid' : False, 'algo': 'lightnet', 'config_filepath' : 'manta', 'weight_filepath' : 'manta', 'nms': True, 'nms_thresh': 0.40, 'species_set' : set(['!manta_ray_giant'])},
# {'label': 'Manta NMS 50%', 'grid' : False, 'algo': 'lightnet', 'config_filepath' : 'manta', 'weight_filepath' : 'manta', 'nms': True, 'nms_thresh': 0.50, 'species_set' : set(['!manta_ray_giant'])},
# {'label': 'Manta NMS 60%', 'grid' : False, 'algo': 'lightnet', 'config_filepath' : 'manta', 'weight_filepath' : 'manta', 'nms': True, 'nms_thresh': 0.60, 'species_set' : set(['!manta_ray_giant'])},
# {'label': 'Manta NMS 70%', 'grid' : False, 'algo': 'lightnet', 'config_filepath' : 'manta', 'weight_filepath' : 'manta', 'nms': True, 'nms_thresh': 0.70, 'species_set' : set(['!manta_ray_giant'])},
# {'label': 'Manta NMS 80%', 'grid' : False, 'algo': 'lightnet', 'config_filepath' : 'manta', 'weight_filepath' : 'manta', 'nms': True, 'nms_thresh': 0.80, 'species_set' : set(['!manta_ray_giant'])},
# {'label': 'Manta NMS 90%', 'grid' : False, 'algo': 'lightnet', 'config_filepath' : 'manta', 'weight_filepath' : 'manta', 'nms': True, 'nms_thresh': 0.90, 'species_set' : set(['!manta_ray_giant'])},
# {'label': 'Manta NMS 100%', 'grid' : False, 'algo': 'lightnet', 'config_filepath' : 'manta', 'weight_filepath' : 'manta', 'nms': True, 'nms_thresh': 1.00, 'species_set' : set(['!manta_ray_giant'])},
# ],
# {},
# ),
# 'giraffe': (
# [
# {'label': 'Giraffe NMS 10%', 'grid' : False, 'algo': 'lightnet', 'config_filepath' : 'giraffe_v1', 'weight_filepath' : 'giraffe_v1', 'nms': True, 'nms_thresh': 0.10, 'species_set' : set(['giraffe_masai', 'giraffe_reticulated'])},
# {'label': 'Giraffe NMS 30%', 'grid' : False, 'algo': 'lightnet', 'config_filepath' : 'giraffe_v1', 'weight_filepath' : 'giraffe_v1', 'nms': True, 'nms_thresh': 0.30, 'species_set' : set(['giraffe_masai', 'giraffe_reticulated'])},
# {'label': 'Giraffe NMS 50%', 'grid' : False, 'algo': 'lightnet', 'config_filepath' : 'giraffe_v1', 'weight_filepath' : 'giraffe_v1', 'nms': True, 'nms_thresh': 0.50, 'species_set' : set(['giraffe_masai', 'giraffe_reticulated'])},
# {'label': 'Giraffe NMS 70%', 'grid' : False, 'algo': 'lightnet', 'config_filepath' : 'giraffe_v1', 'weight_filepath' : 'giraffe_v1', 'nms': True, 'nms_thresh': 0.70, 'species_set' : set(['giraffe_masai', 'giraffe_reticulated'])},
# {'label': 'Giraffe NMS 90%', 'grid' : False, 'algo': 'lightnet', 'config_filepath' : 'giraffe_v1', 'weight_filepath' : 'giraffe_v1', 'nms': True, 'nms_thresh': 0.90, 'species_set' : set(['giraffe_masai', 'giraffe_reticulated'])},
# {'label': 'Masai Giraffe NMS 10%', 'grid' : False, 'algo': 'lightnet', 'config_filepath' : 'giraffe_v1', 'weight_filepath' : 'giraffe_v1', 'nms': True, 'nms_thresh': 0.10, 'species_set' : set(['giraffe_masai'])},
# {'label': 'Masai Giraffe NMS 30%', 'grid' : False, 'algo': 'lightnet', 'config_filepath' : 'giraffe_v1', 'weight_filepath' : 'giraffe_v1', 'nms': True, 'nms_thresh': 0.30, 'species_set' : set(['giraffe_masai'])},
# {'label': 'Masai Giraffe NMS 50%', 'grid' : False, 'algo': 'lightnet', 'config_filepath' : 'giraffe_v1', 'weight_filepath' : 'giraffe_v1', 'nms': True, 'nms_thresh': 0.50, 'species_set' : set(['giraffe_masai'])},
# {'label': 'Masai Giraffe NMS 70%', 'grid' : False, 'algo': 'lightnet', 'config_filepath' : 'giraffe_v1', 'weight_filepath' : 'giraffe_v1', 'nms': True, 'nms_thresh': 0.70, 'species_set' : set(['giraffe_masai'])},
# {'label': 'Masai Giraffe NMS 90%', 'grid' : False, 'algo': 'lightnet', 'config_filepath' : 'giraffe_v1', 'weight_filepath' : 'giraffe_v1', 'nms': True, 'nms_thresh': 0.90, 'species_set' : set(['giraffe_masai'])},
# {'label': 'Reticulated Giraffe NMS 10%', 'grid' : False, 'algo': 'lightnet', 'config_filepath' : 'giraffe_v1', 'weight_filepath' : 'giraffe_v1', 'nms': True, 'nms_thresh': 0.10, 'species_set' : set(['giraffe_reticulated'])},
# {'label': 'Reticulated Giraffe NMS 30%', 'grid' : False, 'algo': 'lightnet', 'config_filepath' : 'giraffe_v1', 'weight_filepath' : 'giraffe_v1', 'nms': True, 'nms_thresh': 0.30, 'species_set' : set(['giraffe_reticulated'])},
# {'label': 'Reticulated Giraffe NMS 50%', 'grid' : False, 'algo': 'lightnet', 'config_filepath' : 'giraffe_v1', 'weight_filepath' : 'giraffe_v1', 'nms': True, 'nms_thresh': 0.50, 'species_set' : set(['giraffe_reticulated'])},
# {'label': 'Reticulated Giraffe NMS 70%', 'grid' : False, 'algo': 'lightnet', 'config_filepath' : 'giraffe_v1', 'weight_filepath' : 'giraffe_v1', 'nms': True, 'nms_thresh': 0.70, 'species_set' : set(['giraffe_reticulated'])},
# {'label': 'Reticulated Giraffe NMS 90%', 'grid' : False, 'algo': 'lightnet', 'config_filepath' : 'giraffe_v1', 'weight_filepath' : 'giraffe_v1', 'nms': True, 'nms_thresh': 0.90, 'species_set' : set(['giraffe_reticulated'])},
# ],
# {},
# ),
# 'spotted_skunk_v0': (
# [
# {'label': 'Spotted Skunk NMS 0%', 'grid' : False, 'algo': 'lightnet', 'config_filepath' : 'spotted_skunk_v0', 'weight_filepath' : 'spotted_skunk_v0', 'nms': True, 'nms_thresh': 0.00, 'species_set' : set(['skunk_spotted'])},
# {'label': 'Spotted Skunk NMS 10%', 'grid' : False, 'algo': 'lightnet', 'config_filepath' : 'spotted_skunk_v0', 'weight_filepath' : 'spotted_skunk_v0', 'nms': True, 'nms_thresh': 0.10, 'species_set' : set(['skunk_spotted'])},
# {'label': 'Spotted Skunk NMS 20%', 'grid' : False, 'algo': 'lightnet', 'config_filepath' : 'spotted_skunk_v0', 'weight_filepath' : 'spotted_skunk_v0', 'nms': True, 'nms_thresh': 0.20, 'species_set' : set(['skunk_spotted'])},
# {'label': 'Spotted Skunk NMS 30%', 'grid' : False, 'algo': 'lightnet', 'config_filepath' : 'spotted_skunk_v0', 'weight_filepath' : 'spotted_skunk_v0', 'nms': True, 'nms_thresh': 0.30, 'species_set' : set(['skunk_spotted'])},
# {'label': 'Spotted Skunk NMS 40%', 'grid' : False, 'algo': 'lightnet', 'config_filepath' : 'spotted_skunk_v0', 'weight_filepath' : 'spotted_skunk_v0', 'nms': True, 'nms_thresh': 0.40, 'species_set' : set(['skunk_spotted'])},
# {'label': 'Spotted Skunk NMS 50%', 'grid' : False, 'algo': 'lightnet', 'config_filepath' : 'spotted_skunk_v0', 'weight_filepath' : 'spotted_skunk_v0', 'nms': True, 'nms_thresh': 0.50, 'species_set' : set(['skunk_spotted'])},
# {'label': 'Spotted Skunk NMS 60%', 'grid' : False, 'algo': 'lightnet', 'config_filepath' : 'spotted_skunk_v0', 'weight_filepath' : 'spotted_skunk_v0', 'nms': True, 'nms_thresh': 0.60, 'species_set' : set(['skunk_spotted'])},
# {'label': 'Spotted Skunk NMS 70%', 'grid' : False, 'algo': 'lightnet', 'config_filepath' : 'spotted_skunk_v0', 'weight_filepath' : 'spotted_skunk_v0', 'nms': True, 'nms_thresh': 0.70, 'species_set' : set(['skunk_spotted'])},
# {'label': 'Spotted Skunk NMS 80%', 'grid' : False, 'algo': 'lightnet', 'config_filepath' : 'spotted_skunk_v0', 'weight_filepath' : 'spotted_skunk_v0', 'nms': True, 'nms_thresh': 0.80, 'species_set' : set(['skunk_spotted'])},
# {'label': 'Spotted Skunk NMS 90%', 'grid' : False, 'algo': 'lightnet', 'config_filepath' : 'spotted_skunk_v0', 'weight_filepath' : 'spotted_skunk_v0', 'nms': True, 'nms_thresh': 0.90, 'species_set' : set(['skunk_spotted'])},
# {'label': 'Spotted Skunk NMS 100%', 'grid' : False, 'algo': 'lightnet', 'config_filepath' : 'spotted_skunk_v0', 'weight_filepath' : 'spotted_skunk_v0', 'nms': True, 'nms_thresh': 1.00, 'species_set' : set(['skunk_spotted'])},
# ],
# {},
# ),
# '!spotted_skunk_v0': (
# [
# {'label': 'Spotted Skunk NMS 0%', 'grid' : False, 'algo': 'lightnet', 'config_filepath' : 'spotted_skunk_v0', 'weight_filepath' : 'spotted_skunk_v0', 'nms': True, 'nms_thresh': 0.00, 'species_set' : set(['!skunk_spotted'])},
# {'label': 'Spotted Skunk NMS 10%', 'grid' : False, 'algo': 'lightnet', 'config_filepath' : 'spotted_skunk_v0', 'weight_filepath' : 'spotted_skunk_v0', 'nms': True, 'nms_thresh': 0.10, 'species_set' : set(['!skunk_spotted'])},
# {'label': 'Spotted Skunk NMS 20%', 'grid' : False, 'algo': 'lightnet', 'config_filepath' : 'spotted_skunk_v0', 'weight_filepath' : 'spotted_skunk_v0', 'nms': True, 'nms_thresh': 0.20, 'species_set' : set(['!skunk_spotted'])},
# {'label': 'Spotted Skunk NMS 30%', 'grid' : False, 'algo': 'lightnet', 'config_filepath' : 'spotted_skunk_v0', 'weight_filepath' : 'spotted_skunk_v0', 'nms': True, 'nms_thresh': 0.30, 'species_set' : set(['!skunk_spotted'])},
# {'label': 'Spotted Skunk NMS 40%', 'grid' : False, 'algo': 'lightnet', 'config_filepath' : 'spotted_skunk_v0', 'weight_filepath' : 'spotted_skunk_v0', 'nms': True, 'nms_thresh': 0.40, 'species_set' : set(['!skunk_spotted'])},
# {'label': 'Spotted Skunk NMS 50%', 'grid' : False, 'algo': 'lightnet', 'config_filepath' : 'spotted_skunk_v0', 'weight_filepath' : 'spotted_skunk_v0', 'nms': True, 'nms_thresh': 0.50, 'species_set' : set(['!skunk_spotted'])},
# {'label': 'Spotted Skunk NMS 60%', 'grid' : False, 'algo': 'lightnet', 'config_filepath' : 'spotted_skunk_v0', 'weight_filepath' : 'spotted_skunk_v0', 'nms': True, 'nms_thresh': 0.60, 'species_set' : set(['!skunk_spotted'])},
# {'label': 'Spotted Skunk NMS 70%', 'grid' : False, 'algo': 'lightnet', 'config_filepath' : 'spotted_skunk_v0', 'weight_filepath' : 'spotted_skunk_v0', 'nms': True, 'nms_thresh': 0.70, 'species_set' : set(['!skunk_spotted'])},
# {'label': 'Spotted Skunk NMS 80%', 'grid' : False, 'algo': 'lightnet', 'config_filepath' : 'spotted_skunk_v0', 'weight_filepath' : 'spotted_skunk_v0', 'nms': True, 'nms_thresh': 0.80, 'species_set' : set(['!skunk_spotted'])},
# {'label': 'Spotted Skunk NMS 90%', 'grid' : False, 'algo': 'lightnet', 'config_filepath' : 'spotted_skunk_v0', 'weight_filepath' : 'spotted_skunk_v0', 'nms': True, 'nms_thresh': 0.90, 'species_set' : set(['!skunk_spotted'])},
# {'label': 'Spotted Skunk NMS 100%', 'grid' : False, 'algo': 'lightnet', 'config_filepath' : 'spotted_skunk_v0', 'weight_filepath' : 'spotted_skunk_v0', 'nms': True, 'nms_thresh': 1.00, 'species_set' : set(['!skunk_spotted'])},
# ],
# {},
# ),
# 'nassau_grouper_v0': (
# [
# {'label': 'Nassau Grouper NMS 0%', 'grid' : False, 'algo': 'lightnet', 'config_filepath' : 'nassau_grouper_v0', 'weight_filepath' : 'nassau_grouper_v0', 'nms': True, 'nms_thresh': 0.00, 'species_set' : set(['grouper_nassau'])},
# {'label': 'Nassau Grouper NMS 10%', 'grid' : False, 'algo': 'lightnet', 'config_filepath' : 'nassau_grouper_v0', 'weight_filepath' : 'nassau_grouper_v0', 'nms': True, 'nms_thresh': 0.10, 'species_set' : set(['grouper_nassau'])},
# {'label': 'Nassau Grouper NMS 20%', 'grid' : False, 'algo': 'lightnet', 'config_filepath' : 'nassau_grouper_v0', 'weight_filepath' : 'nassau_grouper_v0', 'nms': True, 'nms_thresh': 0.20, 'species_set' : set(['grouper_nassau'])},
# {'label': 'Nassau Grouper NMS 30%', 'grid' : False, 'algo': 'lightnet', 'config_filepath' : 'nassau_grouper_v0', 'weight_filepath' : 'nassau_grouper_v0', 'nms': True, 'nms_thresh': 0.30, 'species_set' : set(['grouper_nassau'])},
# {'label': 'Nassau Grouper NMS 40%', 'grid' : False, 'algo': 'lightnet', 'config_filepath' : 'nassau_grouper_v0', 'weight_filepath' : 'nassau_grouper_v0', 'nms': True, 'nms_thresh': 0.40, 'species_set' : set(['grouper_nassau'])},
# {'label': 'Nassau Grouper NMS 50%', 'grid' : False, 'algo': 'lightnet', 'config_filepath' : 'nassau_grouper_v0', 'weight_filepath' : 'nassau_grouper_v0', 'nms': True, 'nms_thresh': 0.50, 'species_set' : set(['grouper_nassau'])},
# {'label': 'Nassau Grouper NMS 60%', 'grid' : False, 'algo': 'lightnet', 'config_filepath' : 'nassau_grouper_v0', 'weight_filepath' : 'nassau_grouper_v0', 'nms': True, 'nms_thresh': 0.60, 'species_set' : set(['grouper_nassau'])},
# {'label': 'Nassau Grouper NMS 70%', 'grid' : False, 'algo': 'lightnet', 'config_filepath' : 'nassau_grouper_v0', 'weight_filepath' : 'nassau_grouper_v0', 'nms': True, 'nms_thresh': 0.70, 'species_set' : set(['grouper_nassau'])},
# {'label': 'Nassau Grouper NMS 80%', 'grid' : False, 'algo': 'lightnet', 'config_filepath' : 'nassau_grouper_v0', 'weight_filepath' : 'nassau_grouper_v0', 'nms': True, 'nms_thresh': 0.80, 'species_set' : set(['grouper_nassau'])},
# {'label': 'Nassau Grouper NMS 90%', 'grid' : False, 'algo': 'lightnet', 'config_filepath' : 'nassau_grouper_v0', 'weight_filepath' : 'nassau_grouper_v0', 'nms': True, 'nms_thresh': 0.90, 'species_set' : set(['grouper_nassau'])},
# {'label': 'Nassau Grouper NMS 100%', 'grid' : False, 'algo': 'lightnet', 'config_filepath' : 'nassau_grouper_v0', 'weight_filepath' : 'nassau_grouper_v0', 'nms': True, 'nms_thresh': 1.00, 'species_set' : set(['grouper_nassau'])},
# ],
# {},
# ),
# '!nassau_grouper_v0': (
# [
# {'label': 'Nassau Grouper! NMS 0%', 'grid' : False, 'algo': 'lightnet', 'config_filepath' : 'nassau_grouper_v0', 'weight_filepath' : 'nassau_grouper_v0', 'nms': True, 'nms_thresh': 0.00, 'species_set' : set(['!grouper_nassau'])},
# {'label': 'Nassau Grouper! NMS 10%', 'grid' : False, 'algo': 'lightnet', 'config_filepath' : 'nassau_grouper_v0', 'weight_filepath' : 'nassau_grouper_v0', 'nms': True, 'nms_thresh': 0.10, 'species_set' : set(['!grouper_nassau'])},
# {'label': 'Nassau Grouper! NMS 20%', 'grid' : False, 'algo': 'lightnet', 'config_filepath' : 'nassau_grouper_v0', 'weight_filepath' : 'nassau_grouper_v0', 'nms': True, 'nms_thresh': 0.20, 'species_set' : set(['!grouper_nassau'])},
# {'label': 'Nassau Grouper! NMS 30%', 'grid' : False, 'algo': 'lightnet', 'config_filepath' : 'nassau_grouper_v0', 'weight_filepath' : 'nassau_grouper_v0', 'nms': True, 'nms_thresh': 0.30, 'species_set' : set(['!grouper_nassau'])},
# {'label': 'Nassau Grouper! NMS 40%', 'grid' : False, 'algo': 'lightnet', 'config_filepath' : 'nassau_grouper_v0', 'weight_filepath' : 'nassau_grouper_v0', 'nms': True, 'nms_thresh': 0.40, 'species_set' : set(['!grouper_nassau'])},
# {'label': 'Nassau Grouper! NMS 50%', 'grid' : False, 'algo': 'lightnet', 'config_filepath' : 'nassau_grouper_v0', 'weight_filepath' : 'nassau_grouper_v0', 'nms': True, 'nms_thresh': 0.50, 'species_set' : set(['!grouper_nassau'])},
# {'label': 'Nassau Grouper! NMS 60%', 'grid' : False, 'algo': 'lightnet', 'config_filepath' : 'nassau_grouper_v0', 'weight_filepath' : 'nassau_grouper_v0', 'nms': True, 'nms_thresh': 0.60, 'species_set' : set(['!grouper_nassau'])},
# {'label': 'Nassau Grouper! NMS 70%', 'grid' : False, 'algo': 'lightnet', 'config_filepath' : 'nassau_grouper_v0', 'weight_filepath' : 'nassau_grouper_v0', 'nms': True, 'nms_thresh': 0.70, 'species_set' : set(['!grouper_nassau'])},
# {'label': 'Nassau Grouper! NMS 80%', 'grid' : False, 'algo': 'lightnet', 'config_filepath' : 'nassau_grouper_v0', 'weight_filepath' : 'nassau_grouper_v0', 'nms': True, 'nms_thresh': 0.80, 'species_set' : set(['!grouper_nassau'])},
# {'label': 'Nassau Grouper! NMS 90%', 'grid' : False, 'algo': 'lightnet', 'config_filepath' : 'nassau_grouper_v0', 'weight_filepath' : 'nassau_grouper_v0', 'nms': True, 'nms_thresh': 0.90, 'species_set' : set(['!grouper_nassau'])},
# {'label': 'Nassau Grouper! NMS 100%', 'grid' : False, 'algo': 'lightnet', 'config_filepath' : 'nassau_grouper_v0', 'weight_filepath' : 'nassau_grouper_v0', 'nms': True, 'nms_thresh': 1.00, 'species_set' : set(['!grouper_nassau'])},
# ],
# {},
# ),
# 'spotted_dolphin_v0': (
# [
# {'label': 'Spotted Dolphin NMS 0%', 'grid' : False, 'algo': 'lightnet', 'config_filepath' : 'spotted_dolphin_v0', 'weight_filepath' : 'spotted_dolphin_v0', 'nms': True, 'nms_thresh': 0.00, 'species_set' : set(['dolphin_spotted'])},
# {'label': 'Spotted Dolphin NMS 10%', 'grid' : False, 'algo': 'lightnet', 'config_filepath' : 'spotted_dolphin_v0', 'weight_filepath' : 'spotted_dolphin_v0', 'nms': True, 'nms_thresh': 0.10, 'species_set' : set(['dolphin_spotted'])},
# {'label': 'Spotted Dolphin NMS 20%', 'grid' : False, 'algo': 'lightnet', 'config_filepath' : 'spotted_dolphin_v0', 'weight_filepath' : 'spotted_dolphin_v0', 'nms': True, 'nms_thresh': 0.20, 'species_set' : set(['dolphin_spotted'])},
# {'label': 'Spotted Dolphin NMS 30%', 'grid' : False, 'algo': 'lightnet', 'config_filepath' : 'spotted_dolphin_v0', 'weight_filepath' : 'spotted_dolphin_v0', 'nms': True, 'nms_thresh': 0.30, 'species_set' : set(['dolphin_spotted'])},
# {'label': 'Spotted Dolphin NMS 40%', 'grid' : False, 'algo': 'lightnet', 'config_filepath' : 'spotted_dolphin_v0', 'weight_filepath' : 'spotted_dolphin_v0', 'nms': True, 'nms_thresh': 0.40, 'species_set' : set(['dolphin_spotted'])},
# {'label': 'Spotted Dolphin NMS 50%', 'grid' : False, 'algo': 'lightnet', 'config_filepath' : 'spotted_dolphin_v0', 'weight_filepath' : 'spotted_dolphin_v0', 'nms': True, 'nms_thresh': 0.50, 'species_set' : set(['dolphin_spotted'])},
# {'label': 'Spotted Dolphin NMS 60%', 'grid' : False, 'algo': 'lightnet', 'config_filepath' : 'spotted_dolphin_v0', 'weight_filepath' : 'spotted_dolphin_v0', 'nms': True, 'nms_thresh': 0.60, 'species_set' : set(['dolphin_spotted'])},
# {'label': 'Spotted Dolphin NMS 70%', 'grid' : False, 'algo': 'lightnet', 'config_filepath' : 'spotted_dolphin_v0', 'weight_filepath' : 'spotted_dolphin_v0', 'nms': True, 'nms_thresh': 0.70, 'species_set' : set(['dolphin_spotted'])},
# {'label': 'Spotted Dolphin NMS 80%', 'grid' : False, 'algo': 'lightnet', 'config_filepath' : 'spotted_dolphin_v0', 'weight_filepath' : 'spotted_dolphin_v0', 'nms': True, 'nms_thresh': 0.80, 'species_set' : set(['dolphin_spotted'])},
# {'label': 'Spotted Dolphin NMS 90%', 'grid' : False, 'algo': 'lightnet', 'config_filepath' : 'spotted_dolphin_v0', 'weight_filepath' : 'spotted_dolphin_v0', 'nms': True, 'nms_thresh': 0.90, 'species_set' : set(['dolphin_spotted'])},
# {'label': 'Spotted Dolphin NMS 100%', 'grid' : False, 'algo': 'lightnet', 'config_filepath' : 'spotted_dolphin_v0', 'weight_filepath' : 'spotted_dolphin_v0', 'nms': True, 'nms_thresh': 1.00, 'species_set' : set(['dolphin_spotted'])},
# ],
# {},
# ),
# '!spotted_dolphin_v0': (
# [
# {'label': 'Spotted Dolphin! NMS 0%', 'grid' : False, 'algo': 'lightnet', 'config_filepath' : 'spotted_dolphin_v0', 'weight_filepath' : 'spotted_dolphin_v0', 'nms': True, 'nms_thresh': 0.00, 'species_set' : set(['!dolphin_spotted'])},
# {'label': 'Spotted Dolphin! NMS 10%', 'grid' : False, 'algo': 'lightnet', 'config_filepath' : 'spotted_dolphin_v0', 'weight_filepath' : 'spotted_dolphin_v0', 'nms': True, 'nms_thresh': 0.10, 'species_set' : set(['!dolphin_spotted'])},
# {'label': 'Spotted Dolphin! NMS 20%', 'grid' : False, 'algo': 'lightnet', 'config_filepath' : 'spotted_dolphin_v0', 'weight_filepath' : 'spotted_dolphin_v0', 'nms': True, 'nms_thresh': 0.20, 'species_set' : set(['!dolphin_spotted'])},
# {'label': 'Spotted Dolphin! NMS 30%', 'grid' : False, 'algo': 'lightnet', 'config_filepath' : 'spotted_dolphin_v0', 'weight_filepath' : 'spotted_dolphin_v0', 'nms': True, 'nms_thresh': 0.30, 'species_set' : set(['!dolphin_spotted'])},
# {'label': 'Spotted Dolphin! NMS 40%', 'grid' : False, 'algo': 'lightnet', 'config_filepath' : 'spotted_dolphin_v0', 'weight_filepath' : 'spotted_dolphin_v0', 'nms': True, 'nms_thresh': 0.40, 'species_set' : set(['!dolphin_spotted'])},
# {'label': 'Spotted Dolphin! NMS 50%', 'grid' : False, 'algo': 'lightnet', 'config_filepath' : 'spotted_dolphin_v0', 'weight_filepath' : 'spotted_dolphin_v0', 'nms': True, 'nms_thresh': 0.50, 'species_set' : set(['!dolphin_spotted'])},
# {'label': 'Spotted Dolphin! NMS 60%', 'grid' : False, 'algo': 'lightnet', 'config_filepath' : 'spotted_dolphin_v0', 'weight_filepath' : 'spotted_dolphin_v0', 'nms': True, 'nms_thresh': 0.60, 'species_set' : set(['!dolphin_spotted'])},
# {'label': 'Spotted Dolphin! NMS 70%', 'grid' : False, 'algo': 'lightnet', 'config_filepath' : 'spotted_dolphin_v0', 'weight_filepath' : 'spotted_dolphin_v0', 'nms': True, 'nms_thresh': 0.70, 'species_set' : set(['!dolphin_spotted'])},
# {'label': 'Spotted Dolphin! NMS 80%', 'grid' : False, 'algo': 'lightnet', 'config_filepath' : 'spotted_dolphin_v0', 'weight_filepath' : 'spotted_dolphin_v0', 'nms': True, 'nms_thresh': 0.80, 'species_set' : set(['!dolphin_spotted'])},
# {'label': 'Spotted Dolphin! NMS 90%', 'grid' : False, 'algo': 'lightnet', 'config_filepath' : 'spotted_dolphin_v0', 'weight_filepath' : 'spotted_dolphin_v0', 'nms': True, 'nms_thresh': 0.90, 'species_set' : set(['!dolphin_spotted'])},
# {'label': 'Spotted Dolphin! NMS 100%', 'grid' : False, 'algo': 'lightnet', 'config_filepath' : 'spotted_dolphin_v0', 'weight_filepath' : 'spotted_dolphin_v0', 'nms': True, 'nms_thresh': 1.00, 'species_set' : set(['!dolphin_spotted'])},
# ],
# {},
# ),
'seadragon_weedy_v1': (
[
{'label': 'Weedy Body NMS 0%', 'grid' : False, 'algo': 'lightnet', 'config_filepath' : 'seadragon_v1', 'weight_filepath' : 'seadragon_v1', 'nms': True, 'nms_thresh': 0.00, 'species_set' : set(['seadragon_weedy'])},
{'label': 'Weedy Body NMS 10%', 'grid' : False, 'algo': 'lightnet', 'config_filepath' : 'seadragon_v1', 'weight_filepath' : 'seadragon_v1', 'nms': True, 'nms_thresh': 0.10, 'species_set' : set(['seadragon_weedy'])},
{'label': 'Weedy Body NMS 20%', 'grid' : False, 'algo': 'lightnet', 'config_filepath' : 'seadragon_v1', 'weight_filepath' : 'seadragon_v1', 'nms': True, 'nms_thresh': 0.20, 'species_set' : set(['seadragon_weedy'])},
{'label': 'Weedy Body NMS 30%', 'grid' : False, 'algo': 'lightnet', 'config_filepath' : 'seadragon_v1', 'weight_filepath' : 'seadragon_v1', 'nms': True, 'nms_thresh': 0.30, 'species_set' : set(['seadragon_weedy'])},
{'label': 'Weedy Body NMS 40%', 'grid' : False, 'algo': 'lightnet', 'config_filepath' : 'seadragon_v1', 'weight_filepath' : 'seadragon_v1', 'nms': True, 'nms_thresh': 0.40, 'species_set' : set(['seadragon_weedy'])},
{'label': 'Weedy Body NMS 50%', 'grid' : False, 'algo': 'lightnet', 'config_filepath' : 'seadragon_v1', 'weight_filepath' : 'seadragon_v1', 'nms': True, 'nms_thresh': 0.50, 'species_set' : set(['seadragon_weedy'])},
{'label': 'Weedy Body NMS 60%', 'grid' : False, 'algo': 'lightnet', 'config_filepath' : 'seadragon_v1', 'weight_filepath' : 'seadragon_v1', 'nms': True, 'nms_thresh': 0.60, 'species_set' : set(['seadragon_weedy'])},
{'label': 'Weedy Body NMS 70%', 'grid' : False, 'algo': 'lightnet', 'config_filepath' : 'seadragon_v1', 'weight_filepath' : 'seadragon_v1', 'nms': True, 'nms_thresh': 0.70, 'species_set' : set(['seadragon_weedy'])},
{'label': 'Weedy Body NMS 80%', 'grid' : False, 'algo': 'lightnet', 'config_filepath' : 'seadragon_v1', 'weight_filepath' : 'seadragon_v1', 'nms': True, 'nms_thresh': 0.80, 'species_set' : set(['seadragon_weedy'])},
{'label': 'Weedy Body NMS 90%', 'grid' : False, 'algo': 'lightnet', 'config_filepath' : 'seadragon_v1', 'weight_filepath' : 'seadragon_v1', 'nms': True, 'nms_thresh': 0.90, 'species_set' : set(['seadragon_weedy'])},
{'label': 'Weedy Body NMS 100%', 'grid' : False, 'algo': 'lightnet', 'config_filepath' : 'seadragon_v1', 'weight_filepath' : 'seadragon_v1', 'nms': True, 'nms_thresh': 1.00, 'species_set' : set(['seadragon_weedy'])},
],
{},
),
'seadragon_leafy_v1': (
[
{'label': 'Leafy Body NMS 0%', 'grid' : False, 'algo': 'lightnet', 'config_filepath' : 'seadragon_v1', 'weight_filepath' : 'seadragon_v1', 'nms': True, 'nms_thresh': 0.00, 'species_set' : set(['seadragon_leafy'])},
{'label': 'Leafy Body NMS 10%', 'grid' : False, 'algo': 'lightnet', 'config_filepath' : 'seadragon_v1', 'weight_filepath' : 'seadragon_v1', 'nms': True, 'nms_thresh': 0.10, 'species_set' : set(['seadragon_leafy'])},
{'label': 'Leafy Body NMS 20%', 'grid' : False, 'algo': 'lightnet', 'config_filepath' : 'seadragon_v1', 'weight_filepath' : 'seadragon_v1', 'nms': True, 'nms_thresh': 0.20, 'species_set' : set(['seadragon_leafy'])},
{'label': 'Leafy Body NMS 30%', 'grid' : False, 'algo': 'lightnet', 'config_filepath' : 'seadragon_v1', 'weight_filepath' : 'seadragon_v1', 'nms': True, 'nms_thresh': 0.30, 'species_set' : set(['seadragon_leafy'])},
{'label': 'Leafy Body NMS 40%', 'grid' : False, 'algo': 'lightnet', 'config_filepath' : 'seadragon_v1', 'weight_filepath' : 'seadragon_v1', 'nms': True, 'nms_thresh': 0.40, 'species_set' : set(['seadragon_leafy'])},
{'label': 'Leafy Body NMS 50%', 'grid' : False, 'algo': 'lightnet', 'config_filepath' : 'seadragon_v1', 'weight_filepath' : 'seadragon_v1', 'nms': True, 'nms_thresh': 0.50, 'species_set' : set(['seadragon_leafy'])},
{'label': 'Leafy Body NMS 60%', 'grid' : False, 'algo': 'lightnet', 'config_filepath' : 'seadragon_v1', 'weight_filepath' : 'seadragon_v1', 'nms': True, 'nms_thresh': 0.60, 'species_set' : set(['seadragon_leafy'])},
{'label': 'Leafy Body NMS 70%', 'grid' : False, 'algo': 'lightnet', 'config_filepath' : 'seadragon_v1', 'weight_filepath' : 'seadragon_v1', 'nms': True, 'nms_thresh': 0.70, 'species_set' : set(['seadragon_leafy'])},
{'label': 'Leafy Body NMS 80%', 'grid' : False, 'algo': 'lightnet', 'config_filepath' : 'seadragon_v1', 'weight_filepath' : 'seadragon_v1', 'nms': True, 'nms_thresh': 0.80, 'species_set' : set(['seadragon_leafy'])},
{'label': 'Leafy Body NMS 90%', 'grid' : False, 'algo': 'lightnet', 'config_filepath' : 'seadragon_v1', 'weight_filepath' : 'seadragon_v1', 'nms': True, 'nms_thresh': 0.90, 'species_set' : set(['seadragon_leafy'])},
{'label': 'Leafy Body NMS 100%', 'grid' : False, 'algo': 'lightnet', 'config_filepath' : 'seadragon_v1', 'weight_filepath' : 'seadragon_v1', 'nms': True, 'nms_thresh': 1.00, 'species_set' : set(['seadragon_leafy'])},
],
{},
),
'seadragon_weedy_head_v1': (
[
{'label': 'Weedy Head NMS 0%', 'grid' : False, 'algo': 'lightnet', 'config_filepath' : 'seadragon_v1', 'weight_filepath' : 'seadragon_v1', 'nms': True, 'nms_thresh': 0.00, 'species_set' : set(['seadragon_weedy+head'])},
{'label': 'Weedy Head NMS 10%', 'grid' : False, 'algo': 'lightnet', 'config_filepath' : 'seadragon_v1', 'weight_filepath' : 'seadragon_v1', 'nms': True, 'nms_thresh': 0.10, 'species_set' : set(['seadragon_weedy+head'])},
{'label': 'Weedy Head NMS 20%', 'grid' : False, 'algo': 'lightnet', 'config_filepath' : 'seadragon_v1', 'weight_filepath' : 'seadragon_v1', 'nms': True, 'nms_thresh': 0.20, 'species_set' : set(['seadragon_weedy+head'])},
{'label': 'Weedy Head NMS 30%', 'grid' : False, 'algo': 'lightnet', 'config_filepath' : 'seadragon_v1', 'weight_filepath' : 'seadragon_v1', 'nms': True, 'nms_thresh': 0.30, 'species_set' : set(['seadragon_weedy+head'])},
{'label': 'Weedy Head NMS 40%', 'grid' : False, 'algo': 'lightnet', 'config_filepath' : 'seadragon_v1', 'weight_filepath' : 'seadragon_v1', 'nms': True, 'nms_thresh': 0.40, 'species_set' : set(['seadragon_weedy+head'])},
{'label': 'Weedy Head NMS 50%', 'grid' : False, 'algo': 'lightnet', 'config_filepath' : 'seadragon_v1', 'weight_filepath' : 'seadragon_v1', 'nms': True, 'nms_thresh': 0.50, 'species_set' : set(['seadragon_weedy+head'])},
{'label': 'Weedy Head NMS 60%', 'grid' : False, 'algo': 'lightnet', 'config_filepath' : 'seadragon_v1', 'weight_filepath' : 'seadragon_v1', 'nms': True, 'nms_thresh': 0.60, 'species_set' : set(['seadragon_weedy+head'])},
{'label': 'Weedy Head NMS 70%', 'grid' : False, 'algo': 'lightnet', 'config_filepath' : 'seadragon_v1', 'weight_filepath' : 'seadragon_v1', 'nms': True, 'nms_thresh': 0.70, 'species_set' : set(['seadragon_weedy+head'])},
{'label': 'Weedy Head NMS 80%', 'grid' : False, 'algo': 'lightnet', 'config_filepath' : 'seadragon_v1', 'weight_filepath' : 'seadragon_v1', 'nms': True, 'nms_thresh': 0.80, 'species_set' : set(['seadragon_weedy+head'])},
{'label': 'Weedy Head NMS 90%', 'grid' : False, 'algo': 'lightnet', 'config_filepath' : 'seadragon_v1', 'weight_filepath' : 'seadragon_v1', 'nms': True, 'nms_thresh': 0.90, 'species_set' : set(['seadragon_weedy+head'])},
{'label': 'Weedy Head NMS 100%', 'grid' : False, 'algo': 'lightnet', 'config_filepath' : 'seadragon_v1', 'weight_filepath' : 'seadragon_v1', 'nms': True, 'nms_thresh': 1.00, 'species_set' : set(['seadragon_weedy+head'])},
],
{},
),
'seadragon_leafy_head_v1': (
[
{'label': 'Leafy Head NMS 0%', 'grid' : False, 'algo': 'lightnet', 'config_filepath' : 'seadragon_v1', 'weight_filepath' : 'seadragon_v1', 'nms': True, 'nms_thresh': 0.00, 'species_set' : set(['seadragon_leafy+head'])},
{'label': 'Leafy Head NMS 10%', 'grid' : False, 'algo': 'lightnet', 'config_filepath' : 'seadragon_v1', 'weight_filepath' : 'seadragon_v1', 'nms': True, 'nms_thresh': 0.10, 'species_set' : set(['seadragon_leafy+head'])},
{'label': 'Leafy Head NMS 20%', 'grid' : False, 'algo': 'lightnet', 'config_filepath' : 'seadragon_v1', 'weight_filepath' : 'seadragon_v1', 'nms': True, 'nms_thresh': 0.20, 'species_set' : set(['seadragon_leafy+head'])},
{'label': 'Leafy Head NMS 30%', 'grid' : False, 'algo': 'lightnet', 'config_filepath' : 'seadragon_v1', 'weight_filepath' : 'seadragon_v1', 'nms': True, 'nms_thresh': 0.30, 'species_set' : set(['seadragon_leafy+head'])},
{'label': 'Leafy Head NMS 40%', 'grid' : False, 'algo': 'lightnet', 'config_filepath' : 'seadragon_v1', 'weight_filepath' : 'seadragon_v1', 'nms': True, 'nms_thresh': 0.40, 'species_set' : set(['seadragon_leafy+head'])},
{'label': 'Leafy Head NMS 50%', 'grid' : False, 'algo': 'lightnet', 'config_filepath' : 'seadragon_v1', 'weight_filepath' : 'seadragon_v1', 'nms': True, 'nms_thresh': 0.50, 'species_set' : set(['seadragon_leafy+head'])},
{'label': 'Leafy Head NMS 60%', 'grid' : False, 'algo': 'lightnet', 'config_filepath' : 'seadragon_v1', 'weight_filepath' : 'seadragon_v1', 'nms': True, 'nms_thresh': 0.60, 'species_set' : set(['seadragon_leafy+head'])},
{'label': 'Leafy Head NMS 70%', 'grid' : False, 'algo': 'lightnet', 'config_filepath' : 'seadragon_v1', 'weight_filepath' : 'seadragon_v1', 'nms': True, 'nms_thresh': 0.70, 'species_set' : set(['seadragon_leafy+head'])},
{'label': 'Leafy Head NMS 80%', 'grid' : False, 'algo': 'lightnet', 'config_filepath' : 'seadragon_v1', 'weight_filepath' : 'seadragon_v1', 'nms': True, 'nms_thresh': 0.80, 'species_set' : set(['seadragon_leafy+head'])},
{'label': 'Leafy Head NMS 90%', 'grid' : False, 'algo': 'lightnet', 'config_filepath' : 'seadragon_v1', 'weight_filepath' : 'seadragon_v1', 'nms': True, 'nms_thresh': 0.90, 'species_set' : set(['seadragon_leafy+head'])},
{'label': 'Leafy Head NMS 100%', 'grid' : False, 'algo': 'lightnet', 'config_filepath' : 'seadragon_v1', 'weight_filepath' : 'seadragon_v1', 'nms': True, 'nms_thresh': 1.00, 'species_set' : set(['seadragon_leafy+head'])},
],
{},
),
}
for config_key in config_dict:
config_list, config = config_dict[config_key]
for key in kwargs:
config[key] = kwargs[key]
# Backwards compatibility hack
if test_gid_list is not None:
for config_ in config_list:
if 'test_gid_list' not in config_:
config_['test_gid_list'] = test_gid_list
ibs.localizer_precision_recall_algo_display(
config_list,
config_tag=config_key,
output_path=output_path,
**config
)
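# Example (hypothetical thresholds, tag, and output path): the config_dict
# entries above can also be built by hand and fed straight to the display
# helper below; a minimal sketch, assuming an `ibs` controller is already open:
#
#     demo_config_list = [
#         {'label': 'Demo NMS %d%%' % (int(thresh * 100), ),
#          'grid': False, 'algo': 'lightnet',
#          'config_filepath': 'seadragon_v1', 'weight_filepath': 'seadragon_v1',
#          'nms': True, 'nms_thresh': thresh,
#          'species_set': set(['seadragon_weedy'])}
#         for thresh in [0.0, 0.5, 1.0]
#     ]
#     ibs.localizer_precision_recall_algo_display(demo_config_list,
#                                                 config_tag='demo',
#                                                 output_path='/tmp/demo-plots')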
@register_ibs_method
def localizer_precision_recall_algo_display(ibs, config_list, config_tag='', min_overlap=0.5, figsize=(40, 9),
target_recall=0.8, BEST_INDEX=None, offset_color=0,
write_images=False, plot_point=True, output_path=None, **kwargs):
import matplotlib.pyplot as plt
import plottool as pt
if output_path is None:
output_path = abspath(expanduser(join('~', 'Desktop')))
color_list_ = []
for _ in range(offset_color):
color_list_ += [(0.2, 0.2, 0.2)]
color_list = pt.distinct_colors(len(config_list) - len(color_list_), randomize=False)
color_list = color_list_ + color_list
fig_ = plt.figure(figsize=figsize, dpi=400)
######################################################################################
axes_ = plt.subplot(141)
axes_.set_autoscalex_on(False)
axes_.set_autoscaley_on(False)
axes_.set_xlabel('Recall (Ground-Truth IOU >= %0.02f)' % (min_overlap, ))
axes_.set_ylabel('Precision')
axes_.set_xlim([0.0, 1.01])
axes_.set_ylim([0.0, 1.01])
ret_list = [
localizer_precision_recall_algo_plot(ibs, color=color, min_overlap=min_overlap,
plot_point=plot_point,
target_recall=target_recall, **config)
for color, config in zip(color_list, config_list)
]
area_list = [ ret[0] for ret in ret_list ]
tup2_list = [ ret[3] for ret in ret_list ]
best_index = BEST_INDEX  # User override (may be None); validated against the curves below
best_y = 0.0
best_index_ = None
valid_best_index = []
for index, tup2 in enumerate(tup2_list):
if tup2 is None:
continue
conf_list, x_list, y_list, length = tup2
y = y_list[0]
if best_y < y:
valid_best_index.append(index)
best_index_ = index
best_y = y
# Fall back to the computed best when no override was given; discard a
# user-supplied best_index that never led the running best
if best_index is None:
best_index = best_index_
elif best_index not in valid_best_index:
best_index = None
if best_index is not None:
best_conf_list, best_x_list, best_y_list, best_length = tup2_list[best_index]
color = 'xkcd:gold'
marker = 'D'
plt.plot(best_x_list, best_y_list, color=color, marker=marker)
plt.title('Precision-Recall Curves', y=1.19)
plt.legend(bbox_to_anchor=(0.0, 1.02, 1.0, .102), loc=3, ncol=2, mode="expand",
borderaxespad=0.0)
######################################################################################
axes_ = plt.subplot(142)
axes_.set_autoscalex_on(False)
axes_.set_autoscaley_on(False)
axes_.set_xlabel('IOU (Intersection / Union)')
axes_.set_ylabel('Recall')
axes_.set_xlim([0.0, 1.01])
axes_.set_ylim([0.0, 1.01])
ret_list = [
localizer_iou_recall_algo_plot(ibs, color=color_, plot_point=False, **config_)
for color_, config_ in zip(color_list, config_list)
]
# area_list = [ ret[0] for ret in ret_list ]
# tup2_list = [ ret[3] for ret in ret_list ]
# best_index = None if BEST_INDEX is None else BEST_INDEX # Match formatting of below, this is a silly conditional
# best_y = 0.0
# best_index_ = None
# valid_best_index = []
# for index, tup2 in enumerate(tup2_list):
# if tup2 is None:
# continue
# conf_list, x_list, y_list, length = tup2
# y = y_list[0]
# if best_y < y:
# valid_best_index.append(index)
# best_index_ = index
# best_y = y
# # If user defined best_index is invalid, don't use it
# if best_index is None:
# best_index = best_index_
# else:
# if best_index not in valid_best_index:
# best_index = None
# if best_index is not None:
# best_conf_list, best_x_list, best_y_list, best_length = tup2_list[best_index]
# color = 'xkcd:gold'
# marker = 'D'
# plt.plot(best_x_list, best_y_list, color=color, marker=marker)
plt.title('Recall-IOU Curves', y=1.19)
plt.legend(bbox_to_anchor=(0.0, 1.02, 1.0, .102), loc=3, ncol=2, mode="expand",
borderaxespad=0.0)
######################################################################################
# axes_ = plt.subplot(153)
# axes_.set_autoscalex_on(False)
# axes_.set_autoscaley_on(False)
# axes_.set_xlabel('IOU (Intersection / Union)')
# axes_.set_ylabel('Precision')
# axes_.set_xlim([0.0, 1.01])
# axes_.set_ylim([0.0, 1.01])
# ret_list = [
# localizer_iou_precision_algo_plot(ibs, color=color_, plot_point=False, **config_)
# for color_, config_ in zip(color_list, config_list)
# ]
# plt.title('Precision-IOU Curves', y=1.19)
# plt.legend(bbox_to_anchor=(0.0, 1.02, 1.0, .102), loc=3, ncol=2, mode="expand",
# borderaxespad=0.0)
######################################################################################
if best_index is not None:
axes_ = plt.subplot(144)
axes_.set_aspect(1)
gca_ = plt.gca()
gca_.grid(False)
target_conf = best_conf_list[0]
best_config = config_list[best_index]
best_label = config_list[best_index]['label']
best_area = area_list[best_index]
values = localizer_confusion_matrix_algo_plot(ibs, min_overlap=min_overlap,
fig_=fig_, axes_=axes_,
target_conf=target_conf,
**best_config)
best_conf, (correct_rate, _) = values
axes_.set_xlabel('Predicted (Correct = %0.02f%%)' % (correct_rate * 100.0, ))
axes_.set_ylabel('Ground-Truth')
args = (target_recall, best_label, best_area, best_conf, )
plt.title('Confusion Matrix for Recall >= %0.02f\n(Algo: %s, mAP = %0.02f, OP = %0.02f)' % args, y=1.26)
######################################################################################
axes_ = plt.subplot(143)
axes_.set_aspect(1)
gca_ = plt.gca()
gca_.grid(False)
best_index = np.argmax(area_list) if BEST_INDEX is None else BEST_INDEX
best_config = config_list[best_index]
best_label = config_list[best_index]['label']
best_area = area_list[best_index]
values = localizer_confusion_matrix_algo_plot(ibs, min_overlap=min_overlap,
fig_=fig_, axes_=axes_,
**best_config)
best_conf, (correct_rate, _) = values
axes_.set_xlabel('Predicted (Correct = %0.02f%%)' % (correct_rate * 100.0, ))
axes_.set_ylabel('Ground-Truth')
args = (best_label, best_area, best_conf, )
plt.title('Confusion Matrix\n(Algo: %s, mAP = %0.02f, OP = %0.02f)' % args, y=1.26)
######################################################################################
if len(config_tag) > 0:
config_tag = '%s-' % (config_tag, )
fig_filename = '%slocalizer-precision-recall-%0.2f.png' % (config_tag, min_overlap, )
fig_path = join(output_path, fig_filename)
plt.savefig(fig_path, bbox_inches='tight')
return fig_path
@register_ibs_method
def localizer_precision_recall_algo_display_animate(ibs, config_list, **kwargs):
for value in range(10):
min_overlap = value / 10.0
print('Processing: %r' % (min_overlap, ))
ibs.localizer_precision_recall_algo_display(config_list, min_overlap=min_overlap, **kwargs)
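# Example (hypothetical): regenerate the full figure while sweeping the
# minimum IOU requirement from 0.0 to 0.9; a sketch reusing the
# demo_config_list built above:
#
#     ibs.localizer_precision_recall_algo_display_animate(demo_config_list,
#                                                         config_tag='demo',
#                                                         output_path='/tmp/demo-plots')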
# def localizer_classification_tp_tn_fp_fn(gt_list, pred_list, conf, min_overlap,
# check_species=False,
# check_viewpoint=False, **kwargs):
# overlap = general_overlap(gt_list, pred_list)
# num_gt, num_pred = overlap.shape
# # Get confidences
# conf_list = [pred['confidence'] for pred in pred_list]
# pred_flag_list = [conf <= conf_ for conf_ in conf_list]
# if num_gt == 0:
# tp_list = [False] * len(pred_list)
# tn_list = [not pred_flag for pred_flag in pred_flag_list]
# fp_list = [ pred_flag for pred_flag in pred_flag_list]
# fn_list = [False] * len(pred_list)
# elif num_pred == 0:
# tp_list = []
# tn_list = []
# fp_list = []
# fn_list = []
# else:
# max_overlap = np.max(overlap, axis=0)
# gt_flag_list = min_overlap < max_overlap
# status_list = []
# for gt_flag, pred_flag in zip(gt_flag_list, pred_flag_list):
# if gt_flag and pred_flag:
# status_list.append('tp')
# elif gt_flag and not pred_flag:
# status_list.append('fn')
# elif not gt_flag and pred_flag:
# status_list.append('fp')
# elif not gt_flag and not pred_flag:
# status_list.append('tn')
# else:
# raise ValueError
# tp_list = [status == 'tp' for status in status_list]
# tn_list = [status == 'tn' for status in status_list]
# fp_list = [status == 'fp' for status in status_list]
# fn_list = [status == 'fn' for status in status_list]
# return tp_list, tn_list, fp_list, fn_list
# def localizer_classification_confusion_matrix_algo_plot(ibs, color, conf,
# label=None,
# min_overlap=0.25,
# write_images=False,
# **kwargs):
# print('Processing Confusion Matrix for: %r (Conf = %0.02f)' % (label, conf, ))
# test_gid_list = general_get_imageset_gids(ibs, 'TEST_SET', **kwargs)
# test_uuid_list = ibs.get_image_uuids(test_gid_list)
# print('\tGather Ground-Truth')
# gt_dict = general_parse_gt(ibs, test_gid_list=test_gid_list, **kwargs)
# print('\tGather Predictions')
# pred_dict = localizer_parse_pred(ibs, test_gid_list=test_gid_list, **kwargs)
# if write_images:
# output_folder = 'localizer-classification-confusion-matrix-%0.2f-%0.2f-images' % (min_overlap, conf, )
# output_path = abspath(expanduser(join('~', 'Desktop', output_folder)))
# ut.ensuredir(output_path)
# label_list = []
# prediction_list = []
# for index, (test_gid, test_uuid) in enumerate(zip(test_gid_list, test_uuid_list)):
# if test_uuid in pred_dict:
# gt_list = gt_dict[test_uuid]
# pred_list = pred_dict[test_uuid]
# values = localizer_classification_tp_tn_fp_fn(gt_list, pred_list, conf,
# min_overlap=min_overlap,
# **kwargs)
# tp_list, tn_list, fp_list, fn_list = values
# tp = tp_list.count(True)
# tn = tn_list.count(True)
# fp = fp_list.count(True)
# fn = fn_list.count(True)
# for _ in range(int(tp)):
# label_list.append('positive')
# prediction_list.append('positive')
# for _ in range(int(tn)):
# label_list.append('negative')
# prediction_list.append('negative')
# for _ in range(int(fp)):
# label_list.append('negative')
# prediction_list.append('positive')
# for _ in range(int(fn)):
# label_list.append('positive')
# prediction_list.append('negative')
# if write_images:
# test_image = ibs.get_images(test_gid)
# test_image = _resize(test_image, t_width=600, verbose=False)
# height_, width_, channels_ = test_image.shape
# for gt in gt_list:
# xtl = int(gt['xtl'] * width_)
# ytl = int(gt['ytl'] * height_)
# xbr = int(gt['xbr'] * width_)
# ybr = int(gt['ybr'] * height_)
# cv2.rectangle(test_image, (xtl, ytl), (xbr, ybr), (0, 0, 255))
# zipped = zip(pred_list, tp_list, tn_list, fp_list, fn_list)
# for pred, tp_, tn_, fp_, fn_ in zipped:
# if tp_:
# color = (0, 255, 0)
# elif fp_:
# continue
# # color = (255, 0, 0)
# elif fn_:
# color = (255, 0, 0)
# elif tn_:
# continue
# else:
# continue
# xtl = int(pred['xtl'] * width_)
# ytl = int(pred['ytl'] * height_)
# xbr = int(pred['xbr'] * width_)
# ybr = int(pred['ybr'] * height_)
# cv2.rectangle(test_image, (xtl, ytl), (xbr, ybr), color)
# status_str = 'success' if (fp + fn) == 0 else 'failure'
# status_val = tp - fp - fn
# args = (status_str, status_val, test_gid, tp, fp, fn, )
# output_filename = 'test_%s_%d_gid_%d_tp_%d_fp_%d_fn_%d.png' % args
# output_filepath = join(output_path, output_filename)
# cv2.imwrite(output_filepath, test_image)
# category_list = ['positive', 'negative']
# category_mapping = {
# 'positive': 0,
# 'negative': 1,
# }
# return general_confusion_matrix_algo(label_list, prediction_list, category_list,
# category_mapping, size=20, **kwargs)
# @register_ibs_method
# def localizer_classifications_confusion_matrix_algo_display(ibs, conf,
# min_overlap=0.25,
# figsize=(24, 7),
# write_images=False,
# target_recall=0.9,
# plot_point=True,
# masking=False,
# **kwargs):
# import matplotlib.pyplot as plt
# fig_ = plt.figure(figsize=figsize)
# config = {
# 'label' : 'WIC',
# 'algo' : '_COMBINED',
# 'species_set' : set(['zebra']),
# 'classify' : True,
# 'classifier_algo': 'svm',
# 'classifier_masking': masking,
# 'classifier_weight_filepath': '/home/jason/code/ibeis/models-bootstrap/classifier.svm.image.zebra.pkl',
# }
# axes_ = plt.subplot(111)
# axes_.set_aspect(1)
# gca_ = plt.gca()
# gca_.grid(False)
# correct_rate, _ = localizer_classification_confusion_matrix_algo_plot(ibs, None, conf,
# min_overlap=min_overlap,
# write_images=write_images,
# fig_=fig_, axes_=axes_,
# **config)
# axes_.set_xlabel('Predicted (Correct = %0.02f%%)' % (correct_rate * 100.0, ))
# axes_.set_ylabel('Ground-Truth')
# args = (min_overlap, conf, )
# plt.title('Confusion Matrix (IoU %0.02f, Conf %0.02f)' % args, y=1.13)
# # plt.show()
# args = (min_overlap, conf, )
# fig_filename = 'localizer-classification-confusion-matrix-%0.2f-%0.2f.png' % args
# fig_path = abspath(expanduser(join('~', 'Desktop', fig_filename)))
# plt.savefig(fig_path, bbox_inches='tight')
# @register_ibs_method
# def localizer_classifications_confusion_matrix_algo_display_animate(ibs, total=10, **kwargs):
# for index in range(0, total + 1):
# conf = index / total
# ibs.localizer_classifications_confusion_matrix_algo_display(conf, **kwargs)
def classifier_cameratrap_precision_recall_algo(ibs, positive_imageset_id, negative_imageset_id, **kwargs):
depc = ibs.depc_image
test_gid_set_ = set(general_get_imageset_gids(ibs, 'TEST_SET'))
test_gid_set_ = list(test_gid_set_)
positive_gid_set = set(ibs.get_imageset_gids(positive_imageset_id))
negative_gid_set = set(ibs.get_imageset_gids(negative_imageset_id))
test_gid_set = []
label_list = []
for gid in test_gid_set_:
if gid in positive_gid_set:
label = 'positive'
elif gid in negative_gid_set:
label = 'negative'
else:
# label = 'unknown'
continue
test_gid_set.append(gid)
label_list.append(label)
prediction_list = depc.get_property('classifier', test_gid_set, 'class', config=kwargs)
confidence_list = depc.get_property('classifier', test_gid_set, 'score', config=kwargs)
confidence_list = [
confidence if prediction == 'positive' else 1.0 - confidence
for prediction, confidence in zip(prediction_list, confidence_list)
]
return general_precision_recall_algo(ibs, label_list, confidence_list)
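# The classifier's 'score' column holds the confidence of the *predicted*
# class, so negative predictions are flipped into a positive-class confidence
# before the curves are computed. The same convention on made-up values:
#
#     >>> preds  = ['positive', 'negative']
#     >>> scores = [0.9, 0.8]
#     >>> [s if p == 'positive' else 1.0 - s for p, s in zip(preds, scores)]
#     [0.9, 0.19999999999999996]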
def classifier_cameratrap_precision_recall_algo_plot(ibs, **kwargs):
label = kwargs['label']
print('Processing Precision-Recall for: %r' % (label, ))
conf_list, pr_list, re_list, tpr_list, fpr_list = classifier_cameratrap_precision_recall_algo(ibs, **kwargs)
return general_area_best_conf(conf_list, re_list, pr_list, **kwargs)
def classifier_cameratrap_roc_algo_plot(ibs, **kwargs):
label = kwargs['label']
print('Processing ROC for: %r' % (label, ))
conf_list, pr_list, re_list, tpr_list, fpr_list = classifier_cameratrap_precision_recall_algo(ibs, **kwargs)
return general_area_best_conf(conf_list, fpr_list, tpr_list, interpolate=False,
target=(0.0, 1.0), **kwargs)
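# The ROC variant scores the same curve generator against the ideal corner
# target=(0.0, 1.0). A hedged sketch of a closest-to-target operating-point
# rule (general_area_best_conf's actual internals may differ):
#
#     import numpy as np
#     def closest_operating_point(conf_list, x_list, y_list, target=(0.0, 1.0)):
#         # Euclidean distance of each curve point (x, y) to the target corner
#         dist_list = [np.hypot(x - target[0], y - target[1])
#                      for x, y in zip(x_list, y_list)]
#         return conf_list[int(np.argmin(dist_list))]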
def classifier_cameratrap_confusion_matrix_algo_plot(ibs, label, color, conf, positive_imageset_id, negative_imageset_id, output_cases=False, **kwargs):
print('Processing Confusion Matrix for: %r (Conf = %0.02f)' % (label, conf, ))
depc = ibs.depc_image
test_gid_set_ = set(general_get_imageset_gids(ibs, 'TEST_SET'))
test_gid_set_ = list(test_gid_set_)
positive_gid_set = set(ibs.get_imageset_gids(positive_imageset_id))
negative_gid_set = set(ibs.get_imageset_gids(negative_imageset_id))
test_gid_set = []
label_list = []
for gid in test_gid_set_:
# Partition test images by truth imageset (label_ avoids shadowing the label argument)
if gid in positive_gid_set:
label_ = 'positive'
elif gid in negative_gid_set:
label_ = 'negative'
else:
# label_ = 'unknown'
continue
test_gid_set.append(gid)
label_list.append(label_)
prediction_list = depc.get_property('classifier', test_gid_set, 'class', config=kwargs)
confidence_list = depc.get_property('classifier', test_gid_set, 'score', config=kwargs)
confidence_list = [
confidence if prediction == 'positive' else 1.0 - confidence
for prediction, confidence in zip(prediction_list, confidence_list)
]
prediction_list = [
'positive' if confidence >= conf else 'negative'
for confidence in confidence_list
]
if output_cases:
output_path = 'cameratrap-confusion-incorrect'
output_path = abspath(expanduser(join('~', 'Desktop', output_path)))
positive_path = join(output_path, 'positive')
negative_path = join(output_path, 'negative')
ut.delete(output_path)
ut.ensuredir(output_path)
ut.ensuredir(positive_path)
ut.ensuredir(negative_path)
interpolation = cv2.INTER_LANCZOS4
warpkw = dict(interpolation=interpolation)
for gid, label_, prediction in zip(test_gid_set, label_list, prediction_list):
if label_ == prediction:
continue
image = ibs.get_images(gid)
image = cv2.resize(image, (192, 192), **warpkw)
# Get path
image_path = positive_path if label_ == 'positive' else negative_path
image_filename = 'hardidx_%d_pred_%s_case_fail.jpg' % (gid, prediction, )
image_filepath = join(image_path, image_filename)
# Save path
cv2.imwrite(image_filepath, image)
category_list = ['positive', 'negative']
category_mapping = {
'positive': 0,
'negative': 1,
}
return general_confusion_matrix_algo(label_list, prediction_list, category_list,
category_mapping, **kwargs)
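# The label/prediction pairs above reduce to a 2x2 tally; a minimal sketch of
# the count that general_confusion_matrix_algo presumably normalizes and
# renders (its plotting is omitted here):
#
#     def confusion_counts(label_list, prediction_list):
#         counts = {}
#         for truth, pred in zip(label_list, prediction_list):
#             counts[(truth, pred)] = counts.get((truth, pred), 0) + 1
#         return counts  # keys like ('positive', 'negative') -> count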
@register_ibs_method
def classifier_cameratrap_precision_recall_algo_display(ibs, positive_imageset_id, negative_imageset_id, config_list=None, figsize=(20, 20)):
import matplotlib.pyplot as plt
import plottool as pt
fig_ = plt.figure(figsize=figsize, dpi=400)
if config_list is None:
config_list = [
# {'label': 'Initial Model (5%) - IBEIS_CNN', 'classifier_algo': 'cnn', 'classifier_weight_filepath': 'ryan.ibeis_cnn.v1'},
# {'label': 'Initial Model (5%) - DenseNet', 'classifier_algo': 'densenet', 'classifier_weight_filepath': 'ryan_densenet_v1'},
# {'label': 'Initial Model (5%) - DenseNet 0', 'classifier_algo': 'densenet', 'classifier_weight_filepath': 'ryan_densenet_v1:0'},
# {'label': 'Initial Model (5%) - DenseNet 1', 'classifier_algo': 'densenet', 'classifier_weight_filepath': 'ryan_densenet_v1:1'},
# {'label': 'Initial Model (5%) - DenseNet 2', 'classifier_algo': 'densenet', 'classifier_weight_filepath': 'ryan_densenet_v1:2'},
{'label': 'Initial Model (10%) - DenseNet', 'classifier_algo': 'densenet', 'classifier_weight_filepath': 'ryan_densenet_v2'},
# {'label': 'Initial Model (10%) - DenseNet 0', 'classifier_algo': 'densenet', 'classifier_weight_filepath': 'ryan_densenet_v2:0'},
# {'label': 'Initial Model (10%) - DenseNet 1', 'classifier_algo': 'densenet', 'classifier_weight_filepath': 'ryan_densenet_v2:1'},
# {'label': 'Initial Model (10%) - DenseNet 2', 'classifier_algo': 'densenet', 'classifier_weight_filepath': 'ryan_densenet_v2:2'},
# {'label': 'Initial Model (0%)', 'classifier_algo': 'cnn', 'classifier_weight_filepath': 'megan2.1'},
# {'label': 'Retrained Model (1%)', 'classifier_algo': 'cnn', 'classifier_weight_filepath': 'megan2.2'},
# {'label': 'Retrained Model (2%)', 'classifier_algo': 'cnn', 'classifier_weight_filepath': 'megan2.3'},
# {'label': 'Retrained Model (3%)', 'classifier_algo': 'cnn', 'classifier_weight_filepath': 'megan2.4'},
# {'label': 'Retrained Model (4%)', 'classifier_algo': 'cnn', 'classifier_weight_filepath': 'megan2.5'},
# {'label': 'Retrained Model (5%)', 'classifier_algo': 'cnn', 'classifier_weight_filepath': 'megan2.6'},
# {'label': 'Initial Model (0%)', 'classifier_weight_filepath': 'megan1.1'},
# {'label': 'Retrained Model (1%)', 'classifier_weight_filepath': 'megan1.2'},
# {'label': 'Retrained Model (2%)', 'classifier_weight_filepath': 'megan1.3'},
# {'label': 'Retrained Model (3%)', 'classifier_weight_filepath': 'megan1.4'},
# {'label': 'Retrained Model (3.5%)', 'classifier_weight_filepath': 'megan1.5'},
# {'label': 'Retrained Model (5%)', 'classifier_weight_filepath': 'megan1.6'},
]
color_list = pt.distinct_colors(len(config_list), randomize=False)
axes_ = plt.subplot(221)
axes_.set_autoscalex_on(False)
axes_.set_autoscaley_on(False)
axes_.set_xlabel('Recall')
axes_.set_ylabel('Precision')
axes_.set_xlim([0.0, 1.01])
axes_.set_ylim([0.0, 1.01])
ret_list = [
classifier_cameratrap_precision_recall_algo_plot(ibs, color=color,
positive_imageset_id=positive_imageset_id,
negative_imageset_id=negative_imageset_id,
**config)
for color, config in zip(color_list, config_list)
]
area_list = [ ret[0] for ret in ret_list ]
conf_list = [ ret[1] for ret in ret_list ]
index = np.argmax(area_list)
# index = 0
best_label1 = config_list[index]['label']
best_config1 = config_list[index]
best_color1 = color_list[index]
best_area1 = area_list[index]
best_conf1 = conf_list[index]
plt.title('Precision-Recall Curve (Best: %s, AP = %0.02f)' % (best_label1, best_area1, ), y=1.10)
plt.legend(bbox_to_anchor=(0.0, 1.02, 1.0, .102), loc=3, ncol=2, mode="expand",
borderaxespad=0.0)
axes_ = plt.subplot(222)
axes_.set_autoscalex_on(False)
axes_.set_autoscaley_on(False)
axes_.set_xlabel('False-Positive Rate')
axes_.set_ylabel('True-Positive Rate')
axes_.set_xlim([0.0, 1.01])
axes_.set_ylim([0.0, 1.01])
ret_list = [
classifier_cameratrap_roc_algo_plot(ibs, color=color,
positive_imageset_id=positive_imageset_id,
negative_imageset_id=negative_imageset_id,
**config)
for color, config in zip(color_list, config_list)
]
area_list = [ ret[0] for ret in ret_list ]
conf_list = [ ret[1] for ret in ret_list ]
index = np.argmax(area_list)
# index = 0
best_label2 = config_list[index]['label']
best_config2 = config_list[index]
best_color2 = color_list[index]
best_area2 = area_list[index]
best_conf2 = conf_list[index]
plt.title('ROC Curve (Best: %s, AUC = %0.02f)' % (best_label2, best_area2, ), y=1.10)
plt.legend(bbox_to_anchor=(0.0, 1.02, 1.0, .102), loc=3, ncol=2, mode="expand",
borderaxespad=0.0)
axes_ = plt.subplot(223)
axes_.set_aspect(1)
gca_ = plt.gca()
gca_.grid(False)
correct_rate, _ = classifier_cameratrap_confusion_matrix_algo_plot(ibs, color=best_color1,
conf=best_conf1, fig_=fig_, axes_=axes_,
positive_imageset_id=positive_imageset_id,
negative_imageset_id=negative_imageset_id,
output_cases=True, **best_config1)
axes_.set_xlabel('Predicted (Correct = %0.02f%%)' % (correct_rate * 100.0, ))
axes_.set_ylabel('Ground-Truth')
plt.title('P-R Confusion Matrix (OP = %0.02f)' % (best_conf1, ), y=1.12)
axes_ = plt.subplot(224)
axes_.set_aspect(1)
gca_ = plt.gca()
gca_.grid(False)
correct_rate, _ = classifier_cameratrap_confusion_matrix_algo_plot(ibs, color=best_color2,
conf=best_conf2, fig_=fig_, axes_=axes_,
positive_imageset_id=positive_imageset_id,
negative_imageset_id=negative_imageset_id,
**best_config2)
axes_.set_xlabel('Predicted (Correct = %0.02f%%)' % (correct_rate * 100.0, ))
axes_.set_ylabel('Ground-Truth')
plt.title('ROC Confusion Matrix (OP = %0.02f)' % (best_conf2, ), y=1.12)
fig_filename = 'classifier-cameratrap-precision-recall-roc.png'
fig_path = abspath(expanduser(join('~', 'Desktop', fig_filename)))
plt.savefig(fig_path, bbox_inches='tight')
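# Hedged usage sketch (assumes an ibs controller plus two imagesets holding
# the positive and negative camera-trap examples; the imageset rowids below
# are hypothetical):
#
#     >>> import ibeis
#     >>> ibs = ibeis.opendb('testdb1')
#     >>> ibs.classifier_cameratrap_precision_recall_algo_display(1, 2)
#
# The composite figure is saved to
# ~/Desktop/classifier-cameratrap-precision-recall-roc.png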
# def classifier_binary_precision_recall_algo(ibs, category_set, **kwargs):
# depc = ibs.depc_image
# test_gid_set = set(general_get_imageset_gids(ibs, 'TEST_SET'))
# test_gid_set = list(test_gid_set)
# aids_list = ibs.get_image_aids(test_gid_set)
# species_set_list = [
# set(ibs.get_annot_species_texts(aid_list))
# for aid_list in aids_list
# ]
# label_list = [
# 'negative' if len(species_set & category_set) == 0 else 'positive'
# for species_set in species_set_list
# ]
# prediction_list = depc.get_property('classifier', test_gid_set, 'class', config=kwargs)
# confidence_list = depc.get_property('classifier', test_gid_set, 'score', config=kwargs)
# confidence_list = [
# confidence if prediction == 'positive' else 1.0 - confidence
# for prediction, confidence in zip(prediction_list, confidence_list)
# ]
# return general_precision_recall_algo(ibs, label_list, confidence_list)
# def classifier_binary_precision_recall_algo_plot(ibs, **kwargs):
# label = kwargs['label']
# print('Processing Precision-Recall for: %r' % (label, ))
# conf_list, pr_list, re_list, tpr_list, fpr_list = classifier_binary_precision_recall_algo(ibs, **kwargs)
# return general_area_best_conf(conf_list, re_list, pr_list, **kwargs)
# def classifier_binary_roc_algo_plot(ibs, **kwargs):
# label = kwargs['label']
# print('Processing ROC for: %r' % (label, ))
# conf_list, pr_list, re_list, tpr_list, fpr_list = classifier_binary_precision_recall_algo(ibs, **kwargs)
# return general_area_best_conf(conf_list, fpr_list, tpr_list, interpolate=False,
# target=(0.0, 1.0), **kwargs)
# def classifier_binary_confusion_matrix_algo_plot(ibs, label, color, conf, category_set, **kwargs):
# print('Processing Confusion Matrix for: %r (Conf = %0.02f)' % (label, conf, ))
# depc = ibs.depc_image
# test_gid_set = set(general_get_imageset_gids(ibs, 'TEST_SET'))
# test_gid_set = list(test_gid_set)
# aids_list = ibs.get_image_aids(test_gid_set)
# species_set_list = [
# set(ibs.get_annot_species_texts(aid_list))
# for aid_list in aids_list
# ]
# label_list = [
# 'negative' if len(species_set & category_set) == 0 else 'positive'
# for species_set in species_set_list
# ]
# prediction_list = depc.get_property('classifier', test_gid_set, 'class', config=kwargs)
# confidence_list = depc.get_property('classifier', test_gid_set, 'score', config=kwargs)
# confidence_list = [
# confidence if prediction == 'positive' else 1.0 - confidence
# for prediction, confidence in zip(prediction_list, confidence_list)
# ]
# prediction_list = [
# 'positive' if confidence >= conf else 'negative'
# for confidence in confidence_list
# ]
# category_list = ['positive', 'negative']
# category_mapping = {
# 'positive': 0,
# 'negative': 1,
# }
# return general_confusion_matrix_algo(label_list, prediction_list, category_list,
# category_mapping, **kwargs)
# @register_ibs_method
# def classifier_binary_precision_recall_algo_display(ibs, figsize=(16, 16), **kwargs):
# import matplotlib.pyplot as plt
# fig_ = plt.figure(figsize=figsize)
# # label = 'V1'
# # species_list = ['zebra']
# # kwargs['classifier_weight_filepath'] = 'coco_zebra'
# label = 'V3'
# species_list = ['zebra_plains', 'zebra_grevys']
# kwargs['classifier_weight_filepath'] = 'v3_zebra'
# category_set = set(species_list)
# axes_ = plt.subplot(221)
# axes_.set_autoscalex_on(False)
# axes_.set_autoscaley_on(False)
# axes_.set_xlabel('Recall')
# axes_.set_ylabel('Precision')
# axes_.set_xlim([0.0, 1.01])
# axes_.set_ylim([0.0, 1.01])
# area, best_conf1, _ = classifier_binary_precision_recall_algo_plot(ibs, label=label, color='r', category_set=category_set, **kwargs)
# plt.title('Precision-Recall Curve (AP = %0.02f)' % (area, ), y=1.10)
# plt.legend(bbox_to_anchor=(0.0, 1.02, 1.0, .102), loc=3, ncol=2, mode="expand",
# borderaxespad=0.0)
# axes_ = plt.subplot(222)
# axes_.set_autoscalex_on(False)
# axes_.set_autoscaley_on(False)
# axes_.set_xlabel('False-Positive Rate')
# axes_.set_ylabel('True-Positive Rate')
# axes_.set_xlim([0.0, 1.01])
# axes_.set_ylim([0.0, 1.01])
# area, best_conf2, _ = classifier_binary_roc_algo_plot(ibs, label=label, color='r', category_set=category_set, **kwargs)
# plt.title('ROC Curve (AP = %0.02f)' % (area, ), y=1.10)
# plt.legend(bbox_to_anchor=(0.0, 1.02, 1.0, .102), loc=3, ncol=2, mode="expand",
# borderaxespad=0.0)
# axes_ = plt.subplot(223)
# axes_.set_aspect(1)
# gca_ = plt.gca()
# gca_.grid(False)
# correct_rate, _ = classifier_binary_confusion_matrix_algo_plot(ibs, label, 'r', conf=best_conf1, fig_=fig_, axes_=axes_, category_set=category_set, **kwargs)
# axes_.set_xlabel('Predicted (Correct = %0.02f%%)' % (correct_rate * 100.0, ))
# axes_.set_ylabel('Ground-Truth')
# plt.title('P-R Confusion Matrix (OP = %0.02f)' % (best_conf1, ), y=1.12)
# axes_ = plt.subplot(224)
# axes_.set_aspect(1)
# gca_ = plt.gca()
# gca_.grid(False)
# correct_rate, _ = classifier_binary_confusion_matrix_algo_plot(ibs, label, 'r', conf=best_conf2, fig_=fig_, axes_=axes_, category_set=category_set, **kwargs)
# axes_.set_xlabel('Predicted (Correct = %0.02f%%)' % (correct_rate * 100.0, ))
# axes_.set_ylabel('Ground-Truth')
# plt.title('ROC Confusion Matrix (OP = %0.02f)' % (best_conf2, ), y=1.12)
# fig_filename = 'classifier-precision-recall-roc.png'
# fig_path = abspath(expanduser(join('~', 'Desktop', fig_filename)))
# plt.savefig(fig_path, bbox_inches='tight')
def classifier2_precision_recall_algo(ibs, category, species_mapping={},
output_path=None, test_gid_list=None,
test_label_list=None, **kwargs):
depc = ibs.depc_image
if test_gid_list is None:
test_gid_set = set(general_get_imageset_gids(ibs, 'TEST_SET'))
test_gid_list = list(test_gid_set)
if test_label_list is None:
aids_list = ibs.get_image_aids(test_gid_list)
species_list_list = list(map(ibs.get_annot_species_texts, aids_list))
species_set_list = []
for species_list in species_list_list:
species_set = set([])
for species in species_list:
species = species_mapping.get(species, species)
species_set.add(species)
species_set_list.append(species_set)
else:
species_set_list = [
set([label])
for label in test_label_list
]
label_list = [
'positive' if category in species_set_ else 'negative'
for species_set_ in species_set_list
]
confidence_dict_list = depc.get_property('classifier_two', test_gid_list, 'scores', config=kwargs)
confidence_list = [
confidence_dict[category]
for confidence_dict in confidence_dict_list
]
if output_path is not None:
ut.ensuredir(output_path)
config_ = {
'draw_annots' : False,
'thumbsize' : (192, 192),
}
thumbnail_list = depc.get_property('thumbnails', test_gid_list, 'img', config=config_)
zipped = zip(test_gid_list, thumbnail_list, species_set_list, confidence_dict_list)
for index, (test_gid, thumbnail, species_set, confidence_dict) in enumerate(zipped):
print(index)
x = ';'.join(species_set)
y = []
for key in confidence_dict:
y.append('%s-%0.04f' % (key, confidence_dict[key], ))
y = ';'.join(y)
output_filename = 'image-index-%s-gid-%s-gt-%s-pred-%s.png' % (index, test_gid, x, y)
output_filepath = join(output_path, output_filename)
cv2.imwrite(output_filepath, thumbnail)
kwargs.pop('category', None)
return general_precision_recall_algo(ibs, label_list, confidence_list)
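# classifier2_* reduces the multi-label 'classifier_two' score dict to a set
# of independent one-vs-rest binary problems: for a given category an image
# is 'positive' if the category appears in its (mapped) species set, and the
# confidence is the score the classifier assigned to that category.  A
# minimal sketch of the reduction, assuming one score dict per image:
#
#     >>> category = 'zebra_grevys'
#     >>> species_sets = [{'zebra_grevys'}, {'giraffe_masai'}]
#     >>> score_dicts = [{'zebra_grevys': 0.8}, {'zebra_grevys': 0.1}]
#     >>> labels = ['positive' if category in s else 'negative'
#     ...           for s in species_sets]
#     >>> confs = [d[category] for d in score_dicts]
#     >>> labels, confs
#     (['positive', 'negative'], [0.8, 0.1])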
def classifier2_precision_recall_algo_plot(ibs, **kwargs):
label = kwargs['label']
print('Processing Precision-Recall for: %r' % (label, ))
conf_list, pr_list, re_list, tpr_list, fpr_list = classifier2_precision_recall_algo(ibs, **kwargs)
return general_area_best_conf(conf_list, re_list, pr_list, **kwargs)
def classifier2_roc_algo_plot(ibs, **kwargs):
label = kwargs['label']
print('Processing ROC for: %r' % (label, ))
conf_list, pr_list, re_list, tpr_list, fpr_list = classifier2_precision_recall_algo(ibs, **kwargs)
return general_area_best_conf(conf_list, fpr_list, tpr_list, interpolate=False,
target=(0.0, 1.0), **kwargs)
@register_ibs_method
def classifier2_precision_recall_algo_display(ibs, species_list=None,
species_mapping={},
nice_mapping={},
test_gid_list=None,
test_label_list=None,
figsize=(20, 9), **kwargs):
import matplotlib.pyplot as plt
import plottool as pt
depc = ibs.depc_image
fig_ = plt.figure(figsize=figsize, dpi=400) # NOQA
# kwargs['classifier_two_weight_filepath'] = 'v3'
# kwargs['classifier_two_weight_filepath'] = 'candidacy'
# kwargs['classifier_two_weight_filepath'] = 'ggr2'
is_labeled = test_label_list is not None
kwargs['classifier_two_algo'] = 'densenet'
kwargs['classifier_two_weight_filepath'] = 'flukebook_v1'
test_gid_set = set(general_get_imageset_gids(ibs, 'TEST_SET'))
test_gid_list_ = list(test_gid_set) if test_gid_list is None else test_gid_list
test_label_list_ = test_label_list if is_labeled else [None] * len(test_gid_list_)
zipped = list(zip(test_gid_list_, test_label_list_))
test_gid_list_ = []
test_label_list_ = []
for test_gid_, test_label_ in zipped:
if test_gid_ in test_gid_set:
test_gid_list_.append(test_gid_)
test_label_list_.append(test_label_)
test_gid_list = test_gid_list_
test_label_list = test_label_list_ if is_labeled else None
# depc.delete_property('classifier_two', test_gid_list, config=kwargs)
if species_list is None:
test_gid = test_gid_list[0]
confidence_dict = depc.get_property('classifier_two', test_gid, 'scores', config=kwargs)
species_list = confidence_dict.keys()
category_set = sorted(species_list)
config_list = []
for category in category_set:
category_nice = nice_mapping.get(category, category)
config_dict = {
'label': category_nice,
'category': category,
}
config_dict.update(kwargs)
config_list.append(config_dict)
color_list_ = []
color_list = pt.distinct_colors(len(config_list) - len(color_list_), randomize=False)
color_list = color_list_ + color_list
axes_ = plt.subplot(121)
axes_.set_autoscalex_on(False)
axes_.set_autoscaley_on(False)
axes_.set_xlabel('Recall')
axes_.set_ylabel('Precision')
axes_.set_xlim([0.0, 1.01])
axes_.set_ylim([0.0, 1.01])
for color, config in zip(color_list, config_list):
classifier2_precision_recall_algo_plot(ibs, color=color,
test_gid_list=test_gid_list,
test_label_list=test_label_list,
species_mapping=species_mapping,
**config)
plt.title('Precision-Recall Curves', y=1.19)
plt.legend(bbox_to_anchor=(0.0, 1.02, 1.0, .102), loc=3, ncol=2, mode="expand",
borderaxespad=0.0)
axes_ = plt.subplot(122)
axes_.set_autoscalex_on(False)
axes_.set_autoscaley_on(False)
axes_.set_xlabel('False-Positive Rate')
axes_.set_ylabel('True-Positive Rate')
axes_.set_xlim([0.0, 1.01])
axes_.set_ylim([0.0, 1.01])
op_dict = {}
for color, config in zip(color_list, config_list):
values = classifier2_roc_algo_plot(ibs, color=color,
test_gid_list=test_gid_list,
test_label_list=test_label_list,
species_mapping=species_mapping,
**config)
ap, best_conf, tup1, tup2 = values
op_dict[config['category']] = best_conf
plt.title('ROC Curves', y=1.19)
plt.legend(bbox_to_anchor=(0.0, 1.02, 1.0, .102), loc=3, ncol=2, mode="expand",
borderaxespad=0.0)
if is_labeled:
species_set_list = [
set([label])
for label in test_label_list
]
else:
aids_list = ibs.get_image_aids(test_gid_list)
species_list_list = list(map(ibs.get_annot_species_texts, aids_list))
species_set_list = []
for species_list in species_list_list:
species_set = set([])
for species in species_list:
species = species_mapping.get(species, species)
species_set.add(species)
species_set_list.append(species_set)
confidence_dict_list = depc.get_property('classifier_two', test_gid_list, 'scores', config=kwargs)
correct = 0
for test_gid, confidence_dict, species_set in zip(test_gid_list, confidence_dict_list, species_set_list):
species_set_ = set([])
for key in confidence_dict:
if op_dict[key] <= confidence_dict[key]:
species_set_.add(key)
if len(species_set ^ species_set_) == 0:
correct += 1
else:
print(test_gid, confidence_dict, species_set)
print('Accuracy: %0.04f' % (100.0 * correct / len(test_gid_list)))
print('\t using op_dict = %r' % (op_dict, ))
fig_filename = 'classifier2-precision-recall-roc.png'
fig_path = abspath(expanduser(join('~', 'Desktop', fig_filename)))
plt.savefig(fig_path, bbox_inches='tight')
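# Hedged usage sketch (requires a 'TEST_SET' imageset and the configured
# 'classifier_two' model):
#
#     >>> ibs.classifier2_precision_recall_algo_display()
#
# Note that the final accuracy counts an image as correct only when the set
# of categories whose score clears its per-category operating point exactly
# matches the ground-truth species set (empty symmetric difference).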
def labeler_tp_tn_fp_fn(ibs, category_list, species_mapping={}, viewpoint_mapping={},
samples=SAMPLES, test_gid_set=None, **kwargs):
def errors(zipped, conf, category):
tp, tn, fp, fn = 0.0, 0.0, 0.0, 0.0
for index, (label, confidence) in enumerate(zipped):
if label == category:
if conf <= confidence:
tp += 1
else:
fn += 1
else:
if conf <= confidence:
fp += 1
else:
tn += 1
return tp, tn, fp, fn
depc = ibs.depc_annot
if test_gid_set is None:
test_gid_set = set(general_get_imageset_gids(ibs, 'TEST_SET'))
test_gid_set = list(test_gid_set)
aids_list = ibs.get_image_aids(test_gid_set)
aid_list = ut.flatten(aids_list)
# Get annot species and viewpoints
species_list = ibs.get_annot_species_texts(aid_list)
viewpoint_list = ibs.get_annot_viewpoints(aid_list)
# Filter aids with species of interest and undefined viewpoints
species_list = [
species_mapping.get(species, species)
for species in species_list
]
viewpoint_list = [
viewpoint_mapping.get(species, {}).get(viewpoint, viewpoint)
for species, viewpoint in zip(species_list, viewpoint_list)
]
flag_list = [
species in category_list and viewpoint is not None
for species, viewpoint in zip(species_list, viewpoint_list)
]
if False in flag_list:
aid_list = ut.compress(aid_list, flag_list)
species_list = ut.compress(species_list, flag_list)
viewpoint_list = ut.compress(viewpoint_list, flag_list)
# Make ground-truth
label_list = [
'%s:%s' % (species, viewpoint_, )
for species, viewpoint_ in zip(species_list, viewpoint_list)
]
# Get predictions
# depc.delete_property('labeler', aid_list, config=kwargs)
probability_dict_list = depc.get_property('labeler', aid_list, 'probs', config=kwargs)
value1_list = set(label_list)
value2_list = set(probability_dict_list[0].keys())
assert len(value1_list - value2_list) == 0
assert len(value2_list - value1_list) == 0
conf_list = [ _ / float(samples) for _ in range(0, int(samples) + 1) ]
label_dict = {}
for key in value1_list:
print('\t%r' % (key, ))
conf_dict = {}
confidence_list = [
probability_dict[key]
for probability_dict in probability_dict_list
]
zipped = list(zip(label_list, confidence_list))
for conf in conf_list:
conf_dict[conf] = errors(zipped, conf, key)
label_dict[key] = conf_dict
return label_dict
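# A worked sketch of the errors() sweep above, assuming a single category
# and a handful of (label, confidence) pairs; at conf=0.5 the pair
# ('cat', 0.4) counts as a false negative and ('other', 0.6) as a false
# positive:
#
#     >>> zipped = [('cat', 0.9), ('cat', 0.4), ('other', 0.6), ('other', 0.1)]
#     >>> tp = sum(1 for l, c in zipped if l == 'cat' and 0.5 <= c)    # 1
#     >>> fn = sum(1 for l, c in zipped if l == 'cat' and c < 0.5)     # 1
#     >>> fp = sum(1 for l, c in zipped if l != 'cat' and 0.5 <= c)    # 1
#     >>> tn = sum(1 for l, c in zipped if l != 'cat' and c < 0.5)     # 1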
def labeler_precision_recall_algo(ibs, category_list, label_dict, **kwargs):
if category_list is None:
category_list_ = label_dict.keys()
else:
category_list_ = []
for category in category_list:
for key in label_dict:
if category is None or category in key:
category_list_.append(key)
global_conf_dict = {}
for category in category_list_:
conf_dict = label_dict[category]
for conf in conf_dict:
new_list = conf_dict[conf]
if conf not in global_conf_dict:
global_conf_dict[conf] = new_list
else:
cur_list = global_conf_dict[conf]
zipped_ = zip(cur_list, new_list)
global_conf_dict[conf] = [cur + new for cur, new in zipped_]
conf_list_ = [-1.0, -1.0]
pr_list = [1.0, 0.0]
re_list = [0.0, 1.0]
tpr_list = [0.0, 1.0]
fpr_list = [0.0, 1.0]
# conf_list_ = []
# pr_list = []
# re_list = []
# tpr_list = []
# fpr_list = []
for conf in sorted(global_conf_dict.keys(), reverse=True):
error_list = global_conf_dict[conf]
tp, tn, fp, fn = error_list
try:
pr = tp / (tp + fp)
re = tp / (tp + fn)
tpr = tp / (tp + fn)
fpr = fp / (fp + tn)
conf_list_.append(conf)
pr_list.append(pr)
re_list.append(re)
tpr_list.append(tpr)
fpr_list.append(fpr)
except ZeroDivisionError:
print('Zero division error (%r) - tp: %r tn: %r fp: %r fn: %r' % (conf, tp, tn, fp, fn, ))
return conf_list_, pr_list, re_list, tpr_list, fpr_list
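# The rates above follow the usual definitions: precision = tp / (tp + fp),
# recall = tpr = tp / (tp + fn), and fpr = fp / (fp + tn); e.g. tp=3, fp=1,
# fn=2, tn=4 gives pr=0.75, re=0.60, fpr=0.20.  The sentinel endpoints
# prepended to the lists pin the curves at (re=0, pr=1) and (re=1, pr=0) so
# the integrated area stays well defined even when some thresholds hit a
# ZeroDivisionError and are skipped.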
def labeler_precision_recall_algo_plot(ibs, **kwargs):
label = kwargs['label']
category_list = kwargs['category_list']
print('Processing Precision-Recall for: %r (category_list = %r)' % (label, category_list, ))
conf_list, pr_list, re_list, tpr_list, fpr_list = labeler_precision_recall_algo(ibs, **kwargs)
return general_area_best_conf(conf_list, re_list, pr_list, **kwargs)
def labeler_roc_algo_plot(ibs, **kwargs):
label = kwargs['label']
category_list = kwargs['category_list']
print('Processing ROC for: %r (category_list = %r)' % (label, category_list, ))
conf_list, pr_list, re_list, tpr_list, fpr_list = labeler_precision_recall_algo(ibs, **kwargs)
return general_area_best_conf(conf_list, fpr_list, tpr_list, interpolate=False,
target=(0.0, 1.0), **kwargs)
def labeler_confusion_matrix_algo_plot(ibs, category_list, species_mapping={},
viewpoint_mapping={}, category_mapping=None,
test_gid_set=None, **kwargs):
print('Processing Confusion Matrix')
depc = ibs.depc_annot
if test_gid_set is None:
test_gid_set = set(general_get_imageset_gids(ibs, 'TEST_SET'))
test_gid_set = list(test_gid_set)
aids_list = ibs.get_image_aids(test_gid_set)
aid_list = ut.flatten(aids_list)
species_list = ibs.get_annot_species_texts(aid_list)
viewpoint_list = ibs.get_annot_viewpoints(aid_list)
label_list = [
'%s:%s' % (
species_mapping.get(species, species),
viewpoint_mapping.get(species, {}).get(viewpoint, viewpoint),
)
for species, viewpoint in zip(species_list, viewpoint_list)
]
temp_list = [
(aid, label)
for aid, label in zip(aid_list, label_list)
if label in category_list
]
aid_list = [_[0] for _ in temp_list]
label_list = [_[1] for _ in temp_list]
conf_list = depc.get_property('labeler', aid_list, 'score', config=kwargs)
species_list = depc.get_property('labeler', aid_list, 'species', config=kwargs)
viewpoint_list = depc.get_property('labeler', aid_list, 'viewpoint', config=kwargs)
prediction_list = [
'%s:%s' % (species, viewpoint, )
for species, viewpoint in zip(species_list, viewpoint_list)
]
category_list = list(map(simple_code, category_list))
label_list = list(map(simple_code, label_list))
prediction_list = list(map(simple_code, prediction_list))
if category_mapping is None:
category_mapping = { key: index for index, key in enumerate(category_list) }
category_mapping = {
simple_code(key): category_mapping[key]
for key in category_mapping
}
return general_confusion_matrix_algo(label_list, prediction_list, category_list,
category_mapping, conf_list=conf_list,
size=8, **kwargs)
@register_ibs_method
def labeler_precision_recall_algo_display(ibs, category_list=None, species_mapping={}, viewpoint_mapping={},
category_mapping=None, fuzzy_dict=None,
figsize=(30, 9), test_gid_set=None,
use_axis_aligned_chips=False,
labeler_weight_filepath=None,
config_list=None, **kwargs):
import matplotlib.pyplot as plt
import plottool as pt
if category_list is None:
if test_gid_set is None:
test_gid_set = set(general_get_imageset_gids(ibs, 'TEST_SET'))
test_gid_set = list(test_gid_set)
aids_list = ibs.get_image_aids(test_gid_set)
aid_list = ut.flatten(aids_list)
species_list = ibs.get_annot_species_texts(aid_list)
species_list = [
species_mapping.get(species, species)
for species in species_list
]
category_list = sorted(list(set(species_list)))
print('Compiling raw numbers...')
kwargs['labeler_algo'] = 'densenet'
if labeler_weight_filepath is None:
# kwargs['labeler_weight_filepath'] = 'zebra_v1'
# kwargs['labeler_weight_filepath'] = 'seaturtle'
# kwargs['labeler_weight_filepath'] = 'giraffe_v1'
# kwargs['labeler_weight_filepath'] = 'lynx_v3'
# kwargs['labeler_weight_filepath'] = 'seaturtle_v3'
# kwargs['labeler_weight_filepath'] = 'jaguar_v3'
# kwargs['labeler_weight_filepath'] = 'hendrik_dorsal_v2'
# kwargs['labeler_weight_filepath'] = 'spotted_skunk_v0'
# kwargs['labeler_weight_filepath'] = 'nassau_grouper_v0'
# kwargs['labeler_weight_filepath'] = 'spotted_dolphin_v0'
# kwargs['labeler_weight_filepath'] = 'seadragon_v1'
kwargs['labeler_weight_filepath'] = 'seadragon_v2'
else:
kwargs['labeler_weight_filepath'] = labeler_weight_filepath
kwargs['labeler_axis_aligned'] = use_axis_aligned_chips
label_dict = labeler_tp_tn_fp_fn(ibs, category_list, species_mapping=species_mapping, viewpoint_mapping=viewpoint_mapping,
test_gid_set=test_gid_set, **kwargs)
if config_list is None:
config_list = [
# {'label': 'Giraffe', 'category_list': None},
# {'label': 'Masai Giraffe', 'category_list': ['giraffe_masai']},
# {'label': 'Reticulated Giraffe', 'category_list': ['giraffe_reticulated']},
# {'label': 'Lynx', 'category_list': ['lynx_pardinus']},
# {'label': 'Sea Turtle', 'category_list': ['turtle_sea']},
# {'label': 'Sea Turtle Head', 'category_list': ['turtle_sea+head']},
# {'label': 'Manta', 'category_list': ['manta_ray_giant']},
# {'label': 'Jaguar', 'category_list': ['jaguar']},
# {'label': 'Dorsal Fin', 'category_list': ['dolphin_bottlenose_fin']},
# {'label': 'Whale Fluke', 'category_list': ['whale_fluke']},
# {'label': 'Grevy\'s Zebra', 'category_list': ['zebra_grevys']},
# {'label': 'Plains Zebra', 'category_list': ['zebra_plains']},
# {'label': 'Spotted Skunk', 'category_list': ['skunk_spotted']},
# {'label': 'Nassau Grouper', 'category_list': ['grouper_nassau']},
# {'label': 'Spotted Dolphin', 'category_list': ['dolphin_spotted']},
{'label': 'Weedy SD ', 'category_list': ['seadragon_weedy']},
{'label': 'Weedy Head', 'category_list': ['seadragon_weedy+head']},
{'label': 'Leafy SD ', 'category_list': ['seadragon_leafy']},
{'label': 'Leafy Head', 'category_list': ['seadragon_leafy+head']},
]
color_list = [(0.0, 0.0, 0.0)]
color_list += pt.distinct_colors(len(config_list) - len(color_list), randomize=False)
fig_ = plt.figure(figsize=figsize, dpi=400) # NOQA
axes_ = plt.subplot(131)
axes_.set_autoscalex_on(False)
axes_.set_autoscaley_on(False)
axes_.set_xlabel('Recall')
axes_.set_ylabel('Precision')
axes_.set_xlim([0.0, 1.01])
axes_.set_ylim([0.0, 1.01])
area_list = []
for color, config in zip(color_list, config_list):
ret = labeler_precision_recall_algo_plot(ibs, label_dict=label_dict,
color=color, **config)
area = ret[0]
area_list.append(area)
plt.title('Precision-Recall Curve', y=1.19)
plt.legend(bbox_to_anchor=(0.0, 1.02, 1.0, .102), loc=3, ncol=2, mode="expand",
borderaxespad=0.0)
axes_ = plt.subplot(132)
axes_.set_autoscalex_on(False)
axes_.set_autoscaley_on(False)
axes_.set_xlabel('False-Positive Rate')
axes_.set_ylabel('True-Positive Rate')
axes_.set_xlim([0.0, 1.01])
axes_.set_ylim([0.0, 1.01])
for color, config in zip(color_list, config_list):
labeler_roc_algo_plot(ibs, label_dict=label_dict,
color=color, **config)
plt.title('ROC Curve', y=1.19)
plt.legend(bbox_to_anchor=(0.0, 1.02, 1.0, .102), loc=3, ncol=2, mode="expand",
borderaxespad=0.0)
key_list = sorted(label_dict.keys())
fuzzy = fuzzy_dict is not None
if not fuzzy:
fuzzy_dict = {}
for index1, label1 in enumerate(key_list):
if label1 == 'ignore':
fuzzy_list = []
else:
species, viewpoint = label1.strip().split(':')
fuzzy_list = []
for index2, label2 in enumerate(key_list):
if species in label2:
fuzzy_list.append(index2)
fuzzy_dict[index1] = set(fuzzy_list)
axes_ = plt.subplot(133)
axes_.set_aspect(1)
gca_ = plt.gca()
gca_.grid(False)
correct_rate, fuzzy_rate = labeler_confusion_matrix_algo_plot(
ibs,
key_list,
species_mapping=species_mapping,
viewpoint_mapping=viewpoint_mapping,
category_mapping=category_mapping,
fig_=fig_,
axes_=axes_,
fuzzy_dict=fuzzy_dict,
test_gid_set=test_gid_set,
**kwargs
)
if fuzzy:
axes_.set_xlabel('Predicted (Correct = %0.02f%%, Fuzzy = %0.02f%%)' % (correct_rate * 100.0, fuzzy_rate * 100.0, ))
else:
axes_.set_xlabel('Predicted (Correct = %0.02f%%, Species = %0.02f%%)' % (correct_rate * 100.0, fuzzy_rate * 100.0, ))
axes_.set_ylabel('Ground-Truth')
# area_list_ = area_list[1:]
area_list_ = area_list
mAP = sum(area_list_) / len(area_list_)
args = (mAP * 100.0, )
plt.title('Confusion Matrix\nmAP = %0.02f' % args, y=1.19)
fig_filename = 'labeler-precision-recall-roc.png'
fig_path = abspath(expanduser(join('~', 'Desktop', fig_filename)))
plt.savefig(fig_path, bbox_inches='tight')
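# Hedged usage sketch (the weight filepath is one of the options listed
# above; the default targets the seadragon labeler):
#
#     >>> ibs.labeler_precision_recall_algo_display(
#     ...     labeler_weight_filepath='seadragon_v2')
#
# The reported mAP is the mean of the per-config P-R areas from the first
# panel.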
def canonical_precision_recall_algo(ibs, species, **kwargs):
depc = ibs.depc_annot
test_gid_set_ = set(general_get_imageset_gids(ibs, 'TEST_SET'))
test_gid_list_ = list(test_gid_set_)
test_aid_list_ = ut.flatten(ibs.get_image_aids(test_gid_list_))
test_aid_list_ = ibs.filter_annotation_set(test_aid_list_, species=species)
test_flag_list_ = ibs.get_annot_canonical(test_aid_list_)
test_aid_set = []
label_list = []
for aid, flag in zip(test_aid_list_, test_flag_list_):
if flag:
label = 'positive'
else:
label = 'negative'
test_aid_set.append(aid)
label_list.append(label)
prediction_list = depc.get_property('classifier', test_aid_set, 'class', config=kwargs)
confidence_list = depc.get_property('classifier', test_aid_set, 'score', config=kwargs)
confidence_list = [
confidence if prediction == 'positive' else 1.0 - confidence
for prediction, confidence in zip(prediction_list, confidence_list)
]
return general_precision_recall_algo(ibs, label_list, confidence_list)
def canonical_precision_recall_algo_plot(ibs, **kwargs):
label = kwargs['label']
print('Processing Precision-Recall for: %r' % (label, ))
conf_list, pr_list, re_list, tpr_list, fpr_list = canonical_precision_recall_algo(ibs, **kwargs)
return general_area_best_conf(conf_list, re_list, pr_list, **kwargs)
def canonical_roc_algo_plot(ibs, **kwargs):
label = kwargs['label']
print('Processing ROC for: %r' % (label, ))
conf_list, pr_list, re_list, tpr_list, fpr_list = canonical_precision_recall_algo(ibs, **kwargs)
return general_area_best_conf(conf_list, fpr_list, tpr_list, interpolate=False,
target=(0.0, 1.0), **kwargs)
def canonical_confusion_matrix_algo_plot(ibs, label, color, conf, species, output_cases=False, **kwargs):
print('Processing Confusion Matrix for: %r (Conf = %0.02f)' % (label, conf, ))
depc = ibs.depc_annot
test_gid_set_ = set(general_get_imageset_gids(ibs, 'TEST_SET'))
test_gid_list_ = list(test_gid_set_)
test_aid_list_ = ut.flatten(ibs.get_image_aids(test_gid_list_))
test_aid_list_ = ibs.filter_annotation_set(test_aid_list_, species=species)
test_flag_list_ = ibs.get_annot_canonical(test_aid_list_)
test_aid_set = []
label_list = []
for aid, flag in zip(test_aid_list_, test_flag_list_):
if flag:
label = 'positive'
else:
label = 'negative'
test_aid_set.append(aid)
label_list.append(label)
prediction_list = depc.get_property('classifier', test_aid_set, 'class', config=kwargs)
confidence_list = depc.get_property('classifier', test_aid_set, 'score', config=kwargs)
confidence_list = [
confidence if prediction == 'positive' else 1.0 - confidence
for prediction, confidence in zip(prediction_list, confidence_list)
]
prediction_list = [
'positive' if confidence >= conf else 'negative'
for confidence in confidence_list
]
if output_cases:
output_path = 'canonical-confusion-incorrect'
output_path = abspath(expanduser(join('~', 'Desktop', output_path)))
positive_path = join(output_path, 'positive')
negative_path = join(output_path, 'negative')
ut.delete(output_path)
ut.ensuredir(output_path)
ut.ensuredir(positive_path)
ut.ensuredir(negative_path)
config = {
'dim_size': (192, 192),
'resize_dim': 'wh',
}
chip_list = ibs.depc_annot.get_property('chips', test_aid_set, 'img', config=config)
zipped = zip(test_aid_set, chip_list, label_list, prediction_list)
for aid, chip, label, prediction in zipped:
if label == prediction:
continue
# Get path
image_path = positive_path if label == 'positive' else negative_path
image_filename = 'hardidx_%d_pred_%s_case_fail.jpg' % (aid, prediction, )
image_filepath = join(image_path, image_filename)
# Save path
cv2.imwrite(image_filepath, chip)
category_list = ['positive', 'negative']
category_mapping = {
'positive': 0,
'negative': 1,
}
return general_confusion_matrix_algo(label_list, prediction_list, category_list,
category_mapping, **kwargs)
@register_ibs_method
def canonical_precision_recall_algo_display(ibs, figsize=(20, 20)):
import matplotlib.pyplot as plt
import plottool as pt
fig_ = plt.figure(figsize=figsize, dpi=400)
config_list = [
{'label': 'CA V1 Ensemble', 'classifier_algo': 'densenet', 'classifier_weight_filepath': 'canonical_zebra_grevys_v1', 'species': 'zebra_grevys'}, # SMALLER DATASET
{'label': 'CA V2 Ensemble', 'classifier_algo': 'densenet', 'classifier_weight_filepath': 'canonical_zebra_grevys_v2', 'species': 'zebra_grevys'}, # BROKEN L/R AUGMENTATION
{'label': 'CA V3 Ensemble', 'classifier_algo': 'densenet', 'classifier_weight_filepath': 'canonical_zebra_grevys_v3', 'species': 'zebra_grevys'}, # LARGER DATASET, TOO HARSH AUGMENTATION
{'label': 'CA V4 Ensemble', 'classifier_algo': 'densenet', 'classifier_weight_filepath': 'canonical_zebra_grevys_v4', 'species': 'zebra_grevys'}, # BETTER AUGMENTATION
# {'label': 'CA V4 Model 0', 'classifier_algo': 'densenet', 'classifier_weight_filepath': 'canonical_zebra_grevys_v4:0', 'species': 'zebra_grevys'},
# {'label': 'CA V4 Model 1', 'classifier_algo': 'densenet', 'classifier_weight_filepath': 'canonical_zebra_grevys_v4:1', 'species': 'zebra_grevys'},
# {'label': 'CA V4 Model 2', 'classifier_algo': 'densenet', 'classifier_weight_filepath': 'canonical_zebra_grevys_v4:2', 'species': 'zebra_grevys'},
]
color_list = []
# color_list = [(0, 0, 0)]
color_list += pt.distinct_colors(len(config_list) - len(color_list), randomize=False)
axes_ = plt.subplot(221)
axes_.set_autoscalex_on(False)
axes_.set_autoscaley_on(False)
axes_.set_xlabel('Recall')
axes_.set_ylabel('Precision')
axes_.set_xlim([0.0, 1.01])
axes_.set_ylim([0.0, 1.01])
ret_list = [
canonical_precision_recall_algo_plot(ibs, color=color, **config)
for color, config in zip(color_list, config_list)
]
area_list = [ ret[0] for ret in ret_list ]
conf_list = [ ret[1] for ret in ret_list ]
# index = np.argmax(area_list)
index = -1
best_label1 = config_list[index]['label']
best_config1 = config_list[index]
best_color1 = color_list[index]
best_area1 = area_list[index]
best_conf1 = conf_list[index]
plt.title('Precision-Recall Curve (Best: %s, AP = %0.02f)' % (best_label1, best_area1, ), y=1.10)
plt.legend(bbox_to_anchor=(0.0, 1.02, 1.0, .102), loc=3, ncol=2, mode="expand",
borderaxespad=0.0)
axes_ = plt.subplot(222)
axes_.set_autoscalex_on(False)
axes_.set_autoscaley_on(False)
axes_.set_xlabel('False-Positive Rate')
axes_.set_ylabel('True-Positive Rate')
axes_.set_xlim([0.0, 1.01])
axes_.set_ylim([0.0, 1.01])
ret_list = [
canonical_roc_algo_plot(ibs, color=color, **config)
for color, config in zip(color_list, config_list)
]
area_list = [ ret[0] for ret in ret_list ]
conf_list = [ ret[1] for ret in ret_list ]
# index = np.argmax(area_list)
index = -1
best_label2 = config_list[index]['label']
best_config2 = config_list[index]
best_color2 = color_list[index]
best_area2 = area_list[index]
best_conf2 = conf_list[index]
plt.title('ROC Curve (Best: %s, AUC = %0.02f)' % (best_label2, best_area2, ), y=1.10)
plt.legend(bbox_to_anchor=(0.0, 1.02, 1.0, .102), loc=3, ncol=2, mode="expand",
borderaxespad=0.0)
axes_ = plt.subplot(223)
axes_.set_aspect(1)
gca_ = plt.gca()
gca_.grid(False)
correct_rate, _ = canonical_confusion_matrix_algo_plot(ibs, color=best_color1,
conf=best_conf1, fig_=fig_, axes_=axes_,
output_cases=True, **best_config1)
axes_.set_xlabel('Predicted (Correct = %0.02f%%)' % (correct_rate * 100.0, ))
axes_.set_ylabel('Ground-Truth')
plt.title('P-R Confusion Matrix (OP = %0.02f)' % (best_conf1, ), y=1.12)
axes_ = plt.subplot(224)
axes_.set_aspect(1)
gca_ = plt.gca()
gca_.grid(False)
correct_rate, _ = canonical_confusion_matrix_algo_plot(ibs, color=best_color2,
conf=best_conf2, fig_=fig_, axes_=axes_,
**best_config2)
axes_.set_xlabel('Predicted (Correct = %0.02f%%)' % (correct_rate * 100.0, ))
axes_.set_ylabel('Ground-Truth')
plt.title('ROC Confusion Matrix (OP = %0.02f)' % (best_conf2, ), y=1.12)
fig_filename = 'canonical-precision-recall-roc.png'
fig_path = abspath(expanduser(join('~', 'Desktop', fig_filename)))
plt.savefig(fig_path, bbox_inches='tight')
def _canonical_get_boxes(ibs, gid_list, species):
from ibeis.web.appfuncs import CANONICAL_PART_TYPE
aid_list = ut.flatten(ibs.get_image_aids(gid_list))
aid_list = ibs.filter_annotation_set(aid_list, species=species)
flag_list = ibs.get_annot_canonical(aid_list)
part_rowids_list = ibs.get_annot_part_rowids(aid_list)
part_types_list = list(map(ibs.get_part_types, part_rowids_list))
aid_set = []
bbox_set = []
zipped = zip(aid_list, flag_list, part_rowids_list, part_types_list)
for aid, flag, part_rowid_list, part_type_list in zipped:
part_rowid_ = None
if flag:
for part_rowid, part_type in zip(part_rowid_list, part_type_list):
if part_type == CANONICAL_PART_TYPE:
assert part_rowid_ is None, 'Cannot have multiple canonical parts for one annotation'
part_rowid_ = part_rowid
if part_rowid_ is not None:
axtl, aytl, aw, ah = ibs.get_annot_bboxes(aid)
axbr, aybr = axtl + aw, aytl + ah
pxtl, pytl, pw, ph = ibs.get_part_bboxes(part_rowid_)
pxbr, pybr = pxtl + pw, pytl + ph
x0 = pxtl - axtl
y0 = pytl - aytl
x1 = axbr - pxbr
y1 = aybr - pybr
x0 = max(x0 / aw, 0.0)
y0 = max(y0 / ah, 0.0)
x1 = max(x1 / aw, 0.0)
y1 = max(y1 / ah, 0.0)
assert x0 + x1 < 0.99
assert y0 + y1 < 0.99
bbox = (x0, y0, x1, y1)
aid_set.append(aid)
bbox_set.append(bbox)
return aid_set, bbox_set
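# _canonical_get_boxes() encodes each canonical part as four margins
# (x0, y0, x1, y1): the gaps between the part box and its parent annotation,
# normalized by the annotation's width/height and clamped to be
# non-negative.  A quick numeric check, assuming an annot bbox of
# (0, 0, 100, 50) and a part bbox of (10, 5, 80, 40):
#
#     >>> axtl, aytl, aw, ah = 0, 0, 100, 50
#     >>> pxtl, pytl, pw, ph = 10, 5, 80, 40
#     >>> x0 = (pxtl - axtl) / aw                 # 0.1
#     >>> y0 = (pytl - aytl) / ah                 # 0.1
#     >>> x1 = ((axtl + aw) - (pxtl + pw)) / aw   # 0.1
#     >>> y1 = ((aytl + ah) - (pytl + ph)) / ah   # 0.1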
def canonical_localization_deviation_plot(ibs, attribute, color, index,
label=None, species=None, marker='o',
**kwargs):
import random
import matplotlib.pyplot as plt
assert None not in [label, species]
print('Processing Deviation for: %r' % (label, ))
depc = ibs.depc_annot
if attribute == 'x0':
take_index = 0
elif attribute == 'y0':
take_index = 1
elif attribute == 'x1':
take_index = 2
elif attribute == 'y1':
take_index = 3
else:
raise ValueError('attribute not valid')
test_gid_set_ = set(general_get_imageset_gids(ibs, 'TEST_SET'))
test_gid_list_ = list(test_gid_set_)
test_aid_set, test_bbox_set = _canonical_get_boxes(ibs, test_gid_list_, species)
value_list = ut.take_column(test_bbox_set, take_index)
prediction_list = depc.get_property('canonical', test_aid_set, attribute, config=kwargs)
x_list = []
y_list = []
overshoot = 0.0
for value, prediction in zip(value_list, prediction_list):
x = random.uniform(index, index + 1)
y = (value - prediction)
if y < 0:
overshoot += 1
x_list.append(x)
y_list.append(y)
mean = np.mean(y_list)
std = np.std(y_list)
overshoot /= len(y_list)
label = '%s (Over: %0.02f, %0.02f+/-%0.02f)' % (label, overshoot, mean, std, )
plt.plot(x_list, y_list, color=color, linestyle='None', marker=marker, label=label, alpha=0.5)
plt.plot([index, index + 1], [0.0, 0.0], color=(0.2, 0.2, 0.2), linestyle='-', alpha=0.3)
if index % 4 == 3:
plt.plot([index + 1, index + 1], [-1.0, 1.0], color=(0.2, 0.2, 0.2), linestyle='--', alpha=0.1)
color = 'xkcd:gold'
marker = 'D'
plt.errorbar([index + 0.5], [mean], [std], linestyle='None', color=color, marker=marker, zorder=999, barsabove=True)
# plt.plot([index + 0.5], [mean], color=color, marker=marker)
def canonical_localization_iou_plot(ibs, color, index,
label=None, species=None, marker='o',
threshold=0.75, **kwargs):
import random
import matplotlib.pyplot as plt
def _convert(bbox):
x0, y0, x1, y1 = bbox
retval = {
'xtl' : x0,
'ytl' : y0,
'xbr' : 1.0 - x1,
'ybr' : 1.0 - y1,
}
retval['width'] = retval['xbr'] - retval['xtl']
retval['height'] = retval['ybr'] - retval['ytl']
return retval
assert None not in [label, species]
print('Processing IoU for: %r' % (label, ))
depc = ibs.depc_annot
test_gid_set_ = set(general_get_imageset_gids(ibs, 'TEST_SET'))
test_gid_list_ = list(test_gid_set_)
test_aid_set, test_bbox_set = _canonical_get_boxes(ibs, test_gid_list_, species)
prediction_list = depc.get_property('canonical', test_aid_set, None, config=kwargs)
gt_list = [_convert(test_bbox) for test_bbox in test_bbox_set]
pred_list = [_convert(prediction) for prediction in prediction_list]
correct = 0.0
x_list = []
y_list = []
for gt, pred in zip(gt_list, pred_list):
overlap = general_overlap([gt], [pred])
x = random.uniform(index, index + 1)
y = overlap[0][0]
if y >= threshold:
correct += 1.0
x_list.append(x)
y_list.append(y)
accuracy = correct / len(y_list)
mean = np.mean(y_list)
std = np.std(y_list)
label = '%s (Acc: %0.02f, %0.02f+/-%0.02f)' % (label, accuracy, mean, std, )
plt.plot(x_list, y_list, color=color, linestyle='None', marker=marker, label=label, alpha=0.5)
for y_value in [0.5, 0.75, 0.9]:
plt.plot([index, index + 1], [y_value, y_value], color=(0.2, 0.2, 0.2), linestyle='-', alpha=0.3)
if index % 4 == 3:
plt.plot([index + 1, index + 1], [0.0, 1.0], color=(0.2, 0.2, 0.2), linestyle='--', alpha=0.1)
color = 'xkcd:gold'
marker = 'D'
plt.errorbar([index + 0.5], [mean], [std], linestyle='None', color=color, marker=marker, zorder=999, barsabove=True)
# plt.plot([index + 0.5], [mean], color=color, marker=marker)
return test_aid_set, test_bbox_set, prediction_list, y_list, accuracy
@register_ibs_method
def canonical_localization_iou_visualize(ibs, index, test_aid_set, test_bbox_set, prediction_list,
overlap_list, color_list, label=None, species=None,
**kwargs):
assert None not in [label, species]
assert len(color_list) == 4
print('Processing Renderings for: %r' % (label, ))
color_list_ = []
for color in color_list:
color_ = []
for value in color:
value_ = int(np.around(255.0 * value))
color_ = [value_] + color_
color_ = tuple(color_)
color_list_.append(color_)
color_list = color_list_
output_path = expanduser(join('~', 'Desktop', 'canonical-regression-%d' % (index, )))
ut.delete(output_path)
ut.ensuredir(output_path)
config = {
'dim_size': 600,
'resize_dim': 'maxwh',
}
chip_list = ibs.depc_annot.get_property('chips', test_aid_set, 'img', config=config)
zipped = list(zip(test_aid_set, chip_list, test_bbox_set, prediction_list, overlap_list))
for test_aid, chip, test_bbox, prediction, overlap in zipped:
h, w = chip.shape[:2]
chipa = chip.copy()
chipb = chip.copy()
x0a, y0a, x1a, y1a = test_bbox
x0b, y0b, x1b, y1b = prediction
x0a = int(np.around(x0a * w))
y0a = int(np.around(y0a * h))
x1a = int(np.around(x1a * w))
y1a = int(np.around(y1a * h))
x0b = int(np.around(x0b * w))
y0b = int(np.around(y0b * h))
x1b = int(np.around(x1b * w))
y1b = int(np.around(y1b * h))
x1a = w - x1a
x1b = w - x1b
y1a = h - y1a
y1b = h - y1b
chipa = cv2.line(chipa, (x0a, y0a), (x0a, y1a), color_list[0], 3)
chipa = cv2.line(chipa, (x0a, y0a), (x1a, y0a), color_list[1], 3)
chipa = cv2.line(chipa, (x1a, y0a), (x1a, y1a), color_list[2], 3)
chipa = cv2.line(chipa, (x0a, y1a), (x1a, y1a), color_list[3], 3)
chipb = cv2.line(chipb, (x0b, y0b), (x0b, y1b), color_list[0], 3)
chipb = cv2.line(chipb, (x0b, y0b), (x1b, y0b), color_list[1], 3)
chipb = cv2.line(chipb, (x1b, y0b), (x1b, y1b), color_list[2], 3)
chipb = cv2.line(chipb, (x0b, y1b), (x1b, y1b), color_list[3], 3)
canvas = np.hstack((chipa, chipb))
canvas_filepath = join(output_path, 'canonical-regression-iou-%0.02f-aid-%s.jpg' % (overlap, test_aid, ))
cv2.imwrite(canvas_filepath, canvas)
@register_ibs_method
def canonical_localization_precision_recall_algo_display(ibs, figsize=(20, 40)):
import matplotlib.pyplot as plt
import plottool as pt
fig_ = plt.figure(figsize=figsize, dpi=400) # NOQA
config_list = [
# {'label': 'CA V1 Ensemble', 'canonical_weight_filepath': 'canonical_zebra_grevys_v1', 'species': 'zebra_grevys'}, # OVER = 1.0, small dataset
# {'label': 'CA V1 Model 0', 'canonical_weight_filepath': 'canonical_zebra_grevys_v1:0', 'species': 'zebra_grevys'}, # OVER = 1.0, small dataset
# {'label': 'CA V1 Model 1', 'canonical_weight_filepath': 'canonical_zebra_grevys_v1:1', 'species': 'zebra_grevys'}, # OVER = 1.0, small dataset
# {'label': 'CA V1 Model 2', 'canonical_weight_filepath': 'canonical_zebra_grevys_v1:2', 'species': 'zebra_grevys'}, # OVER = 1.0, small dataset
# {'label': 'CA V2 Ensemble', 'canonical_weight_filepath': 'canonical_zebra_grevys_v2', 'species': 'zebra_grevys'}, # OVER = 1.0, large dataset
# {'label': 'CA V2 Model 0', 'canonical_weight_filepath': 'canonical_zebra_grevys_v2:0', 'species': 'zebra_grevys'}, # OVER = 1.0, large dataset
# {'label': 'CA V2 Model 1', 'canonical_weight_filepath': 'canonical_zebra_grevys_v2:1', 'species': 'zebra_grevys'}, # OVER = 1.0, large dataset
# {'label': 'CA V2 Model 2', 'canonical_weight_filepath': 'canonical_zebra_grevys_v2:2', 'species': 'zebra_grevys'}, # OVER = 1.0, large dataset
# {'label': 'CA V3 Ensemble', 'canonical_weight_filepath': 'canonical_zebra_grevys_v3', 'species': 'zebra_grevys'}, # OVER = 2.0
# {'label': 'CA V3 Model 0', 'canonical_weight_filepath': 'canonical_zebra_grevys_v3:0', 'species': 'zebra_grevys'}, # OVER = 2.0
# {'label': 'CA V3 Model 1', 'canonical_weight_filepath': 'canonical_zebra_grevys_v3:1', 'species': 'zebra_grevys'}, # OVER = 2.0
# {'label': 'CA V3 Model 2', 'canonical_weight_filepath': 'canonical_zebra_grevys_v3:2', 'species': 'zebra_grevys'}, # OVER = 2.0
{'label': 'CA V5-1.0 Ens.', 'canonical_weight_filepath': 'canonical_zebra_grevys_v5', 'species': 'zebra_grevys'}, # OVER = 1.0
{'label': 'CA V5-1.0 M0', 'canonical_weight_filepath': 'canonical_zebra_grevys_v5:0', 'species': 'zebra_grevys'}, # OVER = 1.0
{'label': 'CA V5-1.0 M1', 'canonical_weight_filepath': 'canonical_zebra_grevys_v5:1', 'species': 'zebra_grevys'}, # OVER = 1.0
{'label': 'CA V5-1.0 M2', 'canonical_weight_filepath': 'canonical_zebra_grevys_v5:2', 'species': 'zebra_grevys'}, # OVER = 1.0
{'label': 'CA V6-2.0 Ens.', 'canonical_weight_filepath': 'canonical_zebra_grevys_v6', 'species': 'zebra_grevys'}, # OVER = 2.0
{'label': 'CA V6-2.0 M0', 'canonical_weight_filepath': 'canonical_zebra_grevys_v6:0', 'species': 'zebra_grevys'}, # OVER = 2.0
{'label': 'CA V6-2.0 M1', 'canonical_weight_filepath': 'canonical_zebra_grevys_v6:1', 'species': 'zebra_grevys'}, # OVER = 2.0
{'label': 'CA V6-2.0 M2', 'canonical_weight_filepath': 'canonical_zebra_grevys_v6:2', 'species': 'zebra_grevys'}, # OVER = 2.0
{'label': 'CA V4-4.0 Ens.', 'canonical_weight_filepath': 'canonical_zebra_grevys_v4', 'species': 'zebra_grevys'}, # OVER = 4.0
{'label': 'CA V4-4.0 M0', 'canonical_weight_filepath': 'canonical_zebra_grevys_v4:0', 'species': 'zebra_grevys'}, # OVER = 4.0
{'label': 'CA V4-4.0 M1', 'canonical_weight_filepath': 'canonical_zebra_grevys_v4:1', 'species': 'zebra_grevys'}, # OVER = 4.0
{'label': 'CA V4-4.0 M2', 'canonical_weight_filepath': 'canonical_zebra_grevys_v4:2', 'species': 'zebra_grevys'}, # OVER = 4.0
]
color_list = []
# color_list = [(0, 0, 0)]
color_list += pt.distinct_colors(len(config_list) - len(color_list), randomize=False)
min_, max_ = -1.0, 1.0
axes_ = plt.subplot(321)
axes_.grid(True, which='major')
axes_.grid(False, which='minor')
axes_.set_autoscalex_on(False)
axes_.set_autoscaley_on(False)
axes_.get_xaxis().set_ticks([])
axes_.set_ylabel('GT - Pred Deviation (fraction of annot size)')
axes_.set_xlim([0.0, len(config_list)])
axes_.set_ylim([min_, max_])
axes_.fill_between([0.0, len(config_list)], -1, 0, facecolor='red', alpha=0.1)
for index, (color, config) in enumerate(zip(color_list, config_list)):
canonical_localization_deviation_plot(ibs, 'x0', color=color, index=index, **config)
plt.title('X0 Deviation Scatter Plot')
plt.legend(bbox_to_anchor=(0.0, 1.04, 1.0, .102), loc=3, ncol=2, mode="expand",
borderaxespad=0.0)
axes_ = plt.subplot(322)
axes_.grid(True, which='major')
axes_.grid(False, which='minor')
axes_.set_autoscalex_on(False)
axes_.set_autoscaley_on(False)
axes_.get_xaxis().set_ticks([])
axes_.set_ylabel('GT - Pred Deviation (fraction of annot size)')
axes_.set_xlim([0.0, len(config_list)])
axes_.set_ylim([min_, max_])
axes_.fill_between([0.0, len(config_list)], -1, 0, facecolor='red', alpha=0.1)
for index, (color, config) in enumerate(zip(color_list, config_list)):
canonical_localization_deviation_plot(ibs, 'y0', color=color, index=index, **config)
plt.title('Y0 Deviation Scatter Plot')
plt.legend(bbox_to_anchor=(0.0, 1.04, 1.0, .102), loc=3, ncol=2, mode="expand",
borderaxespad=0.0)
axes_ = plt.subplot(323)
axes_.grid(True, which='major')
axes_.grid(False, which='minor')
axes_.set_autoscalex_on(False)
axes_.set_autoscaley_on(False)
axes_.get_xaxis().set_ticks([])
axes_.set_ylabel('GT - Pred Deviation (fraction of annot size)')
axes_.set_xlim([0.0, len(config_list)])
axes_.set_ylim([min_, max_])
axes_.fill_between([0.0, len(config_list)], -1, 0, facecolor='red', alpha=0.1)
for index, (color, config) in enumerate(zip(color_list, config_list)):
canonical_localization_deviation_plot(ibs, 'x1', color=color, index=index, **config)
plt.title('X1 Deviation Scatter Plot')
plt.legend(bbox_to_anchor=(0.0, 1.04, 1.0, .102), loc=3, ncol=2, mode="expand",
borderaxespad=0.0)
axes_ = plt.subplot(324)
axes_.grid(True, which='major')
axes_.grid(False, which='minor')
axes_.set_autoscalex_on(False)
axes_.set_autoscaley_on(False)
axes_.get_xaxis().set_ticks([])
axes_.set_ylabel('GT - Pred Deviation (fraction of annot size)')
axes_.set_xlim([0.0, len(config_list)])
axes_.set_ylim([min_, max_])
axes_.fill_between([0.0, len(config_list)], -1, 0, facecolor='red', alpha=0.1)
for index, (color, config) in enumerate(zip(color_list, config_list)):
canonical_localization_deviation_plot(ibs, 'y1', color=color, index=index, **config)
plt.title('Y1 Deviation Scatter Plot')
plt.legend(bbox_to_anchor=(0.0, 1.04, 1.0, .102), loc=3, ncol=2, mode="expand",
borderaxespad=0.0)
axes_ = plt.subplot(325)
axes_.grid(True, which='major')
axes_.grid(False, which='minor')
axes_.set_autoscalex_on(False)
axes_.set_autoscaley_on(False)
axes_.get_xaxis().set_ticks([])
axes_.set_ylabel('GT - Pred Deviation (fraction of annot size)')
axes_.set_xlim([0.0, len(config_list)])
axes_.set_ylim([min_, max_])
axes_.fill_between([0.0, len(config_list)], -1, 0, facecolor='red', alpha=0.1)
assert len(config_list) % 4 == 0
rounds = len(config_list) // 4
colors = pt.distinct_colors(4, randomize=False)
attribute_list = []
color_list_ = []
for _ in range(rounds):
attribute_list += ['x0', 'y0', 'x1', 'y1']
color_list_ += colors
for index, (attribute, color_) in enumerate(zip(attribute_list, color_list_)):
index_ = (index // 4) * 4
config_ = config_list[index_].copy()
config_['label'] = '%s %s' % (config_['label'], attribute, )
canonical_localization_deviation_plot(ibs, attribute, color=color_, index=index, **config_)
plt.title('Ensemble Deviation Scatter Plot')
plt.legend(bbox_to_anchor=(0.0, 1.04, 1.0, .102), loc=3, ncol=2, mode="expand",
borderaxespad=0.0)
axes_ = plt.subplot(326)
axes_.grid(True, which='major')
axes_.grid(False, which='minor')
axes_.set_autoscalex_on(False)
axes_.set_autoscaley_on(False)
axes_.get_xaxis().set_ticks([])
axes_.set_ylabel('IoU')
axes_.set_xlim([0.0, len(config_list)])
axes_.set_ylim([0.0, 1.0])
for index, (color, config) in enumerate(zip(color_list, config_list)):
values_ = canonical_localization_iou_plot(ibs, color=color, index=index, **config)
if index % 4 == 0:
config_ = config_list[index]
test_aid_set, test_bbox_set, prediction_list, y_list, accuracy = values_
ibs.canonical_localization_iou_visualize(index, test_aid_set, test_bbox_set,
prediction_list, y_list, colors,
**config_)
plt.title('IoU Scatter Plot')
plt.legend(bbox_to_anchor=(0.0, 1.04, 1.0, .102), loc=3, ncol=2, mode="expand",
borderaxespad=0.0)
fig_filename = 'canonical-localization-deviance.png'
fig_path = abspath(expanduser(join('~', 'Desktop', fig_filename)))
plt.savefig(fig_path, bbox_inches='tight')
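# Hedged usage sketch (requires the canonical_zebra_grevys_* models listed
# above):
#
#     >>> ibs.canonical_localization_precision_recall_algo_display()
#
# Each ensemble's first member additionally renders side-by-side GT vs
# prediction chips into ~/Desktop/canonical-regression-<index>/.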
@register_ibs_method
def background_accuracy_display(ibs, category_list, test_gid_set=None,
output_path=None):
if output_path is None:
output_path = abspath(expanduser(join('~', 'Desktop', 'background')))
ut.ensuredir(output_path)
if test_gid_set is None:
test_gid_set = set(general_get_imageset_gids(ibs, 'TEST_SET'))
test_gid_set = list(test_gid_set)
aids_list = ibs.get_image_aids(test_gid_set)
aid_list = ut.flatten(aids_list)
species_list = ibs.get_annot_species_texts(aid_list)
aid_list = [
aid
for aid, species in zip(aid_list, species_list)
if species in category_list
]
species_list = ibs.get_annot_species_texts(aid_list)
gid_list = ibs.get_annot_gids(aid_list)
config2_ = {
'fw_detector': 'cnn'
}
hough_cpath_list = ibs.get_annot_probchip_fpath(aid_list, config2_=config2_)
image_list = [vt.imread(hough_cpath) for hough_cpath in hough_cpath_list]
chip_list = ibs.get_annot_chips(aid_list, config2_=config2_)
zipped = zip(aid_list, gid_list, species_list, image_list, chip_list)
for index, (aid, gid, species, image, chip) in enumerate(zipped):
print(index)
mask = vt.resize_mask(image, chip)
blended = vt.blend_images_multiply(chip, mask)
blended *= 255.0
blended = np.around(blended)
blended[blended < 0] = 0
blended[blended > 255] = 255
blended = blended.astype(np.uint8)
canvas = np.hstack((chip, mask, blended))
output_filepath = join(output_path, 'background.%s.%d.%d.png' % (species, gid, aid, ))
cv2.imwrite(output_filepath, canvas)
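# The overlay above multiplies the chip by the resized probability mask and
# clamps the float result back into the valid 8-bit range.  A minimal sketch
# of the same rounding/clamping, assuming a float image scaled to [0, 255]:
#
#     >>> import numpy as np
#     >>> blended = np.array([-3.2, 128.7, 300.1])
#     >>> np.clip(np.around(blended), 0, 255).astype(np.uint8)
#     array([  0, 129, 255], dtype=uint8)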
def aoi2_precision_recall_algo(ibs, category_list=None, test_gid_set_=None, **kwargs):
depc = ibs.depc_annot
if test_gid_set_ is None:
test_gid_set_ = general_get_imageset_gids(ibs, 'TEST_SET')
test_aid_list_ = list(set(ut.flatten(ibs.get_image_aids(test_gid_set_))))
species_list = ibs.get_annot_species_texts(test_aid_list_)
interest_list = ibs.get_annot_interest(test_aid_list_)
test_aid_list = []
label_list = []
for test_aid, species, interest in zip(test_aid_list_, species_list, interest_list):
if category_list is not None:
if species not in category_list:
continue
if interest is None:
continue
label = 'positive' if interest else 'negative'
test_aid_list.append(test_aid)
label_list.append(label)
prediction_list = depc.get_property('aoi_two', test_aid_list, 'class', config=kwargs)
confidence_list = depc.get_property('aoi_two', test_aid_list, 'score', config=kwargs)
confidence_list = [
confidence if prediction == 'positive' else 1.0 - confidence
for prediction, confidence in zip(prediction_list, confidence_list)
]
return general_precision_recall_algo(ibs, label_list, confidence_list, **kwargs)
def aoi2_precision_recall_algo_plot(ibs, **kwargs):
label = kwargs['label']
print('Processing Precision-Recall for: %r' % (label, ))
conf_list, pr_list, re_list, tpr_list, fpr_list = aoi2_precision_recall_algo(ibs, **kwargs)
return general_area_best_conf(conf_list, re_list, pr_list, **kwargs)
def aoi2_roc_algo_plot(ibs, **kwargs):
label = kwargs['label']
print('Processing ROC for: %r' % (label, ))
conf_list, pr_list, re_list, tpr_list, fpr_list = aoi2_precision_recall_algo(ibs, **kwargs)
return general_area_best_conf(conf_list, fpr_list, tpr_list, interpolate=False,
target=(0.0, 1.0), **kwargs)
def aoi2_confusion_matrix_algo_plot(ibs, label, color, conf, output_cases=False,
category_list=None, test_gid_set_=None, **kwargs):
print('Processing Confusion Matrix for: %r (Conf = %0.02f)' % (label, conf, ))
depc = ibs.depc_annot
if test_gid_set_ is None:
test_gid_set_ = general_get_imageset_gids(ibs, 'TEST_SET')
test_aid_list_ = list(set(ut.flatten(ibs.get_image_aids(test_gid_set_))))
species_list = ibs.get_annot_species_texts(test_aid_list_)
interest_list = ibs.get_annot_interest(test_aid_list_)
test_aid_list = []
label_list = []
for test_aid, species, interest in zip(test_aid_list_, species_list, interest_list):
if category_list is not None:
if species not in category_list:
continue
if interest is None:
continue
label = 'positive' if interest else 'negative'
test_aid_list.append(test_aid)
label_list.append(label)
prediction_list = depc.get_property('aoi_two', test_aid_list, 'class', config=kwargs)
confidence_list = depc.get_property('aoi_two', test_aid_list, 'score', config=kwargs)
confidence_list = [
confidence if prediction == 'positive' else 1.0 - confidence
for prediction, confidence in zip(prediction_list, confidence_list)
]
prediction_list = [
'positive' if confidence >= conf else 'negative'
for confidence in confidence_list
]
if output_cases:
output_path = 'aoi2-confusion-incorrect'
output_path = abspath(expanduser(join('~', 'Desktop', output_path)))
ut.delete(output_path)
ut.ensuredir(output_path)
manifest_dict = {}
test_gid_list = ibs.get_annot_gids(test_aid_list)
zipped = zip(test_gid_list, test_aid_list, label_list, prediction_list)
for test_gid, test_aid, label, prediction in zipped:
if test_gid not in manifest_dict:
manifest_dict[test_gid] = {}
assert test_aid not in manifest_dict[test_gid]
manifest_dict[test_gid][test_aid] = (label, prediction, )
for test_gid in manifest_dict:
image = ibs.get_images(test_gid)
w, h = ibs.get_image_sizes(test_gid)
image = _resize(image, t_width=600, verbose=False)
height_, width_, channels_ = image.shape
for test_aid in manifest_dict[test_gid]:
label, prediction = manifest_dict[test_gid][test_aid]
bbox = ibs.get_annot_bboxes(test_aid)
xtl, ytl, width, height = bbox
xbr = xtl + width
ybr = ytl + height
xtl = int(np.round((xtl / w) * width_))
ytl = int(np.round((ytl / h) * height_))
xbr = int(np.round((xbr / w) * width_))
ybr = int(np.round((ybr / h) * height_))
if label == 'positive':
color = (255, 99, 46)
else:
color = (127, 255, 127)
cv2.rectangle(image, (xtl, ytl), (xbr, ybr), color, 4)
if prediction == 'positive':
color = (255, 99, 46)
else:
color = (127, 255, 127)
cv2.rectangle(image, (xtl - 4, ytl - 4), (xbr + 4, ybr + 4), color, 4)
image_filename = 'image_%d.png' % (test_gid, )
image_filepath = join(output_path, image_filename)
cv2.imwrite(image_filepath, image)
category_list = ['positive', 'negative']
category_mapping = {
'positive': 0,
'negative': 1,
}
return general_confusion_matrix_algo(label_list, prediction_list, category_list,
category_mapping, size=20, **kwargs)
@register_ibs_method
def aoi2_precision_recall_algo_display(ibs, test_gid_list=None, output_cases=False, figsize=(20, 20)):
import matplotlib.pyplot as plt
import plottool as pt
fig_ = plt.figure(figsize=figsize)
test_gid_set = None if test_gid_list is None else sorted(set(test_gid_list))
config_list = [
# {'label': 'All Species', 'aoi_two_weight_filepath': 'ggr2', 'category_list': None},
# {'label': 'Masai Giraffe', 'aoi_two_weight_filepath': 'ggr2', 'category_list': ['giraffe_masai']},
# {'label': 'Reticulated Giraffe', 'aoi_two_weight_filepath': 'ggr2', 'category_list': ['giraffe_reticulated']},
# {'label': 'Sea Turtle', 'aoi_two_weight_filepath': 'ggr2', 'category_list': ['turtle_sea']},
# {'label': 'Whale Fluke', 'aoi_two_weight_filepath': 'ggr2', 'category_list': ['whale_fluke']},
# {'label': 'Grevy\'s Zebra', 'aoi_two_weight_filepath': 'ggr2', 'category_list': ['zebra_grevys']},
# {'label': 'Plains Zebra', 'aoi_two_weight_filepath': 'ggr2', 'category_list': ['zebra_plains']},
# {'label': 'Hammerhead', 'aoi_two_weight_filepath': 'hammerhead', 'category_list': ['shark_hammerhead']},
{'label': 'Jaguar', 'aoi_two_weight_filepath': 'jaguar', 'category_list': ['jaguar']},
]
color_list = [(0, 0, 0)]
color_list += pt.distinct_colors(len(config_list) - len(color_list), randomize=False)
axes_ = plt.subplot(221)
axes_.set_autoscalex_on(False)
axes_.set_autoscaley_on(False)
axes_.set_xlabel('Recall')
axes_.set_ylabel('Precision')
axes_.set_xlim([0.0, 1.01])
axes_.set_ylim([0.0, 1.01])
ret_list = [
aoi2_precision_recall_algo_plot(ibs, color=color, test_gid_set_=test_gid_set, **config)
for color, config in zip(color_list, config_list)
]
area_list = [ ret[0] for ret in ret_list ]
conf_list = [ ret[1] for ret in ret_list ]
# index = np.argmax(area_list)
index = 0
best_label1 = config_list[index]['label']
best_config1 = config_list[index]
best_color1 = color_list[index]
best_area1 = area_list[index]
best_conf1 = conf_list[index]
plt.title('Precision-Recall Curve (Best: %s, AP = %0.02f)' % (best_label1, best_area1, ), y=1.10)
plt.legend(bbox_to_anchor=(0.0, 1.02, 1.0, .102), loc=3, ncol=2, mode="expand",
borderaxespad=0.0)
axes_ = plt.subplot(222)
axes_.set_autoscalex_on(False)
axes_.set_autoscaley_on(False)
axes_.set_xlabel('False-Positive Rate')
axes_.set_ylabel('True-Positive Rate')
axes_.set_xlim([0.0, 1.01])
axes_.set_ylim([0.0, 1.01])
    ret_list = [
        # Pass the same test set as the P-R plot so both curves use identical data.
        aoi2_roc_algo_plot(ibs, color=color, test_gid_set_=test_gid_set, **config)
        for color, config in zip(color_list, config_list)
    ]
area_list = [ ret[0] for ret in ret_list ]
conf_list = [ ret[1] for ret in ret_list ]
# index = np.argmax(area_list)
index = 0
best_label2 = config_list[index]['label']
best_config2 = config_list[index]
best_color2 = color_list[index]
best_area2 = area_list[index]
best_conf2 = conf_list[index]
    plt.title('ROC Curve (Best: %s, AUC = %0.02f)' % (best_label2, best_area2, ), y=1.10)
plt.legend(bbox_to_anchor=(0.0, 1.02, 1.0, .102), loc=3, ncol=2, mode="expand",
borderaxespad=0.0)
plt.plot([0.0, 1.0], [0.0, 1.0], color=(0.5, 0.5, 0.5), linestyle='--')
axes_ = plt.subplot(223)
axes_.set_aspect(1)
gca_ = plt.gca()
gca_.grid(False)
correct_rate, _ = aoi2_confusion_matrix_algo_plot(ibs, color=best_color1,
conf=best_conf1, fig_=fig_, axes_=axes_,
output_cases=output_cases,
test_gid_set_=test_gid_set,
**best_config1)
axes_.set_xlabel('Predicted (Correct = %0.02f%%)' % (correct_rate * 100.0, ))
axes_.set_ylabel('Ground-Truth')
plt.title('P-R Confusion Matrix (OP = %0.02f)' % (best_conf1, ), y=1.12)
axes_ = plt.subplot(224)
axes_.set_aspect(1)
gca_ = plt.gca()
gca_.grid(False)
correct_rate, _ = aoi2_confusion_matrix_algo_plot(ibs, color=best_color2,
conf=best_conf2, fig_=fig_, axes_=axes_,
test_gid_set_=test_gid_set,
**best_config2)
axes_.set_xlabel('Predicted (Correct = %0.02f%%)' % (correct_rate * 100.0, ))
axes_.set_ylabel('Ground-Truth')
plt.title('ROC Confusion Matrix (OP = %0.02f)' % (best_conf2, ), y=1.12)
fig_filename = 'aoi2-precision-recall-roc.png'
fig_path = abspath(expanduser(join('~', 'Desktop', fig_filename)))
plt.savefig(fig_path, bbox_inches='tight')
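# Minimal usage sketch (assumes an opened IBEIS controller with annotated
# images and a trained 'aoi_two' model registered in the image depcache):
#     import ibeis
#     ibs = ibeis.opendb('testdb1')
#     ibs.aoi2_precision_recall_algo_display(output_cases=True)
# The combined figure is written to ~/Desktop/aoi2-precision-recall-roc.png.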
def detector_parse_gt(ibs, test_gid_list=None, **kwargs):
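    """Collect normalized ground-truth annotations for the given (or TEST_SET) images."""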
if test_gid_list is None:
test_gid_list = general_get_imageset_gids(ibs, 'TEST_SET', **kwargs)
uuid_list = ibs.get_image_uuids(test_gid_list)
gid_list = ibs.get_image_gids_from_uuid(uuid_list)
gt_dict = {}
for gid, uuid in zip(gid_list, uuid_list):
width, height = ibs.get_image_sizes(gid)
aid_list = ibs.get_image_aids(gid)
gt_list = []
for aid in aid_list:
bbox = ibs.get_annot_bboxes(aid)
temp = {
'gid' : gid,
'xtl' : bbox[0] / width,
'ytl' : bbox[1] / height,
'xbr' : (bbox[0] + bbox[2]) / width,
'ybr' : (bbox[1] + bbox[3]) / height,
'width' : bbox[2] / width,
'height' : bbox[3] / height,
'class' : ibs.get_annot_species_texts(aid),
'viewpoint' : ibs.get_annot_viewpoints(aid),
'confidence' : 1.0,
}
gt_list.append(temp)
gt_dict[uuid] = gt_list
return gt_dict
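# Shape of the returned ground truth, keyed by image UUID with coordinates
# normalized to [0, 1] by image width/height (illustrative values):
#     {UUID('...'): [{'gid': 1, 'xtl': 0.1, 'ytl': 0.2, 'xbr': 0.5, 'ybr': 0.8,
#                     'width': 0.4, 'height': 0.6, 'class': 'zebra_plains',
#                     'viewpoint': 'left', 'confidence': 1.0}]}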
# def detector_parse_pred(ibs, test_gid_list=None, **kwargs):
# depc = ibs.depc_image
# if test_gid_list is None:
# test_gid_list = general_get_imageset_gids(ibs, 'TEST_SET', **kwargs)
# uuid_list = ibs.get_image_uuids(test_gid_list)
# # depc.delete_property('detections', test_gid_list, config=kwargs)
# results_list = depc.get_property('detections', test_gid_list, None, config=kwargs)
# size_list = ibs.get_image_sizes(test_gid_list)
# zipped_list = zip(results_list)
# # Reformat results for json
# results_list = [
# [
# {
# 'gid' : test_gid,
# 'xtl' : bbox[0] / width,
# 'ytl' : bbox[1] / height,
# 'width' : bbox[2] / width,
# 'height' : bbox[3] / height,
# 'theta' : theta, # round(theta, 4),
# 'confidence' : conf, # round(conf, 4),
# 'class' : class_,
# 'viewpoint' : viewpoint,
# }
# for bbox, theta, class_, viewpoint, conf in zip(*zipped[0][1:])
# ]
# for zipped, (width, height), test_gid in zip(zipped_list, size_list, test_gid_list)
# ]
# pred_dict = {
# uuid_ : result_list
# for uuid_, result_list in zip(uuid_list, results_list)
# }
# # print(pred_dict)
# return pred_dict
# def detector_precision_recall_algo(ibs, samples=SAMPLES, force_serial=FORCE_SERIAL, **kwargs):
# test_gid_list = general_get_imageset_gids(ibs, 'TEST_SET', **kwargs)
# uuid_list = ibs.get_image_uuids(test_gid_list)
# print('\tGather Ground-Truth')
# gt_dict = detector_parse_gt(ibs, test_gid_list=test_gid_list)
# print('\tGather Predictions')
# pred_dict = detector_parse_pred(ibs, test_gid_list=test_gid_list, **kwargs)
# print('\tGenerate Curves...')
# conf_list = [ _ / float(samples) for _ in range(0, int(samples) + 1) ]
# conf_list = sorted(conf_list, reverse=True)
# uuid_list_list = [ uuid_list for _ in conf_list ]
# gt_dict_list = [ gt_dict for _ in conf_list ]
# pred_dict_list = [ pred_dict for _ in conf_list ]
# kwargs_list = [ kwargs for _ in conf_list ]
# arg_iter = zip(conf_list, uuid_list_list, gt_dict_list, pred_dict_list, kwargs_list)
# pr_re_gen = ut.generate2(detector_precision_recall_algo_worker, arg_iter,
# nTasks=len(conf_list), ordered=True,
# chunksize=CHUNK_SIZE, force_serial=force_serial)
# conf_list_ = [-1.0, -1.0]
# pr_list = [1.0, 0.0]
# re_list = [0.0, 1.0]
# # conf_list_ = []
# # pr_list = []
# # re_list = []
# for conf, pr, re in pr_re_gen:
# conf_list_.append(conf)
# pr_list.append(pr)
# re_list.append(re)
# print('...complete')
# return conf_list_, pr_list, re_list
# def detector_precision_recall_algo_worker(conf, uuid_list, gt_dict, pred_dict,
# kwargs):
# tp, fp, fn = 0.0, 0.0, 0.0
# for index, uuid_ in enumerate(uuid_list):
# if uuid_ in pred_dict:
# pred_list = [
# pred
# for pred in pred_dict[uuid_]
# if pred['confidence'] >= conf
# ]
# tp_, fp_, fn_ = general_tp_fp_fn(gt_dict[uuid_], pred_list, **kwargs)
# tp += tp_
# fp += fp_
# fn += fn_
# pr = tp / (tp + fp)
# re = tp / (tp + fn)
# return (conf, pr, re)
# def detector_precision_recall_algo_plot(ibs, **kwargs):
# label = kwargs['label']
# print('Processing Precision-Recall for: %r' % (label, ))
# conf_list, pr_list, re_list = detector_precision_recall_algo(ibs, **kwargs)
# return general_area_best_conf(conf_list, re_list, pr_list, **kwargs)
# def detector_confusion_matrix_algo_plot(ibs, label, color, conf, **kwargs):
# print('Processing Confusion Matrix for: %r (Conf = %0.02f)' % (label, conf, ))
# test_gid_list = general_get_imageset_gids(ibs, 'TEST_SET', **kwargs)
# uuid_list = ibs.get_image_uuids(test_gid_list)
# print('\tGather Ground-Truth')
# gt_dict = detector_parse_gt(ibs, test_gid_list=test_gid_list)
# print('\tGather Predictions')
# pred_dict = detector_parse_pred(ibs, test_gid_list=test_gid_list, **kwargs)
# label_list = []
# prediction_list = []
# for index, uuid_ in enumerate(uuid_list):
# if uuid_ in pred_dict:
# gt_list = gt_dict[uuid_]
# pred_list = [
# pred
# for pred in pred_dict[uuid_]
# if pred['confidence'] >= conf
# ]
# tp, fp, fn = general_tp_fp_fn(gt_list, pred_list, **kwargs)
# for _ in range(int(tp)):
# label_list.append('positive')
# prediction_list.append('positive')
# for _ in range(int(fp)):
# label_list.append('negative')
# prediction_list.append('positive')
# for _ in range(int(fn)):
# label_list.append('positive')
# prediction_list.append('negative')
# category_list = ['positive', 'negative']
# category_mapping = {
# 'positive': 0,
# 'negative': 1,
# }
# return general_confusion_matrix_algo(label_list, prediction_list, category_list,
# category_mapping, **kwargs)
# @register_ibs_method
# def detector_precision_recall_algo_display(ibs, min_overlap=0.5, figsize=(24, 7), **kwargs):
# import matplotlib.pyplot as plt
# fig_ = plt.figure(figsize=figsize)
# axes_ = plt.subplot(131)
# axes_.set_autoscalex_on(False)
# axes_.set_autoscaley_on(False)
# axes_.set_xlabel('Recall (Ground-Truth IOU >= %0.02f)' % (min_overlap, ))
# axes_.set_ylabel('Precision')
# axes_.set_xlim([0.0, 1.01])
# axes_.set_ylim([0.0, 1.01])
# kwargs_list = [
# {
# 'min_overlap' : min_overlap,
# 'classifier_sensitivity' : 0.64,
# 'localizer_grid' : False,
# 'localizer_sensitivity' : 0.16,
# 'labeler_sensitivity' : 0.42,
# },
# {
# 'min_overlap' : min_overlap,
# 'classifier_sensitivity' : 0.64,
# 'localizer_grid' : False,
# 'localizer_sensitivity' : 0.16,
# 'labeler_sensitivity' : 0.42,
# 'check_species' : True,
# },
# {
# 'min_overlap' : min_overlap,
# 'classifier_sensitivity' : 0.64,
# 'localizer_grid' : False,
# 'localizer_sensitivity' : 0.16,
# 'labeler_sensitivity' : 0.42,
# 'check_viewpoint' : True,
# },
# {
# 'min_overlap' : min_overlap,
# 'classifier_sensitivity' : 0.04,
# 'localizer_grid' : True,
# 'localizer_sensitivity' : 0.05,
# 'labeler_sensitivity' : 0.39,
# },
# {
# 'min_overlap' : min_overlap,
# 'classifier_sensitivity' : 0.04,
# 'localizer_grid' : True,
# 'localizer_sensitivity' : 0.05,
# 'labeler_sensitivity' : 0.39,
# 'check_species' : True,
# },
# {
# 'min_overlap' : min_overlap,
# 'classifier_sensitivity' : 0.04,
# 'localizer_grid' : True,
# 'localizer_sensitivity' : 0.05,
# 'labeler_sensitivity' : 0.39,
# 'check_viewpoint' : True,
# },
# ]
# label_list = [
# 'Opt L',
# 'Opt L+S',
# 'Opt L+S+V',
# 'Rec L',
# 'Rec L+S',
# 'Rec L+S+V',
# ]
# color_list = [
# 'r',
# 'b',
# 'g',
# 'k',
# 'y',
# 'c',
# ]
# ret_list = [
# detector_precision_recall_algo_plot(ibs, label=label, color=color, **kwargs_)
# for label, color, kwargs_ in zip(label_list, color_list, kwargs_list)
# ]
# area_list = [ ret[0] for ret in ret_list ]
# conf_list = [ ret[1] for ret in ret_list ]
# index = np.argmax(area_list)
# best_label = label_list[index]
# best_kwargs = kwargs_list[index]
# best_area = area_list[index]
# best_conf = conf_list[index]
# plt.title('Precision-Recall Curve (Best: %s, AP = %0.02f)' % (best_label, best_area, ), y=1.20)
# # Display graph
# plt.legend(bbox_to_anchor=(0.0, 1.02, 1.0, .102), loc=3, ncol=2, mode="expand",
# borderaxespad=0.0)
# axes_ = plt.subplot(132)
# axes_.set_aspect(1)
# gca_ = plt.gca()
# gca_.grid(False)
# correct_rate, _ = detector_confusion_matrix_algo_plot(ibs, 'V1', 'r', conf=best_conf, fig_=fig_, axes_=axes_, **best_kwargs)
# axes_.set_xlabel('Predicted (Correct = %0.02f%%)' % (correct_rate * 100.0, ))
# axes_.set_ylabel('Ground-Truth')
# plt.title('P-R Confusion Matrix (Algo: %s, OP = %0.02f)' % (best_label, best_conf, ), y=1.26)
# best_index = None
# best_conf = None
# best_pr = 0.0
# best_re = 0.0
# tup_list = [ ret[2] for ret in ret_list ]
# for index, tup in enumerate(tup_list):
# for conf, re, pr in zip(*tup):
# if pr > best_pr:
# best_index = index
# best_conf = conf
# best_pr = pr
# best_re = re
# if best_index is not None:
# axes_ = plt.subplot(131)
# plt.plot([best_re], [best_pr], 'yo')
# best_label = label_list[best_index]
# best_kwargs = kwargs_list[best_index]
# axes_ = plt.subplot(133)
# axes_.set_aspect(1)
# gca_ = plt.gca()
# gca_.grid(False)
# correct_rate, _ = detector_confusion_matrix_algo_plot(ibs, 'V1', 'r', conf=best_conf, fig_=fig_, axes_=axes_, **best_kwargs)
# axes_.set_xlabel('Predicted (Correct = %0.02f%%)' % (correct_rate * 100.0, ))
# axes_.set_ylabel('Ground-Truth')
# plt.title('P-R Confusion Matrix (Algo: %s, OP = %0.02f)' % (best_label, best_conf, ), y=1.26)
# # plt.show()
# fig_filename = 'detector-precision-recall-%0.2f.png' % (min_overlap, )
# fig_path = abspath(expanduser(join('~', 'Desktop', fig_filename)))
# plt.savefig(fig_path, bbox_inches='tight')
# @register_ibs_method
# def detector_metric_graphs(ibs, species_list=[]):
# ibs.classifier_precision_recall_algo_display(species_list)
# ibs.localizer_precision_recall_algo_display()
# ibs.labeler_precision_recall_algo_display()
# ibs.detector_precision_recall_algo_display()
if __name__ == '__main__':
"""
CommandLine:
python -m ibeis.other.detectfuncs
python -m ibeis.other.detectfuncs --allexamples
python -m ibeis.other.detectfuncs --allexamples --noface --nosrc
"""
import multiprocessing
multiprocessing.freeze_support() # for win32
ut.doctest_funcs()
| 56.923775 | 288 | 0.583998 | 27,920 | 232,249 | 4.56096 | 0.030408 | 0.023087 | 0.034199 | 0.040835 | 0.833119 | 0.801479 | 0.774379 | 0.749014 | 0.720304 | 0.700374 | 0 | 0.02569 | 0.262554 | 232,249 | 4,079 | 289 | 56.93773 | 0.717821 | 0.45617 | 0 | 0.498928 | 0 | 0.002143 | 0.116507 | 0.013292 | 0 | 0 | 0 | 0.000245 | 0.010287 | 1 | 0.026575 | false | 0.000429 | 0.014145 | 0.000429 | 0.064295 | 0.01886 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 6 |
0ad153a5cf29aabbd177b296c48b7b22dca83f01 | 57 | py | Python | src/text/__init__.py | alihassanijr/Compact-Transformers | 61b656eacdf113f92900f800410bb788bb7d9a3c | [
"Apache-2.0"
] | 281 | 2021-04-13T01:17:28.000Z | 2022-03-23T15:18:24.000Z | src/text/__init__.py | alihassanijr/Compact-Transformers | 61b656eacdf113f92900f800410bb788bb7d9a3c | [
"Apache-2.0"
] | 49 | 2021-04-16T12:59:55.000Z | 2022-03-18T18:25:27.000Z | src/text/__init__.py | alihassanijr/Compact-Transformers | 61b656eacdf113f92900f800410bb788bb7d9a3c | [
"Apache-2.0"
] | 42 | 2021-04-13T01:53:04.000Z | 2022-03-13T06:31:57.000Z | from .vit import *
from .cvt import *
from .cct import *
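# Aggregate the text-model variants (ViT-Lite, CVT, CCT) into one namespace;
# this assumes each submodule curates its star-exports.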
| 14.25 | 18 | 0.684211 | 9 | 57 | 4.333333 | 0.555556 | 0.512821 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.210526 | 57 | 3 | 19 | 19 | 0.866667 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | null | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 6 |
7c10c385558e9b354ccec34c0573912ff9f296b8 | 12,043 | py | Python | castoredc_api/tests/test_import/test_sync_import/test_import_translation_sync.py | reiniervlinschoten/castoredc_api | 54a71606fa681a05e795e42a37d4b4f58b97e787 | [
"MIT"
] | 1 | 2022-02-07T17:49:31.000Z | 2022-02-07T17:49:31.000Z | castoredc_api/tests/test_import/test_sync_import/test_import_translation_sync.py | reiniervlinschoten/castoredc_api | 54a71606fa681a05e795e42a37d4b4f58b97e787 | [
"MIT"
] | 48 | 2021-08-05T15:20:27.000Z | 2022-03-28T14:49:25.000Z | castoredc_api/tests/test_import/test_sync_import/test_import_translation_sync.py | reiniervlinschoten/castoredc_api | 54a71606fa681a05e795e42a37d4b4f58b97e787 | [
"MIT"
] | 1 | 2021-08-06T07:06:37.000Z | 2021-08-06T07:06:37.000Z | import pytest
from castoredc_api import CastorException
from castoredc_api.importer.import_data import import_data
class TestImportTranslationSync:
"""Tests uploading data to Castor while translating external data points."""
def test_import_study_value_translate_success(self, import_study):
"""Tests if uploading value data is successful"""
imported_data = import_data(
data_source_path="tests/test_import/data_files_for_import_tests/data_file_study_values_translate.xlsx",
column_link_path="tests/test_import/link_files_for_import_tests/study_link_file.xlsx",
study=import_study,
label_data=False,
target="Study",
translation_path="tests/test_import/translate_files_for_import_tests/study_value_translate_file.xlsx",
)
assert imported_data == self.study_success
def test_import_study_label_translate_success(self, import_study):
"""Tests if uploading label data is successful"""
imported_data = import_data(
data_source_path="tests/test_import/data_files_for_import_tests/data_file_study_labels_translate.xlsx",
column_link_path="tests/test_import/link_files_for_import_tests/study_link_file.xlsx",
study=import_study,
label_data=True,
target="Study",
translation_path="tests/test_import/translate_files_for_import_tests/study_label_translate_file.xlsx",
)
assert imported_data == self.study_success
def test_import_study_value_translate_missing(self, import_study):
"""Tests if uploading value data with missings is successful"""
imported_data = import_data(
data_source_path="tests/test_import/data_files_for_import_tests/data_file_study_values_missings_translate.xlsx",
column_link_path="tests/test_import/link_files_for_import_tests/study_link_file.xlsx",
study=import_study,
label_data=False,
target="Study",
translation_path="tests/test_import/translate_files_for_import_tests/study_value_translate_file.xlsx",
)
assert imported_data == self.study_missing
def test_import_study_label_translate_missing(self, import_study):
"""Tests if uploading label data with missings is successful"""
imported_data = import_data(
data_source_path="tests/test_import/data_files_for_import_tests/data_file_study_labels_missings_translate.xlsx",
column_link_path="tests/test_import/link_files_for_import_tests/study_link_file.xlsx",
study=import_study,
label_data=True,
target="Study",
translation_path="tests/test_import/translate_files_for_import_tests/study_label_translate_file.xlsx",
)
assert imported_data == self.study_missing
def test_import_study_value_translate_error(self, import_study):
"""Tests if uploading value data with errors is successful"""
with pytest.raises(CastorException) as e:
import_data(
data_source_path="tests/test_import/data_files_for_import_tests/data_file_study_values_errors_translate.xlsx",
column_link_path="tests/test_import/link_files_for_import_tests/study_link_file.xlsx",
study=import_study,
label_data=False,
target="Study",
translation_path="tests/test_import/translate_files_for_import_tests/study_value_translate_file.xlsx",
)
assert str(e.value) == self.study_error
def test_import_study_label_translate_error(self, import_study):
"""Tests if uploading label data with errors is successful"""
with pytest.raises(CastorException) as e:
import_data(
data_source_path="tests/test_import/data_files_for_import_tests/data_file_study_labels_errors_translate.xlsx",
column_link_path="tests/test_import/link_files_for_import_tests/study_link_file.xlsx",
study=import_study,
label_data=True,
target="Study",
translation_path="tests/test_import/translate_files_for_import_tests/study_label_translate_file.xlsx",
)
assert str(e.value) == self.study_error
def test_import_report_label_translation_success(self, import_study):
"""Tests if uploading label data with a translation and dependency is successful"""
imported_data = import_data(
data_source_path="tests/test_import/data_files_for_import_tests/data_file_report_medication_labels_translation.xlsx",
column_link_path="tests/test_import/link_files_for_import_tests/report_link_file.xlsx",
study=import_study,
label_data=True,
target="Report",
target_name="Medication",
translation_path="tests/test_import/translate_files_for_import_tests/report_label_translate_file.xlsx",
)
assert imported_data == self.report_success
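    # Expected import results below map each record ID to a list holding one
    # {'success': {field: value}, 'failed': {}} dict per processed data row.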
study_success = {
"110001": [
{
"success": {
"base_bl_date": "16-03-2021",
"base_hb": "8.3",
"fac_V_leiden": "55;16-03-2021",
"onset_stroke": "16-03-2021;07:30",
"onset_trombectomy": "09:25",
"pat_birth_year": "1999",
"pat_sex": "0",
"pat_race": "1",
"his_family": "2;3;4",
},
"failed": {},
}
],
"110002": [
{
"success": {
"base_bl_date": "17-03-2021",
"base_hb": "7.2",
"fac_V_leiden": "33;17-03-2021",
"onset_stroke": "17-03-2021;15:30",
"onset_trombectomy": "06:33",
"pat_birth_year": "1956",
"pat_sex": "0",
"pat_race": "2",
"his_family": "1;2",
},
"failed": {},
}
],
"110003": [
{
"success": {
"base_bl_date": "16-03-2022",
"base_hb": "9.1",
"fac_V_leiden": "-45;18-03-2022",
"onset_stroke": "18-03-2022;02:00",
"onset_trombectomy": "12:24",
"pat_birth_year": "1945",
"pat_sex": "1",
"pat_race": "3",
"his_family": "0",
},
"failed": {},
}
],
"110004": [
{
"success": {
"base_bl_date": "17-03-2022",
"base_hb": "3.2",
"fac_V_leiden": "28;19-03-2022",
"onset_stroke": "17-03-2022;21:43",
"onset_trombectomy": "23:23",
"pat_birth_year": "1933",
"pat_sex": "1",
"pat_race": "4",
"his_family": "5;7",
},
"failed": {},
}
],
"110005": [
{
"success": {
"base_bl_date": "16-03-2023",
"base_hb": "10.3",
"fac_V_leiden": "5;20-03-2023",
"onset_stroke": "16-03-2023;07:22",
"onset_trombectomy": "08:14",
"pat_birth_year": "1921",
"pat_sex": "0",
"pat_race": "5",
"his_family": "8",
},
"failed": {},
}
],
}
study_missing = {
"110001": [
{
"success": {
"base_bl_date": "16-03-2021",
"base_hb": "8.3",
"fac_V_leiden": "55;16-03-2021",
"onset_trombectomy": "09:25",
"pat_birth_year": "1999",
"pat_sex": "0",
"pat_race": "1",
},
"failed": {},
}
],
"110002": [
{
"success": {
"base_bl_date": "17-03-2021",
"fac_V_leiden": "33;17-03-2021",
"onset_stroke": "17-03-2021;15:30",
"onset_trombectomy": "06:33",
"pat_sex": "0",
"pat_race": "2",
},
"failed": {},
}
],
"110003": [
{
"success": {
"base_hb": "9.1",
"fac_V_leiden": "-45;18-03-2022",
"onset_stroke": "18-03-2022;02:00",
"onset_trombectomy": "12:24",
"his_family": "0",
},
"failed": {},
}
],
"110004": [
{
"success": {
"base_bl_date": "17-03-2022",
"base_hb": "3.2",
"onset_stroke": "17-03-2022;21:43",
"pat_sex": "1",
"pat_race": "4",
"his_family": "5;7",
},
"failed": {},
}
],
"110005": [
{
"success": {
"base_bl_date": "16-03-2023",
"base_hb": "10.3",
"fac_V_leiden": "5;20-03-2023",
"onset_stroke": "16-03-2023;07:22",
"onset_trombectomy": "08:14",
"pat_birth_year": "1921",
"pat_sex": "0",
"pat_race": "5",
"his_family": "8",
},
"failed": {},
}
],
}
study_error = (
"Non-viable data found in dataset to be imported. See output folder for details"
)
report_success = {
"110001": [
{
"success": {
"med_name": "Azathioprine",
"med_start": "05-12-2019",
"med_stop": "05-12-2020",
"med_dose": "0.05",
"med_units": "3",
},
"failed": {},
}
],
"110002": [
{
"success": {
"med_name": "Vedolizumab",
"med_start": "17-08-2018",
"med_stop": "17-09-2020",
"med_dose": "300",
"med_units": "7",
"med_other_unit": "mg/4 weeks",
},
"failed": {},
}
],
"110003": [
{
"success": {
"med_name": "Ustekinumab",
"med_start": "19-12-2017",
"med_stop": "03-06-2019",
"med_dose": "90",
"med_units": "7",
"med_other_unit": "mg/8 weeks",
},
"failed": {},
}
],
"110004": [
{
"success": {
"med_name": "Thioguanine",
"med_start": "25-04-2020",
"med_stop": "27-05-2021",
"med_dose": "15",
"med_units": "2",
},
"failed": {},
}
],
"110005": [
{
"success": {
"med_name": "Tofacitinib",
"med_start": "01-03-2020",
"med_stop": "31-12-2999",
"med_dose": "10",
"med_units": "2",
},
"failed": {},
}
],
}
| 36.941718 | 129 | 0.471145 | 1,171 | 12,043 | 4.479078 | 0.139197 | 0.053384 | 0.05205 | 0.076072 | 0.814299 | 0.803241 | 0.783985 | 0.766444 | 0.734986 | 0.6755 | 0 | 0.080439 | 0.409533 | 12,043 | 325 | 130 | 37.055385 | 0.657151 | 0.038529 | 0 | 0.601351 | 0 | 0 | 0.333507 | 0.144306 | 0 | 0 | 0 | 0 | 0.023649 | 1 | 0.023649 | false | 0 | 0.175676 | 0 | 0.216216 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 6 |
7c3de34d10fb4725b6ce063161ee8975c62e4b41 | 104 | py | Python | tests/conftest.py | JeffreyWardman/FluentNet | d87dd8eb40894ffb66b6042cfc2add368c8de827 | [
"MIT"
] | null | null | null | tests/conftest.py | JeffreyWardman/FluentNet | d87dd8eb40894ffb66b6042cfc2add368c8de827 | [
"MIT"
] | null | null | null | tests/conftest.py | JeffreyWardman/FluentNet | d87dd8eb40894ffb66b6042cfc2add368c8de827 | [
"MIT"
] | null | null | null | from pytest import fixture
import torch
@fixture
def input():
    """Provide a random (1, 3, 256, 256) image batch for model tests."""
    return torch.rand((1, 3, 256, 256))
| 13 | 39 | 0.692308 | 16 | 104 | 4.5 | 0.75 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.095238 | 0.192308 | 104 | 7 | 40 | 14.857143 | 0.761905 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.2 | true | 0 | 0.4 | 0.2 | 0.8 | 0 | 1 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 1 | 1 | 0 | 0 | 6 |
7c58de5cc44ecbf3de1ff3f85d3d1689617297ef | 1,991 | py | Python | markyp_bootstrap4/colors.py | volfpeter/markyp-bootstrap4 | 1af5a1f9dc861a14323706ace28882ef6555739a | [
"MIT"
] | 21 | 2019-07-16T15:03:43.000Z | 2021-11-16T10:51:58.000Z | markyp_bootstrap4/colors.py | volfpeter/markyp-bootstrap4 | 1af5a1f9dc861a14323706ace28882ef6555739a | [
"MIT"
] | null | null | null | markyp_bootstrap4/colors.py | volfpeter/markyp-bootstrap4 | 1af5a1f9dc861a14323706ace28882ef6555739a | [
"MIT"
] | null | null | null | """
CSS class names for coloring.
See https://getbootstrap.com/docs/4.0/utilities/colors/ for more information.
"""
__all__ = ("bg", "text")
class __Background(object):
"""
CSS class names for background coloring.
"""
__slots__ = ()
@property
def primary(self) -> str:
return "bg-primary"
@property
def secondary(self) -> str:
return "bg-secondary"
@property
def success(self) -> str:
return "bg-success"
@property
def danger(self) -> str:
return "bg-danger"
@property
def warning(self) -> str:
return "bg-warning"
@property
def info(self) -> str:
return "bg-info"
@property
def light(self) -> str:
return "bg-light"
@property
def dark(self) -> str:
return "bg-dark"
@property
def white(self) -> str:
return "bg-white"
class __Text(object):
"""
CSS class names for text coloring.
"""
__slots__ = ()
@property
def primary(self) -> str:
return "text-primary"
@property
def secondary(self) -> str:
return "text-secondary"
@property
def success(self) -> str:
return "text-success"
@property
def danger(self) -> str:
return "text-danger"
@property
def warning(self) -> str:
return "text-warning"
@property
def info(self) -> str:
return "text-info"
@property
def light(self) -> str:
return "text-light"
@property
def dark(self) -> str:
return "text-dark"
@property
def muted(self) -> str:
return "text-muted"
@property
def white(self) -> str:
return "text-white"
bg: __Background = __Background()
"""
CSS class names for background coloring.
See https://getbootstrap.com/docs/4.0/utilities/colors/.
"""
text: __Text = __Text()
"""
CSS class names for text coloring.
See https://getbootstrap.com/docs/4.0/utilities/colors/.
"""
| 17.017094 | 77 | 0.578101 | 228 | 1,991 | 4.942982 | 0.162281 | 0.185448 | 0.219166 | 0.150843 | 0.812777 | 0.80213 | 0.669033 | 0.216504 | 0.138421 | 0.138421 | 0 | 0.004231 | 0.287795 | 1,991 | 116 | 78 | 17.163793 | 0.79055 | 0.092416 | 0 | 0.609375 | 0 | 0 | 0.1254 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.296875 | false | 0 | 0 | 0.296875 | 0.65625 | 0 | 0 | 0 | 0 | null | 0 | 1 | 0 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 1 | 1 | 0 | 0 | 6 |
7c80933d5b2e98d963d6420181a06d08257f1993 | 35,458 | py | Python | tests/test_core.py | luizirber/ncbi-genome-download | fbad1918c0b258e5f0259358fb3c17de302b88c0 | [
"Apache-2.0"
] | 1 | 2019-11-25T03:46:32.000Z | 2019-11-25T03:46:32.000Z | tests/test_core.py | luizirber/ncbi-genome-download | fbad1918c0b258e5f0259358fb3c17de302b88c0 | [
"Apache-2.0"
] | null | null | null | tests/test_core.py | luizirber/ncbi-genome-download | fbad1918c0b258e5f0259358fb3c17de302b88c0 | [
"Apache-2.0"
] | 1 | 2019-11-13T15:28:47.000Z | 2019-11-13T15:28:47.000Z | """Core module tests."""
from argparse import Namespace
from collections import OrderedDict
import os
from os import path
import pytest
import requests_mock
from requests.exceptions import ConnectionError
from ncbi_genome_download import core
from ncbi_genome_download import NgdConfig, SUPPORTED_TAXONOMIC_GROUPS
def _get_file(fname):
"""Get a file from the test directory."""
return path.join(path.dirname(__file__), fname)
@pytest.fixture  # plain fixtures support yield; yield_fixture is a deprecated alias
def req():
"""Fake requests object."""
with requests_mock.mock() as req:
yield req
def test_download_defaults(monkeypatch, mocker):
"""Test download does the right thing."""
entry = {
'assembly_accession': 'FAKE0.1',
'organism_name': 'Example species',
'infraspecific_name': 'strain=ABC 1234',
'ftp_path': 'https://fake/genomes/FAKE0.1'
}
worker_mock = mocker.MagicMock()
select_candidates_mock = mocker.MagicMock(return_value=[(entry, 'bacteria')])
create_downloadjob_mock = mocker.MagicMock(return_value=[core.DownloadJob(None, None, None, None)])
monkeypatch.setattr(core, 'select_candidates', select_candidates_mock)
monkeypatch.setattr(core, 'create_downloadjob', create_downloadjob_mock)
monkeypatch.setattr(core, 'worker', worker_mock)
assert core.download() == 0
assert select_candidates_mock.call_args_list[0][0][0].group == SUPPORTED_TAXONOMIC_GROUPS
assert create_downloadjob_mock.call_args_list[0][0][0] == entry
def test_args_download_defaults(monkeypatch, mocker):
"""Test args_download does the correct thing."""
entry = {
'assembly_accession': 'FAKE0.1',
'organism_name': 'Example species',
'infraspecific_name': 'strain=ABC 1234',
'ftp_path': 'https://fake/genomes/FAKE0.1'
}
worker_mock = mocker.MagicMock()
select_candidates_mock = mocker.MagicMock(return_value=[(entry, 'bacteria')])
create_downloadjob_mock = mocker.MagicMock(return_value=[core.DownloadJob(None, None, None, None)])
monkeypatch.setattr(core, 'select_candidates', select_candidates_mock)
monkeypatch.setattr(core, 'create_downloadjob', create_downloadjob_mock)
monkeypatch.setattr(core, 'worker', worker_mock)
assert core.args_download(Namespace()) == 0
assert select_candidates_mock.call_args_list[0][0][0].group == SUPPORTED_TAXONOMIC_GROUPS
assert create_downloadjob_mock.call_args_list[0][0][0] == entry
def test_download_defaults_nomatch(monkeypatch, mocker):
"""Test download bails with a 1 return code if no entries match."""
select_candidates_mock = mocker.MagicMock(return_value=[])
monkeypatch.setattr(core, 'select_candidates', select_candidates_mock)
assert core.download() == 1
def test_download_dry_run(monkeypatch, mocker):
"""Test _download is not called for a dry run."""
entry = {
'assembly_accession': 'FAKE0.1',
'organism_name': 'Example species',
'infraspecific_name': 'strain=ABC 1234',
'ftp_path': 'https://fake/genomes/FAKE0.1'
}
worker_mock = mocker.MagicMock()
select_candidates_mock = mocker.MagicMock(return_value=[(entry, 'bacteria')])
create_downloadjob_mock = mocker.MagicMock(return_value=[core.DownloadJob(None, None, None, None)])
monkeypatch.setattr(core, 'select_candidates', select_candidates_mock)
monkeypatch.setattr(core, 'create_downloadjob', create_downloadjob_mock)
monkeypatch.setattr(core, 'worker', worker_mock)
assert core.download(dry_run=True) == 0
assert select_candidates_mock.call_count == 1
assert create_downloadjob_mock.call_count == 0
assert worker_mock.call_count == 0
def test_download_one(monkeypatch, mocker):
download_mock = mocker.MagicMock()
monkeypatch.setattr(core, 'download', download_mock)
kwargs = {'group': 'bacteria', 'output': '/tmp/fake'}
core.download(**kwargs)
download_mock.assert_called_with(**kwargs)
def test_download_connection_err(monkeypatch, mocker):
select_candidates_mock = mocker.MagicMock(side_effect=ConnectionError)
monkeypatch.setattr(core, 'select_candidates', select_candidates_mock)
assert core.download() == 75
def test_download(monkeypatch, mocker, req):
summary_contents = open(_get_file('partial_summary.txt'), 'r').read()
req.get('https://ftp.ncbi.nih.gov/genomes/refseq/bacteria/assembly_summary.txt',
text=summary_contents)
mocker.spy(core, 'get_summary')
mocker.spy(core, 'parse_summary')
mocker.patch('ncbi_genome_download.core.create_downloadjob')
core.download(group='bacteria', output='/tmp/fake')
assert core.get_summary.call_count == 1
assert core.parse_summary.call_count == 1
assert core.create_downloadjob.call_count == 4
def test_download_metadata(monkeypatch, mocker, req, tmpdir):
"""Test creating the metadata file works."""
metadata_file = tmpdir.join('metadata.tsv')
summary_contents = open(_get_file('partial_summary.txt'), 'r').read()
req.get('https://ftp.ncbi.nih.gov/genomes/refseq/bacteria/assembly_summary.txt',
text=summary_contents)
mocker.spy(core, 'get_summary')
mocker.spy(core, 'parse_summary')
mocker.patch('ncbi_genome_download.core.create_downloadjob', return_value=[core.DownloadJob(None, None, None, None)])
core.download(group='bacteria', output='/tmp/fake', metadata_table=str(metadata_file))
assert core.get_summary.call_count == 1
assert core.parse_summary.call_count == 1
assert core.create_downloadjob.call_count == 4
assert metadata_file.check()
def test_download_complete(monkeypatch, mocker, req):
summary_contents = open(_get_file('assembly_status.txt'), 'r').read()
req.get('https://ftp.ncbi.nih.gov/genomes/refseq/bacteria/assembly_summary.txt',
text=summary_contents)
mocker.spy(core, 'get_summary')
mocker.spy(core, 'parse_summary')
mocker.patch('ncbi_genome_download.core.create_downloadjob')
core.download(group='bacteria', output='/tmp/fake', assembly_level='complete')
assert core.get_summary.call_count == 1
assert core.parse_summary.call_count == 1
assert core.create_downloadjob.call_count == 1
# Many nested tuples in call_args_list, no kidding.
assert core.create_downloadjob.call_args_list[0][0][0]['assembly_level'] == 'Complete Genome'
def test_download_chromosome(monkeypatch, mocker, req):
summary_contents = open(_get_file('assembly_status.txt'), 'r').read()
req.get('https://ftp.ncbi.nih.gov/genomes/refseq/bacteria/assembly_summary.txt',
text=summary_contents)
mocker.spy(core, 'get_summary')
mocker.spy(core, 'parse_summary')
mocker.patch('ncbi_genome_download.core.create_downloadjob')
core.download(group='bacteria', output='/tmp/fake', assembly_level='chromosome')
assert core.get_summary.call_count == 1
assert core.parse_summary.call_count == 1
assert core.create_downloadjob.call_count == 1
# Many nested tuples in call_args_list, no kidding.
assert core.create_downloadjob.call_args_list[0][0][0]['assembly_level'] == 'Chromosome'
def test_download_scaffold(monkeypatch, mocker, req):
summary_contents = open(_get_file('assembly_status.txt'), 'r').read()
req.get('https://ftp.ncbi.nih.gov/genomes/refseq/bacteria/assembly_summary.txt',
text=summary_contents)
mocker.spy(core, 'get_summary')
mocker.spy(core, 'parse_summary')
mocker.patch('ncbi_genome_download.core.create_downloadjob')
core.download(group='bacteria', output='/tmp/fake', assembly_level='scaffold')
assert core.get_summary.call_count == 1
assert core.parse_summary.call_count == 1
assert core.create_downloadjob.call_count == 1
# Many nested tuples in call_args_list, no kidding.
assert core.create_downloadjob.call_args_list[0][0][0]['assembly_level'] == 'Scaffold'
def test_download_contig(monkeypatch, mocker, req):
summary_contents = open(_get_file('assembly_status.txt'), 'r').read()
req.get('https://ftp.ncbi.nih.gov/genomes/refseq/bacteria/assembly_summary.txt',
text=summary_contents)
mocker.spy(core, 'get_summary')
mocker.spy(core, 'parse_summary')
mocker.patch('ncbi_genome_download.core.create_downloadjob')
core.download(group='bacteria', output='/tmp/fake', assembly_level='contig')
assert core.get_summary.call_count == 1
assert core.parse_summary.call_count == 1
assert core.create_downloadjob.call_count == 1
# Many nested tuples in call_args_list, no kidding.
assert core.create_downloadjob.call_args_list[0][0][0]['assembly_level'] == 'Contig'
def test_download_genus(monkeypatch, mocker, req):
summary_contents = open(_get_file('partial_summary.txt'), 'r').read()
req.get('https://ftp.ncbi.nih.gov/genomes/refseq/bacteria/assembly_summary.txt',
text=summary_contents)
mocker.spy(core, 'get_summary')
mocker.spy(core, 'parse_summary')
mocker.patch('ncbi_genome_download.core.create_downloadjob')
core.download(group='bacteria', output='/tmp/fake', genus='Azorhizobium')
assert core.get_summary.call_count == 1
assert core.parse_summary.call_count == 1
assert core.create_downloadjob.call_count == 1
# Many nested tuples in call_args_list, no kidding.
assert core.create_downloadjob.call_args_list[0][0][0][
'organism_name'] == 'Azorhizobium caulinodans ORS 571'
def test_download_genus_lowercase(monkeypatch, mocker, req):
summary_contents = open(_get_file('partial_summary.txt'), 'r').read()
req.get('https://ftp.ncbi.nih.gov/genomes/refseq/bacteria/assembly_summary.txt',
text=summary_contents)
mocker.spy(core, 'get_summary')
mocker.spy(core, 'parse_summary')
mocker.patch('ncbi_genome_download.core.create_downloadjob')
core.download(group='bacteria', output='/tmp/fake', genus='azorhizobium')
assert core.get_summary.call_count == 1
assert core.parse_summary.call_count == 1
assert core.create_downloadjob.call_count == 1
# Many nested tuples in call_args_list, no kidding.
assert core.create_downloadjob.call_args_list[0][0][0][
'organism_name'] == 'Azorhizobium caulinodans ORS 571'
def test_download_genus_fuzzy(monkeypatch, mocker, req):
summary_contents = open(_get_file('partial_summary.txt'), 'r').read()
req.get('https://ftp.ncbi.nih.gov/genomes/refseq/bacteria/assembly_summary.txt',
text=summary_contents)
mocker.spy(core, 'get_summary')
mocker.spy(core, 'parse_summary')
mocker.patch('ncbi_genome_download.core.create_downloadjob')
core.download(group='bacteria', output='/tmp/fake', genus='ors', fuzzy_genus=True)
assert core.get_summary.call_count == 1
assert core.parse_summary.call_count == 1
assert core.create_downloadjob.call_count == 1
# Many nested tuples in call_args_list, no kidding.
assert core.create_downloadjob.call_args_list[0][0][0][
'organism_name'] == 'Azorhizobium caulinodans ORS 571'
def test_download_taxid(monkeypatch, mocker, req):
summary_contents = open(_get_file('partial_summary.txt'), 'r').read()
req.get('https://ftp.ncbi.nih.gov/genomes/refseq/bacteria/assembly_summary.txt',
text=summary_contents)
mocker.spy(core, 'get_summary')
mocker.spy(core, 'parse_summary')
mocker.patch('ncbi_genome_download.core.create_downloadjob')
core.download(group='bacteria', output='/tmp/fake', taxid='438753')
assert core.get_summary.call_count == 1
assert core.parse_summary.call_count == 1
assert core.create_downloadjob.call_count == 1
# Many nested tuples in call_args_list, no kidding.
assert core.create_downloadjob.call_args_list[0][0][0][
'organism_name'] == 'Azorhizobium caulinodans ORS 571'
def test_download_species_taxid(monkeypatch, mocker, req):
summary_contents = open(_get_file('partial_summary.txt'), 'r').read()
req.get('https://ftp.ncbi.nih.gov/genomes/refseq/bacteria/assembly_summary.txt',
text=summary_contents)
mocker.spy(core, 'get_summary')
mocker.spy(core, 'parse_summary')
mocker.patch('ncbi_genome_download.core.create_downloadjob')
core.download(group='bacteria', output='/tmp/fake', species_taxid='7')
assert core.get_summary.call_count == 1
assert core.parse_summary.call_count == 1
assert core.create_downloadjob.call_count == 1
# Many nested tuples in call_args_list, no kidding.
assert core.create_downloadjob.call_args_list[0][0][0][
'organism_name'] == 'Azorhizobium caulinodans ORS 571'
def test_download_refseq_category(monkeypatch, mocker, req):
summary_contents = open(_get_file('assembly_status.txt'), 'r').read()
req.get('https://ftp.ncbi.nih.gov/genomes/refseq/bacteria/assembly_summary.txt',
text=summary_contents)
mocker.spy(core, 'get_summary')
mocker.spy(core, 'parse_summary')
mocker.patch('ncbi_genome_download.core.create_downloadjob')
core.download(group='bacteria', output='/tmp/fake', refseq_category='reference')
assert core.get_summary.call_count == 1
assert core.parse_summary.call_count == 1
assert core.create_downloadjob.call_count == 1
# Many nested tuples in call_args_list, no kidding.
assert core.create_downloadjob.call_args_list[0][0][0][
'organism_name'] == 'Streptomyces coelicolor A3(2)'
def test_get_summary(monkeypatch, req, tmpdir):
"""Test getting the assembly summary file."""
cache_dir = tmpdir.mkdir('cache')
monkeypatch.setattr(core, 'CACHE_DIR', str(cache_dir))
cache_file = cache_dir.join('refseq_bacteria_assembly_summary.txt')
req.get('https://ftp.ncbi.nih.gov/genomes/refseq/bacteria/assembly_summary.txt', text='test')
ret = core.get_summary('refseq', 'bacteria', NgdConfig.get_default('uri'), False)
assert ret.read() == 'test'
assert not cache_file.check()
ret = core.get_summary('refseq', 'bacteria', NgdConfig.get_default('uri'), True)
assert ret.read() == 'test'
assert cache_file.check()
req.get('https://ftp.ncbi.nih.gov/genomes/refseq/bacteria/assembly_summary.txt', text='never read')
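    # If the cache were bypassed, this re-registered response ('never read')
    # would leak through; the assertion below proves the on-disk copy wins.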
ret = core.get_summary('refseq', 'bacteria', NgdConfig.get_default('uri'), True)
assert ret.read() == 'test'
def test_get_summary_error_handling(monkeypatch, mocker, req, tmpdir):
"""Test get_summary error handling."""
cache_dir = tmpdir.join('cache')
monkeypatch.setattr(core, 'CACHE_DIR', str(cache_dir))
req.get('https://ftp.ncbi.nih.gov/genomes/refseq/bacteria/assembly_summary.txt', text='test')
fake_makedirs = mocker.MagicMock(side_effect=OSError(13, "Permission denied"))
monkeypatch.setattr(os, 'makedirs', fake_makedirs)
with pytest.raises(OSError):
core.get_summary('refseq', 'bacteria', NgdConfig.get_default('uri'), True)
def test_parse_summary():
with open(_get_file('partial_summary.txt'), 'r') as fh:
reader = core.parse_summary(fh)
first = next(reader)
assert 'ftp_path' in first
assert 'assembly_accession' in first
fh.seek(2)
reader = core.parse_summary(fh)
first = next(reader)
assert 'assembly_accession' in first
def test_filter_entries():
"""Test filter_entries."""
config = NgdConfig()
with open(_get_file('assembly_status.txt'), 'r') as fh:
entries = list(core.parse_summary(fh))
assert core.filter_entries(entries, config) == entries
expected = entries[-1:]
config.assembly_accessions = "GCF_000203835.1"
assert core.filter_entries(entries, config) == expected
def prepare_create_downloadjob(req, tmpdir, format_map=NgdConfig._FORMATS, human_readable=False,
create_local_file=False):
# Set up test env
entry = {
'assembly_accession': 'FAKE0.1',
'organism_name': 'Example species',
'infraspecific_name': 'strain=ABC 1234',
'ftp_path': 'https://fake/genomes/FAKE0.1'
}
config = NgdConfig()
outdir = tmpdir.mkdir('output')
download_jobs = []
config.output = str(outdir)
config.human_readable = human_readable
checksum_file_content = ''
for key, val in format_map.items():
seqfile = tmpdir.join('fake{}'.format(val))
seqfile.write(key)
checksum = core.md5sum(str(seqfile))
filename = path.basename(str(seqfile))
full_url = 'https://fake/genomes/FAKE0.1/{}'.format(filename)
local_file = outdir.join('refseq', 'bacteria', 'FAKE0.1', filename)
if create_local_file:
local_file.write(seqfile.read(), ensure=True)
symlink_path = None
if human_readable:
symlink_path = str(
outdir.join('human_readable', 'refseq', 'bacteria', 'Example', 'species',
'ABC_1234', filename))
download_jobs.append(core.DownloadJob(full_url, str(local_file), checksum, symlink_path))
checksum_file_content += '{}\t./{}\n'.format(checksum, filename)
req.get(full_url, text=seqfile.read())
req.get('https://fake/genomes/FAKE0.1/md5checksums.txt', text=checksum_file_content)
return entry, config, download_jobs
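# The helper above stages one fake sequence file per requested format, mocks
# its download URL together with a matching md5checksums.txt, and returns the
# entry, an NgdConfig, and the DownloadJob list the code under test should
# reproduce.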
def test_create_downloadjob_genbank(req, tmpdir):
entry, config, joblist = prepare_create_downloadjob(req, tmpdir)
jobs = core.create_downloadjob(entry, 'bacteria', config)
expected = [j for j in joblist if j.local_file.endswith('_genomic.gbff.gz')]
assert jobs == expected
def test_create_downloadjob_all(req, tmpdir):
entry, config, expected = prepare_create_downloadjob(req, tmpdir)
config.file_format = "all"
jobs = core.create_downloadjob(entry, 'bacteria', config)
assert jobs == expected
def test_create_downloadjob_missing(req, tmpdir):
name_map_copy = OrderedDict(NgdConfig._FORMATS)
del name_map_copy['genbank']
entry, config, _ = prepare_create_downloadjob(req, tmpdir, name_map_copy)
jobs = core.create_downloadjob(entry, 'bacteria', config)
assert jobs == []
def test_create_downloadjob_human_readable(req, tmpdir):
entry, config, joblist = prepare_create_downloadjob(req, tmpdir, human_readable=True)
jobs = core.create_downloadjob(entry, 'bacteria', config)
expected = [j for j in joblist if j.local_file.endswith('_genomic.gbff.gz')]
assert jobs == expected
def test_create_downloadjob_symlink_only(req, tmpdir):
entry, config, joblist = prepare_create_downloadjob(req, tmpdir, human_readable=True,
create_local_file=True)
jobs = core.create_downloadjob(entry, 'bacteria', config)
expected = [core.DownloadJob(None, j.local_file, None, j.symlink_path)
for j in joblist if j.local_file.endswith('_genomic.gbff.gz')]
assert jobs == expected
def test_create_dir(tmpdir):
entry = {'assembly_accession': 'FAKE0.1'}
output = tmpdir.mkdir('output')
ret = core.create_dir(entry, 'refseq', 'bacteria', str(output), flat_output=False)
expected = output.join('refseq', 'bacteria', 'FAKE0.1')
assert expected.check()
assert ret == str(expected)
def test_create_dir_exists(tmpdir):
entry = {'assembly_accession': 'FAKE0.1'}
output = tmpdir.mkdir('output')
expected = output.mkdir('refseq').mkdir('bacteria').mkdir('FAKE0.1')
ret = core.create_dir(entry, 'refseq', 'bacteria', str(output), flat_output=False)
assert ret == str(expected)
def test_create_dir_isfile(tmpdir):
entry = {'assembly_accession': 'FAKE0.1'}
output = tmpdir.mkdir('output')
output.join('refseq', 'bacteria', 'FAKE0.1').write('foo', ensure=True)
with pytest.raises(OSError):
core.create_dir(entry, 'refseq', 'bacteria', str(output), flat_output=False)
def test_create_dir_flat(tmpdir):
entry = {'assembly_accession': 'FAKE0.1'}
output = tmpdir.mkdir('output')
ret = core.create_dir(entry, 'refseq', 'bacteria', str(output), flat_output=True)
assert ret == str(output)
def test_create_readable_dir(tmpdir):
entry = {'organism_name': 'Example species', 'infraspecific_name': 'strain=ABC 1234'}
output = tmpdir.mkdir('output')
ret = core.create_readable_dir(entry, 'refseq', 'bacteria', str(output))
expected = output.join('human_readable', 'refseq', 'bacteria', 'Example', 'species',
'ABC_1234')
assert expected.check()
assert ret == str(expected)
def test_create_readable_dir_exists(tmpdir):
entry = {'organism_name': 'Example species', 'infraspecific_name': 'strain=ABC 1234'}
output = tmpdir.mkdir('output')
expected = output.mkdir('human_readable').mkdir('refseq').mkdir('bacteria').mkdir(
'Example').mkdir('species').mkdir('ABC_1234')
ret = core.create_readable_dir(entry, 'refseq', 'bacteria', str(output))
assert ret == str(expected)
def test_create_readable_dir_isfile(tmpdir):
entry = {'organism_name': 'Example species', 'infraspecific_name': 'strain=ABC 1234'}
output = tmpdir.mkdir('output')
output.join('human_readable', 'refseq', 'bacteria', 'Example', 'species', 'ABC_1234').write(
'foo', ensure=True)
with pytest.raises(OSError):
core.create_readable_dir(entry, 'refseq', 'bacteria', str(output))
def test_create_readable_dir_virus(tmpdir):
output = tmpdir.mkdir('output')
entry = {'organism_name': 'OnlyOneString-1', 'infraspecific_name': 'strain=ABC 1234'}
ret = core.create_readable_dir(entry, 'refseq', 'viral', str(output))
expected = output.join('human_readable', 'refseq', 'viral', 'OnlyOneString-1', 'ABC_1234')
assert expected.check()
assert ret == str(expected)
entry = {'organism_name': 'Two strings', 'infraspecific_name': 'strain=ABC 1234'}
ret = core.create_readable_dir(entry, 'refseq', 'viral', str(output))
expected = output.join('human_readable', 'refseq', 'viral', 'Two_strings', 'ABC_1234')
assert expected.check()
assert ret == str(expected)
entry = {'organism_name': 'This is four strings', 'infraspecific_name': 'strain=ABC 1234'}
ret = core.create_readable_dir(entry, 'refseq', 'viral', str(output))
expected = output.join('human_readable', 'refseq', 'viral', 'This_is_four_strings', 'ABC_1234')
assert expected.check()
assert ret == str(expected)
entry = {'organism_name': 'This is four strings', 'infraspecific_name': '',
'isolate': '', 'assembly_accession': 'ABC12345'}
ret = core.create_readable_dir(entry, 'refseq', 'viral', str(output))
expected = output.join('human_readable', 'refseq', 'viral', 'This_is_four_strings', 'ABC12345')
assert expected.check()
assert ret == str(expected)
def test_grab_checksums_file(req):
req.get('https://ftp.ncbi.nih.gov/genomes/all/FAKE0.1/md5checksums.txt', text='test')
entry = {'ftp_path': 'ftp://ftp.ncbi.nih.gov/genomes/all/FAKE0.1'}
ret = core.grab_checksums_file(entry)
assert ret == 'test'
def test_parse_checksums():
checksums_string = """\
d3c2634cedd0efe05cbf8a5f5384d921 ./GCF_000009605.1_ASM960v1_feature_table.txt.gz
42c1bb1447aea2512a17aeb3645b55e9 ./GCF_000009605.1_ASM960v1_genomic.fna.gz
8a685d49d826c4f0ad05152e906f3250 ./GCF_000009605.1_ASM960v1_genomic.gbff.gz
e2d9e1cfa085cb462a73d3d2d2c22be5 ./GCF_000009605.1_ASM960v1_genomic.gff.gz
d8ce7c80d457e012f9d368a4673dea65 ./GCF_000009605.1_ASM960v1_protein.faa.gz
This_is_totally_an_invalid_line!
620a09de4286f66113317456c0dc8f66 ./GCF_000009605.1_ASM960v1_protein.gpff.gz
"""
expected = [
{'checksum': 'd3c2634cedd0efe05cbf8a5f5384d921', 'file': 'GCF_000009605.1_ASM960v1_feature_table.txt.gz'},
{'checksum': '42c1bb1447aea2512a17aeb3645b55e9', 'file': 'GCF_000009605.1_ASM960v1_genomic.fna.gz'},
{'checksum': '8a685d49d826c4f0ad05152e906f3250', 'file': 'GCF_000009605.1_ASM960v1_genomic.gbff.gz'},
{'checksum': 'e2d9e1cfa085cb462a73d3d2d2c22be5', 'file': 'GCF_000009605.1_ASM960v1_genomic.gff.gz'},
{'checksum': 'd8ce7c80d457e012f9d368a4673dea65', 'file': 'GCF_000009605.1_ASM960v1_protein.faa.gz'},
{'checksum': '620a09de4286f66113317456c0dc8f66', 'file': 'GCF_000009605.1_ASM960v1_protein.gpff.gz'},
]
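    # The deliberately malformed line in the fixture has no counterpart in
    # `expected`: parse_checksums() should skip lines that do not split into
    # a checksum/filename pair.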
ret = core.parse_checksums(checksums_string)
assert ret == expected
def test_has_file_changed_no_file(tmpdir):
checksums = [
{'checksum': 'fake', 'file': 'skipped'},
{'checksum': 'fake', 'file': 'fake_genomic.gbff.gz'},
]
assert core.has_file_changed(str(tmpdir), checksums)
def test_has_file_changed(tmpdir):
checksums = [
{'checksum': 'fake', 'file': 'skipped'},
{'checksum': 'fake', 'file': 'fake_genomic.gbff.gz'},
]
fake_file = tmpdir.join(checksums[-1]['file'])
fake_file.write('foo')
assert fake_file.check()
assert core.has_file_changed(str(tmpdir), checksums)
def test_has_file_changed_unchanged(tmpdir):
fake_file = tmpdir.join('fake_genomic.gbff.gz')
fake_file.write('foo')
assert fake_file.check()
checksum = core.md5sum(str(fake_file))
checksums = [
{'checksum': 'fake', 'file': 'skipped'},
{'checksum': checksum, 'file': fake_file.basename},
]
assert core.has_file_changed(str(tmpdir), checksums) is False
def test_need_to_create_symlink_no_symlink(tmpdir):
checksums = [
{'checksum': 'fake', 'file': 'skipped'},
{'checksum': 'fake', 'file': 'fake_genomic.gbff.gz'},
]
assert core.need_to_create_symlink(str(tmpdir), checksums, 'genbank', None) is False
def test_need_to_create_symlink_correct_link(tmpdir):
fake_file = tmpdir.join('fake_genomic.gbff.gz')
fake_file.write('foo')
assert fake_file.check()
checksum = core.md5sum(str(fake_file))
human_readable_dir = tmpdir.mkdir('human_readable')
fake_link = human_readable_dir.join('fake_genomic.gbff.gz')
fake_link.mksymlinkto(str(fake_file))
checksums = [
{'checksum': 'fake', 'file': 'skipped'},
{'checksum': checksum, 'file': fake_file.basename},
]
assert core.need_to_create_symlink(str(tmpdir), checksums, 'genbank',
str(human_readable_dir)) is False
def test_need_to_create_symlink(tmpdir):
fake_file = tmpdir.join('fake_genomic.gbff.gz')
fake_file.write('foo')
assert fake_file.check()
checksum = core.md5sum(str(fake_file))
human_readable_dir = tmpdir.mkdir('human_readable')
checksums = [
{'checksum': 'fake', 'file': 'skipped'},
{'checksum': checksum, 'file': fake_file.basename},
]
assert core.need_to_create_symlink(str(tmpdir), checksums, 'genbank', str(human_readable_dir))
def test_md5sum():
expected = '74d72df33d621f5eb6300dc9a2e06573'
filename = _get_file('partial_summary.txt')
ret = core.md5sum(filename)
assert ret == expected
def test_download_file_genbank(req, tmpdir):
entry = {'ftp_path': 'ftp://fake/path'}
fake_file = tmpdir.join('fake_genomic.gbff.gz')
fake_file.write('foo')
assert fake_file.check()
checksum = core.md5sum(str(fake_file))
checksums = [{'checksum': checksum, 'file': fake_file.basename}]
dl_dir = tmpdir.mkdir('download')
req.get('https://fake/path/fake_genomic.gbff.gz', text=fake_file.read())
assert core.worker(core.download_file_job(entry, str(dl_dir), checksums))
def test_download_file_genbank_mismatch(req, tmpdir):
entry = {'ftp_path': 'ftp://fake/path'}
fake_file = tmpdir.join('fake_genomic.gbff.gz')
fake_file.write('foo')
assert fake_file.check()
checksums = [{'checksum': 'fake', 'file': fake_file.basename}]
dl_dir = tmpdir.mkdir('download')
req.get('https://fake/path/fake_genomic.gbff.gz', text=fake_file.read())
assert core.worker(core.download_file_job(entry, str(dl_dir), checksums)) is False
def test_download_file_fasta(req, tmpdir):
entry = {'ftp_path': 'ftp://fake/path'}
bogus_file = tmpdir.join('fake_cds_from_genomic.fna.gz')
bogus_file.write("we don't want this one")
bogus_checksum = core.md5sum(str(bogus_file))
fake_file = tmpdir.join('fake_genomic.fna.gz')
fake_file.write('foo')
assert fake_file.check()
checksum = core.md5sum(str(fake_file))
checksums = [
{'checksum': bogus_checksum, 'file': bogus_file.basename},
{'checksum': checksum, 'file': fake_file.basename},
]
dl_dir = tmpdir.mkdir('download')
req.get('https://fake/path/fake_genomic.fna.gz', text=fake_file.read())
assert core.worker(core.download_file_job(entry, str(dl_dir), checksums, 'fasta'))
def test_download_file_cds_fasta(req, tmpdir):
entry = {'ftp_path': 'ftp://fake/path'}
fake_file = tmpdir.join('fake_cds_from_genomic.fna.gz')
fake_file.write('foo')
assert fake_file.check()
checksum = core.md5sum(str(fake_file))
checksums = [
{'checksum': checksum, 'file': fake_file.basename},
]
dl_dir = tmpdir.mkdir('download')
req.get('https://fake/path/fake_cds_from_genomic.fna.gz', text=fake_file.read())
assert core.worker(core.download_file_job(entry, str(dl_dir), checksums, 'cds-fasta'))
def test_download_file_rna_fasta(req, tmpdir):
entry = {'ftp_path': 'ftp://fake/path'}
fake_file = tmpdir.join('fake_rna_from_genomic.fna.gz')
fake_file.write('foo')
assert fake_file.check()
checksum = core.md5sum(str(fake_file))
checksums = [
{'checksum': checksum, 'file': fake_file.basename},
]
dl_dir = tmpdir.mkdir('download')
req.get('https://fake/path/fake_rna_from_genomic.fna.gz', text=fake_file.read())
assert core.worker(core.download_file_job(entry, str(dl_dir), checksums, 'rna-fasta'))
def test_download_file_rna_fna(req, tmpdir):
entry = {'ftp_path': 'ftp://fake/path'}
fake_file = tmpdir.join('fake_rna.fna.gz')
fake_file.write('foo')
assert fake_file.check()
checksum = core.md5sum(str(fake_file))
checksums = [{'checksum': checksum, 'file': fake_file.basename}]
dl_dir = tmpdir.mkdir('download')
req.get('https://fake/path/fake_rna.fna.gz', text=fake_file.read())
assert core.worker(core.download_file_job(entry, str(dl_dir), checksums, 'rna-fna'))
def test_download_file_rm_out(req, tmpdir):
entry = {'ftp_path': 'ftp://fake/path'}
fake_file = tmpdir.join('fake_rm.out.gz')
fake_file.write('foo')
assert fake_file.check()
checksum = core.md5sum(str(fake_file))
checksums = [{'checksum': checksum, 'file': fake_file.basename}]
dl_dir = tmpdir.mkdir('download')
req.get('https://fake/path/fake_rm.out.gz', text=fake_file.read())
assert core.worker(core.download_file_job(entry, str(dl_dir), checksums, 'rm'))
def test_download_file_symlink_path(req, tmpdir):
entry = {'ftp_path': 'ftp://fake/path'}
fake_file = tmpdir.join('fake_genomic.gbff.gz')
fake_file.write('foo')
assert fake_file.check()
checksum = core.md5sum(str(fake_file))
checksums = [{'checksum': checksum, 'file': fake_file.basename}]
dl_dir = tmpdir.mkdir('download')
symlink_dir = tmpdir.mkdir('symlink')
req.get('https://fake/path/fake_genomic.gbff.gz', text=fake_file.read())
assert core.worker(
core.download_file_job(entry, str(dl_dir), checksums, symlink_path=str(symlink_dir)))
symlink = symlink_dir.join('fake_genomic.gbff.gz')
assert symlink.check()
def test_create_symlink_job(tmpdir):
dl_dir = tmpdir.mkdir('download')
fake_file = dl_dir.join('fake_genomic.gbff.gz')
fake_file.write('foo')
assert fake_file.check()
checksum = core.md5sum(str(fake_file))
checksums = [{'checksum': checksum, 'file': fake_file.basename}]
symlink_dir = tmpdir.mkdir('symlink')
assert core.worker(
core.create_symlink_job(str(dl_dir), checksums, 'genbank', str(symlink_dir)))
symlink = symlink_dir.join('fake_genomic.gbff.gz')
assert symlink.check()
def test_create_symlink_job_remove_symlink(tmpdir):
dl_dir = tmpdir.mkdir('download')
fake_file = dl_dir.join('fake_genomic.gbff.gz')
fake_file.write('foo')
assert fake_file.check()
checksum = core.md5sum(str(fake_file))
checksums = [{'checksum': checksum, 'file': fake_file.basename}]
symlink_dir = tmpdir.mkdir('symlink')
wrong_file = symlink_dir.join('fake_genomic.gbff.gz')
wrong_file.write('bar')
assert wrong_file.check()
assert core.worker(
core.create_symlink_job(str(dl_dir), checksums, 'genbank', str(symlink_dir)))
symlink = symlink_dir.join('fake_genomic.gbff.gz')
assert symlink.check()
assert str(symlink.realpath()) == str(fake_file)
def test_download_file_symlink_path_existed(req, tmpdir):
entry = {'ftp_path': 'ftp://fake/path'}
fake_file = tmpdir.join('fake_genomic.gbff.gz')
fake_file.write('foo')
assert fake_file.check()
checksum = core.md5sum(str(fake_file))
checksums = [{'checksum': checksum, 'file': fake_file.basename}]
dl_dir = tmpdir.mkdir('download')
symlink_dir = tmpdir.mkdir('symlink')
symlink = symlink_dir.join('fake_genomic.gbff.gz')
os.symlink("/foo/bar", str(symlink))
req.get('https://fake/path/fake_genomic.gbff.gz', text=fake_file.read())
assert core.worker(
core.download_file_job(entry, str(dl_dir), checksums, symlink_path=str(symlink_dir)))
assert symlink.check()
def test_get_genus_label():
fake_entry = {'organism_name': 'Example species ABC 1234'}
assert core.get_genus_label(fake_entry) == 'Example'
def test_get_species_label():
fake_entry = {'organism_name': 'Example species ABC 1234'}
assert core.get_species_label(fake_entry) == 'species'
fake_entry = {'organism_name': 'archaeon', 'infraspecific_name': '',
'isolate': 'ARS1334'}
assert core.get_species_label(fake_entry) == 'sp.'
def test_get_strain_label():
fake_entry = {'infraspecific_name': 'strain=ABC 1234'}
assert core.get_strain_label(fake_entry) == 'ABC_1234'
fake_entry = {'infraspecific_name': '', 'isolate': 'ABC 1234'}
assert core.get_strain_label(fake_entry) == 'ABC_1234'
fake_entry = {'infraspecific_name': '', 'isolate': '',
'organism_name': 'Example species ABC 1234'}
assert core.get_strain_label(fake_entry) == 'ABC_1234'
fake_entry = {'infraspecific_name': '', 'isolate': '',
'organism_name': 'Example strain', 'assembly_accession': 'ABC12345'}
assert core.get_strain_label(fake_entry) == 'ABC12345'
fake_entry = {'infraspecific_name': '', 'isolate': '',
'organism_name': 'Example strain with stupid name',
'assembly_accession': 'ABC12345'}
assert core.get_strain_label(fake_entry, viral=True) == 'ABC12345'
fake_entry = {'infraspecific_name': 'strain=ABC 1234; FOO'}
assert core.get_strain_label(fake_entry) == 'ABC_1234__FOO'
fake_entry = {'infraspecific_name': 'strain=ABC 1234 '}
assert core.get_strain_label(fake_entry) == 'ABC_1234'
fake_entry = {'infraspecific_name': 'strain= ABC 1234'}
assert core.get_strain_label(fake_entry) == 'ABC_1234'
fake_entry = {'infraspecific_name': 'strain=ABC/1234'}
assert core.get_strain_label(fake_entry) == 'ABC_1234'
fake_entry = {'infraspecific_name': 'strain=ABC//1234'}
assert core.get_strain_label(fake_entry) == 'ABC__1234'
fake_entry = {'infraspecific_name': 'strain=ABC\\1234'}
assert core.get_strain_label(fake_entry) == 'ABC_1234'
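# Hedged sketch (not the library's actual implementation): the assertions
# above imply roughly this sanitisation -- strip an optional "strain=" prefix
# and surrounding whitespace, then map separator characters to underscores.
# The helper name is an assumption, for illustration only.
def _sanitize_strain(raw):
    label = raw.strip()
    if label.startswith('strain='):
        label = label[len('strain='):].strip()
    for char in (' ', ';', '/', '\\'):
        label = label.replace(char, '_')
    return label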
| 41.278231 | 121 | 0.702155 | 4,568 | 35,458 | 5.203152 | 0.063266 | 0.031303 | 0.037109 | 0.016829 | 0.824722 | 0.779325 | 0.765441 | 0.733633 | 0.715205 | 0.684366 | 0 | 0.026825 | 0.157877 | 35,458 | 858 | 122 | 41.32634 | 0.769156 | 0.025692 | 0 | 0.57622 | 0 | 0.001524 | 0.254894 | 0.047388 | 0 | 0 | 0 | 0 | 0.221037 | 1 | 0.092988 | false | 0 | 0.01372 | 0 | 0.109756 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 6 |
7c9ae8eac3bd1142725975b7660a7ddec40666a6 | 114 | py | Python | docify/__init__.py | rapidstack/docify | 8f15ed10dddac3728c9e4f5c2683ceefa204ce65 | [
"MIT"
] | 1 | 2019-11-19T08:06:26.000Z | 2019-11-19T08:06:26.000Z | docify/__init__.py | rapidstack/docify | 8f15ed10dddac3728c9e4f5c2683ceefa204ce65 | [
"MIT"
] | 2 | 2019-01-22T15:30:29.000Z | 2019-04-04T13:48:07.000Z | docify/__init__.py | rapidstack/docify | 8f15ed10dddac3728c9e4f5c2683ceefa204ce65 | [
"MIT"
] | 1 | 2019-01-22T11:04:05.000Z | 2019-01-22T11:04:05.000Z | from docify.lib.document import Document
from docify.lib import components
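# Public names re-exported by "from docify import *".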
__all__ = ['Document', 'components']
| 19 | 40 | 0.780702 | 14 | 114 | 6.071429 | 0.5 | 0.235294 | 0.305882 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.122807 | 114 | 5 | 41 | 22.8 | 0.85 | 0 | 0 | 0 | 0 | 0 | 0.157895 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.666667 | 0 | 0.666667 | 0 | 1 | 0 | 0 | null | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 0 | 6 |
7cda9b39f4a8179b7e1e9fb48e64e345c7e7dd41 | 187 | py | Python | repo2apptainer/__init__.py | andersy005/repo2apptainer | 6ba9bda304ecb410e74d53d4124c98aaf0660a1e | [
"BSD-3-Clause"
] | 1 | 2022-03-16T20:12:08.000Z | 2022-03-16T20:12:08.000Z | repo2apptainer/__init__.py | andersy005/repo2apptainer | 6ba9bda304ecb410e74d53d4124c98aaf0660a1e | [
"BSD-3-Clause"
] | 1 | 2022-03-16T20:13:51.000Z | 2022-03-16T20:13:51.000Z | repo2apptainer/__init__.py | andersy005/repo2apptainer | 6ba9bda304ecb410e74d53d4124c98aaf0660a1e | [
"BSD-3-Clause"
] | null | null | null | #!/usr/bin/env python3
# flake8: noqa
""" Top-level module for repo2apptainer. """
from ._version import version as __version__
from .app import Repo2Apptainer
from .config import config
| 26.714286 | 44 | 0.770053 | 25 | 187 | 5.56 | 0.68 | 0.258993 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.024691 | 0.13369 | 187 | 6 | 45 | 31.166667 | 0.833333 | 0.385027 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | null | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 6 |
7cdba50056318c7f20f4fc94fce41e9386d745be | 26,192 | py | Python | Functions/SimulationType/OP.py | SunYongshuai/SunSpice | 7f9368a7af1421a63b69dcce3decc6d06ff1ff6b | [
"Apache-2.0"
] | 1 | 2018-12-10T06:06:54.000Z | 2018-12-10T06:06:54.000Z | Functions/SimulationType/OP.py | SunYongshuai/SunSpice | 7f9368a7af1421a63b69dcce3decc6d06ff1ff6b | [
"Apache-2.0"
] | null | null | null | Functions/SimulationType/OP.py | SunYongshuai/SunSpice | 7f9368a7af1421a63b69dcce3decc6d06ff1ff6b | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/python3
# -*- encoding="UTF-8" -*-
#Import Package
import sys
sys.path.append("../../")
import numpy as np
from tkinter.messagebox import showinfo
import parameters
from Functions.string2num import string2num
def getOutTitleOp():
WriteString = ''
for index in parameters.NodesDict:
if index == '0':
pass
else:
parameters.opExp.append('v_' + index)
WriteString = WriteString + 'v_' + index + '\t'
#V -> Pulse -> SinV -> E -> F -> H -> L
#Source Current
for index in parameters.listDCV:
parameters.opExp.append('i_' + index.name)
WriteString = WriteString + 'i_' + index.name + '\t'
for index in parameters.listPulseV:
parameters.opExp.append('i_' + index.name)
WriteString = WriteString + 'i_' + index.name + '\t'
for index in parameters.listSinV:
parameters.opExp.append('i_' + index.name)
WriteString = WriteString + 'i_' + index.name + '\t'
for index in parameters.listE:
parameters.opExp.append('i_' + index.name)
WriteString = WriteString + 'i_' + index.name + '\t'
for index in parameters.listF:
parameters.opExp.append('i_' + index.name)
WriteString = WriteString + 'i_' + index.name + '\t'
for index in parameters.listH:
parameters.opExp.append('i_' + index.name)
WriteString = WriteString + 'i_' + index.name + '\t'
for index in parameters.listL:
parameters.opExp.append('i_' + index.name)
WriteString = WriteString + 'i_' + index.name + '\t'
#Elem Current
for index in parameters.listR:
parameters.opExp.append('i_' + index.name)
WriteString = WriteString + 'i_' + index.name + '\t'
for index in parameters.listD:
parameters.opExp.append('i_' + index.name)
WriteString = WriteString + 'i_' + index.name + '\t'
for index in parameters.listG:
parameters.opExp.append('i_' + index.name)
WriteString = WriteString + 'i_' + index.name + '\t'
for index in parameters.listM:
parameters.opExp.append('i_' + index.name)
WriteString = WriteString + 'i_' + index.name + '\t'
return WriteString
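# Hedged sketch (not part of the original module): throughout this file the
# branch-current rows are assumed to follow the node-voltage rows in the
# fixed order V -> Pulse -> SinV -> E -> F -> H -> L, with H taking two rows
# per element. A helper like this makes the repeated matAddr arithmetic
# explicit; the helper name is an assumption.
def _branch_base_row(kind):
    row = len(parameters.NodesDict) - 1  # node rows come first; GND is not stored
    for name, count in (('V', len(parameters.listDCV)),
                        ('Pulse', len(parameters.listPulseV)),
                        ('SinV', len(parameters.listSinV)),
                        ('E', len(parameters.listE)),
                        ('F', len(parameters.listF)),
                        ('H', 2 * len(parameters.listH))):
        if name == kind:
            return row
        row += count
    return row  # 'L' branches come after everything else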
def getOutDataOp(MatResult):
WriteString = ''
for node in parameters.NodesDict:
if node != '0':
WriteString = WriteString + str(MatResult[parameters.NodesDict.get(node)-1,0]) + '\t'
parameters.opValue.append(MatResult[parameters.NodesDict.get(node)-1,0])
for index in range(len(parameters.listDCV)):
#V -> Pulse -> SinV -> E -> F -> H -> L
matAddr = len(parameters.NodesDict) + index - 1
WriteString = WriteString + str(MatResult[matAddr,0]) + '\t'
parameters.opValue.append(MatResult[matAddr,0])
for index in range(len(parameters.listPulseV)):
#V -> Pulse -> SinV -> E -> F -> H -> L
matAddr = len(parameters.NodesDict) + len(parameters.listDCV) + index - 1
WriteString = WriteString + str(MatResult[matAddr,0]) + '\t'
parameters.opValue.append(MatResult[matAddr,0])
for index in range(len(parameters.listSinV)):
#V -> Pulse -> SinV -> E -> F -> H -> L
matAddr = len(parameters.NodesDict) + len(parameters.listDCV) \
+ len(parameters.listPulseV) + index - 1
WriteString = WriteString + str(MatResult[matAddr,0]) + '\t'
parameters.opValue.append(MatResult[matAddr,0])
for index in range(len(parameters.listE)):
#V -> Pulse -> SinV -> E -> F -> H -> L
matAddr = len(parameters.NodesDict) + len(parameters.listDCV) \
+ len(parameters.listPulseV) + len(parameters.listSinV) \
+ index - 1
WriteString = WriteString + str(MatResult[matAddr,0]) + '\t'
parameters.opValue.append(MatResult[matAddr,0])
for index in range(len(parameters.listF)):
#V -> Pulse -> SinV -> E -> F -> H -> L
matAddr = len(parameters.NodesDict) + len(parameters.listDCV) \
+ len(parameters.listPulseV) + len(parameters.listSinV) \
+ len(parameters.listE) + index - 1
WriteString = WriteString + str(MatResult[matAddr,0]) + '\t'
parameters.opValue.append(MatResult[matAddr,0])
for index in range(len(parameters.listH)):
#V -> Pulse -> SinV -> E -> F -> H -> L
matAddr = len(parameters.NodesDict) + len(parameters.listDCV) \
+ len(parameters.listPulseV) + len(parameters.listSinV) \
+ len(parameters.listE) + len(parameters.listF) \
+ 2 * index - 1
WriteString = WriteString + str(MatResult[matAddr,0]) + '\t'
parameters.opValue.append(MatResult[matAddr,0])
for index in range(len(parameters.listL)):
#V -> Pulse -> SinV -> E -> F -> H -> L
matAddr = len(parameters.NodesDict) + len(parameters.listDCV) \
+ len(parameters.listPulseV) + len(parameters.listSinV) \
+ len(parameters.listE) + len(parameters.listF) \
+ 2 * len(parameters.listH) + index - 1
WriteString = WriteString + str(MatResult[matAddr,0]) + '\t'
parameters.opValue.append(MatResult[matAddr,0])
for resistor in parameters.listR:
port1 = parameters.NodesDict.get(resistor.port1)
port2 = parameters.NodesDict.get(resistor.port2)
Rvalue = string2num(resistor.value)
if (port1 != 0) & (port2 != 0):
R_V = MatResult[port1-1,0] - MatResult[port2-1,0]
elif (port1 == 0) & (port2 != 0):
R_V = -1 * MatResult[port2-1,0]
elif (port1 != 0) & (port2 == 0):
R_V = MatResult[port1-1,0]
else:
R_V = 0
IValue = R_V/Rvalue
WriteString = WriteString + str(IValue) + '\t'
parameters.opValue.append(IValue)
for diode in parameters.listD:
port1 = parameters.NodesDict.get(diode.port1)
port2 = parameters.NodesDict.get(diode.port2)
if (port1 != 0) & (port2 != 0):
D_V = MatResult[port1-1,0] - MatResult[port2-1,0]
elif (port1 == 0) & (port2 != 0):
D_V = -1 * MatResult[port2-1,0]
elif (port1 != 0) & (port2 == 0):
D_V = MatResult[port1-1,0]
else:
D_V = 0
D_I = diode.getI_V(D_V)
WriteString = WriteString + str(D_I) + '\t'
parameters.opValue.append(D_I)
for MosFet in parameters.listM:
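# TODO: MOSFET branch currents are never computed here, even though
# getOutTitleOp emits an i_ column for every entry in listM, so the
# header and data columns go out of step whenever listM is non-empty.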
pass
for vccs_g in parameters.listG:
ctlNodePos = parameters.NodesDict.get(vccs_g.ctlNodePos)
ctlNodeNeg = parameters.NodesDict.get(vccs_g.ctlNodeNeg)
if (ctlNodePos != 0) & (ctlNodeNeg != 0):
C_V = MatResult[ctlNodePos-1,0] - MatResult[ctlNodeNeg-1,0]
elif (ctlNodePos == 0) & (ctlNodeNeg != 0):
C_V = -1 * MatResult[ctlNodeNeg-1,0]
elif (ctlNodePos != 0) & (ctlNodeNeg == 0):
C_V = MatResult[ctlNodePos-1,0]
else:
C_V = 0
vccs_I = vccs_g.getI(C_V)
WriteString = WriteString + str(vccs_I) + '\t'
parameters.opValue.append(vccs_I)
return WriteString
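# Hedged sketch (names are assumptions): the repeated ground-aware voltage
# lookups above can be collapsed into one helper. GND (node index 0) is the
# reference node and is not stored in MatResult, so stored rows are shifted
# down by one.
def _node_voltage(mat_result, node_index):
    # GND is the reference node and always 0 V.
    return 0.0 if node_index == 0 else mat_result[node_index - 1, 0]

def _branch_voltage(mat_result, port1, port2):
    return _node_voltage(mat_result, port1) - _node_voltage(mat_result, port2)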
def OpSimulation():
print("Info: OP Simulation ...")
NodeNum = len(parameters.NodesDict) - 1 # all nodes except GND
branchnum = len(parameters.listDCV) + len(parameters.listPulseV) \
+ len(parameters.listSinV) + len(parameters.listE) \
+ len(parameters.listF) + 2 * len(parameters.listH) \
+ len(parameters.listL)
#V -> Pulse -> SinV -> E -> F -> H -> L
MatNum = NodeNum + branchnum
MatStamps = np.mat(np.zeros((MatNum,MatNum)))
MatRhs = np.mat(np.zeros((MatNum,1)))
MatResult = np.mat(np.zeros((MatNum,1)))
parameters.opExpString = getOutTitleOp()
if (len(parameters.listD)!=0) | (len(parameters.listM)!=0):
if (len(parameters.listD)!=0):
MarkPort = parameters.NodesDict.get(parameters.listD[0].port1)
elif len(parameters.listM) != 0:
MarkPort = parameters.NodesDict.get(parameters.listM[0].portD)
else:
print("Error: Logic Error!") #Wouldn't Here
return
lastVMarkPort = 1.8 # arbitrary initial value; only needs to differ from VMarkPort
VMarkPort = 0.9
#count = 0
InitFlag = True
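# Repeat the whole stamp/solve cycle until the voltage at the marked
# nonlinear node stops moving (fixed-point / Newton-style convergence).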
while abs(VMarkPort - lastVMarkPort) > 0.000001:
#print(VMarkPort)
MatStamps = np.mat(np.zeros((MatNum,MatNum)))
MatRhs = np.mat(np.zeros((MatNum,1)))
for elem in parameters.listR: #Load R
elem.loadMatResistor()
for keyPoint in elem.StampMatDict:
MatStamps[keyPoint.pointX,keyPoint.pointY] += elem.StampMatDict.get(keyPoint)
for elem in parameters.listD: #load D
port1 = parameters.NodesDict.get(elem.port1)
port2 = parameters.NodesDict.get(elem.port2)
if (port1 != 0) & (port2 != 0):
ResultVd_temp = MatResult[port1-1,0] - MatResult[port2-1,0]
elif port1 == 0:
ResultVd_temp = -1 * MatResult[port2-1,0]
elif port2 == 0:
ResultVd_temp = MatResult[port1-1,0]
else:
ResultVd_temp = 0
elem.loadMatDiode(ResultVd_temp)
for keyPoint in elem.StampMatDict:
MatStamps[keyPoint.pointX,keyPoint.pointY] += elem.StampMatDict.get(keyPoint)
for keyPoint in elem.RHSMatDict:
MatRhs[keyPoint.pointX,keyPoint.pointY] += elem.RHSMatDict.get(keyPoint)
for elem in range(len(parameters.listE)): #Load E
#V -> Pulse -> SinV -> E -> F -> H -> L
matAddr = len(parameters.NodesDict) + len(parameters.listDCV) \
+ len(parameters.listPulseV) + len(parameters.listSinV) + elem - 1
parameters.listE[elem].loadMatE(matAddr)
for keyPoint in parameters.listE[elem].matStampsE:
MatStamps[keyPoint.pointX,keyPoint.pointY] = parameters.listE[elem].matStampsE.get(keyPoint)
for elem in range(len(parameters.listF)): #loadF
#V -> Pulse -> SinV -> E -> F -> H -> L
portCtlPos = parameters.NodesDict.get(parameters.listF[elem].ctlNodePos)
portCtlNeg = parameters.NodesDict.get(parameters.listF[elem].ctlNodeNeg)
matAddr = len(parameters.NodesDict) + len(parameters.listDCV) \
+ len(parameters.listPulseV) + len(parameters.listSinV) \
+ len(parameters.listE) + elem - 1
Vctl = MatResult[portCtlPos-1] - MatResult[portCtlNeg-1]
parameters.listF[elem].loadMatF(matAddr,Vctl)
for keyPoint in parameters.listF[elem].matStampsF:
MatStamps[keyPoint.pointX,keyPoint.pointY] = parameters.listF[elem].matStampsF.get(keyPoint)
for keyPoint in parameters.listF[elem].matRhsF:
MatRhs[keyPoint.pointX,keyPoint.pointY] = parameters.listF[elem].matRhsF.get(keyPoint)
for elem in parameters.listG: #loadG
elem.loadMatG()
for keyPoint in elem.matStampsG:
MatStamps[keyPoint.pointX,keyPoint.pointY] += elem.matStampsG.get(keyPoint)
for elem in range(len(parameters.listH)): #load H
#V -> Pulse -> SinV -> E -> F -> H -> L
matAddr = len(parameters.NodesDict) + len(parameters.listDCV) \
+ len(parameters.listPulseV) + len(parameters.listSinV) \
+ len(parameters.listE) + len(parameters.listF) + 2 * elem - 1
portCtlPos = parameters.NodesDict.get(parameters.listH[elem].ctlNodePos)
portCtlNeg = parameters.NodesDict.get(parameters.listH[elem].ctlNodeNeg)
Vctl = MatResult[portCtlPos-1] - MatResult[portCtlNeg-1]
parameters.listH[elem].loadMatH(matAddr,Vctl)
MatRhs[matAddr+1] = string2num(parameters.listH[elem].transResValue)
for keyPoint in parameters.listH[elem].matStampsH:
MatStamps[keyPoint.pointX,keyPoint.pointY] = parameters.listH[elem].matStampsH.get(keyPoint)
for elem in parameters.listDCI: #load DC Is
elem.loadMatDCIs()
for keyPoint in elem.RHSMatDict:
MatRhs[keyPoint.pointX,keyPoint.pointY] += elem.RHSMatDict.get(keyPoint)
for elem in range(len(parameters.listDCV)): #load DC Vs
#V -> Pulse -> SinV -> E -> F -> H -> L
VsDC = parameters.listDCV[elem]
matAddr = len(parameters.NodesDict) + elem - 1
portPos = parameters.NodesDict.get(VsDC.portPos)
portNeg = parameters.NodesDict.get(VsDC.portNeg)
DCV_Value = string2num(VsDC.value)
MatRhs[matAddr,0] = DCV_Value
MatResult[portPos-1,0] = DCV_Value
if portPos != 0:
MatStamps[matAddr,portPos-1] = 1
MatStamps[portPos-1,matAddr] = 1
if portNeg != 0:
MatStamps[matAddr,portNeg-1] = -1
MatStamps[portNeg-1,matAddr] = -1
for elem in range(len(parameters.listPulseV)): #load DC pulse Vs
#V -> Pulse -> SinV -> E -> F -> H -> L
VsPulseDC = parameters.listPulseV[elem]
matAddr = len(parameters.NodesDict) + len(parameters.listDCV) + elem - 1
portPos = parameters.NodesDict.get(VsPulseDC.portPos)
portNeg = parameters.NodesDict.get(VsPulseDC.portNeg)
Pulse_DCValue = VsPulseDC.getVoltage(0)
MatRhs[matAddr,0] = Pulse_DCValue
MatResult[portPos-1,0] = Pulse_DCValue
if portPos != 0:
MatStamps[matAddr,portPos-1] = 1
MatStamps[portPos-1,matAddr] = 1
if portNeg != 0:
MatStamps[matAddr,portNeg-1] = -1
MatStamps[portNeg-1,matAddr] = -1
for elem in range(len(parameters.listSinV)): #load DC sin Vs
#V -> Pulse -> SinV -> E -> F -> H -> L
VsSinDC = parameters.listSinV[elem]
matAddr = len(parameters.NodesDict) + len(parameters.listDCV) \
+ len(parameters.listPulseV) + elem - 1
portPos = parameters.NodesDict.get(VsSinDC.portPos)
portNeg = parameters.NodesDict.get(VsSinDC.portNeg)
Sin_DcValue = VsSinDC.getValue(0)
MatRhs[matAddr,0] = Sin_DcValue
MatResult[portPos-1,0] = Sin_DcValue
if portPos != 0:
MatStamps[matAddr,portPos-1] = 1
MatStamps[portPos-1,matAddr] = 1
if portNeg != 0:
MatStamps[matAddr,portNeg-1] = -1
MatStamps[portNeg-1,matAddr] = -1
for elem in range(len(parameters.listL)): #Load L in DC simulation
LTemp = parameters.listL[elem]
#V -> Pulse -> SinV -> E -> F -> H -> L
matAddr = len(parameters.NodesDict) + len(parameters.listDCV) \
+ len(parameters.listPulseV) + len(parameters.listSinV) \
+ len(parameters.listE) + len(parameters.listF) \
+ 2 * len(parameters.listH) + elem - 1
portPos = parameters.NodesDict.get(LTemp.port1)
portNeg = parameters.NodesDict.get(LTemp.port2)
if portPos != 0:
MatStamps[matAddr,portPos-1] = 1
MatStamps[portPos-1,matAddr] = 1
if portNeg != 0:
MatStamps[matAddr,portNeg-1] = -1
MatStamps[portNeg-1,matAddr] = -1
for elem in parameters.listM:
portD = parameters.NodesDict.get(elem.portD)
portG = parameters.NodesDict.get(elem.portG)
portS = parameters.NodesDict.get(elem.portS)
portB = parameters.NodesDict.get(elem.portB)
if portD == 0:
Vd = 0
else:
Vd = MatResult[portD-1,0]
if portG == 0:
Vg = 0
else:
Vg = MatResult[portG-1,0]
if portS == 0:
Vs = 0
else:
Vs = MatResult[portS-1,0]
if portB == 0:
#Vb = 0
pass
else:
#Vb = MatResult[portB-1,0]
pass
Vds = Vd - Vs
if (elem.MosType == 'pmos'):
if Vd > Vs:
Vgs = Vg - Vd
else :
Vgs = Vg - Vs
elif (elem.MosType == 'nmos'):
if Vd < Vs:
Vgs = Vg - Vd
else :
Vgs = Vg - Vs
if (InitFlag) :
if (elem.MosType == 'pmos'):
#Vds = -0.6
Vgs = -1.3
elif (elem.MosType == 'nmos'):
#Vds = 0.6
Vgs = 0.5
else:
pass
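# Newton/companion-model step: linearise the MOSFET at the current
# operating point, stamping its conductances (Gds, Gm) into the matrix
# and the residual drain current Ids into the RHS.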
Ids = elem.getIds(vgs=Vgs,vds=Vds)
Gm = elem.getGm(vds=Vds,vgs=Vgs)
Gds = elem.getGds(vds=Vds,vgs=Vgs)
if portD != 0:
MatRhs[portD-1,0] -= Ids
MatStamps[portD-1,portD-1] += Gds
if portS != 0:
MatRhs[portS-1,0] += Ids
MatStamps[portS-1,portS-1] += (Gds + Gm)
if (portD!=0) & (portS!=0):
MatStamps[portD-1,portS-1] -= (Gds + Gm)
MatStamps[portS-1,portD-1] -= Gds
if (portD!=0) & (portG!=0):
MatStamps[portD-1,portG-1] += Gm
if (portS!=0) & (portG!=0):
MatStamps[portS-1,portG-1] -= Gm
InitFlag = False
MatResult = np.linalg.solve(MatStamps,MatRhs) #Result
lastVMarkPort = VMarkPort
VMarkPort = MatResult[MarkPort-1,0]
parameters.opValueString = getOutDataOp(MatResult)
else:
MatStamps = np.mat(np.zeros((MatNum,MatNum)))
MatRhs = np.mat(np.zeros((MatNum,1)))
for elem in parameters.listR: #Load R
elem.loadMatResistor()
for keyPoint in elem.StampMatDict:
MatStamps[keyPoint.pointX,keyPoint.pointY] += elem.StampMatDict.get(keyPoint)
for elem in range(len(parameters.listE)): #Load E
#V -> Pulse -> SinV -> E -> F -> H -> L
matAddr = len(parameters.NodesDict) + len(parameters.listDCV)\
+ len(parameters.listPulseV) + len(parameters.listSinV) + elem - 1
parameters.listE[elem].loadMatE(matAddr)
for keyPoint in parameters.listE[elem].matStampsE:
MatStamps[keyPoint.pointX,keyPoint.pointY] = parameters.listE[elem].matStampsE.get(keyPoint)
for elem in range(len(parameters.listF)): #loadF
#V -> Pulse -> SinV -> E -> F -> H -> L
portCtlPos = parameters.NodesDict.get(parameters.listF[elem].ctlNodePos)
portCtlNeg = parameters.NodesDict.get(parameters.listF[elem].ctlNodeNeg)
matAddr = len(parameters.NodesDict) + len(parameters.listDCV) \
+ len(parameters.listPulseV) + len(parameters.listSinV) \
+ len(parameters.listE) + elem - 1
Vctl = MatResult[portCtlPos-1] - MatResult[portCtlNeg-1]
parameters.listF[elem].loadMatF(matAddr,Vctl)
for keyPoint in parameters.listF[elem].matStampsF:
MatStamps[keyPoint.pointX,keyPoint.pointY] = parameters.listF[elem].matStampsF.get(keyPoint)
for keyPoint in parameters.listF[elem].matRhsF:
MatRhs[keyPoint.pointX,keyPoint.pointY] = parameters.listF[elem].matRhsF.get(keyPoint)
for elem in parameters.listG: #loadG
elem.loadMatG()
for keyPoint in elem.matStampsG:
MatStamps[keyPoint.pointX,keyPoint.pointY] += elem.matStampsG.get(keyPoint)
for elem in range(len(parameters.listH)): #load H
#V -> Pulse -> SinV -> E -> F -> H -> L
matAddr = len(parameters.NodesDict) + len(parameters.listDCV) \
+ len(parameters.listPulseV) + len(parameters.listSinV) \
+ len(parameters.listE) + len(parameters.listF) + 2 * elem - 1
portCtlPos = parameters.NodesDict.get(parameters.listH[elem].ctlNodePos)
portCtlNeg = parameters.NodesDict.get(parameters.listH[elem].ctlNodeNeg)
Vctl = MatResult[portCtlPos-1] - MatResult[portCtlNeg-1]
parameters.listH[elem].loadMatH(matAddr,Vctl)
MatRhs[matAddr+1] = string2num(parameters.listH[elem].transResValue)
for keyPoint in parameters.listH[elem].matStampsH:
MatStamps[keyPoint.pointX,keyPoint.pointY] = parameters.listH[elem].matStampsH.get(keyPoint)
for elem in parameters.listDCI: #load DC Is
elem.loadMatDCIs()
for keyPoint in elem.RHSMatDict:
MatRhs[keyPoint.pointX,keyPoint.pointY] += elem.RHSMatDict.get(keyPoint)
for elem in range(len(parameters.listDCV)): #load DC Vs
#V -> Pulse -> SinV -> E -> F -> H -> L
VsDC = parameters.listDCV[elem]
matAddr = len(parameters.NodesDict) + elem - 1
portPos = parameters.NodesDict.get(VsDC.portPos)
portNeg = parameters.NodesDict.get(VsDC.portNeg)
DCV_Value = string2num(VsDC.value)
MatRhs[matAddr,0] = DCV_Value
if portPos != 0:
MatStamps[matAddr,portPos-1] = 1
MatStamps[portPos-1,matAddr] = 1
if portNeg != 0:
MatStamps[matAddr,portNeg-1] = -1
MatStamps[portNeg-1,matAddr] = -1
for elem in range(len(parameters.listPulseV)): #load DC pulse Vs
#V -> Pulse -> SinV -> E -> F -> H -> L
VsPulseDC = parameters.listPulseV[elem]
matAddr = len(parameters.NodesDict) + len(parameters.listDCV) + elem - 1
portPos = parameters.NodesDict.get(VsPulseDC.portPos)
portNeg = parameters.NodesDict.get(VsPulseDC.portNeg)
Pulse_DCValue = VsPulseDC.getVoltage(0)
MatRhs[matAddr,0] = Pulse_DCValue
if portPos != 0:
MatStamps[matAddr,portPos-1] = 1
MatStamps[portPos-1,matAddr] = 1
if portNeg != 0:
MatStamps[matAddr,portNeg-1] = -1
MatStamps[portNeg-1,matAddr] = -1
for elem in range(len(parameters.listSinV)): #load DC sin Vs
VsSinDC = parameters.listSinV[elem]
#V -> Pulse -> SinV -> E -> F -> H -> L
matAddr = len(parameters.NodesDict) + len(parameters.listDCV) \
+ len(parameters.listPulseV) + elem - 1
portPos = parameters.NodesDict.get(VsSinDC.portPos)
portNeg = parameters.NodesDict.get(VsSinDC.portNeg)
Sin_DcValue = VsSinDC.getValue(0)
MatRhs[matAddr,0] = Sin_DcValue
if portPos != 0:
MatStamps[matAddr,portPos-1] = 1
MatStamps[portPos-1,matAddr] = 1
if portNeg != 0:
MatStamps[matAddr,portNeg-1] = -1
MatStamps[portNeg-1,matAddr] = -1
for elem in range(len(parameters.listL)): #Load L in DC simulation
LTemp = parameters.listL[elem]
#V -> Pulse -> SinV -> E -> F -> H -> L
matAddr = len(parameters.NodesDict) + len(parameters.listDCV) \
+ len(parameters.listPulseV) + len(parameters.listSinV) \
+ len(parameters.listE) + len(parameters.listF) \
+ 2 * len(parameters.listH) + elem - 1
portPos = parameters.NodesDict.get(LTemp.port1)
portNeg = parameters.NodesDict.get(LTemp.port2)
if portPos != 0:
MatStamps[matAddr,portPos-1] = 1
MatStamps[portPos-1,matAddr] = 1
if portNeg != 0:
MatStamps[matAddr,portNeg-1] = -1
MatStamps[portNeg-1,matAddr] = -1
MatResult = np.linalg.solve(MatStamps,MatRhs) #Result
parameters.opValueString = getOutDataOp(MatResult)
print('MNA:')
print(MatStamps)
print('RHS: ')
print(MatRhs)
print('Result: ')
print(MatResult)
print(parameters.opExpString)
print(parameters.opValueString)
print('-------------------------------------')
showinfo('OP','OP Simulation End!')
return MatResult
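# ---------------------------------------------------------------------------
# Hedged, self-contained sketch (not part of SunSpice): the OP loop above is
# Modified Nodal Analysis -- element "stamps" fill a conductance matrix and a
# RHS vector, then numpy solves the linear system. A 1 V source driving two
# series 1 kOhm resistors (a voltage divider) looks like this; the unknowns
# are [v1, v2, i_Vs], and all names here are illustrative assumptions. It
# reuses the module-level "import numpy as np".
# ---------------------------------------------------------------------------
def _mna_voltage_divider_example():
    G = 1.0 / 1000.0                     # conductance of each 1 kOhm resistor
    A = np.mat([[G, -G, 1.0],            # KCL at node 1 (plus source branch current)
                [-G, 2.0 * G, 0.0],      # KCL at node 2 (stamps of both resistors)
                [1.0, 0.0, 0.0]])        # source branch equation: v1 = Vs
    rhs = np.mat([[0.0], [0.0], [1.0]])  # only the source row is nonzero
    result = np.linalg.solve(A, rhs)     # expect v1 = 1.0, v2 = 0.5, i_Vs = -0.5 mA
    return result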
| 42.381877 | 112 | 0.527184 | 2,639 | 26,192 | 5.20576 | 0.071239 | 0.110715 | 0.064056 | 0.018416 | 0.820061 | 0.785558 | 0.772165 | 0.757461 | 0.752366 | 0.752366 | 0 | 0.020546 | 0.357056 | 26,192 | 617 | 113 | 42.450567 | 0.795249 | 0.049519 | 0 | 0.646667 | 0 | 0 | 0.009427 | 0.001491 | 0 | 0 | 0 | 0 | 0 | 1 | 0.006667 | false | 0.011111 | 0.011111 | 0 | 0.026667 | 0.024444 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 6 |
6b2bdaaac03d42958f0d06908f0fc88c5f303e65 | 339 | py | Python | espaloma/data/__init__.py | jstaker7/espaloma | d80d280acd608dc04c93966afe15cc3cb74f65a8 | [
"MIT"
] | null | null | null | espaloma/data/__init__.py | jstaker7/espaloma | d80d280acd608dc04c93966afe15cc3cb74f65a8 | [
"MIT"
] | null | null | null | espaloma/data/__init__.py | jstaker7/espaloma | d80d280acd608dc04c93966afe15cc3cb74f65a8 | [
"MIT"
] | null | null | null | import os
import espaloma
import espaloma.data
import espaloma.data.dataset
import espaloma.data.md
import espaloma.data.normalize
import espaloma.data.utils
import espaloma.data.qcarchive_utils
import espaloma.data.md17_utils
from espaloma.data.collection import *
# esol = utils.from_csv(os.path.dirname(utils.__file__) + "/esol.csv")
| 24.214286 | 70 | 0.820059 | 49 | 339 | 5.530612 | 0.346939 | 0.413284 | 0.464945 | 0.169742 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.006472 | 0.088496 | 339 | 13 | 71 | 26.076923 | 0.87055 | 0.20059 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 0 | 0 | 0 | null | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 6 |
860a68aa025c5271c7af24500ce7b4baa36c6304 | 37 | py | Python | kikimr/public/sdk/python/client/connection.py | yandex-cloud/ydb-python-sdk | 0df2dce2d77fc41ad3020072740f51dd91630177 | [
"Apache-2.0"
] | 19 | 2019-07-01T08:25:29.000Z | 2022-01-26T14:46:51.000Z | kikimr/public/sdk/python/client/connection.py | yandex-cloud/ydb-python-sdk | 0df2dce2d77fc41ad3020072740f51dd91630177 | [
"Apache-2.0"
] | 5 | 2019-07-02T13:36:42.000Z | 2021-09-14T06:46:48.000Z | kikimr/public/sdk/python/client/connection.py | yandex-cloud/ydb-python-sdk | 0df2dce2d77fc41ad3020072740f51dd91630177 | [
"Apache-2.0"
] | 10 | 2019-06-07T10:36:19.000Z | 2021-10-15T08:58:11.000Z | from ydb.connection import * # noqa
| 18.5 | 36 | 0.72973 | 5 | 37 | 5.4 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.189189 | 37 | 1 | 37 | 37 | 0.9 | 0.108108 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 1 | 1 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 6 |
865832bebcbaeec63a2d5d6a9e58b5e38d5b0f70 | 170 | py | Python | otrans/train/__init__.py | jjjjohnson/OpenTransformer | 9a6371095ee83896d886addf55bda3a42c3918f6 | [
"MIT"
] | 321 | 2019-12-08T20:04:21.000Z | 2022-03-25T05:35:21.000Z | otrans/train/__init__.py | jjjjohnson/OpenTransformer | 9a6371095ee83896d886addf55bda3a42c3918f6 | [
"MIT"
] | 45 | 2020-02-12T06:29:59.000Z | 2021-11-24T03:13:49.000Z | otrans/train/__init__.py | jjjjohnson/OpenTransformer | 9a6371095ee83896d886addf55bda3a42c3918f6 | [
"MIT"
] | 71 | 2019-12-07T03:33:18.000Z | 2022-03-22T06:39:58.000Z | '''
@Author: Zhengkun Tian
@Email: zhengkun.tian@outlook.com
@Date: 2020-04-02 16:58:26
@LastEditTime: 2020-04-02 16:58:27
@FilePath: \OpenASR\oasr\train\__init__.py
'''
| 21.25 | 42 | 0.723529 | 28 | 170 | 4.25 | 0.75 | 0.201681 | 0.134454 | 0.168067 | 0.201681 | 0 | 0 | 0 | 0 | 0 | 0 | 0.180645 | 0.088235 | 170 | 7 | 43 | 24.285714 | 0.587097 | 0.982353 | 0 | null | 0 | null | 0 | 0 | null | 0 | 0 | 0 | null | 1 | null | true | 0 | 0 | null | null | null | 1 | 0 | 0 | null | 1 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 6 |
8676c6778dcfb0c7dae309cbda27bfa092f003f0 | 37 | py | Python | pdbutil/__init__.py | ShintaroMinami/pdbutil | 764284ad081c120e9adabbc92fb09c6cf830d9d3 | [
"MIT"
] | null | null | null | pdbutil/__init__.py | ShintaroMinami/pdbutil | 764284ad081c120e9adabbc92fb09c6cf830d9d3 | [
"MIT"
] | null | null | null | pdbutil/__init__.py | ShintaroMinami/pdbutil | 764284ad081c120e9adabbc92fb09c6cf830d9d3 | [
"MIT"
] | null | null | null | from .pdbutil import ProteinBackbone
| 18.5 | 36 | 0.864865 | 4 | 37 | 8 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.108108 | 37 | 1 | 37 | 37 | 0.969697 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 1 | 1 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 6 |
8695177aa6ef3bd6600cf18057e873b0ce4cc197 | 37,335 | py | Python | azure-iot-device/tests/provisioning/internal/test_polling_machine.py | nextdynamic/azure-iot-sdk-python | 217853005ea507a5a415e8ca9ca4f6adb7284b7a | [
"MIT"
] | 1 | 2019-02-06T06:52:44.000Z | 2019-02-06T06:52:44.000Z | azure-iot-device/tests/provisioning/internal/test_polling_machine.py | nextdynamic/azure-iot-sdk-python | 217853005ea507a5a415e8ca9ca4f6adb7284b7a | [
"MIT"
] | null | null | null | azure-iot-device/tests/provisioning/internal/test_polling_machine.py | nextdynamic/azure-iot-sdk-python | 217853005ea507a5a415e8ca9ca4f6adb7284b7a | [
"MIT"
] | null | null | null | # --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
# --------------------------------------------------------------------------
import pytest
import datetime
import logging
from mock import MagicMock
from azure.iot.device.provisioning.internal.request_response_provider import RequestResponseProvider
from azure.iot.device.provisioning.internal.polling_machine import PollingMachine
from azure.iot.device.provisioning.models.registration_result import RegistrationResult
from azure.iot.device.provisioning.pipeline import constant
import time
logging.basicConfig(level=logging.DEBUG)
fake_request_id = "Request1234"
fake_retry_after = "3"
fake_operation_id = "Operation4567"
fake_status = "Flying"
fake_device_id = "MyNimbus2000"
fake_assigned_hub = "Dumbledore'sArmy"
fake_sub_status = "FlyingOnHippogriff"
fake_created_dttm = datetime.datetime(2020, 5, 17)
fake_last_update_dttm = datetime.datetime(2020, 10, 17)
fake_etag = "HighQualityFlyingBroom"
fake_symmetric_key = "Zm9vYmFy"
fake_registration_id = "MyPensieve"
fake_id_scope = "Enchanted0000Ceiling7898"
fake_success_response_topic = "$dps/registrations/res/200/?"
fake_failure_response_topic = "$dps/registrations/res/400/?"
fake_greater_429_response_topic = "$dps/registrations/res/430/?"
fake_assigning_status = "assigning"
fake_assigned_status = "assigned"
fake_payload = "Petrificus Totalus"
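# Test double that re-exposes the provider's protected _receive_response(),
# so the tests below can inject canned service responses directly.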
class SomeRequestResponseProvider(RequestResponseProvider):
def receive_response(self, request_id, status_code, key_values, payload_str):
return super(SomeRequestResponseProvider, self)._receive_response(
request_id=request_id,
status_code=status_code,
key_value_dict=key_values,
response_payload=payload_str,
)
@pytest.fixture
def mock_request_response_provider(mocker):
return mocker.MagicMock(spec=SomeRequestResponseProvider)
@pytest.fixture
def mock_polling_machine(mocker, mock_request_response_provider):
state_based_mqtt = MagicMock()
mock_init_request_response_provider = mocker.patch(
"azure.iot.device.provisioning.internal.polling_machine.RequestResponseProvider"
)
mock_init_request_response_provider.return_value = mock_request_response_provider
mock_polling_machine = PollingMachine(state_based_mqtt)
return mock_polling_machine
@pytest.mark.describe("PollingMachine - Register")
class TestRegister(object):
@pytest.mark.it("Calls subscribe on RequestResponseProvider")
def test_register_calls_subscribe_on_request_response_provider(self, mock_polling_machine):
mock_request_response_provider = mock_polling_machine._request_response_provider
mock_polling_machine.register()
assert mock_request_response_provider.enable_responses.call_count == 1
assert (
mock_request_response_provider.enable_responses.call_args[1]["callback"]
== mock_polling_machine._on_subscribe_completed
)
@pytest.mark.it("Sets the payload when register is called with an user supplied payload")
def test_register_with_payload_calls_send_request_with_payload_on_request_response_provider(
self, mocker, mock_polling_machine
):
mock_polling_machine.register(payload=fake_payload)
mock_init_uuid = mocker.patch(
"azure.iot.device.provisioning.internal.polling_machine.uuid.uuid4"
)
mock_init_uuid.return_value = fake_request_id
mock_init_query_timer = mocker.patch(
"azure.iot.device.provisioning.internal.polling_machine.Timer"
)
mock_query_timer = mock_init_query_timer.return_value
mocker.patch.object(mock_query_timer, "start")
mock_polling_machine.state = "initializing"
mock_request_response_provider = mock_polling_machine._request_response_provider
spy_method = mocker.spy(mock_request_response_provider, "send_request")
mock_polling_machine._on_subscribe_completed()
assert spy_method.call_count == 1
assert spy_method.call_args_list[0][1]["request_id"] == fake_request_id
assert spy_method.call_args_list[0][1]["request_payload"] == fake_payload
@pytest.mark.it("Completes subscription and calls send request on RequestResponseProvider")
def test_on_subscribe_completed_calls_send_register_request_on_request_response_provider(
self, mock_polling_machine, mocker
):
mock_init_uuid = mocker.patch(
"azure.iot.device.provisioning.internal.polling_machine.uuid.uuid4"
)
mock_init_uuid.return_value = fake_request_id
mock_init_query_timer = mocker.patch(
"azure.iot.device.provisioning.internal.polling_machine.Timer"
)
mock_query_timer = mock_init_query_timer.return_value
mocker.patch.object(mock_query_timer, "start")
mock_polling_machine.state = "initializing"
mock_request_response_provider = mock_polling_machine._request_response_provider
spy_method = mocker.spy(mock_request_response_provider, "send_request")
mock_polling_machine._on_subscribe_completed()
assert spy_method.call_count == 1
assert spy_method.call_args_list[0][1]["request_id"] == fake_request_id
assert spy_method.call_args_list[0][1]["request_payload"] is None
@pytest.mark.describe("PollingMachine - Register Response")
class TestRegisterResponse(object):
# Shorten the timeout so the tests do not hang for too long
constant.DEFAULT_TIMEOUT_INTERVAL = 0.2
constant.DEFAULT_POLLING_INTERVAL = 0.01
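# These tests wire a real RequestResponseProvider/PollingMachine pair over a
# mocked MQTT pipeline, inject canned service responses via receive_response(),
# and fire the captured Timer callbacks by hand, so no network traffic or
# real timers are involved.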
@pytest.mark.it("Starts querying when there is a response with 'assigning' registration status")
def test_receive_register_response_assigning_does_query_with_operation_id(self, mocker):
state_based_mqtt = MagicMock()
mock_request_response_provider = SomeRequestResponseProvider(state_based_mqtt)
polling_machine = PollingMachine(state_based_mqtt)
polling_machine._request_response_provider = mock_request_response_provider
mocker.patch.object(mock_request_response_provider, "enable_responses")
mocker.patch.object(state_based_mqtt, "send_request")
# to transition into initializing
polling_machine.register(callback=MagicMock())
mock_init_uuid = mocker.patch(
"azure.iot.device.provisioning.internal.polling_machine.uuid.uuid4"
)
mock_init_uuid.return_value = fake_request_id
key_value_dict = {}
key_value_dict["request_id"] = [fake_request_id, " "]
key_value_dict["retry-after"] = [fake_retry_after, " "]
# to transition into registering
polling_machine._on_subscribe_completed()
# reset mock to generate different request id for query
mock_init_uuid.reset_mock()
fake_request_id_query = "Request4567"
mock_init_uuid.return_value = fake_request_id_query
fake_payload_result = (
'{"operationId":"' + fake_operation_id + '","status":"' + fake_assigning_status + '"}'
)
mock_init_polling_timer = mocker.patch(
"azure.iot.device.provisioning.internal.polling_machine.Timer"
)
# A bytes literal (a string prefixed with b) works on all Python versions,
# as does calling .encode() on a str.
# On Python 3 only, bytes(json_string, "utf-8") is another option.
mock_request_response_provider.receive_response(
fake_request_id, "200", key_value_dict, fake_payload_result
)
# fire the polling timer's time-up callback to simulate polling
time_up_call = mock_init_polling_timer.call_args[0][1]
time_up_call()
assert state_based_mqtt.send_request.call_count == 2
assert state_based_mqtt.send_request.call_args_list[0][1]["request_id"] == fake_request_id
assert state_based_mqtt.send_request.call_args_list[0][1]["request_payload"] is None
assert (
state_based_mqtt.send_request.call_args_list[1][1]["request_id"]
== fake_request_id_query
)
assert (
state_based_mqtt.send_request.call_args_list[1][1]["operation_id"] == fake_operation_id
)
assert state_based_mqtt.send_request.call_args_list[1][1]["request_payload"] == " "
@pytest.mark.it(
"Completes registration process when there is a response with 'assigned' registration status"
)
def test_receive_register_response_assigned_completes_registration(self, mocker):
state_based_mqtt = MagicMock()
mock_request_response_provider = SomeRequestResponseProvider(state_based_mqtt)
polling_machine = PollingMachine(state_based_mqtt)
polling_machine._request_response_provider = mock_request_response_provider
mocker.patch.object(mock_request_response_provider, "enable_responses")
mocker.patch.object(state_based_mqtt, "send_request")
mocker.patch.object(mock_request_response_provider, "disconnect")
# to transition into initializing
mock_callback = MagicMock()
polling_machine.register(callback=mock_callback)
mock_init_uuid = mocker.patch(
"azure.iot.device.provisioning.internal.polling_machine.uuid.uuid4"
)
mock_init_uuid.return_value = fake_request_id
key_value_dict = {}
key_value_dict["request_id"] = [fake_request_id, " "]
key_value_dict["retry-after"] = [fake_retry_after, " "]
# to transition into registering
polling_machine._on_subscribe_completed()
fake_registration_state = (
'{"registrationId":"'
+ fake_registration_id
+ '","assignedHub":"'
+ fake_assigned_hub
+ '","deviceId":"'
+ fake_device_id
+ '","substatus":"'
+ fake_sub_status
+ '"}'
)
fake_payload_result = (
'{"operationId":"'
+ fake_operation_id
+ '","status":"'
+ fake_assigned_status
+ '","registrationState":'
+ fake_registration_state
+ "}"
)
mock_request_response_provider.receive_response(
fake_request_id, "200", key_value_dict, fake_payload_result
)
polling_machine._on_disconnect_completed_register()
assert state_based_mqtt.send_request.call_count == 1
assert state_based_mqtt.send_request.call_args_list[0][1]["request_id"] == fake_request_id
assert state_based_mqtt.send_request.call_args_list[0][1]["request_payload"] is None
assert mock_callback.call_count == 1
assert isinstance(mock_callback.call_args[1]["result"], RegistrationResult)
registration_result = mock_callback.call_args[1]["result"]
assert registration_result.request_id == fake_request_id
assert registration_result.operation_id == fake_operation_id
assert registration_result.status == fake_assigned_status
assert registration_result.registration_state.device_id == fake_device_id
assert registration_result.registration_state.sub_status == fake_sub_status
@pytest.mark.it(
"Calls callback of register with error when there is a failed response with status code > 300 & status code < 429"
)
def test_receive_register_response_failure_calls_callback_of_register_error(self, mocker):
state_based_mqtt = MagicMock()
mock_request_response_provider = SomeRequestResponseProvider(state_based_mqtt)
polling_machine = PollingMachine(state_based_mqtt)
polling_machine._request_response_provider = mock_request_response_provider
mocker.patch.object(mock_request_response_provider, "enable_responses")
mocker.patch.object(state_based_mqtt, "send_request")
mocker.patch.object(mock_request_response_provider, "disconnect")
# to transition into initializing
mock_callback = MagicMock()
polling_machine.register(callback=mock_callback)
mock_init_uuid = mocker.patch(
"azure.iot.device.provisioning.internal.polling_machine.uuid.uuid4"
)
mock_init_uuid.return_value = fake_request_id
key_value_dict = {}
key_value_dict["request_id"] = [fake_request_id, " "]
# to transition into registering
polling_machine._on_subscribe_completed()
fake_payload_result = "HelloHogwarts"
mock_request_response_provider.receive_response(
fake_request_id, "400", key_value_dict, fake_payload_result
)
polling_machine._on_disconnect_completed_error()
assert state_based_mqtt.send_request.call_count == 1
assert state_based_mqtt.send_request.call_args_list[0][1]["request_id"] == fake_request_id
assert state_based_mqtt.send_request.call_args_list[0][1]["request_payload"] is None
assert mock_callback.call_count == 1
assert isinstance(mock_callback.call_args[1]["error"], ValueError)
assert mock_callback.call_args[1]["error"].args[0] == "Incoming message failure"
@pytest.mark.it(
"Calls callback of register with error when there is a response with unknown registration status"
)
def test_receive_register_response_some_unknown_status_calls_callback_of_register_error(
self, mocker
):
state_based_mqtt = MagicMock()
mock_request_response_provider = SomeRequestResponseProvider(state_based_mqtt)
polling_machine = PollingMachine(state_based_mqtt)
polling_machine._request_response_provider = mock_request_response_provider
mocker.patch.object(mock_request_response_provider, "enable_responses")
mocker.patch.object(state_based_mqtt, "send_request")
mocker.patch.object(mock_request_response_provider, "disconnect")
# to transition into initializing
mock_callback = MagicMock()
polling_machine.register(callback=mock_callback)
mock_init_uuid = mocker.patch(
"azure.iot.device.provisioning.internal.polling_machine.uuid.uuid4"
)
mock_init_uuid.return_value = fake_request_id
key_value_dict = {}
key_value_dict["request_id"] = [fake_request_id, " "]
# to transition into registering
polling_machine._on_subscribe_completed()
fake_unknown_status = "disabled"
fake_payload_result = (
'{"operationId":"' + fake_operation_id + '","status":"' + fake_unknown_status + '"}'
)
mock_request_response_provider.receive_response(
fake_request_id, "200", key_value_dict, fake_payload_result
)
polling_machine._on_disconnect_completed_error()
assert mock_callback.call_count == 1
assert isinstance(mock_callback.call_args[1]["error"], ValueError)
assert (
mock_callback.call_args[1]["error"].args[0] == "Other types of failure have occurred."
)
assert mock_callback.call_args[1]["error"].args[1] == fake_payload_result
@pytest.mark.it("Calls register again when there is a response with status code > 429")
def test_receive_register_response_greater_than_429_does_register_again(self, mocker):
state_based_mqtt = MagicMock()
mock_request_response_provider = SomeRequestResponseProvider(state_based_mqtt)
polling_machine = PollingMachine(state_based_mqtt)
polling_machine._request_response_provider = mock_request_response_provider
mocker.patch.object(mock_request_response_provider, "enable_responses")
mocker.patch.object(state_based_mqtt, "send_request")
# to transition into initializing
polling_machine.register(callback=MagicMock())
mock_init_uuid = mocker.patch(
"azure.iot.device.provisioning.internal.polling_machine.uuid.uuid4"
)
mock_init_uuid.return_value = fake_request_id
key_value_dict = {}
key_value_dict["request_id"] = [fake_request_id, " "]
key_value_dict["retry-after"] = [fake_retry_after, " "]
# to transition into registering
polling_machine._on_subscribe_completed()
# reset mock to generate a different request id for the second register attempt
mock_init_uuid.reset_mock()
fake_request_id_2 = "Request4567"
mock_init_uuid.return_value = fake_request_id_2
fake_payload_result = "HelloHogwarts"
mock_init_polling_timer = mocker.patch(
"azure.iot.device.provisioning.internal.polling_machine.Timer"
)
mock_request_response_provider.receive_response(
fake_request_id, "430", key_value_dict, fake_payload_result
)
# fire the polling timer's time-up callback to simulate polling
time_up_call = mock_init_polling_timer.call_args[0][1]
time_up_call()
assert state_based_mqtt.send_request.call_count == 2
assert state_based_mqtt.send_request.call_args_list[0][1]["request_id"] == fake_request_id
assert state_based_mqtt.send_request.call_args_list[0][1]["request_payload"] is None
assert state_based_mqtt.send_request.call_args_list[1][1]["request_id"] == fake_request_id_2
assert state_based_mqtt.send_request.call_args_list[1][1]["request_payload"] is None
@pytest.mark.it("Calls callback of register with error when there is a time out")
def test_receive_register_response_after_query_time_passes_calls_callback_with_error(
self, mocker
):
state_based_mqtt = MagicMock()
mock_request_response_provider = SomeRequestResponseProvider(state_based_mqtt)
polling_machine = PollingMachine(state_based_mqtt)
polling_machine._request_response_provider = mock_request_response_provider
mocker.patch.object(mock_request_response_provider, "enable_responses")
mocker.patch.object(state_based_mqtt, "send_request")
# to transition into initializing
mock_callback = MagicMock()
polling_machine.register(callback=mock_callback)
mock_init_uuid = mocker.patch(
"azure.iot.device.provisioning.internal.polling_machine.uuid.uuid4"
)
mock_init_uuid.return_value = fake_request_id
# to transition into registering
polling_machine._on_subscribe_completed()
# sleep so that it times out query
time.sleep(constant.DEFAULT_TIMEOUT_INTERVAL + 0.2)
polling_machine._on_disconnect_completed_error()
assert state_based_mqtt.send_request.call_count == 1
assert state_based_mqtt.send_request.call_args_list[0][1]["request_id"] == fake_request_id
assert mock_callback.call_count == 1
assert mock_callback.call_args[1]["error"].args[0] == "Time is up for query timer"
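# Hedged sketch (not part of the test suite): the registration payloads
# built above by string concatenation could equally be produced with
# json.dumps, which avoids quoting mistakes; the helper name is an
# assumption.
import json

def _fake_registration_payload(operation_id, status, registration_state=None):
    body = {"operationId": operation_id, "status": status}
    if registration_state is not None:
        body["registrationState"] = registration_state
    return json.dumps(body)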
@pytest.mark.describe("PollingMachine - Query Response")
class TestQueryResponse(object):
# Shorten the timeout so the tests do not hang for too long
constant.DEFAULT_TIMEOUT_INTERVAL = 0.2
constant.DEFAULT_POLLING_INTERVAL = 0.01
@pytest.mark.it(
"Does query again when there is a response with 'assigning' registration status"
)
def test_receive_query_response_assigning_does_query_again_with_same_operation_id(self, mocker):
state_based_mqtt = MagicMock()
mock_request_response_provider = SomeRequestResponseProvider(state_based_mqtt)
polling_machine = PollingMachine(state_based_mqtt)
polling_machine._request_response_provider = mock_request_response_provider
mocker.patch.object(mock_request_response_provider, "enable_responses")
mocker.patch.object(state_based_mqtt, "send_request")
# to transition into initializing
polling_machine.register(callback=MagicMock())
mock_init_uuid = mocker.patch(
"azure.iot.device.provisioning.internal.polling_machine.uuid.uuid4"
)
mock_init_uuid.return_value = fake_request_id
key_value_dict = {}
key_value_dict["request_id"] = [fake_request_id, " "]
# to transition into registering
polling_machine._on_subscribe_completed()
# reset mock to generate different request id for first query
mock_init_uuid.reset_mock()
fake_request_id_query = "Request4567"
mock_init_uuid.return_value = fake_request_id_query
key_value_dict_2 = {}
key_value_dict_2["request_id"] = [fake_request_id_query, " "]
# fake_register_topic = fake_success_response_topic + "$rid={}".format(fake_request_id)
fake_register_payload_result = (
'{"operationId":"' + fake_operation_id + '","status":"' + fake_assigning_status + '"}'
)
mock_init_polling_timer = mocker.patch(
"azure.iot.device.provisioning.internal.polling_machine.Timer"
)
# Response for register to transition to waiting polling
mock_request_response_provider.receive_response(
fake_request_id, "200", key_value_dict, fake_register_payload_result
)
# fire the polling timer's time-up callback to simulate polling
time_up_call = mock_init_polling_timer.call_args[0][1]
time_up_call()
# reset mock to generate different request id for second query
mock_init_uuid.reset_mock()
fake_request_id_query_2 = "Request7890"
mock_init_uuid.return_value = fake_request_id_query_2
fake_query_payload_result = (
'{"operationId":"' + fake_operation_id + '","status":"' + fake_assigning_status + '"}'
)
mock_init_polling_timer.reset_mock()
mock_request_response_provider.receive_response(
fake_request_id_query, "200", key_value_dict_2, fake_query_payload_result
)
# fire the polling timer's time-up callback to simulate polling
time_up_call = mock_init_polling_timer.call_args[0][1]
time_up_call()
assert state_based_mqtt.send_request.call_count == 3
assert state_based_mqtt.send_request.call_args_list[0][1]["request_id"] == fake_request_id
assert state_based_mqtt.send_request.call_args_list[0][1]["request_payload"] is None
assert (
state_based_mqtt.send_request.call_args_list[1][1]["request_id"]
== fake_request_id_query
)
assert (
state_based_mqtt.send_request.call_args_list[1][1]["operation_id"] == fake_operation_id
)
assert state_based_mqtt.send_request.call_args_list[1][1]["request_payload"] == " "
assert (
state_based_mqtt.send_request.call_args_list[2][1]["request_id"]
== fake_request_id_query_2
)
assert (
state_based_mqtt.send_request.call_args_list[2][1]["operation_id"] == fake_operation_id
)
assert state_based_mqtt.send_request.call_args_list[2][1]["request_payload"] == " "
@pytest.mark.it(
"Completes registration process when there is a query response with 'assigned' registration status"
)
def test_receive_query_response_assigned_completes_registration(self, mocker):
state_based_mqtt = MagicMock()
mock_request_response_provider = SomeRequestResponseProvider(state_based_mqtt)
polling_machine = PollingMachine(state_based_mqtt)
polling_machine._request_response_provider = mock_request_response_provider
mocker.patch.object(mock_request_response_provider, "enable_responses")
mocker.patch.object(state_based_mqtt, "send_request")
mocker.patch.object(mock_request_response_provider, "disconnect")
# to transition into initializing
mock_callback = MagicMock()
polling_machine.register(callback=mock_callback)
mock_init_uuid = mocker.patch(
"azure.iot.device.provisioning.internal.polling_machine.uuid.uuid4"
)
mock_init_uuid.return_value = fake_request_id
key_value_dict = {}
key_value_dict["request_id"] = [fake_request_id, " "]
# to transition into registering
polling_machine._on_subscribe_completed()
# reset mock to generate different request id for first query
mock_init_uuid.reset_mock()
fake_request_id_query = "Request4567"
mock_init_uuid.return_value = fake_request_id_query
key_value_dict_2 = {}
key_value_dict_2["request_id"] = [fake_request_id_query, " "]
fake_register_payload_result = (
'{"operationId":"' + fake_operation_id + '","status":"' + fake_assigning_status + '"}'
)
mock_init_polling_timer = mocker.patch(
"azure.iot.device.provisioning.internal.polling_machine.Timer"
)
# Response for register to transition to waiting and polling
mock_request_response_provider.receive_response(
fake_request_id, "200", key_value_dict, fake_register_payload_result
)
# fire the polling timer's time-up callback to simulate polling
time_up_call = mock_init_polling_timer.call_args[0][1]
time_up_call()
fake_registration_state = (
'{"registrationId":"'
+ fake_registration_id
+ '","assignedHub":"'
+ fake_assigned_hub
+ '","deviceId":"'
+ fake_device_id
+ '","substatus":"'
+ fake_sub_status
+ '"}'
)
fake_query_payload_result = (
'{"operationId":"'
+ fake_operation_id
+ '","status":"'
+ fake_assigned_status
+ '","registrationState":'
+ fake_registration_state
+ "}"
)
# Response for query
mock_request_response_provider.receive_response(
fake_request_id_query, "200", key_value_dict_2, fake_query_payload_result
)
polling_machine._on_disconnect_completed_register()
assert state_based_mqtt.send_request.call_count == 2
assert state_based_mqtt.send_request.call_args_list[0][1]["request_id"] == fake_request_id
assert state_based_mqtt.send_request.call_args_list[0][1]["request_payload"] is None
assert (
state_based_mqtt.send_request.call_args_list[1][1]["request_id"]
== fake_request_id_query
)
assert (
state_based_mqtt.send_request.call_args_list[1][1]["operation_id"] == fake_operation_id
)
assert state_based_mqtt.send_request.call_args_list[1][1]["request_payload"] == " "
assert mock_callback.call_count == 1
assert isinstance(mock_callback.call_args[1]["result"], RegistrationResult)
@pytest.mark.it(
"Calls callback of register with error when there is a failed query response with status code > 300 & status code < 429"
)
def test_receive_query_response_failure_calls_callback_of_register_error(self, mocker):
state_based_mqtt = MagicMock()
mock_request_response_provider = SomeRequestResponseProvider(state_based_mqtt)
polling_machine = PollingMachine(state_based_mqtt)
polling_machine._request_response_provider = mock_request_response_provider
mocker.patch.object(mock_request_response_provider, "enable_responses")
mocker.patch.object(state_based_mqtt, "send_request")
mocker.patch.object(mock_request_response_provider, "disconnect")
# to transition into initializing
mock_callback = MagicMock()
polling_machine.register(callback=mock_callback)
mock_init_uuid = mocker.patch(
"azure.iot.device.provisioning.internal.polling_machine.uuid.uuid4"
)
mock_init_uuid.return_value = fake_request_id
key_value_dict = {}
key_value_dict["request_id"] = [fake_request_id, " "]
# to transition into registering
polling_machine._on_subscribe_completed()
# reset mock to generate different request id for first query
mock_init_uuid.reset_mock()
fake_request_id_query = "Request4567"
mock_init_uuid.return_value = fake_request_id_query
key_value_dict_2 = {}
key_value_dict_2["request_id"] = [fake_request_id_query, " "]
fake_register_payload_result = (
'{"operationId":"' + fake_operation_id + '","status":"' + fake_assigning_status + '"}'
)
mock_init_polling_timer = mocker.patch(
"azure.iot.device.provisioning.internal.polling_machine.Timer"
)
# Response for register to transition to waiting and polling
mock_request_response_provider.receive_response(
fake_request_id, "200", key_value_dict, fake_register_payload_result
)
# fire the polling timer's time-up callback to simulate polling
time_up_call = mock_init_polling_timer.call_args[0][1]
time_up_call()
fake_query_payload_result = "HelloHogwarts"
# Response for query
mock_request_response_provider.receive_response(
fake_request_id_query, "400", key_value_dict_2, fake_query_payload_result
)
polling_machine._on_disconnect_completed_error()
assert state_based_mqtt.send_request.call_count == 2
assert state_based_mqtt.send_request.call_args_list[0][1]["request_id"] == fake_request_id
assert state_based_mqtt.send_request.call_args_list[0][1]["request_payload"] is None
assert (
state_based_mqtt.send_request.call_args_list[1][1]["request_id"]
== fake_request_id_query
)
assert (
state_based_mqtt.send_request.call_args_list[1][1]["operation_id"] == fake_operation_id
)
assert state_based_mqtt.send_request.call_args_list[1][1]["request_payload"] == " "
assert mock_callback.call_count == 1
assert isinstance(mock_callback.call_args[1]["error"], ValueError)
assert mock_callback.call_args[1]["error"].args[0] == "Incoming message failure"
@pytest.mark.it("Calls query again when there is a response with status code > 429")
def test_receive_query_response_greater_than_429_does_query_again_with_same_operation_id(
self, mocker
):
state_based_mqtt = MagicMock()
mock_request_response_provider = SomeRequestResponseProvider(state_based_mqtt)
polling_machine = PollingMachine(state_based_mqtt)
polling_machine._request_response_provider = mock_request_response_provider
mocker.patch.object(mock_request_response_provider, "enable_responses")
mocker.patch.object(state_based_mqtt, "send_request")
mocker.patch.object(mock_request_response_provider, "disconnect")
# to transition into initializing
polling_machine.register(callback=MagicMock())
mock_init_uuid = mocker.patch(
"azure.iot.device.provisioning.internal.polling_machine.uuid.uuid4"
)
mock_init_uuid.return_value = fake_request_id
key_value_dict = {}
key_value_dict["request_id"] = [fake_request_id, " "]
# to transition into registering
polling_machine._on_subscribe_completed()
# reset mock to generate different request id for first query
mock_init_uuid.reset_mock()
fake_request_id_query = "Request4567"
mock_init_uuid.return_value = fake_request_id_query
key_value_dict_2 = {}
key_value_dict_2["request_id"] = [fake_request_id_query, " "]
# fake_register_topic = fake_success_response_topic + "$rid={}".format(fake_request_id)
fake_register_payload_result = (
'{"operationId":"' + fake_operation_id + '","status":"' + fake_assigning_status + '"}'
)
mock_init_polling_timer = mocker.patch(
"azure.iot.device.provisioning.internal.polling_machine.Timer"
)
        # Response for register to transition to waiting and polling
mock_request_response_provider.receive_response(
fake_request_id, "200", key_value_dict, fake_register_payload_result
)
# call polling timer's time up call to simulate polling
time_up_call = mock_init_polling_timer.call_args[0][1]
time_up_call()
# reset mock to generate different request id for second query
mock_init_uuid.reset_mock()
fake_request_id_query_2 = "Request7890"
mock_init_uuid.return_value = fake_request_id_query_2
fake_query_payload_result = "HelloHogwarts"
mock_init_polling_timer.reset_mock()
# Response for query
mock_request_response_provider.receive_response(
fake_request_id_query, "430", key_value_dict_2, fake_query_payload_result
)
# call polling timer's time up call to simulate polling
time_up_call = mock_init_polling_timer.call_args[0][1]
time_up_call()
assert state_based_mqtt.send_request.call_count == 3
assert state_based_mqtt.send_request.call_args_list[0][1]["request_id"] == fake_request_id
assert state_based_mqtt.send_request.call_args_list[0][1]["request_payload"] is None
assert (
state_based_mqtt.send_request.call_args_list[1][1]["request_id"]
== fake_request_id_query
)
assert (
state_based_mqtt.send_request.call_args_list[1][1]["operation_id"] == fake_operation_id
)
assert state_based_mqtt.send_request.call_args_list[1][1]["request_payload"] == " "
assert (
state_based_mqtt.send_request.call_args_list[2][1]["request_id"]
== fake_request_id_query_2
)
assert (
state_based_mqtt.send_request.call_args_list[2][1]["operation_id"] == fake_operation_id
)
assert state_based_mqtt.send_request.call_args_list[2][1]["request_payload"] == " "
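    # Hedged illustration (not part of the original tests): the indexing used
    # above unpacks mock call records -- call_args_list[i][0] is the positional
    # args tuple and call_args_list[i][1] is the kwargs dict of the i-th call:
    #   m = MagicMock()
    #   m(1, request_id="abc")
    #   assert m.call_args_list[0][0] == (1,)
    #   assert m.call_args_list[0][1] == {"request_id": "abc"}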
@pytest.mark.describe("PollingMachine - Cancel")
class TestCancel(object):
    # Reduce the timeouts so that the test does not hang for too long
constant.DEFAULT_TIMEOUT_INTERVAL = 0.9
constant.DEFAULT_POLLING_INTERVAL = 0.09
@pytest.mark.it("Calls disconnect on RequestResponseProvider and calls callback")
def test_cancel_disconnects_on_request_response_provider_and_calls_callback(
self, mock_polling_machine
):
mock_request_response_provider = mock_polling_machine._request_response_provider
mock_polling_machine.register(callback=MagicMock())
mock_cancel_callback = MagicMock()
mock_polling_machine.cancel(mock_cancel_callback)
mock_request_response_provider.disconnect.assert_called_once_with(
callback=mock_polling_machine._on_disconnect_completed_cancel
)
mock_polling_machine._on_disconnect_completed_cancel()
assert mock_cancel_callback.call_count == 1
@pytest.mark.it("Calls disconnect on RequestResponseProvider, clears timers and calls callback")
def test_register_and_cancel_clears_timers_and_disconnects(self, mocker):
state_based_mqtt = MagicMock()
mock_request_response_provider = SomeRequestResponseProvider(state_based_mqtt)
polling_machine = PollingMachine(state_based_mqtt)
polling_machine._request_response_provider = mock_request_response_provider
mocker.patch.object(mock_request_response_provider, "enable_responses")
mocker.patch.object(state_based_mqtt, "send_request")
mocker.patch.object(mock_request_response_provider, "disconnect")
# to transition into initializing
polling_machine.register(callback=MagicMock())
mock_init_uuid = mocker.patch(
"azure.iot.device.provisioning.internal.polling_machine.uuid.uuid4"
)
mock_init_uuid.return_value = fake_request_id
key_value_dict = {}
key_value_dict["request_id"] = [fake_request_id, " "]
# to transition into registering
polling_machine._on_subscribe_completed()
# reset mock to generate different request id for query
mock_init_uuid.reset_mock()
fake_request_id_query = "Request4567"
mock_init_uuid.return_value = fake_request_id_query
key_value_dict_2 = {}
key_value_dict_2["request_id"] = [fake_request_id_query, " "]
fake_payload_result = (
'{"operationId":"' + fake_operation_id + '","status":"' + fake_assigning_status + '"}'
)
mock_request_response_provider.receive_response(
fake_request_id, "200", key_value_dict, fake_payload_result
)
polling_timer = polling_machine._polling_timer
query_timer = polling_machine._query_timer
        polling_timer_cancel = mocker.patch.object(polling_timer, "cancel")
query_timer_cancel = mocker.patch.object(query_timer, "cancel")
mock_cancel_callback = MagicMock()
polling_machine.cancel(mock_cancel_callback)
        assert polling_timer_cancel.call_count == 1
assert query_timer_cancel.call_count == 1
assert mock_request_response_provider.disconnect.call_count == 1
polling_machine._on_disconnect_completed_cancel()
assert mock_cancel_callback.call_count == 1
| 42.281993 | 128 | 0.704808 | 4,466 | 37,335 | 5.459472 | 0.05665 | 0.047986 | 0.054548 | 0.074194 | 0.885325 | 0.852432 | 0.836601 | 0.818842 | 0.800139 | 0.787261 | 0 | 0.011871 | 0.210312 | 37,335 | 882 | 129 | 42.329932 | 0.815114 | 0.076175 | 0 | 0.689441 | 0 | 0.003106 | 0.145687 | 0.048504 | 0 | 0 | 0 | 0 | 0.122671 | 1 | 0.02795 | false | 0.001553 | 0.013975 | 0.003106 | 0.054348 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 6 |
86a7549a969a825f8d46268fed9381216fa75270 | 2,176 | py | Python | app/selenium_ui/confluence_ui.py | lukesolar/dc-app-performance-toolkit | 3ac69e52cfd7954fe0acb0766c43d22d54c5c605 | [
"Apache-2.0"
] | null | null | null | app/selenium_ui/confluence_ui.py | lukesolar/dc-app-performance-toolkit | 3ac69e52cfd7954fe0acb0766c43d22d54c5c605 | [
"Apache-2.0"
] | null | null | null | app/selenium_ui/confluence_ui.py | lukesolar/dc-app-performance-toolkit | 3ac69e52cfd7954fe0acb0766c43d22d54c5c605 | [
"Apache-2.0"
] | null | null | null | from selenium_ui.confluence import modules
from extension.confluence import extension_ui # noqa F401
# this action should be the first one
def test_0_selenium_a_login(confluence_webdriver, confluence_datasets, confluence_screen_shots):
modules.login(confluence_webdriver, confluence_datasets)
def test_1_selenium_view_page(confluence_webdriver, confluence_datasets, confluence_screen_shots):
modules.view_page(confluence_webdriver, confluence_datasets)
def test_1_selenium_view_meetical_page(confluence_webdriver, confluence_datasets, confluence_screen_shots):
extension_ui.view_meetical_page(confluence_webdriver, confluence_datasets)
def test_1_selenium_create_page(confluence_webdriver, confluence_datasets, confluence_screen_shots):
modules.create_confluence_page(confluence_webdriver, confluence_datasets)
def test_1_selenium_edit_page(confluence_webdriver, confluence_datasets, confluence_screen_shots):
modules.edit_confluence_page(confluence_webdriver, confluence_datasets)
def test_1_selenium_create_comment(confluence_webdriver, confluence_datasets, confluence_screen_shots):
modules.create_comment(confluence_webdriver, confluence_datasets)
def test_1_selenium_view_blog(confluence_webdriver, confluence_datasets, confluence_screen_shots):
modules.view_blog(confluence_webdriver, confluence_datasets)
def test_1_selenium_view_dashboard(confluence_webdriver, confluence_datasets, confluence_screen_shots):
modules.view_dashboard(confluence_webdriver, confluence_datasets)
"""
Add custom actions anywhere between the login and log out actions; see the hedged sketch after the commented example below. Move this block to a different position as needed.
Write your custom selenium scripts in `app/extension/confluence/extension_ui.py`.
Refer to `app/selenium_ui/confluence/modules.py` for examples.
"""
# def test_1_selenium_custom_action(confluence_webdriver, confluence_datasets, confluence_screen_shots):
# extension_ui.app_specific_action(confluence_webdriver, confluence_datasets)
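# Hedged sketch (illustrative; the function body and dataset key are assumptions,
# not taken from this repo): a custom action in extension_ui.py accepts the same
# fixtures as the module functions above and performs one measured interaction.
# def app_specific_action(webdriver, datasets):
#     webdriver.get(datasets["custom_page_url"])  # assumed dataset key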
# this action should be the last one
def test_2_selenium_z_log_out(confluence_webdriver, confluence_datasets, confluence_screen_shots):
modules.log_out(confluence_webdriver, confluence_datasets)
| 45.333333 | 107 | 0.862132 | 275 | 2,176 | 6.381818 | 0.221818 | 0.216524 | 0.330484 | 0.421652 | 0.786325 | 0.728775 | 0.65698 | 0.645584 | 0.569231 | 0.11567 | 0 | 0.006516 | 0.08318 | 2,176 | 47 | 108 | 46.297872 | 0.873183 | 0.121324 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.45 | false | 0 | 0.1 | 0 | 0.55 | 0 | 0 | 0 | 0 | null | 1 | 1 | 1 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 6 |
86bf0e6bb520d491684b97730f1d006e54b4a7c5 | 21,771 | py | Python | deepchem/trans/tests/test_transformers.py | n3011/deepchem | c316d998c462ce01032f0dae883856b400ea4765 | [
"MIT"
] | null | null | null | deepchem/trans/tests/test_transformers.py | n3011/deepchem | c316d998c462ce01032f0dae883856b400ea4765 | [
"MIT"
] | null | null | null | deepchem/trans/tests/test_transformers.py | n3011/deepchem | c316d998c462ce01032f0dae883856b400ea4765 | [
"MIT"
] | null | null | null | """
Tests for transformer objects.
"""
from __future__ import division
from __future__ import unicode_literals
from deepchem.molnet import load_delaney
from deepchem.trans.transformers import FeaturizationTransformer
__author__ = "Bharath Ramsundar"
__copyright__ = "Copyright 2016, Stanford University"
__license__ = "MIT"
import os
import unittest
import numpy as np
import pandas as pd
import deepchem as dc
class TestTransformers(unittest.TestCase):
"""
Test top-level API for transformer objects.
"""
def setUp(self):
super(TestTransformers, self).setUp()
self.current_dir = os.path.dirname(os.path.abspath(__file__))
def test_y_log_transformer(self):
"""Tests logarithmic data transformer."""
solubility_dataset = dc.data.tests.load_solubility_data()
log_transformer = dc.trans.LogTransformer(
transform_y=True, dataset=solubility_dataset)
X, y, w, ids = (solubility_dataset.X, solubility_dataset.y,
solubility_dataset.w, solubility_dataset.ids)
solubility_dataset = log_transformer.transform(solubility_dataset)
X_t, y_t, w_t, ids_t = (solubility_dataset.X, solubility_dataset.y,
solubility_dataset.w, solubility_dataset.ids)
# Check ids are unchanged.
for id_elt, id_t_elt in zip(ids, ids_t):
assert id_elt == id_t_elt
# Check X is unchanged since this is a y transformer
np.testing.assert_allclose(X, X_t)
# Check w is unchanged since this is a y transformer
np.testing.assert_allclose(w, w_t)
# Check y is now a logarithmic version of itself
np.testing.assert_allclose(y_t, np.log(y + 1))
# Check that untransform does the right thing.
np.testing.assert_allclose(log_transformer.untransform(y_t), y)
def test_transform_unlabelled(self):
ul_dataset = dc.data.tests.load_unlabelled_data()
# transforming y should raise an exception
with self.assertRaises(ValueError) as context:
dc.trans.transformers.Transformer(transform_y=True).transform(ul_dataset)
# transforming w should raise an exception
with self.assertRaises(ValueError) as context:
dc.trans.transformers.Transformer(transform_w=True).transform(ul_dataset)
# transforming X should be okay
dc.trans.NormalizationTransformer(
transform_X=True, dataset=ul_dataset).transform(ul_dataset)
def test_X_log_transformer(self):
"""Tests logarithmic data transformer."""
solubility_dataset = dc.data.tests.load_solubility_data()
log_transformer = dc.trans.LogTransformer(
transform_X=True, dataset=solubility_dataset)
X, y, w, ids = (solubility_dataset.X, solubility_dataset.y,
solubility_dataset.w, solubility_dataset.ids)
solubility_dataset = log_transformer.transform(solubility_dataset)
X_t, y_t, w_t, ids_t = (solubility_dataset.X, solubility_dataset.y,
solubility_dataset.w, solubility_dataset.ids)
# Check ids are unchanged.
for id_elt, id_t_elt in zip(ids, ids_t):
assert id_elt == id_t_elt
# Check y is unchanged since this is a X transformer
np.testing.assert_allclose(y, y_t)
# Check w is unchanged since this is a y transformer
np.testing.assert_allclose(w, w_t)
# Check y is now a logarithmic version of itself
np.testing.assert_allclose(X_t, np.log(X + 1))
# Check that untransform does the right thing.
np.testing.assert_allclose(log_transformer.untransform(X_t), X)
def test_y_log_transformer_select(self):
"""Tests logarithmic data transformer with selection."""
multitask_dataset = dc.data.tests.load_feat_multitask_data()
dfe = pd.read_csv(
os.path.join(self.current_dir,
"../../models/tests/feat_multitask_example.csv"))
tid = []
tasklist = ["task0", "task3", "task4", "task5"]
first_task = "task0"
for task in tasklist:
tiid = dfe.columns.get_loc(task) - dfe.columns.get_loc(first_task)
tid = np.concatenate((tid, np.array([tiid])))
tasks = tid.astype(int)
log_transformer = dc.trans.LogTransformer(
transform_y=True, tasks=tasks, dataset=multitask_dataset)
X, y, w, ids = (multitask_dataset.X, multitask_dataset.y,
multitask_dataset.w, multitask_dataset.ids)
multitask_dataset = log_transformer.transform(multitask_dataset)
X_t, y_t, w_t, ids_t = (multitask_dataset.X, multitask_dataset.y,
multitask_dataset.w, multitask_dataset.ids)
# Check ids are unchanged.
for id_elt, id_t_elt in zip(ids, ids_t):
assert id_elt == id_t_elt
# Check X is unchanged since this is a y transformer
np.testing.assert_allclose(X, X_t)
# Check w is unchanged since this is a y transformer
np.testing.assert_allclose(w, w_t)
# Check y is now a logarithmic version of itself
np.testing.assert_allclose(y_t[:, tasks], np.log(y[:, tasks] + 1))
# Check that untransform does the right thing.
np.testing.assert_allclose(log_transformer.untransform(y_t), y)
def test_X_log_transformer_select(self):
# Tests logarithmic data transformer with selection.
multitask_dataset = dc.data.tests.load_feat_multitask_data()
dfe = pd.read_csv(
os.path.join(self.current_dir,
"../../models/tests/feat_multitask_example.csv"))
fid = []
featurelist = ["feat0", "feat1", "feat2", "feat3", "feat5"]
first_feature = "feat0"
for feature in featurelist:
fiid = dfe.columns.get_loc(feature) - dfe.columns.get_loc(first_feature)
fid = np.concatenate((fid, np.array([fiid])))
features = fid.astype(int)
log_transformer = dc.trans.LogTransformer(
transform_X=True, features=features, dataset=multitask_dataset)
X, y, w, ids = (multitask_dataset.X, multitask_dataset.y,
multitask_dataset.w, multitask_dataset.ids)
multitask_dataset = log_transformer.transform(multitask_dataset)
X_t, y_t, w_t, ids_t = (multitask_dataset.X, multitask_dataset.y,
multitask_dataset.w, multitask_dataset.ids)
# Check ids are unchanged.
for id_elt, id_t_elt in zip(ids, ids_t):
assert id_elt == id_t_elt
# Check y is unchanged since this is a X transformer
np.testing.assert_allclose(y, y_t)
# Check w is unchanged since this is a y transformer
np.testing.assert_allclose(w, w_t)
# Check y is now a logarithmic version of itself
np.testing.assert_allclose(X_t[:, features], np.log(X[:, features] + 1))
# Check that untransform does the right thing.
np.testing.assert_allclose(log_transformer.untransform(X_t), X)
def test_y_normalization_transformer(self):
"""Tests normalization transformer."""
solubility_dataset = dc.data.tests.load_solubility_data()
normalization_transformer = dc.trans.NormalizationTransformer(
transform_y=True, dataset=solubility_dataset)
X, y, w, ids = (solubility_dataset.X, solubility_dataset.y,
solubility_dataset.w, solubility_dataset.ids)
solubility_dataset = normalization_transformer.transform(solubility_dataset)
X_t, y_t, w_t, ids_t = (solubility_dataset.X, solubility_dataset.y,
solubility_dataset.w, solubility_dataset.ids)
# Check ids are unchanged.
for id_elt, id_t_elt in zip(ids, ids_t):
assert id_elt == id_t_elt
# Check X is unchanged since this is a y transformer
np.testing.assert_allclose(X, X_t)
# Check w is unchanged since this is a y transformer
np.testing.assert_allclose(w, w_t)
# Check that y_t has zero mean, unit std.
assert np.isclose(y_t.mean(), 0.)
assert np.isclose(y_t.std(), 1.)
# Check that untransform does the right thing.
np.testing.assert_allclose(normalization_transformer.untransform(y_t), y)
def test_X_normalization_transformer(self):
"""Tests normalization transformer."""
solubility_dataset = dc.data.tests.load_solubility_data()
normalization_transformer = dc.trans.NormalizationTransformer(
transform_X=True, dataset=solubility_dataset)
X, y, w, ids = (solubility_dataset.X, solubility_dataset.y,
solubility_dataset.w, solubility_dataset.ids)
solubility_dataset = normalization_transformer.transform(solubility_dataset)
X_t, y_t, w_t, ids_t = (solubility_dataset.X, solubility_dataset.y,
solubility_dataset.w, solubility_dataset.ids)
# Check ids are unchanged.
for id_elt, id_t_elt in zip(ids, ids_t):
assert id_elt == id_t_elt
# Check y is unchanged since this is a X transformer
np.testing.assert_allclose(y, y_t)
# Check w is unchanged since this is a y transformer
np.testing.assert_allclose(w, w_t)
# Check that X_t has zero mean, unit std.
# np.set_printoptions(threshold='nan')
mean = X_t.mean(axis=0)
assert np.amax(np.abs(mean - np.zeros_like(mean))) < 1e-7
orig_std_array = X.std(axis=0)
std_array = X_t.std(axis=0)
# Entries with zero std are not normalized
for orig_std, std in zip(orig_std_array, std_array):
if not np.isclose(orig_std, 0):
assert np.isclose(std, 1)
# TODO(rbharath): Untransform doesn't work properly for binary feature
# vectors. Need to figure out what's wrong here. (low priority)
## Check that untransform does the right thing.
# np.testing.assert_allclose(normalization_transformer.untransform(X_t), X)
def test_cdf_X_transformer(self):
"""Test CDF transformer on Gaussian normal dataset."""
target = np.array(np.transpose(np.linspace(0., 1., 1001)))
target = np.transpose(np.array(np.append([target], [target], axis=0)))
gaussian_dataset = dc.data.tests.load_gaussian_cdf_data()
bins = 1001
cdf_transformer = dc.trans.CDFTransformer(
transform_X=True, dataset=gaussian_dataset, bins=bins)
X, y, w, ids = (gaussian_dataset.X, gaussian_dataset.y, gaussian_dataset.w,
gaussian_dataset.ids)
gaussian_dataset = cdf_transformer.transform(gaussian_dataset, bins=bins)
X_t, y_t, w_t, ids_t = (gaussian_dataset.X, gaussian_dataset.y,
gaussian_dataset.w, gaussian_dataset.ids)
# Check ids are unchanged.
for id_elt, id_t_elt in zip(ids, ids_t):
assert id_elt == id_t_elt
# Check y is unchanged since this is an X transformer
np.testing.assert_allclose(y, y_t)
# Check w is unchanged since this is an X transformer
np.testing.assert_allclose(w, w_t)
# Check X is now holding the proper values when sorted.
        # Avoid shadowing the built-in `sorted`.
        sorted_values = np.sort(X_t, axis=0)
        np.testing.assert_allclose(sorted_values, target)
def test_cdf_y_transformer(self):
# Test CDF transformer on Gaussian normal dataset.
target = np.array(np.transpose(np.linspace(0., 1., 1001)))
target = np.transpose(np.array(np.append([target], [target], axis=0)))
gaussian_dataset = dc.data.tests.load_gaussian_cdf_data()
bins = 1001
cdf_transformer = dc.trans.CDFTransformer(
transform_y=True, dataset=gaussian_dataset, bins=bins)
X, y, w, ids = (gaussian_dataset.X, gaussian_dataset.y, gaussian_dataset.w,
gaussian_dataset.ids)
gaussian_dataset = cdf_transformer.transform(gaussian_dataset, bins=bins)
X_t, y_t, w_t, ids_t = (gaussian_dataset.X, gaussian_dataset.y,
gaussian_dataset.w, gaussian_dataset.ids)
# Check ids are unchanged.
for id_elt, id_t_elt in zip(ids, ids_t):
assert id_elt == id_t_elt
# Check X is unchanged since this is an y transformer
np.testing.assert_allclose(X, X_t)
# Check w is unchanged since this is an y transformer
np.testing.assert_allclose(w, w_t)
# Check y is now holding the proper values when sorted.
        # Avoid shadowing the built-in `sorted`.
        sorted_values = np.sort(y_t, axis=0)
        np.testing.assert_allclose(sorted_values, target)
# Check that untransform does the right thing.
np.testing.assert_allclose(cdf_transformer.untransform(y_t), y)
def test_clipping_X_transformer(self):
"""Test clipping transformer on X of singletask dataset."""
n_samples = 10
n_features = 3
n_tasks = 1
ids = np.arange(n_samples)
X = np.ones((n_samples, n_features))
target = 5. * X
X *= 6.
y = np.zeros((n_samples, n_tasks))
w = np.ones((n_samples, n_tasks))
dataset = dc.data.NumpyDataset(X, y, w, ids)
transformer = dc.trans.ClippingTransformer(transform_X=True, x_max=5.)
clipped_dataset = transformer.transform(dataset)
X_t, y_t, w_t, ids_t = (clipped_dataset.X, clipped_dataset.y,
clipped_dataset.w, clipped_dataset.ids)
# Check ids are unchanged.
for id_elt, id_t_elt in zip(ids, ids_t):
assert id_elt == id_t_elt
# Check y is unchanged since this is an X transformer
np.testing.assert_allclose(y, y_t)
# Check w is unchanged since this is an X transformer
np.testing.assert_allclose(w, w_t)
# Check X is now holding the proper values when sorted.
np.testing.assert_allclose(X_t, target)
def test_clipping_y_transformer(self):
"""Test clipping transformer on y of singletask dataset."""
n_samples = 10
n_features = 3
n_tasks = 1
ids = np.arange(n_samples)
X = np.zeros((n_samples, n_features))
y = np.ones((n_samples, n_tasks))
target = 5. * y
y *= 6.
w = np.ones((n_samples, n_tasks))
dataset = dc.data.NumpyDataset(X, y, w, ids)
transformer = dc.trans.ClippingTransformer(transform_y=True, y_max=5.)
clipped_dataset = transformer.transform(dataset)
X_t, y_t, w_t, ids_t = (clipped_dataset.X, clipped_dataset.y,
clipped_dataset.w, clipped_dataset.ids)
# Check ids are unchanged.
for id_elt, id_t_elt in zip(ids, ids_t):
assert id_elt == id_t_elt
# Check X is unchanged since this is a y transformer
np.testing.assert_allclose(X, X_t)
# Check w is unchanged since this is a y transformer
np.testing.assert_allclose(w, w_t)
# Check y is now holding the proper values when sorted.
np.testing.assert_allclose(y_t, target)
def test_power_X_transformer(self):
"""Test Power transformer on Gaussian normal dataset."""
gaussian_dataset = dc.data.tests.load_gaussian_cdf_data()
powers = [1, 2, 0.5]
power_transformer = dc.trans.PowerTransformer(
transform_X=True, powers=powers)
X, y, w, ids = (gaussian_dataset.X, gaussian_dataset.y, gaussian_dataset.w,
gaussian_dataset.ids)
gaussian_dataset2 = power_transformer.transform(gaussian_dataset)
X_t, y_t, w_t, ids_t = (gaussian_dataset2.X, gaussian_dataset2.y,
gaussian_dataset2.w, gaussian_dataset2.ids)
# Check ids are unchanged.
for id_elt, id_t_elt in zip(ids, ids_t):
assert id_elt == id_t_elt
# Check y is unchanged since this is an X transformer
np.testing.assert_allclose(y, y_t)
# Check w is unchanged since this is an X transformer
np.testing.assert_allclose(w, w_t)
# Check X is now holding the proper values in each column.
np.testing.assert_allclose(X_t.shape[1], len(powers) * X.shape[1])
np.testing.assert_allclose(X, X_t[:, :2])
np.testing.assert_allclose(np.power(X, 2), X_t[:, 2:4])
np.testing.assert_allclose(np.power(X, 0.5), X_t[:, 4:])
def test_power_y_transformer(self):
"""Test Power transformer on Gaussian normal dataset."""
gaussian_dataset = dc.data.tests.load_gaussian_cdf_data()
powers = [1, 2, 0.5]
power_transformer = dc.trans.PowerTransformer(
transform_y=True, powers=powers)
X, y, w, ids = (gaussian_dataset.X, gaussian_dataset.y, gaussian_dataset.w,
gaussian_dataset.ids)
gaussian_dataset2 = power_transformer.transform(gaussian_dataset)
X_t, y_t, w_t, ids_t = (gaussian_dataset2.X, gaussian_dataset2.y,
gaussian_dataset2.w, gaussian_dataset2.ids)
# Check ids are unchanged.
for id_elt, id_t_elt in zip(ids, ids_t):
assert id_elt == id_t_elt
# Check X is unchanged since this is an X transformer
np.testing.assert_allclose(X, X_t)
# Check w is unchanged since this is an X transformer
np.testing.assert_allclose(w, w_t)
# Check y is now holding the proper values in each column.
np.testing.assert_allclose(y_t.shape[1], len(powers) * y.shape[1])
np.testing.assert_allclose(y, y_t[:, :2])
np.testing.assert_allclose(np.power(y, 2), y_t[:, 2:4])
np.testing.assert_allclose(np.power(y, 0.5), y_t[:, 4:])
# Check that untransform does the right thing.
np.testing.assert_allclose(power_transformer.untransform(y_t), y)
def test_singletask_balancing_transformer(self):
"""Test balancing transformer on single-task dataset."""
classification_dataset = dc.data.tests.load_classification_data()
balancing_transformer = dc.trans.BalancingTransformer(
transform_w=True, dataset=classification_dataset)
X, y, w, ids = (classification_dataset.X, classification_dataset.y,
classification_dataset.w, classification_dataset.ids)
classification_dataset = balancing_transformer.transform(
classification_dataset)
X_t, y_t, w_t, ids_t = (classification_dataset.X, classification_dataset.y,
classification_dataset.w,
classification_dataset.ids)
# Check ids are unchanged.
for id_elt, id_t_elt in zip(ids, ids_t):
assert id_elt == id_t_elt
# Check X is unchanged since this is a w transformer
np.testing.assert_allclose(X, X_t)
# Check y is unchanged since this is a w transformer
np.testing.assert_allclose(y, y_t)
for ind, task in enumerate(classification_dataset.get_task_names()):
y_task = y_t[:, ind]
w_task = w_t[:, ind]
w_orig_task = w[:, ind]
# Assert that entries with zero weight retain zero weight
np.testing.assert_allclose(w_task[w_orig_task == 0],
np.zeros_like(w_task[w_orig_task == 0]))
# Check that sum of 0s equals sum of 1s in transformed for each task
assert np.isclose(
np.sum(w_task[y_task == 0]), np.sum(w_task[y_task == 1]))
def test_multitask_balancing_transformer(self):
"""Test balancing transformer on multitask dataset."""
multitask_dataset = dc.data.tests.load_multitask_data()
balancing_transformer = dc.trans.BalancingTransformer(
transform_w=True, dataset=multitask_dataset)
X, y, w, ids = (multitask_dataset.X, multitask_dataset.y,
multitask_dataset.w, multitask_dataset.ids)
multitask_dataset = balancing_transformer.transform(multitask_dataset)
X_t, y_t, w_t, ids_t = (multitask_dataset.X, multitask_dataset.y,
multitask_dataset.w, multitask_dataset.ids)
# Check ids are unchanged.
for id_elt, id_t_elt in zip(ids, ids_t):
assert id_elt == id_t_elt
# Check X is unchanged since this is a w transformer
np.testing.assert_allclose(X, X_t)
# Check y is unchanged since this is a w transformer
np.testing.assert_allclose(y, y_t)
for ind, task in enumerate(multitask_dataset.get_task_names()):
y_task = y_t[:, ind]
w_task = w_t[:, ind]
w_orig_task = w[:, ind]
# Assert that entries with zero weight retain zero weight
np.testing.assert_allclose(w_task[w_orig_task == 0],
np.zeros_like(w_task[w_orig_task == 0]))
# Check that sum of 0s equals sum of 1s in transformed for each task
assert np.isclose(
np.sum(w_task[y_task == 0]), np.sum(w_task[y_task == 1]))
def test_coulomb_fit_transformer(self):
"""Test coulomb fit transformer on singletask dataset."""
n_samples = 10
n_features = 3
n_tasks = 1
ids = np.arange(n_samples)
X = np.random.rand(n_samples, n_features, n_features)
y = np.zeros((n_samples, n_tasks))
w = np.ones((n_samples, n_tasks))
dataset = dc.data.NumpyDataset(X, y, w, ids)
fit_transformer = dc.trans.CoulombFitTransformer(dataset)
X_t = fit_transformer.X_transform(dataset.X)
assert len(X_t.shape) == 2
def test_IRV_transformer(self):
n_features = 128
n_samples = 20
test_samples = 5
n_tasks = 2
X = np.random.randint(2, size=(n_samples, n_features))
y = np.zeros((n_samples, n_tasks))
w = np.ones((n_samples, n_tasks))
dataset = dc.data.NumpyDataset(X, y, w, ids=None)
X_test = np.random.randint(2, size=(test_samples, n_features))
y_test = np.zeros((test_samples, n_tasks))
w_test = np.ones((test_samples, n_tasks))
test_dataset = dc.data.NumpyDataset(X_test, y_test, w_test, ids=None)
sims = np.sum(
X_test[0, :] * X, axis=1, dtype=float) / np.sum(
np.sign(X_test[0, :] + X), axis=1, dtype=float)
sims = sorted(sims, reverse=True)
IRV_transformer = dc.trans.IRVTransformer(10, n_tasks, dataset)
test_dataset_trans = IRV_transformer.transform(test_dataset)
dataset_trans = IRV_transformer.transform(dataset)
assert test_dataset_trans.X.shape == (test_samples, 20 * n_tasks)
assert np.allclose(test_dataset_trans.X[0, :10], sims[:10])
assert np.allclose(test_dataset_trans.X[0, 10:20], [0] * 10)
assert not np.isclose(dataset_trans.X[0, 0], 1.)
def test_featurization_transformer(self):
fp_size = 2048
tasks, all_dataset, transformers = load_delaney('Raw')
train = all_dataset[0]
transformer = FeaturizationTransformer(
transform_X=True,
dataset=train,
featurizer=dc.feat.CircularFingerprint(size=fp_size))
new_train = transformer.transform(train)
self.assertEqual(new_train.y.shape, train.y.shape)
self.assertEqual(new_train.X.shape[-1], fp_size)
| 44.88866 | 80 | 0.693354 | 3,195 | 21,771 | 4.492958 | 0.082316 | 0.033856 | 0.056426 | 0.08652 | 0.825078 | 0.797144 | 0.782166 | 0.762661 | 0.746221 | 0.729711 | 0 | 0.00915 | 0.206835 | 21,771 | 484 | 81 | 44.981405 | 0.822157 | 0.191172 | 0 | 0.57971 | 0 | 0 | 0.011634 | 0.005158 | 0 | 0 | 0 | 0.002066 | 0.237681 | 1 | 0.055072 | false | 0 | 0.026087 | 0 | 0.084058 | 0.002899 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 6 |
86e6a557655f31fdb8e6c24f45ca1ce01263ee5d | 6,121 | py | Python | src/ros_carla_rllib/memories.py | 50sven/ros_rllib | e3b38da925af6900e65b4d953d2e33a64f76faed | [
"MIT"
] | 1 | 2020-12-14T16:14:06.000Z | 2020-12-14T16:14:06.000Z | src/ros_carla_rllib/memories.py | 50sven/ros_rllib | e3b38da925af6900e65b4d953d2e33a64f76faed | [
"MIT"
] | null | null | null | src/ros_carla_rllib/memories.py | 50sven/ros_rllib | e3b38da925af6900e65b4d953d2e33a64f76faed | [
"MIT"
] | null | null | null | """Sample Buffers
This script provides sample buffers for RL algorithms.
Classes:
    * PPOBuffer - sample buffer (list-backed) for the PPO algorithm
    * PPOBuffer2 - sample buffer (deque-backed) for the PPO algorithm
    * A3CMemory - sample buffer for n-step A3C
"""
import torch
from collections import deque
class PPOBuffer(object):
"""
Replay Buffer to save samples for PPO
and diagnostic training data
"""
def __init__(self, batch_size, norm_adv=True):
# Samples
self.obs = [[], [], []]
self.actions = []
self.logps = []
self.values = []
self.returns = []
self.advantages = []
# Diagnostics
self.episode_rewards = []
self.episode_lengths = []
self.norm_adv = norm_adv
self.batch_size = batch_size
self.buffer_size = 0
def append(self, obs_t, action_t, logp_t, value_t, return_t, advantage_t):
"""Adds a sample to the buffer"""
self.obs[0].append(obs_t[0])
self.obs[1].append(obs_t[1])
self.obs[2].append(obs_t[2])
self.actions.append(action_t)
self.logps.append(logp_t)
self.values.append(value_t)
self.returns.append(return_t)
self.advantages.append(advantage_t)
self.buffer_size += 1
def eject(self):
"""Prepares and returns the collected batch"""
# Convert batch to tensors
(obs, actions, logps, values, returns, advantages) = self.batch_to_tensor()
# Normalize advantages
if self.norm_adv:
advantages = (advantages - advantages.mean()) / (advantages.std() + 1e-5)
return obs, actions, logps, values, returns, advantages
def batch_to_tensor(self):
"""Transforms batch to torch.Tensors"""
# Convert arrays/vectors to torch.Tensors
xV = torch.Tensor(self.obs[0]).float()
xE = torch.Tensor(self.obs[1]).float()
xO = torch.Tensor(self.obs[2]).float()
# For LSTM
# xO = [torch.Tensor(o).float() for o in self.obs[2]]
obs = [xV, xE, xO]
actions = torch.Tensor(self.actions).float()
logps = torch.Tensor(self.logps).float()
values = torch.Tensor(self.values).float()
returns = torch.Tensor(self.returns).float()
advantages = torch.Tensor(self.advantages).float()
return obs, actions, logps, values, returns, advantages
def flush(self):
"""Clears the buffer"""
self.obs = [[], [], []]
self.actions = []
self.logps = []
self.values = []
self.returns = []
self.advantages = []
self.episode_rewards = []
self.episode_lengths = []
self.buffer_size = 0
def __len__(self):
"""Returns the current batch size"""
return self.buffer_size
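# Hedged usage sketch for PPOBuffer (illustrative, not part of the original
# module; the tiny observation shapes are placeholder assumptions):
# buf = PPOBuffer(batch_size=64)
# buf.append(obs_t=([0.0], [0.0], [0.0]), action_t=[0.0], logp_t=0.0,
#            value_t=0.0, return_t=1.0, advantage_t=0.5)
# obs, actions, logps, values, returns, advantages = buf.eject()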
class PPOBuffer2(object):
"""
    Deque-backed, fixed-size replay buffer to save samples
    for PPO and diagnostic training data
"""
def __init__(self, batch_size, norm_adv=False):
# Samples
self.obs = [deque(maxlen=batch_size),
deque(maxlen=batch_size),
deque(maxlen=batch_size)]
self.actions = deque(maxlen=batch_size)
self.logps = deque(maxlen=batch_size)
self.values = deque(maxlen=batch_size)
self.returns = deque(maxlen=batch_size)
self.advantages = deque(maxlen=batch_size)
# Diagnostics
self.episode_rewards = deque(maxlen=batch_size)
self.episode_lengths = deque(maxlen=batch_size)
self.norm_adv = norm_adv
self.batch_size = batch_size
self.buffer_size = 0
def append(self, obs_t, action_t, logp_t, value_t, return_t, advantage_t):
"""Adds a sample to the buffer"""
if self.buffer_size < self.batch_size:
self.buffer_size += 1
self.obs[0].append(obs_t[0])
self.obs[1].append(obs_t[1])
self.obs[2].append(obs_t[2])
self.actions.append(action_t)
self.logps.append(logp_t)
self.values.append(value_t)
self.returns.append(return_t)
self.advantages.append(advantage_t)
def eject(self):
"""Prepares and returns the collected batch"""
# Convert batch to tensors
(obs, actions, logps, values, returns, advantages) = self.batch_to_tensor()
# Normalize advantages
if self.norm_adv:
advantages = (advantages - advantages.mean()) / (advantages.std() + 1e-5)
self.buffer_size = 0
return obs, actions, logps, values, returns, advantages
def batch_to_tensor(self):
"""Transforms batch to torch.Tensors"""
# Convert arrays/vectors to torch.Tensors
xV = torch.Tensor(self.obs[0]).float()
xE = torch.Tensor(self.obs[1]).float()
xO = torch.Tensor(self.obs[2]).float()
# For LSTM
# xO = [torch.Tensor(o).float() for o in self.obs[2]]
obs = [xV, xE, xO]
actions = torch.Tensor(self.actions).float()
logps = torch.Tensor(self.logps).float()
values = torch.Tensor(self.values).float()
returns = torch.Tensor(self.returns).float()
advantages = torch.Tensor(self.advantages).float()
return obs, actions, logps, values, returns, advantages
def __len__(self):
"""Returns the current batch size"""
return self.buffer_size
class A3CMemory(object):
"""
Memory to save n-steps
"""
def __init__(self):
self.log_probs = []
self.entropies = []
self.values = []
self.rewards = []
def store(self, log_prob, entropy, value, reward):
self.log_probs.append(log_prob)
self.entropies.append(entropy)
self.values.append(value)
self.rewards.append(reward)
def get_history(self):
return iter(zip(self.log_probs[::-1],
self.entropies[::-1],
self.values[::-1],
self.rewards[::-1]))
def clear(self):
self.log_probs = []
self.entropies = []
self.values = []
self.rewards = []
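# Hedged usage sketch (illustrative, not part of the original module): store one
# transition per step, then walk get_history() newest-first to accumulate the
# discounted n-step return R; gamma here is an arbitrary example value.
if __name__ == "__main__":
    memory = A3CMemory()
    for step_reward in [1.0, 0.0, 2.0]:
        memory.store(log_prob=0.0, entropy=0.0, value=0.0, reward=step_reward)
    gamma, R = 0.99, 0.0
    for log_prob, entropy, value, reward in memory.get_history():
        R = reward + gamma * R  # newest step first
    print(R)  # discounted return of the 3-step episode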
| 31.229592 | 85 | 0.591897 | 745 | 6,121 | 4.724832 | 0.143624 | 0.04858 | 0.068182 | 0.056818 | 0.79517 | 0.744602 | 0.744602 | 0.723011 | 0.705966 | 0.705966 | 0 | 0.008892 | 0.28345 | 6,121 | 195 | 86 | 31.389744 | 0.793662 | 0.16811 | 0 | 0.739496 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.12605 | false | 0 | 0.016807 | 0.008403 | 0.226891 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 6 |
86fad3422614ffdee9937ab7c4e993bbc7d3e071 | 94 | py | Python | demo/config.py | konflic/python_qa_socket | 3d268b2348b760a5d1c0d78b468dc6a8a6b6d127 | [
"MIT"
] | null | null | null | demo/config.py | konflic/python_qa_socket | 3d268b2348b760a5d1c0d78b468dc6a8a6b6d127 | [
"MIT"
] | null | null | null | demo/config.py | konflic/python_qa_socket | 3d268b2348b760a5d1c0d78b468dc6a8a6b6d127 | [
"MIT"
] | null | null | null | import random
LOCALHOST = "127.0.0.1"
def random_port(): return random.randint(20000, 30000)
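# Hedged usage sketch (illustrative): bind a test socket to LOCALHOST on a
# randomly chosen high port.
# import socket
# sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
# sock.bind((LOCALHOST, random_port()))
# sock.close()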
| 18.8 | 54 | 0.744681 | 15 | 94 | 4.6 | 0.8 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.192771 | 0.117021 | 94 | 4 | 55 | 23.5 | 0.638554 | 0 | 0 | 0 | 0 | 0 | 0.095745 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.333333 | false | 0 | 0.333333 | 0.333333 | 0.666667 | 0 | 1 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 1 | 1 | 1 | 0 | 0 | 6 |
86fdaaf9968bbcd247d7a5d545dafbd87e3cb45c | 866 | py | Python | test.py | DableUTeeF/pytorch_api | 18aca4e2a4abe80185a3fff418e455cf14a86a00 | [
"MIT"
] | null | null | null | test.py | DableUTeeF/pytorch_api | 18aca4e2a4abe80185a3fff418e455cf14a86a00 | [
"MIT"
] | null | null | null | test.py | DableUTeeF/pytorch_api | 18aca4e2a4abe80185a3fff418e455cf14a86a00 | [
"MIT"
] | null | null | null | import time
class test:
    """No-op class; its empty method is the call overhead measured in loop 3."""
    def test(self):
        pass
"""
4.105237722396851
4.414266586303711
3.648768186569214
3.707662582397461
3.6752700805664062
4.047108888626099
"""
# Loop 1: baseline cost of one million try/except blocks around time.time()
starttime = time.time()
batch_time = 0
for i in range(1000000):
# time.sleep(1)
try:
batch_time = time.time()
except KeyboardInterrupt:
break
print(time.time() - starttime)
# Loop 2: same as loop 1 plus two trivially-true `if` checks per iteration
starttime = time.time()
batch_time = 0
for i in range(1000000):
# time.sleep(1)
if 1:
pass
if 1:
pass
try:
batch_time = time.time()
except KeyboardInterrupt:
break
print(time.time() - starttime)
# Loop 3: same as loop 1 plus two no-op method calls per iteration
starttime = time.time()
batch_time = 0
x = test()
for i in range(1000000):
# time.sleep(1)
x.test()
x.test()
try:
batch_time = time.time()
except KeyboardInterrupt:
break
print(time.time() - starttime)
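# Hedged alternative (illustrative): the stdlib timeit module usually gives more
# stable microbenchmark numbers than the hand-rolled loops above, e.g.:
# import timeit
# print(timeit.timeit("x.test()",
#                     setup="from __main__ import test; x = test()",
#                     number=1000000))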
| 16.339623 | 32 | 0.624711 | 110 | 866 | 4.863636 | 0.290909 | 0.179439 | 0.095327 | 0.123364 | 0.700935 | 0.700935 | 0.700935 | 0.700935 | 0.648598 | 0.648598 | 0 | 0.196875 | 0.26097 | 866 | 52 | 33 | 16.653846 | 0.639063 | 0.047344 | 0 | 0.885714 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.028571 | false | 0.085714 | 0.028571 | 0 | 0.085714 | 0.085714 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 1 | 1 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 6 |
81235f09a6534616656404b8f8a6e547d78766fd | 224 | py | Python | jetbrains-academy/Tic-Tac-Toe/Problems/Visual poetry/task.py | robinpatra/ML-Study-3 | 6f401706a8da4cac5e63304ce09ff6ff62756d0b | [
"MIT"
] | null | null | null | jetbrains-academy/Tic-Tac-Toe/Problems/Visual poetry/task.py | robinpatra/ML-Study-3 | 6f401706a8da4cac5e63304ce09ff6ff62756d0b | [
"MIT"
] | null | null | null | jetbrains-academy/Tic-Tac-Toe/Problems/Visual poetry/task.py | robinpatra/ML-Study-3 | 6f401706a8da4cac5e63304ce09ff6ff62756d0b | [
"MIT"
] | null | null | null | print(" * * * ")
print(" * * ")
print(" * Which * ")
print(" * came first: * ")
print("* the chicken *")
print(" * or the * ")
print(" * egg? * ")
print(" * * * ")
| 24.888889 | 27 | 0.321429 | 16 | 224 | 4.5 | 0.5 | 0.277778 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.446429 | 224 | 8 | 28 | 28 | 0.580645 | 0 | 0 | 0.25 | 0 | 0 | 0.642857 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 0 | 0 | 0 | 1 | 1 | 0 | 0 | null | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 1 | 0 | 6 |
d4d143098925aa5ecd274f354e6e578ab95d3166 | 199 | py | Python | flash/vision/__init__.py | ibraheemmmoosa/lightning-flash | c60fef81b27174543d7ad3a4d841faf71ad8536c | [
"Apache-2.0"
] | null | null | null | flash/vision/__init__.py | ibraheemmmoosa/lightning-flash | c60fef81b27174543d7ad3a4d841faf71ad8536c | [
"Apache-2.0"
] | null | null | null | flash/vision/__init__.py | ibraheemmmoosa/lightning-flash | c60fef81b27174543d7ad3a4d841faf71ad8536c | [
"Apache-2.0"
] | null | null | null | from flash.vision.classification import ImageClassificationData, ImageClassifier
from flash.vision.detection import ImageDetectionData, ImageDetector
from flash.vision.embedding import ImageEmbedder
| 49.75 | 80 | 0.889447 | 20 | 199 | 8.85 | 0.6 | 0.152542 | 0.254237 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.070352 | 199 | 3 | 81 | 66.333333 | 0.956757 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | null | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 6 |
d4df3420ff16c472a849aa5c8347877d6e9e38a0 | 128 | py | Python | apps/tournaments/admin.py | kevotovar/topdeck-arena | e09753a29837847bdc239cd98a1942711c953bbe | [
"MIT"
] | null | null | null | apps/tournaments/admin.py | kevotovar/topdeck-arena | e09753a29837847bdc239cd98a1942711c953bbe | [
"MIT"
] | 24 | 2018-08-16T03:17:08.000Z | 2021-06-10T20:43:13.000Z | apps/tournaments/admin.py | kevotovar/topdeck-arena | e09753a29837847bdc239cd98a1942711c953bbe | [
"MIT"
] | null | null | null | from django.contrib import admin
from . import models
admin.register(models.Tournament)
admin.register(models.TournamentEntry)
| 21.333333 | 38 | 0.835938 | 16 | 128 | 6.6875 | 0.5625 | 0.242991 | 0.35514 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.085938 | 128 | 5 | 39 | 25.6 | 0.91453 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 0.5 | 0 | 0.5 | 0 | 1 | 0 | 0 | null | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 0 | 0 | 0 | 6 |
d4e5788cb16adcf03bbdad1ff59d9e5ef82f4833 | 172 | py | Python | app.py | shivaneeshinde/nsetools | ce70d42e568e2b90932235c4fb4b46a2a2c35dc9 | [
"MIT"
] | null | null | null | app.py | shivaneeshinde/nsetools | ce70d42e568e2b90932235c4fb4b46a2a2c35dc9 | [
"MIT"
] | null | null | null | app.py | shivaneeshinde/nsetools | ce70d42e568e2b90932235c4fb4b46a2a2c35dc9 | [
"MIT"
] | null | null | null | from googlefinance import getQuotes
from googlefinance import getNews
import json
print(json.dumps(getQuotes('hdfc'), indent=2))
# print(json.dumps(getNews('hdfc'), indent=2))
d4e7a8a3ed5bb02533e17826443f7efc7333110b | 44 | py | Python | __init__.py | bonifield/RequestInjector | ec05331e5e7105c3d2a3fcc6629f587c1882d300 | [
"MIT"
] | 2 | 2021-09-30T11:20:44.000Z | 2022-02-22T03:00:51.000Z | __init__.py | bonifield/RequestInjector | ec05331e5e7105c3d2a3fcc6629f587c1882d300 | [
"MIT"
] | null | null | null | __init__.py | bonifield/RequestInjector | ec05331e5e7105c3d2a3fcc6629f587c1882d300 | [
"MIT"
] | null | null | null | from requestinjector import RequestInjector
| 22 | 43 | 0.909091 | 4 | 44 | 10 | 0.75 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.090909 | 44 | 1 | 44 | 44 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 1 | 1 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 6 |
076f1e9c2a40ce6228753abda52b5debfd14e672 | 2,276 | py | Python | tests/cupy_tests/sorting_tests/test_count.py | andersk/cupy | c73a325dd034ee9abfac2c4af11aa9107ec89042 | [
"MIT"
] | 2 | 2020-02-28T09:27:58.000Z | 2020-10-12T07:10:24.000Z | tests/cupy_tests/sorting_tests/test_count.py | andersk/cupy | c73a325dd034ee9abfac2c4af11aa9107ec89042 | [
"MIT"
] | 1 | 2019-08-05T09:36:13.000Z | 2019-08-06T12:03:01.000Z | tests/cupy_tests/sorting_tests/test_count.py | andersk/cupy | c73a325dd034ee9abfac2c4af11aa9107ec89042 | [
"MIT"
] | 1 | 2022-03-24T13:19:55.000Z | 2022-03-24T13:19:55.000Z | import unittest
import numpy
import six
import cupy
from cupy import testing
@testing.gpu
class TestCount(unittest.TestCase):
@testing.for_all_dtypes()
def test_count_nonzero(self, dtype):
def func(xp):
m = testing.shaped_random((2, 3), xp, xp.bool_)
a = testing.shaped_random((2, 3), xp, dtype) * m
c = xp.count_nonzero(a)
if xp is cupy:
# CuPy returns zero-dimensional array instead of
# returning a scalar value
self.assertIsInstance(c, xp.ndarray)
self.assertEqual(c.dtype, 'l')
self.assertEqual(c.shape, ())
return int(c)
self.assertEqual(func(numpy), func(cupy))
@testing.for_all_dtypes()
def test_count_nonzero_zero_dim(self, dtype):
def func(xp):
a = xp.array(1.0, dtype=dtype)
c = xp.count_nonzero(a)
if xp is cupy:
# CuPy returns zero-dimensional array instead of
# returning a scalar value
self.assertIsInstance(c, xp.ndarray)
self.assertEqual(c.dtype, 'l')
self.assertEqual(c.shape, ())
return int(c)
self.assertEqual(func(numpy), func(cupy))
@testing.with_requires('numpy>=1.12')
@testing.for_all_dtypes()
def test_count_nonzero_int_axis(self, dtype):
for ax in six.moves.range(3):
def func(xp):
m = testing.shaped_random((2, 3, 4), xp, xp.bool_)
a = testing.shaped_random((2, 3, 4), xp, dtype) * m
return xp.count_nonzero(a, axis=ax)
testing.assert_allclose(func(numpy), func(cupy))
@testing.with_requires('numpy>=1.12')
@testing.for_all_dtypes()
def test_count_nonzero_tuple_axis(self, dtype):
for ax in six.moves.range(3):
for ay in six.moves.range(3):
if ax == ay:
continue
def func(xp):
m = testing.shaped_random((2, 3, 4), xp, xp.bool_)
a = testing.shaped_random((2, 3, 4), xp, dtype) * m
return xp.count_nonzero(a, axis=(ax, ay))
testing.assert_allclose(func(numpy), func(cupy))
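    # Hedged illustration (not part of the test suite): with an axis argument,
    # count_nonzero reduces over that axis. For a = [[1, 0, 2], [0, 0, 3]]:
    #   xp.count_nonzero(a, axis=0) -> [1, 0, 2]   (per-column counts)
    #   xp.count_nonzero(a, axis=1) -> [2, 1]      (per-row counts)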
| 35.015385 | 71 | 0.554482 | 293 | 2,276 | 4.174061 | 0.221843 | 0.078496 | 0.093213 | 0.098119 | 0.863451 | 0.828291 | 0.826656 | 0.778414 | 0.721995 | 0.672118 | 0 | 0.01774 | 0.331283 | 2,276 | 64 | 72 | 35.5625 | 0.785808 | 0.06283 | 0 | 0.627451 | 0 | 0 | 0.011278 | 0 | 0 | 0 | 0 | 0 | 0.196078 | 1 | 0.156863 | false | 0 | 0.098039 | 0 | 0.352941 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 6 |
07b44a54bca51f29598d3f41a1cf80cc6ecd2c5d | 62 | py | Python | rA9/neurons/__init__.py | junhoyeo/rA9 | 6ab5537880f842b36ae666f0ef5645acc62c236e | [
"MIT"
] | 2 | 2020-10-09T00:36:06.000Z | 2020-10-20T06:20:19.000Z | rA9/neurons/__init__.py | junhoyeo/rA9 | 6ab5537880f842b36ae666f0ef5645acc62c236e | [
"MIT"
] | null | null | null | rA9/neurons/__init__.py | junhoyeo/rA9 | 6ab5537880f842b36ae666f0ef5645acc62c236e | [
"MIT"
] | 1 | 2020-10-09T00:36:08.000Z | 2020-10-09T00:36:08.000Z | from .Input import *
from .LIF import *
from .Output import *
| 15.5 | 21 | 0.709677 | 9 | 62 | 4.888889 | 0.555556 | 0.454545 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.193548 | 62 | 3 | 22 | 20.666667 | 0.88 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | null | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 6 |
ed533b550ce2d3b9ebc40a1505938f8b629bf920 | 1,109 | py | Python | oslib/keypair/__init__.py | fbacchella/oscmd | 7e60f7b761a14f519b971d0cc760c949adb6fa9e | [
"Apache-2.0"
] | null | null | null | oslib/keypair/__init__.py | fbacchella/oscmd | 7e60f7b761a14f519b971d0cc760c949adb6fa9e | [
"Apache-2.0"
] | null | null | null | oslib/keypair/__init__.py | fbacchella/oscmd | 7e60f7b761a14f519b971d0cc760c949adb6fa9e | [
"Apache-2.0"
] | null | null | null | from oslib.command import Command
class_ref = []
class Save(Command):
object = 'keypair'
verb = 'save'
    def fill_parser(self, parser):
parser.add_option("-n", "--name", dest="name", help="name", default=None)
parser.add_option("-o", "--output-directory", dest="output", help="output directory", default="~/.ssh/")
def execute(self, *args, **kwargs):
keypair = self.ctxt.cnx_ec2.get_key_pair(kwargs['name'])
keypair.save(kwargs['output'])
def validate(self, options):
return True
class_ref.append(Save)
class Create(Command):
object = 'keypair'
verb = 'create'
    def fill_parser(self, parser):
parser.add_option("-n", "--name", dest="name", help="name", default=None)
parser.add_option("-o", "--output-directory", dest="output", help="output directory", default="~/.ssh/")
def execute(self, *args, **kwargs):
keypair = self.ctxt.cnx_ec2.create_key_pair(kwargs['name'])
keypair.save(kwargs['output'])
def validate(self, options):
return True
class_ref.append(Create)
import dump
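# Hedged usage sketch (illustrative; the CLI entry-point name is an assumption):
# oscmd keypair create --name mykey --output-directory ~/.ssh/
# oscmd keypair save --name mykey --output-directory ~/.ssh/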
| 29.972973 | 112 | 0.632101 | 139 | 1,109 | 4.935252 | 0.302158 | 0.052478 | 0.087464 | 0.069971 | 0.790087 | 0.790087 | 0.790087 | 0.790087 | 0.790087 | 0.790087 | 0 | 0.00224 | 0.19477 | 1,109 | 36 | 113 | 30.805556 | 0.765957 | 0 | 0 | 0.592593 | 0 | 0 | 0.156898 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.222222 | false | 0 | 0.074074 | 0.074074 | 0.592593 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 6 |
ed71cfb4e98168cddf744bfc51ca91e05628dc52 | 7,079 | bzl | Python | bazel/revisions.bzl | gapcguy/emsdk | 1c420fd6e5a6b8d330aa9ebc04c875a0534e31ae | [
"MIT"
] | 1,423 | 2019-01-21T23:13:11.000Z | 2022-03-31T18:12:54.000Z | bazel/revisions.bzl | gapcguy/emsdk | 1c420fd6e5a6b8d330aa9ebc04c875a0534e31ae | [
"MIT"
] | 590 | 2019-01-21T17:11:14.000Z | 2022-03-31T08:10:30.000Z | bazel/revisions.bzl | gapcguy/emsdk | 1c420fd6e5a6b8d330aa9ebc04c875a0534e31ae | [
"MIT"
] | 364 | 2019-01-26T14:00:58.000Z | 2022-03-31T09:39:39.000Z | # This file is automatically updated by emsdk/scripts/update_bazel_workspace.sh
# DO NOT MODIFY
EMSCRIPTEN_TAGS = {
"2.0.32": struct(
hash = "74646397e3c5010824ad60d1de86c6bcbe334dff",
sha_linux = "236b3954e71d3bb30d347c655b9f47f2a091aa2e61046e1912c8da90152f4ca1",
sha_mac = "6a03267574534948e3b041e5d3e31bd757751ef17912eb6e90b96a47da03afb6",
sha_win = "2f8fbf0db097d67d0c364946faceec27c569c5c2d7b22068eef8db55645aba36",
),
"2.0.31": struct(
hash = "597724ca3f6cd6e84bea73f1f519a3953b5c273d",
sha_linux = "ef70c7733aa0df41cb4c812f5a89bf6b2ed13ca8aa252872396c0be271156d9e",
sha_mac = "77e57c3e98758488ef676f8f58a85faa0bd65a1d326a91771ad83d7cb0e373ca",
sha_win = "541605b740afccd08a39f5ae815978f699f350d621a1b2dfba0763970b56aee4",
),
"2.0.30": struct(
hash = "c69458f1bbf3ef5b8da4e934de210659cc9bca04",
sha_linux = "ee1c8270096a728966ae38af548047d1f64c18318e06ba75952e657136f02537",
sha_mac = "574a5819308eba6c8be6a780e26dff415a0e7178d3f44162dd8dca87eb40d4a7",
sha_win = "242d244f4f5f5af08e6e6ac9c143aebf1b7bb2a23fd2992350731e59acfee07c",
),
"2.0.29": struct(
hash = "c2369dc425725fff86ba90a9007a4603ddf7941b",
sha_linux = "7df4a8f3e25820becadfa7f1fe0d78e764102ec3ee50c474ca1634ed90d48890",
sha_mac = "d998521ba95882a27792f0113ea2c972fbb891c240649f4c994f0260c0e1a213",
sha_win = "c64aa3f2af6503f6711b2322986a45784e00d7c7fe13ec3f5c4f740472d065a0",
),
"2.0.28": struct(
hash = "866055ea639d64dfedc625d28ec981e47ce37168",
sha_linux = "7dca7704eb14e367bb67e9abc9eaf59e75f59b74e32422e04556de10897a9a86",
sha_mac = "370f76493e3805e2538290b698a381f04b6d78a77771e48fc0099cf89dad985f",
sha_win = "e913c50ea5f196d36971f7cf5b1cf9a9ca27ce0818aba56be3a66e31e95c0e5b",
),
"2.0.27": struct(
hash = "1ac46e3b84955231ab4a4f4cbe0c7ac28c86b8cc",
sha_linux = "3e124e278de168cf22e03b93b2f14a65a86777e428cdaab7e5e1c2289eb41605",
sha_mac = "388262b9e1042ef9a3a1945d5a23dcd634c8042a225e8fdf80bcc2c1cb7e05cc",
sha_win = "762276a332432e717afb988310d21ae10e36facc1e05bfd77042a364fb43cc3c",
),
"2.0.26": struct(
hash = "823d37b15d1ab61bc9ac0665ceef6951d3703842",
sha_linux = "996e16d368a99dd4dd12126acbcb8bea9a607b5257cc7b747c4afc2f036fd8cf",
sha_mac = "8b2d7e84cc449531e88034beb31da89a0b61ccaeaa1584ffb6da7842c6348fdc",
sha_win = "095e772764d7f8c0f8228bda4b8500ae43aac2303567da5cdc9f8623f70a5743",
),
"2.0.25": struct(
hash = "f6f001b08fbb67935379cf13d17fd9bfdbaff791",
sha_linux = "06d8e2f3d4f4b35a57de9c15e62a559c941cfba1dd7ec02353d815904d912c3b",
sha_mac = "6541bf3a648aae7df84de424ff392dd1513ab5450203c84f72a6a03e321a301b",
sha_win = "267fbfa809ec0eb911c1962b1b9768675cb82228e694a5f9ef570232ee71db76",
),
"2.0.24": struct(
hash = "6ab7fc5622a67e6111d07c4ba61c8d3c8fc33ed2",
sha_linux = "e5daa0e87f3afd2197e7975297cb0cd4c245edccb964ca5f1f32ee7d985bf440",
sha_mac = "e4b7f2a7b71d6ac4610ee7b14743570e0dfba3668dc6b4f984cbe7a135888527",
sha_win = "db2aad422a3ca2295be6101b0151eeee55dcea29ba1f31b4594c02ba46591cbe",
),
"2.0.23": struct(
hash = "77b065ace39e6ab21446e13f92897f956c80476a",
sha_linux = "7713a9a5572d839aea9eaa84a7c4779d11c6c8818ee64a0f443b62081fae6d47",
sha_mac = "b793087462d581e25c8c267fca9d30519619e3272480862a56cc316a32c7afab",
sha_win = "b8885cbb41a39e4734861462e05ee58c7ff7562016a842bcee2603f229940e8b",
),
"2.0.22": struct(
hash = "6465a9acb820207acf7da44661a7de52d0a1ae3c",
sha_linux = "c079781124e763c53c9fc73781fcee40296ce3314276836bc694f07bd331a859",
sha_mac = "ab95574dfc685b0300e37bea36aba413045bbfa2ab06b93eceb881670489eec1",
sha_win = "ba142e7e380596cba763e3a414de6511bcb86de48e4b48cf393b1ea449a24aaa",
),
"2.0.21": struct(
hash = "72f4ec97fbc7ec16c15ae68a75b0a257b2835160",
sha_linux = "741264f33f96ba4b785ed0b133861ebdfefbaefab76ddcfe7bde6522829d6f70",
sha_mac = "b07c0d65ee7e2799170c6f3b2aacebfe070c2e4975088bcd1b3a4140fecd8418",
sha_win = "dc3cbf47aa4be52a92526f1790a013734ecbd407f7f36286ed0283c96355999a",
),
"2.0.20": struct(
hash = "e0c15cd14170f407a9eb27fcbad22931dc67feb7",
sha_linux = "a196504fd1095836ca3961208338ff9e292be7729ea529bc19800aa7c966d34a",
sha_mac = "6cdbf17ed61486b38ea79d3f31d74483e7388d1e7468518dccba3f24e0ddd4c4",
sha_win = "4d22a32c219dbe18c55b635d014b9eaf7da60536171b7af37d9a8099fd33794b",
),
"2.0.19": struct(
hash = "9b9ff2dabfb4a7fbacbc004c0bead12a60f9d05c",
sha_linux = "bd7c2a38ac88d219a1ab5003ddbf8fdc66a6ba55bc69f99077346edf2753b4ea",
sha_mac = "6cc44029c9052855a55938eb6496b5659da4b1ce9cb34502b740af5993a94f93",
sha_win = "a1fa8b1c387b9307f9b87c43dc83c0ff1bc04b9f29fbe4f39aff2dd946ca4b70",
),
"2.0.18": struct(
hash = "c2ac7520fad29a7937ed60ab6a95b08eb374c7ba",
sha_linux = "e9f777de592f606b10104b2efe5179a7a8f44e3a9dffa1e3aaf73e05eb8893d7",
sha_mac = "86b1dd62e424e3788bf132292a694a25ca9b0875d06f50d0f5d424593697452c",
sha_win = "49ce07bda6be070251db44a08fcc05cae21ffdbd7522423a0c79bde635e87e28",
),
"2.0.17": struct(
hash = "f5c45e60392b82f603e3a8039c62db294fab02d2",
sha_linux = "b40a4874057e4cace600f8ee9787dcbe236e3dc5b2fff5c2ecb0e867e426f99c",
sha_mac = "081f61abf7d5ac0ec31aaffc5550013d4093ea4ea39520b7a32b7448d2a6ee70",
sha_win = "45d06e597e6a1185a76200bd0481495e7298800a4805045d9cdbcce6311c91b2",
),
"2.0.16": struct(
hash = "80d9674f2fafa6b9346d735c42d5c52b8cc8aa8e",
sha_linux = "e527638b224d9a30dc7e5fa4b9bd2eb2ab76ad306739ba8cacf5a5e333933a2a",
sha_mac = "061020eb0e3ee0611dc5a0008ccc7778168a4f838d49ca41c0aad8c52c1a01c9",
sha_win = "99364ed0388f928e0594f790662bf3a30c2894b0eff81797e1b64f62128561cb",
),
"2.0.15": struct(
hash = "89202930a98fe7f9ed59b574469a9471b0bda7dd",
sha_linux = "7ff49fc63adf29970f6e7af1df445d7f554bdbbb2606db1cb5d3567ce69df1db",
sha_mac = "e35cced1514ad0da40584f8dd6f76aabf847ce0fa82c6dc8dd9442fb74ed6d0d",
sha_win = "31d5f8107c87833cea57edc57613bba4b36b16152772f744c5ad204594b4e666",
),
"2.0.14": struct(
hash = "fc5562126762ab26c4757147a3b4c24e85a7289e",
sha_linux = "e466cd47ddd4bf0acd645412fdf08eda6d232484e48e5a2643e08062a7a4cf56",
sha_mac = "1c554c08459b7025638ca4eddba0d35babe8c26b202a70a74e9442d577896211",
sha_win = "428bc6094671937af96f26d803871fc5cd83d4d2b1c1df45fa6873a9bc5cac51",
),
"2.0.13": struct(
hash = "ce0e4a4d1cab395ee5082a60ebb4f3891a94b256",
sha_linux = "8986ed886e111c661099c5147126b8a379a4040aab6a1f572fe01f0f9b99a343",
sha_mac = "88c91332c8c76fed14ebf0edc9a08f586012f54f04ad61e5b1b6d02bf96bdeab",
sha_win = "9fb3b945b7bd56e34d17ec04de4cce475f26c49d161aee9d9c0b8b1434591f88",
),
}
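# Hedged usage sketch (illustrative, kept as a comment since this file is
# auto-generated): a workspace macro might pick the per-OS sha256 like this:
# def _emscripten_sha(version, os_name):
#     tag = EMSCRIPTEN_TAGS[version]
#     return {"linux": tag.sha_linux, "mac": tag.sha_mac, "win": tag.sha_win}[os_name]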
| 56.18254 | 87 | 0.773273 | 317 | 7,079 | 17.069401 | 0.394322 | 0.007392 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.494297 | 0.157791 | 7,079 | 125 | 88 | 56.632 | 0.413284 | 0.012855 | 0 | 0.163934 | 0 | 0 | 0.68146 | 0.664281 | 0 | 1 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 1 | 1 | null | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 6 |
9c2d11454f7ca162864d3d49b5240617f40c9f87 | 36,213 | py | Python | backend/tests/utils/blockchain_client/test_eth_client_eth.py | ravirahman/sancus | 6563852b98edeb1068574e2d99e1fc18b815bee3 | [
"MIT"
] | 2 | 2022-03-17T04:50:20.000Z | 2022-03-17T04:51:31.000Z | backend/tests/utils/blockchain_client/test_eth_client_eth.py | ravirahman/sancus | 6563852b98edeb1068574e2d99e1fc18b815bee3 | [
"MIT"
] | null | null | null | backend/tests/utils/blockchain_client/test_eth_client_eth.py | ravirahman/sancus | 6563852b98edeb1068574e2d99e1fc18b815bee3 | [
"MIT"
] | null | null | null | import unittest
from decimal import Decimal
from typing import cast
from unittest.mock import patch
import grpc
import petlib.bn
import web3
from common.constants import ADMIN_UUID, Blockchain, Currency
from eth_account.account import Account as ETHAccount
from protobufs.account_pb2 import AccountType
from protobufs.eth_pb2 import EthereumTxParams
from protobufs.institution.account_pb2 import (
KeyType,
TransactionStatus,
TransactionType,
)
from sqlalchemy.orm.exc import NoResultFound
from web3.types import TxReceipt
from backend.backend import Backend
from backend.sql.account import Account
from backend.sql.blockchain_address_key import BlockchainAddressKey
from backend.sql.blockchain_transaction import BlockchainTransaction
from backend.sql.blockchain_withdrawal import BlockchainWithdrawal
from backend.sql.key import Key
from backend.sql.key_account_commitment import KeyAccountCommitment
from backend.sql.key_currency_account import KeyCurrencyAccount
from backend.sql.key_currency_block import KeyCurrencyBlock
from backend.sql.transaction import Transaction
from backend.utils.blockchain_client.eth import ETHClient
from tests.base import BaseBackendTestCase
from tests.fixtures import (
ETH1_AMOUNT,
ETH2_AMOUNT,
MAIN_ETH_ACCOUNT,
MOCK_USER_UUID,
EthFixturesContainer,
wait_for_eth_block,
)
GAS_PRICE_WEI = 17
def mock_get_eth_gas_price(self: ETHClient) -> int: # pylint: disable=unused-argument
return GAS_PRICE_WEI
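# The patch below pins the gas price for the whole test class so that fee
# arithmetic in the assertions (21000 gas * GAS_PRICE_WEI wei) is deterministic.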
@patch.object(ETHClient, "_get_gas_price", mock_get_eth_gas_price)
class TestETHClientETH(BaseBackendTestCase):
backend: Backend
w3: web3.Web3
channel: grpc.Channel
start_block: int
fixture_container: EthFixturesContainer
@classmethod
def setUpClass(cls) -> None:
super().setUpClass()
cls.w3 = cls.backend.eth_client._w3 # pylint: disable=protected-access
start_block = cls.backend.eth_client.start_block_number
cls.start_block = start_block
num_tests = len(list(filter(lambda x: x.startswith("test_"), dir(cls))))
cls.fixture_container = EthFixturesContainer(cls.backend.eth_client, num_tests)
def setUp(self) -> None:
super().setUp()
self.eth_fixture = self.fixture_container()
with self.backend.sessionmaker() as session:
# add an ethereum account
eth_account = Account(
user_uuid=MOCK_USER_UUID,
currency=Currency.ETH,
account_type=AccountType.DEPOSIT_ACCOUNT,
)
session.add(eth_account)
session.commit()
self.eth_account_uuid = eth_account.uuid
private_key_bn = petlib.bn.Bn.from_binary(self.eth_fixture.private_key)
# track the keys
self.key_uuid = self.backend.key_client.import_hot_key(
private_key_bn,
self.w3.eth.get_transaction_count(self.eth_fixture.address),
)
with self.backend.sessionmaker() as session:
key = session.query(Key).filter(Key.key_uuid == self.key_uuid).one()
self.assertEqual(key.get_address(Blockchain.ETH), self.eth_fixture.address)
# assign the keys
self.backend.key_client.assign_key_for_deposits_to_account(
key_uuid=self.key_uuid, account_uuid=self.eth_account_uuid
)
# process the blocks
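        # (each block is deliberately processed twice; the repeated call
        # presumably exercises idempotent block handling)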
for block_number in range(self.start_block, self.eth_fixture.eth2_tx_receipt.blockNumber + 1):
self.backend.blockchain_client.process_block(Blockchain.ETH, block_number)
self.backend.blockchain_client.process_block(Blockchain.ETH, block_number)
def test_deposits(self) -> None:
# as of right now, tx1 should be pending or confirmed, and tx2 should be pending
tx1_confirmation_block_number = (
self.eth_fixture.eth1_tx_receipt.blockNumber + self.backend.eth_client.num_confirmations - 1
)
wait_for_eth_block(self.backend.eth_client, tx1_confirmation_block_number)
if self.eth_fixture.eth2_tx_receipt.blockNumber + 1 < tx1_confirmation_block_number:
for block_number in range(self.eth_fixture.eth2_tx_receipt.blockNumber + 1, tx1_confirmation_block_number):
self.backend.blockchain_client.process_block(Blockchain.ETH, block_number)
self.backend.blockchain_client.process_block(Blockchain.ETH, block_number)
# tx1 should be confirmed, and tx2 should be pending.
with self.backend.sessionmaker() as session:
account = session.query(Account).filter(Account.uuid == self.eth_account_uuid).one()
self.assertEqual(account.available_amount, ETH1_AMOUNT)
self.assertEqual(account.pending_amount, ETH2_AMOUNT)
blockchain_transaction_1, transaction_1 = (
session.query(BlockchainTransaction, Transaction)
.filter(
Transaction.account_uuid == self.eth_account_uuid,
Transaction.status == TransactionStatus.COMPLETED,
BlockchainTransaction.transaction_uuid == Transaction.uuid,
)
.one()
)
self.assertEqual(transaction_1.amount, ETH1_AMOUNT)
self.assertEqual(transaction_1.transaction_type, TransactionType.DEPOSIT)
self.assertEqual(blockchain_transaction_1.block_number, self.eth_fixture.eth1_tx_receipt.blockNumber)
blockchain_transaction_2, transaction_2 = (
session.query(BlockchainTransaction, Transaction)
.filter(
Transaction.account_uuid == self.eth_account_uuid,
Transaction.status == TransactionStatus.PENDING,
BlockchainTransaction.transaction_uuid == Transaction.uuid,
)
.one()
)
self.assertEqual(transaction_2.amount, ETH2_AMOUNT)
self.assertEqual(transaction_2.transaction_type, TransactionType.DEPOSIT)
self.assertEqual(blockchain_transaction_2.block_number, self.eth_fixture.eth2_tx_receipt.blockNumber)
tx2_confirmation_block_number = (
self.eth_fixture.eth2_tx_receipt.blockNumber + self.backend.eth_client.num_confirmations - 1
) # pylint: disable=protected-access
wait_for_eth_block(self.backend.eth_client, tx2_confirmation_block_number)
for block_number in range(self.eth_fixture.eth2_tx_receipt.blockNumber + 1, tx2_confirmation_block_number + 1):
self.backend.blockchain_client.process_block(Blockchain.ETH, block_number)
self.backend.blockchain_client.process_block(Blockchain.ETH, block_number)
with self.backend.sessionmaker() as session:
account = session.query(Account).filter(Account.uuid == self.eth_account_uuid).one()
self.assertEqual(account.available_amount, ETH1_AMOUNT + ETH2_AMOUNT)
self.assertEqual(account.pending_amount, Decimal(0))
self.assertEqual(
session.query(Transaction)
.filter(
Transaction.account_uuid == self.eth_account_uuid,
Transaction.status == TransactionStatus.COMPLETED,
)
.count(),
2,
)
def test_get_available_and_pending_eth_balance(self) -> None:
with self.backend.sessionmaker() as session:
eth_account = session.query(Account).filter(Account.uuid == self.eth_account_uuid).one()
amount = eth_account.pending_amount + eth_account.available_amount
self.assertEqual(amount, ETH1_AMOUNT + ETH2_AMOUNT)
def test_get_cumulative_deposits(self) -> None:
self.assertEqual(
self.backend.eth_client.get_cumulative_deposits(
self.key_uuid,
Currency.ETH,
from_block_number=self.start_block + 1,
to_block_number=self.eth_fixture.eth1_tx_receipt.blockNumber - 1,
),
Decimal(0),
)
for block_number in range(
self.eth_fixture.eth1_tx_receipt.blockNumber, self.eth_fixture.eth2_tx_receipt.blockNumber
):
self.assertEqual(
self.backend.eth_client.get_cumulative_deposits(
self.key_uuid, Currency.ETH, from_block_number=self.start_block + 1, to_block_number=block_number
),
ETH1_AMOUNT,
)
self.assertEqual(
self.backend.eth_client.get_cumulative_deposits(
self.key_uuid,
Currency.ETH,
from_block_number=self.start_block + 1,
to_block_number=self.eth_fixture.eth2_tx_receipt.blockNumber,
),
ETH1_AMOUNT + ETH2_AMOUNT,
)
def test_key_approximate_bal(self) -> None:
with self.backend.sessionmaker() as session:
eth_key_currency = (
session.query(KeyCurrencyAccount)
.filter(
KeyCurrencyAccount.key_uuid == self.key_uuid,
KeyCurrencyAccount.currency == Currency.ETH,
)
.one()
)
expected_bal = ETH1_AMOUNT + ETH2_AMOUNT
self.assertEqual(eth_key_currency.available_balance, expected_bal)
def test_create_pending_transaction(self) -> None:
amount = self.backend.eth_client.wei_to_eth(1)
with self.backend.sessionmaker() as session:
pending_tx_id, pending_tx_params_any_pb = self.backend.eth_client.create_pending_transaction(
session,
amount=amount,
currency=Currency.ETH,
destination_address=MAIN_ETH_ACCOUNT,
key_type=KeyType.HOT,
should_dest_be_admin=False,
)
session.commit()
with self.backend.sessionmaker() as session:
pending_tx = (
session.query(BlockchainWithdrawal)
.filter(
BlockchainWithdrawal.uuid == pending_tx_id,
)
.one()
)
pending_tx_pb = EthereumTxParams()
self.assertTrue(pending_tx_params_any_pb.Unpack(pending_tx_pb))
self.assertEqual(pending_tx.tx_params, pending_tx_params_any_pb)
self.assertEqual(pending_tx.blockchain, Blockchain.ETH)
estimated_tx_fee = self.backend.eth_client.wei_to_eth(pending_tx_pb.gas * pending_tx_pb.gasPrice)
self.assertEqual(
estimated_tx_fee,
self.backend.eth_client.wei_to_eth(GAS_PRICE_WEI * 21_000),
)
self.assertIsNone(pending_tx.signed_tx)
self.assertIsNone(pending_tx.txn_hash)
self.assertIsNone(pending_tx.last_broadcast_at)
self.assertIsNone(pending_tx.block_number)
eth_key_currency_account = (
session.query(KeyCurrencyAccount)
.filter(
KeyCurrencyAccount.key_uuid == self.key_uuid,
KeyCurrencyAccount.currency == Currency.ETH,
)
.one()
)
self.assertEqual(
eth_key_currency_account.available_balance,
ETH1_AMOUNT + ETH2_AMOUNT - amount - estimated_tx_fee,
)
def test_create_pending_transaction_admin(self) -> None:
amount = self.backend.eth_client.wei_to_eth(1)
admin_key_uuid = self.backend.key_client.make_new_hot_key()
with self.backend.sessionmaker() as session:
admin_key = session.query(Key).filter(Key.key_uuid == admin_key_uuid).one()
destination_address = admin_key.get_address(Blockchain.ETH)
pending_tx_id, pending_tx_params_any_pb = self.backend.eth_client.create_pending_transaction(
session,
amount=amount,
currency=Currency.ETH,
destination_address=destination_address,
key_type=KeyType.HOT,
should_dest_be_admin=True,
)
session.commit()
with self.backend.sessionmaker() as session:
pending_tx = (
session.query(BlockchainWithdrawal)
.filter(
BlockchainWithdrawal.uuid == pending_tx_id,
)
.one()
)
pending_tx_pb = EthereumTxParams()
self.assertTrue(pending_tx_params_any_pb.Unpack(pending_tx_pb))
self.assertEqual(pending_tx.tx_params, pending_tx_params_any_pb)
self.assertEqual(pending_tx.blockchain, Blockchain.ETH)
estimated_tx_fee = self.backend.eth_client.wei_to_eth(pending_tx_pb.gas * pending_tx_pb.gasPrice)
self.assertEqual(
estimated_tx_fee,
Decimal(GAS_PRICE_WEI * 21_000) / Decimal(10 ** 18),
)
self.assertIsNone(pending_tx.signed_tx)
self.assertIsNone(pending_tx.txn_hash)
self.assertIsNone(pending_tx.last_broadcast_at)
self.assertIsNone(pending_tx.block_number)
eth_key_currency_account = (
session.query(KeyCurrencyAccount)
.filter(
KeyCurrencyAccount.key_uuid == self.key_uuid,
KeyCurrencyAccount.currency == Currency.ETH,
)
.one()
)
self.assertEqual(
eth_key_currency_account.available_balance,
ETH1_AMOUNT + ETH2_AMOUNT - amount - estimated_tx_fee,
)
dest_key_currency_account = (
session.query(KeyCurrencyAccount)
.filter(
KeyCurrencyAccount.key_uuid == admin_key_uuid,
KeyCurrencyAccount.currency == Currency.ETH,
)
.one()
)
self.assertEqual(dest_key_currency_account.pending_admin_deposits, 1)
def test_create_pending_transaction_admin_fails(self) -> None:
amount = self.backend.eth_client.wei_to_eth(1)
new_key_uuid = self.backend.key_client.make_new_hot_key()
with self.backend.sessionmaker() as session:
new_key = session.query(Key).filter(Key.key_uuid == new_key_uuid).one()
destination_address = new_key.get_address(Blockchain.ETH)
self.backend.key_client.assign_key_for_deposits_to_account(
key_uuid=new_key_uuid, account_uuid=self.eth_account_uuid
)
with self.assertRaises(RuntimeError):
with self.backend.sessionmaker() as session:
self.backend.eth_client.create_pending_transaction(
session,
amount=amount,
currency=Currency.ETH,
destination_address=destination_address,
key_type=KeyType.HOT,
should_dest_be_admin=True,
)
session.commit()
def test_queue_hot_transactions(self) -> None:
amount = self.backend.eth_client.wei_to_eth(1)
with self.backend.sessionmaker() as session:
pending_tx_id, ignored_pending_tx_any_pb = self.backend.eth_client.create_pending_transaction(
session,
amount=amount,
currency=Currency.ETH,
destination_address=MAIN_ETH_ACCOUNT,
key_type=KeyType.HOT,
should_dest_be_admin=False,
)
session.commit()
with self.backend.sessionmaker() as session:
self.backend.eth_client.queue_hot_transaction(session, pending_tx_id)
session.commit()
with self.backend.sessionmaker() as session:
pending_eth_tx = (
session.query(BlockchainWithdrawal)
.filter(
BlockchainWithdrawal.uuid == pending_tx_id,
)
.one()
)
self.assertIsNotNone(pending_eth_tx.signed_tx)
recovered_from_address = ETHAccount.recover_transaction( # pylint: disable=no-value-for-parameter
pending_eth_tx.signed_tx
)
self.assertEqual(recovered_from_address, self.eth_fixture.address)
with self.assertRaises(NoResultFound):
with self.backend.sessionmaker() as session:
self.backend.eth_client.queue_hot_transaction(session, pending_tx_id) # can't queue twice
session.commit()
def test_queue_cold_transaction(self) -> None:
amount = self.backend.eth_client.wei_to_eth(1)
with self.backend.sessionmaker() as session:
pending_tx_id, pending_tx_any_pb = self.backend.eth_client.create_pending_transaction(
session,
amount=amount,
currency=Currency.ETH,
destination_address=MAIN_ETH_ACCOUNT,
key_type=KeyType.HOT,
should_dest_be_admin=False,
)
session.commit()
tx_params_pb = EthereumTxParams()
self.assertTrue(pending_tx_any_pb.Unpack(tx_params_pb))
tx_params = self.backend.eth_client._deserialize_tx_params(tx_params_pb) # pylint: disable=protected-access
account = ETHAccount.from_key(self.eth_fixture.private_key) # pylint: disable=no-value-for-parameter
signed_tx = account.sign_transaction(tx_params)
with self.backend.sessionmaker() as session:
blockchain_transaction_identifier = self.backend.eth_client.queue_cold_transaction(
session, pending_tx_id, signed_tx.rawTransaction
)
self.assertEqual(
self.backend.eth_client._create_withdrawal_transaction_identifier( # pylint: disable=protected-access
signed_tx.hash
),
blockchain_transaction_identifier,
)
session.commit()
with self.backend.sessionmaker() as session:
pending_eth_tx = (
session.query(BlockchainWithdrawal)
.filter(
BlockchainWithdrawal.uuid == pending_tx_id,
)
.one()
)
self.assertIsNotNone(pending_eth_tx.signed_tx)
recovered_from_address = ETHAccount.recover_transaction( # pylint: disable=no-value-for-parameter
pending_eth_tx.signed_tx
)
self.assertEqual(recovered_from_address, self.eth_fixture.address)
with self.assertRaises(NoResultFound):
with self.backend.sessionmaker() as session:
self.backend.eth_client.queue_hot_transaction(session, pending_tx_id) # can't queue twice
session.commit()
def test_broadcast_reconcile_prune(self) -> None:
admin_key_uuid = self.backend.key_client.make_new_hot_key()
with self.backend.sessionmaker() as session:
admin_key = session.query(Key).filter(Key.key_uuid == admin_key_uuid).one()
destination_address = admin_key.get_address(Blockchain.ETH)
amount = self.backend.eth_client.wei_to_eth(1)
pending_tx_id, ignored_pending_tx_any_pb = self.backend.eth_client.create_pending_transaction(
session,
amount=amount,
currency=Currency.ETH,
destination_address=destination_address,
key_type=KeyType.HOT,
should_dest_be_admin=True,
)
session.commit()
with self.backend.sessionmaker() as session:
admin_key_currency = (
session.query(KeyCurrencyAccount)
.filter(
KeyCurrencyAccount.key_uuid == admin_key_uuid,
KeyCurrencyAccount.currency == Currency.ETH,
)
.one()
)
self.assertEqual(admin_key_currency.pending_admin_deposits, 1)
with self.backend.sessionmaker() as session:
blockchain_transaction_identifier = self.backend.eth_client.queue_hot_transaction(session, pending_tx_id)
session.commit()
wait_for_eth_block(self.backend.eth_client, self.eth_fixture.eth2_tx_receipt.blockNumber + 1)
self.backend.blockchain_client.process_block(Blockchain.ETH, self.eth_fixture.eth2_tx_receipt.blockNumber + 1)
self.backend.blockchain_client.process_block(Blockchain.ETH, self.eth_fixture.eth2_tx_receipt.blockNumber + 1)
with self.backend.sessionmaker() as session:
pending_eth_tx = (
session.query(BlockchainWithdrawal)
.filter(
BlockchainWithdrawal.uuid == pending_tx_id,
)
.one()
)
txn_hash = pending_eth_tx.txn_hash
estimated_tx_fee = self.backend.eth_client.wei_to_eth(GAS_PRICE_WEI * 21_000)
tx_params_any_pb = pending_eth_tx.tx_params
tx_params = EthereumTxParams()
self.assertTrue(tx_params_any_pb.Unpack(tx_params))
key = (
session.query(Key)
.filter(
BlockchainAddressKey.blockchain == Blockchain.ETH,
BlockchainAddressKey.address == tx_params.fromAddress,
BlockchainAddressKey.key_uuid == Key.key_uuid,
)
.one()
)
key_uuid = key.key_uuid
eth_key_currency = (
session.query(KeyCurrencyAccount)
.filter(
KeyCurrencyAccount.key_uuid == key_uuid,
KeyCurrencyAccount.currency == Currency.ETH,
)
.one()
)
available_balance_with_estimated_fee = eth_key_currency.available_balance
original_balance = available_balance_with_estimated_fee + estimated_tx_fee
gas_price_wei = tx_params.gasPrice
admin_key_currency = (
session.query(KeyCurrencyAccount)
.filter(
KeyCurrencyAccount.key_uuid == admin_key_uuid,
KeyCurrencyAccount.currency == Currency.ETH,
)
.one()
)
self.assertEqual(admin_key_currency.pending_admin_deposits, 1)
# it SHOULD be included within the next block
tx_receipt = cast(TxReceipt, self.w3.eth.waitForTransactionReceipt(txn_hash, timeout=20))
self.assertEqual(
self.backend.eth_client._create_withdrawal_transaction_identifier( # pylint: disable=protected-access
txn_hash
),
blockchain_transaction_identifier,
)
reconcile_block_number = tx_receipt.blockNumber
prune_block_number = reconcile_block_number + self.backend.eth_client.num_confirmations - 1
gas_used = tx_receipt.gasUsed
gas_used_wei = gas_used * gas_price_wei
gas_used_eth = self.backend.eth_client.wei_to_eth(gas_used_wei)
expected_new_balance = original_balance - gas_used_eth
self.assertTrue(tx_receipt["status"])
for block_number in range(self.eth_fixture.eth2_tx_receipt.blockNumber + 2, reconcile_block_number + 1):
self.backend.blockchain_client.process_block(Blockchain.ETH, block_number)
self.backend.blockchain_client.process_block(Blockchain.ETH, block_number)
with self.backend.sessionmaker() as session:
eth_key_currency = (
session.query(KeyCurrencyAccount)
.filter(
KeyCurrencyAccount.key_uuid == key_uuid,
KeyCurrencyAccount.currency == Currency.ETH,
)
.one()
)
self.assertEqual(eth_key_currency.available_balance, expected_new_balance)
admin_key_currency = (
session.query(KeyCurrencyAccount)
.filter(
KeyCurrencyAccount.key_uuid == admin_key_uuid,
KeyCurrencyAccount.currency == Currency.ETH,
)
.one()
)
self.assertEqual(admin_key_currency.pending_admin_deposits, 1)
pending_eth_tx = (
session.query(BlockchainWithdrawal)
.filter(
BlockchainWithdrawal.uuid == pending_tx_id,
)
.one()
)
self.assertEqual(pending_eth_tx.block_number, reconcile_block_number)
wait_for_eth_block(self.backend.eth_client, prune_block_number)
for block_number in range(reconcile_block_number + 1, prune_block_number + 1):
self.backend.blockchain_client.process_block(Blockchain.ETH, block_number)
self.backend.blockchain_client.process_block(Blockchain.ETH, block_number)
with self.backend.sessionmaker() as session:
admin_key_currency = (
session.query(KeyCurrencyAccount)
.filter(
KeyCurrencyAccount.currency == Currency.ETH,
KeyCurrencyAccount.key_uuid == key_uuid,
)
.one()
)
# the deposits are no longer pending
self.assertEqual(admin_key_currency.pending_admin_deposits, 0)
# no more pending transactions; they've been deleted
self.assertEqual(
session.query(BlockchainWithdrawal)
.filter(BlockchainWithdrawal.pending_admin_deposits_reconciled.is_(False))
.count(),
0,
)
def test_void_transaction_and_broadcast(self) -> None:
amount = self.backend.eth_client.wei_to_eth(3)
with self.backend.sessionmaker() as session:
pending_tx_id, ignored_pending_tx_any_pb = self.backend.eth_client.create_pending_transaction(
session,
amount=amount,
currency=Currency.ETH,
destination_address=MAIN_ETH_ACCOUNT,
key_type=KeyType.HOT,
should_dest_be_admin=False,
)
session.commit()
wait_for_eth_block(self.backend.eth_client, self.eth_fixture.eth2_tx_receipt.blockNumber + 1)
self.backend.blockchain_client.process_block(Blockchain.ETH, self.eth_fixture.eth2_tx_receipt.blockNumber + 1)
self.backend.blockchain_client.process_block(Blockchain.ETH, self.eth_fixture.eth2_tx_receipt.blockNumber + 1)
estimated_tx_fee = self.backend.eth_client.wei_to_eth(GAS_PRICE_WEI * 21_000)
with self.backend.sessionmaker() as session:
pending_eth_tx = (
session.query(BlockchainWithdrawal)
.filter(
BlockchainWithdrawal.uuid == pending_tx_id,
)
.one()
)
# should be replaced with a transaction of 1
tx_params_any_pb = pending_eth_tx.tx_params
tx_params = EthereumTxParams()
self.assertTrue(tx_params_any_pb.Unpack(tx_params))
self.assertEqual(tx_params.gas, 21000) # send gas is 21000
# to address should be firm controlled
key = (
session.query(Key)
.filter(
BlockchainAddressKey.blockchain == Blockchain.ETH,
BlockchainAddressKey.address == tx_params.toAddress,
BlockchainAddressKey.key_uuid == Key.key_uuid,
)
.one()
)
self.assertIn(key.key_type, (KeyType.COLD, KeyType.HOT))
# assert that the account is not assigned and has a pending deposit
key_currency = (
session.query(KeyCurrencyAccount)
.filter(
KeyCurrencyAccount.key_uuid == key.key_uuid,
KeyCurrencyAccount.currency == Currency.ETH,
)
.one()
)
self.assertEqual(key_currency.account_uuid, ADMIN_UUID)
self.assertEqual(key_currency.pending_admin_deposits, 1)
eth_key_currency_account = (
session.query(KeyCurrencyAccount)
.filter(
KeyCurrencyAccount.key_uuid == self.key_uuid,
KeyCurrencyAccount.currency == Currency.ETH,
)
.one()
)
self.assertEqual(
eth_key_currency_account.available_balance,
ETH1_AMOUNT + ETH2_AMOUNT - estimated_tx_fee - self.backend.eth_client.wei_to_eth(tx_params.value),
)
# process the next block so we will broadcast the transaction
wait_for_eth_block(self.backend.eth_client, self.eth_fixture.eth2_tx_receipt.blockNumber + 2)
self.backend.blockchain_client.process_block(Blockchain.ETH, self.eth_fixture.eth2_tx_receipt.blockNumber + 2)
self.backend.blockchain_client.process_block(Blockchain.ETH, self.eth_fixture.eth2_tx_receipt.blockNumber + 2)
with self.backend.sessionmaker() as session:
pending_eth_tx = (
session.query(BlockchainWithdrawal)
.filter(
BlockchainWithdrawal.uuid == pending_tx_id,
)
.one()
)
txn_hash = pending_eth_tx.txn_hash
# it SHOULD be included within the next block
tx_receipt = cast(TxReceipt, self.w3.eth.waitForTransactionReceipt(txn_hash, timeout=20))
self.assertTrue(tx_receipt["status"])
class TestETHInitialBalance(BaseBackendTestCase):
# Using a separate class since we don't want the same setUp method
    # specifically, we do NOT want to manually track the key, since that is
# what we are testing
backend: Backend
w3: web3.Web3
channel: grpc.Channel
start_block: int
fixture_container: EthFixturesContainer
@classmethod
def setUpClass(cls) -> None:
super().setUpClass()
cls.w3 = cls.backend.eth_client._w3 # pylint: disable=protected-access
start_block = cls.backend.eth_client.start_block_number
cls.start_block = start_block
num_tests = len(list(filter(lambda x: x.startswith("test_"), dir(cls))))
cls.fixture_container = EthFixturesContainer(cls.backend.eth_client, num_tests)
def setUp(self) -> None:
super().setUp()
self.eth_fixture = self.fixture_container()
for block_number in range(self.start_block, self.eth_fixture.eth2_tx_receipt.blockNumber + 1):
self.backend.blockchain_client.process_block(Blockchain.ETH, block_number)
self.backend.blockchain_client.process_block(Blockchain.ETH, block_number)
with self.backend.sessionmaker() as session:
# add an ethereum account
eth_account = Account(
user_uuid=MOCK_USER_UUID,
currency=Currency.ETH,
account_type=AccountType.DEPOSIT_ACCOUNT,
)
session.add(eth_account)
session.commit()
self.eth_account_uuid = eth_account.uuid
def test_late_import(self) -> None:
private_key_bn = petlib.bn.Bn.from_binary(self.eth_fixture.private_key)
key_uuid = self.backend.key_client.import_hot_key(private_key_bn)
self.backend.key_client.assign_key_for_deposits_to_account(
key_uuid=key_uuid,
account_uuid=self.eth_account_uuid,
)
wait_for_eth_block(self.backend.eth_client, self.eth_fixture.eth2_tx_receipt.blockNumber + 1)
self.backend.blockchain_client.process_block(Blockchain.ETH, self.eth_fixture.eth2_tx_receipt.blockNumber + 1)
with self.backend.sessionmaker() as session:
key_currency_block = (
session.query(KeyCurrencyBlock)
.filter(
KeyCurrencyBlock.key_uuid == key_uuid,
KeyCurrencyBlock.currency == Currency.ETH,
)
.one()
)
key_account_commitment, key_currency_account = (
session.query(
KeyAccountCommitment,
KeyCurrencyAccount,
)
.filter(
KeyAccountCommitment.key_uuid == key_uuid,
KeyAccountCommitment.account_uuid == self.eth_account_uuid,
KeyAccountCommitment.key_uuid == KeyCurrencyAccount.key_uuid,
KeyCurrencyAccount.currency == Currency.ETH,
)
.one()
)
self.assertEqual(key_currency_block.block_number, key_currency_account.initial_balance_block_number)
self.assertEqual(key_currency_block.block_number, key_account_commitment.block_number)
self.assertEqual(key_currency_account.initial_balance, ETH1_AMOUNT + ETH2_AMOUNT)
self.assertEqual(key_currency_account.available_balance, ETH1_AMOUNT + ETH2_AMOUNT)
def test_late_import_with_withdrawal(self) -> None:
withdrawn_amount_wei = 3
tx_params = {
"to": MAIN_ETH_ACCOUNT,
"value": withdrawn_amount_wei,
"gas": 21000,
"gasPrice": 18,
"nonce": 0,
"chainId": self.backend.eth_client._chain_id, # pylint: disable=protected-access
}
total_debit = self.backend.eth_client.wei_to_eth(withdrawn_amount_wei + 21000 * 18)
account = ETHAccount.from_key(self.eth_fixture.private_key) # pylint: disable=no-value-for-parameter
signed_tx = account.sign_transaction(tx_params)
txn_hash = self.w3.eth.send_raw_transaction(signed_tx.rawTransaction)
tx3receipt = cast(TxReceipt, self.w3.eth.waitForTransactionReceipt(txn_hash))
tx3_block_number = tx3receipt.blockNumber
for block_number in range(self.eth_fixture.eth2_tx_receipt.blockNumber + 1, tx3_block_number + 1):
self.backend.blockchain_client.process_block(Blockchain.ETH, block_number)
private_key_bn = petlib.bn.Bn.from_binary(self.eth_fixture.private_key)
key_uuid = self.backend.key_client.import_hot_key(private_key_bn)
self.backend.key_client.assign_key_for_deposits_to_account(
key_uuid=key_uuid,
account_uuid=self.eth_account_uuid,
)
wait_for_eth_block(self.backend.eth_client, tx3_block_number + 1)
self.backend.blockchain_client.process_block(Blockchain.ETH, tx3_block_number + 1)
with self.backend.sessionmaker() as session:
key_currency_block = (
session.query(KeyCurrencyBlock)
.filter(
KeyCurrencyBlock.key_uuid == key_uuid,
KeyCurrencyBlock.currency == Currency.ETH,
)
.one()
)
key_account_commitment, key_currency_account = (
session.query(
KeyAccountCommitment,
KeyCurrencyAccount,
)
.filter(
KeyAccountCommitment.key_uuid == key_uuid,
KeyAccountCommitment.account_uuid == self.eth_account_uuid,
KeyAccountCommitment.key_uuid == KeyCurrencyAccount.key_uuid,
KeyCurrencyAccount.currency == Currency.ETH,
)
.one()
)
self.assertEqual(key_currency_block.block_number, key_currency_account.initial_balance_block_number)
self.assertEqual(key_currency_block.block_number, key_account_commitment.block_number)
            # we are simply sending ether to ourselves and burning the rest
self.assertEqual(key_currency_account.initial_balance, ETH1_AMOUNT + ETH2_AMOUNT - total_debit)
self.assertEqual(key_currency_account.available_balance, ETH1_AMOUNT + ETH2_AMOUNT - total_debit)
if __name__ == "__main__":
unittest.main()
| 46.308184 | 119 | 0.629553 | 3,806 | 36,213 | 5.665265 | 0.07278 | 0.055097 | 0.037844 | 0.04174 | 0.834848 | 0.80512 | 0.783462 | 0.772656 | 0.740098 | 0.719043 | 0 | 0.008162 | 0.296247 | 36,213 | 781 | 120 | 46.367478 | 0.837905 | 0.035871 | 0 | 0.636746 | 0 | 0 | 0.002122 | 0 | 0 | 0 | 0 | 0 | 0.098177 | 1 | 0.025245 | false | 0 | 0.044881 | 0.001403 | 0.088359 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 6 |
9c6848fa6b51ad425b6574f5e74859c5fa19c022 | 107 | py | Python | exo_mentions/exceptions.py | exolever/django-mentions | 65d2417da9633bc4ff602d067271ea0f2bf46133 | [
"MIT"
] | 1 | 2020-05-04T00:11:35.000Z | 2020-05-04T00:11:35.000Z | exo_mentions/exceptions.py | exolever/django-mentions | 65d2417da9633bc4ff602d067271ea0f2bf46133 | [
"MIT"
] | 3 | 2018-10-17T17:29:18.000Z | 2019-11-12T13:16:43.000Z | exo_mentions/exceptions.py | exolever/django-mentions | 65d2417da9633bc4ff602d067271ea0f2bf46133 | [
"MIT"
] | null | null | null | class DjangoMentionException(Exception):
pass
class MentionedObjectDoesNotExist(Exception):
pass
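

# Usage sketch (hypothetical helper, not part of the original module): a
# caller might translate a missing mention target into the domain exception
# above. `model` is assumed to be a Django model class.
def resolve_mention(model, pk):
    try:
        return model.objects.get(pk=pk)
    except model.DoesNotExist:
        raise MentionedObjectDoesNotExist(pk)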
| 15.285714 | 45 | 0.794393 | 8 | 107 | 10.625 | 0.625 | 0.305882 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.149533 | 107 | 6 | 46 | 17.833333 | 0.934066 | 0 | 0 | 0.5 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0.5 | 0 | 0 | 0.5 | 0 | 1 | 0 | 1 | null | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 6 |
9c7085b568c24d381c10881a2f62943d55859afa | 172 | py | Python | tests/communities/test_communities.py | hbrunie/PyFloT | 016e1092d0da8226e8b214c40e9fc02b933f372d | [
"MIT"
] | 3 | 2020-11-18T15:39:36.000Z | 2021-04-12T06:54:42.000Z | tests/communities/test_communities.py | hbrunie/PyFloT | 016e1092d0da8226e8b214c40e9fc02b933f372d | [
"MIT"
] | 1 | 2022-03-28T07:48:58.000Z | 2022-03-28T17:36:51.000Z | tests/communities/test_communities.py | hbrunie/PyFloT | 016e1092d0da8226e8b214c40e9fc02b933f372d | [
"MIT"
] | 1 | 2022-03-24T08:11:35.000Z | 2022-03-24T08:11:35.000Z | from communities import build_graph
from communities import generate_graph
import sys
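# Build the graph (edges, nodes) from the trace file passed as the first CLI
# argument, then render it; the meaning of generate_graph's third argument (50)
# is defined by the communities module.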
tracefile = sys.argv[1]
(ge, gn) = build_graph(tracefile)
generate_graph(ge, gn, 50)
| 21.5 | 38 | 0.796512 | 26 | 172 | 5.115385 | 0.5 | 0.225564 | 0.315789 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.019868 | 0.122093 | 172 | 7 | 39 | 24.571429 | 0.860927 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.5 | 0 | 0.5 | 0 | 1 | 0 | 0 | null | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 6 |
92cabf35c75a78ca65a10132334b0dc0349ccd95 | 103 | py | Python | func/__init__.py | takaaki82/GoogleNetBN_Chainer | b6d7e4ba5179246d1c8b24709f320f625e07fff7 | [
"MIT"
] | null | null | null | func/__init__.py | takaaki82/GoogleNetBN_Chainer | b6d7e4ba5179246d1c8b24709f320f625e07fff7 | [
"MIT"
] | null | null | null | func/__init__.py | takaaki82/GoogleNetBN_Chainer | b6d7e4ba5179246d1c8b24709f320f625e07fff7 | [
"MIT"
] | null | null | null | from . import compute_mean
from . import dataset_function
from . import model2pkl
from . import resize
| 20.6 | 30 | 0.805825 | 14 | 103 | 5.785714 | 0.571429 | 0.493827 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.011494 | 0.15534 | 103 | 4 | 31 | 25.75 | 0.91954 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | null | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 6 |
92d7782704b7d281270572c17829578ce825d05f | 63 | py | Python | Bonsucesso/Semana 06/Exemplos de Sala de Aula/Exemplo007/main.py | profoswaldo/Unisuam_2022-1 | cd0faad61480030d1320515a8104373ada70545b | [
"MIT"
] | 2 | 2022-03-25T02:04:11.000Z | 2022-03-25T09:26:44.000Z | Bonsucesso/Semana 06/Exemplos de Sala de Aula/Exemplo007/main.py | profoswaldo/Unisuam_2022-1 | cd0faad61480030d1320515a8104373ada70545b | [
"MIT"
] | null | null | null | Bonsucesso/Semana 06/Exemplos de Sala de Aula/Exemplo007/main.py | profoswaldo/Unisuam_2022-1 | cd0faad61480030d1320515a8104373ada70545b | [
"MIT"
] | null | null | null | def somar(val1, val2):
return val1 + val2
print(somar(2, 3))
| 12.6 | 22 | 0.666667 | 11 | 63 | 3.818182 | 0.727273 | 0.380952 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.115385 | 0.174603 | 63 | 4 | 23 | 15.75 | 0.692308 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.333333 | false | 0 | 0 | 0.333333 | 0.666667 | 0.333333 | 1 | 0 | 0 | null | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 1 | 1 | 0 | 0 | 6 |
92e6fd113a09e6c7ef37f5a9dd06455b4f00f3ad | 91 | py | Python | src/test_generator.py | skandabhairava/Scripting_tools | cb36358412732bbd36ecdad079c719518689105e | [
"MIT"
] | 1 | 2021-10-11T13:49:57.000Z | 2021-10-11T13:49:57.000Z | src/test_generator.py | skandabhairava/Scripting_tools | cb36358412732bbd36ecdad079c719518689105e | [
"MIT"
] | 1 | 2022-02-16T18:57:36.000Z | 2022-02-16T18:57:36.000Z | src/test_generator.py | skandabhairava/Scripting_tools | cb36358412732bbd36ecdad079c719518689105e | [
"MIT"
] | null | null | null | from scripting_tools.generator import random_string
print(random_string(10, numbers=True)) | 30.333333 | 51 | 0.857143 | 13 | 91 | 5.769231 | 0.846154 | 0.32 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.023529 | 0.065934 | 91 | 3 | 52 | 30.333333 | 0.858824 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 0.5 | 0 | 0.5 | 0.5 | 1 | 0 | 0 | null | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 0 | 1 | 0 | 6 |
135bd982a94666d7dc4793333486d2f5969137e2 | 9,322 | py | Python | atoman/filtering/filters/tests/test_cropBox.py | chrisdjscott/Atoman | e87ac31bbdcf53bb8f3efdfb109787d604890394 | [
"MIT"
] | 9 | 2015-11-23T12:13:34.000Z | 2021-11-18T05:23:35.000Z | atoman/filtering/filters/tests/test_cropBox.py | chrisdjscott/Atoman | e87ac31bbdcf53bb8f3efdfb109787d604890394 | [
"MIT"
] | 1 | 2017-07-17T20:27:50.000Z | 2017-07-23T05:27:15.000Z | atoman/filtering/filters/tests/test_cropBox.py | chrisdjscott/Atoman | e87ac31bbdcf53bb8f3efdfb109787d604890394 | [
"MIT"
] | 4 | 2015-11-23T12:13:37.000Z | 2017-05-03T08:24:19.000Z |
"""
Unit tests for the crop box filter
"""
from __future__ import absolute_import
from __future__ import unicode_literals
import unittest
import numpy as np
from ....system import lattice
from .. import cropBoxFilter
from .. import base
################################################################################
class TestCropBoxAtomsFilter(unittest.TestCase):
"""
Test crop box filter
"""
def setUp(self):
"""
Called before each test
"""
# generate lattice
self.lattice = lattice.Lattice()
self.lattice.addAtom("He", [0,0,0], 0)
self.lattice.addAtom("He", [0,0,4], 0)
self.lattice.addAtom("He", [2,0,0], 0)
self.lattice.addAtom("He", [0,2,0], 0)
self.lattice.addAtom("He", [4,0,0], 0)
self.lattice.addAtom("He", [0,0,2], 0)
self.lattice.addAtom("He", [0,4,0], 0)
self.lattice.addAtom("He", [4,4,4], 0)
# filter
self.filter = cropBoxFilter.CropBoxFilter("Crop box")
def tearDown(self):
"""
Called after each test
"""
# remove refs
self.lattice = None
self.filter = None
def test_cropBoxFilter(self):
"""
Crop box atoms
"""
# TEST 1
# settings - all clusters visible
settings = cropBoxFilter.CropBoxFilterSettings()
settings.updateSetting("xEnabled", True)
settings.updateSetting("xmin", 2.5)
settings.updateSetting("xmax", 9.9)
settings.updateSetting("yEnabled", True)
settings.updateSetting("ymin", 2.5)
settings.updateSetting("ymax", 9.9)
settings.updateSetting("zEnabled", True)
settings.updateSetting("zmin", 2.5)
settings.updateSetting("zmax", 9.9)
settings.updateSetting("invertSelection", False)
# set PBC
self.lattice.PBC[:] = 1
# filter input
filterInput = base.FilterInput()
filterInput.inputState = self.lattice
visibleAtoms = np.arange(self.lattice.NAtoms, dtype=np.int32)
filterInput.visibleAtoms = visibleAtoms
filterInput.NScalars = 0
filterInput.fullScalars = np.empty(0, np.float64)
filterInput.NVectors = 0
filterInput.fullVectors = np.empty(0, np.float64)
# call filter
result = self.filter.apply(filterInput, settings)
self.assertIsInstance(result, base.FilterResult)
# make sure num visible is correct
self.assertEqual(len(visibleAtoms), 1)
# make sure correct atoms selected
self.assertTrue(7 in visibleAtoms)
# TEST 2
# settings - all clusters visible
settings = cropBoxFilter.CropBoxFilterSettings()
settings.updateSetting("xEnabled", True)
settings.updateSetting("xmin", 2.5)
settings.updateSetting("xmax", 9.9)
settings.updateSetting("yEnabled", True)
settings.updateSetting("ymin", 2.5)
settings.updateSetting("ymax", 9.9)
settings.updateSetting("zEnabled", True)
settings.updateSetting("zmin", 2.5)
settings.updateSetting("zmax", 9.9)
settings.updateSetting("invertSelection", True)
# set PBC
self.lattice.PBC[:] = 1
# filter input
filterInput = base.FilterInput()
filterInput.inputState = self.lattice
visibleAtoms = np.arange(self.lattice.NAtoms, dtype=np.int32)
filterInput.visibleAtoms = visibleAtoms
filterInput.NScalars = 0
filterInput.fullScalars = np.empty(0, np.float64)
filterInput.NVectors = 0
filterInput.fullVectors = np.empty(0, np.float64)
# call filter
result = self.filter.apply(filterInput, settings)
self.assertIsInstance(result, base.FilterResult)
# make sure num visible is correct
self.assertEqual(len(visibleAtoms), 7)
# make sure correct atoms selected
self.assertTrue(0 in visibleAtoms)
self.assertTrue(1 in visibleAtoms)
self.assertTrue(2 in visibleAtoms)
self.assertTrue(3 in visibleAtoms)
self.assertTrue(4 in visibleAtoms)
self.assertTrue(5 in visibleAtoms)
self.assertTrue(6 in visibleAtoms)
################################################################################
class TestCropBoxDefectsFilter(unittest.TestCase):
"""
Test crop box filter (defects)
"""
def setUp(self):
"""
Called before each test
"""
# generate lattice
self.lattice = lattice.Lattice()
self.lattice.addAtom("He", [0,0,0], 0)
self.lattice.addAtom("He", [0,0,4], 0)
self.lattice.addAtom("He", [2,0,0], 0)
self.lattice.addAtom("He", [0,2,0], 0)
self.lattice.addAtom("He", [4,0,0], 0)
self.lattice.addAtom("He", [0,0,2], 0)
self.lattice.addAtom("He", [0,4,0], 0)
self.lattice.addAtom("He", [4,4,4], 0)
self.ref = lattice.Lattice()
self.ref.addAtom("H_", [0,0,0], 0)
self.ref.addAtom("He", [4,0,4], 0)
self.ref.addAtom("He", [2,0,2], 0)
self.ref.addAtom("He", [0,2,0], 0)
self.ref.addAtom("He", [4,0,0], 0)
self.ref.addAtom("He", [0,0,2], 0)
self.ref.addAtom("He", [4,4,0], 0)
self.ref.addAtom("H_", [4,4,4], 0)
        self.vacancies = np.asarray([1, 2, 3, 4, 5, 6], dtype=np.int32)
# filter
self.filter = cropBoxFilter.CropBoxFilter("Crop box")
def tearDown(self):
"""
Called after each test
"""
# remove refs
self.lattice = None
self.ref = None
self.filter = None
self.vacancies = None
# def test_cropBoxFilter(self):
# """
# Crop box defects
#
# """
# # TEST 1
#
# # settings - all clusters visible
# settings = cropBoxFilter.CropBoxFilterSettings()
# settings.updateSetting("xEnabled", True)
# settings.updateSetting("xmin", 2.5)
# settings.updateSetting("xmax", 9.9)
# settings.updateSetting("yEnabled", True)
# settings.updateSetting("ymin", 2.5)
# settings.updateSetting("ymax", 9.9)
# settings.updateSetting("zEnabled", True)
# settings.updateSetting("zmin", 2.5)
# settings.updateSetting("zmax", 9.9)
# settings.updateSetting("invertSelection", False)
#
# # set PBC
# self.lattice.PBC[:] = 1
#
# # filter input
# filterInput = base.FilterInput()
# filterInput.inputState = self.lattice
# visibleAtoms = np.arange(self.lattice.NAtoms, dtype=np.int32)
# filterInput.visibleAtoms = visibleAtoms
# filterInput.NScalars = 0
# filterInput.fullScalars = np.empty(0, np.float64)
# filterInput.NVectors = 0
# filterInput.fullVectors = np.empty(0, np.float64)
#
# # call filter
# result = self.filter.apply(filterInput, settings)
# self.assertIsInstance(result, base.FilterResult)
#
# # make sure num visible is correct
# self.assertEqual(len(visibleAtoms), 1)
#
# # make sure correct atoms selected
# self.assertTrue(7 in visibleAtoms)
#
# # TEST 2
#
# # settings - all clusters visible
# settings = cropBoxFilter.CropBoxFilterSettings()
# settings.updateSetting("xEnabled", True)
# settings.updateSetting("xmin", 2.5)
# settings.updateSetting("xmax", 9.9)
# settings.updateSetting("yEnabled", True)
# settings.updateSetting("ymin", 2.5)
# settings.updateSetting("ymax", 9.9)
# settings.updateSetting("zEnabled", True)
# settings.updateSetting("zmin", 2.5)
# settings.updateSetting("zmax", 9.9)
# settings.updateSetting("invertSelection", True)
#
# # set PBC
# self.lattice.PBC[:] = 1
#
# # filter input
# filterInput = base.FilterInput()
# filterInput.inputState = self.lattice
# visibleAtoms = np.arange(self.lattice.NAtoms, dtype=np.int32)
# filterInput.visibleAtoms = visibleAtoms
# filterInput.NScalars = 0
# filterInput.fullScalars = np.empty(0, np.float64)
# filterInput.NVectors = 0
# filterInput.fullVectors = np.empty(0, np.float64)
#
# # call filter
# result = self.filter.apply(filterInput, settings)
# self.assertIsInstance(result, base.FilterResult)
#
# # make sure num visible is correct
# self.assertEqual(len(visibleAtoms), 7)
#
# # make sure correct atoms selected
# self.assertTrue(0 in visibleAtoms)
# self.assertTrue(1 in visibleAtoms)
# self.assertTrue(2 in visibleAtoms)
# self.assertTrue(3 in visibleAtoms)
# self.assertTrue(4 in visibleAtoms)
# self.assertTrue(5 in visibleAtoms)
# self.assertTrue(6 in visibleAtoms)
| 33.775362 | 80 | 0.568118 | 966 | 9,322 | 5.467909 | 0.10766 | 0.159031 | 0.054525 | 0.060583 | 0.937713 | 0.929761 | 0.917077 | 0.892465 | 0.884892 | 0.884892 | 0 | 0.034582 | 0.292748 | 9,322 | 275 | 81 | 33.898182 | 0.766571 | 0.397983 | 0 | 0.685714 | 0 | 0 | 0.037088 | 0 | 0 | 0 | 0 | 0 | 0.114286 | 1 | 0.047619 | false | 0 | 0.066667 | 0 | 0.133333 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 6 |
137941ffaef5f37858adc11dbf608734c570dd38 | 29 | py | Python | backend/home/models.py | crowdbotics-apps/test-31818 | 3c0be5481a1adec1445c703af63db4ff1d0b9146 | [
"FTL",
"AML",
"RSA-MD"
] | 790 | 2015-01-03T02:13:39.000Z | 2020-05-10T19:53:57.000Z | backend/home/models.py | crowdbotics-apps/test-31818 | 3c0be5481a1adec1445c703af63db4ff1d0b9146 | [
"FTL",
"AML",
"RSA-MD"
] | 1,361 | 2015-01-08T23:09:40.000Z | 2020-04-14T00:03:04.000Z | backend/home/models.py | crowdbotics-apps/rwar-33953 | 69c3a19f094ce817df5dd5f3130f0103c7da4dcd | [
"FTL",
"AML",
"RSA-MD"
] | 155 | 2015-01-08T22:59:31.000Z | 2020-04-08T08:01:53.000Z | from django.db import models
| 14.5 | 28 | 0.827586 | 5 | 29 | 4.8 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.137931 | 29 | 1 | 29 | 29 | 0.96 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 1 | 1 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 6 |
137d1da84f1680bfcc9bc7d2839ecdcc44b3aca0 | 13,148 | py | Python | packages/gtmcore/gtmcore/dataset/tests/test_hash.py | jjwatts/gigantum-client | 88ce0475fb6880322bdd06d987c494e29064f278 | [
"MIT"
] | null | null | null | packages/gtmcore/gtmcore/dataset/tests/test_hash.py | jjwatts/gigantum-client | 88ce0475fb6880322bdd06d987c494e29064f278 | [
"MIT"
] | null | null | null | packages/gtmcore/gtmcore/dataset/tests/test_hash.py | jjwatts/gigantum-client | 88ce0475fb6880322bdd06d987c494e29064f278 | [
"MIT"
] | null | null | null | import pytest
import os
import time
from pathlib import Path
from hashlib import blake2b
from gtmcore.dataset.manifest.hash import SmartHash
from gtmcore.fixtures.datasets import mock_dataset_with_cache_dir, mock_dataset_with_manifest
def helper_append_file(cache_dir, revision, rel_path, content):
with open(os.path.join(cache_dir, revision, rel_path), 'at') as fh:
fh.write(content)
class TestHashing(object):
def test_init(self, mock_dataset_with_manifest):
ds, manifest, working_dir = mock_dataset_with_manifest
sh = SmartHash(ds.root_dir, manifest.cache_mgr.cache_root, manifest.dataset_revision)
assert sh.fast_hash_data == {}
@pytest.mark.asyncio
async def test_hash(self, event_loop, mock_dataset_with_manifest):
ds, manifest, working_dir = mock_dataset_with_manifest
sh = SmartHash(ds.root_dir, manifest.cache_mgr.cache_root, manifest.dataset_revision)
cache_dir = manifest.cache_mgr.cache_root
revision = manifest.dataset_revision
assert sh.fast_hash_data == {}
filename = "test1.txt"
helper_append_file(cache_dir, revision, filename, "pupper")
assert sh.fast_hash_data == {}
assert sh.is_cached(filename) is False
assert os.path.exists(os.path.join(cache_dir, revision, ".smarthash")) is False
hash_result = await sh.hash([filename])
hash_result = hash_result[0]
assert len(hash_result) == 128
@pytest.mark.asyncio
async def test_hash_same_as_nonchunked(self, event_loop, mock_dataset_with_manifest):
ds, manifest, working_dir = mock_dataset_with_manifest
sh = SmartHash(ds.root_dir, manifest.cache_mgr.cache_root, manifest.dataset_revision)
cache_dir = manifest.cache_mgr.cache_root
revision = manifest.dataset_revision
filename = "test1.txt"
helper_append_file(cache_dir, revision, filename, "asdfdsfgkdfshuhwedfgft345wfd" * 100000)
assert sh.fast_hash_data == {}
assert sh.is_cached(filename) is False
assert os.path.exists(os.path.join(cache_dir, revision, ".smarthash")) is False
hash_result = await sh.hash([filename])
hash_result = hash_result[0]
h = blake2b()
with open(sh.get_abs_path(filename), 'rb') as fh:
h.update(fh.read())
assert hash_result == h.hexdigest()
@pytest.mark.asyncio
async def test_hash_same_as_nonchunked_multiple(self, event_loop, mock_dataset_with_manifest):
ds, manifest, working_dir = mock_dataset_with_manifest
sh = SmartHash(ds.root_dir, manifest.cache_mgr.cache_root, manifest.dataset_revision)
cache_dir = manifest.cache_mgr.cache_root
revision = manifest.dataset_revision
filename1 = "test1.txt"
helper_append_file(cache_dir, revision, filename1, "asdfdsfgkdfshuhwedfgft345wfd" * 100000)
assert sh.is_cached(filename1) is False
filename2 = "test2.txt"
helper_append_file(cache_dir, revision, filename2, "gfggfgfgfgwee" * 100000)
assert sh.is_cached(filename2) is False
assert os.path.exists(os.path.join(cache_dir, revision, ".smarthash")) is False
assert sh.fast_hash_data == {}
h = blake2b()
with open(sh.get_abs_path(filename1), 'rb') as fh:
h.update(fh.read())
hash1 = h.hexdigest()
h = blake2b()
with open(sh.get_abs_path(filename2), 'rb') as fh:
h.update(fh.read())
hash2 = h.hexdigest()
hash_result = await sh.hash([filename1, filename2])
assert hash1 == hash_result[0]
assert hash2 == hash_result[1]
hash_result = await sh.hash([filename2, filename1])
assert hash2 == hash_result[0]
assert hash1 == hash_result[1]
@pytest.mark.asyncio
async def test_hash_list(self, mock_dataset_with_manifest):
ds, manifest, working_dir = mock_dataset_with_manifest
sh = SmartHash(ds.root_dir, manifest.cache_mgr.cache_root, manifest.dataset_revision)
cache_dir = manifest.cache_mgr.cache_root
revision = manifest.dataset_revision
os.makedirs(os.path.join(cache_dir, revision, "test_dir"))
filenames = ["test1.txt", "test2.txt", "test3.txt", "test_dir/nested.txt"]
for f in filenames:
helper_append_file(cache_dir, revision, f, "sdfadfgfdgh")
filenames.append('test_dir/') # Append the directory, since dirs can be stored in the manifest
hash_results = await sh.hash(filenames)
assert len(hash_results) == 5
@pytest.mark.asyncio
async def test_hash_big(self, mock_dataset_with_manifest):
ds, manifest, working_dir = mock_dataset_with_manifest
sh = SmartHash(ds.root_dir, manifest.cache_mgr.cache_root, manifest.dataset_revision)
cache_dir = manifest.cache_mgr.cache_root
revision = manifest.dataset_revision
os.makedirs(os.path.join(cache_dir, revision, "test_dir"))
helper_append_file(cache_dir, revision, 'test1.txt', "asdf " * 100000000)
helper_append_file(cache_dir, revision, 'test2.txt', "hgfd " * 100000000)
helper_append_file(cache_dir, revision, 'test3.txt', "jjhf " * 10000000)
helper_append_file(cache_dir, revision, 'test4.txt', "jjhf " * 10000000)
filenames = ['test1.txt', 'test2.txt', 'test3.txt', 'test4.txt']
hash_results = await sh.hash(filenames)
assert len(hash_results) == 4
for hr in hash_results:
assert len(hr) == 128
assert hash_results[0] != hash_results[1]
assert hash_results[0] != hash_results[2]
assert hash_results[0] != hash_results[3]
assert hash_results[1] != hash_results[2]
assert hash_results[1] != hash_results[3]
assert hash_results[2] == hash_results[3]
def test_fast_hash_save(self, mock_dataset_with_manifest):
ds, manifest, working_dir = mock_dataset_with_manifest
sh = SmartHash(ds.root_dir, manifest.cache_mgr.cache_root, manifest.dataset_revision)
cache_dir = manifest.cache_mgr.cache_root
revision = manifest.dataset_revision
assert sh.fast_hash_data == {}
assert os.path.exists(os.path.join(cache_dir, revision, ".smarthash")) is False
filename = "test1.txt"
helper_append_file(cache_dir, revision, filename, "pupper")
hash_result1 = sh.fast_hash([filename], save=False)
assert sh.fast_hash_data == {}
assert os.path.exists(os.path.join(cache_dir, revision, ".smarthash")) is False
hash_result2 = sh.fast_hash([filename])
assert hash_result1 == hash_result2
assert filename in sh.fast_hash_data
assert os.path.exists(os.path.join(cache_dir, revision, ".smarthash")) is True
def test_has_changed_fast(self, mock_dataset_with_manifest):
ds, manifest, working_dir = mock_dataset_with_manifest
sh = SmartHash(ds.root_dir, manifest.cache_mgr.cache_root, manifest.dataset_revision)
cache_dir = manifest.cache_mgr.cache_root
revision = manifest.dataset_revision
assert sh.fast_hash_data == {}
assert os.path.exists(os.path.join(cache_dir, revision, ".smarthash")) is False
filename = "test1.txt"
helper_append_file(cache_dir, revision, filename, "pupper")
assert sh.is_cached(filename) is False
hash_result = sh.fast_hash([filename])
hash_result = hash_result[0]
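        # the fast hash is a metadata string of the form "<path>||<size>||<mtime>",
        # so change detection can skip reading file contents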
fname, fsize, mtime = hash_result.split("||")
assert fname == "test1.txt"
assert fsize == '6'
assert sh.fast_hash_data is not None
assert os.path.exists(os.path.join(cache_dir, revision, ".smarthash")) is True
assert sh.is_cached(filename) is True
assert sh.has_changed_fast(filename) is False
time.sleep(1.1)
assert sh.has_changed_fast(filename) is False
# Change file
helper_append_file(cache_dir, revision, filename, "jgfdjfdgsjfdgsj")
assert sh.has_changed_fast(filename) is True
assert sh.has_changed_fast(filename) is True
sh.fast_hash([filename])
assert sh.has_changed_fast(filename) is False
# Touch file, so only change mtime
time.sleep(1.1)
Path(sh.get_abs_path(filename)).touch()
assert sh.has_changed_fast(filename) is True
sh.fast_hash([filename])
assert sh.has_changed_fast(filename) is False
def test_has_changed_fast_from_loaded(self, mock_dataset_with_manifest):
ds, manifest, working_dir = mock_dataset_with_manifest
sh = SmartHash(ds.root_dir, manifest.cache_mgr.cache_root, manifest.dataset_revision)
cache_dir = manifest.cache_mgr.cache_root
revision = manifest.dataset_revision
assert sh.fast_hash_data == {}
filename = "test1.txt"
helper_append_file(cache_dir, revision, filename, "pupper")
hash_result = sh.fast_hash([filename])
hash_result = hash_result[0]
fname, fsize, mtime = hash_result.split("||")
assert fname == "test1.txt"
assert fsize == '6'
assert sh.fast_hash_data is not None
assert os.path.exists(os.path.join(cache_dir, revision, ".smarthash")) is True
assert sh.is_cached(filename) is True
assert sh.has_changed_fast(filename) is False
sh2 = SmartHash(ds.root_dir, cache_dir, revision)
assert sh2.fast_hash_data is not None
assert sh2.is_cached(filename) is True
assert sh2.has_changed_fast(filename) is False
assert sh2.fast_hash_data[filename] == hash_result
def test_fast_hash_list(self, mock_dataset_with_manifest):
ds, manifest, working_dir = mock_dataset_with_manifest
sh = SmartHash(ds.root_dir, manifest.cache_mgr.cache_root, manifest.dataset_revision)
cache_dir = manifest.cache_mgr.cache_root
revision = manifest.dataset_revision
os.makedirs(os.path.join(cache_dir, revision, "test_dir"))
filenames = ["test1.txt", "test2.txt", "test3.txt", "test_dir/nested.txt"]
for f in filenames:
helper_append_file(cache_dir, revision, f, "sdfadfgfdgh")
filenames.append('test_dir/') # Append the directory, since dirs can be stored in the manifest
hash_results = sh.fast_hash(filenames)
assert len(hash_results) == 5
for fname, result in zip(filenames, hash_results):
if fname == 'test_dir/':
assert len(result.split("||")) == 3
path, fsize, _ = result.split("||")
assert path == fname
assert fsize == '4096'
else:
assert len(result.split("||")) == 3
path, fsize, _ = result.split("||")
assert path == fname
assert fsize == '11'
def test_fast_hash_big(self, mock_dataset_with_manifest):
ds, manifest, working_dir = mock_dataset_with_manifest
sh = SmartHash(ds.root_dir, manifest.cache_mgr.cache_root, manifest.dataset_revision)
cache_dir = manifest.cache_mgr.cache_root
revision = manifest.dataset_revision
helper_append_file(cache_dir, revision, 'test1.txt', "asdf " * 100000000)
helper_append_file(cache_dir, revision, 'test2.txt', "hgfd " * 100000000)
helper_append_file(cache_dir, revision, 'test3.txt', "jjh " * 10000000)
helper_append_file(cache_dir, revision, 'test4.txt', "jjh " * 10000000)
filenames = ['test1.txt', 'test2.txt', 'test3.txt', 'test4.txt']
hash_results = sh.fast_hash(filenames)
fname, fsize, mtime = hash_results[0].split("||")
assert 'test1.txt' == fname
assert fsize == "500000000"
fname, fsize, mtime = hash_results[1].split("||")
assert 'test2.txt' in fname
assert fsize == "500000000"
fname, fsize, mtime = hash_results[2].split("||")
assert 'test3.txt' in fname
assert fsize == "40000000"
fname, fsize, mtime = hash_results[3].split("||")
assert 'test4.txt' in fname
assert fsize == "40000000"
assert hash_results[2] != hash_results[3]
def test_get_deleted_files(self, mock_dataset_with_manifest):
ds, manifest, working_dir = mock_dataset_with_manifest
sh = SmartHash(ds.root_dir, manifest.cache_mgr.cache_root, manifest.dataset_revision)
cache_dir = manifest.cache_mgr.cache_root
revision = manifest.dataset_revision
os.makedirs(os.path.join(cache_dir, revision, "test_dir"))
filenames = ["test1.txt", "test2.txt", "test3.txt", "test_dir/nested.txt"]
for f in filenames:
helper_append_file(cache_dir, revision, f, "sdfadfgfdgh")
hash_results = sh.fast_hash(filenames)
assert len(hash_results) == 4
assert len(sh.get_deleted_files(filenames)) == 0
test_new_filenames = ["test1.txt", "test_dir/nested.txt"]
deleted = sh.get_deleted_files(test_new_filenames)
assert len(deleted) == 2
assert deleted[0] == "test2.txt"
assert deleted[1] == "test3.txt"
| 42.00639 | 103 | 0.669379 | 1,711 | 13,148 | 4.882525 | 0.084746 | 0.045008 | 0.067034 | 0.068829 | 0.853005 | 0.813981 | 0.77017 | 0.737012 | 0.712712 | 0.680153 | 0 | 0.024796 | 0.227031 | 13,148 | 312 | 104 | 42.141026 | 0.797206 | 0.01293 | 0 | 0.646091 | 0 | 0 | 0.064981 | 0.004317 | 0 | 0 | 0 | 0 | 0.333333 | 1 | 0.032922 | false | 0 | 0.028807 | 0 | 0.065844 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 6 |
13a4f4b5c0382014cc6bb35a6e061b3725ffdcfc | 67 | py | Python | tests/test_add.py | vaskoz/example_pytest | 82a103ce105dd24b13bcbaef4281cbb3dcc90599 | [
"MIT"
] | null | null | null | tests/test_add.py | vaskoz/example_pytest | 82a103ce105dd24b13bcbaef4281cbb3dcc90599 | [
"MIT"
] | null | null | null | tests/test_add.py | vaskoz/example_pytest | 82a103ce105dd24b13bcbaef4281cbb3dcc90599 | [
"MIT"
] | null | null | null | from src.add import add
def test_add():
assert 4 == add(2, 2)
| 13.4 | 25 | 0.626866 | 13 | 67 | 3.153846 | 0.692308 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.058824 | 0.238806 | 67 | 4 | 26 | 16.75 | 0.745098 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.333333 | 1 | 0.333333 | true | 0 | 0.333333 | 0 | 0.666667 | 0 | 1 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 1 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 6 |
13f55f02c129f4677cef82449794162ee7d39647 | 8,959 | py | Python | scheduler_sdk/api/task/task_client.py | easyopsapis/easyops-api-python | adf6e3bad33fa6266b5fa0a449dd4ac42f8447d0 | [
"Apache-2.0"
] | 5 | 2019-07-31T04:11:05.000Z | 2021-01-07T03:23:20.000Z | scheduler_sdk/api/task/task_client.py | easyopsapis/easyops-api-python | adf6e3bad33fa6266b5fa0a449dd4ac42f8447d0 | [
"Apache-2.0"
] | null | null | null | scheduler_sdk/api/task/task_client.py | easyopsapis/easyops-api-python | adf6e3bad33fa6266b5fa0a449dd4ac42f8447d0 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
import os
import sys
import scheduler_sdk.model.scheduler.task_pb2
import scheduler_sdk.api.task.create_task_pb2
import scheduler_sdk.api.task.delete_task_detail_pb2
import google.protobuf.empty_pb2
import scheduler_sdk.api.task.get_task_detail_pb2
import scheduler_sdk.api.task.list_task_pb2
import scheduler_sdk.api.task.update_task_detail_pb2
import scheduler_sdk.utils.http_util
import google.protobuf.json_format
class TaskClient(object):
def __init__(self, server_ip="", server_port=0, service_name="", host=""):
"""
初始化client
:param server_ip: 指定sdk请求的server_ip,为空时走名字服务路由
:param server_port: 指定sdk请求的server_port,与server_ip一起使用, 为空时走名字服务路由
:param service_name: 指定sdk请求的service_name, 为空时按契约名称路由。如果server_ip和service_name同时设置,server_ip优先级更高
:param host: 指定sdk请求服务的host名称, 如cmdb.easyops-only.com
"""
if server_ip == "" and server_port != 0 or server_ip != "" and server_port == 0:
raise Exception("server_ip和server_port必须同时指定")
self._server_ip = server_ip
self._server_port = server_port
self._service_name = service_name
self._host = host
def create_task(self, request, org, user, timeout=10):
# type: (scheduler_sdk.model.scheduler.task_pb2.SchedulerTask, int, str, int) -> scheduler_sdk.api.task.create_task_pb2.CreateTaskResponse
"""
创建定时调试任务
:param request: create_task请求
:param org: 客户的org编号,为数字
:param user: 调用api使用的用户名
:param timeout: 调用超时时间,单位秒
:return: scheduler_sdk.api.task.create_task_pb2.CreateTaskResponse
"""
headers = {"org": org, "user": user}
route_name = ""
server_ip = self._server_ip
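# Routing: an explicit service_name wins; otherwise, when a server_ip is
# configured, fall back to this method's contract name. The same pattern
# repeats in every method below.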
if self._service_name != "":
route_name = self._service_name
elif self._server_ip != "":
route_name = "easyops.api.scheduler.task.CreateTask"
uri = "/api/v1/scheduler/task"
requestParam = request
rsp_obj = scheduler_sdk.utils.http_util.do_api_request(
method="POST",
src_name="logic.scheduler_sdk",
dst_name=route_name,
server_ip=server_ip,
server_port=self._server_port,
host=self._host,
uri=uri,
params=google.protobuf.json_format.MessageToDict(
requestParam, preserving_proto_field_name=True),
headers=headers,
timeout=timeout,
)
rsp = scheduler_sdk.api.task.create_task_pb2.CreateTaskResponse()
google.protobuf.json_format.ParseDict(rsp_obj, rsp, ignore_unknown_fields=True)
return rsp
def del_task(self, request, org, user, timeout=10):
# type: (scheduler_sdk.api.task.delete_task_detail_pb2.DelTaskRequest, int, str, int) -> google.protobuf.empty_pb2.Empty
"""
删除任定时务
:param request: del_task请求
:param org: 客户的org编号,为数字
:param user: 调用api使用的用户名
:param timeout: 调用超时时间,单位秒
:return: google.protobuf.empty_pb2.Empty
"""
headers = {"org": org, "user": user}
route_name = ""
server_ip = self._server_ip
if self._service_name != "":
route_name = self._service_name
elif self._server_ip != "":
route_name = "easyops.api.scheduler.task.DelTask"
uri = "/api/v1/scheduler/task/{taskId}".format(
taskId=request.taskId,
)
requestParam = request
rsp_obj = scheduler_sdk.utils.http_util.do_api_request(
method="DELETE",
src_name="logic.scheduler_sdk",
dst_name=route_name,
server_ip=server_ip,
server_port=self._server_port,
host=self._host,
uri=uri,
params=google.protobuf.json_format.MessageToDict(
requestParam, preserving_proto_field_name=True),
headers=headers,
timeout=timeout,
)
rsp = google.protobuf.empty_pb2.Empty()
google.protobuf.json_format.ParseDict(rsp_obj, rsp, ignore_unknown_fields=True)
return rsp
def get_task(self, request, org, user, timeout=10):
# type: (scheduler_sdk.api.task.get_task_detail_pb2.GetTaskRequest, int, str, int) -> scheduler_sdk.model.scheduler.task_pb2.SchedulerTask
"""
获取任务详情
:param request: get_task请求
:param org: 客户的org编号,为数字
:param user: 调用api使用的用户名
:param timeout: 调用超时时间,单位秒
:return: scheduler_sdk.model.scheduler.task_pb2.SchedulerTask
"""
headers = {"org": org, "user": user}
route_name = ""
server_ip = self._server_ip
if self._service_name != "":
route_name = self._service_name
elif self._server_ip != "":
route_name = "easyops.api.scheduler.task.GetTask"
uri = "/api/v1/scheduler/task/{taskId}".format(
taskId=request.taskId,
)
requestParam = request
rsp_obj = scheduler_sdk.utils.http_util.do_api_request(
method="GET",
src_name="logic.scheduler_sdk",
dst_name=route_name,
server_ip=server_ip,
server_port=self._server_port,
host=self._host,
uri=uri,
params=google.protobuf.json_format.MessageToDict(
requestParam, preserving_proto_field_name=True),
headers=headers,
timeout=timeout,
)
rsp = scheduler_sdk.model.scheduler.task_pb2.SchedulerTask()
google.protobuf.json_format.ParseDict(rsp_obj["data"], rsp, ignore_unknown_fields=True)
return rsp
def list_task(self, request, org, user, timeout=10):
# type: (scheduler_sdk.api.task.list_task_pb2.ListTaskRequest, int, str, int) -> scheduler_sdk.api.task.list_task_pb2.ListTaskResponse
"""
获取任务列表
:param request: list_task请求
:param org: 客户的org编号,为数字
:param user: 调用api使用的用户名
:param timeout: 调用超时时间,单位秒
:return: scheduler_sdk.api.task.list_task_pb2.ListTaskResponse
"""
headers = {"org": org, "user": user}
route_name = ""
server_ip = self._server_ip
if self._service_name != "":
route_name = self._service_name
elif self._server_ip != "":
route_name = "easyops.api.scheduler.task.ListTask"
uri = "/api/v1/scheduler/task"
requestParam = request
rsp_obj = scheduler_sdk.utils.http_util.do_api_request(
method="GET",
src_name="logic.scheduler_sdk",
dst_name=route_name,
server_ip=server_ip,
server_port=self._server_port,
host=self._host,
uri=uri,
params=google.protobuf.json_format.MessageToDict(
requestParam, preserving_proto_field_name=True),
headers=headers,
timeout=timeout,
)
rsp = scheduler_sdk.api.task.list_task_pb2.ListTaskResponse()
google.protobuf.json_format.ParseDict(rsp_obj, rsp, ignore_unknown_fields=True)
return rsp
def update_task_detail(self, request, org, user, timeout=10):
# type: (scheduler_sdk.api.task.update_task_detail_pb2.UpdateTaskDetailRequest, int, str, int) -> scheduler_sdk.api.task.update_task_detail_pb2.UpdateTaskDetailResponse
"""
更新定时任务
:param request: update_task_detail请求
:param org: 客户的org编号,为数字
:param user: 调用api使用的用户名
:param timeout: 调用超时时间,单位秒
:return: scheduler_sdk.api.task.update_task_detail_pb2.UpdateTaskDetailResponse
"""
headers = {"org": org, "user": user}
route_name = ""
server_ip = self._server_ip
if self._service_name != "":
route_name = self._service_name
elif self._server_ip != "":
route_name = "easyops.api.scheduler.task.UpdateTaskDetail"
uri = "/api/v1/scheduler/task/{taskId}".format(
taskId=request.taskId,
)
requestParam = request
rsp_obj = scheduler_sdk.utils.http_util.do_api_request(
method="PUT",
src_name="logic.scheduler_sdk",
dst_name=route_name,
server_ip=server_ip,
server_port=self._server_port,
host=self._host,
uri=uri,
params=google.protobuf.json_format.MessageToDict(
requestParam, preserving_proto_field_name=True),
headers=headers,
timeout=timeout,
)
rsp = scheduler_sdk.api.task.update_task_detail_pb2.UpdateTaskDetailResponse()
google.protobuf.json_format.ParseDict(rsp_obj, rsp, ignore_unknown_fields=True)
return rsp
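# --- Usage sketch (illustrative only, not part of the generated SDK) ---
# The host below is an assumed placeholder; the message types follow the
# type comments on each method above.
#
#   client = TaskClient(host="scheduler.easyops-only.com")
#   task = scheduler_sdk.model.scheduler.task_pb2.SchedulerTask()
#   created = client.create_task(task, org=1, user="admin")
#   tasks = client.list_task(
#       scheduler_sdk.api.task.list_task_pb2.ListTaskRequest(),
#       org=1, user="admin")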
| 36.717213 | 176 | 0.619266 | 1,003 | 8,959 | 5.232303 | 0.124626 | 0.077744 | 0.051448 | 0.065168 | 0.845274 | 0.825648 | 0.806021 | 0.74657 | 0.667492 | 0.639291 | 0 | 0.007179 | 0.284742 | 8,959 | 243 | 177 | 36.868313 | 0.811798 | 0.209287 | 0 | 0.703226 | 0 | 0 | 0.074538 | 0.051729 | 0 | 0 | 0 | 0 | 0 | 1 | 0.03871 | false | 0 | 0.070968 | 0 | 0.148387 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 6 |
b91c8e16f8a9fa742d7a5ab34a0b3654f30e9266 | 146 | py | Python | notify/__init__.py | Blatzar/notify-send | 16e597faec4adbb0f81ff721e5df811de52aeaf8 | [
"MIT"
] | null | null | null | notify/__init__.py | Blatzar/notify-send | 16e597faec4adbb0f81ff721e5df811de52aeaf8 | [
"MIT"
] | null | null | null | notify/__init__.py | Blatzar/notify-send | 16e597faec4adbb0f81ff721e5df811de52aeaf8 | [
"MIT"
] | null | null | null | from .notification import Notification
from .notification import notification
__version__ = '0.0.16'
__all__ = ["Notification", "notification"]
| 20.857143 | 42 | 0.780822 | 15 | 146 | 7.066667 | 0.466667 | 0.301887 | 0.415094 | 0.641509 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.031008 | 0.116438 | 146 | 6 | 43 | 24.333333 | 0.790698 | 0 | 0 | 0 | 0 | 0 | 0.205479 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.5 | 0 | 0.5 | 0 | 1 | 0 | 0 | null | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 6 |
b9430cb26d78345e2371584e8eae85779f4bda19 | 2,447 | py | Python | speechbrain/nnet/dropout.py | RuABraun/speechbrain | bb5048cbd7cc8162efadf031fdd9eef6f9d7e512 | [
"Apache-2.0"
] | null | null | null | speechbrain/nnet/dropout.py | RuABraun/speechbrain | bb5048cbd7cc8162efadf031fdd9eef6f9d7e512 | [
"Apache-2.0"
] | null | null | null | speechbrain/nnet/dropout.py | RuABraun/speechbrain | bb5048cbd7cc8162efadf031fdd9eef6f9d7e512 | [
"Apache-2.0"
] | null | null | null | """Library implementing dropout.
Authors
* Mirco Ravanelli 2020
"""
import torch # noqa: F401
import logging
import torch.nn as nn
logger = logging.getLogger(__name__)
class Dropout2d(nn.Module):
"""This function implements dropout 2d. It randomly put zeros on
entire channels.
Arguments
---------
drop_rate : float
It is the dropout factor (between 0 and 1).
inplace : bool
If True, it uses inplace operations.
Example
-------
>>> drop = Dropout2d(drop_rate=0.5)
>>> inputs = torch.rand(10, 50, 40)
>>> output=drop(inputs)
>>> output.shape
torch.Size([10, 50, 40])
"""
def __init__(
self, drop_rate, inplace=False,
):
super().__init__()
self.drop_rate = drop_rate
self.inplace = inplace
self.drop = nn.Dropout2d(p=self.drop_rate, inplace=self.inplace)
def forward(self, x):
"""Applies dropout 2d to the input tensor.
Arguments
---------
x : torch.Tensor (batch, time, channel1, channel2)
input on which dropout is applied. 4d tensors are expected.
"""
# time must be the last
x = x.transpose(1, 2).transpose(2, -1)
x_drop = self.drop(x)
x_drop = x_drop.transpose(-1, 1).transpose(2, -1)
return x_drop
class Dropout1d(nn.Module):
"""This function implements dropout 1d. It randomly put zeros on
entire channels.
Arguments
---------
drop_rate : float
It is the dropout factor (between 0 and 1).
inplace : bool
If True, it uses inplace operations.
Example
-------
>>> drop = Dropout1d(drop_rate=0.5)
>>> inputs = torch.rand(10, 50, 40)
>>> output=drop(inputs)
>>> output.shape
torch.Size([10, 50, 40])
"""
def __init__(
self, drop_rate, inplace=False,
):
super().__init__()
self.drop_rate = drop_rate
self.inplace = inplace
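# nn.Dropout2d is reused on purpose: forward() adds a dummy spatial
# dimension so that entire 1d channels are dropped together.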
self.drop = nn.Dropout2d(p=self.drop_rate, inplace=self.inplace)
def forward(self, x):
"""Applies dropout 2d to the input tensor.
Arguments
---------
x : torch.Tensor (batch, time, channel)
input on which dropout is applied. 3d tensors are expected.
"""
# time must be the last
x = x.transpose(1, 2).unsqueeze(2)
x_drop = self.drop(x)
x_drop = x_drop.squeeze(2).transpose(1, 2)
return x_drop
| 23.757282 | 72 | 0.579485 | 309 | 2,447 | 4.459547 | 0.288026 | 0.058055 | 0.05225 | 0.046444 | 0.818578 | 0.818578 | 0.764877 | 0.764877 | 0.764877 | 0.730044 | 0 | 0.039375 | 0.294238 | 2,447 | 102 | 73 | 23.990196 | 0.758541 | 0.487127 | 0 | 0.666667 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.133333 | false | 0 | 0.1 | 0 | 0.366667 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 6 |
b9553a7cfb4c5b59f7a3193f0939eb092e190fbc | 38 | py | Python | pywren_ibm_cloud/runtime/function_handler/__init__.py | gerardparis/pywren-ibm-cloud | ca69bed54f5bd5157bcda961b86dbfcfecf3c54a | [
"Apache-2.0"
] | null | null | null | pywren_ibm_cloud/runtime/function_handler/__init__.py | gerardparis/pywren-ibm-cloud | ca69bed54f5bd5157bcda961b86dbfcfecf3c54a | [
"Apache-2.0"
] | null | null | null | pywren_ibm_cloud/runtime/function_handler/__init__.py | gerardparis/pywren-ibm-cloud | ca69bed54f5bd5157bcda961b86dbfcfecf3c54a | [
"Apache-2.0"
] | null | null | null | from .handler import function_handler
| 19 | 37 | 0.868421 | 5 | 38 | 6.4 | 0.8 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.105263 | 38 | 1 | 38 | 38 | 0.941176 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 1 | 1 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 6 |
b98f955dada4df87e926c5101abff0754b247868 | 70,414 | py | Python | src/olympia/scanners/tests/test_admin.py | bhushan-borole/addons-server | ad609f6ac2cb128e61a935d63e03ee67de75405c | [
"BSD-3-Clause"
] | null | null | null | src/olympia/scanners/tests/test_admin.py | bhushan-borole/addons-server | ad609f6ac2cb128e61a935d63e03ee67de75405c | [
"BSD-3-Clause"
] | null | null | null | src/olympia/scanners/tests/test_admin.py | bhushan-borole/addons-server | ad609f6ac2cb128e61a935d63e03ee67de75405c | [
"BSD-3-Clause"
] | null | null | null | import json
from datetime import datetime
from unittest import mock
from django.conf import settings
from django.contrib.admin.sites import AdminSite
from django.test import RequestFactory
from django.test.utils import override_settings
from django.urls import reverse
from django.utils.html import format_html
from django.utils.http import urlencode
from pyquery import PyQuery as pq
from urllib.parse import urljoin, urlparse
from olympia import amo
from olympia.amo.tests import (
AMOPaths,
TestCase,
addon_factory,
user_factory,
version_factory,
)
from olympia.constants.scanners import (
ABORTING,
COMPLETED,
CUSTOMS,
FALSE_POSITIVE,
INCONCLUSIVE,
MAD,
NEW,
RUNNING,
SCHEDULED,
TRUE_POSITIVE,
UNKNOWN,
WAT,
YARA,
)
from olympia.files.models import FileUpload
from olympia.reviewers.templatetags.code_manager import code_manager_url
from olympia.scanners.admin import (
ExcludeMatchedRuleFilter,
MatchesFilter,
ScannerQueryResultAdmin,
ScannerResultAdmin,
ScannerRuleAdmin,
StateFilter,
WithVersionFilter,
_is_safe_url,
)
from olympia.scanners.models import (
ScannerQueryResult,
ScannerQueryRule,
ScannerResult,
ScannerRule,
)
from olympia.versions.models import Version
class TestScannerResultAdmin(TestCase):
def setUp(self):
super().setUp()
self.user = user_factory(email='someone@mozilla.com')
self.grant_permission(self.user, 'Admin:ScannersResultsEdit')
self.grant_permission(self.user, 'Admin:ScannersResultsView')
self.client.login(email=self.user.email)
self.list_url = reverse('admin:scanners_scannerresult_changelist')
self.admin = ScannerResultAdmin(model=ScannerResult, admin_site=AdminSite())
def test_list_view(self):
rule = ScannerRule.objects.create(name='rule', scanner=CUSTOMS)
ScannerResult.objects.create(
scanner=CUSTOMS,
version=addon_factory().current_version,
results={'matchedRules': [rule.name]},
)
response = self.client.get(self.list_url)
assert response.status_code == 200
html = pq(response.content)
assert html('.column-result_actions').length == 1
def test_list_view_for_non_admins(self):
rule = ScannerRule.objects.create(name='rule', scanner=CUSTOMS)
ScannerResult.objects.create(
scanner=CUSTOMS,
version=addon_factory().current_version,
results={'matchedRules': [rule.name]},
)
user = user_factory(email='somebodyelse@mozilla.com')
self.grant_permission(user, 'Admin:ScannersResultsView')
self.client.login(email=user.email)
response = self.client.get(self.list_url)
assert response.status_code == 200
html = pq(response.content)
assert html('.column-result_actions').length == 0
def test_list_view_is_restricted(self):
user = user_factory(email='curator@mozilla.com')
self.grant_permission(user, 'Admin:Curation')
self.client.login(email=user.email)
response = self.client.get(self.list_url)
assert response.status_code == 403
def test_has_add_permission(self):
assert self.admin.has_add_permission(request=None) is False
def test_has_delete_permission(self):
assert self.admin.has_delete_permission(request=None) is False
def test_has_change_permission(self):
assert self.admin.has_change_permission(request=None) is False
def test_formatted_listed_addon(self):
addon = addon_factory()
version = version_factory(addon=addon, channel=amo.RELEASE_CHANNEL_LISTED)
result = ScannerResult(version=version)
formatted_addon = self.admin.formatted_addon(result)
assert (
'<a href="{}">Link to review page</a>'.format(
urljoin(
settings.EXTERNAL_SITE_URL,
reverse('reviewers.review', args=['listed', addon.id]),
),
)
in formatted_addon
)
assert f'Name:</td><td>{addon.name}' in formatted_addon
assert f'Version:</td><td>{version.version}' in formatted_addon
assert f'Channel:</td><td>{version.get_channel_display()}' in formatted_addon
def test_formatted_unlisted_addon(self):
addon = addon_factory()
version = version_factory(addon=addon, channel=amo.RELEASE_CHANNEL_UNLISTED)
result = ScannerResult(version=version)
formatted_addon = self.admin.formatted_addon(result)
assert (
'<a href="{}">Link to review page</a>'.format(
urljoin(
settings.EXTERNAL_SITE_URL,
reverse('reviewers.review', args=['unlisted', addon.id]),
),
)
in formatted_addon
)
assert f'Name:</td><td>{addon.name}' in formatted_addon
assert f'Version:</td><td>{version.version}' in formatted_addon
assert f'Channel:</td><td>{version.get_channel_display()}' in formatted_addon
def test_formatted_addon_without_version(self):
result = ScannerResult(version=None)
assert self.admin.formatted_addon(result) == '-'
def test_guid(self):
version = version_factory(addon=addon_factory())
result = ScannerResult(version=version)
assert self.admin.guid(result) == version.addon.guid
def test_guid_without_version(self):
result = ScannerResult(version=None)
assert self.admin.guid(result) == '-'
def test_listed_channel(self):
version = version_factory(
addon=addon_factory(), channel=amo.RELEASE_CHANNEL_LISTED
)
result = ScannerResult(version=version)
assert self.admin.channel(result) == 'Listed'
def test_unlisted_channel(self):
version = version_factory(
addon=addon_factory(), channel=amo.RELEASE_CHANNEL_UNLISTED
)
result = ScannerResult(version=version)
assert self.admin.channel(result) == 'Unlisted'
def test_channel_without_version(self):
result = ScannerResult(version=None)
assert self.admin.channel(result) == '-'
def test_formatted_results(self):
results = {'some': 'results'}
result = ScannerResult(results=results)
assert self.admin.formatted_results(result) == format_html(
'<pre>{}</pre>', json.dumps(results, indent=2)
)
def test_formatted_results_without_results(self):
result = ScannerResult()
assert self.admin.formatted_results(result) == '<pre>[]</pre>'
def test_formatted_created(self):
created = datetime.now()
result = ScannerResult(created=created)
assert self.admin.formatted_created(result) == '-'
result.version = Version(created=created)
assert self.admin.formatted_created(result) == created.strftime(
'%Y-%m-%d %H:%M:%S'
)
def test_formatted_matched_rules_with_files(self):
version = addon_factory().current_version
result = ScannerResult.objects.create(scanner=YARA, version=version)
rule = ScannerRule.objects.create(name='bar', scanner=YARA)
filename = 'some/file.js'
result.add_yara_result(rule=rule.name, meta={'filename': filename})
result.save()
file_id = version.all_files[0].id
assert file_id is not None
expect_file_item = code_manager_url(
'browse', version.addon.pk, version.pk, file=filename
)
assert expect_file_item in self.admin.formatted_matched_rules_with_files(result)
def test_formatted_matched_rules_with_files_without_version(self):
result = ScannerResult.objects.create(scanner=YARA)
rule = ScannerRule.objects.create(name='bar', scanner=YARA)
filename = 'some/file.js'
result.add_yara_result(rule=rule.name, meta={'filename': filename})
result.save()
# We list the file related to the matched rule...
assert filename in self.admin.formatted_matched_rules_with_files(result)
# ...but we do not add a link to it because there is no associated
# version.
assert '/browse/' not in self.admin.formatted_matched_rules_with_files(result)
def test_formatted_score_when_scanner_is_not_mad_or_customs(self):
result = ScannerResult(score=0.123, scanner=WAT)
assert self.admin.formatted_score(result) == '-'
def test_formatted_score_for_customs(self):
result = ScannerResult(score=0.123, scanner=CUSTOMS)
assert self.admin.formatted_score(result) == '12%'
def test_formatted_score_for_mad(self):
result = ScannerResult(score=0.456, scanner=MAD)
assert self.admin.formatted_score(result) == '46%'
def test_formatted_score_when_not_available(self):
result = ScannerResult(score=-1, scanner=MAD)
assert self.admin.formatted_score(result) == 'n/a'
def test_list_queries(self):
ScannerResult.objects.create(
scanner=CUSTOMS, version=addon_factory().current_version
)
ScannerResult.objects.create(
scanner=WAT, version=addon_factory().current_version
)
deleted_addon = addon_factory(name='a deleted add-on')
ScannerResult.objects.create(
scanner=CUSTOMS, version=deleted_addon.current_version
)
deleted_addon.delete()
with self.assertNumQueries(14):
# 14 queries:
# - 2 transaction savepoints because of tests
# - 2 request user and groups
# - 2 COUNT(*) on scanners results for pagination and total display
# - 2 get all available rules for filtering
# - 1 scanners results and versions in one query
# - 1 all add-ons in one query
# - 1 all files in one query
# - 1 all authors in one query
# - 1 all add-ons translations in one query
# - 1 all scanner rules in one query
response = self.client.get(
self.list_url, {MatchesFilter.parameter_name: 'all'}
)
assert response.status_code == 200
html = pq(response.content)
expected_length = ScannerResult.objects.count()
assert html('#result_list tbody > tr').length == expected_length
# The name of the deleted add-on should be displayed.
assert str(deleted_addon.name) in html.text()
def test_guid_column_is_sortable_in_list(self):
rule_foo = ScannerRule.objects.create(name='foo', scanner=CUSTOMS)
ScannerResult.objects.create(
scanner=CUSTOMS,
results={'matchedRules': [rule_foo.name]},
version=version_factory(addon=addon_factory()),
)
response = self.client.get(self.list_url)
doc = pq(response.content)
assert 'sortable' in doc('.column-guid').attr('class').split(' ')
def test_list_filters(self):
rule_bar = ScannerRule.objects.create(name='bar', scanner=YARA)
rule_hello = ScannerRule.objects.create(name='hello', scanner=YARA)
rule_foo = ScannerRule.objects.create(name='foo', scanner=CUSTOMS)
response = self.client.get(self.list_url)
assert response.status_code == 200
doc = pq(response.content)
expected = [
('All', '?'),
('customs', '?scanner__exact=1'),
('wat', '?scanner__exact=2'),
('yara', '?scanner__exact=3'),
('mad', '?scanner__exact=4'),
('All', '?has_matched_rules=all'),
(' With matched rules only', '?'),
('All', '?state=all'),
('Unknown', '?'),
('True positive', '?state=1'),
('False positive', '?state=2'),
('Inconclusive', '?state=3'),
('All', '?'),
('foo (customs)', f'?matched_rules__id__exact={rule_foo.pk}'),
('bar (yara)', f'?matched_rules__id__exact={rule_bar.pk}'),
('hello (yara)', f'?matched_rules__id__exact={rule_hello.pk}'),
('All', '?has_version=all'),
(' With version only', '?'),
('No excluded rule', '?'),
('foo (customs)', f'?exclude_rule={rule_foo.pk}'),
('bar (yara)', f'?exclude_rule={rule_bar.pk}'),
('hello (yara)', f'?exclude_rule={rule_hello.pk}'),
]
filters = [(x.text, x.attrib['href']) for x in doc('#changelist-filter a')]
assert filters == expected
def test_list_filter_matched_rules(self):
rule_bar = ScannerRule.objects.create(name='bar', scanner=YARA)
rule_hello = ScannerRule.objects.create(name='hello', scanner=YARA)
rule_foo = ScannerRule.objects.create(name='foo', scanner=CUSTOMS)
with_bar_matches = ScannerResult(scanner=YARA)
with_bar_matches.add_yara_result(rule=rule_bar.name)
with_bar_matches.add_yara_result(rule=rule_hello.name)
with_bar_matches.save()
ScannerResult.objects.create(
scanner=CUSTOMS, results={'matchedRules': [rule_foo.name]}
)
with_hello_match = ScannerResult(scanner=YARA)
with_hello_match.add_yara_result(rule=rule_hello.name)
response = self.client.get(
self.list_url,
{
'matched_rules__id__exact': rule_bar.pk,
WithVersionFilter.parameter_name: 'all',
},
)
assert response.status_code == 200
doc = pq(response.content)
assert doc('#result_list tbody > tr').length == 1
assert doc('.field-formatted_matched_rules').text() == (
'bar (yara), hello (yara)'
)
def test_exclude_matched_rule_filter(self):
rule_bar = ScannerRule.objects.create(name='bar', scanner=YARA)
rule_hello = ScannerRule.objects.create(name='hello', scanner=YARA)
rule_foo = ScannerRule.objects.create(name='foo', scanner=CUSTOMS)
with_bar_and_hello_matches = ScannerResult(scanner=YARA)
with_bar_and_hello_matches.add_yara_result(rule=rule_bar.name)
with_bar_and_hello_matches.add_yara_result(rule=rule_hello.name)
with_bar_and_hello_matches.save()
with_bar_and_hello_matches.update(created=self.days_ago(3))
with_foo_match = ScannerResult(
scanner=CUSTOMS, results={'matchedRules': [rule_foo.name]}
)
with_foo_match.save()
with_foo_match.update(created=self.days_ago(2))
with_hello_match = ScannerResult(scanner=YARA)
with_hello_match.add_yara_result(rule=rule_hello.name)
with_hello_match.save()
with_hello_match.update(created=self.days_ago(1))
# Exclude 'bar'. Because exclude excludes results that *only* match
# the target rule, we should still get 3 results.
response = self.client.get(
self.list_url,
{
ExcludeMatchedRuleFilter.parameter_name: rule_bar.pk,
WithVersionFilter.parameter_name: 'all',
},
)
assert response.status_code == 200
doc = pq(response.content)
assert doc('#result_list tbody > tr').length == 3
expected_ids = [
with_hello_match.pk,
with_foo_match.pk,
with_bar_and_hello_matches.pk,
]
ids = list(map(int, doc('#result_list .field-id').text().split(' ')))
assert ids == expected_ids
# Exclude 'hello'. with_bar_and_hello_matches should still be present
# as it matches another rule, but with_hello_match should be absent.
# with_foo_match should not be affected.
response = self.client.get(
self.list_url,
{
ExcludeMatchedRuleFilter.parameter_name: rule_hello.pk,
WithVersionFilter.parameter_name: 'all',
},
)
assert response.status_code == 200
doc = pq(response.content)
assert doc('#result_list tbody > tr').length == 2
expected_ids = [
with_foo_match.pk,
with_bar_and_hello_matches.pk,
]
ids = list(map(int, doc('#result_list .field-id').text().split(' ')))
assert ids == expected_ids
# Exclude 'foo'. with_bar_and_hello_matches and with_hello_match should
# still be present.
response = self.client.get(
self.list_url,
{
ExcludeMatchedRuleFilter.parameter_name: rule_foo.pk,
WithVersionFilter.parameter_name: 'all',
},
)
assert response.status_code == 200
doc = pq(response.content)
assert doc('#result_list tbody > tr').length == 2
expected_ids = [
with_hello_match.pk,
with_bar_and_hello_matches.pk,
]
ids = list(map(int, doc('#result_list .field-id').text().split(' ')))
assert ids == expected_ids
def test_list_default(self):
# Create one entry without matches, it will not be shown by default
ScannerResult.objects.create(
scanner=YARA,
version=version_factory(addon=addon_factory()),
)
# Create one entry with matches, it will be shown by default
rule = ScannerRule.objects.create(name='some-rule', scanner=YARA)
with_matches = ScannerResult(
scanner=YARA,
version=version_factory(addon=addon_factory()),
)
with_matches.add_yara_result(rule=rule.name)
with_matches.save()
# Create a false positive, it will not be shown by default
false_positive = ScannerResult(
scanner=YARA,
state=FALSE_POSITIVE,
version=version_factory(addon=addon_factory()),
)
false_positive.add_yara_result(rule=rule.name)
false_positive.save()
# Create an entry without a version, it will not be shown by default
without_version = ScannerResult(scanner=YARA)
without_version.add_yara_result(rule=rule.name)
without_version.save()
response = self.client.get(self.list_url)
assert response.status_code == 200
html = pq(response.content)
assert html('#result_list tbody > tr').length == 1
def test_list_can_show_all_entries(self):
# Create one entry without matches
ScannerResult.objects.create(scanner=YARA)
# Create one entry with matches
rule = ScannerRule.objects.create(name='some-rule', scanner=YARA)
with_matches = ScannerResult(scanner=YARA)
with_matches.add_yara_result(rule=rule.name)
with_matches.save()
# Create a false positive
false_positive = ScannerResult(scanner=YARA, state=FALSE_POSITIVE)
false_positive.add_yara_result(rule=rule.name)
false_positive.save()
# Create an entry without a version
without_version = ScannerResult(scanner=YARA)
without_version.add_yara_result(rule=rule.name)
without_version.save()
response = self.client.get(
self.list_url,
{
MatchesFilter.parameter_name: 'all',
StateFilter.parameter_name: 'all',
WithVersionFilter.parameter_name: 'all',
},
)
assert response.status_code == 200
html = pq(response.content)
expected_length = ScannerResult.objects.count()
assert html('#result_list tbody > tr').length == expected_length
def test_handle_true_positive(self):
# Create one entry with matches
rule = ScannerRule.objects.create(name='some-rule', scanner=YARA)
result = ScannerResult(scanner=YARA)
result.add_yara_result(rule=rule.name)
result.save()
assert result.state == UNKNOWN
response = self.client.post(
reverse(
'admin:scanners_scannerresult_handletruepositive',
args=[result.pk],
),
follow=True,
)
result.refresh_from_db()
assert result.state == TRUE_POSITIVE
# The action should send a redirect.
last_url, status_code = response.redirect_chain[-1]
assert status_code == 302
# The action should redirect to the list view and the default list
# filters should hide the result (because its state is not UNKNOWN
# anymore).
html = pq(response.content)
assert html('#result_list tbody > tr').length == 0
# A confirmation message should also appear.
assert html('.messagelist .info').length == 1
def test_handle_true_positive_uses_referer_if_available(self):
# Create one entry with matches
rule = ScannerRule.objects.create(name='some-rule', scanner=YARA)
result = ScannerResult(scanner=YARA)
result.add_yara_result(rule=rule.name)
result.save()
assert result.state == UNKNOWN
referer = f'{settings.SITE_URL}/en-US/firefox/previous/page'
response = self.client.post(
reverse(
'admin:scanners_scannerresult_handletruepositive',
args=[result.pk],
),
follow=True,
HTTP_REFERER=referer,
)
last_url, status_code = response.redirect_chain[-1]
assert last_url == referer
def test_handle_true_positive_with_invalid_referer(self):
# Create one entry with matches
rule = ScannerRule.objects.create(name='some-rule', scanner=YARA)
result = ScannerResult(scanner=YARA)
result.add_yara_result(rule=rule.name)
result.save()
assert result.state == UNKNOWN
referer = 'http://example.org/en-US/firefox/previous/page'
response = self.client.post(
reverse(
'admin:scanners_scannerresult_handletruepositive',
args=[result.pk],
),
follow=True,
HTTP_REFERER=referer,
)
last_url, status_code = response.redirect_chain[-1]
assert last_url == reverse('admin:scanners_scannerresult_changelist')
def test_handle_yara_false_positive(self):
# Create one entry with matches
rule = ScannerRule.objects.create(name='some-rule', scanner=YARA)
result = ScannerResult(scanner=YARA)
result.add_yara_result(rule=rule.name)
result.save()
assert result.state == UNKNOWN
response = self.client.post(
reverse(
'admin:scanners_scannerresult_handlefalsepositive',
args=[result.pk],
),
follow=True,
)
result.refresh_from_db()
assert result.state == FALSE_POSITIVE
# The action should send a redirect.
last_url, status_code = response.redirect_chain[-1]
assert status_code == 302
# The action should redirect to the list view and the default list
# filters should hide the result (because its state is not UNKNOWN
# anymore).
html = pq(response.content)
assert html('#result_list tbody > tr').length == 0
# A confirmation message should also appear.
assert html('.messagelist .info').length == 1
def test_handle_yara_false_positive_uses_referer_if_available(self):
# Create one entry with matches
rule = ScannerRule.objects.create(name='some-rule', scanner=YARA)
result = ScannerResult(scanner=YARA)
result.add_yara_result(rule=rule.name)
result.save()
assert result.state == UNKNOWN
referer = f'{settings.SITE_URL}/en-US/firefox/previous/page'
response = self.client.post(
reverse(
'admin:scanners_scannerresult_handlefalsepositive',
args=[result.pk],
),
follow=True,
HTTP_REFERER=referer,
)
last_url, status_code = response.redirect_chain[-1]
assert last_url == referer
def test_handle_yara_false_positive_with_invalid_referer(self):
# Create one entry with matches
rule = ScannerRule.objects.create(name='some-rule', scanner=YARA)
result = ScannerResult(scanner=YARA)
result.add_yara_result(rule=rule.name)
result.save()
assert result.state == UNKNOWN
referer = 'http://example.org/en-US/firefox/previous/page'
response = self.client.post(
reverse(
'admin:scanners_scannerresult_handlefalsepositive',
args=[result.pk],
),
follow=True,
HTTP_REFERER=referer,
)
last_url, status_code = response.redirect_chain[-1]
assert last_url == reverse('admin:scanners_scannerresult_changelist')
@override_settings(CUSTOMS_GIT_REPOSITORY='git/repo')
def test_handle_customs_false_positive(self):
# Create one entry with matches
rule = ScannerRule.objects.create(name='some-rule', scanner=CUSTOMS)
result = ScannerResult(scanner=CUSTOMS, results={'matchedRules': [rule.name]})
result.save()
assert result.state == UNKNOWN
response = self.client.post(
reverse(
'admin:scanners_scannerresult_handlefalsepositive',
args=[result.pk],
)
)
result.refresh_from_db()
assert result.state == FALSE_POSITIVE
# We create a GitHub issue draft by passing some query parameters to
# GitHub.
assert response['Location'].startswith(
'https://github.com/git/repo/issues/new?'
)
assert (
urlencode(
{
'title': 'False positive report for '
'ScannerResult {}'.format(result.pk)
}
)
in response['Location']
)
assert urlencode({'body': '### Report'}) in response['Location']
assert urlencode({'labels': 'false positive report'}) in response['Location']
assert 'Raw+scanner+results' not in response['Location']
def test_handle_revert_report(self):
# Create one entry with matches
rule = ScannerRule.objects.create(name='some-rule', scanner=YARA)
result = ScannerResult(
scanner=YARA, version=version_factory(addon=addon_factory())
)
result.add_yara_result(rule=rule.name)
result.state = TRUE_POSITIVE
result.save()
assert result.state == TRUE_POSITIVE
response = self.client.post(
reverse('admin:scanners_scannerresult_handlerevert', args=[result.pk]),
follow=True,
)
result.refresh_from_db()
assert result.state == UNKNOWN
# The action should send a redirect.
last_url, status_code = response.redirect_chain[-1]
assert status_code == 302
# The action should redirect to the list view and the default list
# filters should show the result (because its state is UNKNOWN again).
html = pq(response.content)
assert html('#result_list tbody > tr').length == 1
# A confirmation message should also appear.
assert html('.messagelist .info').length == 1
def test_handle_revert_report_uses_referer_if_available(self):
# Create one entry with matches
rule = ScannerRule.objects.create(name='some-rule', scanner=YARA)
result = ScannerResult(
scanner=YARA, version=version_factory(addon=addon_factory())
)
result.add_yara_result(rule=rule.name)
result.state = TRUE_POSITIVE
result.save()
assert result.state == TRUE_POSITIVE
referer = f'{settings.SITE_URL}/en-US/firefox/previous/page'
response = self.client.post(
reverse('admin:scanners_scannerresult_handlerevert', args=[result.pk]),
follow=True,
HTTP_REFERER=referer,
)
last_url, status_code = response.redirect_chain[-1]
assert last_url == referer
def test_handle_revert_with_invalid_referer(self):
# Create one entry with matches
rule = ScannerRule.objects.create(name='some-rule', scanner=YARA)
result = ScannerResult(
scanner=YARA, version=version_factory(addon=addon_factory())
)
result.add_yara_result(rule=rule.name)
result.state = TRUE_POSITIVE
result.save()
assert result.state == TRUE_POSITIVE
referer = 'http://example.org/en-US/firefox/previous/page'
response = self.client.post(
reverse('admin:scanners_scannerresult_handlerevert', args=[result.pk]),
follow=True,
HTTP_REFERER=referer,
)
last_url, status_code = response.redirect_chain[-1]
assert last_url == reverse('admin:scanners_scannerresult_changelist')
def test_handle_true_positive_and_non_admin_user(self):
result = ScannerResult(scanner=CUSTOMS)
user = user_factory(email='somebodyelse@mozilla.com')
self.grant_permission(user, 'Admin:ScannersResultsView')
self.client.login(email=user.email)
response = self.client.post(
reverse(
'admin:scanners_scannerresult_handletruepositive',
args=[result.pk],
)
)
assert response.status_code == 404
def test_handle_false_positive_and_non_admin_user(self):
result = ScannerResult(scanner=CUSTOMS)
user = user_factory(email='somebodyelse@mozilla.com')
self.grant_permission(user, 'Admin:ScannersResultsView')
self.client.login(email=user.email)
response = self.client.post(
reverse(
'admin:scanners_scannerresult_handlefalsepositive',
args=[result.pk],
)
)
assert response.status_code == 404
def test_handle_revert_report_and_non_admin_user(self):
result = ScannerResult(scanner=CUSTOMS)
user = user_factory(email='somebodyelse@mozilla.com')
self.grant_permission(user, 'Admin:ScannersResultsView')
self.client.login(email=user.email)
response = self.client.post(
reverse(
'admin:scanners_scannerresult_handlerevert',
args=[result.pk],
)
)
assert response.status_code == 404
def test_change_page(self):
upload = FileUpload.objects.create()
version = addon_factory().current_version
result = ScannerResult.objects.create(
scanner=YARA, upload=upload, version=version
)
url = reverse('admin:scanners_scannerresult_change', args=(result.pk,))
response = self.client.get(url)
assert response.status_code == 200
def test_handle_inconclusive(self):
# Create one entry with matches
rule = ScannerRule.objects.create(name='some-rule', scanner=YARA)
result = ScannerResult(scanner=YARA)
result.add_yara_result(rule=rule.name)
result.save()
assert result.state == UNKNOWN
response = self.client.post(
reverse(
'admin:scanners_scannerresult_handleinconclusive',
args=[result.pk],
),
follow=True,
)
result.refresh_from_db()
assert result.state == INCONCLUSIVE
html = pq(response.content)
assert html('#result_list tbody > tr').length == 0
# A confirmation message should also appear.
assert html('.messagelist .info').length == 1
def test_handle_inconclusive_uses_referer_if_available(self):
# Create one entry with matches
rule = ScannerRule.objects.create(name='some-rule', scanner=YARA)
result = ScannerResult(scanner=YARA)
result.add_yara_result(rule=rule.name)
result.save()
referer = f'{settings.SITE_URL}/en-US/firefox/previous/page'
response = self.client.post(
reverse(
'admin:scanners_scannerresult_handleinconclusive',
args=[result.pk],
),
follow=True,
HTTP_REFERER=referer,
)
last_url, status_code = response.redirect_chain[-1]
assert last_url == referer
def test_handle_inconclusive_with_invalid_referer(self):
# Create one entry with matches
rule = ScannerRule.objects.create(name='some-rule', scanner=YARA)
result = ScannerResult(scanner=YARA)
result.add_yara_result(rule=rule.name)
result.save()
referer = 'http://example.org/en-US/firefox/previous/page'
response = self.client.post(
reverse(
'admin:scanners_scannerresult_handleinconclusive',
args=[result.pk],
),
follow=True,
HTTP_REFERER=referer,
)
last_url, status_code = response.redirect_chain[-1]
assert last_url == reverse('admin:scanners_scannerresult_changelist')
class TestScannerRuleAdmin(TestCase):
def setUp(self):
super().setUp()
self.user = user_factory(email='someone@mozilla.com')
self.grant_permission(self.user, 'Admin:*')
self.client.login(email=self.user.email)
self.list_url = reverse('admin:scanners_scannerrule_changelist')
self.admin = ScannerRuleAdmin(model=ScannerRule, admin_site=AdminSite())
def test_list_view(self):
ScannerRule.objects.create(name='bar', scanner=YARA)
response = self.client.get(self.list_url)
assert response.status_code == 200
def test_list_view_is_restricted(self):
user = user_factory(email='curator@mozilla.com')
self.grant_permission(user, 'Admin:Curation')
self.client.login(email=user.email)
response = self.client.get(self.list_url)
assert response.status_code == 403
def test_change_view_contains_link_to_results(self):
rule = ScannerRule.objects.create(name='bar', scanner=YARA)
addon = addon_factory()
version = addon.current_version
result = ScannerResult(scanner=YARA, version=version)
result.add_yara_result(rule=rule.name)
result.save()
# Create another version that matches for the same add-on.
version = version_factory(addon=addon)
result = ScannerResult(scanner=YARA, version=version)
result.add_yara_result(rule=rule.name)
result.save()
# Create another add-on that has a matching version
addon = addon_factory()
result = ScannerResult(scanner=YARA, version=addon.current_version)
result.add_yara_result(rule=rule.name)
result.save()
# Create an extra result on the same add-on that doesn't match the rule
# we'll be looking at: it shouldn't affect anything.
ScannerResult.objects.create(scanner=YARA, version=version_factory(addon=addon))
url = reverse('admin:scanners_scannerrule_change', args=(rule.pk,))
response = self.client.get(url)
assert response.status_code == 200
doc = pq(response.content)
link = doc('.field-matched_results_link a')
assert link
results_list_url = reverse('admin:scanners_scannerresult_changelist')
expected_href = (
f'{results_list_url}?matched_rules__id__exact={rule.pk}'
f'&has_version=all&state=all'
)
assert link.attr('href') == expected_href
assert link.text() == '3 (2 add-ons)'
link_response = self.client.get(expected_href)
assert link_response.status_code == 200
def test_create_view_doesnt_contain_link_to_results(self):
url = reverse('admin:scanners_scannerrule_add')
response = self.client.get(url)
assert response.status_code == 200
doc = pq(response.content)
field = doc('.field-matched_results_link')
assert field
assert field.text() == 'Matched Results:\n-'
link = doc('.field-matched_results_link a')
assert not link
def test_get_fields(self):
request = RequestFactory().get('/')
request.user = self.user
assert 'definition' in self.admin.get_fields(request=request)
assert 'formatted_definition' not in self.admin.get_fields(request=request)
def test_get_fields_for_non_admins(self):
user = user_factory(email='somebodyelse@mozilla.com')
self.grant_permission(user, 'Admin:ScannersRulesView')
request = RequestFactory().get('/')
request.user = user
assert 'definition' not in self.admin.get_fields(request=request)
assert 'formatted_definition' in self.admin.get_fields(request=request)
def test_create_form_filters_list_of_scanners(self):
url = reverse('admin:scanners_scannerrule_add')
response = self.client.get(url)
select = pq(response.content)('#id_scanner')
assert len(select.children()) == 3
class TestScannerQueryRuleAdmin(AMOPaths, TestCase):
def setUp(self):
super().setUp()
self.user = user_factory(email='someone@mozilla.com')
self.grant_permission(self.user, 'Admin:ScannersQueryEdit')
self.client.login(email=self.user.email)
self.list_url = reverse('admin:scanners_scannerqueryrule_changelist')
def test_list_view(self):
ScannerQueryRule.objects.create(name='bar', scanner=YARA)
response = self.client.get(self.list_url)
assert response.status_code == 200
doc = pq(response.content)
classes = set(doc('body')[0].attrib['class'].split())
expected_classes = {
'app-scanners',
'model-scannerqueryrule',
'change-list',
}
assert classes == expected_classes
def test_list_view_viewer(self):
self.user.groupuser_set.all().delete()
self.grant_permission(self.user, 'Admin:ScannersQueryView')
ScannerQueryRule.objects.create(name='bar', scanner=YARA)
response = self.client.get(self.list_url)
assert response.status_code == 200
doc = pq(response.content)
classes = set(doc('body')[0].attrib['class'].split())
expected_classes = {
'app-scanners',
'model-scannerqueryrule',
'change-list',
'hide-action-buttons',
}
assert classes == expected_classes
def test_list_view_is_restricted(self):
user = user_factory(email='curator@mozilla.com')
self.grant_permission(user, 'Admin:Curation')
self.client.login(email=user.email)
response = self.client.get(self.list_url)
assert response.status_code == 403
def test_change_view_contains_link_to_results(self):
rule = ScannerQueryRule.objects.create(name='bar', scanner=YARA)
addon = addon_factory()
version = addon.current_version
result = ScannerQueryResult(scanner=YARA, version=version)
result.add_yara_result(rule=rule.name)
result.save()
# Create another version that matches for the same add-on.
version = version_factory(addon=addon)
result = ScannerQueryResult(scanner=YARA, version=version)
result.add_yara_result(rule=rule.name)
result.save()
# Create another add-on that has a matching version
addon = addon_factory()
result = ScannerQueryResult(scanner=YARA, version=addon.current_version)
result.add_yara_result(rule=rule.name)
result.save()
url = reverse('admin:scanners_scannerqueryrule_change', args=(rule.pk,))
response = self.client.get(url)
assert response.status_code == 200
doc = pq(response.content)
classes = set(doc('body')[0].attrib['class'].split())
expected_classes = {
'app-scanners',
'model-scannerqueryrule',
'change-form',
}
assert classes == expected_classes
link = doc('.field-matched_results_link a')
assert link
results_list_url = reverse('admin:scanners_scannerqueryresult_changelist')
expected_href = f'{results_list_url}?matched_rules__id__exact={rule.pk}'
assert link.attr('href') == expected_href
assert link.text() == '3 (2 add-ons)'
link_response = self.client.get(expected_href)
assert link_response.status_code == 200
def test_change_view_viewer(self):
self.user.groupuser_set.all().delete()
self.grant_permission(self.user, 'Admin:ScannersQueryView')
rule = ScannerQueryRule.objects.create(name='bar', scanner=YARA)
url = reverse('admin:scanners_scannerqueryrule_change', args=(rule.pk,))
response = self.client.get(url)
assert response.status_code == 200
doc = pq(response.content)
classes = set(doc('body')[0].attrib['class'].split())
expected_classes = {
'app-scanners',
'model-scannerqueryrule',
'change-form',
'hide-action-buttons',
}
assert classes == expected_classes
def test_create_view_doesnt_contain_link_to_results(self):
url = reverse('admin:scanners_scannerqueryrule_add')
response = self.client.get(url)
assert response.status_code == 200
doc = pq(response.content)
field = doc('.field-matched_results_link')
assert field
assert field.text() == 'Matched Results:\n-'
link = doc('.field-matched_results_link a')
assert not link
def test_run_button_in_list_view_for_new_rule(self):
rule = ScannerQueryRule.objects.create(name='bar', scanner=YARA, state=NEW)
response = self.client.get(self.list_url)
assert response.status_code == 200
doc = pq(response.content)
field = doc('.field-state_with_actions')
assert field
assert field.text() == 'New \xa0 Run'
url = reverse('admin:scanners_scannerqueryrule_handle_run', args=(rule.pk,))
button = field.find('button')[0]
assert button.attrib['formaction'] == url
def test_abort_button_in_list_view_for_running_rule(self):
rule = ScannerQueryRule.objects.create(name='bar', scanner=YARA, state=RUNNING)
response = self.client.get(self.list_url)
assert response.status_code == 200
doc = pq(response.content)
field = doc('.field-state_with_actions')
assert field
assert field.text() == 'Running \xa0 Abort'
url = reverse('admin:scanners_scannerqueryrule_handle_abort', args=(rule.pk,))
button = field.find('button')[0]
assert button.attrib['formaction'] == url
def test_no_button_for_completed_rule_query(self):
rule = ScannerQueryRule.objects.create(
name='bar',
scanner=YARA,
state=COMPLETED,
completed=datetime(2020, 9, 29, 14, 1, 2),
)
response = self.client.get(self.list_url)
assert response.status_code == 200
doc = pq(response.content)
field = doc('.field-state_with_actions')
assert field
assert field.text() == 'Completed (Sept. 29, 2020, 14:01)'
assert not field.find('button')
rule.update(completed=None) # If somehow None (unknown finished time)
response = self.client.get(self.list_url)
assert response.status_code == 200
doc = pq(response.content)
field = doc('.field-state_with_actions')
assert field
assert field.text() == 'Completed'
assert not field.find('button')
def test_button_in_change_view(self):
rule = ScannerQueryRule.objects.create(name='bar', scanner=YARA, state=RUNNING)
change_url = reverse('admin:scanners_scannerqueryrule_change', args=(rule.pk,))
response = self.client.get(change_url)
assert response.status_code == 200
doc = pq(response.content)
field = doc('.field-state_with_actions')
assert field
assert field.text() == 'State:\nRunning \xa0 Abort'
url = reverse('admin:scanners_scannerqueryrule_handle_abort', args=(rule.pk,))
button = field.find('button')[0]
assert button.attrib['formaction'] == url
def test_no_run_button_in_add_view(self):
add_url = reverse('admin:scanners_scannerqueryrule_add')
response = self.client.get(add_url)
assert response.status_code == 200
doc = pq(response.content)
field = doc('.field-state_with_actions')
assert field
assert field.text() == 'State:\nNew'
assert not field.find('button')
@mock.patch('olympia.scanners.admin.run_yara_query_rule.delay')
def test_run_action(self, run_yara_query_rule_mock):
rule = ScannerQueryRule.objects.create(name='bar', scanner=YARA, state=NEW)
response = self.client.post(
reverse(
'admin:scanners_scannerqueryrule_handle_run',
args=[rule.pk],
),
follow=True,
)
assert response.status_code == 200
assert response.redirect_chain == [(self.list_url, 302)]
assert run_yara_query_rule_mock.call_count == 1
assert run_yara_query_rule_mock.call_args[0] == (rule.pk,)
messages = list(response.context['messages'])
assert len(messages) == 1
assert f'Rule {rule.pk} has been successfully' in str(messages[0])
rule.reload()
assert rule.state == SCHEDULED
def test_run_action_functional(self):
version = addon_factory(file_kw={'is_webextension': True}).current_version
self.xpi_copy_over(version.all_files[0], 'webextension.xpi')
rule = ScannerQueryRule.objects.create(
name='always_true',
scanner=YARA,
state=NEW,
definition='rule always_true { condition: true }',
)
response = self.client.post(
reverse(
'admin:scanners_scannerqueryrule_handle_run',
args=[rule.pk],
),
follow=True,
)
assert response.status_code == 200
assert response.redirect_chain == [(self.list_url, 302)]
messages = list(response.context['messages'])
assert len(messages) == 1
assert f'Rule {rule.pk} has been successfully' in str(messages[0])
rule.reload()
# We're not mocking the task in this test so it's run in eager mode
# directly.
# We should have gone through SCHEDULED, RUNNING, and then COMPLETED.
assert rule.state == COMPLETED
# The rule should have been executed, it should have matched our
# version.
assert ScannerQueryResult.objects.count() == 1
assert ScannerQueryResult.objects.get().version == version
@mock.patch('olympia.scanners.admin.run_yara_query_rule.delay')
def test_run_action_wrong_state(self, run_yara_query_rule_mock):
rule = ScannerQueryRule.objects.create(name='bar', scanner=YARA, state=ABORTING)
response = self.client.post(
reverse(
'admin:scanners_scannerqueryrule_handle_run',
args=[rule.pk],
),
follow=True,
)
assert response.status_code == 200
assert response.redirect_chain == [(self.list_url, 302)]
assert run_yara_query_rule_mock.call_count == 0
messages = list(response.context['messages'])
assert len(messages) == 1
assert f'Rule {rule.pk} could not be queued' in str(messages[0])
rule.reload()
assert rule.state == ABORTING
def test_run_action_no_permission(self):
user = user_factory(email='somebodyelse@mozilla.com')
self.grant_permission(user, 'Admin:ScannersQueryView')
self.client.login(email=user.email)
rule = ScannerQueryRule.objects.create(name='bar', scanner=YARA, state=NEW)
response = self.client.post(
reverse(
'admin:scanners_scannerqueryrule_handle_run',
args=[rule.pk],
),
follow=True,
)
assert response.status_code == 404
def test_abort_action(self):
rule = ScannerQueryRule.objects.create(name='bar', scanner=YARA, state=RUNNING)
response = self.client.post(
reverse(
'admin:scanners_scannerqueryrule_handle_abort',
args=[rule.pk],
),
follow=True,
)
assert response.status_code == 200
assert response.redirect_chain == [(self.list_url, 302)]
messages = list(response.context['messages'])
assert len(messages) == 1
assert f'Rule {rule.pk} is being aborted' in str(messages[0])
rule.reload()
assert rule.state == ABORTING
def test_abort_action_wrong_state(self):
rule = ScannerQueryRule.objects.create(
name='bar', scanner=YARA, state=COMPLETED
)
response = self.client.post(
reverse(
'admin:scanners_scannerqueryrule_handle_abort',
args=[rule.pk],
),
follow=True,
)
assert response.status_code == 200
assert response.redirect_chain == [(self.list_url, 302)]
messages = list(response.context['messages'])
assert len(messages) == 1
assert f'Rule {rule.pk} could not be aborted' in str(messages[0])
assert f'was in "{rule.get_state_display()}" state' in str(messages[0])
rule.reload()
assert rule.state == COMPLETED
def test_abort_action_no_permission(self):
user = user_factory(email='somebodyelse@mozilla.com')
self.grant_permission(user, 'Admin:ScannersQueryView')
self.client.login(email=user.email)
rule = ScannerQueryRule.objects.create(name='bar', scanner=YARA, state=RUNNING)
response = self.client.post(
reverse(
'admin:scanners_scannerqueryrule_handle_abort',
args=[rule.pk],
),
follow=True,
)
assert response.status_code == 404
def test_cannot_change_non_new_query_rule(self):
rule = ScannerQueryRule.objects.create(name='bar', scanner=YARA)
url = reverse('admin:scanners_scannerqueryrule_change', args=(rule.pk,))
response = self.client.get(url)
assert response.status_code == 200
doc = pq(response.content)
# NEW query rule, it can be modified.
assert not doc('.field-formatted_definition .readonly')
# RUNNING query rule, it can not be modified
rule.update(state=RUNNING)
response = self.client.get(url)
assert response.status_code == 200
doc = pq(response.content)
assert doc('.field-formatted_definition .readonly')
class TestScannerQueryResultAdmin(TestCase):
def setUp(self):
super().setUp()
self.user = user_factory(email='someone@mozilla.com')
self.grant_permission(self.user, 'Admin:ScannersQueryEdit')
self.client.login(email=self.user.email)
self.list_url = reverse('admin:scanners_scannerqueryresult_changelist')
self.admin = ScannerQueryResultAdmin(
model=ScannerQueryResult, admin_site=AdminSite()
)
def test_list_view(self):
addon = addon_factory()
addon.authors.add(user_factory(email='foo@bar.com'))
addon.authors.add(user_factory(email='bar@foo.com'))
rule = ScannerQueryRule.objects.create(name='rule', scanner=YARA)
result = ScannerQueryResult.objects.create(
scanner=YARA, version=addon.current_version
)
result.add_yara_result(rule=rule.name)
result.save()
response = self.client.get(self.list_url)
assert response.status_code == 200
html = pq(response.content)
assert html('.field-addon_name').length == 1
authors = html('.field-authors a')
assert authors.length == 3
authors_links = list(
(a.text, a.attrib['href']) for a in html('.field-authors a')
)
# Last link should point to the addons model.
link_to_addons = authors_links.pop()
result = sorted(authors_links)
expected = sorted(
(
user.email,
'%s%s'
% (
settings.EXTERNAL_SITE_URL,
reverse('admin:users_userprofile_change', args=(user.pk,)),
),
)
for user in addon.authors.all()
)
assert result == expected
assert 'Other add-ons' in link_to_addons[0]
expected_querystring = '?authors__in={}'.format(
','.join(str(author.pk) for author in addon.authors.all())
)
assert expected_querystring in link_to_addons[1]
download_link = addon.current_version.current_file.get_absolute_url(
attachment=True
)
assert html('.field-download a')[0].attrib['href'] == download_link
assert '/icon-no.svg' in html('.field-is_file_signed img')[0].attrib['src']
addon.versions.all()[0].files.all()[0].update(is_signed=True)
response = self.client.get(self.list_url)
html = pq(response.content)
assert '/icon-yes.svg' in html('.field-is_file_signed img')[0].attrib['src']
def test_list_view_no_query_permissions(self):
rule = ScannerQueryRule.objects.create(name='rule', scanner=YARA)
result = ScannerQueryResult.objects.create(
scanner=YARA, version=addon_factory().current_version
)
result.add_yara_result(rule=rule.name)
result.save()
self.user = user_factory(email='somebodyelse@mozilla.com')
# Give the user permission to edit ScannersResults, but not
# ScannerQueryResults.
self.grant_permission(self.user, 'Admin:ScannersResultsEdit')
self.client.login(email=self.user.email)
response = self.client.get(self.list_url)
assert response.status_code == 403
def test_list_view_query_view_permission(self):
self.user = user_factory(email='somebodyelse@mozilla.com')
self.grant_permission(self.user, 'Admin:ScannersQueryView')
self.client.login(email=self.user.email)
self.test_list_view()
def test_list_filters(self):
rule_foo = ScannerQueryRule.objects.create(name='foo', scanner=YARA)
rule_bar = ScannerQueryRule.objects.create(name='bar', scanner=YARA)
response = self.client.get(self.list_url)
assert response.status_code == 200
doc = pq(response.content)
expected = [
('All', '?'),
('bar (yara)', f'?matched_rules__id__exact={rule_bar.pk}'),
('foo (yara)', f'?matched_rules__id__exact={rule_foo.pk}'),
('All', '?'),
('Unlisted', '?version__channel__exact=1'),
('Listed', '?version__channel__exact=2'),
('All', '?'),
('Incomplete', '?version__addon__status__exact=0'),
('Awaiting Review', '?version__addon__status__exact=3'),
('Approved', '?version__addon__status__exact=4'),
('Disabled by Mozilla', '?version__addon__status__exact=5'),
('Deleted', '?version__addon__status__exact=11'),
('All', '?'),
('Invisible', '?version__addon__disabled_by_user__exact=1'),
('Visible', '?version__addon__disabled_by_user__exact=0'),
('All', '?'),
('Awaiting Review', '?version__files__status__exact=1'),
('Approved', '?version__files__status__exact=4'),
('Disabled by Mozilla', '?version__files__status__exact=5'),
('All', '?'),
('Yes', '?version__files__is_signed__exact=1'),
('No', '?version__files__is_signed__exact=0'),
('All', '?'),
('Yes', '?was_blocked__exact=1'),
('No', '?was_blocked__exact=0'),
('Unknown', '?was_blocked__isnull=True'),
]
filters = [(x.text, x.attrib['href']) for x in doc('#changelist-filter a')]
assert filters == expected
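# Each filter below is exercised with objects on both sides of the filter, expecting exactly one matching row per query.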
def test_list_filter_matched_rules(self):
rule_foo = ScannerQueryRule.objects.create(name='foo', scanner=YARA)
rule_bar = ScannerQueryRule.objects.create(name='bar', scanner=YARA)
with_foo_match = ScannerQueryResult(scanner=YARA)
with_foo_match.add_yara_result(rule=rule_foo.name)
with_foo_match.save()
with_bar_matches = ScannerQueryResult(scanner=YARA)
with_bar_matches.add_yara_result(rule=rule_bar.name)
with_bar_matches.save()
response = self.client.get(
self.list_url,
{
'matched_rules__id__exact': rule_bar.pk,
},
)
assert response.status_code == 200
doc = pq(response.content)
assert doc('#result_list tbody > tr').length == 1
assert doc('.field-formatted_matched_rules').text() == 'bar (yara)'
def test_list_filter_channel(self):
addon = addon_factory()
ScannerQueryResult.objects.create(scanner=YARA, version=addon.versions.all()[0])
unlisted_addon = addon_factory(
version_kw={'channel': amo.RELEASE_CHANNEL_UNLISTED}, status=amo.STATUS_NULL
)
ScannerQueryResult.objects.create(
scanner=YARA, version=unlisted_addon.versions.all()[0]
)
response = self.client.get(
self.list_url,
{
'version__channel__exact': amo.RELEASE_CHANNEL_UNLISTED,
},
)
assert response.status_code == 200
doc = pq(response.content)
assert doc('#result_list tbody > tr').length == 1
assert doc('.field-guid').text() == unlisted_addon.guid
response = self.client.get(
self.list_url,
{
'version__channel__exact': amo.RELEASE_CHANNEL_LISTED,
},
)
assert response.status_code == 200
doc = pq(response.content)
assert doc('#result_list tbody > tr').length == 1
assert doc('.field-guid').text() == addon.guid
def test_list_filter_addon_status(self):
incomplete_addon = addon_factory(status=amo.STATUS_NULL)
ScannerQueryResult.objects.create(
scanner=YARA, version=incomplete_addon.versions.all()[0]
)
deleted_addon = addon_factory(status=amo.STATUS_DELETED)
ScannerQueryResult.objects.create(
scanner=YARA, version=deleted_addon.versions.all()[0]
)
response = self.client.get(
self.list_url,
{
'version__addon__status__exact': amo.STATUS_NULL,
},
)
assert response.status_code == 200
doc = pq(response.content)
assert doc('#result_list tbody > tr').length == 1
assert doc('.field-guid').text() == incomplete_addon.guid
response = self.client.get(
self.list_url,
{
'version__addon__status__exact': amo.STATUS_DELETED,
},
)
assert response.status_code == 200
doc = pq(response.content)
assert doc('#result_list tbody > tr').length == 1
assert doc('.field-guid').text() == deleted_addon.guid
def test_list_filter_addon_visibility(self):
visible_addon = addon_factory()
ScannerQueryResult.objects.create(
scanner=YARA, version=visible_addon.versions.all()[0]
)
invisible_addon = addon_factory(disabled_by_user=True)
ScannerQueryResult.objects.create(
scanner=YARA, version=invisible_addon.versions.all()[0]
)
response = self.client.get(
self.list_url,
{
'version__addon__disabled_by_user__exact': '1',
},
)
assert response.status_code == 200
doc = pq(response.content)
assert doc('#result_list tbody > tr').length == 1
assert doc('.field-guid').text() == invisible_addon.guid
response = self.client.get(
self.list_url,
{
'version__addon__disabled_by_user__exact': '0',
},
)
assert response.status_code == 200
doc = pq(response.content)
assert doc('#result_list tbody > tr').length == 1
assert doc('.field-guid').text() == visible_addon.guid
def test_list_filter_file_status(self):
addon_disabled_file = addon_factory()
disabled_file_version = version_factory(
addon=addon_disabled_file, file_kw={'status': amo.STATUS_DISABLED}
)
ScannerQueryResult.objects.create(scanner=YARA, version=disabled_file_version)
addon_approved_file = addon_factory()
ScannerQueryResult.objects.create(
scanner=YARA, version=addon_approved_file.versions.all()[0]
)
response = self.client.get(
self.list_url,
{
'version__files__status': '5',
},
)
assert response.status_code == 200
doc = pq(response.content)
assert doc('#result_list tbody > tr').length == 1
assert doc('.field-guid').text() == addon_disabled_file.guid
response = self.client.get(
self.list_url,
{
'version__files__status': '4',
},
)
assert response.status_code == 200
doc = pq(response.content)
assert doc('#result_list tbody > tr').length == 1
assert doc('.field-guid').text() == addon_approved_file.guid
def test_list_filter_file_is_signed(self):
signed_addon = addon_factory(file_kw={'is_signed': True})
ScannerQueryResult.objects.create(
scanner=YARA, version=signed_addon.versions.all()[0]
)
unsigned_addon = addon_factory(file_kw={'is_signed': False})
ScannerQueryResult.objects.create(
scanner=YARA, version=unsigned_addon.versions.all()[0]
)
response = self.client.get(
self.list_url,
{
'version__files__is_signed': '1',
},
)
assert response.status_code == 200
doc = pq(response.content)
assert doc('#result_list tbody > tr').length == 1
assert doc('.field-guid').text() == signed_addon.guid
response = self.client.get(
self.list_url,
{
'version__files__is_signed': '0',
},
)
assert response.status_code == 200
doc = pq(response.content)
assert doc('#result_list tbody > tr').length == 1
assert doc('.field-guid').text() == unsigned_addon.guid
def test_list_filter_was_blocked(self):
was_blocked_addon = addon_factory()
was_blocked_unknown_addon = addon_factory()
was_blocked_false_addon = addon_factory()
ScannerQueryResult.objects.create(
scanner=YARA, version=was_blocked_addon.current_version, was_blocked=True
)
ScannerQueryResult.objects.create(
scanner=YARA,
version=was_blocked_unknown_addon.current_version,
was_blocked=None,
)
ScannerQueryResult.objects.create(
scanner=YARA,
version=was_blocked_false_addon.current_version,
was_blocked=False,
)
response = self.client.get(
self.list_url,
{
'was_blocked__exact': '1',
},
)
assert response.status_code == 200
doc = pq(response.content)
assert doc('#result_list tbody > tr').length == 1
assert doc('.field-guid').text() == was_blocked_addon.guid
response = self.client.get(
self.list_url,
{
'was_blocked__exact': '0',
},
)
assert response.status_code == 200
doc = pq(response.content)
assert doc('#result_list tbody > tr').length == 1
assert doc('.field-guid').text() == was_blocked_false_addon.guid
response = self.client.get(
self.list_url,
{
'was_blocked__isnull': 'True',
},
)
assert response.status_code == 200
doc = pq(response.content)
assert doc('#result_list tbody > tr').length == 1
assert doc('.field-guid').text() == was_blocked_unknown_addon.guid
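# The change page should link each matched rule to its rule admin change page.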
def test_change_page(self):
rule = ScannerQueryRule.objects.create(name='darule', scanner=YARA)
result = ScannerQueryResult.objects.create(
scanner=YARA, version=addon_factory().current_version
)
result.add_yara_result(rule=rule.name)
result.save()
url = reverse('admin:scanners_scannerqueryresult_change', args=(result.pk,))
response = self.client.get(url)
assert response.status_code == 200
rule_url = reverse('admin:scanners_scannerqueryrule_change', args=(rule.pk,))
doc = pq(response.content)
link = doc('.field-formatted_matched_rules_with_files td a')
assert link.text() == 'darule ???'
assert link.attr('href') == rule_url
link_response = self.client.get(rule_url)
assert link_response.status_code == 200
def test_change_view_no_query_permissions(self):
self.user = user_factory(email='somebodyelse@mozilla.com')
# Give the user permission to edit ScannersResults, but not
# ScannerQueryResults.
self.grant_permission(self.user, 'Admin:ScannersResultsEdit')
self.client.login(email=self.user.email)
rule = ScannerQueryRule.objects.create(name='darule', scanner=YARA)
result = ScannerQueryResult.objects.create(
scanner=YARA, version=addon_factory().current_version
)
result.add_yara_result(rule=rule.name)
result.save()
url = reverse('admin:scanners_scannerqueryresult_change', args=(result.pk,))
response = self.client.get(url)
assert response.status_code == 403
def test_change_view_query_view_permission(self):
self.user = user_factory(email='somebodyelse@mozilla.com')
self.grant_permission(self.user, 'Admin:ScannersQueryView')
self.client.login(email=self.user.email)
self.test_change_page()
def test_formatted_matched_rules_with_files(self):
version = addon_factory().current_version
result = ScannerQueryResult.objects.create(scanner=YARA, version=version)
rule = ScannerQueryRule.objects.create(name='bar', scanner=YARA)
filename = 'some/file.js'
result.add_yara_result(rule=rule.name, meta={'filename': filename})
result.save()
rule_url = reverse('admin:scanners_scannerqueryrule_change', args=(rule.pk,))
file_id = version.all_files[0].id
assert file_id is not None
expect_file_item = code_manager_url(
'browse', version.addon.pk, version.pk, file=filename
)
content = self.admin.formatted_matched_rules_with_files(result)
assert expect_file_item in content
assert rule_url in content
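# Matched filenames in the changelist should deep-link into the code-manager file browser.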
def test_matching_filenames_in_changelist(self):
rule = ScannerQueryRule.objects.create(
name='foo', scanner=YARA, created=self.days_ago(2)
)
result1 = ScannerQueryResult.objects.create(
scanner=YARA, version=addon_factory().current_version
)
result1.add_yara_result(
rule=rule.name, meta={'filename': 'some/file/somewhere.js'}
)
result1.add_yara_result(
rule=rule.name, meta={'filename': 'another/file/somewhereelse.js'}
)
result1.save()
result2 = ScannerQueryResult.objects.create(
scanner=YARA,
version=addon_factory().current_version,
created=self.days_ago(1),
)
result2.add_yara_result(
rule=rule.name, meta={'filename': 'a/file/from/another_addon.js'}
)
result2.save()
response = self.client.get(self.list_url)
assert response.status_code == 200
doc = pq(response.content)
links = doc('.field-matching_filenames a')
assert len(links) == 3
expected = [
code_manager_url(
'browse',
result1.version.addon.pk,
result1.version.pk,
file='some/file/somewhere.js',
),
code_manager_url(
'browse',
result1.version.addon.pk,
result1.version.pk,
file='another/file/somewhereelse.js',
),
code_manager_url(
'browse',
result2.version.addon.pk,
result2.version.pk,
file='a/file/from/another_addon.js',
),
]
assert [link.attrib['href'] for link in links] == expected
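# _is_safe_url should require https on secure requests and only allow known domains.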
class TestIsSafeUrl(TestCase):
def test_enforces_https_when_request_is_secure(self):
request = RequestFactory().get('/', secure=True)
assert _is_safe_url(f'https://{settings.DOMAIN}', request)
assert not _is_safe_url(f'http://{settings.DOMAIN}', request)
def test_does_not_require_https_when_request_is_not_secure(self):
request = RequestFactory().get('/', secure=False)
assert _is_safe_url(f'https://{settings.DOMAIN}', request)
assert _is_safe_url(f'http://{settings.DOMAIN}', request)
def test_allows_domain(self):
request = RequestFactory().get('/', secure=True)
assert _is_safe_url(f'https://{settings.DOMAIN}/foo', request)
assert not _is_safe_url('https://not-olympia.dev', request)
def test_allows_external_site_url(self):
request = RequestFactory().get('/', secure=True)
external_domain = urlparse(settings.EXTERNAL_SITE_URL).netloc
assert _is_safe_url(f'https://{external_domain}/foo', request)
| 39.359419 | 88 | 0.625998 | 7,924 | 70,414 | 5.347804 | 0.058809 | 0.027516 | 0.033557 | 0.033982 | 0.829219 | 0.787096 | 0.75472 | 0.729847 | 0.704715 | 0.680598 | 0 | 0.007904 | 0.265118 | 70,414 | 1,788 | 89 | 39.381432 | 0.811019 | 0.047547 | 0 | 0.595254 | 0 | 0 | 0.141388 | 0.082569 | 0 | 0 | 0 | 0 | 0.176005 | 1 | 0.063942 | false | 0 | 0.013184 | 0 | 0.080422 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 6 |
b99bc95c61ca5c94991b957dda1159da1143d4aa | 210 | py | Python | advertising/admin.py | PURNA-ROCK/pythondigest | ba21758a25a47de19800b208c420f16d6688a16b | [
"MIT"
] | 124 | 2015-08-17T19:41:16.000Z | 2022-01-12T00:25:52.000Z | advertising/admin.py | PURNA-ROCK/pythondigest | ba21758a25a47de19800b208c420f16d6688a16b | [
"MIT"
] | 62 | 2015-08-17T02:13:20.000Z | 2020-04-17T19:07:40.000Z | advertising/admin.py | PURNA-ROCK/pythondigest | ba21758a25a47de19800b208c420f16d6688a16b | [
"MIT"
] | 73 | 2015-08-18T13:50:47.000Z | 2021-09-27T14:09:47.000Z | from django.contrib import admin
from .models import AdPage, AdType, AdAlign, Advertising
admin.site.register(AdAlign)
admin.site.register(AdPage)
admin.site.register(AdType)
admin.site.register(Advertising)
| 23.333333 | 56 | 0.819048 | 28 | 210 | 6.142857 | 0.428571 | 0.209302 | 0.395349 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.080952 | 210 | 8 | 57 | 26.25 | 0.891192 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 0.333333 | 0 | 0.333333 | 0 | 1 | 0 | 0 | null | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 0 | 0 | 0 | 6 |
b9c1262d8c4e4ce39aa3086b6125eff446d384c4 | 8,635 | py | Python | test/sysl/test_epa.py | anz-rfc/sysl | a145361fb17f17f36c483128a2eac42d08232870 | [
"Apache-2.0"
] | 2 | 2021-11-12T03:18:18.000Z | 2021-11-12T14:51:05.000Z | test/sysl/test_epa.py | anz-rfc/sysl | a145361fb17f17f36c483128a2eac42d08232870 | [
"Apache-2.0"
] | null | null | null | test/sysl/test_epa.py | anz-rfc/sysl | a145361fb17f17f36c483128a2eac42d08232870 | [
"Apache-2.0"
] | 1 | 2020-02-18T21:50:52.000Z | 2020-02-18T21:50:52.000Z | # -*- coding: utf-8 -*-
from sysl.core import syslloader, syslints
from sysl.util import debug
import unittest
import re
import traceback
import argparse as ap
import tempfile
from os import path
class TestEpa(unittest.TestCase):
def setUp(self):
self.outpath = tempfile.gettempdir()
def integration_view_helper(self, modulename, d):
(module, deps, _) = syslloader.load(modulename, True, '.')
args = ap.Namespace(**d)
if not args.exclude and args.project:
args.exclude = {args.project}
return syslints.integration_views(module, deps, args)
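# Each test below renders an integration (or EPA) view and asserts on the generated PlantUML source.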
def test_ints(self):
try:
d = {
'project': 'Test EPA :: Integrations',
'exclude': '',
'output': path.join(self.outpath, 'test_ints-ints.png'),
'plantuml': '',
'clustered': '',
'title': 'Test EPA',
'epa': False,
'filter': None,
'verbose': ''}
out = self.integration_view_helper('/test/data/test_epa', d)
self.assertTrue('_0 --> _1' in out[0])
except Exception:
self.fail(traceback.format_exc())
def test_epa(self):
try:
d = {
'project': 'Test EPA :: Integrations',
'exclude': '',
'output': path.join(self.outpath, 'test_epa-ints.png'),
'plantuml': '',
'clustered': '',
'epa': True,
'filter': None,
'title': 'Test EPA',
'verbose': ''}
out = self.integration_view_helper('/test/data/test_epa', d)
self.assertTrue(re.search('_0 -.*> _1', out[0]))
self.assertTrue(re.search('_1 -.*> _2', out[0]))
except Exception:
self.fail(traceback.format_exc())
def test_epa_repeated_calls(self):
try:
d = {
'project': 'Test EPA :: Integrations',
'exclude': '',
'output': path.join(self.outpath, 'test_epa_repeated_calls-ints.png'),
'plantuml': '',
'clustered': '',
'epa': True,
'filter': None,
'title': 'Test EPA Repeated Calls',
'verbose': ''}
out = self.integration_view_helper(
'/test/data/test_epa_repeated_calls', d)
self.assertTrue(
'state "**App1 Input Method 1 client**" as _2' in out[0])
self.assertTrue('state "**App1 Input Method 1**" as _3' in out[0])
self.assertTrue(re.search('_1 -.*> _2', out[0]))
self.assertTrue(re.search('_2 -.*> _3', out[0]))
except Exception:
self.fail(traceback.format_exc())
def test_int_repeated_calls(self):
try:
d = {
'project': 'Test EPA :: Integrations',
'exclude': '',
'output': path.join(self.outpath, 'test_int_repeated_calls-ints.png'),
'plantuml': '',
'clustered': '',
'epa': False,
'filter': None,
'title': 'Test EPA Repeated Calls',
'verbose': ''}
out = self.integration_view_helper(
'/test/data/test_epa_repeated_calls', d)
self.assertTrue('_0 --> _1' in out[0])
self.assertFalse('_1 --> _3' in out[0])
self.assertFalse('_2 --> _3' in out[0])
except Exception:
self.fail(traceback.format_exc())
def test_ignore_keyword(self):
try:
d = {
'project': 'Test EPA :: Integrations',
'exclude': '',
'output': path.join(self.outpath, 'test_epa_ignore_keyword-ints.png'),
'plantuml': '',
'clustered': '',
'epa': True,
'filter': None,
'title': 'Test EPA Ignore Keyword',
'verbose': ''}
out = self.integration_view_helper(
'/test/data/test_epa_ignore_keyword', d)
self.assertFalse('state "**.. * <- ***"' in out[0])
except Exception:
self.fail(traceback.format_exc())
def test_labels(self):
try:
d = {
'project': 'Test EPA :: Integrations',
'exclude': '',
'output': path.join(self.outpath, 'test_epa_labels-ints.png'),
'plantuml': '',
'clustered': '',
'epa': True,
'filter': None,
'title': 'Test EPA Labels',
'verbose': ''}
out = self.integration_view_helper(
'/test/data/test_epa_ignore_keyword', d)
self.assertTrue('**«INT-001»**' in out[0])
except Exception:
self.fail(traceback.format_exc())
def test_labels_for_events(self):
try:
d = {
'project': 'Test EPA :: Events',
'exclude': '',
'output': path.join(self.outpath, 'test_epa_labels_for_events-ints.png'),
'plantuml': '',
'clustered': '',
'epa': True,
'filter': None,
'title': 'Test EPA Labels',
'verbose': ''}
out = self.integration_view_helper(
'/test/data/test_epa_labels_for_events', d)
self.assertTrue('**«INT-001»**' in out[0])
except Exception:
self.fail(traceback.format_exc())
def test_patterns(self):
try:
d = {
'project': 'Test EPA :: Integrations',
'exclude': '',
'output': path.join(self.outpath, 'test_epa_patterns-ints.png'),
'plantuml': '',
'clustered': '',
'epa': True,
'filter': None,
'title': 'Test EPA Patterns',
'verbose': ''}
out = self.integration_view_helper(
'/test/data/test_epa_patterns', d)
self.assertTrue('** <color green> → soap</color>**' in out[0])
except Exception:
self.fail(traceback.format_exc())
def test_missing_patterns(self):
try:
d = {
'project': 'Test EPA :: Integrations',
'exclude': '',
'output': path.join(self.outpath, 'test_epa_missing_patterns-ints.png'),
'plantuml': '',
'clustered': '',
'epa': True,
'filter': None,
'title': 'Test EPA Patterns',
'verbose': ''}
out = self.integration_view_helper(
'/test/data/test_epa_missing_patterns', d)
self.assertTrue('** <color red>pattern?</color>**' in out[0])
except Exception:
self.fail(traceback.format_exc())
def test_missing_labels(self):
try:
d = {
'project': 'Test EPA :: Integrations',
'exclude': '',
'output': path.join(self.outpath, 'test_epa_missing_labels-ints.png'),
'plantuml': '',
'clustered': '',
'epa': True,
'filter': None,
'title': 'Test EPA Patterns',
'verbose': ''}
out = self.integration_view_helper(
'/test/data/test_epa_missing_labels', d)
self.assertTrue('<color red>(missing INT)</color>' in out[0])
except Exception:
self.fail(traceback.format_exc())
def test_int_passthrough(self):
try:
d = {
'project': 'Test EPA :: Passthrough',
'exclude': '',
'output': path.join(self.outpath, 'test_epa_passthrough-ints.png'),
'plantuml': '',
'clustered': '',
'epa': False,
'title': 'Test EPA Passthrough',
'verbose': '',
'filter': ''}
self.integration_view_helper(
'/test/data/test_epa_passthrough', d)
except Exception:
self.fail(traceback.format_exc())
if __name__ == '__main__':
debug.init()
unittest.main()
| 29.982639 | 89 | 0.475159 | 829 | 8,635 | 4.787696 | 0.129071 | 0.077601 | 0.063492 | 0.041572 | 0.813051 | 0.774754 | 0.748803 | 0.72134 | 0.708995 | 0.686823 | 0 | 0.008438 | 0.382397 | 8,635 | 287 | 90 | 30.087108 | 0.734858 | 0.002432 | 0 | 0.690141 | 0 | 0 | 0.237111 | 0.06967 | 0 | 0 | 0 | 0 | 0.075117 | 1 | 0.061033 | false | 0.023474 | 0.046948 | 0 | 0.117371 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 6 |
b9faae2079e6af94d5cba8234424f934bc61e109 | 4,608 | py | Python | pandapower/test/opf/test_costs_mixed.py | junmuz/pandapower | 06dac12afb5725332ec497c2eda239d178e4882b | [
"BSD-3-Clause"
] | 104 | 2017-02-21T17:13:51.000Z | 2022-03-21T13:52:27.000Z | pandapower/test/opf/test_costs_mixed.py | lvzhibai/pandapower | 24ed3056558887cc89f67d15b5527523990ae9a1 | [
"BSD-3-Clause"
] | 126 | 2017-02-15T17:09:08.000Z | 2018-07-16T13:25:15.000Z | pandapower/test/opf/test_costs_mixed.py | lvzhibai/pandapower | 24ed3056558887cc89f67d15b5527523990ae9a1 | [
"BSD-3-Clause"
] | 57 | 2017-03-08T13:49:32.000Z | 2022-02-28T10:36:55.000Z | # -*- coding: utf-8 -*-
# Copyright (c) 2016-2021 by University of Kassel and Fraunhofer Institute for Energy Economics
# and Energy System Technology (IEE), Kassel. All rights reserved.
import numpy as np
import pytest
import pandapower as pp
try:
import pplog as logging
except ImportError:
import logging
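# Each test builds a minimal two-bus network (ext_grid, gen, load, one line), attaches cost functions and runs the OPF.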
def test_cost_mixed():
""" Testing a very simple network for the resulting cost value
constraints with OPF """
vm_max = 1.05
vm_min = 0.95
# create net
net = pp.create_empty_network()
pp.create_bus(net, max_vm_pu=vm_max, min_vm_pu=vm_min, vn_kv=10.)
pp.create_bus(net, max_vm_pu=vm_max, min_vm_pu=vm_min, vn_kv=.4)
pp.create_gen(net, 1, p_mw=-0.1, controllable=True, min_p_mw=0.005, max_p_mw=0.15, max_q_mvar=.05,
min_q_mvar=-.05)
pp.create_ext_grid(net, 0)
pp.create_load(net, 1, p_mw=0.02, controllable=False, max_q_mvar=.05, max_p_mw=0.1, min_p_mw=0.0050,
min_q_mvar=-.05)
pp.create_line_from_parameters(net, 0, 1, 50, name="line2", r_ohm_per_km=0.876,
c_nf_per_km=260.0, max_i_ka=0.123, x_ohm_per_km=0.1159876,
max_loading_percent=100 * 690)
# testing some combinations
pp.create_poly_cost(net, 0, "gen", cp1_eur_per_mw=1)
pp.runopp(net)
assert net["OPF_converged"]
assert np.isclose(net.res_cost, net.res_gen.p_mw.values[0])
net.poly_cost.cp1_eur_per_mw.at[0] = 0
net.poly_cost.cp2_eur_per_mw2.at[0] = 1
pp.runopp(net)
assert net["OPF_converged"]
assert np.isclose(net.res_cost, net.res_gen.p_mw.values**2)
net.poly_cost.cp0_eur.at[0] = 1
pp.runopp(net)
assert net["OPF_converged"]
assert np.isclose(net.res_cost, net.res_gen.p_mw.values**2 + 1)
net.load.controllable.at[0] = True
pp.runopp(net)
assert np.isclose(net.res_cost, net.res_gen.p_mw.values ** 2 + 1)
net.load.controllable.at[0] = False
net.pwl_cost.drop(net.pwl_cost.index, inplace=True)
pp.create_pwl_cost(net, 0, "ext_grid", [[-1000, 0, -2000], [0, 1000, 2000]], power_type="p")
net.poly_cost.cp1_eur_per_mw.at[0] = 1000
net.poly_cost.cp2_eur_per_mw2.at[0] = 0
pp.runopp(net)
assert np.isclose(net.res_ext_grid.p_mw.values[0], 0, atol=1e-4)
assert np.isclose(net.res_cost, net.res_gen.p_mw.values[0]*1000, atol=1e-3)
def test_mixed_p_q_pol():
vm_max = 1.05
vm_min = 0.95
# create net
net = pp.create_empty_network()
pp.create_bus(net, max_vm_pu=vm_max, min_vm_pu=vm_min, vn_kv=10.)
pp.create_bus(net, max_vm_pu=vm_max, min_vm_pu=vm_min, vn_kv=.4)
pp.create_gen(net, 1, p_mw=0.1, controllable=True, min_p_mw=0.005, max_p_mw=0.15, max_q_mvar=.05,
min_q_mvar=-.05)
pp.create_ext_grid(net, 0)
pp.create_load(net, 1, p_mw=0.02, controllable=False, max_q_mvar=.05, max_p_mw=0.1, min_p_mw=0.0050,
min_q_mvar=-.05)
pp.create_line_from_parameters(net, 0, 1, 50, name="line2", r_ohm_per_km=0.876,
c_nf_per_km=260.0, max_i_ka=0.123, x_ohm_per_km=0.1159876,
max_loading_percent=100 * 690)
# testing some combinations
pp.create_poly_cost(net, 0, "gen", cp1_eur_per_mw=1, cq1_eur_per_mvar=1)
pp.runopp(net)
assert net["OPF_converged"]
assert np.isclose(net.res_cost, (net.res_gen.p_mw.values + net.res_gen.q_mvar.values))
def test_mixed_p_q_pwl():
vm_max = 1.05
vm_min = 0.95
# create net
net = pp.create_empty_network()
pp.create_bus(net, max_vm_pu=vm_max, min_vm_pu=vm_min, vn_kv=10.)
pp.create_bus(net, max_vm_pu=vm_max, min_vm_pu=vm_min, vn_kv=.4)
pp.create_gen(net, 1, p_mw=0.1, controllable=True, min_p_mw=0.005, max_p_mw=0.15, max_q_mvar=.05,
min_q_mvar=-.05)
pp.create_ext_grid(net, 0)
pp.create_load(net, 1, p_mw=0.02, controllable=False, max_q_mvar=.05, max_p_mw=0.1, min_p_mw=0.005,
min_q_mvar=-.05)
pp.create_line_from_parameters(net, 0, 1, 50, name="line2", r_ohm_per_km=0.876,
c_nf_per_km=260.0, max_i_ka=0.123, x_ohm_per_km=0.1159876,
max_loading_percent=100 * 690)
# testing some combinations
pp.create_pwl_cost(net, 0, "gen", [[-150, 150, 1]])
pp.create_pwl_cost(net, 0, "gen", [[-150, 150, 1]], power_type="q")
pp.runopp(net)
assert net["OPF_converged"]
assert np.allclose(net.res_cost, net.res_gen.p_mw.values + net.res_gen.q_mvar.values)
if __name__ == "__main__":
pytest.main([__file__, "-xs"])
| 38.4 | 104 | 0.653212 | 848 | 4,608 | 3.229953 | 0.162736 | 0.07594 | 0.026287 | 0.043447 | 0.807229 | 0.795911 | 0.788974 | 0.788974 | 0.773275 | 0.722526 | 0 | 0.082554 | 0.211372 | 4,608 | 119 | 105 | 38.722689 | 0.671161 | 0.080946 | 0 | 0.607143 | 0 | 0 | 0.026803 | 0 | 0 | 0 | 0 | 0 | 0.154762 | 1 | 0.035714 | false | 0 | 0.071429 | 0 | 0.107143 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 6 |
6a14adc65a3c2ad9f4b5b9cbfd04364374fc4c32 | 27 | py | Python | pocket/_init_.py | Dmitrii388444/python_lesson_5 | da6f9640b149ccece65ce751ea6de4bfcc186658 | [
"MIT"
] | null | null | null | pocket/_init_.py | Dmitrii388444/python_lesson_5 | da6f9640b149ccece65ce751ea6de4bfcc186658 | [
"MIT"
] | null | null | null | pocket/_init_.py | Dmitrii388444/python_lesson_5 | da6f9640b149ccece65ce751ea6de4bfcc186658 | [
"MIT"
] | null | null | null | from .math_op import my_add | 27 | 27 | 0.851852 | 6 | 27 | 3.5 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.111111 | 27 | 1 | 27 | 27 | 0.875 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 1 | 1 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 6 |
6a26025536d4de518954c5599562ece5f2d41118 | 54 | py | Python | urlfinder/__init__.py | lis-space/flood-test | 77a19c3c268627d6842fa9beda6e67ac7875c728 | [
"MIT"
] | null | null | null | urlfinder/__init__.py | lis-space/flood-test | 77a19c3c268627d6842fa9beda6e67ac7875c728 | [
"MIT"
] | null | null | null | urlfinder/__init__.py | lis-space/flood-test | 77a19c3c268627d6842fa9beda6e67ac7875c728 | [
"MIT"
] | 1 | 2019-10-21T07:13:58.000Z | 2019-10-21T07:13:58.000Z | from .find import find
from .find_alt import find_alt
| 18 | 30 | 0.814815 | 10 | 54 | 4.2 | 0.4 | 0.380952 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.148148 | 54 | 2 | 31 | 27 | 0.913043 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | null | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 6 |
dbfa38efb461b8c61c3647a5d0a22b2bf95538a4 | 10,228 | py | Python | tests/test_basic.py | Ahuge/sept | da898e0a81a16ef1b0b5a0d0d13655b77c4a2aab | [
"MIT"
] | 5 | 2021-01-04T17:59:45.000Z | 2021-03-26T14:58:56.000Z | tests/test_basic.py | Ahuge/sept | da898e0a81a16ef1b0b5a0d0d13655b77c4a2aab | [
"MIT"
] | 7 | 2021-01-04T17:13:33.000Z | 2022-03-07T19:59:27.000Z | tests/test_basic.py | Ahuge/sept | da898e0a81a16ef1b0b5a0d0d13655b77c4a2aab | [
"MIT"
] | null | null | null | import pytest
from sept.parser import PathTemplateParser
from sept.balancer import ParenthesisBalancer
from sept.errors import (
ParsingError,
OperatorNotFoundError,
OpeningBalancingParenthesisError,
ClosingBalancingParenthesisError,
MultipleBalancingError,
)
state_data = {
"name": "AhUgHeS",
"first_name": "Alex",
"last_name": "Hughes",
"data_with_space": "This is a sentence",
"deep": {
"nested": {
"data": {
"githubUsername": "Ahuge",
}
}
},
}
parser = PathTemplateParser()
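# Shared parser for the default Tokens/Operators; tests that register custom ones build their own parser.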
def test_lower():
template_str = r"{{lower:name}}"
template_obj = parser.validate_template(template_str)
resolved_path = template_obj.resolve(state_data)
assert resolved_path == "ahughes"
def test_upper():
template_str = r"{{upper:name}}"
template_obj = parser.validate_template(template_str)
resolved_path = template_obj.resolve(state_data)
assert resolved_path == "AHUGHES"
def test_replace():
template_str = r"{{replace[AhUgHeS,Bobby]: name}}"
template_obj = parser.validate_template(template_str)
resolved_path = template_obj.resolve(state_data)
assert resolved_path == "Bobby"
def test_substr():
template_str = r"{{substr[0,2]:name}}"
template_obj = parser.validate_template(template_str)
resolved_path = template_obj.resolve(state_data)
assert resolved_path == "Ah"
def test_substr_keyword_start():
template_str = r"{{substr[start,2]:name}}"
template_obj = parser.validate_template(template_str)
resolved_path = template_obj.resolve(state_data)
assert resolved_path == "Ah"
def test_substr_keyword_end():
template_str = r"{{substr[1,end]:name}}"
template_obj = parser.validate_template(template_str)
resolved_path = template_obj.resolve(state_data)
assert resolved_path == "hUgHeS"
def test_null_operator():
template_str = r"{{name}}"
template_obj = parser.validate_template(template_str)
resolved_path = template_obj.resolve(state_data)
assert resolved_path == "AhUgHeS"
def test_replace_keyword_space():
template_str = r"{{replace[\s,-]: data_with_space}}"
template_obj = parser.validate_template(template_str)
resolved_path = template_obj.resolve(state_data)
assert resolved_path == "This-is-a-sentence"
def test_lower_substr_nested():
template_str = r"{{lower:{{substr[1,end]:name}}}}"
template_obj = parser.validate_template(template_str)
resolved_path = template_obj.resolve(state_data)
assert resolved_path == "hughes"
def test_add_custom_operator():
from sept import Operator
class SoupOperator(Operator):
name = "soup"
def is_invalid(self, token_value):
return None
def execute(self, input_data):
return "tomato soup"
template_str = r"{{soup:name}}"
custom_parser = PathTemplateParser(additional_operators=[SoupOperator])
template_obj = custom_parser.validate_template(template_str)
resolved_path = template_obj.resolve(state_data)
assert resolved_path == "tomato soup"
def test_add_custom_token():
from sept import Token
class GithubUsernameToken(Token):
name = "githubusername"
def getValue(self, data):
return (
data.get("deep", {}).get("nested", {}).get("data").get("githubUsername")
)
template_str = r"{{lower:githubUsername}}"
custom_parser = PathTemplateParser(additional_tokens=[GithubUsernameToken])
template_obj = custom_parser.validate_template(template_str)
resolved_path = template_obj.resolve(state_data)
assert resolved_path == "ahuge"
def test_add_custom_token_with_casing():
from sept import Token
class GithubUsernameToken(Token):
name = "githubUsername"
def getValue(self, data):
return (
data.get("deep", {}).get("nested", {}).get("data").get("githubUsername")
)
template_str = r"{{lower:githubUsername}}"
custom_parser = PathTemplateParser(additional_tokens=[GithubUsernameToken])
template_obj = custom_parser.validate_template(template_str)
resolved_path = template_obj.resolve(state_data)
assert resolved_path == "ahuge"
def test_incorrect_token_name_casing():
template_str = r"{{lower:Name}}"
template_obj = parser.validate_template(template_str)
resolved_path = template_obj.resolve(state_data)
assert resolved_path == "ahughes"
def test_token_length_subset():
template_str = r"{{lower:name}}/{{upper:name}}"
template_obj = parser.validate_template(template_str)
resolved_path = template_obj.resolve(state_data)
assert resolved_path == "ahughes/AHUGHES"
def test_token_length_subset_leading_space():
template_str = r"{{lower: name}}/{{upper:name}}"
template_obj = parser.validate_template(template_str)
resolved_path = template_obj.resolve(state_data)
assert resolved_path == "ahughes/AHUGHES"
def test_incorrect_token_name_spacing():
template_str = r"{{lower: name }}"
template_obj = parser.validate_template(template_str)
resolved_path = template_obj.resolve(state_data)
assert resolved_path == "ahughes"
def test_keeps_external_spacing():
template_str = r"My username is {{lower: name }}"
template_obj = parser.validate_template(template_str)
resolved_path = template_obj.resolve(state_data)
assert resolved_path == "My username is ahughes"
def test_bad_parsing_error():
template_str = r"{{lower:name}"
try:
parser.validate_template(template_str)
except ParsingError as err:
assert (
str(err)
== 'Error: Missing closing "}}" characters for Token Expression "{{lower:name}" (0-12)'
)
else:
raise AssertionError("Should have raised a ParsingError!")
def test_bad_parsing_error_multi_expression():
template_str = r"{{lower:name}}{{upper:name}"
try:
parser.validate_template(template_str)
except ParsingError as err:
assert (
str(err)
== 'Error: Missing closing "}}" characters for Token Expression "{{upper:name}" (14-26)'
)
else:
raise AssertionError("Should have raised a ParsingError!")
def test_bad_parsing_error_multi_expression_start():
template_str = r"{{lower:name}{{upper:name}}"
try:
parser.validate_template(template_str)
except ParsingError as err:
assert (
str(err)
== 'Error: Missing closing "}}" characters for Token Expression "{{lower:name}{{upper:name}}" (0-12)'
)
else:
raise AssertionError("Should have raised a ParsingError!")
def test_balancer_basic():
template_str = r"{{lower:name}}"
token_expression_locations, errors = ParenthesisBalancer.parse_string(template_str)
assert errors == []
assert len(token_expression_locations) == 1
assert token_expression_locations[0] == (0, len(template_str) - 1)
def test_balancer_double():
template_str = r"{{lower:name}}{{lower:name}}"
token_expression_locations, errors = ParenthesisBalancer.parse_string(template_str)
assert errors == []
assert len(token_expression_locations) == 2
assert token_expression_locations[0] == (0, 13)
assert token_expression_locations[1] == (14, len(template_str) - 1)
def test_balancer_double_leading():
template_str = r"This is a {{lower:name}}{{lower:name}}"
token_expression_locations, errors = ParenthesisBalancer.parse_string(template_str)
assert errors == []
assert len(token_expression_locations) == 2
assert token_expression_locations[0] == (10, 23)
assert token_expression_locations[1] == (24, len(template_str) - 1)
def test_balancer_basic_missing_opener():
template_str = r"{lower:name}}"
token_expression_locations, errors = ParenthesisBalancer.parse_string(template_str)
assert len(errors) == 1
assert isinstance(errors[0], OpeningBalancingParenthesisError)
assert len(token_expression_locations) == 0
def test_balancer_double_missing_opener():
template_str = r"{{lower:name}}{lower:name}}"
token_expression_locations, errors = ParenthesisBalancer.parse_string(template_str)
assert len(errors) == 1
assert isinstance(errors[0], OpeningBalancingParenthesisError)
assert len(token_expression_locations) == 1
assert token_expression_locations[0] == (0, 13)
def test_balancer_basic_missing_closer():
template_str = r"{{lower:name}"
token_expression_locations, errors = ParenthesisBalancer.parse_string(template_str)
assert len(errors) == 1
assert isinstance(errors[0], ClosingBalancingParenthesisError)
assert len(token_expression_locations) == 0
def test_balancer_double_missing_closer():
template_str = r"{{lower:name}}{{lower:name}"
token_expression_locations, errors = ParenthesisBalancer.parse_string(template_str)
assert len(errors) == 1
assert isinstance(errors[0], ClosingBalancingParenthesisError)
assert len(token_expression_locations) == 1
assert token_expression_locations[0] == (0, 13)
def test_balancer_double_missing_closer_and_opener():
template_str = r"{lower:name}}{{lower:name}"
token_expression_locations, errors = ParenthesisBalancer.parse_string(template_str)
assert len(errors) == 2
assert isinstance(errors[0], OpeningBalancingParenthesisError)
assert isinstance(errors[1], ClosingBalancingParenthesisError)
assert len(token_expression_locations) == 0
def test_parse_partial_expression():
template_str = r"{lower:name}}/{{upper:name}}"
try:
_ = parser.validate_template(template_str)
except MultipleBalancingError:
pass
else:
raise AssertionError("Should have raised a MultipleBalancingError!")
def test_parse_raise_missing_operator():
template_str = r"{{lowerr:name}}/{{upper:name}}"
try:
_ = parser.validate_template(template_str)
except OperatorNotFoundError as err:
assert str(err) == "Could not find an Operator with the name lowerr"
else:
raise AssertionError("Should have raised an OperatorNotFoundError!")
if __name__ == "__main__":
pytest.main()
| 31.088146 | 113 | 0.706883 | 1,186 | 10,228 | 5.800169 | 0.114671 | 0.100741 | 0.052333 | 0.095944 | 0.816543 | 0.789359 | 0.765373 | 0.752871 | 0.742404 | 0.731938 | 0 | 0.007427 | 0.183809 | 10,228 | 328 | 114 | 31.182927 | 0.816603 | 0 | 0 | 0.527426 | 0 | 0.004219 | 0.152914 | 0.052112 | 0 | 0 | 0 | 0 | 0.232068 | 1 | 0.14346 | false | 0 | 0.029536 | 0.016878 | 0.219409 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 6 |
dbfb19d901ca0d384c84143390ee08782c9beba0 | 145 | py | Python | comvex/cait/__init__.py | shrenik-jain/ComVEX | 93622de3a4771cda13b14f8bba52990eb47c2409 | [
"Apache-2.0"
] | 29 | 2021-06-14T08:27:43.000Z | 2022-02-07T13:40:27.000Z | comvex/cait/__init__.py | shrenik-jain/ComVEX | 93622de3a4771cda13b14f8bba52990eb47c2409 | [
"Apache-2.0"
] | 3 | 2021-11-23T16:11:51.000Z | 2021-12-21T17:24:36.000Z | comvex/cait/__init__.py | shrenik-jain/ComVEX | 93622de3a4771cda13b14f8bba52990eb47c2409 | [
"Apache-2.0"
] | 3 | 2021-06-27T08:18:57.000Z | 2021-12-17T07:29:59.000Z | from .model import ClassAttention, ClassAttentionLayer, SelfAttentionLayer, CaiTBackbone, CaiTWithLinearClassifier
from .config import CaiTConfig | 72.5 | 114 | 0.882759 | 12 | 145 | 10.666667 | 0.833333 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.075862 | 145 | 2 | 115 | 72.5 | 0.955224 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 1 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 6 |
e0067d577e167bdc767cb14638678e2fbea2471d | 437 | py | Python | Day_58/today_details.py | kiranrraj/100Days_Of_Coding | ab75d83be9be87fb7bc83a3f3b72a4638dab22a1 | [
"MIT"
] | null | null | null | Day_58/today_details.py | kiranrraj/100Days_Of_Coding | ab75d83be9be87fb7bc83a3f3b72a4638dab22a1 | [
"MIT"
] | null | null | null | Day_58/today_details.py | kiranrraj/100Days_Of_Coding | ab75d83be9be87fb7bc83a3f3b72a4638dab22a1 | [
"MIT"
] | null | null | null | # Title : Print details about today
# Author : Kiran raj R.
# Date : 29:10:2020
import datetime
import time
print(f"Today is : { datetime.datetime.now().strftime('%y/%m/%d')}")
print(f"Day : {datetime.date.today().strftime('%A')}")
print(f"Name of month : {datetime.date.today().strftime('%B')}")
print(f"Day of the year : {datetime.date.today().strftime('%j')}")
print(f"Week of the year : {datetime.date.today().strftime('%W')}")
| 33.615385 | 68 | 0.659039 | 68 | 437 | 4.235294 | 0.5 | 0.104167 | 0.236111 | 0.347222 | 0.236111 | 0.236111 | 0.236111 | 0 | 0 | 0 | 0 | 0.020725 | 0.116705 | 437 | 12 | 69 | 36.416667 | 0.725389 | 0.173913 | 0 | 0 | 0 | 0 | 0.753501 | 0.551821 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 0.285714 | 0 | 0.285714 | 0.714286 | 0 | 0 | 0 | null | 0 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 1 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 1 | 0 | 6 |
e03fdf13d56b6ff4ef7117502fc2cd24de32d67b | 699 | bzl | Python | index.bzl | zaycev/rules_typescript_proto | c6ee53325bdcb251c71f2d14d28a25e9db73fd4e | [
"Apache-2.0"
] | 28 | 2019-10-21T15:39:27.000Z | 2022-03-16T16:15:03.000Z | index.bzl | zaycev/rules_typescript_proto | c6ee53325bdcb251c71f2d14d28a25e9db73fd4e | [
"Apache-2.0"
] | 40 | 2019-10-21T14:11:24.000Z | 2022-03-11T12:47:29.000Z | index.bzl | zaycev/rules_typescript_proto | c6ee53325bdcb251c71f2d14d28a25e9db73fd4e | [
"Apache-2.0"
] | 36 | 2019-11-05T20:21:14.000Z | 2022-03-23T18:34:56.000Z | load("//src:typescript_proto_library.bzl", _typescript_proto_library = "typescript_proto_library")
load("//src:typescript_grpc_node_library.bzl", _typescript_grpc_node_library = "typescript_grpc_node_library")
load("//src:typescript_grpc_web_library.bzl", _typescript_grpc_web_library = "typescript_grpc_web_library")
load("//src:rules_typescript_proto_dependencies.bzl", _rules_typescript_proto_dependencies = "rules_typescript_proto_dependencies")
rules_typescript_proto_dependencies = _rules_typescript_proto_dependencies
typescript_proto_library = _typescript_proto_library
typescript_grpc_node_library = _typescript_grpc_node_library
typescript_grpc_web_library = _typescript_grpc_web_library
| 69.9 | 131 | 0.88412 | 87 | 699 | 6.37931 | 0.126437 | 0.27027 | 0.227027 | 0.225225 | 0.818018 | 0.742342 | 0.596396 | 0.583784 | 0.230631 | 0.230631 | 0 | 0 | 0.041488 | 699 | 9 | 132 | 77.666667 | 0.828358 | 0 | 0 | 0 | 0 | 0 | 0.383405 | 0.383405 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 6 |
e049504daf336119b0085e9fe3e178d5ca6086a3 | 46 | py | Python | SVDD/dataset/__init__.py | SolidusAbi/SVDD-Python | ce2b834bf31cfdbbbebc08c8a1bac8c37b081d0e | [
"MIT"
] | null | null | null | SVDD/dataset/__init__.py | SolidusAbi/SVDD-Python | ce2b834bf31cfdbbbebc08c8a1bac8c37b081d0e | [
"MIT"
] | null | null | null | SVDD/dataset/__init__.py | SolidusAbi/SVDD-Python | ce2b834bf31cfdbbbebc08c8a1bac8c37b081d0e | [
"MIT"
] | null | null | null | #__init__.py
from .banana import BananaDataset | 23 | 33 | 0.847826 | 6 | 46 | 5.833333 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.086957 | 46 | 2 | 33 | 23 | 0.833333 | 0.23913 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 1 | 1 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 6 |
e04b81113947575faac7b667c2e0654a4f69437c | 3,138 | py | Python | test/api_test.py | Spferical/matrix-python-sdk | e200f0b811cdc8d87f9778395b372aa7d06e9beb | [
"Apache-2.0"
] | null | null | null | test/api_test.py | Spferical/matrix-python-sdk | e200f0b811cdc8d87f9778395b372aa7d06e9beb | [
"Apache-2.0"
] | null | null | null | test/api_test.py | Spferical/matrix-python-sdk | e200f0b811cdc8d87f9778395b372aa7d06e9beb | [
"Apache-2.0"
] | null | null | null | import responses
from matrix_client import client
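# Each test registers a mocked endpoint with `responses` and then inspects the captured HTTP request.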
class TestTagsApi:
cli = client.MatrixClient("http://example.com")
user_id = "@user:matrix.org"
room_id = "#foo:matrix.org"
@responses.activate
def test_get_user_tags(self):
tags_url = "http://example.com" \
"/_matrix/client/r0/user/@user:matrix.org/rooms/#foo:matrix.org/tags"
responses.add(responses.GET, tags_url, body='{}')
self.cli.api.get_user_tags(self.user_id, self.room_id)
req = responses.calls[0].request
assert req.url == tags_url
assert req.method == 'GET'
@responses.activate
def test_add_user_tags(self):
tags_url = "http://example.com" \
"/_matrix/client/r0/user/@user:matrix.org/rooms/#foo:matrix.org/tags/foo"
responses.add(responses.PUT, tags_url, body='{}')
self.cli.api.add_user_tag(self.user_id, self.room_id, "foo", body={"order": "5"})
req = responses.calls[0].request
assert req.url == tags_url
assert req.method == 'PUT'
@responses.activate
def test_remove_user_tags(self):
tags_url = "http://example.com" \
"/_matrix/client/r0/user/@user:matrix.org/rooms/#foo:matrix.org/tags/foo"
responses.add(responses.DELETE, tags_url, body='{}')
self.cli.api.remove_user_tag(self.user_id, self.room_id, "foo")
req = responses.calls[0].request
assert req.url == tags_url
assert req.method == 'DELETE'
class TestAccountDataApi:
cli = client.MatrixClient("http://example.com")
user_id = "@user:matrix.org"
room_id = "#foo:matrix.org"
@responses.activate
def test_set_account_data(self):
account_data_url = "http://example.com" \
"/_matrix/client/r0/user/@user:matrix.org/account_data/foo"
responses.add(responses.PUT, account_data_url, body='{}')
self.cli.api.set_account_data(self.user_id, 'foo', {'bar': 1})
req = responses.calls[0].request
assert req.url == account_data_url
assert req.method == 'PUT'
@responses.activate
def test_set_room_account_data(self):
account_data_url = "http://example.com/_matrix/client/r0/user" \
"/@user:matrix.org/rooms/#foo:matrix.org/account_data/foo"
responses.add(responses.PUT, account_data_url, body='{}')
self.cli.api.set_room_account_data(self.user_id, self.room_id, 'foo', {'bar': 1})
req = responses.calls[0].request
assert req.url == account_data_url
assert req.method == 'PUT'
class TestUnbanApi:
cli = client.MatrixClient("http://example.com")
user_id = "@user:matrix.org"
room_id = "#foo:matrix.org"
@responses.activate
def test_unban(self):
unban_url = "http://example.com" \
"/_matrix/client/r0/rooms/#foo:matrix.org/unban"
body = '{"user_id": "'+ self.user_id + '"}'
responses.add(responses.POST, unban_url, body=body)
self.cli.api.unban_user(self.room_id, self.user_id)
req = responses.calls[0].request
assert req.url == unban_url
assert req.method == 'POST'
| 39.225 | 89 | 0.637986 | 427 | 3,138 | 4.498829 | 0.117096 | 0.074961 | 0.065591 | 0.074961 | 0.809995 | 0.785528 | 0.743363 | 0.717855 | 0.717855 | 0.63873 | 0 | 0.006066 | 0.211918 | 3,138 | 79 | 90 | 39.721519 | 0.770724 | 0 | 0 | 0.521739 | 0 | 0.043478 | 0.228489 | 0.117272 | 0 | 0 | 0 | 0 | 0.173913 | 1 | 0.086957 | false | 0 | 0.028986 | 0 | 0.289855 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 6 |
160e6b208a88e9e2eb4e051b2f0a4211d29a2ef5 | 41 | py | Python | jinahub/encoders/text/FlairTextEncoder/__init__.py | vivek2301/executors | 8159681d68408ab8f797497bc3374be77e6ca392 | [
"Apache-2.0"
] | null | null | null | jinahub/encoders/text/FlairTextEncoder/__init__.py | vivek2301/executors | 8159681d68408ab8f797497bc3374be77e6ca392 | [
"Apache-2.0"
] | null | null | null | jinahub/encoders/text/FlairTextEncoder/__init__.py | vivek2301/executors | 8159681d68408ab8f797497bc3374be77e6ca392 | [
"Apache-2.0"
] | null | null | null | from .flair_text import FlairTextEncoder
| 20.5 | 40 | 0.878049 | 5 | 41 | 7 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.097561 | 41 | 1 | 41 | 41 | 0.945946 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 1 | 1 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 6 |
165314aba4b70a9f5490c6146b2dbf41a1113bcd | 155 | py | Python | arte/time_series/__init__.py | ArcetriAdaptiveOptics/arte | 3d21ae59ba6490be3f52c7957f259097bb42f511 | [
"MIT"
] | 1 | 2021-01-11T20:01:29.000Z | 2021-01-11T20:01:29.000Z | arte/time_series/__init__.py | ArcetriAdaptiveOptics/arte | 3d21ae59ba6490be3f52c7957f259097bb42f511 | [
"MIT"
] | 22 | 2020-04-15T15:48:14.000Z | 2021-07-09T07:57:37.000Z | arte/time_series/__init__.py | ArcetriAdaptiveOptics/arte | 3d21ae59ba6490be3f52c7957f259097bb42f511 | [
"MIT"
] | null | null | null |
from .indexer import Indexer, ModeIndexer
from .time_series import TimeSeries, TimeSeriesWithInterpolation
from .multi_time_series import MultiTimeSeries
| 31 | 64 | 0.870968 | 17 | 155 | 7.764706 | 0.588235 | 0.151515 | 0.242424 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.096774 | 155 | 4 | 65 | 38.75 | 0.942857 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | null | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 6 |
166504cc2de76b9a952ef3462c9bdfcf1256759e | 40 | py | Python | metromobilite/__init__.py | PierreBerger/metromobilite | b54cbd79ede9526d5739ffa13e819efbfec62aad | [
"MIT"
] | null | null | null | metromobilite/__init__.py | PierreBerger/metromobilite | b54cbd79ede9526d5739ffa13e819efbfec62aad | [
"MIT"
] | null | null | null | metromobilite/__init__.py | PierreBerger/metromobilite | b54cbd79ede9526d5739ffa13e819efbfec62aad | [
"MIT"
] | null | null | null | from .metromobilite import Metromobilite | 40 | 40 | 0.9 | 4 | 40 | 9 | 0.75 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.075 | 40 | 1 | 40 | 40 | 0.972973 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 1 | 1 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 6 |
16abbe7218ad54e0c2a9870f4a467b9b0db7cd5a | 158 | py | Python | criticalityMaps/criticality/__init__.py | pshassett/CriticalityMaps | 08b3cf7cada083e5bf32f75c52bdda5bd45742be | [
"MIT"
] | 6 | 2019-11-21T20:53:07.000Z | 2020-10-28T07:19:46.000Z | criticalityMaps/criticality/__init__.py | pshassett/criticalityMaps | 08b3cf7cada083e5bf32f75c52bdda5bd45742be | [
"MIT"
] | 3 | 2020-02-28T22:19:17.000Z | 2021-04-08T21:43:00.000Z | criticalityMaps/criticality/__init__.py | pshassett/CriticalityMaps | 08b3cf7cada083e5bf32f75c52bdda5bd45742be | [
"MIT"
] | 3 | 2020-01-21T17:29:02.000Z | 2021-04-08T16:02:59.000Z | from .core import fire_criticality_analysis, pipe_criticality_analysis, segment_criticality_analysis, process_criticality
from .mp_queue_tools import runner
| 52.666667 | 121 | 0.892405 | 20 | 158 | 6.6 | 0.65 | 0.431818 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.075949 | 158 | 2 | 122 | 79 | 0.90411 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | null | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 6 |
16baac399630dffaad3db0778c5a99f3020c4a80 | 32 | py | Python | metrics/__init__.py | ruixiangcui/implicit_parser | 741dd6eaaff42ab8fff390f7ab01f690e3b5d439 | [
"Apache-2.0"
] | 1 | 2020-07-18T13:40:06.000Z | 2020-07-18T13:40:06.000Z | metrics/__init__.py | ruixiangcui/implicit_parser | 741dd6eaaff42ab8fff390f7ab01f690e3b5d439 | [
"Apache-2.0"
] | null | null | null | metrics/__init__.py | ruixiangcui/implicit_parser | 741dd6eaaff42ab8fff390f7ab01f690e3b5d439 | [
"Apache-2.0"
] | 2 | 2020-05-28T13:16:39.000Z | 2022-02-15T01:58:03.000Z | from .mrp_score import MCESScore | 32 | 32 | 0.875 | 5 | 32 | 5.4 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.09375 | 32 | 1 | 32 | 32 | 0.931034 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 1 | 1 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 6 |
16efad70e2cf879b33d95b9c38e3b8b89b18e1f7 | 132 | py | Python | src/common/__init__.py | gabeorlanski/zero-shot-cross-task | a8bfd3c817c207e0f667978e23723676c6393d3d | [
"Apache-2.0"
] | null | null | null | src/common/__init__.py | gabeorlanski/zero-shot-cross-task | a8bfd3c817c207e0f667978e23723676c6393d3d | [
"Apache-2.0"
] | null | null | null | src/common/__init__.py | gabeorlanski/zero-shot-cross-task | a8bfd3c817c207e0f667978e23723676c6393d3d | [
"Apache-2.0"
] | null | null | null | from src.common.registrable import Registrable
from src.common.log_util import prepare_global_logging
from src.common.util import *
| 33 | 54 | 0.856061 | 20 | 132 | 5.5 | 0.5 | 0.190909 | 0.354545 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.090909 | 132 | 3 | 55 | 44 | 0.916667 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | null | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 6 |
16f08105b4a2775755c801e11eefb1db62213118 | 1,621 | py | Python | backend/edegal/migrations/0003_auto_20180217_1712.py | japsu/edegal2 | a3a327b19cc3d06a680f8f1175225bad8be7c5f1 | [
"MIT"
] | 1 | 2021-11-22T19:28:35.000Z | 2021-11-22T19:28:35.000Z | backend/edegal/migrations/0003_auto_20180217_1712.py | japsu/edegal2 | a3a327b19cc3d06a680f8f1175225bad8be7c5f1 | [
"MIT"
] | 26 | 2017-05-30T09:55:28.000Z | 2020-12-16T12:08:52.000Z | backend/edegal/migrations/0003_auto_20180217_1712.py | japsu/edegal2 | a3a327b19cc3d06a680f8f1175225bad8be7c5f1 | [
"MIT"
] | 3 | 2015-11-20T13:45:47.000Z | 2017-05-30T09:44:55.000Z | # -*- coding: utf-8 -*-
# Generated by Django 1.11 on 2018-02-17 15:12
from __future__ import unicode_literals
import django.core.validators
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('edegal', '0002_auto_20171005_1044'),
]
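# Translation of the Finnish help_text below: 'The technical name ("slug") appears in URLs.
# Allowed characters are lowercase letters, numbers and the hyphen. If left empty, it is
# generated automatically from the title; if changed after publication, remember to create
# the necessary redirects.' The validator message: 'The technical name may only contain
# lowercase letters, numbers and hyphens.'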
operations = [
migrations.AlterField(
model_name='album',
name='slug',
field=models.CharField(blank=True, help_text='Tekninen nimi eli "slug" näkyy URL-osoitteissa. Sallittuja merkkejä ovat pienet kirjaimet, numerot ja väliviiva. Jos jätät teknisen nimen tyhjäksi, se generoidaan automaattisesti otsikosta. Jos muutat teknistä nimeä julkaisun jälkeen, muista luoda tarvittavat uudelleenohjaukset.', max_length=255, validators=[django.core.validators.RegexValidator(message='Tekninen nimi saa sisältää vain pieniä kirjaimia, numeroita sekä väliviivoja.', regex='[a-z0-9-]+')], verbose_name='Tekninen nimi'),
),
migrations.AlterField(
model_name='picture',
name='slug',
field=models.CharField(blank=True, help_text='Tekninen nimi eli "slug" näkyy URL-osoitteissa. Sallittuja merkkejä ovat pienet kirjaimet, numerot ja väliviiva. Jos jätät teknisen nimen tyhjäksi, se generoidaan automaattisesti otsikosta. Jos muutat teknistä nimeä julkaisun jälkeen, muista luoda tarvittavat uudelleenohjaukset.', max_length=255, validators=[django.core.validators.RegexValidator(message='Tekninen nimi saa sisältää vain pieniä kirjaimia, numeroita sekä väliviivoja.', regex='[a-z0-9-]+')], verbose_name='Tekninen nimi'),
),
]
| 60.037037 | 547 | 0.732881 | 190 | 1,621 | 6.168421 | 0.5 | 0.061433 | 0.051195 | 0.049488 | 0.755973 | 0.755973 | 0.755973 | 0.755973 | 0.755973 | 0.755973 | 0 | 0.031157 | 0.168415 | 1,621 | 26 | 548 | 62.346154 | 0.838279 | 0.040716 | 0 | 0.421053 | 1 | 0.105263 | 0.519974 | 0.01482 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.157895 | 0 | 0.315789 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 6 |
bc410389c72e3b7d8fb8046ad63ea44b596e5505 | 4,492 | py | Python | test/test_word_service.py | Scandinaf/ll_free | 7d35dce5955f11e4af52400f961c76c9904c2f05 | [
"Apache-2.0"
] | null | null | null | test/test_word_service.py | Scandinaf/ll_free | 7d35dce5955f11e4af52400f961c76c9904c2f05 | [
"Apache-2.0"
] | null | null | null | test/test_word_service.py | Scandinaf/ll_free | 7d35dce5955f11e4af52400f961c76c9904c2f05 | [
"Apache-2.0"
] | null | null | null | import pytest
from mock import MagicMock
from service.word_service import *
async def __default_coroutine__(value=None):
return value
def __mock_objects__():
word_service.db_layer = MagicMock()
word_service.db_layer.word.save.return_value = __default_coroutine__()
word_service.producer = MagicMock()
word_service.producer.send_message.return_value = __default_coroutine__()
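# Build the service under test once at module level, with its DB layer and message producer mocked out.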
word_service = WordService(db_layer=None, producer=None)
__mock_objects__()
@pytest.mark.asyncio
async def test_update_word_correct():
word_service.db_layer.word.find_one_and_update.return_value = __default_coroutine__({"word": "bad"})
result = await word_service.update_word("""{"word" : "bad", "translation": "плохой"}""")
assert result == "Word was updated!!!"
@pytest.mark.asyncio
async def test_update_word_invalid_json():
result = await word_service.update_word("""{"translation": "плохой"}""")
assert isinstance(result, Error)
result = await word_service.update_word("""{"word_new": "bad"}""")
assert isinstance(result, Error)
result = await word_service.update_word([])
assert isinstance(result, Error)
result = await word_service.update_word(123)
assert isinstance(result, Error)
result = await word_service.update_word("just text")
assert isinstance(result, Error)
@pytest.mark.asyncio
async def test_update_word_not_found():
word_service.db_layer.word.find_one_and_update.return_value = __default_coroutine__()
result = await word_service.update_word("""{"word" : "bad", "translation": "плохой"}""")
assert isinstance(result, Error)
@pytest.mark.asyncio
async def test_get_word_not_found():
word_service.db_layer.word.find_one_by_word.return_value = __default_coroutine__()
result = await word_service.get_word("test")
assert isinstance(result, Error)
@pytest.mark.asyncio
async def test_get_word_correct():
word_dict = {'word': 'bad', 'translation': 'плохой', 'synonyms': ["poor", "bad"]}
word_service.db_layer.word.find_one_by_word.return_value = __default_coroutine__(word_dict)
result = await word_service.get_word("test")
assert isinstance(result, str)
@pytest.mark.asyncio
async def test_delete_word_not_found():
word_service.db_layer.word.find_one_and_delete.return_value = __default_coroutine__()
result = await word_service.delete_word("test")
assert isinstance(result, Error)
@pytest.mark.asyncio
async def test_delete_word_correct():
word_service.db_layer.word.find_one_and_delete.return_value = \
__default_coroutine__({"sound_record_path" : None})
result = await word_service.delete_word("test")
assert result == "Word was deleted!!!"
@pytest.mark.asyncio
async def test_not_valid_type():
result = await word_service.save_word(123)
assert isinstance(result, Error)
result = await word_service.save_word(1.0234)
assert isinstance(result, Error)
result = await word_service.save_word("test")
assert isinstance(result, Error)
result = await word_service.save_word([])
assert isinstance(result, Error)
@pytest.mark.asyncio
async def test_not_valid_json():
result = await word_service.save_word("{}")
assert isinstance(result, Error)
result = await word_service.save_word("""{"translation" : "Anyone who reads Old and Middle English literary texts"}""")
assert isinstance(result, Error)
result = await word_service.save_word("""{"word" : 123}""")
assert isinstance(result, Error)
result = await word_service.save_word("""{"synonyms" : 123}""")
assert isinstance(result, Error)
result = await word_service.save_word("""{"synonyms" : [1,2,3]}""")
assert isinstance(result, Error)
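# A word whose record already exists must also be rejected.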
word_service.db_layer.word.record_is_exists.return_value = __default_coroutine__(True)
result = await word_service.save_word("""{"word" : "bad", "translation": "плохой"}""")
assert isinstance(result, Error)
@pytest.mark.asyncio
async def test_required_fields():
result = await word_service.save_word("""{"word": "bad"}""")
assert isinstance(result, Error)
result = await word_service.save_word("""{"translation": "плохой"}""")
assert isinstance(result, Error)
@pytest.mark.asyncio
async def test_valid_json():
word_service.db_layer.word.record_is_exists.return_value = __default_coroutine__(False)
result = await word_service.save_word("""{"word" : "bad", "translation": "плохой"}""")
assert result == "Word was added!!!"
| 33.274074 | 123 | 0.731968 | 579 | 4,492 | 5.314335 | 0.138169 | 0.135847 | 0.116997 | 0.171596 | 0.839454 | 0.827104 | 0.783555 | 0.775431 | 0.687683 | 0.677283 | 0 | 0.005175 | 0.139581 | 4,492 | 134 | 124 | 33.522388 | 0.790944 | 0 | 0 | 0.423913 | 0 | 0 | 0.116874 | 0 | 0 | 0 | 0 | 0 | 0.26087 | 1 | 0.01087 | false | 0 | 0.032609 | 0 | 0.054348 | 0 | 0 | 0 | 0 | null | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 6 |
bc8bab59702d1885b599e07a4949ecf1c6cd625d | 334 | py | Python | keystone/logic/types/extension.py | admiyo/keystone | 9452cf04bc8b0a4dc66dc640615d5ace1ca715f2 | [
"Apache-2.0"
] | null | null | null | keystone/logic/types/extension.py | admiyo/keystone | 9452cf04bc8b0a4dc66dc640615d5ace1ca715f2 | [
"Apache-2.0"
] | null | null | null | keystone/logic/types/extension.py | admiyo/keystone | 9452cf04bc8b0a4dc66dc640615d5ace1ca715f2 | [
"Apache-2.0"
] | null | null | null | class Extensions(object):
"""An extensions type to hold static extensions content."""
def __init__(self, json_content, xml_content):
self.xml_content = xml_content
self.json_content = json_content
def to_json(self):
return self.json_content
def to_xml(self):
return self.xml_content
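# Usage sketch (illustrative values):
#   ext = Extensions(json_content='{"extensions": []}', xml_content='<extensions/>')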
| 25.692308 | 63 | 0.682635 | 44 | 334 | 4.863636 | 0.340909 | 0.205607 | 0.21028 | 0.196262 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.236527 | 334 | 12 | 64 | 27.833333 | 0.839216 | 0.158683 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.375 | false | 0 | 0 | 0.25 | 0.75 | 0 | 0 | 0 | 0 | null | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 1 | 1 | 0 | 0 | 6 |
4c00a0256f58dc1f9081647e6c51eb27d9964266 | 79 | py | Python | datasets/bored.py | arnavk/tumblr-emotions | 0ed03201ab833b8b400cb0cff6c5b064fac5edfb | [
"Apache-2.0"
] | null | null | null | datasets/bored.py | arnavk/tumblr-emotions | 0ed03201ab833b8b400cb0cff6c5b064fac5edfb | [
"Apache-2.0"
] | null | null | null | datasets/bored.py | arnavk/tumblr-emotions | 0ed03201ab833b8b400cb0cff6c5b064fac5edfb | [
"Apache-2.0"
] | null | null | null | import download_images
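# Presumably fetches images for posts tagged 'bored' (indices 0 through
# 209228) into the local 'data' directory; see the sibling download_images
# module for download_im's exact semantics.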
download_images.download_im('bored', 0, 209228, 'data')
| 26.333333 | 55 | 0.797468 | 11 | 79 | 5.454545 | 0.727273 | 0.466667 | 0.733333 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.09589 | 0.075949 | 79 | 2 | 56 | 39.5 | 0.726027 | 0 | 0 | 0 | 0 | 0 | 0.113924 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 0.5 | 0 | 0.5 | 0 | 1 | 0 | 0 | null | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 0 | 0 | 0 | 6 |
4c021ea4442d649a5f3d1eba975fe31ce27cdf85 | 32,525 | py | Python | tools/mo/unit_tests/extensions/back/add_outputs_recursive_test.py | ytorzuk-altran/openvino | 68d460a3bb578a738ba0e4d0e1f2e321afa73ab0 | [
"Apache-2.0"
] | 1 | 2021-02-01T06:35:55.000Z | 2021-02-01T06:35:55.000Z | tools/mo/unit_tests/extensions/back/add_outputs_recursive_test.py | ytorzuk-altran/openvino | 68d460a3bb578a738ba0e4d0e1f2e321afa73ab0 | [
"Apache-2.0"
] | 55 | 2020-11-16T09:55:29.000Z | 2022-03-28T13:18:15.000Z | tools/mo/unit_tests/extensions/back/add_outputs_recursive_test.py | ytorzuk-altran/openvino | 68d460a3bb578a738ba0e4d0e1f2e321afa73ab0 | [
"Apache-2.0"
] | 1 | 2021-02-15T01:13:57.000Z | 2021-02-15T01:13:57.000Z | # Copyright (C) 2021 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
import numpy as np
import unittest
from openvino.tools.mo.back.add_outputs_recursive import AddOutputRecursive
from openvino.tools.mo.ops.If import If
from openvino.tools.mo.ops.loop import Loop
from openvino.tools.mo.ops.tensor_iterator import TensorIterator
from openvino.tools.mo.front.common.partial_infer.elemental import copy_shape_infer
from openvino.tools.mo.front.common.partial_infer.utils import int64_array, dynamic_dimension_value, shape_array
from openvino.tools.mo.graph.graph import Node
from unit_tests.utils.graph import build_graph, regular_op_with_empty_data, result, connect, shaped_parameter, \
valued_const_with_data, shaped_const_with_data, regular_op_with_shaped_data
# test for Loop
main_graph_nodes = {
**shaped_parameter("IN_1", [1, 4, 64, 54]),
**shaped_parameter("IN_2", [1, 4, 64, 54]),
**valued_const_with_data("M", int64_array([5])),
**valued_const_with_data("cond", int64_array([1])),
**regular_op_with_empty_data("Loop", {'op': "Loop", 'type': 'Loop', 'sub_graphs': ['body'], "body": None,
'input_port_map': [{'external_port_id': 1, 'internal_layer_id': 2,
'axis': None},
{'external_port_id': 2, 'internal_layer_id': 0,
'axis': None},
{'external_port_id': 3, 'internal_layer_id': 1,
'axis': None}],
'output_port_map': [{'external_port_id': 0, 'internal_layer_id': 4,
'axis': None},
{'external_port_id': -1, 'internal_layer_id': 5,
'axis': None, 'purpose': "execution_condition"}],
'back_edges': [{'from_layer': 8, 'to_layer': 7},
{'from_layer': 10, 'to_layer': 9}],
'infer': Loop.infer}),
**result("OUT_1")
}
sub_graph_1_nodes = {
**shaped_parameter("IN_2", int64_array([1, 4, 64, 54]), {'internal_layer_id': 0}),
**valued_const_with_data("M_2", int64_array([10])),
**valued_const_with_data("cond_2", int64_array([1])),
**regular_op_with_empty_data("Loop_2", {'op': "Loop", 'type': 'Loop', 'sub_graphs': ['body'], "body": None,
'input_port_map': [{'external_port_id': 1, 'internal_layer_id': 0,
'axis': None},
{'external_port_id': 2, 'internal_layer_id': 2,
'axis': None}],
'output_port_map': [{'external_port_id': 0, 'internal_layer_id': 7,
'axis': None},
{'external_port_id': -1, 'internal_layer_id': 6,
'axis': None,
'purpose': "execution_condition"}],
'back_edges': [{'from_layer': 1, 'to_layer': 0},
{'from_layer': 8, 'to_layer': 2}],
'infer': Loop.infer}),
**regular_op_with_empty_data('Loop_2_out', {'op': 'Result', 'type': 'Result', 'infer': lambda x: None,
'internal_layer_id': 3}),
**shaped_parameter("in_1_int", int64_array([1, 4, 64, 54]), {'internal_layer_id': 1}),
**regular_op_with_empty_data("in_1_int_out",
{'op': 'Result', 'type': 'Result', 'infer': lambda x: None, 'internal_layer_id': 4}),
**shaped_parameter("cond_1_int", int64_array([1]), {'internal_layer_id': 2}),
**regular_op_with_empty_data("cond_1_int_out", {'op': 'Result', 'type': 'Result', 'infer': lambda x: None,
'internal_layer_id': 5}),
}
sub_graph_2_nodes = {
**shaped_parameter('cond_2_int', [1, 4, 64, 54], {'internal_layer_id': 0}),
**result("cond_2_int_out"),
**shaped_parameter('in_2_int', [1, 4, 64, 54], {'internal_layer_id': 1}),
**shaped_const_with_data('ones', int64_array([1, 4, 64, 54])),
**regular_op_with_shaped_data('OUT_2', int64_array([1, 4, 64, 54]), {'op': "Add", 'infer': copy_shape_infer}),
**regular_op_with_empty_data('OUT_2_out',
{'op': 'Result', 'type': 'Result', 'infer': lambda x: None, 'internal_layer_id': 7}),
**regular_op_with_shaped_data('in_2_int_out', int64_array([1, 4, 64, 54]),
{'op': 'Result', 'type': 'Result', 'infer': lambda x: None, 'internal_layer_id': 6})
}
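# Helper: builds the outer main graph around a Loop/TensorIterator node and
# attaches the given body sub-graph, wiring the external port ids explicitly.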
def ti_create_main_graph(body):
main_graph = build_graph(nodes_attrs=ti_main_graph_nodes,
edges=[*connect('M', '0:Loop'),
*connect('cond', '1:Loop'),
*connect('IN_2', '2:Loop'),
*connect('IN_1', "3:Loop"),
*connect('Loop:0', 'OUT_1')],
nodes_with_edges_only=True)
loop_node = Node(main_graph, 'Loop')
loop_node.body = body
loop_node.in_edge(0)['external_port_id'] = 0
loop_node.in_edge(1)['external_port_id'] = 1
loop_node.in_edge(2)['external_port_id'] = 2
loop_node.in_edge(3)['external_port_id'] = 3
loop_node.out_edge(0)['external_port_id'] = 4
return main_graph
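# Helper: builds the first-level If sub-graph whose inner If_2 node carries
# the then/else branches used by the If and path-splitting tests below.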
def if_create_main_graph():
sub_graph_2 = build_graph(nodes_attrs=if_sub_graph_2_then_nodes,
edges=[*connect('in_2_int', 'OUT_2'),
*connect('ones', 'OUT_2'),
*connect('OUT_2', 'OUT_2_out')],
nodes_with_edges_only=True)
sub_graph_2_else = build_graph(nodes_attrs=if_sub_graph_2_else_nodes,
edges=[*connect('in_2_int_else', 'OUT_2_else'),
*connect('ones_else', 'OUT_2_else'),
*connect('OUT_2_else', 'OUT_2_out_else')],
nodes_with_edges_only=True)
sub_graph_1 = build_graph(nodes_attrs=if_sub_graph_1_then_nodes,
edges=[*connect('cond_2', '0:If_2'),
*connect('IN_2', '1:If_2'),
*connect('If_2:0', 'If_2_out'),
*connect('in_1_int', 'in_1_int_out')],
nodes_with_edges_only=True)
if_node_1 = Node(sub_graph_1, 'If_2')
if_node_1.then_graph = sub_graph_2
if_node_1.else_graph = sub_graph_2_else
return sub_graph_1
class AddOutputRecursiveTest(unittest.TestCase):
def test_add_output_1(self):
sub_graph_2 = build_graph(nodes_attrs=sub_graph_2_nodes,
edges=[*connect('cond_2_int', 'cond_2_int_out'),
*connect('in_2_int', 'OUT_2'),
*connect('ones', 'OUT_2'),
*connect('OUT_2', 'OUT_2_out'),
*connect('in_2_int', 'in_2_int_out')],
nodes_with_edges_only=True)
sub_graph_1 = build_graph(nodes_attrs=sub_graph_1_nodes,
edges=[*connect('M_2', '0:Loop_2'),
*connect('cond_2', '1:Loop_2'),
*connect('IN_2', '2:Loop_2'),
*connect('Loop_2:0', 'Loop_2_out'),
*connect('in_1_int', 'in_1_int_out'),
*connect('cond_1_int', 'cond_1_int_out')],
nodes_with_edges_only=True)
loop_node_1 = Node(sub_graph_1, 'Loop_2')
loop_node_1.body = sub_graph_2
main_graph = build_graph(nodes_attrs=main_graph_nodes,
edges=[*connect('M', '0:Loop'),
*connect('cond', '1:Loop'),
*connect('IN_2', '2:Loop'),
*connect('IN_1', "3:Loop"),
*connect('Loop:0', 'OUT_1')],
nodes_with_edges_only=True)
loop_node = Node(main_graph, 'Loop')
loop_node.body = sub_graph_1
main_graph.graph['additional_outputs'] = ['Loop', 'Loop_2']
loop_node_output_port_map_len = len(loop_node.output_port_map)
loop_node_out_ports_len = len(loop_node.out_ports())
loop_2_out_ports_len = len(loop_node_1.out_ports())
max_layer_id = 5
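# Run the transformation under test: it should expose each node listed in
# 'additional_outputs' as a new Result, recursing into nested sub-graphs.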
AddOutputRecursive().find_and_replace_pattern(main_graph)
loop_node = Node(main_graph, 'Loop')
self.assertEqual(len(loop_node.output_port_map), loop_node_output_port_map_len + 1)
self.assertEqual(len(loop_node.out_ports()), loop_node_out_ports_len + 1)
self.assertEqual(loop_node.out_port(1).get_destination().node.op, 'Result')
self.assertTrue(np.all(loop_node.out_port(1).data.get_shape() == int64_array([5, 10, 4, 64, 54])))
last_node = Node(sub_graph_1, 'Loop_2')
self.assertEqual(len(last_node.out_ports()), loop_2_out_ports_len)
unsq_node = last_node.out_port(0).get_destinations()[1].node
self.assertEqual(unsq_node.op, 'Unsqueeze')
self.assertEqual(unsq_node.out_port(0).get_destination().node.op, 'Result')
self.assertEqual(unsq_node.out_port(0).get_destination().node.internal_layer_id, max_layer_id + 3)
self.assertTrue(np.all(unsq_node.out_port(0).data.get_shape() == int64_array([1, 10, 4, 64, 54])))
# test for TensorIterator
ti_main_graph_nodes = {
**shaped_parameter("IN_1", [1, 4, 64, 54]),
**shaped_parameter("IN_2", [1, 4, 64, 54]),
**valued_const_with_data("M", int64_array([5])),
**valued_const_with_data("cond", int64_array([1])),
**regular_op_with_empty_data("Loop", {'op': "TensorIterator", 'type': 'TensorIterator',
'sub_graphs': ['body'], "body": None,
'input_port_map': [{'external_port_id': 1, 'internal_layer_id': 2, 'axis': None},
{'external_port_id': 2, 'internal_layer_id': 0, 'axis': None},
{'external_port_id': 3, 'internal_layer_id': 1, 'axis': None}],
'output_port_map': [{'external_port_id': 4, 'internal_layer_id': 4, 'axis': None}],
'back_edges': [{'from_layer': 8, 'to_layer': 7},
{'from_layer': 10, 'to_layer': 9}],
'infer': TensorIterator.infer}),
**result("OUT_1")
}
ti_sub_graph_1_nodes = {
**shaped_parameter("IN_2", int64_array([1, 4, 64, 54]), {'internal_layer_id': 0}),
**valued_const_with_data("cond_2", int64_array([1])),
**regular_op_with_empty_data("Loop_2", {'op': "TensorIterator", 'type': 'TensorIterator',
'sub_graphs': ['body'], "body": None,
'input_port_map': [{'external_port_id': 1, 'internal_layer_id': 0, 'axis': None},
{'external_port_id': 0, 'internal_layer_id': 1, 'axis': 0}],
'output_port_map': [{'external_port_id': 2, 'internal_layer_id': 7,
'axis': None},
],
'back_edges': [{'from_layer': 1, 'to_layer': 0},
{'from_layer': 8, 'to_layer': 2}],
'infer': TensorIterator.infer}),
**regular_op_with_empty_data('Loop_2_out', {'op': 'Result', 'type': 'Result', 'infer': lambda x: None,
'internal_layer_id': 3}),
**shaped_parameter("in_1_int", int64_array([1, 4, 64, 54]), {'internal_layer_id': 1}),
**regular_op_with_empty_data("in_1_int_out", {'op': 'Result', 'type': 'Result', 'infer': lambda x: None,
'internal_layer_id': 4}),
**shaped_parameter("cond_1_int", int64_array([1]), {'internal_layer_id': 2}),
**regular_op_with_empty_data("cond_1_int_out", {'op': 'Result', 'type': 'Result', 'infer': lambda x: None,
'internal_layer_id': 5}),
}
ti_sub_graph_2_nodes = {
**shaped_parameter('cond_2_int', [1, 4, 64, 54], {'internal_layer_id': 0}),
**result("cond_2_int_out"),
**shaped_parameter('in_2_int', [1, 4, 64, 54], {'internal_layer_id': 1}),
**shaped_const_with_data('ones', int64_array([1, 4, 64, 54])),
**regular_op_with_shaped_data('OUT_2', int64_array([1, 4, 64, 54]),
{'op': "Add", 'infer': copy_shape_infer}),
**regular_op_with_empty_data('OUT_2_out', {'op': 'Result', 'type': 'Result', 'infer': lambda x: None,
'internal_layer_id': 7}),
**regular_op_with_empty_data('in_2_int_out', {'op': 'Result', 'type': 'Result', 'infer': lambda x: None,
'internal_layer_id': 6})
}
class TI_AddOutputRecursiveTest(unittest.TestCase):
@staticmethod
def create_graph():
sub_graph_2 = build_graph(nodes_attrs=ti_sub_graph_2_nodes,
edges=[*connect('cond_2_int', 'cond_2_int_out'),
*connect('in_2_int', 'OUT_2'),
*connect('ones', 'OUT_2'),
*connect('OUT_2', 'OUT_2_out'),
*connect('in_2_int', 'in_2_int_out')],
nodes_with_edges_only=True)
sub_graph_1 = build_graph(nodes_attrs=ti_sub_graph_1_nodes,
edges=[*connect('cond_2', '1:Loop_2'),
*connect('IN_2', '0:Loop_2'),
*connect('Loop_2:0', 'Loop_2_out'),
*connect('in_1_int', 'in_1_int_out'),
*connect('cond_1_int', 'cond_1_int_out')],
nodes_with_edges_only=True)
loop_node_1 = Node(sub_graph_1, 'Loop_2')
loop_node_1.body = sub_graph_2
loop_node_1.in_edge(0)['external_port_id'] = 0
loop_node_1.in_edge(1)['external_port_id'] = 1
loop_node_1.out_edge(0)['external_port_id'] = 2
main_graph = ti_create_main_graph(sub_graph_1)
main_graph.graph['additional_outputs'] = ['Loop', 'Loop_2']
return main_graph, sub_graph_1
def check_body_last_node(self, body, node_id, loop_2_node_out_ports_len):
last_node = Node(body, node_id)
max_layer_id = 5
self.assertEqual(len(last_node.out_ports()), loop_2_node_out_ports_len)
unsq_node = last_node.out_port(0).get_destinations()[1].node
self.assertEqual(unsq_node.op, 'Unsqueeze')
self.assertEqual(unsq_node.out_port(0).get_destination().node.op, 'Result')
self.assertEqual(unsq_node.out_port(0).get_destination().node.internal_layer_id, max_layer_id + 3)
self.assertTrue(np.all(unsq_node.out_port(0).data.get_shape() == int64_array([1, 1, 4, 64, 54])))
def check_loop_node(self, graph, node_id, port_map_len, out_ports_len):
loop_node = Node(graph, node_id)
self.assertEqual(len(loop_node.output_port_map), port_map_len + 1)
self.assertEqual(len(loop_node.out_ports()), out_ports_len + 1)
self.assertEqual(loop_node.out_port(1).get_destination().node.op, 'Result')
def test_add_output_1(self):
main_graph, sub_graph_1 = self.create_graph()
loop_node = Node(main_graph, 'Loop')
loop_node_output_port_map_len = len(loop_node.output_port_map)
loop_node_out_ports_len = len(loop_node.out_ports())
loop_node_2 = Node(sub_graph_1, 'Loop_2')
loop_2_node_out_ports_len = len(loop_node_2.out_ports())
AddOutputRecursive().find_and_replace_pattern(main_graph)
self.check_loop_node(main_graph, 'Loop', loop_node_output_port_map_len, loop_node_out_ports_len)
self.assertTrue(np.all(loop_node.out_port(1).data.get_shape() == int64_array([1, 1, 4, 64, 54])))
self.check_body_last_node(sub_graph_1, 'Loop_2', loop_2_node_out_ports_len)
def test_add_output_dynamic(self):
main_graph, sub_graph_1 = self.create_graph()
loop_node = Node(main_graph, 'Loop')
loop_node_output_port_map_len = len(loop_node.output_port_map)
loop_node_out_ports_len = len(loop_node.out_ports())
loop_node_2 = Node(sub_graph_1, 'Loop_2')
loop_2_node_out_ports_len = len(loop_node_2.out_ports())
loop_node.input_port_map[2]['axis'] = 1
loop_node.input_port_map[2]['start'] = 0
loop_node.input_port_map[2]['end'] = -1
loop_node.input_port_map[2]['stride'] = 1
in_1_node = Node(main_graph, 'IN_1')
in_1_node['shape'] = shape_array([1, dynamic_dimension_value, 64, 54])
AddOutputRecursive().find_and_replace_pattern(main_graph)
self.check_loop_node(main_graph, 'Loop', loop_node_output_port_map_len, loop_node_out_ports_len)
self.assertTrue(np.all(loop_node.out_port(1).data.get_shape() ==
shape_array([dynamic_dimension_value, 1, 4, 64, 54])))
self.check_body_last_node(sub_graph_1, 'Loop_2', loop_2_node_out_ports_len)
def test_add_output_several_iterations(self):
main_graph, sub_graph_1 = self.create_graph()
loop_node = Node(main_graph, 'Loop')
loop_node_output_port_map_len = len(loop_node.output_port_map)
loop_node_out_ports_len = len(loop_node.out_ports())
loop_node_2 = Node(sub_graph_1, 'Loop_2')
loop_2_node_out_ports_len = len(loop_node_2.out_ports())
loop_node.input_port_map[2]['axis'] = 1
loop_node.input_port_map[2]['start'] = 0
loop_node.input_port_map[2]['end'] = -1
loop_node.input_port_map[2]['stride'] = 1
loop_node.output_port_map[0]['axis'] = 1
loop_node.output_port_map[0]['start'] = 0
loop_node.output_port_map[0]['end'] = 10
loop_node.output_port_map[0]['stride'] = 2
AddOutputRecursive().find_and_replace_pattern(main_graph)
self.check_loop_node(main_graph, 'Loop', loop_node_output_port_map_len, loop_node_out_ports_len)
self.assertTrue(np.all(loop_node.out_port(1).data.get_shape() == shape_array([4, 1, 4, 64, 54])))
self.assertTrue(np.all(loop_node.out_port(0).data.get_shape() == shape_array([1, 5, 64, 54])))
self.check_body_last_node(sub_graph_1, 'Loop_2', loop_2_node_out_ports_len)
def test_add_output_several_iterations_wo_start_end(self):
main_graph, sub_graph_1 = self.create_graph()
loop_node = Node(main_graph, 'Loop')
loop_node_output_port_map_len = len(loop_node.output_port_map)
loop_node_out_ports_len = len(loop_node.out_ports())
loop_node.input_port_map[2]['axis'] = 1
loop_node.input_port_map[2]['stride'] = 1
loop_node_2 = Node(sub_graph_1, 'Loop_2')
loop_2_node_out_ports_len = len(loop_node_2.out_ports())
AddOutputRecursive().find_and_replace_pattern(main_graph)
self.check_loop_node(main_graph, 'Loop', loop_node_output_port_map_len, loop_node_out_ports_len)
self.assertTrue(np.all(loop_node.out_port(1).data.get_shape() == shape_array([4, 1, 4, 64, 54])))
self.check_body_last_node(sub_graph_1, 'Loop_2', loop_2_node_out_ports_len)
def test_add_output_several_iterations_negative_end(self):
main_graph, sub_graph_1 = self.create_graph()
loop_node = Node(main_graph, 'Loop')
loop_node_output_port_map_len = len(loop_node.output_port_map)
loop_node_out_ports_len = len(loop_node.out_ports())
loop_node_2 = Node(sub_graph_1, 'Loop_2')
loop_2_node_out_ports_len = len(loop_node_2.out_ports())
loop_node.input_port_map[2]['axis'] = 1
loop_node.input_port_map[2]['start'] = 0
loop_node.input_port_map[2]['end'] = -3
loop_node.input_port_map[2]['stride'] = 1
loop_node.output_port_map[0]['axis'] = 1
loop_node.output_port_map[0]['start'] = 0
loop_node.output_port_map[0]['end'] = -1
loop_node.output_port_map[0]['stride'] = 2
AddOutputRecursive().find_and_replace_pattern(main_graph)
self.check_loop_node(main_graph, 'Loop', loop_node_output_port_map_len, loop_node_out_ports_len)
self.assertTrue(np.all(loop_node.out_port(1).data.get_shape() == shape_array([2, 1, 4, 64, 54])))
self.assertTrue(np.all(loop_node.out_port(0).data.get_shape() == shape_array([1, 2, 64, 54])))
self.check_body_last_node(sub_graph_1, 'Loop_2', loop_2_node_out_ports_len)
def test_add_output_several_iterations_negative_stride(self):
main_graph, sub_graph_1 = self.create_graph()
loop_node = Node(main_graph, 'Loop')
loop_node_output_port_map_len = len(loop_node.output_port_map)
loop_node_out_ports_len = len(loop_node.out_ports())
loop_node_2 = Node(sub_graph_1, 'Loop_2')
loop_2_node_out_ports_len = len(loop_node_2.out_ports())
loop_node.input_port_map[2]['axis'] = 1
loop_node.input_port_map[2]['start'] = -1
loop_node.input_port_map[2]['end'] = 0
loop_node.input_port_map[2]['stride'] = -2
loop_node.output_port_map[0]['axis'] = 1
loop_node.output_port_map[0]['start'] = 0
loop_node.output_port_map[0]['end'] = -1
loop_node.output_port_map[0]['stride'] = 2
AddOutputRecursive().find_and_replace_pattern(main_graph)
self.check_loop_node(main_graph, 'Loop', loop_node_output_port_map_len, loop_node_out_ports_len)
self.assertTrue(np.all(loop_node.out_port(1).data.get_shape() == shape_array([2, 1, 4, 64, 54])))
self.assertTrue(np.all(loop_node.out_port(0).data.get_shape() == shape_array([1, 2, 64, 54])))
self.check_body_last_node(sub_graph_1, 'Loop_2', loop_2_node_out_ports_len)
def test_add_output_several_iterations_negative_start_end_input(self):
main_graph, sub_graph_1 = self.create_graph()
loop_node = Node(main_graph, 'Loop')
loop_node_output_port_map_len = len(loop_node.output_port_map)
loop_node_out_ports_len = len(loop_node.out_ports())
loop_node_2 = Node(sub_graph_1, 'Loop_2')
loop_2_node_out_ports_len = len(loop_node_2.out_ports())
loop_node.input_port_map[2]['axis'] = 1
loop_node.input_port_map[2]['start'] = -1
loop_node.input_port_map[2]['end'] = -4
loop_node.input_port_map[2]['stride'] = -2
loop_node.output_port_map[0]['axis'] = 1
loop_node.output_port_map[0]['start'] = 0
loop_node.output_port_map[0]['end'] = -1
loop_node.output_port_map[0]['stride'] = 2
AddOutputRecursive().find_and_replace_pattern(main_graph)
self.check_loop_node(main_graph, 'Loop', loop_node_output_port_map_len, loop_node_out_ports_len)
self.assertTrue(np.all(loop_node.out_port(1).data.get_shape() == shape_array([2, 1, 4, 64, 54])))
self.assertTrue(np.all(loop_node.out_port(0).data.get_shape() == shape_array([1, 2, 64, 54])))
self.check_body_last_node(sub_graph_1, 'Loop_2', loop_2_node_out_ports_len)
def test_add_output_several_iterations_negative_start_end_output(self):
main_graph, sub_graph_1 = self.create_graph()
loop_node = Node(main_graph, 'Loop')
loop_node_output_port_map_len = len(loop_node.output_port_map)
loop_node_out_ports_len = len(loop_node.out_ports())
loop_node_2 = Node(sub_graph_1, 'Loop_2')
loop_2_node_out_ports_len = len(loop_node_2.out_ports())
loop_node.input_port_map[2]['axis'] = 1
loop_node.input_port_map[2]['start'] = -1
loop_node.input_port_map[2]['end'] = -4
loop_node.input_port_map[2]['stride'] = -2
loop_node.output_port_map[0]['axis'] = 1
loop_node.output_port_map[0]['start'] = -4
loop_node.output_port_map[0]['end'] = -1
loop_node.output_port_map[0]['stride'] = 1
AddOutputRecursive().find_and_replace_pattern(main_graph)
self.check_loop_node(main_graph, 'Loop', loop_node_output_port_map_len, loop_node_out_ports_len)
self.assertTrue(np.all(loop_node.out_port(1).data.get_shape() == shape_array([2, 1, 4, 64, 54])))
self.assertTrue(np.all(loop_node.out_port(0).data.get_shape() == shape_array([1, 3, 64, 54])))
self.check_body_last_node(sub_graph_1, 'Loop_2', loop_2_node_out_ports_len)
# test for If
if_main_graph_nodes = {
**shaped_parameter("IN_1", [1, 4, 64, 54]),
**shaped_parameter("IN_2", [1, 4, 64, 54]),
**valued_const_with_data("cond", int64_array([1])),
**regular_op_with_empty_data("If", {'op': "If", 'type': 'If', 'sub_graphs': ['then_graph', 'else_graph'],
"then_graph": None, 'else_graph': None, 'infer': If.infer}),
**result("OUT_1")
}
if_sub_graph_1_then_nodes = {
**shaped_parameter("IN_2", int64_array([1, 4, 64, 54]), {'input_id': 2}),
**valued_const_with_data("cond_2", int64_array([1])),
**regular_op_with_empty_data("If_2", {'op': "If", 'type': 'If', 'sub_graphs': ['then_graph', 'else_graph'],
"then_graph": None, 'else_graph': None, 'infer': If.infer}),
**regular_op_with_empty_data('If_2_out', {'op': 'Result', 'type': 'Result', 'infer': lambda x: None}),
**shaped_parameter("in_1_int", int64_array([1, 4, 64, 54]), {'input_id': 1}),
**regular_op_with_empty_data("in_1_int_out", {'op': 'Result', 'type': 'Result', 'output_id': 0})
}
if_sub_graph_1_else_nodes = {
**shaped_parameter("in_1_int", int64_array([1, 4, 64, 54]), {'input_id': 1}),
**regular_op_with_empty_data("in_1_int_out", {'op': 'Result', 'type': 'Result', 'output_id': 0})
}
if_sub_graph_2_then_nodes = {
**shaped_parameter('in_2_int', [1, 4, 64, 54], {'input_id': 1}),
**shaped_const_with_data('ones', int64_array([1, 4, 64, 54])),
**regular_op_with_shaped_data('OUT_2', int64_array([1, 4, 64, 54]), {'op': "Add"}),
**regular_op_with_empty_data('OUT_2_out', {'op': 'Result', 'type': 'Result', 'output_id': 0}),
}
if_sub_graph_2_else_nodes = {
**shaped_parameter('in_2_int_else', [1, 4, 64, 54], {'input_id': 1}),
**shaped_const_with_data('ones_else', int64_array([1, 4, 64, 54])),
**regular_op_with_shaped_data('OUT_2_else', int64_array([1, 4, 64, 54]), {'op': "Sub"}),
**regular_op_with_empty_data('OUT_2_out_else', {'op': 'Result', 'type': 'Result', 'output_id': 0}),
}
class IF_AddOutputRecursiveTest(unittest.TestCase):
def test_add_output_1(self):
sub_graph_1 = if_create_main_graph()
if_node_1 = Node(sub_graph_1, 'If_2')
sub_graph_1_else = build_graph(nodes_attrs=if_sub_graph_1_else_nodes,
edges=[*connect('in_1_int', 'in_1_int_out')],
nodes_with_edges_only=True)
main_graph = build_graph(nodes_attrs=if_main_graph_nodes,
edges=[*connect('cond', '0:If'),
*connect('IN_1', '1:If'),
*connect('IN_2', "2:If"),
*connect('If:0', 'OUT_1')],
nodes_with_edges_only=True)
if_node = Node(main_graph, 'If')
if_node.then_graph = sub_graph_1
if_node.else_graph = sub_graph_1_else
if_node_out_ports_len = len(if_node.out_ports())
if_2_node_out_ports_len = len(if_node_1.out_ports())
main_graph.graph['additional_outputs'] = ['If', ['If_2', 'in_1_int']]
AddOutputRecursive().find_and_replace_pattern(main_graph)
if_node = Node(main_graph, 'If')
self.assertEqual(len(if_node.out_ports()), if_node_out_ports_len + 1)
self.assertEqual(if_node.out_port(1).get_destination().node.op, 'Result')
self.assertTrue(np.all(if_node.out_port(1).data.get_shape() == int64_array([1, 4, 64, 54])))
last_node = Node(sub_graph_1, 'If_2')
self.assertEqual(len(last_node.out_ports()), if_2_node_out_ports_len)
self.assertEqual(last_node.out_port(0).get_destinations()[1].node.op, 'Result')
self.assertTrue(np.all(last_node.out_port(0).data.get_shape() == int64_array([1, 4, 64, 54])))
class SplitUserPathTest(unittest.TestCase):
@staticmethod
def create_graph():
sub_graph_1 = if_create_main_graph()
out_node = Node(sub_graph_1, 'If_2_out')
out_node['internal_layer_id'] = 4
main_graph = ti_create_main_graph(sub_graph_1)
return main_graph
def test_linear_graph_change(self):
graph = self.create_graph()
path = ['Loop', 'in_1_int']
ref_path = []
loop_node = Node(graph, 'Loop')
ref_path.append({'node': loop_node, 'graph': graph})
ref_path.append({'node': Node(loop_node.body, 'in_1_int'), 'graph': loop_node.body})
tracks = AddOutputRecursive().split_path_to_simple_tracks(graph, path)
self.assertTrue(np.all(tracks[0] == ref_path))
def test_1_if_graph_change(self):
graph = self.create_graph()
path = ['Loop', 'If_2', ['OUT_2', 'OUT_2_else']]
ref_path = [[]]
loop_node = Node(graph, 'Loop')
ref_path[0].append({'node': loop_node, 'graph': graph})
if_node = Node(loop_node.body, 'If_2')
ref_path[0].append({'node': if_node, 'graph': loop_node.body})
ref_path.append([])
ref_path[1] = ref_path[0][:]
ref_path[0].append({'node': Node(if_node.then_graph, 'OUT_2'), 'graph': if_node.then_graph})
ref_path[1].append({'node': Node(if_node.else_graph, 'OUT_2_else'), 'graph': if_node.else_graph})
tracks = AddOutputRecursive().split_path_to_simple_tracks(graph, path)
self.assertTrue(np.all(tracks[0] == ref_path[0]))
self.assertTrue(np.all(tracks[1] == ref_path[1]))
def test_1_if_graph_change_add_output(self):
graph = self.create_graph()
graph.graph['additional_outputs'] = ['Loop', 'If_2', ['OUT_2', 'OUT_2_else']]
AddOutputRecursive().find_and_replace_pattern(graph)
loop_node = Node(graph, 'Loop')
if_node = Node(loop_node.body, 'If_2')
left_node = Node(if_node.then_graph, 'OUT_2')
right_node = Node(if_node.else_graph, 'OUT_2_else')
self.assertEqual(len(left_node.out_port(0).get_destinations()), 2)
self.assertEqual(left_node.out_port(0).get_destinations()[1].node.op, 'Result')
self.assertEqual(len(right_node.out_port(0).get_destinations()), 2)
self.assertEqual(right_node.out_port(0).get_destinations()[1].node.op, 'Result')
self.assertEqual(len(if_node.out_ports()), 2)
self.assertEqual(if_node.out_port(1).get_destination().node.op, 'Result')
self.assertEqual(len(loop_node.out_ports()), 2)
self.assertEqual(loop_node.out_port(1).get_destination().node.op, 'Result')
| 54.480737 | 125 | 0.580753 | 4,404 | 32,525 | 3.848774 | 0.034741 | 0.086844 | 0.041062 | 0.052035 | 0.906313 | 0.873923 | 0.83764 | 0.811091 | 0.770737 | 0.713569 | 0 | 0.040354 | 0.28 | 32,525 | 596 | 126 | 54.572148 | 0.683449 | 0.003751 | 0 | 0.569106 | 0 | 0 | 0.125968 | 0 | 0 | 0 | 0 | 0 | 0.095528 | 1 | 0.038618 | false | 0 | 0.020325 | 0 | 0.075203 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 6 |
4c192253d77e989dc8141a3f22749c5ce3b09211 | 32 | py | Python | src/tarski/fstrips/manipulation/__init__.py | phoeft670/tarski | 7d955e535fbbca012bfd1a12402b97febc6b35b9 | [
"Apache-2.0"
] | 29 | 2018-11-26T20:31:04.000Z | 2021-12-29T11:08:40.000Z | src/tarski/fstrips/manipulation/__init__.py | phoeft670/tarski | 7d955e535fbbca012bfd1a12402b97febc6b35b9 | [
"Apache-2.0"
] | 101 | 2018-06-07T13:10:01.000Z | 2022-03-11T11:54:00.000Z | src/tarski/fstrips/manipulation/__init__.py | phoeft670/tarski | 7d955e535fbbca012bfd1a12402b97febc6b35b9 | [
"Apache-2.0"
] | 18 | 2018-11-01T22:44:39.000Z | 2022-02-28T04:57:15.000Z |
from .simplify import Simplify
| 10.666667 | 30 | 0.8125 | 4 | 32 | 6.5 | 0.75 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.15625 | 32 | 2 | 31 | 16 | 0.962963 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 1 | 1 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 6 |
d5c94301132baa658ec4ddb62e116aa3105292d4 | 18,729 | py | Python | UNET/fcn.py | JiangguoZhang/ELEC576project | f4d61cc101ce8af3f236d578feef3a7a048bb41d | [
"Unlicense"
] | 1 | 2021-12-15T05:38:33.000Z | 2021-12-15T05:38:33.000Z | UNET/fcn.py | JiangguoZhang/ELEC576project | f4d61cc101ce8af3f236d578feef3a7a048bb41d | [
"Unlicense"
] | null | null | null | UNET/fcn.py | JiangguoZhang/ELEC576project | f4d61cc101ce8af3f236d578feef3a7a048bb41d | [
"Unlicense"
] | null | null | null | import torch
import torch.nn as nn
from torchvision import models
from torch.hub import load_state_dict_from_url  # needed by _vgg() when pretrained=True
__all__ = [
'VGG', 'vgg11', 'vgg11_bn', 'vgg13', 'vgg13_bn', 'vgg16', 'vgg16_bn',
'vgg19_bn', 'vgg19',
]
model_urls = {
'vgg11': 'https://download.pytorch.org/models/vgg11-bbd30ac9.pth',
'vgg13': 'https://download.pytorch.org/models/vgg13-c768596a.pth',
'vgg16': 'https://download.pytorch.org/models/vgg16-397923af.pth',
'vgg19': 'https://download.pytorch.org/models/vgg19-dcbb9e9d.pth',
'vgg11_bn': 'https://download.pytorch.org/models/vgg11_bn-6002323d.pth',
'vgg13_bn': 'https://download.pytorch.org/models/vgg13_bn-abd245e5.pth',
'vgg16_bn': 'https://download.pytorch.org/models/vgg16_bn-6c64b313.pth',
'vgg19_bn': 'https://download.pytorch.org/models/vgg19_bn-c79401a0.pth',
}
class VGG(nn.Module):
def __init__(self, features, num_classes=1000, init_weights=True):
super(VGG, self).__init__()
self.features = features
self.avgpool = nn.AdaptiveAvgPool2d((7, 7))
self.classifier = nn.Sequential(
nn.Linear(512 * 7 * 7, 4096),
nn.ReLU(True),
nn.Dropout(),
nn.Linear(4096, 4096),
nn.ReLU(True),
nn.Dropout(),
nn.Linear(4096, num_classes),
)
if init_weights:
self._initialize_weights()
def forward(self, x):
x = self.features(x)
x = self.avgpool(x)
x = torch.flatten(x, 1)
x = self.classifier(x)
return x
def _initialize_weights(self):
for m in self.modules():
if isinstance(m, nn.Conv2d):
nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
if m.bias is not None:
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.BatchNorm2d):
nn.init.constant_(m.weight, 1)
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.Linear):
nn.init.normal_(m.weight, 0, 0.01)
nn.init.constant_(m.bias, 0)
def make_layers(cfg, batch_norm=False):
layers = []
in_channels = 3
for v in cfg:
if v == 'M':
layers += [nn.MaxPool2d(kernel_size=2, stride=2)]
else:
conv2d = nn.Conv2d(in_channels, v, kernel_size=3, padding=1)
if batch_norm:
layers += [conv2d, nn.BatchNorm2d(v), nn.ReLU(inplace=True)]
else:
layers += [conv2d, nn.ReLU(inplace=True)]
in_channels = v
return nn.Sequential(*layers)
cfgs = {
'A': [64, 'M', 128, 'M', 256, 256, 'M', 512, 512, 'M', 512, 512, 'M'],
'B': [64, 64, 'M', 128, 128, 'M', 256, 256, 'M', 512, 512, 'M', 512, 512, 'M'],
'D': [64, 64, 'M', 128, 128, 'M', 256, 256, 256, 'M', 512, 512, 512, 'M', 512, 512, 512, 'M'],
'E': [64, 64, 'M', 128, 128, 'M', 256, 256, 256, 256, 'M', 512, 512, 512, 512, 'M', 512, 512, 512, 512, 'M'],
}
def _vgg(arch, cfg, batch_norm, pretrained, progress, **kwargs):
if pretrained:
kwargs['init_weights'] = False
model = VGG(make_layers(cfgs[cfg], batch_norm=batch_norm), **kwargs)
if pretrained:
state_dict = load_state_dict_from_url(model_urls[arch],
progress=progress)
model.load_state_dict(state_dict)
return model
def vgg11(pretrained=False, progress=True, **kwargs):
r"""VGG 11-layer model (configuration "A") from
`"Very Deep Convolutional Networks For Large-Scale Image Recognition" <https://arxiv.org/pdf/1409.1556.pdf>`_
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
progress (bool): If True, displays a progress bar of the download to stderr
"""
return _vgg('vgg11', 'A', False, pretrained, progress, **kwargs)
def vgg11_bn(pretrained=False, progress=True, **kwargs):
r"""VGG 11-layer model (configuration "A") with batch normalization
`"Very Deep Convolutional Networks For Large-Scale Image Recognition" <https://arxiv.org/pdf/1409.1556.pdf>`_
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
progress (bool): If True, displays a progress bar of the download to stderr
"""
return _vgg('vgg11_bn', 'A', True, pretrained, progress, **kwargs)
def vgg13(pretrained=False, progress=True, **kwargs):
r"""VGG 13-layer model (configuration "B")
`"Very Deep Convolutional Networks For Large-Scale Image Recognition" <https://arxiv.org/pdf/1409.1556.pdf>`_
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
progress (bool): If True, displays a progress bar of the download to stderr
"""
return _vgg('vgg13', 'B', False, pretrained, progress, **kwargs)
def vgg13_bn(pretrained=False, progress=True, **kwargs):
r"""VGG 13-layer model (configuration "B") with batch normalization
`"Very Deep Convolutional Networks For Large-Scale Image Recognition" <https://arxiv.org/pdf/1409.1556.pdf>`_
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
progress (bool): If True, displays a progress bar of the download to stderr
"""
return _vgg('vgg13_bn', 'B', True, pretrained, progress, **kwargs)
def vgg16(pretrained=False, progress=True, **kwargs):
r"""VGG 16-layer model (configuration "D")
`"Very Deep Convolutional Networks For Large-Scale Image Recognition" <https://arxiv.org/pdf/1409.1556.pdf>`_
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
progress (bool): If True, displays a progress bar of the download to stderr
"""
return _vgg('vgg16', 'D', False, pretrained, progress, **kwargs)
def vgg16_bn(pretrained=False, progress=True, **kwargs):
r"""VGG 16-layer model (configuration "D") with batch normalization
`"Very Deep Convolutional Networks For Large-Scale Image Recognition" <https://arxiv.org/pdf/1409.1556.pdf>`_
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
progress (bool): If True, displays a progress bar of the download to stderr
"""
return _vgg('vgg16_bn', 'D', True, pretrained, progress, **kwargs)
def vgg19(pretrained=False, progress=True, **kwargs):
r"""VGG 19-layer model (configuration "E")
`"Very Deep Convolutional Networks For Large-Scale Image Recognition" <https://arxiv.org/pdf/1409.1556.pdf>`_
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
progress (bool): If True, displays a progress bar of the download to stderr
"""
return _vgg('vgg19', 'E', False, pretrained, progress, **kwargs)
def vgg19_bn(pretrained=False, progress=True, **kwargs):
r"""VGG 19-layer model (configuration 'E') with batch normalization
`"Very Deep Convolutional Networks For Large-Scale Image Recognition" <https://arxiv.org/pdf/1409.1556.pdf>`_
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
progress (bool): If True, displays a progress bar of the download to stderr
"""
return _vgg('vgg19_bn', 'E', True, pretrained, progress, **kwargs)
class FCN32s(nn.Module):
def __init__(self, pretrained_net, n_class):
super().__init__()
self.n_class = n_class
self.pretrained_net = pretrained_net
self.relu = nn.ReLU(inplace=True)
self.deconv1 = nn.ConvTranspose2d(512, 512, kernel_size=3, stride=2, padding=1, dilation=1, output_padding=1)
self.bn1 = nn.BatchNorm2d(512)
self.deconv2 = nn.ConvTranspose2d(512, 256, kernel_size=3, stride=2, padding=1, dilation=1, output_padding=1)
self.bn2 = nn.BatchNorm2d(256)
self.deconv3 = nn.ConvTranspose2d(256, 128, kernel_size=3, stride=2, padding=1, dilation=1, output_padding=1)
self.bn3 = nn.BatchNorm2d(128)
self.deconv4 = nn.ConvTranspose2d(128, 64, kernel_size=3, stride=2, padding=1, dilation=1, output_padding=1)
self.bn4 = nn.BatchNorm2d(64)
self.deconv5 = nn.ConvTranspose2d(64, 32, kernel_size=3, stride=2, padding=1, dilation=1, output_padding=1)
self.bn5 = nn.BatchNorm2d(32)
self.classifier = nn.Conv2d(64, n_class, kernel_size=1)
self.sigmoid = nn.Sigmoid()
def forward(self, x):
output = self.pretrained_net(x)
x4 = output['x4'] # size=(N, 512, x.H/16, x.W/16): output of the fourth VGG pooling stage
score = self.bn1(self.relu(self.deconv1(x4))) # size=(N, 512, x.H/8, x.W/8)
score = self.bn2(self.relu(self.deconv2(score))) # size=(N, 256, x.H/4, x.W/4)
score = self.bn3(self.relu(self.deconv3(score))) # size=(N, 128, x.H/2, x.W/2)
score = self.bn4(self.relu(self.deconv4(score))) # size=(N, 64, x.H, x.W)
# deconv5 is intentionally skipped: starting from x4 (H/16), four stride-2
# deconvolutions already restore the input resolution, which is why the
# classifier takes 64 input channels here.
score = self.classifier(score) # size=(N, n_class, x.H, x.W)
score = self.sigmoid(score)
return score # size=(N, n_class, x.H, x.W)
class FCN16s(nn.Module):
def __init__(self, pretrained_net, n_class):
super().__init__()
self.n_class = n_class
self.pretrained_net = pretrained_net
self.relu = nn.ReLU(inplace=True)
self.deconv1 = nn.ConvTranspose2d(512, 512, kernel_size=3, stride=2, padding=1, dilation=1, output_padding=1)
self.bn1 = nn.BatchNorm2d(512)
self.deconv2 = nn.ConvTranspose2d(512, 256, kernel_size=3, stride=2, padding=1, dilation=1, output_padding=1)
self.bn2 = nn.BatchNorm2d(256)
self.deconv3 = nn.ConvTranspose2d(256, 128, kernel_size=3, stride=2, padding=1, dilation=1, output_padding=1)
self.bn3 = nn.BatchNorm2d(128)
self.deconv4 = nn.ConvTranspose2d(128, 64, kernel_size=3, stride=2, padding=1, dilation=1, output_padding=1)
self.bn4 = nn.BatchNorm2d(64)
self.deconv5 = nn.ConvTranspose2d(64, 32, kernel_size=3, stride=2, padding=1, dilation=1, output_padding=1)
self.bn5 = nn.BatchNorm2d(32)
self.classifier = nn.Conv2d(32, n_class, kernel_size=1)
def forward(self, x):
output = self.pretrained_net(x)
x5 = output['x5'] # size=(N, 512, x.H/32, x.W/32)
x4 = output['x4'] # size=(N, 512, x.H/16, x.W/16)
score = self.relu(self.deconv1(x5)) # size=(N, 512, x.H/16, x.W/16)
score = self.bn1(score + x4) # element-wise add, size=(N, 512, x.H/16, x.W/16)
score = self.bn2(self.relu(self.deconv2(score))) # size=(N, 256, x.H/8, x.W/8)
score = self.bn3(self.relu(self.deconv3(score))) # size=(N, 128, x.H/4, x.W/4)
score = self.bn4(self.relu(self.deconv4(score))) # size=(N, 64, x.H/2, x.W/2)
score = self.bn5(self.relu(self.deconv5(score))) # size=(N, 32, x.H, x.W)
score = self.classifier(score) # size=(N, n_class, x.H/1, x.W/1)
return score # size=(N, n_class, x.H/1, x.W/1)
class FCN8s(nn.Module):
def __init__(self, pretrained_net, n_class):
super().__init__()
self.n_class = n_class
self.pretrained_net = pretrained_net
self.relu = nn.ReLU(inplace=True)
self.deconv1 = nn.ConvTranspose2d(512, 512, kernel_size=3, stride=2, padding=1, dilation=1, output_padding=1)
self.bn1 = nn.BatchNorm2d(512)
self.deconv2 = nn.ConvTranspose2d(512, 256, kernel_size=3, stride=2, padding=1, dilation=1, output_padding=1)
self.bn2 = nn.BatchNorm2d(256)
self.deconv3 = nn.ConvTranspose2d(256, 128, kernel_size=3, stride=2, padding=1, dilation=1, output_padding=1)
self.bn3 = nn.BatchNorm2d(128)
self.deconv4 = nn.ConvTranspose2d(128, 64, kernel_size=3, stride=2, padding=1, dilation=1, output_padding=1)
self.bn4 = nn.BatchNorm2d(64)
self.deconv5 = nn.ConvTranspose2d(64, 32, kernel_size=3, stride=2, padding=1, dilation=1, output_padding=1)
self.bn5 = nn.BatchNorm2d(32)
self.classifier = nn.Conv2d(32, n_class, kernel_size=1)
def forward(self, x):
output = self.pretrained_net(x)
x5 = output['x5'] # size=(N, 512, x.H/32, x.W/32)
x4 = output['x4'] # size=(N, 512, x.H/16, x.W/16)
x3 = output['x3'] # size=(N, 256, x.H/8, x.W/8)
score = self.relu(self.deconv1(x5)) # size=(N, 512, x.H/16, x.W/16)
score = self.bn1(score + x4) # element-wise add, size=(N, 512, x.H/16, x.W/16)
score = self.relu(self.deconv2(score)) # size=(N, 256, x.H/8, x.W/8)
score = self.bn2(score + x3) # element-wise add, size=(N, 256, x.H/8, x.W/8)
score = self.bn3(self.relu(self.deconv3(score))) # size=(N, 128, x.H/4, x.W/4)
score = self.bn4(self.relu(self.deconv4(score))) # size=(N, 64, x.H/2, x.W/2)
score = self.bn5(self.relu(self.deconv5(score))) # size=(N, 32, x.H, x.W)
score = self.classifier(score) # size=(N, n_class, x.H/1, x.W/1)
score = torch.sigmoid(score)  # same activation without constructing a module on every call
return score # size=(N, n_class, x.H/1, x.W/1)
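# FCNs fuses all five VGG pooling outputs (x1..x5) through element-wise skip
# additions while upsampling back to full resolution, the densest variant of
# the FCN-32s/16s/8s family above.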
class FCNs(nn.Module):
def __init__(self, pretrained_net, n_class):
super().__init__()
self.n_class = n_class
self.pretrained_net = pretrained_net
self.relu = nn.ReLU(inplace=True)
self.deconv1 = nn.ConvTranspose2d(512, 512, kernel_size=3, stride=2, padding=1, dilation=1, output_padding=1)
self.bn1 = nn.BatchNorm2d(512)
self.deconv2 = nn.ConvTranspose2d(512, 256, kernel_size=3, stride=2, padding=1, dilation=1, output_padding=1)
self.bn2 = nn.BatchNorm2d(256)
self.deconv3 = nn.ConvTranspose2d(256, 128, kernel_size=3, stride=2, padding=1, dilation=1, output_padding=1)
self.bn3 = nn.BatchNorm2d(128)
self.deconv4 = nn.ConvTranspose2d(128, 64, kernel_size=3, stride=2, padding=1, dilation=1, output_padding=1)
self.bn4 = nn.BatchNorm2d(64)
self.deconv5 = nn.ConvTranspose2d(64, 32, kernel_size=3, stride=2, padding=1, dilation=1, output_padding=1)
self.bn5 = nn.BatchNorm2d(32)
self.classifier = nn.Conv2d(32, n_class, kernel_size=1)
def forward(self, x):
output = self.pretrained_net(x)
x5 = output['x5'] # size=(N, 512, x.H/32, x.W/32)
x4 = output['x4'] # size=(N, 512, x.H/16, x.W/16)
x3 = output['x3'] # size=(N, 256, x.H/8, x.W/8)
x2 = output['x2'] # size=(N, 128, x.H/4, x.W/4)
x1 = output['x1'] # size=(N, 64, x.H/2, x.W/2)
score = self.bn1(self.relu(self.deconv1(x5))) # size=(N, 512, x.H/16, x.W/16)
score = score + x4 # element-wise add, size=(N, 512, x.H/16, x.W/16)
score = self.bn2(self.relu(self.deconv2(score))) # size=(N, 256, x.H/8, x.W/8)
score = score + x3 # element-wise add, size=(N, 256, x.H/8, x.W/8)
score = self.bn3(self.relu(self.deconv3(score))) # size=(N, 128, x.H/4, x.W/4)
score = score + x2 # element-wise add, size=(N, 128, x.H/4, x.W/4)
score = self.bn4(self.relu(self.deconv4(score))) # size=(N, 64, x.H/2, x.W/2)
score = score + x1 # element-wise add, size=(N, 64, x.H/2, x.W/2)
score = self.bn5(self.relu(self.deconv5(score))) # size=(N, 32, x.H, x.W)
score = self.classifier(score) # size=(N, n_class, x.H/1, x.W/1)
return score # size=(N, n_class, x.H/1, x.W/1)
class VGGNet(VGG):
def __init__(self, pretrained=True, model='vgg16', requires_grad=True, remove_fc=True, show_params=False):
super().__init__(make_layers(cfg[model]))
self.ranges = ranges[model]
if pretrained:
exec("self.load_state_dict(models.%s(pretrained=True).state_dict())" % model)
if not requires_grad:
for param in super().parameters():
param.requires_grad = False
if remove_fc: # delete redundant fully-connected layer params, can save memory
del self.classifier
if show_params:
for name, param in self.named_parameters():
print(name, param.size())
def forward(self, x):
output = {}
# get the output of each maxpooling layer (5 maxpool in VGG net)
for idx in range(len(self.ranges)):
for layer in range(self.ranges[idx][0], self.ranges[idx][1]):
x = self.features[layer](x)
output["x%d"%(idx+1)] = x
return output
ranges = {
'vgg11': ((0, 3), (3, 6), (6, 11), (11, 16), (16, 21)),
'vgg13': ((0, 5), (5, 10), (10, 15), (15, 20), (20, 25)),
'vgg16': ((0, 5), (5, 10), (10, 17), (17, 24), (24, 31)),
'vgg19': ((0, 5), (5, 10), (10, 19), (19, 28), (28, 37))
}
# cropped version from https://github.com/pytorch/vision/blob/master/torchvision/models/vgg.py
cfg = {
'vgg11': [64, 'M', 128, 'M', 256, 256, 'M', 512, 512, 'M', 512, 512, 'M'],
'vgg13': [64, 64, 'M', 128, 128, 'M', 256, 256, 'M', 512, 512, 'M', 512, 512, 'M'],
'vgg16': [64, 64, 'M', 128, 128, 'M', 256, 256, 256, 'M', 512, 512, 512, 'M', 512, 512, 512, 'M'],
'vgg19': [64, 64, 'M', 128, 128, 'M', 256, 256, 256, 256, 'M', 512, 512, 512, 512, 'M', 512, 512, 512, 512, 'M'],
}
def get_fcn32s(n_class=1):
vgg_model = VGGNet(requires_grad=True)
return FCN32s(pretrained_net=vgg_model, n_class=n_class)
def get_fcn8s(n_class=1):
vgg_model = VGGNet(requires_grad=True)
return FCN8s(pretrained_net=vgg_model, n_class=n_class)
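# Example usage, a minimal sketch (assumes a 3-channel input whose spatial
# dimensions are divisible by 32):
#   model = get_fcn8s(n_class=1)
#   out = model(torch.randn(2, 3, 224, 224))  # -> (2, 1, 224, 224), sigmoid-activated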
| 45.13012 | 117 | 0.599498 | 2,747 | 18,729 | 3.995632 | 0.085184 | 0.020955 | 0.021866 | 0.030977 | 0.811862 | 0.768039 | 0.740707 | 0.732507 | 0.724854 | 0.715743 | 0 | 0.09044 | 0.244327 | 18,729 | 414 | 118 | 45.23913 | 0.685084 | 0.243313 | 0 | 0.533333 | 0 | 0 | 0.058337 | 0.004388 | 0 | 0 | 0 | 0 | 0 | 1 | 0.091228 | false | 0 | 0.024561 | 0 | 0.203509 | 0.003509 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 6 |
d5f02a4b788e9588d0cb4cb95fe1aad35ebc1876 | 24,061 | py | Python | mars/tensor/execution/tests/test_datasource_execute.py | cclauss/mars | 85decf86f6489ab1acaee6222731d66fcecd2718 | [
"Apache-2.0"
] | 1 | 2018-12-26T08:37:04.000Z | 2018-12-26T08:37:04.000Z | mars/tensor/execution/tests/test_datasource_execute.py | cclauss/mars | 85decf86f6489ab1acaee6222731d66fcecd2718 | [
"Apache-2.0"
] | null | null | null | mars/tensor/execution/tests/test_datasource_execute.py | cclauss/mars | 85decf86f6489ab1acaee6222731d66fcecd2718 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 1999-2018 Alibaba Group Holding Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import scipy.sparse as sps
from mars.tests.core import TestBase
from mars.tensor.execution.core import Executor
from mars.tensor.expressions.datasource import tensor, ones_like, zeros, zeros_like, full, \
arange, empty, empty_like, diag, diagflat, eye, linspace, meshgrid, indices, \
triu, tril
from mars.lib.sparse import SparseNDArray
from mars.tensor.expressions.lib import nd_grid
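# These tests execute each datasource tensor chunk-wise through the Executor
# and compare the (concatenated) result against the equivalent NumPy call.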
class Test(TestBase):
def setUp(self):
super(Test, self).setUp()
self.executor = Executor()
def testCreateSparseExecution(self):
mat = sps.csr_matrix([[0, 0, 2], [2, 0, 0]])
t = tensor(mat, dtype='f8', chunks=2)
res = self.executor.execute_tensor(t)
self.assertIsInstance(res[0], SparseNDArray)
self.assertEqual(res[0].dtype, np.float64)
np.testing.assert_array_equal(res[0].toarray(), mat[..., :2].toarray())
np.testing.assert_array_equal(res[1].toarray(), mat[..., 2:].toarray())
t2 = ones_like(t, dtype='f4')
res = self.executor.execute_tensor(t2)
expected = sps.csr_matrix([[0, 0, 1], [1, 0, 0]])
self.assertIsInstance(res[0], SparseNDArray)
self.assertEqual(res[0].dtype, np.float32)
np.testing.assert_array_equal(res[0].toarray(), expected[..., :2].toarray())
np.testing.assert_array_equal(res[1].toarray(), expected[..., 2:].toarray())
t3 = tensor(np.array([[0, 0, 2], [2, 0, 0]]), chunks=2).tosparse()
res = self.executor.execute_tensor(t3)
self.assertIsInstance(res[0], SparseNDArray)
self.assertEqual(res[0].dtype, np.int_)
np.testing.assert_array_equal(res[0].toarray(), mat[..., :2].toarray())
np.testing.assert_array_equal(res[1].toarray(), mat[..., 2:].toarray())
def testZerosExecution(self):
t = zeros((20, 30), dtype='i8', chunks=5)
res = self.executor.execute_tensor(t, concat=True)
self.assertTrue(np.array_equal(res[0], np.zeros((20, 30), dtype='i8')))
self.assertEqual(res[0].dtype, np.int64)
t2 = zeros_like(t)
res = self.executor.execute_tensor(t2, concat=True)
self.assertTrue(np.array_equal(res[0], np.zeros((20, 30), dtype='i8')))
self.assertEqual(res[0].dtype, np.int64)
t = zeros((20, 30), dtype='i4', chunks=5, sparse=True)
res = self.executor.execute_tensor(t, concat=True)
self.assertEqual(res[0].nnz, 0)
def testEmptyExecution(self):
t = empty((20, 30), dtype='i8', chunks=5)
res = self.executor.execute_tensor(t, concat=True)
self.assertEqual(res[0].shape, (20, 30))
self.assertEqual(res[0].dtype, np.int64)
self.assertFalse(np.array_equal(res, np.zeros((20, 30))))
t = empty((20, 30), chunks=5)
res = self.executor.execute_tensor(t, concat=True)
self.assertFalse(np.allclose(res, np.zeros((20, 30))))
t2 = empty_like(t)
res = self.executor.execute_tensor(t2, concat=True)
self.assertEqual(res[0].shape, (20, 30))
self.assertEqual(res[0].dtype, np.float64)
def testFullExecution(self):
t = full((2, 2), 1, dtype='f4', chunks=1)
res = self.executor.execute_tensor(t, concat=True)
self.assertTrue(np.array_equal(res[0], np.full((2, 2), 1, dtype='f4')))
t = full((2, 2), [1, 2], dtype='f8', chunks=1)
res = self.executor.execute_tensor(t, concat=True)
self.assertTrue(np.array_equal(res[0], np.full((2, 2), [1, 2], dtype='f8')))
def testArangeExecution(self):
t = arange(1, 20, 3, chunks=2)
res = self.executor.execute_tensor(t, concat=True)[0]
self.assertTrue(np.array_equal(res, np.arange(1, 20, 3)))
t = arange(1, 20, .3, chunks=4)
res = self.executor.execute_tensor(t, concat=True)[0]
expected = np.arange(1, 20, .3)
self.assertTrue(np.allclose(res, expected))
t = arange(1.0, 1.8, .3, chunks=4)
res = self.executor.execute_tensor(t, concat=True)[0]
expected = np.arange(1.0, 1.8, .3)
self.assertTrue(np.allclose(res, expected))
t = arange('1066-10-13', '1066-10-31', dtype=np.datetime64, chunks=3)
res = self.executor.execute_tensor(t, concat=True)[0]
expected = np.arange('1066-10-13', '1066-10-31', dtype=np.datetime64)
self.assertTrue(np.array_equal(res, expected))
def testDiagExecution(self):
# 2-d 6 * 6
a = arange(36, chunks=2).reshape(6, 6)
d = diag(a)
res = self.executor.execute_tensor(d, concat=True)[0]
expected = np.diag(np.arange(36).reshape(6, 6))
np.testing.assert_equal(res, expected)
d = diag(a, k=1)
res = self.executor.execute_tensor(d, concat=True)[0]
expected = np.diag(np.arange(36).reshape(6, 6), k=1)
np.testing.assert_equal(res, expected)
d = diag(a, k=3)
res = self.executor.execute_tensor(d, concat=True)[0]
expected = np.diag(np.arange(36).reshape(6, 6), k=3)
np.testing.assert_equal(res, expected)
d = diag(a, k=-2)
res = self.executor.execute_tensor(d, concat=True)[0]
expected = np.diag(np.arange(36).reshape(6, 6), k=-2)
np.testing.assert_equal(res, expected)
d = diag(a, k=-5)
res = self.executor.execute_tensor(d)[0]
expected = np.diag(np.arange(36).reshape(6, 6), k=-5)
np.testing.assert_equal(res, expected)
# 2-d 4 * 9
a = arange(36, chunks=2).reshape(4, 9)
d = diag(a)
res = self.executor.execute_tensor(d, concat=True)[0]
expected = np.diag(np.arange(36).reshape(4, 9))
np.testing.assert_equal(res, expected)
d = diag(a, k=1)
res = self.executor.execute_tensor(d, concat=True)[0]
expected = np.diag(np.arange(36).reshape(4, 9), k=1)
np.testing.assert_equal(res, expected)
d = diag(a, k=3)
res = self.executor.execute_tensor(d, concat=True)[0]
expected = np.diag(np.arange(36).reshape(4, 9), k=3)
np.testing.assert_equal(res, expected)
d = diag(a, k=-2)
res = self.executor.execute_tensor(d, concat=True)[0]
expected = np.diag(np.arange(36).reshape(4, 9), k=-2)
np.testing.assert_equal(res, expected)
d = diag(a, k=-3)
res = self.executor.execute_tensor(d)[0]
expected = np.diag(np.arange(36).reshape(4, 9), k=-3)
np.testing.assert_equal(res, expected)
# 1-d
a = arange(5, chunks=2)
d = diag(a)
res = self.executor.execute_tensor(d, concat=True)[0]
expected = np.diag(np.arange(5))
np.testing.assert_equal(res, expected)
d = diag(a, k=1)
res = self.executor.execute_tensor(d, concat=True)[0]
expected = np.diag(np.arange(5), k=1)
np.testing.assert_equal(res, expected)
d = diag(a, k=3)
res = self.executor.execute_tensor(d, concat=True)[0]
expected = np.diag(np.arange(5), k=3)
np.testing.assert_equal(res, expected)
d = diag(a, k=-2)
res = self.executor.execute_tensor(d, concat=True)[0]
expected = np.diag(np.arange(5), k=-2)
np.testing.assert_equal(res, expected)
d = diag(a, k=-3)
res = self.executor.execute_tensor(d, concat=True)[0]
expected = np.diag(np.arange(5), k=-3)
np.testing.assert_equal(res, expected)
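# sparse=True should yield a SparseNDArray whose dense form matches the
# dense np.diag result.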
d = diag(a, sparse=True)
res = self.executor.execute_tensor(d, concat=True)[0]
expected = np.diag(np.arange(5))
self.assertIsInstance(res, SparseNDArray)
np.testing.assert_equal(res.toarray(), expected)
d = diag(a, k=1, sparse=True)
res = self.executor.execute_tensor(d, concat=True)[0]
expected = np.diag(np.arange(5), k=1)
self.assertIsInstance(res, SparseNDArray)
np.testing.assert_equal(res.toarray(), expected)
d = diag(a, k=2, sparse=True)
res = self.executor.execute_tensor(d, concat=True)[0]
expected = np.diag(np.arange(5), k=2)
self.assertIsInstance(res, SparseNDArray)
np.testing.assert_equal(res.toarray(), expected)
d = diag(a, k=-2, sparse=True)
res = self.executor.execute_tensor(d, concat=True)[0]
expected = np.diag(np.arange(5), k=-2)
self.assertIsInstance(res, SparseNDArray)
np.testing.assert_equal(res.toarray(), expected)
d = diag(a, k=-3, sparse=True)
res = self.executor.execute_tensor(d, concat=True)[0]
expected = np.diag(np.arange(5), k=-3)
self.assertIsInstance(res, SparseNDArray)
np.testing.assert_equal(res.toarray(), expected)
def testDiagflatExecution(self):
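# diagflat flattens its input first (matching np.diagflat), so the 2-d
# [[1, 2], [3, 4]] ends up as a 4x4 diagonal matrix.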
a = diagflat([[1, 2], [3, 4]], chunks=1)
res = self.executor.execute_tensor(a, concat=True)[0]
expected = np.diagflat([[1, 2], [3, 4]])
np.testing.assert_equal(res, expected)
d = tensor([[1, 2], [3, 4]], chunks=1)
a = diagflat(d)
res = self.executor.execute_tensor(a, concat=True)[0]
expected = np.diagflat([[1, 2], [3, 4]])
np.testing.assert_equal(res, expected)
a = diagflat([1, 2], 1, chunks=1)
res = self.executor.execute_tensor(a, concat=True)[0]
expected = np.diagflat([1, 2], 1)
np.testing.assert_equal(res, expected)
d = tensor([[1, 2]], chunks=1)
a = diagflat(d, 1)
res = self.executor.execute_tensor(a, concat=True)[0]
expected = np.diagflat([1, 2], 1)
np.testing.assert_equal(res, expected)
def testEyeExecution(self):
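# eye mirrors np.eye: M sets the number of columns (defaulting to N) and
# k shifts the diagonal of ones up (k > 0) or down (k < 0).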
t = eye(5, chunks=2)
res = self.executor.execute_tensor(t, concat=True)[0]
expected = np.eye(5)
np.testing.assert_equal(res, expected)
t = eye(5, k=1, chunks=2)
res = self.executor.execute_tensor(t, concat=True)[0]
expected = np.eye(5, k=1)
np.testing.assert_equal(res, expected)
t = eye(5, k=2, chunks=2)
res = self.executor.execute_tensor(t, concat=True)[0]
expected = np.eye(5, k=2)
np.testing.assert_equal(res, expected)
t = eye(5, k=-1, chunks=2)
res = self.executor.execute_tensor(t, concat=True)[0]
expected = np.eye(5, k=-1)
np.testing.assert_equal(res, expected)
t = eye(5, k=-3, chunks=2)
res = self.executor.execute_tensor(t, concat=True)[0]
expected = np.eye(5, k=-3)
np.testing.assert_equal(res, expected)
t = eye(5, M=3, k=1, chunks=2)
res = self.executor.execute_tensor(t, concat=True)[0]
expected = np.eye(5, M=3, k=1)
np.testing.assert_equal(res, expected)
t = eye(5, M=3, k=-3, chunks=2)
res = self.executor.execute_tensor(t, concat=True)[0]
expected = np.eye(5, M=3, k=-3)
np.testing.assert_equal(res, expected)
t = eye(5, M=7, k=1, chunks=2)
res = self.executor.execute_tensor(t, concat=True)[0]
expected = np.eye(5, M=7, k=1)
np.testing.assert_equal(res, expected)
t = eye(5, M=8, k=-3, chunks=2)
res = self.executor.execute_tensor(t, concat=True)[0]
expected = np.eye(5, M=8, k=-3)
np.testing.assert_equal(res, expected)
t = eye(2, dtype=int)
res = self.executor.execute_tensor(t, concat=True)[0]
self.assertEqual(res.dtype, np.int_)
# test sparse
t = eye(5, sparse=True, chunks=2)
res = self.executor.execute_tensor(t, concat=True)[0]
expected = np.eye(5)
self.assertIsInstance(res, SparseNDArray)
np.testing.assert_equal(res.toarray(), expected)
t = eye(5, k=1, sparse=True, chunks=2)
res = self.executor.execute_tensor(t, concat=True)[0]
expected = np.eye(5, k=1)
self.assertIsInstance(res, SparseNDArray)
np.testing.assert_equal(res.toarray(), expected)
t = eye(5, k=2, sparse=True, chunks=2)
res = self.executor.execute_tensor(t, concat=True)[0]
expected = np.eye(5, k=2)
self.assertIsInstance(res, SparseNDArray)
np.testing.assert_equal(res.toarray(), expected)
t = eye(5, k=-1, sparse=True, chunks=2)
res = self.executor.execute_tensor(t, concat=True)[0]
expected = np.eye(5, k=-1)
self.assertIsInstance(res, SparseNDArray)
np.testing.assert_equal(res.toarray(), expected)
t = eye(5, k=-3, sparse=True, chunks=2)
res = self.executor.execute_tensor(t, concat=True)[0]
expected = np.eye(5, k=-3)
self.assertIsInstance(res, SparseNDArray)
np.testing.assert_equal(res.toarray(), expected)
t = eye(5, M=3, k=1, sparse=True, chunks=2)
res = self.executor.execute_tensor(t, concat=True)[0]
expected = np.eye(5, M=3, k=1)
self.assertIsInstance(res, SparseNDArray)
np.testing.assert_equal(res.toarray(), expected)
t = eye(5, M=3, k=-3, sparse=True, chunks=2)
res = self.executor.execute_tensor(t, concat=True)[0]
expected = np.eye(5, M=3, k=-3)
self.assertIsInstance(res, SparseNDArray)
np.testing.assert_equal(res.toarray(), expected)
t = eye(5, M=7, k=1, sparse=True, chunks=2)
res = self.executor.execute_tensor(t, concat=True)[0]
expected = np.eye(5, M=7, k=1)
self.assertIsInstance(res, SparseNDArray)
np.testing.assert_equal(res.toarray(), expected)
t = eye(5, M=8, k=-3, sparse=True, chunks=2)
res = self.executor.execute_tensor(t, concat=True)[0]
expected = np.eye(5, M=8, k=-3)
self.assertIsInstance(res, SparseNDArray)
np.testing.assert_equal(res.toarray(), expected)
def testLinspaceExecution(self):
a = linspace(2.0, 9.0, num=11, chunks=3)
res = self.executor.execute_tensor(a, concat=True)[0]
expected = np.linspace(2.0, 9.0, num=11)
np.testing.assert_allclose(res, expected)
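# endpoint=False excludes the stop value, which changes the step size; with
# an integer dtype the evenly spaced float samples are cast down to ints.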
a = linspace(2.0, 9.0, num=11, endpoint=False, chunks=3)
res = self.executor.execute_tensor(a, concat=True)[0]
expected = np.linspace(2.0, 9.0, num=11, endpoint=False)
np.testing.assert_allclose(res, expected)
a = linspace(2.0, 9.0, num=11, chunks=3, dtype=int)
res = self.executor.execute_tensor(a, concat=True)[0]
self.assertEqual(res.dtype, np.int_)
def testMeshgridExecution(self):
a = arange(5, chunks=2)
b = arange(6, 12, chunks=3)
c = arange(12, 19, chunks=4)
A, B, C = meshgrid(a, b, c)
A_res = self.executor.execute_tensor(A, concat=True)[0]
A_expected = np.meshgrid(np.arange(5), np.arange(6, 12), np.arange(12, 19))[0]
np.testing.assert_equal(A_res, A_expected)
B_res = self.executor.execute_tensor(B, concat=True)[0]
B_expected = np.meshgrid(np.arange(5), np.arange(6, 12), np.arange(12, 19))[1]
np.testing.assert_equal(B_res, B_expected)
C_res = self.executor.execute_tensor(C, concat=True)[0]
C_expected = np.meshgrid(np.arange(5), np.arange(6, 12), np.arange(12, 19))[2]
np.testing.assert_equal(C_res, C_expected)
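# The default 'xy' (Cartesian) indexing swaps the first two output axes
# relative to 'ij' (matrix) indexing, mirroring np.meshgrid.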
A, B, C = meshgrid(a, b, c, indexing='ij')
A_res = self.executor.execute_tensor(A, concat=True)[0]
A_expected = np.meshgrid(np.arange(5), np.arange(6, 12), np.arange(12, 19), indexing='ij')[0]
np.testing.assert_equal(A_res, A_expected)
B_res = self.executor.execute_tensor(B, concat=True)[0]
B_expected = np.meshgrid(np.arange(5), np.arange(6, 12), np.arange(12, 19), indexing='ij')[1]
np.testing.assert_equal(B_res, B_expected)
C_res = self.executor.execute_tensor(C, concat=True)[0]
C_expected = np.meshgrid(np.arange(5), np.arange(6, 12), np.arange(12, 19), indexing='ij')[2]
np.testing.assert_equal(C_res, C_expected)
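# sparse=True returns minimally sized, broadcast-compatible grids instead
# of fully expanded arrays.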
A, B, C = meshgrid(a, b, c, sparse=True)
A_res = self.executor.execute_tensor(A, concat=True)[0]
A_expected = np.meshgrid(np.arange(5), np.arange(6, 12), np.arange(12, 19), sparse=True)[0]
np.testing.assert_equal(A_res, A_expected)
B_res = self.executor.execute_tensor(B, concat=True)[0]
B_expected = np.meshgrid(np.arange(5), np.arange(6, 12), np.arange(12, 19), sparse=True)[1]
np.testing.assert_equal(B_res, B_expected)
C_res = self.executor.execute_tensor(C, concat=True)[0]
C_expected = np.meshgrid(np.arange(5), np.arange(6, 12), np.arange(12, 19), sparse=True)[2]
np.testing.assert_equal(C_res, C_expected)
A, B, C = meshgrid(a, b, c, indexing='ij', sparse=True)
A_res = self.executor.execute_tensor(A, concat=True)[0]
A_expected = np.meshgrid(np.arange(5), np.arange(6, 12), np.arange(12, 19),
indexing='ij', sparse=True)[0]
np.testing.assert_equal(A_res, A_expected)
B_res = self.executor.execute_tensor(B, concat=True)[0]
B_expected = np.meshgrid(np.arange(5), np.arange(6, 12), np.arange(12, 19),
indexing='ij', sparse=True)[1]
np.testing.assert_equal(B_res, B_expected)
C_res = self.executor.execute_tensor(C, concat=True)[0]
C_expected = np.meshgrid(np.arange(5), np.arange(6, 12), np.arange(12, 19),
indexing='ij', sparse=True)[2]
np.testing.assert_equal(C_res, C_expected)
def testIndicesExecution(self):
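# indices((2, 3)) yields a (2, 2, 3) grid: grid[0] holds the row indices
# and grid[1] the column indices, as with np.indices.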
grid = indices((2, 3), chunks=1)
res = self.executor.execute_tensor(grid, concat=True)[0]
expected = np.indices((2, 3))
np.testing.assert_equal(res, expected)
res = self.executor.execute_tensor(grid[0], concat=True)[0]
np.testing.assert_equal(res, expected[0])
res = self.executor.execute_tensor(grid[1], concat=True)[0]
np.testing.assert_equal(res, expected[1])
def testTriuExecution(self):
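# For inputs with more than two dimensions, triu applies over the trailing
# two axes, i.e. to each (3, 4) slice here, matching np.triu.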
a = arange(24, chunks=2).reshape(2, 3, 4)
t = triu(a)
res = self.executor.execute_tensor(t, concat=True)[0]
expected = np.triu(np.arange(24).reshape(2, 3, 4))
np.testing.assert_equal(res, expected)
t = triu(a, k=1)
res = self.executor.execute_tensor(t, concat=True)[0]
expected = np.triu(np.arange(24).reshape(2, 3, 4), k=1)
np.testing.assert_equal(res, expected)
t = triu(a, k=2)
res = self.executor.execute_tensor(t, concat=True)[0]
expected = np.triu(np.arange(24).reshape(2, 3, 4), k=2)
np.testing.assert_equal(res, expected)
t = triu(a, k=-1)
res = self.executor.execute_tensor(t, concat=True)[0]
expected = np.triu(np.arange(24).reshape(2, 3, 4), k=-1)
np.testing.assert_equal(res, expected)
t = triu(a, k=-2)
res = self.executor.execute_tensor(t, concat=True)[0]
expected = np.triu(np.arange(24).reshape(2, 3, 4), k=-2)
np.testing.assert_equal(res, expected)
# test sparse
a = arange(12, chunks=2).reshape(3, 4).tosparse()
t = triu(a)
res = self.executor.execute_tensor(t, concat=True)[0]
expected = np.triu(np.arange(12).reshape(3, 4))
self.assertIsInstance(res, SparseNDArray)
np.testing.assert_equal(res.toarray(), expected)
t = triu(a, k=1)
res = self.executor.execute_tensor(t, concat=True)[0]
expected = np.triu(np.arange(12).reshape(3, 4), k=1)
self.assertIsInstance(res, SparseNDArray)
np.testing.assert_equal(res.toarray(), expected)
t = triu(a, k=2)
res = self.executor.execute_tensor(t, concat=True)[0]
expected = np.triu(np.arange(12).reshape(3, 4), k=2)
self.assertIsInstance(res, SparseNDArray)
np.testing.assert_equal(res.toarray(), expected)
t = triu(a, k=-1)
res = self.executor.execute_tensor(t, concat=True)[0]
expected = np.triu(np.arange(12).reshape(3, 4), k=-1)
self.assertIsInstance(res, SparseNDArray)
np.testing.assert_equal(res.toarray(), expected)
t = triu(a, k=-2)
res = self.executor.execute_tensor(t, concat=True)[0]
expected = np.triu(np.arange(12).reshape(3, 4), k=-2)
self.assertIsInstance(res, SparseNDArray)
np.testing.assert_equal(res.toarray(), expected)
def testTrilExecution(self):
a = arange(24, chunks=2).reshape(2, 3, 4)
t = tril(a)
res = self.executor.execute_tensor(t, concat=True)[0]
expected = np.tril(np.arange(24).reshape(2, 3, 4))
np.testing.assert_equal(res, expected)
t = tril(a, k=1)
res = self.executor.execute_tensor(t, concat=True)[0]
expected = np.tril(np.arange(24).reshape(2, 3, 4), k=1)
np.testing.assert_equal(res, expected)
t = tril(a, k=2)
res = self.executor.execute_tensor(t, concat=True)[0]
expected = np.tril(np.arange(24).reshape(2, 3, 4), k=2)
np.testing.assert_equal(res, expected)
t = tril(a, k=-1)
res = self.executor.execute_tensor(t, concat=True)[0]
expected = np.tril(np.arange(24).reshape(2, 3, 4), k=-1)
np.testing.assert_equal(res, expected)
t = tril(a, k=-2)
res = self.executor.execute_tensor(t, concat=True)[0]
expected = np.tril(np.arange(24).reshape(2, 3, 4), k=-2)
np.testing.assert_equal(res, expected)
a = arange(12, chunks=2).reshape(3, 4).tosparse()
t = tril(a)
res = self.executor.execute_tensor(t, concat=True)[0]
expected = np.tril(np.arange(12).reshape(3, 4))
self.assertIsInstance(res, SparseNDArray)
np.testing.assert_equal(res.toarray(), expected)
t = tril(a, k=1)
res = self.executor.execute_tensor(t, concat=True)[0]
expected = np.tril(np.arange(12).reshape(3, 4), k=1)
self.assertIsInstance(res, SparseNDArray)
np.testing.assert_equal(res.toarray(), expected)
t = tril(a, k=2)
res = self.executor.execute_tensor(t, concat=True)[0]
expected = np.tril(np.arange(12).reshape(3, 4), k=2)
self.assertIsInstance(res, SparseNDArray)
np.testing.assert_equal(res.toarray(), expected)
t = tril(a, k=-1)
res = self.executor.execute_tensor(t, concat=True)[0]
expected = np.tril(np.arange(12).reshape(3, 4), k=-1)
self.assertIsInstance(res, SparseNDArray)
np.testing.assert_equal(res.toarray(), expected)
t = tril(a, k=-2)
res = self.executor.execute_tensor(t, concat=True)[0]
expected = np.tril(np.arange(12).reshape(3, 4), k=-2)
self.assertIsInstance(res, SparseNDArray)
np.testing.assert_equal(res.toarray(), expected)
def testIndexTrickExecution(self):
mgrid = nd_grid()
t = mgrid[0:5, 0:5]
res = self.executor.execute_tensor(t, concat=True)[0]
expected = np.lib.index_tricks.nd_grid()[0:5, 0:5]
np.testing.assert_equal(res, expected)
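# A complex step such as 5j follows mgrid's linspace-like convention:
# five evenly spaced samples including both endpoints.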
t = mgrid[-1:1:5j]
res = self.executor.execute_tensor(t, concat=True)[0]
expected = np.lib.index_tricks.nd_grid()[-1:1:5j]
np.testing.assert_equal(res, expected)
ogrid = nd_grid(sparse=True)
t = ogrid[0:5, 0:5]
res = [self.executor.execute_tensor(o, concat=True)[0] for o in t]
expected = np.lib.index_tricks.nd_grid(sparse=True)[0:5, 0:5]
for r, e in zip(res, expected):
    np.testing.assert_equal(r, e)
| 36.734351 | 101 | 0.607747 | 3,577 | 24,061 | 4.01314 | 0.055913 | 0.083595 | 0.103448 | 0.151724 | 0.885545 | 0.870428 | 0.849042 | 0.833438 | 0.823964 | 0.801811 | 0 | 0.047329 | 0.235152 | 24,061 | 654 | 102 | 36.79052 | 0.732707 | 0.027513 | 0 | 0.627753 | 0 | 0 | 0.003336 | 0 | 0 | 0 | 0 | 0 | 0.301762 | 1 | 0.03304 | false | 0 | 0.015419 | 0 | 0.050661 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 6 |
91491ec4442c6b9fe4eeddd1da12213448a49330 | 3,000 | py | Python | aliyun-python-sdk-core/tests/endpoint/test_local_config_regional_endpoint_resolver.py | jia-jerry/aliyun-openapi-python-sdk | e90f3683a250cfec5b681b5f1d73a68f0dc9970d | [
"Apache-2.0"
] | null | null | null | aliyun-python-sdk-core/tests/endpoint/test_local_config_regional_endpoint_resolver.py | jia-jerry/aliyun-openapi-python-sdk | e90f3683a250cfec5b681b5f1d73a68f0dc9970d | [
"Apache-2.0"
] | null | null | null | aliyun-python-sdk-core/tests/endpoint/test_local_config_regional_endpoint_resolver.py | jia-jerry/aliyun-openapi-python-sdk | e90f3683a250cfec5b681b5f1d73a68f0dc9970d | [
"Apache-2.0"
] | null | null | null | # coding=utf-8
from tests import unittest
from aliyunsdkcore.endpoint.local_config_regional_endpoint_resolver \
import LocalConfigRegionalEndpointResolver
from aliyunsdkcore.endpoint.resolver_endpoint_request import ResolveEndpointRequest
class TestLocalConfigRegionalEndpointResolver(unittest.TestCase):
def test_resolver(self):
resolver = LocalConfigRegionalEndpointResolver()
request = ResolveEndpointRequest("", "", "", "")
self.assertEqual(resolver.resolve(request), None)
self.assertEqual(resolver._make_endpoint_entry_key(
"ecs", "cn-huhehaote"), "ecs.cn-huhehaote")
request = ResolveEndpointRequest("cn-huhehaote", "ecs", "", "")
self.assertEqual(resolver.resolve(request),
'ecs.cn-huhehaote.aliyuncs.com')
self.assertTrue(resolver.is_region_id_valid(request))
# resolver.put_endpoint_entry("ecs", "my-endpoint-for-cnhuhehaote-ecs")
# request = ResolveEndpointRequest("cn-huhehaote", "ecs", "", "")
# self.assertEqual(resolver.resolve(request), "my-endpoint-for-cnhuhehaote-ecs")
# self.assertTrue(resolver.is_region_id_valid(request))
request = ResolveEndpointRequest("cn-huhehaote", "ecs", "", "innerAPI")
self.assertEqual(resolver.resolve(request), None)
# _get_normalized_product_code
self.assertEqual(resolver._get_normalized_product_code(
"cloudapi"), "apigateway")
self.assertEqual(resolver._get_normalized_product_code("ecs"), "ecs")
self.assertEqual(len(resolver.get_valid_region_ids_by_product('ecs')), 19)
self.assertIsNone(resolver.get_valid_region_ids_by_product('xxx'))
self.assertTrue(resolver.is_product_code_valid(request))
def test_resolver_with_jsonstr(self):
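# Constructed from an empty JSON document ("{}"), so the resolver starts
# with no built-in regional endpoint data; resolution only succeeds after
# entries are added explicitly via put_endpoint_entry below.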
resolver = LocalConfigRegionalEndpointResolver("{}")
request = ResolveEndpointRequest("", "", "", "")
self.assertEqual(resolver.resolve(request), None)
self.assertEqual(resolver._make_endpoint_entry_key(
"ecs", "cn-huhehaote"), "ecs.cn-huhehaote")
request = ResolveEndpointRequest("cn-huhehaote", "ecs", "", "")
self.assertEqual(resolver.resolve(request), None)
self.assertFalse(resolver.is_region_id_valid(request))
resolver.put_endpoint_entry(
"ecs.cn-huhehaote", "my-endpoint-for-cnhuhehaote-ecs")
request = ResolveEndpointRequest("cn-huhehaote", "ecs", "", "")
self.assertEqual(resolver.resolve(request),
"my-endpoint-for-cnhuhehaote-ecs")
self.assertFalse(resolver.is_region_id_valid(request))
request = ResolveEndpointRequest("cn-huhehaote", "ecs", "", "innerAPI")
self.assertEqual(resolver.resolve(request), None)
# _get_normalized_product_code
self.assertEqual(resolver._get_normalized_product_code(
"cloudapi"), "cloudapi")
self.assertEqual(resolver._get_normalized_product_code("ecs"), "ecs")
| 50.847458 | 88 | 0.693667 | 293 | 3,000 | 6.860068 | 0.197952 | 0.11194 | 0.160199 | 0.119403 | 0.774129 | 0.774129 | 0.774129 | 0.738308 | 0.713433 | 0.660697 | 0 | 0.00122 | 0.18 | 3,000 | 58 | 89 | 51.724138 | 0.815854 | 0.112333 | 0 | 0.55814 | 0 | 0 | 0.118267 | 0.034275 | 0 | 0 | 0 | 0 | 0.44186 | 1 | 0.046512 | false | 0 | 0.069767 | 0 | 0.139535 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 6 |
e6ce6313af6cc9f6bc323ad7ca06be64475ca66b | 34 | py | Python | sigpy/learn/__init__.py | davidyzeng/sigpy | 56f8eb9be57b5a80e53ae09f2ba0802586fe69bc | [
"BSD-3-Clause"
] | null | null | null | sigpy/learn/__init__.py | davidyzeng/sigpy | 56f8eb9be57b5a80e53ae09f2ba0802586fe69bc | [
"BSD-3-Clause"
] | null | null | null | sigpy/learn/__init__.py | davidyzeng/sigpy | 56f8eb9be57b5a80e53ae09f2ba0802586fe69bc | [
"BSD-3-Clause"
] | null | null | null | from sigpy.learn import app, util
| 17 | 33 | 0.794118 | 6 | 34 | 4.5 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.147059 | 34 | 1 | 34 | 34 | 0.931034 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 1 | 1 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 6 |
e6ed4c01e26964e3f91f1eed15ed260f7db339c9 | 26 | py | Python | src/senjyu/ml/clustering/__init__.py | Koukyosyumei/Senjyu | 70faa45e13cb3b1ccdee8a40146a03d60abe11e5 | [
"Apache-2.0"
] | null | null | null | src/senjyu/ml/clustering/__init__.py | Koukyosyumei/Senjyu | 70faa45e13cb3b1ccdee8a40146a03d60abe11e5 | [
"Apache-2.0"
] | null | null | null | src/senjyu/ml/clustering/__init__.py | Koukyosyumei/Senjyu | 70faa45e13cb3b1ccdee8a40146a03d60abe11e5 | [
"Apache-2.0"
] | null | null | null | from .kmeans import Kmeans | 26 | 26 | 0.846154 | 4 | 26 | 5.5 | 0.75 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.115385 | 26 | 1 | 26 | 26 | 0.956522 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 1 | 1 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 6 |
fc3f985358f9753b669ecba795d9b65aaf590629 | 1,794 | py | Python | src/genie/libs/parser/iosxe/tests/ShowIsisFlexAlgo/cli/equal/golden_output4_expected.py | jacobgarder/genieparser | cc19fcd2f6248d3b08ca8cb35e77c9a6dca50d68 | [
"Apache-2.0"
] | 204 | 2018-06-27T00:55:27.000Z | 2022-03-06T21:12:18.000Z | src/genie/libs/parser/iosxe/tests/ShowIsisFlexAlgo/cli/equal/golden_output4_expected.py | jacobgarder/genieparser | cc19fcd2f6248d3b08ca8cb35e77c9a6dca50d68 | [
"Apache-2.0"
] | 468 | 2018-06-19T00:33:18.000Z | 2022-03-31T23:23:35.000Z | src/genie/libs/parser/iosxe/tests/ShowIsisFlexAlgo/cli/equal/golden_output4_expected.py | jacobgarder/genieparser | cc19fcd2f6248d3b08ca8cb35e77c9a6dca50d68 | [
"Apache-2.0"
] | 309 | 2019-01-16T20:21:07.000Z | 2022-03-30T12:56:41.000Z | expected_output = {
"tag": {
"1": {
"flex_algo": {
"128": {
"level": {
"2": {
"def_priority": 131,
"def_source": "asr1k-25.00",
"def_equal_to_local": False,
"def_metric_type": "IGP",
"def_include_all_affinity": [
"0x00000001",
"0x00800000",
"0x00000000",
"0x00000000",
"0x00000000",
"0x00000000",
"0x00000000",
"0x40000000"
],
"def_exclude_any_affinity": [
"0x00000000",
"0x00000000",
"0x00000000",
"0x00000000",
"0x00000000",
"0x00000000",
"0x00000200"
],
"def_include_any_affinity": [
"0x00000002"
],
"def_prefix_metric": True,
"disabled": False,
"microloop_avoidance_timer_running": False
}
},
"local_priority": 128,
"frr_disabled": False,
"microloop_avoidance_disabled": False
}
}
}
}
} | 39 | 70 | 0.269231 | 75 | 1,794 | 6.08 | 0.56 | 0.394737 | 0.460526 | 0.438596 | 0.241228 | 0 | 0 | 0 | 0 | 0 | 0 | 0.25641 | 0.652174 | 1,794 | 46 | 71 | 39 | 0.474359 | 0 | 0 | 0.304348 | 0 | 0 | 0.24234 | 0.074095 | 0 | 0 | 0.089136 | 0 | 0 | 1 | 0 | false | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 1 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 6 |
fc82ccc5c2fd9e75fd690e9b8e9f98b40f7cd6cc | 35 | py | Python | scratchpad/beta.py | bvraghav/argparse_tree | 93153c6339ddbbaaafb996f0ab0c56d1f5988fed | [
"MIT"
] | 1 | 2021-11-18T06:45:10.000Z | 2021-11-18T06:45:10.000Z | scratchpad/beta.py | bvraghav/argparse_tree | 93153c6339ddbbaaafb996f0ab0c56d1f5988fed | [
"MIT"
] | 3 | 2020-08-01T16:21:11.000Z | 2020-10-24T02:53:49.000Z | scratchpad/beta.py | bvraghav/argparse_tree | 93153c6339ddbbaaafb996f0ab0c56d1f5988fed | [
"MIT"
] | null | null | null | from alpha import A
print(A().p)
| 8.75 | 19 | 0.657143 | 7 | 35 | 3.285714 | 0.857143 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.2 | 35 | 3 | 20 | 11.666667 | 0.821429 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 0.5 | 0 | 0.5 | 0.5 | 1 | 1 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 0 | 1 | 0 | 6 |
fc93f5b48a76f44bd35a395b044a9ac50f097770 | 183 | py | Python | legal_advice_builder/permissions.py | prototypefund/django-legal-advice-builder | 081987d803f9ab38f8ac8dfc327f711dd48f0759 | [
"MIT"
] | 4 | 2021-07-22T10:16:49.000Z | 2022-01-27T16:41:55.000Z | legal_advice_builder/permissions.py | prototypefund/django-legal-advice-builder | 081987d803f9ab38f8ac8dfc327f711dd48f0759 | [
"MIT"
] | 10 | 2021-08-29T11:37:17.000Z | 2022-03-22T18:20:21.000Z | legal_advice_builder/permissions.py | prototypefund/django-legal-advice-builder | 081987d803f9ab38f8ac8dfc327f711dd48f0759 | [
"MIT"
] | 1 | 2022-02-14T09:41:34.000Z | 2022-02-14T09:41:34.000Z | from django.contrib.auth.mixins import UserPassesTestMixin
class DefaultAccessToAdminMixin(UserPassesTestMixin):
def test_func(self):
return self.request.user.is_staff
| 22.875 | 58 | 0.797814 | 20 | 183 | 7.2 | 0.9 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.136612 | 183 | 7 | 59 | 26.142857 | 0.911392 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.25 | false | 0.5 | 0.25 | 0.25 | 1 | 0 | 1 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 1 | 0 | 0 | 6 |
fc9e79600b07af0b5a85aa425254a056825ed63d | 26,055 | py | Python | pybind/slxos/v16r_1_00b/routing_system/router/isis/router_isis_cmds_holder/address_family/ipv6/af_ipv6_unicast/af_ipv6_attributes/af_common_attributes/redistribute/connected/__init__.py | shivharis/pybind | 4e1c6d54b9fd722ccec25546ba2413d79ce337e6 | [
"Apache-2.0"
] | null | null | null | pybind/slxos/v16r_1_00b/routing_system/router/isis/router_isis_cmds_holder/address_family/ipv6/af_ipv6_unicast/af_ipv6_attributes/af_common_attributes/redistribute/connected/__init__.py | shivharis/pybind | 4e1c6d54b9fd722ccec25546ba2413d79ce337e6 | [
"Apache-2.0"
] | null | null | null | pybind/slxos/v16r_1_00b/routing_system/router/isis/router_isis_cmds_holder/address_family/ipv6/af_ipv6_unicast/af_ipv6_attributes/af_common_attributes/redistribute/connected/__init__.py | shivharis/pybind | 4e1c6d54b9fd722ccec25546ba2413d79ce337e6 | [
"Apache-2.0"
] | 1 | 2021-11-05T22:15:42.000Z | 2021-11-05T22:15:42.000Z |
from operator import attrgetter
import pyangbind.lib.xpathhelper as xpathhelper
from pyangbind.lib.yangtypes import RestrictedPrecisionDecimalType, RestrictedClassType, TypedListType
from pyangbind.lib.yangtypes import YANGBool, YANGListType, YANGDynClass, ReferenceType
from pyangbind.lib.base import PybindBase
from decimal import Decimal
from bitarray import bitarray
import __builtin__
class connected(PybindBase):
"""
This class was auto-generated by the PythonClass plugin for PYANG
from YANG module brocade-common-def - based on the path /routing-system/router/isis/router-isis-cmds-holder/address-family/ipv6/af-ipv6-unicast/af-ipv6-attributes/af-common-attributes/redistribute/connected. Each member element of
the container is represented as a class variable - with a specific
YANG type.
"""
__slots__ = ('_pybind_generated_by', '_path_helper', '_yang_name', '_rest_name', '_extmethods', '__connected_metric','__connected_route_map','__connected_level1','__connected_level2','__connected_level12','__connected_metric_type',)
_yang_name = 'connected'
_rest_name = 'connected'
_pybind_generated_by = 'container'
def __init__(self, *args, **kwargs):
path_helper_ = kwargs.pop("path_helper", None)
if path_helper_ is False:
self._path_helper = False
elif path_helper_ is not None and isinstance(path_helper_, xpathhelper.YANGPathHelper):
self._path_helper = path_helper_
elif hasattr(self, "_parent"):
path_helper_ = getattr(self._parent, "_path_helper", False)
self._path_helper = path_helper_
else:
self._path_helper = False
extmethods = kwargs.pop("extmethods", None)
if extmethods is False:
self._extmethods = False
elif extmethods is not None and isinstance(extmethods, dict):
self._extmethods = extmethods
elif hasattr(self, "_parent"):
extmethods = getattr(self._parent, "_extmethods", None)
self._extmethods = extmethods
else:
self._extmethods = False
self.__connected_metric = YANGDynClass(base=RestrictedClassType(base_type=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), restriction_dict={'range': [u'1..4261412863']}), is_leaf=True, yang_name="connected-metric", rest_name="metric", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Metric for redistributed routes', u'cli-full-command': None, u'alt-name': u'metric'}}, namespace='urn:brocade.com:mgmt:brocade-isis', defining_module='brocade-isis', yang_type='conn-metric', is_config=True)
self.__connected_level2 = YANGDynClass(base=YANGBool, is_leaf=True, yang_name="connected-level2", rest_name="level-2", parent=self, choice=(u'ch-connected-levels', u'ca-connected-level2'), path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-full-command': None, u'info': u'IS-IS Level-2 routes only', u'alt-name': u'level-2', u'cli-full-no': None}}, namespace='urn:brocade.com:mgmt:brocade-isis', defining_module='brocade-isis', yang_type='empty', is_config=True)
self.__connected_level1 = YANGDynClass(base=YANGBool, is_leaf=True, yang_name="connected-level1", rest_name="level-1", parent=self, choice=(u'ch-connected-levels', u'ca-connected-level1'), path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-full-command': None, u'info': u'IS-IS Level-1 routes only', u'alt-name': u'level-1', u'cli-full-no': None}}, namespace='urn:brocade.com:mgmt:brocade-isis', defining_module='brocade-isis', yang_type='empty', is_config=True)
self.__connected_level12 = YANGDynClass(base=YANGBool, is_leaf=True, yang_name="connected-level12", rest_name="level-1-2", parent=self, choice=(u'ch-connected-levels', u'ca-connected-level12'), path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-full-command': None, u'info': u'IS-IS Level-1-2 routes', u'alt-name': u'level-1-2', u'cli-full-no': None}}, namespace='urn:brocade.com:mgmt:brocade-isis', defining_module='brocade-isis', yang_type='empty', is_config=True)
self.__connected_route_map = YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_dict={'length': [u'1..63']}), is_leaf=True, yang_name="connected-route-map", rest_name="route-map", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Route map reference', u'cli-full-command': None, u'alt-name': u'route-map'}}, namespace='urn:brocade.com:mgmt:brocade-isis', defining_module='brocade-isis', yang_type='rmap-type', is_config=True)
self.__connected_metric_type = YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_type="dict_key", restriction_arg={u'internal': {'value': 1}, u'external': {'value': 2}},), default=unicode("internal"), is_leaf=True, yang_name="connected-metric-type", rest_name="metric-type", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'IS-IS metric type for redistributed routes', u'alt-name': u'metric-type'}}, namespace='urn:brocade.com:mgmt:brocade-isis', defining_module='brocade-isis', yang_type='is-metric-type-t', is_config=True)
load = kwargs.pop("load", None)
if args:
if len(args) > 1:
raise TypeError("cannot create a YANG container with >1 argument")
all_attr = True
for e in self._pyangbind_elements:
if not hasattr(args[0], e):
all_attr = False
break
if not all_attr:
raise ValueError("Supplied object did not have the correct attributes")
for e in self._pyangbind_elements:
nobj = getattr(args[0], e)
if nobj._changed() is False:
continue
setmethod = getattr(self, "_set_%s" % e)
if load is None:
setmethod(getattr(args[0], e))
else:
setmethod(getattr(args[0], e), load=load)
def _path(self):
if hasattr(self, "_parent"):
return self._parent._path()+[self._yang_name]
else:
return [u'routing-system', u'router', u'isis', u'router-isis-cmds-holder', u'address-family', u'ipv6', u'af-ipv6-unicast', u'af-ipv6-attributes', u'af-common-attributes', u'redistribute', u'connected']
def _rest_path(self):
if hasattr(self, "_parent"):
if self._rest_name:
return self._parent._rest_path()+[self._rest_name]
else:
return self._parent._rest_path()
else:
return [u'router', u'isis', u'address-family', u'ipv6', u'unicast', u'redistribute', u'connected']
def _get_connected_metric(self):
"""
Getter method for connected_metric, mapped from YANG variable /routing_system/router/isis/router_isis_cmds_holder/address_family/ipv6/af_ipv6_unicast/af_ipv6_attributes/af_common_attributes/redistribute/connected/connected_metric (conn-metric)
"""
return self.__connected_metric
def _set_connected_metric(self, v, load=False):
"""
Setter method for connected_metric, mapped from YANG variable /routing_system/router/isis/router_isis_cmds_holder/address_family/ipv6/af_ipv6_unicast/af_ipv6_attributes/af_common_attributes/redistribute/connected/connected_metric (conn-metric)
If this variable is read-only (config: false) in the
source YANG file, then _set_connected_metric is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_connected_metric() directly.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=RestrictedClassType(base_type=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), restriction_dict={'range': [u'1..4261412863']}), is_leaf=True, yang_name="connected-metric", rest_name="metric", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Metric for redistributed routes', u'cli-full-command': None, u'alt-name': u'metric'}}, namespace='urn:brocade.com:mgmt:brocade-isis', defining_module='brocade-isis', yang_type='conn-metric', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """connected_metric must be of a type compatible with conn-metric""",
'defined-type': "brocade-isis:conn-metric",
'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), restriction_dict={'range': [u'1..4261412863']}), is_leaf=True, yang_name="connected-metric", rest_name="metric", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Metric for redistributed routes', u'cli-full-command': None, u'alt-name': u'metric'}}, namespace='urn:brocade.com:mgmt:brocade-isis', defining_module='brocade-isis', yang_type='conn-metric', is_config=True)""",
})
self.__connected_metric = t
if hasattr(self, '_set'):
self._set()
def _unset_connected_metric(self):
self.__connected_metric = YANGDynClass(base=RestrictedClassType(base_type=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), restriction_dict={'range': [u'1..4261412863']}), is_leaf=True, yang_name="connected-metric", rest_name="metric", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Metric for redistributed routes', u'cli-full-command': None, u'alt-name': u'metric'}}, namespace='urn:brocade.com:mgmt:brocade-isis', defining_module='brocade-isis', yang_type='conn-metric', is_config=True)
def _get_connected_route_map(self):
"""
Getter method for connected_route_map, mapped from YANG variable /routing_system/router/isis/router_isis_cmds_holder/address_family/ipv6/af_ipv6_unicast/af_ipv6_attributes/af_common_attributes/redistribute/connected/connected_route_map (rmap-type)
"""
return self.__connected_route_map
def _set_connected_route_map(self, v, load=False):
"""
Setter method for connected_route_map, mapped from YANG variable /routing_system/router/isis/router_isis_cmds_holder/address_family/ipv6/af_ipv6_unicast/af_ipv6_attributes/af_common_attributes/redistribute/connected/connected_route_map (rmap-type)
If this variable is read-only (config: false) in the
source YANG file, then _set_connected_route_map is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_connected_route_map() directly.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=RestrictedClassType(base_type=unicode, restriction_dict={'length': [u'1..63']}), is_leaf=True, yang_name="connected-route-map", rest_name="route-map", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Route map reference', u'cli-full-command': None, u'alt-name': u'route-map'}}, namespace='urn:brocade.com:mgmt:brocade-isis', defining_module='brocade-isis', yang_type='rmap-type', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """connected_route_map must be of a type compatible with rmap-type""",
'defined-type': "brocade-isis:rmap-type",
'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_dict={'length': [u'1..63']}), is_leaf=True, yang_name="connected-route-map", rest_name="route-map", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Route map reference', u'cli-full-command': None, u'alt-name': u'route-map'}}, namespace='urn:brocade.com:mgmt:brocade-isis', defining_module='brocade-isis', yang_type='rmap-type', is_config=True)""",
})
self.__connected_route_map = t
if hasattr(self, '_set'):
self._set()
def _unset_connected_route_map(self):
self.__connected_route_map = YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_dict={'length': [u'1..63']}), is_leaf=True, yang_name="connected-route-map", rest_name="route-map", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Route map reference', u'cli-full-command': None, u'alt-name': u'route-map'}}, namespace='urn:brocade.com:mgmt:brocade-isis', defining_module='brocade-isis', yang_type='rmap-type', is_config=True)
def _get_connected_level1(self):
"""
Getter method for connected_level1, mapped from YANG variable /routing_system/router/isis/router_isis_cmds_holder/address_family/ipv6/af_ipv6_unicast/af_ipv6_attributes/af_common_attributes/redistribute/connected/connected_level1 (empty)
"""
return self.__connected_level1
def _set_connected_level1(self, v, load=False):
"""
Setter method for connected_level1, mapped from YANG variable /routing_system/router/isis/router_isis_cmds_holder/address_family/ipv6/af_ipv6_unicast/af_ipv6_attributes/af_common_attributes/redistribute/connected/connected_level1 (empty)
If this variable is read-only (config: false) in the
source YANG file, then _set_connected_level1 is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_connected_level1() directly.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=YANGBool, is_leaf=True, yang_name="connected-level1", rest_name="level-1", parent=self, choice=(u'ch-connected-levels', u'ca-connected-level1'), path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-full-command': None, u'info': u'IS-IS Level-1 routes only', u'alt-name': u'level-1', u'cli-full-no': None}}, namespace='urn:brocade.com:mgmt:brocade-isis', defining_module='brocade-isis', yang_type='empty', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """connected_level1 must be of a type compatible with empty""",
'defined-type': "empty",
'generated-type': """YANGDynClass(base=YANGBool, is_leaf=True, yang_name="connected-level1", rest_name="level-1", parent=self, choice=(u'ch-connected-levels', u'ca-connected-level1'), path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-full-command': None, u'info': u'IS-IS Level-1 routes only', u'alt-name': u'level-1', u'cli-full-no': None}}, namespace='urn:brocade.com:mgmt:brocade-isis', defining_module='brocade-isis', yang_type='empty', is_config=True)""",
})
self.__connected_level1 = t
if hasattr(self, '_set'):
self._set()
def _unset_connected_level1(self):
self.__connected_level1 = YANGDynClass(base=YANGBool, is_leaf=True, yang_name="connected-level1", rest_name="level-1", parent=self, choice=(u'ch-connected-levels', u'ca-connected-level1'), path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-full-command': None, u'info': u'IS-IS Level-1 routes only', u'alt-name': u'level-1', u'cli-full-no': None}}, namespace='urn:brocade.com:mgmt:brocade-isis', defining_module='brocade-isis', yang_type='empty', is_config=True)
def _get_connected_level2(self):
"""
Getter method for connected_level2, mapped from YANG variable /routing_system/router/isis/router_isis_cmds_holder/address_family/ipv6/af_ipv6_unicast/af_ipv6_attributes/af_common_attributes/redistribute/connected/connected_level2 (empty)
"""
return self.__connected_level2
def _set_connected_level2(self, v, load=False):
"""
Setter method for connected_level2, mapped from YANG variable /routing_system/router/isis/router_isis_cmds_holder/address_family/ipv6/af_ipv6_unicast/af_ipv6_attributes/af_common_attributes/redistribute/connected/connected_level2 (empty)
If this variable is read-only (config: false) in the
source YANG file, then _set_connected_level2 is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_connected_level2() directly.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=YANGBool, is_leaf=True, yang_name="connected-level2", rest_name="level-2", parent=self, choice=(u'ch-connected-levels', u'ca-connected-level2'), path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-full-command': None, u'info': u'IS-IS Level-2 routes only', u'alt-name': u'level-2', u'cli-full-no': None}}, namespace='urn:brocade.com:mgmt:brocade-isis', defining_module='brocade-isis', yang_type='empty', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """connected_level2 must be of a type compatible with empty""",
'defined-type': "empty",
'generated-type': """YANGDynClass(base=YANGBool, is_leaf=True, yang_name="connected-level2", rest_name="level-2", parent=self, choice=(u'ch-connected-levels', u'ca-connected-level2'), path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-full-command': None, u'info': u'IS-IS Level-2 routes only', u'alt-name': u'level-2', u'cli-full-no': None}}, namespace='urn:brocade.com:mgmt:brocade-isis', defining_module='brocade-isis', yang_type='empty', is_config=True)""",
})
self.__connected_level2 = t
if hasattr(self, '_set'):
self._set()
def _unset_connected_level2(self):
self.__connected_level2 = YANGDynClass(base=YANGBool, is_leaf=True, yang_name="connected-level2", rest_name="level-2", parent=self, choice=(u'ch-connected-levels', u'ca-connected-level2'), path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-full-command': None, u'info': u'IS-IS Level-2 routes only', u'alt-name': u'level-2', u'cli-full-no': None}}, namespace='urn:brocade.com:mgmt:brocade-isis', defining_module='brocade-isis', yang_type='empty', is_config=True)
def _get_connected_level12(self):
"""
Getter method for connected_level12, mapped from YANG variable /routing_system/router/isis/router_isis_cmds_holder/address_family/ipv6/af_ipv6_unicast/af_ipv6_attributes/af_common_attributes/redistribute/connected/connected_level12 (empty)
"""
return self.__connected_level12
def _set_connected_level12(self, v, load=False):
"""
Setter method for connected_level12, mapped from YANG variable /routing_system/router/isis/router_isis_cmds_holder/address_family/ipv6/af_ipv6_unicast/af_ipv6_attributes/af_common_attributes/redistribute/connected/connected_level12 (empty)
If this variable is read-only (config: false) in the
source YANG file, then _set_connected_level12 is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_connected_level12() directly.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=YANGBool, is_leaf=True, yang_name="connected-level12", rest_name="level-1-2", parent=self, choice=(u'ch-connected-levels', u'ca-connected-level12'), path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-full-command': None, u'info': u'IS-IS Level-1-2 routes', u'alt-name': u'level-1-2', u'cli-full-no': None}}, namespace='urn:brocade.com:mgmt:brocade-isis', defining_module='brocade-isis', yang_type='empty', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """connected_level12 must be of a type compatible with empty""",
'defined-type': "empty",
'generated-type': """YANGDynClass(base=YANGBool, is_leaf=True, yang_name="connected-level12", rest_name="level-1-2", parent=self, choice=(u'ch-connected-levels', u'ca-connected-level12'), path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-full-command': None, u'info': u'IS-IS Level-1-2 routes', u'alt-name': u'level-1-2', u'cli-full-no': None}}, namespace='urn:brocade.com:mgmt:brocade-isis', defining_module='brocade-isis', yang_type='empty', is_config=True)""",
})
self.__connected_level12 = t
if hasattr(self, '_set'):
self._set()
def _unset_connected_level12(self):
self.__connected_level12 = YANGDynClass(base=YANGBool, is_leaf=True, yang_name="connected-level12", rest_name="level-1-2", parent=self, choice=(u'ch-connected-levels', u'ca-connected-level12'), path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-full-command': None, u'info': u'IS-IS Level-1-2 routes', u'alt-name': u'level-1-2', u'cli-full-no': None}}, namespace='urn:brocade.com:mgmt:brocade-isis', defining_module='brocade-isis', yang_type='empty', is_config=True)
def _get_connected_metric_type(self):
"""
Getter method for connected_metric_type, mapped from YANG variable /routing_system/router/isis/router_isis_cmds_holder/address_family/ipv6/af_ipv6_unicast/af_ipv6_attributes/af_common_attributes/redistribute/connected/connected_metric_type (is-metric-type-t)
"""
return self.__connected_metric_type
def _set_connected_metric_type(self, v, load=False):
"""
Setter method for connected_metric_type, mapped from YANG variable /routing_system/router/isis/router_isis_cmds_holder/address_family/ipv6/af_ipv6_unicast/af_ipv6_attributes/af_common_attributes/redistribute/connected/connected_metric_type (is-metric-type-t)
If this variable is read-only (config: false) in the
source YANG file, then _set_connected_metric_type is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_connected_metric_type() directly.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=RestrictedClassType(base_type=unicode, restriction_type="dict_key", restriction_arg={u'internal': {'value': 1}, u'external': {'value': 2}},), default=unicode("internal"), is_leaf=True, yang_name="connected-metric-type", rest_name="metric-type", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'IS-IS metric type for redistributed routes', u'alt-name': u'metric-type'}}, namespace='urn:brocade.com:mgmt:brocade-isis', defining_module='brocade-isis', yang_type='is-metric-type-t', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """connected_metric_type must be of a type compatible with is-metric-type-t""",
'defined-type': "brocade-isis:is-metric-type-t",
'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_type="dict_key", restriction_arg={u'internal': {'value': 1}, u'external': {'value': 2}},), default=unicode("internal"), is_leaf=True, yang_name="connected-metric-type", rest_name="metric-type", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'IS-IS metric type for redistributed routes', u'alt-name': u'metric-type'}}, namespace='urn:brocade.com:mgmt:brocade-isis', defining_module='brocade-isis', yang_type='is-metric-type-t', is_config=True)""",
})
self.__connected_metric_type = t
if hasattr(self, '_set'):
self._set()
def _unset_connected_metric_type(self):
self.__connected_metric_type = YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_type="dict_key", restriction_arg={u'internal': {'value': 1}, u'external': {'value': 2}},), default=unicode("internal"), is_leaf=True, yang_name="connected-metric-type", rest_name="metric-type", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'IS-IS metric type for redistributed routes', u'alt-name': u'metric-type'}}, namespace='urn:brocade.com:mgmt:brocade-isis', defining_module='brocade-isis', yang_type='is-metric-type-t', is_config=True)
connected_metric = __builtin__.property(_get_connected_metric, _set_connected_metric)
connected_route_map = __builtin__.property(_get_connected_route_map, _set_connected_route_map)
connected_level1 = __builtin__.property(_get_connected_level1, _set_connected_level1)
connected_level2 = __builtin__.property(_get_connected_level2, _set_connected_level2)
connected_level12 = __builtin__.property(_get_connected_level12, _set_connected_level12)
connected_metric_type = __builtin__.property(_get_connected_metric_type, _set_connected_metric_type)
__choices__ = {u'ch-connected-levels': {u'ca-connected-level12': [u'connected_level12'], u'ca-connected-level2': [u'connected_level2'], u'ca-connected-level1': [u'connected_level1']}}
_pyangbind_elements = {'connected_metric': connected_metric, 'connected_route_map': connected_route_map, 'connected_level1': connected_level1, 'connected_level2': connected_level2, 'connected_level12': connected_level12, 'connected_metric_type': connected_metric_type, }
| 87.432886 | 705 | 0.739014 | 3,660 | 26,055 | 5.01694 | 0.057104 | 0.033765 | 0.030498 | 0.018299 | 0.860963 | 0.825237 | 0.806775 | 0.799967 | 0.793595 | 0.790219 | 0 | 0.014546 | 0.121359 | 26,055 | 297 | 706 | 87.727273 | 0.787533 | 0.183612 | 0 | 0.439153 | 0 | 0.031746 | 0.39487 | 0.135167 | 0 | 0 | 0 | 0 | 0 | 1 | 0.111111 | false | 0 | 0.042328 | 0 | 0.280423 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 6 |
b90516859a9c51952c30bb00a1eee4c927bbd41f | 104 | py | Python | cfltools/__init__.py | bradley-evans/cfltools | 940014313063c97875a2fe1085cbfe392cb3ec44 | [
"MIT"
] | 8 | 2018-07-26T02:32:33.000Z | 2022-02-18T00:55:32.000Z | cfltools/__init__.py | bradley-evans/cfltools | 940014313063c97875a2fe1085cbfe392cb3ec44 | [
"MIT"
] | 3 | 2018-07-23T17:13:45.000Z | 2018-07-31T19:57:43.000Z | cfltools/__init__.py | bradley-evans/cfltools | 940014313063c97875a2fe1085cbfe392cb3ec44 | [
"MIT"
] | 1 | 2019-10-06T23:20:17.000Z | 2019-10-06T23:20:17.000Z | """
Highest level interface objects.
"""
from .objects import Session
from .objects import CLISession
| 13 | 32 | 0.759615 | 12 | 104 | 6.583333 | 0.666667 | 0.278481 | 0.43038 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.153846 | 104 | 7 | 33 | 14.857143 | 0.897727 | 0.307692 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | null | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 0 | 0 | 0 | 6 |
b9108c7b07cf1f2d0b31dc57f6aacf4949fd410d | 47 | py | Python | openhab_creator/output/__init__.py | DerOetzi/openhab_creator | 197876df5aae84192c34418f6b9a7cfcee23b195 | [
"MIT"
] | 1 | 2021-11-16T22:48:26.000Z | 2021-11-16T22:48:26.000Z | openhab_creator/output/__init__.py | DerOetzi/openhab_creator | 197876df5aae84192c34418f6b9a7cfcee23b195 | [
"MIT"
] | null | null | null | openhab_creator/output/__init__.py | DerOetzi/openhab_creator | 197876df5aae84192c34418f6b9a7cfcee23b195 | [
"MIT"
] | null | null | null | from openhab_creator.output.color import Color
| 23.5 | 46 | 0.87234 | 7 | 47 | 5.714286 | 0.857143 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.085106 | 47 | 1 | 47 | 47 | 0.930233 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 1 | 1 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 6 |
5d1fd4bba9a3eda63da46d3dba9c554b58ced53a | 9,026 | py | Python | test/swig/Gemm.py | NixonZ/dnnCompiler | 1f3c89248e279c6b5625cd8cb134a4c718eb7764 | [
"Apache-2.0"
] | 1 | 2019-08-19T05:35:07.000Z | 2019-08-19T05:35:07.000Z | test/swig/Gemm.py | SubhamIO/dnnCompiler | a9df5ab0eefe0f48a1416fe504f50e2bf71aeecc | [
"Apache-2.0"
] | null | null | null | test/swig/Gemm.py | SubhamIO/dnnCompiler | a9df5ab0eefe0f48a1416fe504f50e2bf71aeecc | [
"Apache-2.0"
] | null | null | null | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License") you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name, unused-argument
#
# This file is part of DNN compiler maintained at
# https://github.com/ai-techsystems/dnnCompiler
import common
import dnnc as dc
import numpy as np
import unittest
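# Reference implementation used as the oracle below, following the
# ONNX-style Gemm definition: Y = alpha * op(A) @ op(B) + beta * C,
# where op transposes its argument when the corresponding trans flag is 1.
# Illustrative check (not part of the test suite):
#   temp_gemm(np.eye(2), np.ones((2, 2)), np.zeros((2, 2)), 1.0, 0.0, 0, 0)
# evaluates to a (2, 2) array of ones, since alpha * I @ ones == ones.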
def temp_gemm(np_a, np_b, np_c, alpha, beta, transA, transB):
np_a = np_a.T if (transA==1) else np_a
np_b = np_b.T if (transB==1) else np_b
y = (alpha * np.dot(np_a, np_b)) + (beta * np_c)
return y
class GemmTest(unittest.TestCase):
def setUp(self):
self.len_a_b = 48
self.len_c = 64
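# 48 = 8 * 6 elements reshape into the (8, 6)/(6, 8) operands used below;
# 64 = 8 * 8 elements form the bias matrix C.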
self.alpha = 0.5
self.beta = 0.5
self.np_float_a = np.random.randn(self.len_a_b).astype(np.float32)
self.np_float_b = np.random.randn(self.len_a_b).astype(np.float32)
self.np_float_c = np.random.randn(self.len_c).astype(np.float32)
self.dc_float_a = dc.array(list(self.np_float_a))
self.dc_float_b = dc.array(list(self.np_float_b))
self.dc_float_c = dc.array(list(self.np_float_c))
self.np_double_a = np.random.randn(self.len_a_b).astype(np.double)
self.np_double_b = np.random.randn(self.len_a_b).astype(np.double)
self.np_double_c = np.random.randn(self.len_c).astype(np.double)
self.dc_double_a = dc.array(list(self.np_double_a))
self.dc_double_b = dc.array(list(self.np_double_b))
self.dc_double_c = dc.array(list(self.np_double_c))
    # Gemm by default takes 2D tensors only.
    def test_Gemm2D_float_1(self):
        shape_a = (8, 6)
        shape_b = (6, 8)
        shape_c = (8, 8)
        transA = 0
        transB = 0
        np_float_a = np.reshape(self.np_float_a, shape_a)
        np_float_b = np.reshape(self.np_float_b, shape_b)
        np_float_c = np.reshape(self.np_float_c, shape_c)
        dc_float_a = dc.reshape(self.dc_float_a, shape_a)
        dc_float_b = dc.reshape(self.dc_float_b, shape_b)
        dc_float_c = dc.reshape(self.dc_float_c, shape_c)
        npr = temp_gemm(np_float_a, np_float_b, np_float_c, self.alpha, self.beta, transA, transB)
        dcr = dc.gemm(dc_float_a, dc_float_b, dc_float_c, self.alpha, self.beta, transA, transB)
        np.testing.assert_allclose(npr.flatten(), np.array(dcr.data()).astype(np.float32),
                                   rtol=1e-3, atol=1e-3)

    def test_Gemm2D_float_2(self):
        shape_a = (8, 6)
        shape_b = (8, 6)
        shape_c = (8, 8)
        transA = 0
        transB = 1
        np_float_a = np.reshape(self.np_float_a, shape_a)
        np_float_b = np.reshape(self.np_float_b, shape_b)
        np_float_c = np.reshape(self.np_float_c, shape_c)
        dc_float_a = dc.reshape(self.dc_float_a, shape_a)
        dc_float_b = dc.reshape(self.dc_float_b, shape_b)
        dc_float_c = dc.reshape(self.dc_float_c, shape_c)
        npr = temp_gemm(np_float_a, np_float_b, np_float_c, self.alpha, self.beta, transA, transB)
        dcr = dc.gemm(dc_float_a, dc_float_b, dc_float_c, self.alpha, self.beta, transA, transB)
        np.testing.assert_allclose(npr.flatten(), np.array(dcr.data()).astype(np.float32),
                                   rtol=1e-3, atol=1e-3)

    def test_Gemm2D_float_3(self):
        shape_a = (6, 8)
        shape_b = (6, 8)
        shape_c = (8, 8)
        transA = 1
        transB = 0
        np_float_a = np.reshape(self.np_float_a, shape_a)
        np_float_b = np.reshape(self.np_float_b, shape_b)
        np_float_c = np.reshape(self.np_float_c, shape_c)
        dc_float_a = dc.reshape(self.dc_float_a, shape_a)
        dc_float_b = dc.reshape(self.dc_float_b, shape_b)
        dc_float_c = dc.reshape(self.dc_float_c, shape_c)
        npr = temp_gemm(np_float_a, np_float_b, np_float_c, self.alpha, self.beta, transA, transB)
        dcr = dc.gemm(dc_float_a, dc_float_b, dc_float_c, self.alpha, self.beta, transA, transB)
        np.testing.assert_allclose(npr.flatten(), np.array(dcr.data()).astype(np.float32),
                                   rtol=1e-3, atol=1e-3)

    def test_Gemm2D_float_4(self):
        shape_a = (6, 8)
        shape_b = (8, 6)
        shape_c = (8, 8)
        transA = 1
        transB = 1
        np_float_a = np.reshape(self.np_float_a, shape_a)
        np_float_b = np.reshape(self.np_float_b, shape_b)
        np_float_c = np.reshape(self.np_float_c, shape_c)
        dc_float_a = dc.reshape(self.dc_float_a, shape_a)
        dc_float_b = dc.reshape(self.dc_float_b, shape_b)
        dc_float_c = dc.reshape(self.dc_float_c, shape_c)
        npr = temp_gemm(np_float_a, np_float_b, np_float_c, self.alpha, self.beta, transA, transB)
        dcr = dc.gemm(dc_float_a, dc_float_b, dc_float_c, self.alpha, self.beta, transA, transB)
        np.testing.assert_allclose(npr.flatten(), np.array(dcr.data()).astype(np.float32),
                                   rtol=1e-3, atol=1e-3)
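    # Shape bookkeeping for the four cases above: after the optional
    # transposes, A' must be (M, K) and B' must be (K, N), so that
    # Y = alpha * A'B' + beta * C comes out (M, N). Here M = N = 8 and
    # K = 6, which is why shape_a and shape_b flip between (8, 6) and
    # (6, 8) as transA/transB change. The double-precision tests below
    # repeat the same four combinations.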
    def test_Gemm2D_double_1(self):
        shape_a = (8, 6)
        shape_b = (6, 8)
        shape_c = (8, 8)
        transA = 0
        transB = 0
        np_double_a = np.reshape(self.np_double_a, shape_a)
        np_double_b = np.reshape(self.np_double_b, shape_b)
        np_double_c = np.reshape(self.np_double_c, shape_c)
        dc_double_a = dc.reshape(self.dc_double_a, shape_a)
        dc_double_b = dc.reshape(self.dc_double_b, shape_b)
        dc_double_c = dc.reshape(self.dc_double_c, shape_c)
        npr = temp_gemm(np_double_a, np_double_b, np_double_c, self.alpha, self.beta, transA, transB)
        dcr = dc.gemm(dc_double_a, dc_double_b, dc_double_c, self.alpha, self.beta, transA, transB)
        np.testing.assert_allclose(npr.flatten(), np.array(dcr.data()).astype(np.double),
                                   rtol=1e-3, atol=1e-3)

    def test_Gemm2D_double_2(self):
        shape_a = (8, 6)
        shape_b = (8, 6)
        shape_c = (8, 8)
        transA = 0
        transB = 1
        np_double_a = np.reshape(self.np_double_a, shape_a)
        np_double_b = np.reshape(self.np_double_b, shape_b)
        np_double_c = np.reshape(self.np_double_c, shape_c)
        dc_double_a = dc.reshape(self.dc_double_a, shape_a)
        dc_double_b = dc.reshape(self.dc_double_b, shape_b)
        dc_double_c = dc.reshape(self.dc_double_c, shape_c)
        npr = temp_gemm(np_double_a, np_double_b, np_double_c, self.alpha, self.beta, transA, transB)
        dcr = dc.gemm(dc_double_a, dc_double_b, dc_double_c, self.alpha, self.beta, transA, transB)
        np.testing.assert_allclose(npr.flatten(), np.array(dcr.data()).astype(np.double),
                                   rtol=1e-3, atol=1e-3)

    def test_Gemm2D_double_3(self):
        shape_a = (6, 8)
        shape_b = (6, 8)
        shape_c = (8, 8)
        transA = 1
        transB = 0
        np_double_a = np.reshape(self.np_double_a, shape_a)
        np_double_b = np.reshape(self.np_double_b, shape_b)
        np_double_c = np.reshape(self.np_double_c, shape_c)
        dc_double_a = dc.reshape(self.dc_double_a, shape_a)
        dc_double_b = dc.reshape(self.dc_double_b, shape_b)
        dc_double_c = dc.reshape(self.dc_double_c, shape_c)
        npr = temp_gemm(np_double_a, np_double_b, np_double_c, self.alpha, self.beta, transA, transB)
        dcr = dc.gemm(dc_double_a, dc_double_b, dc_double_c, self.alpha, self.beta, transA, transB)
        np.testing.assert_allclose(npr.flatten(), np.array(dcr.data()).astype(np.double),
                                   rtol=1e-3, atol=1e-3)

    def test_Gemm2D_double_4(self):
        shape_a = (6, 8)
        shape_b = (8, 6)
        shape_c = (8, 8)
        transA = 1
        transB = 1
        np_double_a = np.reshape(self.np_double_a, shape_a)
        np_double_b = np.reshape(self.np_double_b, shape_b)
        np_double_c = np.reshape(self.np_double_c, shape_c)
        dc_double_a = dc.reshape(self.dc_double_a, shape_a)
        dc_double_b = dc.reshape(self.dc_double_b, shape_b)
        dc_double_c = dc.reshape(self.dc_double_c, shape_c)
        npr = temp_gemm(np_double_a, np_double_b, np_double_c, self.alpha, self.beta, transA, transB)
        dcr = dc.gemm(dc_double_a, dc_double_b, dc_double_c, self.alpha, self.beta, transA, transB)
        np.testing.assert_allclose(npr.flatten(), np.array(dcr.data()).astype(np.double),
                                   rtol=1e-3, atol=1e-3)
    def tearDown(self):
        # unittest ignores tearDown's return value; there is nothing to
        # clean up here, so this is effectively a no-op.
        return "test finished"
if __name__ == '__main__':
    unittest.main()
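
# To run a single case rather than the whole suite, unittest's CLI can be
# used; the module name below is an assumption based on this file's class:
#   python -m unittest Gemm.GemmTest.test_Gemm2D_float_1 -v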