Dataset schema:
  text:      string (length 12 to 1.05M)
  repo_name: string (length 5 to 86)
  path:      string (length 4 to 191)
  language:  string class (1 value)
  license:   string class (15 values)
  size:      int32 (12 to 1.05M)
  keyword:   list (length 1 to 23)
  text_hash: string (length 64)
# -*- coding: utf-8 -*- import time import unittest import logging import functools from nose.tools import * # flake8: noqa (PEP8 asserts) import mock from modularodm import Q from framework.auth.core import Auth from website import settings import website.search.search as search from website.search import elastic_search from website.search.util import build_query from website.search_migration.migrate import migrate from website.models import Retraction, NodeLicense, Tag from tests.base import OsfTestCase from tests.test_features import requires_search from tests.factories import ( UserFactory, ProjectFactory, NodeFactory, UnregUserFactory, UnconfirmedUserFactory, RegistrationFactory, NodeLicenseRecordFactory ) TEST_INDEX = 'test' @requires_search class SearchTestCase(OsfTestCase): def tearDown(self): super(SearchTestCase, self).tearDown() search.delete_index(elastic_search.INDEX) search.create_index(elastic_search.INDEX) def setUp(self): super(SearchTestCase, self).setUp() elastic_search.INDEX = TEST_INDEX settings.ELASTIC_INDEX = TEST_INDEX search.delete_index(elastic_search.INDEX) search.create_index(elastic_search.INDEX) def query(term): results = search.search(build_query(term), index=elastic_search.INDEX) return results def query_user(name): term = 'category:user AND "{}"'.format(name) return query(term) def query_file(name): term = 'category:file AND "{}"'.format(name) return query(term) def query_tag_file(name): term = 'category:file AND (tags:u"{}")'.format(name) return query(term) def retry_assertion(interval=0.3, retries=3): def test_wrapper(func): t_interval = interval t_retries = retries @functools.wraps(func) def wrapped(*args, **kwargs): try: func(*args, **kwargs) except AssertionError as e: if retries: time.sleep(t_interval) retry_assertion(interval=t_interval, retries=t_retries - 1)(func)(*args, **kwargs) else: raise e return wrapped return test_wrapper @requires_search class TestUserUpdate(SearchTestCase): def setUp(self): super(TestUserUpdate, self).setUp() search.delete_index(elastic_search.INDEX) search.create_index(elastic_search.INDEX) self.user = UserFactory(fullname='David Bowie') def test_new_user(self): # Verify that user has been added to Elastic Search docs = query_user(self.user.fullname)['results'] assert_equal(len(docs), 1) def test_new_user_unconfirmed(self): user = UnconfirmedUserFactory() docs = query_user(user.fullname)['results'] assert_equal(len(docs), 0) token = user.get_confirmation_token(user.username) user.confirm_email(token) user.save() docs = query_user(user.fullname)['results'] assert_equal(len(docs), 1) def test_change_name(self): # Add a user, change her name, and verify that only the new name is # found in search. 
user = UserFactory(fullname='Barry Mitchell') fullname_original = user.fullname user.fullname = user.fullname[::-1] user.save() docs_original = query_user(fullname_original)['results'] assert_equal(len(docs_original), 0) docs_current = query_user(user.fullname)['results'] assert_equal(len(docs_current), 1) def test_disabled_user(self): # Test that disabled users are not in search index user = UserFactory(fullname='Bettie Page') user.save() # Ensure user is in search index assert_equal(len(query_user(user.fullname)['results']), 1) # Disable the user user.is_disabled = True user.save() # Ensure user is not in search index assert_equal(len(query_user(user.fullname)['results']), 0) def test_merged_user(self): user = UserFactory(fullname='Annie Lennox') merged_user = UserFactory(fullname='Lisa Stansfield') user.save() merged_user.save() assert_equal(len(query_user(user.fullname)['results']), 1) assert_equal(len(query_user(merged_user.fullname)['results']), 1) user.merge_user(merged_user) assert_equal(len(query_user(user.fullname)['results']), 1) assert_equal(len(query_user(merged_user.fullname)['results']), 0) def test_employment(self): user = UserFactory(fullname='Helga Finn') user.save() institution = 'Finn\'s Fine Filers' docs = query_user(institution)['results'] assert_equal(len(docs), 0) user.jobs.append({ 'institution': institution, 'title': 'The Big Finn', }) user.save() docs = query_user(institution)['results'] assert_equal(len(docs), 1) def test_education(self): user = UserFactory(fullname='Henry Johnson') user.save() institution = 'Henry\'s Amazing School!!!' docs = query_user(institution)['results'] assert_equal(len(docs), 0) user.schools.append({ 'institution': institution, 'degree': 'failed all classes', }) user.save() docs = query_user(institution)['results'] assert_equal(len(docs), 1) def test_name_fields(self): names = ['Bill Nye', 'William', 'the science guy', 'Sanford', 'the Great'] user = UserFactory(fullname=names[0]) user.given_name = names[1] user.middle_names = names[2] user.family_name = names[3] user.suffix = names[4] user.save() docs = [query_user(name)['results'] for name in names] assert_equal(sum(map(len, docs)), len(docs)) # 1 result each assert_true(all([user._id == doc[0]['id'] for doc in docs])) @requires_search class TestProject(SearchTestCase): def setUp(self): super(TestProject, self).setUp() search.delete_index(elastic_search.INDEX) search.create_index(elastic_search.INDEX) self.user = UserFactory(fullname='John Deacon') self.project = ProjectFactory(title='Red Special', creator=self.user) def test_new_project_private(self): # Verify that a private project is not present in Elastic Search. docs = query(self.project.title)['results'] assert_equal(len(docs), 0) def test_make_public(self): # Make project public, and verify that it is present in Elastic # Search. 
self.project.set_privacy('public') docs = query(self.project.title)['results'] assert_equal(len(docs), 1) @requires_search class TestNodeSearch(SearchTestCase): def setUp(self): super(TestNodeSearch, self).setUp() self.node = ProjectFactory(is_public=True, title='node') self.public_child = ProjectFactory(parent=self.node, is_public=True, title='public_child') self.private_child = ProjectFactory(parent=self.node, title='private_child') self.public_subchild = ProjectFactory(parent=self.private_child, is_public=True) self.node.node_license = NodeLicenseRecordFactory() self.node.save() self.query = 'category:project & category:component' @retry_assertion() def test_node_license_added_to_search(self): docs = query(self.query)['results'] node = [d for d in docs if d['title'] == self.node.title][0] assert_in('license', node) assert_equal(node['license']['id'], self.node.node_license.id) @retry_assertion(retries=10) def test_node_license_propogates_to_children(self): docs = query(self.query)['results'] child = [d for d in docs if d['title'] == self.public_child.title][0] assert_in('license', child) assert_equal(child['license'].get('id'), self.node.node_license.id) child = [d for d in docs if d['title'] == self.public_subchild.title][0] assert_in('license', child) assert_equal(child['license'].get('id'), self.node.node_license.id) @retry_assertion(retries=10) def test_node_license_updates_correctly(self): other_license = NodeLicense.find_one( Q('name', 'eq', 'MIT License') ) new_license = NodeLicenseRecordFactory(node_license=other_license) self.node.node_license = new_license self.node.save() docs = query(self.query)['results'] for doc in docs: assert_equal(doc['license'].get('id'), new_license.id) @requires_search class TestRegistrationRetractions(SearchTestCase): def setUp(self): super(TestRegistrationRetractions, self).setUp() self.user = UserFactory(usename='Doug Bogie') self.title = 'Red Special' self.consolidate_auth = Auth(user=self.user) self.project = ProjectFactory( title=self.title, creator=self.user, is_public=True, ) self.registration = RegistrationFactory( project=self.project, title=self.title, creator=self.user, is_public=True, is_registration=True ) def test_retraction_is_searchable(self): self.registration.retract_registration(self.user) docs = query('category:registration AND ' + self.title)['results'] assert_equal(len(docs), 1) @mock.patch('website.project.model.Node.archiving', mock.PropertyMock(return_value=False)) def test_pending_retraction_wiki_content_is_searchable(self): # Add unique string to wiki wiki_content = {'home': 'public retraction test'} for key, value in wiki_content.items(): docs = query(value)['results'] assert_equal(len(docs), 0) self.registration.update_node_wiki( key, value, self.consolidate_auth, ) # Query and ensure unique string shows up docs = query(value)['results'] assert_equal(len(docs), 1) # Query and ensure registration does show up docs = query('category:registration AND ' + self.title)['results'] assert_equal(len(docs), 1) # Retract registration self.registration.retract_registration(self.user, '') self.registration.save() self.registration.reload() # Query and ensure unique string in wiki doesn't show up docs = query('category:registration AND "{}"'.format(wiki_content['home']))['results'] assert_equal(len(docs), 1) # Query and ensure registration does show up docs = query('category:registration AND ' + self.title)['results'] assert_equal(len(docs), 1) @mock.patch('website.project.model.Node.archiving', mock.PropertyMock(return_value=False)) 
def test_retraction_wiki_content_is_not_searchable(self): # Add unique string to wiki wiki_content = {'home': 'public retraction test'} for key, value in wiki_content.items(): docs = query(value)['results'] assert_equal(len(docs), 0) self.registration.update_node_wiki( key, value, self.consolidate_auth, ) # Query and ensure unique string shows up docs = query(value)['results'] assert_equal(len(docs), 1) # Query and ensure registration does show up docs = query('category:registration AND ' + self.title)['results'] assert_equal(len(docs), 1) # Retract registration self.registration.retract_registration(self.user, '') self.registration.retraction.state = Retraction.APPROVED self.registration.retraction.save() self.registration.save() self.registration.update_search() # Query and ensure unique string in wiki doesn't show up docs = query('category:registration AND "{}"'.format(wiki_content['home']))['results'] assert_equal(len(docs), 0) # Query and ensure registration does show up docs = query('category:registration AND ' + self.title)['results'] assert_equal(len(docs), 1) @requires_search class TestPublicNodes(SearchTestCase): def setUp(self): super(TestPublicNodes, self).setUp() self.user = UserFactory(usename='Doug Bogie') self.title = 'Red Special' self.consolidate_auth = Auth(user=self.user) self.project = ProjectFactory( title=self.title, creator=self.user, is_public=True, ) self.component = NodeFactory( parent=self.project, title=self.title, creator=self.user, is_public=True ) self.registration = ProjectFactory( title=self.title, creator=self.user, is_public=True, is_registration=True ) def test_make_private(self): # Make project public, then private, and verify that it is not present # in search. self.project.set_privacy('private') docs = query('category:project AND ' + self.title)['results'] assert_equal(len(docs), 0) self.component.set_privacy('private') docs = query('category:component AND ' + self.title)['results'] assert_equal(len(docs), 0) def test_public_parent_title(self): self.project.set_title('hello & world', self.consolidate_auth) self.project.save() docs = query('category:component AND ' + self.title)['results'] assert_equal(len(docs), 1) assert_equal(docs[0]['parent_title'], 'hello & world') assert_true(docs[0]['parent_url']) def test_make_parent_private(self): # Make parent of component, public, then private, and verify that the # component still appears but doesn't link to the parent in search. 
self.project.set_privacy('private') docs = query('category:component AND ' + self.title)['results'] assert_equal(len(docs), 1) assert_equal(docs[0]['parent_title'], '-- private project --') assert_false(docs[0]['parent_url']) def test_delete_project(self): self.component.remove_node(self.consolidate_auth) docs = query('category:component AND ' + self.title)['results'] assert_equal(len(docs), 0) self.project.remove_node(self.consolidate_auth) docs = query('category:project AND ' + self.title)['results'] assert_equal(len(docs), 0) def test_change_title(self): title_original = self.project.title self.project.set_title( 'Blue Ordinary', self.consolidate_auth, save=True) docs = query('category:project AND ' + title_original)['results'] assert_equal(len(docs), 0) docs = query('category:project AND ' + self.project.title)['results'] assert_equal(len(docs), 1) def test_add_tags(self): tags = ['stonecoldcrazy', 'just a poor boy', 'from-a-poor-family'] for tag in tags: docs = query('tags:"{}"'.format(tag))['results'] assert_equal(len(docs), 0) self.project.add_tag(tag, self.consolidate_auth, save=True) for tag in tags: docs = query('tags:"{}"'.format(tag))['results'] assert_equal(len(docs), 1) def test_remove_tag(self): tags = ['stonecoldcrazy', 'just a poor boy', 'from-a-poor-family'] for tag in tags: self.project.add_tag(tag, self.consolidate_auth, save=True) self.project.remove_tag(tag, self.consolidate_auth, save=True) docs = query('tags:"{}"'.format(tag))['results'] assert_equal(len(docs), 0) def test_update_wiki(self): """Add text to a wiki page, then verify that project is found when searching for wiki text. """ wiki_content = { 'home': 'Hammer to fall', 'swag': '#YOLO' } for key, value in wiki_content.items(): docs = query(value)['results'] assert_equal(len(docs), 0) self.project.update_node_wiki( key, value, self.consolidate_auth, ) docs = query(value)['results'] assert_equal(len(docs), 1) def test_clear_wiki(self): # Add wiki text to page, then delete, then verify that project is not # found when searching for wiki text. wiki_content = 'Hammer to fall' self.project.update_node_wiki( 'home', wiki_content, self.consolidate_auth, ) self.project.update_node_wiki('home', '', self.consolidate_auth) docs = query(wiki_content)['results'] assert_equal(len(docs), 0) def test_add_contributor(self): # Add a contributor, then verify that project is found when searching # for contributor. user2 = UserFactory(fullname='Adam Lambert') docs = query('category:project AND "{}"'.format(user2.fullname))['results'] assert_equal(len(docs), 0) self.project.add_contributor(user2, save=True) docs = query('category:project AND "{}"'.format(user2.fullname))['results'] assert_equal(len(docs), 1) def test_remove_contributor(self): # Add and remove a contributor, then verify that project is not found # when searching for contributor. 
user2 = UserFactory(fullname='Brian May') self.project.add_contributor(user2, save=True) self.project.remove_contributor(user2, self.consolidate_auth) docs = query('category:project AND "{}"'.format(user2.fullname))['results'] assert_equal(len(docs), 0) def test_hide_contributor(self): user2 = UserFactory(fullname='Brian May') self.project.add_contributor(user2) self.project.set_visible(user2, False, save=True) docs = query('category:project AND "{}"'.format(user2.fullname))['results'] assert_equal(len(docs), 0) self.project.set_visible(user2, True, save=True) docs = query('category:project AND "{}"'.format(user2.fullname))['results'] assert_equal(len(docs), 1) def test_wrong_order_search(self): title_parts = self.title.split(' ') title_parts.reverse() title_search = ' '.join(title_parts) docs = query(title_search)['results'] assert_equal(len(docs), 3) def test_tag_aggregation(self): tags = ['stonecoldcrazy', 'just a poor boy', 'from-a-poor-family'] for tag in tags: self.project.add_tag(tag, self.consolidate_auth, save=True) docs = query(self.title)['tags'] assert len(docs) == 3 for doc in docs: assert doc['key'] in tags @requires_search class TestAddContributor(SearchTestCase): # Tests of the search.search_contributor method def setUp(self): super(TestAddContributor, self).setUp() self.name1 = 'Roger1 Taylor1' self.name2 = 'John2 Deacon2' self.name3 = u'j\xc3\xb3ebert3 Smith3' self.name4 = u'B\xc3\xb3bbert4 Jones4' self.user = UserFactory(fullname=self.name1) self.user3 = UserFactory(fullname=self.name3) def test_unreg_users_dont_show_in_search(self): unreg = UnregUserFactory() contribs = search.search_contributor(unreg.fullname) assert_equal(len(contribs['users']), 0) def test_unreg_users_do_show_on_projects(self): unreg = UnregUserFactory(fullname='Robert Paulson') self.project = ProjectFactory( title='Glamour Rock', creator=unreg, is_public=True, ) results = query(unreg.fullname)['results'] assert_equal(len(results), 1) def test_search_fullname(self): # Searching for full name yields exactly one result. contribs = search.search_contributor(self.name1) assert_equal(len(contribs['users']), 1) contribs = search.search_contributor(self.name2) assert_equal(len(contribs['users']), 0) def test_search_firstname(self): # Searching for first name yields exactly one result. contribs = search.search_contributor(self.name1.split(' ')[0]) assert_equal(len(contribs['users']), 1) contribs = search.search_contributor(self.name2.split(' ')[0]) assert_equal(len(contribs['users']), 0) def test_search_partial(self): # Searching for part of first name yields exactly one # result. contribs = search.search_contributor(self.name1.split(' ')[0][:-1]) assert_equal(len(contribs['users']), 1) contribs = search.search_contributor(self.name2.split(' ')[0][:-1]) assert_equal(len(contribs['users']), 0) def test_search_fullname_special_character(self): # Searching for a fullname with a special character yields # exactly one result. contribs = search.search_contributor(self.name3) assert_equal(len(contribs['users']), 1) contribs = search.search_contributor(self.name4) assert_equal(len(contribs['users']), 0) def test_search_firstname_special_charcter(self): # Searching for a first name with a special character yields # exactly one result. 
contribs = search.search_contributor(self.name3.split(' ')[0]) assert_equal(len(contribs['users']), 1) contribs = search.search_contributor(self.name4.split(' ')[0]) assert_equal(len(contribs['users']), 0) def test_search_partial_special_character(self): # Searching for a partial name with a special character yields # exctly one result. contribs = search.search_contributor(self.name3.split(' ')[0][:-1]) assert_equal(len(contribs['users']), 1) contribs = search.search_contributor(self.name4.split(' ')[0][:-1]) assert_equal(len(contribs['users']), 0) @requires_search class TestProjectSearchResults(SearchTestCase): def setUp(self): super(TestProjectSearchResults, self).setUp() self.user = UserFactory(usename='Doug Bogie') self.singular = 'Spanish Inquisition' self.plural = 'Spanish Inquisitions' self.possessive = 'Spanish\'s Inquisition' self.project_singular = ProjectFactory( title=self.singular, creator=self.user, is_public=True, ) self.project_plural = ProjectFactory( title=self.plural, creator=self.user, is_public=True, ) self.project_possessive = ProjectFactory( title=self.possessive, creator=self.user, is_public=True, ) self.project_unrelated = ProjectFactory( title='Cardinal Richelieu', creator=self.user, is_public=True, ) def test_singular_query(self): # Verify searching for singular term includes singular, # possessive and plural versions in results. results = query(self.singular)['results'] assert_equal(len(results), 3) def test_plural_query(self): # Verify searching for singular term includes singular, # possessive and plural versions in results. results = query(self.plural)['results'] assert_equal(len(results), 3) def test_possessive_query(self): # Verify searching for possessive term includes singular, # possessive and plural versions in results. 
results = query(self.possessive)['results'] assert_equal(len(results), 3) def job(**kwargs): keys = [ 'title', 'institution', 'department', 'location', 'startMonth', 'startYear', 'endMonth', 'endYear', 'ongoing', ] job = {} for key in keys: if key[-5:] == 'Month': job[key] = kwargs.get(key, 'December') elif key[-4:] == 'Year': job[key] = kwargs.get(key, '2000') else: job[key] = kwargs.get(key, 'test_{}'.format(key)) return job class TestUserSearchResults(SearchTestCase): def setUp(self): super(TestUserSearchResults, self).setUp() self.user_one = UserFactory(jobs=[job(institution='Oxford'), job(institution='Star Fleet')], fullname='Date Soong') self.user_two = UserFactory(jobs=[job(institution='Grapes la Picard'), job(institution='Star Fleet')], fullname='Jean-Luc Picard') self.user_three = UserFactory(jobs=[job(institution='Star Fleet'), job(institution='Federation Medical')], fullname='Beverly Crusher') self.user_four = UserFactory(jobs=[job(institution='Star Fleet')], fullname='William Riker') self.user_five = UserFactory(jobs=[job(institution='Traveler intern'), job(institution='Star Fleet Academy'), job(institution='Star Fleet Intern')], fullname='Wesley Crusher') for i in range(25): UserFactory(jobs=[job()]) self.current_starfleet = [ self.user_three, self.user_four, ] self.were_starfleet = [ self.user_one, self.user_two, self.user_three, self.user_four, self.user_five ] @unittest.skip('Cannot guarentee always passes') def test_current_job_first_in_results(self): results = query_user('Star Fleet')['results'] result_names = [r['names']['fullname'] for r in results] current_starfleet_names = [u.fullname for u in self.current_starfleet] for name in result_names[:2]: assert_in(name, current_starfleet_names) def test_had_job_in_results(self): results = query_user('Star Fleet')['results'] result_names = [r['names']['fullname'] for r in results] were_starfleet_names = [u.fullname for u in self.were_starfleet] for name in result_names: assert_in(name, were_starfleet_names) class TestSearchExceptions(OsfTestCase): # Verify that the correct exception is thrown when the connection is lost @classmethod def setUpClass(cls): logging.getLogger('website.project.model').setLevel(logging.CRITICAL) super(TestSearchExceptions, cls).setUpClass() if settings.SEARCH_ENGINE == 'elastic': cls._es = search.search_engine.es search.search_engine.es = None @classmethod def tearDownClass(cls): super(TestSearchExceptions, cls).tearDownClass() if settings.SEARCH_ENGINE == 'elastic': search.search_engine.es = cls._es def test_connection_error(self): # Ensures that saving projects/users doesn't break as a result of connection errors self.user = UserFactory(usename='Doug Bogie') self.project = ProjectFactory( title="Tom Sawyer", creator=self.user, is_public=True, ) self.user.save() self.project.save() class TestSearchMigration(SearchTestCase): # Verify that the correct indices are created/deleted during migration @classmethod def tearDownClass(cls): super(TestSearchMigration, cls).tearDownClass() search.create_index(settings.ELASTIC_INDEX) def setUp(self): super(TestSearchMigration, self).setUp() self.es = search.search_engine.es search.delete_index(settings.ELASTIC_INDEX) search.create_index(settings.ELASTIC_INDEX) self.user = UserFactory(fullname='David Bowie') self.project = ProjectFactory( title=settings.ELASTIC_INDEX, creator=self.user, is_public=True ) def test_first_migration_no_delete(self): migrate(delete=False, index=settings.ELASTIC_INDEX, app=self.app.app) var = self.es.indices.get_aliases() 
assert_equal(var[settings.ELASTIC_INDEX + '_v1']['aliases'].keys()[0], settings.ELASTIC_INDEX) def test_multiple_migrations_no_delete(self): for n in xrange(1, 21): migrate(delete=False, index=settings.ELASTIC_INDEX, app=self.app.app) var = self.es.indices.get_aliases() assert_equal(var[settings.ELASTIC_INDEX + '_v{}'.format(n)]['aliases'].keys()[0], settings.ELASTIC_INDEX) def test_first_migration_with_delete(self): migrate(delete=True, index=settings.ELASTIC_INDEX, app=self.app.app) var = self.es.indices.get_aliases() assert_equal(var[settings.ELASTIC_INDEX + '_v1']['aliases'].keys()[0], settings.ELASTIC_INDEX) def test_multiple_migrations_with_delete(self): for n in xrange(1, 21, 2): migrate(delete=True, index=settings.ELASTIC_INDEX, app=self.app.app) var = self.es.indices.get_aliases() assert_equal(var[settings.ELASTIC_INDEX + '_v{}'.format(n)]['aliases'].keys()[0], settings.ELASTIC_INDEX) migrate(delete=True, index=settings.ELASTIC_INDEX, app=self.app.app) var = self.es.indices.get_aliases() assert_equal(var[settings.ELASTIC_INDEX + '_v{}'.format(n + 1)]['aliases'].keys()[0], settings.ELASTIC_INDEX) assert not var.get(settings.ELASTIC_INDEX + '_v{}'.format(n)) class TestSearchFiles(SearchTestCase): def setUp(self): super(TestSearchFiles, self).setUp() self.node = ProjectFactory(is_public=True, title='Otis') self.osf_storage = self.node.get_addon('osfstorage') self.root = self.osf_storage.get_root() def test_search_file(self): self.root.append_file('Shake.wav') find = query_file('Shake.wav')['results'] assert_equal(len(find), 1) def test_delete_file(self): file_ = self.root.append_file('I\'ve Got Dreams To Remember.wav') find = query_file('I\'ve Got Dreams To Remember.wav')['results'] assert_equal(len(find), 1) file_.delete() find = query_file('I\'ve Got Dreams To Remember.wav')['results'] assert_equal(len(find), 0) def test_add_tag(self): file_ = self.root.append_file('That\'s How Strong My Love Is.mp3') tag = Tag(_id='Redding') tag.save() file_.tags.append(tag) file_.save() find = query_tag_file('Redding')['results'] assert_equal(len(find), 1) def test_remove_tag(self): file_ = self.root.append_file('I\'ve Been Loving You Too Long.mp3') tag = Tag(_id='Blue') tag.save() file_.tags.append(tag) file_.save() find = query_tag_file('Blue')['results'] assert_equal(len(find), 1) file_.tags.remove('Blue') file_.save() find = query_tag_file('Blue')['results'] assert_equal(len(find), 0) def test_make_node_private(self): file_ = self.root.append_file('Change_Gonna_Come.wav') find = query_file('Change_Gonna_Come.wav')['results'] assert_equal(len(find), 1) self.node.is_public = False self.node.save() find = query_file('Change_Gonna_Come.wav')['results'] assert_equal(len(find), 0) def test_make_private_node_public(self): self.node.is_public = False self.node.save() file_ = self.root.append_file('Try a Little Tenderness.flac') find = query_file('Try a Little Tenderness.flac')['results'] assert_equal(len(find), 0) self.node.is_public = True self.node.save() find = query_file('Try a Little Tenderness.flac')['results'] assert_equal(len(find), 1) def test_delete_node(self): node = ProjectFactory(is_public=True, title='The Soul Album') osf_storage = node.get_addon('osfstorage') root = osf_storage.get_root() root.append_file('The Dock of the Bay.mp3') find = query_file('The Dock of the Bay.mp3')['results'] assert_equal(len(find), 1) node.is_deleted = True node.save() find = query_file('The Dock of the Bay.mp3')['results'] assert_equal(len(find), 0)
repo_name: haoyuchen1992/osf.io
path: tests/test_elastic.py
language: Python
license: apache-2.0
size: 32592
keyword: ["Brian"]
text_hash: 578c3ba2875b28e643fd91067d70a874aa213d664830006dea819b2746363a04
__author__ = 'aclapes' import numpy as np from os.path import join from os.path import isfile, exists from os import makedirs import cPickle from sklearn import preprocessing from sklearn.decomposition import PCA, IncrementalPCA from yael import ynumpy import time import sys from joblib import delayed, Parallel import videodarwin from Queue import PriorityQueue INTERNAL_PARAMETERS = dict( # dimensionality reduction n_samples = 1000000, #1000*256, # See paper of "A robust and efficient video representation for action recognition" reduction_factor = 0.5, # keep after a fraction of the dimensions after applying pca # bulding codebooks bovw_codebook_k = 4000, bovw_lnorm = 1, # building GMMs fv_gmm_k = 256, # number of gaussian components fv_repr_feats = ['mu','sigma'] ) def compute_bovw_descriptors(tracklets_path, intermediates_path, videonames, traintest_parts, feat_types, feats_path, \ pca_reduction=False, treelike=True, clusters_path=None, verbose=False): _compute_bovw_descriptors(tracklets_path, intermediates_path, videonames, traintest_parts, np.arange(len(videonames)), feat_types, feats_path, \ pca_reduction=pca_reduction, treelike=treelike, clusters_path=clusters_path, verbose=verbose) def compute_fv_descriptors(tracklets_path, intermediates_path, videonames, traintest_parts, feat_types, feats_path, \ pca_reduction=False, treelike=True, clusters_path=None, verbose=False): _compute_fv_descriptors(tracklets_path, intermediates_path, videonames, traintest_parts, np.arange(len(videonames)), feat_types, feats_path, \ pca_reduction=pca_reduction, treelike=treelike, clusters_path=clusters_path, verbose=verbose) def compute_vd_descriptors(tracklets_path, intermediates_path, videonames, traintest_parts, feat_types, feats_path, \ pca_reduction=False, treelike=True, clusters_path=None, verbose=False): _compute_vd_descriptors(tracklets_path, intermediates_path, videonames, traintest_parts, np.arange(len(videonames)), feat_types, feats_path, \ pca_reduction=pca_reduction, treelike=treelike, clusters_path=clusters_path, verbose=verbose) def compute_bovw_descriptors_multiprocess(tracklets_path, intermediates_path, videonames, traintest_parts, st, num_videos, feat_types, feats_path, \ pca_reduction=False, treelike=True, clusters_path=None, verbose=False): inds = np.linspace(st, st+num_videos-1, num_videos) _compute_bovw_descriptors(tracklets_path, intermediates_path, videonames, traintest_parts, inds, feat_types, feats_path, \ pca_reduction=pca_reduction, treelike=treelike, clusters_path=clusters_path, verbose=verbose) def compute_fv_descriptors_multiprocess(tracklets_path, intermediates_path, videonames, traintest_parts, st, num_videos, feat_types, feats_path, \ pca_reduction=False, treelike=True, clusters_path=None, verbose=False): inds = np.linspace(st, st+num_videos-1, num_videos) _compute_fv_descriptors(tracklets_path, intermediates_path, videonames, traintest_parts, inds, feat_types, feats_path, \ pca_reduction=pca_reduction, treelike=treelike, clusters_path=clusters_path, verbose=verbose) def compute_vd_descriptors_multiprocess(tracklets_path, intermediates_path, videonames, traintest_parts, st, num_videos, feat_types, feats_path, \ pca_reduction=False, treelike=True, clusters_path=None, verbose=False): inds = np.linspace(st, st+num_videos-1, num_videos) _compute_vd_descriptors(tracklets_path, intermediates_path, videonames, traintest_parts, inds, feat_types, feats_path, \ pca_reduction=pca_reduction, treelike=treelike, clusters_path=clusters_path, verbose=verbose) def 
compute_bovw_descriptors_multithread(tracklets_path, intermediates_path, videonames, traintest_parts, feat_types, feats_path, \ nt=4, pca_reduction=False, treelike=True, clusters_path=None, verbose=False): Parallel(n_jobs=nt, backend='threading')(delayed(_compute_bovw_descriptors)(tracklets_path, intermediates_path, videonames, traintest_parts, \ [i], feat_types, feats_path, \ pca_reduction=pca_reduction, treelike=treelike, clusters_path=clusters_path, verbose=verbose) for i in xrange(len(videonames))) def compute_fv_descriptors_multithread(tracklets_path, intermediates_path, videonames, traintest_parts, feat_types, feats_path, \ nt=4, pca_reduction=False, treelike=True, clusters_path=None, verbose=False): Parallel(n_jobs=nt, backend='threading')(delayed(_compute_fv_descriptors)(tracklets_path, intermediates_path, videonames, traintest_parts, \ [i], feat_types, feats_path, \ pca_reduction=pca_reduction, treelike=treelike, clusters_path=clusters_path, verbose=verbose) for i in xrange(len(videonames))) def compute_vd_descriptors_multithread(tracklets_path, intermediates_path, videonames, traintest_parts, feat_types, feats_path, \ nt=4, pca_reduction=False, treelike=True, clusters_path=None, verbose=False): Parallel(n_jobs=nt, backend='threading')(delayed(_compute_vd_descriptors)(tracklets_path, intermediates_path, videonames, traintest_parts, \ [i], feat_types, feats_path, \ pca_reduction=pca_reduction, treelike=treelike, clusters_path=clusters_path, verbose=verbose) for i in xrange(len(videonames))) # ============================================================================== # Main functions # ============================================================================== def _compute_bovw_descriptors(tracklets_path, intermediates_path, videonames, traintest_parts, indices, feat_types, feats_path, \ pca_reduction=False, treelike=True, clusters_path=None, verbose=False): try: makedirs(feats_path) except OSError: pass for k, part in enumerate(traintest_parts): # cach'd pca and gmm for j, feat_t in enumerate(feat_types): try: makedirs( join(feats_path, feat_t + '-' + str(k)) ) except OSError: pass cache = None # process videos total = len(videonames) for i in indices: # FV computed for all feature types? 
see the last in INTERNAL_PARAMETERS['feature_types'] all_done = np.all([isfile(join(feats_path, feat_t + '-' + str(k), videonames[i] + '.pkl')) for feat_t in feat_types]) if all_done: if verbose: print('[_compute_bovw_descriptors] %s -> OK' % videonames[i]) continue if cache is None: cache = dict() for j, feat_t in enumerate(feat_types): with open(join(intermediates_path, 'bovw' + ('_pca-' if pca_reduction else '-') + feat_t + '-' + str(k) + '.pkl'), 'rb') as f: cache[feat_t] = cPickle.load(f) start_time = time.time() # object features used for the per-frame FV representation computation (cach'd) with open(join(tracklets_path, 'obj', videonames[i] + '.pkl'), 'rb') as f: obj = cPickle.load(f) for j, feat_t in enumerate(feat_types): # load video tracklets' feature with open(join(tracklets_path, feat_t, videonames[i] + '.pkl'), 'rb') as f: d = cPickle.load(f) if feat_t == 'trj': # (special case) d = convert_positions_to_displacements(d) if feat_t == 'mbh': dx = preprocessing.normalize(d[:,:d.shape[1]/2], norm='l1', axis=1) dy = preprocessing.normalize(d[:,d.shape[1]/2:], norm='l1', axis=1) d = np.hstack((dx,dy)) else: d = preprocessing.normalize(d, norm='l1', axis=1) d = rootSIFT(d) if pca_reduction: d = cache[feat_t]['pca'].transform(d) # reduce dimensionality output_filepath = join(feats_path, feat_t + '-' + str(k), videonames[i] + '.pkl') # compute BOVW of the video if not treelike: b = bovw(cache[feat_t]['codebook'], d) with open(output_filepath, 'wb') as f: cPickle.dump(dict(v=b), f) else: # or separately the BOVWs of the tree nodes with open(join(clusters_path, videonames[i] + '.pkl'), 'rb') as f: clusters = cPickle.load(f) bovwtree = dict() if len(clusters['tree']) == 1: bovwtree[1] = bovw(cache[feat_t]['codebook'], d) else: T = reconstruct_tree_from_leafs(np.unique(clusters['int_paths'])) for parent_idx, children_inds in T.iteritems(): # (in a global representation) node_inds = np.where(np.any([clusters['int_paths'] == idx for idx in children_inds], axis=0))[0] bovwtree[parent_idx] = bovw(cache[feat_t]['codebook'], d[node_inds,:]) # bovw vec with open(output_filepath, 'wb') as f: cPickle.dump(dict(tree=bovwtree), f) elapsed_time = time.time() - start_time if verbose: print('[_compute_bovw_descriptors] %s -> DONE (in %.2f secs)' % (videonames[i], elapsed_time)) def _compute_fv_descriptors(tracklets_path, intermediates_path, videonames, traintest_parts, indices, feat_types, feats_path, \ pca_reduction=False, treelike=True, clusters_path=None, verbose=False): try: makedirs(feats_path) except OSError: pass for k, part in enumerate(traintest_parts): for j, feat_t in enumerate(feat_types): try: makedirs(join(feats_path, feat_t + '-' + str(k))) except OSError: pass cache = None # process videos total = len(videonames) for i in indices: # FV computed for all feature types? 
see the last in INTERNAL_PARAMETERS['feature_types'] all_done = np.all([isfile(join(feats_path, feat_t + '-' + str(k), videonames[i] + '.pkl')) for feat_t in feat_types]) if all_done: if verbose: print('[_compute_fv_descriptors] %s -> OK' % videonames[i]) continue if cache is None: cache = dict() for j, feat_t in enumerate(feat_types): with open(join(intermediates_path, 'gmm' + ('_pca-' if pca_reduction else '-') + feat_t + '-' + str(k) + '.pkl'), 'rb') as f: cache[feat_t] = cPickle.load(f) start_time = time.time() # object features used for the per-frame FV representation computation (cach'd) with open(join(tracklets_path, 'obj', videonames[i] + '.pkl'), 'rb') as f: obj = cPickle.load(f) with open(join(clusters_path, videonames[i] + '.pkl'), 'rb') as f: clusters = cPickle.load(f) for j, feat_t in enumerate(feat_types): if isfile(join(feats_path, feat_t + '-' + str(k), videonames[i] + '.pkl')): continue # load video tracklets' feature with open(join(tracklets_path, feat_t, videonames[i] + '.pkl'), 'rb') as f: d = cPickle.load(f) if feat_t == 'trj': # (special case) d = convert_positions_to_displacements(d) if feat_t == 'mbh': dx = preprocessing.normalize(d[:,:d.shape[1]/2], norm='l1', axis=1) dy = preprocessing.normalize(d[:,d.shape[1]/2:], norm='l1', axis=1) d = np.hstack((dx,dy)) else: d = preprocessing.normalize(d, norm='l1', axis=1) d = rootSIFT(d) if pca_reduction: d = cache[feat_t]['pca'].transform(d) # reduce dimensionality d = np.ascontiguousarray(d, dtype=np.float32) # required in many of Yael functions output_filepath = join(feats_path, feat_t + '-' + str(k), videonames[i] + '.pkl') # compute FV of the video if not treelike: fv = ynumpy.fisher(cache[feat_t]['gmm'], d, INTERNAL_PARAMETERS['fv_repr_feats']) # fisher vec with open(output_filepath, 'wb') as f: cPickle.dump(dict(v=fv), f) else: # or separately the FVs of the tree nodes fvtree = dict() if len(clusters['tree']) == 1: fvtree[1] = ynumpy.fisher(cache[feat_t]['gmm'], d, INTERNAL_PARAMETERS['fv_repr_feats']) # fisher vec else: T = reconstruct_tree_from_leafs(np.unique(clusters['int_paths'])) for parent_idx, children_inds in T.iteritems(): # (in a global representation) node_inds = np.where(np.any([clusters['int_paths'] == idx for idx in children_inds], axis=0))[0] fvtree[parent_idx] = ynumpy.fisher(cache[feat_t]['gmm'], d[node_inds,:], INTERNAL_PARAMETERS['fv_repr_feats']) # fisher vec with open(output_filepath, 'wb') as f: cPickle.dump(dict(tree=fvtree), f) elapsed_time = time.time() - start_time if verbose: print('[_compute_fv_descriptors] %s -> DONE (in %.2f secs)' % (videonames[i], elapsed_time)) def _compute_vd_descriptors(tracklets_path, intermediates_path, videonames, traintest_parts, indices, feat_types, feats_path, \ pca_reduction=False, treelike=True, clusters_path=None, verbose=False): try: makedirs(feats_path) except OSError: pass for k, part in enumerate(traintest_parts): # cach'd pca and gmm for j, feat_t in enumerate(feat_types): try: makedirs(join(feats_path, feat_t + '-' + str(k))) except OSError: pass cache = None # process videos total = len(videonames) for i in indices: # FV computed for all feature types? 
see the last in INTERNAL_PARAMETERS['feature_types'] all_done = np.all([isfile(join(feats_path, feat_t + '-' + str(k), videonames[i] + '.pkl')) for feat_t in feat_types]) if all_done: if verbose: print('[_compute_vd_descriptors] %s -> OK' % videonames[i]) continue if cache is None: cache = dict() for j, feat_t in enumerate(feat_types): with open(join(intermediates_path, 'gmm' + ('_pca-' if pca_reduction else '-') + feat_t + '-' + str(k) + '.pkl'), 'rb') as f: cache[feat_t] = cPickle.load(f) start_time = time.time() # object features used for the per-frame FV representation computation (cach'd) with open(join(tracklets_path, 'obj', videonames[i] + '.pkl'), 'rb') as f: obj = cPickle.load(f) with open(join(clusters_path, videonames[i] + '.pkl'), 'rb') as f: clusters = cPickle.load(f) for j, feat_t in enumerate(feat_types): if isfile(join(feats_path, feat_t + '-' + str(k), videonames[i] + '.pkl')): continue # load video tracklets' feature with open(join(tracklets_path, feat_t, videonames[i] + '.pkl'), 'rb') as f: d = cPickle.load(f) if feat_t == 'trj': # (special case) d = convert_positions_to_displacements(d) if feat_t == 'mbh': dx = preprocessing.normalize(d[:,:d.shape[1]/2], norm='l1', axis=1) dy = preprocessing.normalize(d[:,d.shape[1]/2:], norm='l1', axis=1) d = np.hstack((dx,dy)) else: d = preprocessing.normalize(d, norm='l1', axis=1) d = rootSIFT(d) if pca_reduction: d = cache[feat_t]['pca'].transform(d) # reduce dimensionality d = np.ascontiguousarray(d, dtype=np.float32) # required in many of Yael functions output_filepath = join(feats_path, feat_t + '-' + str(k), videonames[i] + '.pkl') # compute FV of the video if not treelike: # (in a per-frame representation) fids = np.unique(obj[:,0]) V = [] # row-wise fisher vectors (matrix) for f in fids: tmp = d[np.where(obj[:,0] == f)[0],:] # hopefully this is contiguous if d already was fv = ynumpy.fisher(cache[feat_t]['gmm'], tmp, include=INTERNAL_PARAMETERS['fv_repr_feats']) # f-th frame fisher vec V.append(fv) # no normalization or nothing (it's done when computing darwin) vd = videodarwin.darwin(np.array(V)) with open(output_filepath, 'wb') as f: cPickle.dump(dict(v=vd), f) else: # or separately the FVs of the tree nodes vdtree = dict() if len(clusters['tree']) == 1: fids = np.unique(obj[:,0]) V = [ynumpy.fisher(cache[feat_t]['gmm'], d[np.where(obj[:,0] == f)[0],:], INTERNAL_PARAMETERS['fv_repr_feats']) for f in fids] vdtree[1] = videodarwin.darwin(np.array(V)) else: T = reconstruct_tree_from_leafs(np.unique(clusters['int_paths'])) for parent_idx, children_inds in T.iteritems(): # (in a per-frame representation) node_inds = np.where(np.any([clusters['int_paths'] == idx for idx in children_inds], axis=0))[0] fids = np.unique(obj[node_inds,0]) V = [] for f in fids: tmp = d[np.where(obj[node_inds,0] == f)[0],:] fv = ynumpy.fisher(cache[feat_t]['gmm'], tmp, INTERNAL_PARAMETERS['fv_repr_feats']) V.append(fv) # no normalization or nothing (it's done when computing darwin) vdtree[parent_idx] = videodarwin.darwin(np.array(V)) with open(output_filepath, 'wb') as f: cPickle.dump(dict(tree=vdtree), f) elapsed_time = time.time() - start_time if verbose: print('[_compute_vd_descriptors] %s -> DONE (in %.2f secs)' % (videonames[i], elapsed_time)) def train_bovw_codebooks(tracklets_path, videonames, traintest_parts, feat_types, intermediates_path, pca_reduction=False, nt=1, verbose=False): try: makedirs(intermediates_path) except OSError: pass for k, part in enumerate(traintest_parts): train_inds = np.where(part <= 0)[0] # train codebook for each 
possible training parition total = len(train_inds) num_samples_per_vid = int(INTERNAL_PARAMETERS['n_samples'] / float(total)) # process the videos for i, feat_t in enumerate(feat_types): output_filepath = join(intermediates_path, 'bovw' + ('-' if pca_reduction else '-nopca-') + feat_t + '-' + str(k) + '.pkl') if isfile(output_filepath): if verbose: print('[train_bovw_codebooks] %s -> OK' % output_filepath) continue start_time = time.time() D = load_tracklets_sample(tracklets_path, videonames, train_inds, feat_t, num_samples_per_vid, verbose=verbose) # (special case) trajectory features are originally positions if feat_t == 'trj': D = convert_positions_to_displacements(D) if feat_t == 'mbh': Dx = preprocessing.normalize(D[:,:D.shape[1]/2], norm='l1', axis=1) Dy = preprocessing.normalize(D[:,D.shape[1]/2:], norm='l1', axis=1) D = np.hstack((Dx,Dy)) else: D = preprocessing.normalize(D, norm='l1', axis=1) if feat_t != 'trj': D = rootSIFT(D) # compute PCA map and reduce dimensionality if pca_reduction: pca = PCA(n_components=int(INTERNAL_PARAMETERS['reduction_factor']*D.shape[1]), copy=False) D = pca.fit_transform(D) # train codebook for later BOVW computation D = np.ascontiguousarray(D, dtype=np.float32) cb = ynumpy.kmeans(D, INTERNAL_PARAMETERS['bovw_codebook_k'], \ distance_type=2, nt=nt, niter=100, seed=0, redo=1, \ verbose=verbose, normalize=False, init='kmeans++') with open(output_filepath, 'wb') as f: cPickle.dump(dict(pca=(pca if pca_reduction else None), codebook=cb), f) elapsed_time = time.time() - start_time if verbose: print('[train_bovw_codebooks] %s -> DONE (in %.2f secs)' % (feat_t, elapsed_time)) def train_fv_gmms(tracklets_path, videonames, traintest_parts, feat_types, intermediates_path, pca_reduction=False, nt=4, verbose=False): try: makedirs(intermediates_path) except OSError: pass for k, part in enumerate(traintest_parts): train_inds = np.where(np.array(part) <= 0)[0] # train codebook for each possible training parition num_samples_per_vid = int(INTERNAL_PARAMETERS['n_samples'] / float(len(train_inds))) # process the videos for i, feat_t in enumerate(feat_types): D = None # Train GMMs output_filepath = join(intermediates_path, 'gmm' + ('_pca-' if pca_reduction else '-') + feat_t + '-' + str(k) + '.pkl') if isfile(output_filepath): if verbose: print('[train_fv_gmms] %s -> OK' % output_filepath) continue start_time = time.time() D = load_tracklets_sample(tracklets_path, videonames, train_inds, feat_t, num_samples_per_vid, verbose=verbose) # (special case) trajectory features are originally positions if feat_t == 'trj': D = convert_positions_to_displacements(D) if feat_t == 'mbh': Dx = preprocessing.normalize(D[:,:D.shape[1]/2], norm='l1', axis=1) Dy = preprocessing.normalize(D[:,D.shape[1]/2:], norm='l1', axis=1) D = np.hstack((Dx,Dy)) else: D = preprocessing.normalize(D, norm='l1', axis=1) if feat_t != 'trj': D = rootSIFT(D) # compute PCA map and reduce dimensionality if pca_reduction: pca = PCA(n_components=int(INTERNAL_PARAMETERS['reduction_factor']*D.shape[1]), copy=False) D = pca.fit_transform(D) # train GMMs for later FV computation D = np.ascontiguousarray(D, dtype=np.float32) gmm = ynumpy.gmm_learn(D, INTERNAL_PARAMETERS['fv_gmm_k'], nt=nt, niter=500, redo=1, verbose=verbose) with open(output_filepath, 'wb') as f: cPickle.dump(dict(pca=(pca if pca_reduction else None), gmm=gmm), f) # with open(join(intermediates_path, 'gmm-sample' + ('_pca-' if pca_reduction else '-') + feat_t + '-' + str(k) + '.pkl'), 'wb') as f: # cPickle.dump(D,f) elapsed_time = time.time() - 
start_time if verbose: print('[train_fv_gmms] %s -> DONE (in %.2f secs)' % (feat_t, elapsed_time)) def load_tracklets_sample(tracklets_path, videonames, data_inds, feat_t, num_samples_per_vid, verbose=False): D = None # feat_t's sampled tracklets ptr = 0 for j in range(0, len(data_inds)): idx = data_inds[j] filepath = join(tracklets_path, feat_t, videonames[idx] + '.pkl') if not isfile(filepath): sys.stderr.write('# ERROR: missing training instance' ' {}\n'.format(filepath)) sys.stderr.flush() quit() with open(filepath, 'rb') as f: d = cPickle.load(f) if verbose: print('[load_tracklets_sample] %s (num feats: %d)' % (filepath, d.shape[1])) # init sample if D is None: D = np.zeros((INTERNAL_PARAMETERS['n_samples'], d.shape[1]), dtype=np.float32) # create a random permutation for sampling some tracklets in this vids randp = np.random.permutation(d.shape[0]) if d.shape[0] > num_samples_per_vid: randp = randp[:num_samples_per_vid] D[ptr:ptr+len(randp),:] = d[randp,:] ptr += len(randp) return D[:ptr,:] # cut out extra reserved space # ============================================================================== # Helper functions # ============================================================================== def convert_positions_to_displacements(P): ''' From positions to normalized displacements :param D: :return: ''' X, Y = P[:,::2], P[:,1::2] # X (resp. Y) are odd (resp. even) columns of D Vx = X[:,1:] - X[:,:-1] # get relative displacement (velocity vector) Vy = Y[:,1:] - Y[:,:-1] # D = np.zeros((P.shape[0], Vx.shape[1]+Vy.shape[1]), dtype=P.dtype) # normx = np.linalg.norm(Vx, ord=2, axis=1)[:,np.newaxis] # normy = np.linalg.norm(Vy, ord=2, axis=1)[:,np.newaxis] # D[:,::2] = Vx / normx # l2-normalize # D[:,1::2] = Vy / normy D = np.zeros((P.shape[0], Vx.shape[1]+Vy.shape[1]), dtype=P.dtype) D[:,::2] = Vx D[:,1::2] = Vy return D def reconstruct_tree_from_leafs(leafs): """ Given a list of leaf, recover all the nodes. Parameters ---------- leafs: Leafs are integers, each representing a path in the binary tree. For instance, a leaf value of 5 indicates the leaf is the one reached going throught the folliwing path: root-left-right. Returns ------- A dictionary indicating for each node a list of all its descendents. Exemple: { 1 : [2,3,4,5,6,7,12,13,26,27], 2 : [4,5], 3 : [6,7,12,13,26,27], ... 
} """ h = dict() q = PriorityQueue() # recover first intermediate nodes (direct parents from leafs) for path in leafs: parent_path = int(path/2) if not parent_path in h and parent_path > 1: q.put(-parent_path) # deeper nodes go first (queue reversed by "-") h.setdefault(parent_path, []).append(path) # recover other intermediates notes recursevily while not q.empty(): path = -q.get() parent_path = int(path/2) if not parent_path in h and parent_path > 1: # list parent also for further processing q.put(-parent_path) h.setdefault(parent_path, []) h[parent_path] += ([path] + h[path]) # append children from current node to their parent # update with leafs h.update(dict((i,[i]) for i in leafs)) return h def bovw(codebook, X, nt=1): inds, dists = ynumpy.knn(X, codebook, nnn=1, distance_type=2, nt=1) bins, _ = np.histogram(inds[:,0], bins=INTERNAL_PARAMETERS['bovw_codebook_k']) return bins def rootSIFT(X, p=0.5): return np.sign(X) * (np.abs(X) ** p) def normalize(x, norm='l2',dtype=np.float32): if norm == 'l1': return x.astype(dtype=dtype) / (np.abs(x)).sum() elif norm == 'l2': # norms = np.sqrt(np.sum(x ** 2, 1)) # return x / norms.reshape(-1, 1) return x.astype(dtype=dtype) / np.sqrt(np.dot(x,x)) else: raise AttributeError(norm)
repo_name: aclapes/darwintree
path: tracklet_representation.py
language: Python
license: bsd-3-clause
size: 29414
keyword: ["Gaussian"]
text_hash: d705b20c16f125834d31f1262b0da3d9bc94bc67a080d838a50542553172fba3
# // Copyright (c) <2014> <Brian Wheatman> import simulator as sim import os Task = sim.Task TaskCombo = sim.TaskCombo make_grid = sim.make_grid # to read the text document define a factory def read_input(doc): group = [] # the group of tasks text = open(doc, 'r') # open the document to read for i in range(10): # looking through a couple of lines to find the right one line = text.readline() if line == 'How many different tasks do you have? (an integer)\n': # the line I am looking for break number_of_tasks = int(text.readline().replace('-', '').replace('\n', '').replace(' ', '')) # converting it to the right form for i in range(10): # looking through a couple of lines to find the right one line = text.readline() if line == 'How much money to start with? (an integer)\n': # the line I am looking for break money = int(text.readline().replace('-', '').replace('\n', '').replace(' ', '')) # converting it to the right form for i in range(10): # looking through a couple of lines to find the right one line = text.readline() if line == 'What is the period length? (an integer number of time steps)\n': # the line I am looking for break length_period = int(text.readline().replace('-', '').replace('\n', '').replace(' ', '')) # converting it to the right form for i in range(10): # looking through a couple of lines to find the right one line = text.readline() if line == 'How many periods are there? (an integer)\n': # the line I am looking for break number_periods = int(text.readline().replace('-', '').replace('\n', '').replace(' ', '')) # converting it to the right form for i in range(10): # looking through a couple of lines to find the right one line = text.readline() if line == 'What is the period cost? (an integer)\n': # the line I am looking for break period_cost = int(text.readline().replace('-', '').replace('\n', '').replace(' ', '')) # converting it to the right form for j in range(number_of_tasks): # for each task for i in range(10): # finding the task name line = text.readline() if line == 'Task name?\n': # the line I am looking for break task_name = text.readline().replace('-', '').replace('\n', '').replace(' ', '') # converting it to the right form for i in range(10): # which robot line = text.readline() if line == 'What robot will be completing the task?\n': # the line I am looking for break robot_id = text.readline().replace('-', '').replace('\n', '').replace(' ', '') # converting it to the right form for i in range(10): # what inputs line = text.readline() if line == 'What inputs are used? (the name of each input seperated by commas)\n': # the line I am looking for break inputs = text.readline().replace('-', '').replace('\n', '').replace(' ', '').split(',') # converting it to the right form for i in range(10): # how many of the inputs line = text.readline() if line == 'How many of each input is nessasary to make one round of outputs? (integers, same order as above, seperated by commas)\n': # the line I am looking for break number_inputs = [int(term) for term in text.readline().replace('-', '').replace('\n', '').replace(' ', '').split(',')] # converting it to the right form for i in range(10): # set up times line = text.readline() if line == 'The number of time steps it taks to set up? (an integer)\n': # the line I am looking for break set_up = int(text.readline().replace('-', '').replace('\n', '').replace(' ', '')) # converting it to the right form for i in range(10): # processing times line = text.readline() if line == 'The number of time steps to process one set of inputs? 
(an integer)\n': # the line I am looking for break processing = int(text.readline().replace('-', '').replace('\n', '').replace(' ', '')) # converting it to the right form for i in range(10): # output line = text.readline() if line == 'What the output is?\n': # the line I am looking for break output = text.readline().replace('-', '').replace('\n', '').replace(' ', '') # converting it to the right form for i in range(10): # outputs a round line = text.readline() if line == 'How many outputs are made each round? (an integer)\n': # the line I am looking for break outputs_a_round = int(text.readline().replace('-', '').replace('\n', '').replace(' ', '')) # converting it to the right form for i in range(10): # input queue line = text.readline() if line == 'How many of each item starts in the input queue? (integers seperated by commas)\n': # the line I am looking for break queue = [int(term) for term in text.readline().replace('-', '').replace('\n', '').replace(' ', '').split(',')] # converting it to the right form for i in range(10): # input price line = text.readline() if line == "How much does it cost to buy an input? (integers seperated by commas, 0 if it can't be bought)\n": # the line I am looking for break input_price = [int(term) for term in text.readline().replace('-', '').replace('\n', '').replace(' ', '').split(',')] # converting it to the right form for i in range(10): # output price line = text.readline() if line == "How much can you sell an output for? (an integer, 0 if it can't be sold)\n": # the line I am looking for break output_price = int(text.readline().replace('-', '').replace('\n', '').replace(' ', '')) # converting it to the right form task_name = Task(inputs, number_inputs, set_up, processing,output,\ outputs_a_round, queue, [0], [True], [0], [True], [False],\ [[0,0,0,0,0],0,0], robot_id, input_price, output_price, str(task_name) ) group.append(task_name) limitations = {} for i in range(10): # looking through a couple of lines to find the right one line = text.readline() if line == 'How many limitations are there? (A limitation is a rule that says one output can only sell an amount up to the amount sold of another output)\n': break num_limits = int(text.readline().replace('-', '').replace('\n', '').replace(' ', '')) # converting it to the right form skip = text.readline() for j in range(num_limits): line = [int(term)-1 for term in text.readline().replace('-', '').replace('\n', '').replace(' ', '').split(',')] limitations[group[line[0]]]=group[line[1]] return (group, limitations, money, period_cost, length_period, number_periods) # declaring the groups and factories (group, limitations, money, period_cost, length_period, number_periods) = read_input(str(os.getcwd()) + '\input_group.txt') tg = group tc = TaskCombo(tg, limitations, money, period_cost, length_period, number_periods) (group, limitations, money, period_cost, length_period, number_periods) = read_input(str(os.getcwd()) + '\sim1.txt') OPT_G = group OPT_C = TaskCombo(OPT_G, limitations, money, period_cost, length_period, number_periods) (group, limitations, money, period_cost, length_period, number_periods) = read_input(str(os.getcwd()) + '\sim2.txt') MG1 = group MC1 = TaskCombo(MG1, limitations, money, period_cost, length_period, number_periods) (group, limitations, money, period_cost, length_period, number_periods) = read_input(str(os.getcwd()) + '\sim3.txt') AG = group AC = TaskCombo(AG, limitations, money, period_cost, length_period, number_periods)
repo_name: wheatman/Simulation
path: inputs.py
language: Python
license: mit
size: 8025
keyword: ["Brian"]
text_hash: f5b1c49185fa35d8fab6a72beb5a632d4b5b9733cda4d697b8841317befd0dd9
""" SOM-based learning functions for CFProjections. $Id$ """ __version__ = "$Revision$" from math import ceil import param from topo.base.arrayutil import L2norm, array_argmax from topo.base.boundingregion import BoundingBox from topo.base.cf import CFPLearningFn from topo.base.patterngenerator import PatternGenerator from topo.pattern import Gaussian ### JABHACKALERT: This class will be removed once the examples no ### longer rely upon it class CFPLF_SOM(CFPLearningFn): """ An abstract base class of learning functions for Self-Organizing Maps. This implementation is obsolete and will be removed soon. Please see examples/cfsom_or.ty for current SOM support. """ __abstract = True learning_radius = param.Number(default=0.0,doc= """ The radius of the neighborhood function to be used for learning. Typically, this value will be set by the Sheet or Projection owning this CFPLearningFn, but it can also be set explicitly by the user. """) def __init__(self,**params): self.warning("CFPLF_SOM is deprecated -- see the example in cfsom_or.ty for how to build a SOM") def __call__(self, proj, input_activity, output_activity, learning_rate, **params): raise NotImplementedError ### JABHACKALERT: This class will be removed once the examples no ### longer rely upon it class CFPLF_HebbianSOM(CFPLF_SOM): """ Hebbian learning rule for CFProjections to Self-Organizing Maps. This implementation is obsolete and will be removed soon. Please see examples/cfsom_or.ty for current SOM support. """ learning_radius = param.Number(default=0.0) crop_radius_multiplier = param.Number(default=3.0,doc= """ Factor by which the radius should be multiplied, when deciding how far from the winner to keep updating the weights. """) neighborhood_kernel_generator = param.ClassSelector(PatternGenerator, default=Gaussian(x=0.0,y=0.0,aspect_ratio=1.0), doc="Neighborhood function") def __call__(self, iterator, input_activity, output_activity, learning_rate, **params): cfs = iterator.proj.cfs.tolist() # CEBALERT: convert to use flatcfs rows,cols = output_activity.shape # This learning function does not need to scale the learning # rate like some do, so it does not use constant_sum_connection_rate() single_connection_learning_rate = learning_rate ### JABALERT: The learning_radius is normally set by ### the learn() function of CFSOM, so it doesn't matter ### much that the value accepted here is in matrix and ### not sheet coordinates. It's confusing that anything ### would accept matrix coordinates, but the learning_fn ### doesn't have access to the sheet, so it can't easily ### convert from sheet coords. radius = self.learning_radius crop_radius = max(1.25,radius*self.crop_radius_multiplier) # find out the matrix coordinates of the winner # # NOTE: when there are multiple projections, it would be # slightly more efficient to calculate the winner coordinates # within the Sheet, e.g. by moving winner_coords() to CFSOM # and passing in the results here. However, finding the # coordinates does not take much time, and requiring the # winner to be passed in would make it harder to mix and match # Projections and learning rules with different Sheets. wr,wc = array_argmax(output_activity) # Optimization: Calculate the bounding box around the winner # in which weights will be changed, to avoid considering those # units below. 
cmin = int(max(wc-crop_radius,0)) cmax = int(min(wc+crop_radius+1,cols)) # at least 1 between cmin and cmax rmin = int(max(wr-crop_radius,0)) rmax = int(min(wr+crop_radius+1,rows)) # generate the neighborhood kernel matrix so that the values # can be read off easily using matrix coordinates. nk_generator = self.neighborhood_kernel_generator radius_int = int(ceil(crop_radius)) rbound = radius_int + 0.5 bb = BoundingBox(points=((-rbound,-rbound), (rbound,rbound))) # Print parameters designed to match fm2d's output #print "%d rad= %d std= %f alpha= %f" % (topo.sim._time, radius_int, radius, single_connection_learning_rate) neighborhood_matrix = nk_generator(bounds=bb,xdensity=1,ydensity=1, size=2*radius) for r in range(rmin,rmax): for c in range(cmin,cmax): cwc = c - wc rwr = r - wr lattice_dist = L2norm((cwc,rwr)) if lattice_dist <= crop_radius: cf = cfs[r][c] rate = single_connection_learning_rate * neighborhood_matrix[rwr+radius_int,cwc+radius_int] X = cf.get_input_matrix(input_activity) cf.weights += rate * (X - cf.weights) # CEBHACKALERT: see ConnectionField.__init__() cf.weights *= cf.mask
ioam/svn-history
topo/learningfn/som.py
Python
bsd-3-clause
5,271
[ "Gaussian" ]
b1e22bfb693d8720a46c61151d8ba9dc363fe82a6f83004bcf03297771de8146
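The CFPLF_HebbianSOM learning function above moves, for every unit within crop_radius of the winner, the connection-field weights toward the input, scaled by a neighborhood factor read off a Gaussian kernel. A minimal NumPy sketch of that update rule, independent of Topographica's connection-field machinery, follows; the array shapes, the function name som_step, and the explicit Gaussian with sigma equal to the radius are illustrative assumptions.

# Sketch only: plain-array version of the Hebbian SOM neighborhood update.
import numpy as np

def som_step(weights, x, winner, learning_rate, radius):
    """weights: (rows, cols, n_inputs) float array; x: (n_inputs,); winner: (row, col)."""
    rows, cols, _ = weights.shape
    wr, wc = winner
    for r in range(rows):
        for c in range(cols):
            dist = np.hypot(r - wr, c - wc)  # lattice distance to the winner
            if dist <= radius:
                # Gaussian neighborhood factor times the learning rate
                rate = learning_rate * np.exp(-dist ** 2 / (2.0 * radius ** 2))
                # Hebbian SOM rule: pull the weights toward the input
                weights[r, c] += rate * (x - weights[r, c])
    return weights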
# -*- coding: utf-8 -*- """ baserawio ====== Classes ------- BaseRawIO abstract class which should be overridden to write a RawIO. RawIO is a new API in neo that is supposed to acces as fast as possible raw data. All IO with theses carractéristics should/could be rewritten: * internally use of memmap (or hdf5) * reading header is quite cheap (not read all the file) * neo tree object is symetric and logical: same channel/units/event along all block and segments. So this handle **only** one simplified but very frequent case of dataset: * Only one channel set for AnalogSignal (aka ChannelIndex) stable along Segment * Only one channel set for SpikeTrain (aka Unit) stable along Segment * AnalogSignal have all the same sampling_rate acroos all Segment * t_start/t_stop are the same for many object (SpikeTrain, Event) inside a Segment * AnalogSignal should all have the same sampling_rate otherwise the won't be read a the same time. So signal_group_mode=='split-all' in BaseFromRaw An helper class `neo.io.basefromrawio.BaseFromRaw` should transform a RawIO to neo legacy IO from free. With this API the IO have an attributes `header` with necessary keys. See ExampleRawIO as example. BaseRawIO implement a possible presistent cache system that can be used by some IOs to avoid very long parse_header(). The idea is that some variable or vector can be store somewhere (near the fiel, /tmp, any path) """ # from __future__ import unicode_literals, print_function, division, absolute_import from __future__ import print_function, division, absolute_import import logging import numpy as np import os import sys from neo import logging_handler try: import joblib HAVE_JOBLIB = True except ImportError: HAVE_JOBLIB = False possible_raw_modes = ['one-file', 'multi-file', 'one-dir', ] # 'multi-dir', 'url', 'other' error_header = 'Header is not read yet, do parse_header() first' _signal_channel_dtype = [ ('name', 'U64'), ('id', 'int64'), ('sampling_rate', 'float64'), ('dtype', 'U16'), ('units', 'U64'), ('gain', 'float64'), ('offset', 'float64'), ('group_id', 'int64'), ] _common_sig_characteristics = ['sampling_rate', 'dtype', 'group_id'] _unit_channel_dtype = [ ('name', 'U64'), ('id', 'U64'), # for waveform ('wf_units', 'U64'), ('wf_gain', 'float64'), ('wf_offset', 'float64'), ('wf_left_sweep', 'int64'), ('wf_sampling_rate', 'float64'), ] _event_channel_dtype = [ ('name', 'U64'), ('id', 'U64'), ('type', 'S5'), # epoch ot event ] class BaseRawIO(object): """ Generic class to handle. """ name = 'BaseIO' description = '' extensions = [] rawmode = None # one key in possible_raw_modes def __init__(self, use_cache=False, cache_path='same_as_resource', **kargs): """ When rawmode=='one-file' kargs MUST contains 'filename' the filename When rawmode=='multi-file' kargs MUST contains 'filename' one of the filenames. When rawmode=='one-dir' kargs MUST contains 'dirname' the dirname. """ # create a logger for the IO class fullname = self.__class__.__module__ + '.' + self.__class__.__name__ self.logger = logging.getLogger(fullname) # create a logger for 'neo' and add a handler to it if it doesn't # have one already. 
# (it will also not add one if the root logger has a handler) corename = self.__class__.__module__.split('.')[0] corelogger = logging.getLogger(corename) rootlogger = logging.getLogger() if not corelogger.handlers and not rootlogger.handlers: corelogger.addHandler(logging_handler) self.use_cache = use_cache if use_cache: assert HAVE_JOBLIB, 'You need to install joblib for cache' self.setup_cache(cache_path) else: self._cache = None self.header = None def parse_header(self): """ This must parse the file header to get everything needed for fast access later on. This must fill self.header['nb_block'] self.header['nb_segment'] self.header['signal_channels'] self.header['unit_channels'] self.header['event_channels'] """ self._parse_header() self._group_signal_channel_characteristics() def source_name(self): """Return a fancy name for the file source""" return self._source_name() def __repr__(self): txt = '{}: {}\n'.format(self.__class__.__name__, self.source_name()) if self.header is not None: nb_block = self.block_count() txt += 'nb_block: {}\n'.format(nb_block) nb_seg = [self.segment_count(i) for i in range(nb_block)] txt += 'nb_segment: {}\n'.format(nb_seg) for k in ('signal_channels', 'unit_channels', 'event_channels'): ch = self.header[k] if len(ch) > 8: chantxt = "[{} ... {}]".format(', '.join(e for e in ch['name'][:4]), ', '.join(e for e in ch['name'][-4:])) else: chantxt = "[{}]".format(', '.join(e for e in ch['name'])) txt += '{}: {}\n'.format(k, chantxt) return txt def _generate_minimal_annotations(self): """ Helper function that generates a nested dict of all annotations. Must be called once these are OK: * block_count() * segment_count() * signal_channels_count() * unit_channels_count() * event_channels_count() Usage: raw_annotations['blocks'][block_index] = { 'nickname' : 'super block', 'segments' : ...} raw_annotations['blocks'][block_index]['segments'][seg_index]['signals'][channel_index] = {'nickname': 'super channel'} raw_annotations['blocks'][block_index]['segments'][seg_index]['units'][unit_index] = {'nickname': 'super neuron'} raw_annotations['blocks'][block_index]['segments'][seg_index]['events'][ev_chan] = {'nickname': 'super trigger'} These annotations will be used directly in objects at the neo.io API level. Standard annotations like name/id/file_origin are already generated here. 
""" signal_channels = self.header['signal_channels'] unit_channels = self.header['unit_channels'] event_channels = self.header['event_channels'] a = {'blocks': [], 'signal_channels': [], 'unit_channels': [], 'event_channels': []} for block_index in range(self.block_count()): d = {'segments': []} d['file_origin'] = self.source_name() a['blocks'].append(d) for seg_index in range(self.segment_count(block_index)): d = {'signals': [], 'units': [], 'events': []} d['file_origin'] = self.source_name() a['blocks'][block_index]['segments'].append(d) for c in range(signal_channels.size): # use for AnalogSignal.annotations d = {} d['name'] = signal_channels['name'][c] d['channel_id'] = signal_channels['id'][c] a['blocks'][block_index]['segments'][seg_index]['signals'].append(d) for c in range(unit_channels.size): # use for SpikeTrain.annotations d = {} d['name'] = unit_channels['name'][c] d['id'] = unit_channels['id'][c] a['blocks'][block_index]['segments'][seg_index]['units'].append(d) for c in range(event_channels.size): # use for Event.annotations d = {} d['name'] = event_channels['name'][c] d['id'] = event_channels['id'][c] d['file_origin'] = self._source_name() a['blocks'][block_index]['segments'][seg_index]['events'].append(d) for c in range(signal_channels.size): # use for ChannelIndex.annotations d = {} d['name'] = signal_channels['name'][c] d['channel_id'] = signal_channels['id'][c] d['file_origin'] = self._source_name() a['signal_channels'].append(d) for c in range(unit_channels.size): # use for Unit.annotations d = {} d['name'] = unit_channels['name'][c] d['id'] = unit_channels['id'][c] d['file_origin'] = self._source_name() a['unit_channels'].append(d) for c in range(event_channels.size): # not used in neo.io at the moment could usefull one day d = {} d['name'] = event_channels['name'][c] d['id'] = event_channels['id'][c] d['file_origin'] = self._source_name() a['event_channels'].append(d) self.raw_annotations = a def _raw_annotate(self, obj_name, chan_index=0, block_index=0, seg_index=0, **kargs): """ Annotate a object in the list/dict tree annotations. 
""" bl_annotations = self.raw_annotations['blocks'][block_index] seg_annotations = bl_annotations['segments'][seg_index] if obj_name == 'blocks': bl_annotations.update(kargs) elif obj_name == 'segments': seg_annotations.update(kargs) elif obj_name in ['signals', 'events', 'units']: obj_annotations = seg_annotations[obj_name][chan_index] obj_annotations.update(kargs) elif obj_name in ['signal_channels', 'unit_channels', 'event_channel']: obj_annotations = self.raw_annotations[obj_name][chan_index] obj_annotations.update(kargs) def _repr_annotations(self): txt = 'Raw annotations\n' for block_index in range(self.block_count()): bl_a = self.raw_annotations['blocks'][block_index] txt += '*Block {}\n'.format(block_index) for k, v in bl_a.items(): if k in ('segments',): continue txt += ' -{}: {}\n'.format(k, v) for seg_index in range(self.segment_count(block_index)): seg_a = bl_a['segments'][seg_index] txt += ' *Segment {}\n'.format(seg_index) for k, v in seg_a.items(): if k in ('signals', 'units', 'events',): continue txt += ' -{}: {}\n'.format(k, v) for child in ('signals', 'units', 'events'): n = self.header[child[:-1] + '_channels'].shape[0] for c in range(n): neo_name = {'signals': 'AnalogSignal', 'units': 'SpikeTrain', 'events': 'Event/Epoch'}[child] txt += ' *{} {}\n'.format(neo_name, c) child_a = seg_a[child][c] for k, v in child_a.items(): txt += ' -{}: {}\n'.format(k, v) return txt def print_annotations(self): """Print formated raw_annotations""" print(self._repr_annotations()) def block_count(self): """return number of blocks""" return self.header['nb_block'] def segment_count(self, block_index): """return number of segment for a given block""" return self.header['nb_segment'][block_index] def signal_channels_count(self): """Return the number of signal channel. Same allong all block and Segment. """ return len(self.header['signal_channels']) def unit_channels_count(self): """Return the number of unit (aka spike) channel. Same allong all block and Segment. """ return len(self.header['unit_channels']) def event_channels_count(self): """Return the number of event/epoch channel. Same allong all block and Segment. """ return len(self.header['event_channels']) def segment_t_start(self, block_index, seg_index): """Global t_start of a Segment in s. shared by all objects except for AnalogSignal. """ return self._segment_t_start(block_index, seg_index) def segment_t_stop(self, block_index, seg_index): """Global t_start of a Segment in s. shared by all objects except for AnalogSignal. """ return self._segment_t_stop(block_index, seg_index) ### # signal and channel zone def _group_signal_channel_characteristics(self): """ Usefull for few IOs (TdtrawIO, NeuroExplorerRawIO, ...). Group signals channels by same characteristics: * sampling_rate (global along block and segment) * group_id (explicite channel group) If all channels have the same characteristics them `get_analogsignal_chunk` can be call wihtout restriction. If not then **channel_indexes** must be specified in `get_analogsignal_chunk` and only channels with same caracteristics can be read at the same time. This is usefull for some IO than have internally several signals channels familly. For many RawIO all channels have the same sampling_rate/size/t_start. In that cases, internal flag **self._several_channel_groups will be set to False, so `get_analogsignal_chunk(..)` won't suffer in performance. Note that at neo.io level this have an impact on `signal_group_mode`. 
'split-all' will work in any situation, but grouping channels in the same AnalogSignal with 'group-by-XXX' will of course depend on common characteristics. """ characteristics = self.header['signal_channels'][_common_sig_characteristics] unique_characteristics = np.unique(characteristics) if len(unique_characteristics) == 1: self._several_channel_groups = False else: self._several_channel_groups = True def _check_common_characteristics(self, channel_indexes): """ Useful for a few IOs (TdtrawIO, NeuroExplorerRawIO, ...). Check whether a set of signal channel_indexes shares common characteristics (**sampling_rate/t_start/size**). Useful only when a RawIO proposes different channel groups with different sampling_rates, for instance. """ # ~ print('_check_common_characteristics', channel_indexes) assert channel_indexes is not None, \ 'You must specify channel_indexes' characteristics = self.header['signal_channels'][_common_sig_characteristics] # ~ print(characteristics[channel_indexes]) assert np.unique(characteristics[channel_indexes]).size == 1, \ 'This channel set has different characteristics' def get_group_channel_indexes(self): """ Useful for a few IOs (TdtrawIO, NeuroExplorerRawIO, ...). Return a list of channel_indexes that have the same characteristics """ if self._several_channel_groups: characteristics = self.header['signal_channels'][_common_sig_characteristics] unique_characteristics = np.unique(characteristics) channel_indexes_list = [] for e in unique_characteristics: channel_indexes, = np.nonzero(characteristics == e) channel_indexes_list.append(channel_indexes) return channel_indexes_list else: return [None] def channel_name_to_index(self, channel_names): """ Transform channel_names to channel_indexes. Based on self.header['signal_channels'] """ ch = self.header['signal_channels'] channel_indexes, = np.nonzero(np.in1d(ch['name'], channel_names)) assert len(channel_indexes) == len(channel_names), 'not match' return channel_indexes def channel_id_to_index(self, channel_ids): """ Transform channel_ids to channel_indexes. 
Based on self.header['signal_channels'] """ ch = self.header['signal_channels'] channel_indexes, = np.nonzero(np.in1d(ch['id'], channel_ids)) assert len(channel_indexes) == len(channel_ids), 'not match' return channel_indexes def _get_channel_indexes(self, channel_indexes, channel_names, channel_ids): """ select channel_indexes from channel_indexes/channel_names/channel_ids depending which is not None """ if channel_indexes is None and channel_names is not None: channel_indexes = self.channel_name_to_index(channel_names) if channel_indexes is None and channel_ids is not None: channel_indexes = self.channel_id_to_index(channel_ids) return channel_indexes def get_signal_size(self, block_index, seg_index, channel_indexes=None): if self._several_channel_groups: self._check_common_characteristics(channel_indexes) return self._get_signal_size(block_index, seg_index, channel_indexes) def get_signal_t_start(self, block_index, seg_index, channel_indexes=None): if self._several_channel_groups: self._check_common_characteristics(channel_indexes) return self._get_signal_t_start(block_index, seg_index, channel_indexes) def get_signal_sampling_rate(self, channel_indexes=None): if self._several_channel_groups: self._check_common_characteristics(channel_indexes) chan_index0 = channel_indexes[0] else: chan_index0 = 0 sr = self.header['signal_channels'][chan_index0]['sampling_rate'] return float(sr) def get_analogsignal_chunk(self, block_index=0, seg_index=0, i_start=None, i_stop=None, channel_indexes=None, channel_names=None, channel_ids=None): """ Return a chunk of raw signal. """ channel_indexes = self._get_channel_indexes(channel_indexes, channel_names, channel_ids) if self._several_channel_groups: self._check_common_characteristics(channel_indexes) raw_chunk = self._get_analogsignal_chunk( block_index, seg_index, i_start, i_stop, channel_indexes) return raw_chunk def rescale_signal_raw_to_float(self, raw_signal, dtype='float32', channel_indexes=None, channel_names=None, channel_ids=None): channel_indexes = self._get_channel_indexes(channel_indexes, channel_names, channel_ids) if channel_indexes is None: channel_indexes = slice(None) channels = self.header['signal_channels'][channel_indexes] float_signal = raw_signal.astype(dtype) if np.any(channels['gain'] != 1.): float_signal *= channels['gain'] if np.any(channels['offset'] != 0.): float_signal += channels['offset'] return float_signal # spiketrain and unit zone def spike_count(self, block_index=0, seg_index=0, unit_index=0): return self._spike_count(block_index, seg_index, unit_index) def get_spike_timestamps(self, block_index=0, seg_index=0, unit_index=0, t_start=None, t_stop=None): """ The timestamp is as close to the format itself. Sometimes float/int32/int64. Sometimes it is the index on the signal but not always. The conversion to second or index_on_signal is done outside here. t_start/t_sop are limits in seconds. 
""" timestamp = self._get_spike_timestamps(block_index, seg_index, unit_index, t_start, t_stop) return timestamp def rescale_spike_timestamp(self, spike_timestamps, dtype='float64'): """ Rescale spike timestamps to second """ return self._rescale_spike_timestamp(spike_timestamps, dtype) # spiketrain waveform zone def get_spike_raw_waveforms(self, block_index=0, seg_index=0, unit_index=0, t_start=None, t_stop=None): wf = self._get_spike_raw_waveforms(block_index, seg_index, unit_index, t_start, t_stop) return wf def rescale_waveforms_to_float(self, raw_waveforms, dtype='float32', unit_index=0): wf_gain = self.header['unit_channels']['wf_gain'][unit_index] wf_offset = self.header['unit_channels']['wf_offset'][unit_index] float_waveforms = raw_waveforms.astype(dtype) if wf_gain != 1.: float_waveforms *= wf_gain if wf_offset != 0.: float_waveforms += wf_offset return float_waveforms # event and epoch zone def event_count(self, block_index=0, seg_index=0, event_channel_index=0): return self._event_count(block_index, seg_index, event_channel_index) def get_event_timestamps(self, block_index=0, seg_index=0, event_channel_index=0, t_start=None, t_stop=None): """ The timestamp is as close to the format itself. Sometimes float/int32/int64. Sometimes it is the index on the signal but not always. The conversion to second or index_on_signal is done outside here. t_start/t_sop are limits in seconds. returns timestamp labels durations """ timestamp, durations, labels = self._get_event_timestamps( block_index, seg_index, event_channel_index, t_start, t_stop) return timestamp, durations, labels def rescale_event_timestamp(self, event_timestamps, dtype='float64'): """ Rescale event timestamps to s """ return self._rescale_event_timestamp(event_timestamps, dtype) def rescale_epoch_duration(self, raw_duration, dtype='float64'): """ Rescale epoch raw duration to s """ return self._rescale_epoch_duration(raw_duration, dtype) def setup_cache(self, cache_path, **init_kargs): if self.rawmode in ('one-file', 'multi-file'): ressource_name = self.filename elif self.rawmode == 'one-dir': ressource_name = self.dirname else: raise (NotImlementedError) if cache_path == 'home': if sys.platform.startswith('win'): dirname = os.path.join(os.environ['APPDATA'], 'neo_rawio_cache') elif sys.platform.startswith('darwin'): dirname = '~/Library/Application Support/neo_rawio_cache' else: dirname = os.path.expanduser('~/.config/neo_rawio_cache') dirname = os.path.join(dirname, self.__class__.__name__) if not os.path.exists(dirname): os.makedirs(dirname) elif cache_path == 'same_as_resource': dirname = os.path.dirname(ressource_name) else: assert os.path.exists(cache_path), \ 'cache_path do not exists use "home" or "same_as_file" to make this auto' # the hash of the ressource (dir of file) is done with filename+datetime # TODO make something more sofisticated when rawmode='one-dir' that use all filename and datetime d = dict(ressource_name=ressource_name, mtime=os.path.getmtime(ressource_name)) hash = joblib.hash(d, hash_name='md5') # name is compund by the real_n,ame and the hash name = '{}_{}'.format(os.path.basename(ressource_name), hash) self.cache_filename = os.path.join(dirname, name) if os.path.exists(self.cache_filename): self.logger.warning('Use existing cache file {}'.format(self.cache_filename)) self._cache = joblib.load(self.cache_filename) else: self.logger.warning('Create cache file {}'.format(self.cache_filename)) self._cache = {} self.dump_cache() def add_in_cache(self, **kargs): assert self.use_cache 
self._cache.update(kargs) self.dump_cache() def dump_cache(self): assert self.use_cache joblib.dump(self._cache, self.cache_filename) ################## # Functions to be implement in IO below here def _parse_header(self): raise (NotImplementedError) # must call # self._generate_empty_annotations() def _source_name(self): raise (NotImplementedError) def _segment_t_start(self, block_index, seg_index): raise (NotImplementedError) def _segment_t_stop(self, block_index, seg_index): raise (NotImplementedError) ### # signal and channel zone def _get_signal_size(self, block_index, seg_index, channel_indexes): raise (NotImplementedError) def _get_signal_t_start(self, block_index, seg_index, channel_indexes): raise (NotImplementedError) def _get_analogsignal_chunk(self, block_index, seg_index, i_start, i_stop, channel_indexes): raise (NotImplementedError) ### # spiketrain and unit zone def _spike_count(self, block_index, seg_index, unit_index): raise (NotImplementedError) def _get_spike_timestamps(self, block_index, seg_index, unit_index, t_start, t_stop): raise (NotImplementedError) def _rescale_spike_timestamp(self, spike_timestamps, dtype): raise (NotImplementedError) ### # spike waveforms zone def _get_spike_raw_waveforms(self, block_index, seg_index, unit_index, t_start, t_stop): raise (NotImplementedError) ### # event and epoch zone def _event_count(self, block_index, seg_index, event_channel_index): raise (NotImplementedError) def _get_event_timestamps(self, block_index, seg_index, event_channel_index, t_start, t_stop): raise (NotImplementedError) def _rescale_event_timestamp(self, event_timestamps, dtype): raise (NotImplementedError) def _rescale_epoch_duration(self, raw_duration, dtype): raise (NotImplementedError)
rgerkin/python-neo
neo/rawio/baserawio.py
Python
bsd-3-clause
26,300
[ "NEURON" ]
24f63f61e8c39274ec91c61d3ea1af2fa6b3a1bd568c0fe3565afd62def6fd48
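The baserawio module above defines the consumer-facing methods of the RawIO API (parse_header, get_analogsignal_chunk, rescale_signal_raw_to_float, get_spike_timestamps, rescale_spike_timestamp). The short sketch below shows how a RawIO built on this base class is typically driven; ExampleRawIO and the filename are placeholders, and any concrete subclass exposing the same methods would be used the same way.

# Sketch only: typical consumption of a RawIO subclass.
from neo.rawio import ExampleRawIO

reader = ExampleRawIO(filename='my_recording.fake')
reader.parse_header()  # cheap: only fills reader.header

# Read a chunk of raw signal and rescale it to float using per-channel gain/offset.
raw = reader.get_analogsignal_chunk(block_index=0, seg_index=0,
                                    i_start=0, i_stop=1024,
                                    channel_indexes=[0, 1])
sigs = reader.rescale_signal_raw_to_float(raw, dtype='float64',
                                          channel_indexes=[0, 1])

# Spike times come back in the file's native format and are rescaled to seconds.
ts = reader.get_spike_timestamps(block_index=0, seg_index=0, unit_index=0)
spike_times = reader.rescale_spike_timestamp(ts, dtype='float64')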
# # PyOphidia - Python bindings for Ophidia # Copyright (C) 2015-2021 CMCC Foundation # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. # from __future__ import division from __future__ import print_function from __future__ import absolute_import import sys import os import base64 import struct import PyOphidia.client as _client from inspect import currentframe sys.path.append(os.path.dirname(__file__)) def get_linenumber(): cf = currentframe() return __file__, cf.f_back.f_lineno class Cube: """Cube(container='-', cwd=None, exp_dim='auto', host_partition='auto', imp_dim='auto', measure=None, src_path=None, cdd=None, compressed='no', exp_concept_level='c', grid='-', imp_concept_level='c', import_metadata='no', check_compliance='no', offset=0, ioserver='mysql_table', ncores=1, nfrag=0, nhost=0, subset_dims='none', subset_filter='all', time_filter='yes', subset_type='index', exec_mode='sync', base_time='1900-01-01 00:00:00', calendar='standard', hierarchy='oph_base', leap_month=2, leap_year=0, month_lengths='31,28,31,30,31,30,31,31,30,31,30,31', run='yes', units='d', vocabulary='-', policy='rr', description='-', schedule=0, pid=None, check_grid='no', save='yes', display=False) -> obj or Cube(pid=None) -> obj Attributes: pid: cube PID creation_date: creation date of the cube measure: name of the variable imported into the cube measure_type: measure data type level: number of operations between the original imported cube and the actual cube nfragments: total number of fragments source_file: parent of the actual cube hostxcube: number of hosts associated with the cube fragxdb: number of fragments for each database rowsxfrag: number of rows for each fragment elementsxrow: number of elements for each row compressed: 'yes' for a compressed cube, 'no' otherwise size: size of the cube nelements: total number of elements dim_info: list of dict with information on each cube dimension Class Attributes: client: instance of class Client through which it is possible to submit all requests Methods: aggregate(ncores=1, nthreads=1, exec_mode='sync', schedule=0, group_size='all', operation=None, missingvalue='-', grid='-', container='-', description='-', check_grid='no', save='yes', display=False) -> Cube or None : wrapper of the operator OPH_AGGREGATE aggregate2(ncores=1, nthreads=1, exec_mode='sync', schedule=0, dim='-', concept_level='A', midnight='24', operation=None, grid='-', missingvalue='-', container='-', description='-', check_grid='no', save='yes', display=False) -> Cube or None : wrapper of the operator OPH_AGGREGATE2 apply(ncores=1, nthreads=1, exec_mode='sync', query='measure', dim_query='null', measure='null', measure_type='manual', dim_type='manual', check_type='yes', on_reduce='skip', compressed='auto', schedule=0,container='-', description='-', save='yes', display=False) -> Cube or None : wrapper of the operator OPH_APPLY concatnc(src_path=None, cdd=None, grid='-', check_exp_dim='yes', dim_offset='-', dim_continue='no', 
offset=0, description='-', subset_dims='none', subset_filter='all', subset_type='index', time_filter='yes', ncores=1, exec_mode='sync', schedule=0, save='yes', display=False) -> Cube or None : wrapper of the operator OPH_CONCATNC concatnc2(src_path=None, cdd=None, grid='-', check_exp_dim='yes', dim_offset='-', dim_continue='no', offset=0, description='-', subset_dims='none', subset_filter='all', subset_type='index', time_filter='yes', ncores=1, nthreads=1, exec_mode='sync', schedule=0, save='yes', display=False) -> Cube or None : wrapper of the operator OPH_CONCATNC2 cubeelements( schedule=0, algorithm='dim_product', ncores=1, exec_mode='sync', objkey_filter='all', save='yes', display=True) -> dict or None : wrapper of the operator OPH_CUBEELEMENTS cubeschema(objkey_filter='all', exec_mode='sync', level=0, dim=None, show_index='no', show_time='no', base64='no', action='read', concept_level='c', dim_level=1, dim_array='yes', save='yes', display=True) -> dict or None : wrapper of the operator OPH_CUBESCHEMA cubesize(schedule=0, ncores=1, byte_unit='MB', algorithm='euristic', objkey_filter='all', exec_mode='sync', save='yes', display=True) -> dict or None : wrapper of the operator OPH_CUBESIZE delete(ncores=1, nthreads=1, exec_mode='sync', schedule=0, save='yes', display=False) -> None : wrapper of the operator OPH_DELETE drilldown(ndim=1, container='-', ncores=1, exec_mode='sync', schedule=0, description='-', save='yes', display=False) -> Cube or None : wrapper of the operator OPH_DRILLDOWN duplicate(container='-', ncores=1, nthreads=1, exec_mode='sync', description='-', save='yes', display=False) -> Cube or None : wrapper of the operator OPH_DUPLICATE explore(schedule=0, limit_filter=100, subset_dims=None, subset_filter='all', time_filter='yes', subset_type='index', show_index='no', show_id='no', show_time='no', level=1, output_path='default', output_name='default', cdd=None, base64='no', ncores=1, exec_mode='sync', objkey_filter='all', save='yes', display=True) -> dict or None : wrapper of the operator OPH_EXPLORECUBE exportnc(misc='no', output_path='default', output_name='default', cdd=None, force='no', export_metadata='yes', schedule=0, exec_mode='sync', ncores=1, save='yes', display=False) -> None : wrapper of the operator OPH_EXPORTNC exportnc2(misc='no', output_path='default', output_name='default', cdd=None, force='no', export_metadata='yes', schedule=0, exec_mode='sync', ncores=1, save='yes', display=False) -> None : wrapper of the operator OPH_EXPORTNC2 export_array(show_id='no', show_time='no', subset_dims=None, subset_filter=None, time_filter='no') -> dict or None : return data from an Ophidia datacube into a Python structure info(display=True) -> None : call OPH_CUBESIZE and OPH_CUBESCHEMA to fill all Cube attributes intercube(cube2=None, cubes=None, operation='sub', missingvalue="-", container='-', exec_mode='sync', ncores=1, description='-', save='yes', display=False) -> Cube or None : wrapper of the operator OPH_INTERCUBE merge(nmerge=0, schedule=0, description='-', container='-', exec_mode='sync', ncores=1, save='yes', display=False) -> Cube or None : wrapper of the operator OPH_MERGE metadata(mode='read', metadata_id=0, metadata_key='all', variable='global', metadata_type='text', metadata_value=None, variable_filter=None, metadata_type_filter=None, metadata_value_filter=None, force='no', exec_mode='sync', objkey_filter='all', save='yes', display=True) -> dict or None : wrapper of the operator OPH_METADATA permute(dim_pos=None, container='-', exec_mode='sync', ncores=1, 
nthreads=1, schedule=0, description='-', save='yes', display=False) -> Cube or None : wrapper of the operator OPH_PERMUTE provenance(branch='all', exec_mode='sync', objkey_filter='all', save='yes', display=True) -> dict or None : wrapper of the operator OPH_CUBEIO publish(ncores=1, content='all', exec_mode='sync', show_id= 'no', show_index='no', schedule=0, show_time='no', save='yes', display=True) -> dict or None : wrapper of the operator OPH_PUBLISH reduce(operation=None, container=None, exec_mode='sync', missingvalue="-", grid='-', group_size='all', ncores=1, nthreads=1, schedule=0, order=2, description='-', objkey_filter='all', check_grid='no', save='yes', display=False) -> Cube or None : wrapper of the operator OPH_REDUCE reduce2(dim=None, operation=None, concept_level='A', missingvalue="-", container='-', exec_mode='sync', grid='-', midnight='24', order=2, description='-', schedule=0, ncores=1, nthreads=1, check_grid='no', save='yes', display=False) -> Cube or None : wrapper of the operator OPH_REDUCE2 rollup(ndim=1, container='-', exec_mode='sync', ncores=1, nthreads=1, schedule=0, description='-', save='yes', display=False) -> Cube or None : wrapper of the operator OPH_ROLLUP split(nsplit=2, container='-', exec_mode='sync', ncores=1, nthreads=1, schedule=0, description='-', save='yes', display=False) -> Cube or None : wrapper of the operator OPH_SPLIT subset(subset_dims='none', subset_filter='all', container='-', exec_mode='sync', subset_type='index', time_filter='yes', offset=0, grid='-', ncores=1, nthreads=1, schedule=0, description='-', check_grid='no', save='yes', display=False) -> Cube or None : wrapper of the operator OPH_SUBSET unpublish( exec_mode='sync', save='yes', display=False) -> None : wrapper of the operator OPH_UNPUBLISH Class Methods: setclient(username='', password='', server, port='11732', token='', read_env=False, api_mode=True, project=None) -> None : Instantiate the Client, common for all Cube objects, for submitting requests b2drop(action='put', auth_path='-', src_path=None, dst_path='-', cdd=None, exec_mode='sync', save='yes', display=False) -> None : wrapper of the operator OPH_B2DROP cancel(id=None, type='kill', objkey_filter='all', display=False) -> None : wrapper of the operator OPH_CANCEL cluster(action='info', nhost=1, host_partition='all', host_type='io', user_filter='all', exec_mode='sync', display=False) -> None : wrapper of the operator OPH_CLUSTER containerschema(container=None, cwd=None, exec_mode='sync', objkey_filter='all', save='yes', display=True) -> dict or None : wrapper of the operator OPH_CONTAINERSCHEMA createcontainer(exec_mode='sync', container=None, cwd=None, dim=None, dim_type="double", hierarchy='oph_base', base_time='1900-01-01 00:00:00', units='d', calendar='standard', month_lengths='31,28,31,30,31,30,31,31,30,31,30,31', leap_year=0, leap_month=2, vocabulary='CF', compressed='no', description='-', save='yes', display=False) -> None : wrapper of the operator OPH_CREATECONTAINER deletecontainer(container=None, container_pid='-', force='no', cwd=None, nthreads=1, exec_mode='sync', objkey_filter='all', save='yes', display=False) -> None : wrapper of the operator OPH_DELETECONTAINER explorenc(exec_mode='sync', schedule=0, measure='-', src_path=None, cdd=None, exp_dim='-', imp_dim='-', subset_dims='none', subset_type='index', subset_filter='all', limit_filter=100, show_index='no', show_id='no', show_time='no', show_stats='00000000000000', show_fit='no', level=0, imp_num_point=0, offset=50, operation='avg', wavelet='no', wavelet_ratio=0, 
wavelet_coeff='no', objkey_filter='all', save='yes', display=True) -> dict or None : wrapper of the operator OPH_EXPLORENC folder(command=None, cwd=None, path=None, exec_mode='sync', save='yes', display=False) -> None : wrapper of the operator OPH_FOLDER fs(command='ls', dpath='-', file='-', cdd=None, recursive='no', depth=0, realpath='no', exec_mode='sync', save='yes', display=False) -> None : wrapper of the operator OPH_FS get_config(key='all', objkey_filter='all', display=True) -> dict or None : wrapper of the operator OPH_GET_CONFIG hierarchy(hierarchy='all', hierarchy_version='latest', exec_mode='sync', objkey_filter='all', save='yes', display=True) -> dict or None : wrapper of the operator OPH_HIERARCHY importnc(container='-', cwd=None, exp_dim='auto', host_partition='auto', imp_dim='auto', measure=None, src_path=None, cdd=None, compressed='no', exp_concept_level='c', grid='-', imp_concept_level='c', import_metadata='yes', check_compliance='no', offset=0, ioserver='mysql_table', ncores=1, nfrag=0, nhost=0, subset_dims='none', subset_filter='all', time_filter='yes', subset_type='index', exec_mode='sync', base_time='1900-01-01 00:00:00', calendar='standard', hierarchy='oph_base', leap_month=2, leap_year=0, month_lengths='31,28,31,30,31,30,31,31,30,31,30,31', run='yes', units='d', vocabulary='CF', description='-', policy='rr', schedule=0, check_grid='no', save='yes', display=False) -> Cube or None : wrapper of the operator OPH_IMPORTNC importnc2(container='-', cwd=None, exp_dim='auto', host_partition='auto', imp_dim='auto', measure=None, src_path=None, cdd=None, compressed='no', exp_concept_level='c', grid='-', imp_concept_level='c', import_metadata='yes', check_compliance='no', offset=0, ioserver='ophidiaio_memory', ncores=1, nthreads=1, nfrag=0, nhost=0, subset_dims='none', subset_filter='all', time_filter='yes', subset_type='index', exec_mode='sync', base_time='1900-01-01 00:00:00', calendar='standard', hierarchy='oph_base', leap_month=2, leap_year=0, month_lengths='31,28,31,30,31,30,31,31,30,31,30,31', run='yes', units='d', vocabulary='CF', description='-', policy='rr', schedule=0, check_grid='no', save='yes', display=False) -> Cube or None : wrapper of the operator OPH_IMPORTNC2 instances(action='read', level=1, host_filter='all', nhost=0, host_partition='all', ioserver_filter='all', host_status='all', exec_mode='sync', objkey_filter='all', save='yes', display=True) -> dict or None : wrapper of the operator OPH_INSTANCES list(level=1, exec_mode='sync', path='-', cwd=None, container_filter='all', cube='all', host_filter='all', dbms_filter='all', measure_filter='all', ntransform='all', src_filter='all', db_filter='all', recursive='no', objkey_filter='all', save='yes', display=True) -> dict or None : wrapper of the operator OPH_LIST loggingbk(session_level=0, job_level=0, mask=000, session_filter='all', session_label_filter='all', session_creation_filter='1900-01-01 00:00:00,2100-01-01 00:00:00', workflowid_filter='all', markerid_filter='all', parent_job_filter='all', job_creation_filter='1900-01-01 00:00:00,2100-01-01 00:00:00', job_status_filter='all', submission_string_filter='all', job_start_filter='1900-01-01 00:00:00,2100-01-01 00:00:00', job_end_filter='1900-01-01 00:00:00,2100-01-01 00:00:00', nlines=100, objkey_filter='all', exec_mode='sync', save='yes', display=True) -> dict or None : wrapper of the operator OPH_LOGGINGBK log_info(log_type='server', container_id=0, ioserver='mysql', nlines=10, exec_mode='sync', objkey_filter='all', save='yes', display=True) -> dict or None : 
wrapper of the operator OPH_LOG_INFO man(function=None, function_type='operator', function_version='latest', exec_mode='sync', save='yes', display=True) -> dict or None : wrapper of the operator OPH_MAN manage_session(action='list', session='this', key='user', value='null', objkey_filter='all', save='yes', display=True) -> dict or None : wrapper of the operator OPH_MANAGE_SESSION mergecubes(ncores=1, exec_mode='sync', cubes=None, schedule=0, container='-', mode='i', hold_values='no', number=1, order='none', description='-', save='yes', display=False) -> Cube : wrapper of the operator OPH_MERGECUBES mergecubes2(ncores=1, exec_mode='sync', cubes=None, schedule=0, container='-', dim_type='long', number=1, order='none', description='-', dim='-', save='yes', display=False) -> Cube or None: wrapper of the operator OPH_MERGECUBES2 movecontainer(container=None, cwd=None, exec_mode='sync', save='yes', display=False) -> None : wrapper of the operator OPH_MOVECONTAINER operators(operator_filter=None, limit_filter=0, exec_mode='sync', save='yes', display=True) -> dict or None : wrapper of the operator OPH_OPERATORS_LIST primitives(dbms_filter=None, level=1, limit_filter=0, primitive_filter=None, primitive_type=None, return_type=None, exec_mode='sync', objkey_filter='all', save='yes', display=True) -> dict or None : wrapper of the operator OPH_PRIMITIVES_LIST randcube(ncores=1, exec_mode='sync', container=None, cwd=None, host_partition='auto', ioserver='mysql_table', schedule=0, algorithm='default', policy='rr', nhost=0, run='yes', nfrag=1, ntuple=1, measure=None, measure_type=None, exp_ndim=None, dim=None, concept_level='c', dim_size=None, compressed='no', grid='-', description='-', save='yes', display=False) -> Cube or None : wrapper of the operator OPH_RANDCUBE randcube2(ncores=1, nthreads=1, exec_mode='sync', container=None, cwd=None, host_partition='auto', ioserver='ophidiaio_memory', schedule=0, algorithm='default', policy='rr', nhost=0, run='yes', nfrag=1, ntuple=1, measure=None, measure_type=None, exp_ndim=None, dim=None, concept_level='c', dim_size=None, compressed='no', grid='-', description='-', save='yes', display=False) -> Cube or None : wrapper of the operator OPH_RANDCUBE2 resume(id=0, id_type='workflow', document_type='response', level=1, save='no', session='this', objkey_filter='all', user='', display=True) -> dict or None : wrapper of the operator OPH_RESUME script(script=':', args=' ', stdout='stdout', stderr='stderr', ncores=1, exec_mode='sync', list='no', space='no', python_code=False, save='yes', display=False) -> None : wrapper of the operator OPH_SCRIPT search(path='-', metadata_value_filter='all', exec_mode='sync', metadata_key_filter='all', container_filter='all', objkey_filter='all', cwd=None, recursive='no', save='yes', display=True) -> dict or None : wrapper of the operator OPH_SEARCH service(status='', level=1, enable='none', disable='none', objkey_filter='all', save='yes', display=False) -> dict or None : wrapper of the operator OPH_SERVICE showgrid(container=None, grid='all', dim='all', show_index='no', cwd=None, exec_mode='sync', objkey_filter='all', save='yes', display=True) -> dict or None : wrapper of the operator OPH_SHOWGRID tasks(cls, cube_filter='all', path='-', operator_filter='all', cwd=None, recursive='no', container='all', objkey_filter='all', exec_mode='sync', save='yes', display=True) -> dict or None : wrapper of the operator OPH_TASKS """ client = None @classmethod def setclient(cls, username="", password="", server="", port="11732", token="", 
read_env=False, api_mode=True, project=None): """setclient(username='', password='', server='', port='11732', token='', read_env=False, api_mode=True, project=None) -> None : Instantiate the Client, common for all Cube objects, for submitting requests :param username: Ophidia user :type username: str :param password: Ophidia password :type password: str :param server: Ophidia server address :type server: str :param port: Ophidia server port :type port: str :param token: Ophidia token :type token: str :param read_env: If true read the client variables from the environment :type read_env: bool :param api_mode: If True, use the class as an API and catch also framework-level errors :type api_mode: bool :param project: String with project ID to be used for job scheduling :type project: str :returns: None :rtype: None """ try: cls.client = _client.Client(username, password, server, port, token, read_env, api_mode, project) except Exception as e: print(get_linenumber(), "Something went wrong in setting the client:", e) finally: pass @classmethod def b2drop(cls, action="put", auth_path="-", src_path=None, dst_path="-", cdd=None, exec_mode="sync", save="yes", display=False): """b2drop(action='put', auth_path='-', src_path=None, dst_path='-', cdd=None, exec_mode='sync', save='yes', display=False) -> None : wrapper of the operator OPH_B2DROP :param action: put|get :type action: str :param auth_path: absolute path to the netrc file containing the B2DROP credentials :type auth_path: str :param src_path: path to the file to be uploaded/downloaded to/from B2DROP :type src_path: str :param dst_path: path where the file will be uploaded on B2DROP or downloaded on disk :type dst_path: str :param cdd: absolute path corresponding to the current directory on data repository :type cdd: str :param exec_mode: async or sync :type exec_mode: str :param save: option to enable/disable JSON response saving on the server-side (default is yes) :type save: str :param display: option for displaying the response in a "pretty way" using the pretty_print function (default is False) :type display: bool :returns: None :rtype: None :raises: RuntimeError """ try: if Cube.client is None or src_path is None: raise RuntimeError("Cube.client or src_path is None") query = "oph_b2drop " if action is not None: query += "action=" + str(action) + ";" if auth_path is not None: query += "auth_path=" + str(auth_path) + ";" if src_path is not None: query += "src_path=" + str(src_path) + ";" if dst_path is not None: query += "dst_path=" + str(dst_path) + ";" if cdd is not None: query += "cdd=" + str(cdd) + ";" if exec_mode is not None: query += "exec_mode=" + str(exec_mode) + ";" if save is not None: query += "save=" + str(save) + ";" if Cube.client.submit(query, display) is None: raise RuntimeError() except Exception as e: print(get_linenumber(), "Something went wrong:", e) raise RuntimeError() @classmethod def cluster(cls, action="info", nhost=1, host_partition="all", host_type="io", user_filter="all", exec_mode="sync", display=False): """cluster(action='info', nhost=1, host_partition='all', host_type='io', user_filter='all', exec_mode='sync', display=False) -> None : wrapper of the operator OPH_CLUSTER :param action: info|info_cluster|deploy|undeploy :type action: str :param nhost: number of hosts to be reserved as well as number of I/O servers to be started :type nhost: int :param host_partition: name of user-defined partition to be used :type host_partition: str :param host_type: type of partition to be deployed :type host_type: str 
:param user_filter: name of user to be used as filter :type user_filter: str :param exec_mode: async or sync :type exec_mode: str :param display: option for displaying the response in a "pretty way" using the pretty_print function (default is False) :type display: bool :returns: None :rtype: None :raises: RuntimeError """ try: if Cube.client is None or Cube.client.host_partition is None: raise RuntimeError("Cube.client is None") query = "oph_cluster " if action is not None: query += "action=" + str(action) + ";" if nhost is not None: query += "nhost=" + str(nhost) + ";" if host_partition is not None: query += "host_partition=" + str(host_partition) + ";" if host_type is not None: query += "host_type=" + str(host_type) + ";" if user_filter is not None: query += "user_filter=" + str(user_filter) + ";" if exec_mode is not None: query += "exec_mode=" + str(exec_mode) + ";" if Cube.client.submit(query, display) is None: raise RuntimeError() except Exception as e: print(get_linenumber(), "Something went wrong:", e) raise RuntimeError() @classmethod def containerschema(cls, container=None, cwd=None, exec_mode="sync", objkey_filter="all", save="yes", display=True): """containerschema(container=None, cwd=None, exec_mode='sync', objkey_filter='all', save='yes', display=True) -> dict or None : wrapper of the operator OPH_CONTAINERSCHEMA :param container: container name :type container: str :param cwd: current working directory :type cwd: str :param exec_mode: async or sync :type exec_mode: str :param objkey_filter: filter on the output of the operator :type objkey_filter: str :param save: option to enable/disable JSON response saving on the server-side (default is yes) :type save: str :param display: option for displaying the response in a "pretty way" using the pretty_print function (default is False) :type display: bool :returns: response or None :rtype: dict or None :raises: RuntimeError """ response = None try: if Cube.client is None or container is None or (cwd is None and Cube.client.cwd is None): raise RuntimeError("Cube.client, container or cwd is None") query = "oph_containerschema " if container is not None: query += "container=" + str(container) + ";" if cwd is not None: query += "cwd=" + str(cwd) + ";" if exec_mode is not None: query += "exec_mode=" + str(exec_mode) + ";" if objkey_filter is not None: query += "objkey_filter=" + str(objkey_filter) + ";" if save is not None: query += "save=" + str(save) + ";" if Cube.client.submit(query, display) is None: raise RuntimeError() if Cube.client.last_response is not None and display is False: response = Cube.client.deserialize_response()["response"] except Exception as e: print(get_linenumber(), "Something went wrong:", e) raise RuntimeError() else: return response @classmethod def createcontainer( cls, exec_mode="sync", container=None, cwd=None, dim=None, dim_type="double", hierarchy="oph_base", base_time="1900-01-01 00:00:00", units="d", calendar="standard", month_lengths="31,28,31,30,31,30,31,31,30,31,30,31", leap_year=0, leap_month=2, vocabulary="CF", compressed="no", description="-", save="yes", display=False, ): """createcontainer(exec_mode='sync', container=None, cwd=None, dim=None, dim_type="double", hierarchy='oph_base', base_time='1900-01-01 00:00:00', units='d', calendar='standard', month_lengths='31,28,31,30,31,30,31,31,30,31,30,31', leap_year=0, leap_month=2, vocabulary='CF', compressed='no', description='-', save='yes', display=False) -> dict or None : wrapper of the operator OPH_CREATECONTAINER :param exec_mode: async or sync 
:type exec_mode: str :param container: container name :type container: str :param cwd: current working directory :type cwd: str :param dim: pipe (|) separated list of dimension names :type dim: str :param dim_type: pipe (|) separated list of dimension types (int|float|long|double) :type dim_type: str :param hierarchy: pipe (|) separated list of dimension hierarchies (oph_base|oph_time) :type hierarchy: str :param base_time: reference time :type base_time: str :param units: unit of time :type units: str :param calendar: calendar used :type calendar: str :param month_lengths: comma-separated list of month lengths :type month_lengths: str :param leap_year: leap year :type leap_year: int :param leap_month: leap month :type leap_month: int :param vocabulary: metadata vocabulary :type vocabulary: str :param compressed: yes or no :type compressed: str :param description: additional description to be associated with the output container :type description: str :param save: option to enable/disable JSON response saving on the server-side (default is yes) :type save: str :param display: option for displaying the response in a "pretty way" using the pretty_print function (default is True) :type display: bool :returns: None :rtype: None :raises: RuntimeError """ try: if Cube.client is None or container is None or dim is None or dim_type is None or (cwd is None and Cube.client.cwd is None): raise RuntimeError("Cube.client, container, dim, dim_type or cwd is None") query = "oph_createcontainer " if exec_mode is not None: query += "exec_mode=" + str(exec_mode) + ";" if container is not None: query += "container=" + str(container) + ";" if cwd is not None: query += "cwd=" + str(cwd) + ";" if dim is not None: query += "dim=" + str(dim) + ";" if dim_type is not None: query += "dim_type=" + str(dim_type) + ";" if hierarchy is not None: query += "hierarchy=" + str(hierarchy) + ";" if base_time is not None: query += "base_time=" + str(base_time) + ";" if units is not None: query += "units=" + str(units) + ";" if calendar is not None: query += "calendar=" + str(calendar) + ";" if month_lengths is not None: query += "month_lengths=" + str(month_lengths) + ";" if leap_year is not None: query += "leap_year=" + str(leap_year) + ";" if leap_month is not None: query += "leap_month=" + str(leap_month) + ";" if vocabulary is not None: query += "vocabulary=" + str(vocabulary) + ";" if compressed is not None: query += "compressed=" + str(compressed) + ";" if description is not None: query += "description=" + str(description) + ";" if save is not None: query += "save=" + str(save) + ";" if Cube.client.submit(query, display) is None: raise RuntimeError() except Exception as e: print(get_linenumber(), "Something went wrong:", e) raise RuntimeError() @classmethod def deletecontainer(cls, container=None, container_pid="-", force="no", cwd=None, nthreads=1, exec_mode="sync", objkey_filter="all", save="yes", display=False): """deletecontainer(container=None, container_pid='-', force='no', cwd=None, nthreads=1, exec_mode='sync', objkey_filter='all', save='yes', display=False) -> None : wrapper of the operator OPH_DELETECONTAINER :param container: container name :type container: str :param cwd: current working directory :type cwd: str :param container_pid: PID of the input container :type container_pid: str :param force: yes or no :type force: str :param nthreads: number of threads to use :type nthreads: int :param exec_mode: async or sync :type exec_mode: str :param save: option to enable/disable JSON response saving on the 
server-side (default is yes) :type save: str :param display: option for displaying the response in a "pretty way" using the pretty_print function (default is False) :type display: bool :returns: None :rtype: None :raises: RuntimeError """ try: if Cube.client is None or ((container is None or (cwd is None and Cube.client.cwd is None)) and container_pid == "-"): raise RuntimeError("Cube.client, container and container_pid or cwd is None") query = "oph_deletecontainer " if container is not None: query += "container=" + str(container) + ";" if container_pid is not None: query += "container_pid=" + str(container_pid) + ";" if force is not None: query += "force=" + str(force) + ";" if cwd is not None: query += "cwd=" + str(cwd) + ";" if nthreads is not None: query += "nthreads=" + str(nthreads) + ";" if exec_mode is not None: query += "exec_mode=" + str(exec_mode) + ";" if objkey_filter is not None: query += "objkey_filter=" + str(objkey_filter) + ";" if save is not None: query += "save=" + str(save) + ";" if Cube.client.submit(query, display) is None: raise RuntimeError() except Exception as e: print(get_linenumber(), "Something went wrong:", e) raise RuntimeError() @classmethod def cancel(cls, id=None, type="kill", objkey_filter="all", display=False): """cancel(id=None, type='kill', objkey_filter='all', display=False) -> None : wrapper of the operator OPH_CANCEL :param id: identifier of the workflow to be stopped :type id: int :param type: kill|abort|stop :type type: str :param objkey_filter: filter the objkey :type objkey_filter: str :param display: option for displaying the response in a "pretty way" using the pretty_print function (default is False) :type display: bool :returns: None :rtype: None :raises: RuntimeError """ try: if Cube.client is None or id is None: raise RuntimeError("Cube.client or id is None") query = "oph_cancel " if id is not None: query += "id=" + str(id) + ";" if type is not None: query += "type=" + str(type) + ";" if objkey_filter is not None: query += "objkey_filter=" + str(objkey_filter) + ";" if Cube.client.submit(query, display) is None: raise RuntimeError() except Exception as e: print(get_linenumber(), "Something went wrong:", e) raise RuntimeError() @classmethod def service(cls, status="", level=1, enable="none", disable="none", objkey_filter="all", display=False): """service(status='', level=1, enable='none', disable='none', objkey_filter='all', display=False) -> dict or None : wrapper of the operator OPH_SERVICE :param status: up|down :type status: str :param level: 1|2 :type level: int :param enable: list of the users to be enabled ('all' to enable all users) :type enable: str :param disable: list of the users to be disabled ('all' to disable all users) :type disable: str :param objkey_filter: filter the objkey :type objkey_filter: str :param display: option for displaying the response in a "pretty way" using the pretty_print function (default is False) :type display: bool :returns: response or None :rtype: dict or None :raises: RuntimeError """ response = None try: if Cube.client is None: raise RuntimeError("Cube.client is None") query = "oph_service " if status is not None: query += "status=" + str(status) + ";" if level is not None: query += "level=" + str(level) + ";" if enable is not None: query += "enable=" + str(enable) + ";" if disable is not None: query += "disable=" + str(disable) + ";" if objkey_filter is not None: query += "objkey_filter=" + str(objkey_filter) + ";" if Cube.client.submit(query, display) is None: raise RuntimeError() if 
Cube.client.last_response is not None and display is False: response = Cube.client.deserialize_response()["response"] except Exception as e: print(get_linenumber(), "Something went wrong:", e) raise RuntimeError() else: return response @classmethod def get_config(cls, key="all", objkey_filter="all", display=True): """get_config(key='all', objkey_filter='all', display=True) -> dict or None : wrapper of the operator OPH_GET_CONFIG :param key: all|OPH_XML_URL|OPH_SESSION_ID|OPH_EXEC_MODE|OPH_NCORES|OPH_DATACUBE|OPH_CWD|OPH_CDD|OPH_BASE_SRC_PATH :type key: str :param objkey_filter: filter the objkey :type objkey_filter: str :param display: option for displaying the response in a "pretty way" using the pretty_print function (default is True) :type display: bool :returns: response or None :rtype: dict or None :raises: RuntimeError """ response = None try: if Cube.client is None: raise RuntimeError("Cube.client is None") query = "oph_get_config " if key is not None: query += "key=" + str(key) + ";" if objkey_filter is not None: query += "objkey_filter=" + str(objkey_filter) + ";" if Cube.client.submit(query, display) is None: raise RuntimeError() if Cube.client.last_response is not None and display is False: response = Cube.client.deserialize_response()["response"] except Exception as e: print(get_linenumber(), "Something went wrong:", e) raise RuntimeError() else: return response @classmethod def manage_session(cls, action="list", session="this", key="user", value="null", objkey_filter="all", save="yes", display=True): """manage_session(action='list', session='this', key='user', value='null', objkey_filter='all', save='yes', display=True) -> dict or None : wrapper of the operator OPH_MANAGE_SESSION :param action: disable|enable|env|grant|list|listusers|new|remove|revoke|setenv :type action: str :param session: link to intended session :type session: str :param key: active|autoremove|label|user :type key: str :param value: value of the key :type value: str :param objkey_filter: filter the objkey :type objkey_filter: str :param save: option to enable/disable JSON response saving on the server-side (default is yes) :type save: str :param display: option for displaying the response in a "pretty way" using the pretty_print function (default is False) :type display: bool :returns: response or None :rtype: dict or None :raises: RuntimeError """ response = None try: if Cube.client is None: raise RuntimeError("Cube.client or action is None") query = "oph_manage_session " if action is not None: query += "action=" + str(action) + ";" if session is not None: query += "session=" + str(session) + ";" if key is not None: query += "key=" + str(key) + ";" if value is not None: query += "value=" + str(value) + ";" if objkey_filter is not None: query += "objkey_filter=" + str(objkey_filter) + ";" if save is not None: query += "save=" + str(save) + ";" if Cube.client.submit(query, display) is None: raise RuntimeError() if Cube.client.last_response is not None and display is False: response = Cube.client.deserialize_response()["response"] except Exception as e: print(get_linenumber(), "Something went wrong:", e) raise RuntimeError() else: return response @classmethod def instances( cls, action="read", level=1, host_filter="all", nhost=0, host_partition="all", ioserver_filter="all", host_status="all", exec_mode="sync", objkey_filter="all", save="yes", display=True ): """instances(level=1, action='read', level=1, host_filter='all', nhost=0, host_partition='all', ioserver_filter='all', host_status='all', 
exec_mode='sync', objkey_filter='all', save='yes', display=True) -> dict or None : wrapper of the operator OPH_INSTANCES :param action: read|add|remove :type action: str :param level: 1|2|3 :type level: int :param host_filter: optional filter on host name :type host_filter: str :param nhost: number of hosts to be grouped in the user-defined partition (add or remove mode) :type nhost: int :param host_partition: optional filter on host partition name :type host_partition: str :param ioserver_filter: mysql_table|ophidiaio_memory|all :type ioserver_filter: str :param host_status: up|down|all :type host_status: str :param exec_mode: async or sync :type exec_mode: str :param objkey_filter: filter the objkey :type objkey_filter: str :param save: option to enable/disable JSON response saving on the server-side (default is yes) :type save: str :param display: option for displaying the response in a "pretty way" using the pretty_print function (default is True) :type display: bool :returns: response or None :rtype: dict or None :raises: RuntimeError """ response = None try: if Cube.client is None: raise RuntimeError("Cube.client is None") query = "oph_instances " if action is not None: query += "action=" + str(action) + ";" if level is not None: query += "level=" + str(level) + ";" if host_filter is not None: query += "host_filter=" + str(host_filter) + ";" if nhost is not None: query += "nhost=" + str(nhost) + ";" if host_partition is not None: query += "host_partition=" + str(host_partition) + ";" if ioserver_filter is not None: query += "ioserver_filter=" + str(ioserver_filter) + ";" if host_status is not None: query += "host_status=" + str(host_status) + ";" if exec_mode is not None: query += "exec_mode=" + str(exec_mode) + ";" if objkey_filter is not None: query += "objkey_filter=" + str(objkey_filter) + ";" if save is not None: query += "save=" + str(save) + ";" if Cube.client.submit(query, display) is None: raise RuntimeError() if Cube.client.last_response is not None and display is False: response = Cube.client.deserialize_response()["response"] except Exception as e: print(get_linenumber(), "Something went wrong:", e) raise RuntimeError() else: return response @classmethod def log_info(cls, log_type="server", container_id=0, ioserver="mysql", nlines=10, exec_mode="sync", objkey_filter="all", save="yes", display=True): """log_info(log_type='server', container_id=0, ioserver='mysql', nlines=10, exec_mode='sync', objkey_filter='all', save='yes', display=True) -> dict or None : wrapper of the operator OPH_LOG_INFO :param log_type: server|container|ioserver :type log_type: str :param container_id: id of the container related to the requested log :type container_id: int :param ioserver: mysql|ophidiaio :type ioserver: str :param nlines: maximum number of lines to be displayed :type nlines: int :param objkey_filter: filter the objkey :type objkey_filter: str :param exec_mode: async or sync :type exec_mode: str :param save: option to enable/disable JSON response saving on the server-side (default is yes) :type save: str :param display: option for displaying the response in a "pretty way" using the pretty_print function (default is True) :type display: bool :returns: response or None :rtype: dict or None :raises: RuntimeError """ response = None try: if Cube.client is None: raise RuntimeError("Cube.client is None") query = "oph_log_info " if log_type is not None: query += "log_type=" + str(log_type) + ";" if container_id is not None: query += "container_id=" + str(container_id) + ";" if ioserver is 
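# Usage sketch (assumes Cube.client is already set): inspect the hosts and I/O servers of
# the Ophidia instance and keep the JSON response for later processing.
#   info = Cube.instances(action="read", level=1, display=False)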
not None: query += "ioserver=" + str(ioserver) + ";" if nlines is not None: query += "nlines=" + str(nlines) + ";" if exec_mode is not None: query += "exec_mode=" + str(exec_mode) + ";" if objkey_filter is not None: query += "objkey_filter=" + str(objkey_filter) + ";" if save is not None: query += "save=" + str(save) + ";" if Cube.client.submit(query, display) is None: raise RuntimeError() if Cube.client.last_response is not None and display is False: response = Cube.client.deserialize_response()["response"] except Exception as e: print(get_linenumber(), "Something went wrong:", e) raise RuntimeError() else: return response @classmethod def loggingbk( cls, session_level=0, job_level=0, mask=000, session_filter="all", session_label_filter="all", session_creation_filter="1900-01-01 00:00:00,2100-01-01 00:00:00", workflowid_filter="all", markerid_filter="all", parent_job_filter="all", job_creation_filter="1900-01-01 00:00:00,2100-01-01 00:00:00", job_status_filter="all", submission_string_filter="all", job_start_filter="1900-01-01 00:00:00,2100-01-01 00:00:00", job_end_filter="1900-01-01 00:00:00,2100-01-01 00:00:00", nlines=100, objkey_filter="all", exec_mode="sync", save="yes", display=True, ): """loggingbk(session_level=0, job_level=0, mask=000, session_filter='all', session_label_filter='all', session_creation_filter='1900-01-01 00:00:00,2100-01-01 00:00:00', workflowid_filter='all', markerid_filter='all', parent_job_filter='all', job_creation_filter='1900-01-01 00:00:00,2100-01-01 00:00:00', job_status_filter='all', submission_string_filter='all', job_start_filter='1900-01-01 00:00:00,2100-01-01 00:00:00', job_end_filter='1900-01-01 00:00:00,2100-01-01 00:00:00', nlines=100, objkey_filter='all', exec_mode='sync', save='yes', display=True) -> dict or None : wrapper of the operator OPH_LOGGINGBK :param session_level: 0|1 :type session_level: int :param job_level: 0|1|2 :type job_level: int :param mask: 3-digit mask for job output :type mask: str :param session_filter: filter on a particular sessionID :type session_filter: str :param session_label_filter: filter on a particular session label :type session_label_filter: str :param session_creation_filter: filter on session creation date :type session_creation_filter: str :param workflowid_filter: filter on a particular workflow ID :type workflowid_filter: str :param markerid_filter: filter on a particular marker ID :type markerid_filter: str :param parent_job_filter: filter on a particular parent job ID :type parent_job_filter: str :param job_creation_filter: filter on job submission date as with session_creation_filter :type job_creation_filter: str :param job_status_filter: filter on job status :type job_status_filter: str :param submission_string_filter: filter on submission string :type submission_string_filter: str :param job_start_filter: filter on job start date as with session_creation_filter :type job_start_filter: str :param job_end_filter: filter on job end date as with session_creation_filter :type job_end_filter: str :param nlines: maximum number of lines to be displayed :type nlines: int :param exec_mode: async or sync :type exec_mode: str :param save: option to enable/disable JSON response saving on the server-side (default is yes) :type save: str :param display: option for displaying the response in a "pretty way" using the pretty_print function (default is True) :type display: bool :returns: response or None :rtype: dict or None :raises: RuntimeError """ response = None try: if Cube.client is None: raise 
RuntimeError("Cube.client is None") query = "oph_loggingbk " if session_level is not None: query += "session_level=" + str(session_level) + ";" if job_level is not None: query += "job_level=" + str(job_level) + ";" if mask is not None: query += "mask=" + str(mask) + ";" if nlines is not None: query += "nlines=" + str(nlines) + ";" if session_filter is not None: query += "session_filter=" + str(session_filter) + ";" if session_label_filter is not None: query += "session_label_filter=" + str(session_label_filter) + ";" if session_creation_filter is not None: query += "session_creation_filter=" + str(session_creation_filter) + ";" if workflowid_filter is not None: query += "workflowid_filter=" + str(workflowid_filter) + ";" if markerid_filter is not None: query += "markerid_filter=" + str(markerid_filter) + ";" if parent_job_filter is not None: query += "parent_job_filter=" + str(parent_job_filter) + ";" if job_creation_filter is not None: query += "job_creation_filter=" + str(job_creation_filter) + ";" if job_status_filter is not None: query += "job_status_filter=" + str(job_status_filter) + ";" if submission_string_filter is not None: query += "submission_string_filter=" + str(submission_string_filter) + ";" if job_start_filter is not None: query += "job_start_filter=" + str(job_start_filter) + ";" if job_end_filter is not None: query += "job_end_filter=" + str(job_end_filter) + ";" if objkey_filter is not None: query += "objkey_filter=" + str(objkey_filter) + ";" if exec_mode is not None: query += "exec_mode=" + str(exec_mode) + ";" if save is not None: query += "save=" + str(save) + ";" if Cube.client.submit(query, display) is None: raise RuntimeError() if Cube.client.last_response is not None and display is False: response = Cube.client.deserialize_response()["response"] except Exception as e: print(get_linenumber(), "Something went wrong:", e) raise RuntimeError() else: return response @classmethod def folder(cls, command=None, path="-", cwd=None, exec_mode="sync", objkey_filter="all", save="yes", display=False): """folder(command=None, cwd=None, path=None, exec_mode='sync', save='yes', display=False) -> None : wrapper of the operator OPH_FOLDER :param command: cd|mkdir|mv|rm :type command: str :param cwd: current working directory :type cwd: str :param path: absolute or relative path :type path: str :param exec_mode: async or sync :type exec_mode: str :param save: option to enable/disable JSON response saving on the server-side (default is yes) :type save: str :param display: option for displaying the response in a "pretty way" using the pretty_print function (default is False) :type display: bool :returns: None :rtype: None :raises: RuntimeError """ try: if Cube.client is None or command is None or (cwd is None and Cube.client.cwd is None): raise RuntimeError("Cube.client, command or cwd is None") query = "oph_folder " if command is not None: query += "command=" + str(command) + ";" if path is not None: query += "path=" + str(path) + ";" if cwd is not None: query += "cwd=" + str(cwd) + ";" if exec_mode is not None: query += "exec_mode=" + str(exec_mode) + ";" if objkey_filter is not None: query += "objkey_filter=" + str(objkey_filter) + ";" if save is not None: query += "save=" + str(save) + ";" if Cube.client.submit(query, display) is None: raise RuntimeError() except Exception as e: print(get_linenumber(), "Something went wrong:", e) raise RuntimeError() @classmethod def fs(cls, command="ls", dpath="-", file="-", cdd=None, recursive="no", depth=0, realpath="no", exec_mode="sync", 
objkey_filter="all", save="yes", display=False): """fs(command='ls', dpath='-', file='-', cdd=None, recursive='no', depth=0, realpath='no', exec_mode='sync', objkey_filter='all', save='yes', display=False) -> None : wrapper of the operator OPH_FS :param command: ls|cd|mkdir|rm|mv :type command: str :param dpath: paths needed by commands :type dpath: str :param file: file filter :type file: str :param cdd: absolute path corresponding to the current directory on data repository :type cdd: str :param recursive: if search is done recursively or not :type recursive: str :param depth: maximum folder depth to be explored in case of recursion :type depth: int :param realpath: yes|no :type realpath: str :param exec_mode: async or sync :type exec_mode: str :param save: option to enable/disable JSON response saving on the server-side (default is yes) :type save: str :param display: option for displaying the response in a "pretty way" using the pretty_print function (default is False) :type display: bool :returns: None :rtype: None :raises: RuntimeError """ try: if Cube.client is None: raise RuntimeError("Cube.client, is None") query = "oph_fs " if command is not None: query += "command=" + str(command) + ";" if dpath is not None: query += "dpath=" + str(dpath) + ";" if file is not None: query += "file=" + str(file) + ";" if cdd is not None: query += "cdd=" + str(cdd) + ";" if recursive is not None: query += "recursive=" + str(recursive) + ";" if depth is not None: query += "depth=" + str(depth) + ";" if realpath is not None: query += "realpath=" + str(realpath) + ";" if exec_mode is not None: query += "exec_mode=" + str(exec_mode) + ";" if objkey_filter is not None: query += "objkey_filter=" + str(objkey_filter) + ";" if save is not None: query += "save=" + str(save) + ";" if Cube.client.submit(query, display) is None: raise RuntimeError() except Exception as e: print(get_linenumber(), "Something went wrong:", e) raise RuntimeError() @classmethod def tasks(cls, cube_filter="all", operator_filter="all", path="-", cwd=None, recursive="no", container="all", exec_mode="sync", objkey_filter="all", save="yes", display=True): """tasks(cls, cube_filter='all', path='-', operator_filter='all', cwd=None, recursive='no', container='all', objkey_filter='all', exec_mode='sync', save='yes', display=True) -> dict or None : wrapper of the operator OPH_tasks :param cube_filter: optional filter on cube :type cube_filter: str :param operator_filter: optional filter on the name of the operators :type operator_filter: str :param path: optional filter on absolute or relative path :type path: str :param cwd: current working directory :type cwd: str :param recursive: if the search is done recursively or not :type recursive: yes|no :param container: optional filter on container name :type container: str :param exec_mode: async or sync :type exec_mode: str :param save: option to enable/disable JSON response saving on the server-side (default is yes) :type save: str :param display: option for displaying the response in a "pretty way" using the pretty_print function (default is True) :type display: bool :returns: response or None :rtype: dict or None :raises: RuntimeError """ response = None try: if Cube.client is None: raise RuntimeError("Cube.client is None") query = "oph_tasks " if cube_filter is not None: query += "cube_filter=" + str(cube_filter) + ";" if operator_filter is not None: query += "operator_filter=" + str(operator_filter) + ";" if path is not None: query += "path=" + str(path) + ";" if cwd is not None: query += 
"cwd=" + str(cwd) + ";" if recursive is not None: query += "recursive=" + str(recursive) + ";" if container is not None: query += "container=" + str(container) + ";" if exec_mode is not None: query += "exec_mode=" + str(exec_mode) + ";" if objkey_filter is not None: query += "objkey_filter=" + str(objkey_filter) + ";" if save is not None: query += "save=" + str(save) + ";" if Cube.client.submit(query, display) is None: raise RuntimeError() if Cube.client.last_response is not None and display is False: response = Cube.client.deserialize_response()["response"] except Exception as e: print(get_linenumber(), "Something went wrong:", e) raise RuntimeError() else: return response @classmethod def showgrid(cls, container=None, grid="all", dim="all", show_index="no", cwd=None, exec_mode="sync", objkey_filter="all", save="yes", display=True): """showgrid(container=None, grid='all', dim='all', show_index='no', cwd=None, exec_mode='sync', objkey_filter='all', save='yes', display=True) -> dict or None : wrapper of the operator OPH_SHOWGRID :param container: name of the input container :type container: str :param grid: name of grid to show :type grid: str :param dim: name of dimension to show :type dim: str :param show_index: yes|no :type show_index: str :param cwd: current working directory :type cwd: str :param exec_mode: async or sync :type exec_mode: str :param save: option to enable/disable JSON response saving on the server-side (default is yes) :type save: str :param display: option for displaying the response in a "pretty way" using the pretty_print function (default is True) :type display: bool :returns: response or None :rtype: dict or None :raises: RuntimeError """ response = None try: if Cube.client is None or container is None or (cwd is None and Cube.client.cwd is None): raise RuntimeError("Cube.client, container or cwd is None") query = "oph_showgrid " if container is not None: query += "container=" + str(container) + ";" if grid is not None: query += "grid=" + str(grid) + ";" if dim is not None: query += "dim=" + str(dim) + ";" if show_index is not None: query += "show_index=" + str(show_index) + ";" if cwd is not None: query += "cwd=" + str(cwd) + ";" if exec_mode is not None: query += "exec_mode=" + str(exec_mode) + ";" if objkey_filter is not None: query += "objkey_filter=" + str(objkey_filter) + ";" if save is not None: query += "save=" + str(save) + ";" if Cube.client.submit(query, display) is None: raise RuntimeError() if Cube.client.last_response is not None and display is False: response = Cube.client.deserialize_response()["response"] except Exception as e: print(get_linenumber(), "Something went wrong:", e) raise RuntimeError() else: return response @classmethod def search( cls, container_filter="all", metadata_key_filter="all", metadata_value_filter="all", path="-", cwd=None, recursive="no", exec_mode="sync", objkey_filter="all", save="yes", display=True ): """search(path='-', metadata_value_filter='all', exec_mode='sync', metadata_key_filter='all', container_filter='all', objkey_filter='all', cwd=None, recursive='no', save='yes', display=True) -> dict or None : wrapper of the operator OPH_SEARCH :param container_filter: filter on container name :type container_filter: str :param metadata_key_filter: name of the key (or the enumeration of keys) identifying requested metadata :type metadata_key_filter: str :param metadata_value_filter: value of the key (or the enumeration of keys) identifying requested metadata :type metadata_value_filter: str :param path: absolute/relative 
path used as the starting point of the recursive search :type path: str :param cwd: current working directory :type cwd: str :param recursive: if the search is done recursively or not :type recursive: yes|no :param exec_mode: async or sync :type exec_mode: str :param save: option to enable/disable JSON response saving on the server-side (default is yes) :type save: str :param display: option for displaying the response in a "pretty way" using the pretty_print function (default is True) :type display: bool :returns: response or None :rtype: dict or None :raises: RuntimeError """ response = None try: if Cube.client is None or (cwd is None and Cube.client.cwd is None): raise RuntimeError("Cube.client or cwd is None") query = "oph_search " if container_filter is not None: query += "container_filter=" + str(container_filter) + ";" if metadata_key_filter is not None: query += "metadata_key_filter=" + str(metadata_key_filter) + ";" if metadata_value_filter is not None: query += "metadata_value_filter=" + str(metadata_value_filter) + ";" if path is not None: query += "path=" + str(path) + ";" if cwd is not None: query += "cwd=" + str(cwd) + ";" if recursive is not None: query += "recursive=" + str(recursive) + ";" if exec_mode is not None: query += "exec_mode=" + str(exec_mode) + ";" if objkey_filter is not None: query += "objkey_filter=" + str(objkey_filter) + ";" if save is not None: query += "save=" + str(save) + ";" if Cube.client.submit(query, display) is None: raise RuntimeError() if Cube.client.last_response is not None and display is False: response = Cube.client.deserialize_response()["response"] except Exception as e: print(get_linenumber(), "Something went wrong:", e) raise RuntimeError() else: return response @classmethod def hierarchy(cls, hierarchy="all", hierarchy_version="latest", exec_mode="sync", objkey_filter="all", save="yes", display=True): """hierarchy(hierarchy='all', hierarchy_version='latest', exec_mode='sync', objkey_filter='all', save='yes', display=True) -> dict or None : wrapper of the operator OPH_HIERARCHY :param hierarchy: name of the requested hierarchy :type hierarchy: str :param hierarchy_version: version of the requested hierarchy :type hierarchy_version: str :param exec_mode: async or sync :type exec_mode: str :param save: option to enable/disable JSON response saving on the server-side (default is yes) :type save: str :param display: option for displaying the response in a "pretty way" using the pretty_print function (default is True) :type display: bool :returns: response or None :rtype: dict or None :raises: RuntimeError """ response = None try: if Cube.client is None: raise RuntimeError("Cube.client is None") query = "oph_hierarchy " if hierarchy is not None: query += "hierarchy=" + str(hierarchy) + ";" if hierarchy_version is not None: query += "hierarchy_version=" + str(hierarchy_version) + ";" if exec_mode is not None: query += "exec_mode=" + str(exec_mode) + ";" if objkey_filter is not None: query += "objkey_filter=" + str(objkey_filter) + ";" if save is not None: query += "save=" + str(save) + ";" if Cube.client.submit(query, display) is None: raise RuntimeError() if Cube.client.last_response is not None and display is False: response = Cube.client.deserialize_response()["response"] except Exception as e: print(get_linenumber(), "Something went wrong:", e) raise RuntimeError() else: return response @classmethod def list( cls, level=1, exec_mode="sync", path="-", cwd=None, container_filter="all", cube="all", host_filter="all", dbms_filter="all", 
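# Usage sketch (hypothetical metadata key/value; assumes Cube.client is already set):
#   found = Cube.search(metadata_key_filter="experiment", metadata_value_filter="historical",
#                       path="/", cwd="/", recursive="yes", display=False)
#   Cube.hierarchy(hierarchy="oph_time")   # show the definition of the time hierarchy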
measure_filter="all", ntransform="all", src_filter="all", db_filter="all", recursive="no", objkey_filter="all", save="yes", display=True, ): """list(level=1, exec_mode='sync', path='-', cwd=None, container_filter='all', cube='all', host_filter='all', dbms_filter='all', measure_filter='all', ntransform='all', src_filter='all', db_filter='all', recursive='no', objkey_filter='all', save='yes', display=True) -> dict or None : wrapper of the operator OPH_LIST :param level: 0|1|2|3|4|5|6|7|8 :type level: int :param path: absolute or relative path :type path: str :param container_filter: filter on container name :type container_filter: str :param cube: filter on cube :type cube: str :param host_filter: filter on host :type host_filter: str :param dbms_filter: filter on DBMS :type dbms_filter: str :param db_filter: filter on db :type db_filter: str :param measure_filter: filter on measure :type measure_filter: str :param ntransform: filter on cube level :type ntransform: int :param src_filter: filter on source file :type src_filter: str :param recursive: yes|no :type recursive: str :param cwd: current working directory :type cwd: str :param exec_mode: async or sync :type exec_mode: str :param save: option to enable/disable JSON response saving on the server-side (default is yes) :type save: str :param display: option for displaying the response in a "pretty way" using the pretty_print function (default is True) :type display: bool :returns: response or None :rtype: dict or None :raises: RuntimeError """ response = None try: if Cube.client is None or (cwd is None and Cube.client.cwd is None): raise RuntimeError("Cube.client or cwd is None") query = "oph_list " if level is not None: query += "level=" + str(level) + ";" if exec_mode is not None: query += "exec_mode=" + str(exec_mode) + ";" if path is not None: query += "path=" + str(path) + ";" if cwd is not None: query += "cwd=" + str(cwd) + ";" if container_filter is not None: query += "container_filter=" + str(container_filter) + ";" if cube is not None: query += "cube=" + str(cube) + ";" if host_filter is not None: query += "host_filter=" + str(host_filter) + ";" if dbms_filter is not None: query += "dbms_filter=" + str(dbms_filter) + ";" if measure_filter is not None: query += "measure_filter=" + str(measure_filter) + ";" if ntransform is not None: query += "ntransform=" + str(ntransform) + ";" if src_filter is not None: query += "src_filter=" + str(src_filter) + ";" if db_filter is not None: query += "db_filter=" + str(db_filter) + ";" if recursive is not None: query += "recursive=" + str(recursive) + ";" if objkey_filter is not None: query += "objkey_filter=" + str(objkey_filter) + ";" if save is not None: query += "save=" + str(save) + ";" if Cube.client.submit(query, display) is None: raise RuntimeError() if Cube.client.last_response is not None and display is False: response = Cube.client.deserialize_response()["response"] except Exception as e: print(get_linenumber(), "Something went wrong:", e) raise RuntimeError() else: return response @classmethod def randcube( cls, ncores=1, exec_mode="sync", container=None, cwd=None, host_partition="auto", ioserver="mysql_table", schedule=0, algorithm="default", policy="rr", nhost=0, run="yes", nfrag=1, ntuple=1, measure=None, measure_type=None, exp_ndim=None, dim=None, concept_level="c", dim_size=None, compressed="no", grid="-", description="-", save="yes", display=False, ): """randcube(ncores=1, exec_mode='sync', container=None, cwd=None, host_partition='auto', ioserver='mysql_table', schedule=0, 
algorithm='default', policy='rr', nhost=0, run='yes', nfrag=1, ntuple=1, measure=None, measure_type=None, exp_ndim=None, dim=None, concept_level='c', dim_size=None, compressed='no', grid='-', description='-', save='yes', display=False) -> Cube or None : wrapper of the operator OPH_RANDCUBE :param ncores: number of cores to use :type ncores: int :param exec_mode: async or sync :type exec_mode: str :param container: container name :type container: str :param cwd: current working directory :type cwd: str :param host_partition: host partition name :type host_partition: str :param algorithm: default|temperatures :type algorithm: str :param policy: rule to select how data are distribuited over hosts (rr|port) :type policy: str :param ioserver: mysql_table|ophdiaio_memory :type ioserver: str :param schedule: 0 :type schedule: int :param nhost: number of hosts to use :type nhost: int :param run: yes|no :type run: str :param nfrag: number of fragments/db to use :type nfrag: int :param ntuple: number of tuples/fragment to use :type ntuple: int :param measure: measure to be imported :type measure: str :param measure_type: double|float|int|long|short|byte :type measure_type: str :param exp_ndim: number of explicit dimensions in dim :type exp_ndim: int :param dim: pipe (|) separated list of dimension names :type dim: str :param concept_level: pipe (|) separated list of dimensions hierarchy levels :type concept_level: str :param dim_size: pipe (|) separated list of dimension sizes :type dim_size: str :param compressed: yes|no :type compressed: str :param grid: optionally group dimensions in a grid :type grid: str :param description: additional description to be associated with the output cube :type description: str :param save: option to enable/disable JSON response saving on the server-side (default is yes) :type save: str :param display: option for displaying the response in a "pretty way" using the pretty_print function (default is False) :type display: bool :returns: obj or None :rtype: Cube or None :raises: RuntimeError """ if ( Cube.client is None or (cwd is None and Cube.client.cwd is None) or container is None or nfrag is None or ntuple is None or measure is None or measure_type is None or exp_ndim is None or dim is None or dim_size is None ): raise RuntimeError("Cube.client, cwd, container, nfrag, ntuple, measure, measure_type, exp_ndim, dim or dim_size is None") newcube = None query = "oph_randcube " if ncores is not None: query += "ncores=" + str(ncores) + ";" if exec_mode is not None: query += "exec_mode=" + str(exec_mode) + ";" if container is not None: query += "container=" + str(container) + ";" if cwd is not None: query += "cwd=" + str(cwd) + ";" if host_partition is not None: query += "host_partition=" + str(host_partition) + ";" if algorithm is not None: query += "algorithm=" + str(algorithm) + ";" if policy is not None: query += "policy=" + str(policy) + ";" if ioserver is not None: query += "ioserver=" + str(ioserver) + ";" if schedule is not None: query += "schedule=" + str(schedule) + ";" if nhost is not None: query += "nhost=" + str(nhost) + ";" if run is not None: query += "run=" + str(run) + ";" if nfrag is not None: query += "nfrag=" + str(nfrag) + ";" if ntuple is not None: query += "ntuple=" + str(ntuple) + ";" if measure is not None: query += "measure=" + str(measure) + ";" if measure_type is not None: query += "measure_type=" + str(measure_type) + ";" if exp_ndim is not None: query += "exp_ndim=" + str(exp_ndim) + ";" if dim is not None: query += "dim=" + str(dim) + ";" if 
concept_level is not None: query += "concept_level=" + str(concept_level) + ";" if dim_size is not None: query += "dim_size=" + str(dim_size) + ";" if compressed is not None: query += "compressed=" + str(compressed) + ";" if grid is not None: query += "grid=" + str(grid) + ";" if description is not None: query += "description=" + str(description) + ";" if save is not None: query += "save=" + str(save) + ";" try: if Cube.client.submit(query, display) is None: raise RuntimeError() if Cube.client.last_response is not None: if Cube.client.cube: newcube = Cube(pid=Cube.client.cube) except Exception as e: print(get_linenumber(), "Something went wrong:", e) raise RuntimeError() else: return newcube @classmethod def randcube2( cls, ncores=1, nthreads=1, exec_mode="sync", container=None, cwd=None, host_partition="auto", ioserver="ophidiaio_memory", schedule=0, algorithm="default", policy="rr", nhost=0, run="yes", nfrag=1, ntuple=1, measure=None, measure_type=None, exp_ndim=None, dim=None, concept_level="c", dim_size=None, compressed="no", grid="-", description="-", save="yes", display=False, ): """randcube2(ncores=1, nthreads=1, exec_mode='sync', container=None, cwd=None, host_partition='auto', ioserver='ophidiaio_memory', schedule=0, algorithm='default', policy='rr', nhost=0, run='yes', nfrag=1, ntuple=1, measure=None, measure_type=None, exp_ndim=None, dim=None, concept_level='c', dim_size=None, compressed='no', grid='-', description='-', save='yes', display=False) -> Cube or None : wrapper of the operator OPH_RANDCUBE2 :param ncores: number of cores to use :type ncores: int :param nthreads: number of threads to use :type nthreads: int :param exec_mode: async or sync :type exec_mode: str :param container: container name :type container: str :param cwd: current working directory :type cwd: str :param host_partition: host partition name :type host_partition: str :param algorithm: default|temperatures :type algorithm: str :param policy: rule to select how data are distribuited over hosts (rr|port) :type policy: str :param ioserver: ophdiaio_memory :type ioserver: str :param schedule: 0 :type schedule: int :param nhost: number of hosts to use :type nhost: int :param run: yes|no :type run: str :param nfrag: number of fragments/db to use :type nfrag: int :param ntuple: number of tuples/fragment to use :type ntuple: int :param measure: measure to be imported :type measure: str :param measure_type: double|float|int|long|short|byte :type measure_type: str :param exp_ndim: number of explicit dimensions in dim :type exp_ndim: int :param dim: pipe (|) separated list of dimension names :type dim: str :param concept_level: pipe (|) separated list of dimensions hierarchy levels :type concept_level: str :param dim_size: pipe (|) separated list of dimension sizes :type dim_size: str :param compressed: yes|no :type compressed: str :param grid: optionally group dimensions in a grid :type grid: str :param description: additional description to be associated with the output cube :type description: str :param save: option to enable/disable JSON response saving on the server-side (default is yes) :type save: str :param display: option for displaying the response in a "pretty way" using the pretty_print function (default is False) :type display: bool :returns: obj or None :rtype: Cube or None :raises: RuntimeError """ if ( Cube.client is None or (cwd is None and Cube.client.cwd is None) or container is None or nfrag is None or ntuple is None or measure is None or measure_type is None or exp_ndim is None or dim is None or 
dim_size is None ): raise RuntimeError("Cube.client, cwd, container, nfrag, ntuple, measure, measure_type, exp_ndim, dim or dim_size is None") newcube = None query = "oph_randcube2 " if ncores is not None: query += "ncores=" + str(ncores) + ";" if nthreads is not None: query += "nthreads=" + str(nthreads) + ";" if exec_mode is not None: query += "exec_mode=" + str(exec_mode) + ";" if container is not None: query += "container=" + str(container) + ";" if cwd is not None: query += "cwd=" + str(cwd) + ";" if host_partition is not None: query += "host_partition=" + str(host_partition) + ";" if algorithm is not None: query += "algorithm=" + str(algorithm) + ";" if policy is not None: query += "policy=" + str(policy) + ";" if ioserver is not None: query += "ioserver=" + str(ioserver) + ";" if schedule is not None: query += "schedule=" + str(schedule) + ";" if nhost is not None: query += "nhost=" + str(nhost) + ";" if run is not None: query += "run=" + str(run) + ";" if nfrag is not None: query += "nfrag=" + str(nfrag) + ";" if ntuple is not None: query += "ntuple=" + str(ntuple) + ";" if measure is not None: query += "measure=" + str(measure) + ";" if measure_type is not None: query += "measure_type=" + str(measure_type) + ";" if exp_ndim is not None: query += "exp_ndim=" + str(exp_ndim) + ";" if dim is not None: query += "dim=" + str(dim) + ";" if concept_level is not None: query += "concept_level=" + str(concept_level) + ";" if dim_size is not None: query += "dim_size=" + str(dim_size) + ";" if compressed is not None: query += "compressed=" + str(compressed) + ";" if grid is not None: query += "grid=" + str(grid) + ";" if description is not None: query += "description=" + str(description) + ";" if save is not None: query += "save=" + str(save) + ";" try: if Cube.client.submit(query, display) is None: raise RuntimeError() if Cube.client.last_response is not None: if Cube.client.cube: newcube = Cube(pid=Cube.client.cube) except Exception as e: print(get_linenumber(), "Something went wrong:", e) raise RuntimeError() else: return newcube @classmethod def explorenc( cls, exec_mode="sync", schedule=0, measure="-", src_path=None, cdd=None, exp_dim="-", imp_dim="-", subset_dims="none", subset_type="index", subset_filter="all", limit_filter=100, show_index="no", show_id="no", show_time="no", show_stats="00000000000000", show_fit="no", level=0, imp_num_point=0, offset=50, operation="avg", wavelet="no", wavelet_ratio=0, wavelet_coeff="no", objkey_filter="all", save="yes", display=True, ): """explorenc(exec_mode='sync', schedule=0, measure='-', src_path=None, cdd=None, exp_dim='-', imp_dim='-', subset_dims='none', subset_type='index', subset_filter='all', limit_filter=100, show_index='no', show_id='no', show_time='no', show_stats='00000000000000', show_fit='no', level=0, imp_num_point=0, offset=50, operation='avg', wavelet='no', wavelet_ratio=0, wavelet_coeff='no', objkey_filter='all', save='yes', display=True) -> None : wrapper of the operator OPH_EXPLORENC :param exec_mode: async or sync :type exec_mode: str :param schedule: 0 :type schedule: int :param measure: name of the measure related to the NetCDF file :type measure: str :param src_path: path of file to be imported :type src_path: str :param cdd: absolute path corresponding to the current directory on data repository :type cdd: str :param exp_dim: pipe (|) separated list of explicit dimension names :type exp_dim: str :param imp_dim: pipe (|) separated list of implicit dimension names :type imp_dim: str :param subset_dims: pipe (|) separated list 
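# Usage sketch (hypothetical sizes; assumes Cube.client is already set): build a small
# random cube, e.g. to test a workflow without importing real data.
#   testcube = Cube.randcube(container="test", cwd="/", nhost=1, nfrag=2, ntuple=10,
#                            measure="example_var", measure_type="double", exp_ndim=2,
#                            dim="lat|lon|time", concept_level="c|c|c", dim_size="4|4|10")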
of dimensions on which to apply the subsetting :type subset_dims: str :param subset_filter: pipe (|) separated list of filters, one per dimension, composed of comma-separated microfilters (e.g. 1,5,10:2:50) :type subset_filter: str :param subset_type: index|coord :type subset_type: str :param limit_filter: max number of lines :type limit_filter: int :param show_index: yes|no :type show_index: str :param show_id: yes|no :type show_id: str :param show_time: yes|no :type show_time: str :param show_stats: (15-bit) mask to set statistics to be computed for each time series :type show_stats: str :param show_fit: yes|no :type show_fit: str :param level: 0|1|2 :type level: int :param imp_num_point: number of points along which measure values must be distributed by interpolation :type imp_num_point: int :param offset: relative offset to be used to set reduction interval bounds (percentage) :type offset: float :param operation: max|min|avg|sum :type operation: str :param wavelet: yes|no|only :type wavelet: str :param wavelet_ratio: fraction of wavelet transform coefficients that are cleared by the filter (percentage) :type wavelet_ratio: float :param wavelet_coeff: yes|no :type wavelet_coeff: str :param save: option to enable/disable JSON response saving on the server-side (default is yes) :type save: str :param display: option for displaying the response in a "pretty way" using the pretty_print function (default is True) :type display: bool :returns: response or None :rtype: dict or None :raises: RuntimeError """ response = None try: if Cube.client is None or src_path is None: raise RuntimeError("Cube.client or src_path is None") query = "oph_explorenc " if exec_mode is not None: query += "exec_mode=" + str(exec_mode) + ";" if schedule is not None: query += "schedule=" + str(schedule) + ";" if measure is not None: query += "measure=" + str(measure) + ";" if src_path is not None: query += "src_path=" + str(src_path) + ";" if cdd is not None: query += "cdd=" + str(cdd) + ";" if exp_dim is not None: query += "exp_dim=" + str(exp_dim) + ";" if imp_dim is not None: query += "imp_dim=" + str(imp_dim) + ";" if subset_dims is not None: query += "subset_dims=" + str(subset_dims) + ";" if subset_type is not None: query += "subset_type=" + str(subset_type) + ";" if subset_filter is not None: query += "subset_filter=" + str(subset_filter) + ";" if limit_filter is not None: query += "limit_filter=" + str(limit_filter) + ";" if show_index is not None: query += "show_index=" + str(show_index) + ";" if show_id is not None: query += "show_id=" + str(show_id) + ";" if show_time is not None: query += "show_time=" + str(show_time) + ";" if show_stats is not None: query += "show_stats=" + str(show_stats) + ";" if show_fit is not None: query += "show_fit=" + str(show_fit) + ";" if level is not None: query += "level=" + str(level) + ";" if imp_num_point is not None: query += "imp_num_point=" + str(imp_num_point) + ";" if offset is not None: query += "offset=" + str(offset) + ";" if operation is not None: query += "operation=" + str(operation) + ";" if wavelet is not None: query += "wavelet=" + str(wavelet) + ";" if wavelet_ratio is not None: query += "wavelet_ratio=" + str(wavelet_ratio) + ";" if wavelet_coeff is not None: query += "wavelet_coeff=" + str(wavelet_coeff) + ";" if objkey_filter is not None: query += "objkey_filter=" + str(objkey_filter) + ";" if save is not None: query += "save=" + str(save) + ";" if Cube.client.submit(query, display) is None: raise RuntimeError() if Cube.client.last_response is not None and
display is False: response = Cube.client.deserialize_response()["response"] except Exception as e: print(get_linenumber(), "Something went wrong:", e) raise RuntimeError() else: return response @classmethod def importnc( cls, container="-", cwd=None, exp_dim="auto", host_partition="auto", imp_dim="auto", measure=None, src_path=None, cdd=None, compressed="no", exp_concept_level="c", grid="-", imp_concept_level="c", import_metadata="yes", check_compliance="no", offset=0, ioserver="mysql_table", ncores=1, nfrag=0, nhost=0, subset_dims="none", subset_filter="all", time_filter="yes", subset_type="index", exec_mode="sync", base_time="1900-01-01 00:00:00", calendar="standard", hierarchy="oph_base", leap_month=2, leap_year=0, month_lengths="31,28,31,30,31,30,31,31,30,31,30,31", run="yes", units="d", vocabulary="CF", description="-", policy="rr", schedule=0, check_grid="no", save="yes", display=False, ): """importnc(container='-', cwd=None, exp_dim='auto', host_partition='auto', imp_dim='auto', measure=None, src_path=None, cdd=None, compressed='no', exp_concept_level='c', grid='-', imp_concept_level='c', import_metadata='yes', check_compliance='no', offset=0, ioserver='mysql_table', ncores=1, nfrag=0, nhost=0, subset_dims='none', subset_filter='all', time_filter='yes', subset_type='index', exec_mode='sync', base_time='1900-01-01 00:00:00', calendar='standard', hierarchy='oph_base', leap_month=2, leap_year=0, month_lengths='31,28,31,30,31,30,31,31,30,31,30,31', run='yes', units='d', vocabulary='CF', description='-', policy='rr', schedule=0, check_grid='no', save='yes', display=False) -> Cube or None : wrapper of the operator OPH_IMPORTNC :param ncores: number of cores to use :type ncores: int :param exec_mode: async or sync :type exec_mode: str :param schedule: 0 :type schedule: int :param container: container name :type container: str :param cwd: current working directory :type cwd: str :param exp_dim: pipe (|) separated list of explicit dimension names :type exp_dim: str :param host_partition: host partition name :type host_partition: str :param imp_dim: pipe (|) separated list of implicit dimension names :type imp_dim: str :param measure: measure to be imported :type measure: str :param src_path: path of file to be imported :type src_path: str :param cdd: absolute path corresponding to the current directory on data repository :type cdd: str :param compressed: yes|no :type compressed: str :param exp_concept_level: pipe (|) separated list of explicit dimensions hierarchy levels :type exp_concept_level: str :param grid: optionally group dimensions in a grid :type grid: str :param imp_concept_level: pipe (|) separated list of implicit dimensions hierarchy levels :type imp_concept_level: str :param import_metadata: yes|no :type import_metadata: str :param check_compliance: yes|no :type check_compliance: str :param offset: it is added to the bounds of subset intervals :type offset: int :param ioserver: mysql_table|ophdiaio_memory :type ioserver: str :param nfrag: number of fragments/db to use :type nfrag: int :param nhost: number of hosts to use :type nhost: int :param subset_dims: pipe (|) separated list of dimensions on which to apply the subsetting :type subset_dims: str :param subset_filter: pipe (|) separated list of filters, one per dimension, composed of comma-separated microfilters (e.g. 
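# Usage sketch (hypothetical NetCDF path and variable; assumes Cube.client is already set):
# peek at a file on the server before importing it.
#   Cube.explorenc(src_path="/data/tasmax.nc", measure="tasmax",
#                  exp_dim="lat|lon", imp_dim="time", limit_filter=10)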
1,5,10:2:50) :type subset_filter: str :param time_filter: yes|no :type time_filter: str :param subset_type: index|coord :type subset_type: str :param base_time: reference time :type base_time: str :param calendar: calendar used (standard|gregorian|proleptic_gregorian|julian|360_day|no_leap|all_leap|user_defined) :type calendar: str :param hierarchy: pipe (|) separated list of dimension hierarchies (oph_base|oph_time) :type hierarchy: str :param leap_month: leap month :type leap_month: int :param leap_year: leap year :type leap_year: int :param month_lengths: comma-separated list of month lengths :type month_lengths: str :param run: yes|no :type run: str :param units: unit of time (s|m|h|3|6|d) :type units: str :param vocabulary: metadata vocabulary :type vocabulary: str :param description: additional description to be associated with the output cube :type description: str :param policy: rule to select how data are distribuited over hosts (rr|port) :type policy: str :param check_grid: yes|no :type check_grid: str :param save: option to enable/disable JSON response saving on the server-side (default is yes) :type save: str :param display: option for displaying the response in a "pretty way" using the pretty_print function (default is False) :type display: bool :returns: obj or None :rtype: Cube or None :raises: RuntimeError """ if Cube.client is None or measure is None or src_path is None: raise RuntimeError("Cube.client, measure or src_path is None") newcube = None query = "oph_importnc " if ncores is not None: query += "ncores=" + str(ncores) + ";" if exec_mode is not None: query += "exec_mode=" + str(exec_mode) + ";" if container is not None: query += "container=" + str(container) + ";" if cwd is not None: query += "cwd=" + str(cwd) + ";" if host_partition is not None: query += "host_partition=" + str(host_partition) + ";" if ioserver is not None: query += "ioserver=" + str(ioserver) + ";" if import_metadata is not None: query += "import_metadata=" + str(import_metadata) + ";" if check_compliance is not None: query += "check_compliance=" + str(check_compliance) + ";" if schedule is not None: query += "schedule=" + str(schedule) + ";" if nhost is not None: query += "nhost=" + str(nhost) + ";" if nfrag is not None: query += "nfrag=" + str(nfrag) + ";" if run is not None: query += "run=" + str(run) + ";" if measure is not None: query += "measure=" + str(measure) + ";" if src_path is not None: query += "src_path=" + str(src_path) + ";" if cdd is not None: query += "cdd=" + str(cdd) + ";" if exp_dim is not None: query += "exp_dim=" + str(exp_dim) + ";" if imp_dim is not None: query += "imp_dim=" + str(imp_dim) + ";" if subset_dims is not None: query += "subset_dims=" + str(subset_dims) + ";" if subset_type is not None: query += "subset_type=" + str(subset_type) + ";" if subset_filter is not None: query += "subset_filter=" + str(subset_filter) + ";" if time_filter is not None: query += "time_filter=" + str(time_filter) + ";" if offset is not None: query += "offset=" + str(offset) + ";" if exp_concept_level is not None: query += "exp_concept_level=" + str(exp_concept_level) + ";" if imp_concept_level is not None: query += "imp_concept_level=" + str(imp_concept_level) + ";" if compressed is not None: query += "compressed=" + str(compressed) + ";" if grid is not None: query += "grid=" + str(grid) + ";" if hierarchy is not None: query += "hierarchy=" + str(hierarchy) + ";" if vocabulary is not None: query += "vocabulary=" + str(vocabulary) + ";" if base_time is not None: query += "base_time=" + 
str(base_time) + ";" if units is not None: query += "units=" + str(units) + ";" if calendar is not None: query += "calendar=" + str(calendar) + ";" if month_lengths is not None: query += "month_lengths=" + str(month_lengths) + ";" if leap_year is not None: query += "leap_year=" + str(leap_year) + ";" if leap_month is not None: query += "leap_month=" + str(leap_month) + ";" if policy is not None: query += "policy=" + str(policy) + ";" if description is not None: query += "description=" + str(description) + ";" if check_grid is not None: query += "check_grid=" + str(check_grid) + ";" if save is not None: query += "save=" + str(save) + ";" try: if Cube.client.submit(query, display) is None: raise RuntimeError() if Cube.client.last_response is not None: if Cube.client.cube: newcube = Cube(pid=Cube.client.cube) except Exception as e: print(get_linenumber(), "Something went wrong:", e) raise RuntimeError() else: return newcube @classmethod def importnc2( cls, container="-", cwd=None, exp_dim="auto", host_partition="auto", imp_dim="auto", measure=None, src_path=None, cdd=None, compressed="no", exp_concept_level="c", grid="-", imp_concept_level="c", import_metadata="yes", check_compliance="no", offset=0, ioserver="ophidiaio_memory", ncores=1, nthreads=1, nfrag=0, nhost=0, subset_dims="none", subset_filter="all", time_filter="yes", subset_type="index", exec_mode="sync", base_time="1900-01-01 00:00:00", calendar="standard", hierarchy="oph_base", leap_month=2, leap_year=0, month_lengths="31,28,31,30,31,30,31,31,30,31,30,31", run="yes", units="d", vocabulary="CF", description="-", policy="rr", schedule=0, check_grid="no", save="yes", display=False, ): """importnc2(container='-', cwd=None, exp_dim='auto', host_partition='auto', imp_dim='auto', measure=None, src_path=None, cdd=None, compressed='no', exp_concept_level='c', grid='-', imp_concept_level='c', import_metadata='yes', check_compliance='no', offset=0, ioserver='ophidiaio_memory', ncores=1, nthreads=1, nfrag=0, nhost=0, subset_dims='none', subset_filter='all', time_filter='yes', subset_type='index', exec_mode='sync', base_time='1900-01-01 00:00:00', calendar='standard', hierarchy='oph_base', leap_month=2, leap_year=0, month_lengths='31,28,31,30,31,30,31,31,30,31,30,31', run='yes', units='d', vocabulary='CF', description='-', policy='rr', schedule=0, check_grid='no', save='yes', display=False) -> Cube or None : wrapper of the operator OPH_IMPORTNC2 :param ncores: number of cores to use :type ncores: int :param nthreads: number of threads to use :type nthreads: int :param exec_mode: async or sync :type exec_mode: str :param schedule: 0 :type schedule: int :param container: container name :type container: str :param cwd: current working directory :type cwd: str :param exp_dim: pipe (|) separated list of explicit dimension names :type exp_dim: str :param host_partition: host partition name :type host_partition: str :param imp_dim: pipe (|) separated list of implicit dimension names :type imp_dim: str :param measure: measure to be imported :type measure: str :param src_path: path of file to be imported :type src_path: str :param cdd: absolute path corresponding to the current directory on data repository :type cdd: str :param compressed: yes|no :type compressed: str :param exp_concept_level: pipe (|) separated list of explicit dimensions hierarchy levels :type exp_concept_level: str :param grid: optionally group dimensions in a grid :type grid: str :param imp_concept_level: pipe (|) separated list of implicit dimensions hierarchy levels :type 
imp_concept_level: str :param import_metadata: yes|no :type import_metadata: str :param check_compliance: yes|no :type check_compliance: str :param offset: it is added to the bounds of subset intervals :type offset: int :param ioserver: ophdiaio_memory :type ioserver: str :param nfrag: number of fragments/db to use :type nfrag: int :param nhost: number of hosts to use :type nhost: int :param subset_dims: pipe (|) separated list of dimensions on which to apply the subsetting :type subset_dims: str :param subset_filter: pipe (|) separated list of filters, one per dimension, composed of comma-separated microfilters (e.g. 1,5,10:2:50) :type subset_filter: str :param time_filter: yes|no :type time_filter: str :param subset_type: index|coord :type subset_type: str :param base_time: reference time :type base_time: str :param calendar: calendar used (standard|gregorian|proleptic_gregorian|julian|360_day|no_leap|all_leap|user_defined) :type calendar: str :param hierarchy: pipe (|) separated list of dimension hierarchies (oph_base|oph_time) :type hierarchy: str :param leap_month: leap month :type leap_month: int :param leap_year: leap year :type leap_year: int :param month_lengths: comma-separated list of month lengths :type month_lengths: str :param run: yes|no :type run: str :param units: unit of time (s|m|h|3|6|d) :type units: str :param vocabulary: metadata vocabulary :type vocabulary: str :param policy: rule to select how data are distribuited over hosts (rr|port) :type policy: str :param description: additional description to be associated with the output cube :type description: str :param check_grid: yes|no :type check_grid: str :param save: option to enable/disable JSON response saving on the server-side (default is yes) :type save: str :param display: option for displaying the response in a "pretty way" using the pretty_print function (default is False) :type display: bool :returns: obj or None :rtype: Cube or None :raises: RuntimeError """ if Cube.client is None or measure is None or src_path is None: raise RuntimeError("Cube.client, measure or src_path is None") newcube = None query = "oph_importnc2 " if ncores is not None: query += "ncores=" + str(ncores) + ";" if nthreads is not None: query += "nthreads=" + str(nthreads) + ";" if exec_mode is not None: query += "exec_mode=" + str(exec_mode) + ";" if container is not None: query += "container=" + str(container) + ";" if cwd is not None: query += "cwd=" + str(cwd) + ";" if host_partition is not None: query += "host_partition=" + str(host_partition) + ";" if ioserver is not None: query += "ioserver=" + str(ioserver) + ";" if import_metadata is not None: query += "import_metadata=" + str(import_metadata) + ";" if check_compliance is not None: query += "check_compliance=" + str(check_compliance) + ";" if schedule is not None: query += "schedule=" + str(schedule) + ";" if nhost is not None: query += "nhost=" + str(nhost) + ";" if nfrag is not None: query += "nfrag=" + str(nfrag) + ";" if run is not None: query += "run=" + str(run) + ";" if measure is not None: query += "measure=" + str(measure) + ";" if src_path is not None: query += "src_path=" + str(src_path) + ";" if cdd is not None: query += "cdd=" + str(cdd) + ";" if exp_dim is not None: query += "exp_dim=" + str(exp_dim) + ";" if imp_dim is not None: query += "imp_dim=" + str(imp_dim) + ";" if subset_dims is not None: query += "subset_dims=" + str(subset_dims) + ";" if subset_type is not None: query += "subset_type=" + str(subset_type) + ";" if subset_filter is not None: query += 
"subset_filter=" + str(subset_filter) + ";" if time_filter is not None: query += "time_filter=" + str(time_filter) + ";" if offset is not None: query += "offset=" + str(offset) + ";" if exp_concept_level is not None: query += "exp_concept_level=" + str(exp_concept_level) + ";" if imp_concept_level is not None: query += "imp_concept_level=" + str(imp_concept_level) + ";" if compressed is not None: query += "compressed=" + str(compressed) + ";" if grid is not None: query += "grid=" + str(grid) + ";" if hierarchy is not None: query += "hierarchy=" + str(hierarchy) + ";" if vocabulary is not None: query += "vocabulary=" + str(vocabulary) + ";" if base_time is not None: query += "base_time=" + str(base_time) + ";" if units is not None: query += "units=" + str(units) + ";" if calendar is not None: query += "calendar=" + str(calendar) + ";" if month_lengths is not None: query += "month_lengths=" + str(month_lengths) + ";" if leap_year is not None: query += "leap_year=" + str(leap_year) + ";" if leap_month is not None: query += "leap_month=" + str(leap_month) + ";" if policy is not None: query += "policy=" + str(policy) + ";" if description is not None: query += "description=" + str(description) + ";" if check_grid is not None: query += "check_grid=" + str(check_grid) + ";" if save is not None: query += "save=" + str(save) + ";" try: if Cube.client.submit(query, display) is None: raise RuntimeError() if Cube.client.last_response is not None: if Cube.client.cube: newcube = Cube(pid=Cube.client.cube) except Exception as e: print(get_linenumber(), "Something went wrong:", e) raise RuntimeError() else: return newcube @classmethod def man(cls, function=None, function_version="latest", function_type="operator", exec_mode="sync", objkey_filter="all", save="yes", display=True): """man(function=None, function_type='operator', function_version='latest', exec_mode='sync', save='yes', display=True) -> dict or None : wrapper of the operator OPH_MAN :param function: operator or primitive name :type function: str :param function_type: operator|primitive :type function_type: str :param function_version: operator or primitive version :type function_version: str :param exec_mode: async or sync :type exec_mode: str :param save: option to enable/disable JSON response saving on the server-side (default is yes) :type save: str :param display: option for displaying the response in a "pretty way" using the pretty_print function (default is True) :type display: bool :returns: response or None :rtype: dict or None :raises: RuntimeError """ response = None try: if Cube.client is None or function is None: raise RuntimeError("Cube.client or function is None") query = "oph_man " if function is not None: query += "function=" + str(function) + ";" if function_version is not None: query += "function_version=" + str(function_version) + ";" if function_type is not None: query += "function_type=" + str(function_type) + ";" if exec_mode is not None: query += "exec_mode=" + str(exec_mode) + ";" if objkey_filter is not None: query += "objkey_filter=" + str(objkey_filter) + ";" if save is not None: query += "save=" + str(save) + ";" if Cube.client.submit(query, display) is None: raise RuntimeError() if Cube.client.last_response is not None and display is False: response = Cube.client.deserialize_response()["response"] except Exception as e: print(get_linenumber(), "Something went wrong:", e) raise RuntimeError() else: return response @classmethod def movecontainer(cls, container=None, cwd=None, exec_mode="sync", save="yes", 
display=False): """movecontainer(container=None, cwd=None, exec_mode='sync', save='yes', display=False) -> None : wrapper of the operator OPH_MOVECONTAINER :param container: container name :type container: str :param cwd: current working directory :type cwd: str :param exec_mode: async or sync :type exec_mode: str :param save: option to enable/disable JSON response saving on the server-side (default is yes) :type save: str :param display: option for displaying the response in a "pretty way" using the pretty_print function (default is False) :type display: bool :returns: None :rtype: None :raises: RuntimeError """ try: if Cube.client is None or container is None or (cwd is None and Cube.client.cwd is None): raise RuntimeError("Cube.client, container or cwd is None") query = "oph_movecontainer " if container is not None: query += "container=" + str(container) + ";" if cwd is not None: query += "cwd=" + str(cwd) + ";" if exec_mode is not None: query += "exec_mode=" + str(exec_mode) + ";" if save is not None: query += "save=" + str(save) + ";" if Cube.client.submit(query, display) is None: raise RuntimeError() except Exception as e: print(get_linenumber(), "Something went wrong:", e) raise RuntimeError() @classmethod def operators(cls, operator_filter=None, limit_filter=0, exec_mode="sync", objkey_filter="all", save="yes", display=True): """operators(operator_filter=None, limit_filter=0, exec_mode='sync', save='yes', display=True) -> dict or None : wrapper of the operator OPH_OPERATORS_LIST :param operator_filter: filter on operator name :type operator_filter: str :param limit_filter: max number of lines :type limit_filter: int :param exec_mode: async or sync :type exec_mode: str :param save: option to enable/disable JSON response saving on the server-side (default is yes) :type save: str :param display: option for displaying the response in a "pretty way" using the pretty_print function (default is True) :type display: bool :returns: response or None :rtype: dict or None :raises: RuntimeError """ response = None try: if Cube.client is None: raise RuntimeError("Cube.client is None") query = "oph_operators_list " if operator_filter is not None: query += "operator_filter=" + str(operator_filter) + ";" if limit_filter is not None: query += "limit_filter=" + str(limit_filter) + ";" if exec_mode is not None: query += "exec_mode=" + str(exec_mode) + ";" if objkey_filter is not None: query += "objkey_filter=" + str(objkey_filter) + ";" if save is not None: query += "save=" + str(save) + ";" if Cube.client.submit(query, display) is None: raise RuntimeError() if Cube.client.last_response is not None and display is False: response = Cube.client.deserialize_response()["response"] except Exception as e: print(get_linenumber(), "Something went wrong:", e) raise RuntimeError() else: return response @classmethod def primitives(cls, level=1, dbms_filter=None, return_type="all", primitive_type="all", primitive_filter="", limit_filter=0, exec_mode="sync", objkey_filter="all", save="yes", display=True): """primitives(dbms_filter=None, level=1, limit_filter=0, primitive_filter=None, primitive_type=None, return_type=None, exec_mode='sync', objkey_filter='all', save='yes', display=True) -> dict or None : wrapper of the operator OPH_PRIMITIVES_LIST :param dbms_filter: filter on DBMS :type dbms_filter: str :param level: 1|2|3|4|5 :type level: int :param limit_filter: max number of lines :type limit_filter: int :param primitive_filter: filter on primitive name :type primitive_filter: str :param primitive_type: 
all|simple|aggregate :type primitive_type: str :param return_type: all|array|number :type return_type: str :param exec_mode: async or sync :type exec_mode: str :param save: option to enable/disable JSON response saving on the server-side (default is yes) :type save: str :param display: option for displaying the response in a "pretty way" using the pretty_print function (default is True) :type display: bool :returns: response or None :rtype: dict or None :raises: RuntimeError """ response = None try: if Cube.client is None: raise RuntimeError("Cube.client is None") query = "oph_primitives_list " if level is not None: query += "level=" + str(level) + ";" if dbms_filter is not None: query += "dbms_filter=" + str(dbms_filter) + ";" if return_type is not None: query += "return_type=" + str(return_type) + ";" if primitive_type is not None: query += "primitive_type=" + str(primitive_type) + ";" if primitive_filter is not None: query += "primitive_filter=" + str(primitive_filter) + ";" if limit_filter is not None: query += "limit_filter=" + str(limit_filter) + ";" if exec_mode is not None: query += "exec_mode=" + str(exec_mode) + ";" if objkey_filter is not None: query += "objkey_filter=" + str(objkey_filter) + ";" if save is not None: query += "save=" + str(save) + ";" if Cube.client.submit(query, display) is None: raise RuntimeError() if Cube.client.last_response is not None and display is False: response = Cube.client.deserialize_response()["response"] except Exception as e: print(get_linenumber(), "Something went wrong:", e) raise RuntimeError() else: return response @classmethod def script(cls, script=":", args=" ", stdout="stdout", stderr="stderr", list="no", space="no", python_code=False, exec_mode="sync", ncores=1, save="yes", display=False): """script(script=':', args=' ', stdout='stdout', stderr='stderr', ncores=1, exec_mode='sync', list='no', space='no', python_code=False, save='yes', display=False) -> None : wrapper of the operator OPH_SCRIPT :param script: script/executable filename :type script: str :param args: pipe (|) separated list of arguments for the script :type args: str :param stdout: file/stream where stdout is redirected :type stdout: str :param stderr: file/stream where stderr is redirected :type stderr: str :param list: yes|no :type list: str :param space: yes|no :type space: str :param python_code: yes|no :type python_code: bool :param ncores: number of cores to use :type ncores: int :param exec_mode: async or sync :type exec_mode: str :param save: option to enable/disable JSON response saving on the server-side (default is yes) :type save: str :param display: option for displaying the response in a "pretty way" using the pretty_print function (default is False) :type display: bool :returns: None :rtype: None :raises: RuntimeError """ def createScript(function): from inspect import getsource, signature import stat from os.path import expanduser, isdir from os import mkdir, chmod from time import time base_path = expanduser("~") + "/.ophidia/" script_path = base_path + function.__name__ + str(int(time() * 10 ** 6)) + ".py" try: # Check if hidden folder exists or create it otherwise if not isdir(base_path): mkdir(base_path, stat.S_IRWXU) fnct_text = getsource(function) fnct_signature = signature(function) fnct_args = fnct_signature.parameters fnct_args_num = len(fnct_args) script_args = "(" if fnct_args_num > 0: for i in range(1, fnct_args_num + 1): script_args = script_args + "sys.argv[" + str(i) + "], " script_args = script_args[:-2] + ")" else: script_args = 
script_args + ")" script_text = ( """#!/bin/python """ + fnct_text + """ if __name__ == '__main__': import sys if len(sys.argv) <= """ + str(fnct_args_num) + """: print('Some input arguments are missing') sys.exit(1) if """ + function.__name__ + script_args + """: sys.exit(1) """ ) with open(script_path, "w") as file: file.write(script_text) chmod(script_path, stat.S_IRWXU | stat.S_IRGRP | stat.S_IXGRP) except (IOError, ValueError, TypeError, OSError) as e: print(get_linenumber(), "Python function error: ", e) raise RuntimeError() return script_path try: if Cube.client is None: raise RuntimeError("Cube.client is None") query = "oph_script " if script is not None: if python_code: if sys.version_info[0] < 3: raise RuntimeError("Python 3 is required to use a Python function as a scripts") else: script_path = createScript(script) query += "script=" + str(script_path) + ";" else: query += "script=" + str(script) + ";" if args is not None: query += "args=" + str(args) + ";" if stdout is not None: query += "stdout=" + str(stdout) + ";" if stderr is not None: query += "stderr=" + str(stderr) + ";" if list is not None: query += "list=" + str(list) + ";" if space is not None: query += "space=" + str(space) + ";" if exec_mode is not None: query += "exec_mode=" + str(exec_mode) + ";" if ncores is not None: query += "ncores=" + str(ncores) + ";" if save is not None: query += "save=" + str(save) + ";" if Cube.client.submit(query, display) is None: if script is not None and python_code: os.remove(script_path) raise RuntimeError() except Exception as e: print(get_linenumber(), "Something went wrong:", e) raise RuntimeError() @classmethod def resume(cls, session="this", id=0, id_type="workflow", document_type="response", level=1, user="", status_filter="11111111", save="no", objkey_filter="all", display=True): """resume( id=0, id_type='workflow', document_type='response', level=1, save='no', session='this', objkey_filter='all', user='', display=True) -> dict or None : wrapper of the operator OPH_RESUME :param session: identifier of the intended session, by default it is the working session :type session: str :param id: identifier of the intended workflow or marker, by default no filter is applied :type id: int :param id_type: workflow|marker :type id_type: str :param document_type: request|response :type document_type: str :param level: 0|1|2|3|4|5 :type level: int :param user: filter by name of the submitter, by default no filter is applied :type user: str :param status_filter: filter by job status (bitmap) :type status_filter: str :param save: yes|no :type save: str :param display: option for displaying the response in a "pretty way" using the pretty_print function (default is True) :type display: bool :returns: response or None :rtype: dict or None :raises: RuntimeError """ response = None try: if Cube.client is None: raise RuntimeError("Cube.client is None") query = "oph_resume " if session is not None: query += "session=" + str(session) + ";" if id is not None: query += "id=" + str(id) + ";" if id_type is not None: query += "id_type=" + str(id_type) + ";" if document_type is not None: query += "document_type=" + str(document_type) + ";" if level is not None: query += "level=" + str(level) + ";" if user is not None: query += "user=" + str(user) + ";" if status_filter is not None: query += "status_filter=" + str(status_filter) + ";" if save is not None: query += "save=" + str(save) + ";" if objkey_filter is not None: query += "objkey_filter=" + str(objkey_filter) + ";" if save is not None: query += 
"save=" + str(save) + ";" if Cube.client.submit(query, display) is None: raise RuntimeError() if Cube.client.last_response is not None and display is False: response = Cube.client.deserialize_response()["response"] except Exception as e: print(get_linenumber(), "Something went wrong:", e) raise RuntimeError() else: return response @classmethod def mergecubes(cls, ncores=1, exec_mode="sync", cubes=None, schedule=0, container="-", mode="i", hold_values="no", number=1, order="none", description="-", save="yes", display=False): """mergecubes(ncores=1, exec_mode='sync', cubes=None, schedule=0, container='-', mode='i', hold_values='no', number=1, order='none', description='-', save='yes', display=False) -> Cube : wrapper of the operator OPH_MERGECUBES :param ncores: number of cores to use :type ncores: int :param exec_mode: async or sync :type exec_mode: str :param schedule: 0 :type schedule: int :param cubes: pipe (|) separated list of cubes :type cubes: str :param container: optional container name :type container: str :param mode: interlace or append measures :type mode: str :param hold_values: enables the copy of the original values of implicit dimension :type hold_values: str :param number: number of replies of the first cube :type number: int :param order: criteria on which input cubes are ordered before merging :type order: str :param description: additional description to be associated with the output cube :type description: str :param save: option to enable/disable JSON response saving on the server-side (default is yes) :type save: str :param display: option for displaying the response in a "pretty way" using the pretty_print function (default is False) :type display: bool :returns: new cube or None :rtype: Cube or None :raises: RuntimeError """ if Cube.client is None or cubes is None: raise RuntimeError("Cube.client or cubes is None") newcube = None query = "oph_mergecubes " if ncores is not None: query += "ncores=" + str(ncores) + ";" if exec_mode is not None: query += "exec_mode=" + str(exec_mode) + ";" if cubes is not None: query += "cubes=" + str(cubes) + ";" if schedule is not None: query += "schedule=" + str(schedule) + ";" if container is not None: query += "container=" + str(container) + ";" if mode is not None: query += "mode=" + str(mode) + ";" if hold_values is not None: query += "hold_values=" + str(hold_values) + ";" if number is not None: query += "number=" + str(number) + ";" if order is not None: query += "order=" + str(order) + ";" if description is not None: query += "description=" + str(description) + ";" if save is not None: query += "save=" + str(save) + ";" try: if Cube.client.submit(query, display) is None: raise RuntimeError() if Cube.client.last_response is not None: if Cube.client.cube: newcube = Cube(pid=Cube.client.cube) except Exception as e: print(get_linenumber(), "Something went wrong:", e) raise RuntimeError() else: return newcube @classmethod def mergecubes2(cls, ncores=1, exec_mode="sync", cubes=None, schedule=0, container="-", dim_type="long", number=1, order="none", description="-", dim="-", save="yes", display=False): """mergecubes2(ncores=1, exec_mode='sync', cubes=None, schedule=0, container='-', dim_type='long', number=1, order='none', description='-', dim='-', save='yes', display=False) -> Cube or None: wrapper of the operator OPH_MERGECUBES2 :param ncores: number of cores to use :type ncores: int :param exec_mode: async or sync :type exec_mode: str :param schedule: 0 :type schedule: int :param cubes: pipe (|) separated list of cubes :type 
cubes: str :param container: optional container name :type container: str :param dim_type: data type of the new dimension :type dim_type: str :param number: number of replies of the first cube :type number: int :param order: criteria on which input cubes are ordered before merging :type order: str :param description: additional description to be associated with the output cube :type description: str :param dim: name of the new dimension to be created :type dim: str :param save: option to enable/disable JSON response saving on the server-side (default is yes) :type save: str :param display: option for displaying the response in a "pretty way" using the pretty_print function (default is False) :type display: bool :returns: new cube or None :rtype: Cube or None :raises: RuntimeError """ if Cube.client is None or cubes is None: raise RuntimeError("Cube.client or cubes is None") newcube = None query = "oph_mergecubes2 " if ncores is not None: query += "ncores=" + str(ncores) + ";" if exec_mode is not None: query += "exec_mode=" + str(exec_mode) + ";" if cubes is not None: query += "cubes=" + str(cubes) + ";" if schedule is not None: query += "schedule=" + str(schedule) + ";" if container is not None: query += "container=" + str(container) + ";" if number is not None: query += "number=" + str(number) + ";" if order is not None: query += "order=" + str(order) + ";" if description is not None: query += "description=" + str(description) + ";" if dim_type is not None: query += "dim_type=" + str(dim_type) + ";" if dim is not None: query += "dim=" + str(dim) + ";" if save is not None: query += "save=" + str(save) + ";" try: if Cube.client.submit(query, display) is None: raise RuntimeError() if Cube.client.last_response is not None: if Cube.client.cube: newcube = Cube(pid=Cube.client.cube) except Exception as e: print(get_linenumber(), "Something went wrong:", e) raise RuntimeError() else: return newcube def __init__( self, container="-", cwd=None, exp_dim="auto", host_partition="auto", imp_dim="auto", measure=None, src_path=None, cdd=None, compressed="no", exp_concept_level="c", grid="-", imp_concept_level="c", import_metadata="no", check_compliance="no", offset=0, ioserver="mysql_table", ncores=1, nfrag=0, nhost=0, subset_dims="none", subset_filter="all", time_filter="yes", subset_type="index", exec_mode="sync", base_time="1900-01-01 00:00:00", calendar="standard", hierarchy="oph_base", leap_month=2, leap_year=0, month_lengths="31,28,31,30,31,30,31,31,30,31,30,31", run="yes", units="d", vocabulary="-", description="-", policy="rr", schedule=0, pid=None, check_grid="no", save="yes", display=False, ): """Cube(container='-', cwd=None, exp_dim='auto', host_partition='auto', imp_dim='auto', measure=None, src_path=None, cdd=None, compressed='no', exp_concept_level='c', grid='-', imp_concept_level='c', import_metadata='no', check_compliance='no', offset=0, ioserver='mysql_table', ncores=1, nfrag=0, nhost=0, subset_dims='none', subset_filter='all', time_filter='yes', subset_type='index', exec_mode='sync', base_time='1900-01-01 00:00:00', calendar='standard', hierarchy='oph_base', leap_month=2, leap_year=0, month_lengths='31,28,31,30,31,30,31,31,30,31,30,31', run='yes', units='d', vocabulary='-', description='-', policy='rr', schedule=0, pid=None, check_grid='no', save='yes', display=False) -> obj or Cube(pid=None) -> obj :param ncores: number of cores to use :type ncores: int :param exec_mode: async or sync :type exec_mode: str :param schedule: 0 :type schedule: int :param container: container name :type 
container: str :param cwd: current working directory :type cwd: str :param exp_dim: pipe (|) separated list of explicit dimension names :type exp_dim: str :param host_partition: host partition name :type host_partition: str :param imp_dim: pipe (|) separated list of implicit dimension names :type imp_dim: str :param measure: measure to be imported :type measure: str :param src_path: path of file to be imported :type src_path: str :param cdd: absolute path corresponding to the current directory on data repository :type cdd: str :param compressed: yes|no :type compressed: str :param exp_concept_level: pipe (|) separated list of explicit dimensions hierarchy levels :type exp_concept_level: str :param grid: optionally group dimensions in a grid :type grid: str :param imp_concept_level: pipe (|) separated list of implicit dimensions hierarchy levels :type imp_concept_level: str :param import_metadata: yes|no :type import_metadata: str :param check_compliance: yes|no :type check_compliance: str :param offset: it is added to the bounds of subset intervals :type offset: int :param ioserver: mysql_table|ophdiaio_memory :type ioserver: str :param nfrag: number of fragments/db to use :type nfrag: int :param nhost: number of hosts to use :type nhost: int :param subset_dims: pipe (|) separated list of dimensions on which to apply the subsetting :type subset_dims: str :param subset_filter: pipe (|) separated list of filters, one per dimension, composed of comma-separated microfilters (e.g. 1,5,10:2:50) :type subset_filter: str :param time_filter: yes|no :type time_filter: str :param subset_type: index|coord :type subset_type: str :param base_time: reference time :type base_time: str :param calendar: calendar used (standard|gregorian|proleptic_gregorian|julian|360_day|no_leap|all_leap|user_defined) :type calendar: str :param hierarchy: pipe (|) separated list of dimension hierarchies (oph_base|oph_time) :type hierarchy: str :param leap_month: leap month :type leap_month: int :param leap_year: leap year :type leap_year: int :param month_lengths: comma-separated list of month lengths :type month_lengths: str :param run: yes|no :type run: str :param units: unit of time (s|m|h|3|6|d) :type units: str :param vocabulary: metadata vocabulary :type vocabulary: str :param description: additional description to be associated with the output cube :type description: str :param policy: rule to select how data are distribuited over hosts (rr|port) :type policy: str :param pid: PID of an existing cube (if used all other parameters are ignored) :type pid: str :param check_grid: yes|no :type check_grid: str :param save: option to enable/disable JSON response saving on the server-side (default is yes) :type save: str :param display: option for displaying the response in a "pretty way" using the pretty_print function (default is False) :type display: bool :returns: obj or None :rtype: Cube or None :raises: RuntimeError """ self.pid = None self.creation_date = None self.measure = None self.measure_type = None self.level = None self.nfragments = None self.source_file = None self.hostxcube = None self.fragxdb = None self.rowsxfrag = None self.elementsxrow = None self.compressed = None self.size = None self.nelements = None self.dim_info = None if pid is not None: if Cube.client is None: raise RuntimeError("Cube.client is None") self.pid = pid else: if (Cube.client is not None) and (cwd is not None or measure is not None or src_path is not None): if (cwd is None and Cube.client.cwd is None) or measure is None or src_path is 
None: raise RuntimeError("one or more required parameters are None") else: query = "oph_importnc " if container is not None: query += "container=" + str(container) + ";" if cwd is not None: query += "cwd=" + str(cwd) + ";" if exp_dim is not None: query += "exp_dim=" + str(exp_dim) + ";" if host_partition is not None: query += "host_partition=" + str(host_partition) + ";" if imp_dim is not None: query += "imp_dim=" + str(imp_dim) + ";" if measure is not None: query += "measure=" + str(measure) + ";" if src_path is not None: query += "src_path=" + str(src_path) + ";" if cdd is not None: query += "cdd=" + str(cdd) + ";" if compressed is not None: query += "compressed=" + str(compressed) + ";" if exp_concept_level is not None: query += "exp_concept_level=" + str(exp_concept_level) + ";" if grid is not None: query += "grid=" + str(grid) + ";" if imp_concept_level is not None: query += "imp_concept_level=" + str(imp_concept_level) + ";" if import_metadata is not None: query += "import_metadata=" + str(import_metadata) + ";" if check_compliance is not None: query += "check_compliance=" + str(check_compliance) + ";" if ioserver is not None: query += "ioserver=" + str(ioserver) + ";" if ncores is not None: query += "ncores=" + str(ncores) + ";" if nfrag is not None: query += "nfrag=" + str(nfrag) + ";" if nhost is not None: query += "nhost=" + str(nhost) + ";" if subset_dims is not None: query += "subset_dims=" + str(subset_dims) + ";" if subset_filter is not None: query += "subset_filter=" + str(subset_filter) + ";" if time_filter is not None: query += "time_filter=" + str(time_filter) + ";" if offset is not None: query += "offset=" + str(offset) + ";" if subset_type is not None: query += "subset_type=" + str(subset_type) + ";" if exec_mode is not None: query += "exec_mode=" + str(exec_mode) + ";" if base_time is not None: query += "base_time=" + str(base_time) + ";" if calendar is not None: query += "calendar=" + str(calendar) + ";" if hierarchy is not None: query += "hierarchy=" + str(hierarchy) + ";" if leap_month is not None: query += "leap_month=" + str(leap_month) + ";" if leap_year is not None: query += "leap_year=" + str(leap_year) + ";" if month_lengths is not None: query += "month_lengths=" + str(month_lengths) + ";" if run is not None: query += "run=" + str(run) + ";" if units is not None: query += "units=" + str(units) + ";" if vocabulary is not None: query += "vocabulary=" + str(vocabulary) + ";" if schedule is not None: query += "schedule=" + str(schedule) + ";" if policy is not None: query += "policy=" + str(policy) + ";" if description is not None: query += "description=" + str(description) + ";" if check_grid is not None: query += "check_grid=" + str(check_grid) + ";" if save is not None: query += "save=" + str(save) + ";" try: if Cube.client.submit(query, display) is None: raise RuntimeError() if Cube.client.last_response is not None: if Cube.client.cube: self.pid = Cube.client.cube except Exception as e: print(get_linenumber(), "Something went wrong in instantiating the cube", e) raise RuntimeError() else: if self.pid: print("New cube is " + self.pid) def __del__(self): del self.pid del self.creation_date del self.measure del self.measure_type del self.level del self.nfragments del self.source_file del self.hostxcube del self.fragxdb del self.rowsxfrag del self.elementsxrow del self.compressed del self.size del self.nelements del self.dim_info def info(self, display=True): """info(display=True) -> None : call OPH_CUBESIZE and OPH_CUBESCHEMA to fill all Cube attributes :param 
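# Usage sketch: creating cubes with the constructor above, either by importing a
# NetCDF file (OPH_IMPORTNC) or by wrapping an existing datacube PID. Assumes
# Cube.client is set and a current working directory is defined; the path,
# measure, dimension names and PID are placeholders.
imported = cube.Cube(src_path="/data/tasmax.nc", measure="tasmax",
                     imp_dim="time", exp_dim="lat|lon", ncores=4,
                     description="example import")
existing = cube.Cube(pid="http://127.0.0.1/ophidia/1/1")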
display: option for displaying the response in a "pretty way" using the pretty_print function (default is True) :type display: bool :returns: None :rtype: None :raises: RuntimeError """ if Cube.client is None or self.pid is None: raise RuntimeError("Cube.client is None or pid is None") query = "oph_cubesize exec_mode=sync;cube=" + str(self.pid) + ";" if Cube.client.submit(query, display=False) is None: raise RuntimeError() query = "oph_cubeschema exec_mode=sync;cube=" + str(self.pid) + ";" if Cube.client.submit(query, display) is None: raise RuntimeError() res = Cube.client.deserialize_response() if res is not None: for res_i in res["response"]: if res_i["objkey"] == "cubeschema_cubeinfo": self.pid = res_i["objcontent"][0]["rowvalues"][0][0] self.creation_date = res_i["objcontent"][0]["rowvalues"][0][1] self.measure = res_i["objcontent"][0]["rowvalues"][0][2] self.measure_type = res_i["objcontent"][0]["rowvalues"][0][3] self.level = res_i["objcontent"][0]["rowvalues"][0][4] self.nfragments = res_i["objcontent"][0]["rowvalues"][0][5] self.source_file = res_i["objcontent"][0]["rowvalues"][0][6] elif res_i["objkey"] == "cubeschema_morecubeinfo": self.hostxcube = res_i["objcontent"][0]["rowvalues"][0][1] self.fragxdb = res_i["objcontent"][0]["rowvalues"][0][2] self.rowsxfrag = res_i["objcontent"][0]["rowvalues"][0][3] self.elementsxrow = res_i["objcontent"][0]["rowvalues"][0][4] self.compressed = res_i["objcontent"][0]["rowvalues"][0][5] self.size = res_i["objcontent"][0]["rowvalues"][0][6] + " " + res_i["objcontent"][0]["rowvalues"][0][7] self.nelements = res_i["objcontent"][0]["rowvalues"][0][8] elif res_i["objkey"] == "cubeschema_diminfo": self.dim_info = list() for row_i in res_i["objcontent"][0]["rowvalues"]: element = dict() element["name"] = row_i[0] element["type"] = row_i[1] element["size"] = row_i[2] element["hierarchy"] = row_i[3] element["concept_level"] = row_i[4] element["array"] = row_i[5] element["level"] = row_i[6] element["lattice_name"] = row_i[7] self.dim_info.append(element) def exportnc(self, misc="no", output_path="default", output_name="default", cdd=None, force="no", export_metadata="yes", schedule=0, exec_mode="sync", ncores=1, save="yes", display=False): """exportnc(misc='no', output_path='default', output_name='default', cdd=None, force='no', export_metadata='yes', schedule=0, exec_mode='sync', ncores=1, save='yes', display=False) -> None : wrapper of the operator OPH_EXPORTNC :param ncores: number of cores to use :type ncores: int :param exec_mode: async or sync :type exec_mode: str :param schedule: 0 :type schedule: int :param export_metadata: yes|no :type export_metadata: str :param misc: yes|no :type misc: str :param cdd: absolute path corresponding to the current directory on data repository :type cdd: str :param force: yes|no :type force: str :param output_path: directory of the output file :type output_path: str :param output_name: name of the output file :type output_name: str :param save: option to enable/disable JSON response saving on the server-side (default is yes) :type save: str :param display: option for displaying the response in a "pretty way" using the pretty_print function (default is False) :type display: bool :returns: None :rtype: None :raises: RuntimeError """ if Cube.client is None or self.pid is None: raise RuntimeError("Cube.client or pid is None") query = "oph_exportnc " if misc is not None: query += "misc=" + str(misc) + ";" if output_path is not None: query += "output_path=" + str(output_path) + ";" if output_name is not None: query += 
"output_name=" + str(output_name) + ";" if cdd is not None: query += "cdd=" + str(cdd) + ";" if force is not None: query += "force=" + str(force) + ";" if export_metadata is not None: query += "export_metadata=" + str(export_metadata) + ";" if schedule is not None: query += "schedule=" + str(schedule) + ";" if exec_mode is not None: query += "exec_mode=" + str(exec_mode) + ";" if ncores is not None: query += "ncores=" + str(ncores) + ";" if save is not None: query += "save=" + str(save) + ";" query += "cube=" + str(self.pid) + ";" try: if Cube.client.submit(query, display) is None: raise RuntimeError() except Exception as e: print(get_linenumber(), "Something went wrong:", e) raise RuntimeError() def exportnc2(self, misc="no", output_path="default", output_name="default", cdd=None, force="no", export_metadata="yes", schedule=0, exec_mode="sync", ncores=1, save="yes", display=False): """exportnc2(misc='no', output_path='default', output_name='default', cdd=None, force='no', export_metadata='yes', schedule=0, exec_mode='sync', ncores=1, save='yes', display=False) -> None : wrapper of the operator OPH_EXPORTNC2 :param ncores: number of cores to use :type ncores: int :param exec_mode: async or sync :type exec_mode: str :param schedule: 0 :type schedule: int :param export_metadata: yes|no|postpone :type export_metadata: str :param misc: yes|no :type misc: str :param cdd: absolute path corresponding to the current directory on data repository :type cdd: str :param force: yes|no :type force: str :param output_path: directory of the output file :type output_path: str :param output_name: name of the output file :type output_name: str :param save: option to enable/disable JSON response saving on the server-side (default is yes) :type save: str :param display: option for displaying the response in a "pretty way" using the pretty_print function (default is False) :type display: bool :returns: None :rtype: None :raises: RuntimeError """ if Cube.client is None or self.pid is None: raise RuntimeError("Cube.client or pid is None") query = "oph_exportnc2 " if misc is not None: query += "misc=" + str(misc) + ";" if output_path is not None: query += "output_path=" + str(output_path) + ";" if output_name is not None: query += "output_name=" + str(output_name) + ";" if cdd is not None: query += "cdd=" + str(cdd) + ";" if force is not None: query += "force=" + str(force) + ";" if export_metadata is not None: query += "export_metadata=" + str(export_metadata) + ";" if schedule is not None: query += "schedule=" + str(schedule) + ";" if exec_mode is not None: query += "exec_mode=" + str(exec_mode) + ";" if ncores is not None: query += "ncores=" + str(ncores) + ";" if save is not None: query += "save=" + str(save) + ";" query += "cube=" + str(self.pid) + ";" try: if Cube.client.submit(query, display) is None: raise RuntimeError() except Exception as e: print(get_linenumber(), "Something went wrong:", e) raise RuntimeError() def aggregate( self, ncores=1, nthreads=1, exec_mode="sync", schedule=0, group_size="all", operation=None, missingvalue="-", grid="-", container="-", description="-", check_grid="no", save="yes", display=False, ): """aggregate( ncores=1, nthreads=1, exec_mode='sync', schedule=0, group_size='all', operation=None, missingvalue='-', grid='-', container='-', description='-', check_grid='no', save='yes', display=False) -> Cube or None : wrapper of the operator OPH_AGGREGATE :param ncores: number of cores to use :type ncores: int :param nthreads: number of threads to use :type nthreads: int :param 
exec_mode: async or sync :type exec_mode: str :param schedule: 0 :type schedule: int :param operation: count|max|min|avg|sum :type operation: str :param container: optional container name :type container: str :param grid: optionally group dimensions in a grid :type grid: str :param group_size: number of tuples per group to consider in the aggregation function :type group_size: int or str :param missingvalue: missing value; by default it is the value from the file if defined, NAN otherwise (for float and double) :type missingvalue: float :param description: additional description to be associated with the output cube :type description: str :param check_grid: yes|no :type check_grid: str :param save: option to enable/disable JSON response saving on the server-side (default is yes) :type save: str :param display: option for displaying the response in a "pretty way" using the pretty_print function (default is False) :type display: bool :returns: new cube or None :rtype: Cube or None :raises: RuntimeError """ if Cube.client is None or self.pid is None or operation is None: raise RuntimeError("Cube.client, pid or operation is None") newcube = None query = "oph_aggregate " if ncores is not None: query += "ncores=" + str(ncores) + ";" if exec_mode is not None: query += "exec_mode=" + str(exec_mode) + ";" if schedule is not None: query += "schedule=" + str(schedule) + ";" if group_size is not None: query += "group_size=" + str(group_size) + ";" if operation is not None: query += "operation=" + str(operation) + ";" if missingvalue is not None: query += "missingvalue=" + str(missingvalue) + ";" if grid is not None: query += "grid=" + str(grid) + ";" if container is not None: query += "container=" + str(container) + ";" if description is not None: query += "description=" + str(description) + ";" if check_grid is not None: query += "check_grid=" + str(check_grid) + ";" if nthreads is not None: query += "nthreads=" + str(nthreads) + ";" if save is not None: query += "save=" + str(save) + ";" query += "cube=" + str(self.pid) + ";" try: if Cube.client.submit(query, display) is None: raise RuntimeError() if Cube.client.last_response is not None: if Cube.client.cube: newcube = Cube(pid=Cube.client.cube) except Exception as e: print(get_linenumber(), "Something went wrong:", e) raise RuntimeError() else: return newcube def aggregate2( self, ncores=1, nthreads=1, exec_mode="sync", schedule=0, dim="-", concept_level="A", midnight="24", operation=None, grid="-", missingvalue="-", container="-", description="-", check_grid="no", save="yes", display=False, ): """aggregate2(ncores=1, nthreads=1, exec_mode='sync', schedule=0, dim='-', concept_level='A', midnight='24', operation=None, grid='-', missingvalue='-', container='-', description='-', check_grid='no', save='yes', display=False) -> Cube or None : wrapper of the operator OPH_AGGREGATE2 :param ncores: number of cores to use :type ncores: int :param nthreads: number of threads to use :type nthreads: int :param exec_mode: async or sync :type exec_mode: str :param schedule: 0 :type schedule: int :param dim: name of dimension on which the operation will be applied :type dim: str :param operation: count|max|min|avg|sum :type operation: str :param concept_level: concept level inside the hierarchy used for the operation :type concept_level: str :param container: name of the container to be used to store the output cube, by default it is the input container :type container: str :param grid: optional argument used to identify the grid of dimensions to be used or the 
one to be created :type grid: str :param midnight: 00|24 :type midnight: str :param missingvalue: missing value; by default it is the value from the file if defined, NAN otherwise (for float and double) :type missingvalue: float :param description: additional description to be associated with the output cube :type description: str :param check_grid: yes|no :type check_grid: str :param save: option to enable/disable JSON response saving on the server-side (default is yes) :type save: str :param display: option for displaying the response in a "pretty way" using the pretty_print function (default is False) :type display: bool :returns: new cube or None :rtype: Cube or None :raises: RuntimeError """ if Cube.client is None or self.pid is None or operation is None: raise RuntimeError("Cube.client, pid, dim or operation is None") newcube = None query = "oph_aggregate2 " if ncores is not None: query += "ncores=" + str(ncores) + ";" if exec_mode is not None: query += "exec_mode=" + str(exec_mode) + ";" if schedule is not None: query += "schedule=" + str(schedule) + ";" if dim is not None: query += "dim=" + str(dim) + ";" if concept_level is not None: query += "concept_level=" + str(concept_level) + ";" if midnight is not None: query += "midnight=" + str(midnight) + ";" if operation is not None: query += "operation=" + str(operation) + ";" if missingvalue is not None: query += "missingvalue=" + str(missingvalue) + ";" if grid is not None: query += "grid=" + str(grid) + ";" if container is not None: query += "container=" + str(container) + ";" if description is not None: query += "description=" + str(description) + ";" if check_grid is not None: query += "check_grid=" + str(check_grid) + ";" if nthreads is not None: query += "nthreads=" + str(nthreads) + ";" if save is not None: query += "save=" + str(save) + ";" query += "cube=" + str(self.pid) + ";" try: if Cube.client.submit(query, display) is None: raise RuntimeError() if Cube.client.last_response is not None: if Cube.client.cube: newcube = Cube(pid=Cube.client.cube) except Exception as e: print(get_linenumber(), "Something went wrong:", e) raise RuntimeError() else: return newcube def apply( self, ncores=1, nthreads=1, exec_mode="sync", query="measure", dim_query="null", measure="null", measure_type="manual", dim_type="manual", check_type="yes", on_reduce="skip", compressed="auto", schedule=0, container="-", description="-", save="yes", display=False, ): """apply(ncores=1, nthreads=1, exec_mode='sync', query='measure', dim_query='null', measure='null', measure_type='manual', dim_type='manual', check_type='yes', on_reduce='skip', compressed='auto', schedule=0, container='-', description='-', save='yes', display=False) -> Cube or None : wrapper of the operator OPH_APPLY :param ncores: number of cores to use :type ncores: int :param nthreads: number of threads to use :type nthreads: int :param exec_mode: async or sync :type exec_mode: str :param schedule: 0 :type schedule: int :param query: query to be submitted :type query: str :param check_type: yes|no :type check_type: str :param on_reduce: skip|update :type on_reduce: str :param compressed: yes|no|auto :type compressed: str :param container: name of the container to be used to store the output cube, by default it is the input container :type container: str :param dim_query: optional query on dimension values :type dim_query: str :param dim_type: auto|manual :type dim_type: str :param measure: name of the new measure resulting from the specified operation :type measure: str :param measure_type: 
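# Usage sketch: reducing a cube with the OPH_AGGREGATE and OPH_AGGREGATE2
# wrappers above. Assumes 'imported' is a Cube whose time dimension uses a time
# hierarchy (e.g. oph_time); the dimension name and concept level below are
# placeholders, not values taken from this excerpt.
maxima = imported.aggregate(operation="max", group_size="all")
monthly_avg = imported.aggregate2(dim="time", concept_level="M", operation="avg")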
auto|manual :type measure_type: str :param description: additional description to be associated with the output cube :type description: str :param save: option to enable/disable JSON response saving on the server-side (default is yes) :type save: str :param display: option for displaying the response in a "pretty way" using the pretty_print function (default is False) :type display: bool :returns: new cube or None :rtype: Cube or None :raises: RuntimeError """ if Cube.client is None or self.pid is None: raise RuntimeError("Cube.client, pid or query is None") newcube = None internal_query = "oph_apply " if ncores is not None: internal_query += "ncores=" + str(ncores) + ";" if exec_mode is not None: internal_query += "exec_mode=" + str(exec_mode) + ";" if query is not None: internal_query += "query=" + str(query) + ";" if dim_query is not None: internal_query += "dim_query=" + str(dim_query) + ";" if measure is not None: internal_query += "measure=" + str(measure) + ";" if measure_type is not None: internal_query += "measure_type=" + str(measure_type) + ";" if dim_type is not None: internal_query += "dim_type=" + str(dim_type) + ";" if check_type is not None: internal_query += "check_type=" + str(check_type) + ";" if on_reduce is not None: internal_query += "on_reduce=" + str(on_reduce) + ";" if compressed is not None: internal_query += "compressed=" + str(compressed) + ";" if schedule is not None: internal_query += "schedule=" + str(schedule) + ";" if container is not None: internal_query += "container=" + str(container) + ";" if description is not None: internal_query += "description=" + str(description) + ";" if nthreads is not None: internal_query += "nthreads=" + str(nthreads) + ";" if save is not None: internal_query += "save=" + str(save) + ";" internal_query += "cube=" + str(self.pid) + ";" try: if Cube.client.submit(internal_query, display) is None: raise RuntimeError() if Cube.client.last_response is not None: if Cube.client.cube: newcube = Cube(pid=Cube.client.cube) except Exception as e: print(get_linenumber(), "Something went wrong:", e) raise RuntimeError() else: return newcube def concatnc( self, src_path=None, cdd=None, grid="-", check_exp_dim="yes", dim_offset="-", dim_continue="no", offset=0, description="-", subset_dims="none", subset_filter="all", subset_type="index", time_filter="yes", ncores=1, exec_mode="sync", schedule=0, save="yes", display=False, ): """concatnc(src_path=None, cdd=None, grid='-', check_exp_dim='yes', dim_offset='-', dim_continue='no', offset=0, description='-', subset_dims='none', subset_filter='all', subset_type='index', time_filter='yes', ncores=1, exec_mode='sync', schedule=0, save='yes', display=False) -> Cube or None : wrapper of the operator OPH_CONCATNC :param src_path: path of file to be imported :type src_path: str :param cdd: absolute path corresponding to the current directory on data repository :type cdd: str :param grid: optionally group dimensions in a grid :type grid: str :param subset_dims: pipe (|) separated list of dimensions on which to apply the subsetting :type subset_dims: str :param subset_filter: pipe (|) separated list of filters, one per dimension, composed of comma-separated microfilters (e.g.
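# Usage sketch: applying an array primitive with the OPH_APPLY wrapper above.
# Assumes 'imported' is a Cube and that the primitive used in the query string
# (oph_sum_scalar here) is installed on the server; any primitive returned by
# Cube.primitives() could be used instead, and the constant is a placeholder.
shifted = imported.apply(query="oph_sum_scalar(measure, 273.15)", check_type="no")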
1,5,10:2:50) :type subset_filter: str :param time_filter: yes|no :type time_filter: str :param subset_type: index|coord :type subset_type: str :param ncores: number of cores to use :type ncores: int :param exec_mode: async or sync :type exec_mode: str :param schedule: 0 :type schedule: int :param offset: it is added to the bounds of subset intervals :type offset: int :param check_exp_dim: yes|no :type check_exp_dim: str :param dim_offset: offset to be added to dimension values of imported data :type dim_offset: float :param dim_continue: yes|no :type dim_continue: str :param description: additional description to be associated with the output cube :type description: str :param save: option to enable/disable JSON response saving on the server-side (default is yes) :type save: str :param display: option for displaying the response in a "pretty way" using the pretty_print function (default is False) :type display: bool :returns: new cube or None :rtype: Cube or None :raises: RuntimeError """ if Cube.client is None or self.pid is None or src_path is None: raise RuntimeError("Cube.client, pid or src_path is None") newcube = None query = "oph_concatnc " if src_path is not None: query += "src_path=" + str(src_path) + ";" if cdd is not None: query += "cdd=" + str(cdd) + ";" if grid is not None: query += "grid=" + str(grid) + ";" if subset_dims is not None: query += "subset_dims=" + str(subset_dims) + ";" if subset_type is not None: query += "subset_type=" + str(subset_type) + ";" if subset_filter is not None: query += "subset_filter=" + str(subset_filter) + ";" if time_filter is not None: query += "time_filter=" + str(time_filter) + ";" if offset is not None: query += "offset=" + str(offset) + ";" if ncores is not None: query += "ncores=" + str(ncores) + ";" if exec_mode is not None: query += "exec_mode=" + str(exec_mode) + ";" if schedule is not None: query += "schedule=" + str(schedule) + ";" if description is not None: query += "description=" + str(description) + ";" if check_exp_dim is not None: query += "check_exp_dim=" + str(check_exp_dim) + ";" if dim_offset is not None: query += "dim_offset=" + str(dim_offset) + ";" if dim_continue is not None: query += "dim_continue=" + str(dim_continue) + ";" if save is not None: query += "save=" + str(save) + ";" query += "cube=" + str(self.pid) + ";" try: if Cube.client.submit(query, display) is None: raise RuntimeError() if Cube.client.last_response is not None: if Cube.client.cube: newcube = Cube(pid=Cube.client.cube) except Exception as e: print(get_linenumber(), "Something went wrong:", e) raise RuntimeError() else: return newcube def concatnc2( self, src_path=None, cdd=None, grid="-", check_exp_dim="yes", dim_offset="-", dim_continue="no", offset=0, description="-", subset_dims="none", subset_filter="all", subset_type="index", time_filter="yes", ncores=1, nthreads=1, exec_mode="sync", schedule=0, save="yes", display=False, ): """concatnc(src_path=None, cdd=None, grid='-', check_exp_dim='yes', dim_offset='-', dim_continue='no', offset=0, description='-', subset_dims='none', subset_filter='all', subset_type='index', time_filter='yes', ncores=1, nthreads=1, exec_mode='sync', schedule=0, save='yes', display=False) -> Cube or None : wrapper of the operator OPH_CONCATNC2 :param src_path: path of file to be imported :type src_path: str :param cdd: absolute path corresponding to the current directory on data repository :type cdd: str :param grid: optionally group dimensions in a grid :type grid: str :param subset_dims: pipe (|) separated list of 
dimensions on which to apply the subsetting :type subset_dims: str :param subset_filter: pipe (|) separated list of filters, one per dimension, composed of comma-separated microfilters (e.g. 1,5,10:2:50) :type subset_filter: str :param time_filter: yes|no :type time_filter: str :param subset_type: index|coord :type subset_type: str :param ncores: number of cores to use :type ncores: int :param nthreads: number of threads to use :type nthreads: int :param exec_mode: async or sync :type exec_mode: str :param schedule: 0 :type schedule: int :param offset: it is added to the bounds of subset intervals :type offset: int :param check_exp_dim: yes|no :type check_exp_dim: str :param dim_offset: offset to be added to dimension values of imported data :type dim_offset: float :param dim_continue: yes|no :type dim_continue: str :param description: additional description to be associated with the output cube :type description: str :param save: option to enable/disable JSON response saving on the server-side (default is yes) :type save: str :param display: option for displaying the response in a "pretty way" using the pretty_print function (default is False) :type display: bool :returns: new cube or None :rtype: Cube or None :raises: RuntimeError """ if Cube.client is None or self.pid is None or src_path is None: raise RuntimeError("Cube.client, pid or src_path is None") newcube = None query = "oph_concatnc2 " if src_path is not None: query += "src_path=" + str(src_path) + ";" if cdd is not None: query += "cdd=" + str(cdd) + ";" if grid is not None: query += "grid=" + str(grid) + ";" if subset_dims is not None: query += "subset_dims=" + str(subset_dims) + ";" if subset_type is not None: query += "subset_type=" + str(subset_type) + ";" if subset_filter is not None: query += "subset_filter=" + str(subset_filter) + ";" if time_filter is not None: query += "time_filter=" + str(time_filter) + ";" if offset is not None: query += "offset=" + str(offset) + ";" if ncores is not None: query += "ncores=" + str(ncores) + ";" if nthreads is not None: query += "nthreads=" + str(nthreads) + ";" if exec_mode is not None: query += "exec_mode=" + str(exec_mode) + ";" if schedule is not None: query += "schedule=" + str(schedule) + ";" if description is not None: query += "description=" + str(description) + ";" if check_exp_dim is not None: query += "check_exp_dim=" + str(check_exp_dim) + ";" if dim_offset is not None: query += "dim_offset=" + str(dim_offset) + ";" if dim_continue is not None: query += "dim_continue=" + str(dim_continue) + ";" if save is not None: query += "save=" + str(save) + ";" query += "cube=" + str(self.pid) + ";" try: if Cube.client.submit(query, display) is None: raise RuntimeError() if Cube.client.last_response is not None: if Cube.client.cube: newcube = Cube(pid=Cube.client.cube) except Exception as e: print(get_linenumber(), "Something went wrong:", e) raise RuntimeError() else: return newcube def provenance(self, branch="all", exec_mode="sync", objkey_filter="all", save="yes", display=True): """provenance(branch='all', exec_mode='sync', objkey_filter='all', save='yes', display=True) -> dict or None : wrapper of the operator OPH_CUBEIO :param branch: parent|children|all :type branch: str :param exec_mode: async or sync :type exec_mode: str :param save: option to enable/disable JSON response saving on the server-side (default is yes) :type save: str :param display: option for displaying the response in a "pretty way" using the pretty_print function (default is True) :type display: bool :returns: 
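# Usage sketch: appending the next slice of a time series to an existing cube
# with the OPH_CONCATNC wrapper above. Assumes 'imported' is the Cube created
# from the first file; the source path is a placeholder.
extended = imported.concatnc(src_path="/data/tasmax_2016.nc", check_exp_dim="yes")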
response or None :rtype: dict or None :raises: RuntimeError """ if Cube.client is None or self.pid is None: raise RuntimeError("Cube.client or pid is None") response = None query = "oph_cubeio " if branch is not None: query += "branch=" + str(branch) + ";" if exec_mode is not None: query += "exec_mode=" + str(exec_mode) + ";" if objkey_filter is not None: query += "objkey_filter=" + str(objkey_filter) + ";" if save is not None: query += "save=" + str(save) + ";" query += "cube=" + str(self.pid) + ";" try: if Cube.client.submit(query, display) is None: raise RuntimeError() if Cube.client.last_response is not None and display is False: response = Cube.client.deserialize_response()["response"] except Exception as e: print(get_linenumber(), "Something went wrong:", e) raise RuntimeError() else: return response def delete(self, ncores=1, nthreads=1, exec_mode="sync", schedule=0, save="yes", display=False): """delete(ncores=1, nthreads=1, exec_mode='sync', schedule=0, save='yes', display=False) -> None : wrapper of the operator OPH_DELETE :param ncores: number of cores to use :type ncores: int :param nthreads: number of threads to use :type nthreads: int :param exec_mode: async or sync :type exec_mode: str :param schedule: 0 :type schedule: int :param save: option to enable/disable JSON response saving on the server-side (default is yes) :type save: str :param display: option for displaying the response in a "pretty way" using the pretty_print function (default is False) :type display: bool :returns: None :rtype: None :raises: RuntimeError """ if Cube.client is None or self.pid is None: raise RuntimeError("Cube.client or pid is None") query = "oph_delete " if ncores is not None: query += "ncores=" + str(ncores) + ";" if exec_mode is not None: query += "exec_mode=" + str(exec_mode) + ";" if schedule is not None: query += "schedule=" + str(schedule) + ";" if nthreads is not None: query += "nthreads=" + str(nthreads) + ";" if save is not None: query += "save=" + str(save) + ";" query += "cube=" + str(self.pid) + ";" try: if Cube.client.submit(query, display) is None: raise RuntimeError() except Exception as e: print(get_linenumber(), "Something went wrong:", e) raise RuntimeError() def drilldown(self, ncores=1, exec_mode="sync", schedule=0, ndim=1, container="-", description="-", save="yes", display=False): """drilldown(ndim=1, container='-', ncores=1, exec_mode='sync', schedule=0, description='-', save='yes', display=False) -> Cube or None : wrapper of the operator OPH_DRILLDOWN :param ncores: number of cores to use :type ncores: int :param exec_mode: async or sync :type exec_mode: str :param schedule: 0 :type schedule: int :param ndim: number of implicit dimensions that will be transformed in explicit dimensions :type ndim: int :param container: name of the container to be used to store the output cube, by default it is the input container :type container: str :param description: additional description to be associated with the output cube :type description: str :param save: option to enable/disable JSON response saving on the server-side (default is yes) :type save: str :param display: option for displaying the response in a "pretty way" using the pretty_print function (default is False) :type display: bool :returns: new cube or None :rtype: Cube or None :raises: RuntimeError """ if Cube.client is None or self.pid is None: raise RuntimeError("Cube.client or pid is None") newcube = None query = "oph_drilldown " if ncores is not None: query += "ncores=" + str(ncores) + ";" if exec_mode is not 
None: query += "exec_mode=" + str(exec_mode) + ";" if schedule is not None: query += "schedule=" + str(schedule) + ";" if ndim is not None: query += "ndim=" + str(ndim) + ";" if container is not None: query += "container=" + str(container) + ";" if description is not None: query += "description=" + str(description) + ";" if save is not None: query += "save=" + str(save) + ";" query += "cube=" + str(self.pid) + ";" try: if Cube.client.submit(query, display) is None: raise RuntimeError() if Cube.client.last_response is not None: if Cube.client.cube: newcube = Cube(pid=Cube.client.cube) except Exception as e: print(get_linenumber(), "Something went wrong:", e) raise RuntimeError() else: return newcube def duplicate(self, ncores=1, nthreads=1, exec_mode="sync", schedule=0, container="-", description="-", save="yes", display=False): """duplicate(container='-', ncores=1, nthreads=1, exec_mode='sync', description='-', save='yes', display=False) -> Cube or None : wrapper of the operator OPH_DUPLICATE :param ncores: number of cores to use :type ncores: int :param nthreads: number of threads to use :type nthreads: int :param exec_mode: async or sync :type exec_mode: str :param schedule: 0 :type schedule: int :param container: name of the container to be used to store the output cube, by default it is the input container :type container: str :param description: additional description to be associated with the output cube :type description: str :param save: option to enable/disable JSON response saving on the server-side (default is yes) :type save: str :param display: option for displaying the response in a "pretty way" using the pretty_print function (default is False) :type display: bool :returns: new cube or None :rtype: Cube or None :raises: RuntimeError """ if Cube.client is None or self.pid is None: raise RuntimeError("Cube.client or pid is None") newcube = None query = "oph_duplicate " if ncores is not None: query += "ncores=" + str(ncores) + ";" if exec_mode is not None: query += "exec_mode=" + str(exec_mode) + ";" if schedule is not None: query += "schedule=" + str(schedule) + ";" if container is not None: query += "container=" + str(container) + ";" if description is not None: query += "description=" + str(description) + ";" if nthreads is not None: query += "nthreads=" + str(nthreads) + ";" if save is not None: query += "save=" + str(save) + ";" query += "cube=" + str(self.pid) + ";" try: if Cube.client.submit(query, display) is None: raise RuntimeError() if Cube.client.last_response is not None: if Cube.client.cube: newcube = Cube(pid=Cube.client.cube) except Exception as e: print(get_linenumber(), "Something went wrong:", e) raise RuntimeError() else: return newcube def explore( self, schedule=0, limit_filter=100, subset_dims=None, subset_filter="all", time_filter="yes", subset_type="index", show_index="no", show_id="no", show_time="no", level=1, output_path="default", output_name="default", cdd=None, base64="no", ncores=1, exec_mode="sync", objkey_filter="all", save="yes", display=True, ): """explore(schedule=0, limit_filter=100, subset_dims=None, subset_filter='all', time_filter='yes', subset_type='index', show_index='no', show_id='no', show_time='no', level=1, output_path='default', output_name='default', cdd=None, base64='no', ncores=1, exec_mode='sync', objkey_filter='all', save='yes', display=True) -> dict or None : wrapper of the operator OPH_EXPLORECUBE :param ncores: number of cores to use :type ncores: int :param exec_mode: async or sync :type exec_mode: str :param schedule: 0 
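# Usage sketch: housekeeping with the wrappers above: trace provenance with
# OPH_CUBEIO, copy a cube with OPH_DUPLICATE, turn an implicit dimension into an
# explicit one with OPH_DRILLDOWN, and free server space with OPH_DELETE.
# Assumes 'imported' and 'extended' are Cube instances from the sketches earlier.
imported.provenance(branch="all")
backup = extended.duplicate(description="backup copy")
drilled = extended.drilldown(ndim=1)
backup.delete()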
:type schedule: int :param level: 1|2 :type level: int :param limit_filter: max number of rows :type limit_filter: int :param output_path: absolute path of the JSON Response :type output_path: str :param output_name: filename of the JSON Response :type output_name: str :param time_filter: yes|no :type time_filter: str :param subset_type: if subset is applied on dimension values or indexes :type subset_type: str :param show_id: yes|no :type show_id: str :param show_index: yes|no :type show_index: str :param show_time: yes|no :type show_time: str :param cdd: absolute path corresponding to the current directory on data repository :type cdd: str :param base64: yes|no :type base64: str :param subset_dims: pipe (|) separated list of dimensions on which to apply the subsetting :type subset_dims: str :param subset_filter: pipe (|) separated list of filters, one per dimension, composed of comma-separated microfilters (e.g. 1,5,10:2:50) :type subset_filter: str :param save: option to enable/disable JSON response saving on the server-side (default is yes) :type save: str :param display: option for displaying the response in a "pretty way" using the pretty_print function (default is True) :type display: bool :returns: response or None :rtype: dict or None :raises: RuntimeError """ if Cube.client is None or self.pid is None: raise RuntimeError("Cube.client or pid is None") response = None query = "oph_explorecube " if schedule is not None: query += "schedule=" + str(schedule) + ";" if limit_filter is not None: query += "limit_filter=" + str(limit_filter) + ";" if subset_dims is not None: query += "subset_dims=" + str(subset_dims) + ";" if subset_filter is not None: query += "subset_filter=" + str(subset_filter) + ";" if time_filter is not None: query += "time_filter=" + str(time_filter) + ";" if subset_type is not None: query += "subset_type=" + str(subset_type) + ";" if show_index is not None: query += "show_index=" + str(show_index) + ";" if show_id is not None: query += "show_id=" + str(show_id) + ";" if show_time is not None: query += "show_time=" + str(show_time) + ";" if level is not None: query += "level=" + str(level) + ";" if output_path is not None: query += "output_path=" + str(output_path) + ";" if output_name is not None: query += "output_name=" + str(output_name) + ";" if cdd is not None: query += "cdd=" + str(cdd) + ";" if base64 is not None: query += "base64=" + str(base64) + ";" if ncores is not None: query += "ncores=" + str(ncores) + ";" if exec_mode is not None: query += "exec_mode=" + str(exec_mode) + ";" if objkey_filter is not None: query += "objkey_filter=" + str(objkey_filter) + ";" if save is not None: query += "save=" + str(save) + ";" query += "cube=" + str(self.pid) + ";" try: if Cube.client.submit(query, display) is None: raise RuntimeError() if Cube.client.last_response is not None and display is False: response = Cube.client.deserialize_response()["response"] except Exception as e: print(get_linenumber(), "Something went wrong:", e) raise RuntimeError() else: return response def publish(self, content="all", schedule=0, show_index="no", show_id="no", show_time="no", ncores=1, exec_mode="sync", save="yes", display=True): """publish( ncores=1, content='all', exec_mode='sync', show_id= 'no', show_index='no', schedule=0, show_time='no', save='yes', display=True) -> dict or None : wrapper of the operator OPH_PUBLISH :param ncores: number of cores to use :type ncores: int :param exec_mode: async or sync :type exec_mode: str :param schedule: 0 :type schedule: int :param 
show_index: yes|no :type show_index: str :param show_id: yes|no :type show_id: str :param show_time: yes|no :type show_time: str :param content: all|data|metadata :type content: str :param save: option to enable/disable JSON response saving on the server-side (default is yes) :type save: str :param display: option for displaying the response in a "pretty way" using the pretty_print function (default is True) :type display: bool :returns: response or None :rtype: dict or None :raises: RuntimeError """ if Cube.client is None or self.pid is None: raise RuntimeError("Cube.client or pid is None") response = None query = "oph_publish " if content is not None: query += "content=" + str(content) + ";" if schedule is not None: query += "schedule=" + str(schedule) + ";" if show_index is not None: query += "show_index=" + str(show_index) + ";" if show_id is not None: query += "show_id=" + str(show_id) + ";" if show_time is not None: query += "show_time=" + str(show_time) + ";" if ncores is not None: query += "ncores=" + str(ncores) + ";" if exec_mode is not None: query += "exec_mode=" + str(exec_mode) + ";" if save is not None: query += "save=" + str(save) + ";" query += "cube=" + str(self.pid) + ";" try: if Cube.client.submit(query, display) is None: raise RuntimeError() if Cube.client.last_response is not None and display is False: response = Cube.client.deserialize_response()["response"] except Exception as e: print(get_linenumber(), "Something went wrong:", e) raise RuntimeError() else: return response def unpublish(self, exec_mode="sync", save="yes", display=False): """unpublish( exec_mode='sync', save='yes', display=False) -> None : wrapper of the operator OPH_UNPUBLISH :param exec_mode: async or sync :type exec_mode: str :param save: option to enable/disable JSON response saving on the server-side (default is yes) :type save: str :param display: option for displaying the response in a "pretty way" using the pretty_print function (default is False) :type display: bool :returns: None :rtype: None :raises: RuntimeError """ if Cube.client is None or self.pid is None: raise RuntimeError("Cube.client or pid is None") query = "oph_unpublish ncores=1;" if exec_mode is not None: query += "exec_mode=" + str(exec_mode) + ";" if save is not None: query += "save=" + str(save) + ";" query += "cube=" + str(self.pid) + ";" try: if Cube.client.submit(query, display) is None: raise RuntimeError() except Exception as e: print(get_linenumber(), "Something went wrong:", e) raise RuntimeError() def cubeschema( self, level=0, dim="all", show_index="no", show_time="no", base64="no", action="read", concept_level="c", dim_level=1, dim_array="yes", exec_mode="sync", objkey_filter="all", save="yes", display=True, ): """cubeschema( objkey_filter='all', exec_mode='sync', level=0, dim=None, show_index='no', show_time='no', base64='no', action='read', concept_level='c', dim_level=1, dim_array='yes', save='yes', display=True) -> dict or None : wrapper of the operator OPH_CUBESCHEMA :param level: 0|1|2 :type level: int :param dim: names of dimensions to show. 
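# Usage sketch: inspecting data and metadata with the OPH_EXPLORECUBE,
# OPH_PUBLISH/OPH_UNPUBLISH and OPH_CUBESCHEMA wrappers above. Assumes
# 'extended' is a Cube instance; the subset dimension and filter are placeholders.
extended.explore(limit_filter=10, subset_dims="time", subset_filter="1:12")
extended.publish(content="all")
extended.unpublish()
schema = extended.cubeschema(level=1, dim="time", display=False)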
Only valid with level bigger than 0 :type dim: str :param show_index: yes|no :type show_index: str :param show_time: yes|no :type show_time: str :param base64: yes|no :type base64: str :param action: read|add|clear :type action: str :param concept_level: hierarchy level of a new dimension to be added (default is 'c') :type concept_level: str :param dim_level: level of a new dimension to be added, greater than 0 (default is 1) :type dim_level: int :param dim_array: yes|no :type dim_array: str :param exec_mode: async or sync :type exec_mode: str :param save: option to enable/disable JSON response saving on the server-side (default is yes) :type save: str :param display: option for displaying the response in a "pretty way" using the pretty_print function (default is True) :type display: bool :returns: response or None :rtype: dict or None :raises: RuntimeError """ if Cube.client is None or self.pid is None: raise RuntimeError("Cube.client or pid is None") response = None query = "oph_cubeschema ncores=1;" if level is not None: query += "level=" + str(level) + ";" if dim is not None: query += "dim=" + str(dim) + ";" if show_index is not None: query += "show_index=" + str(show_index) + ";" if show_time is not None: query += "show_time=" + str(show_time) + ";" if base64 is not None: query += "base64=" + str(base64) + ";" if action is not None: query += "action=" + str(action) + ";" if concept_level is not None: query += "concept_level=" + str(concept_level) + ";" if dim_level is not None: query += "dim_level=" + str(dim_level) + ";" if dim_array is not None: query += "dim_array=" + str(dim_array) + ";" if exec_mode is not None: query += "exec_mode=" + str(exec_mode) + ";" if objkey_filter is not None: query += "objkey_filter=" + str(objkey_filter) + ";" if save is not None: query += "save=" + str(save) + ";" query += "cube=" + str(self.pid) + ";" try: if Cube.client.submit(query, display) is None: raise RuntimeError() if Cube.client.last_response is not None and display is False: response = Cube.client.deserialize_response()["response"] except Exception as e: print(get_linenumber(), "Something went wrong:", e) raise RuntimeError() else: return response def cubesize(self, schedule=0, exec_mode="sync", byte_unit="MB", algorithm="euristic", ncores=1, objkey_filter="all", save="yes", display=True): """cubesize( schedule=0, ncores=1, byte_unit='MB', algorithm='euristic', objkey_filter='all', exec_mode='sync', save='yes', display=True) -> dict or None : wrapper of the operator OPH_CUBESIZE :param ncores: number of cores to use :type ncores: int :param exec_mode: async or sync :type exec_mode: str :param schedule: 0 :type schedule: int :param byte_unit: KB|MB|GB|TB|PB :type byte_unit: str :param algorithm: euristic|count :type algorithm: str :param save: option to enable/disable JSON response saving on the server-side (default is yes) :type save: str :param display: option for displaying the response in a "pretty way" using the pretty_print function (default is True) :type display: bool :returns: response or None :rtype: dict or None :raises: RuntimeError """ if Cube.client is None or self.pid is None: raise RuntimeError("Cube.client or pid is None") response = None query = "oph_cubesize " if schedule is not None: query += "schedule=" + str(schedule) + ";" if exec_mode is not None: query += "exec_mode=" + str(exec_mode) + ";" if byte_unit is not None: query += "byte_unit=" + str(byte_unit) + ";" if algorithm is not None: algorithm += "algorithm=" + str(algorithm) + ";" if ncores is not None: query += 
"ncores=" + str(ncores) + ";" if objkey_filter is not None: query += "objkey_filter=" + str(objkey_filter) + ";" if save is not None: query += "save=" + str(save) + ";" query += "cube=" + str(self.pid) + ";" try: if Cube.client.submit(query, display) is None: raise RuntimeError() if Cube.client.last_response is not None and display is False: response = Cube.client.deserialize_response()["response"] except Exception as e: print(get_linenumber(), "Something went wrong:", e) raise RuntimeError() else: return response def cubeelements(self, schedule=0, exec_mode="sync", algorithm="dim_product", ncores=1, objkey_filter="all", save="yes", display=True): """cubeelements( schedule=0, algorithm='dim_product', ncores=1, exec_mode='sync', objkey_filter='all', save='yes', display=True) -> dict or None : wrapper of the operator OPH_CUBEELEMENTS :param ncores: number of cores to use :type ncores: int :param exec_mode: async or sync :type exec_mode: str :param schedule: 0 :type schedule: int :param algorithm: dim_product|count :type algorithm: str :param save: option to enable/disable JSON response saving on the server-side (default is yes) :type save: str :param display: option for displaying the response in a "pretty way" using the pretty_print function (default is True) :type display: bool :returns: response or None :rtype: dict or None :raises: RuntimeError """ if Cube.client is None or self.pid is None: raise RuntimeError("Cube.client or pid is None") response = None query = "oph_cubeelements " if schedule is not None: query += "schedule=" + str(schedule) + ";" if exec_mode is not None: query += "exec_mode=" + str(exec_mode) + ";" if algorithm is not None: query += "algorithm=" + str(algorithm) + ";" if ncores is not None: query += "ncores=" + str(ncores) + ";" if objkey_filter is not None: query += "objkey_filter=" + str(objkey_filter) + ";" if save is not None: query += "save=" + str(save) + ";" query += "cube=" + str(self.pid) + ";" try: if Cube.client.submit(query, display) is None: raise RuntimeError() if Cube.client.last_response is not None and display is False: response = Cube.client.deserialize_response()["response"] except Exception as e: print(get_linenumber(), "Something went wrong:", e) raise RuntimeError() else: return response def intercube(self, ncores=1, exec_mode="sync", cube2=None, cubes=None, operation="sub", missingvalue="-", measure="null", schedule=0, container="-", description="-", save="yes", display=False): """intercube(ncores=1, exec_mode='sync', cube2=None, cubes=None, operation='sub', missingvalue='-', measure='null', schedule=0, container='-', description='-', save='yes', display=False) -> Cube or None : wrapper of the operator OPH_INTERCUBE :param ncores: number of cores to use :type ncores: int :param exec_mode: async or sync :type exec_mode: str :param schedule: 0 :type schedule: int :param cube2: PID of the second cube :type cube2: str :param cubes: pipe (|) separated list of cubes :type cubes: str :param operation: sum|sub|mul|div|abs|arg|corr|mask|max|min|arg_max|arg_min :type operation: str :param missingvalue: missing value; by default it is the value from the file if defined, NAN otherwise (for float and double) :type missingvalue: float :param measure: new measure name :type measure: str :param container: name of the container to be used to store the output cube, by default it is the input container :type container: str :param description: additional description to be associated with the output cube :type description: str :param save: option to enable/disable 
JSON response saving on the server-side (default is yes) :type save: str :param display: option for displaying the response in a "pretty way" using the pretty_print function (default is False) :type display: bool :returns: new cube or None :rtype: Cube or None :raises: RuntimeError """ if Cube.client is None or ((self.pid is None or cube2 is None) and cubes is None): raise RuntimeError("Cube.client, pid, cube2 or cubes is None") newcube = None query = "oph_intercube " if ncores is not None: query += "ncores=" + str(ncores) + ";" if exec_mode is not None: query += "exec_mode=" + str(exec_mode) + ";" if cubes is not None: query += "cubes=" + str(cubes) + ";" else: if self.pid is not None: query += "cube=" + str(self.pid) + ";" if cube2 is not None: query += "cube2=" + str(cube2) + ";" if operation is not None: query += "operation=" + str(operation) + ";" if missingvalue is not None: query += "missingvalue=" + str(missingvalue) + ";" if measure is not None: query += "measure=" + str(measure) + ";" if schedule is not None: query += "schedule=" + str(schedule) + ";" if container is not None: query += "container=" + str(container) + ";" if description is not None: query += "description=" + str(description) + ";" if save is not None: query += "save=" + str(save) + ";" try: if Cube.client.submit(query, display) is None: raise RuntimeError() if Cube.client.last_response is not None: if Cube.client.cube: newcube = Cube(pid=Cube.client.cube) except Exception as e: print(get_linenumber(), "Something went wrong:", e) raise RuntimeError() else: return newcube def merge(self, ncores=1, exec_mode="sync", schedule=0, nmerge=0, container="-", description="-", save="yes", display=False): """merge(nmerge=0, schedule=0, description='-', container='-', exec_mode='sync', ncores=1, save='yes', display=False) -> Cube or None : wrapper of the operator OPH_MERGE :param ncores: number of cores to use :type ncores: int :param exec_mode: async or sync :type exec_mode: str :param schedule: 0 :type schedule: int :param nmerge: number of input fragments to merge in an output fragment, 0 for all :type nmerge: int :param container: name of the container to be used to store the output cube, by default it is the input container :type container: str :param description: additional description to be associated with the output cube :type description: str :param save: option to enable/disable JSON response saving on the server-side (default is yes) :type save: str :param display: option for displaying the response in a "pretty way" using the pretty_print function (default is False) :type display: bool :returns: new cube or None :rtype: Cube or None :raises: RuntimeError """ if Cube.client is None or self.pid is None: raise RuntimeError("Cube.client or pid is None") newcube = None query = "oph_merge " if ncores is not None: query += "ncores=" + str(ncores) + ";" if exec_mode is not None: query += "exec_mode=" + str(exec_mode) + ";" if schedule is not None: query += "schedule=" + str(schedule) + ";" if nmerge is not None: query += "nmerge=" + str(nmerge) + ";" if container is not None: query += "container=" + str(container) + ";" if description is not None: query += "description=" + str(description) + ";" if save is not None: query += "save=" + str(save) + ";" query += "cube=" + str(self.pid) + ";" try: if Cube.client.submit(query, display) is None: raise RuntimeError() if Cube.client.last_response is not None: if Cube.client.cube: newcube = Cube(pid=Cube.client.cube) except Exception as e: print(get_linenumber(), "Something went 
wrong:", e) raise RuntimeError() else: return newcube def metadata( self, mode="read", metadata_key="all", variable="global", metadata_id=0, metadata_type="text", metadata_value="-", variable_filter="all", metadata_type_filter="all", metadata_value_filter="all", force="no", exec_mode="sync", objkey_filter="all", save="yes", display=True, ): """metadata(mode='read', metadata_id=0, metadata_key='all', variable='global', metadata_type='text', metadata_value=None, variable_filter=None, metadata_type_filter=None, metadata_value_filter=None, force='no', exec_mode='sync', objkey_filter='all', save='yes', display=True) -> dict or None : wrapper of the operator OPH_METADATA :param mode: insert|read|update|delete :type mode: str :param metadata_id: id of the particular metadata instance to interact with :type metadata_id: int :param metadata_key: name of the key (or the enumeration of keys) identifying requested metadata :type metadata_key: str :param variable: name of the variable to which we can associate a new metadata key :type variable: str :param metadata_type: text|image|video|audio|url|double|float|long|int|short :type metadata_type: str :param metadata_value: string value to be assigned to specified metadata :type metadata_value: str :param variable_filter: filter on variable name :type variable_filter: str :param metadata_type_filter: filter on metadata type :type metadata_type_filter: str :param metadata_value_filter: filter on metadata value :type metadata_value_filter: str :param force: force update or deletion of functional metadata associated to a vocabulary, default is no :type force: str :param exec_mode: async or sync :type exec_mode: str :param save: option to enable/disable JSON response saving on the server-side (default is yes) :type save: str :param display: option for displaying the response in a "pretty way" using the pretty_print function (default is True) :type display: bool :returns: response or None :rtype: dict or None :raises: RuntimeError """ if Cube.client is None or self.pid is None: raise RuntimeError("Cube.client or pid is None") response = None query = "oph_metadata " if mode is not None: query += "mode=" + str(mode) + ";" if metadata_key is not None: query += "metadata_key=" + str(metadata_key) + ";" if variable is not None: query += "variable=" + str(variable) + ";" if metadata_id is not None: query += "metadata_id=" + str(metadata_id) + ";" if metadata_type is not None: query += "metadata_type=" + str(metadata_type) + ";" if metadata_value is not None: query += "metadata_value=" + str(metadata_value) + ";" if variable_filter is not None: query += "variable_filter=" + str(variable_filter) + ";" if metadata_type_filter is not None: query += "metadata_type_filter=" + str(metadata_type_filter) + ";" if metadata_value_filter is not None: query += "metadata_value_filter=" + str(metadata_value_filter) + ";" if force is not None: query += "force=" + str(force) + ";" if exec_mode is not None: query += "exec_mode=" + str(exec_mode) + ";" if objkey_filter is not None: query += "objkey_filter=" + str(objkey_filter) + ";" if save is not None: query += "save=" + str(save) + ";" query += "cube=" + str(self.pid) + ";" try: if Cube.client.submit(query, display) is None: raise RuntimeError() if Cube.client.last_response is not None and display is False: response = Cube.client.deserialize_response()["response"] except Exception as e: print(get_linenumber(), "Something went wrong:", e) raise RuntimeError() else: return response def permute(self, ncores=1, nthreads=1, 
exec_mode="sync", schedule=0, dim_pos=None, container="-", description="-", save="yes", display=False): """permute(dim_pos=None, container='-', exec_mode='sync', ncores=1, nthreads=1, schedule=0, description='-', save='yes', display=False) -> Cube or None : wrapper of the operator OPH_PERMUTE :param ncores: number of cores to use :type ncores: int :param nthreads: number of threads to use :type nthreads: int :param exec_mode: async or sync :type exec_mode: str :param schedule: 0 :type schedule: int :param dim_pos: permutation of implicit dimensions as a comma-separated list of dimension levels :type dim_pos: str :param container: name of the container to be used to store the output cube, by default it is the input container :type container: str :param description: additional description to be associated with the output cube :type description: str :param save: option to enable/disable JSON response saving on the server-side (default is yes) :type save: str :param display: option for displaying the response in a "pretty way" using the pretty_print function (default is False) :type display: bool :returns: new cube or None :rtype: Cube or None :raises: RuntimeError """ if Cube.client is None or self.pid is None or dim_pos is None: raise RuntimeError("Cube.client, pid or dim_pos is None") newcube = None query = "oph_permute " if ncores is not None: query += "ncores=" + str(ncores) + ";" if exec_mode is not None: query += "exec_mode=" + str(exec_mode) + ";" if schedule is not None: query += "schedule=" + str(schedule) + ";" if dim_pos is not None: query += "dim_pos=" + str(dim_pos) + ";" if container is not None: query += "container=" + str(container) + ";" if description is not None: query += "description=" + str(description) + ";" if nthreads is not None: query += "nthreads=" + str(nthreads) + ";" if save is not None: query += "save=" + str(save) + ";" query += "cube=" + str(self.pid) + ";" try: if Cube.client.submit(query, display) is None: raise RuntimeError() if Cube.client.last_response is not None: if Cube.client.cube: newcube = Cube(pid=Cube.client.cube) except Exception as e: print(get_linenumber(), "Something went wrong:", e) raise RuntimeError() else: return newcube def reduce( self, ncores=1, nthreads=1, exec_mode="sync", schedule=0, group_size="all", operation=None, order=2, missingvalue="-", grid="-", container="-", description="-", check_grid="no", save="yes", display=False, ): """reduce(operation=None, container=None, exec_mode='sync', missingvalue='-', grid='-', group_size='all', ncores=1, nthreads=1, schedule=0, order=2, description='-', objkey_filter='all', check_grid='no', save='yes', display=False) -> Cube or None : wrapper of the operator OPH_REDUCE :param ncores: number of cores to use :type ncores: int :param nthreads: number of threads to use :type nthreads: int :param exec_mode: async or sync :type exec_mode: str :param schedule: 0 :type schedule: int :param operation: count|max|min|avg|sum|std|var|cmoment|acmoment|rmoment|armoment|quantile|arg_max|arg_min :type operation: str :param order: order used in evaluation the moments or value of the quantile in range [0, 1] :type order: float :param missingvalue: missing value; by default it is the value from the file if defined, NAN otherwise (for float and double) :type missingvalue: float :param container: name of the container to be used to store the output cube, by default it is the input container :type container: str :param grid: optional argument used to identify the grid of dimensions to be used or the one to be 
created :type grid: str :param group_size: size of the aggregation set, all for the entire array :type group_size: int or str :param description: additional description to be associated with the output cube :type description: str :param check_grid: yes|no :type check_grid: str :param save: option to enable/disable JSON response saving on the server-side (default is yes) :type save: str :param display: option for displaying the response in a "pretty way" using the pretty_print function (default is False) :type display: bool :returns: new cube or None :rtype: Cube or None :raises: RuntimeError """ if Cube.client is None or self.pid is None or operation is None: raise RuntimeError("Cube.client, pid or operation is None") newcube = None query = "oph_reduce " if ncores is not None: query += "ncores=" + str(ncores) + ";" if exec_mode is not None: query += "exec_mode=" + str(exec_mode) + ";" if schedule is not None: query += "schedule=" + str(schedule) + ";" if group_size is not None: query += "group_size=" + str(group_size) + ";" if operation is not None: query += "operation=" + str(operation) + ";" if order is not None: query += "order=" + str(order) + ";" if missingvalue is not None: query += "missingvalue=" + str(missingvalue) + ";" if grid is not None: query += "grid=" + str(grid) + ";" if container is not None: query += "container=" + str(container) + ";" if description is not None: query += "description=" + str(description) + ";" if check_grid is not None: query += "check_grid=" + str(check_grid) + ";" if nthreads is not None: query += "nthreads=" + str(nthreads) + ";" if save is not None: query += "save=" + str(save) + ";" query += "cube=" + str(self.pid) + ";" try: if Cube.client.submit(query, display) is None: raise RuntimeError() if Cube.client.last_response is not None: if Cube.client.cube: newcube = Cube(pid=Cube.client.cube) except Exception as e: print(get_linenumber(), "Something went wrong:", e) raise RuntimeError() else: return newcube def reduce2( self, ncores=1, exec_mode="sync", schedule=0, dim=None, concept_level="A", midnight="24", operation=None, order=2, missingvalue="-", grid="-", container="-", description="-", nthreads=1, check_grid="no", save="yes", display=False, ): """reduce2(dim=None, operation=None, concept_level='A', container='-', exec_mode='sync', grid='-', midnight='24', order=2, missingvalue="-", description='-', schedule=0, ncores=1, nthreads=1, check_grid='no', save='yes', display=False) -> Cube or None : wrapper of the operator OPH_REDUCE2 :param ncores: number of cores to use :type ncores: int :param exec_mode: async or sync :type exec_mode: str :param schedule: 0 :type schedule: int :param dim: name of dimension on which the operation will be applied :type dim: str :param operation: count|max|min|avg|sum|std|var|cmoment|acmoment|rmoment|armoment|quantile|arg_max|arg_min :type operation: str :param concept_level: concept level inside the hierarchy used for the operation :type concept_level: str :param container: name of the container to be used to store the output cube, by default it is the input container :type container: str :param grid: optional argument used to identify the grid of dimensions to be used or the one to be created :type grid: str :param midnight: 00|24 :type midnight: str :param order: order used in evaluation the moments or value of the quantile in range [0, 1] :type order: float :param missingvalue: missing value; by default it is the value from the file if defined, NAN otherwise (for float and double) :type missingvalue: float :param 
description: additional description to be associated with the output cube :type description: str :param nthreads: number of threads to use :type nthreads: int :param check_grid: yes|no :type check_grid: str :param save: option to enable/disable JSON response saving on the server-side (default is yes) :type save: str :param display: option for displaying the response in a "pretty way" using the pretty_print function (default is False) :type display: bool :returns: new cube or None :rtype: Cube or None :raises: RuntimeError """ if Cube.client is None or self.pid is None or dim is None or operation is None: raise RuntimeError("Cube.client, pid, dim or operation is None") newcube = None query = "oph_reduce2 " if ncores is not None: query += "ncores=" + str(ncores) + ";" if exec_mode is not None: query += "exec_mode=" + str(exec_mode) + ";" if schedule is not None: query += "schedule=" + str(schedule) + ";" if dim is not None: query += "dim=" + str(dim) + ";" if concept_level is not None: query += "concept_level=" + str(concept_level) + ";" if midnight is not None: query += "midnight=" + str(midnight) + ";" if operation is not None: query += "operation=" + str(operation) + ";" if order is not None: query += "order=" + str(order) + ";" if missingvalue is not None: query += "missingvalue=" + str(missingvalue) + ";" if grid is not None: query += "grid=" + str(grid) + ";" if container is not None: query += "container=" + str(container) + ";" if description is not None: query += "description=" + str(description) + ";" if check_grid is not None: query += "check_grid=" + str(check_grid) + ";" if nthreads is not None: query += "nthreads=" + str(nthreads) + ";" if save is not None: query += "save=" + str(save) + ";" query += "cube=" + str(self.pid) + ";" try: if Cube.client.submit(query, display) is None: raise RuntimeError() if Cube.client.last_response is not None: if Cube.client.cube: newcube = Cube(pid=Cube.client.cube) except Exception as e: print(get_linenumber(), "Something went wrong:", e) raise RuntimeError() else: return newcube def rollup(self, ncores=1, nthreads=1, exec_mode="sync", schedule=0, ndim=1, container="-", description="-", save="yes", display=False): """rollup(ndim=1, container='-', exec_mode='sync', ncores=1, nthreads=1, schedule=0, description='-', save='yes', display=False) -> Cube or None : wrapper of the operator OPH_ROLLUP :param ncores: number of cores to use :type ncores: int :param nthreads: number of threads to use :type nthreads: int :param exec_mode: async or sync :type exec_mode: str :param schedule: 0 :type schedule: int :param ndim: number of explicit dimensions that will be transformed in implicit dimensions :type ndim: int :param container: name of the container to be used to store the output cube, by default it is the input container :type container: str :param description: additional description to be associated with the output cube :type description: str :param save: option to enable/disable JSON response saving on the server-side (default is yes) :type save: str :param display: option for displaying the response in a "pretty way" using the pretty_print function (default is False) :type display: bool :returns: new cube or None :rtype: Cube or None :raises: RuntimeError """ if Cube.client is None or self.pid is None: raise RuntimeError("Cube.client or pid is None") newcube = None query = "oph_rollup " if ncores is not None: query += "ncores=" + str(ncores) + ";" if exec_mode is not None: query += "exec_mode=" + str(exec_mode) + ";" if schedule is not None: query 
+= "schedule=" + str(schedule) + ";" if ndim is not None: query += "ndim=" + str(ndim) + ";" if container is not None: query += "container=" + str(container) + ";" if description is not None: query += "description=" + str(description) + ";" if nthreads is not None: query += "nthreads=" + str(nthreads) + ";" if save is not None: query += "save=" + str(save) + ";" query += "cube=" + str(self.pid) + ";" try: if Cube.client.submit(query, display) is None: raise RuntimeError() if Cube.client.last_response is not None: if Cube.client.cube: newcube = Cube(pid=Cube.client.cube) except Exception as e: print(get_linenumber(), "Something went wrong:", e) raise RuntimeError() else: return newcube def split(self, ncores=1, nthreads=1, exec_mode="sync", schedule=0, nsplit=2, container="-", description="-", save="yes", display=False): """split(nsplit=2, container='-', exec_mode='sync', ncores=1, nthreads=1, schedule=0, description='-', save='yes', display=False) -> Cube or None : wrapper of the operator OPH_SPLIT :param ncores: number of cores to use :type ncores: int :param nthreads: number of threads to use :type nthreads: int :param exec_mode: async or sync :type exec_mode: str :param schedule: 0 :type schedule: int :param nsplit: number of output fragments per input fragment :type nsplit: int :param container: name of the container to be used to store the output cube, by default it is the input container :type container: str :param description: additional description to be associated with the output cube :type description: str :param save: option to enable/disable JSON response saving on the server-side (default is yes) :type save: str :param display: option for displaying the response in a "pretty way" using the pretty_print function (default is False) :type display: bool :returns: new cube or None :rtype: Cube or None :raises: RuntimeError """ if Cube.client is None or self.pid is None or nsplit is None: raise RuntimeError("Cube.client, pid or nsplit is None") newcube = None query = "oph_split " if ncores is not None: query += "ncores=" + str(ncores) + ";" if exec_mode is not None: query += "exec_mode=" + str(exec_mode) + ";" if schedule is not None: query += "schedule=" + str(schedule) + ";" if nsplit is not None: query += "nsplit=" + str(nsplit) + ";" if container is not None: query += "container=" + str(container) + ";" if description is not None: query += "description=" + str(description) + ";" if nthreads is not None: query += "nthreads=" + str(nthreads) + ";" if save is not None: query += "save=" + str(save) + ";" query += "cube=" + str(self.pid) + ";" try: if Cube.client.submit(query, display) is None: raise RuntimeError() if Cube.client.last_response is not None: if Cube.client.cube: newcube = Cube(pid=Cube.client.cube) except Exception as e: print(get_linenumber(), "Something went wrong:", e) raise RuntimeError() else: return newcube def subset( self, ncores=1, nthreads=1, exec_mode="sync", schedule=0, subset_dims="none", subset_filter="all", subset_type="index", time_filter="yes", offset=0, grid="-", container="-", description="-", check_grid="no", save="yes", display=False, ): """subset(subset_dims='none', subset_filter='all', container='-', exec_mode='sync', subset_type='index', time_filter='yes', offset=0, grid='-', ncores=1, nthreads=1, schedule=0, description='-', check_grid='no', save='yes', display=False) -> Cube or None : wrapper of the operator OPH_SUBSET :param ncores: number of cores to use :type ncores: int :param nthreads: number of threads to use :type nthreads: int :param 
exec_mode: async or sync :type exec_mode: str :param schedule: 0 :type schedule: int :param subset_dims: pipe (|) separated list of dimensions on which to apply the subsetting :type subset_dims: str :param subset_filter: pipe (|) separated list of filters, one per dimension, composed of comma-separated microfilters on dimension indexes (e.g. 1,5,10:2:50) :type subset_filter: str :param container: name of the container to be used to store the output cube, by default it is the input container :type container: str :param subset_type: index|coord :type subset_type: str :param time_filter: yes|no :type time_filter: str :param offset: added to the bounds of subset intervals :type offset: int :param grid: optional argument used to identify the grid of dimensions to be used or the one to be created :type grid: str :param description: additional description to be associated with the output cube :type description: str :param check_grid: yes|no :type check_grid: str :param save: option to enable/disable JSON response saving on the server-side (default is yes) :type save: str :param display: option for displaying the response in a "pretty way" using the pretty_print function (default is False) :type display: bool :returns: new cube or None :rtype: Cube or None :raises: RuntimeError """ if Cube.client is None or self.pid is None: raise RuntimeError("Cube.client pid is None") newcube = None query = "oph_subset " if ncores is not None: query += "ncores=" + str(ncores) + ";" if exec_mode is not None: query += "exec_mode=" + str(exec_mode) + ";" if schedule is not None: query += "schedule=" + str(schedule) + ";" if subset_dims is not None: query += "subset_dims=" + str(subset_dims) + ";" if subset_filter is not None: query += "subset_filter=" + str(subset_filter) + ";" if subset_type is not None: query += "subset_type=" + str(subset_type) + ";" if time_filter is not None: query += "time_filter=" + str(time_filter) + ";" if offset is not None: query += "offset=" + str(offset) + ";" if grid is not None: query += "grid=" + str(grid) + ";" if container is not None: query += "container=" + str(container) + ";" if description is not None: query += "description=" + str(description) + ";" if check_grid is not None: query += "check_grid=" + str(check_grid) + ";" if nthreads is not None: query += "nthreads=" + str(nthreads) + ";" if save is not None: query += "save=" + str(save) + ";" query += "cube=" + str(self.pid) + ";" try: if Cube.client.submit(query, display) is None: raise RuntimeError() if Cube.client.last_response is not None: if Cube.client.cube: newcube = Cube(pid=Cube.client.cube) except Exception as e: print(get_linenumber(), "Something went wrong:", e) raise RuntimeError() else: return newcube def to_b2drop(self, cdd=None, auth_path="-", dst_path="-", ncores=1, export_metadata="yes"): """to_b2drop(cdd=None, auth_path='-', dst_path='-', ncores=1, export_metadata='yes') -> None : method that integrates the features of OPH_EXPORTNC2 and OPH_B2DROP operators to upload a cube to B2DROP as a NetCDF file :param cdd: absolute path corresponding to the current directory on data repository :type cdd: str :param auth_path: absolute path to the netrc file containing the B2DROP credentials :type auth_path: str :param dst_path: path where the file will be uploaded on B2DROP :type dst_path: str :param ncores: number of cores to use :type ncores: int :param export_metadata: yes|no :type export_metadata: str :param display: option for displaying the response in a "pretty way" using the pretty_print function (default is 
False) :type display: bool :returns: None :rtype: None :raises: RuntimeError """ if Cube.client is None or self.pid is None: raise RuntimeError("Cube.client or pid is None") response = None try: self.exportnc2(cdd=cdd, force="yes", output_path="local", export_metadata=export_metadata, ncores=ncores, display=False) file_path = "" if Cube.client.last_response is not None: response = Cube.client.deserialize_response() for response_i in response["response"]: if response_i["objclass"] == "text" and "title" in response_i["objcontent"][0] and response_i["objcontent"][0]["title"] == "Output File": file_path = response_i["objcontent"][0]["message"] break if not file_path: raise RuntimeError("Unable to export NetCDF file") Cube.b2drop(action="put", auth_path=auth_path, src_path=file_path, dst_path=dst_path, cdd="/", display=False) Cube.fs(command="rm", dpath=file_path, cdd="/", display=False) except Exception as e: print(get_linenumber(), "Something went wrong:", e) raise RuntimeError() def export_array(self, show_id="no", show_time="no", subset_dims=None, subset_filter=None, time_filter="no"): """export_array(show_id='no', show_time='no', subset_dims=None, subset_filter=None, time_filter='no') -> dict or None : return data from an Ophidia datacube into a Python structure :param show_id: yes|no :type show_id: str :param show_time: yes|no :type show_time: str :param subset_dims: pipe (|) separated list of dimensions on which to apply the subsetting :type subset_dims: str :param subset_filter: pipe (|) separated list of filters, one per dimension, composed of comma-separated microfilters (e.g. 1,5,10:2:50) :type subset_filter: str :param time_filter: yes|no :type time_filter: str :returns: data_values or None :rtype: dict or None :raises: RuntimeError """ if Cube.client is None or self.pid is None: raise RuntimeError("Cube.client or pid is None") response = None query = "oph_explorecube ncore=1;base64=yes;level=2;show_index=yes;subset_type=coord;limit_filter=0;save=no;" if time_filter is not None: query += "time_filter=" + str(time_filter) + ";" if show_id is not None: query += "show_id=" + str(show_id) + ";" if show_time is not None: query += "show_time=" + str(show_time) + ";" if subset_dims is not None: query += "subset_dims=" + str(subset_dims) + ";" if subset_filter is not None: query += "subset_filter=" + str(subset_filter) + ";" query += "cube=" + str(self.pid) + ";" try: if Cube.client.submit(query, display=False) is None: raise RuntimeError() if Cube.client.last_response is not None: response = Cube.client.deserialize_response() except Exception as e: print(get_linenumber(), "Something went wrong:", e) raise RuntimeError() def get_unpack_format(element_num, output_type): if output_type == "float": format = str(element_num) + "f" elif output_type == "double": format = str(element_num) + "d" elif output_type == "int": format = str(element_num) + "i" elif output_type == "long": format = str(element_num) + "l" elif output_type == "short": format = str(element_num) + "h" elif output_type == "char": format = str(element_num) + "c" else: raise RuntimeError("The value type is not valid") return format def calculate_decoded_length(decoded_string, output_type): if output_type == "float" or output_type == "int": num = int(float(len(decoded_string)) / float(4)) elif output_type == "double" or output_type == "long": num = int(float(len(decoded_string)) / float(8)) elif output_type == "short": num = int(float(len(decoded_string)) / float(2)) elif output_type == "char": num = int(float(len(decoded_string)) 
/ float(1)) else: raise RuntimeError("The value type is not valid") return num data_values = {} data_values["measure"] = {} # Get dimensions adimCube = True try: dimensions = [] for response_i in response["response"]: if response_i["objkey"] == "explorecube_dimvalues": data_values["dimension"] = {} adimCube = False for response_j in response_i["objcontent"]: if response_j["title"] and response_j["rowfieldtypes"] and response_j["rowfieldtypes"][1] and response_j["rowvalues"]: curr_dim = {} curr_dim["name"] = response_j["title"] # Append actual values dim_array = [] # Special case for time if show_time == "yes" and response_j["title"] == "time": for val in response_j["rowvalues"]: dims = [s.strip() for s in val[1].split(",")] for v in dims: dim_array.append(v) else: for val in response_j["rowvalues"]: decoded_bin = base64.b64decode(val[1]) length = calculate_decoded_length(decoded_bin, response_j["rowfieldtypes"][1]) format = get_unpack_format(length, response_j["rowfieldtypes"][1]) dims = struct.unpack(format, decoded_bin) for v in dims: dim_array.append(v) curr_dim["values"] = dim_array dimensions.append(curr_dim) else: raise RuntimeError("Unable to get dimension name or values in response") dim_num = len(dimensions) if dim_num == 0: raise RuntimeError("No dimension found") data_values["dimension"] = dimensions break except Exception as e: print(get_linenumber(), "Unable to get dimensions from response:", e) return None # Read values try: measures = [] for response_i in response["response"]: if response_i["objkey"] == "explorecube_data": for response_j in response_i["objcontent"]: if response_j["title"] and response_j["rowkeys"] and response_j["rowfieldtypes"] and response_j["rowvalues"]: curr_mes = {} measure_name = "" measure_index = 0 if not adimCube: # Check that implicit dimension is just one if dim_num - (len(response_j["rowkeys"]) - 1) / 2.0 > 1: raise RuntimeError("More than one implicit dimension") for i, t in enumerate(response_j["rowkeys"]): if response_j["title"] == t: measure_name = t measure_index = i break if measure_index == 0: raise RuntimeError("Unable to get measure name in response") curr_mes["name"] = measure_name # Append actual values measure_value = [] for val in response_j["rowvalues"]: decoded_bin = base64.b64decode(val[measure_index]) length = calculate_decoded_length(decoded_bin, response_j["rowfieldtypes"][measure_index]) format = get_unpack_format(length, response_j["rowfieldtypes"][measure_index]) measure = struct.unpack(format, decoded_bin) curr_line = [] for v in measure: curr_line.append(v) measure_value.append(curr_line) curr_mes["values"] = measure_value measures.append(curr_mes) else: raise RuntimeError("Unable to get measure values in response") break break measure_num = len(measures) if measure_num == 0: raise RuntimeError("No measure found") data_values["measure"] = measures except Exception as e: print(get_linenumber(), "Unable to get measure from response:", e) return None else: return data_values def __str__(self): buf = "-" * 30 + "\n" buf += "%30s: %s" % ("Cube", self.pid) + "\n" buf += "-" * 30 + "\n" buf += "%30s: %s" % ("Creation Date", self.creation_date) + "\n" buf += "%30s: %s (%s)" % ("Measure (type)", self.measure, self.measure_type) + "\n" buf += "%30s: %s" % ("Source file", self.source_file) + "\n" buf += "%30s: %s" % ("Level", self.level) + "\n" if self.compressed == "yes": buf += "%30s: %s (%s)" % ("Size", self.size, "compressed") + "\n" else: buf += "%30s: %s (%s)" % ("Size", self.size, "not compressed") + "\n" buf += "%30s: %s" % 
("Num. of elements", self.nelements) + "\n" buf += "%30s: %s" % ("Num. of fragments", self.nfragments) + "\n" buf += "-" * 30 + "\n" buf += "%30s: %s" % ("Num. of hosts", self.hostxcube) + "\n" buf += "%30s: %s (%s)" % ("Num. of fragments/DB (total)", self.fragxdb, int(self.fragxdb) * int(self.hostxcube)) + "\n" buf += "%30s: %s (%s)" % ("Num. of rows/fragment (total)", self.rowsxfrag, int(self.rowsxfrag) * int(self.fragxdb) * int(self.hostxcube)) + "\n" buf += "%30s: %s (%s)" % ("Num. of elements/row (total)", self.elementsxrow, int(self.elementsxrow) * int(self.rowsxfrag) * int(self.fragxdb) * int(self.hostxcube)) + "\n" buf += "-" * 127 + "\n" buf += "%15s %15s %15s %15s %15s %15s %15s %15s" % ("Dimension", "Data type", "Size", "Hierarchy", "Concept level", "Array", "Level", "Lattice name") + "\n" buf += "-" * 127 + "\n" for dim in self.dim_info: buf += "%15s %15s %15s %15s %15s %15s %15s %15s" % (dim["name"], dim["type"], dim["size"], dim["hierarchy"], dim["concept_level"], dim["array"], dim["level"], dim["lattice_name"]) + "\n" buf += "-" * 127 + "\n" return buf
OphidiaBigData/PyOphidia
PyOphidia/cube.py
Python
gpl-3.0
248,867
[ "NetCDF" ]
84e19c1c109103c4d710767317ea1f2805c41d58aa8b32bb245f683e19f2abbf
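Cube.export_array() in the file above returns a plain dict with "dimension" and "measure" lists, each entry carrying a "name" and a "values" list. Below is a minimal post-processing sketch, assuming that layout, of how such a result could be reshaped into a NumPy array; the sample dict is made up (not real Ophidia output) and NumPy is an extra dependency chosen for the example.

# Hypothetical post-processing of the dict layout produced by Cube.export_array above:
# {"dimension": [{"name": ..., "values": [...]}, ...],
#  "measure":   [{"name": ..., "values": [[...], ...]}, ...]}
import numpy as np

sample = {                                   # made-up sample, not real Ophidia output
    "dimension": [
        {"name": "lat",  "values": [10.0, 20.0]},
        {"name": "time", "values": [0, 1, 2]},
    ],
    "measure": [
        {"name": "tas", "values": [[280.1, 281.0, 279.5],
                                   [275.3, 276.2, 274.9]]},
    ],
}

def to_numpy(data):
    """Reshape the first measure to (dim_1, ..., dim_n) using the dimension sizes."""
    shape = tuple(len(d["values"]) for d in data["dimension"])
    values = np.asarray(data["measure"][0]["values"], dtype=float)
    return values.reshape(shape)

if __name__ == "__main__":
    arr = to_numpy(sample)
    print(arr.shape)   # (2, 3)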
############################################################################## # # Copyright (c) 2002 Zope Foundation and Contributors. # # This software is subject to the provisions of the Zope Public License, # Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution. # THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED # WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED # WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS # FOR A PARTICULAR PURPOSE # ############################################################################## """Compiles restricted code using the compiler module from the Python standard library. """ __version__='$Revision: 1.6 $'[11:-2] from compiler import ast, parse, misc, syntax, pycodegen from compiler.pycodegen import AbstractCompileMode, Expression, \ Interactive, Module, ModuleCodeGenerator, FunctionCodeGenerator, findOp import MutatingWalker from RestrictionMutator import RestrictionMutator def niceParse(source, filename, mode): if isinstance(source, unicode): # Use the utf-8-sig BOM so the compiler # detects this as a UTF-8 encoded string. source = '\xef\xbb\xbf' + source.encode('utf-8') try: return parse(source, mode) except: # Try to make a clean error message using # the builtin Python compiler. try: compile(source, filename, mode) except SyntaxError: raise # Some other error occurred. raise class RestrictedCompileMode(AbstractCompileMode): """Abstract base class for hooking up custom CodeGenerator.""" # See concrete subclasses below. def __init__(self, source, filename): if source: source = '\n'.join(source.splitlines()) + '\n' self.rm = RestrictionMutator() AbstractCompileMode.__init__(self, source, filename) def parse(self): return niceParse(self.source, self.filename, self.mode) def _get_tree(self): tree = self.parse() MutatingWalker.walk(tree, self.rm) if self.rm.errors: raise SyntaxError, self.rm.errors[0] misc.set_filename(self.filename, tree) syntax.check(tree) return tree def compile(self): tree = self._get_tree() gen = self.CodeGeneratorClass(tree) self.code = gen.getCode() def compileAndTuplize(gen): try: gen.compile() except SyntaxError, v: return None, (str(v),), gen.rm.warnings, gen.rm.used_names return gen.getCode(), (), gen.rm.warnings, gen.rm.used_names def compile_restricted_function(p, body, name, filename, globalize=None): """Compiles a restricted code object for a function. The function can be reconstituted using the 'new' module: new.function(<code>, <globals>) The globalize argument, if specified, is a list of variable names to be treated as globals (code is generated as if each name in the list appeared in a global statement at the top of the function). 
""" gen = RFunction(p, body, name, filename, globalize) return compileAndTuplize(gen) def compile_restricted_exec(s, filename='<string>'): """Compiles a restricted code suite.""" gen = RModule(s, filename) return compileAndTuplize(gen) def compile_restricted_eval(s, filename='<string>'): """Compiles a restricted expression.""" gen = RExpression(s, filename) return compileAndTuplize(gen) def compile_restricted(source, filename, mode): """Replacement for the builtin compile() function.""" if mode == "single": gen = RInteractive(source, filename) elif mode == "exec": gen = RModule(source, filename) elif mode == "eval": gen = RExpression(source, filename) else: raise ValueError("compile_restricted() 3rd arg must be 'exec' or " "'eval' or 'single'") gen.compile() return gen.getCode() class RestrictedCodeGenerator: """Mixin for CodeGenerator to replace UNPACK_SEQUENCE bytecodes. The UNPACK_SEQUENCE opcode is not safe because it extracts elements from a sequence without using a safe iterator or making __getitem__ checks. This code generator replaces use of UNPACK_SEQUENCE with calls to a function that unpacks the sequence, performes the appropriate security checks, and returns a simple list. """ # Replace the standard code generator for assignments to tuples # and lists. def _gen_safe_unpack_sequence(self, num): # We're at a place where UNPACK_SEQUENCE should be generated, to # unpack num items. That's a security hole, since it exposes # individual items from an arbitrary iterable. We don't remove # the UNPACK_SEQUENCE, but instead insert a call to our _getiter_() # wrapper first. That applies security checks to each item as # it's delivered. codegen is (just) a bit messy because the # iterable is already on the stack, so we have to do a stack swap # to get things in the right order. self.emit('LOAD_GLOBAL', '_getiter_') self.emit('ROT_TWO') self.emit('CALL_FUNCTION', 1) self.emit('UNPACK_SEQUENCE', num) def _visitAssSequence(self, node): if findOp(node) != 'OP_DELETE': self._gen_safe_unpack_sequence(len(node.nodes)) for child in node.nodes: self.visit(child) visitAssTuple = _visitAssSequence visitAssList = _visitAssSequence # Call to generate code for unpacking nested tuple arguments # in function calls. def unpackSequence(self, tup): self._gen_safe_unpack_sequence(len(tup)) for elt in tup: if isinstance(elt, tuple): self.unpackSequence(elt) else: self._nameOp('STORE', elt) # A collection of code generators that adds the restricted mixin to # handle unpacking for all the different compilation modes. They # are defined here (at the end) so that can refer to RestrictedCodeGenerator. class RestrictedFunctionCodeGenerator(RestrictedCodeGenerator, pycodegen.FunctionCodeGenerator): pass class RestrictedExpressionCodeGenerator(RestrictedCodeGenerator, pycodegen.ExpressionCodeGenerator): pass class RestrictedInteractiveCodeGenerator(RestrictedCodeGenerator, pycodegen.InteractiveCodeGenerator): pass class RestrictedModuleCodeGenerator(RestrictedCodeGenerator, pycodegen.ModuleCodeGenerator): def initClass(self): ModuleCodeGenerator.initClass(self) self.__class__.FunctionGen = RestrictedFunctionCodeGenerator # These subclasses work around the definition of stub compile and mode # attributes in the common base class AbstractCompileMode. If it # didn't define new attributes, then the stub code inherited via # RestrictedCompileMode would override the real definitions in # Expression. 
class RExpression(RestrictedCompileMode, Expression): mode = "eval" CodeGeneratorClass = RestrictedExpressionCodeGenerator class RInteractive(RestrictedCompileMode, Interactive): mode = "single" CodeGeneratorClass = RestrictedInteractiveCodeGenerator class RModule(RestrictedCompileMode, Module): mode = "exec" CodeGeneratorClass = RestrictedModuleCodeGenerator class RFunction(RModule): """A restricted Python function built from parts.""" CodeGeneratorClass = RestrictedModuleCodeGenerator def __init__(self, p, body, name, filename, globals): self.params = p if body: body = '\n'.join(body.splitlines()) + '\n' self.body = body self.name = name self.globals = globals or [] RModule.__init__(self, None, filename) def parse(self): # Parse the parameters and body, then combine them. firstline = 'def f(%s): pass' % self.params tree = niceParse(firstline, '<function parameters>', 'exec') f = tree.node.nodes[0] body_code = niceParse(self.body, self.filename, 'exec') # Stitch the body code into the function. f.code.nodes = body_code.node.nodes f.name = self.name # Look for a docstring, if there are any nodes at all if len(f.code.nodes) > 0: stmt1 = f.code.nodes[0] if (isinstance(stmt1, ast.Discard) and isinstance(stmt1.expr, ast.Const) and isinstance(stmt1.expr.value, str)): f.doc = stmt1.expr.value # The caller may specify that certain variables are globals # so that they can be referenced before a local assignment. # The only known example is the variables context, container, # script, traverse_subpath in PythonScripts. if self.globals: f.code.nodes.insert(0, ast.Global(self.globals)) return tree
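compile_restricted() above mirrors the builtin compile() signature, and the restricted code generator routes sequence unpacking through a _getiter_ global that must be available when the code object runs. A rough usage sketch under those assumptions follows; the import path and the no-op guard are illustrative only (real deployments use the guard implementations shipped with RestrictedPython), and because the module builds on the legacy compiler package it only runs on Python 2.

# Rough usage sketch (Python 2 only, since RCompile.py relies on the old
# 'compiler' package). The import path and the trivial guard are assumptions.
from RCompile import compile_restricted   # hypothetical import path


def _getiter_(obj):
    # A real guard would vet the object before allowing iteration over it.
    return iter(obj)


source = "a, b = pair\nresult = a + b\n"
code = compile_restricted(source, '<sketch>', 'exec')

namespace = {'_getiter_': _getiter_, 'pair': (1, 2)}
exec(code, namespace)          # the unpacking goes through _getiter_ first
print(namespace['result'])     # 3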
tempson-py/tempson
tempson/RestrictedPython/RCompile.py
Python
mit
9,077
[ "VisIt" ]
b0533dab6db3c6966b7da371d76cdcd6fb966fc46745c30451ec154f3f36de2a
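The _gen_safe_unpack_sequence() hook in the file above inserts a call to _getiter_() in front of every UNPACK_SEQUENCE, so a guard sees the sequence before any element is handed out. The snippet below is a plain-Python analogue of that idea, not the bytecode rewrite itself; the list/tuple-only policy is an arbitrary choice for the demo.

# Plain-Python analogue of the UNPACK_SEQUENCE guard described above.
def _getiter_(obj):
    """Toy policy: only lists and tuples may be unpacked (demo assumption)."""
    if not isinstance(obj, (list, tuple)):
        raise TypeError("iteration over %r is not allowed" % type(obj).__name__)
    return iter(obj)


def guarded_unpack(seq, n):
    """Conceptually what 'a, b = seq' becomes: unpack n items from _getiter_(seq)."""
    it = _getiter_(seq)
    return [next(it) for _ in range(n)]


a, b = guarded_unpack((1, 2), 2)
print(a + b)                     # 3

try:
    guarded_unpack({1: 'x', 2: 'y'}, 2)
except TypeError as exc:
    print(exc)                   # iteration over 'dict' is not allowed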
# vi: ts=8 sts=4 sw=4 et
#
# visit.py: form visitor
#
# This file is part of Draco2. Draco2 is free software and is made available
# under the MIT license. Consult the file "LICENSE" that is distributed
# together with this file for the exact licensing terms.
#
# Draco2 is copyright (c) 1999-2007 by the Draco2 authors. See the file
# "AUTHORS" for a complete overview.
#
# $Revision: 1187 $


class FormVisitor(object):
    """Form Visitor."""

    def visit(self, form):
        """Visit all nodes in a form."""
        for co in form.inputs:
            self.visit_control(co)
        for co in form.outputs:
            self.visit_control(co)
        self.visit_form(form)

    def visit_control(self, control):
        pass

    def visit_form(self, form):
        pass
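FormVisitor above feeds every input and output control to visit_control() and finishes with visit_form(). Below is a small sketch of a concrete visitor; StubControl and StubForm are made-up stand-ins for the real draco2 form classes, which are not shown here, and ControlCounter assumes the FormVisitor class defined above is in scope.

# Made-up stand-ins for draco2 form objects, just enough for FormVisitor.visit().
class StubControl(object):
    def __init__(self, name):
        self.name = name


class StubForm(object):
    def __init__(self, inputs, outputs):
        self.inputs = inputs
        self.outputs = outputs


class ControlCounter(FormVisitor):
    """Concrete visitor that counts the controls it is shown."""

    def __init__(self):
        self.count = 0

    def visit_control(self, control):
        self.count += 1

    def visit_form(self, form):
        print("form has %d controls" % self.count)


form = StubForm([StubControl("username"), StubControl("password")],
                [StubControl("error_message")])
ControlCounter().visit(form)     # form has 3 controls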
geertj/draco2
draco2/form/visit.py
Python
mit
776
[ "VisIt" ]
343f75cacaf58ffa4e4a668ca19f26c2f5493b147e1db6470910619a3b58831d
""" Given a formula in conjunctive normal form (2-CNF), finds a way to assign True/False values to all variables to satisfy all clauses, or reports there is no solution. https://en.wikipedia.org/wiki/2-satisfiability Format: - each clause is a pair of literals - each literal in the form (name, is_neg) where name is an arbitrary identifier, and is_neg is true if the literal is negated """ def dfs_transposed(vertex, graph, order, visited): """ Perform a depth first search traversal of the graph starting at the given vertex. Stores the order in which nodes were visited to the list, in transposed order. """ visited[vertex] = True for adjacent in graph[vertex]: if not visited[adjacent]: dfs_transposed(adjacent, graph, order, visited) order.append(vertex) def dfs(vertex, current_comp, vertex_scc, graph, visited): """ Perform a depth first search traversal of the graph starting at the given vertex. Records all visited nodes as being of a certain strongly connected component. """ visited[vertex] = True vertex_scc[vertex] = current_comp for adjacent in graph[vertex]: if not visited[adjacent]: dfs(adjacent, current_comp, vertex_scc, graph, visited) def add_edge(graph, vertex_from, vertex_to): """ Add a directed edge to the graph. """ if vertex_from not in graph: graph[vertex_from] = [] graph[vertex_from].append(vertex_to) def scc(graph): ''' Computes the strongly connected components of a graph ''' order = [] visited = {vertex: False for vertex in graph} graph_transposed = {vertex: [] for vertex in graph} for (source, neighbours) in graph.iteritems(): for target in neighbours: add_edge(graph_transposed, target, source) for vertex in graph: if not visited[vertex]: dfs_transposed(vertex, graph_transposed, order, visited) visited = {vertex: False for vertex in graph} vertex_scc = {} current_comp = 0 for vertex in reversed(order): if not visited[vertex]: # Each dfs will visit exactly one component dfs(vertex, current_comp, vertex_scc, graph, visited) current_comp += 1 return vertex_scc def build_graph(formula): ''' Builds the implication graph from the formula ''' graph = {} for clause in formula: for (lit, _) in clause: for neg in [False, True]: graph[(lit, neg)] = [] for ((a_lit, a_neg), (b_lit, b_neg)) in formula: add_edge(graph, (a_lit, a_neg), (b_lit, not b_neg)) add_edge(graph, (b_lit, b_neg), (a_lit, not a_neg)) return graph def solve_sat(formula): """ Solves the 2-SAT problem """ graph = build_graph(formula) vertex_scc = scc(graph) for (var, _) in graph: if vertex_scc[(var, False)] == vertex_scc[(var, True)]: return None # The formula is contradictory comp_repr = {} # An arbitrary representant from each component for vertex in graph: if not vertex_scc[vertex] in comp_repr: comp_repr[vertex_scc[vertex]] = vertex comp_value = {} # True/False value for each strongly connected component components = sorted(vertex_scc.values()) for comp in components: if comp not in comp_value: comp_value[comp] = False (lit, neg) = comp_repr[comp] comp_value[vertex_scc[(lit, not neg)]] = True value = {var: comp_value[vertex_scc[(var, False)]] for (var, _) in graph} return value def main(): """ Entry point for testing """ formula = [(('x', False), ('y', False)), (('y', True), ('y', True)), (('a', False), ('b', False)), (('a', True), ('c', True)), (('c', False), ('b', True))] result = solve_sat(formula) for (variable, assign) in result.items(): print(f"{variable}:{assign}") if __name__ == '__main__': main()
keon/algorithms
algorithms/graph/satisfiability.py
Python
mit
4,076
[ "VisIt" ]
48a63448e6dcd1ef2d0fada769c1c808664db1c5244bdbddffd76b26f00f34cc
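For each clause ((a_lit, a_neg), (b_lit, b_neg)), build_graph() in the file above adds two directed edges, each running from a literal as written to the negation of the other literal; this is the transpose of the textbook ¬a → b and ¬b → a orientation, and since a graph and its transpose share the same strongly connected components, the contradiction check is unaffected. A tiny illustration of the edges produced for one clause follows.

# Edges that build_graph() above contributes for a single clause,
# shown here for the clause (x OR NOT y).
clause = (('x', False), ('y', True))
(a_lit, a_neg), (b_lit, b_neg) = clause

edges = [((a_lit, a_neg), (b_lit, not b_neg)),
         ((b_lit, b_neg), (a_lit, not a_neg))]

for src, dst in edges:
    print(src, "->", dst)
# ('x', False) -> ('y', False)
# ('y', True) -> ('x', True)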
############################################################################## # MDTraj: A Python Library for Loading, Saving, and Manipulating # Molecular Dynamics Trajectories. # Copyright 2012-2014 Stanford University and the Authors # # Authors: Peter Eastman, Robert McGibbon # Contributors: Kyle A. Beauchamp, Matthew Harrigan, Carlos Xavier Hernandez # # MDTraj is free software: you can redistribute it and/or modify # it under the terms of the GNU Lesser General Public License as # published by the Free Software Foundation, either version 2.1 # of the License, or (at your option) any later version. # # This library is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public # License along with MDTraj. If not, see <http://www.gnu.org/licenses/>. # # Portions of this code originate from the OpenMM molecular simulation # toolkit, copyright (c) 2012 Stanford University and Peter Eastman. Those # portions are distributed under the following terms: # # Permission is hereby granted, free of charge, to any person obtaining a # copy of this software and associated documentation files (the "Software"), # to deal in the Software without restriction, including without limitation # the rights to use, copy, modify, merge, publish, distribute, sublicense, # and/or sell copies of the Software, and to permit persons to whom the # Software is furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in # all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL # THE AUTHORS, CONTRIBUTORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, # DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR # OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE # USE OR OTHER DEALINGS IN THE SOFTWARE. ############################################################################## ############################################################################## # Imports ############################################################################## from __future__ import print_function, division import itertools import numpy as np import os import xml.etree.ElementTree as etree from collections import namedtuple from mdtraj.core import element as elem from mdtraj.core.residue_names import (_PROTEIN_RESIDUES, _WATER_RESIDUES, _AMINO_ACID_CODES) from mdtraj.core.selection import parse_selection from mdtraj.utils import ilen, import_, ensure_type from mdtraj.utils.six import string_types from mdtraj.utils.singleton import Singleton ############################################################################## # Utilities ############################################################################## def _topology_from_subset(topology, atom_indices): """Create a new topology that only contains the supplied indices Note ---- This really should be a copy constructor (class method) on Topology. It used to work on OpenMM topologies, but we've diverged where that no longer works. 
Parameters ---------- topology : mdtraj.Topology The base topology atom_indices : array_like, dtype=int The indices of the atoms to keep """ newTopology = Topology() old_atom_to_new_atom = {} for chain in topology._chains: newChain = newTopology.add_chain() for residue in chain._residues: resSeq = getattr(residue, 'resSeq', None) or residue.index newResidue = None for atom in residue._atoms: if atom.index in atom_indices: try: # OpenMM Topology objects don't have serial attributes, so we have to check first. serial = atom.serial except AttributeError: serial = None if newResidue is None: newResidue = newTopology.add_residue(residue.name, newChain, resSeq, residue.segment_id) newAtom = newTopology.add_atom(atom.name, atom.element, newResidue, serial=serial) old_atom_to_new_atom[atom] = newAtom bondsiter = topology.bonds if not hasattr(bondsiter, '__iter__'): bondsiter = bondsiter() for bond in bondsiter: try: atom1, atom2 = bond newTopology.add_bond(old_atom_to_new_atom[atom1], old_atom_to_new_atom[atom2], type=bond.type, order=bond.order) except KeyError: pass # we only put bonds into the new topology if both of their partners # were indexed and thus HAVE a new atom # Delete empty residues newTopology._residues = [r for r in newTopology._residues if len(r._atoms) > 0] for chain in newTopology._chains: chain._residues = [r for r in chain._residues if len(r._atoms) > 0] # Delete empty chains newTopology._chains = [c for c in newTopology._chains if len(c._residues) > 0] # Re-set the numAtoms and numResidues newTopology._numAtoms = ilen(newTopology.atoms) newTopology._numResidues = ilen(newTopology.residues) # Reset the chain indices for i, chain in enumerate(newTopology.chains): chain.index = i # Reset the residue indices for i, res in enumerate(newTopology.residues): res.index = i return newTopology ############################################################################## # Classes ############################################################################## class Topology(object): """Topology stores the topological information about a system. The structure of a Topology object is similar to that of a PDB file. It consists of a set of Chains (often but not always corresponding to polymer chains). Each Chain contains a set of Residues, and each Residue contains a set of Atoms. In addition, the Topology stores a list of which atom pairs are bonded to each other. Atom and residue names should follow the PDB 3.0 nomenclature for all molecules for which one exists. Attributes ---------- chains : generator Iterator over all Chains in the Topology. residues : generator Iterator over all Residues in the Chain. atoms : generator Iterator over all Atoms in the Chain. 
bonds : generator Iterator over all Bonds in the Topology Examples -------- >>> topology = md.load('example.pdb').topology >>> print(topology) <mdtraj.Topology with 1 chains, 3 residues, 22 atoms, 21 bonds at 0x105a98e90> >>> table, bonds = topology.to_dataframe() >>> print(table.head()) serial name element resSeq resName chainID 0 0 H1 H 1 CYS 0 1 1 CH3 C 1 CYS 0 2 2 H2 H 1 CYS 0 3 3 H3 H 1 CYS 0 4 4 C C 1 CYS 0 >>> # rename residue "CYS" to "CYSS" >>> table[table['residue'] == 'CYS']['residue'] = 'CYSS' >>> print(table.head()) serial name element resSeq resName chainID 0 0 H1 H 1 CYSS 0 1 1 CH3 C 1 CYSS 0 2 2 H2 H 1 CYSS 0 3 3 H3 H 1 CYSS 0 4 4 C C 1 CYSS 0 >>> t2 = md.Topology.from_dataframe(table, bonds) """ _standardBonds = {} def __init__(self): """Create a new Topology object""" self._chains = [] self._numResidues = 0 self._numAtoms = 0 self._bonds = [] self._atoms = [] self._residues = [] def __ne__(self, other): return not self.__eq__(other) def __str__(self): return "<%s>" % (self._string_summary_basic()) def __repr__(self): return "<%s at 0x%02x>" % (self._string_summary_basic(), id(self)) def _string_summary_basic(self): return ("mdtraj.Topology with %d chains, %d residues, " "%d atoms, %d bonds" % (self.n_chains, self.n_residues, self.n_atoms, len(self._bonds))) def copy(self): """Return a copy of the topology Returns ------- out : Topology A copy of this topology """ out = Topology() for chain in self.chains: c = out.add_chain() for residue in chain.residues: r = out.add_residue(residue.name, c, residue.resSeq, residue.segment_id) for atom in residue.atoms: out.add_atom(atom.name, atom.element, r, serial=atom.serial) for bond in self.bonds: a1, a2 = bond out.add_bond(a1, a2, type=bond.type, order=bond.order) return out def __copy__(self, *args): return self.copy() def __deepcopy__(self, *args): return self.copy() def __hash__(self): hash_value = hash(tuple(self._chains)) hash_value ^= hash(tuple(self._atoms)) hash_value ^= hash(tuple(self._bonds)) hash_value ^= hash(tuple(self._residues)) return hash_value def join(self, other, keep_resSeq=True): """Join two topologies together Parameters ---------- other : Topology Another topology object keep_resSeq : bool, optional, default=True if False the residue numbers (resSeq) of the topology that is joined are updated in order to continue from the last resSeq of this topology Returns ------- out : Topology A joint topology, with all of the atoms/residues/chains/bonds in each of the individual topologies """ if not isinstance(other, Topology): raise ValueError('other must be an instance of Topology to join') out = self.copy() #I need this in order to have the resSeq of the #new residues to continue from the one of the #las residue of this topology if not keep_resSeq: out_resSeq = out.atom(-1).residue.resSeq atom_mapping = {} for chain in other.chains: c = out.add_chain() for residue in chain.residues: if keep_resSeq: out_resSeq = residue.resSeq else: out_resSeq += 1 r = out.add_residue(residue.name, c, out_resSeq, residue.segment_id) for atom in residue.atoms: a = out.add_atom(atom.name, atom.element, r, serial=atom.serial) atom_mapping[atom] = a for bond in other.bonds: a1, a2 = bond out.add_bond(atom_mapping[a1], atom_mapping[a2], type=bond.type, order=bond.order) return out def to_fasta(self, chain=None): """Convert this topology into FASTA string Parameters ---------- chain : Integer, optional, default=None If specified, will return the FASTA string for this chain in the Topology. 
Returns ------- fasta : String or list of Strings A FASTA string for each chain specified. """ fasta = lambda c: "".join([res.code for res in c.residues if res.is_protein and res.code is not None]) if chain is not None: if not isinstance(chain, int): raise ValueError('chain must be an Integer.') return fasta(self._chains[chain]) else: return [fasta(c) for c in self._chains] def to_openmm(self, traj=None): """Convert this topology into OpenMM topology Parameters ---------- traj : MDTraj.Trajectory, optional, default=None If specified, use the first frame from this trajectory to set the unitcell information in the openmm topology. Returns ------- topology : simtk.openmm.app.Topology This topology, as an OpenMM topology """ app = import_('simtk.openmm.app') mm = import_('simtk.openmm') u = import_('simtk.unit') out = app.Topology() atom_mapping = {} bond_mapping = {Single: app.Single, Double: app.Double, Triple: app.Triple, Amide: app.Amide, Aromatic: app.Aromatic, None: None} for chain in self.chains: c = out.addChain() for residue in chain.residues: r = out.addResidue(residue.name, c, id=str(residue.resSeq)) for atom in residue.atoms: if atom.element is elem.virtual: element = None else: element = app.Element.getBySymbol(atom.element.symbol) a = out.addAtom(atom.name, element, r) atom_mapping[atom] = a for bond in self.bonds: a1, a2 = bond out.addBond(atom_mapping[a1], atom_mapping[a2], type=bond_mapping[bond.type], order=bond.order) if traj is not None: angles = traj.unitcell_angles[0] if np.linalg.norm(angles - 90.0) > 1E-4: raise(ValueError("Unitcell angles must be 90.0 to use " "in OpenMM topology.")) box_vectors = mm.Vec3(*traj.unitcell_lengths[0]) * u.nanometer out.setUnitCellDimensions(box_vectors) return out @classmethod def from_openmm(cls, value): """Create a mdtraj topology from an OpenMM topology Parameters ---------- value : simtk.openmm.app.Topology An OpenMM topology that you wish to convert to a mdtraj topology. """ app = import_('simtk.openmm.app') bond_mapping = {app.Single: Single, app.Double: Double, app.Triple: Triple, app.Amide: Amide, app.Aromatic: Aromatic, None: None} if not isinstance(value, app.Topology): raise TypeError('value must be an OpenMM Topology. ' 'You supplied a %s' % type(value)) out = cls() atom_mapping = {} for chain in value.chains(): c = out.add_chain() for residue in chain.residues(): try: r = out.add_residue(residue.name, c, resSeq=int(residue.id)) except ValueError: r = out.add_residue(residue.name, c) for atom in residue.atoms(): if atom.element is None: element = elem.virtual else: element = elem.get_by_symbol(atom.element.symbol) a = out.add_atom(atom.name, element, r) atom_mapping[atom] = a for bond in value.bonds(): a1, a2 = bond out.add_bond(atom_mapping[a1], atom_mapping[a2], type=bond_mapping[bond.type], order=bond.order) return out def to_dataframe(self): """Convert this topology into a pandas dataframe Returns ------- atoms : pandas.DataFrame The atoms in the topology, represented as a data frame. bonds : np.ndarray, shape=(n_bonds, 4), dtype=float, Optional The bonds in this topology, represented as an n_bonds x 4 array indicating the two atom indices of the bond, the bond type, and bond order, cast to floats as the type is mapped from the classes to fractional. 
The atom indices and order are integers cast to float """ pd = import_('pandas') data = [(atom.serial, atom.name, atom.element.symbol, atom.residue.resSeq, atom.residue.name, atom.residue.chain.index,atom.segment_id) for atom in self.atoms] atoms = pd.DataFrame(data, columns=["serial", "name", "element", "resSeq", "resName", "chainID","segmentID"]) bonds = np.zeros([len(self._bonds), 4], dtype=float) for index, bond in enumerate(self.bonds): if bond.order is None: order = 0.0 else: order = bond.order try: bond_type = float(bond.type) except TypeError: # Trap the None case bond_type = 0.0 bonds[index] = bond.atom1.index, bond.atom2.index, bond_type, order return atoms, bonds @classmethod def from_dataframe(cls, atoms, bonds=None): """Create a mdtraj topology from a pandas data frame Parameters ---------- atoms : pandas.DataFrame The atoms in the topology, represented as a data frame. This data frame should have columns "serial" (atom index), "name" (atom name), "element" (atom's element), "resSeq" (index of the residue) "resName" (name of the residue), "chainID" (index of the chain), and optionally "segmentID", following the same conventions as wwPDB 3.0 format. bonds : np.ndarray, shape=(n_bonds, 4) or (n_bonds, 2), dtype=float, Optional The bonds in the topology, represented as a n_bonds x 4 or n_bonds x 2 size array of the indices of the atoms involved, type, and order of each bond, represented as floats. Type and order are optional. Specifying bonds here is optional. To create standard protein bonds, you can use `create_standard_bonds` to "fill in" the bonds on your newly created Topology object, although type and order of bond are not computed if that method is used. See Also -------- create_standard_bonds """ pd = import_('pandas') if bonds is None: bonds = np.zeros([0, 4], dtype=float) for col in ["name", "element", "resSeq", "resName", "chainID", "serial"]: if col not in atoms.columns: raise ValueError('dataframe must have column %s' % col) if "segmentID" not in atoms.columns: atoms["segmentID"] = "" out = cls() if not isinstance(atoms, pd.DataFrame): raise TypeError('atoms must be an instance of pandas.DataFrame. ' 'You supplied a %s' % type(atoms)) if not isinstance(bonds, np.ndarray): raise TypeError('bonds must be an instance of numpy.ndarray. ' 'You supplied a %s' % type(bonds)) if not np.all(np.arange(len(atoms)) == atoms.index): raise ValueError('atoms must be uniquely numbered ' 'starting from zero.') out._atoms = [None for i in range(len(atoms))] c = None r = None previous_chainID = None previous_resName = None previous_resSeq = None for atom_index, atom in atoms.iterrows(): int(atom_index) # Fixes bizarre hashing issue on Py3K. 
See #545 if atom['chainID'] != previous_chainID: previous_chainID = atom['chainID'] c = out.add_chain() if atom['resSeq'] != previous_resSeq or atom['resName'] != previous_resName: previous_resSeq = atom['resSeq'] previous_resName = atom['resName'] r = out.add_residue(atom['resName'], c, atom['resSeq'], atom['segmentID']) a = Atom(atom['name'], elem.get_by_symbol(atom['element']), atom_index, r, serial=atom['serial']) out._atoms[atom_index] = a r._atoms.append(a) for bond in bonds: ai1 = int(bond[0]) ai2 = int(bond[1]) try: bond_type = float_to_bond_type(bond[2]) bond_order = int(bond[3]) if bond_order == 0: bond_order = None except IndexError: # Does not exist bond_type = None bond_order = None out.add_bond(out.atom(ai1), out.atom(ai2), bond_type, bond_order) out._numAtoms = out.n_atoms return out def to_bondgraph(self): """Create a NetworkX graph from the atoms and bonds in this topology Returns ------- g : nx.Graph A graph whose nodes are the Atoms in this topology, and whose edges are the bonds See Also -------- atoms bonds Notes ----- This method requires the NetworkX python package. """ nx = import_('networkx') g = nx.Graph() g.add_nodes_from(self.atoms) g.add_edges_from(self.bonds) return g def __eq__(self, other): """Are two topologies equal? Parameters ---------- other : object The object to compare to Returns ------- equality : bool Are the two topologies identical? """ if not isinstance(other, Topology): return False if self is other: return True if len(self._chains) != len(other._chains): return False for c1, c2 in zip(self.chains, other.chains): if c1.index != c2.index: return False if len(c1._residues) != len(c2._residues): return False for r1, r2 in zip(c1.residues, c2.residues): if (r1.index != r1.index) or (r1.name != r2.name): # or (r1.resSeq != r2.resSeq): return False if len(r1._atoms) != len(r2._atoms): return False for a1, a2 in zip(r1.atoms, r2.atoms): if (a1.index != a2.index) or (a1.name != a2.name): return False if a1.element is not a2.element: return False # for attr in ['atomic_number', 'name', 'symbol']: # if getattr(a1.element, attr) != getattr(a2.element, attr): # return False if len(self._bonds) != len(other._bonds): return False # the bond ordering is somewhat ambiguous, so try and fix it for comparison self_sorted_bonds = sorted([bond for bond in self.bonds]) other_sorted_bonds = sorted([bond for bond in other.bonds]) for i, bond in enumerate(self_sorted_bonds): if bond != other_sorted_bonds[i]: return False return True def add_chain(self): """Create a new Chain and add it to the Topology. Returns ------- chain : mdtraj.topology.Chain the newly created Chain """ chain = Chain(len(self._chains), self) self._chains.append(chain) return chain def add_residue(self, name, chain, resSeq=None, segment_id=""): """Create a new Residue and add it to the Topology. Parameters ---------- name : str The name of the residue to add chain : mdtraj.topology.Chain The Chain to add it to resSeq : int, optional Residue sequence number, such as from a PDB record. These sequence numbers are arbitrary, and do not necessarily start at 0 (or 1). If not supplied, the resSeq attribute will be set to the residue's sequential (0 based) index. 
segment_id : str, optional A label for the segment to which this residue belongs Returns ------- residue : mdtraj.topology.Residue The newly created Residue """ if resSeq is None: resSeq = self._numResidues residue = Residue(name, self._numResidues, chain, resSeq, segment_id) self._residues.append(residue) self._numResidues += 1 chain._residues.append(residue) return residue def insert_atom(self, name, element, residue, index=None, rindex=None, serial=None): """Create a new Atom and insert it into the Topology at a specific position. Parameters ---------- name : str The name of the atom to add element : mdtraj.element.Element The element of the atom to add residue : mdtraj.topology.Residue The Residue to add it to index : int If provided, the desired index for this atom within the topology. Existing atoms with indices >= index will be pushed back. rindex : int If provided, the desired position for this atom within the residue serial : int Serial number associated with the atom. This has nothing to do with the actual ordering and is solely for PDB labeling purposes. Returns ------- atom : mdtraj.topology.Atom the newly created Atom """ if element is None: element = elem.virtual if index is None: atom = Atom(name, element, self._numAtoms, residue, serial=serial) self._atoms.append(atom) else: atom = Atom(name, element, index, residue, serial=serial) for i in range(index, len(self._atoms)): self._atoms[i].index += 1 self._atoms.insert(index, atom) self._numAtoms += 1 if rindex is None: residue._atoms.append(atom) else: residue._atoms.insert(rindex, atom) return atom def delete_atom_by_index(self, index): """Delete an Atom from the topology. Parameters ---------- index : int The index of the atom to be removed. """ a = self._atoms[index] if a.index != index: raise RuntimeError("Index of selected atom does not match order in topology.") for i in range(index+1, len(self._atoms)): self._atoms[i].index -= 1 a.residue._atoms.remove(a) self._atoms.remove(a) self._numAtoms -= 1 def add_atom(self, name, element, residue, serial=None): """Create a new Atom and add it to the Topology. Parameters ---------- name : str The name of the atom to add element : mdtraj.element.Element The element of the atom to add residue : mdtraj.topology.Residue The Residue to add it to serial : int Serial number associated with the atom. Returns ------- atom : mdtraj.topology.Atom the newly created Atom """ if element is None: element = elem.virtual atom = Atom(name, element, self._numAtoms, residue, serial=serial) self._atoms.append(atom) self._numAtoms += 1 residue._atoms.append(atom) return atom def add_bond(self, atom1, atom2, type=None, order=None): """Create a new bond and add it to the Topology. Parameters ---------- atom1 : mdtraj.topology.Atom The first Atom connected by the bond atom2 : mdtraj.topology.Atom The second Atom connected by the bond type : mdtraj.topology.Singleton or None, Default: None, Optional Bond type of the bond, or None if not known/provided order : 1, 2, 3 or None, Default: None, Optional Characteristic order of the bond if known, defaults None """ if atom1.index < atom2.index: self._bonds.append(Bond(atom1, atom2, type=type, order=order)) else: self._bonds.append(Bond(atom2, atom1, type=type, order=order)) def chain(self, index): """Get a specific chain by index. These indices start from zero. Parameters ---------- index : int The index of the chain to select. Returns ------- chain : Chain The `index`-th chain in the topology. 
""" return self._chains[index] @property def chains(self): """Iterator over all Chains in the Topology. Returns ------- chainiter : listiterator Iterator over all Chains in the Topology. """ return iter(self._chains) @property def n_chains(self): """Get the number of chains in the Topology""" return len(self._chains) def residue(self, index): """Get a specific residue by index. These indices start from zero. Parameters ---------- index : int The index of the residue to select. Returns ------- residue : Residue The `index`-th residue in the topology. """ return self._residues[index] @property def residues(self): """Iterator over all Residues in the Topology. Returns ------- residueiter : generator Iterator over all Residues in the Topology. """ for chain in self._chains: for residue in chain._residues: yield residue @property def n_residues(self): """Get the number of residues in the Topology. """ return len(self._residues) def atom(self, index): """Get a specific atom by index. These indices start from zero. Parameters ---------- index : int The index of the atom to select. Returns ------- atom : Atom The `index`-th atom in the topology. """ return self._atoms[index] @property def atoms(self): """Iterator over all Atoms in the Topology. Returns ------- atomiter : generator Iterator over all Atoms in the Topology. """ for chain in self._chains: for residue in chain._residues: for atom in residue._atoms: yield atom def atoms_by_name(self, name): """Iterator over all Atoms in the Topology with a specified name Parameters ---------- name : str The particular atom name of interest. Examples -------- >>> for atom in topology.atoms_by_name('CA'): ... print(atom) Returns ------- atomiter : generator """ for atom in self.atoms: if atom.name == name: yield atom @property def n_atoms(self): """Get the number of atoms in the Topology""" return len(self._atoms) @property def bonds(self): """Iterator over all bonds (each represented as a tuple of two Atoms) in the Topology. Returns ------- bonditer : generator Iterator over all bonds between Atoms in the Trajectory. Each bond can then be iterated to get the atoms. I.e.: `for atom1, atom2 in Topology.bonds` yields iterator over pairs of Atoms `for bond in Topology.bonds` yields iterator over bonds """ return iter(self._bonds) @property def n_bonds(self): """Get the number of bonds in the Topology""" return len(self._bonds) def create_standard_bonds(self): """Create bonds based on the atom and residue names for all standard residue types. """ if len(Topology._standardBonds) == 0: # Load the standard bond defitions. tree = etree.parse(os.path.join(os.path.dirname(__file__), '..', 'formats', 'pdb', 'data', 'residues.xml')) for residue in tree.getroot().findall('Residue'): bonds = [] Topology._standardBonds[residue.attrib['name']] = bonds for bond in residue.findall('Bond'): bonds.append((bond.attrib['from'], bond.attrib['to'])) for chain in self._chains: # First build a map of atom names to atoms. atomMaps = [] for residue in chain._residues: atomMap = {} atomMaps.append(atomMap) for atom in residue._atoms: atomMap[atom.name] = atom # Loop over residues and construct bonds. 
for i in range(len(chain._residues)): name = chain._residues[i].name if name in Topology._standardBonds: for bond in Topology._standardBonds[name]: if bond[0].startswith('-') and i > 0: fromResidue = i-1 fromAtom = bond[0][1:] elif (bond[0].startswith('+') and i < len(chain._residues)): fromResidue = i+1 fromAtom = bond[0][1:] else: fromResidue = i fromAtom = bond[0] if bond[1].startswith('-') and i > 0: toResidue = i-1 toAtom = bond[1][1:] elif (bond[1].startswith('+') and i < len(chain._residues)): toResidue = i+1 toAtom = bond[1][1:] else: toResidue = i toAtom = bond[1] if (fromAtom in atomMaps[fromResidue] and toAtom in atomMaps[toResidue]): self.add_bond(atomMaps[fromResidue][fromAtom], atomMaps[toResidue][toAtom]) def create_disulfide_bonds(self, positions): """Identify disulfide bonds based on proximity and add them to the Topology. Parameters ---------- positions : list The list of atomic positions based on which to identify bonded atoms """ def isCyx(res): names = [atom.name for atom in res._atoms] return 'SG' in names and 'HG' not in names cyx = [res for res in self.residues if res.name == 'CYS' and isCyx(res)] atomNames = [[atom.name for atom in res._atoms] for res in cyx] for i in range(len(cyx)): sg1 = cyx[i]._atoms[atomNames[i].index('SG')] pos1 = positions[sg1.index] for j in range(i): sg2 = cyx[j]._atoms[atomNames[j].index('SG')] pos2 = positions[sg2.index] delta = [x-y for (x, y) in zip(pos1, pos2)] distance = np.sqrt( delta[0]*delta[0] + delta[1]*delta[1] + delta[2]*delta[2]) if distance < 0.3: # this is supposed to be nm. I think we're good self.add_bond(sg1, sg2) def subset(self, atom_indices): """Create a new Topology from a subset of the atoms in an existing topology. Notes ----- The existing topology will not be altered. Parameters ---------- atom_indices : array_like A list of the indices corresponding to the atoms in that you'd like to retain. """ return _topology_from_subset(self, atom_indices) def select_expression(self, selection_string): """Translate a atom selection expression into a pure python expression. Parameters ---------- selection_string : str An expression in the MDTraj atom selection DSL Examples -------- >>> topology.select_expression('name O and water') "[atom.index for atom in topology.atoms if ((atom.name == 'O') and atom.residue.is_water)]") Returns ------- python_string : str A string containing a pure python expression, equivalent to the selection expression. """ condition = parse_selection(selection_string).source fmt_string = "[atom.index for atom in topology.atoms if {condition}]" return fmt_string.format(condition=condition) def select(self, selection_string): """Execute a selection against the topology Parameters ---------- selection_string : str An expression in the MDTraj atom selection DSL Examples -------- >>> topology.select('name O and water') array([1, 3, 5, 10, ...]) Returns ------- indices : np.ndarray, dtype=int, ndim=1 Array of the indices of the atoms matching the selection expression. See Also -------- select_expression, mdtraj.core.selection.parse_selection """ filter_func = parse_selection(selection_string).expr indices = np.array([a.index for a in self.atoms if filter_func(a)]) return indices def select_atom_indices(self, selection='minimal'): """Get the indices of biologically-relevant groups by name. Parameters ---------- selection : {'all', 'alpha', 'minimal', 'heavy', 'water'} What types of atoms to select. 
``all`` All atoms ``alpha`` Protein residue alpha carbons ``minimal`` Keep the atoms in protein residues with names in {CA, CB, C, N, O} ``heavy`` All non-hydrogen protein atoms. ``water`` Water oxygen atoms Returns ---------- indices : np.ndarray (N,) An array of the indices of the selected atoms. """ selection = selection.lower() options = ['all', 'alpha', 'minimal', 'heavy', 'water'] if selection == 'all': atom_indices = np.arange(self.n_atoms) elif selection == 'alpha': atom_indices = [a.index for a in self.atoms if a.name == 'CA' and a.residue.is_protein] elif selection == 'minimal': atom_indices = [a.index for a in self.atoms if a.name in ['CA', 'CB', 'C', 'N', 'O'] and a.residue.is_protein] elif selection == 'heavy': atom_indices = [a.index for a in self.atoms if a.element != elem.hydrogen and a.residue.is_protein] elif selection == 'water': atom_indices = [a.index for a in self.atoms if a.name in ['O', 'OW'] and a.residue.is_water] else: raise ValueError( '%s is not a valid option. Selection must be one of %s' % ( selection, ', '.join(options))) indices = np.array(atom_indices) return indices def select_pairs(self, selection1=None, selection2=None): """Generate unique pairs of atom indices. If a selecton is a string, it will be resolved using the atom selection DSL, otherwise it is expected to be an array of atom indices. Parameters ---------- selection1 : str or array-like, shape=(n_indices, ), dtype=int A selection for `select()` or an array of atom indices. selection2 : str or array-like, shape=(n_indices, ), dtype=int A selection for `select()` or an array of atom indices. Returns ------- pairs : array-like, shape=(n_pairs, 2), dtype=int Each row gives the indices of two atoms. """ # Resolve selections using the atom selection DSL... if isinstance(selection1, string_types): a_indices = self.select(selection1) else: # ...or use a provided array of indices. a_indices = ensure_type(selection1, dtype=np.int32, ndim=1, name='a_indices', warn_on_cast=False) if isinstance(selection2, string_types): b_indices = self.select(selection2) else: b_indices = ensure_type(selection2, dtype=np.int32, ndim=1, name='b_indices', warn_on_cast=False) a_indices.sort() b_indices.sort() # Create unique pairs from the indices. # In the cases where a_indices and b_indices are identical or mutually # exclusive, we can utilize a more efficient and memory friendly # approach by removing the intermediate set creation required in # the general case. if np.array_equal(a_indices, b_indices): pairs = self._unique_pairs_equal(a_indices) elif len(np.intersect1d(a_indices, b_indices)) == 0: pairs = self._unique_pairs_mutually_exclusive(a_indices, b_indices) else: pairs = self._unique_pairs(a_indices, b_indices) return pairs @classmethod def _unique_pairs(cls, a_indices, b_indices): return np.array(list(set( (a, b) if a > b else (b, a) for a, b in itertools.product(a_indices, b_indices) if a != b)), dtype=np.int32) @classmethod def _unique_pairs_mutually_exclusive(cls, a_indices, b_indices): pairs = np.fromiter(itertools.chain.from_iterable( itertools.product(a_indices, b_indices)), dtype=np.int32, count=len(a_indices) * len(b_indices) * 2) return np.vstack((pairs[::2], pairs[1::2])).T @classmethod def _unique_pairs_equal(cls, a_indices): pairs = np.fromiter(itertools.chain.from_iterable( itertools.combinations(a_indices, 2)), dtype=np.int32, count=len(a_indices) * (len(a_indices) - 1)) return np.vstack((pairs[::2], pairs[1::2])).T def find_molecules(self): """Identify molecules based on bonds. 
A molecule is defined as a set of atoms that are connected to each other by bonds. This method uses the list of bonds to divide up the Topology's atoms into molecules. Returns ------- molecules : list of sets Each entry represents one molecule, and is the set of all Atoms in that molecule """ if len(self._bonds) == 0 and any(res.n_atoms > 1 for res in self._residues): raise ValueError('Cannot identify molecules because this Topology does not include bonds') # Make a list of every other atom to which each atom is connected. num_atoms = self.n_atoms atom_bonds = [[] for i in range(num_atoms)] for atom1, atom2 in self.bonds: atom_bonds[atom1.index].append(atom2.index) atom_bonds[atom2.index].append(atom1.index) # This is essentially a recursive algorithm, but it is reformulated as a loop to avoid # stack overflows. It selects an atom, marks it as a new molecule, then recursively # marks every atom bonded to it as also being in that molecule. atom_molecule = [-1]*num_atoms num_molecules = 0 for i in range(num_atoms): if atom_molecule[i] == -1: # Start a new molecule. atom_stack = [i] neighbor_stack = [0] molecule = num_molecules num_molecules += 1 # Recursively tag all the bonded atoms. while len(atom_stack) > 0: atom = atom_stack[-1] atom_molecule[atom] = molecule while neighbor_stack[-1] < len(atom_bonds[atom]) and atom_molecule[atom_bonds[atom][neighbor_stack[-1]]] != -1: neighbor_stack[-1] += 1 if neighbor_stack[-1] < len(atom_bonds[atom]): atom_stack.append(atom_bonds[atom][neighbor_stack[-1]]) neighbor_stack.append(0) else: del atom_stack[-1] del neighbor_stack[-1] # Build the final output. molecules = [set() for i in range(num_molecules)] for atom in self.atoms: molecules[atom_molecule[atom.index]].add(atom) return molecules def guess_anchor_molecules(self): """Guess anchor molecules for imaging Returns ------- anchor_molecules : list of atom sets List of anchor molecules See Also -------- Trajectory.image_molecules """ molecules = self.find_molecules() # Select the anchor molecules. molecules.sort(key=lambda x: -len(x)) atoms_cutoff = max(len(molecules[int(0.1*len(molecules))]), int(0.1*len(molecules[0]))) anchor_molecules = [mol for mol in molecules if len(mol) > atoms_cutoff] num_anchors = len(anchor_molecules) if num_anchors == 0: raise ValueError("Could not find any anchor molecules. Based on " "our heuristic, those should be molecules with " "more than {} atoms. Perhaps your topology " "doesn't give an accurate bond graph?" .format(atoms_cutoff)) return anchor_molecules class Chain(object): """A Chain object represents a chain within a Topology. Attributes ---------- index : int The index of the Chain within its Topology topology : mdtraj.Topology The Topology this Chain belongs to residues : generator Iterator over all Residues in the Chain. atoms : generator Iterator over all Atoms in the Chain. """ def __init__(self, index, topology): """Construct a new Chain. You should call add_chain() on the Topology instead of calling this directly.""" # The index of the Chain within its Topology self.index = index # The Topology this Chain belongs to self.topology = topology self._residues = [] @property def residues(self): """Iterator over all Residues in the Chain. Returns ------- residueiter : listiterator Iterator over all Residues in the Topology. """ return iter(self._residues) def residue(self, index): """Get a specific residue in this Chain. Parameters ---------- index : int The index of the residue to select. 
Returns ------- residue : Residue """ return self._residues[index] @property def n_residues(self): """Get the number of residues in this Chain. """ return len(self._residues) @property def atoms(self): """Iterator over all Atoms in the Chain. Returns ------- atomiter : generator Iterator over all Atoms in the Chain. """ for residue in self._residues: for atom in residue._atoms: yield atom def atoms_by_name(self, name): """Iterator over all Atoms in the Chain with a specified name. Parameters ---------- name : str The particular atom name of interest. Examples -------- >>> for atom in chain.atoms_by_name('CA'): ... print(atom) Returns ------- atomiter : generator """ for atom in self.atoms: if atom.name == name: yield atom def atom(self, index): """Get a specific atom in this Chain. Parameters ---------- index : int The index of the atom to select. Returns ------- atom : Atom """ # this could be made faster by caching the list # of atoms internally if necessary return next(itertools.islice(self.atoms, index, index + 1)) @property def n_atoms(self): """Get the number of atoms in this Chain""" return sum(r.n_atoms for r in self._residues) def __hash__(self): return self.index class Residue(object): """A Residue object represents a residue within a Topology. Attributes ---------- name : str The name of the Residue index : int The index of the Residue within its Topology chain : mdtraj.topology.Chain The chain within which this residue belongs resSeq : int The residue sequence number segment_id : str, optional A label for the segment to which this residue belongs """ def __init__(self, name, index, chain, resSeq, segment_id=''): """Construct a new Residue. You should call add_residue() on the Topology instead of calling this directly.""" self.name = name self.index = index self.chain = chain self.resSeq = resSeq self.segment_id = segment_id self._atoms = [] @property def atoms(self): """Iterator over all Atoms in the Residue. Returns ------- atomiter : listiterator Iterator over all Atoms in the Residue. """ return iter(self._atoms) def atoms_by_name(self, name): """Iterator over all Atoms in the Residue with a specified name Parameters ---------- name : str The particular atom name of interest. Examples -------- >>> for atom in residue.atoms_by_name('CA'): ... print(atom) Returns ------- atomiter : generator """ for atom in self.atoms: if atom.name == name: yield atom def atom(self, index_or_name): """Get a specific atom in this Residue. Parameters ---------- index_or_name : {int, str} Either a (zero-based) index, or the name of the atom. If a string is passed in, the first atom -- in index order -- with a matching name wil be returned. Returns ------- atom : Atom """ try: return self._atoms[index_or_name] except TypeError: try: return next(self.atoms_by_name(index_or_name)) except StopIteration: raise KeyError('no matching atom found') @property def n_atoms(self): """Get the number of atoms in this Residue""" return len(self._atoms) @property def is_protein(self): """Whether the residue is one found in proteins.""" return self.name in _PROTEIN_RESIDUES @property def code(self): """Get the one letter code for this Residue""" if self.is_protein: return _AMINO_ACID_CODES[self.name] else: return None @property def is_water(self): """Whether the residue is water. 
Residue names according to VMD References ---------- http://www.ks.uiuc.edu/Research/vmd/vmd-1.3/ug/node133.html """ return self.name in _WATER_RESIDUES @property def is_nucleic(self): """Whether the residue is one found in nucleic acids.""" raise NotImplementedError def __str__(self): return '%s%s' % (self.name, self.resSeq) def __repr__(self): return str(self) def __hash__(self): return hash((self.name, self.index, self.resSeq, self.segment_id)) class Atom(object): """An Atom object represents a residue within a Topology. Attributes ---------- name : str The name of the Atom element : mdtraj.element.Element The element of the Atoms index : int The index of the Atom within its Topology residue : mdtraj.topology.Residue The Residue this Atom belongs to serial : int The serial number from the PDB specification. Unlike index, this may not be contiguous or 0-indexed. """ def __init__(self, name, element, index, residue, serial=None): """Construct a new Atom. You should call add_atom() on the Topology instead of calling this directly.""" # The name of the Atom self.name = name # That Atom's element self.element = element # The index of the Atom within its Topology self.index = index # The Residue this Atom belongs to self.residue = residue # The not-necessarily-contiguous "serial" number from the PDB spec self.serial = serial @property def n_bonds(self): """Number of bonds in which the atom participates.""" # TODO: this info could be cached. return ilen(bond for bond in self.residue.chain.topology.bonds if self in bond) @property def is_backbone(self): """Whether the atom is in the backbone of a protein residue""" return (self.name in set(['C', 'CA', 'N', 'O']) and self.residue.is_protein) @property def is_sidechain(self): """Whether the atom is in the sidechain of a protein residue""" return (self.name not in set(['C', 'CA', 'N', 'O', 'HA', 'H']) and self.residue.is_protein) @property def segment_id(self): """User specified segment_id of the residue to which this atom belongs""" return self.residue.segment_id def __eq__(self, other): """ Check whether two Atom objects are equal. """ if self.name != other.name: return False if self.index != other.index: return False if self.element.name != other.element.name: return False if self.residue.name != other.residue.name: return False if self.residue.index != other.residue.index: return False if self.residue.chain.index != other.residue.chain.index: return False return True def __hash__(self): """A quick comparison. 
""" return self.index def __str__(self): return '%s-%s' % (self.residue, self.name) def __repr__(self): return str(self) # Enumerated values for bond type class Single(Singleton): def __repr__(self): return 'Single' def __float__(self): return 1.0 Single = Single() class Double(Singleton): def __repr__(self): return 'Double' def __float__(self): return 2.0 Double = Double() class Triple(Singleton): def __repr__(self): return 'Triple' def __float__(self): return 3.0 Triple = Triple() class Aromatic(Singleton): def __repr__(self): return 'Aromatic' def __float__(self): return 1.5 Aromatic = Aromatic() class Amide(Singleton): def __repr__(self): return 'Amide' def __float__(self): return 1.25 Amide = Amide() def float_to_bond_type(bond_float): """ Convert a float to known bond type class, or None if no matched class if found Parameters ---------- bond_float : float Representation of built in bond types as a float, Maps this float to specific bond class, if the float has no map, None is returned instead Returns ------- bond_type : mdtraj.topology.Singleton subclass or None Bond type matched to the float if known If no match is found, returns None (which is also a valid type for the Bond class) """ all_bond_types = [Single, Double, Triple, Aromatic, Amide] for bond_type in all_bond_types: if float(bond_type) == float(bond_float): return bond_type return None class Bond(namedtuple('Bond', ['atom1', 'atom2'])): """A Bond representation of a bond between two Atoms within a Topology Attributes ---------- atom1 : mdtraj.topology.Atom The first atom in the bond atom2 : mdtraj.topology.Atom The second atom in the bond order : instance of mdtraj.topology.Singleton or None type : int on [1,3] domain or None """ def __new__(cls, atom1, atom2, type=None, order=None): """Construct a new Bond. You should call add_bond() on the Topology instead of calling this directly. Must use __new__ constructor since this is an immutable class """ bond = super(Bond, cls).__new__(cls, atom1, atom2) assert isinstance(type, Singleton) or type is None, "Type must be None or a Singleton" assert order is None or 1 <= order <= 3, "Order must be int between 1 to 3 or None" bond.type = type bond.order = order return bond def __getnewargs__(self): """ Support for pickle protocol 2: http://docs.python.org/2/library/pickle.html#pickling-and-unpickling-normal-class-instances """ return self[0], self[1], self.type, self.order @property def _equality_tuple(self): # Hierarchy of parameters: Atom1 index -> Atom2 index -> type -> order return (self[0].index, self[1].index, float(self.type) if self.type is not None else 0.0, self.order if self.order is not None else 0) def __deepcopy__(self, memo): return Bond(self[0], self[1], self.type, self.order) def __eq__(self, other): if not isinstance(other, Bond): return False return self._equality_tuple == other._equality_tuple def __repr__(self): s = "Bond(%s, %s" % (self[0], self[1]) if self.type is not None: s = "%s, type=%s" % (s, self.type) if self.order is not None: s = "%s, order=%d" % (s, self.order) s += ")" return s def __str__(self): return self.__repr__() def __hash__(self): # Set of atoms making up bonds, the type, and the order return hash((self[0], self[1], self.type, self.order)) @staticmethod def _other_is_bond(other): # Ensure other type for inequalities is a bond if not isinstance(other, Bond): raise TypeError("Bond inequalities can only be compared with other bonds") def __gt__(self, other): # Cannot use total_ordering because namedtuple # has its own __gt__, __lt__, etc. 
methods, which # supersede total_ordering self._other_is_bond(other) return self._equality_tuple > other._equality_tuple def __ge__(self, other): self._other_is_bond(other) return self._equality_tuple >= other._equality_tuple def __lt__(self, other): self._other_is_bond(other) return self._equality_tuple < other._equality_tuple def __le__(self, other): self._other_is_bond(other) return self._equality_tuple <= other._equality_tuple def __ne__(self, other): return not self.__eq__(other) def __getstate__(self): # This is required for pickle because the parent class # does not properly return a state return self.__dict__
rmcgibbo/mdtraj
mdtraj/core/topology.py
Python
lgpl-2.1
60,641
[ "MDTraj", "OpenMM", "VMD" ]
85c3fdd1c77e6697daf8b90278ebc2ddf1a5a7c6c56c1ab23cc1b01e62813f54
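The topology module above exposes a small construction API (add_chain, add_residue, add_atom, add_bond) plus atom selection and DataFrame round-tripping. Below is a minimal usage sketch of exactly those methods, assuming mdtraj and pandas are importable; the chain, residue, and atom names are arbitrary illustrations, not taken from the corpus.

# Illustrative sketch: building a tiny mdtraj Topology by hand with the API
# defined above, then querying it. Names and values here are hypothetical.
import mdtraj as md
from mdtraj.core import element as elem

top = md.Topology()
chain = top.add_chain()
res = top.add_residue('ALA', chain, resSeq=1)
n = top.add_atom('N', elem.nitrogen, res)
ca = top.add_atom('CA', elem.carbon, res)
top.add_bond(n, ca)

print(top)                        # <mdtraj.Topology with 1 chains, 1 residues, 2 atoms, 1 bonds>
print(top.n_atoms, top.n_bonds)   # 2 1
print(top.select('name CA'))      # indices of atoms named CA -> [1]

# Round trip through pandas, as described in to_dataframe/from_dataframe above
atoms, bonds = top.to_dataframe()
top2 = md.Topology.from_dataframe(atoms, bonds)
assert top == top2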
# pysam versioning information __version__ = "0.8.3" __samtools_version__ = "1.2" __htslib_version__ = "1.2.1"
nlhepler/pysam
pysam/version.py
Python
mit
114
[ "pysam" ]
1498ec4dce49b9eca4bb92dca74eec53ba2df41d40b7d0dd61fb9feb219a5b8a
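The version module above only defines dotted version strings for pysam and the bundled samtools/htslib. A minimal sketch of consuming those constants follows, assuming pysam 0.8.3 is importable; the at_least helper is a hypothetical illustration, not part of pysam's API.

# Illustrative sketch: reading the constants defined in pysam/version.py above.
from pysam.version import __version__, __htslib_version__

def at_least(version, minimum):
    # Hypothetical helper: compare dotted version strings numerically,
    # e.g. '0.8.3' >= '0.8.0'.
    return tuple(int(p) for p in version.split('.')) >= tuple(int(p) for p in minimum.split('.'))

print(__version__, __htslib_version__)   # 0.8.3 1.2.1
assert at_least(__version__, '0.8.0')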
from __future__ import (absolute_import, division, print_function) from mantid.api import (DataProcessorAlgorithm, mtd, AlgorithmFactory, FileProperty, FileAction, MultipleFileProperty, WorkspaceProperty, PropertyMode, Progress) from mantid.simpleapi import (LoadIsawUB, LoadInstrument, SetGoniometer, ConvertToMD, Load, LoadIsawDetCal, LoadMask, DeleteWorkspace, MaskDetectors, ConvertToMDMinMaxGlobal) from mantid.kernel import VisibleWhenProperty, PropertyCriterion, Direction from mantid import logger class ConvertMultipleRunsToSingleCrystalMD(DataProcessorAlgorithm): def category(self): return "MDAlgorithms\\Creation" def seeAlso(self): return [ "ConvertToDiffractionMDWorkspace","ConvertToMD" ] def name(self): return "ConvertMultipleRunsToSingleCrystalMD" def summary(self): return "Convert multiple runs to one Single Crystal MDEventWorkspace" def PyInit(self): # files to reduce self.declareProperty(MultipleFileProperty(name="Filename", extensions=["_event.nxs", ".nxs.h5", ".nxs"]), "Files to combine in reduction") # Filter by time self.copyProperties('LoadEventNexus', ['FilterByTofMin', 'FilterByTofMax', 'FilterByTimeStop']) # UBMatrix self.declareProperty(FileProperty(name="UBMatrix",defaultValue="",action=FileAction.OptionalLoad, extensions=[".mat", ".ub", ".txt"]), doc="Path to an ISAW-style UB matrix text file. See :ref:`LoadIsawUB <algm-LoadIsawUB>`") # Goniometer self.declareProperty('SetGoniometer', False, "Set which Goniometer to use. See :ref:`SetGoniometer <algm-SetGoniometer>`") condition = VisibleWhenProperty("SetGoniometer", PropertyCriterion.IsNotDefault) self.copyProperties('SetGoniometer', ['Goniometers', 'Axis0', 'Axis1', 'Axis2']) self.setPropertySettings("Goniometers", condition) self.setPropertySettings('Axis0', condition) self.setPropertySettings('Axis1', condition) self.setPropertySettings('Axis2', condition) # Corrections self.declareProperty(FileProperty(name="LoadInstrument",defaultValue="",action=FileAction.OptionalLoad, extensions=[".xml"]), "Load a different instrument IDF onto the data from a file. See :ref:`LoadInstrument <algm-LoadInstrument>`") self.declareProperty(FileProperty(name="DetCal",defaultValue="",action=FileAction.OptionalLoad, extensions=[".detcal"]), "Load an ISAW DetCal calibration onto the data from a file. See :ref:`LoadIsawDetCal <algm-LoadIsawDetCal>`") self.declareProperty(FileProperty(name="MaskFile",defaultValue="",action=FileAction.OptionalLoad, extensions=[".xml",".msk"]), "Masking file for masking. Supported file format is XML and ISIS ASCII. 
See :ref:`LoadMask <algm-LoadMask>`") self.declareProperty(WorkspaceProperty("OutputWorkspace", "", optional=PropertyMode.Mandatory, direction=Direction.Output), "Output Workspace") # Convert Settings self.copyProperties('ConvertToMD', ['Uproj', 'Vproj', 'Wproj', 'MinValues', 'MaxValues', 'SplitInto', 'SplitThreshold', 'MaxRecursionDepth', 'OverwriteExisting']) self.setPropertyGroup('FilterByTofMin', 'Loading') self.setPropertyGroup('FilterByTofMax', 'Loading') self.setPropertyGroup('FilterByTimeStop', 'Loading') # Goniometer self.setPropertyGroup("SetGoniometer","Goniometer") self.setPropertyGroup("Goniometers","Goniometer") self.setPropertyGroup("Axis0","Goniometer") self.setPropertyGroup("Axis1","Goniometer") self.setPropertyGroup("Axis2","Goniometer") # Corrections self.setPropertyGroup("LoadInstrument","Corrections") self.setPropertyGroup("DetCal","Corrections") self.setPropertyGroup("MaskFile","Corrections") # ConvertToMD self.setPropertyGroup('Uproj', 'ConvertToMD') self.setPropertyGroup('Vproj', 'ConvertToMD') self.setPropertyGroup('Wproj', 'ConvertToMD') self.setPropertyGroup('MinValues', 'ConvertToMD') self.setPropertyGroup('MaxValues', 'ConvertToMD') self.setPropertyGroup('SplitInto', 'ConvertToMD') self.setPropertyGroup('SplitThreshold', 'ConvertToMD') self.setPropertyGroup('MaxRecursionDepth', 'ConvertToMD') def PyExec(self): _load_inst = bool(self.getProperty("LoadInstrument").value) _detcal = bool(self.getProperty("DetCal").value) _masking = bool(self.getProperty("MaskFile").value) _outWS_name = self.getPropertyValue("OutputWorkspace") _UB = bool(self.getProperty("UBMatrix").value) MinValues = self.getProperty("MinValues").value MaxValues = self.getProperty("MaxValues").value if self.getProperty("OverwriteExisting").value: if mtd.doesExist(_outWS_name): DeleteWorkspace(_outWS_name) progress = Progress(self, 0.0, 1.0, len(self.getProperty("Filename").value)) for run in self.getProperty("Filename").value: logger.notice("Working on " + run) Load(Filename=run, OutputWorkspace='__run', FilterByTofMin=self.getProperty("FilterByTofMin").value, FilterByTofMax=self.getProperty("FilterByTofMax").value, FilterByTimeStop=self.getProperty("FilterByTimeStop").value) if _load_inst: LoadInstrument(Workspace='__run', Filename=self.getProperty("LoadInstrument").value, RewriteSpectraMap=False) if _detcal: LoadIsawDetCal(InputWorkspace='__run', Filename=self.getProperty("DetCal").value) if _masking: if not mtd.doesExist('__mask'): LoadMask(Instrument=mtd['__run'].getInstrument().getName(), InputFile=self.getProperty("MaskFile").value, OutputWorkspace='__mask') MaskDetectors(Workspace='__run',MaskedWorkspace='__mask') if self.getProperty('SetGoniometer').value: SetGoniometer(Workspace='__run', Goniometers=self.getProperty('Goniometers').value, Axis0=self.getProperty('Axis0').value, Axis1=self.getProperty('Axis1').value, Axis2=self.getProperty('Axis2').value) if _UB: LoadIsawUB(InputWorkspace='__run', Filename=self.getProperty("UBMatrix").value) if len(MinValues) == 0 or len(MaxValues) == 0: MinValues, MaxValues = ConvertToMDMinMaxGlobal('__run', dEAnalysisMode='Elastic',Q3DFrames='HKL',QDimensions='Q3D') ConvertToMD(InputWorkspace='__run', OutputWorkspace=_outWS_name, QDimensions='Q3D', dEAnalysisMode='Elastic', Q3DFrames='HKL', QConversionScales='HKL', Uproj=self.getProperty('Uproj').value, Vproj=self.getProperty('Vproj').value, Wproj=self.getProperty('Wproj').value, MinValues=MinValues, MaxValues=MaxValues, SplitInto=self.getProperty('SplitInto').value, 
SplitThreshold=self.getProperty('SplitThreshold').value, MaxRecursionDepth=self.getProperty('MaxRecursionDepth').value, OverwriteExisting=False) else: if len(MinValues) == 0 or len(MaxValues) == 0: MinValues, MaxValues = ConvertToMDMinMaxGlobal('__run', dEAnalysisMode='Elastic',Q3DFrames='Q',QDimensions='Q3D') ConvertToMD(InputWorkspace='__run', OutputWorkspace=_outWS_name, QDimensions='Q3D', dEAnalysisMode='Elastic', Q3DFrames='Q_sample', Uproj=self.getProperty('Uproj').value, Vproj=self.getProperty('Vproj').value, Wproj=self.getProperty('Wproj').value, MinValues=MinValues, MaxValues=MaxValues, SplitInto=self.getProperty('SplitInto').value, SplitThreshold=self.getProperty('SplitThreshold').value, MaxRecursionDepth=self.getProperty('MaxRecursionDepth').value, OverwriteExisting=False) DeleteWorkspace('__run') progress.report() if mtd.doesExist('__mask'): DeleteWorkspace('__mask') self.setProperty("OutputWorkspace", mtd[_outWS_name]) AlgorithmFactory.subscribe(ConvertMultipleRunsToSingleCrystalMD)
ScreamingUdder/mantid
Framework/PythonInterface/plugins/algorithms/WorkflowAlgorithms/ConvertMultipleRunsToSingleCrystalMD.py
Python
gpl-3.0
9,743
[ "CRYSTAL" ]
7bd5dc7f1625f26785db9dacc09710c0ebff4d7457485cc257706fd10a2753e8
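Once registered through AlgorithmFactory.subscribe, the workflow algorithm above becomes callable from mantid.simpleapi like any built-in algorithm, with the properties declared in PyInit as keyword arguments. The call below is a minimal sketch, assuming a Mantid environment with this plugin installed; the run files, UB matrix path, and goniometer axis string are placeholders, not real data.

# Illustrative sketch: invoking the workflow algorithm defined above.
from mantid.simpleapi import ConvertMultipleRunsToSingleCrystalMD, mtd

ConvertMultipleRunsToSingleCrystalMD(
    Filename='TOPAZ_1000_event.nxs,TOPAZ_1001_event.nxs',  # hypothetical run files
    UBMatrix='sample.mat',                                  # hypothetical ISAW UB file
    SetGoniometer=True,
    Axis0='omega,0,1,0,1',                                  # name,x,y,z,sense convention of SetGoniometer
    OutputWorkspace='single_crystal_md')

md_ws = mtd['single_crystal_md']
print(md_ws.getNumDims())   # 3 dimensions for the Q3D / HKL MDEventWorkspace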
# DEVELOPMENT - local_settings.py # - This file should be copied to ~/hydroshare/hydroshare/local_settings.py # - The iRODS specific contents of this file contain username and password informaiton # that is used for an xDCIShare proxy user import redis import os from kombu import Queue, Exchange from kombu.common import Broadcast DEBUG = True # DEVELOPMENT EXAMPLE ONLY # Make these unique, and don't share it with anybody SECRET_KEY = "9e2e3c2d-8282-41b2-a027-de304c0bc3d944963c9a-4778-43e0-947c-38889e976dcab9f328cb-1576-4314-bfa6-70c42a6e773c" NEVERCACHE_KEY = "7b205669-41dd-40db-9b96-c6f93b66123496a56be1-607f-4dbf-bf62-3315fb353ce6f12a7d28-06ad-4ef7-9266-b5ea66ed2519" ALLOWED_HOSTS = "*" RABBITMQ_HOST = os.environ.get('RABBITMQ_PORT_5672_TCP_ADDR', 'localhost') RABBITMQ_PORT = '5672' REDIS_HOST = os.environ.get('REDIS_PORT_6379_TCP_ADDR', 'localhost') REDIS_PORT = 6379 POSTGIS_HOST = os.environ.get('POSTGIS_PORT_5432_TCP_ADDR', 'localhost') POSTGIS_PORT = 5432 POSTGIS_DB = os.environ.get('POSTGIS_DB', 'postgres') POSTGIS_PASSWORD = os.environ.get('POSTGIS_PASSWORD', 'postgres') POSTGIS_USER = os.environ.get('POSTGIS_USER', 'postgres') REDIS_CONNECTION = redis.Redis( host=REDIS_HOST, port=REDIS_PORT, db=4) WMS_CACHE_DB = redis.Redis( host=REDIS_HOST, port=REDIS_PORT, db=5) PERMISSIONS_DB= redis.Redis( host=REDIS_HOST, port=REDIS_PORT, db=6) IPYTHON_SETTINGS=[] IPYTHON_BASE='/hydroshare/static/media/ipython-notebook' IPYTHON_HOST='127.0.0.1' # celery settings # customizations: we need a special queue for broadcast signals to all # docker daemons. we also need a special queue for direct messages to all # docker daemons. BROKER_URL='amqp://guest:guest@{RABBITMQ_HOST}:{RABBITMQ_PORT}//'.format(RABBITMQ_HOST=RABBITMQ_HOST, RABBITMQ_PORT=RABBITMQ_PORT) CELERY_ACCEPT_CONTENT = ['json'] CELERY_TASK_SERIALIZER = 'json' CELERY_RESULT_SERIALIZER = 'json' CELERY_DEFAULT_QUEUE = 'default' DEFAULT_EXCHANGE=Exchange('default', type='topic') CELERY_QUEUES = ( Queue('default', DEFAULT_EXCHANGE, routing_key='task.default'), ) CELERY_DEFAULT_EXCHANGE = 'tasks' CELERY_DEFAULT_EXCHANGE_TYPE = 'topic' CELERY_DEFAULT_ROUTING_KEY = 'task.default' CELERY_ROUTES = ('hs_core.router.HSTaskRouter',) DOCKER_URL = 'unix://docker.sock/' DOCKER_API_VERSION = '1.12' # CartoCSS CARTO_HOME='/hs_tmp/node_modules/carto' USE_SOUTH = False SITE_TITLE = "xDCIShare" SESSION_EXPIRE_AT_BROWSER_CLOSE = True ############# # DATABASES # ############# DATABASES = { "default": { # Add "postgresql_psycopg2", "mysql", "sqlite3" or "oracle". "ENGINE": "django.contrib.gis.db.backends.postgis", # DB name or path to database file if using sqlite3. "NAME": POSTGIS_DB, # Not used with sqlite3. "USER": POSTGIS_USER, # Not used with sqlite3. "PASSWORD": POSTGIS_PASSWORD, # Set to empty string for localhost. Not used with sqlite3. "HOST": POSTGIS_HOST, # Set to empty string for default. Not used with sqlite3. 
"PORT": POSTGIS_PORT, } } POSTGIS_VERSION=(2,1,1) # Local resource iRODS configuration USE_IRODS = True IRODS_ROOT = '/tmp' IRODS_ICOMMANDS_PATH = '/usr/bin' IRODS_HOST = 'hydrotest41.renci.org' IRODS_PORT = '1247' IRODS_DEFAULT_RESOURCE = 'hydrotest41Resc' IRODS_HOME_COLLECTION = '/hydrotest41Zone/home/hsproxy' IRODS_CWD = '/hydrotest41Zone/home/hsproxy' IRODS_ZONE = 'hydrotest41Zone' IRODS_USERNAME = 'hsproxy' IRODS_AUTH = 'proxywater1' IRODS_GLOBAL_SESSION = True # Remote user zone iRODS configuration REMOTE_USE_IRODS = False # iRODS customized bagit rule path IRODS_BAGIT_RULE='hydroshare/irods/ruleGenerateBagIt_HS.r' IRODS_BAGIT_PATH = 'bags' IRODS_BAGIT_POSTFIX = 'zip' HS_BAGIT_README_FILE_WITH_PATH = 'docs/bagit/readme.txt' # crossref login credential for resource publication USE_CROSSREF_TEST = True CROSSREF_LOGIN_ID = '' CROSSREF_LOGIN_PWD = '' # Since Hyrax server on-demand update is only needed when private netCDF resources on www # are made public, in local development environments or VM deployments other than the www # production, this should not be run by setting RUN_HYRAX_UPDATE to False. RUN_HYRAX_UPDATE # should only be set to True on www.hydroshare.org RUN_HYRAX_UPDATE = False HYRAX_SSH_HOST = '' HYRAX_SSH_PROXY_USER = '' HYRAX_SSH_PROXY_USER_PWD = '' HYRAX_SCRIPT_RUN_COMMAND = '' # hsuserproxy system user configuration used to create xDCIShare iRODS users on-demand HS_USER_ZONE_HOST = '' HS_USER_ZONE_PROXY_USER = '' HS_USER_ZONE_PROXY_USER_PWD = '' HS_USER_ZONE_PROXY_USER_CREATE_USER_CMD = '' HS_USER_ZONE_PROXY_USER_DELETE_USER_CMD = '' # the local xDCIShare proxy user (a counterpart of wwwHydroProxy) in a federated zone with HydroShare Zone HS_LOCAL_PROXY_USER_IN_FED_ZONE = 'localTestHydroProxy' # Please keep the line below unchanged since it is used to check whether # the current site is in production or not HS_WWW_IRODS_PROXY_USER = 'wwwHydroProxy' # credentials for xDCIShare proxy user iRODS account which is set to have own access control # to all collections in any federated zone with HydroShare zone, which is only useful when # testing HydroShare federated zone in local test development environment since in www # production environment, IRODS_USERNAME and other associated settings already represent wwwHydroProxy settings HS_WWW_IRODS_PROXY_USER_PWD = '' HS_WWW_IRODS_HOST = '' HS_IRODS_LOCAL_ZONE_DEF_RES = 'hydroshareLocalResc' HS_WWW_IRODS_ZONE = '' HS_USER_IRODS_ZONE = 'hydroshareuserZone' # Email configuration EMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend' #EMAIL_HOST_USER = '' #EMAIL_HOST_PASSWORD = '' #EMAIL_HOST = '' #EMAIL_PORT = '' #EMAIL_USE_TLS = True #DEFAULT_FROM_EMAIL = '' #DEFAULT_SUPPORT_EMAIL='' HYDROSHARE_SHARED_TEMP = '/shared_tmp' TIME_ZONE = "Etc/UTC"
RENCI/xDCIShare
hydroshare/local_settings.py
Python
bsd-3-clause
5,851
[ "NetCDF" ]
9b97fdfa5606e81f2c62ee4b7ad257730a574cd42ad5e5b1d574988f9022e465
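The settings file above resolves service endpoints from docker-style environment variables (e.g. POSTGIS_PORT_5432_TCP_ADDR) with localhost fallbacks. The snippet below sketches that override pattern in isolation, using the same variable names as the settings file; the address assigned here is hypothetical.

# Illustrative sketch: the environment-override pattern used in local_settings.py above.
import os

os.environ['POSTGIS_PORT_5432_TCP_ADDR'] = '10.0.0.5'   # e.g. injected by a linked container

POSTGIS_HOST = os.environ.get('POSTGIS_PORT_5432_TCP_ADDR', 'localhost')
POSTGIS_DB = os.environ.get('POSTGIS_DB', 'postgres')

DATABASES = {
    'default': {
        'ENGINE': 'django.contrib.gis.db.backends.postgis',
        'NAME': POSTGIS_DB,
        'HOST': POSTGIS_HOST,   # resolves to '10.0.0.5' when the variable is set
    }
}
print(DATABASES['default']['HOST'])   # 10.0.0.5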
#!/usr/bin/python # -*- coding: utf-8 -*- # # This module is also sponsored by E.T.A.I. (www.etai.fr) # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) from __future__ import absolute_import, division, print_function __metaclass__ = type ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ['preview'], 'supported_by': 'community'} DOCUMENTATION = r''' --- module: vmware_guest short_description: Manages virtual machines in vCenter description: > This module can be used to create new virtual machines from templates or other virtual machines, manage power state of virtual machine such as power on, power off, suspend, shutdown, reboot, restart etc., modify various virtual machine components like network, disk, customization etc., rename a virtual machine and remove a virtual machine with associated components. version_added: '2.2' author: - Loic Blot (@nerzhul) <loic.blot@unix-experience.fr> - Philippe Dellaert (@pdellaert) <philippe@dellaert.org> - Abhijeet Kasurde (@Akasurde) <akasurde@redhat.com> requirements: - python >= 2.6 - PyVmomi notes: - Please make sure that the user used for vmware_guest has the correct level of privileges. - For example, following is the list of minimum privileges required by users to create virtual machines. - " DataStore > Allocate Space" - " Virtual Machine > Configuration > Add New Disk" - " Virtual Machine > Configuration > Add or Remove Device" - " Virtual Machine > Inventory > Create New" - " Network > Assign Network" - " Resource > Assign Virtual Machine to Resource Pool" - "Module may require additional privileges as well, which may be required for gathering facts - e.g. ESXi configurations." - Tested on vSphere 5.5, 6.0, 6.5 and 6.7 - Use SCSI disks instead of IDE when you want to expand online disks by specifying a SCSI controller - "For additional information please visit Ansible VMware community wiki - U(https://github.com/ansible/community/wiki/VMware)." options: state: description: - Specify the state the virtual machine should be in. - 'If C(state) is set to C(present) and virtual machine exists, ensure the virtual machine configurations conforms to task arguments.' - 'If C(state) is set to C(absent) and virtual machine exists, then the specified virtual machine is removed with its associated components.' - 'If C(state) is set to one of the following C(poweredon), C(poweredoff), C(present), C(restarted), C(suspended) and virtual machine does not exists, then virtual machine is deployed with given parameters.' - 'If C(state) is set to C(poweredon) and virtual machine exists with powerstate other than powered on, then the specified virtual machine is powered on.' - 'If C(state) is set to C(poweredoff) and virtual machine exists with powerstate other than powered off, then the specified virtual machine is powered off.' - 'If C(state) is set to C(restarted) and virtual machine exists, then the virtual machine is restarted.' - 'If C(state) is set to C(suspended) and virtual machine exists, then the virtual machine is set to suspended mode.' - 'If C(state) is set to C(shutdownguest) and virtual machine exists, then the virtual machine is shutdown.' - 'If C(state) is set to C(rebootguest) and virtual machine exists, then the virtual machine is rebooted.' default: present choices: [ present, absent, poweredon, poweredoff, restarted, suspended, shutdownguest, rebootguest ] name: description: - Name of the virtual machine to work with. 
- Virtual machine names in vCenter are not necessarily unique, which may be problematic, see C(name_match). - 'If multiple virtual machines with same name exists, then C(folder) is required parameter to identify uniqueness of the virtual machine.' - This parameter is required, if C(state) is set to C(poweredon), C(poweredoff), C(present), C(restarted), C(suspended) and virtual machine does not exists. - This parameter is case sensitive. required: yes name_match: description: - If multiple virtual machines matching the name, use the first or last found. default: 'first' choices: [ first, last ] uuid: description: - UUID of the virtual machine to manage if known, this is VMware's unique identifier. - This is required if C(name) is not supplied. - If virtual machine does not exists, then this parameter is ignored. - Please note that a supplied UUID will be ignored on virtual machine creation, as VMware creates the UUID internally. use_instance_uuid: description: - Whether to use the VMware instance UUID rather than the BIOS UUID. default: no type: bool version_added: '2.8' template: description: - Template or existing virtual machine used to create new virtual machine. - If this value is not set, virtual machine is created without using a template. - If the virtual machine already exists, this parameter will be ignored. - This parameter is case sensitive. - You can also specify template or VM UUID for identifying source. version_added 2.8. Use C(hw_product_uuid) from M(vmware_guest_facts) as UUID value. - From version 2.8 onwards, absolute path to virtual machine or template can be used. aliases: [ 'template_src' ] is_template: description: - Flag the instance as a template. - This will mark the given virtual machine as template. default: 'no' type: bool version_added: '2.3' folder: description: - Destination folder, absolute path to find an existing guest or create the new guest. - The folder should include the datacenter. ESX's datacenter is ha-datacenter. - This parameter is case sensitive. - This parameter is required, while deploying new virtual machine. version_added 2.5. - 'If multiple machines are found with same name, this parameter is used to identify uniqueness of the virtual machine. version_added 2.5' - 'Examples:' - ' folder: /ha-datacenter/vm' - ' folder: ha-datacenter/vm' - ' folder: /datacenter1/vm' - ' folder: datacenter1/vm' - ' folder: /datacenter1/vm/folder1' - ' folder: datacenter1/vm/folder1' - ' folder: /folder1/datacenter1/vm' - ' folder: folder1/datacenter1/vm' - ' folder: /folder1/datacenter1/vm/folder2' hardware: description: - Manage virtual machine's hardware attributes. - All parameters case sensitive. - 'Valid attributes are:' - ' - C(hotadd_cpu) (boolean): Allow virtual CPUs to be added while the virtual machine is running.' - ' - C(hotremove_cpu) (boolean): Allow virtual CPUs to be removed while the virtual machine is running. version_added: 2.5' - ' - C(hotadd_memory) (boolean): Allow memory to be added while the virtual machine is running.' - ' - C(memory_mb) (integer): Amount of memory in MB.' - ' - C(nested_virt) (bool): Enable nested virtualization. version_added: 2.5' - ' - C(num_cpus) (integer): Number of CPUs.' - ' - C(num_cpu_cores_per_socket) (integer): Number of Cores Per Socket.' - " C(num_cpus) must be a multiple of C(num_cpu_cores_per_socket). 
For example to create a VM with 2 sockets of 4 cores, specify C(num_cpus): 8 and C(num_cpu_cores_per_socket): 4" - ' - C(scsi) (string): Valid values are C(buslogic), C(lsilogic), C(lsilogicsas) and C(paravirtual) (default).' - " - C(memory_reservation_lock) (boolean): If set true, memory resource reservation for the virtual machine will always be equal to the virtual machine's memory size. version_added: 2.5" - ' - C(max_connections) (integer): Maximum number of active remote display connections for the virtual machines. version_added: 2.5.' - ' - C(mem_limit) (integer): The memory utilization of a virtual machine will not exceed this limit. Unit is MB. version_added: 2.5' - ' - C(mem_reservation) (integer): The amount of memory resource that is guaranteed available to the virtual machine. Unit is MB. C(memory_reservation) is alias to this. version_added: 2.5' - ' - C(cpu_limit) (integer): The CPU utilization of a virtual machine will not exceed this limit. Unit is MHz. version_added: 2.5' - ' - C(cpu_reservation) (integer): The amount of CPU resource that is guaranteed available to the virtual machine. Unit is MHz. version_added: 2.5' - ' - C(version) (integer): The Virtual machine hardware versions. Default is 10 (ESXi 5.5 and onwards). If value specified as C(latest), version is set to the most current virtual hardware supported on the host. C(latest) is added in version 2.10. Please check VMware documentation for correct virtual machine hardware version. Incorrect hardware version may lead to failure in deployment. If hardware version is already equal to the given version then no action is taken. version_added: 2.6' - ' - C(boot_firmware) (string): Choose which firmware should be used to boot the virtual machine. Allowed values are "bios" and "efi". version_added: 2.7' - ' - C(virt_based_security) (bool): Enable Virtualization Based Security feature for Windows 10. (Support from Virtual machine hardware version 14, Guest OS Windows 10 64 bit, Windows Server 2016)' guest_id: description: - Set the guest ID. - This parameter is case sensitive. - 'Examples:' - " virtual machine with RHEL7 64 bit, will be 'rhel7_64Guest'" - " virtual machine with CentOS 64 bit, will be 'centos64Guest'" - " virtual machine with Ubuntu 64 bit, will be 'ubuntu64Guest'" - This field is required when creating a virtual machine, not required when creating from the template. - > Valid values are referenced here: U(https://code.vmware.com/apis/358/doc/vim.vm.GuestOsDescriptor.GuestOsIdentifier.html) version_added: '2.3' disk: description: - A list of disks to add. - This parameter is case sensitive. - Shrinking disks is not supported. - Removing existing disks of the virtual machine is not supported. - 'Valid attributes are:' - ' - C(size_[tb,gb,mb,kb]) (integer): Disk storage size in specified unit.' - ' - C(type) (string): Valid values are:' - ' - C(thin) thin disk' - ' - C(eagerzeroedthick) eagerzeroedthick disk, added in version 2.5' - ' Default: C(None) thick disk, no eagerzero.' - ' - C(datastore) (string): The name of datastore which will be used for the disk. If C(autoselect_datastore) is set to True, then will select the less used datastore whose name contains this "disk.datastore" string.' - ' - C(filename) (string): Existing disk image to be used. Filename must already exist on the datastore.' - ' Specify filename string in C([datastore_name] path/to/file.vmdk) format. Added in version 2.8.' - ' - C(autoselect_datastore) (bool): select the less used datastore. 
"disk.datastore" and "disk.autoselect_datastore" will not be used if C(datastore) is specified outside this C(disk) configuration.' - ' - C(disk_mode) (string): Type of disk mode. Added in version 2.6' - ' - Available options are :' - ' - C(persistent): Changes are immediately and permanently written to the virtual disk. This is default.' - ' - C(independent_persistent): Same as persistent, but not affected by snapshots.' - ' - C(independent_nonpersistent): Changes to virtual disk are made to a redo log and discarded at power off, but not affected by snapshots.' cdrom: description: - A CD-ROM configuration for the virtual machine. - Or a list of CD-ROMs configuration for the virtual machine. Added in version 2.9. - 'Parameters C(controller_type), C(controller_number), C(unit_number), C(state) are added for a list of CD-ROMs configuration support.' - 'Valid attributes are:' - ' - C(type) (string): The type of CD-ROM, valid options are C(none), C(client) or C(iso). With C(none) the CD-ROM will be disconnected but present.' - ' - C(iso_path) (string): The datastore path to the ISO file to use, in the form of C([datastore1] path/to/file.iso). Required if type is set C(iso).' - ' - C(controller_type) (string): Default value is C(ide). Only C(ide) controller type for CD-ROM is supported for now, will add SATA controller type in the future.' - ' - C(controller_number) (int): For C(ide) controller, valid value is 0 or 1.' - ' - C(unit_number) (int): For CD-ROM device attach to C(ide) controller, valid value is 0 or 1. C(controller_number) and C(unit_number) are mandatory attributes.' - ' - C(state) (string): Valid value is C(present) or C(absent). Default is C(present). If set to C(absent), then the specified CD-ROM will be removed. For C(ide) controller, hot-add or hot-remove CD-ROM is not supported.' version_added: '2.5' resource_pool: description: - Use the given resource pool for virtual machine operation. - This parameter is case sensitive. - Resource pool should be child of the selected host parent. version_added: '2.3' wait_for_ip_address: description: - Wait until vCenter detects an IP address for the virtual machine. - This requires vmware-tools (vmtoolsd) to properly work after creation. - "vmware-tools needs to be installed on the given virtual machine in order to work with this parameter." default: 'no' type: bool wait_for_ip_address_timeout: description: - Define a timeout (in seconds) for the wait_for_ip_address parameter. default: '300' type: int version_added: '2.10' wait_for_customization: description: - Wait until vCenter detects all guest customizations as successfully completed. - When enabled, the VM will automatically be powered on. default: 'no' type: bool version_added: '2.8' state_change_timeout: description: - If the C(state) is set to C(shutdownguest), by default the module will return immediately after sending the shutdown signal. - If this argument is set to a positive integer, the module will instead wait for the virtual machine to reach the poweredoff state. - The value sets a timeout in seconds for the module to wait for the state change. default: 0 version_added: '2.6' snapshot_src: description: - Name of the existing snapshot to use to create a clone of a virtual machine. - This parameter is case sensitive. - While creating linked clone using C(linked_clone) parameter, this parameter is required. version_added: '2.4' linked_clone: description: - Whether to create a linked clone from the snapshot specified. 
- If specified, then C(snapshot_src) is required parameter. default: 'no' type: bool version_added: '2.4' force: description: - Ignore warnings and complete the actions. - This parameter is useful while removing virtual machine which is powered on state. - 'This module reflects the VMware vCenter API and UI workflow, as such, in some cases the `force` flag will be mandatory to perform the action to ensure you are certain the action has to be taken, no matter what the consequence. This is specifically the case for removing a powered on the virtual machine when C(state) is set to C(absent).' default: 'no' type: bool delete_from_inventory: description: - Whether to delete Virtual machine from inventory or delete from disk. default: False type: bool version_added: '2.10' datacenter: description: - Destination datacenter for the deploy operation. - This parameter is case sensitive. default: ha-datacenter cluster: description: - The cluster name where the virtual machine will run. - This is a required parameter, if C(esxi_hostname) is not set. - C(esxi_hostname) and C(cluster) are mutually exclusive parameters. - This parameter is case sensitive. version_added: '2.3' esxi_hostname: description: - The ESXi hostname where the virtual machine will run. - This is a required parameter, if C(cluster) is not set. - C(esxi_hostname) and C(cluster) are mutually exclusive parameters. - This parameter is case sensitive. annotation: description: - A note or annotation to include in the virtual machine. version_added: '2.3' customvalues: description: - Define a list of custom values to set on virtual machine. - A custom value object takes two fields C(key) and C(value). - Incorrect key and values will be ignored. version_added: '2.3' networks: description: - A list of networks (in the order of the NICs). - Removing NICs is not allowed, while reconfiguring the virtual machine. - All parameters and VMware object names are case sensitive. - 'One of the below parameters is required per entry:' - ' - C(name) (string): Name of the portgroup or distributed virtual portgroup for this interface. When specifying distributed virtual portgroup make sure given C(esxi_hostname) or C(cluster) is associated with it.' - ' - C(vlan) (integer): VLAN number for this interface.' - 'Optional parameters per entry (used for virtual hardware):' - ' - C(device_type) (string): Virtual network device (one of C(e1000), C(e1000e), C(pcnet32), C(vmxnet2), C(vmxnet3) (default), C(sriov)).' - ' - C(mac) (string): Customize MAC address.' - ' - C(dvswitch_name) (string): Name of the distributed vSwitch. This value is required if multiple distributed portgroups exists with the same name. version_added 2.7' - ' - C(start_connected) (bool): Indicates that virtual network adapter starts with associated virtual machine powers on. version_added: 2.5' - 'Optional parameters per entry (used for OS customization):' - ' - C(type) (string): Type of IP assignment (either C(dhcp) or C(static)). C(dhcp) is default.' - ' - C(ip) (string): Static IP address (implies C(type: static)).' - ' - C(netmask) (string): Static netmask required for C(ip).' - ' - C(gateway) (string): Static gateway.' - ' - C(dns_servers) (string): DNS servers for this network interface (Windows).' - ' - C(domain) (string): Domain name for this network interface (Windows).' - ' - C(wake_on_lan) (bool): Indicates if wake-on-LAN is enabled on this virtual network adapter. 
version_added: 2.5' - ' - C(allow_guest_control) (bool): Enables guest control over whether the connectable device is connected. version_added: 2.5' version_added: '2.3' customization: description: - Parameters for OS customization when cloning from the template or the virtual machine, or apply to the existing virtual machine directly. - Not all operating systems are supported for customization with respective vCenter version, please check VMware documentation for respective OS customization. - For supported customization operating system matrix, (see U(http://partnerweb.vmware.com/programs/guestOS/guest-os-customization-matrix.pdf)) - All parameters and VMware object names are case sensitive. - Linux based OSes requires Perl package to be installed for OS customizations. - 'Common parameters (Linux/Windows):' - ' - C(existing_vm) (bool): If set to C(True), do OS customization on the specified virtual machine directly. If set to C(False) or not specified, do OS customization when cloning from the template or the virtual machine. version_added: 2.8' - ' - C(dns_servers) (list): List of DNS servers to configure.' - ' - C(dns_suffix) (list): List of domain suffixes, also known as DNS search path (default: C(domain) parameter).' - ' - C(domain) (string): DNS domain name to use.' - ' - C(hostname) (string): Computer hostname (default: shorted C(name) parameter). Allowed characters are alphanumeric (uppercase and lowercase) and minus, rest of the characters are dropped as per RFC 952.' - 'Parameters related to Linux customization:' - ' - C(timezone) (string): Timezone (See List of supported time zones for different vSphere versions in Linux/Unix systems (2145518) U(https://kb.vmware.com/s/article/2145518)). version_added: 2.9' - ' - C(hwclockUTC) (bool): Specifies whether the hardware clock is in UTC or local time. True when the hardware clock is in UTC, False when the hardware clock is in local time. version_added: 2.9' - 'Parameters related to Windows customization:' - ' - C(autologon) (bool): Auto logon after virtual machine customization (default: False).' - ' - C(autologoncount) (int): Number of autologon after reboot (default: 1).' - ' - C(domainadmin) (string): User used to join in AD domain (mandatory with C(joindomain)).' - ' - C(domainadminpassword) (string): Password used to join in AD domain (mandatory with C(joindomain)).' - ' - C(fullname) (string): Server owner name (default: Administrator).' - ' - C(joindomain) (string): AD domain to join (Not compatible with C(joinworkgroup)).' - ' - C(joinworkgroup) (string): Workgroup to join (Not compatible with C(joindomain), default: WORKGROUP).' - ' - C(orgname) (string): Organisation name (default: ACME).' - ' - C(password) (string): Local administrator password.' - ' - C(productid) (string): Product ID.' - ' - C(runonce) (list): List of commands to run at first user logon.' - ' - C(timezone) (int): Timezone (See U(https://msdn.microsoft.com/en-us/library/ms912391.aspx)).' version_added: '2.3' vapp_properties: description: - A list of vApp properties - 'For full list of attributes and types refer to: U(https://github.com/vmware/pyvmomi/blob/master/docs/vim/vApp/PropertyInfo.rst)' - 'Basic attributes are:' - ' - C(id) (string): Property id - required.' - ' - C(value) (string): Property value.' - ' - C(type) (string): Value type, string type by default.' - ' - C(operation): C(remove): This attribute is required only when removing properties.' 
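    # A minimal illustrative entry, mirroring the vApp example under EXAMPLES below:
    #   vapp_properties:
    #   - id: remoteIP
    #     value: 10.10.10.1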
version_added: '2.6' customization_spec: description: - Unique name identifying the requested customization specification. - This parameter is case sensitive. - If set, then overrides C(customization) parameter values. version_added: '2.6' datastore: description: - Specify datastore or datastore cluster to provision virtual machine. - 'This parameter takes precedence over "disk.datastore" parameter.' - 'This parameter can be used to override datastore or datastore cluster setting of the virtual machine when deployed from the template.' - Please see example for more usage. version_added: '2.7' convert: description: - Specify convert disk type while cloning template or virtual machine. choices: [ thin, thick, eagerzeroedthick ] version_added: '2.8' extends_documentation_fragment: vmware.documentation ''' EXAMPLES = r''' - name: Create a virtual machine on given ESXi hostname vmware_guest: hostname: "{{ vcenter_hostname }}" username: "{{ vcenter_username }}" password: "{{ vcenter_password }}" validate_certs: no folder: /DC1/vm/ name: test_vm_0001 state: poweredon guest_id: centos64Guest # This is hostname of particular ESXi server on which user wants VM to be deployed esxi_hostname: "{{ esxi_hostname }}" disk: - size_gb: 10 type: thin datastore: datastore1 hardware: memory_mb: 512 num_cpus: 4 scsi: paravirtual networks: - name: VM Network mac: aa:bb:dd:aa:00:14 ip: 10.10.10.100 netmask: 255.255.255.0 device_type: vmxnet3 wait_for_ip_address: yes wait_for_ip_address_timeout: 600 delegate_to: localhost register: deploy_vm - name: Create a virtual machine from a template vmware_guest: hostname: "{{ vcenter_hostname }}" username: "{{ vcenter_username }}" password: "{{ vcenter_password }}" validate_certs: no folder: /testvms name: testvm_2 state: poweredon template: template_el7 disk: - size_gb: 10 type: thin datastore: g73_datastore hardware: memory_mb: 512 num_cpus: 6 num_cpu_cores_per_socket: 3 scsi: paravirtual memory_reservation_lock: True mem_limit: 8096 mem_reservation: 4096 cpu_limit: 8096 cpu_reservation: 4096 max_connections: 5 hotadd_cpu: True hotremove_cpu: True hotadd_memory: False version: 12 # Hardware version of virtual machine boot_firmware: "efi" cdrom: type: iso iso_path: "[datastore1] livecd.iso" networks: - name: VM Network mac: aa:bb:dd:aa:00:14 wait_for_ip_address: yes delegate_to: localhost register: deploy - name: Clone a virtual machine from Windows template and customize vmware_guest: hostname: "{{ vcenter_hostname }}" username: "{{ vcenter_username }}" password: "{{ vcenter_password }}" validate_certs: no datacenter: datacenter1 cluster: cluster name: testvm-2 template: template_windows networks: - name: VM Network ip: 192.168.1.100 netmask: 255.255.255.0 gateway: 192.168.1.1 mac: aa:bb:dd:aa:00:14 domain: my_domain dns_servers: - 192.168.1.1 - 192.168.1.2 - vlan: 1234 type: dhcp customization: autologon: yes dns_servers: - 192.168.1.1 - 192.168.1.2 domain: my_domain password: new_vm_password runonce: - powershell.exe -ExecutionPolicy Unrestricted -File C:\Windows\Temp\ConfigureRemotingForAnsible.ps1 -ForceNewSSLCert -EnableCredSSP delegate_to: localhost - name: Clone a virtual machine from Linux template and customize vmware_guest: hostname: "{{ vcenter_hostname }}" username: "{{ vcenter_username }}" password: "{{ vcenter_password }}" validate_certs: no datacenter: "{{ datacenter }}" state: present folder: /DC1/vm template: "{{ template }}" name: "{{ vm_name }}" cluster: DC1_C1 networks: - name: VM Network ip: 192.168.10.11 netmask: 255.255.255.0 wait_for_ip_address: 
True customization: domain: "{{ guest_domain }}" dns_servers: - 8.9.9.9 - 7.8.8.9 dns_suffix: - example.com - example2.com delegate_to: localhost - name: Rename a virtual machine (requires the virtual machine's uuid) vmware_guest: hostname: "{{ vcenter_hostname }}" username: "{{ vcenter_username }}" password: "{{ vcenter_password }}" validate_certs: no uuid: "{{ vm_uuid }}" name: new_name state: present delegate_to: localhost - name: Remove a virtual machine by uuid vmware_guest: hostname: "{{ vcenter_hostname }}" username: "{{ vcenter_username }}" password: "{{ vcenter_password }}" validate_certs: no uuid: "{{ vm_uuid }}" state: absent delegate_to: localhost - name: Remove a virtual machine from inventory vmware_guest: hostname: "{{ vcenter_hostname }}" username: "{{ vcenter_username }}" password: "{{ vcenter_password }}" validate_certs: no name: vm_name delete_from_inventory: True state: absent delegate_to: localhost - name: Manipulate vApp properties vmware_guest: hostname: "{{ vcenter_hostname }}" username: "{{ vcenter_username }}" password: "{{ vcenter_password }}" validate_certs: no name: vm_name state: present vapp_properties: - id: remoteIP category: Backup label: Backup server IP type: str value: 10.10.10.1 - id: old_property operation: remove delegate_to: localhost - name: Set powerstate of a virtual machine to poweroff by using UUID vmware_guest: hostname: "{{ vcenter_hostname }}" username: "{{ vcenter_username }}" password: "{{ vcenter_password }}" validate_certs: no uuid: "{{ vm_uuid }}" state: poweredoff delegate_to: localhost - name: Deploy a virtual machine in a datastore different from the datastore of the template vmware_guest: hostname: "{{ vcenter_hostname }}" username: "{{ vcenter_username }}" password: "{{ vcenter_password }}" name: "{{ vm_name }}" state: present template: "{{ template_name }}" # Here datastore can be different which holds template datastore: "{{ virtual_machine_datastore }}" hardware: memory_mb: 512 num_cpus: 2 scsi: paravirtual delegate_to: localhost ''' RETURN = r''' instance: description: metadata about the new virtual machine returned: always type: dict sample: None ''' import re import time import string HAS_PYVMOMI = False try: from pyVmomi import vim, vmodl, VmomiSupport HAS_PYVMOMI = True except ImportError: pass from random import randint from ansible.module_utils.basic import AnsibleModule from ansible.module_utils.common.network import is_mac from ansible.module_utils._text import to_text, to_native from ansible.module_utils.vmware import (find_obj, gather_vm_facts, get_all_objs, compile_folder_path_for_object, serialize_spec, vmware_argument_spec, set_vm_power_state, PyVmomi, find_dvs_by_name, find_dvspg_by_name, wait_for_vm_ip, wait_for_task, TaskError, quote_obj_name) def list_or_dict(value): if isinstance(value, list) or isinstance(value, dict): return value else: raise ValueError("'%s' is not valid, valid type is 'list' or 'dict'." 
% value) class PyVmomiDeviceHelper(object): """ This class is a helper to create easily VMware Objects for PyVmomiHelper """ def __init__(self, module): self.module = module self.next_disk_unit_number = 0 self.scsi_device_type = { 'lsilogic': vim.vm.device.VirtualLsiLogicController, 'paravirtual': vim.vm.device.ParaVirtualSCSIController, 'buslogic': vim.vm.device.VirtualBusLogicController, 'lsilogicsas': vim.vm.device.VirtualLsiLogicSASController, } def create_scsi_controller(self, scsi_type): scsi_ctl = vim.vm.device.VirtualDeviceSpec() scsi_ctl.operation = vim.vm.device.VirtualDeviceSpec.Operation.add scsi_device = self.scsi_device_type.get(scsi_type, vim.vm.device.ParaVirtualSCSIController) scsi_ctl.device = scsi_device() scsi_ctl.device.busNumber = 0 # While creating a new SCSI controller, temporary key value # should be unique negative integers scsi_ctl.device.key = -randint(1000, 9999) scsi_ctl.device.hotAddRemove = True scsi_ctl.device.sharedBus = 'noSharing' scsi_ctl.device.scsiCtlrUnitNumber = 7 return scsi_ctl def is_scsi_controller(self, device): return isinstance(device, tuple(self.scsi_device_type.values())) @staticmethod def create_ide_controller(bus_number=0): ide_ctl = vim.vm.device.VirtualDeviceSpec() ide_ctl.operation = vim.vm.device.VirtualDeviceSpec.Operation.add ide_ctl.device = vim.vm.device.VirtualIDEController() ide_ctl.device.deviceInfo = vim.Description() # While creating a new IDE controller, temporary key value # should be unique negative integers ide_ctl.device.key = -randint(200, 299) ide_ctl.device.busNumber = bus_number return ide_ctl @staticmethod def create_cdrom(ide_device, cdrom_type, iso_path=None, unit_number=0): cdrom_spec = vim.vm.device.VirtualDeviceSpec() cdrom_spec.operation = vim.vm.device.VirtualDeviceSpec.Operation.add cdrom_spec.device = vim.vm.device.VirtualCdrom() cdrom_spec.device.controllerKey = ide_device.key cdrom_spec.device.key = -randint(3000, 3999) cdrom_spec.device.unitNumber = unit_number cdrom_spec.device.connectable = vim.vm.device.VirtualDevice.ConnectInfo() cdrom_spec.device.connectable.allowGuestControl = True cdrom_spec.device.connectable.startConnected = (cdrom_type != "none") if cdrom_type in ["none", "client"]: cdrom_spec.device.backing = vim.vm.device.VirtualCdrom.RemotePassthroughBackingInfo() elif cdrom_type == "iso": cdrom_spec.device.backing = vim.vm.device.VirtualCdrom.IsoBackingInfo(fileName=iso_path) return cdrom_spec @staticmethod def is_equal_cdrom(vm_obj, cdrom_device, cdrom_type, iso_path): if cdrom_type == "none": return (isinstance(cdrom_device.backing, vim.vm.device.VirtualCdrom.RemotePassthroughBackingInfo) and cdrom_device.connectable.allowGuestControl and not cdrom_device.connectable.startConnected and (vm_obj.runtime.powerState != vim.VirtualMachinePowerState.poweredOn or not cdrom_device.connectable.connected)) elif cdrom_type == "client": return (isinstance(cdrom_device.backing, vim.vm.device.VirtualCdrom.RemotePassthroughBackingInfo) and cdrom_device.connectable.allowGuestControl and cdrom_device.connectable.startConnected and (vm_obj.runtime.powerState != vim.VirtualMachinePowerState.poweredOn or cdrom_device.connectable.connected)) elif cdrom_type == "iso": return (isinstance(cdrom_device.backing, vim.vm.device.VirtualCdrom.IsoBackingInfo) and cdrom_device.backing.fileName == iso_path and cdrom_device.connectable.allowGuestControl and cdrom_device.connectable.startConnected and (vm_obj.runtime.powerState != vim.VirtualMachinePowerState.poweredOn or cdrom_device.connectable.connected)) @staticmethod 
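    # The helper below edits an existing CD-ROM in place: it swaps the backing between
    # remote passthrough ('client'/'none') and an ISO file, rebuilds the connectable
    # info, and only toggles the live 'connected' flag when the VM is powered on.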
def update_cdrom_config(vm_obj, cdrom_spec, cdrom_device, iso_path=None): # Updating an existing CD-ROM if cdrom_spec["type"] in ["client", "none"]: cdrom_device.backing = vim.vm.device.VirtualCdrom.RemotePassthroughBackingInfo() elif cdrom_spec["type"] == "iso" and iso_path is not None: cdrom_device.backing = vim.vm.device.VirtualCdrom.IsoBackingInfo(fileName=iso_path) cdrom_device.connectable = vim.vm.device.VirtualDevice.ConnectInfo() cdrom_device.connectable.allowGuestControl = True cdrom_device.connectable.startConnected = (cdrom_spec["type"] != "none") if vm_obj and vm_obj.runtime.powerState == vim.VirtualMachinePowerState.poweredOn: cdrom_device.connectable.connected = (cdrom_spec["type"] != "none") def remove_cdrom(self, cdrom_device): cdrom_spec = vim.vm.device.VirtualDeviceSpec() cdrom_spec.operation = vim.vm.device.VirtualDeviceSpec.Operation.remove cdrom_spec.device = cdrom_device return cdrom_spec def create_scsi_disk(self, scsi_ctl, disk_index=None): diskspec = vim.vm.device.VirtualDeviceSpec() diskspec.operation = vim.vm.device.VirtualDeviceSpec.Operation.add diskspec.device = vim.vm.device.VirtualDisk() diskspec.device.backing = vim.vm.device.VirtualDisk.FlatVer2BackingInfo() diskspec.device.controllerKey = scsi_ctl.device.key if self.next_disk_unit_number == 7: raise AssertionError() if disk_index == 7: raise AssertionError() """ Configure disk unit number. """ if disk_index is not None: diskspec.device.unitNumber = disk_index self.next_disk_unit_number = disk_index + 1 else: diskspec.device.unitNumber = self.next_disk_unit_number self.next_disk_unit_number += 1 # unit number 7 is reserved to SCSI controller, increase next index if self.next_disk_unit_number == 7: self.next_disk_unit_number += 1 return diskspec def get_device(self, device_type, name): nic_dict = dict(pcnet32=vim.vm.device.VirtualPCNet32(), vmxnet2=vim.vm.device.VirtualVmxnet2(), vmxnet3=vim.vm.device.VirtualVmxnet3(), e1000=vim.vm.device.VirtualE1000(), e1000e=vim.vm.device.VirtualE1000e(), sriov=vim.vm.device.VirtualSriovEthernetCard(), ) if device_type in nic_dict: return nic_dict[device_type] else: self.module.fail_json(msg='Invalid device_type "%s"' ' for network "%s"' % (device_type, name)) def create_nic(self, device_type, device_label, device_infos): nic = vim.vm.device.VirtualDeviceSpec() nic.device = self.get_device(device_type, device_infos['name']) nic.device.wakeOnLanEnabled = bool(device_infos.get('wake_on_lan', True)) nic.device.deviceInfo = vim.Description() nic.device.deviceInfo.label = device_label nic.device.deviceInfo.summary = device_infos['name'] nic.device.connectable = vim.vm.device.VirtualDevice.ConnectInfo() nic.device.connectable.startConnected = bool(device_infos.get('start_connected', True)) nic.device.connectable.allowGuestControl = bool(device_infos.get('allow_guest_control', True)) nic.device.connectable.connected = True if 'mac' in device_infos and is_mac(device_infos['mac']): nic.device.addressType = 'manual' nic.device.macAddress = device_infos['mac'] else: nic.device.addressType = 'generated' return nic def integer_value(self, input_value, name): """ Function to return int value for given input, else return error Args: input_value: Input value to retrieve int value from name: Name of the Input value (used to build error message) Returns: (int) if integer value can be obtained, otherwise will send a error message. 
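        For example (illustrative values): integer_value(4, 'hardware.num_cpus') and
        integer_value('4', 'hardware.num_cpus') both return 4, while
        integer_value('four', 'hardware.num_cpus') fails the module with an error.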
""" if isinstance(input_value, int): return input_value elif isinstance(input_value, str) and input_value.isdigit(): return int(input_value) else: self.module.fail_json(msg='"%s" attribute should be an' ' integer value.' % name) class PyVmomiCache(object): """ This class caches references to objects which are requested multiples times but not modified """ def __init__(self, content, dc_name=None): self.content = content self.dc_name = dc_name self.networks = {} self.clusters = {} self.esx_hosts = {} self.parent_datacenters = {} def find_obj(self, content, types, name, confine_to_datacenter=True): """ Wrapper around find_obj to set datacenter context """ result = find_obj(content, types, name) if result and confine_to_datacenter: if to_text(self.get_parent_datacenter(result).name) != to_text(self.dc_name): result = None objects = self.get_all_objs(content, types, confine_to_datacenter=True) for obj in objects: if name is None or to_text(obj.name) == to_text(name): return obj return result def get_all_objs(self, content, types, confine_to_datacenter=True): """ Wrapper around get_all_objs to set datacenter context """ objects = get_all_objs(content, types) if confine_to_datacenter: if hasattr(objects, 'items'): # resource pools come back as a dictionary # make a copy for k, v in tuple(objects.items()): parent_dc = self.get_parent_datacenter(k) if parent_dc.name != self.dc_name: del objects[k] else: # everything else should be a list objects = [x for x in objects if self.get_parent_datacenter(x).name == self.dc_name] return objects def get_network(self, network): network = quote_obj_name(network) if network not in self.networks: self.networks[network] = self.find_obj(self.content, [vim.Network], network) return self.networks[network] def get_cluster(self, cluster): if cluster not in self.clusters: self.clusters[cluster] = self.find_obj(self.content, [vim.ClusterComputeResource], cluster) return self.clusters[cluster] def get_esx_host(self, host): if host not in self.esx_hosts: self.esx_hosts[host] = self.find_obj(self.content, [vim.HostSystem], host) return self.esx_hosts[host] def get_parent_datacenter(self, obj): """ Walk the parent tree to find the objects datacenter """ if isinstance(obj, vim.Datacenter): return obj if obj in self.parent_datacenters: return self.parent_datacenters[obj] datacenter = None while True: if not hasattr(obj, 'parent'): break obj = obj.parent if isinstance(obj, vim.Datacenter): datacenter = obj break self.parent_datacenters[obj] = datacenter return datacenter class PyVmomiHelper(PyVmomi): def __init__(self, module): super(PyVmomiHelper, self).__init__(module) self.device_helper = PyVmomiDeviceHelper(self.module) self.configspec = None self.relospec = None self.change_detected = False # a change was detected and needs to be applied through reconfiguration self.change_applied = False # a change was applied meaning at least one task succeeded self.customspec = None self.cache = PyVmomiCache(self.content, dc_name=self.params['datacenter']) def gather_facts(self, vm): return gather_vm_facts(self.content, vm) def remove_vm(self, vm, delete_from_inventory=False): # https://www.vmware.com/support/developer/converter-sdk/conv60_apireference/vim.ManagedEntity.html#destroy if vm.summary.runtime.powerState.lower() == 'poweredon': self.module.fail_json(msg="Virtual machine %s found in 'powered on' state, " "please use 'force' parameter to remove or poweroff VM " "and try removing VM again." 
% vm.name) # Delete VM from Inventory if delete_from_inventory: try: vm.UnregisterVM() except (vim.fault.TaskInProgress, vmodl.RuntimeFault) as e: return {'changed': self.change_applied, 'failed': True, 'msg': e.msg, 'op': 'UnregisterVM'} self.change_applied = True return {'changed': self.change_applied, 'failed': False} # Delete VM from Disk task = vm.Destroy() self.wait_for_task(task) if task.info.state == 'error': return {'changed': self.change_applied, 'failed': True, 'msg': task.info.error.msg, 'op': 'destroy'} else: return {'changed': self.change_applied, 'failed': False} def configure_guestid(self, vm_obj, vm_creation=False): # guest_id is not required when using templates if self.params['template']: return # guest_id is only mandatory on VM creation if vm_creation and self.params['guest_id'] is None: self.module.fail_json(msg="guest_id attribute is mandatory for VM creation") if self.params['guest_id'] and \ (vm_obj is None or self.params['guest_id'].lower() != vm_obj.summary.config.guestId.lower()): self.change_detected = True self.configspec.guestId = self.params['guest_id'] def configure_resource_alloc_info(self, vm_obj): """ Function to configure resource allocation information about virtual machine :param vm_obj: VM object in case of reconfigure, None in case of deploy :return: None """ rai_change_detected = False memory_allocation = vim.ResourceAllocationInfo() cpu_allocation = vim.ResourceAllocationInfo() if 'hardware' in self.params: if 'mem_limit' in self.params['hardware']: mem_limit = None try: mem_limit = int(self.params['hardware'].get('mem_limit')) except ValueError: self.module.fail_json(msg="hardware.mem_limit attribute should be an integer value.") memory_allocation.limit = mem_limit if vm_obj is None or memory_allocation.limit != vm_obj.config.memoryAllocation.limit: rai_change_detected = True if 'mem_reservation' in self.params['hardware'] or 'memory_reservation' in self.params['hardware']: mem_reservation = self.params['hardware'].get('mem_reservation') if mem_reservation is None: mem_reservation = self.params['hardware'].get('memory_reservation') try: mem_reservation = int(mem_reservation) except ValueError: self.module.fail_json(msg="hardware.mem_reservation or hardware.memory_reservation should be an integer value.") memory_allocation.reservation = mem_reservation if vm_obj is None or \ memory_allocation.reservation != vm_obj.config.memoryAllocation.reservation: rai_change_detected = True if 'cpu_limit' in self.params['hardware']: cpu_limit = None try: cpu_limit = int(self.params['hardware'].get('cpu_limit')) except ValueError: self.module.fail_json(msg="hardware.cpu_limit attribute should be an integer value.") cpu_allocation.limit = cpu_limit if vm_obj is None or cpu_allocation.limit != vm_obj.config.cpuAllocation.limit: rai_change_detected = True if 'cpu_reservation' in self.params['hardware']: cpu_reservation = None try: cpu_reservation = int(self.params['hardware'].get('cpu_reservation')) except ValueError: self.module.fail_json(msg="hardware.cpu_reservation should be an integer value.") cpu_allocation.reservation = cpu_reservation if vm_obj is None or \ cpu_allocation.reservation != vm_obj.config.cpuAllocation.reservation: rai_change_detected = True if rai_change_detected: self.configspec.memoryAllocation = memory_allocation self.configspec.cpuAllocation = cpu_allocation self.change_detected = True def configure_cpu_and_memory(self, vm_obj, vm_creation=False): # set cpu/memory/etc if 'hardware' in self.params: if 'num_cpus' in self.params['hardware']: 
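                # num_cpus may arrive as a string from YAML, so coerce it to an integer
                # first; when the VM is powered on, the hot-add/hot-remove checks below
                # decide whether the requested CPU count can actually be applied.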
try: num_cpus = int(self.params['hardware']['num_cpus']) except ValueError: self.module.fail_json(msg="hardware.num_cpus attribute should be an integer value.") # check VM power state and cpu hot-add/hot-remove state before re-config VM if vm_obj and vm_obj.runtime.powerState == vim.VirtualMachinePowerState.poweredOn: if not vm_obj.config.cpuHotRemoveEnabled and num_cpus < vm_obj.config.hardware.numCPU: self.module.fail_json(msg="Configured cpu number is less than the cpu number of the VM, " "cpuHotRemove is not enabled") if not vm_obj.config.cpuHotAddEnabled and num_cpus > vm_obj.config.hardware.numCPU: self.module.fail_json(msg="Configured cpu number is more than the cpu number of the VM, " "cpuHotAdd is not enabled") if 'num_cpu_cores_per_socket' in self.params['hardware']: try: num_cpu_cores_per_socket = int(self.params['hardware']['num_cpu_cores_per_socket']) except ValueError: self.module.fail_json(msg="hardware.num_cpu_cores_per_socket attribute " "should be an integer value.") if num_cpus % num_cpu_cores_per_socket != 0: self.module.fail_json(msg="hardware.num_cpus attribute should be a multiple " "of hardware.num_cpu_cores_per_socket") self.configspec.numCoresPerSocket = num_cpu_cores_per_socket if vm_obj is None or self.configspec.numCoresPerSocket != vm_obj.config.hardware.numCoresPerSocket: self.change_detected = True self.configspec.numCPUs = num_cpus if vm_obj is None or self.configspec.numCPUs != vm_obj.config.hardware.numCPU: self.change_detected = True # num_cpu is mandatory for VM creation elif vm_creation and not self.params['template']: self.module.fail_json(msg="hardware.num_cpus attribute is mandatory for VM creation") if 'memory_mb' in self.params['hardware']: try: memory_mb = int(self.params['hardware']['memory_mb']) except ValueError: self.module.fail_json(msg="Failed to parse hardware.memory_mb value." 
" Please refer the documentation and provide" " correct value.") # check VM power state and memory hotadd state before re-config VM if vm_obj and vm_obj.runtime.powerState == vim.VirtualMachinePowerState.poweredOn: if vm_obj.config.memoryHotAddEnabled and memory_mb < vm_obj.config.hardware.memoryMB: self.module.fail_json(msg="Configured memory is less than memory size of the VM, " "operation is not supported") elif not vm_obj.config.memoryHotAddEnabled and memory_mb != vm_obj.config.hardware.memoryMB: self.module.fail_json(msg="memoryHotAdd is not enabled") self.configspec.memoryMB = memory_mb if vm_obj is None or self.configspec.memoryMB != vm_obj.config.hardware.memoryMB: self.change_detected = True # memory_mb is mandatory for VM creation elif vm_creation and not self.params['template']: self.module.fail_json(msg="hardware.memory_mb attribute is mandatory for VM creation") if 'hotadd_memory' in self.params['hardware']: if vm_obj and vm_obj.runtime.powerState == vim.VirtualMachinePowerState.poweredOn and \ vm_obj.config.memoryHotAddEnabled != bool(self.params['hardware']['hotadd_memory']): self.module.fail_json(msg="Configure hotadd memory operation is not supported when VM is power on") self.configspec.memoryHotAddEnabled = bool(self.params['hardware']['hotadd_memory']) if vm_obj is None or self.configspec.memoryHotAddEnabled != vm_obj.config.memoryHotAddEnabled: self.change_detected = True if 'hotadd_cpu' in self.params['hardware']: if vm_obj and vm_obj.runtime.powerState == vim.VirtualMachinePowerState.poweredOn and \ vm_obj.config.cpuHotAddEnabled != bool(self.params['hardware']['hotadd_cpu']): self.module.fail_json(msg="Configure hotadd cpu operation is not supported when VM is power on") self.configspec.cpuHotAddEnabled = bool(self.params['hardware']['hotadd_cpu']) if vm_obj is None or self.configspec.cpuHotAddEnabled != vm_obj.config.cpuHotAddEnabled: self.change_detected = True if 'hotremove_cpu' in self.params['hardware']: if vm_obj and vm_obj.runtime.powerState == vim.VirtualMachinePowerState.poweredOn and \ vm_obj.config.cpuHotRemoveEnabled != bool(self.params['hardware']['hotremove_cpu']): self.module.fail_json(msg="Configure hotremove cpu operation is not supported when VM is power on") self.configspec.cpuHotRemoveEnabled = bool(self.params['hardware']['hotremove_cpu']) if vm_obj is None or self.configspec.cpuHotRemoveEnabled != vm_obj.config.cpuHotRemoveEnabled: self.change_detected = True if 'memory_reservation_lock' in self.params['hardware']: self.configspec.memoryReservationLockedToMax = bool(self.params['hardware']['memory_reservation_lock']) if vm_obj is None or self.configspec.memoryReservationLockedToMax != vm_obj.config.memoryReservationLockedToMax: self.change_detected = True if 'boot_firmware' in self.params['hardware']: # boot firmware re-config can cause boot issue if vm_obj is not None: return boot_firmware = self.params['hardware']['boot_firmware'].lower() if boot_firmware not in ('bios', 'efi'): self.module.fail_json(msg="hardware.boot_firmware value is invalid [%s]." " Need one of ['bios', 'efi']." 
% boot_firmware) self.configspec.firmware = boot_firmware self.change_detected = True def sanitize_cdrom_params(self): # cdroms {'ide': [{num: 0, cdrom: []}, {}], 'sata': [{num: 0, cdrom: []}, {}, ...]} cdroms = {'ide': [], 'sata': []} expected_cdrom_spec = self.params.get('cdrom') if expected_cdrom_spec: for cdrom_spec in expected_cdrom_spec: cdrom_spec['controller_type'] = cdrom_spec.get('controller_type', 'ide').lower() if cdrom_spec['controller_type'] not in ['ide', 'sata']: self.module.fail_json(msg="Invalid cdrom.controller_type: %s, valid value is 'ide' or 'sata'." % cdrom_spec['controller_type']) cdrom_spec['state'] = cdrom_spec.get('state', 'present').lower() if cdrom_spec['state'] not in ['present', 'absent']: self.module.fail_json(msg="Invalid cdrom.state: %s, valid value is 'present', 'absent'." % cdrom_spec['state']) if cdrom_spec['state'] == 'present': if 'type' in cdrom_spec and cdrom_spec.get('type') not in ['none', 'client', 'iso']: self.module.fail_json(msg="Invalid cdrom.type: %s, valid value is 'none', 'client' or 'iso'." % cdrom_spec.get('type')) if cdrom_spec.get('type') == 'iso' and not cdrom_spec.get('iso_path'): self.module.fail_json(msg="cdrom.iso_path is mandatory when cdrom.type is set to iso.") if cdrom_spec['controller_type'] == 'ide' and \ (cdrom_spec.get('controller_number') not in [0, 1] or cdrom_spec.get('unit_number') not in [0, 1]): self.module.fail_json(msg="Invalid cdrom.controller_number: %s or cdrom.unit_number: %s, valid" " values are 0 or 1 for IDE controller." % (cdrom_spec.get('controller_number'), cdrom_spec.get('unit_number'))) if cdrom_spec['controller_type'] == 'sata' and \ (cdrom_spec.get('controller_number') not in range(0, 4) or cdrom_spec.get('unit_number') not in range(0, 30)): self.module.fail_json(msg="Invalid cdrom.controller_number: %s or cdrom.unit_number: %s," " valid controller_number value is 0-3, valid unit_number is 0-29" " for SATA controller." % (cdrom_spec.get('controller_number'), cdrom_spec.get('unit_number'))) ctl_exist = False for exist_spec in cdroms.get(cdrom_spec['controller_type']): if exist_spec['num'] == cdrom_spec['controller_number']: ctl_exist = True exist_spec['cdrom'].append(cdrom_spec) break if not ctl_exist: cdroms.get(cdrom_spec['controller_type']).append({'num': cdrom_spec['controller_number'], 'cdrom': [cdrom_spec]}) return cdroms def configure_cdrom(self, vm_obj): # Configure the VM CD-ROM if self.params.get('cdrom'): if vm_obj and vm_obj.config.template: # Changing CD-ROM settings on a template is not supported return if isinstance(self.params.get('cdrom'), dict): self.configure_cdrom_dict(vm_obj) elif isinstance(self.params.get('cdrom'), list): self.configure_cdrom_list(vm_obj) def configure_cdrom_dict(self, vm_obj): if self.params["cdrom"].get('type') not in ['none', 'client', 'iso']: self.module.fail_json(msg="cdrom.type is mandatory. 
Options are 'none', 'client', and 'iso'.") if self.params["cdrom"]['type'] == 'iso' and not self.params["cdrom"].get('iso_path'): self.module.fail_json(msg="cdrom.iso_path is mandatory when cdrom.type is set to iso.") cdrom_spec = None cdrom_devices = self.get_vm_cdrom_devices(vm=vm_obj) iso_path = self.params["cdrom"].get("iso_path") if len(cdrom_devices) == 0: # Creating new CD-ROM ide_devices = self.get_vm_ide_devices(vm=vm_obj) if len(ide_devices) == 0: # Creating new IDE device ide_ctl = self.device_helper.create_ide_controller() ide_device = ide_ctl.device self.change_detected = True self.configspec.deviceChange.append(ide_ctl) else: ide_device = ide_devices[0] if len(ide_device.device) > 3: self.module.fail_json(msg="hardware.cdrom specified for a VM or template which already has 4" " IDE devices of which none are a cdrom") cdrom_spec = self.device_helper.create_cdrom(ide_device=ide_device, cdrom_type=self.params["cdrom"]["type"], iso_path=iso_path) if vm_obj and vm_obj.runtime.powerState == vim.VirtualMachinePowerState.poweredOn: cdrom_spec.device.connectable.connected = (self.params["cdrom"]["type"] != "none") elif not self.device_helper.is_equal_cdrom(vm_obj=vm_obj, cdrom_device=cdrom_devices[0], cdrom_type=self.params["cdrom"]["type"], iso_path=iso_path): self.device_helper.update_cdrom_config(vm_obj, self.params["cdrom"], cdrom_devices[0], iso_path=iso_path) cdrom_spec = vim.vm.device.VirtualDeviceSpec() cdrom_spec.operation = vim.vm.device.VirtualDeviceSpec.Operation.edit cdrom_spec.device = cdrom_devices[0] if cdrom_spec: self.change_detected = True self.configspec.deviceChange.append(cdrom_spec) def configure_cdrom_list(self, vm_obj): configured_cdroms = self.sanitize_cdrom_params() cdrom_devices = self.get_vm_cdrom_devices(vm=vm_obj) # configure IDE CD-ROMs if configured_cdroms['ide']: ide_devices = self.get_vm_ide_devices(vm=vm_obj) for expected_cdrom_spec in configured_cdroms['ide']: ide_device = None for device in ide_devices: if device.busNumber == expected_cdrom_spec['num']: ide_device = device break # if not find the matched ide controller or no existing ide controller if not ide_device: ide_ctl = self.device_helper.create_ide_controller(bus_number=expected_cdrom_spec['num']) ide_device = ide_ctl.device self.change_detected = True self.configspec.deviceChange.append(ide_ctl) for cdrom in expected_cdrom_spec['cdrom']: cdrom_device = None iso_path = cdrom.get('iso_path') unit_number = cdrom.get('unit_number') for target_cdrom in cdrom_devices: if target_cdrom.controllerKey == ide_device.key and target_cdrom.unitNumber == unit_number: cdrom_device = target_cdrom break # create new CD-ROM if not cdrom_device and cdrom.get('state') != 'absent': if vm_obj and vm_obj.runtime.powerState == vim.VirtualMachinePowerState.poweredOn: self.module.fail_json(msg='CD-ROM attach to IDE controller not support hot-add.') if len(ide_device.device) == 2: self.module.fail_json(msg='Maximum number of CD-ROMs attached to IDE controller is 2.') cdrom_spec = self.device_helper.create_cdrom(ide_device=ide_device, cdrom_type=cdrom['type'], iso_path=iso_path, unit_number=unit_number) self.change_detected = True self.configspec.deviceChange.append(cdrom_spec) # re-configure CD-ROM elif cdrom_device and cdrom.get('state') != 'absent' and \ not self.device_helper.is_equal_cdrom(vm_obj=vm_obj, cdrom_device=cdrom_device, cdrom_type=cdrom['type'], iso_path=iso_path): self.device_helper.update_cdrom_config(vm_obj, cdrom, cdrom_device, iso_path=iso_path) cdrom_spec = vim.vm.device.VirtualDeviceSpec() 
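                    # Wrap the updated device in an 'edit' operation so vSphere
                    # reconfigures the existing CD-ROM in place rather than adding
                    # a second drive.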
cdrom_spec.operation = vim.vm.device.VirtualDeviceSpec.Operation.edit cdrom_spec.device = cdrom_device self.change_detected = True self.configspec.deviceChange.append(cdrom_spec) # delete CD-ROM elif cdrom_device and cdrom.get('state') == 'absent': if vm_obj and vm_obj.runtime.powerState != vim.VirtualMachinePowerState.poweredOff: self.module.fail_json(msg='CD-ROM attach to IDE controller not support hot-remove.') cdrom_spec = self.device_helper.remove_cdrom(cdrom_device) self.change_detected = True self.configspec.deviceChange.append(cdrom_spec) # configure SATA CD-ROMs is not supported yet if configured_cdroms['sata']: pass def configure_hardware_params(self, vm_obj): """ Function to configure hardware related configuration of virtual machine Args: vm_obj: virtual machine object """ if 'hardware' in self.params: if 'max_connections' in self.params['hardware']: # maxMksConnections == max_connections self.configspec.maxMksConnections = int(self.params['hardware']['max_connections']) if vm_obj is None or self.configspec.maxMksConnections != vm_obj.config.maxMksConnections: self.change_detected = True if 'nested_virt' in self.params['hardware']: self.configspec.nestedHVEnabled = bool(self.params['hardware']['nested_virt']) if vm_obj is None or self.configspec.nestedHVEnabled != bool(vm_obj.config.nestedHVEnabled): self.change_detected = True if 'version' in self.params['hardware']: hw_version_check_failed = False temp_version = self.params['hardware'].get('version', 10) if isinstance(temp_version, str) and temp_version.lower() == 'latest': # Check is to make sure vm_obj is not of type template if vm_obj and not vm_obj.config.template: try: task = vm_obj.UpgradeVM_Task() self.wait_for_task(task) if task.info.state == 'error': return {'changed': self.change_applied, 'failed': True, 'msg': task.info.error.msg, 'op': 'upgrade'} except vim.fault.AlreadyUpgraded: # Don't fail if VM is already upgraded. pass else: try: temp_version = int(temp_version) except ValueError: hw_version_check_failed = True if temp_version not in range(3, 16): hw_version_check_failed = True if hw_version_check_failed: self.module.fail_json(msg="Failed to set hardware.version '%s' value as valid" " values range from 3 (ESX 2.x) to 14 (ESXi 6.5 and greater)." % temp_version) # Hardware version is denoted as "vmx-10" version = "vmx-%02d" % temp_version self.configspec.version = version if vm_obj is None or self.configspec.version != vm_obj.config.version: self.change_detected = True # Check is to make sure vm_obj is not of type template if vm_obj and not vm_obj.config.template: # VM exists and we need to update the hardware version current_version = vm_obj.config.version # current_version = "vmx-10" version_digit = int(current_version.split("-", 1)[-1]) if temp_version < version_digit: self.module.fail_json(msg="Current hardware version '%d' which is greater than the specified" " version '%d'. Downgrading hardware version is" " not supported. Please specify version greater" " than the current version." % (version_digit, temp_version)) new_version = "vmx-%02d" % temp_version try: task = vm_obj.UpgradeVM_Task(new_version) self.wait_for_task(task) if task.info.state == 'error': return {'changed': self.change_applied, 'failed': True, 'msg': task.info.error.msg, 'op': 'upgrade'} except vim.fault.AlreadyUpgraded: # Don't fail if VM is already upgraded. 
pass if 'virt_based_security' in self.params['hardware']: host_version = self.select_host().summary.config.product.version if int(host_version.split('.')[0]) < 6 or (int(host_version.split('.')[0]) == 6 and int(host_version.split('.')[1]) < 7): self.module.fail_json(msg="ESXi version %s not support VBS." % host_version) guest_ids = ['windows9_64Guest', 'windows9Server64Guest'] if vm_obj is None: guestid = self.configspec.guestId else: guestid = vm_obj.summary.config.guestId if guestid not in guest_ids: self.module.fail_json(msg="Guest '%s' not support VBS." % guestid) if (vm_obj is None and int(self.configspec.version.split('-')[1]) >= 14) or \ (vm_obj and int(vm_obj.config.version.split('-')[1]) >= 14 and (vm_obj.runtime.powerState == vim.VirtualMachinePowerState.poweredOff)): self.configspec.flags = vim.vm.FlagInfo() self.configspec.flags.vbsEnabled = bool(self.params['hardware']['virt_based_security']) if bool(self.params['hardware']['virt_based_security']): self.configspec.flags.vvtdEnabled = True self.configspec.nestedHVEnabled = True if (vm_obj is None and self.configspec.firmware == 'efi') or \ (vm_obj and vm_obj.config.firmware == 'efi'): self.configspec.bootOptions = vim.vm.BootOptions() self.configspec.bootOptions.efiSecureBootEnabled = True else: self.module.fail_json(msg="Not support VBS when firmware is BIOS.") if vm_obj is None or self.configspec.flags.vbsEnabled != vm_obj.config.flags.vbsEnabled: self.change_detected = True def get_device_by_type(self, vm=None, type=None): device_list = [] if vm is None or type is None: return device_list for device in vm.config.hardware.device: if isinstance(device, type): device_list.append(device) return device_list def get_vm_cdrom_devices(self, vm=None): return self.get_device_by_type(vm=vm, type=vim.vm.device.VirtualCdrom) def get_vm_ide_devices(self, vm=None): return self.get_device_by_type(vm=vm, type=vim.vm.device.VirtualIDEController) def get_vm_network_interfaces(self, vm=None): device_list = [] if vm is None: return device_list nw_device_types = (vim.vm.device.VirtualPCNet32, vim.vm.device.VirtualVmxnet2, vim.vm.device.VirtualVmxnet3, vim.vm.device.VirtualE1000, vim.vm.device.VirtualE1000e, vim.vm.device.VirtualSriovEthernetCard) for device in vm.config.hardware.device: if isinstance(device, nw_device_types): device_list.append(device) return device_list def sanitize_network_params(self): """ Sanitize user provided network provided params Returns: A sanitized list of network params, else fails """ network_devices = list() # Clean up user data here for network in self.params['networks']: if 'name' not in network and 'vlan' not in network: self.module.fail_json(msg="Please specify at least a network name or" " a VLAN name under VM network list.") if 'name' in network and self.cache.get_network(network['name']) is None: self.module.fail_json(msg="Network '%(name)s' does not exist." 
% network) elif 'vlan' in network: dvps = self.cache.get_all_objs(self.content, [vim.dvs.DistributedVirtualPortgroup]) for dvp in dvps: if hasattr(dvp.config.defaultPortConfig, 'vlan') and \ isinstance(dvp.config.defaultPortConfig.vlan.vlanId, int) and \ str(dvp.config.defaultPortConfig.vlan.vlanId) == str(network['vlan']): network['name'] = dvp.config.name break if 'dvswitch_name' in network and \ dvp.config.distributedVirtualSwitch.name == network['dvswitch_name'] and \ dvp.config.name == network['vlan']: network['name'] = dvp.config.name break if dvp.config.name == network['vlan']: network['name'] = dvp.config.name break else: self.module.fail_json(msg="VLAN '%(vlan)s' does not exist." % network) if 'type' in network: if network['type'] not in ['dhcp', 'static']: self.module.fail_json(msg="Network type '%(type)s' is not a valid parameter." " Valid parameters are ['dhcp', 'static']." % network) if network['type'] != 'static' and ('ip' in network or 'netmask' in network): self.module.fail_json(msg='Static IP information provided for network "%(name)s",' ' but "type" is set to "%(type)s".' % network) else: # Type is optional parameter, if user provided IP or Subnet assume # network type as 'static' if 'ip' in network or 'netmask' in network: network['type'] = 'static' else: # User wants network type as 'dhcp' network['type'] = 'dhcp' if network.get('type') == 'static': if 'ip' in network and 'netmask' not in network: self.module.fail_json(msg="'netmask' is required if 'ip' is" " specified under VM network list.") if 'ip' not in network and 'netmask' in network: self.module.fail_json(msg="'ip' is required if 'netmask' is" " specified under VM network list.") validate_device_types = ['pcnet32', 'vmxnet2', 'vmxnet3', 'e1000', 'e1000e', 'sriov'] if 'device_type' in network and network['device_type'] not in validate_device_types: self.module.fail_json(msg="Device type specified '%s' is not valid." " Please specify correct device" " type from ['%s']." % (network['device_type'], "', '".join(validate_device_types))) if 'mac' in network and not is_mac(network['mac']): self.module.fail_json(msg="Device MAC address '%s' is invalid." " Please provide correct MAC address." % network['mac']) network_devices.append(network) return network_devices def configure_network(self, vm_obj): # Ignore empty networks, this permits to keep networks when deploying a template/cloning a VM if len(self.params['networks']) == 0: return network_devices = self.sanitize_network_params() # List current device for Clone or Idempotency current_net_devices = self.get_vm_network_interfaces(vm=vm_obj) if len(network_devices) < len(current_net_devices): self.module.fail_json(msg="Given network device list is lesser than current VM device list (%d < %d). 
" "Removing interfaces is not allowed" % (len(network_devices), len(current_net_devices))) for key in range(0, len(network_devices)): nic_change_detected = False network_name = network_devices[key]['name'] if key < len(current_net_devices) and (vm_obj or self.params['template']): # We are editing existing network devices, this is either when # are cloning from VM or Template nic = vim.vm.device.VirtualDeviceSpec() nic.operation = vim.vm.device.VirtualDeviceSpec.Operation.edit nic.device = current_net_devices[key] if ('wake_on_lan' in network_devices[key] and nic.device.wakeOnLanEnabled != network_devices[key].get('wake_on_lan')): nic.device.wakeOnLanEnabled = network_devices[key].get('wake_on_lan') nic_change_detected = True if ('start_connected' in network_devices[key] and nic.device.connectable.startConnected != network_devices[key].get('start_connected')): nic.device.connectable.startConnected = network_devices[key].get('start_connected') nic_change_detected = True if ('allow_guest_control' in network_devices[key] and nic.device.connectable.allowGuestControl != network_devices[key].get('allow_guest_control')): nic.device.connectable.allowGuestControl = network_devices[key].get('allow_guest_control') nic_change_detected = True if nic.device.deviceInfo.summary != network_name: nic.device.deviceInfo.summary = network_name nic_change_detected = True if 'device_type' in network_devices[key]: device = self.device_helper.get_device(network_devices[key]['device_type'], network_name) device_class = type(device) if not isinstance(nic.device, device_class): self.module.fail_json(msg="Changing the device type is not possible when interface is already present. " "The failing device type is %s" % network_devices[key]['device_type']) # Changing mac address has no effect when editing interface if 'mac' in network_devices[key] and nic.device.macAddress != current_net_devices[key].macAddress: self.module.fail_json(msg="Changing MAC address has not effect when interface is already present. " "The failing new MAC address is %s" % nic.device.macAddress) else: # Default device type is vmxnet3, VMware best practice device_type = network_devices[key].get('device_type', 'vmxnet3') nic = self.device_helper.create_nic(device_type, 'Network Adapter %s' % (key + 1), network_devices[key]) nic.operation = vim.vm.device.VirtualDeviceSpec.Operation.add nic_change_detected = True if hasattr(self.cache.get_network(network_name), 'portKeys'): # VDS switch pg_obj = None if 'dvswitch_name' in network_devices[key]: dvs_name = network_devices[key]['dvswitch_name'] dvs_obj = find_dvs_by_name(self.content, dvs_name) if dvs_obj is None: self.module.fail_json(msg="Unable to find distributed virtual switch %s" % dvs_name) pg_obj = find_dvspg_by_name(dvs_obj, network_name) if pg_obj is None: self.module.fail_json(msg="Unable to find distributed port group %s" % network_name) else: pg_obj = self.cache.find_obj(self.content, [vim.dvs.DistributedVirtualPortgroup], network_name) # TODO: (akasurde) There is no way to find association between resource pool and distributed virtual portgroup # For now, check if we are able to find distributed virtual switch if not pg_obj.config.distributedVirtualSwitch: self.module.fail_json(msg="Failed to find distributed virtual switch which is associated with" " distributed virtual portgroup '%s'. Make sure hostsystem is associated with" " the given distributed virtual portgroup. Also, check if user has correct" " permission to access distributed virtual switch in the given portgroup." 
% pg_obj.name) if (nic.device.backing and (not hasattr(nic.device.backing, 'port') or (nic.device.backing.port.portgroupKey != pg_obj.key or nic.device.backing.port.switchUuid != pg_obj.config.distributedVirtualSwitch.uuid))): nic_change_detected = True dvs_port_connection = vim.dvs.PortConnection() dvs_port_connection.portgroupKey = pg_obj.key # If user specifies distributed port group without associating to the hostsystem on which # virtual machine is going to be deployed then we get error. We can infer that there is no # association between given distributed port group and host system. host_system = self.params.get('esxi_hostname') if host_system and host_system not in [host.config.host.name for host in pg_obj.config.distributedVirtualSwitch.config.host]: self.module.fail_json(msg="It seems that host system '%s' is not associated with distributed" " virtual portgroup '%s'. Please make sure host system is associated" " with given distributed virtual portgroup" % (host_system, pg_obj.name)) dvs_port_connection.switchUuid = pg_obj.config.distributedVirtualSwitch.uuid nic.device.backing = vim.vm.device.VirtualEthernetCard.DistributedVirtualPortBackingInfo() nic.device.backing.port = dvs_port_connection elif isinstance(self.cache.get_network(network_name), vim.OpaqueNetwork): # NSX-T Logical Switch nic.device.backing = vim.vm.device.VirtualEthernetCard.OpaqueNetworkBackingInfo() network_id = self.cache.get_network(network_name).summary.opaqueNetworkId nic.device.backing.opaqueNetworkType = 'nsx.LogicalSwitch' nic.device.backing.opaqueNetworkId = network_id nic.device.deviceInfo.summary = 'nsx.LogicalSwitch: %s' % network_id nic_change_detected = True else: # vSwitch if not isinstance(nic.device.backing, vim.vm.device.VirtualEthernetCard.NetworkBackingInfo): nic.device.backing = vim.vm.device.VirtualEthernetCard.NetworkBackingInfo() nic_change_detected = True net_obj = self.cache.get_network(network_name) if nic.device.backing.network != net_obj: nic.device.backing.network = net_obj nic_change_detected = True if nic.device.backing.deviceName != network_name: nic.device.backing.deviceName = network_name nic_change_detected = True if nic_change_detected: # Change to fix the issue found while configuring opaque network # VMs cloned from a template with opaque network will get disconnected # Replacing deprecated config parameter with relocation Spec if isinstance(self.cache.get_network(network_name), vim.OpaqueNetwork): self.relospec.deviceChange.append(nic) else: self.configspec.deviceChange.append(nic) self.change_detected = True def configure_vapp_properties(self, vm_obj): if len(self.params['vapp_properties']) == 0: return for x in self.params['vapp_properties']: if not x.get('id'): self.module.fail_json(msg="id is required to set vApp property") new_vmconfig_spec = vim.vApp.VmConfigSpec() if vm_obj: # VM exists # This is primarily for vcsim/integration tests, unset vAppConfig was not seen on my deployments orig_spec = vm_obj.config.vAppConfig if vm_obj.config.vAppConfig else new_vmconfig_spec vapp_properties_current = dict((x.id, x) for x in orig_spec.property) vapp_properties_to_change = dict((x['id'], x) for x in self.params['vapp_properties']) # each property must have a unique key # init key counter with max value + 1 all_keys = [x.key for x in orig_spec.property] new_property_index = max(all_keys) + 1 if all_keys else 0 for property_id, property_spec in vapp_properties_to_change.items(): is_property_changed = False new_vapp_property_spec = vim.vApp.PropertySpec() if property_id in 
vapp_properties_current: if property_spec.get('operation') == 'remove': new_vapp_property_spec.operation = 'remove' new_vapp_property_spec.removeKey = vapp_properties_current[property_id].key is_property_changed = True else: # this is 'edit' branch new_vapp_property_spec.operation = 'edit' new_vapp_property_spec.info = vapp_properties_current[property_id] try: for property_name, property_value in property_spec.items(): if property_name == 'operation': # operation is not an info object property # if set to anything other than 'remove' we don't fail continue # Updating attributes only if needed if getattr(new_vapp_property_spec.info, property_name) != property_value: setattr(new_vapp_property_spec.info, property_name, property_value) is_property_changed = True except Exception as e: msg = "Failed to set vApp property field='%s' and value='%s'. Error: %s" % (property_name, property_value, to_text(e)) self.module.fail_json(msg=msg) else: if property_spec.get('operation') == 'remove': # attempt to delete non-existent property continue # this is add new property branch new_vapp_property_spec.operation = 'add' property_info = vim.vApp.PropertyInfo() property_info.classId = property_spec.get('classId') property_info.instanceId = property_spec.get('instanceId') property_info.id = property_spec.get('id') property_info.category = property_spec.get('category') property_info.label = property_spec.get('label') property_info.type = property_spec.get('type', 'string') property_info.userConfigurable = property_spec.get('userConfigurable', True) property_info.defaultValue = property_spec.get('defaultValue') property_info.value = property_spec.get('value', '') property_info.description = property_spec.get('description') new_vapp_property_spec.info = property_info new_vapp_property_spec.info.key = new_property_index new_property_index += 1 is_property_changed = True if is_property_changed: new_vmconfig_spec.property.append(new_vapp_property_spec) else: # New VM all_keys = [x.key for x in new_vmconfig_spec.property] new_property_index = max(all_keys) + 1 if all_keys else 0 vapp_properties_to_change = dict((x['id'], x) for x in self.params['vapp_properties']) is_property_changed = False for property_id, property_spec in vapp_properties_to_change.items(): new_vapp_property_spec = vim.vApp.PropertySpec() # this is add new property branch new_vapp_property_spec.operation = 'add' property_info = vim.vApp.PropertyInfo() property_info.classId = property_spec.get('classId') property_info.instanceId = property_spec.get('instanceId') property_info.id = property_spec.get('id') property_info.category = property_spec.get('category') property_info.label = property_spec.get('label') property_info.type = property_spec.get('type', 'string') property_info.userConfigurable = property_spec.get('userConfigurable', True) property_info.defaultValue = property_spec.get('defaultValue') property_info.value = property_spec.get('value', '') property_info.description = property_spec.get('description') new_vapp_property_spec.info = property_info new_vapp_property_spec.info.key = new_property_index new_property_index += 1 is_property_changed = True if is_property_changed: new_vmconfig_spec.property.append(new_vapp_property_spec) if new_vmconfig_spec.property: self.configspec.vAppConfig = new_vmconfig_spec self.change_detected = True def customize_customvalues(self, vm_obj): if len(self.params['customvalues']) == 0: return facts = self.gather_facts(vm_obj) for kv in self.params['customvalues']: if 'key' not in kv or 'value' not in kv: 
self.module.exit_json(msg="customvalues items required both 'key' and 'value' fields.") key_id = None for field in self.content.customFieldsManager.field: if field.name == kv['key']: key_id = field.key break if not key_id: self.module.fail_json(msg="Unable to find custom value key %s" % kv['key']) # If kv is not kv fetched from facts, change it if kv['key'] not in facts['customvalues'] or facts['customvalues'][kv['key']] != kv['value']: self.content.customFieldsManager.SetField(entity=vm_obj, key=key_id, value=kv['value']) self.change_detected = True def customize_vm(self, vm_obj): # User specified customization specification custom_spec_name = self.params.get('customization_spec') if custom_spec_name: cc_mgr = self.content.customizationSpecManager if cc_mgr.DoesCustomizationSpecExist(name=custom_spec_name): temp_spec = cc_mgr.GetCustomizationSpec(name=custom_spec_name) self.customspec = temp_spec.spec return else: self.module.fail_json(msg="Unable to find customization specification" " '%s' in given configuration." % custom_spec_name) # Network settings adaptermaps = [] for network in self.params['networks']: guest_map = vim.vm.customization.AdapterMapping() guest_map.adapter = vim.vm.customization.IPSettings() if 'ip' in network and 'netmask' in network: guest_map.adapter.ip = vim.vm.customization.FixedIp() guest_map.adapter.ip.ipAddress = str(network['ip']) guest_map.adapter.subnetMask = str(network['netmask']) elif 'type' in network and network['type'] == 'dhcp': guest_map.adapter.ip = vim.vm.customization.DhcpIpGenerator() if 'gateway' in network: guest_map.adapter.gateway = network['gateway'] # On Windows, DNS domain and DNS servers can be set by network interface # https://pubs.vmware.com/vi3/sdk/ReferenceGuide/vim.vm.customization.IPSettings.html if 'domain' in network: guest_map.adapter.dnsDomain = network['domain'] elif 'domain' in self.params['customization']: guest_map.adapter.dnsDomain = self.params['customization']['domain'] if 'dns_servers' in network: guest_map.adapter.dnsServerList = network['dns_servers'] elif 'dns_servers' in self.params['customization']: guest_map.adapter.dnsServerList = self.params['customization']['dns_servers'] adaptermaps.append(guest_map) # Global DNS settings globalip = vim.vm.customization.GlobalIPSettings() if 'dns_servers' in self.params['customization']: globalip.dnsServerList = self.params['customization']['dns_servers'] # TODO: Maybe list the different domains from the interfaces here by default ? 
if 'dns_suffix' in self.params['customization']: dns_suffix = self.params['customization']['dns_suffix'] if isinstance(dns_suffix, list): globalip.dnsSuffixList = " ".join(dns_suffix) else: globalip.dnsSuffixList = dns_suffix elif 'domain' in self.params['customization']: globalip.dnsSuffixList = self.params['customization']['domain'] if self.params['guest_id']: guest_id = self.params['guest_id'] else: guest_id = vm_obj.summary.config.guestId # For windows guest OS, use SysPrep # https://pubs.vmware.com/vi3/sdk/ReferenceGuide/vim.vm.customization.Sysprep.html#field_detail if 'win' in guest_id: ident = vim.vm.customization.Sysprep() ident.userData = vim.vm.customization.UserData() # Setting hostName, orgName and fullName is mandatory, so we set some default when missing ident.userData.computerName = vim.vm.customization.FixedName() # computer name will be truncated to 15 characters if using VM name default_name = self.params['name'].replace(' ', '') punctuation = string.punctuation.replace('-', '') default_name = ''.join([c for c in default_name if c not in punctuation]) ident.userData.computerName.name = str(self.params['customization'].get('hostname', default_name[0:15])) ident.userData.fullName = str(self.params['customization'].get('fullname', 'Administrator')) ident.userData.orgName = str(self.params['customization'].get('orgname', 'ACME')) if 'productid' in self.params['customization']: ident.userData.productId = str(self.params['customization']['productid']) ident.guiUnattended = vim.vm.customization.GuiUnattended() if 'autologon' in self.params['customization']: ident.guiUnattended.autoLogon = self.params['customization']['autologon'] ident.guiUnattended.autoLogonCount = self.params['customization'].get('autologoncount', 1) if 'timezone' in self.params['customization']: # Check if timezone value is a int before proceeding. ident.guiUnattended.timeZone = self.device_helper.integer_value( self.params['customization']['timezone'], 'customization.timezone') ident.identification = vim.vm.customization.Identification() if self.params['customization'].get('password', '') != '': ident.guiUnattended.password = vim.vm.customization.Password() ident.guiUnattended.password.value = str(self.params['customization']['password']) ident.guiUnattended.password.plainText = True if 'joindomain' in self.params['customization']: if 'domainadmin' not in self.params['customization'] or 'domainadminpassword' not in self.params['customization']: self.module.fail_json(msg="'domainadmin' and 'domainadminpassword' entries are mandatory in 'customization' section to use " "joindomain feature") ident.identification.domainAdmin = str(self.params['customization']['domainadmin']) ident.identification.joinDomain = str(self.params['customization']['joindomain']) ident.identification.domainAdminPassword = vim.vm.customization.Password() ident.identification.domainAdminPassword.value = str(self.params['customization']['domainadminpassword']) ident.identification.domainAdminPassword.plainText = True elif 'joinworkgroup' in self.params['customization']: ident.identification.joinWorkgroup = str(self.params['customization']['joinworkgroup']) if 'runonce' in self.params['customization']: ident.guiRunOnce = vim.vm.customization.GuiRunOnce() ident.guiRunOnce.commandList = self.params['customization']['runonce'] else: # FIXME: We have no clue whether this non-Windows OS is actually Linux, hence it might fail! 
# For Linux guest OS, use LinuxPrep # https://pubs.vmware.com/vi3/sdk/ReferenceGuide/vim.vm.customization.LinuxPrep.html ident = vim.vm.customization.LinuxPrep() # TODO: Maybe add domain from interface if missing ? if 'domain' in self.params['customization']: ident.domain = str(self.params['customization']['domain']) ident.hostName = vim.vm.customization.FixedName() hostname = str(self.params['customization'].get('hostname', self.params['name'].split('.')[0])) # Remove all characters except alphanumeric and minus which is allowed by RFC 952 valid_hostname = re.sub(r"[^a-zA-Z0-9\-]", "", hostname) ident.hostName.name = valid_hostname # List of supported time zones for different vSphere versions in Linux/Unix systems # https://kb.vmware.com/s/article/2145518 if 'timezone' in self.params['customization']: ident.timeZone = str(self.params['customization']['timezone']) if 'hwclockUTC' in self.params['customization']: ident.hwClockUTC = self.params['customization']['hwclockUTC'] self.customspec = vim.vm.customization.Specification() self.customspec.nicSettingMap = adaptermaps self.customspec.globalIPSettings = globalip self.customspec.identity = ident def get_vm_scsi_controller(self, vm_obj): # If vm_obj doesn't exist there is no SCSI controller to find if vm_obj is None: return None for device in vm_obj.config.hardware.device: if self.device_helper.is_scsi_controller(device): scsi_ctl = vim.vm.device.VirtualDeviceSpec() scsi_ctl.device = device return scsi_ctl return None def get_configured_disk_size(self, expected_disk_spec): # what size is it? if [x for x in expected_disk_spec.keys() if x.startswith('size_') or x == 'size']: # size, size_tb, size_gb, size_mb, size_kb if 'size' in expected_disk_spec: size_regex = re.compile(r'(\d+(?:\.\d+)?)([tgmkTGMK][bB])') disk_size_m = size_regex.match(expected_disk_spec['size']) try: if disk_size_m: expected = disk_size_m.group(1) unit = disk_size_m.group(2) else: raise ValueError if re.match(r'\d+\.\d+', expected): # We found float value in string, let's typecast it expected = float(expected) else: # We found int value in string, let's typecast it expected = int(expected) if not expected or not unit: raise ValueError except (TypeError, ValueError, NameError): # Common failure self.module.fail_json(msg="Failed to parse disk size please review value" " provided using documentation.") else: param = [x for x in expected_disk_spec.keys() if x.startswith('size_')][0] unit = param.split('_')[-1].lower() expected = [x[1] for x in expected_disk_spec.items() if x[0].startswith('size_')][0] expected = int(expected) disk_units = dict(tb=3, gb=2, mb=1, kb=0) if unit in disk_units: unit = unit.lower() return expected * (1024 ** disk_units[unit]) else: self.module.fail_json(msg="%s is not a supported unit for disk size." " Supported units are ['%s']." 
% (unit, "', '".join(disk_units.keys()))) # No size found but disk, fail self.module.fail_json( msg="No size, size_kb, size_mb, size_gb or size_tb attribute found into disk configuration") def find_vmdk(self, vmdk_path): """ Takes a vsphere datastore path in the format [datastore_name] path/to/file.vmdk Returns vsphere file object or raises RuntimeError """ datastore_name, vmdk_fullpath, vmdk_filename, vmdk_folder = self.vmdk_disk_path_split(vmdk_path) datastore = self.cache.find_obj(self.content, [vim.Datastore], datastore_name) if datastore is None: self.module.fail_json(msg="Failed to find the datastore %s" % datastore_name) return self.find_vmdk_file(datastore, vmdk_fullpath, vmdk_filename, vmdk_folder) def add_existing_vmdk(self, vm_obj, expected_disk_spec, diskspec, scsi_ctl): """ Adds vmdk file described by expected_disk_spec['filename'], retrieves the file information and adds the correct spec to self.configspec.deviceChange. """ filename = expected_disk_spec['filename'] # if this is a new disk, or the disk file names are different if (vm_obj and diskspec.device.backing.fileName != filename) or vm_obj is None: vmdk_file = self.find_vmdk(expected_disk_spec['filename']) diskspec.device.backing.fileName = expected_disk_spec['filename'] diskspec.device.capacityInKB = VmomiSupport.vmodlTypes['long'](vmdk_file.fileSize / 1024) diskspec.device.key = -1 self.change_detected = True self.configspec.deviceChange.append(diskspec) def configure_disks(self, vm_obj): # Ignore empty disk list, this permits to keep disks when deploying a template/cloning a VM if len(self.params['disk']) == 0: return scsi_ctl = self.get_vm_scsi_controller(vm_obj) # Create scsi controller only if we are deploying a new VM, not a template or reconfiguring if vm_obj is None or scsi_ctl is None: scsi_ctl = self.device_helper.create_scsi_controller(self.get_scsi_type()) self.change_detected = True self.configspec.deviceChange.append(scsi_ctl) disks = [x for x in vm_obj.config.hardware.device if isinstance(x, vim.vm.device.VirtualDisk)] \ if vm_obj is not None else None if disks is not None and self.params.get('disk') and len(self.params.get('disk')) < len(disks): self.module.fail_json(msg="Provided disks configuration has less disks than " "the target object (%d vs %d)" % (len(self.params.get('disk')), len(disks))) disk_index = 0 for expected_disk_spec in self.params.get('disk'): disk_modified = False # If we are manipulating and existing objects which has disks and disk_index is in disks if vm_obj is not None and disks is not None and disk_index < len(disks): diskspec = vim.vm.device.VirtualDeviceSpec() # set the operation to edit so that it knows to keep other settings diskspec.operation = vim.vm.device.VirtualDeviceSpec.Operation.edit diskspec.device = disks[disk_index] else: diskspec = self.device_helper.create_scsi_disk(scsi_ctl, disk_index) disk_modified = True # increment index for next disk search disk_index += 1 # index 7 is reserved to SCSI controller if disk_index == 7: disk_index += 1 if 'disk_mode' in expected_disk_spec: disk_mode = expected_disk_spec.get('disk_mode', 'persistent').lower() valid_disk_mode = ['persistent', 'independent_persistent', 'independent_nonpersistent'] if disk_mode not in valid_disk_mode: self.module.fail_json(msg="disk_mode specified is not valid." 
" Should be one of ['%s']" % "', '".join(valid_disk_mode)) if (vm_obj and diskspec.device.backing.diskMode != disk_mode) or (vm_obj is None): diskspec.device.backing.diskMode = disk_mode disk_modified = True else: diskspec.device.backing.diskMode = "persistent" # is it thin? if 'type' in expected_disk_spec: disk_type = expected_disk_spec.get('type', '').lower() if disk_type == 'thin': diskspec.device.backing.thinProvisioned = True elif disk_type == 'eagerzeroedthick': diskspec.device.backing.eagerlyScrub = True if 'filename' in expected_disk_spec and expected_disk_spec['filename'] is not None: self.add_existing_vmdk(vm_obj, expected_disk_spec, diskspec, scsi_ctl) continue elif vm_obj is None or self.params['template']: # We are creating new VM or from Template # Only create virtual device if not backed by vmdk in original template if diskspec.device.backing.fileName == '': diskspec.fileOperation = vim.vm.device.VirtualDeviceSpec.FileOperation.create # which datastore? if expected_disk_spec.get('datastore'): # TODO: This is already handled by the relocation spec, # but it needs to eventually be handled for all the # other disks defined pass kb = self.get_configured_disk_size(expected_disk_spec) # VMware doesn't allow to reduce disk sizes if kb < diskspec.device.capacityInKB: self.module.fail_json( msg="Given disk size is smaller than found (%d < %d). Reducing disks is not allowed." % (kb, diskspec.device.capacityInKB)) if kb != diskspec.device.capacityInKB or disk_modified: diskspec.device.capacityInKB = kb self.configspec.deviceChange.append(diskspec) self.change_detected = True def select_host(self): hostsystem = self.cache.get_esx_host(self.params['esxi_hostname']) if not hostsystem: self.module.fail_json(msg='Failed to find ESX host "%(esxi_hostname)s"' % self.params) if hostsystem.runtime.connectionState != 'connected' or hostsystem.runtime.inMaintenanceMode: self.module.fail_json(msg='ESXi "%(esxi_hostname)s" is in invalid state or in maintenance mode.' 
% self.params) return hostsystem def autoselect_datastore(self): datastore = None datastores = self.cache.get_all_objs(self.content, [vim.Datastore]) if datastores is None or len(datastores) == 0: self.module.fail_json(msg="Unable to find a datastore list when autoselecting") datastore_freespace = 0 for ds in datastores: if not self.is_datastore_valid(datastore_obj=ds): continue if ds.summary.freeSpace > datastore_freespace: datastore = ds datastore_freespace = ds.summary.freeSpace return datastore def get_recommended_datastore(self, datastore_cluster_obj=None): """ Function to return Storage DRS recommended datastore from datastore cluster Args: datastore_cluster_obj: datastore cluster managed object Returns: Name of recommended datastore from the given datastore cluster """ if datastore_cluster_obj is None: return None # Check if Datastore Cluster provided by user is SDRS ready sdrs_status = datastore_cluster_obj.podStorageDrsEntry.storageDrsConfig.podConfig.enabled if sdrs_status: # We can get storage recommendation only if SDRS is enabled on given datastorage cluster pod_sel_spec = vim.storageDrs.PodSelectionSpec() pod_sel_spec.storagePod = datastore_cluster_obj storage_spec = vim.storageDrs.StoragePlacementSpec() storage_spec.podSelectionSpec = pod_sel_spec storage_spec.type = 'create' try: rec = self.content.storageResourceManager.RecommendDatastores(storageSpec=storage_spec) rec_action = rec.recommendations[0].action[0] return rec_action.destination.name except Exception: # There is some error so we fall back to general workflow pass datastore = None datastore_freespace = 0 for ds in datastore_cluster_obj.childEntity: if isinstance(ds, vim.Datastore) and ds.summary.freeSpace > datastore_freespace: # If datastore field is provided, filter destination datastores if not self.is_datastore_valid(datastore_obj=ds): continue datastore = ds datastore_freespace = ds.summary.freeSpace if datastore: return datastore.name return None def select_datastore(self, vm_obj=None): datastore = None datastore_name = None if len(self.params['disk']) != 0: # TODO: really use the datastore for newly created disks if 'autoselect_datastore' in self.params['disk'][0] and self.params['disk'][0]['autoselect_datastore']: datastores = self.cache.get_all_objs(self.content, [vim.Datastore]) datastores = [x for x in datastores if self.cache.get_parent_datacenter(x).name == self.params['datacenter']] if datastores is None or len(datastores) == 0: self.module.fail_json(msg="Unable to find a datastore list when autoselecting") datastore_freespace = 0 for ds in datastores: if not self.is_datastore_valid(datastore_obj=ds): continue if (ds.summary.freeSpace > datastore_freespace) or (ds.summary.freeSpace == datastore_freespace and not datastore): # If datastore field is provided, filter destination datastores if 'datastore' in self.params['disk'][0] and \ isinstance(self.params['disk'][0]['datastore'], str) and \ ds.name.find(self.params['disk'][0]['datastore']) < 0: continue datastore = ds datastore_name = datastore.name datastore_freespace = ds.summary.freeSpace elif 'datastore' in self.params['disk'][0]: datastore_name = self.params['disk'][0]['datastore'] # Check if user has provided datastore cluster first datastore_cluster = self.cache.find_obj(self.content, [vim.StoragePod], datastore_name) if datastore_cluster: # If user specified datastore cluster so get recommended datastore datastore_name = self.get_recommended_datastore(datastore_cluster_obj=datastore_cluster) # Check if get_recommended_datastore or user 
specified datastore exists or not datastore = self.cache.find_obj(self.content, [vim.Datastore], datastore_name) else: self.module.fail_json(msg="Either datastore or autoselect_datastore should be provided to select datastore") if not datastore and self.params['template']: # use the template's existing DS disks = [x for x in vm_obj.config.hardware.device if isinstance(x, vim.vm.device.VirtualDisk)] if disks: datastore = disks[0].backing.datastore datastore_name = datastore.name # validation if datastore: dc = self.cache.get_parent_datacenter(datastore) if dc.name != self.params['datacenter']: datastore = self.autoselect_datastore() datastore_name = datastore.name if not datastore: if len(self.params['disk']) != 0 or self.params['template'] is None: self.module.fail_json(msg="Unable to find the datastore with given parameters." " This could mean, %s is a non-existent virtual machine and module tried to" " deploy it as new virtual machine with no disk. Please specify disks parameter" " or specify template to clone from." % self.params['name']) self.module.fail_json(msg="Failed to find a matching datastore") return datastore, datastore_name def obj_has_parent(self, obj, parent): if obj is None and parent is None: raise AssertionError() current_parent = obj while True: if current_parent.name == parent.name: return True # Check if we have reached till root folder moid = current_parent._moId if moid in ['group-d1', 'ha-folder-root']: return False current_parent = current_parent.parent if current_parent is None: return False def get_scsi_type(self): disk_controller_type = "paravirtual" # set cpu/memory/etc if 'hardware' in self.params: if 'scsi' in self.params['hardware']: if self.params['hardware']['scsi'] in ['buslogic', 'paravirtual', 'lsilogic', 'lsilogicsas']: disk_controller_type = self.params['hardware']['scsi'] else: self.module.fail_json(msg="hardware.scsi attribute should be 'paravirtual' or 'lsilogic'") return disk_controller_type def find_folder(self, searchpath): """ Walk inventory objects one position of the searchpath at a time """ # split the searchpath so we can iterate through it paths = [x.replace('/', '') for x in searchpath.split('/')] paths_total = len(paths) - 1 position = 0 # recursive walk while looking for next element in searchpath root = self.content.rootFolder while root and position <= paths_total: change = False if hasattr(root, 'childEntity'): for child in root.childEntity: if child.name == paths[position]: root = child position += 1 change = True break elif isinstance(root, vim.Datacenter): if hasattr(root, 'vmFolder'): if root.vmFolder.name == paths[position]: root = root.vmFolder position += 1 change = True else: root = None if not change: root = None return root def get_resource_pool(self, cluster=None, host=None, resource_pool=None): """ Get a resource pool, filter on cluster, esxi_hostname or resource_pool if given """ cluster_name = cluster or self.params.get('cluster', None) host_name = host or self.params.get('esxi_hostname', None) resource_pool_name = resource_pool or self.params.get('resource_pool', None) # get the datacenter object datacenter = find_obj(self.content, [vim.Datacenter], self.params['datacenter']) if not datacenter: self.module.fail_json(msg='Unable to find datacenter "%s"' % self.params['datacenter']) # if cluster is given, get the cluster object if cluster_name: cluster = find_obj(self.content, [vim.ComputeResource], cluster_name, folder=datacenter) if not cluster: self.module.fail_json(msg='Unable to find cluster "%s"' % cluster_name) # 
if host is given, get the cluster object using the host elif host_name: host = find_obj(self.content, [vim.HostSystem], host_name, folder=datacenter) if not host: self.module.fail_json(msg='Unable to find host "%s"' % host_name) cluster = host.parent else: cluster = None # get resource pools limiting search to cluster or datacenter resource_pool = find_obj(self.content, [vim.ResourcePool], resource_pool_name, folder=cluster or datacenter) if not resource_pool: if resource_pool_name: self.module.fail_json(msg='Unable to find resource_pool "%s"' % resource_pool_name) else: self.module.fail_json(msg='Unable to find resource pool, need esxi_hostname, resource_pool, or cluster') return resource_pool def deploy_vm(self): # https://github.com/vmware/pyvmomi-community-samples/blob/master/samples/clone_vm.py # https://www.vmware.com/support/developer/vc-sdk/visdk25pubs/ReferenceGuide/vim.vm.CloneSpec.html # https://www.vmware.com/support/developer/vc-sdk/visdk25pubs/ReferenceGuide/vim.vm.ConfigSpec.html # https://www.vmware.com/support/developer/vc-sdk/visdk41pubs/ApiReference/vim.vm.RelocateSpec.html # FIXME: # - static IPs self.folder = self.params.get('folder', None) if self.folder is None: self.module.fail_json(msg="Folder is required parameter while deploying new virtual machine") # Prepend / if it was missing from the folder path, also strip trailing slashes if not self.folder.startswith('/'): self.folder = '/%(folder)s' % self.params self.folder = self.folder.rstrip('/') datacenter = self.cache.find_obj(self.content, [vim.Datacenter], self.params['datacenter']) if datacenter is None: self.module.fail_json(msg='No datacenter named %(datacenter)s was found' % self.params) dcpath = compile_folder_path_for_object(datacenter) # Nested folder does not have trailing / if not dcpath.endswith('/'): dcpath += '/' # Check for full path first in case it was already supplied if (self.folder.startswith(dcpath + self.params['datacenter'] + '/vm') or self.folder.startswith(dcpath + '/' + self.params['datacenter'] + '/vm')): fullpath = self.folder elif self.folder.startswith('/vm/') or self.folder == '/vm': fullpath = "%s%s%s" % (dcpath, self.params['datacenter'], self.folder) elif self.folder.startswith('/'): fullpath = "%s%s/vm%s" % (dcpath, self.params['datacenter'], self.folder) else: fullpath = "%s%s/vm/%s" % (dcpath, self.params['datacenter'], self.folder) f_obj = self.content.searchIndex.FindByInventoryPath(fullpath) # abort if no strategy was successful if f_obj is None: # Add some debugging values in failure. details = { 'datacenter': datacenter.name, 'datacenter_path': dcpath, 'folder': self.folder, 'full_search_path': fullpath, } self.module.fail_json(msg='No folder %s matched in the search path : %s' % (self.folder, fullpath), details=details) destfolder = f_obj if self.params['template']: vm_obj = self.get_vm_or_template(template_name=self.params['template']) if vm_obj is None: self.module.fail_json(msg="Could not find a template named %(template)s" % self.params) else: vm_obj = None # always get a resource_pool resource_pool = self.get_resource_pool() # set the destination datastore for VM & disks if self.params['datastore']: # Give precedence to datastore value provided by user # User may want to deploy VM to specific datastore. 
datastore_name = self.params['datastore'] # Check if user has provided datastore cluster first datastore_cluster = self.cache.find_obj(self.content, [vim.StoragePod], datastore_name) if datastore_cluster: # If user specified datastore cluster so get recommended datastore datastore_name = self.get_recommended_datastore(datastore_cluster_obj=datastore_cluster) # Check if get_recommended_datastore or user specified datastore exists or not datastore = self.cache.find_obj(self.content, [vim.Datastore], datastore_name) else: (datastore, datastore_name) = self.select_datastore(vm_obj) self.configspec = vim.vm.ConfigSpec() self.configspec.deviceChange = [] # create the relocation spec self.relospec = vim.vm.RelocateSpec() self.relospec.deviceChange = [] self.configure_guestid(vm_obj=vm_obj, vm_creation=True) self.configure_cpu_and_memory(vm_obj=vm_obj, vm_creation=True) self.configure_hardware_params(vm_obj=vm_obj) self.configure_resource_alloc_info(vm_obj=vm_obj) self.configure_vapp_properties(vm_obj=vm_obj) self.configure_disks(vm_obj=vm_obj) self.configure_network(vm_obj=vm_obj) self.configure_cdrom(vm_obj=vm_obj) # Find if we need network customizations (find keys in dictionary that requires customizations) network_changes = False for nw in self.params['networks']: for key in nw: # We don't need customizations for these keys if key not in ('device_type', 'mac', 'name', 'vlan', 'type', 'start_connected'): network_changes = True break if len(self.params['customization']) > 0 or network_changes or self.params.get('customization_spec') is not None: self.customize_vm(vm_obj=vm_obj) clonespec = None clone_method = None try: if self.params['template']: # Only select specific host when ESXi hostname is provided if self.params['esxi_hostname']: self.relospec.host = self.select_host() self.relospec.datastore = datastore # Convert disk present in template if is set if self.params['convert']: for device in vm_obj.config.hardware.device: if isinstance(device, vim.vm.device.VirtualDisk): disk_locator = vim.vm.RelocateSpec.DiskLocator() disk_locator.diskBackingInfo = vim.vm.device.VirtualDisk.FlatVer2BackingInfo() if self.params['convert'] in ['thin']: disk_locator.diskBackingInfo.thinProvisioned = True if self.params['convert'] in ['eagerzeroedthick']: disk_locator.diskBackingInfo.eagerlyScrub = True if self.params['convert'] in ['thick']: disk_locator.diskBackingInfo.diskMode = "persistent" disk_locator.diskId = device.key disk_locator.datastore = datastore self.relospec.disk.append(disk_locator) # https://www.vmware.com/support/developer/vc-sdk/visdk41pubs/ApiReference/vim.vm.RelocateSpec.html # > pool: For a clone operation from a template to a virtual machine, this argument is required. 
self.relospec.pool = resource_pool linked_clone = self.params.get('linked_clone') snapshot_src = self.params.get('snapshot_src', None) if linked_clone: if snapshot_src is not None: self.relospec.diskMoveType = vim.vm.RelocateSpec.DiskMoveOptions.createNewChildDiskBacking else: self.module.fail_json(msg="Parameter 'linked_src' and 'snapshot_src' are" " required together for linked clone operation.") clonespec = vim.vm.CloneSpec(template=self.params['is_template'], location=self.relospec) if self.customspec: clonespec.customization = self.customspec if snapshot_src is not None: if vm_obj.snapshot is None: self.module.fail_json(msg="No snapshots present for virtual machine or template [%(template)s]" % self.params) snapshot = self.get_snapshots_by_name_recursively(snapshots=vm_obj.snapshot.rootSnapshotList, snapname=snapshot_src) if len(snapshot) != 1: self.module.fail_json(msg='virtual machine "%(template)s" does not contain' ' snapshot named "%(snapshot_src)s"' % self.params) clonespec.snapshot = snapshot[0].snapshot clonespec.config = self.configspec clone_method = 'Clone' try: task = vm_obj.Clone(folder=destfolder, name=self.params['name'], spec=clonespec) except vim.fault.NoPermission as e: self.module.fail_json(msg="Failed to clone virtual machine %s to folder %s " "due to permission issue: %s" % (self.params['name'], destfolder, to_native(e.msg))) self.change_detected = True else: # ConfigSpec require name for VM creation self.configspec.name = self.params['name'] self.configspec.files = vim.vm.FileInfo(logDirectory=None, snapshotDirectory=None, suspendDirectory=None, vmPathName="[" + datastore_name + "]") clone_method = 'CreateVM_Task' try: task = destfolder.CreateVM_Task(config=self.configspec, pool=resource_pool) except vmodl.fault.InvalidRequest as e: self.module.fail_json(msg="Failed to create virtual machine due to invalid configuration " "parameter %s" % to_native(e.msg)) except vim.fault.RestrictedVersion as e: self.module.fail_json(msg="Failed to create virtual machine due to " "product versioning restrictions: %s" % to_native(e.msg)) self.change_detected = True self.wait_for_task(task) except TypeError as e: self.module.fail_json(msg="TypeError was returned, please ensure to give correct inputs. 
%s" % to_text(e)) if task.info.state == 'error': # https://kb.vmware.com/selfservice/microsites/search.do?language=en_US&cmd=displayKC&externalId=2021361 # https://kb.vmware.com/selfservice/microsites/search.do?language=en_US&cmd=displayKC&externalId=2173 # provide these to the user for debugging clonespec_json = serialize_spec(clonespec) configspec_json = serialize_spec(self.configspec) kwargs = { 'changed': self.change_applied, 'failed': True, 'msg': task.info.error.msg, 'clonespec': clonespec_json, 'configspec': configspec_json, 'clone_method': clone_method } return kwargs else: # set annotation vm = task.info.result if self.params['annotation']: annotation_spec = vim.vm.ConfigSpec() annotation_spec.annotation = str(self.params['annotation']) task = vm.ReconfigVM_Task(annotation_spec) self.wait_for_task(task) if task.info.state == 'error': return {'changed': self.change_applied, 'failed': True, 'msg': task.info.error.msg, 'op': 'annotation'} if self.params['customvalues']: self.customize_customvalues(vm_obj=vm) if self.params['wait_for_ip_address'] or self.params['wait_for_customization'] or self.params['state'] in ['poweredon', 'restarted']: set_vm_power_state(self.content, vm, 'poweredon', force=False) if self.params['wait_for_ip_address']: wait_for_vm_ip(self.content, vm, self.params['wait_for_ip_address_timeout']) if self.params['wait_for_customization']: is_customization_ok = self.wait_for_customization(vm) if not is_customization_ok: vm_facts = self.gather_facts(vm) return {'changed': self.change_applied, 'failed': True, 'instance': vm_facts, 'op': 'customization'} vm_facts = self.gather_facts(vm) return {'changed': self.change_applied, 'failed': False, 'instance': vm_facts} def get_snapshots_by_name_recursively(self, snapshots, snapname): snap_obj = [] for snapshot in snapshots: if snapshot.name == snapname: snap_obj.append(snapshot) else: snap_obj = snap_obj + self.get_snapshots_by_name_recursively(snapshot.childSnapshotList, snapname) return snap_obj def reconfigure_vm(self): self.configspec = vim.vm.ConfigSpec() self.configspec.deviceChange = [] # create the relocation spec self.relospec = vim.vm.RelocateSpec() self.relospec.deviceChange = [] self.configure_guestid(vm_obj=self.current_vm_obj) self.configure_cpu_and_memory(vm_obj=self.current_vm_obj) self.configure_hardware_params(vm_obj=self.current_vm_obj) self.configure_disks(vm_obj=self.current_vm_obj) self.configure_network(vm_obj=self.current_vm_obj) self.configure_cdrom(vm_obj=self.current_vm_obj) self.customize_customvalues(vm_obj=self.current_vm_obj) self.configure_resource_alloc_info(vm_obj=self.current_vm_obj) self.configure_vapp_properties(vm_obj=self.current_vm_obj) if self.params['annotation'] and self.current_vm_obj.config.annotation != self.params['annotation']: self.configspec.annotation = str(self.params['annotation']) self.change_detected = True if self.params['resource_pool']: self.relospec.pool = self.get_resource_pool() if self.relospec.pool != self.current_vm_obj.resourcePool: task = self.current_vm_obj.RelocateVM_Task(spec=self.relospec) self.wait_for_task(task) if task.info.state == 'error': return {'changed': self.change_applied, 'failed': True, 'msg': task.info.error.msg, 'op': 'relocate'} # Only send VMware task if we see a modification if self.change_detected: task = None try: task = self.current_vm_obj.ReconfigVM_Task(spec=self.configspec) except vim.fault.RestrictedVersion as e: self.module.fail_json(msg="Failed to reconfigure virtual machine due to" " product versioning restrictions: %s" % 
to_native(e.msg)) self.wait_for_task(task) if task.info.state == 'error': return {'changed': self.change_applied, 'failed': True, 'msg': task.info.error.msg, 'op': 'reconfig'} # Rename VM if self.params['uuid'] and self.params['name'] and self.params['name'] != self.current_vm_obj.config.name: task = self.current_vm_obj.Rename_Task(self.params['name']) self.wait_for_task(task) if task.info.state == 'error': return {'changed': self.change_applied, 'failed': True, 'msg': task.info.error.msg, 'op': 'rename'} # Mark VM as Template if self.params['is_template'] and not self.current_vm_obj.config.template: try: self.current_vm_obj.MarkAsTemplate() self.change_applied = True except vmodl.fault.NotSupported as e: self.module.fail_json(msg="Failed to mark virtual machine [%s] " "as template: %s" % (self.params['name'], e.msg)) # Mark Template as VM elif not self.params['is_template'] and self.current_vm_obj.config.template: resource_pool = self.get_resource_pool() kwargs = dict(pool=resource_pool) if self.params.get('esxi_hostname', None): host_system_obj = self.select_host() kwargs.update(host=host_system_obj) try: self.current_vm_obj.MarkAsVirtualMachine(**kwargs) self.change_applied = True except vim.fault.InvalidState as invalid_state: self.module.fail_json(msg="Virtual machine is not marked" " as template : %s" % to_native(invalid_state.msg)) except vim.fault.InvalidDatastore as invalid_ds: self.module.fail_json(msg="Converting template to virtual machine" " operation cannot be performed on the" " target datastores: %s" % to_native(invalid_ds.msg)) except vim.fault.CannotAccessVmComponent as cannot_access: self.module.fail_json(msg="Failed to convert template to virtual machine" " as operation unable access virtual machine" " component: %s" % to_native(cannot_access.msg)) except vmodl.fault.InvalidArgument as invalid_argument: self.module.fail_json(msg="Failed to convert template to virtual machine" " due to : %s" % to_native(invalid_argument.msg)) except Exception as generic_exc: self.module.fail_json(msg="Failed to convert template to virtual machine" " due to generic error : %s" % to_native(generic_exc)) # Automatically update VMware UUID when converting template to VM. # This avoids an interactive prompt during VM startup. 
uuid_action = [x for x in self.current_vm_obj.config.extraConfig if x.key == "uuid.action"] if not uuid_action: uuid_action_opt = vim.option.OptionValue() uuid_action_opt.key = "uuid.action" uuid_action_opt.value = "create" self.configspec.extraConfig.append(uuid_action_opt) self.change_detected = True # add customize existing VM after VM re-configure if 'existing_vm' in self.params['customization'] and self.params['customization']['existing_vm']: if self.current_vm_obj.config.template: self.module.fail_json(msg="VM is template, not support guest OS customization.") if self.current_vm_obj.runtime.powerState != vim.VirtualMachinePowerState.poweredOff: self.module.fail_json(msg="VM is not in poweroff state, can not do guest OS customization.") cus_result = self.customize_exist_vm() if cus_result['failed']: return cus_result vm_facts = self.gather_facts(self.current_vm_obj) return {'changed': self.change_applied, 'failed': False, 'instance': vm_facts} def customize_exist_vm(self): task = None # Find if we need network customizations (find keys in dictionary that requires customizations) network_changes = False for nw in self.params['networks']: for key in nw: # We don't need customizations for these keys if key not in ('device_type', 'mac', 'name', 'vlan', 'type', 'start_connected'): network_changes = True break if len(self.params['customization']) > 1 or network_changes or self.params.get('customization_spec'): self.customize_vm(vm_obj=self.current_vm_obj) try: task = self.current_vm_obj.CustomizeVM_Task(self.customspec) except vim.fault.CustomizationFault as e: self.module.fail_json(msg="Failed to customization virtual machine due to CustomizationFault: %s" % to_native(e.msg)) except vim.fault.RuntimeFault as e: self.module.fail_json(msg="failed to customization virtual machine due to RuntimeFault: %s" % to_native(e.msg)) except Exception as e: self.module.fail_json(msg="failed to customization virtual machine due to fault: %s" % to_native(e.msg)) self.wait_for_task(task) if task.info.state == 'error': return {'changed': self.change_applied, 'failed': True, 'msg': task.info.error.msg, 'op': 'customize_exist'} if self.params['wait_for_customization']: set_vm_power_state(self.content, self.current_vm_obj, 'poweredon', force=False) is_customization_ok = self.wait_for_customization(self.current_vm_obj) if not is_customization_ok: return {'changed': self.change_applied, 'failed': True, 'op': 'wait_for_customize_exist'} return {'changed': self.change_applied, 'failed': False} def wait_for_task(self, task, poll_interval=1): """ Wait for a VMware task to complete. Terminal states are 'error' and 'success'. 
Inputs: - task: the task to wait for - poll_interval: polling interval to check the task, in seconds Modifies: - self.change_applied """ # https://www.vmware.com/support/developer/vc-sdk/visdk25pubs/ReferenceGuide/vim.Task.html # https://www.vmware.com/support/developer/vc-sdk/visdk25pubs/ReferenceGuide/vim.TaskInfo.html # https://github.com/virtdevninja/pyvmomi-community-samples/blob/master/samples/tools/tasks.py while task.info.state not in ['error', 'success']: time.sleep(poll_interval) self.change_applied = self.change_applied or task.info.state == 'success' def get_vm_events(self, vm, eventTypeIdList): byEntity = vim.event.EventFilterSpec.ByEntity(entity=vm, recursion="self") filterSpec = vim.event.EventFilterSpec(entity=byEntity, eventTypeId=eventTypeIdList) eventManager = self.content.eventManager return eventManager.QueryEvent(filterSpec) def wait_for_customization(self, vm, poll=10000, sleep=10): thispoll = 0 while thispoll <= poll: eventStarted = self.get_vm_events(vm, ['CustomizationStartedEvent']) if len(eventStarted): thispoll = 0 while thispoll <= poll: eventsFinishedResult = self.get_vm_events(vm, ['CustomizationSucceeded', 'CustomizationFailed']) if len(eventsFinishedResult): if not isinstance(eventsFinishedResult[0], vim.event.CustomizationSucceeded): self.module.fail_json(msg='Customization failed with error {0}:\n{1}'.format( eventsFinishedResult[0]._wsdlName, eventsFinishedResult[0].fullFormattedMessage)) return False break else: time.sleep(sleep) thispoll += 1 return True else: time.sleep(sleep) thispoll += 1 self.module.fail_json('waiting for customizations timed out.') return False def main(): argument_spec = vmware_argument_spec() argument_spec.update( state=dict(type='str', default='present', choices=['absent', 'poweredoff', 'poweredon', 'present', 'rebootguest', 'restarted', 'shutdownguest', 'suspended']), template=dict(type='str', aliases=['template_src']), is_template=dict(type='bool', default=False), annotation=dict(type='str', aliases=['notes']), customvalues=dict(type='list', default=[]), name=dict(type='str'), name_match=dict(type='str', choices=['first', 'last'], default='first'), uuid=dict(type='str'), use_instance_uuid=dict(type='bool', default=False), folder=dict(type='str'), guest_id=dict(type='str'), disk=dict(type='list', default=[]), cdrom=dict(type=list_or_dict, default=[]), hardware=dict(type='dict', default={}), force=dict(type='bool', default=False), datacenter=dict(type='str', default='ha-datacenter'), esxi_hostname=dict(type='str'), cluster=dict(type='str'), wait_for_ip_address=dict(type='bool', default=False), wait_for_ip_address_timeout=dict(type='int', default=300), state_change_timeout=dict(type='int', default=0), snapshot_src=dict(type='str'), linked_clone=dict(type='bool', default=False), networks=dict(type='list', default=[]), resource_pool=dict(type='str'), customization=dict(type='dict', default={}, no_log=True), customization_spec=dict(type='str', default=None), wait_for_customization=dict(type='bool', default=False), vapp_properties=dict(type='list', default=[]), datastore=dict(type='str'), convert=dict(type='str', choices=['thin', 'thick', 'eagerzeroedthick']), delete_from_inventory=dict(type='bool', default=False), ) module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True, mutually_exclusive=[ ['cluster', 'esxi_hostname'], ], required_one_of=[ ['name', 'uuid'], ], ) result = {'failed': False, 'changed': False} pyv = PyVmomiHelper(module) # Check if the VM exists before continuing vm = pyv.get_vm() # VM already 
exists if vm: if module.params['state'] == 'absent': # destroy it if module.check_mode: result.update( vm_name=vm.name, changed=True, current_powerstate=vm.summary.runtime.powerState.lower(), desired_operation='remove_vm', ) module.exit_json(**result) if module.params['force']: # has to be poweredoff first set_vm_power_state(pyv.content, vm, 'poweredoff', module.params['force']) result = pyv.remove_vm(vm, module.params['delete_from_inventory']) elif module.params['state'] == 'present': if module.check_mode: result.update( vm_name=vm.name, changed=True, desired_operation='reconfigure_vm', ) module.exit_json(**result) result = pyv.reconfigure_vm() elif module.params['state'] in ['poweredon', 'poweredoff', 'restarted', 'suspended', 'shutdownguest', 'rebootguest']: if module.check_mode: result.update( vm_name=vm.name, changed=True, current_powerstate=vm.summary.runtime.powerState.lower(), desired_operation='set_vm_power_state', ) module.exit_json(**result) # set powerstate tmp_result = set_vm_power_state(pyv.content, vm, module.params['state'], module.params['force'], module.params['state_change_timeout']) if tmp_result['changed']: result["changed"] = True if module.params['state'] in ['poweredon', 'restarted', 'rebootguest'] and module.params['wait_for_ip_address']: wait_result = wait_for_vm_ip(pyv.content, vm, module.params['wait_for_ip_address_timeout']) if not wait_result: module.fail_json(msg='Waiting for IP address timed out') tmp_result['instance'] = wait_result if not tmp_result["failed"]: result["failed"] = False result['instance'] = tmp_result['instance'] if tmp_result["failed"]: result["failed"] = True result["msg"] = tmp_result["msg"] else: # This should not happen raise AssertionError() # VM doesn't exist else: if module.params['state'] in ['poweredon', 'poweredoff', 'present', 'restarted', 'suspended']: if module.check_mode: result.update( changed=True, desired_operation='deploy_vm', ) module.exit_json(**result) result = pyv.deploy_vm() if result['failed']: module.fail_json(msg='Failed to create a virtual machine : %s' % result['msg']) if result['failed']: module.fail_json(**result) else: module.exit_json(**result) if __name__ == '__main__': main()
tumbl3w33d/ansible
lib/ansible/modules/cloud/vmware/vmware_guest.py
Python
gpl-3.0
148470
[ "VisIt" ]
3283486b3efbbd088baaeff88c30ea6f38a2c8718a261d8aad6c19f0e9cd8575
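The disk-configuration path of the module above normalizes several size spellings (size: "40gb", size_tb/size_gb/size_mb/size_kb keys) into kibibytes before comparing against capacityInKB. Below is a minimal, standalone sketch of that conversion using the same unit table; the function name disk_size_to_kb and its error handling are illustrative assumptions, not part of the stored module.

# Illustrative sketch only; mirrors get_configured_disk_size's unit handling.
import re

def disk_size_to_kb(disk_spec):
    units = {'tb': 3, 'gb': 2, 'mb': 1, 'kb': 0}
    if 'size' in disk_spec:
        m = re.match(r'(\d+(?:\.\d+)?)([tgmkTGMK][bB])', str(disk_spec['size']))
        if not m:
            raise ValueError("unparsable disk size %r" % disk_spec['size'])
        value, unit = float(m.group(1)), m.group(2).lower()
    else:
        # fall back to size_tb / size_gb / size_mb / size_kb style keys
        keys = [k for k in disk_spec if k.startswith('size_')]
        if not keys:
            raise ValueError("no size information in disk spec")
        unit = keys[0].split('_')[-1].lower()
        value = float(disk_spec[keys[0]])
    if unit not in units:
        raise ValueError("unsupported unit %r" % unit)
    return int(value * (1024 ** units[unit]))

# e.g. disk_size_to_kb({'size': '40gb'}) == 41943040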
#!/usr/bin/python # -*- coding: utf-8 -*- """NBody in N^2 complexity Note that we are unp.sing only Newtonian forces and do not consider relativity Neither do we consider collisions between stars Thus some of our stars will accelerate to speeds beyond c This is done to keep the simulation simple enough for teaching purposes All the work is done in the calc_force, move and random_galaxy functions. To vectorize the code these are the functions to transform. """ from benchpress.benchmarks import util import numpy as np G = 6.673e-11 solarmass=1.98892e30 def fill_diagonal(a, val): d,_ = a.shape #This only makes sense for square matrices a.shape=d*d #Flatten a without making a copy a[::d+1]=val #Assign the diagonal values a.shape = (d,d) #Return a to its original shape def calc_force(a, b, dt): """Calculate forces between bodies F = ((G m_a m_b)/r^2)/((x_b-x_a)/r) """ dx = b['x'] - a['x'][np.newaxis,:].T dy = b['y'] - a['y'][np.newaxis,:].T dz = b['z'] - a['z'][np.newaxis,:].T pm = b['m'] * a['m'][np.newaxis,:].T if a is b: fill_diagonal(dx,1.0) fill_diagonal(dy,1.0) fill_diagonal(dz,1.0) fill_diagonal(pm,0.0) r = ( dx ** 2 + dy ** 2 + dz ** 2) ** 0.5 #In the below calc of the the forces the force of a body upon itself #becomes nan and thus destroys the data Fx = G * pm / r ** 2 * (dx / r) Fy = G * pm / r ** 2 * (dy / r) Fz = G * pm / r ** 2 * (dz / r) #The diagonal nan numbers must be removed so that the force from a body #upon itself is zero if a is b: fill_diagonal(Fx,0) fill_diagonal(Fy,0) fill_diagonal(Fz,0) a['vx'] += np.add.reduce(Fx, axis=1)/ a['m'] * dt a['vy'] += np.add.reduce(Fy, axis=1)/ a['m'] * dt a['vz'] += np.add.reduce(Fz, axis=1)/ a['m'] * dt def move(solarsystem, astoroids, dt): """Move the bodies first find forces and change velocity and then move positions """ calc_force(solarsystem, solarsystem, dt) calc_force(astoroids, solarsystem, dt) solarsystem['x'] += solarsystem['vx'] * dt solarsystem['y'] += solarsystem['vy'] * dt solarsystem['z'] += solarsystem['vz'] * dt astoroids['x'] += astoroids['vx'] * dt astoroids['y'] += astoroids['vy'] * dt astoroids['z'] += astoroids['vz'] * dt def circlev(rx, ry, rz): r2=np.sqrt(rx*rx+ry*ry+rz*rz) numerator=(6.67e-11)*1e6*solarmass return np.sqrt(numerator/r2) def sign(x): if x<0: return -1 if x>0: return 1 return 0 def random_system(x_max, y_max, z_max, n, b, B): """Generate a galaxy of random bodies""" solarsystem = {'m':np.empty(n), 'x':np.empty(n), 'y':np.empty(n),'z':np.empty(n),\ 'vx':np.empty(n), 'vy':np.empty(n),'vz':np.empty(n)} solarsystem['m'][0]= 1e6*solarmass solarsystem['x'][0]= 0 solarsystem['y'][0]= 0 solarsystem['z'][0]= 0 solarsystem['vx'][0]= 0 solarsystem['vy'][0]= 0 solarsystem['vz'][0]= 0 for i in range(1,n): px, py,pz = B.random_array((1,)), B.random_array((1,)), B.random_array((1,))*.01 dist = (1.0/np.sqrt(px*px+py*py+pz*pz))-(.8-B.random_array((1,))*.1) px = x_max*px*dist*sign(.5-B.random_array((1,))) py = y_max*py*dist*sign(.5-B.random_array((1,))) pz = z_max*pz*dist*sign(.5-B.random_array((1,))) solarsystem['x'][i], solarsystem['y'][i], solarsystem['z'][i] = px, py, pz magv = circlev(px,py, pz) absangle = np.arctan(abs(py/px)) thetav= np.pi/2-absangle vx = -1*sign(py)*np.cos(thetav)*magv vy = sign(px)*np.sin(thetav)*magv vz = 0 solarsystem['vx'][i], solarsystem['vy'][i], solarsystem['vz'][i] = vx, vy, vz solarsystem['m'][i] = B.random_array((1,))*solarmass*10+1e20; astoroids = {'m':np.empty(b), 'x':np.empty(b), 'y':np.empty(b),'z':np.empty(b),\ 'vx':np.empty(b), 'vy':np.empty(b),'vz':np.empty(b)} for i in 
range(b): px, py,pz = B.random_array((1,)), B.random_array((1,)), B.random_array((1,))*.01 dist = (1.0/np.sqrt(px*px+py*py+pz*pz))-(B.random_array((1,))*.1) px = x_max*px*dist*sign(.5-B.random_array((1,))) py = y_max*py*dist*sign(.5-B.random_array((1,))) pz = z_max*pz*dist*sign(.5-B.random_array((1,))) astoroids['x'][i], astoroids['y'][i], astoroids['z'][i] = px, py, pz magv = circlev(px,py, pz) absangle = np.arctan(abs(py/px)) thetav= np.pi/2-absangle vx = -1*sign(py)*np.cos(thetav)*magv vy = sign(px)*np.sin(thetav)*magv vz = 0 astoroids['vx'][i], astoroids['vy'][i], astoroids['vz'][i] = vx, vy, vz astoroids['m'][i] = B.random_array((1,))*solarmass*10+1e14; return solarsystem, astoroids def gfx_init(xm, ym, zm): """Init plot""" import matplotlib.pyplot as plt from mpl_toolkits.mplot3d import Axes3D # pylint: disable=W0611 plt.ion() fig = plt.figure() sub = fig.add_subplot(111, projection='3d') sub.xm = xm sub.ym = ym sub.zm = zm return sub def show(sub, solarsystem, bodies): """Show plot""" import matplotlib.pyplot as plt #Sun sub.clear() sub.scatter( solarsystem['x'][0], solarsystem['y'][0], solarsystem['z'][0], s=100, marker='o', c='yellow', ) #Planets sub.scatter( [solarsystem['x'][1:]], [solarsystem['y'][1:]], [solarsystem['z'][1:]], s=5, marker='o', c='blue', ) #Astoroids sub.scatter( [bodies['x']], [bodies['y']], [bodies['z']], s=.1, marker='.', c='green', ) sub.set_xbound(-sub.xm, sub.xm) sub.set_ybound(-sub.ym, sub.ym) try: sub.set_zbound(-sub.zm, sub.zm) except AttributeError: print('Warning: correct 3D plots may require matplotlib-1.1 or later') plt.draw() def main(): B = util.Benchmark() num_asteroids = B.size[0] num_planets = B.size[1] num_iteratinos = B.size[2] x_max = 1e18 y_max = 1e18 z_max = 1e18 dt = 1e12 solarsystem, astoroids = random_system(x_max, y_max, z_max, num_planets, num_asteroids, B) if B.verbose: P3 = gfx_init(x_max, y_max, z_max) B.start() for _ in range(num_iteratinos): move(solarsystem, astoroids, dt) if B.verbose: show(P3, solarsystem, astoroids) R = solarsystem['x'] B.stop() B.pprint() if B.verbose: print(R) if B.outputfn: B.tofile(B.outputfn, {'res': R}) if __name__ == "__main__": main()
bh107/benchpress
benchpress/benchmarks/nbody_nice/python_numpy/dep/nbody_nice_old.py
Python
apache-2.0
6857
[ "Galaxy" ]
624af20c0d2ca3f2546a52bd54247ad1c1e17aaf41115b230b6455e6b3e4e70c
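The benchmark above computes its pairwise forces by broadcasting body positions into n-by-n separation arrays and zeroing the diagonal so a body exerts no force on itself. Here is a self-contained numpy sketch of that idea; the helper name pairwise_accel and the (n, 3) position layout are assumptions for illustration, not the benchmark's own API.

# Illustrative sketch of the vectorized pairwise-force pattern used in calc_force.
import numpy as np

G = 6.673e-11

def pairwise_accel(pos, mass):
    # pos: (n, 3) positions, mass: (n,) masses
    d = pos[np.newaxis, :, :] - pos[:, np.newaxis, :]   # d[i, j] = pos[j] - pos[i]
    r2 = (d ** 2).sum(axis=2)
    np.fill_diagonal(r2, 1.0)                            # avoid division by zero
    inv_r3 = r2 ** -1.5
    np.fill_diagonal(inv_r3, 0.0)                        # a body exerts no force on itself
    # acceleration on body i: sum_j G * m_j * (pos_j - pos_i) / |pos_j - pos_i|^3
    return G * (d * (mass[np.newaxis, :] * inv_r3)[:, :, np.newaxis]).sum(axis=1)

# e.g. two unit masses 1 m apart attract each other with |a| == G:
# pairwise_accel(np.array([[0., 0., 0.], [1., 0., 0.]]), np.ones(2))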
# python standard library import sys # ensure Python 3.5 assert sys.version_info[0:2] == (3, 5), sys.version_info import os import operator import itertools import collections import functools import glob import csv import datetime import bisect import sqlite3 import subprocess import random import gc import shutil import shelve import contextlib # third party packages import PyQt4 import cython import numpy as np import scipy import numba import numexpr import bokeh import h5py import tables import bcolz import pandas import dask import IPython import rpy2 import statsmodels import sklearn import sh import sqlalchemy import pymysql import psycopg2 import petl import humanize import matplotlib as mpl import matplotlib_venn as venn import seaborn as sns import Bio import pyfasta import pysam import pysamstats import petlx import vcf import vcfnp import intervaltree import anhima from mpl_toolkits.basemap import Basemap import allel
cggh/biipy
test.py
Python
mit
947
[ "pysam" ]
6b17da001d5b327fbca7530945f31ec10030524ec84c9fbde193ea9c394861b0
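The biipy test module above is essentially an environment smoke test: it asserts the interpreter version and imports every scientific package the image is expected to ship. If one wanted the same check but with a report of everything missing rather than a failure on the first bad import, a sketch along these lines would work; the package list here is an illustrative subset, not the full biipy set.

import importlib
import sys

# Require at least Python 3.5 (the test above pins exactly 3.5).
assert sys.version_info[:2] >= (3, 5), sys.version_info

# Illustrative subset of the packages the image is expected to provide.
EXPECTED = ['numpy', 'scipy', 'pandas', 'h5py', 'matplotlib', 'pysam']

missing = []
for name in EXPECTED:
    try:
        importlib.import_module(name)
    except ImportError as exc:
        missing.append((name, str(exc)))

if missing:
    raise ImportError('missing packages: {!r}'.format(missing))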
import pdb import numpy as np import math import time import chainer import chainer.functions as F import chainer.links as L from chainer import cuda from util import gaussian_logp0 from util import bernoulli_logp class VAE(chainer.Chain): def __init__(self, dim_in, dim_hidden, dim_latent, num_layers, num_trans, temperature, num_zsamples=1): super(VAE, self).__init__() # initialise first encoder and decoder hidden layer separately because # the input and output dims differ from the other hidden layers self.qlin0 = L.Linear(dim_in, dim_hidden) self.plin0 = L.Linear(dim_latent, dim_hidden) self._children.append('qlin0') self._children.append('plin0') for i in range(num_layers-1): # encoder layer_name = 'qlin' + str(i+1) setattr(self, layer_name, L.Linear(2*dim_hidden, dim_hidden)) self._children.append(layer_name) # decoder layer_name = 'plin' + str(i+1) setattr(self, layer_name, L.Linear(2*dim_hidden, dim_hidden)) self._children.append(layer_name) # initialise the encoder and decoder output layer separately because # the input and output dims differ from the other hidden layers self.qlin_mu = L.Linear(2*dim_hidden, dim_latent) self.qlin_ln_var = L.Linear(2*dim_hidden, dim_latent) self.plin_ber_prob = L.Linear(2*dim_hidden, dim_in) self._children.append('qlin_mu') self._children.append('qlin_ln_var') self._children.append('plin_ber_prob') # flow for i in range(num_trans): layer_name = 'flow_w_' + str(i) # weights setattr(self, layer_name, L.Scale(axis=1, W_shape=(dim_latent), bias_term=False)) self._children.append(layer_name) layer_name = 'flow_b_' + str(i) # bias setattr(self, layer_name, L.Bias(axis=0, shape=(1))) self._children.append(layer_name) layer_name = 'flow_u_' + str(i) # scaling factor u setattr(self, layer_name, L.Scale(axis=1, W_shape=(dim_latent), bias_term=False)) self._children.append(layer_name) self.num_layers = num_layers self.num_trans = num_trans self.temperature = temperature self.num_zsamples = num_zsamples self.epochs_seen = 0 def encode(self, x): h = F.crelu(self.qlin0(x)) for i in range(self.num_layers-1): layer_name = 'qlin' + str(i+1) h = F.crelu(self[layer_name](h)) self.qmu = self.qlin_mu(h) self.qln_var = self.qlin_ln_var(h) return self.qmu, self.qln_var def decode(self, z): h = F.crelu(self.plin0(z)) for i in range(self.num_layers-1): layer_name = 'plin' + str(i+1) h = F.crelu(self[layer_name](h)) self.p_ber_prob_logit = self.plin_ber_prob(h) return self.p_ber_prob_logit def planar_flows(self,z): self.z_trans = [] self.z_trans.append(z) self.phi = [] for i in range(self.num_trans): flow_w_name = 'flow_w_' + str(i) flow_b_name = 'flow_b_' + str(i) flow_u_name = 'flow_u_' + str(i) h = self[flow_w_name](z) h = F.sum(h,axis=(1)) h = self[flow_b_name](h) h = F.tanh(h) h_tanh = h dim_latent = z.shape[1] h = F.transpose(F.tile(h, (dim_latent,1))) h = self[flow_u_name](h) z += h self.z_trans.append(z) # Calculate and store the phi term h_tanh_derivative = 1-(h_tanh*h_tanh) h_tanh_derivative = F.transpose(F.tile(h_tanh_derivative, (dim_latent,1))) phi = self[flow_w_name](h_tanh_derivative) # Equation (11) self.phi.append(phi) return z def __call__(self, x): # Compute q(z|x) encoding_time = time.time() qmu, qln_var = self.encode(x) encoding_time = float(time.time() - encoding_time) decoding_time_average = 0. 
self.kl = 0 self.logp = 0 current_temperature = min(self.temperature['value'],1.0) self.temperature['value'] += self.temperature['increment'] for j in xrange(self.num_zsamples): # Sample z ~ q(z_0|x) z_0 = F.gaussian(self.qmu, self.qln_var) # Perform planar flow mappings, Equation (10) decoding_time = time.time() z_K = self.planar_flows(z_0) # Obtain parameters for p(x|z_K) p_ber_prob_logit = self.decode(z_K) decoding_time = time.time() - decoding_time decoding_time_average += decoding_time # Compute log q(z_0) q_prior_log = current_temperature*gaussian_logp0(z_0) # Compute log p(x|z_K) decoder_log = bernoulli_logp(x, p_ber_prob_logit) # Compute log p(z_K) p_prior_log = current_temperature*gaussian_logp0(z_K) # Compute log p(x,z_K) which is log p(x|z_K) + log p(z_K) joint_log = decoder_log + p_prior_log # Compute second term of log q(z_K) q_K_log = 0 for i in range(self.num_trans): flow_u_name = 'flow_u_' + str(i) lodget_jacobian = F.sum(self[flow_u_name](self.phi[i]), axis=1) q_K_log += F.log(1 + lodget_jacobian) q_K_log *= current_temperature # For recording purposes only self.logp += decoder_log self.kl += -(q_prior_log - p_prior_log - q_K_log) decoding_time_average /= self.num_zsamples # pdb.set_trace() self.obj_batch = ((q_prior_log -joint_log) - q_K_log) self.obj_batch /= self.num_zsamples batch_size = self.obj_batch.shape[0] self.obj = F.sum(self.obj_batch)/batch_size self.kl /= self.num_zsamples self.logp /= self.num_zsamples self.timing_info = np.array([encoding_time,decoding_time]) return self.obj
ashwindcruz/dgm
planar_mnist/model.py
Python
mit
6,218
[ "Gaussian" ]
9663930065298024fd64b1ffde11737a4e8c0b3a654499f18f727b40208435bc
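The VAE above augments the approximate posterior with planar normalizing flows: each transform is f(z) = z + u * tanh(w.z + b), and its contribution to log q(z_K) comes from the log-determinant term log|1 + u.psi(z)| with psi(z) = (1 - tanh^2(w.z + b)) * w, accumulated over num_trans transforms (the flow_w/flow_b/flow_u links and the phi list in the model). A plain NumPy sketch of one such step, with illustrative shapes rather than the Chainer link layout:

import numpy as np

def planar_flow(z, w, u, b):
    """One planar-flow step and its log|det J|; z is (batch, d), w and u are (d,), b is a scalar."""
    pre = z @ w + b                     # w.z + b, shape (batch,)
    h = np.tanh(pre)
    z_new = z + np.outer(h, u)          # f(z) = z + u * tanh(w.z + b)
    psi = np.outer(1.0 - h ** 2, w)     # psi(z) = tanh'(w.z + b) * w, shape (batch, d)
    log_det = np.log(np.abs(1.0 + psi @ u))
    return z_new, log_det

# Tiny usage example with made-up parameters:
rng = np.random.RandomState(0)
z0 = rng.randn(5, 2)
z1, log_det = planar_flow(z0, w=np.array([1.0, -0.5]), u=np.array([0.3, 0.2]), b=0.1)
print(z1.shape, log_det)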
#!/usr/bin/env python # -*- coding: utf-8 -*- # # Copyright 1998-2015 by Paweł T. Jochym <pawel.jochym@ifj.edu.pl> # # This file is part of Elastic. # Elastic is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # Elastic is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # You should have received a copy of the GNU General Public License # along with Elastic. If not, see <http://www.gnu.org/licenses/>. ''' The elastic command is a command-line tool exposing the functionality of elastic library for direct use - without writing any python code. ''' from __future__ import print_function, absolute_import, division import click import ase.io import elastic import pkg_resources from click import echo verbose = 0 def banner(): if verbose > 1: echo('Elastic ver. %s\n----------------------' % pkg_resources.get_distribution("elastic").version) def set_verbosity(v): global verbose verbose = v def process_calc(fn): from time import sleep sleep(1) @click.group() @click.option('--vasp', 'frmt', flag_value='vasp', help='Use VASP formats (default)', default=True) @click.option('--abinit', 'frmt', flag_value='abinit', help='Use AbInit formats') @click.option('--cij', 'action', flag_value='cij', help='Generate deformations for Cij (default)', default=True) @click.option('--eos', 'action', flag_value='eos', help='Generate deformations for Equation of State') @click.option('-v', '--verbose', count=True, help='Increase verbosity') @click.version_option() @click.pass_context def cli(ctx, frmt, action, verbose): '''Command-line interface to the elastic library.''' if verbose: set_verbosity(verbose) banner() @cli.command() @click.option('-n', '--num', 'num', default=5, type=int, help='Number of generated deformations per axis (default: 5)') @click.option('-l', '--lo', 'lo', default=0.98, type=float, help='Lower relative volume for EOS scan (default: 0.98)') @click.option('-h', '--hi', 'hi', default=1.02, type=float, help='Upper relative volume for EOS scan (default: 1.02)') @click.option('-s', '--size', 'size', default=2.0, type=float, help='Deformation size for Cij scan (% or deg., default: 2.0)') @click.argument('struct', type=click.Path(exists=True)) @click.pass_context def gen(ctx, num, lo, hi, size, struct): '''Generate deformed structures''' frmt = ctx.parent.params['frmt'] action = ctx.parent.params['action'] cryst = ase.io.read(struct, format=frmt) fn_tmpl = action if frmt == 'vasp': fn_tmpl += '_%03d.POSCAR' kwargs = {'vasp5': True, 'direct': True} elif frmt == 'abinit': fn_tmpl += '_%03d.abinit' kwargs = {} if verbose: from elastic.elastic import get_lattice_type nr, brav, sg, sgn = get_lattice_type(cryst) echo('%s lattice (%s): %s' % (brav, sg, cryst.get_chemical_formula())) if action == 'cij': echo('Generating {:d} deformations of {:.1f}(%/degs.) per axis'.format( num, size)) elif action == 'eos': echo('Generating {:d} deformations from {:.3f} to {:.3f} of V0'.format( num, lo, hi)) if action == 'cij': systems = elastic.get_elementary_deformations(cryst, n=num, d=size) elif action == 'eos': systems = elastic.scan_volumes(cryst, n=num, lo=lo, hi=hi) systems.insert(0, cryst) if verbose: echo('Writing %d deformation files.' 
% len(systems)) for n, s in enumerate(systems): ase.io.write(fn_tmpl % n, s, format=frmt, **kwargs) @cli.command() @click.argument('files', type=click.Path(exists=True), nargs=-1) @click.pass_context def proc(ctx, files): '''Process calculated structures''' def calc_reader(fn, verb): if verb: echo('Reading: {:<60s}\r'.format(fn), nl=False, err=True) return ase.io.read(fn) action = ctx.parent.params['action'] systems = [calc_reader(calc, verbose) for calc in files] if verbose : echo('', err=True) if action == 'cij': cij = elastic.get_elastic_tensor(systems[0], systems=systems[1:]) msv = cij[1][3].max() eps = 1e-4 if verbose: echo('Cij solution\n'+30*'-') echo(' Solution rank: {:2d}{}'.format( cij[1][2], ' (undetermined)' if cij[1][2] < len(cij[0]) else '')) if cij[1][2] == len(cij[0]): echo(' Square of residuals: {:7.2g}'.format(cij[1][1])) echo(' Relative singular values:') for sv in cij[1][3]/msv: echo('{:7.4f}{}'.format( sv, '* ' if (sv) < eps else ' '), nl=False) echo('\n\nElastic tensor (GPa):') for dsc in elastic.elastic.get_cij_order(systems[0]): echo('{: >7s} '.format(dsc), nl=False) echo('\n'+30*'-') for c, sv in zip(cij[0], cij[1][3]/msv): echo('{:7.2f}{}'.format( c/ase.units.GPa, '* ' if sv < eps else ' '), nl=False) echo() elif action == 'eos': eos = elastic.get_BM_EOS(systems[0], systems=systems[1:]) eos[1] /= ase.units.GPa if verbose: echo('# %7s (A^3) %7s (GPa) %7s' % ("V0", "B0", "B0'")) echo(' %7.2f %7.2f %7.2f' % tuple(eos)) if __name__ == '__main__': cli()
jochym/Elastic
elastic/cli/elastic.py
Python
gpl-3.0
5,903
[ "ABINIT", "ASE", "VASP" ]
c48f81aa0a99fb1b4e7c75da32af3a02dee4e1a54db3ae04381076ed6c203606
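The elastic command above is a click group whose gen and proc subcommands wrap library calls such as elastic.get_elementary_deformations and elastic.scan_volumes. Click commands of this shape can also be exercised in-process with click.testing.CliRunner; the sketch below assumes the module is importable as elastic.cli.elastic (inferred from the path shown above) and that a POSCAR file exists in the working directory.

from click.testing import CliRunner
from elastic.cli.elastic import cli   # import path assumed from the file location above

runner = CliRunner()
# Generate five Cij deformations per axis from a VASP POSCAR (the file must exist on disk).
result = runner.invoke(cli, ['-v', '--cij', 'gen', '-n', '5', '-s', '2.0', 'POSCAR'])
print(result.exit_code)
print(result.output)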
# # Copyright (C) 2013-2019 The ESPResSo project # # This file is part of ESPResSo. # # ESPResSo is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # ESPResSo is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. # import unittest as ut import unittest_decorators as utx import numpy as np import espressomd import espressomd.electrostatics import tests_common class InteractionsBondedTest(ut.TestCase): system = espressomd.System(box_l=[17.0, 9.0, 8.0]) np.random.seed(seed=system.seed) box_l = 10. start_pos = np.random.rand(3) * box_l axis = np.random.rand(3) axis /= np.linalg.norm(axis) steps = 10 def setUp(self): self.system.box_l = [self.box_l] * 3 self.system.cell_system.skin = 0.4 self.system.time_step = .2 self.system.part.add(id=0, pos=self.start_pos, type=0) self.system.part.add(id=1, pos=self.start_pos, type=0) def tearDown(self): self.system.part.clear() # Test Harmonic Bond def test_harmonic(self): hb_k = 5 hb_r_0 = 1.5 hb_r_cut = 3.355 hb = espressomd.interactions.HarmonicBond( k=hb_k, r_0=hb_r_0, r_cut=hb_r_cut) self.run_test(hb, lambda r: tests_common.harmonic_force( scalar_r=r, k=hb_k, r_0=hb_r_0), lambda r: tests_common.harmonic_potential( scalar_r=r, k=hb_k, r_0=hb_r_0), 0.01, hb_r_cut, True) # Test Fene Bond def test_fene(self): fene_k = 23.15 fene_d_r_max = 3.355 fene_r_0 = 1.1 fene = espressomd.interactions.FeneBond( k=fene_k, d_r_max=fene_d_r_max, r_0=fene_r_0) self.run_test(fene, lambda r: tests_common.fene_force( scalar_r=r, k=fene_k, d_r_max=fene_d_r_max, r_0=fene_r_0), lambda r: tests_common.fene_potential( scalar_r=r, k=fene_k, d_r_max=fene_d_r_max, r_0=fene_r_0), 0.01, fene_r_0 + fene_d_r_max, True) @utx.skipIfMissingFeatures(["ELECTROSTATICS"]) def test_coulomb(self): coulomb_k = 1 q1 = 1 q2 = -1 self.system.part[0].q = q1 self.system.part[1].q = q2 self.run_test( espressomd.interactions.BondedCoulomb(prefactor=coulomb_k), lambda r: tests_common.coulomb_force(r, coulomb_k, q1, q2), lambda r: tests_common.coulomb_potential(r, coulomb_k, q1, q2), 0.01, self.system.box_l[0] / 3) @utx.skipIfMissingFeatures(["ELECTROSTATICS"]) def test_coulomb_sr(self): # with negated actual charges and only short range int: cancels out all # interactions q1 = 1.2 q2 = -q1 self.system.part[0].q = q1 self.system.part[1].q = q2 r_cut = 2 sr_solver = espressomd.electrostatics.DH( prefactor=2, kappa=0.8, r_cut=r_cut) self.system.actors.add(sr_solver) coulomb_sr = espressomd.interactions.BondedCoulombSRBond( q1q2=- q1 * q2) # no break test, bond can't break. it extends as far as the short range # part of the electrostatics actor self.run_test( coulomb_sr, lambda r: [0., 0., 0.], lambda r: 0, 0.01, r_cut, test_breakage=False) def test_quartic(self): """Tests the Quartic bonded interaction by comparing the potential and force against the analytic values""" quartic_k0 = 2. quartic_k1 = 5. quartic_r = 0.5 quartic_r_cut = self.system.box_l[0] / 3. 
quartic = espressomd.interactions.QuarticBond(k0=quartic_k0, k1=quartic_k1, r=quartic_r, r_cut=quartic_r_cut) self.run_test(quartic, lambda r: tests_common.quartic_force( k0=quartic_k0, k1=quartic_k1, r=quartic_r, r_cut=quartic_r_cut, scalar_r=r), lambda r: tests_common.quartic_potential( k0=quartic_k0, k1=quartic_k1, r=quartic_r, r_cut=quartic_r_cut, scalar_r=r), 0.01, quartic_r_cut, True) def run_test(self, bond_instance, force_func, energy_func, min_dist, cutoff, test_breakage=False): self.system.bonded_inter.add(bond_instance) self.system.part[0].bonds = ((bond_instance, 1),) # n+1 steps from min_dist to cut, then we remove the cut, because that # may break the bond due to rounding errors for dist in np.linspace(min_dist, cutoff, self.steps + 1)[:-1]: self.system.part[1].pos = self.system.part[ 0].pos + self.axis * dist self.system.integrator.run(recalc_forces=True, steps=0) # Calculate energies E_sim = self.system.analysis.energy()["total"] E_ref = energy_func(dist) # Calculate forces f0_sim = np.copy(self.system.part[0].f) f1_sim = np.copy(self.system.part[1].f) f1_ref = self.axis * force_func(dist) # Check that energies match, ... self.assertAlmostEqual(E_sim, E_ref) # force equals minus the counter-force ... np.testing.assert_allclose(f0_sim, -f1_sim, 1E-12) # and has correct value. np.testing.assert_almost_equal(f1_sim, f1_ref) # Pressure # Isotropic pressure = 1/3 Trace Stress tensor # = 1/(3V) sum_i f_i r_i # where F is the force between the particles and r their distance p_expected = 1. / 3. * \ np.dot(f1_sim, self.axis * dist) / self.system.volume() p_sim = self.system.analysis.pressure()["total"] self.assertAlmostEqual(p_sim, p_expected, delta=1E-12) # Pressure tensor # P_ij = 1/V F_i r_j p_tensor_expected = np.outer( f1_sim, self.axis * dist) / self.system.volume() p_tensor_sim = self.system.analysis.stress_tensor()["total"] np.testing.assert_allclose( p_tensor_sim, p_tensor_expected, atol=1E-12) if test_breakage: self.system.part[1].pos = self.system.part[0].pos \ + self.axis * cutoff * (1.01) with self.assertRaisesRegex(Exception, "Encountered errors during integrate"): self.system.integrator.run(recalc_forces=True, steps=0) if __name__ == '__main__': ut.main()
psci2195/espresso-ffans
testsuite/python/interactions_bonded.py
Python
gpl-3.0
7,258
[ "ESPResSo" ]
58fb9a04ebee0fdda09c3ccb4812fb8cddf27ad75c281fbfffc91ff077d00f31
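The bonded-interaction test above sweeps a particle pair over a range of separations and compares the measured energy and force against analytic reference functions from tests_common (harmonic_force, harmonic_potential, and so on). As a hedged illustration of what such references compute for the harmonic bond, V(r) = k/2 (r - r_0)^2 and F(r) = -k (r - r_0); these are stand-ins with an assumed sign convention, not the actual tests_common implementations.

import numpy as np

def harmonic_potential(scalar_r, k, r_0):
    # V(r) = k/2 * (r - r_0)**2
    return 0.5 * k * (scalar_r - r_0) ** 2

def harmonic_force(scalar_r, k, r_0):
    # F(r) = -dV/dr = -k * (r - r_0); the test projects this onto the bond axis
    return -k * (scalar_r - r_0)

r = np.linspace(0.5, 3.0, 6)
print(harmonic_potential(r, k=5.0, r_0=1.5))
print(harmonic_force(r, k=5.0, r_0=1.5))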
from __future__ import print_function, division from trippy import psf, scamp, bgFinder, pill, psfStarChooser import numpy as np,scipy as sci,pylab as pyl from astropy.io import fits import pickle import unittest import os,sys class tester(unittest.TestCase): @classmethod def setUpClass(self): self.rates = [0.4,1.0,2.5] self.angles = [37.0,0.0,-58.7] self.EXPTIME = 360.0 #os.system('gunzip test_image.fits.gz') self.gened_bg = 1000.0 if not (os.path.isfile('test_image.fits') and os.path.isfile('planted_locations.pickle')): print('You may not have the necessary fits files for these tests.') print('Please execute the following two wget commands:') print('wget https://www.cadc-ccda.hia-iha.nrc-cnrc.gc.ca/files/vault/fraserw/test_psf.fits') print('wget https://www.cadc-ccda.hia-iha.nrc-cnrc.gc.ca/files/vault/fraserw/test_image.fits') print('') exit() with fits.open('test_image.fits') as han: self.image = han[0].data self.loadedPSF = psf.modelPSF(restore = 'test_psf.fits') with open('planted_locations.pickle','rb') as han: if sys.version_info[0]==3: x = pickle.load(han, encoding='latin1') elif sys.version_info[0]==2: x = pickle.load(han) self.planted_locations = np.array(x) self.bg = bgFinder.bgFinder(self.image) scamp.makeParFiles.writeConv() scamp.makeParFiles.writeParam() scamp.makeParFiles.writeSex('test.sex', minArea = 2, threshold = 2) scamp.runSex('test.sex','test_image.fits',verbose = True) self.catalog = scamp.getCatalog('def.cat',paramFile = 'def.param') os.remove('default.conv') os.remove('def.param') os.remove('test.sex') os.remove('def.cat') #os.system('gzip test_image.fits') starChooser = psfStarChooser.starChooser(self.image, self.catalog['XWIN_IMAGE'],self.catalog['YWIN_IMAGE'], self.catalog['FLUX_AUTO'],self.catalog['FLUXERR_AUTO']) (self.goodFits,self.goodMeds,self.goodSTDs) = starChooser(30, 25, noVisualSelection=True, autoTrim=False, quickFit=False, repFact=5) (self.goodFitsQF,self.goodMedsQF,self.goodSTDsQF) = starChooser(30, 25, noVisualSelection=True, autoTrim=False, quickFit=True, repFact=5) #using manual alpha and beta because the fitting done with the star chooser results in #different answers with the different versions of scipy ALPHA = 29.81210639392963 BETA = 19.32497948470224 self.goodPSF = psf.modelPSF(np.arange(51),np.arange(51), alpha=ALPHA,beta=BETA,repFact=10) #self.goodPSF = psf.modelPSF(np.arange(51),np.arange(51), alpha=self.goodMeds[2],beta=self.goodMeds[3],repFact=10) self.goodPSF.genLookupTable(self.image,self.goodFits[:,4],self.goodFits[:,5],verbose=False) self.goodPSF.line(self.rates[0], self.angles[0], self.EXPTIME/3600., pixScale=0.185, useLookupTable=True) #self.goodPSF.psfStore('test_psf.fits') self.pill = pill.pillPhot(self.image) rads = np.arange(5.0,25.0,5) x_test, y_test = 652.4552577047876, 101.62067493726078 #necessray to use custom coordinates to avoid errors due to different sextractor versions self.pill.computeRoundAperCorrFromSource(x_test,y_test, rads, width = 60.0, skyRadius = 50.0,display=False,displayAperture=False) self.pill(x_test,y_test,7.0,l=5.0,display=False,enableBGSelection = False) self.pill.SNR(verbose=True) self.distances = {} self.distances[26] = 0.2913793325 self.distances[16] = 0.5179553032 self.distances[7] = 0.1751143336 self.distances[0] = 0.7428739071 self.distances[23] = 0.3347620368 self.distances[18] = 0.7867022753 self.distances[14] = 0.1851933748 self.distances[5] = 0.4436303377 self.distances[28] = 0.3333371580 self.distances[9] = 0.1997155696 self.distances[29] = 0.3528487086 self.distances[22] = 0.4013085365 
self.distances[19] = 0.5801378489 self.distances[17] = 0.6873673797 self.distances[1] = 0.1621235311 self.distances[6] = 0.1248580739 self.distances[13] = 2.0296137333 self.distances[24] = 1.3238428831 self.distances[3] = 0.7816261053 self.distances[25] = 0.3067137897 self.distances[6] = 0.8099660873 def test_compSourceAper(self): self.assertAlmostEqual(self.pill.roundAperCorr(7.5),0.8348167541693421,msg = 'Round aperture correction from image seems to be discrepant.') def test_snr(self): self.assertAlmostEqual(self.pill.snr,22.629585517648817,msg = 'SNR is different than expected.') def test_flux(self): self.assertAlmostEqual(self.pill.sourceFlux,8602.32952903671,msg = 'Flux is different than expected.') def test_bg(self): self.assertAlmostEqual(self.pill.bg,1002.9928889886571,msg = 'Background is different than expected.') def test_numPix(self): self.assertEqual(self.pill.nPix,225.7,msg = 'Numpix is different than expected.') def test_roundAperFromPSF(self): rads = np.arange(5.0,25.0,5) r = (rads[1]+rads[0])/2.0 self.goodPSF.computeRoundAperCorrFromPSF(rads, display=False,displayAperture=False) self.loadedPSF.computeRoundAperCorrFromPSF(rads, display=False,displayAperture=False) self.assertAlmostEqual(self.goodPSF.roundAperCorr(r),self.loadedPSF.roundAperCorr(r),6, msg = 'Line aperture corrections differ.') def test_lineAperFromTSF(self): rads = np.arange(5.0,25.0,5) r = (rads[1]+rads[0])/2.0 self.goodPSF.computeLineAperCorrFromTSF(rads, self.rates[0]/10.0, self.angles[0],display=False,displayAperture=False) self.loadedPSF.computeLineAperCorrFromTSF(rads, self.rates[0]/10.0, self.angles[0],display=False,displayAperture=False) self.assertAlmostEqual(self.goodPSF.lineAperCorr(r),self.loadedPSF.lineAperCorr(r), 6, msg = 'Line aperture corrections differ.') def test_fwhm(self): self.assertEqual(self.loadedPSF.FWHM(), self.goodPSF.FWHM(),msg = 'PSF FWHM differ.') def test_fwhmFromMoffat(self): self.assertEqual(self.loadedPSF.FWHM(fromMoffatProfile=True), self.goodPSF.FWHM(fromMoffatProfile=True),msg = 'PSF FWHM differ.') def test_lookupTable(self): diff = self.goodPSF.lookupTable - self.loadedPSF.lookupTable self.assertAlmostEqual(np.max(np.abs(diff)),0.0, 2, msg = 'Generated lookup table appears to be unusual.') def test_long_one(self): print('\n Test Long PSF One:') self.goodPSF.line(self.rates[0], self.angles[0], self.EXPTIME/3600., pixScale=0.185, useLookupTable=True) self.loadedPSF.line(self.rates[0], self.angles[0], self.EXPTIME/3600., pixScale=0.185, useLookupTable=True) diff = self.goodPSF.longPSF - self.loadedPSF.longPSF self.assertAlmostEqual(np.max(np.abs(diff)),0.0,1, msg = 'Generated TSF one appears to be unusual.') def test_long_two(self): print('\n Test Long PSF Two:') self.goodPSF.line(self.rates[1], self.angles[1], self.EXPTIME/3600., pixScale=0.185, useLookupTable=True) self.loadedPSF.line(self.rates[1], self.angles[1], self.EXPTIME/3600., pixScale=0.185, useLookupTable=True) diff = self.goodPSF.longPSF - self.loadedPSF.longPSF self.assertAlmostEqual(np.max(np.abs(diff)), 0.0, 1, msg = 'Generated TSF two appears to be unusual.') def test_long_three(self): print('\n Test Long PSF Three:') self.goodPSF.line(self.rates[2], self.angles[2], self.EXPTIME/3600., pixScale=0.185, useLookupTable=True) self.loadedPSF.line(self.rates[2], self.angles[2], self.EXPTIME/3600., pixScale=0.185, useLookupTable=True) diff = self.goodPSF.longPSF - self.loadedPSF.longPSF self.assertAlmostEqual(np.max(np.abs(diff)), 0.0, 1, msg = 'Generated TSF three appears to be unusual.') def 
test_createPSF(self): diff = np.sum(self.goodFits[:,0] - np.array([11.42,11.23,11.68,12.19])) self.assertAlmostEqual(diff,0.0, msg = 'Fitted FWHM not the same. ') def test_createPSF_QF(self): diff = np.sum(self.goodFitsQF[:,0] - np.array([11.49,11.37,11.77,12.26])) self.assertAlmostEqual(diff,0.0, msg = 'Quick fitted FWHM not the same. ') def test_sextractor_coordinates(self): n_good = 0 for i in range(len(self.catalog['XWIN_IMAGE'])): x = self.catalog['XWIN_IMAGE'][i]-1.0 y = self.catalog['YWIN_IMAGE'][i]-1.0 d = ((self.planted_locations[:,0] - x)**2 + (self.planted_locations[:,1]-y)**2)**0.5 arg = np.argmin(d) #super lax distance to handle sextractor version differences #avoid index 6 which is source that sextractor cuts into two. if abs(d[arg] - self.distances[arg])<0.05 and arg!=6: n_good+=1 self.assertEqual(n_good,19,'Didnt find 20 unique sources in the correct places.') def test_len_sex(self): self.assertEqual(len(self.catalog['XWIN_IMAGE']),21,'Did not detect exactly 21 sources.') def test_bg_median(self): median = self.bg('median') self.assertAlmostEqual(median, 1000.0341315254655, msg = 'Median failed.') def test_bg_mean(self): mean = self.bg('mean') self.assertAlmostEqual(mean, 1000.0460484863409, msg = 'Mean failed.') def test_bg_hist(self): hist = self.bg('histMode') self.assertAlmostEqual(hist, 1000.4190254669019, msg = 'Hist method failed.') def test_bg_fraser(self): fraser = self.bg('fraserMode',0.1) self.assertAlmostEqual(fraser, 999.933901294518, msg = 'Fraser method failed.') def test_bg_gauss(self): gauss = self.bg('gaussFit') self.assertAlmostEqual(gauss, 1000.0460761715437, msg = 'Gaussian method failed.') def test_bg_smart(self): smart = self.bg('smart',3.0) self.assertAlmostEqual(smart, 1000.0460761715437, msg = 'Smart method failed.') if __name__ == "__main__": unittest.main()
fraserw/PyMOP
trippy/tests/test.py
Python
gpl-2.0
11,282
[ "Gaussian" ]
5d379574c68432b4695685bbaca9a3593b6c91e6fafb2aacfb0d91649ae9e139
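Among other things, the trippy test suite above checks bgFinder's background estimators ('median', 'mean', 'histMode', 'fraserMode', 'gaussFit', 'smart') against known values for a planted image. A small sketch comparing a few of those estimators on a synthetic flat frame, using only call patterns that appear in the tests; the frame itself is made up for illustration.

import numpy as np
from trippy import bgFinder

# Synthetic flat frame: ~1000 ADU background with Gaussian noise (illustrative only).
rng = np.random.RandomState(42)
image = rng.normal(loc=1000.0, scale=10.0, size=(200, 200))

bg = bgFinder.bgFinder(image)
for method, args in [('median', ()), ('mean', ()), ('fraserMode', (0.1,)), ('smart', (3.0,))]:
    print(method, bg(method, *args))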
""" Maximum likelihood covariance estimator. """ # Author: Alexandre Gramfort <alexandre.gramfort@inria.fr> # Gael Varoquaux <gael.varoquaux@normalesup.org> # Virgile Fritsch <virgile.fritsch@inria.fr> # # License: BSD Style. # avoid division truncation from __future__ import division import warnings import numpy as np from scipy import linalg from ..base import BaseEstimator from ..utils import array2d from ..utils.extmath import fast_logdet, pinvh def log_likelihood(emp_cov, precision): """Computes the log_likelihood of the data Params ------ emp_cov: 2D ndarray (n_features, n_features) Maximum Likelihood Estimator of covariance precision: 2D ndarray (n_features, n_features) The precision matrix of the covariance model to be tested """ return -np.sum(emp_cov * precision) + fast_logdet(precision) def empirical_covariance(X, assume_centered=False): """Computes the Maximum likelihood covariance estimator Parameters ---------- X: 2D ndarray, shape (n_samples, n_features) Data from which to compute the covariance estimate assume_centered: Boolean If True, data are not centered before computation. Useful when working with data whose mean is almost, but not exactly zero. If False, data are centered before computation. Returns ------- covariance: 2D ndarray, shape (n_features, n_features) Empirical covariance (Maximum Likelihood Estimator) """ X = np.asarray(X) if X.ndim == 1: X = np.reshape(X, (1, -1)) warnings.warn("Only one sample available. " \ "You may want to reshape your data array") if assume_centered: covariance = np.dot(X.T, X) / X.shape[0] else: covariance = np.cov(X.T, bias=1) return covariance class EmpiricalCovariance(BaseEstimator): """Maximum likelihood covariance estimator Parameters ---------- store_precision : bool Specifies if the estimated precision is stored assume_centered: bool If True, data are not centered before computation. Useful when working with data whose mean is almost, but not exactly zero. If False (default), data are centered before computation. Attributes ---------- `covariance_` : 2D ndarray, shape (n_features, n_features) Estimated covariance matrix `precision_` : 2D ndarray, shape (n_features, n_features) Estimated pseudo-inverse matrix. (stored only if store_precision is True) """ def __init__(self, store_precision=True, assume_centered=False): self.store_precision = store_precision self.assume_centered = assume_centered def _set_covariance(self, covariance): """Saves the covariance and precision estimates Storage is done accordingly to `self.store_precision`. Precision stored only if invertible. Params ------ covariance: 2D ndarray, shape (n_features, n_features) Estimated covariance matrix to be stored, and from which precision is computed. """ covariance = array2d(covariance) # set covariance self.covariance_ = covariance # set precision if self.store_precision: self.precision_ = pinvh(covariance) else: self.precision_ = None def get_precision(self): """Getter for the precision matrix. Returns ------- precision_: array-like, The precision matrix associated to the current covariance object. """ if self.store_precision: precision = self.precision_ else: precision = pinvh(self.covariance_) return precision def fit(self, X, y=None): """Fits the Maximum Likelihood Estimator covariance model according to the given training data and parameters. Parameters ---------- X: array-like, shape = [n_samples, n_features] Training data, where n_samples is the number of samples and n_features is the number of features. y: not used, present for API consistence purpose. 
Returns ------- self : object Returns self. """ if self.assume_centered: self.location_ = np.zeros(X.shape[1]) else: self.location_ = X.mean(0) covariance = empirical_covariance( X, assume_centered=self.assume_centered) self._set_covariance(covariance) return self def score(self, X_test, y=None): """Computes the log-likelihood of a gaussian data set with `self.covariance_` as an estimator of its covariance matrix. Parameters ---------- X_test: array-like, shape = [n_samples, n_features] Test data of which we compute the likelihood, where n_samples is the number of samples and n_features is the number of features. X_test is assumed to be drawn from the same distribution than tha data used in fit (including centering). y: not used, present for API consistence purpose. Returns ------- res : float The likelihood of the data set with `self.covariance_` as an estimator of its covariance matrix. """ # compute empirical covariance of the test set test_cov = empirical_covariance( X_test - self.location_, assume_centered=True) # compute log likelihood res = log_likelihood(test_cov, self.get_precision()) return res def error_norm(self, comp_cov, norm='frobenius', scaling=True, squared=True): """Computes the Mean Squared Error between two covariance estimators. (In the sense of the Frobenius norm) Parameters ---------- comp_cov: array-like, shape = [n_features, n_features] The covariance to compare with. norm: str The type of norm used to compute the error. Available error types: - 'frobenius' (default): sqrt(tr(A^t.A)) - 'spectral': sqrt(max(eigenvalues(A^t.A)) where A is the error ``(comp_cov - self.covariance_)``. scaling: bool If True (default), the squared error norm is divided by n_features. If False, the squared error norm is not rescaled. squared: bool Whether to compute the squared error norm or the error norm. If True (default), the squared error norm is returned. If False, the error norm is returned. Returns ------- The Mean Squared Error (in the sense of the Frobenius norm) between `self` and `comp_cov` covariance estimators. """ # compute the error error = comp_cov - self.covariance_ # compute the error norm if norm == "frobenius": squared_norm = np.sum(error ** 2) elif norm == "spectral": squared_norm = np.amax(linalg.svdvals(np.dot(error.T, error))) else: raise NotImplementedError( "Only spectral and frobenius norms are implemented") # optionaly scale the error norm if scaling: squared_norm = squared_norm / error.shape[0] # finally get either the squared norm or the norm if squared: result = squared_norm else: result = np.sqrt(squared_norm) return result def mahalanobis(self, observations): """Computes the mahalanobis distances of given observations. The provided observations are assumed to be centered. One may want to center them using a location estimate first. Parameters ---------- observations: array-like, shape = [n_observations, n_features] The observations, the Mahalanobis distances of the which we compute. Observations are assumed to be drawn from the same distribution than tha data used in fit (including centering). Returns ------- mahalanobis_distance: array, shape = [n_observations,] Mahalanobis distances of the observations. """ precision = self.get_precision() # compute mahalanobis distances centered_obs = observations - self.location_ mahalanobis_dist = np.sum( np.dot(centered_obs, precision) * centered_obs, 1) return mahalanobis_dist
seckcoder/lang-learn
python/sklearn/sklearn/covariance/empirical_covariance_.py
Python
unlicense
8,610
[ "Gaussian" ]
0031dbcee0d3c68d4b78065c60f7653fa0a5751bdc5c90f2b26a4ba5ce56480e
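EmpiricalCovariance above centers the data, stores the maximum-likelihood covariance (np.cov with bias=1), optionally its pseudo-inverse precision matrix, and computes squared Mahalanobis distances of centered observations. The same quantities in plain NumPy, as a sketch of the math rather than the estimator's exact code path (np.linalg.pinv stands in for the pinvh used above):

import numpy as np

rng = np.random.RandomState(0)
X = rng.randn(500, 3) @ np.array([[2.0, 0.3, 0.0],
                                  [0.0, 1.0, 0.5],
                                  [0.0, 0.0, 0.7]])

location = X.mean(axis=0)
Xc = X - location
covariance = Xc.T @ Xc / X.shape[0]        # maximum-likelihood estimate (bias=1), as above
precision = np.linalg.pinv(covariance)     # the estimator uses pinvh; pinv is a close stand-in

# Squared Mahalanobis distance of each centered observation, as in mahalanobis() above.
mahal = np.sum((Xc @ precision) * Xc, axis=1)
print(covariance)
print(mahal[:5])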
# Copyright (c) OpenMMLab. All rights reserved. import copy import inspect import math import warnings import cv2 import mmcv import numpy as np from numpy import random from mmdet.core import PolygonMasks, find_inside_bboxes from mmdet.core.evaluation.bbox_overlaps import bbox_overlaps from ..builder import PIPELINES try: from imagecorruptions import corrupt except ImportError: corrupt = None try: import albumentations from albumentations import Compose except ImportError: albumentations = None Compose = None @PIPELINES.register_module() class Resize: """Resize images & bbox & mask. This transform resizes the input image to some scale. Bboxes and masks are then resized with the same scale factor. If the input dict contains the key "scale", then the scale in the input dict is used, otherwise the specified scale in the init method is used. If the input dict contains the key "scale_factor" (if MultiScaleFlipAug does not give img_scale but scale_factor), the actual scale will be computed by image shape and scale_factor. `img_scale` can either be a tuple (single-scale) or a list of tuple (multi-scale). There are 3 multiscale modes: - ``ratio_range is not None``: randomly sample a ratio from the ratio \ range and multiply it with the image scale. - ``ratio_range is None`` and ``multiscale_mode == "range"``: randomly \ sample a scale from the multiscale range. - ``ratio_range is None`` and ``multiscale_mode == "value"``: randomly \ sample a scale from multiple scales. Args: img_scale (tuple or list[tuple]): Images scales for resizing. multiscale_mode (str): Either "range" or "value". ratio_range (tuple[float]): (min_ratio, max_ratio) keep_ratio (bool): Whether to keep the aspect ratio when resizing the image. bbox_clip_border (bool, optional): Whether to clip the objects outside the border of the image. In some dataset like MOT17, the gt bboxes are allowed to cross the border of images. Therefore, we don't need to clip the gt bboxes in these cases. Defaults to True. backend (str): Image resize backend, choices are 'cv2' and 'pillow'. These two backends generates slightly different results. Defaults to 'cv2'. override (bool, optional): Whether to override `scale` and `scale_factor` so as to call resize twice. Default False. If True, after the first resizing, the existed `scale` and `scale_factor` will be ignored so the second resizing can be allowed. This option is a work-around for multiple times of resize in DETR. Defaults to False. """ def __init__(self, img_scale=None, multiscale_mode='range', ratio_range=None, keep_ratio=True, bbox_clip_border=True, backend='cv2', override=False): if img_scale is None: self.img_scale = None else: if isinstance(img_scale, list): self.img_scale = img_scale else: self.img_scale = [img_scale] assert mmcv.is_list_of(self.img_scale, tuple) if ratio_range is not None: # mode 1: given a scale and a range of image ratio assert len(self.img_scale) == 1 else: # mode 2: given multiple scales or a range of scales assert multiscale_mode in ['value', 'range'] self.backend = backend self.multiscale_mode = multiscale_mode self.ratio_range = ratio_range self.keep_ratio = keep_ratio # TODO: refactor the override option in Resize self.override = override self.bbox_clip_border = bbox_clip_border @staticmethod def random_select(img_scales): """Randomly select an img_scale from given candidates. Args: img_scales (list[tuple]): Images scales for selection. 
Returns: (tuple, int): Returns a tuple ``(img_scale, scale_dix)``, \ where ``img_scale`` is the selected image scale and \ ``scale_idx`` is the selected index in the given candidates. """ assert mmcv.is_list_of(img_scales, tuple) scale_idx = np.random.randint(len(img_scales)) img_scale = img_scales[scale_idx] return img_scale, scale_idx @staticmethod def random_sample(img_scales): """Randomly sample an img_scale when ``multiscale_mode=='range'``. Args: img_scales (list[tuple]): Images scale range for sampling. There must be two tuples in img_scales, which specify the lower and upper bound of image scales. Returns: (tuple, None): Returns a tuple ``(img_scale, None)``, where \ ``img_scale`` is sampled scale and None is just a placeholder \ to be consistent with :func:`random_select`. """ assert mmcv.is_list_of(img_scales, tuple) and len(img_scales) == 2 img_scale_long = [max(s) for s in img_scales] img_scale_short = [min(s) for s in img_scales] long_edge = np.random.randint( min(img_scale_long), max(img_scale_long) + 1) short_edge = np.random.randint( min(img_scale_short), max(img_scale_short) + 1) img_scale = (long_edge, short_edge) return img_scale, None @staticmethod def random_sample_ratio(img_scale, ratio_range): """Randomly sample an img_scale when ``ratio_range`` is specified. A ratio will be randomly sampled from the range specified by ``ratio_range``. Then it would be multiplied with ``img_scale`` to generate sampled scale. Args: img_scale (tuple): Images scale base to multiply with ratio. ratio_range (tuple[float]): The minimum and maximum ratio to scale the ``img_scale``. Returns: (tuple, None): Returns a tuple ``(scale, None)``, where \ ``scale`` is sampled ratio multiplied with ``img_scale`` and \ None is just a placeholder to be consistent with \ :func:`random_select`. """ assert isinstance(img_scale, tuple) and len(img_scale) == 2 min_ratio, max_ratio = ratio_range assert min_ratio <= max_ratio ratio = np.random.random_sample() * (max_ratio - min_ratio) + min_ratio scale = int(img_scale[0] * ratio), int(img_scale[1] * ratio) return scale, None def _random_scale(self, results): """Randomly sample an img_scale according to ``ratio_range`` and ``multiscale_mode``. If ``ratio_range`` is specified, a ratio will be sampled and be multiplied with ``img_scale``. If multiple scales are specified by ``img_scale``, a scale will be sampled according to ``multiscale_mode``. Otherwise, single scale will be used. Args: results (dict): Result dict from :obj:`dataset`. Returns: dict: Two new keys 'scale` and 'scale_idx` are added into \ ``results``, which would be used by subsequent pipelines. 
""" if self.ratio_range is not None: scale, scale_idx = self.random_sample_ratio( self.img_scale[0], self.ratio_range) elif len(self.img_scale) == 1: scale, scale_idx = self.img_scale[0], 0 elif self.multiscale_mode == 'range': scale, scale_idx = self.random_sample(self.img_scale) elif self.multiscale_mode == 'value': scale, scale_idx = self.random_select(self.img_scale) else: raise NotImplementedError results['scale'] = scale results['scale_idx'] = scale_idx def _resize_img(self, results): """Resize images with ``results['scale']``.""" for key in results.get('img_fields', ['img']): if self.keep_ratio: img, scale_factor = mmcv.imrescale( results[key], results['scale'], return_scale=True, backend=self.backend) # the w_scale and h_scale has minor difference # a real fix should be done in the mmcv.imrescale in the future new_h, new_w = img.shape[:2] h, w = results[key].shape[:2] w_scale = new_w / w h_scale = new_h / h else: img, w_scale, h_scale = mmcv.imresize( results[key], results['scale'], return_scale=True, backend=self.backend) results[key] = img scale_factor = np.array([w_scale, h_scale, w_scale, h_scale], dtype=np.float32) results['img_shape'] = img.shape # in case that there is no padding results['pad_shape'] = img.shape results['scale_factor'] = scale_factor results['keep_ratio'] = self.keep_ratio def _resize_bboxes(self, results): """Resize bounding boxes with ``results['scale_factor']``.""" for key in results.get('bbox_fields', []): bboxes = results[key] * results['scale_factor'] if self.bbox_clip_border: img_shape = results['img_shape'] bboxes[:, 0::2] = np.clip(bboxes[:, 0::2], 0, img_shape[1]) bboxes[:, 1::2] = np.clip(bboxes[:, 1::2], 0, img_shape[0]) results[key] = bboxes def _resize_masks(self, results): """Resize masks with ``results['scale']``""" for key in results.get('mask_fields', []): if results[key] is None: continue if self.keep_ratio: results[key] = results[key].rescale(results['scale']) else: results[key] = results[key].resize(results['img_shape'][:2]) def _resize_seg(self, results): """Resize semantic segmentation map with ``results['scale']``.""" for key in results.get('seg_fields', []): if self.keep_ratio: gt_seg = mmcv.imrescale( results[key], results['scale'], interpolation='nearest', backend=self.backend) else: gt_seg = mmcv.imresize( results[key], results['scale'], interpolation='nearest', backend=self.backend) results[key] = gt_seg def __call__(self, results): """Call function to resize images, bounding boxes, masks, semantic segmentation map. Args: results (dict): Result dict from loading pipeline. Returns: dict: Resized results, 'img_shape', 'pad_shape', 'scale_factor', \ 'keep_ratio' keys are added into result dict. 
""" if 'scale' not in results: if 'scale_factor' in results: img_shape = results['img'].shape[:2] scale_factor = results['scale_factor'] assert isinstance(scale_factor, float) results['scale'] = tuple( [int(x * scale_factor) for x in img_shape][::-1]) else: self._random_scale(results) else: if not self.override: assert 'scale_factor' not in results, ( 'scale and scale_factor cannot be both set.') else: results.pop('scale') if 'scale_factor' in results: results.pop('scale_factor') self._random_scale(results) self._resize_img(results) self._resize_bboxes(results) self._resize_masks(results) self._resize_seg(results) return results def __repr__(self): repr_str = self.__class__.__name__ repr_str += f'(img_scale={self.img_scale}, ' repr_str += f'multiscale_mode={self.multiscale_mode}, ' repr_str += f'ratio_range={self.ratio_range}, ' repr_str += f'keep_ratio={self.keep_ratio}, ' repr_str += f'bbox_clip_border={self.bbox_clip_border})' return repr_str @PIPELINES.register_module() class RandomFlip: """Flip the image & bbox & mask. If the input dict contains the key "flip", then the flag will be used, otherwise it will be randomly decided by a ratio specified in the init method. When random flip is enabled, ``flip_ratio``/``direction`` can either be a float/string or tuple of float/string. There are 3 flip modes: - ``flip_ratio`` is float, ``direction`` is string: the image will be ``direction``ly flipped with probability of ``flip_ratio`` . E.g., ``flip_ratio=0.5``, ``direction='horizontal'``, then image will be horizontally flipped with probability of 0.5. - ``flip_ratio`` is float, ``direction`` is list of string: the image will be ``direction[i]``ly flipped with probability of ``flip_ratio/len(direction)``. E.g., ``flip_ratio=0.5``, ``direction=['horizontal', 'vertical']``, then image will be horizontally flipped with probability of 0.25, vertically with probability of 0.25. - ``flip_ratio`` is list of float, ``direction`` is list of string: given ``len(flip_ratio) == len(direction)``, the image will be ``direction[i]``ly flipped with probability of ``flip_ratio[i]``. E.g., ``flip_ratio=[0.3, 0.5]``, ``direction=['horizontal', 'vertical']``, then image will be horizontally flipped with probability of 0.3, vertically with probability of 0.5. Args: flip_ratio (float | list[float], optional): The flipping probability. Default: None. direction(str | list[str], optional): The flipping direction. Options are 'horizontal', 'vertical', 'diagonal'. Default: 'horizontal'. If input is a list, the length must equal ``flip_ratio``. Each element in ``flip_ratio`` indicates the flip probability of corresponding direction. """ def __init__(self, flip_ratio=None, direction='horizontal'): if isinstance(flip_ratio, list): assert mmcv.is_list_of(flip_ratio, float) assert 0 <= sum(flip_ratio) <= 1 elif isinstance(flip_ratio, float): assert 0 <= flip_ratio <= 1 elif flip_ratio is None: pass else: raise ValueError('flip_ratios must be None, float, ' 'or list of float') self.flip_ratio = flip_ratio valid_directions = ['horizontal', 'vertical', 'diagonal'] if isinstance(direction, str): assert direction in valid_directions elif isinstance(direction, list): assert mmcv.is_list_of(direction, str) assert set(direction).issubset(set(valid_directions)) else: raise ValueError('direction must be either str or list of str') self.direction = direction if isinstance(flip_ratio, list): assert len(self.flip_ratio) == len(self.direction) def bbox_flip(self, bboxes, img_shape, direction): """Flip bboxes horizontally. 
Args: bboxes (numpy.ndarray): Bounding boxes, shape (..., 4*k) img_shape (tuple[int]): Image shape (height, width) direction (str): Flip direction. Options are 'horizontal', 'vertical'. Returns: numpy.ndarray: Flipped bounding boxes. """ assert bboxes.shape[-1] % 4 == 0 flipped = bboxes.copy() if direction == 'horizontal': w = img_shape[1] flipped[..., 0::4] = w - bboxes[..., 2::4] flipped[..., 2::4] = w - bboxes[..., 0::4] elif direction == 'vertical': h = img_shape[0] flipped[..., 1::4] = h - bboxes[..., 3::4] flipped[..., 3::4] = h - bboxes[..., 1::4] elif direction == 'diagonal': w = img_shape[1] h = img_shape[0] flipped[..., 0::4] = w - bboxes[..., 2::4] flipped[..., 1::4] = h - bboxes[..., 3::4] flipped[..., 2::4] = w - bboxes[..., 0::4] flipped[..., 3::4] = h - bboxes[..., 1::4] else: raise ValueError(f"Invalid flipping direction '{direction}'") return flipped def __call__(self, results): """Call function to flip bounding boxes, masks, semantic segmentation maps. Args: results (dict): Result dict from loading pipeline. Returns: dict: Flipped results, 'flip', 'flip_direction' keys are added \ into result dict. """ if 'flip' not in results: if isinstance(self.direction, list): # None means non-flip direction_list = self.direction + [None] else: # None means non-flip direction_list = [self.direction, None] if isinstance(self.flip_ratio, list): non_flip_ratio = 1 - sum(self.flip_ratio) flip_ratio_list = self.flip_ratio + [non_flip_ratio] else: non_flip_ratio = 1 - self.flip_ratio # exclude non-flip single_ratio = self.flip_ratio / (len(direction_list) - 1) flip_ratio_list = [single_ratio] * (len(direction_list) - 1) + [non_flip_ratio] cur_dir = np.random.choice(direction_list, p=flip_ratio_list) results['flip'] = cur_dir is not None if 'flip_direction' not in results: results['flip_direction'] = cur_dir if results['flip']: # flip image for key in results.get('img_fields', ['img']): results[key] = mmcv.imflip( results[key], direction=results['flip_direction']) # flip bboxes for key in results.get('bbox_fields', []): results[key] = self.bbox_flip(results[key], results['img_shape'], results['flip_direction']) # flip masks for key in results.get('mask_fields', []): results[key] = results[key].flip(results['flip_direction']) # flip segs for key in results.get('seg_fields', []): results[key] = mmcv.imflip( results[key], direction=results['flip_direction']) return results def __repr__(self): return self.__class__.__name__ + f'(flip_ratio={self.flip_ratio})' @PIPELINES.register_module() class RandomShift: """Shift the image and box given shift pixels and probability. Args: shift_ratio (float): Probability of shifts. Default 0.5. max_shift_px (int): The max pixels for shifting. Default 32. filter_thr_px (int): The width and height threshold for filtering. The bbox and the rest of the targets below the width and height threshold will be filtered. Default 1. """ def __init__(self, shift_ratio=0.5, max_shift_px=32, filter_thr_px=1): assert 0 <= shift_ratio <= 1 assert max_shift_px >= 0 self.shift_ratio = shift_ratio self.max_shift_px = max_shift_px self.filter_thr_px = int(filter_thr_px) # The key correspondence from bboxes to labels. self.bbox2label = { 'gt_bboxes': 'gt_labels', 'gt_bboxes_ignore': 'gt_labels_ignore' } def __call__(self, results): """Call function to random shift images, bounding boxes. Args: results (dict): Result dict from loading pipeline. Returns: dict: Shift results. 
""" if random.random() < self.shift_ratio: img_shape = results['img'].shape[:2] random_shift_x = random.randint(-self.max_shift_px, self.max_shift_px) random_shift_y = random.randint(-self.max_shift_px, self.max_shift_px) new_x = max(0, random_shift_x) orig_x = max(0, -random_shift_x) new_y = max(0, random_shift_y) orig_y = max(0, -random_shift_y) # TODO: support mask and semantic segmentation maps. for key in results.get('bbox_fields', []): bboxes = results[key].copy() bboxes[..., 0::2] += random_shift_x bboxes[..., 1::2] += random_shift_y # clip border bboxes[..., 0::2] = np.clip(bboxes[..., 0::2], 0, img_shape[1]) bboxes[..., 1::2] = np.clip(bboxes[..., 1::2], 0, img_shape[0]) # remove invalid bboxes bbox_w = bboxes[..., 2] - bboxes[..., 0] bbox_h = bboxes[..., 3] - bboxes[..., 1] valid_inds = (bbox_w > self.filter_thr_px) & ( bbox_h > self.filter_thr_px) # If the shift does not contain any gt-bbox area, skip this # image. if key == 'gt_bboxes' and not valid_inds.any(): return results bboxes = bboxes[valid_inds] results[key] = bboxes # label fields. e.g. gt_labels and gt_labels_ignore label_key = self.bbox2label.get(key) if label_key in results: results[label_key] = results[label_key][valid_inds] for key in results.get('img_fields', ['img']): img = results[key] new_img = np.zeros_like(img) img_h, img_w = img.shape[:2] new_h = img_h - np.abs(random_shift_y) new_w = img_w - np.abs(random_shift_x) new_img[new_y:new_y + new_h, new_x:new_x + new_w] \ = img[orig_y:orig_y + new_h, orig_x:orig_x + new_w] results[key] = new_img return results def __repr__(self): repr_str = self.__class__.__name__ repr_str += f'(max_shift_px={self.max_shift_px}, ' return repr_str @PIPELINES.register_module() class Pad: """Pad the image & masks & segmentation map. There are two padding modes: (1) pad to a fixed size and (2) pad to the minimum size that is divisible by some number. Added keys are "pad_shape", "pad_fixed_size", "pad_size_divisor", Args: size (tuple, optional): Fixed padding size. size_divisor (int, optional): The divisor of padded size. pad_to_square (bool): Whether to pad the image into a square. Currently only used for YOLOX. Default: False. pad_val (dict, optional): A dict for padding value, the default value is `dict(img=0, masks=0, seg=255)`. 
""" def __init__(self, size=None, size_divisor=None, pad_to_square=False, pad_val=dict(img=0, masks=0, seg=255)): self.size = size self.size_divisor = size_divisor if isinstance(pad_val, float) or isinstance(pad_val, int): warnings.warn( 'pad_val of float type is deprecated now, ' f'please use pad_val=dict(img={pad_val}, ' f'masks={pad_val}, seg=255) instead.', DeprecationWarning) pad_val = dict(img=pad_val, masks=pad_val, seg=255) assert isinstance(pad_val, dict) self.pad_val = pad_val self.pad_to_square = pad_to_square if pad_to_square: assert size is None and size_divisor is None, \ 'The size and size_divisor must be None ' \ 'when pad2square is True' else: assert size is not None or size_divisor is not None, \ 'only one of size and size_divisor should be valid' assert size is None or size_divisor is None def _pad_img(self, results): """Pad images according to ``self.size``.""" pad_val = self.pad_val.get('img', 0) for key in results.get('img_fields', ['img']): if self.pad_to_square: max_size = max(results[key].shape[:2]) self.size = (max_size, max_size) if self.size is not None: padded_img = mmcv.impad( results[key], shape=self.size, pad_val=pad_val) elif self.size_divisor is not None: padded_img = mmcv.impad_to_multiple( results[key], self.size_divisor, pad_val=pad_val) results[key] = padded_img results['pad_shape'] = padded_img.shape results['pad_fixed_size'] = self.size results['pad_size_divisor'] = self.size_divisor def _pad_masks(self, results): """Pad masks according to ``results['pad_shape']``.""" pad_shape = results['pad_shape'][:2] pad_val = self.pad_val.get('masks', 0) for key in results.get('mask_fields', []): results[key] = results[key].pad(pad_shape, pad_val=pad_val) def _pad_seg(self, results): """Pad semantic segmentation map according to ``results['pad_shape']``.""" pad_val = self.pad_val.get('seg', 255) for key in results.get('seg_fields', []): results[key] = mmcv.impad( results[key], shape=results['pad_shape'][:2], pad_val=pad_val) def __call__(self, results): """Call function to pad images, masks, semantic segmentation maps. Args: results (dict): Result dict from loading pipeline. Returns: dict: Updated result dict. """ self._pad_img(results) self._pad_masks(results) self._pad_seg(results) return results def __repr__(self): repr_str = self.__class__.__name__ repr_str += f'(size={self.size}, ' repr_str += f'size_divisor={self.size_divisor}, ' repr_str += f'pad_to_square={self.pad_to_square}, ' repr_str += f'pad_val={self.pad_val})' return repr_str @PIPELINES.register_module() class Normalize: """Normalize the image. Added key is "img_norm_cfg". Args: mean (sequence): Mean values of 3 channels. std (sequence): Std values of 3 channels. to_rgb (bool): Whether to convert the image from BGR to RGB, default is true. """ def __init__(self, mean, std, to_rgb=True): self.mean = np.array(mean, dtype=np.float32) self.std = np.array(std, dtype=np.float32) self.to_rgb = to_rgb def __call__(self, results): """Call function to normalize images. Args: results (dict): Result dict from loading pipeline. Returns: dict: Normalized results, 'img_norm_cfg' key is added into result dict. 
""" for key in results.get('img_fields', ['img']): results[key] = mmcv.imnormalize(results[key], self.mean, self.std, self.to_rgb) results['img_norm_cfg'] = dict( mean=self.mean, std=self.std, to_rgb=self.to_rgb) return results def __repr__(self): repr_str = self.__class__.__name__ repr_str += f'(mean={self.mean}, std={self.std}, to_rgb={self.to_rgb})' return repr_str @PIPELINES.register_module() class RandomCrop: """Random crop the image & bboxes & masks. The absolute `crop_size` is sampled based on `crop_type` and `image_size`, then the cropped results are generated. Args: crop_size (tuple): The relative ratio or absolute pixels of height and width. crop_type (str, optional): one of "relative_range", "relative", "absolute", "absolute_range". "relative" randomly crops (h * crop_size[0], w * crop_size[1]) part from an input of size (h, w). "relative_range" uniformly samples relative crop size from range [crop_size[0], 1] and [crop_size[1], 1] for height and width respectively. "absolute" crops from an input with absolute size (crop_size[0], crop_size[1]). "absolute_range" uniformly samples crop_h in range [crop_size[0], min(h, crop_size[1])] and crop_w in range [crop_size[0], min(w, crop_size[1])]. Default "absolute". allow_negative_crop (bool, optional): Whether to allow a crop that does not contain any bbox area. Default False. recompute_bbox (bool, optional): Whether to re-compute the boxes based on cropped instance masks. Default False. bbox_clip_border (bool, optional): Whether clip the objects outside the border of the image. Defaults to True. Note: - If the image is smaller than the absolute crop size, return the original image. - The keys for bboxes, labels and masks must be aligned. That is, `gt_bboxes` corresponds to `gt_labels` and `gt_masks`, and `gt_bboxes_ignore` corresponds to `gt_labels_ignore` and `gt_masks_ignore`. - If the crop does not contain any gt-bbox region and `allow_negative_crop` is set to False, skip this image. """ def __init__(self, crop_size, crop_type='absolute', allow_negative_crop=False, recompute_bbox=False, bbox_clip_border=True): if crop_type not in [ 'relative_range', 'relative', 'absolute', 'absolute_range' ]: raise ValueError(f'Invalid crop_type {crop_type}.') if crop_type in ['absolute', 'absolute_range']: assert crop_size[0] > 0 and crop_size[1] > 0 assert isinstance(crop_size[0], int) and isinstance( crop_size[1], int) else: assert 0 < crop_size[0] <= 1 and 0 < crop_size[1] <= 1 self.crop_size = crop_size self.crop_type = crop_type self.allow_negative_crop = allow_negative_crop self.bbox_clip_border = bbox_clip_border self.recompute_bbox = recompute_bbox # The key correspondence from bboxes to labels and masks. self.bbox2label = { 'gt_bboxes': 'gt_labels', 'gt_bboxes_ignore': 'gt_labels_ignore' } self.bbox2mask = { 'gt_bboxes': 'gt_masks', 'gt_bboxes_ignore': 'gt_masks_ignore' } def _crop_data(self, results, crop_size, allow_negative_crop): """Function to randomly crop images, bounding boxes, masks, semantic segmentation maps. Args: results (dict): Result dict from loading pipeline. crop_size (tuple): Expected absolute size after cropping, (h, w). allow_negative_crop (bool): Whether to allow a crop that does not contain any bbox area. Default to False. Returns: dict: Randomly cropped results, 'img_shape' key in result dict is updated according to crop size. 
""" assert crop_size[0] > 0 and crop_size[1] > 0 for key in results.get('img_fields', ['img']): img = results[key] margin_h = max(img.shape[0] - crop_size[0], 0) margin_w = max(img.shape[1] - crop_size[1], 0) offset_h = np.random.randint(0, margin_h + 1) offset_w = np.random.randint(0, margin_w + 1) crop_y1, crop_y2 = offset_h, offset_h + crop_size[0] crop_x1, crop_x2 = offset_w, offset_w + crop_size[1] # crop the image img = img[crop_y1:crop_y2, crop_x1:crop_x2, ...] img_shape = img.shape results[key] = img results['img_shape'] = img_shape # crop bboxes accordingly and clip to the image boundary for key in results.get('bbox_fields', []): # e.g. gt_bboxes and gt_bboxes_ignore bbox_offset = np.array([offset_w, offset_h, offset_w, offset_h], dtype=np.float32) bboxes = results[key] - bbox_offset if self.bbox_clip_border: bboxes[:, 0::2] = np.clip(bboxes[:, 0::2], 0, img_shape[1]) bboxes[:, 1::2] = np.clip(bboxes[:, 1::2], 0, img_shape[0]) valid_inds = (bboxes[:, 2] > bboxes[:, 0]) & ( bboxes[:, 3] > bboxes[:, 1]) # If the crop does not contain any gt-bbox area and # allow_negative_crop is False, skip this image. if (key == 'gt_bboxes' and not valid_inds.any() and not allow_negative_crop): return None results[key] = bboxes[valid_inds, :] # label fields. e.g. gt_labels and gt_labels_ignore label_key = self.bbox2label.get(key) if label_key in results: results[label_key] = results[label_key][valid_inds] # mask fields, e.g. gt_masks and gt_masks_ignore mask_key = self.bbox2mask.get(key) if mask_key in results: results[mask_key] = results[mask_key][ valid_inds.nonzero()[0]].crop( np.asarray([crop_x1, crop_y1, crop_x2, crop_y2])) if self.recompute_bbox: results[key] = results[mask_key].get_bboxes() # crop semantic seg for key in results.get('seg_fields', []): results[key] = results[key][crop_y1:crop_y2, crop_x1:crop_x2] return results def _get_crop_size(self, image_size): """Randomly generates the absolute crop size based on `crop_type` and `image_size`. Args: image_size (tuple): (h, w). Returns: crop_size (tuple): (crop_h, crop_w) in absolute pixels. """ h, w = image_size if self.crop_type == 'absolute': return (min(self.crop_size[0], h), min(self.crop_size[1], w)) elif self.crop_type == 'absolute_range': assert self.crop_size[0] <= self.crop_size[1] crop_h = np.random.randint( min(h, self.crop_size[0]), min(h, self.crop_size[1]) + 1) crop_w = np.random.randint( min(w, self.crop_size[0]), min(w, self.crop_size[1]) + 1) return crop_h, crop_w elif self.crop_type == 'relative': crop_h, crop_w = self.crop_size return int(h * crop_h + 0.5), int(w * crop_w + 0.5) elif self.crop_type == 'relative_range': crop_size = np.asarray(self.crop_size, dtype=np.float32) crop_h, crop_w = crop_size + np.random.rand(2) * (1 - crop_size) return int(h * crop_h + 0.5), int(w * crop_w + 0.5) def __call__(self, results): """Call function to randomly crop images, bounding boxes, masks, semantic segmentation maps. Args: results (dict): Result dict from loading pipeline. Returns: dict: Randomly cropped results, 'img_shape' key in result dict is updated according to crop size. 
""" image_size = results['img'].shape[:2] crop_size = self._get_crop_size(image_size) results = self._crop_data(results, crop_size, self.allow_negative_crop) return results def __repr__(self): repr_str = self.__class__.__name__ repr_str += f'(crop_size={self.crop_size}, ' repr_str += f'crop_type={self.crop_type}, ' repr_str += f'allow_negative_crop={self.allow_negative_crop}, ' repr_str += f'bbox_clip_border={self.bbox_clip_border})' return repr_str @PIPELINES.register_module() class SegRescale: """Rescale semantic segmentation maps. Args: scale_factor (float): The scale factor of the final output. backend (str): Image rescale backend, choices are 'cv2' and 'pillow'. These two backends generates slightly different results. Defaults to 'cv2'. """ def __init__(self, scale_factor=1, backend='cv2'): self.scale_factor = scale_factor self.backend = backend def __call__(self, results): """Call function to scale the semantic segmentation map. Args: results (dict): Result dict from loading pipeline. Returns: dict: Result dict with semantic segmentation map scaled. """ for key in results.get('seg_fields', []): if self.scale_factor != 1: results[key] = mmcv.imrescale( results[key], self.scale_factor, interpolation='nearest', backend=self.backend) return results def __repr__(self): return self.__class__.__name__ + f'(scale_factor={self.scale_factor})' @PIPELINES.register_module() class PhotoMetricDistortion: """Apply photometric distortion to image sequentially, every transformation is applied with a probability of 0.5. The position of random contrast is in second or second to last. 1. random brightness 2. random contrast (mode 0) 3. convert color from BGR to HSV 4. random saturation 5. random hue 6. convert color from HSV to BGR 7. random contrast (mode 1) 8. randomly swap channels Args: brightness_delta (int): delta of brightness. contrast_range (tuple): range of contrast. saturation_range (tuple): range of saturation. hue_delta (int): delta of hue. """ def __init__(self, brightness_delta=32, contrast_range=(0.5, 1.5), saturation_range=(0.5, 1.5), hue_delta=18): self.brightness_delta = brightness_delta self.contrast_lower, self.contrast_upper = contrast_range self.saturation_lower, self.saturation_upper = saturation_range self.hue_delta = hue_delta def __call__(self, results): """Call function to perform photometric distortion on images. Args: results (dict): Result dict from loading pipeline. Returns: dict: Result dict with images distorted. 
""" if 'img_fields' in results: assert results['img_fields'] == ['img'], \ 'Only single img_fields is allowed' img = results['img'] img = img.astype(np.float32) # random brightness if random.randint(2): delta = random.uniform(-self.brightness_delta, self.brightness_delta) img += delta # mode == 0 --> do random contrast first # mode == 1 --> do random contrast last mode = random.randint(2) if mode == 1: if random.randint(2): alpha = random.uniform(self.contrast_lower, self.contrast_upper) img *= alpha # convert color from BGR to HSV img = mmcv.bgr2hsv(img) # random saturation if random.randint(2): img[..., 1] *= random.uniform(self.saturation_lower, self.saturation_upper) # random hue if random.randint(2): img[..., 0] += random.uniform(-self.hue_delta, self.hue_delta) img[..., 0][img[..., 0] > 360] -= 360 img[..., 0][img[..., 0] < 0] += 360 # convert color from HSV to BGR img = mmcv.hsv2bgr(img) # random contrast if mode == 0: if random.randint(2): alpha = random.uniform(self.contrast_lower, self.contrast_upper) img *= alpha # randomly swap channels if random.randint(2): img = img[..., random.permutation(3)] results['img'] = img return results def __repr__(self): repr_str = self.__class__.__name__ repr_str += f'(\nbrightness_delta={self.brightness_delta},\n' repr_str += 'contrast_range=' repr_str += f'{(self.contrast_lower, self.contrast_upper)},\n' repr_str += 'saturation_range=' repr_str += f'{(self.saturation_lower, self.saturation_upper)},\n' repr_str += f'hue_delta={self.hue_delta})' return repr_str @PIPELINES.register_module() class Expand: """Random expand the image & bboxes. Randomly place the original image on a canvas of 'ratio' x original image size filled with mean values. The ratio is in the range of ratio_range. Args: mean (tuple): mean value of dataset. to_rgb (bool): if need to convert the order of mean to align with RGB. ratio_range (tuple): range of expand ratio. prob (float): probability of applying this transformation """ def __init__(self, mean=(0, 0, 0), to_rgb=True, ratio_range=(1, 4), seg_ignore_label=None, prob=0.5): self.to_rgb = to_rgb self.ratio_range = ratio_range if to_rgb: self.mean = mean[::-1] else: self.mean = mean self.min_ratio, self.max_ratio = ratio_range self.seg_ignore_label = seg_ignore_label self.prob = prob def __call__(self, results): """Call function to expand images, bounding boxes. Args: results (dict): Result dict from loading pipeline. 
Returns: dict: Result dict with images, bounding boxes expanded """ if random.uniform(0, 1) > self.prob: return results if 'img_fields' in results: assert results['img_fields'] == ['img'], \ 'Only single img_fields is allowed' img = results['img'] h, w, c = img.shape ratio = random.uniform(self.min_ratio, self.max_ratio) # speedup expand when meets large image if np.all(self.mean == self.mean[0]): expand_img = np.empty((int(h * ratio), int(w * ratio), c), img.dtype) expand_img.fill(self.mean[0]) else: expand_img = np.full((int(h * ratio), int(w * ratio), c), self.mean, dtype=img.dtype) left = int(random.uniform(0, w * ratio - w)) top = int(random.uniform(0, h * ratio - h)) expand_img[top:top + h, left:left + w] = img results['img'] = expand_img # expand bboxes for key in results.get('bbox_fields', []): results[key] = results[key] + np.tile( (left, top), 2).astype(results[key].dtype) # expand masks for key in results.get('mask_fields', []): results[key] = results[key].expand( int(h * ratio), int(w * ratio), top, left) # expand segs for key in results.get('seg_fields', []): gt_seg = results[key] expand_gt_seg = np.full((int(h * ratio), int(w * ratio)), self.seg_ignore_label, dtype=gt_seg.dtype) expand_gt_seg[top:top + h, left:left + w] = gt_seg results[key] = expand_gt_seg return results def __repr__(self): repr_str = self.__class__.__name__ repr_str += f'(mean={self.mean}, to_rgb={self.to_rgb}, ' repr_str += f'ratio_range={self.ratio_range}, ' repr_str += f'seg_ignore_label={self.seg_ignore_label})' return repr_str @PIPELINES.register_module() class MinIoURandomCrop: """Random crop the image & bboxes, the cropped patches have minimum IoU requirement with original image & bboxes, the IoU threshold is randomly selected from min_ious. Args: min_ious (tuple): minimum IoU threshold for all intersections with bounding boxes min_crop_size (float): minimum crop's size (i.e. h,w := a*h, a*w, where a >= min_crop_size). bbox_clip_border (bool, optional): Whether clip the objects outside the border of the image. Defaults to True. Note: The keys for bboxes, labels and masks should be paired. That is, \ `gt_bboxes` corresponds to `gt_labels` and `gt_masks`, and \ `gt_bboxes_ignore` to `gt_labels_ignore` and `gt_masks_ignore`. """ def __init__(self, min_ious=(0.1, 0.3, 0.5, 0.7, 0.9), min_crop_size=0.3, bbox_clip_border=True): # 1: return ori img self.min_ious = min_ious self.sample_mode = (1, *min_ious, 0) self.min_crop_size = min_crop_size self.bbox_clip_border = bbox_clip_border self.bbox2label = { 'gt_bboxes': 'gt_labels', 'gt_bboxes_ignore': 'gt_labels_ignore' } self.bbox2mask = { 'gt_bboxes': 'gt_masks', 'gt_bboxes_ignore': 'gt_masks_ignore' } def __call__(self, results): """Call function to crop images and bounding boxes with minimum IoU constraint. Args: results (dict): Result dict from loading pipeline. Returns: dict: Result dict with images and bounding boxes cropped, \ 'img_shape' key is updated. 
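        Example:
            A config sketch using the defaults from ``__init__``; pairing it
            with ``Expand`` (SSD-style) is a common but not required choice:

            .. code-block::

                dict(
                    type='MinIoURandomCrop',
                    min_ious=(0.1, 0.3, 0.5, 0.7, 0.9),
                    min_crop_size=0.3)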
""" if 'img_fields' in results: assert results['img_fields'] == ['img'], \ 'Only single img_fields is allowed' img = results['img'] assert 'bbox_fields' in results boxes = [results[key] for key in results['bbox_fields']] boxes = np.concatenate(boxes, 0) h, w, c = img.shape while True: mode = random.choice(self.sample_mode) self.mode = mode if mode == 1: return results min_iou = mode for i in range(50): new_w = random.uniform(self.min_crop_size * w, w) new_h = random.uniform(self.min_crop_size * h, h) # h / w in [0.5, 2] if new_h / new_w < 0.5 or new_h / new_w > 2: continue left = random.uniform(w - new_w) top = random.uniform(h - new_h) patch = np.array( (int(left), int(top), int(left + new_w), int(top + new_h))) # Line or point crop is not allowed if patch[2] == patch[0] or patch[3] == patch[1]: continue overlaps = bbox_overlaps( patch.reshape(-1, 4), boxes.reshape(-1, 4)).reshape(-1) if len(overlaps) > 0 and overlaps.min() < min_iou: continue # center of boxes should inside the crop img # only adjust boxes and instance masks when the gt is not empty if len(overlaps) > 0: # adjust boxes def is_center_of_bboxes_in_patch(boxes, patch): center = (boxes[:, :2] + boxes[:, 2:]) / 2 mask = ((center[:, 0] > patch[0]) * (center[:, 1] > patch[1]) * (center[:, 0] < patch[2]) * (center[:, 1] < patch[3])) return mask mask = is_center_of_bboxes_in_patch(boxes, patch) if not mask.any(): continue for key in results.get('bbox_fields', []): boxes = results[key].copy() mask = is_center_of_bboxes_in_patch(boxes, patch) boxes = boxes[mask] if self.bbox_clip_border: boxes[:, 2:] = boxes[:, 2:].clip(max=patch[2:]) boxes[:, :2] = boxes[:, :2].clip(min=patch[:2]) boxes -= np.tile(patch[:2], 2) results[key] = boxes # labels label_key = self.bbox2label.get(key) if label_key in results: results[label_key] = results[label_key][mask] # mask fields mask_key = self.bbox2mask.get(key) if mask_key in results: results[mask_key] = results[mask_key][ mask.nonzero()[0]].crop(patch) # adjust the img no matter whether the gt is empty before crop img = img[patch[1]:patch[3], patch[0]:patch[2]] results['img'] = img results['img_shape'] = img.shape # seg fields for key in results.get('seg_fields', []): results[key] = results[key][patch[1]:patch[3], patch[0]:patch[2]] return results def __repr__(self): repr_str = self.__class__.__name__ repr_str += f'(min_ious={self.min_ious}, ' repr_str += f'min_crop_size={self.min_crop_size}, ' repr_str += f'bbox_clip_border={self.bbox_clip_border})' return repr_str @PIPELINES.register_module() class Corrupt: """Corruption augmentation. Corruption transforms implemented based on `imagecorruptions <https://github.com/bethgelab/imagecorruptions>`_. Args: corruption (str): Corruption name. severity (int, optional): The severity of corruption. Default: 1. """ def __init__(self, corruption, severity=1): self.corruption = corruption self.severity = severity def __call__(self, results): """Call function to corrupt image. Args: results (dict): Result dict from loading pipeline. Returns: dict: Result dict with images corrupted. 
""" if corrupt is None: raise RuntimeError('imagecorruptions is not installed') if 'img_fields' in results: assert results['img_fields'] == ['img'], \ 'Only single img_fields is allowed' results['img'] = corrupt( results['img'].astype(np.uint8), corruption_name=self.corruption, severity=self.severity) return results def __repr__(self): repr_str = self.__class__.__name__ repr_str += f'(corruption={self.corruption}, ' repr_str += f'severity={self.severity})' return repr_str @PIPELINES.register_module() class Albu: """Albumentation augmentation. Adds custom transformations from Albumentations library. Please, visit `https://albumentations.readthedocs.io` to get more information. An example of ``transforms`` is as followed: .. code-block:: [ dict( type='ShiftScaleRotate', shift_limit=0.0625, scale_limit=0.0, rotate_limit=0, interpolation=1, p=0.5), dict( type='RandomBrightnessContrast', brightness_limit=[0.1, 0.3], contrast_limit=[0.1, 0.3], p=0.2), dict(type='ChannelShuffle', p=0.1), dict( type='OneOf', transforms=[ dict(type='Blur', blur_limit=3, p=1.0), dict(type='MedianBlur', blur_limit=3, p=1.0) ], p=0.1), ] Args: transforms (list[dict]): A list of albu transformations bbox_params (dict): Bbox_params for albumentation `Compose` keymap (dict): Contains {'input key':'albumentation-style key'} skip_img_without_anno (bool): Whether to skip the image if no ann left after aug """ def __init__(self, transforms, bbox_params=None, keymap=None, update_pad_shape=False, skip_img_without_anno=False): if Compose is None: raise RuntimeError('albumentations is not installed') # Args will be modified later, copying it will be safer transforms = copy.deepcopy(transforms) if bbox_params is not None: bbox_params = copy.deepcopy(bbox_params) if keymap is not None: keymap = copy.deepcopy(keymap) self.transforms = transforms self.filter_lost_elements = False self.update_pad_shape = update_pad_shape self.skip_img_without_anno = skip_img_without_anno # A simple workaround to remove masks without boxes if (isinstance(bbox_params, dict) and 'label_fields' in bbox_params and 'filter_lost_elements' in bbox_params): self.filter_lost_elements = True self.origin_label_fields = bbox_params['label_fields'] bbox_params['label_fields'] = ['idx_mapper'] del bbox_params['filter_lost_elements'] self.bbox_params = ( self.albu_builder(bbox_params) if bbox_params else None) self.aug = Compose([self.albu_builder(t) for t in self.transforms], bbox_params=self.bbox_params) if not keymap: self.keymap_to_albu = { 'img': 'image', 'gt_masks': 'masks', 'gt_bboxes': 'bboxes' } else: self.keymap_to_albu = keymap self.keymap_back = {v: k for k, v in self.keymap_to_albu.items()} def albu_builder(self, cfg): """Import a module from albumentations. It inherits some of :func:`build_from_cfg` logic. Args: cfg (dict): Config dict. It should at least contain the key "type". Returns: obj: The constructed object. """ assert isinstance(cfg, dict) and 'type' in cfg args = cfg.copy() obj_type = args.pop('type') if mmcv.is_str(obj_type): if albumentations is None: raise RuntimeError('albumentations is not installed') obj_cls = getattr(albumentations, obj_type) elif inspect.isclass(obj_type): obj_cls = obj_type else: raise TypeError( f'type must be a str or valid type, but got {type(obj_type)}') if 'transforms' in args: args['transforms'] = [ self.albu_builder(transform) for transform in args['transforms'] ] return obj_cls(**args) @staticmethod def mapper(d, keymap): """Dictionary mapper. Renames keys according to keymap provided. 
Args: d (dict): old dict keymap (dict): {'old_key':'new_key'} Returns: dict: new dict. """ updated_dict = {} for k, v in zip(d.keys(), d.values()): new_k = keymap.get(k, k) updated_dict[new_k] = d[k] return updated_dict def __call__(self, results): # dict to albumentations format results = self.mapper(results, self.keymap_to_albu) # TODO: add bbox_fields if 'bboxes' in results: # to list of boxes if isinstance(results['bboxes'], np.ndarray): results['bboxes'] = [x for x in results['bboxes']] # add pseudo-field for filtration if self.filter_lost_elements: results['idx_mapper'] = np.arange(len(results['bboxes'])) # TODO: Support mask structure in albu if 'masks' in results: if isinstance(results['masks'], PolygonMasks): raise NotImplementedError( 'Albu only supports BitMap masks now') ori_masks = results['masks'] if albumentations.__version__ < '0.5': results['masks'] = results['masks'].masks else: results['masks'] = [mask for mask in results['masks'].masks] results = self.aug(**results) if 'bboxes' in results: if isinstance(results['bboxes'], list): results['bboxes'] = np.array( results['bboxes'], dtype=np.float32) results['bboxes'] = results['bboxes'].reshape(-1, 4) # filter label_fields if self.filter_lost_elements: for label in self.origin_label_fields: results[label] = np.array( [results[label][i] for i in results['idx_mapper']]) if 'masks' in results: results['masks'] = np.array( [results['masks'][i] for i in results['idx_mapper']]) results['masks'] = ori_masks.__class__( results['masks'], results['image'].shape[0], results['image'].shape[1]) if (not len(results['idx_mapper']) and self.skip_img_without_anno): return None if 'gt_labels' in results: if isinstance(results['gt_labels'], list): results['gt_labels'] = np.array(results['gt_labels']) results['gt_labels'] = results['gt_labels'].astype(np.int64) # back to the original format results = self.mapper(results, self.keymap_back) # update final shape if self.update_pad_shape: results['pad_shape'] = results['img'].shape return results def __repr__(self): repr_str = self.__class__.__name__ + f'(transforms={self.transforms})' return repr_str @PIPELINES.register_module() class RandomCenterCropPad: """Random center crop and random around padding for CornerNet. This operation generates randomly cropped image from the original image and pads it simultaneously. Different from :class:`RandomCrop`, the output shape may not equal to ``crop_size`` strictly. We choose a random value from ``ratios`` and the output shape could be larger or smaller than ``crop_size``. The padding operation is also different from :class:`Pad`, here we use around padding instead of right-bottom padding. The relation between output image (padding image) and original image: .. code:: text output image +----------------------------+ | padded area | +------|----------------------------|----------+ | | cropped area | | | | +---------------+ | | | | | . center | | | original image | | | range | | | | | +---------------+ | | +------|----------------------------|----------+ | padded area | +----------------------------+ There are 5 main areas in the figure: - output image: output image of this operation, also called padding image in following instruction. - original image: input image of this operation. - padded area: non-intersect area of output image and original image. - cropped area: the overlap of output image and original image. - center range: a smaller area where random center chosen from. 
center range is computed by ``border`` and original image's shape to avoid our random center is too close to original image's border. Also this operation act differently in train and test mode, the summary pipeline is listed below. Train pipeline: 1. Choose a ``random_ratio`` from ``ratios``, the shape of padding image will be ``random_ratio * crop_size``. 2. Choose a ``random_center`` in center range. 3. Generate padding image with center matches the ``random_center``. 4. Initialize the padding image with pixel value equals to ``mean``. 5. Copy the cropped area to padding image. 6. Refine annotations. Test pipeline: 1. Compute output shape according to ``test_pad_mode``. 2. Generate padding image with center matches the original image center. 3. Initialize the padding image with pixel value equals to ``mean``. 4. Copy the ``cropped area`` to padding image. Args: crop_size (tuple | None): expected size after crop, final size will computed according to ratio. Requires (h, w) in train mode, and None in test mode. ratios (tuple): random select a ratio from tuple and crop image to (crop_size[0] * ratio) * (crop_size[1] * ratio). Only available in train mode. border (int): max distance from center select area to image border. Only available in train mode. mean (sequence): Mean values of 3 channels. std (sequence): Std values of 3 channels. to_rgb (bool): Whether to convert the image from BGR to RGB. test_mode (bool): whether involve random variables in transform. In train mode, crop_size is fixed, center coords and ratio is random selected from predefined lists. In test mode, crop_size is image's original shape, center coords and ratio is fixed. test_pad_mode (tuple): padding method and padding shape value, only available in test mode. Default is using 'logical_or' with 127 as padding shape value. - 'logical_or': final_shape = input_shape | padding_shape_value - 'size_divisor': final_shape = int( ceil(input_shape / padding_shape_value) * padding_shape_value) test_pad_add_pix (int): Extra padding pixel in test mode. Default 0. bbox_clip_border (bool, optional): Whether clip the objects outside the border of the image. Defaults to True. """ def __init__(self, crop_size=None, ratios=(0.9, 1.0, 1.1), border=128, mean=None, std=None, to_rgb=None, test_mode=False, test_pad_mode=('logical_or', 127), test_pad_add_pix=0, bbox_clip_border=True): if test_mode: assert crop_size is None, 'crop_size must be None in test mode' assert ratios is None, 'ratios must be None in test mode' assert border is None, 'border must be None in test mode' assert isinstance(test_pad_mode, (list, tuple)) assert test_pad_mode[0] in ['logical_or', 'size_divisor'] else: assert isinstance(crop_size, (list, tuple)) assert crop_size[0] > 0 and crop_size[1] > 0, ( 'crop_size must > 0 in train mode') assert isinstance(ratios, (list, tuple)) assert test_pad_mode is None, ( 'test_pad_mode must be None in train mode') self.crop_size = crop_size self.ratios = ratios self.border = border # We do not set default value to mean, std and to_rgb because these # hyper-parameters are easy to forget but could affect the performance. # Please use the same setting as Normalize for performance assurance. 
assert mean is not None and std is not None and to_rgb is not None self.to_rgb = to_rgb self.input_mean = mean self.input_std = std if to_rgb: self.mean = mean[::-1] self.std = std[::-1] else: self.mean = mean self.std = std self.test_mode = test_mode self.test_pad_mode = test_pad_mode self.test_pad_add_pix = test_pad_add_pix self.bbox_clip_border = bbox_clip_border def _get_border(self, border, size): """Get final border for the target size. This function generates a ``final_border`` according to image's shape. The area between ``final_border`` and ``size - final_border`` is the ``center range``. We randomly choose center from the ``center range`` to avoid our random center is too close to original image's border. Also ``center range`` should be larger than 0. Args: border (int): The initial border, default is 128. size (int): The width or height of original image. Returns: int: The final border. """ k = 2 * border / size i = pow(2, np.ceil(np.log2(np.ceil(k))) + (k == int(k))) return border // i def _filter_boxes(self, patch, boxes): """Check whether the center of each box is in the patch. Args: patch (list[int]): The cropped area, [left, top, right, bottom]. boxes (numpy array, (N x 4)): Ground truth boxes. Returns: mask (numpy array, (N,)): Each box is inside or outside the patch. """ center = (boxes[:, :2] + boxes[:, 2:]) / 2 mask = (center[:, 0] > patch[0]) * (center[:, 1] > patch[1]) * ( center[:, 0] < patch[2]) * ( center[:, 1] < patch[3]) return mask def _crop_image_and_paste(self, image, center, size): """Crop image with a given center and size, then paste the cropped image to a blank image with two centers align. This function is equivalent to generating a blank image with ``size`` as its shape. Then cover it on the original image with two centers ( the center of blank image and the random center of original image) aligned. The overlap area is paste from the original image and the outside area is filled with ``mean pixel``. Args: image (np array, H x W x C): Original image. center (list[int]): Target crop center coord. size (list[int]): Target crop size. [target_h, target_w] Returns: cropped_img (np array, target_h x target_w x C): Cropped image. border (np array, 4): The distance of four border of ``cropped_img`` to the original image area, [top, bottom, left, right] patch (list[int]): The cropped area, [left, top, right, bottom]. """ center_y, center_x = center target_h, target_w = size img_h, img_w, img_c = image.shape x0 = max(0, center_x - target_w // 2) x1 = min(center_x + target_w // 2, img_w) y0 = max(0, center_y - target_h // 2) y1 = min(center_y + target_h // 2, img_h) patch = np.array((int(x0), int(y0), int(x1), int(y1))) left, right = center_x - x0, x1 - center_x top, bottom = center_y - y0, y1 - center_y cropped_center_y, cropped_center_x = target_h // 2, target_w // 2 cropped_img = np.zeros((target_h, target_w, img_c), dtype=image.dtype) for i in range(img_c): cropped_img[:, :, i] += self.mean[i] y_slice = slice(cropped_center_y - top, cropped_center_y + bottom) x_slice = slice(cropped_center_x - left, cropped_center_x + right) cropped_img[y_slice, x_slice, :] = image[y0:y1, x0:x1, :] border = np.array([ cropped_center_y - top, cropped_center_y + bottom, cropped_center_x - left, cropped_center_x + right ], dtype=np.float32) return cropped_img, border, patch def _train_aug(self, results): """Random crop and around padding the original image. Args: results (dict): Image infomations in the augment pipeline. Returns: results (dict): The updated dict. 
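        Example:
            A train-mode config sketch (the crop size and ratios are
            illustrative CornerNet-style values, and ``img_norm_cfg`` is
            assumed to be defined elsewhere in the config):

            .. code-block::

                dict(
                    type='RandomCenterCropPad',
                    crop_size=(511, 511),
                    ratios=(0.6, 0.8, 1.0, 1.2),
                    mean=img_norm_cfg['mean'],
                    std=img_norm_cfg['std'],
                    to_rgb=img_norm_cfg['to_rgb'],
                    test_mode=False,
                    test_pad_mode=None)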
""" img = results['img'] h, w, c = img.shape boxes = results['gt_bboxes'] while True: scale = random.choice(self.ratios) new_h = int(self.crop_size[0] * scale) new_w = int(self.crop_size[1] * scale) h_border = self._get_border(self.border, h) w_border = self._get_border(self.border, w) for i in range(50): center_x = random.randint(low=w_border, high=w - w_border) center_y = random.randint(low=h_border, high=h - h_border) cropped_img, border, patch = self._crop_image_and_paste( img, [center_y, center_x], [new_h, new_w]) mask = self._filter_boxes(patch, boxes) # if image do not have valid bbox, any crop patch is valid. if not mask.any() and len(boxes) > 0: continue results['img'] = cropped_img results['img_shape'] = cropped_img.shape results['pad_shape'] = cropped_img.shape x0, y0, x1, y1 = patch left_w, top_h = center_x - x0, center_y - y0 cropped_center_x, cropped_center_y = new_w // 2, new_h // 2 # crop bboxes accordingly and clip to the image boundary for key in results.get('bbox_fields', []): mask = self._filter_boxes(patch, results[key]) bboxes = results[key][mask] bboxes[:, 0:4:2] += cropped_center_x - left_w - x0 bboxes[:, 1:4:2] += cropped_center_y - top_h - y0 if self.bbox_clip_border: bboxes[:, 0:4:2] = np.clip(bboxes[:, 0:4:2], 0, new_w) bboxes[:, 1:4:2] = np.clip(bboxes[:, 1:4:2], 0, new_h) keep = (bboxes[:, 2] > bboxes[:, 0]) & ( bboxes[:, 3] > bboxes[:, 1]) bboxes = bboxes[keep] results[key] = bboxes if key in ['gt_bboxes']: if 'gt_labels' in results: labels = results['gt_labels'][mask] labels = labels[keep] results['gt_labels'] = labels if 'gt_masks' in results: raise NotImplementedError( 'RandomCenterCropPad only supports bbox.') # crop semantic seg for key in results.get('seg_fields', []): raise NotImplementedError( 'RandomCenterCropPad only supports bbox.') return results def _test_aug(self, results): """Around padding the original image without cropping. The padding mode and value are from ``test_pad_mode``. Args: results (dict): Image infomations in the augment pipeline. Returns: results (dict): The updated dict. 
""" img = results['img'] h, w, c = img.shape results['img_shape'] = img.shape if self.test_pad_mode[0] in ['logical_or']: # self.test_pad_add_pix is only used for centernet target_h = (h | self.test_pad_mode[1]) + self.test_pad_add_pix target_w = (w | self.test_pad_mode[1]) + self.test_pad_add_pix elif self.test_pad_mode[0] in ['size_divisor']: divisor = self.test_pad_mode[1] target_h = int(np.ceil(h / divisor)) * divisor target_w = int(np.ceil(w / divisor)) * divisor else: raise NotImplementedError( 'RandomCenterCropPad only support two testing pad mode:' 'logical-or and size_divisor.') cropped_img, border, _ = self._crop_image_and_paste( img, [h // 2, w // 2], [target_h, target_w]) results['img'] = cropped_img results['pad_shape'] = cropped_img.shape results['border'] = border return results def __call__(self, results): img = results['img'] assert img.dtype == np.float32, ( 'RandomCenterCropPad needs the input image of dtype np.float32,' ' please set "to_float32=True" in "LoadImageFromFile" pipeline') h, w, c = img.shape assert c == len(self.mean) if self.test_mode: return self._test_aug(results) else: return self._train_aug(results) def __repr__(self): repr_str = self.__class__.__name__ repr_str += f'(crop_size={self.crop_size}, ' repr_str += f'ratios={self.ratios}, ' repr_str += f'border={self.border}, ' repr_str += f'mean={self.input_mean}, ' repr_str += f'std={self.input_std}, ' repr_str += f'to_rgb={self.to_rgb}, ' repr_str += f'test_mode={self.test_mode}, ' repr_str += f'test_pad_mode={self.test_pad_mode}, ' repr_str += f'bbox_clip_border={self.bbox_clip_border})' return repr_str @PIPELINES.register_module() class CutOut: """CutOut operation. Randomly drop some regions of image used in `Cutout <https://arxiv.org/abs/1708.04552>`_. Args: n_holes (int | tuple[int, int]): Number of regions to be dropped. If it is given as a list, number of holes will be randomly selected from the closed interval [`n_holes[0]`, `n_holes[1]`]. cutout_shape (tuple[int, int] | list[tuple[int, int]]): The candidate shape of dropped regions. It can be `tuple[int, int]` to use a fixed cutout shape, or `list[tuple[int, int]]` to randomly choose shape from the list. cutout_ratio (tuple[float, float] | list[tuple[float, float]]): The candidate ratio of dropped regions. It can be `tuple[float, float]` to use a fixed ratio or `list[tuple[float, float]]` to randomly choose ratio from the list. Please note that `cutout_shape` and `cutout_ratio` cannot be both given at the same time. fill_in (tuple[float, float, float] | tuple[int, int, int]): The value of pixel to fill in the dropped regions. Default: (0, 0, 0). """ def __init__(self, n_holes, cutout_shape=None, cutout_ratio=None, fill_in=(0, 0, 0)): assert (cutout_shape is None) ^ (cutout_ratio is None), \ 'Either cutout_shape or cutout_ratio should be specified.' 
assert (isinstance(cutout_shape, (list, tuple)) or isinstance(cutout_ratio, (list, tuple))) if isinstance(n_holes, tuple): assert len(n_holes) == 2 and 0 <= n_holes[0] < n_holes[1] else: n_holes = (n_holes, n_holes) self.n_holes = n_holes self.fill_in = fill_in self.with_ratio = cutout_ratio is not None self.candidates = cutout_ratio if self.with_ratio else cutout_shape if not isinstance(self.candidates, list): self.candidates = [self.candidates] def __call__(self, results): """Call function to drop some regions of image.""" h, w, c = results['img'].shape n_holes = np.random.randint(self.n_holes[0], self.n_holes[1] + 1) for _ in range(n_holes): x1 = np.random.randint(0, w) y1 = np.random.randint(0, h) index = np.random.randint(0, len(self.candidates)) if not self.with_ratio: cutout_w, cutout_h = self.candidates[index] else: cutout_w = int(self.candidates[index][0] * w) cutout_h = int(self.candidates[index][1] * h) x2 = np.clip(x1 + cutout_w, 0, w) y2 = np.clip(y1 + cutout_h, 0, h) results['img'][y1:y2, x1:x2, :] = self.fill_in return results def __repr__(self): repr_str = self.__class__.__name__ repr_str += f'(n_holes={self.n_holes}, ' repr_str += (f'cutout_ratio={self.candidates}, ' if self.with_ratio else f'cutout_shape={self.candidates}, ') repr_str += f'fill_in={self.fill_in})' return repr_str @PIPELINES.register_module() class Mosaic: """Mosaic augmentation. Given 4 images, mosaic transform combines them into one output image. The output image is composed of the parts from each sub- image. .. code:: text mosaic transform center_x +------------------------------+ | pad | pad | | +-----------+ | | | | | | | image1 |--------+ | | | | | | | | | image2 | | center_y |----+-------------+-----------| | | cropped | | |pad | image3 | image4 | | | | | +----|-------------+-----------+ | | +-------------+ The mosaic transform steps are as follows: 1. Choose the mosaic center as the intersections of 4 images 2. Get the left top image according to the index, and randomly sample another 3 images from the custom dataset. 3. Sub image will be cropped if image is larger than mosaic patch Args: img_scale (Sequence[int]): Image size after mosaic pipeline of single image. Default to (640, 640). center_ratio_range (Sequence[float]): Center ratio range of mosaic output. Default to (0.5, 1.5). min_bbox_size (int | float): The minimum pixel for filtering invalid bboxes after the mosaic pipeline. Default to 0. bbox_clip_border (bool, optional): Whether to clip the objects outside the border of the image. In some dataset like MOT17, the gt bboxes are allowed to cross the border of images. Therefore, we don't need to clip the gt bboxes in these cases. Defaults to True. skip_filter (bool): Whether to skip filtering rules. If it is True, the filter rule will not be applied, and the `min_bbox_size` is invalid. Default to True. pad_val (int): Pad value. Default to 114. """ def __init__(self, img_scale=(640, 640), center_ratio_range=(0.5, 1.5), min_bbox_size=0, bbox_clip_border=True, skip_filter=True, pad_val=114): assert isinstance(img_scale, tuple) self.img_scale = img_scale self.center_ratio_range = center_ratio_range self.min_bbox_size = min_bbox_size self.bbox_clip_border = bbox_clip_border self.skip_filter = skip_filter self.pad_val = pad_val def __call__(self, results): """Call function to make a mosaic of image. Args: results (dict): Result dict. Returns: dict: Result dict with mosaic transformed. 
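        Example:
            Mosaic needs ``mix_results``, which is prepared by wrapping the
            dataset in ``MultiImageMixDataset``; this is only a structural
            sketch and the wrapped dataset config is left as an assumption:

            .. code-block::

                train_pipeline = [
                    dict(type='Mosaic', img_scale=(640, 640), pad_val=114),
                    dict(type='RandomFlip', flip_ratio=0.5),
                ]
                train_dataset = dict(
                    type='MultiImageMixDataset',
                    dataset=base_dataset,  # assumed to be defined above
                    pipeline=train_pipeline)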
""" results = self._mosaic_transform(results) return results def get_indexes(self, dataset): """Call function to collect indexes. Args: dataset (:obj:`MultiImageMixDataset`): The dataset. Returns: list: indexes. """ indexes = [random.randint(0, len(dataset)) for _ in range(3)] return indexes def _mosaic_transform(self, results): """Mosaic transform function. Args: results (dict): Result dict. Returns: dict: Updated result dict. """ assert 'mix_results' in results mosaic_labels = [] mosaic_bboxes = [] if len(results['img'].shape) == 3: mosaic_img = np.full( (int(self.img_scale[0] * 2), int(self.img_scale[1] * 2), 3), self.pad_val, dtype=results['img'].dtype) else: mosaic_img = np.full( (int(self.img_scale[0] * 2), int(self.img_scale[1] * 2)), self.pad_val, dtype=results['img'].dtype) # mosaic center x, y center_x = int( random.uniform(*self.center_ratio_range) * self.img_scale[1]) center_y = int( random.uniform(*self.center_ratio_range) * self.img_scale[0]) center_position = (center_x, center_y) loc_strs = ('top_left', 'top_right', 'bottom_left', 'bottom_right') for i, loc in enumerate(loc_strs): if loc == 'top_left': results_patch = copy.deepcopy(results) else: results_patch = copy.deepcopy(results['mix_results'][i - 1]) img_i = results_patch['img'] h_i, w_i = img_i.shape[:2] # keep_ratio resize scale_ratio_i = min(self.img_scale[0] / h_i, self.img_scale[1] / w_i) img_i = mmcv.imresize( img_i, (int(w_i * scale_ratio_i), int(h_i * scale_ratio_i))) # compute the combine parameters paste_coord, crop_coord = self._mosaic_combine( loc, center_position, img_i.shape[:2][::-1]) x1_p, y1_p, x2_p, y2_p = paste_coord x1_c, y1_c, x2_c, y2_c = crop_coord # crop and paste image mosaic_img[y1_p:y2_p, x1_p:x2_p] = img_i[y1_c:y2_c, x1_c:x2_c] # adjust coordinate gt_bboxes_i = results_patch['gt_bboxes'] gt_labels_i = results_patch['gt_labels'] if gt_bboxes_i.shape[0] > 0: padw = x1_p - x1_c padh = y1_p - y1_c gt_bboxes_i[:, 0::2] = \ scale_ratio_i * gt_bboxes_i[:, 0::2] + padw gt_bboxes_i[:, 1::2] = \ scale_ratio_i * gt_bboxes_i[:, 1::2] + padh mosaic_bboxes.append(gt_bboxes_i) mosaic_labels.append(gt_labels_i) if len(mosaic_labels) > 0: mosaic_bboxes = np.concatenate(mosaic_bboxes, 0) mosaic_labels = np.concatenate(mosaic_labels, 0) if self.bbox_clip_border: mosaic_bboxes[:, 0::2] = np.clip(mosaic_bboxes[:, 0::2], 0, 2 * self.img_scale[1]) mosaic_bboxes[:, 1::2] = np.clip(mosaic_bboxes[:, 1::2], 0, 2 * self.img_scale[0]) if not self.skip_filter: mosaic_bboxes, mosaic_labels = \ self._filter_box_candidates(mosaic_bboxes, mosaic_labels) # remove outside bboxes inside_inds = find_inside_bboxes(mosaic_bboxes, 2 * self.img_scale[0], 2 * self.img_scale[1]) mosaic_bboxes = mosaic_bboxes[inside_inds] mosaic_labels = mosaic_labels[inside_inds] results['img'] = mosaic_img results['img_shape'] = mosaic_img.shape results['gt_bboxes'] = mosaic_bboxes results['gt_labels'] = mosaic_labels return results def _mosaic_combine(self, loc, center_position_xy, img_shape_wh): """Calculate global coordinate of mosaic image and local coordinate of cropped sub-image. Args: loc (str): Index for the sub-image, loc in ('top_left', 'top_right', 'bottom_left', 'bottom_right'). center_position_xy (Sequence[float]): Mixing center for 4 images, (x, y). img_shape_wh (Sequence[int]): Width and height of sub-image Returns: tuple[tuple[float]]: Corresponding coordinate of pasting and cropping - paste_coord (tuple): paste corner coordinate in mosaic image. - crop_coord (tuple): crop corner coordinate in mosaic image. 
""" assert loc in ('top_left', 'top_right', 'bottom_left', 'bottom_right') if loc == 'top_left': # index0 to top left part of image x1, y1, x2, y2 = max(center_position_xy[0] - img_shape_wh[0], 0), \ max(center_position_xy[1] - img_shape_wh[1], 0), \ center_position_xy[0], \ center_position_xy[1] crop_coord = img_shape_wh[0] - (x2 - x1), img_shape_wh[1] - ( y2 - y1), img_shape_wh[0], img_shape_wh[1] elif loc == 'top_right': # index1 to top right part of image x1, y1, x2, y2 = center_position_xy[0], \ max(center_position_xy[1] - img_shape_wh[1], 0), \ min(center_position_xy[0] + img_shape_wh[0], self.img_scale[1] * 2), \ center_position_xy[1] crop_coord = 0, img_shape_wh[1] - (y2 - y1), min( img_shape_wh[0], x2 - x1), img_shape_wh[1] elif loc == 'bottom_left': # index2 to bottom left part of image x1, y1, x2, y2 = max(center_position_xy[0] - img_shape_wh[0], 0), \ center_position_xy[1], \ center_position_xy[0], \ min(self.img_scale[0] * 2, center_position_xy[1] + img_shape_wh[1]) crop_coord = img_shape_wh[0] - (x2 - x1), 0, img_shape_wh[0], min( y2 - y1, img_shape_wh[1]) else: # index3 to bottom right part of image x1, y1, x2, y2 = center_position_xy[0], \ center_position_xy[1], \ min(center_position_xy[0] + img_shape_wh[0], self.img_scale[1] * 2), \ min(self.img_scale[0] * 2, center_position_xy[1] + img_shape_wh[1]) crop_coord = 0, 0, min(img_shape_wh[0], x2 - x1), min(y2 - y1, img_shape_wh[1]) paste_coord = x1, y1, x2, y2 return paste_coord, crop_coord def _filter_box_candidates(self, bboxes, labels): """Filter out bboxes too small after Mosaic.""" bbox_w = bboxes[:, 2] - bboxes[:, 0] bbox_h = bboxes[:, 3] - bboxes[:, 1] valid_inds = (bbox_w > self.min_bbox_size) & \ (bbox_h > self.min_bbox_size) valid_inds = np.nonzero(valid_inds)[0] return bboxes[valid_inds], labels[valid_inds] def __repr__(self): repr_str = self.__class__.__name__ repr_str += f'img_scale={self.img_scale}, ' repr_str += f'center_ratio_range={self.center_ratio_range}, ' repr_str += f'pad_val={self.pad_val}, ' repr_str += f'min_bbox_size={self.min_bbox_size}, ' repr_str += f'skip_filter={self.skip_filter})' return repr_str @PIPELINES.register_module() class MixUp: """MixUp data augmentation. .. code:: text mixup transform +------------------------------+ | mixup image | | | +--------|--------+ | | | | | | |---------------+ | | | | | | | | image | | | | | | | | | | | |-----------------+ | | pad | +------------------------------+ The mixup transform steps are as follows:: 1. Another random image is picked by dataset and embedded in the top left patch(after padding and resizing) 2. The target of mixup transform is the weighted average of mixup image and origin image. Args: img_scale (Sequence[int]): Image output size after mixup pipeline. Default: (640, 640). ratio_range (Sequence[float]): Scale ratio of mixup image. Default: (0.5, 1.5). flip_ratio (float): Horizontal flip ratio of mixup image. Default: 0.5. pad_val (int): Pad value. Default: 114. max_iters (int): The maximum number of iterations. If the number of iterations is greater than `max_iters`, but gt_bbox is still empty, then the iteration is terminated. Default: 15. min_bbox_size (float): Width and height threshold to filter bboxes. If the height or width of a box is smaller than this value, it will be removed. Default: 5. min_area_ratio (float): Threshold of area ratio between original bboxes and wrapped bboxes. If smaller than this value, the box will be removed. Default: 0.2. max_aspect_ratio (float): Aspect ratio of width and height threshold to filter bboxes. 
If max(h/w, w/h) larger than this value, the box will be removed. Default: 20. bbox_clip_border (bool, optional): Whether to clip the objects outside the border of the image. In some dataset like MOT17, the gt bboxes are allowed to cross the border of images. Therefore, we don't need to clip the gt bboxes in these cases. Defaults to True. skip_filter (bool): Whether to skip filtering rules. If it is True, the filter rule will not be applied, and the `min_bbox_size` and `min_area_ratio` and `max_aspect_ratio` is invalid. Default to True. """ def __init__(self, img_scale=(640, 640), ratio_range=(0.5, 1.5), flip_ratio=0.5, pad_val=114, max_iters=15, min_bbox_size=5, min_area_ratio=0.2, max_aspect_ratio=20, bbox_clip_border=True, skip_filter=True): assert isinstance(img_scale, tuple) self.dynamic_scale = img_scale self.ratio_range = ratio_range self.flip_ratio = flip_ratio self.pad_val = pad_val self.max_iters = max_iters self.min_bbox_size = min_bbox_size self.min_area_ratio = min_area_ratio self.max_aspect_ratio = max_aspect_ratio self.bbox_clip_border = bbox_clip_border self.skip_filter = skip_filter def __call__(self, results): """Call function to make a mixup of image. Args: results (dict): Result dict. Returns: dict: Result dict with mixup transformed. """ results = self._mixup_transform(results) return results def get_indexes(self, dataset): """Call function to collect indexes. Args: dataset (:obj:`MultiImageMixDataset`): The dataset. Returns: list: indexes. """ for i in range(self.max_iters): index = random.randint(0, len(dataset)) gt_bboxes_i = dataset.get_ann_info(index)['bboxes'] if len(gt_bboxes_i) != 0: break return index def _mixup_transform(self, results): """MixUp transform function. Args: results (dict): Result dict. Returns: dict: Updated result dict. """ assert 'mix_results' in results assert len( results['mix_results']) == 1, 'MixUp only support 2 images now !' if results['mix_results'][0]['gt_bboxes'].shape[0] == 0: # empty bbox return results retrieve_results = results['mix_results'][0] retrieve_img = retrieve_results['img'] jit_factor = random.uniform(*self.ratio_range) is_filp = random.uniform(0, 1) > self.flip_ratio if len(retrieve_img.shape) == 3: out_img = np.ones( (self.dynamic_scale[0], self.dynamic_scale[1], 3), dtype=retrieve_img.dtype) * self.pad_val else: out_img = np.ones( self.dynamic_scale, dtype=retrieve_img.dtype) * self.pad_val # 1. keep_ratio resize scale_ratio = min(self.dynamic_scale[0] / retrieve_img.shape[0], self.dynamic_scale[1] / retrieve_img.shape[1]) retrieve_img = mmcv.imresize( retrieve_img, (int(retrieve_img.shape[1] * scale_ratio), int(retrieve_img.shape[0] * scale_ratio))) # 2. paste out_img[:retrieve_img.shape[0], :retrieve_img.shape[1]] = retrieve_img # 3. scale jit scale_ratio *= jit_factor out_img = mmcv.imresize(out_img, (int(out_img.shape[1] * jit_factor), int(out_img.shape[0] * jit_factor))) # 4. flip if is_filp: out_img = out_img[:, ::-1, :] # 5. random crop ori_img = results['img'] origin_h, origin_w = out_img.shape[:2] target_h, target_w = ori_img.shape[:2] padded_img = np.zeros( (max(origin_h, target_h), max(origin_w, target_w), 3)).astype(np.uint8) padded_img[:origin_h, :origin_w] = out_img x_offset, y_offset = 0, 0 if padded_img.shape[0] > target_h: y_offset = random.randint(0, padded_img.shape[0] - target_h) if padded_img.shape[1] > target_w: x_offset = random.randint(0, padded_img.shape[1] - target_w) padded_cropped_img = padded_img[y_offset:y_offset + target_h, x_offset:x_offset + target_w] # 6. 
adjust bbox retrieve_gt_bboxes = retrieve_results['gt_bboxes'] retrieve_gt_bboxes[:, 0::2] = retrieve_gt_bboxes[:, 0::2] * scale_ratio retrieve_gt_bboxes[:, 1::2] = retrieve_gt_bboxes[:, 1::2] * scale_ratio if self.bbox_clip_border: retrieve_gt_bboxes[:, 0::2] = np.clip(retrieve_gt_bboxes[:, 0::2], 0, origin_w) retrieve_gt_bboxes[:, 1::2] = np.clip(retrieve_gt_bboxes[:, 1::2], 0, origin_h) if is_filp: retrieve_gt_bboxes[:, 0::2] = ( origin_w - retrieve_gt_bboxes[:, 0::2][:, ::-1]) # 7. filter cp_retrieve_gt_bboxes = retrieve_gt_bboxes.copy() cp_retrieve_gt_bboxes[:, 0::2] = \ cp_retrieve_gt_bboxes[:, 0::2] - x_offset cp_retrieve_gt_bboxes[:, 1::2] = \ cp_retrieve_gt_bboxes[:, 1::2] - y_offset if self.bbox_clip_border: cp_retrieve_gt_bboxes[:, 0::2] = np.clip( cp_retrieve_gt_bboxes[:, 0::2], 0, target_w) cp_retrieve_gt_bboxes[:, 1::2] = np.clip( cp_retrieve_gt_bboxes[:, 1::2], 0, target_h) # 8. mix up ori_img = ori_img.astype(np.float32) mixup_img = 0.5 * ori_img + 0.5 * padded_cropped_img.astype(np.float32) retrieve_gt_labels = retrieve_results['gt_labels'] if not self.skip_filter: keep_list = self._filter_box_candidates(retrieve_gt_bboxes.T, cp_retrieve_gt_bboxes.T) retrieve_gt_labels = retrieve_gt_labels[keep_list] cp_retrieve_gt_bboxes = cp_retrieve_gt_bboxes[keep_list] mixup_gt_bboxes = np.concatenate( (results['gt_bboxes'], cp_retrieve_gt_bboxes), axis=0) mixup_gt_labels = np.concatenate( (results['gt_labels'], retrieve_gt_labels), axis=0) # remove outside bbox inside_inds = find_inside_bboxes(mixup_gt_bboxes, target_h, target_w) mixup_gt_bboxes = mixup_gt_bboxes[inside_inds] mixup_gt_labels = mixup_gt_labels[inside_inds] results['img'] = mixup_img.astype(np.uint8) results['img_shape'] = mixup_img.shape results['gt_bboxes'] = mixup_gt_bboxes results['gt_labels'] = mixup_gt_labels return results def _filter_box_candidates(self, bbox1, bbox2): """Compute candidate boxes which include following 5 things: bbox1 before augment, bbox2 after augment, min_bbox_size (pixels), min_area_ratio, max_aspect_ratio. """ w1, h1 = bbox1[2] - bbox1[0], bbox1[3] - bbox1[1] w2, h2 = bbox2[2] - bbox2[0], bbox2[3] - bbox2[1] ar = np.maximum(w2 / (h2 + 1e-16), h2 / (w2 + 1e-16)) return ((w2 > self.min_bbox_size) & (h2 > self.min_bbox_size) & (w2 * h2 / (w1 * h1 + 1e-16) > self.min_area_ratio) & (ar < self.max_aspect_ratio)) def __repr__(self): repr_str = self.__class__.__name__ repr_str += f'dynamic_scale={self.dynamic_scale}, ' repr_str += f'ratio_range={self.ratio_range}, ' repr_str += f'flip_ratio={self.flip_ratio}, ' repr_str += f'pad_val={self.pad_val}, ' repr_str += f'max_iters={self.max_iters}, ' repr_str += f'min_bbox_size={self.min_bbox_size}, ' repr_str += f'min_area_ratio={self.min_area_ratio}, ' repr_str += f'max_aspect_ratio={self.max_aspect_ratio}, ' repr_str += f'skip_filter={self.skip_filter})' return repr_str @PIPELINES.register_module() class RandomAffine: """Random affine transform data augmentation. This operation randomly generates affine transform matrix which including rotation, translation, shear and scaling transforms. Args: max_rotate_degree (float): Maximum degrees of rotation transform. Default: 10. max_translate_ratio (float): Maximum ratio of translation. Default: 0.1. scaling_ratio_range (tuple[float]): Min and max ratio of scaling transform. Default: (0.5, 1.5). max_shear_degree (float): Maximum degrees of shear transform. Default: 2. border (tuple[int]): Distance from height and width sides of input image to adjust output shape. Only used in mosaic dataset. Default: (0, 0). 
border_val (tuple[int]): Border padding values of 3 channels. Default: (114, 114, 114). min_bbox_size (float): Width and height threshold to filter bboxes. If the height or width of a box is smaller than this value, it will be removed. Default: 2. min_area_ratio (float): Threshold of area ratio between original bboxes and wrapped bboxes. If smaller than this value, the box will be removed. Default: 0.2. max_aspect_ratio (float): Aspect ratio of width and height threshold to filter bboxes. If max(h/w, w/h) larger than this value, the box will be removed. bbox_clip_border (bool, optional): Whether to clip the objects outside the border of the image. In some dataset like MOT17, the gt bboxes are allowed to cross the border of images. Therefore, we don't need to clip the gt bboxes in these cases. Defaults to True. skip_filter (bool): Whether to skip filtering rules. If it is True, the filter rule will not be applied, and the `min_bbox_size` and `min_area_ratio` and `max_aspect_ratio` is invalid. Default to True. """ def __init__(self, max_rotate_degree=10.0, max_translate_ratio=0.1, scaling_ratio_range=(0.5, 1.5), max_shear_degree=2.0, border=(0, 0), border_val=(114, 114, 114), min_bbox_size=2, min_area_ratio=0.2, max_aspect_ratio=20, bbox_clip_border=True, skip_filter=True): assert 0 <= max_translate_ratio <= 1 assert scaling_ratio_range[0] <= scaling_ratio_range[1] assert scaling_ratio_range[0] > 0 self.max_rotate_degree = max_rotate_degree self.max_translate_ratio = max_translate_ratio self.scaling_ratio_range = scaling_ratio_range self.max_shear_degree = max_shear_degree self.border = border self.border_val = border_val self.min_bbox_size = min_bbox_size self.min_area_ratio = min_area_ratio self.max_aspect_ratio = max_aspect_ratio self.bbox_clip_border = bbox_clip_border self.skip_filter = skip_filter def __call__(self, results): img = results['img'] height = img.shape[0] + self.border[0] * 2 width = img.shape[1] + self.border[1] * 2 # Rotation rotation_degree = random.uniform(-self.max_rotate_degree, self.max_rotate_degree) rotation_matrix = self._get_rotation_matrix(rotation_degree) # Scaling scaling_ratio = random.uniform(self.scaling_ratio_range[0], self.scaling_ratio_range[1]) scaling_matrix = self._get_scaling_matrix(scaling_ratio) # Shear x_degree = random.uniform(-self.max_shear_degree, self.max_shear_degree) y_degree = random.uniform(-self.max_shear_degree, self.max_shear_degree) shear_matrix = self._get_shear_matrix(x_degree, y_degree) # Translation trans_x = random.uniform(-self.max_translate_ratio, self.max_translate_ratio) * width trans_y = random.uniform(-self.max_translate_ratio, self.max_translate_ratio) * height translate_matrix = self._get_translation_matrix(trans_x, trans_y) warp_matrix = ( translate_matrix @ shear_matrix @ rotation_matrix @ scaling_matrix) img = cv2.warpPerspective( img, warp_matrix, dsize=(width, height), borderValue=self.border_val) results['img'] = img results['img_shape'] = img.shape for key in results.get('bbox_fields', []): bboxes = results[key] num_bboxes = len(bboxes) if num_bboxes: # homogeneous coordinates xs = bboxes[:, [0, 0, 2, 2]].reshape(num_bboxes * 4) ys = bboxes[:, [1, 3, 3, 1]].reshape(num_bboxes * 4) ones = np.ones_like(xs) points = np.vstack([xs, ys, ones]) warp_points = warp_matrix @ points warp_points = warp_points[:2] / warp_points[2] xs = warp_points[0].reshape(num_bboxes, 4) ys = warp_points[1].reshape(num_bboxes, 4) warp_bboxes = np.vstack( (xs.min(1), ys.min(1), xs.max(1), ys.max(1))).T if self.bbox_clip_border: 
warp_bboxes[:, [0, 2]] = \ warp_bboxes[:, [0, 2]].clip(0, width) warp_bboxes[:, [1, 3]] = \ warp_bboxes[:, [1, 3]].clip(0, height) # remove outside bbox valid_index = find_inside_bboxes(warp_bboxes, height, width) if not self.skip_filter: # filter bboxes filter_index = self.filter_gt_bboxes( bboxes * scaling_ratio, warp_bboxes) valid_index = valid_index & filter_index results[key] = warp_bboxes[valid_index] if key in ['gt_bboxes']: if 'gt_labels' in results: results['gt_labels'] = results['gt_labels'][ valid_index] if 'gt_masks' in results: raise NotImplementedError( 'RandomAffine only supports bbox.') return results def filter_gt_bboxes(self, origin_bboxes, wrapped_bboxes): origin_w = origin_bboxes[:, 2] - origin_bboxes[:, 0] origin_h = origin_bboxes[:, 3] - origin_bboxes[:, 1] wrapped_w = wrapped_bboxes[:, 2] - wrapped_bboxes[:, 0] wrapped_h = wrapped_bboxes[:, 3] - wrapped_bboxes[:, 1] aspect_ratio = np.maximum(wrapped_w / (wrapped_h + 1e-16), wrapped_h / (wrapped_w + 1e-16)) wh_valid_idx = (wrapped_w > self.min_bbox_size) & \ (wrapped_h > self.min_bbox_size) area_valid_idx = wrapped_w * wrapped_h / (origin_w * origin_h + 1e-16) > self.min_area_ratio aspect_ratio_valid_idx = aspect_ratio < self.max_aspect_ratio return wh_valid_idx & area_valid_idx & aspect_ratio_valid_idx def __repr__(self): repr_str = self.__class__.__name__ repr_str += f'(max_rotate_degree={self.max_rotate_degree}, ' repr_str += f'max_translate_ratio={self.max_translate_ratio}, ' repr_str += f'scaling_ratio={self.scaling_ratio_range}, ' repr_str += f'max_shear_degree={self.max_shear_degree}, ' repr_str += f'border={self.border}, ' repr_str += f'border_val={self.border_val}, ' repr_str += f'min_bbox_size={self.min_bbox_size}, ' repr_str += f'min_area_ratio={self.min_area_ratio}, ' repr_str += f'max_aspect_ratio={self.max_aspect_ratio}, ' repr_str += f'skip_filter={self.skip_filter})' return repr_str @staticmethod def _get_rotation_matrix(rotate_degrees): radian = math.radians(rotate_degrees) rotation_matrix = np.array( [[np.cos(radian), -np.sin(radian), 0.], [np.sin(radian), np.cos(radian), 0.], [0., 0., 1.]], dtype=np.float32) return rotation_matrix @staticmethod def _get_scaling_matrix(scale_ratio): scaling_matrix = np.array( [[scale_ratio, 0., 0.], [0., scale_ratio, 0.], [0., 0., 1.]], dtype=np.float32) return scaling_matrix @staticmethod def _get_share_matrix(scale_ratio): scaling_matrix = np.array( [[scale_ratio, 0., 0.], [0., scale_ratio, 0.], [0., 0., 1.]], dtype=np.float32) return scaling_matrix @staticmethod def _get_shear_matrix(x_shear_degrees, y_shear_degrees): x_radian = math.radians(x_shear_degrees) y_radian = math.radians(y_shear_degrees) shear_matrix = np.array([[1, np.tan(x_radian), 0.], [np.tan(y_radian), 1, 0.], [0., 0., 1.]], dtype=np.float32) return shear_matrix @staticmethod def _get_translation_matrix(x, y): translation_matrix = np.array([[1, 0., x], [0., 1, y], [0., 0., 1.]], dtype=np.float32) return translation_matrix @PIPELINES.register_module() class YOLOXHSVRandomAug: """Apply HSV augmentation to image sequentially. It is referenced from https://github.com/Megvii- BaseDetection/YOLOX/blob/main/yolox/data/data_augment.py#L21. Args: hue_delta (int): delta of hue. Default: 5. saturation_delta (int): delta of saturation. Default: 30. value_delta (int): delat of value. Default: 30. 
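    Example:
        A config sketch relying on the defaults above, as typically placed
        after Mosaic/MixUp-style augmentations in a YOLOX-like pipeline (the
        surrounding pipeline is an assumption):

        .. code-block::

            dict(type='YOLOXHSVRandomAug')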
""" def __init__(self, hue_delta=5, saturation_delta=30, value_delta=30): self.hue_delta = hue_delta self.saturation_delta = saturation_delta self.value_delta = value_delta def __call__(self, results): img = results['img'] hsv_gains = np.random.uniform(-1, 1, 3) * [ self.hue_delta, self.saturation_delta, self.value_delta ] # random selection of h, s, v hsv_gains *= np.random.randint(0, 2, 3) # prevent overflow hsv_gains = hsv_gains.astype(np.int16) img_hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV).astype(np.int16) img_hsv[..., 0] = (img_hsv[..., 0] + hsv_gains[0]) % 180 img_hsv[..., 1] = np.clip(img_hsv[..., 1] + hsv_gains[1], 0, 255) img_hsv[..., 2] = np.clip(img_hsv[..., 2] + hsv_gains[2], 0, 255) cv2.cvtColor(img_hsv.astype(img.dtype), cv2.COLOR_HSV2BGR, dst=img) results['img'] = img return results def __repr__(self): repr_str = self.__class__.__name__ repr_str += f'(hue_delta={self.hue_delta}, ' repr_str += f'saturation_delta={self.saturation_delta}, ' repr_str += f'value_delta={self.value_delta})' return repr_str
open-mmlab/mmdetection
mmdet/datasets/pipelines/transforms.py
Python
apache-2.0
109483
[ "VisIt" ]
cfde4be457f63d3d8bcd47c169cd37e656b1be2c2d89f16dd3249bc1151c1fc5
from __future__ import print_function import os, sys import unittest from pathlib import Path import vtk, qt, ctk, slicer from slicer.ScriptedLoadableModule import * import logging import csv from slicer.util import VTKObservationMixin import platform import time from RigidAlignmentModule import RigidAlignmentModuleLogic try: import urllib.request, urllib.parse, urllib.error except ImportError: import urllib # python 2.x import shutil from CommonUtilities import * from packaging import version def _setSectionResizeMode(header, *args, **kwargs): if version.parse(qt.Qt.qVersion()) < version.parse("5.0.0"): header.setResizeMode(*args, **kwargs) else: header.setSectionResizeMode(*args, **kwargs) # # ShapeAnalysisModule # class ShapeAnalysisModule(ScriptedLoadableModule): """Uses ScriptedLoadableModule base class, available at: https://github.com/Slicer/Slicer/blob/master/Base/Python/slicer/ScriptedLoadableModule.py """ def __init__(self, parent): ScriptedLoadableModule.__init__(self, parent) self.parent.title = "Shape Analysis Module" self.parent.categories = ["SPHARM"] self.parent.dependencies = [] self.parent.contributors = ["Laura Pascal (Kitware Inc.), Beatriz Paniagua (Kitware Inc.), Hina Shah (Kitware Inc.)"] self.parent.helpText = """ SPHARM-PDM is a tool that computes point-based models using a parametric boundary description for the computing of Shape Analysis. """ self.parent.acknowledgementText = """ This work was supported by NIH NIBIB R01EB021391 (Shape Analysis Toolbox for Medical Image Computing Projects). """ # # ShapeAnalysisModuleWidget # class ShapeAnalysisModuleWidget(ScriptedLoadableModuleWidget): """Uses ScriptedLoadableModuleWidget base class, available at: https://github.com/Slicer/Slicer/blob/master/Base/Python/slicer/ScriptedLoadableModule.py """ def setup(self): ScriptedLoadableModuleWidget.setup(self) # # Global variables # self.Logic = ShapeAnalysisModuleLogic() self.progressbars_layout = None # # Interface # loader = qt.QUiLoader() self.moduleName = 'ShapeAnalysisModule' scriptedModulesPath = eval('slicer.modules.%s.path' % self.moduleName.lower()) scriptedModulesPath = os.path.dirname(scriptedModulesPath) path = os.path.join(scriptedModulesPath, 'Resources', 'UI', '%s.ui' % self.moduleName) qfile = qt.QFile(path) qfile.open(qt.QFile.ReadOnly) widget = loader.load(qfile, self.parent) self.layout = self.parent.layout() self.widget = widget self.layout.addWidget(widget) # Global variables of the Interface # Group Project IO self.CollapsibleButton_GroupProjectIO = self.getWidget('CollapsibleButton_GroupProjectIO') self.GroupProjectInputDirectory = self.getWidget('DirectoryButton_GroupProjectInputDirectory') self.GroupProjectOutputDirectory = self.getWidget('DirectoryButton_GroupProjectOutputDirectory') self.Debug = self.getWidget('checkBox_Debug') # Post Processed Segmentation self.CollapsibleButton_SegPostProcess = self.getWidget('CollapsibleButton_SegPostProcess') self.OverwriteSegPostProcess = self.getWidget('checkBox_OverwriteSegPostProcess') self.label_RescaleSegPostProcess = self.getWidget('label_RescaleSegPostProcess') self.RescaleSegPostProcess = self.getWidget('checkBox_RescaleSegPostProcess') self.sx = self.getWidget('SliderWidget_sx') self.sy = self.getWidget('SliderWidget_sy') self.sz = self.getWidget('SliderWidget_sz') self.label_sx = self.getWidget('label_sx') self.label_sy = self.getWidget('label_sy') self.label_sz = self.getWidget('label_sz') self.LabelState = self.getWidget('checkBox_LabelState') self.label_ValueLabelNumber = 
self.getWidget('label_ValueLabelNumber') self.ValueLabelNumber = self.getWidget('SliderWidget_ValueLabelNumber') # Generate Mesh Parameters self.CollapsibleButton_GenParaMesh = self.getWidget('CollapsibleButton_GenParaMesh') self.OverwriteGenParaMesh = self.getWidget('checkBox_OverwriteGenParaMesh') self.NumberofIterations = self.getWidget('SliderWidget_NumberofIterations') # Parameters to SPHARM Mesh self.CollapsibleButton_ParaToSPHARMMesh = self.getWidget('CollapsibleButton_ParaToSPHARMMesh') self.OverwriteParaToSPHARMMesh = self.getWidget('checkBox_OverwriteParaToSPHARMMesh') self.SubdivLevelValue = self.getWidget('SliderWidget_SubdivLevelValue') self.SPHARMDegreeValue = self.getWidget('SliderWidget_SPHARMDegreeValue') self.thetaIterationValue = self.getWidget('spinBox_thetaIterationValue') self.phiIterationValue = self.getWidget('spinBox_phiIterationValue') self.medialMesh = self.getWidget('checkBox_medialMesh') # Advanced Post Processed Segmentation self.CollapsibleButton_AdvancedPostProcessedSegmentation = self.getWidget( 'CollapsibleButton_AdvancedPostProcessedSegmentation') self.GaussianFiltering = self.getWidget('checkBox_GaussianFiltering') self.label_VarianceX = self.getWidget('label_VarianceX') self.VarianceX = self.getWidget('SliderWidget_VarianceX') self.label_VarianceY = self.getWidget('label_VarianceY') self.VarianceY = self.getWidget('SliderWidget_VarianceY') self.label_VarianceZ = self.getWidget('label_VarianceZ') self.VarianceZ = self.getWidget('SliderWidget_VarianceZ') # Advanced Parameters to SPHARM Mesh self.CollapsibleButton_AdvancedParametersToSPHARMMesh = self.getWidget( 'CollapsibleButton_AdvancedParametersToSPHARMMesh') self.useRegTemplate = self.getWidget('checkBox_useRegTemplate') self.label_regTemplate = self.getWidget('label_regTemplate') self.regTemplate = self.getWidget('PathLineEdit_regTemplate') self.useFlipTemplate = self.getWidget('checkBox_useFlipTemplate') self.label_flipTemplate = self.getWidget('label_flipTemplate') self.flipTemplate = self.getWidget('PathLineEdit_flipTemplate') self.choiceOfFlip = self.getWidget('comboBox_choiceOfFlip') self.sameFlipForAll = self.getWidget('checkBox_sameFlipForAll') self.tableWidget_ChoiceOfFlip = self.getWidget('tableWidget_ChoiceOfFlip') # Correspondence Improvement self.CollapsibleButton_RigidAlignment = self.getWidget('CollapsibleButton_RigidAlignment') self.RigidAlignmentFiducialsDirectory = self.getWidget('DirectoryButton_RigidAlignmentFiducialsDirectory') self.RigidAlignmentEnabled = self.getWidget('checkBox_RigidAlignmentEnabled') # Visualization self.CollapsibleButton_Visualization = self.getWidget('CollapsibleButton_Visualization') self.visualizationInSPV = self.getWidget('pushButton_visualizationInSPV') self.CheckableComboBox_visualization = self.getWidget('CheckableComboBox_visualization') self.tableWidget_visualization = self.getWidget('tableWidget_visualization') # Apply CLIs self.ApplyButton = self.getWidget('applyButton') self.progress_layout = self.getWidget('progress_layout').layout() # Connections # Group Project IO self.CollapsibleButton_GroupProjectIO.connect('clicked()', lambda: self.onSelectedCollapsibleButtonOpen( self.CollapsibleButton_GroupProjectIO)) self.GroupProjectInputDirectory.connect('directoryChanged(const QString &)', self.onInputDirectoryChanged) self.GroupProjectOutputDirectory.connect('directoryChanged(const QString &)', self.onOutputDirectoryChanged) self.Debug.connect('clicked(bool)', self.onDebug) # Post Processed Segmentation 
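# The _setSectionResizeMode helper defined above papers over the Qt4 -> Qt5 rename of
# QHeaderView.setResizeMode to setSectionResizeMode by comparing qt.Qt.qVersion()
# against "5.0.0" with packaging.version. Below is a minimal, Qt-free sketch of the
# same version-gated dispatch; the _StubHeader class and the hard-coded version
# strings are illustrative stand-ins, not part of the module.
from packaging import version

class _StubHeader(object):
    """Records which resize API was called, standing in for qt.QHeaderView."""
    def __init__(self):
        self.calls = []
    def setResizeMode(self, *args):            # Qt4-style API
        self.calls.append(("setResizeMode", args))
    def setSectionResizeMode(self, *args):     # Qt5-style API
        self.calls.append(("setSectionResizeMode", args))

def _set_section_resize_mode(header, qt_version, *args):
    # Same branch as _setSectionResizeMode, with the Qt version passed in
    # explicitly so the sketch can run outside Slicer.
    if version.parse(qt_version) < version.parse("5.0.0"):
        header.setResizeMode(*args)
    else:
        header.setSectionResizeMode(*args)

if __name__ == "__main__":
    hdr = _StubHeader()
    _set_section_resize_mode(hdr, "4.8.7", 0, "Stretch")
    _set_section_resize_mode(hdr, "5.15.2", 1, "ResizeToContents")
    assert hdr.calls[0][0] == "setResizeMode"
    assert hdr.calls[1][0] == "setSectionResizeMode"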
self.CollapsibleButton_SegPostProcess.connect('clicked()', lambda: self.onSelectedCollapsibleButtonOpen( self.CollapsibleButton_SegPostProcess)) self.OverwriteSegPostProcess.connect('clicked(bool)', self.onOverwriteFilesSegPostProcess) self.RescaleSegPostProcess.connect('stateChanged(int)', self.onSelectSpacing) self.sx.connect('valueChanged(double)', self.onSxValueChanged) self.sy.connect('valueChanged(double)', self.onSyValueChanged) self.sz.connect('valueChanged(double)', self.onSzValueChanged) self.LabelState.connect('clicked(bool)', self.onSelectValueLabelNumber) self.ValueLabelNumber.connect('valueChanged(double)', self.onLabelNumberValueChanged) # Generate Mesh Parameters self.CollapsibleButton_GenParaMesh.connect('clicked()', lambda: self.onSelectedCollapsibleButtonOpen( self.CollapsibleButton_GenParaMesh)) self.OverwriteGenParaMesh.connect('clicked(bool)', self.onOverwriteFilesGenParaMesh) self.NumberofIterations.connect('valueChanged(double)', self.onNumberofIterationsValueChanged) # Parameters to SPHARM Mesh self.CollapsibleButton_ParaToSPHARMMesh.connect('clicked()', lambda: self.onSelectedCollapsibleButtonOpen( self.CollapsibleButton_ParaToSPHARMMesh)) self.OverwriteParaToSPHARMMesh.connect('clicked(bool)', self.onOverwriteFilesParaToSPHARMMesh) self.SubdivLevelValue.connect('valueChanged(double)', self.onSubdivLevelValueChanged) self.SPHARMDegreeValue.connect('valueChanged(double)', self.onSPHARMDegreeValueChanged) self.thetaIterationValue.connect('valueChanged(int)', self.onThetaIterationValueChanged) self.phiIterationValue.connect('valueChanged(int)', self.onPhiIterationValueChanged) self.medialMesh.connect('clicked(bool)', self.onMedialMeshValueChanged) # Advanced Post Processed Segmentation self.CollapsibleButton_AdvancedPostProcessedSegmentation.connect('clicked()', lambda: self.onSelectedCollapsibleButtonOpen( self.CollapsibleButton_AdvancedPostProcessedSegmentation)) self.GaussianFiltering.connect('clicked(bool)', self.onSelectGaussianVariance) self.VarianceX.connect('valueChanged(double)', self.onVarianceXValueChanged) self.VarianceY.connect('valueChanged(double)', self.onVarianceYValueChanged) self.VarianceZ.connect('valueChanged(double)', self.onVarianceZValueChanged) # Advanced Parameters to SPHARM Mesh self.CollapsibleButton_AdvancedParametersToSPHARMMesh.connect('clicked()', lambda: self.onSelectedCollapsibleButtonOpen( self.CollapsibleButton_AdvancedParametersToSPHARMMesh)) self.useRegTemplate.connect('clicked(bool)', self.onEnableRegTemplate) self.regTemplate.connect('currentPathChanged(const QString)', self.onRegTemplateValueChanged) self.useFlipTemplate.connect('clicked(bool)', self.onEnableFlipTemplate) self.flipTemplate.connect('currentPathChanged(const QString)', self.onFlipTemplateValueChanged) self.choiceOfFlip.connect('currentIndexChanged(int)', self.onChoiceOfFlipValueChanged) self.sameFlipForAll.connect('clicked(bool)', self.onEnableFlipChoices) # Correspondence Improvement self.CollapsibleButton_RigidAlignment.connect('clicked()', lambda: self.onSelectedCollapsibleButtonOpen( self.CollapsibleButton_RigidAlignment)) self.RigidAlignmentFiducialsDirectory.connect('directoryChanged(const QString &)', self.onFiducialsDirectoryChanged) self.RigidAlignmentEnabled.connect('stateChanged(int)', self.onEnableRigidAlignment) # Visualization self.CollapsibleButton_Visualization.connect('clicked()', lambda: self.onSelectedCollapsibleButtonOpen( self.CollapsibleButton_Visualization)) self.CheckableComboBox_visualization.connect('checkedIndexesChanged()', 
self.onCheckableComboBoxValueChanged) self.visualizationInSPV.connect('clicked(bool)', self.onSPHARMMeshesVisualizationInSPV) # Apply CLIs self.ApplyButton.connect('clicked(bool)', self.onApplyButton) slicer.mrmlScene.AddObserver(slicer.mrmlScene.EndCloseEvent, self.onCloseScene) # Widget Configuration # Table for the Flip Options self.tableWidget_ChoiceOfFlip.setColumnCount(2) self.tableWidget_ChoiceOfFlip.setHorizontalHeaderLabels([' Input Files ', ' Choice of Flip ']) self.tableWidget_ChoiceOfFlip.setColumnWidth(0, 400) horizontalHeader = self.tableWidget_ChoiceOfFlip.horizontalHeader() horizontalHeader.setStretchLastSection(False) _setSectionResizeMode(horizontalHeader, 0, qt.QHeaderView.Stretch) _setSectionResizeMode(horizontalHeader, 1, qt.QHeaderView.ResizeToContents) self.tableWidget_ChoiceOfFlip.verticalHeader().setVisible(False) # Progress Bar self.progress_layout.addWidget(self.Logic.ProgressBar) # Table for the visualization in SPV self.tableWidget_visualization.setColumnCount(2) self.tableWidget_visualization.setHorizontalHeaderLabels([' VTK Files ', ' Visualization ']) self.tableWidget_visualization.setColumnWidth(0, 400) horizontalHeader = self.tableWidget_visualization.horizontalHeader() horizontalHeader.setStretchLastSection(False) _setSectionResizeMode(horizontalHeader, 0, qt.QHeaderView.Stretch) _setSectionResizeMode(horizontalHeader, 1, qt.QHeaderView.ResizeToContents) self.tableWidget_visualization.verticalHeader().setVisible(False) # Configuration of the parameters of the widget self.Logic.parameters.setTableForChoiceOfFlip(self.tableWidget_ChoiceOfFlip) def enter(self): if not hasattr(slicer.modules, 'shapepopulationviewer') and not hasattr(slicer.modules, 'launcher'): messageBox = ctk.ctkMessageBox() messageBox.setWindowTitle(' /!\ WARNING /!\ ') messageBox.setIcon(messageBox.Warning) messageBox.setText("Shape Population Viewer is not installed!") messageBox.setInformativeText("To install Shape Population Viewer in order to display the SPHARM meshes outputs generated by Shape Analysis Module, you can:\n" "Solution 1: \n" " - Install it via the Extensions Managers\n" " - Restart 3DSlicer\n" "Solution 2: \n" " - Download it on https://www.nitrc.org/projects/shapepopviewer/\n" " - Add the folder where you stored it in Edit/Application Settings/Modules/Add\n" " - Restart 3DSlicer") messageBox.setStandardButtons(messageBox.Ok) messageBox.exec_() else: self.CollapsibleButton_Visualization.enabled = True def onCloseScene(self, obj, event): # Group Project IO self.CollapsibleButton_GroupProjectIO.setChecked(True) self.Logic.InputCases = [] self.GroupProjectInputDirectory.directory = slicer.app.slicerHome self.GroupProjectOutputDirectory.directory = slicer.app.slicerHome self.Debug.setChecked(False) # Post Processed Segmentation self.CollapsibleButton_SegPostProcess.setChecked(False) self.OverwriteSegPostProcess.setChecked(False) self.RescaleSegPostProcess.setChecked(True) self.sx.setValue(0.5) self.sy.setValue(0.5) self.sz.setValue(0.5) self.LabelState.setChecked(False) self.ValueLabelNumber.setValue(0) # Generate Mesh Parameters self.CollapsibleButton_GenParaMesh.setChecked(False) self.OverwriteGenParaMesh.setChecked(False) self.NumberofIterations.setValue(1000) # Parameters to SPHARM Mesh self.CollapsibleButton_ParaToSPHARMMesh.setChecked(False) self.OverwriteParaToSPHARMMesh.setChecked(False) self.SubdivLevelValue.setValue(10) self.SPHARMDegreeValue.setValue(15) self.thetaIterationValue.setValue(100) self.phiIterationValue.setValue(100) self.medialMesh.setChecked(False) 
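# Every self.getWidget(...) call above resolves a control from the loaded .ui file by
# objectName; the getWidget/findWidget helpers defined further below implement this as
# a depth-first walk over widget.children(). This is a Qt-free sketch of that lookup;
# the _Node class and the sample tree are illustrative only.
class _Node(object):
    def __init__(self, objectName, children=()):
        self.objectName = objectName
        self._children = list(children)
    def children(self):
        return self._children

def find_widget(widget, objectName):
    # Depth-first search mirroring ShapeAnalysisModuleWidget.findWidget.
    if widget.objectName == objectName:
        return widget
    for child in widget.children():
        found = find_widget(child, objectName)
        if found:
            return found
    return None

if __name__ == "__main__":
    tree = _Node("root", [
        _Node("CollapsibleButton_GroupProjectIO", [
            _Node("DirectoryButton_GroupProjectInputDirectory"),
        ]),
        _Node("applyButton"),
    ])
    assert find_widget(tree, "applyButton").objectName == "applyButton"
    assert find_widget(tree, "does_not_exist") is None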
# Advanced Post Processed Segmentation self.CollapsibleButton_AdvancedPostProcessedSegmentation.setChecked(False) self.GaussianFiltering.setChecked(False) self.VarianceX.setValue(10) self.VarianceY.setValue(10) self.VarianceZ.setValue(10) # Advanced Parameters to SPHARM Mesh self.CollapsibleButton_AdvancedParametersToSPHARMMesh.setChecked(False) self.useRegTemplate.setChecked(False) self.regTemplate.setCurrentPath(" ") self.useFlipTemplate.setChecked(False) self.flipTemplate.setCurrentPath(" ") self.choiceOfFlip.setCurrentIndex(0) self.choiceOfFlip.enabled = True self.sameFlipForAll.setChecked(True) self.tableWidget_ChoiceOfFlip.enabled = False self.tableWidget_ChoiceOfFlip.clear() self.tableWidget_ChoiceOfFlip.setColumnCount(2) self.tableWidget_ChoiceOfFlip.setHorizontalHeaderLabels([' Input Files ', ' Choice of Flip ']) self.tableWidget_ChoiceOfFlip.setColumnWidth(0, 400) horizontalHeader = self.tableWidget_ChoiceOfFlip.horizontalHeader() horizontalHeader.setStretchLastSection(False) _setSectionResizeMode(horizontalHeader, 0, qt.QHeaderView.Stretch) _setSectionResizeMode(horizontalHeader, 1, qt.QHeaderView.ResizeToContents) self.tableWidget_ChoiceOfFlip.verticalHeader().setVisible(False) # Visualization self.CollapsibleButton_Visualization.setChecked(False) self.CheckableComboBox_visualization.model().clear() self.tableWidget_visualization.clear() self.tableWidget_visualization.setColumnCount(2) self.tableWidget_visualization.setHorizontalHeaderLabels([' VTK Files ', ' Visualization ']) self.tableWidget_visualization.setColumnWidth(0, 400) horizontalHeader = self.tableWidget_visualization.horizontalHeader() horizontalHeader.setStretchLastSection(False) _setSectionResizeMode(horizontalHeader, 0, qt.QHeaderView.Stretch) _setSectionResizeMode(horizontalHeader, 1, qt.QHeaderView.ResizeToContents) self.tableWidget_visualization.verticalHeader().setVisible(False) # Apply if self.ApplyButton.text == "Cancel": self.ApplyButton.click() self.Logic.ProgressBar.hide() if self.progressbars_layout: self.CLIProgressBars.hide() # Functions to recover the widget in the .ui file def getWidget(self, objectName): return self.findWidget(self.widget, objectName) def findWidget(self, widget, objectName): if widget.objectName == objectName: return widget else: for w in widget.children(): resulting_widget = self.findWidget(w, objectName) if resulting_widget: return resulting_widget return None # Only one tab can be displayed at the same time: # When one tab is opened all the other tabs are closed def onSelectedCollapsibleButtonOpen(self, selectedCollapsibleButton): if selectedCollapsibleButton.isChecked(): collapsibleButtonList = [self.CollapsibleButton_GroupProjectIO, self.CollapsibleButton_SegPostProcess, self.CollapsibleButton_GenParaMesh, self.CollapsibleButton_ParaToSPHARMMesh, self.CollapsibleButton_AdvancedPostProcessedSegmentation, self.CollapsibleButton_AdvancedParametersToSPHARMMesh, self.CollapsibleButton_Visualization, self.CollapsibleButton_RigidAlignment] for collapsibleButton in collapsibleButtonList: collapsibleButton.setChecked(False) selectedCollapsibleButton.setChecked(True) # # Group Project IO # def onInputDirectoryChanged(self): inputDirectory = self.GroupProjectInputDirectory.directory # Update of the input directory path self.Logic.parameters.setInputDirectory(inputDirectory) # Possible extensions exts = [".gipl", ".gipl.gz", ".mgh", ".mgh,gz", ".nii", ".nii.gz",".nrrd", ".vtk", ".vtp", ".hdr", ".mhd"] # Search cases and add the filename to a list self.Logic.InputCases = [] for file in 
os.listdir(inputDirectory): for ext in exts: if file.endswith(ext): self.Logic.InputCases.append(file) if file.endswith(".nii") or file.endswith(".nii.gz"): self.RescaleSegPostProcess.setCheckState(qt.Qt.Unchecked) self.label_RescaleSegPostProcess.enabled = False self.RescaleSegPostProcess.enabled = False # Update of the output directory path def onOutputDirectoryChanged(self): outputDirectory = self.GroupProjectOutputDirectory.directory self.Logic.parameters.setOutputDirectory(outputDirectory) # Update of the debug parameter def onDebug(self): self.Logic.parameters.setDebug(self.Debug.checkState()) # # Post Processed Segmentation # def onOverwriteFilesSegPostProcess(self): # Update of the overwrite boolean for the Post Processed Segmentation step self.Logic.parameters.setOverwriteSegPostProcess(self.OverwriteSegPostProcess.checkState()) if self.OverwriteSegPostProcess.checkState(): # Message for the user messageBox = ctk.ctkMessageBox() messageBox.setWindowTitle(' /!\ WARNING /!\ ') messageBox.setIcon(messageBox.Warning) messageBox.setText("<p align='center'>Applying the overwrite option to Post Processed Segmentation step will also apply to the next steps</p>") messageBox.setStandardButtons(messageBox.Ok) messageBox.exec_() # Check the overwrite option for the next steps self.OverwriteGenParaMesh.setCheckState(qt.Qt.Checked) self.Logic.parameters.setOverwriteGenParaMesh(self.OverwriteGenParaMesh.checkState()) self.OverwriteParaToSPHARMMesh.setCheckState(qt.Qt.Checked) self.Logic.parameters.setOverwriteParaToSPHARMMesh(self.OverwriteParaToSPHARMMesh.checkState()) def onSelectSpacing(self): # Update of the rescale boolean for the Post Processed Segmentation step self.Logic.parameters.setRescaleSegPostProcess(self.RescaleSegPostProcess.checkState()) # Enable/Disable the spacing x,y, and z parameters in the UI self.label_sx.enabled = self.RescaleSegPostProcess.checkState() self.label_sy.enabled = self.RescaleSegPostProcess.checkState() self.label_sz.enabled = self.RescaleSegPostProcess.checkState() self.sx.enabled = self.RescaleSegPostProcess.checkState() self.sy.enabled = self.RescaleSegPostProcess.checkState() self.sz.enabled = self.RescaleSegPostProcess.checkState() # Update of the spacing x parameter for the Post Processed Segmentation step def onSxValueChanged(self): self.Logic.parameters.setSx(self.sx.value) # Update of the spacing y parameter for the Post Processed Segmentation step def onSyValueChanged(self): self.Logic.parameters.setSy(self.sy.value) # Update of the spacing z parameter for the Post Processed Segmentation step def onSzValueChanged(self): self.Logic.parameters.setSz(self.sz.value) # Enable/Disable the label number value in the UI def onSelectValueLabelNumber(self): self.label_ValueLabelNumber.enabled = self.LabelState.checkState() self.ValueLabelNumber.enabled = self.LabelState.checkState() # Update of the label parameter for the Post Processed Segmentation step def onLabelNumberValueChanged(self): self.Logic.parameters.setLabelNumber(self.ValueLabelNumber.value) # # Generate Mesh Parameters # def onOverwriteFilesGenParaMesh(self): # If the overwrite option for GenParaMesh is unchecked if not self.OverwriteGenParaMesh.checkState(): # If the overwrite option for the previous step is checked, the overwrite option need to be checked for this step too if self.OverwriteSegPostProcess.checkState(): self.OverwriteGenParaMesh.setCheckState(qt.Qt.Checked) # Message for the user messageBox = ctk.ctkMessageBox() messageBox.setWindowTitle(' /!\ WARNING /!\ ') 
messageBox.setIcon(messageBox.Warning) messageBox.setText("<p align='center'>The overwrite option need to be applied to this step as it is set for the previous step</p>") messageBox.setStandardButtons(messageBox.Ok) messageBox.exec_() # If the overwrite option for GenParaMesh is checked else: # Message for the user messageBox = ctk.ctkMessageBox() messageBox.setWindowTitle(' /!\ WARNING /!\ ') messageBox.setIcon(messageBox.Warning) messageBox.setText("<p align='center'>Applying the overwrite option to Generate Mesh Parameters step will also apply to the next steps</p>") messageBox.setStandardButtons(messageBox.Ok) messageBox.exec_() # Check the overwrite option for the next step self.OverwriteParaToSPHARMMesh.setCheckState(qt.Qt.Checked) self.Logic.parameters.setOverwriteParaToSPHARMMesh(self.OverwriteParaToSPHARMMesh.checkState()) # Update of the overwrite boolean for the Generate Mesh Parameters step self.Logic.parameters.setOverwriteGenParaMesh(self.OverwriteGenParaMesh.checkState()) # Update of the iterations parameter for the Generate Mesh Parameters step def onNumberofIterationsValueChanged(self): self.Logic.parameters.setNumberofIterations(self.NumberofIterations.value) # # Parameters to SPHARM Mesh # def onOverwriteFilesParaToSPHARMMesh(self): # If the overwrite option for ParaToSPHARMMesh is unchecked if not self.OverwriteParaToSPHARMMesh.checkState(): # If the overwrite option for a previous step is checked, the overwrite option need to be checked for this step too if self.OverwriteSegPostProcess.checkState() or self.OverwriteGenParaMesh.checkState(): self.OverwriteParaToSPHARMMesh.setCheckState(qt.Qt.Checked) # Message for the user messageBox = ctk.ctkMessageBox() messageBox.setWindowTitle(' /!\ WARNING /!\ ') messageBox.setIcon(messageBox.Warning) messageBox.setText("<p align='center'>The overwrite option need to be applied to this step as it is set for the previous step</p>") messageBox.setStandardButtons(messageBox.Ok) messageBox.exec_() # Update of the overwrite boolean for the Parameters to SPHARM Mesh step self.Logic.parameters.setOverwriteParaToSPHARMMesh(self.OverwriteParaToSPHARMMesh.checkState()) # Update of the sub-division parameter for the Parameters to SPHARM Mesh step def onSubdivLevelValueChanged(self): self.Logic.parameters.setSubdivLevelValue(self.SubdivLevelValue.value) # Update of the SPHARM degree parameter for the Parameters to SPHARM Mesh step def onSPHARMDegreeValueChanged(self): self.Logic.parameters.setSPHARMDegreeValue(self.SPHARMDegreeValue.value) # Update of the theta iteration parameter for the Parameters to SPHARM Mesh step def onThetaIterationValueChanged(self): self.Logic.parameters.setThetaIterationValue(self.thetaIterationValue.value) # Update of the phi iteration parameter for the Parameters to SPHARM Mesh step def onPhiIterationValueChanged(self): self.Logic.parameters.setPhiIterationValue(self.phiIterationValue.value) # Update of the medial mesh boolean for the Parameters to SPHARM Mesh step def onMedialMeshValueChanged(self): self.Logic.parameters.setMedialMesh(self.medialMesh.checkState()) # # Advanced Post Processed Segmentation # def onSelectGaussianVariance(self): # Update of the gaussian variance boolean for the Post Processed Segmentation step self.Logic.parameters.setGaussianFiltering(self.GaussianFiltering.checkState()) # Enable/Disable the gaussian variance parameters in the UI self.label_VarianceX.enabled = self.GaussianFiltering.checkState() self.VarianceX.enabled = self.GaussianFiltering.checkState() self.label_VarianceY.enabled 
= self.GaussianFiltering.checkState() self.VarianceY.enabled = self.GaussianFiltering.checkState() self.label_VarianceZ.enabled = self.GaussianFiltering.checkState() self.VarianceZ.enabled = self.GaussianFiltering.checkState() # Update of the variance x parameter for the Post Processed Segmentation step def onVarianceXValueChanged(self): self.Logic.parameters.setVarianceX(self.VarianceX.value) # Update of the variance y parameter for the Post Processed Segmentation step def onVarianceYValueChanged(self): self.Logic.parameters.setVarianceY(self.VarianceY.value) # Update of the variance z parameter for the Post Processed Segmentation step def onVarianceZValueChanged(self): self.Logic.parameters.setVarianceZ(self.VarianceZ.value) # # Advanced Parameters to SPHARM Mesh # def onEnableRegTemplate(self): # Update of the registration template boolean for the Parameters to SPHARM Mesh step self.Logic.parameters.setUseRegTemplate(self.useRegTemplate.checkState()) # Enable/Disable the registration template path in the UI self.label_regTemplate.enabled = self.useRegTemplate.checkState() self.regTemplate.enabled = self.useRegTemplate.checkState() # Update of the registration template path for the Parameters to SPHARM Mesh step def onRegTemplateValueChanged(self): self.Logic.parameters.setRegTemplate(self.regTemplate.currentPath) def onEnableFlipTemplate(self): # Update of the flip template boolean for the Parameters to SPHARM Mesh step self.Logic.parameters.setUseFlipTemplate(self.useFlipTemplate.checkState()) # Enable/Disable the flip template path in the UI self.label_flipTemplate.enabled = self.useFlipTemplate.checkState() self.flipTemplate.enabled = self.useFlipTemplate.checkState() # Update of the flip template path for the Parameters to SPHARM Mesh step def onFlipTemplateValueChanged(self): self.Logic.parameters.setFlipTemplate(self.flipTemplate.currentPath) # Update of the flip parameter for the Parameters to SPHARM Mesh step def onChoiceOfFlipValueChanged(self): self.Logic.parameters.setChoiceOfFlip(self.choiceOfFlip.currentIndex) def onEnableFlipChoices(self): # Update of the flip option boolean for the Parameters to SPHARM Mesh step self.Logic.parameters.setSameFlipForAll(self.sameFlipForAll.checkState()) self.choiceOfFlip.enabled = self.sameFlipForAll.checkState() self.tableWidget_ChoiceOfFlip.enabled = not self.sameFlipForAll.checkState() if not self.sameFlipForAll.checkState(): self.fillTableForFlipOptions() # # Correspondence Improvement # def onFiducialsDirectoryChanged(self, directory): self.Logic.parameters.setFiducialsDirectory(directory) def onEnableRigidAlignment(self, enabled): self.Logic.parameters.setRigidAlignmentEnabled(enabled) # # Apply CLIs # def onApplyButton(self): # Run workflow if not self.Logic.Node.IsBusy(): # Check the registration template file if self.useRegTemplate.checkState(): if not os.path.exists(self.regTemplate.currentPath) or not self.regTemplate.currentPath.endswith(".vtk"): slicer.util.errorDisplay("Invalid registration template file in Advanced Parameters to SPHARM Mesh Tab") return # Check the flip template file if self.useFlipTemplate.checkState(): if not os.path.exists(self.flipTemplate.currentPath) or not self.flipTemplate.currentPath.endswith(".coef"): slicer.util.errorDisplay("Invalid flip template file in Advanced Parameters to SPHARM Mesh Tab") return # Empty the output folders if the overwrite options are checked self.Logic.cleanOutputFolders() # Change the apply buttons logging.info('Widget: Running ShapeAnalysisModule') 
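# onApplyButton above refuses to start the workflow unless the optional registration
# template is an existing .vtk file and the optional flip template is an existing
# .coef file. A small standalone sketch of that check; the paths used in the demo are
# illustrative placeholders, not real templates.
import os

def validate_template(path, required_extension):
    """Return None if path exists and has the expected extension, else an error string."""
    if not os.path.exists(path) or not path.endswith(required_extension):
        return "Invalid template file: %s (expected an existing %s file)" % (path, required_extension)
    return None

if __name__ == "__main__":
    # A missing file fails the same way an existing file with the wrong suffix would.
    assert validate_template("/nonexistent/template.vtk", ".vtk") is not None
    import tempfile
    with tempfile.NamedTemporaryFile(suffix=".coef") as f:
        assert validate_template(f.name, ".coef") is None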
self.ApplyButton.setText("Cancel") self.Logic.addObserver(self.Logic.Node, slicer.vtkMRMLCommandLineModuleNode().StatusModifiedEvent, self.onLogicModified) self.Logic.Node.SetStatus(self.Logic.Node.Scheduled) self.Logic.allCaseStartTime = time.time() self.Logic.ShapeAnalysisCases() # Cancel Workflow else: logging.info("Widget: Cancelling ShapeAnalysisModule") self.ApplyButton.setEnabled(False) self.Logic.Cancel() def onLogicModified(self, logic_node, event): status = logic_node.GetStatusString() logging.info('-- %s : ShapeAnalysisModule', status) # if not busy (completed, error, cancelled) if not logic_node.IsBusy(): self.Logic.removeObserver(logic_node, slicer.vtkMRMLCommandLineModuleNode().StatusModifiedEvent, self.onLogicModified) # Create Error Message if status == 'Completed with errors' or status == 'Cancelled': logging.error(self.Logic.ErrorMessage) qt.QMessageBox.critical(slicer.util.mainWindow(), 'ShapeAnalysisModule', self.Logic.ErrorMessage) elif status == 'Completed': self.Logic.improveCorrespondence() self.configurationVisualization() # Empty lists self.Logic.pipeline = {} self.Logic.completed = {} # Change the apply buttons self.ApplyButton.setEnabled(True) self.ApplyButton.setText("Run ShapeAnalysisModule") # if running, create some progress bars for each cases elif status == 'Running': self.Logic.ProgressBar.show() if self.progressbars_layout: self.CLIProgressBars.hide() self.CLIProgressBars = ctk.ctkCollapsibleGroupBox() self.CLIProgressBars.setTitle('Detail') self.progress_layout.addWidget(self.CLIProgressBars) self.progressbars_layout = qt.QVBoxLayout(self.CLIProgressBars) for i in range(len(self.Logic.pipeline)): self.progressbars_layout.addWidget(self.Logic.pipeline[i].ProgressBar) # Function to update the checkable comboBox and the table's checkBoxes in the visualization tab according of the check of one checkBox in the checkable comboBox def onCheckableComboBoxValueChanged(self): currentText = self.CheckableComboBox_visualization.currentText currentIndex = self.CheckableComboBox_visualization.currentIndex currentItem = self.CheckableComboBox_visualization.model().item(currentIndex, 0) # ******* Update the CheckableComboBox ******* # # Check/Uncheck the "Case i: case_name [..]" checkboxes in the checkacle comboBox if currentText == "All Models": self.checkedItems("SPHARM", currentItem.checkState()) elif currentText == "All SPHARM Models": self.checkedItems("SPHARM Models", currentItem.checkState()) elif currentText == "All SPHARM Ellipse Aligned Models": self.checkedItems("SPHARM Ellipse Aligned Models", currentItem.checkState()) elif currentText == "All SPHARM Medial Meshes": self.checkedItems("SPHARM Medial Meshes", currentItem.checkState()) elif currentText == "All SPHARM Procrustes Aligned Models": self.checkedItems("SPHARM Procrustes Aligned Models", currentItem.checkState()) # Check/Uncheck the "All [..]" checkboxes in the checkacle comboBox self.checkedAllItems() self.CheckableComboBox_visualization.blockSignals(False) # ******* Update the checkboxes in the table ******* # for row in range(0, self.tableWidget_visualization.rowCount): actionOnCheckBox = False label = self.tableWidget_visualization.cellWidget(row, 0) outputRootname = label.text if currentText == "All Models": actionOnCheckBox = True elif currentText == "All SPHARM Models": if not outputRootname.find("SPHARM") == -1 and outputRootname.find("SPHARM_ellalign") == -1 and outputRootname.find("SPHARMMedialMesh") == -1 and outputRootname.find("SPHARM_procalign") == -1: actionOnCheckBox = True elif 
currentText == "All SPHARM Ellipse Aligned Models": if not outputRootname.find("SPHARM_ellalign") == -1: actionOnCheckBox = True elif currentText == "All SPHARM Medial Meshes": if not outputRootname.find("SPHARMMedialMesh") == -1: actionOnCheckBox = True elif currentText == "All SPHARM Procrustes Aligned Models": if not outputRootname.find("SPHARM_procalign") == -1: actionOnCheckBox = True else: for inputFilename in self.Logic.InputCases: inputRootname = inputFilename.split('/')[-1].split('.')[0] if not currentText.find(inputRootname) == -1: if not currentText.find("SPHARM Models") == -1: if not outputRootname.find(inputRootname) == -1 and not outputRootname.find("SPHARM") == -1 and outputRootname.find("SPHARM_ellalign") == -1 and outputRootname.find("SPHARMMedialMesh") == -1 and outputRootname.find("SPHARM_procalign") == -1: actionOnCheckBox = True elif not currentText.find("SPHARM Ellipse Aligned Models") == -1: if not outputRootname.find(inputRootname) == -1 and not outputRootname.find("SPHARM_ellalign") == -1: actionOnCheckBox = True elif not currentText.find("SPHARM Medial Meshes") == -1: if not outputRootname.find(inputRootname) == -1 and not outputRootname.find("SPHARMMedialMesh") == -1: actionOnCheckBox = True elif not currentText.find("SPHARM Procrustes Aligned Models") == -1: if not outputRootname.find(inputRootname) == -1 and not outputRootname.find("SPHARM_procalign") == -1: actionOnCheckBox = True # check/uncheck the checkBox at (row,1) if actionOnCheckBox: widget = self.tableWidget_visualization.cellWidget(row, 1) tuple = widget.children() checkBox = tuple[1] checkBox.blockSignals(True) item = self.CheckableComboBox_visualization.model().item(currentIndex, 0) if item.checkState(): checkBox.setChecked(True) else: checkBox.setChecked(False) checkBox.blockSignals(False) # Function to update the checkboxes in the checkbable comboBox in the visualization tab according of the check of a checBox in the visualization tab def onCheckBoxTableValueChanged(self): self.CheckableComboBox_visualization.blockSignals(True) list = self.CheckableComboBox_visualization.model() table = self.tableWidget_visualization allSPHARMMesdialMeshesIndex = self.CheckableComboBox_visualization.findText("All SPHARM Medial Meshes") # If == -1 "All SPHARM Medial Meshes" checkBox doesn't exist allSPHARMProcrustesAlignedModelsIndex = self.CheckableComboBox_visualization.findText("All SPHARM Procrustes Aligned Models") # If == -1 "All SPHARM Procrustes Aligned Models" checkBox doesn't exist for i in range(len(self.Logic.InputCases)): allCaseSPHARMModelsChecked = True allCaseSPHARMEllalignModelsChecked = True allCaseSPHARMMedialMeshesChecked = True allCaseSPHARMProcrustesAlignedModelsChecked = True inputRootname = self.Logic.InputCases[i].split('/')[-1].split('.')[0] for row in range(0,table.rowCount): label = table.cellWidget(row, 0) outputRootname = label.text if not outputRootname.find(inputRootname) == -1: widget = table.cellWidget(row, 1) tuple = widget.children() checkBox = tuple[1] if not checkBox.checkState(): if not outputRootname.find("SPHARM") == -1 and outputRootname.find("SPHARM_ellalign") == -1 and outputRootname.find("SPHARMMedialMesh") == -1 and outputRootname.find("SPHARM_procalign") == -1: allCaseSPHARMModelsChecked = False if not outputRootname.find("SPHARM_ellalign") == -1: allCaseSPHARMEllalignModelsChecked = False if not allSPHARMMesdialMeshesIndex == -1: if not outputRootname.find("SPHARMMedialMesh") == -1: allCaseSPHARMMedialMeshesChecked = False if not allSPHARMProcrustesAlignedModelsIndex 
== -1: if not outputRootname.find("SPHARM_procalign") == -1: allCaseSPHARMProcrustesAlignedModelsChecked = False # Check/uncheck checbox case according of the checkbox in the table text = "Case " + str(i) + ": " + inputRootname + " - SPHARM Models" self.checkedCaseItem(text, allCaseSPHARMModelsChecked) text = "Case " + str(i) + ": " + inputRootname + " - SPHARM Ellipse Aligned Models" self.checkedCaseItem(text, allCaseSPHARMEllalignModelsChecked) if not allSPHARMMesdialMeshesIndex == -1: text = "Case " + str(i) + ": " + inputRootname + " - SPHARM Medial Meshes" self.checkedCaseItem(text, allCaseSPHARMMedialMeshesChecked) if not allSPHARMProcrustesAlignedModelsIndex == -1: text = "Case " + str(i) + ": " + inputRootname + " - SPHARM Procrustes Aligned Models" self.checkedCaseItem(text, allCaseSPHARMProcrustesAlignedModelsChecked) # Check/Uncheck the "All [..]" checkboxes in the checkacle comboBox self.checkedAllItems() self.CheckableComboBox_visualization.blockSignals(False) # Visualization of the SPHARM Mesh outputs in Shape Population Viewer def onSPHARMMeshesVisualizationInSPV(self): # Creation of a CSV file to load the vtk files in ShapePopulationViewer filePathCSV = slicer.app.temporaryPath + '/' + 'PreviewForVisualizationInSPV.csv' self.Logic.creationCSVFileForSPV(self.tableWidget_visualization, filePathCSV) if isinstance(slicer.modules.gaussianblurimagefilter, slicer.qSlicerCLIModule): # Creation of the parameters of SPV parameters = {} parameters["CSVFile"] = filePathCSV # If a binary of SPV has been installed if hasattr(slicer.modules, 'shapepopulationviewer'): SPV = slicer.modules.shapepopulationviewer # If SPV has been installed via the Extension Manager elif hasattr(slicer.modules, 'launcher'): SPV = slicer.modules.launcher # Launch SPV slicer.cli.run(SPV, None, parameters, wait_for_completion=True) else: # Load CSV and select modules slicer.modules.shapepopulationviewer.widgetRepresentation().loadCSVFile(filePathCSV) slicer.util.selectModule(slicer.modules.shapepopulationviewer) # Deletion of the CSV files in the Slicer temporary directory if os.path.exists(filePathCSV): os.remove(filePathCSV) # Function to fill the flip options table for all the SPHARM mesh outputs # - Column 0: filename of the input files # - Column 1: comboBox with the flip corresponding to the output file def fillTableForFlipOptions(self): table = self.tableWidget_ChoiceOfFlip row = 0 for basename in self.Logic.InputCases: table.setRowCount(row + 1) # Column 0: rootname = basename.split('/')[-1].split('.')[0] labelVTKFile = qt.QLabel(rootname) labelVTKFile.setAlignment(0x84) table.setCellWidget(row, 0, labelVTKFile) # Column 1: widget = qt.QWidget() layout = qt.QHBoxLayout(widget) comboBox = qt.QComboBox() comboBox.addItems(['No Flip', 'Flip Along Axis of x and y', 'Flip Along Axis of y and z', 'Flip Along Axis of x and z', 'Flip Along Axis of x', 'Flip Along Axis of y', 'Flip Along Axis of x, y and z', 'Flip Along Axis of z', 'All']) comboBox.setCurrentIndex(self.choiceOfFlip.currentIndex) layout.addWidget(comboBox) layout.setAlignment(0x84) layout.setContentsMargins(0, 0, 0, 0) widget.setLayout(layout) table.setCellWidget(row, 1, widget) row = row + 1 # Function to configure the checkable comboBox and the table of the visualization tab def configurationVisualization(self): # Configuration of the checkable comboBox checkableComboBox = self.CheckableComboBox_visualization # clean the checkable comboBox list = checkableComboBox.model() list.clear() # add items according of the SPHARM Mesh computed by 
ParaToSPHARMMesh checkableComboBox.blockSignals(True) checkableComboBox.addItem("All Models") checkableComboBox.addItem("All SPHARM Models") checkableComboBox.addItem("All SPHARM Ellipse Aligned Models") if self.medialMesh.checkState(): checkableComboBox.addItem("All SPHARM Medial Meshes") if self.useRegTemplate.checkState(): checkableComboBox.addItem("All SPHARM Procrustes Aligned Models") # Fill the checkable comboBox for i in range(len(self.Logic.InputCases)): checkableComboBox.addItem("Case " + str(i) + ": " + self.Logic.InputCases[i].split('/')[-1].split('.')[0] + " - SPHARM Models") checkableComboBox.addItem("Case " + str(i) + ": " + self.Logic.InputCases[i].split('/')[-1].split('.')[0] + " - SPHARM Ellipse Aligned Models") if self.medialMesh.checkState(): checkableComboBox.addItem("Case " + str(i) + ": " + self.Logic.InputCases[i].split('/')[-1].split('.')[0] + " - SPHARM Medial Meshes") if self.useRegTemplate.checkState(): checkableComboBox.addItem("Case " + str(i) + ": " + self.Logic.InputCases[i].split('/')[-1].split('.')[0] + " - SPHARM Procrustes Aligned Models") checkableComboBox.blockSignals(False) # Configuration of the table # column 0: filename of the SPHARM Meshes generated by ParaToSPHARMMesh # column 1: checkbox that allows to the user to select what output he wants to display in Shape Population Viewer table = self.tableWidget_visualization outputDirectory = self.GroupProjectOutputDirectory.directory SPHARMMeshOutputDirectory = outputDirectory + "/Step3_ParaToSPHARMMesh/" row = 0 for filename in os.listdir(SPHARMMeshOutputDirectory): if filename.endswith(".vtk") and not filename.endswith("_para.vtk") and not filename.endswith("SPHARMMedialAxis.vtk"): table.setRowCount(row + 1) # Column 0: labelVTKFile = qt.QLabel(os.path.splitext(filename)[0]) labelVTKFile.setAlignment(0x84) table.setCellWidget(row, 0, labelVTKFile) # Column 1: widget = qt.QWidget() layout = qt.QHBoxLayout(widget) checkBox = qt.QCheckBox() layout.addWidget(checkBox) layout.setAlignment(0x84) layout.setContentsMargins(0, 0, 0, 0) widget.setLayout(layout) table.setCellWidget(row, 1, widget) checkBox.connect('stateChanged(int)', self.onCheckBoxTableValueChanged) row = row + 1 # Functions to update the checkable comboBox in the visualization tab # Check/Uncheck checkBoxes with the label 'text' def checkedItems(self, text, checkState): list = self.CheckableComboBox_visualization.model() for i in range(1, list.rowCount()): item = list.item(i, 0) if not item.text().find(text) == -1: item.setCheckState(checkState) # Check/Uncheck "All [..]" checkBoxes in the checkable comboBox def checkedAllItems(self): list = self.CheckableComboBox_visualization.model() allIndex = self.CheckableComboBox_visualization.findText("All Models") allItem = list.item(allIndex, 0) allSPHARMIndex = self.CheckableComboBox_visualization.findText("All SPHARM Models") allSPHARMItem = list.item(allSPHARMIndex, 0) allSPHARMEllalignIndex = self.CheckableComboBox_visualization.findText("All SPHARM Ellipse Aligned Models") allSPHARMEllalignItem = list.item(allSPHARMEllalignIndex, 0) allSPHARMMesdialMeshesIndex = self.CheckableComboBox_visualization.findText("All SPHARM Medial Meshes") if not allSPHARMMesdialMeshesIndex == -1: allSPHARMMesdialMeshesItem = list.item(allSPHARMMesdialMeshesIndex, 0) allSPHARMProcrustesAlignedModelsIndex = self.CheckableComboBox_visualization.findText("All SPHARM Procrustes Aligned Models") if not allSPHARMProcrustesAlignedModelsIndex == -1: allSPHARMProcrustesAlignedModelsItem = 
list.item(allSPHARMProcrustesAlignedModelsIndex, 0) # Check/Uncheck "All SPHARM Models" checkBox self.checkedAllItem("- SPHARM Models", allSPHARMItem) # Check/Uncheck "All SPHARM Ellipse Aligned Models" checkBox self.checkedAllItem("- SPHARM Ellipse Aligned Models", allSPHARMEllalignItem) # Check/Uncheck "All SPHARM Medial Mesh" checkBox if not allSPHARMMesdialMeshesIndex == -1: self.checkedAllItem("- SPHARM Medial Meshes", allSPHARMMesdialMeshesItem) # Check/Uncheck "All SPHARM Procrustes Aligned Models" checkBox if not allSPHARMProcrustesAlignedModelsIndex == -1: self.checkedAllItem("- SPHARM Procrustes Aligned Models", allSPHARMProcrustesAlignedModelsItem) # Check/Uncheck "All Models" checkBox if allSPHARMEllalignItem.checkState() and allSPHARMItem.checkState(): if allSPHARMMesdialMeshesIndex == -1 and allSPHARMProcrustesAlignedModelsIndex == -1: allItem.setCheckState(qt.Qt.Checked) return elif not allSPHARMMesdialMeshesIndex == -1 and not allSPHARMProcrustesAlignedModelsIndex == -1: if allSPHARMMesdialMeshesItem.checkState() and allSPHARMProcrustesAlignedModelsItem.checkState(): allItem.setCheckState(qt.Qt.Checked) return elif not allSPHARMMesdialMeshesIndex == -1 and allSPHARMProcrustesAlignedModelsIndex == -1: if allSPHARMMesdialMeshesItem.checkState(): allItem.setCheckState(qt.Qt.Checked) return elif allSPHARMMesdialMeshesIndex == -1 and not allSPHARMProcrustesAlignedModelsIndex == -1: if allSPHARMProcrustesAlignedModelsItem.checkState(): allItem.setCheckState(qt.Qt.Checked) return allItem.setCheckState(qt.Qt.Unchecked) # Check/Uncheck "Case i: case_name - SPHARM [..]" checkBox in the checkable comboBox def checkedCaseItem(self, text, doCheck): list = self.CheckableComboBox_visualization.model() item = list.findItems(text)[0] if doCheck: item.setCheckState(qt.Qt.Checked) else: item.setCheckState(qt.Qt.Unchecked) # Check/Uncheck "All [..]" (except "All Models") checkBox in the checkable comboBox def checkedAllItem(self, text, item): if self.areAllCasesChecked(text): item.setCheckState(qt.Qt.Checked) else: item.setCheckState(qt.Qt.Unchecked) # Specify if all the "Case i: case_name - SPHARM [..]" checkBoxes of one type of Model are checked def areAllCasesChecked(self, text): list = self.CheckableComboBox_visualization.model() isChecked = True for i in range(3, list.rowCount()): item = list.item(i, 0) if not item.text().find(text) == -1: if not item.checkState(): isChecked = False return isChecked def clearFlipOptionsTable(self): table = self.tableWidget_ChoiceOfFlip table.clear() table.setColumnCount(2) table.setHorizontalHeaderLabels([' Files ', ' Choice of Flip ']) table.setColumnWidth(0, 400) horizontalHeader = table.horizontalHeader() horizontalHeader.setStretchLastSection(False) _setSectionResizeMode(horizontalHeader, 0, qt.QHeaderView.Stretch) _setSectionResizeMode(horizontalHeader, 1, qt.QHeaderView.ResizeToContents) table.verticalHeader().setVisible(False) # # ShapeAnalysisModuleParameters # class ShapeAnalysisModuleParameters(object): def __init__(self): # self.waitForCompletion = False # Group Project IO self.inputDirectory = " " self.outputDirectory = " " self.debug = False # Post Processed Segmentation self.OverwriteSegPostProcess = False self.RescaleSegPostProcess = True self.sx = 0.5 self.sy = 0.5 self.sz = 0.5 self.labelNumber = 0 # Generate Mesh Parameters self.OverwriteGenParaMesh = False self.NumberofIterations = 1000 # Parameters to SPHARM Mesh self.OverwriteParaToSPHARMMesh = False self.SubdivLevelValue = 10 self.SPHARMDegreeValue = 15 self.thetaIterationValue = 100 
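# checkedAllItems / areAllCasesChecked above keep the "All ..." entries of the
# checkable combo box in sync with the per-case entries: an "All" entry should be
# checked exactly when every "Case i: ... - <model type>" entry of that model type is
# checked. A pure-Python sketch of that aggregation; the labels and check states
# below are illustrative sample data modeled on the labels the module builds.
def all_cases_checked(check_states, suffix):
    """check_states: dict mapping combo-box label -> bool. True if every per-case
    label containing `suffix` is checked (mirrors areAllCasesChecked)."""
    per_case = [checked for label, checked in check_states.items()
                if label.startswith("Case ") and suffix in label]
    return all(per_case)

if __name__ == "__main__":
    states = {
        "Case 0: groA_01 - SPHARM Models": True,
        "Case 1: groA_02 - SPHARM Models": True,
        "Case 0: groA_01 - SPHARM Ellipse Aligned Models": True,
        "Case 1: groA_02 - SPHARM Ellipse Aligned Models": False,
    }
    assert all_cases_checked(states, "- SPHARM Models") is True
    assert all_cases_checked(states, "- SPHARM Ellipse Aligned Models") is False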
self.phiIterationValue = 100 self.medialMesh = False self.tableWidget_ChoiceOfFlip = None # Advanced Post Processed Segmentation self.GaussianFiltering = False self.VarianceX = 10 self.VarianceY = 10 self.VarianceZ = 10 # Advanced Parameters to SPHARM Mesh self.useRegTemplate = False self.regTemplate = " " self.useFlipTemplate = False self.flipTemplate = " " self.choiceOfFlip = 0 self.sameFlipForAll = True # RigidAlignment Parameters self.rigidAlignmentEnabled = False self.fiducialsDirectory = " " def setWaitForCompletion(self, bool): self.waitForCompletion = bool def setInputDirectory(self, path): self.inputDirectory = path def setOutputDirectory(self, path): self.outputDirectory = path def setDebug(self, bool): self.debug = bool def setOverwriteSegPostProcess(self, bool): self.OverwriteSegPostProcess = bool def setRescaleSegPostProcess(self, bool): self.RescaleSegPostProcess = bool def setSx(self, value): self.sx = value def setSy(self, value): self.sy = value def setSz(self, value): self.sz = value def setLabelNumber(self, value): self.labelNumber = value def setOverwriteGenParaMesh(self, bool): self.OverwriteGenParaMesh = bool def setNumberofIterations(self, value): self.NumberofIterations = value def setOverwriteParaToSPHARMMesh(self, bool): self.OverwriteParaToSPHARMMesh = bool def setSubdivLevelValue(self, value): self.SubdivLevelValue = value def setSPHARMDegreeValue(self, value): self.SPHARMDegreeValue = value def setThetaIterationValue(self, value): self.thetaIterationValue = value def setPhiIterationValue(self, value): self.phiIterationValue = value def setMedialMesh(self, bool): self.medialMesh = bool def setTableForChoiceOfFlip(self, table): self.tableWidget_ChoiceOfFlip = table def setGaussianFiltering(self, bool): self.GaussianFiltering = bool def setVarianceX(self, value): self.VarianceX = value def setVarianceY(self, value): self.VarianceY = value def setVarianceZ(self, value): self.VarianceZ = value def setUseRegTemplate(self, bool): self.useRegTemplate = bool def setRegTemplate(self, path): self.regTemplate = path def setUseFlipTemplate(self, bool): self.useFlipTemplate = bool def setFlipTemplate(self, path): self.flipTemplate = path def setChoiceOfFlip(self, value): self.choiceOfFlip = value def setSameFlipForAll(self, bool): self.sameFlipForAll = bool def setFiducialsDirectory(self, directory): self.fiducialsDirectory = directory def setRigidAlignmentEnabled(self, enabled): self.rigidAlignmentEnabled = enabled # # ShapeAnalysisModuleLogic # class ShapeAnalysisModuleLogic(LogicMixin): """ Uses ScriptedLoadableModuleLogic base class, available at: https://github.com/Slicer/Slicer/blob/master/Base/Python/slicer/ScriptedLoadableModule.py """ def __init__(self): LogicMixin.__init__(self, "ShapeAnalysisModule") self.parameters = ShapeAnalysisModuleParameters() def ShapeAnalysisCases(self): # No cases if not len(self.InputCases) > 0: inputDirectory = self.parameters.inputDirectory self.ErrorMessage = "No cases found in " + inputDirectory self.Node.SetStatus(self.Node.CompletedWithErrors) return -1 # Create pipelines else: logging.info('%d case(s) found', len(self.InputCases)) # Init for i in range(len(self.InputCases)): self.completed[i] = False self.pipeline[i] = ShapeAnalysisModulePipeline(i, self.InputCases[i], self.parameters) self.addObserver(self.pipeline[i].Node, slicer.vtkMRMLCommandLineModuleNode().StatusModifiedEvent, self.onPipelineModified) # Logic ready self.Node.SetStatus(self.Node.Running) # Launch Workflow self.startPipeline(0) return 0 # Empty the output 
folders if the overwrite option is checked def cleanOutputFolders(self): outputDirectory = self.parameters.outputDirectory if self.parameters.OverwriteSegPostProcess: PostProcessOutputDirectory = outputDirectory + "/Step1_SegPostProcess" if os.path.exists(PostProcessOutputDirectory): for filename in os.listdir(PostProcessOutputDirectory): os.remove(os.path.join(PostProcessOutputDirectory, filename)) if self.parameters.OverwriteGenParaMesh: GenParaMeshOutputDirectory = outputDirectory + "/Step2_GenParaMesh" if os.path.exists(GenParaMeshOutputDirectory): for filename in os.listdir(GenParaMeshOutputDirectory): os.remove(os.path.join(GenParaMeshOutputDirectory, filename)) if self.parameters.OverwriteParaToSPHARMMesh: SPHARMMeshOutputDirectory = outputDirectory + "/Step3_ParaToSPHARMMesh" if os.path.exists(SPHARMMeshOutputDirectory): for filename in os.listdir(SPHARMMeshOutputDirectory): os.remove(os.path.join(SPHARMMeshOutputDirectory, filename)) # Function to create a CSV file containing all the SPHARM mesh output files # that the user wants to display in ShapePopultaionViewer def creationCSVFileForSPV(self, table, filepathCSV): # Creation of a CSV file with a header 'VTK Files' file = open(filepathCSV, 'w') cw = csv.writer(file, delimiter=',') cw.writerow(['VTK Files']) # Add the filepath of the vtk file checked in the table outputDirectory = self.parameters.outputDirectory SPHARMMeshOutputDirectory = outputDirectory + "/Step3_ParaToSPHARMMesh/" # Add the path of the vtk files if the users selected it for row in range(0, table.rowCount): # check the checkBox widget = table.cellWidget(row, 1) tuple = widget.children() checkBox = tuple[1] if checkBox.isChecked(): # Recovery of the vtk filename qlabel = table.cellWidget(row, 0) vtkRootname = qlabel.text VTKfilepath = SPHARMMeshOutputDirectory + vtkRootname + ".vtk" if os.path.exists(VTKfilepath): cw.writerow([VTKfilepath]) file.close() # Function to conditionally invoke RigidAlignment module to improve correspondence def improveCorrespondence(self): if self.parameters.rigidAlignmentEnabled: logging.info("Invoking RigidAlignment...") fidsDir = Path(self.parameters.fiducialsDirectory) outDir = Path(self.parameters.outputDirectory) inDir = outDir / 'Step3_ParaToSPHARMMesh' outModelsDir = outDir / 'Step4_Improvement' / 'models' outSphereDir = outDir / 'Step4_Improvement' / 'sphere' os.makedirs(outModelsDir, exist_ok=True) os.makedirs(outSphereDir, exist_ok=True) models = inDir.glob('*_pp_surf_SPHARM.vtk') fiducials = fidsDir.glob('*_fid.fcsv') unitSphere = next(inDir.glob('*_surf_para.vtk')) logic = RigidAlignmentModuleLogic() logic.run( models=models, fiducials=fiducials, unitSphere=unitSphere, outModelsDir=outModelsDir, outSphereDir=outSphereDir, ) else: logging.info("RigidAlignment not enabled; Skipping.") # # ShapeAnalysisModulePipeline # class ShapeAnalysisModulePipeline(PipelineMixin): def __init__(self, pipelineID, CaseInput, interface): PipelineMixin.__init__(self, pipelineID, CaseInput, interface) self.interface = interface def setupSkipCLIs(self): self.skip_meshToLabelMap = False self.skip_segPostProcess = False self.skip_genParaMesh = False self.skip_paraToSPHARMMesh = False outputDirectory = self.interface.outputDirectory # Skip MeshToLabelMap? 
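# creationCSVFileForSPV above writes a one-column CSV with a 'VTK Files' header
# followed by the path of every checked SPHARM output, which Shape Population Viewer
# then loads. A standalone sketch of that writer; the directory and file names in the
# demo are illustrative placeholders.
import csv
import os

def write_spv_csv(vtk_paths, csv_path):
    # Only paths that actually exist on disk are written, mirroring the
    # os.path.exists guard in creationCSVFileForSPV.
    with open(csv_path, 'w') as f:
        writer = csv.writer(f, delimiter=',')
        writer.writerow(['VTK Files'])
        for path in vtk_paths:
            if os.path.exists(path):
                writer.writerow([path])

if __name__ == "__main__":
    import tempfile
    tmpdir = tempfile.mkdtemp()
    existing = os.path.join(tmpdir, "case01_pp_surf_SPHARM.vtk")
    open(existing, 'w').close()
    out_csv = os.path.join(tmpdir, "PreviewForVisualizationInSPV.csv")
    write_spv_csv([existing, os.path.join(tmpdir, "missing.vtk")], out_csv)
    with open(out_csv) as f:
        rows = [r for r in csv.reader(f) if r]
        assert rows[0] == ['VTK Files'] and rows[1] == [existing]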
if not self.inputExtension == "vtk" and not self.inputExtension == "vtp": self.skip_meshToLabelMap = True else: MeshToLabelMapOutputDirectory = outputDirectory + "/Step0_MeshToLabelMap" MeshToLabelMapOutputFilepath = MeshToLabelMapOutputDirectory + "/" + self.inputRootname + ".nrrd" if os.path.exists(MeshToLabelMapOutputFilepath): self.inputExtension = "nrrd" self.skip_meshToLabelMap = True # If MeshToLabelMap is not skipped, do not skip the next CLIs: SegPostProcess, GenParaMesh and ParaToSPHARMMesh if self.skip_meshToLabelMap == False: return # Skip SegPostProcess ? if not self.interface.OverwriteSegPostProcess: PostProcessOutputDirectory = outputDirectory + "/Step1_SegPostProcess" PostProcessOutputFilepath = PostProcessOutputDirectory + "/" + self.inputRootname + "_pp.nrrd" if os.path.exists(PostProcessOutputFilepath): self.skip_segPostProcess = True # If SegPostProcess is not skip, do not skip the next CLIs: GenParaMesh and ParaToSPHARMMesh if self.skip_segPostProcess == False: return # Skip GenParaMesh ? if not self.interface.OverwriteGenParaMesh: GenParaMeshOutputDirectory = outputDirectory + "/Step2_GenParaMesh" ParaOutputFilepath = GenParaMeshOutputDirectory + "/" + self.inputRootname + "_pp_para.vtk" SurfOutputFilepath = GenParaMeshOutputDirectory + "/" + self.inputRootname + "_pp_surf.vtk" if os.path.exists(ParaOutputFilepath) and os.path.exists(SurfOutputFilepath): self.skip_genParaMesh = True # If GenParaMesh is not skipped, do not skip the next CLI: ParaToSPHARMMesh if self.skip_genParaMesh == False: return # Skip ParaToSPHARMMesh ? if not self.interface.OverwriteParaToSPHARMMesh: SPHARMMeshOutputDirectory = outputDirectory + "/Step3_ParaToSPHARMMesh" SPHARMMeshRootname = self.inputRootname + "_pp_surf" if os.path.exists(SPHARMMeshOutputDirectory): for file in os.listdir(SPHARMMeshOutputDirectory): if not file.find(SPHARMMeshRootname) == -1: self.skip_paraToSPHARMMesh = True def setup(self): # Initialization of global variables self.setupGlobalVariables() self.setupSkipCLIs() inputDirectory = self.interface.inputDirectory outputDirectory = self.interface.outputDirectory ## Mesh To Label Map: Transform model in label map cli_nodes = list() # list of the nodes used in the Mesh to Label Map step cli_dirnames = list() # list of the directory pathes where the nodes used in the Mesh to Label Map step are stored MeshToLabelMapOutputDirectory = outputDirectory + "/Step0_MeshToLabelMap" MeshToLabelMapOutputFilename = self.inputRootname + ".nrrd" MeshToLabelMapOutputFilepath = os.path.join(MeshToLabelMapOutputDirectory, MeshToLabelMapOutputFilename) if not self.skip_meshToLabelMap: # Setup of the parameters of the CLI self.ID += 1 cli_parameters = {} model_input_node = MRMLUtility.loadMRMLNode(self.inputRootname, inputDirectory, self.CaseInput, 'ModelFile') cli_parameters["mesh"] = model_input_node meshtolabelmap_output_node = MRMLUtility.createNewMRMLNode(self.inputRootname, slicer.vtkMRMLLabelMapVolumeNode()) cli_parameters["labelMap"] = meshtolabelmap_output_node cli_parameters["spacingVec"] = "0.1,0.1,0.1" self.inputExtension = "nrrd" self.setupModule(slicer.modules.meshtolabelmap, cli_parameters) # Setup of the nodes created by the CLI # Creation of a folder in the output folder : LabelMap if not os.path.exists(MeshToLabelMapOutputDirectory): os.makedirs(MeshToLabelMapOutputDirectory) cli_nodes.append(model_input_node) cli_nodes.append(meshtolabelmap_output_node) cli_dirnames.append(inputDirectory) cli_dirnames.append(MeshToLabelMapOutputDirectory) self.setupNode(0, cli_nodes, 
cli_dirnames, [False, True], [True, True]) else: if os.path.exists(MeshToLabelMapOutputFilepath): # Setup of the nodes which will be used by the next CLI meshtolabelmap_output_node = MRMLUtility.loadMRMLNode(self.inputRootname, MeshToLabelMapOutputDirectory, MeshToLabelMapOutputFilename, 'LabelMap') cli_nodes.append(meshtolabelmap_output_node) cli_dirnames.append(MeshToLabelMapOutputDirectory) self.setupNode(0, cli_nodes, cli_dirnames, [False], [True]) ## Post Processed Segmentation cli_nodes = list() # list of the nodes used in the Post Processed Segmentation step cli_dirnames = list() # list of the directory pathes where the nodes used in the Post Processed Segmentation step are stored PostProcessOutputDirectory = outputDirectory + "/Step1_SegPostProcess" PostProcessOutputRootname = self.inputRootname + "_pp" PostProcessOutputFilename = self.inputRootname + "_pp.nrrd" if not self.skip_segPostProcess: # Setup of the parameters of the CLI self.ID += 1 cli_parameters = {} # IF Mesh To Label Map has been skipped AND the input given was already a label map if self.skip_meshToLabelMap and not os.path.exists(MeshToLabelMapOutputFilepath): PossProcessInputDirectory = inputDirectory labelmap_input_node = MRMLUtility.loadMRMLNode(self.inputRootname, inputDirectory, self.CaseInput, 'LabelMap') # ELSE the input given was a model which has been transformed by MeshToLabelMap and store in the folder LabelMap else: labelmap_input_node = meshtolabelmap_output_node PossProcessInputDirectory = MeshToLabelMapOutputDirectory cli_parameters["fileName"] = labelmap_input_node pp_output_node = MRMLUtility.createNewMRMLNode(PostProcessOutputRootname, slicer.vtkMRMLLabelMapVolumeNode()) cli_parameters["outfileName"] = pp_output_node.GetID() if self.interface.RescaleSegPostProcess: cli_parameters["scaleOn"] = True cli_parameters["spacing_vect"] = str(self.interface.sx) + "," + str(self.interface.sy) + "," + str(self.interface.sz) cli_parameters["label"] = self.interface.labelNumber if self.interface.debug: cli_parameters["debug"] = True # Advanced parameters if self.interface.GaussianFiltering: cli_parameters["gaussianOn"] = True cli_parameters["variance_vect"] = str(self.interface.VarianceX) + "," + str(self.interface.VarianceY) + "," + str(self.interface.VarianceZ) self.setupModule(slicer.modules.segpostprocessclp, cli_parameters) # Setup of the nodes created by the CLI # Creation of a folder in the output folder : Step1_SegPostProcess if not os.path.exists(PostProcessOutputDirectory): os.makedirs(PostProcessOutputDirectory) cli_nodes.append(labelmap_input_node) cli_nodes.append(pp_output_node) cli_dirnames.append(PossProcessInputDirectory) cli_dirnames.append(PostProcessOutputDirectory) self.setupNode(1, cli_nodes, cli_dirnames, [False,True], [True,True]) else: # Setup of the nodes which will be used by the next CLI pp_output_node = MRMLUtility.loadMRMLNode(PostProcessOutputRootname, PostProcessOutputDirectory, PostProcessOutputFilename, 'LabelMap') cli_nodes.append(pp_output_node) cli_dirnames.append(PostProcessOutputDirectory) self.setupNode(1, cli_nodes, cli_dirnames, [False], [True]) ## Generate Mesh Parameters cli_nodes = list() # list of the nodes used in the Generate Mesh Parameters step cli_dirnames = list() # list of the directory pathes where the nodes used in the Generate Mesh Parameters step are stored GenParaMeshOutputDirectory = outputDirectory + "/Step2_GenParaMesh" GenParaMeshOutputParaRootname = PostProcessOutputRootname + "_para" GenParaMeshOutputSurfRootname = PostProcessOutputRootname + 
"_surf" GenParaMeshOutputParaFilename = PostProcessOutputRootname + "_para.vtk" GenParaMeshOutputSurfFilename = PostProcessOutputRootname + "_surf.vtk" if not self.skip_genParaMesh: # Setup of the parameters of the CLI self.ID += 1 cli_parameters = {} cli_parameters["infile"] = pp_output_node para_output_model = MRMLUtility.createNewMRMLNode(GenParaMeshOutputParaRootname, slicer.vtkMRMLModelNode()) cli_parameters["outParaName"] = para_output_model surfmesh_output_model = MRMLUtility.createNewMRMLNode(GenParaMeshOutputSurfRootname, slicer.vtkMRMLModelNode()) cli_parameters["outSurfName"] = surfmesh_output_model cli_parameters["numIterations"] = self.interface.NumberofIterations if self.interface.debug: cli_parameters["debug"] = True self.setupModule(slicer.modules.genparameshclp, cli_parameters) # Setup of the nodes created by the CLI # Creation of a folder in the output folder : Step2_GenParaMesh if not os.path.exists(GenParaMeshOutputDirectory): os.makedirs(GenParaMeshOutputDirectory) cli_nodes.append(para_output_model) cli_nodes.append(surfmesh_output_model) cli_dirnames.append(GenParaMeshOutputDirectory) cli_dirnames.append(GenParaMeshOutputDirectory) self.setupNode(2, cli_nodes, cli_dirnames, [True,True], [True,True]) else: # Setup of the nodes which will be used by the next CLI para_output_model = MRMLUtility.loadMRMLNode(GenParaMeshOutputParaRootname, GenParaMeshOutputDirectory, GenParaMeshOutputParaFilename, 'ModelFile') surfmesh_output_model = MRMLUtility.loadMRMLNode(GenParaMeshOutputSurfRootname, GenParaMeshOutputDirectory, GenParaMeshOutputSurfFilename, 'ModelFile') cli_nodes.append(para_output_model) cli_nodes.append(surfmesh_output_model) cli_dirnames.append(GenParaMeshOutputDirectory) cli_dirnames.append(GenParaMeshOutputDirectory) self.setupNode(2, cli_nodes, cli_dirnames, [False, False], [True, True]) ## Parameters to SPHARM Mesh cli_nodes = list() # list of the nodes used in the Parameters To SPHARM Mesh step cli_dirnames = list() # list of the directory pathes where the nodes used in the Parameters To SPHARM Mesh step are stored SPHARMMeshOutputDirectory = outputDirectory + "/Step3_ParaToSPHARMMesh" if not self.skip_paraToSPHARMMesh: # Search of the flip to apply: # 1 = flip along axes of x &amp; y, # 2 = flip along y &amp; z, # 3 = flip along x &amp; z # 4 = flip along x, # 5 = flip along y, # 6 = flip along x &amp; y &amp; z, # 7 = flip along z where y is the smallest, x is the second smallest and z is the long axis of the ellipsoid # 8 = All the flips if not self.interface.sameFlipForAll: # Recovery of the flip chosen by the user row = self.pipelineID widget = self.interface.tableWidget_ChoiceOfFlip.cellWidget(row, 1) tuple = widget.children() comboBox = qt.QComboBox() comboBox = tuple[1] flipIndexToApply = comboBox.currentIndex pass else: flipIndexToApply = self.interface.choiceOfFlip # Only one flip to apply if flipIndexToApply < 8: L = [1] # All the flips to apply else: L = list(range(1,8)) for i in L: # Setup of the parameters of the CLI self.ID += 1 cli_parameters = {} cli_parameters["inParaFile"] = para_output_model cli_parameters["inSurfFile"] = surfmesh_output_model # Creation of a folder in the output folder : Step3_ParaToSPHARMMesh if not os.path.exists(SPHARMMeshOutputDirectory): os.makedirs(SPHARMMeshOutputDirectory) if flipIndexToApply < 8: SPHARMMeshRootname = SPHARMMeshOutputDirectory + "/" + GenParaMeshOutputSurfRootname cli_parameters["outbase"] = SPHARMMeshRootname # For each flip creation of an output filename else: flipName = ['AlongXY', 'AlongYZ', 
'AlongXZ', 'AlongX', 'AlongY', 'AlongXYZ', 'AlongZ'] SPHARMMeshRootname = SPHARMMeshOutputDirectory + "/" + self.inputRootname + "_flip" + flipName[i - 1] + "_pp_surf" cli_parameters["outbase"] = SPHARMMeshRootname cli_parameters["subdivLevel"] = self.interface.SubdivLevelValue cli_parameters["spharmDegree"] = self.interface.SPHARMDegreeValue cli_parameters["thetaIteration"] = self.interface.thetaIterationValue cli_parameters["phiIteration"] = self.interface.phiIterationValue if self.interface.medialMesh: cli_parameters["medialMesh"] = True if self.interface.debug: cli_parameters["debug"] = True # Advanced parameters if self.interface.useRegTemplate: cli_parameters["regTemplateFileOn"] = True regtemplate_filepath = self.interface.regTemplate regtemplate_dir = os.path.split(regtemplate_filepath)[0] regtemplate_rootname = os.path.split(regtemplate_filepath)[1].split(".")[0] regtemplate_filename = os.path.split(regtemplate_filepath)[1] regtemplate_model = MRMLUtility.loadMRMLNode(regtemplate_rootname, regtemplate_dir, regtemplate_filename, 'ModelFile') cli_parameters["regTemplateFile"] = regtemplate_model cli_nodes.append(regtemplate_model) cli_dirnames.append(regtemplate_filepath) self.setupNode(i + 2, cli_nodes, cli_dirnames, [False], [True]) if self.interface.useFlipTemplate: cli_parameters["flipTemplateFileOn"] = True cli_parameters["flipTemplateFile"] = self.interface.flipTemplate if flipIndexToApply < 8: cli_parameters["finalFlipIndex"] = flipIndexToApply else: cli_parameters["finalFlipIndex"] = i self.setupModule(slicer.modules.paratospharmmeshclp, cli_parameters) class ShapeAnalysisModuleWrapper(object): """ This class should be called from an external python script to run the SPHARM-PDM method on multiple cases using SlicerSALT or 3D Slicer. The external python script (e.g. SPHARM-PDM.py) should do the following: from ShapeAnalysisModule import ShapeAnalysisModuleWrapper from ConfigParser import SafeConfigParser parser = SafeConfigParser() parser.read(sys.argv[1]) #argv[1]: 'path/to/SPHARM-PDM-parameters.ini' inputDirectoryPath = parser.get('section', 'input-directory-path') [...] 
ShapeAnalysisModuleInstance = ShapeAnalysisModuleWrapper(inputDirectoryPath, outputDirectoryPath, [...]) ShapeAnalysisModuleInstance.startProcessing() The external python script can be run non-interactively using this command: ./SlicerSalt --no-main-window --python-script /path/to/SPHARM-PDM.py path/to/SPHARM-PDM-parameters.ini """ def __init__(self, inputDirectoryPath, outputDirectoryPath, RescaleSegPostProcess, sx, sy, sz, labelNumber, GaussianFiltering, VarianceX, VarianceY, VarianceZ, numberofIterations, SubdivLevelValue, SPHARMDegreeValue, medialMesh, thetaIterationValue, phiIterationValue, useRegTemplate, regTemplate, useFlipTemplate, flipTemplate, choiceOfFlip): self.Logic = ShapeAnalysisModuleLogic() self.Logic.parameters.setWaitForCompletion(True) self.Logic.parameters.setInputDirectory(inputDirectoryPath) self.Logic.parameters.setOutputDirectory(outputDirectoryPath) self.Logic.parameters.setRescaleSegPostProcess(RescaleSegPostProcess) self.Logic.parameters.setSx(sx) self.Logic.parameters.setSy(sy) self.Logic.parameters.setSz(sz) self.Logic.parameters.setLabelNumber(labelNumber) self.Logic.parameters.setGaussianFiltering(GaussianFiltering) self.Logic.parameters.setVarianceX(VarianceX) self.Logic.parameters.setVarianceY(VarianceY) self.Logic.parameters.setVarianceZ(VarianceZ) self.Logic.parameters.setNumberofIterations(numberofIterations) self.Logic.parameters.setSubdivLevelValue(SubdivLevelValue) self.Logic.parameters.setSPHARMDegreeValue(SPHARMDegreeValue) self.Logic.parameters.setMedialMesh(medialMesh) self.Logic.parameters.setThetaIterationValue(thetaIterationValue) self.Logic.parameters.setPhiIterationValue(phiIterationValue) self.Logic.parameters.setUseRegTemplate(useRegTemplate) self.Logic.parameters.setRegTemplate(regTemplate) self.Logic.parameters.setUseFlipTemplate(useFlipTemplate) self.Logic.parameters.setFlipTemplate(flipTemplate) self.Logic.parameters.setChoiceOfFlip(choiceOfFlip) def startProcessing(self): # Setup the inputCases # Possible extensions exts = [".gipl", ".gipl.gz", ".mgh", ".mgh.gz", ".nii", ".nii.gz",".nrrd", ".vtk", ".vtp", ".hdr", ".mhd"] # Search cases and add the filename to a list self.Logic.InputCases = [] for file in os.listdir(self.Logic.parameters.inputDirectory): for ext in exts: if file.endswith(ext): self.Logic.InputCases.append(file) self.Logic.ShapeAnalysisCases() class ShapeAnalysisModuleTest(ScriptedLoadableModuleTest): """ This is the test case for your scripted module. 
Uses ScriptedLoadableModuleTest base class, available at: https://github.com/Slicer/Slicer/blob/master/Base/Python/slicer/ScriptedLoadableModule.py """ def setUp(self): slicer.mrmlScene.Clear(0) self.inputRootnames = list() def runTest(self): self.setUp() self.delayDisplay('Starting the tests') self.test_ShapeAnalysisModule_completedWithoutErrors() def test_ShapeAnalysisModule_completedWithoutErrors(self): self.delayDisplay('Test 1: Run Shape Analysis Module') self.Logic = ShapeAnalysisModuleLogic() # Creation of input folder inputDirectoryPath = slicer.app.temporaryPath + '/InputShapeAnalysisModule' if not os.path.exists(inputDirectoryPath): os.makedirs(inputDirectoryPath) # Download the label map in the input folder input_downloads = ( ('https://data.kitware.com/api/v1/file/59945eb38d777f7d33e9c3c4/download', 'InputImage.gipl'), ) for i in range(len(input_downloads)): self.inputRootnames.append(input_downloads[i][1].split(".")[0]) self.download_files(inputDirectoryPath, input_downloads) # Creation of output folder outputDirectoryPath = slicer.app.temporaryPath + '/OutputShapeAnalysisModule' if not os.path.exists(outputDirectoryPath): os.makedirs(outputDirectoryPath) # Creation of a template folder templateDirectoryPath = slicer.app.temporaryPath + '/TemplateShapeAnalysisModule' if not os.path.exists(templateDirectoryPath): os.makedirs(templateDirectoryPath) else: for filename in os.listdir(templateDirectoryPath): os.remove(os.path.join(templateDirectoryPath, filename)) # Download the registration template in the template folder template_downloads = ( ('https://data.kitware.com/api/v1/file/599462f78d777f7d33e9c3e6/download', 'RegistrationTemplateForParaToSPHARMMesh.vtk'), ) self.download_files(templateDirectoryPath, template_downloads) # # Inputs of Shape Analysis Module # self.Logic.parameters.setWaitForCompletion(True) self.Logic.parameters.setInputDirectory(inputDirectoryPath) self.Logic.parameters.setOutputDirectory(outputDirectoryPath) self.Logic.parameters.setOverwriteSegPostProcess(True) self.Logic.parameters.setOverwriteGenParaMesh(True) self.Logic.parameters.setNumberofIterations(25) self.Logic.parameters.setOverwriteParaToSPHARMMesh(True) self.Logic.parameters.setMedialMesh(True) self.Logic.parameters.setUseRegTemplate(True) regTemplateFilePath = templateDirectoryPath + '/RegistrationTemplateForParaToSPHARMMesh.vtk' self.Logic.parameters.setChoiceOfFlip(3) self.Logic.parameters.setRegTemplate(regTemplateFilePath) # Setup the inputCases # Possible extensions exts = [".gipl", ".gipl.gz", ".mgh", ".mgh,gz", ".nii", ".nii.gz",".nrrd", ".vtk", ".vtp", ".hdr", ".mhd"] # Search cases and add the filename to a list self.Logic.InputCases = [] for file in os.listdir(inputDirectoryPath): for ext in exts: if file.endswith(ext): self.Logic.InputCases.append(file) self.delayDisplay('Run Shape Analysis Module') self.Logic.ShapeAnalysisCases() self.assertTrue(self.comparisonOfOutputsSegPostProcess()) self.assertTrue(self.comparisonOfOutputsGenParaMesh()) self.assertTrue(self.comparisonOfOutputsParaToSPHARMMesh()) self.cleanSlicerTemporaryDirectory() self.delayDisplay('Tests Passed!') slicer.mrmlScene.Clear(0) def comparisonOfOutputsSegPostProcess(self): self.delayDisplay('Test 2: Comparison of the outputs generated by SegPostProcess CLI') # Checking the existence of the output directory Step1_SegPostProcess outputDirectoryPath = slicer.app.temporaryPath + '/OutputShapeAnalysisModule' SegPostProcessOutputDirectoryPath = outputDirectoryPath + '/Step1_SegPostProcess' if not 
os.path.exists(SegPostProcessOutputDirectoryPath): return False # Downloading output data to compare with the ones generated by Shape Analysis Module during the tests output_downloads = ( ('https://data.kitware.com/api/v1/file/59945ee08d777f7d33e9c3d3/download', 'OutputImageToCompareSegPostProcess.nrrd'), ) self.download_files(SegPostProcessOutputDirectoryPath, output_downloads) # Comparison of the Post Process Mesh Outputs self.delayDisplay('Comparison of the Post Process Outputs') output_filenames = list() for inputRootname in self.inputRootnames: output_filename = inputRootname + "_pp.nrrd" output_filenames.append(output_filename) for i in range(len(output_filenames)): volume2_filepath = os.path.join(SegPostProcessOutputDirectoryPath, output_filenames[i]) # Checking the existence of the output files in the folder Step1_SegPostProcess if not os.path.exists(volume2_filepath): return False # Loading the 2 volumes for comparison volume1_rootname = output_filenames[i].split(".")[0] volume2_rootname = output_downloads[i][1].split(".")[0] volume1 = MRMLUtility.loadMRMLNode(volume1_rootname, SegPostProcessOutputDirectoryPath, output_downloads[i][1], 'LabelMap') volume2 = MRMLUtility.loadMRMLNode(volume2_rootname, SegPostProcessOutputDirectoryPath, output_filenames[i], 'LabelMap') # Comparison if not self.volume_comparison(volume1, volume2): return False return True def comparisonOfOutputsGenParaMesh(self): self.delayDisplay('Test 3: Comparison of the outputs generated by GenParaMesh CLI') # Checking the existence of the output directory Step2_GenParaMesh outputDirectoryPath = slicer.app.temporaryPath + '/OutputShapeAnalysisModule' GenParaMeshOutputDirectoryPath = outputDirectoryPath + '/Step2_GenParaMesh' if not os.path.exists(GenParaMeshOutputDirectoryPath): return False # Downloading output data to compare with the ones generated by Shape Analysis Module during the tests output_downloads = ( ('https://data.kitware.com/api/v1/file/59af09588d777f7d33e9cf9d/download', 'OutputImageToCompareGenParaMesh_para.vtk'), ('https://data.kitware.com/api/v1/file/59945ece8d777f7d33e9c3c7/download', 'OutputImageToCompareGenParaMesh_surf.vtk'), ) self.download_files(GenParaMeshOutputDirectoryPath, output_downloads) # Comparison of the Parameters Mesh Outputs self.delayDisplay('Comparison of the Parameters Mesh Outputs') output_filenames = list() for inputRootname in self.inputRootnames: output_para_filename = inputRootname + "_pp_para.vtk" output_surf_filename = inputRootname + "_pp_surf.vtk" output_filenames.append(output_para_filename) output_filenames.append(output_surf_filename) for i in range(len(output_filenames)): model2_filepath = os.path.join(GenParaMeshOutputDirectoryPath, output_filenames[i]) # Checking the existence of the output files in the folder Step2_GenParaMesh if not os.path.exists(model2_filepath): return False # Loading the 2 models for comparison model1_rootname = output_downloads[i][1].split(".")[0] model2_rootname = output_filenames[i].split(".")[0] model1 = MRMLUtility.loadMRMLNode(model1_rootname, GenParaMeshOutputDirectoryPath, output_downloads[i][1], 'ModelFile') model2 = MRMLUtility.loadMRMLNode(model2_rootname, GenParaMeshOutputDirectoryPath,output_filenames[i], 'ModelFile') # Comparison if not self.polydata_comparison(model1, model2): return False return True def comparisonOfOutputsParaToSPHARMMesh(self): self.delayDisplay('Test 4: Comparison of the outputs generated by ParaToSPHARMMesh CLI') # Checking the existence of the output directory Step3_ParaToSPHARMMesh 
outputDirectoryPath = slicer.app.temporaryPath + '/OutputShapeAnalysisModule' ParaToSPHARMMeshOutputDirectoryPath = outputDirectoryPath + '/Step3_ParaToSPHARMMesh' if not os.path.exists(ParaToSPHARMMeshOutputDirectoryPath): return False # Downloading output data to compare with the ones generated by Shape Analysis Module during the tests output_downloads = ( ('https://data.kitware.com/api/v1/file/59af09028d777f7d33e9cf9a/download', 'OutputImageToCompareParaToSPHARMMesh_SPHARM.vtk'), ('https://data.kitware.com/api/v1/file/59af09018d777f7d33e9cf91/download', 'OutputImageToCompareParaToSPHARMMesh_SPHARM_ellalign.vtk'), ('https://data.kitware.com/api/v1/file/59af09018d777f7d33e9cf94/download', 'OutputImageToCompareParaToSPHARMMesh_MedialMesh.vtk'), ('https://data.kitware.com/api/v1/file/59af09028d777f7d33e9cf97/download', 'OutputImageToCompareParaToSPHARMMesh_SPHARM_procalign.vtk'), ) self.download_files(ParaToSPHARMMeshOutputDirectoryPath, output_downloads) # Comparison of the SPHARM Mesh Outputs self.delayDisplay('Comparison of the SPHARM Mesh Outputs') output_filenames = list() for inputRootname in self.inputRootnames: output_spharm_filename = inputRootname + "_pp_surf_SPHARM.vtk" output_ellalign_filename = inputRootname + "_pp_surf_SPHARM_ellalign.vtk" output_medialmesh_filename = inputRootname + "_pp_surf_SPHARMMedialMesh.vtk" output_procalign_filename = inputRootname + "_pp_surf_SPHARM_procalign.vtk" output_filenames.append(output_spharm_filename) output_filenames.append(output_ellalign_filename) output_filenames.append(output_medialmesh_filename) output_filenames.append(output_procalign_filename) for i in range(len(output_filenames)): model2_filepath = os.path.join(ParaToSPHARMMeshOutputDirectoryPath, output_filenames[i]) # Checking the existence of the output files in the folder Step3_ParaToSPHARMMesh if not os.path.exists(model2_filepath): return False # Loading the 2 models for comparison model1_rootname = output_downloads[i][1].split(".")[0] model2_rootname = output_filenames[i].split(".")[0] model1 = MRMLUtility.loadMRMLNode(model1_rootname, ParaToSPHARMMeshOutputDirectoryPath, output_downloads[i][1], 'ModelFile') model2 = MRMLUtility.loadMRMLNode(model2_rootname, ParaToSPHARMMeshOutputDirectoryPath, output_filenames[i], 'ModelFile') # Comparison if not self.polydata_comparison(model1, model2): return False return True def volume_comparison(self, volume1, volume2): imageData1 = volume1.GetImageData() imageData2 = volume2.GetImageData() nbPoints1 = imageData1.GetNumberOfPoints() nbPoints2 = imageData2.GetNumberOfPoints() if not nbPoints1 == nbPoints2: return False dimension1 = imageData1.GetDimensions() dimension2 = imageData2.GetDimensions() if not dimension1 == dimension2: return False for i in range(dimension1[0]): for j in range(dimension1[1]): for k in range(dimension1[2]): if not imageData1.GetScalarComponentAsDouble(i,j,k,0) == imageData2.GetScalarComponentAsDouble(i,j,k,0): return False return True def polydata_comparison(self, model1, model2): polydata1 = model1.GetPolyData() polydata2 = model2.GetPolyData() # Number of points nbPoints1 = polydata1.GetNumberOfPoints() nbPoints2 = polydata2.GetNumberOfPoints() if not nbPoints1 == nbPoints2: return False # Polydata data1 = polydata1.GetPoints().GetData() data2 = polydata2.GetPoints().GetData() # Number of Components nbComponents1 = data1.GetNumberOfComponents() nbComponents2 = data2.GetNumberOfComponents() if not nbComponents1 == nbComponents2: return False # Points value for i in range(nbPoints1): for j in 
range(nbComponents1): if not data1.GetTuple(i)[j] == data2.GetTuple(i)[j]: return False # Area nbAreas1 = polydata1.GetPointData().GetNumberOfArrays() nbAreas2 = polydata2.GetPointData().GetNumberOfArrays() if not nbAreas1 == nbAreas2: return False for l in range(nbAreas1): area1 = polydata1.GetPointData().GetArray(l) area2 = polydata2.GetPointData().GetArray(l) # Name of the area nameArea1 = area1.GetName() nameArea2 = area2.GetName() if not nameArea1 == nameArea2: return False # Number of Components of the area nbComponents1 = area1.GetNumberOfComponents() nbComponents2 = area2.GetNumberOfComponents() if not nbComponents1 == nbComponents2: return False # Points value in the area for i in range(nbPoints1): for j in range(nbComponents1): if not data1.GetTuple(i)[j] == data2.GetTuple(i)[j]: return False return True def download_files(self, directoryPath, downloads): self.delayDisplay('Starting download') for url, name in downloads: filePath = os.path.join(directoryPath, name) if not os.path.exists(filePath) or os.stat(filePath).st_size == 0: print('Requesting download %s from %s...\n' % (name, url)) if sys.version_info[0] == 3: urllib.request.urlretrieve(url, filePath) else: urllib.urlretrieve(url, filePath) # python 2.x self.delayDisplay('Finished with download') # Function to delete all the data needed for the tests def cleanSlicerTemporaryDirectory(self): # deletion of the SAM input folder inputDirectoryPath = slicer.app.temporaryPath + '/InputShapeAnalysisModule' if os.path.exists(inputDirectoryPath): shutil.rmtree(inputDirectoryPath) # deletion of the SAM output folder outputDirectoryPath = slicer.app.temporaryPath + '/OutputShapeAnalysisModule' if os.path.exists(outputDirectoryPath): shutil.rmtree(outputDirectoryPath) # deletion of the SAM template folder templateDirectoryPath = slicer.app.temporaryPath + '/TemplateShapeAnalysisModule' if os.path.exists(templateDirectoryPath): shutil.rmtree(templateDirectoryPath)
NIRALUser/SPHARM-PDM
Modules/Scripted/ShapeAnalysisModule/ShapeAnalysisModule.py
Python
apache-2.0
91,183
[ "Gaussian", "VTK" ]
9ca29743fe813f32f8816f5007559b64680385a7e081d683621f0d1b3e85554b
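The ShapeAnalysisModuleWrapper docstring in the record above only outlines the external driver script. Below is a minimal sketch of such a script under stated assumptions: the ini section name and most option keys are hypothetical (only 'input-directory-path' appears in the docstring), and the argument order simply follows the wrapper's __init__.

# Hypothetical SPHARM-PDM.py driver; ini section/option names are assumptions.
import sys
from ConfigParser import SafeConfigParser
from ShapeAnalysisModule import ShapeAnalysisModuleWrapper

parser = SafeConfigParser()
parser.read(sys.argv[1])  # e.g. path/to/SPHARM-PDM-parameters.ini
get = lambda option: parser.get('section', option)

instance = ShapeAnalysisModuleWrapper(
    get('input-directory-path'), get('output-directory-path'),
    parser.getboolean('section', 'rescale-seg-post-process'),
    parser.getfloat('section', 'sx'), parser.getfloat('section', 'sy'),
    parser.getfloat('section', 'sz'),
    parser.getint('section', 'label-number'),
    parser.getboolean('section', 'gaussian-filtering'),
    parser.getfloat('section', 'variance-x'),
    parser.getfloat('section', 'variance-y'),
    parser.getfloat('section', 'variance-z'),
    parser.getint('section', 'number-of-iterations'),
    parser.getint('section', 'subdiv-level-value'),
    parser.getint('section', 'spharm-degree-value'),
    parser.getboolean('section', 'medial-mesh'),
    parser.getint('section', 'theta-iteration-value'),
    parser.getint('section', 'phi-iteration-value'),
    parser.getboolean('section', 'use-reg-template'), get('reg-template'),
    parser.getboolean('section', 'use-flip-template'), get('flip-template'),
    parser.getint('section', 'choice-of-flip'))
instance.startProcessing()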
import Galaxy import MathCalculator import SqlReader import XMLDomReader import math import os import string VEGA_LUMINOSITY=74.4731973982 NGC_4874_RA=194.898787 NGC_4874_DEC=27.959389 HUBBLE_CONSTANT=67.6 def galaxies_print(galaxies): file=open("data_used.csv","w+") title="objid,ra,dec,r-band,redshift" file.write(title+"\n") for object in galaxies: coma="," galaxy_data=str(object.__getattribute__("objid"))+coma\ +str(object.__getattribute__("ra"))+coma+ \ str(object.__getattribute__("dec"))+coma+ \ str(object.__getattribute__("r"))+coma+ \ str(object.__getattribute__("redshift"))+"\n" file.write(galaxy_data) file.close() if __name__ == '__main__': #update information and write to the file sqlReader=SqlReader.SqlReader("SELECT p.objid,p.ra,p.dec,p.r,s.z as redshift FROM galaxy as p join specobj as s on s.bestobjid=p.objid WHERE p.ra BETWEEN 194.39877127332 AND 195.39877127332 AND p.dec BETWEEN 27.4592632735057 AND 28.4592632735057") sqlReader.dataCollect() #parse and get data xmlReader=XMLDomReader.XmlDomReader('data_requested.xml') galaxies=xmlReader.getClusterofGalaxies() print "galaxies collected: ",len(galaxies) mathCalculator=MathCalculator.MathCalculator() Redshift=mathCalculator.MeanRedshift(galaxies) galaxies_print(galaxies) MeanVelocity=mathCalculator.Meancalculate(galaxies,'velocity') RMSDispersion=mathCalculator.rootMeanSquareV(galaxies,'velocity') Distance=MeanVelocity/HUBBLE_CONSTANT Radius=Distance*(0.5/180)*math.pi MasstoSun=2*RMSDispersion*RMSDispersion*Radius*232*1e6 LumitoSun=0.0 for object in galaxies: Mr = object.__getattribute__('r')-5*math.log10(object.__getattribute__('distance')*1e6/10) LumitoSun+=math.pow(10,(-0.4)*(Mr-4.68)) print 'total galaxies data used: ',len(galaxies) print 'redshift: ',Redshift print 'meanVelocity',MeanVelocity print 'RMSDispersion: ',RMSDispersion print 'Distance: ',Distance print 'Radius: ',Radius print 'MasstoSun: ',MasstoSun print 'LumitoSun: ',LumitoSun print 'MasstoLumino: ',MasstoSun/LumitoSun
chshibo/CosData_Tools
Pysics_Lab.py
Python
gpl-3.0
2,237
[ "Galaxy" ]
434d0f22a24f5df98cadeb7ee7804b13b1f1bffd1490d88c0f1888783a147c5d
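To make the arithmetic in the script above easier to follow, here is a self-contained sketch of the same mass-to-light estimate with made-up input numbers; reading the 232*1e6 factor as roughly 1/G in Msun per Mpc (km/s)^2 is an inference, not stated in the source.

# Illustrative virial mass-to-light estimate; all input values are hypothetical.
import math

HUBBLE_CONSTANT = 67.6   # km/s/Mpc, as in the script
INV_G = 232 * 1e6        # ~1/G in Msun per Mpc (km/s)^2 (inferred meaning of the constant)

mean_velocity = 7000.0   # km/s, hypothetical mean recession velocity of the cluster
dispersion = 1000.0      # km/s, hypothetical RMS velocity dispersion
r_mag = 14.0             # hypothetical apparent r-band magnitude of one member galaxy

distance = mean_velocity / HUBBLE_CONSTANT       # Mpc, Hubble's law
radius = distance * (0.5 / 180.0) * math.pi      # Mpc subtended by a 0.5 deg aperture
mass = 2.0 * dispersion ** 2 * radius * INV_G    # virial-style mass in solar masses

# Luminosity of one galaxy: absolute magnitude from the distance modulus,
# then compared with the Sun's absolute r-band magnitude (4.68 in the script).
Mr = r_mag - 5.0 * math.log10(distance * 1e6 / 10.0)
luminosity = 10.0 ** (-0.4 * (Mr - 4.68))

print(mass, luminosity, mass / luminosity)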
import tensorflow as tf # neural network for function approximation import gym # environment import numpy as np # matrix operation and math functions from gym import wrappers import gym_morph # customized environment for cart-pole import matplotlib.pyplot as plt import time # Hyperparameters RANDOM_NUMBER_SEED = 2 ENVIRONMENT1 = "morph-v0" # ENVIRONMENT2 = "morph-l1-v0" MAX_EPISODES = 100 # number of episodes EPISODE_LENGTH = 200 # single episode length HIDDEN_LAYER = True HIDDEN_SIZE = 6 DISPLAY_WEIGHTS = False # Help debug weight update gamma = 0.99 # Discount per step # alpha = 0.01 # Learning rate alpha = 0.1 RENDER = False # Render the cart-pole system VIDEO_INTERVAL = 100 # Generate a video at this interval CONSECUTIVE_TARGET = 100 # Including previous 100 rewards DIR_PATH_SAVEFIG = "/home/yh/cartpole_mc_ann/" file_name_savefig = "el" + str(EPISODE_LENGTH) \ + "_hn" + str(HIDDEN_SIZE) \ + "_clr" + str(alpha).replace(".", "p") \ + ".png" env = gym.make(ENVIRONMENT1) env.seed(RANDOM_NUMBER_SEED) np.random.seed(RANDOM_NUMBER_SEED) tf.set_random_seed(RANDOM_NUMBER_SEED) # Input and output sizes input_size = env.observation_space.shape[0] try: output_size = env.action_space.shape[0] except AttributeError: output_size = env.action_space.n # Tensorflow network setup x = tf.placeholder(tf.float32, shape=(None, input_size)) y = tf.placeholder(tf.float32, shape=(None, 1)) expected_returns = tf.placeholder(tf.float32, shape=(None, 1)) # Xavier (2010) weights initializer for uniform distribution: # x = sqrt(6. / (in + out)); [-x, x] w_init = tf.contrib.layers.xavier_initializer() if HIDDEN_LAYER: hidden_W = tf.get_variable("W1", shape=[input_size, HIDDEN_SIZE], initializer=w_init) hidden_B = tf.Variable(tf.zeros(HIDDEN_SIZE)) dist_W = tf.get_variable("W2", shape=[HIDDEN_SIZE, output_size], initializer=w_init) dist_B = tf.Variable(tf.zeros(output_size)) hidden = tf.nn.elu(tf.matmul(x, hidden_W) + hidden_B) dist = tf.tanh(tf.matmul(hidden, dist_W) + dist_B) else: dist_W = tf.get_variable("W1", shape=[input_size, output_size], initializer=w_init) dist_B = tf.Variable(tf.zeros(output_size)) dist = tf.tanh(tf.matmul(x, dist_W) + dist_B) dist_soft = tf.nn.log_softmax(dist) dist_in = tf.matmul(dist_soft, tf.Variable([[1.], [0.]])) pi = tf.contrib.distributions.Bernoulli(dist_in) pi_sample = pi.sample() log_pi = pi.log_prob(y) optimizer = tf.train.RMSPropOptimizer(alpha) # global_step = tf.Variable() # optimizer = tf.train.RMSPropOptimizer(alpha) train = optimizer.minimize(-1.0 * expected_returns * log_pi) # saver = tf.train.Saver() # Create and initialize a session sess = tf.Session() sess.run(tf.global_variables_initializer()) def run_episode(environment, ep, render=False): raw_reward = 0 discounted_reward = 0 cumulative_reward = [] discount = 1.0 states = [] actions = [] obs = environment.reset() done = False while not done: states.append(obs) cumulative_reward.append(discounted_reward) if render and ((ep % VIDEO_INTERVAL) == 0): environment.render() action = sess.run(pi_sample, feed_dict={x: [obs]})[0] actions.append(action) obs, reward, done, info = env.step(action[0]) raw_reward += reward if reward > 0: discounted_reward += reward * discount else: discounted_reward += reward discount *= gamma return raw_reward, discounted_reward, cumulative_reward, states, actions def display_weights(session): global HIDDEN_LAYER if HIDDEN_LAYER: w1 = session.run(hidden_W) b1 = session.run(hidden_B) w2 = session.run(dist_W) b2 = session.run(dist_B) print(w1, b1, w2, b2) else: w1 = session.run(dist_W) b1 = 
session.run(dist_B) print(w1, b1) returns = [] mean_returns = [] for ep in range(MAX_EPISODES): raw_G, discounted_G, cumulative_G, ep_states, ep_actions = \ run_episode(env, ep, RENDER) expected_R = np.transpose([discounted_G - np.array(cumulative_G)]) sess.run(train, feed_dict={x: ep_states, y: ep_actions, expected_returns: expected_R}) if DISPLAY_WEIGHTS: display_weights(sess) returns.append(raw_G) running_returns = returns[max(0, ep-CONSECUTIVE_TARGET):(ep+1)] mean_return = np.mean(running_returns) mean_returns.append(mean_return) msg = "Episode: {}, Return: {}, Last {} returns mean: {}" msg = msg.format(ep+1, raw_G, CONSECUTIVE_TARGET, mean_return) print(msg) env.close() # Plot # plt.style.use('ggplot') plt.style.use('dark_background') episodes_plot = np.arange(MAX_EPISODES) fig = plt.figure() ax = fig.add_subplot(111) fig.subplots_adjust(top=0.85) ax.set_title("The Cart-Pole Problem \n \ Episode Length: %i \ Discount Factor: %.2f \n \ Number of Hidden Neuron: %i \ Constant Learning Rate: %.5f" % (EPISODE_LENGTH, gamma, HIDDEN_SIZE, alpha)) ax.set_xlabel("Episode") ax.set_ylabel("Return") ax.set_ylim((0, 200)) ax.grid(linestyle='--') ax.set_aspect(0.4) ax.plot(episodes_plot, returns, label='Instant return') ax.plot(episodes_plot, mean_returns, label='Averaged return') legend = ax.legend(loc='best', shadow=True) fig.savefig(DIR_PATH_SAVEFIG + file_name_savefig, dpi=500) # plt.show()
GitYiheng/reinforcement_learning_test
test03_monte_carlo/t13_cartpole_mc_lr.py
Python
mit
5,445
[ "NEURON" ]
04c39761c48fc1a9a489e075a34bd25f4d9d83984cfebb05e4dbb073bf84159f
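The training loop above feeds the policy gradient the quantity discounted_G - cumulative_G, i.e. the discounted return still to come after each step. A small standalone sketch of that quantity follows (for the all-positive-reward case, since run_episode only discounts positive rewards):

import numpy as np

def returns_to_go(rewards, gamma=0.99):
    # Mirrors run_episode's bookkeeping: before_step holds the discounted return
    # accumulated *before* each step, so subtracting it from the final discounted
    # return leaves the reward-to-go used as the per-step learning signal.
    total, discount, before_step = 0.0, 1.0, []
    for r in rewards:
        before_step.append(total)
        total += r * discount
        discount *= gamma
    return total - np.array(before_step)

print(returns_to_go([1.0, 1.0, 1.0]))   # -> [2.9701 1.9701 0.9801]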
#!/usr/bin/env python3 """The influence of windowing of log. sweep signals when using a Kaiser Window by fixing beta (=2) and fade_out (=0). fstart = 1 Hz fstop = 22050 Hz """ import sys sys.path.append('..') import measurement_chain import plotting import calculation import ir_imitation import generation import matplotlib.pyplot as plt import windows from scipy.signal import lfilter, fftconvolve import numpy as np # Parameters of the measuring system fs = 44100 fstart = 1 fstop = 22050 duration = 1 pad = 4 # Generate excitation signal excitation = generation.log_sweep(fstart, fstop, duration, fs) N = len(excitation) # Noise in measurement chain noise_level_db = -30 noise = measurement_chain.additive_noise(noise_level_db) # FIR-Filter-System dirac_system = measurement_chain.convolution([1.0]) # Combinate system elements system = measurement_chain.chained(dirac_system, noise) # Lists beta = 7 fade_in_list = np.arange(0, 1001, 1) fade_out = 0 # Spectrum of dirac for reference dirac = np.zeros(pad * fs) dirac[0] = 1 dirac_f = np.fft.rfft(dirac) def get_results(fade_in): excitation_windowed = excitation * windows.window_kaiser(N, fade_in, fade_out, fs, beta) excitation_windowed_zeropadded = generation.zero_padding( excitation_windowed, pad, fs) system_response = system(excitation_windowed_zeropadded) ir = calculation.deconv_process(excitation_windowed_zeropadded, system_response, fs) return ir with open("log_sweep_kaiser_window_script1.txt", "w") as f: for fade_in in fade_in_list: ir = get_results(fade_in) pnr = calculation.pnr_db(ir[0], ir[1:4 * fs]) spectrum_distance = calculation.vector_distance( dirac_f, np.fft.rfft(ir[:pad * fs])) f.write(str(fade_in) + " " + str(pnr) + " " + str(spectrum_distance) + " \n")
spatialaudio/sweep
log_sweep_kaiser_window_script1/log_sweep_kaiser_window_script1.py
Python
mit
2,121
[ "DIRAC" ]
f0e6360cefe6428ca3ed62112111a3442f8ad72c9d844bb6681f6339e1fb5211
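The script above relies on the project-local windows.window_kaiser helper, whose implementation is not shown in this record. The sketch below is one plausible construction of such a fade-in/fade-out window using numpy only; it assumes the fade arguments are durations in milliseconds, which may not match the real helper.

import numpy as np

def kaiser_fade_window(n, fade_in, fade_out, fs, beta):
    # Unit window of length n whose first fade_in ms rise along the first half of
    # a Kaiser window and whose last fade_out ms fall along the second half
    # (assumed behaviour, not the project's actual code).
    window = np.ones(n)
    n_in = int(round(fade_in * fs / 1000.0))
    n_out = int(round(fade_out * fs / 1000.0))
    if n_in > 0:
        window[:n_in] = np.kaiser(2 * n_in, beta)[:n_in]
    if n_out > 0:
        window[n - n_out:] = np.kaiser(2 * n_out, beta)[n_out:]
    return window

w = kaiser_fade_window(44100, fade_in=100, fade_out=0, fs=44100, beta=7)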
# Copyright 2013 Mark Dickinson # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Abstract base class for the various flavours of directed graph. """ import abc from collections import Container, Counter, deque, Iterable, Sized class IDirectedGraph(Container, Iterable, Sized): """ Abstract base class for directed graphs. """ @abc.abstractproperty def vertices(self): """ Return a collection of the vertices of the graph. The collection should support iteration and rapid containment testing. """ @abc.abstractproperty def edges(self): """ Return a collection of the edges of the graph. The collection should support iteration and rapid containment testing. """ @abc.abstractmethod def head(self, edge): """ Return the head (target, destination) of the given edge. """ @abc.abstractmethod def tail(self, edge): """ Return the tail (source) of the given edge. """ @abc.abstractmethod def out_edges(self, vertex): """ Return an iterable of the edges leaving the given vertex. """ @abc.abstractmethod def in_edges(self, vertex): """ Return an iterable of the edges entering the given vertex. """ @abc.abstractproperty def full_subgraph(self, vertices): """ Return the subgraph of this graph whose vertices are the given ones and whose edges are all the edges of the original graph between those vertices. """ @classmethod def vertex_set(cls): """ Return an empty object suitable for storing a set of vertices. Usually a plain set will suffice, but for the ObjectGraph we'll use an ElementTransformSet instead. """ return set() @classmethod def vertex_dict(cls): """ Return an empty mapping whose keys are vertices. Usually a plain dict is good enough; for the ObjectGraph we'll override to use KeyTransformDict instead. """ return dict() @classmethod def vertex_equal(cls, vertex1, vertex2): """ Criterion to use to determine whether two vertices are equal. Usually we want to use simple equality here, but the for the ObjectGraph we'll need to use identity. """ return vertex1 == vertex2 def __len__(self): """ Number of vertices in the graph. """ return len(self.vertices) def __iter__(self): """ Generate objects of graph. """ return iter(self.vertices) def __contains__(self, vertex): """ Return True if vertex is a vertex of the graph, else False. """ return vertex in self.vertices def __repr__(self): return "<{}.{} object of size {} at 0x{:x}>".format( self.__module__, type(self).__name__, len(self), id(self), ) def children(self, vertex): """ Return the list of immediate children of the given vertex. """ return [self.head(edge) for edge in self.out_edges(vertex)] def parents(self, vertex): """ Return the list of immediate parents of this vertex. """ return [self.tail(edge) for edge in self.in_edges(vertex)] def references(self): """ Return (tail, head) pairs for each edge in the graph. """ return [ (tail, head) for tail in self.vertices for head in self.children(tail) ] def descendants(self, start, generations=None): """ Return the subgraph of all nodes reachable from the given start vertex, including that vertex. 
If specified, the optional `generations` argument specifies how many generations to limit to. """ visited = self.vertex_set() visited.add(start) to_visit = deque([(start, 0)]) while to_visit: vertex, depth = to_visit.popleft() if depth == generations: continue for child in self.children(vertex): if child not in visited: visited.add(child) to_visit.append((child, depth+1)) return self.full_subgraph(visited) def ancestors(self, start, generations=None): """ Return the subgraph of all nodes from which the given vertex is reachable, including that vertex. If specified, the optional `generations` argument specifies how many generations to limit to. """ visited = self.vertex_set() visited.add(start) to_visit = deque([(start, 0)]) while to_visit: vertex, depth = to_visit.popleft() if depth == generations: continue for parent in self.parents(vertex): if parent not in visited: visited.add(parent) to_visit.append((parent, depth+1)) return self.full_subgraph(visited) def shortest_path(self, start, end): """ Find a shortest path from start to end. Returns the subgraph consisting of the vertices in that path and (all) the edges between them. Raises ValueError if no path from start to end exists. """ # Vertices whose children are yet to be explored. to_visit = deque([start]) # Mapping from each child to the parent that it was first found via. # We map the start vertex to a dummy object. dummy = object() explored = self.vertex_dict() explored[start] = dummy # Breadth-first search, rooted at ``start``. while to_visit: if end in explored: break parent = to_visit.popleft() for child in self.children(parent): if child not in explored: explored[child] = parent to_visit.append(child) else: raise ValueError("No path found.") # Backtrack to construct vertices of path. vertex = end path = [] while vertex is not dummy: path.append(vertex) vertex = explored[vertex] return self.full_subgraph(path) def shortest_cycle(self, start): """ Find a shortest cycle including start. Returns the subgraph consisting of the vertices in that cycle and (all) the edges between them. Raises ValueError if no cycle including start exists. """ # Vertices whose children are yet to be explored. to_visit = deque([start]) # Mapping from each child to the parent that it was first found via. explored = self.vertex_dict() # Breadth-first search, rooted at ``start``. while to_visit: if start in explored: break parent = to_visit.popleft() for child in self.children(parent): if child not in explored: explored[child] = parent to_visit.append(child) else: raise ValueError("No path found.") # Backtrack to construct vertices of path. vertex = start path = [] while True: path.append(vertex) vertex = explored[vertex] if self.vertex_equal(vertex, start): break return self.full_subgraph(path) def _component_graph(self): """ Compute the graph of strongly connected components. Each strongly connected component is itself represented as a list of pairs, giving information not only about the vertices belonging to this strongly connected component, but also the edges leading from this strongly connected component to other components. Each pair is of the form ('EDGE', v) or ('VERTEX', v) for some vertex v. In the first case, that indicates that there's an edge from this strongly connected component to the given vertex v (which will belong to another component); in the second, it indicates that v is a member of this strongly connected component. 
Each component will begin with a vertex (the *root* vertex of the strongly connected component); the following edges are edges from that vertex. Algorithm is based on that described in "Path-based depth-first search for strong and biconnected components" by Harold N. Gabow, Inf.Process.Lett. 74 (2000) 107--114. """ sccs = [] stack = [] boundaries = [] identified = self.vertex_set() index = self.vertex_dict() to_do = [] def visit_vertex(v): index[v] = len(stack) stack.append(('VERTEX', v)) boundaries.append(index[v]) to_do.append((leave_vertex, v)) to_do.extend((visit_edge, w) for w in self.children(v)) def visit_edge(v): if v in identified: stack.append(('EDGE', v)) elif v in index: while index[v] < boundaries[-1]: boundaries.pop() else: to_do.append((visit_vertex, v)) def leave_vertex(v): if boundaries[-1] == index[v]: root = boundaries.pop() scc = stack[root:] del stack[root:] for item_type, w in scc: if item_type == 'VERTEX': identified.add(w) del index[w] sccs.append(scc) stack.append(('EDGE', v)) # Visit every vertex of the graph. for v in self.vertices: if v not in identified: to_do.append((visit_vertex, v)) while to_do: operation, v = to_do.pop() operation(v) stack.pop() return sccs def source_components(self): """ Return the strongly connected components not reachable from any other component. Any component in the graph is reachable from one of these. """ raw_sccs = self._component_graph() # Construct a dictionary mapping each vertex to the root of its scc. vertex_to_root = self.vertex_dict() # And keep track of which SCCs have incoming edges. non_sources = self.vertex_set() # Build maps from vertices to roots, and identify the sccs that *are* # reachable from other components. for scc in raw_sccs: root = scc[0][1] for item_type, w in scc: if item_type == 'VERTEX': vertex_to_root[w] = root elif item_type == 'EDGE': non_sources.add(vertex_to_root[w]) sccs = [] for raw_scc in raw_sccs: root = raw_scc[0][1] if root not in non_sources: sccs.append([v for vtype, v in raw_scc if vtype == 'VERTEX']) return [self.full_subgraph(scc) for scc in sccs] def strongly_connected_components(self): """ Return list of strongly connected components of this graph. Returns a list of subgraphs. Algorithm is based on that described in "Path-based depth-first search for strong and biconnected components" by Harold N. Gabow, Inf.Process.Lett. 74 (2000) 107--114. """ raw_sccs = self._component_graph() sccs = [] for raw_scc in raw_sccs: sccs.append([v for vtype, v in raw_scc if vtype == 'VERTEX']) return [self.full_subgraph(scc) for scc in sccs] def count_by(self, classifier): """ Return a count of objects using the given classifier. Here `classifier` should be a callable that accepts a single object from the graph and returns the "class" of that object, which should be a hashable value. Returns a collections.Counter instance mapping classes to counts. """ return Counter(classifier(obj) for obj in self) def find_by(self, predicate): """ Return a list of all objects satisfying the given predicate. Here `predicate` should be a callable that accepts a single object from the graph and returns a value that can be interpreted as a boolean. """ return [obj for obj in self if predicate(obj)] def __sub__(self, other): """ Return the full subgraph containing all vertices in self except those in other. """ difference = [v for v in self.vertices if v not in other.vertices] return self.full_subgraph(difference) def __and__(self, other): """ Return the intersection of the two graphs. 
Returns the full subgraph of self on the intersection of self.vertices and other.vertices. Note that this operation is not necessarily symmetric, though in the common case where both self and other are already full subgraphs of a larger graph, it will be. """ intersection = [v for v in self.vertices if v in other.vertices] return self.full_subgraph(intersection)
mdickinson/refcycle
refcycle/i_directed_graph.py
Python
apache-2.0
14,060
[ "VisIt" ]
116c190ba1a69660400abfde2c2d9eca615a4717f6e8fb157e3ea6115b8f2213
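IDirectedGraph above is abstract; refcycle ships its own concrete graph classes, which are not part of this file. Purely as an illustration, a minimal subclass that stores (tail, head) pairs is enough to exercise shortest_path and strongly_connected_components:

class TinyGraph(IDirectedGraph):
    # Illustrative subclass only: vertices are given explicitly, edges as pairs.
    def __init__(self, vertices, edges):
        self._vertices = set(vertices)
        self._edges = list(edges)

    @property
    def vertices(self):
        return self._vertices

    @property
    def edges(self):
        return self._edges

    def head(self, edge):
        return edge[1]

    def tail(self, edge):
        return edge[0]

    def out_edges(self, vertex):
        return [e for e in self._edges if e[0] == vertex]

    def in_edges(self, vertex):
        return [e for e in self._edges if e[1] == vertex]

    def full_subgraph(self, vertices):
        kept = set(vertices)
        return TinyGraph(kept, [e for e in self._edges if e[0] in kept and e[1] in kept])

g = TinyGraph([1, 2, 3, 4], [(1, 2), (2, 3), (3, 1), (3, 4)])
print(len(g.shortest_path(1, 4)))              # 4 vertices on the path 1 -> 2 -> 3 -> 4
print(len(g.strongly_connected_components()))  # 2: the {1, 2, 3} cycle and {4}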
"""This file implements the gym environment of minitaur. """ import math import time import os, inspect currentdir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe()))) parentdir = os.path.dirname(os.path.dirname(currentdir)) os.sys.path.insert(0, parentdir) import gym from gym import spaces from gym.utils import seeding import numpy as np import pybullet from pybullet_utils import bullet_client as bc import pybullet_data from pybullet_envs.minitaur.envs import minitaur from pybullet_envs.minitaur.envs import minitaur_derpy from pybullet_envs.minitaur.envs import minitaur_logging from pybullet_envs.minitaur.envs import minitaur_logging_pb2 from pybullet_envs.minitaur.envs import minitaur_rainbow_dash from pybullet_envs.minitaur.envs import motor from pkg_resources import parse_version NUM_MOTORS = 8 MOTOR_ANGLE_OBSERVATION_INDEX = 0 MOTOR_VELOCITY_OBSERVATION_INDEX = MOTOR_ANGLE_OBSERVATION_INDEX + NUM_MOTORS MOTOR_TORQUE_OBSERVATION_INDEX = MOTOR_VELOCITY_OBSERVATION_INDEX + NUM_MOTORS BASE_ORIENTATION_OBSERVATION_INDEX = MOTOR_TORQUE_OBSERVATION_INDEX + NUM_MOTORS ACTION_EPS = 0.01 OBSERVATION_EPS = 0.01 RENDER_HEIGHT = 360 RENDER_WIDTH = 480 SENSOR_NOISE_STDDEV = minitaur.SENSOR_NOISE_STDDEV DEFAULT_URDF_VERSION = "default" DERPY_V0_URDF_VERSION = "derpy_v0" RAINBOW_DASH_V0_URDF_VERSION = "rainbow_dash_v0" NUM_SIMULATION_ITERATION_STEPS = 300 MINIATUR_URDF_VERSION_MAP = { DEFAULT_URDF_VERSION: minitaur.Minitaur, DERPY_V0_URDF_VERSION: minitaur_derpy.MinitaurDerpy, RAINBOW_DASH_V0_URDF_VERSION: minitaur_rainbow_dash.MinitaurRainbowDash, } def convert_to_list(obj): try: iter(obj) return obj except TypeError: return [obj] class MinitaurGymEnv(gym.Env): """The gym environment for the minitaur. It simulates the locomotion of a minitaur, a quadruped robot. The state space include the angles, velocities and torques for all the motors and the action space is the desired motor angle for each motor. The reward function is based on how far the minitaur walks in 1000 steps and penalizes the energy expenditure. """ metadata = {"render.modes": ["human", "rgb_array"], "video.frames_per_second": 100} def __init__(self, urdf_root=pybullet_data.getDataPath(), urdf_version=None, distance_weight=1.0, energy_weight=0.005, shake_weight=0.0, drift_weight=0.0, distance_limit=float("inf"), observation_noise_stdev=SENSOR_NOISE_STDDEV, self_collision_enabled=True, motor_velocity_limit=np.inf, pd_control_enabled=False, leg_model_enabled=True, accurate_motor_model_enabled=False, remove_default_joint_damping=False, motor_kp=1.0, motor_kd=0.02, control_latency=0.0, pd_latency=0.0, torque_control_enabled=False, motor_overheat_protection=False, hard_reset=True, on_rack=False, render=False, num_steps_to_log=1000, action_repeat=1, control_time_step=None, env_randomizer=None, forward_reward_cap=float("inf"), reflection=True, log_path=None): """Initialize the minitaur gym environment. Args: urdf_root: The path to the urdf data folder. urdf_version: [DEFAULT_URDF_VERSION, DERPY_V0_URDF_VERSION, RAINBOW_DASH_V0_URDF_VERSION] are allowable versions. If None, DEFAULT_URDF_VERSION is used. DERPY_V0_URDF_VERSION is the result of first pass system identification for derpy. We will have a different URDF and related Minitaur class each time we perform system identification. While the majority of the code of the class remains the same, some code changes (e.g. the constraint location might change). __init__() will choose the right Minitaur class from different minitaur modules based on urdf_version. 
distance_weight: The weight of the distance term in the reward. energy_weight: The weight of the energy term in the reward. shake_weight: The weight of the vertical shakiness term in the reward. drift_weight: The weight of the sideways drift term in the reward. distance_limit: The maximum distance to terminate the episode. observation_noise_stdev: The standard deviation of observation noise. self_collision_enabled: Whether to enable self collision in the sim. motor_velocity_limit: The velocity limit of each motor. pd_control_enabled: Whether to use PD controller for each motor. leg_model_enabled: Whether to use a leg motor to reparameterize the action space. accurate_motor_model_enabled: Whether to use the accurate DC motor model. remove_default_joint_damping: Whether to remove the default joint damping. motor_kp: proportional gain for the accurate motor model. motor_kd: derivative gain for the accurate motor model. control_latency: It is the delay in the controller between when an observation is made at some point, and when that reading is reported back to the Neural Network. pd_latency: latency of the PD controller loop. PD calculates PWM based on the motor angle and velocity. The latency measures the time between when the motor angle and velocity are observed on the microcontroller and when the true state happens on the motor. It is typically (0.001- 0.002s). torque_control_enabled: Whether to use the torque control, if set to False, pose control will be used. motor_overheat_protection: Whether to shutdown the motor that has exerted large torque (OVERHEAT_SHUTDOWN_TORQUE) for an extended amount of time (OVERHEAT_SHUTDOWN_TIME). See ApplyAction() in minitaur.py for more details. hard_reset: Whether to wipe the simulation and load everything when reset is called. If set to false, reset just place the minitaur back to start position and set its pose to initial configuration. on_rack: Whether to place the minitaur on rack. This is only used to debug the walking gait. In this mode, the minitaur's base is hanged midair so that its walking gait is clearer to visualize. render: Whether to render the simulation. num_steps_to_log: The max number of control steps in one episode that will be logged. If the number of steps is more than num_steps_to_log, the environment will still be running, but only first num_steps_to_log will be recorded in logging. action_repeat: The number of simulation steps before actions are applied. control_time_step: The time step between two successive control signals. env_randomizer: An instance (or a list) of EnvRandomizer(s). An EnvRandomizer may randomize the physical property of minitaur, change the terrrain during reset(), or add perturbation forces during step(). forward_reward_cap: The maximum value that forward reward is capped at. Disabled (Inf) by default. log_path: The path to write out logs. For the details of logging, refer to minitaur_logging.proto. Raises: ValueError: If the urdf_version is not supported. """ # Set up logging. self._log_path = log_path self.logging = minitaur_logging.MinitaurLogging(log_path) # PD control needs smaller time step for stability. 
if control_time_step is not None: self.control_time_step = control_time_step self._action_repeat = action_repeat self._time_step = control_time_step / action_repeat else: # Default values for time step and action repeat if accurate_motor_model_enabled or pd_control_enabled: self._time_step = 0.002 self._action_repeat = 5 else: self._time_step = 0.01 self._action_repeat = 1 self.control_time_step = self._time_step * self._action_repeat # TODO(b/73829334): Fix the value of self._num_bullet_solver_iterations. self._num_bullet_solver_iterations = int(NUM_SIMULATION_ITERATION_STEPS / self._action_repeat) self._urdf_root = urdf_root self._self_collision_enabled = self_collision_enabled self._motor_velocity_limit = motor_velocity_limit self._observation = [] self._true_observation = [] self._objectives = [] self._objective_weights = [distance_weight, energy_weight, drift_weight, shake_weight] self._env_step_counter = 0 self._num_steps_to_log = num_steps_to_log self._is_render = render self._last_base_position = [0, 0, 0] self._distance_weight = distance_weight self._energy_weight = energy_weight self._drift_weight = drift_weight self._shake_weight = shake_weight self._distance_limit = distance_limit self._observation_noise_stdev = observation_noise_stdev self._action_bound = 1 self._pd_control_enabled = pd_control_enabled self._leg_model_enabled = leg_model_enabled self._accurate_motor_model_enabled = accurate_motor_model_enabled self._remove_default_joint_damping = remove_default_joint_damping self._motor_kp = motor_kp self._motor_kd = motor_kd self._torque_control_enabled = torque_control_enabled self._motor_overheat_protection = motor_overheat_protection self._on_rack = on_rack self._cam_dist = 1.0 self._cam_yaw = 0 self._cam_pitch = -30 self._forward_reward_cap = forward_reward_cap self._hard_reset = True self._last_frame_time = 0.0 self._control_latency = control_latency self._pd_latency = pd_latency self._urdf_version = urdf_version self._ground_id = None self._reflection = reflection self._env_randomizers = convert_to_list(env_randomizer) if env_randomizer else [] self._episode_proto = minitaur_logging_pb2.MinitaurEpisode() if self._is_render: self._pybullet_client = bc.BulletClient(connection_mode=pybullet.GUI) else: self._pybullet_client = bc.BulletClient() if self._urdf_version is None: self._urdf_version = DEFAULT_URDF_VERSION self._pybullet_client.setPhysicsEngineParameter(enableConeFriction=0) self.seed() self.reset() observation_high = (self._get_observation_upper_bound() + OBSERVATION_EPS) observation_low = (self._get_observation_lower_bound() - OBSERVATION_EPS) action_dim = NUM_MOTORS action_high = np.array([self._action_bound] * action_dim) self.action_space = spaces.Box(-action_high, action_high) self.observation_space = spaces.Box(observation_low, observation_high) self.viewer = None self._hard_reset = hard_reset # This assignment need to be after reset() def close(self): if self._env_step_counter > 0: self.logging.save_episode(self._episode_proto) self.minitaur.Terminate() def add_env_randomizer(self, env_randomizer): self._env_randomizers.append(env_randomizer) def reset(self, initial_motor_angles=None, reset_duration=1.0): self._pybullet_client.configureDebugVisualizer(self._pybullet_client.COV_ENABLE_RENDERING, 0) if self._env_step_counter > 0: self.logging.save_episode(self._episode_proto) self._episode_proto = minitaur_logging_pb2.MinitaurEpisode() minitaur_logging.preallocate_episode_proto(self._episode_proto, self._num_steps_to_log) if self._hard_reset: 
self._pybullet_client.resetSimulation() self._pybullet_client.setPhysicsEngineParameter( numSolverIterations=int(self._num_bullet_solver_iterations)) self._pybullet_client.setTimeStep(self._time_step) self._ground_id = self._pybullet_client.loadURDF("%s/plane.urdf" % self._urdf_root) if (self._reflection): self._pybullet_client.changeVisualShape(self._ground_id, -1, rgbaColor=[1, 1, 1, 0.8]) self._pybullet_client.configureDebugVisualizer( self._pybullet_client.COV_ENABLE_PLANAR_REFLECTION, self._ground_id) self._pybullet_client.setGravity(0, 0, -10) acc_motor = self._accurate_motor_model_enabled motor_protect = self._motor_overheat_protection if self._urdf_version not in MINIATUR_URDF_VERSION_MAP: raise ValueError("%s is not a supported urdf_version." % self._urdf_version) else: self.minitaur = (MINIATUR_URDF_VERSION_MAP[self._urdf_version]( pybullet_client=self._pybullet_client, action_repeat=self._action_repeat, urdf_root=self._urdf_root, time_step=self._time_step, self_collision_enabled=self._self_collision_enabled, motor_velocity_limit=self._motor_velocity_limit, pd_control_enabled=self._pd_control_enabled, accurate_motor_model_enabled=acc_motor, remove_default_joint_damping=self._remove_default_joint_damping, motor_kp=self._motor_kp, motor_kd=self._motor_kd, control_latency=self._control_latency, pd_latency=self._pd_latency, observation_noise_stdev=self._observation_noise_stdev, torque_control_enabled=self._torque_control_enabled, motor_overheat_protection=motor_protect, on_rack=self._on_rack)) self.minitaur.Reset(reload_urdf=False, default_motor_angles=initial_motor_angles, reset_time=reset_duration) # Loop over all env randomizers. for env_randomizer in self._env_randomizers: env_randomizer.randomize_env(self) self._pybullet_client.setPhysicsEngineParameter(enableConeFriction=0) self._env_step_counter = 0 self._last_base_position = [0, 0, 0] self._objectives = [] self._pybullet_client.resetDebugVisualizerCamera(self._cam_dist, self._cam_yaw, self._cam_pitch, [0, 0, 0]) self._pybullet_client.configureDebugVisualizer(self._pybullet_client.COV_ENABLE_RENDERING, 1) return self._get_observation() def seed(self, seed=None): self.np_random, seed = seeding.np_random(seed) return [seed] def _transform_action_to_motor_command(self, action): if self._leg_model_enabled: for i, action_component in enumerate(action): if not (-self._action_bound - ACTION_EPS <= action_component <= self._action_bound + ACTION_EPS): raise ValueError("{}th action {} out of bounds.".format(i, action_component)) action = self.minitaur.ConvertFromLegModel(action) return action def step(self, action): """Step forward the simulation, given the action. Args: action: A list of desired motor angles for eight motors. Returns: observations: The angles, velocities and torques of all motors. reward: The reward for the current state-action pair. done: Whether the episode has ended. info: A dictionary that stores diagnostic information. Raises: ValueError: The action dimension is not the same as the number of motors. ValueError: The magnitude of actions is out of bounds. """ self._last_base_position = self.minitaur.GetBasePosition() if self._is_render: # Sleep, otherwise the computation takes less time than real time, # which will make the visualization like a fast-forward video. 
time_spent = time.time() - self._last_frame_time self._last_frame_time = time.time() time_to_sleep = self.control_time_step - time_spent if time_to_sleep > 0: time.sleep(time_to_sleep) base_pos = self.minitaur.GetBasePosition() # Keep the previous orientation of the camera set by the user. [yaw, pitch, dist] = self._pybullet_client.getDebugVisualizerCamera()[8:11] self._pybullet_client.resetDebugVisualizerCamera(dist, yaw, pitch, base_pos) for env_randomizer in self._env_randomizers: env_randomizer.randomize_step(self) action = self._transform_action_to_motor_command(action) self.minitaur.Step(action) reward = self._reward() done = self._termination() if self._log_path is not None: minitaur_logging.update_episode_proto(self._episode_proto, self.minitaur, action, self._env_step_counter) self._env_step_counter += 1 if done: self.minitaur.Terminate() return np.array(self._get_observation()), reward, done, {} def render(self, mode="rgb_array", close=False): if mode != "rgb_array": return np.array([]) base_pos = self.minitaur.GetBasePosition() view_matrix = self._pybullet_client.computeViewMatrixFromYawPitchRoll( cameraTargetPosition=base_pos, distance=self._cam_dist, yaw=self._cam_yaw, pitch=self._cam_pitch, roll=0, upAxisIndex=2) proj_matrix = self._pybullet_client.computeProjectionMatrixFOV(fov=60, aspect=float(RENDER_WIDTH) / RENDER_HEIGHT, nearVal=0.1, farVal=100.0) (_, _, px, _, _) = self._pybullet_client.getCameraImage( width=RENDER_WIDTH, height=RENDER_HEIGHT, renderer=self._pybullet_client.ER_BULLET_HARDWARE_OPENGL, viewMatrix=view_matrix, projectionMatrix=proj_matrix) rgb_array = np.array(px) rgb_array = rgb_array[:, :, :3] return rgb_array def get_minitaur_motor_angles(self): """Get the minitaur's motor angles. Returns: A numpy array of motor angles. """ return np.array(self._observation[MOTOR_ANGLE_OBSERVATION_INDEX:MOTOR_ANGLE_OBSERVATION_INDEX + NUM_MOTORS]) def get_minitaur_motor_velocities(self): """Get the minitaur's motor velocities. Returns: A numpy array of motor velocities. """ return np.array( self._observation[MOTOR_VELOCITY_OBSERVATION_INDEX:MOTOR_VELOCITY_OBSERVATION_INDEX + NUM_MOTORS]) def get_minitaur_motor_torques(self): """Get the minitaur's motor torques. Returns: A numpy array of motor torques. """ return np.array( self._observation[MOTOR_TORQUE_OBSERVATION_INDEX:MOTOR_TORQUE_OBSERVATION_INDEX + NUM_MOTORS]) def get_minitaur_base_orientation(self): """Get the minitaur's base orientation, represented by a quaternion. Returns: A numpy array of minitaur's orientation. """ return np.array(self._observation[BASE_ORIENTATION_OBSERVATION_INDEX:]) def is_fallen(self): """Decide whether the minitaur has fallen. If the up directions between the base and the world is larger (the dot product is smaller than 0.85) or the base is very low on the ground (the height is smaller than 0.13 meter), the minitaur is considered fallen. Returns: Boolean value that indicates whether the minitaur has fallen. 
""" orientation = self.minitaur.GetBaseOrientation() rot_mat = self._pybullet_client.getMatrixFromQuaternion(orientation) local_up = rot_mat[6:] pos = self.minitaur.GetBasePosition() return (np.dot(np.asarray([0, 0, 1]), np.asarray(local_up)) < 0.85 or pos[2] < 0.13) def _termination(self): position = self.minitaur.GetBasePosition() distance = math.sqrt(position[0]**2 + position[1]**2) return self.is_fallen() or distance > self._distance_limit def _reward(self): current_base_position = self.minitaur.GetBasePosition() forward_reward = current_base_position[0] - self._last_base_position[0] # Cap the forward reward if a cap is set. forward_reward = min(forward_reward, self._forward_reward_cap) # Penalty for sideways translation. drift_reward = -abs(current_base_position[1] - self._last_base_position[1]) # Penalty for sideways rotation of the body. orientation = self.minitaur.GetBaseOrientation() rot_matrix = pybullet.getMatrixFromQuaternion(orientation) local_up_vec = rot_matrix[6:] shake_reward = -abs(np.dot(np.asarray([1, 1, 0]), np.asarray(local_up_vec))) energy_reward = -np.abs( np.dot(self.minitaur.GetMotorTorques(), self.minitaur.GetMotorVelocities())) * self._time_step objectives = [forward_reward, energy_reward, drift_reward, shake_reward] weighted_objectives = [o * w for o, w in zip(objectives, self._objective_weights)] reward = sum(weighted_objectives) self._objectives.append(objectives) return reward def get_objectives(self): return self._objectives @property def objective_weights(self): """Accessor for the weights for all the objectives. Returns: List of floating points that corresponds to weights for the objectives in the order that objectives are stored. """ return self._objective_weights def _get_observation(self): """Get observation of this environment, including noise and latency. The minitaur class maintains a history of true observations. Based on the latency, this function will find the observation at the right time, interpolate if necessary. Then Gaussian noise is added to this observation based on self.observation_noise_stdev. Returns: The noisy observation with latency. """ observation = [] observation.extend(self.minitaur.GetMotorAngles().tolist()) observation.extend(self.minitaur.GetMotorVelocities().tolist()) observation.extend(self.minitaur.GetMotorTorques().tolist()) observation.extend(list(self.minitaur.GetBaseOrientation())) self._observation = observation return self._observation def _get_true_observation(self): """Get the observations of this environment. It includes the angles, velocities, torques and the orientation of the base. Returns: The observation list. observation[0:8] are motor angles. observation[8:16] are motor velocities, observation[16:24] are motor torques. observation[24:28] is the orientation of the base, in quaternion form. """ observation = [] observation.extend(self.minitaur.GetTrueMotorAngles().tolist()) observation.extend(self.minitaur.GetTrueMotorVelocities().tolist()) observation.extend(self.minitaur.GetTrueMotorTorques().tolist()) observation.extend(list(self.minitaur.GetTrueBaseOrientation())) self._true_observation = observation return self._true_observation def _get_observation_upper_bound(self): """Get the upper bound of the observation. Returns: The upper bound of an observation. See GetObservation() for the details of each element of an observation. """ upper_bound = np.zeros(self._get_observation_dimension()) num_motors = self.minitaur.num_motors upper_bound[0:num_motors] = math.pi # Joint angle. 
upper_bound[num_motors:2 * num_motors] = (motor.MOTOR_SPEED_LIMIT) # Joint velocity. upper_bound[2 * num_motors:3 * num_motors] = (motor.OBSERVED_TORQUE_LIMIT) # Joint torque. upper_bound[3 * num_motors:] = 1.0 # Quaternion of base orientation. return upper_bound def _get_observation_lower_bound(self): """Get the lower bound of the observation.""" return -self._get_observation_upper_bound() def _get_observation_dimension(self): """Get the length of the observation list. Returns: The length of the observation list. """ return len(self._get_observation()) if parse_version(gym.__version__) < parse_version('0.9.6'): _render = render _reset = reset _seed = seed _step = step def set_time_step(self, control_step, simulation_step=0.001): """Sets the time step of the environment. Args: control_step: The time period (in seconds) between two adjacent control actions are applied. simulation_step: The simulation time step in PyBullet. By default, the simulation step is 0.001s, which is a good trade-off between simulation speed and accuracy. Raises: ValueError: If the control step is smaller than the simulation step. """ if control_step < simulation_step: raise ValueError("Control step should be larger than or equal to simulation step.") self.control_time_step = control_step self._time_step = simulation_step self._action_repeat = int(round(control_step / simulation_step)) self._num_bullet_solver_iterations = (NUM_SIMULATION_ITERATION_STEPS / self._action_repeat) self._pybullet_client.setPhysicsEngineParameter( numSolverIterations=self._num_bullet_solver_iterations) self._pybullet_client.setTimeStep(self._time_step) self.minitaur.SetTimeSteps(action_repeat=self._action_repeat, simulation_step=self._time_step) @property def pybullet_client(self): return self._pybullet_client @property def ground_id(self): return self._ground_id @ground_id.setter def ground_id(self, new_ground_id): self._ground_id = new_ground_id @property def env_step_counter(self): return self._env_step_counter
nrz/ylikuutio
external/bullet3/examples/pybullet/gym/pybullet_envs/minitaur/envs/minitaur_gym_env.py
Python
agpl-3.0
25,350
[ "Gaussian" ]
78cb44845ad5400d9b90895f305ec2819593092f5cbc26c0b70b969add03e1ac
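Illustrative aside (not part of the dataset row above): the minitaur environment's _reward combines four objectives, forward progress, energy use, lateral drift, and body shake, as a weighted sum. A minimal self-contained sketch of that combination; every number and the weight vector below are invented for demonstration only.
# All values are made up; the real environment derives them from the simulated
# base position, base orientation and motor state at each control step.
forward_reward = 0.02    # displacement along x since the previous step
drift_reward = -0.001    # minus |lateral displacement|
shake_reward = -0.010    # minus |[1, 1, 0] . local_up_vector|
energy_reward = -0.005   # minus |motor_torques . motor_velocities| * time_step

objectives = [forward_reward, energy_reward, drift_reward, shake_reward]
objective_weights = [1.0, 0.005, 2.0, 2.0]   # hypothetical weights

reward = sum(o * w for o, w in zip(objectives, objective_weights))
print(reward)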
import numpy as np from numpy.linalg import det from astropy.table import Table from astropy import units as u from astropy.coordinates import SkyCoord, search_around_sky from tqdm import tqdm, tnrange, tqdm_notebook from concurrent.futures import ProcessPoolExecutor, as_completed from sklearn.neighbors import KernelDensity ## general functions def describe(var, decimals=3, nullvalue=-999): """Describe one variable """ format_string_base = ("{{:.{d}f}} +/- {{:.{d}f}}; " "median: {{:.{d}f}}; " "limits: [{{:.{d}f}}, {{:.{d}f}}]; " "N={{}} ({{}} NaN; {{}} masked)") format_string = format_string_base.format(d=decimals) print(format_string.format(np.nanmean(var), np.nanstd(var), np.nanmedian(var), np.nanmin(var), np.nanmax(var), len(var), np.sum(np.isnan(var)), len(var[var == nullvalue]))) ## Sky functions def ru2ra(x, ra1=0., ra2=360.): """Transform a random uniform number to a RA between the RA specified as an input""" return x*(ra2-ra1)+ra1 def ru2dec(y, dec1=-90, dec2=90.): """Transform a random uniform number to a Dec between the decs. specified as an input""" sin_dec1rad = np.sin(np.deg2rad(dec1)) sin_dec2rad = np.sin(np.deg2rad(dec2)) inner = y*(sin_dec2rad - sin_dec1rad) + sin_dec1rad return np.rad2deg(np.arcsin(inner)) def generate_random_catalogue(n, ra1=0., ra2=360., dec1=-90, dec2=90.): """Generate a random catalogue in the zone deffined by the input coordinates """ x = np.random.rand(n) y = np.random.rand(n) ra = ru2ra(x, ra1=ra1, ra2=ra2) dec = ru2dec(y, dec1=dec1, dec2=dec2) return SkyCoord(ra, dec, unit=(u.deg, u.deg), frame='icrs') def area_ra_dec(ra_down, ra_up, dec_down, dec_up): """Compute the area in a region between two right ascentions and two declinations. The unit of the output is squared arsecs. """ return ((np.deg2rad(ra_up) - np.deg2rad(ra_down)) * (np.sin(np.deg2rad(dec_up)) - np.sin(np.deg2rad(dec_down))) * np.rad2deg(3600.)**2) ## Data holding classes class Field(object): """ Class to represent a region of the sky between two right ascensions and two declinations. 
""" def __init__(self, ra_down, ra_up, dec_down, dec_up): self.ra_down = ra_down self.ra_up = ra_up self.dec_down = dec_down self.dec_up = dec_up self.area = area_ra_dec(self.ra_down, self.ra_up, self.dec_down, self.dec_up) def filter_catalogue(self, catalogue, colnames=("ra", "dec")): """ Filter a catalogue to the """ # TODO: Check if colnames in the catalogue return catalogue[ ((catalogue[colnames[0]] >= self.ra_down) & (catalogue[colnames[0]] <= self.ra_up) & (catalogue[colnames[1]] >= self.dec_down) & (catalogue[colnames[1]] <= self.dec_up))] def random_catalogue(self, n): """ Generate a random catalogue in the area with n sources """ return generate_random_catalogue( n, ra1=self.ra_down, ra2=self.ra_up, dec1=self.dec_down, dec2=self.dec_up) class Q_0(object): """ Compute the Q_0 given a set of catalogues and a field """ def __init__(self, coords_small, coords_big, field, radius=5.): self.coords_small = coords_small self.coords_big = coords_big self.field = field self.radius = radius self.n_small = len(self.coords_small) self.n_big = len(self.coords_big) def __call__(self, radius=None): """Compute the Q_0 for a given radius (in arcsecs)""" if radius is None: radius = self.radius # Generate random catalogue with n sources as the small one random_small = self.field.random_catalogue(self.n_small) idx_random_small, idx_big, d2d, d3d = search_around_sky( random_small, self.coords_big, radius*u.arcsec) nomatch_random = self.n_small - len(np.unique(idx_random_small)) # Compute match in radius idx_small, idx_big, d2d, d3d = search_around_sky( self.coords_small, self.coords_big, radius*u.arcsec) nomatch_small = self.n_small - len(np.unique(idx_small)) return (1. - float(nomatch_small)/float(nomatch_random)) ## Error functions def R(theta): """Rotation matrix. Input: - theta: angle in degrees """ theta_rad = np.deg2rad(theta) c = np.cos(theta_rad) s = np.sin(theta_rad) return np.array([[c, -s], [s, c]]) def get_sigma(maj_error, min_error, pos_angle, radio_ra, radio_dec, opt_ra, opt_dec, opt_ra_err, opt_dec_err, additional_error=0.6): """ Get the covariance matrix between an elongated radio source and an optical source. Input: * maj_error: error in the major axis of the radio Gaussian in arsecs * min_error: error in the minor axis of the radio Gaussian in arsecs * pos_angle: position angle of the radio Gaussian in degrees * radio_ra: Right ascension of the radio source in degrees * radio_dec: Declination of the radio source in degrees * opt_ra: Right ascension of the optical source in degrees * opt_dec: Declination of the optical source in degrees * opt_ra_err: Error in right ascension of the optical source in degrees * opt_dec_err: Error in declination of the optical source in degrees * additonal_error: Additonal term to add to the error. By default it adds an astrometic error of 0.6 arcsecs. 
Output: * sigma: Combined covariance matrix """ factor = 0.60056120439322491 # sqrt(2.0) / sqrt(8.0 * log(2)); see Condon(1997) for derivation of adjustment factor majerr = factor * maj_error minerr = factor * min_error # angle between the radio and the optical sources cosadj = np.cos(np.deg2rad(0.5*(radio_dec + opt_dec))) phi = np.arctan2((opt_dec - radio_dec), ((opt_ra - radio_ra)*cosadj)) # angle from direction of major axis to vector joining LOFAR source and optical source alpha = np.pi/2.0 - phi - np.deg2rad(pos_angle) # Covariance matrices sigma_radio_nr = np.array([[majerr**2, 0], [0, minerr**2]]) sigma_optical_nr = np.array([[opt_ra_err**2, 0], [0, opt_dec_err**2]]) # Rotate the covariance matrices R_radio = R(alpha) sigma_radio = R_radio @ sigma_radio_nr @ R_radio.T R_optical = R(-phi) sigma_optical = R_optical @ sigma_optical_nr @ R_optical.T # Additional error sigma_additonal_error = np.array([[additional_error**2, 0], [0, additional_error**2]]) sigma = sigma_radio + sigma_optical + sigma_additonal_error return sigma def get_sigma_all_old(maj_error, min_error, pos_angle, radio_ra, radio_dec, opt_ra, opt_dec, opt_ra_err, opt_dec_err, additonal_error=0.6): """ Get the combined error and the axes components between an elongated radio source and an optical source. Input: * maj_error: error in the major axis of the radio Gaussian in arsecs * min_error: error in the minor axis of the radio Gaussian in arsecs * pos_angle: position angle of the radio Gaussian in degrees * radio_ra: Right ascension of the radio source in degrees * radio_dec: Declination of the radio source in degrees * opt_ra: Right ascension of the optical source in degrees * opt_dec: Declination of the optical source in degrees * opt_ra_err: Error in right ascension of the optical source in degrees * opt_dec_err: Error in declination of the optical source in degrees * additonal_error: Additonal term to add to the error. By default it adds an astrometic error of 0.6 arcsecs. Output: * sigma: Combined error * sigma_maj: Error in the major axis direction * sigma_min: Error in the minor axis direction """ factor = 0.60056120439322491 # sqrt(2.0) / sqrt(8.0 * log(2)); see Condon(1997) for derivation of adjustment factor majerr = factor * maj_error minerr = factor * min_error cosadj = np.cos(np.deg2rad(0.5*(radio_dec + opt_dec))) phi = np.arctan2((opt_dec - radio_dec), ((opt_ra - radio_ra)*cosadj)) # angle from direction of major axis to vector joining LOFAR source and optical source sigma = np.pi/2.0 - phi - np.deg2rad(pos_angle) maj_squared = ((majerr * np.cos(sigma))**2 + (opt_ra_err * np.cos(phi))**2 + additonal_error**2/2. ) min_squared = ((minerr * np.sin(sigma))**2 + (opt_dec_err * np.sin(phi))**2 + additonal_error**2/2. 
) return np.sqrt(maj_squared + min_squared), np.sqrt(maj_squared), np.sqrt(min_squared) def get_sigma_all(maj_error, min_error, pos_angle, radio_ra, radio_dec, opt_ra, opt_dec, opt_ra_err, opt_dec_err, additional_error=0.6): """Apply the get_sigma function in parallel and return the determinant of the covariance matrix and its [1,1] term (or [0,0] in Python) """ n = len(opt_ra) det_sigma = np.empty(n) sigma_0_0 = np.empty(n) for i in range(n): sigma = get_sigma(maj_error, min_error, pos_angle, radio_ra, radio_dec, opt_ra[i], opt_dec[i], opt_ra_err[i], opt_dec_err[i], additional_error=additional_error) det_sigma[i] = det(sigma) sigma_0_0[i] = sigma[0,0] return sigma_0_0, det_sigma ## ML functions def get_center(bins): """ Get the central positions for an array defining bins """ return (bins[:-1] + bins[1:]) / 2 def get_n_m(magnitude, bin_list, area): """Compute n(m) Density of sources per unit of area **Note that the output is cumulative** """ n_hist, _ = np.histogram(magnitude, bin_list) return np.cumsum(n_hist)/area def get_n_m_kde(magnitude, bin_centre, area, bandwidth=0.2): """Compute n(m) Density of sources per unit of area in a non-cumulative fashion using a KDE. For this function we need the centre of the bins instead of the edges. **Note that the output is non-cumulative** """ kde_skl = KernelDensity(bandwidth=bandwidth) kde_skl.fit(magnitude[:, np.newaxis]) pdf = np.exp(kde_skl.score_samples(bin_centre[:, np.newaxis])) return pdf/area*len(magnitude)/np.sum(pdf) def get_q_m(magnitude, bin_list, n, n_m, area, radius=5): """Compute q(m) Normalized probability of a real match **Note that the output is cumulative** """ n_hist_total, _ = np.histogram(magnitude, bin_list) # Correct probability if there are no sources if len(magnitude) == 0: n_hist_total = np.ones_like(n_hist_total)*0.5 # Estimate real(m) real_m = n_hist_total # Remove small negative numbers real_m[real_m <= 0.] = 0. real_m_cumsum = np.cumsum(real_m) return real_m_cumsum/real_m_cumsum[-1] def get_q_m_kde(magnitude, bin_centre, radius=5, bandwidth=0.2): """Compute q(m) Normalized probability of a real match in a non-cumulative fashion using a KDE. For this function we need the centre of the bins instead of the edges. **Note that the output is non-cumulative** """ # Get real(m) kde_skl = KernelDensity(bandwidth=bandwidth) kde_skl.fit(magnitude[:, np.newaxis]) pdf_q_m = np.exp(kde_skl.score_samples(bin_centre[:, np.newaxis])) real_m = pdf_q_m*len(magnitude)/np.sum(pdf_q_m) # Correct probability if there are no sources if len(magnitude) == 0: real_m = np.ones_like(n_hist_total)*0.5 # Remove small negative numbers real_m[real_m <= 0.] = 0. return real_m/np.sum(real_m) def estimate_q_m(magnitude, bin_list, n_m, coords_small, coords_big, radius=5): """Compute q(m) Estimation of the distribution of real matched sources with respect to a magnitude (normalized to 1). As explained in Fleuren et al. """ assert len(magnitude) == len(coords_big) # Cross match idx_small, idx_big, d2d, d3d = search_around_sky( coords_small, coords_big, radius*u.arcsec) n_small = len(coords_small) idx = np.unique(idx_big) # Get the distribution of matched sources n_hist_total, _ = np.histogram(magnitude[idx], bin_list) # Correct probability if there are no sources if len(magnitude[idx]) == 0: n_hist_total = np.ones_like(n_hist_total)*0.5 # Estimate real(m) n_m_nocumsum = np.ediff1d(n_m, to_begin=n_m[0]) real_m = n_hist_total - n_small*n_m_nocumsum*np.pi*radius**2 # Remove small negative numbers real_m[real_m <= 0.] = 0. 
real_m_cumsum = np.cumsum(real_m) return real_m_cumsum/real_m_cumsum[-1] def estimate_q_m_kde(magnitude, bin_centre, n_m, coords_small, coords_big, radius=5, bandwidth=0.2): """Compute q(m) Estimation of the distribution of real matched sources with respect to a magnitude (normalized to 1). As explained in Fleuren et al. in a non-cumulative fashion using a KDE. For this function we need the centre of the bins instead of the edges. """ assert len(magnitude) == len(coords_big) # Cross match idx_small, idx_big, d2d, d3d = search_around_sky( coords_small, coords_big, radius*u.arcsec) n_small = len(coords_small) idx = np.unique(idx_big) # Get the distribution of matched sources kde_skl_q_m = KernelDensity(bandwidth=bandwidth) kde_skl_q_m.fit(magnitude[idx][:, np.newaxis]) pdf_q_m = np.exp(kde_skl_q_m.score_samples(bin_centre[:, np.newaxis])) n_hist_total = pdf_q_m*len(magnitude[idx])/np.sum(pdf_q_m) # Correct probability if there are no sources ## CHECK if len(magnitude[idx]) == 0: n_hist_total = np.ones_like(n_hist_total)*0.5 # Estimate real(m) real_m = n_hist_total - n_small*n_m*np.pi*radius**2 # Remove small negative numbers ## CHECK real_m[real_m <= 0.] = 0. return real_m/np.sum(real_m) def fr(r, sigma): """Get the probability related to the spatial distribution""" return 0.5/np.pi/det(sigma)*np.exp(-0.5*r**2/sigma[0,0]) def fr_u(r, sigma_0_0, det_sigma): """Get the probability related to the spatial distribution""" return 0.5/np.pi/det_sigma*np.exp(-0.5*r**2/sigma_0_0) def fr_u_old(r, sigma, sigma_maj, sigma_min): """Get the probability related to the spatial distribution. UPDATED TO NEW FORMULA. MERGE LATER.""" return 0.5/np.pi/sigma_maj/sigma_min*np.exp(-0.5*r**2/(sigma**2)) class SingleMLEstimator(object): """ Class to estimate the Maximum Likelihood ratio """ def __init__(self, q0, n_m, q_m, center): self.q0 = q0 self.n_m = n_m self.q_m = q_m self.center = center def get_qm(self, m): """Get q(m) """ return np.interp(m, self.center, self.q_m*self.q0) def get_nm(self, m): """Get n(m)""" return np.interp(m, self.center, self.n_m) def __call__(self, m, r, sigma_0_0, det_sigma): """Get the likelihood ratio""" return fr_u(r, sigma_0_0, det_sigma) * self.get_qm(m) / self.get_nm(m) class SingleMLEstimatorOld(object): """ Class to estimate the Maximum Likelihood ratio """ def __init__(self, q0, n_m, q_m, center): self.q0 = q0 self.n_m = n_m self.q_m = q_m self.center = center def get_qm(self, m): """Get q(m) """ return np.interp(m, self.center, self.q_m*self.q0) def get_nm(self, m): """Get n(m)""" return np.interp(m, self.center, self.n_m) def __call__(self, m, r, sigma, sigma_maj, sigma_min): """Get the likelihood ratio""" return fr_u_old(r, sigma, sigma_maj, sigma_min) * self.get_qm(m) / self.get_nm(m) class MultiMLEstimator(object): """ Class to estimate the Maximum Likelihood ratio in a vectorized fashion. 
""" def __init__(self, q0, n_m, q_m, center): self.q0 = q0 self.n_m = n_m self.q_m = q_m self.center = center def get_qm(self, m, k): """Get q(m) """ return np.interp(m, self.center[k], self.q_m[k]*self.q0[k]) def get_nm(self, m, k): """Get n(m)""" return np.interp(m, self.center[k], self.n_m[k]) def get_qm_vect(self, m, k): """Get q(m) for a given category """ return np.vectorize(self.get_qm)(m, k) def get_nm_vect(self, m, k): """Get n(m) for a given category """ return np.vectorize(self.get_nm)(m, k) def __call__(self, m, r, sigma_0_0, det_sigma, k): """Get the likelihood ratio""" return fr_u(r, sigma_0_0, det_sigma) * self.get_qm_vect(m, k) / self.get_nm_vect(m, k) class MultiMLEstimatorOld(object): """ Class to estimate the Maximum Likelihood ratio in a vectorized fashion. """ def __init__(self, q0, n_m, q_m, center): self.q0 = q0 self.n_m = n_m self.q_m = q_m self.center = center def get_qm(self, m, k): """Get q(m) """ return np.interp(m, self.center[k], self.q_m[k]*self.q0[k]) def get_nm(self, m, k): """Get n(m)""" return np.interp(m, self.center[k], self.n_m[k]) def get_qm_vect(self, m, k): """Get q(m) for a given category """ return np.vectorize(self.get_qm)(m, k) def get_nm_vect(self, m, k): """Get n(m) for a given category """ return np.vectorize(self.get_nm)(m, k) def __call__(self, m, r, sigma, sigma_maj, sigma_min, k): """Get the likelihood ratio""" return fr_u_old(r, sigma, sigma_maj, sigma_min) * self.get_qm_vect(m, k) / self.get_nm_vect(m, k) def q0_min_level(q_0_list, min_level=0.001): """Ensures that the minimum value of the Q_0 for each bin is always above a minimum threshold """ q_0 = np.array(q_0_list) q_0[q_0 < min_level] = min_level return q_0 def q0_min_numbers(q_0_list, numbers_combined_bins): """Ensures that the minimum value of the Q_0 for each bin is always above a minimum threshold that depends on the number of sources in each bin. """ q_0 = np.array(q_0_list) thresholds = 1./numbers_combined_bins q_0[q_0 < thresholds] = thresholds[q_0 < thresholds] return q_0 def get_threshold(lr_dist, n_bins=200, n_gal_cut=1000): """Get the threshold as the position of the first minima in the LR distribution in log space""" from scipy.signal import savgol_filter val, bins = np.histogram(np.log10(lr_dist + 1), bins=200) if n_gal_cut is not None: val[val >= n_gal_cut] = n_gal_cut v1 = savgol_filter(val, 31, 3) g1 = np.gradient(v1) # Firt derivative g2 = np.gradient(g1) # Second derivative center = get_center(bins) t_value = 10**(center[np.argmax(g2 < 0)])-1 return t_value ## Multiprocessing functions def parallel_process(array, function, n_jobs=3, use_kwargs=False, front_num=3, notebook=False): """ A parallel version of the map function with a progress bar. Args: array (array-like): An array to iterate over. function (function): A python function to apply to the elements of array n_jobs (int, default=16): The number of cores to use use_kwargs (boolean, default=False): Whether to consider the elements of array as dictionaries of keyword arguments to function front_num (int, default=3): The number of iterations to run serially before kicking off the parallel job. Useful for catching bugs Returns: [function(array[0]), function(array[1]), ...] see: http://danshiebler.com/2016-09-14-parallel-progress-bar/ """ if notebook: tqdm_f = tqdm_notebook else: tqdm_f = tqdm #We run the first few iterations serially to catch bugs if front_num > 0: front = [function(**a) if use_kwargs else function(a) for a in array[:front_num]] #If we set n_jobs to 1, just run a list comprehension. 
This is useful for benchmarking and debugging. if n_jobs==1: return front + [function(**a) if use_kwargs else function(a) for a in tqdm_f(array[front_num:])] #Assemble the workers with ProcessPoolExecutor(max_workers=n_jobs) as pool: #Pass the elements of array into function if use_kwargs: futures = [pool.submit(function, **a) for a in array[front_num:]] else: futures = [pool.submit(function, a) for a in array[front_num:]] kwargs = { 'total': len(futures), 'unit': 'it', 'unit_scale': True, 'leave': True } #Print out the progress as tasks complete for f in tqdm_f(as_completed(futures), **kwargs): pass out = [] #Get the results from the futures. for i, future in enumerate(tqdm_f(futures)): out.append(future.result()) return front + out
nudomarinero/mltier1
mltier1.py
Python
gpl-3.0
21,465
[ "Gaussian" ]
c947d63f30f2f24779296af0d121ddb333a0acc4518964bc761ff661fde024a6
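Illustrative aside (not part of the dataset row above): the Q_0 class in this file estimates the fraction of small-catalogue sources with a genuine counterpart by comparing the number of unmatched real sources against the number of unmatched sources in a random catalogue of the same size (as in Fleuren et al.). A toy sketch of that ratio with invented counts:
# Counts below are fabricated for illustration; the real class obtains them
# from search_around_sky cross-matches within the chosen radius.
n_small = 10000        # sources in the small (e.g. radio) catalogue
nomatch_small = 2500   # real sources with no counterpart within the radius
nomatch_random = 8000  # unmatched sources for a same-size random catalogue

q_0 = 1.0 - float(nomatch_small) / float(nomatch_random)
print(q_0)  # about 0.69 for these made-up numbers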
# Copyright 2021 The Distla Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================= """Tests for chebyshev.py.""" import jax from jax import lax import jax.numpy as jnp import numpy as np import pytest from distla_core.chebyshev import chebyshev from distla_core.linalg.utils import testutils from distla_core.utils import pops from distla_core.utils import vops precisions = [lax.Precision.DEFAULT, lax.Precision.HIGH, lax.Precision.HIGHEST] # REDACTED I think pytest has some nice way of doing this properly. np.random.seed(1) # REDACTED This function is duplicated in at least two places within # distla_core. We need something like utils for tests. def random_posdef(D, delta, dtype): """ Returns, as a numpy array, a random D x D positive definite matrix of dtype, that has the ratio of smallest eigenvalue/frobenius norm be approximately delta. delta is assumed to be small compared to 1. """ A = np.random.randn(D, D).astype(dtype) if issubclass(dtype, np.complexfloating): A = A + 1j * np.random.randn(D, D).astype(dtype) A = np.dot(A.T.conj(), A) A = A / np.linalg.norm(A) Emin = np.min(np.linalg.eigh(A)[0]) A = A + (delta - Emin) * np.eye(D, dtype=dtype) A *= dtype(3) * dtype(np.random.rand()) return A def random_self_adjoint(D, dtype): """Returns, as a numpy array, a random D x D Hermitian matrix of dtype.""" A = np.random.randn(D, D).astype(dtype) if issubclass(dtype, np.complexfloating): A = A + 1j * np.random.randn(D, D).astype(dtype) A = (A + A.T.conj()) / dtype(2) A *= dtype(3) * dtype(np.random.rand()) return A def test_return_which(): """Test the usage of the return_which keyword argument.""" def f(x): return x**2 interval = (-1, 1) D = 16 n = 1000 p_sz = 2 atol = 1e-7 matmat, scalar1, pmatmat, scalar2, _ = chebyshev.chebyshize( f, interval, p_sz=p_sz, n=n, return_which=("matmat", "scalar", "pmatmat", "scalar"), ) M = random_self_adjoint(D, np.float32) M = M / np.linalg.norm(M) np.testing.assert_allclose(scalar1(M), M**2, atol=atol) np.testing.assert_allclose(scalar2(M), M**2, atol=atol) M_sq_exact = np.dot(M, M) M_sq = matmat(M) np.testing.assert_allclose(M_sq, M_sq_exact, atol=atol) M_dist = pops.distribute(M) M_sq_dist = jax.pmap(pmatmat, axis_name=pops.AXIS_NAME)(M_dist) M_sq_collected = pops.undistribute(M_sq_dist) np.testing.assert_allclose(M_sq_collected, M_sq_exact, atol=atol) def chebyshev_test( f, interval, M, v, n_cheb, is_vectorized, atol, rtol, test_samples, test_margin, p_sz, precision, ): """A utility function for running tests of Chebyshation. Chebyshizes the function f, and checks the accuracy of the approximation for scalars, the matrix M, M distributed over several devices, and for matrix-vector product f(M) @ v for both undistributed and distributed. Args: f: The function to test on interval: The interval to test in M: The matrix to test on v: The vector to test on n_cheb: Order of Chebyshev expansion is_vectorized: Whether f is already capabable of handling vector arguments. 
atol: Absolute tolerance for accuracy. rtol: Relative tolerance for accuracy. test_samples: How many scalar points to sample for testing within the interval. test_margin: How many points from the ends of the interval to discard. Chebyshev expansions are inaccurate near the ends. p_sz: Panel size for SUMMA. precision: Matmul precision. Raises: AssertionError if any of the tests fail. """ scalar, matmat, matvec, pmatmat, pmatvec, _ = chebyshev.chebyshize( f, interval, n=n_cheb, is_vectorized=is_vectorized, p_sz=p_sz, precision=precision, ) if is_vectorized: f_vec = f else: f_vec = np.vectorize(f) # Skip the first and last few points of the interval, because accuracy there # is bad. xs = np.linspace( interval[0], interval[1], test_samples, )[test_margin:-test_margin - 1] ys_exact = f_vec(xs) ys_cheb = scalar(xs) np.testing.assert_allclose(ys_cheb, ys_exact, rtol=rtol, atol=atol) # Apply f exactly to M using an eigenvalue decomposition. E, U = jnp.linalg.eigh(M) fE = f_vec(E) fM_exact = pops.dot(U * fE, U.T.conj()) fM_cheb = jax.jit(matmat)(M) np.testing.assert_allclose(fM_cheb, fM_exact, rtol=rtol, atol=atol) M_dist = pops.distribute(M) fM_dist = jax.pmap(pmatmat, axis_name=pops.AXIS_NAME)(M_dist) fM_collected = pops.undistribute(fM_dist) np.testing.assert_allclose(fM_collected, fM_exact, rtol=rtol, atol=atol) fMv_exact = pops.dot(fM_exact, v) fMv_cheb = jax.jit(matvec)(M, v) np.testing.assert_allclose(fMv_cheb, fMv_exact, rtol=rtol, atol=atol) v_dist = vops.distribute(v, column_replicated=True) fMv_dist = jax.pmap(pmatvec, axis_name=pops.AXIS_NAME)(M_dist, v_dist) fMv_collected = vops.undistribute(fMv_dist) np.testing.assert_allclose(fMv_collected, fMv_exact, rtol=rtol, atol=atol) @pytest.mark.parametrize("precision", precisions) def test_xlogx(precision): """Creates a Chebyshev approximation of x log(x) within the interval (1e-6, 1), and tests its accuracy for scalars, matrices, and distributed matrices. """ def f(x): return x * np.log(x) is_vectorized = True interval = (1e-6, 1.0) n_cheb = 200 # The first one comes from Chebyshev error, the latter from numerical. rtol = max(5e-6, 10 * testutils.eps(precision)) atol = max(5e-6, 10 * testutils.eps(precision)) test_samples = 1000 test_margin = 1 p_sz = 32 D = 128 dtype = np.float32 delta = 1e-4 M = random_posdef(D, delta, dtype) # Ensure the spectrum of M is within the interval. M = M / jnp.linalg.norm(M) v = np.random.randn(D, 8).astype(dtype) chebyshev_test( f, interval, M, v, n_cheb, is_vectorized, atol, rtol, test_samples, test_margin, p_sz, precision=precision, ) @pytest.mark.parametrize("precision", precisions) def test_piecewise_quartic(precision): """Creates a Chebyshev approximation of a piecewise quartic function within the interval (-2, 2), and tests its accuracy for scalars, matrices, and distributed matrices. """ def f(x): if x < 0: return x**4 else: return -x**4 is_vectorized = False interval = (-2, 2) n_cheb = 60 # The first one comes from Chebyshev error, the latter from numerical. rtol = max(5e-5, 10 * testutils.eps(precision)) atol = max(5e-5, 10 * testutils.eps(precision)) test_samples = 1000 test_margin = 0 p_sz = 8 D = 128 dtype = np.float32 M = random_self_adjoint(D, dtype) # Make sure the spectrum of M is within the interval. 
interval_range = max(abs(i) for i in interval) M = M / (jnp.linalg.norm(M) / interval_range) v = np.random.randn(D, 128).astype(dtype) chebyshev_test( f, interval, M, v, n_cheb, is_vectorized, atol, rtol, test_samples, test_margin, p_sz, precision=precision, ) @pytest.mark.parametrize("precision", precisions) def test_piecewise_fermidirac(precision): """Creates a Chebyshev approximation of the Fermi-Dirac distribution within the interval (-3, 3), and tests its accuracy for scalars, matrices, and distributed matrices. """ mu = 0.0 beta = 10.0 def f(x): return 1 / (np.exp(beta * (x - mu)) + 1) is_vectorized = True interval = (-3, 3) n_cheb = 200 # The first one comes from Chebyshev error, the latter from numerical. rtol = max(5e-6, 10 * testutils.eps(precision)) atol = max(5e-6, 10 * testutils.eps(precision)) test_samples = 1000 test_margin = 0 p_sz = 16 D = 128 dtype = np.float32 M = random_self_adjoint(D, dtype) # Make sure the spectrum of M is within the interval. interval_range = max(abs(i) for i in interval) M = M / (jnp.linalg.norm(M) / interval_range) v = np.random.randn(D, 1).astype(dtype) chebyshev_test( f, interval, M, v, n_cheb, is_vectorized, atol, rtol, test_samples, test_margin, p_sz, precision=precision, ) def test_trace_estimation(): """ Tests trace estimation of a projector using Chebyshev approximation to the step function. We generate a random Hermitian matrix M, and try to estimate the rank of P(M), where P(M) is the projector onto the space of positive eigenvalues of M. This is done by computing n_samples copies of v^T @ P(M) @ v, where v is a random vector with norm 1. Each of these elements vPv is an approximation to the rank, and by taking the average of the n_samples elements we get a more accurate estimate. Chebyshev polynomials enter as a way of approximating P(M) @ v by only using matrix-vector products. The function P(M) is essentially a step function on the eigenvalues of M, which is hard to approximate with Chebyshev polynomials due to its discontinuity, so instead we replace P(M) with a Fermi-Dirac distribution of M, with a very low temperature. """ D = 512 beta = 100 n_cheb = 1000 interval = (-1, 1) p_sz = 8 n_samples = 128 tol = 1e-2 dtype = np.float32 # We approximate the step function by a Fermi-Dirac distribution. def f(x): return 1 / (np.exp(beta * x) + 1) matvec, pmatvec, _ = chebyshev.chebyshize( f, interval, n=n_cheb, p_sz=p_sz, return_which=("matvec", "pmatvec"), ) # A random Hermitian matrix with spectrum within the given interval. M = random_self_adjoint(D, dtype) interval_range = max(abs(i) for i in interval) M = M / (jnp.linalg.norm(M) / interval_range) # A random thin matrix with normalised columns. v = np.random.randn(D, n_samples).astype(dtype) v = v / np.linalg.norm(v, axis=0) E, _ = np.linalg.eigh(M) k_exact = np.count_nonzero(E > 0) / D Pv = jax.jit(matvec)(M, v) k_estimate = jnp.vdot(v, Pv, precision=jax.lax.Precision.HIGHEST) / n_samples assert abs(k_exact - k_estimate) < tol M_dist = pops.distribute(M) v_dist = vops.distribute(v, column_replicated=True) Pv_dist = jax.pmap(pmatvec, axis_name=pops.AXIS_NAME)(M_dist, v_dist) Pv_collected = vops.undistribute(Pv_dist) k_estimate = jnp.vdot( v, Pv_collected, precision=jax.lax.Precision.HIGHEST) / n_samples assert abs(k_exact - k_estimate) < tol
google/distla_core
distla/distla_core/distla_core/chebyshev/test_chebyshev.py
Python
apache-2.0
11,028
[ "DIRAC" ]
87b8f0139af96d88e504d944a67ebc2c08226ee265b0f1f4b2ad813d4c50b440
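Illustrative aside (not part of the dataset row above): test_trace_estimation checks a stochastic rank estimate, averaging v^T P(M) v over random unit vectors, where the test approximates P(M) with a Chebyshev expansion of a low-temperature Fermi-Dirac step. A standalone numpy sketch of the same estimator that uses an exact projector instead of the Chebyshev approximation; sizes and sample counts are arbitrary.
import numpy as np

rng = np.random.default_rng(0)
D, n_samples = 128, 256

A = rng.standard_normal((D, D))
M = (A + A.T) / 2.0                      # random symmetric matrix
E, U = np.linalg.eigh(M)
P = (U * (E > 0)) @ U.T                  # projector onto positive eigenspace

v = rng.standard_normal((D, n_samples))
v /= np.linalg.norm(v, axis=0)           # normalised probe vectors

# Each column gives v_j^T P v_j, whose expectation is rank(P) / D for a random
# unit vector; averaging over columns reduces the variance of the estimate.
k_estimate = np.einsum("ij,ij->", v, P @ v) / n_samples
k_exact = np.count_nonzero(E > 0) / D
print(k_exact, k_estimate)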
# $HeadURL$ """ VirtualMachineHandler provides remote access to VirtualMachineDB The following methods are available in the Service interface: - insertInstance - declareInstanceSubmitted - declareInstanceRunning - instanceIDHeartBeat - declareInstanceHalting - getInstancesByStatus - declareInstancesStopping - getUniqueID( instanceID ) return cloud manager uniqueID form VMDIRAC instanceID """ from types import DictType, FloatType, IntType, ListType, LongType, StringType, TupleType, UnicodeType # DIRAC from DIRAC import gConfig, gLogger, S_ERROR, S_OK from DIRAC.Core.DISET.RequestHandler import RequestHandler from DIRAC.Core.Utilities.ThreadScheduler import gThreadScheduler # VMDIRAC from VMDIRAC.WorkloadManagementSystem.Client.NovaImage import NovaImage from VMDIRAC.WorkloadManagementSystem.Client.OcciImage import OcciImage from VMDIRAC.WorkloadManagementSystem.Client.AmazonImage import AmazonImage from VMDIRAC.WorkloadManagementSystem.DB.VirtualMachineDB import VirtualMachineDB from VMDIRAC.Security import VmProperties __RCSID__ = '$Id: $' # This is a global instance of the VirtualMachineDB class gVirtualMachineDB = False #def initializeVirtualMachineManagerHandler( _serviceInfo ): # # global gVirtualMachineDB # # gVirtualMachineDB = VirtualMachineDB() # gVirtualMachineDB.declareStalledInstances() # # if gVirtualMachineDB._connected: # gThreadScheduler.addPeriodicTask( 60 * 15, gVirtualMachineDB.declareStalledInstances ) # return S_OK() # # return S_ERROR() def initializeVirtualMachineManagerHandler( _serviceInfo ): global gVirtualMachineDB gVirtualMachineDB = VirtualMachineDB() checkStalledInstances() if gVirtualMachineDB._connected: gThreadScheduler.addPeriodicTask( 60 * 15, checkStalledInstances ) return S_OK() return S_ERROR() def checkStalledInstances(): """ To avoid stalling instances consuming resources at cloud endpoint, attempms to halt the stalled list in the cloud endpoint """ result = gVirtualMachineDB.declareStalledInstances() if not result[ 'OK' ]: return S_ERROR() stallingList = result[ 'Value' ] return haltInstances(stallingList) def haltInstances(vmList): """ Common haltInstances for Running(from class VirtualMachineManagerHandler) and Stalled(from checkStalledInstances periodic task) to Halt """ for instanceID in vmList: instanceID = int( instanceID ) result = gVirtualMachineDB.getUniqueID( instanceID ) if not result[ 'OK' ]: gLogger.error( 'haltInstances on getUniqueID call: %s' % result ) continue uniqueID = result [ 'Value' ] result = gVirtualMachineDB.getEndpointFromInstance( uniqueID ) if not result[ 'OK' ]: gLogger.error( 'haltInstances on getEndpointFrominstance call: %s' % result ) continue endpoint = result [ 'Value' ] cloudDriver = gConfig.getValue( "/Resources/VirtualMachines/CloudEndpoints/%s/%s" % ( endpoint, "cloudDriver" ) ) if not cloudDriver: gLogger.error( 'haltInstances: Cloud not found driver option in the Endpoint %s value %s' % (endpoint, cloudDriver)) continue imageName = gVirtualMachineDB.getImageNameFromInstance( uniqueID ) if not imageName[ 'OK' ]: gLogger.error( 'haltInstances: can not getImageNameFromInstance %s' % imageName) continue imageName = imageName[ 'Value' ] gLogger.info( 'Attemping to halt instance: %s, endpoint: %s imageName: %s' % (str(uniqueID),endpoint,imageName) ) if ( cloudDriver == 'occi-0.9' or cloudDriver == 'occi-0.8' or cloudDriver == 'rocci-1.1' ): oima = OcciImage( imageName, endpoint ) connOcci = oima.connectOcci() if not connOcci[ 'OK' ]: gLogger.error( 'haltInstances: can not connect occi' ) continue result = 
oima.stopInstance( uniqueID ) elif cloudDriver == 'nova-1.1': nima = NovaImage( imageName, endpoint ) connNova = nima.connectNova() if not connNova[ 'OK' ]: gLogger.error( 'haltInstances: can not connect nova' ) continue publicIP = gVirtualMachineDB.getPublicIpFromInstance ( uniqueID ) if not publicIP[ 'OK' ]: gLogger.error( 'haltInstances: can not get publicIP' ) continue publicIP = publicIP[ 'Value' ] result = nima.stopInstance( uniqueID, publicIP ) elif ( cloudDriver == 'amazon' ): awsima = None try: awsima = AmazonImage( imageName, endpoint ) except Exception: gLogger.error("Failed to connect to AWS") pass if awsima: connAmazon = awsima.connectAmazon() if not connAmazon[ 'OK' ]: gLogger.error( 'haltInstances: can not connect aws' ) continue result = awsima.stopInstance( uniqueID ) else: gLogger.warn( 'Unexpected cloud driver: %s' % cloudDriver ) if not result[ 'OK' ]: gLogger.error( 'haltInstances: attemping to halt instance %s: %s' % (uniqueID, result )) else: gVirtualMachineDB.recordDBHalt( instanceID, 0 ) return S_OK() class VirtualMachineManagerHandler( RequestHandler ): def initialize( self ): credDict = self.getRemoteCredentials() self.rpcProperties = credDict[ 'properties' ] # self.ownerDN = credDict[ 'DN' ] # self.ownerGroup = credDict[ 'group' ] # self.owner = credDict[ 'username' ] # self.peerUsesLimitedProxy = credDict[ 'isLimitedProxy' ] # # self.diracSetup = self.serviceInfoDict[ 'clientSetup' ] @staticmethod def __logResult( methodName, result ): ''' Method that writes to log error messages ''' if not result[ 'OK' ]: gLogger.error( '%s%s' % ( methodName, result[ 'Message' ] ) ) types_checkVmWebOperation = [ StringType ] def export_checkVmWebOperation( self, operation ): """ return true if rpc has VM_WEB_OPERATION """ if VmProperties.VM_WEB_OPERATION in self.rpcProperties: return S_OK( 'Auth' ) return S_OK( 'Unauth' ) types_insertInstance = [ StringType, ( StringType, UnicodeType ), ] def export_insertInstance( self, imageName, instanceName, endpoint, runningPodName ): """ Check Status of a given image Will insert a new Instance in the DB """ res = gVirtualMachineDB.insertInstance( imageName, instanceName, endpoint, runningPodName ) self.__logResult( 'insertInstance', res ) return res types_getUniqueID = [ StringType ] def export_getUniqueID( self, instanceID): """ return cloud manager uniqueID form VMDIRAC instanceID """ res = gVirtualMachineDB.getUniqueID( instanceID ) self.__logResult( 'getUniqueID', res ) return res types_setInstanceUniqueID = [ LongType, ( StringType, UnicodeType ) ] def export_setInstanceUniqueID( self, instanceID, uniqueID ): """ Check Status of a given image Will insert a new Instance in the DB """ res = gVirtualMachineDB.setInstanceUniqueID( instanceID, uniqueID ) self.__logResult( 'setInstanceUniqueID', res ) return res types_declareInstanceSubmitted = [ StringType ] def export_declareInstanceSubmitted( self, uniqueID ): """ After submission of the instance the Director should declare the new Status """ res = gVirtualMachineDB.declareInstanceSubmitted( uniqueID ) self.__logResult( 'declareInstanceSubmitted', res ) return res types_declareInstanceRunning = [ StringType, StringType ] def export_declareInstanceRunning( self, uniqueID, privateIP ): """ Declares an instance Running and sets its associated info (uniqueID, publicIP, privateIP) Returns S_ERROR if: - instanceName does not have a "Submitted" entry - uniqueID is not unique """ gLogger.info( 'Declare instance Running uniqueID: %s' % ( uniqueID ) ) if not VmProperties.VM_RPC_OPERATION in 
self.rpcProperties: return S_ERROR( "Unauthorized declareInstanceRunning RPC" ) publicIP = self.getRemoteAddress()[ 0 ] gLogger.info( 'Declare instance Running publicIP: %s' % ( publicIP ) ) res = gVirtualMachineDB.declareInstanceRunning( uniqueID, publicIP, privateIP ) self.__logResult( 'declareInstanceRunning', res ) return res types_instanceIDHeartBeat = [ StringType, FloatType, ( IntType, LongType ), ( IntType, LongType ), ( IntType, LongType ) ] def export_instanceIDHeartBeat( self, uniqueID, load, jobs, transferredFiles, transferredBytes, uptime = 0 ): """ Insert the heart beat info from a running instance It checks the status of the instance and the corresponding image Declares "Running" the instance and the image It returns S_ERROR if the status is not OK """ if not VmProperties.VM_RPC_OPERATION in self.rpcProperties: return S_ERROR( "Unauthorized declareInstanceIDHeartBeat RPC" ) #FIXME: do we really need the try / except. The type is fixed to int / long. try: uptime = int( uptime ) except ValueError: uptime = 0 res = gVirtualMachineDB.instanceIDHeartBeat( uniqueID, load, jobs, transferredFiles, transferredBytes, uptime ) self.__logResult( 'instanceIDHeartBeat', res ) return res types_declareInstancesStopping = [ ListType ] def export_declareInstancesStopping( self, instanceIdList ): """ Declares "Stoppig" the instance because the Delete button of Browse Instances The instanceID is the VMDIRAC VM id When next instanceID heat beat with stoppig status on the DB the VM will stop the job agent and terminates ordenery It returns S_ERROR if the status is not OK """ if not VmProperties.VM_WEB_OPERATION in self.rpcProperties: return S_ERROR( "Unauthorized VM Stopping" ) for instanceID in instanceIdList: gLogger.info( 'Stopping DIRAC instanceID: %s' % ( instanceID ) ) result = gVirtualMachineDB.getInstanceStatus( instanceID ) if not result[ 'OK' ]: self.__logResult( 'declareInstancesStopping on getInstanceStatus call: ', result ) return result state = result[ 'Value' ] gLogger.info( 'Stopping DIRAC instanceID: %s, current state %s' % ( instanceID, state ) ) if state == 'Stalled': result = gVirtualMachineDB.getUniqueID( instanceID ) if not result[ 'OK' ]: self.__logResult( 'declareInstancesStopping on getUniqueID call: ', result ) return result uniqueID = result [ 'Value' ] result = gVirtualMachineDB.getEndpointFromInstance( uniqueID ) if not result[ 'OK' ]: self.__logResult( 'declareInstancesStopping on getEndpointFromInstance call: ', result ) return result endpoint = result [ 'Value' ] cloudDriver = gConfig.getValue( "/Resources/VirtualMachines/CloudEndpoints/%s/%s" % ( endpoint, "cloudDriver" ) ) if not cloudDriver: msg = 'Cloud not found driver option in the Endpoint %s value %s' % (endpoint, cloudDriver) return S_ERROR( msg ) result = self.export_declareInstanceHalting( uniqueID, 0, cloudDriver ) elif state == 'New': result = gVirtualMachineDB.recordDBHalt( instanceID, 0 ) self.__logResult( 'declareInstanceHalted', result ) else: # this is only aplied to allowed trasitions result = gVirtualMachineDB.declareInstanceStopping( instanceID ) self.__logResult( 'declareInstancesStopping: on declareInstanceStopping call: ', result ) return result types_declareInstanceHalting = [ StringType, FloatType ] def export_declareInstanceHalting( self, uniqueID, load, cloudDriver ): """ Insert the heart beat info from a halting instance The VM has the uniqueID, which is the Cloud manager VM id Declares "Halted" the instance and the image It returns S_ERROR if the status is not OK """ if not 
VmProperties.VM_RPC_OPERATION in self.rpcProperties: return S_ERROR( "Unauthorized declareInstanceHalting RPC" ) endpoint = gVirtualMachineDB.getEndpointFromInstance( uniqueID ) if not endpoint[ 'OK' ]: self.__logResult( 'declareInstanceHalting', endpoint ) return endpoint endpoint = endpoint[ 'Value' ] result = gVirtualMachineDB.declareInstanceHalting( uniqueID, load ) if not result[ 'OK' ]: if "Halted ->" not in result["Message"]: self.__logResult( 'declareInstanceHalting on change status: ', result ) return result else: gLogger.info("Bad transition from Halted to something, will assume Halted") haltingList = [] instanceID = gVirtualMachineDB.getInstanceID( uniqueID ) if not instanceID[ 'OK' ]: self.__logResult( 'declareInstanceHalting', instanceID ) return instanceID instanceID = instanceID[ 'Value' ] haltingList.append( instanceID ) return haltInstances(haltingList) types_getInstancesByStatus = [ StringType ] def export_getInstancesByStatus( self, status ): """ Get dictionary of Image Names with InstanceIDs in given status """ res = gVirtualMachineDB.getInstancesByStatus( status ) self.__logResult( 'getInstancesByStatus', res ) return res types_getAllInfoForUniqueID = [ StringType ] def export_getAllInfoForUniqueID( self, uniqueID ): """ Get all the info for a UniqueID """ res = gVirtualMachineDB.getAllInfoForUniqueID( uniqueID ) self.__logResult( 'getAllInfoForUniqueID', res ) return res types_getInstancesContent = [ DictType, ( ListType, TupleType ), ( IntType, LongType ), ( IntType, LongType ) ] def export_getInstancesContent( self, selDict, sortDict, start, limit ): """ Retrieve the contents of the DB """ res = gVirtualMachineDB.getInstancesContent( selDict, sortDict, start, limit ) self.__logResult( 'getInstancesContent', res ) return res types_getHistoryForInstanceID = [ ( IntType, LongType ) ] def export_getHistoryForInstanceID( self, instanceId ): """ Retrieve the contents of the DB """ res = gVirtualMachineDB.getHistoryForInstanceID( instanceId ) self.__logResult( 'getHistoryForInstanceID', res ) return res types_getInstanceCounters = [] def export_getInstanceCounters( self ): """ Retrieve the contents of the DB """ res = gVirtualMachineDB.getInstanceCounters() self.__logResult( 'getInstanceCounters', res ) return res types_getHistoryValues = [ IntType, DictType ] def export_getHistoryValues( self, averageBucket, selDict, fields2Get = [], timespan = 0 ): """ Retrieve the contents of the DB """ res = gVirtualMachineDB.getHistoryValues( averageBucket, selDict, fields2Get, timespan ) self.__logResult( 'getHistoryValues', res ) return res types_getRunningInstancesHistory = [ IntType, IntType ] def export_getRunningInstancesHistory( self, timespan, bucketSize ): """ Retrieve number of running instances in each bucket """ res = gVirtualMachineDB.getRunningInstancesHistory( timespan, bucketSize ) self.__logResult( 'getRunningInstancesHistory', res ) return res types_getRunningInstancesBEPHistory = [ IntType, IntType ] def export_getRunningInstancesBEPHistory( self, timespan, bucketSize ): """ Retrieve number of running instances in each bucket by End-Point History """ res = gVirtualMachineDB.getRunningInstancesBEPHistory( timespan, bucketSize ) self.__logResult( 'getRunningInstancesBEPHistory', res ) return res types_getRunningInstancesByRunningPodHistory = [ IntType, IntType ] def export_getRunningInstancesByRunningPodHistory( self, timespan, bucketSize ): """ Retrieve number of running instances in each bucket by Running Pod History """ res = 
gVirtualMachineDB.getRunningInstancesByRunningPodHistory( timespan, bucketSize ) self.__logResult( 'getRunningInstancesByRunningPodHistory', res ) return res types_getRunningInstancesByImageHistory = [ IntType, IntType ] def export_getRunningInstancesByImageHistory( self, timespan, bucketSize ): """ Retrieve number of running instances in each bucket by Image History """ res = gVirtualMachineDB.getRunningInstancesByImageHistory( timespan, bucketSize ) self.__logResult( 'getRunningInstancesByImageHistory', res ) return res #............................................................................... #EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF
vmendez/VMDIRAC
WorkloadManagementSystem/Service/VirtualMachineManagerHandler.py
Python
gpl-3.0
16,993
[ "DIRAC" ]
3bd9cb06d97d5f480594153ff9b1671edaea980abed52e2c2f7aae4075d4a924
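Illustrative aside (not part of the dataset row above): the handler's methods follow DIRAC's S_OK/S_ERROR result convention, where every call returns a dict that callers check via result['OK'] before reading result['Value'] or result['Message']. A standalone sketch of that pattern; the helpers mirror DIRAC's names but are reimplemented here, and the lookup function and its data are hypothetical stand-ins.
def S_OK(value=None):
    # Success result: callers read result['Value'].
    return {'OK': True, 'Value': value}

def S_ERROR(message=''):
    # Failure result: callers read result['Message'] and usually log it.
    return {'OK': False, 'Message': message}

KNOWN_INSTANCES = {'vm-123': 'CloudSiteA'}  # invented example data

def get_endpoint(unique_id):
    # Hypothetical lookup standing in for a call such as
    # gVirtualMachineDB.getEndpointFromInstance in the file above.
    endpoint = KNOWN_INSTANCES.get(unique_id)
    if endpoint is None:
        return S_ERROR('unknown instance %s' % unique_id)
    return S_OK(endpoint)

result = get_endpoint('vm-123')
if not result['OK']:
    print(result['Message'])
else:
    print(result['Value'])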
import os import copy import math import functools import numpy as np from ddapp import transformUtils from ddapp.asynctaskqueue import AsyncTaskQueue from ddapp import objectmodel as om from ddapp import visualization as vis from ddapp import robotstate from ddapp import segmentation from ddapp import planplayback from ddapp.pointpicker import PointPicker from ddapp import vtkAll as vtk from ddapp.simpletimer import SimpleTimer from ddapp import affordanceupdater from ddapp.debugVis import DebugData from ddapp import affordanceitems from ddapp import ikplanner from ddapp import vtkNumpy from numpy import array from ddapp.uuidutil import newUUID import ioUtils from ddapp.tasks.taskuserpanel import TaskUserPanel from ddapp.tasks.taskuserpanel import ImageBasedAffordanceFit import ddapp.tasks.robottasks as rt class TableDemo(object): def __init__(self, robotStateModel, playbackRobotModel, ikPlanner, manipPlanner, footstepPlanner, atlasDriver, lhandDriver, rhandDriver, multisenseDriver, view, sensorJointController, planPlaybackFunction, teleopPanel): self.planPlaybackFunction = planPlaybackFunction self.robotStateModel = robotStateModel self.playbackRobotModel = playbackRobotModel self.ikPlanner = ikPlanner self.manipPlanner = manipPlanner self.footstepPlanner = footstepPlanner self.atlasDriver = atlasDriver self.lhandDriver = lhandDriver self.rhandDriver = rhandDriver self.multisenseDriver = multisenseDriver self.sensorJointController = sensorJointController self.view = view self.teleopPanel = teleopPanel self.affordanceManager = segmentation.affordanceManager # live operation flags: self.useFootstepPlanner = True self.visOnly = False self.planFromCurrentRobotState = True self.useDevelopment = False if (self.useDevelopment): self.visOnly = True self.planFromCurrentRobotState = False extraModels = [self.robotStateModel] self.affordanceUpdater = affordanceupdater.AffordanceGraspUpdater(self.playbackRobotModel, self.ikPlanner, extraModels) else: extraModels = [self.playbackRobotModel] self.affordanceUpdater = affordanceupdater.AffordanceGraspUpdater(self.robotStateModel, self.ikPlanner, extraModels) self.affordanceManager.setAffordanceUpdater(self.affordanceUpdater) self.optionalUserPromptEnabled = True self.requiredUserPromptEnabled = True self.plans = [] self.frameSyncs = {} self.graspingHand = 'left' # left, right, both self.tableData = None self.binFrame = None # top level switch between BDI or IHMC (locked base) and MIT (moving base and back) self.lockBack = True self.lockBase = True self.constraintSet = [] self.reachDist = 0.07 # Switch indicating whether to use affordances as a collision environment self.useCollisionEnvironment = True self.sceneID = None # Switch between simulation/visualisation and real robot operation def setMode(self, mode='visualization'): ''' Switches between visualization and real robot operation. 
mode='visualization' mode='robot' ''' if (mode == 'visualization'): print "Setting mode to VISUALIZATION" self.useDevelopment = True self.visOnly = True self.planFromCurrentRobotState = False extraModels = [self.robotStateModel] self.affordanceUpdater = affordanceupdater.AffordanceGraspUpdater(self.playbackRobotModel, self.ikPlanner, extraModels) else: print "Setting mode to ROBOT OPERATION" self.useDevelopment = False extraModels = [self.playbackRobotModel] self.affordanceUpdater = affordanceupdater.AffordanceGraspUpdater(self.robotStateModel, self.ikPlanner, extraModels) def addPlan(self, plan): self.plans.append(plan) ### Table and Bin Focused Functions def userFitTable(self): self.tableData = None self.picker = PointPicker(self.view, numberOfPoints=2, drawLines=True, callback=self.onSegmentTable) self.picker.start() def userFitBin(self): self.binFrame = None self.picker = PointPicker(self.view, numberOfPoints=2, drawLines=True, callback=self.onSegmentBin) self.picker.start() def waitForTableFit(self): while not self.tableData: yield def waitForBinFit(self): while not self.binFrame: yield def getInputPointCloud(self): polyData = segmentation.getCurrentRevolutionData() if polyData is None: obj = om.findObjectByName('scene') if obj: polyData = obj.polyData else: # fall back to map in case we used mapping rather than loading of a scene obj = om.findObjectByName('map') if obj: polyData = obj.polyData else: # fall back to kinect source and get a frame copy obj = om.findObjectByName('kinect source') if obj: polyData = obj.polyData return polyData def onSegmentTable(self, p1, p2): print p1 print p2 self.picker.stop() om.removeFromObjectModel(self.picker.annotationObj) self.picker = None tableData = segmentation.segmentTableEdge(self.getInputPointCloud(), p1, p2) pose = transformUtils.poseFromTransform(tableData.frame) desc = dict(classname='MeshAffordanceItem', Name='table', Color=[0,1,0], pose=pose) aff = self.affordanceManager.newAffordanceFromDescription(desc) aff.setPolyData(tableData.mesh) self.tableData = tableData tableBox = vis.showPolyData(tableData.box, 'table box', parent=aff, color=[0,1,0], visible=False) tableBox.actor.SetUserTransform(tableData.frame) if self.useCollisionEnvironment: self.addCollisionObject(aff) def onSegmentBin(self, p1, p2): print p1 print p2 self.picker.stop() om.removeFromObjectModel(self.picker.annotationObj) self.picker = None om.removeFromObjectModel(om.findObjectByName('bin frame')) binEdge = p2 - p1 zaxis = [0.0, 0.0, 1.0] xaxis = np.cross(binEdge, zaxis) xaxis /= np.linalg.norm(xaxis) yaxis = np.cross(zaxis, xaxis) t = transformUtils.getTransformFromAxes(xaxis, yaxis, zaxis) t.PostMultiply() t.Translate(p1) pose = transformUtils.poseFromTransform(t) desc = dict(classname='BoxAffordanceItem', Name='bin', uuid=newUUID(), pose=pose, Color=[1, 0, 0], Dimensions=[0.02,0.02,0.02]) obj = self.affordanceManager.newAffordanceFromDescription(desc) #self.binFrame = vis.showFrame(t, 'bin frame', parent=None, scale=0.2) def sortClustersOnTable(self, clusters): ''' returns list copy of clusters, sorted left to right using the table coordinate system. 
(Table y axis points right to left) ''' tableTransform = om.findObjectByName('table').getChildFrame().transform tableYAxis = transformUtils.getAxesFromTransform(tableTransform)[1] tableOrigin = np.array(tableTransform.GetPosition()) origins = [np.array(c.frame.GetPosition()) for c in clusters] dists = [np.dot(origin-tableOrigin, -tableYAxis) for origin in origins] return [clusters[i] for i in np.argsort(dists)] def cleanupSegmentedObjects(self): om.removeFromObjectModel(om.findObjectByName('segmentation')) self.clusterObjects = None self.segmentationData = None def segmentTableObjects(self): tableFrame = om.findObjectByName('table').getChildFrame() #tableCentroid = segmentation.computeCentroid(self.tableData.box) #self.tableData.frame.TransformPoint(tableCentroid, tableFrame) data = segmentation.segmentTableScene(self.getInputPointCloud(), tableFrame.transform.GetPosition() ) data.clusters = self.sortClustersOnTable(data.clusters) objects = vis.showClusterObjects(data.clusters, parent='affordances') self.segmentationData = data self.clusterObjects = [] for i, cluster in enumerate(objects): affObj = affordanceitems.MeshAffordanceItem.promotePolyDataItem(cluster) self.affordanceManager.registerAffordance(affObj) self.clusterObjects.append(affObj) def graspTableObject(self, side='left'): obj, objFrame = self.getNextTableObject(side) if self.useCollisionEnvironment: objAffordance = om.findObjectByName(obj.getProperty('Name') + ' affordance') self.affordanceUpdater.graspAffordance(obj.getProperty('Name'), side) if self.useCollisionEnvironment: self.affordanceUpdater.graspAffordance(objAffordance.getProperty('Name'), side) if self.ikPlanner.fixedBaseArm: # if we're dealing with the real world, close hand self.closeHand(side) return self.delay(5) # wait for three seconds to allow for hand to close def dropTableObject(self, side='left'): obj, _ = self.getNextTableObject(side) obj.setProperty('Visible', False) for child in obj.children(): child.setProperty('Visible', False) self.clusterObjects.remove(obj) # remove from clusterObjects om.removeFromObjectModel(obj) # remove from objectModel if self.useCollisionEnvironment: objAffordance = om.findObjectByName(obj.getProperty('Name') + ' affordance') objAffordance.setProperty('Collision Enabled', False) objAffordance.setProperty('Visible', False) self.affordanceUpdater.ungraspAffordance(objAffordance.getProperty('Name')) self.affordanceUpdater.ungraspAffordance(obj.getProperty('Name')) if self.ikPlanner.fixedBaseArm: # if we're dealing with the real world, open hand self.openHand(side) return self.delay(5) def getNextTableObject(self, side='left'): assert len(self.clusterObjects) obj = self.clusterObjects[0] if side == 'left' else self.clusterObjects[-1] frameObj = obj.findChild(obj.getProperty('Name') + ' frame') if self.useCollisionEnvironment: self.prepCollisionEnvironment() collisionObj = om.findObjectByName(obj.getProperty('Name') + ' affordance') collisionObj.setProperty('Collision Enabled', False) return obj, frameObj def computeTableStanceFrame(self, relativeStance): tableTransform = om.findObjectByName('table').getChildFrame().transform zGround = 0.0 tableHeight = tableTransform.GetPosition()[2] - zGround t = vtk.vtkTransform() t.PostMultiply() t.Translate(relativeStance.GetPosition()[0], relativeStance.GetPosition()[1], -tableHeight) t.Concatenate(tableTransform) vis.showFrame(t, 'table stance frame', parent=om.findObjectByName('table'), scale=0.2) def computeCollisionGoalFrame(self, relativeFrame): tableTransform = 
om.findObjectByName('table').getChildFrame().transform #t = vtk.vtkTransform() #t.PostMultiply() #t.Translate(relativeStance.GetPosition()[0], relativeStance.GetPosition()[1], -tableHeight) relativeFrame.Concatenate(tableTransform) vis.showFrame(relativeFrame, 'table goal frame', parent=om.findObjectByName('table'), scale=0.2) def computeBinStanceFrame(self): binTransform = om.findObjectByName('bin').getChildFrame().transform zGround = 0.0 binHeight = binTransform.GetPosition()[2] - zGround t = vtk.vtkTransform() t.PostMultiply() t.Translate(-0.45, 0.1, -binHeight) t.Concatenate(binTransform) vis.showFrame(t, 'bin stance frame', parent=om.findObjectByName('bin'), scale=0.2) t = vtk.vtkTransform() t.PostMultiply() t.RotateZ(30) t.Translate(-0.8, 0.4, -binHeight) t.Concatenate(binTransform) vis.showFrame(t, 'start stance frame', parent=om.findObjectByName('bin'), scale=0.2) # TODO: deprecate this function: (to end of section): def moveRobotToTableStanceFrame(self): self.teleportRobotToStanceFrame(om.findObjectByName('table stance frame').transform) def moveRobotToBinStanceFrame(self): self.teleportRobotToStanceFrame(om.findObjectByName('bin stance frame').transform) def moveRobotToStartStanceFrame(self): self.teleportRobotToStanceFrame(om.findObjectByName('start stance frame').transform) ### End Object Focused Functions ############################################################### ### Planning Functions ######################################################################## def planFootsteps(self, goalFrame): startPose = self.getPlanningStartPose() request = self.footstepPlanner.constructFootstepPlanRequest(startPose, goalFrame) self.footstepPlan = self.footstepPlanner.sendFootstepPlanRequest(request, waitForResponse=True) def planWalking(self): startPose = self.getPlanningStartPose() plan = self.footstepPlanner.sendWalkingPlanRequest(self.footstepPlan, startPose, waitForResponse=True) self.addPlan(plan) def planWalkToStance(self, stanceTransform): if self.useFootstepPlanner: self.planFootsteps(stanceTransform) self.planWalking() else: self.teleportRobotToStanceFrame(stanceTransform) def planPostureFromDatabase(self, groupName, postureName, side='left'): startPose = self.getPlanningStartPose() endPose = self.ikPlanner.getMergedPostureFromDatabase(startPose, groupName, postureName, side=side) newPlan = self.ikPlanner.computePostureGoal(startPose, endPose) self.addPlan(newPlan) # TODO: integrate this function with the ones below def getRaisedArmPose(self, startPose, side): return self.ikPlanner.getMergedPostureFromDatabase(startPose, 'General', 'arm up pregrasp', side) def getPreDropHighPose(self, startPose, side): return self.ikPlanner.getMergedPostureFromDatabase(startPose, 'table clearing', 'pre drop 1', side) def getPreDropLowPose(self, startPose, side): return self.ikPlanner.getMergedPostureFromDatabase(startPose, 'table clearing', 'pre drop 2', side) def getLoweredArmPose(self, startPose, side): return self.ikPlanner.getMergedPostureFromDatabase(startPose, 'General', 'handdown', side) def planPreGrasp(self, side='left'): startPose = self.getPlanningStartPose() endPose = self.ikPlanner.getMergedPostureFromDatabase(startPose, 'General', 'arm up pregrasp', side=side) newPlan = self.ikPlanner.computePostureGoal(startPose, endPose) self.addPlan(newPlan) def planLowerArm(self, side): startPose = self.getPlanningStartPose() endPose = self.ikPlanner.getMergedPostureFromDatabase(startPose, 'General', 'handdown', side=side) newPlan = self.ikPlanner.computePostureGoal(startPose, endPose) 
self.addPlan(newPlan) def planDropPostureRaise(self, side): startPose = self.getPlanningStartPose() poseA = self.getRaisedArmPose(startPose, side) poseB = self.getPreDropHighPose(startPose, side) poseC = self.getPreDropLowPose(startPose, side) plan = self.ikPlanner.computeMultiPostureGoal([startPose, poseA, poseB, poseC]) self.addPlan(plan) def planDropPostureLower(self, side): startPose = self.getPlanningStartPose() poseA = self.getPreDropHighPose(startPose, side) poseB = self.getRaisedArmPose(startPose, side) poseC = self.getLoweredArmPose(startPose, side) plan = self.ikPlanner.computeMultiPostureGoal([startPose, poseA, poseB, poseC]) self.addPlan(plan) def planDropPostureSwap(self, lowerSide, raiseSide): startPose = self.getPlanningStartPose() poseA = self.getRaisedArmPose(startPose, raiseSide) poseA = self.getPreDropHighPose(poseA, lowerSide) poseB = self.getPreDropHighPose(poseA, raiseSide) poseB = self.getRaisedArmPose(poseB, lowerSide) poseC = self.getPreDropLowPose(poseB, raiseSide) poseC = self.getLoweredArmPose(poseC, lowerSide) plan = self.ikPlanner.computeMultiPostureGoal([startPose, poseA, poseB, poseC]) self.addPlan(plan) def planLowerArmAndStand(self, side): startPose = self.getPlanningStartPose() endPose = self.getLoweredArmPose(startPose, side) endPose, info = self.ikPlanner.computeStandPose(endPose) plan = self.ikPlanner.computePostureGoal(startPose, endPose) self.addPlan(plan) def planReachToTableObject(self, side='left'): obj, frame = self.getNextTableObject(side) startPose = self.getPlanningStartPose() if self.ikPlanner.fixedBaseArm: # includes reachDist hack instead of in ikPlanner (TODO!) f = transformUtils.frameFromPositionAndRPY( np.array(frame.transform.GetPosition())-np.array([self.reachDist+.15,0,-.03]), [0,0,-90] ) f.PreMultiply() f.RotateY(90) f.Update() self.constraintSet = self.ikPlanner.planEndEffectorGoal(startPose, side, f, lockBase=False, lockBack=True) #newFrame = vis.FrameItem('reach_item', f, self.view) #self.constraintSet = self.ikPlanner.planGraspOrbitReachPlan(startPose, side, newFrame, constraints=None, dist=self.reachDist, lockBase=self.lockBase, lockBack=self.lockBack, lockArm=False) else: self.constraintSet = self.ikPlanner.planGraspOrbitReachPlan(startPose, side, frame, constraints=None, dist=self.reachDist, lockBase=self.lockBase, lockBack=self.lockBack, lockArm=False) loweringSide = 'left' if side == 'right' else 'right' armPose = self.getLoweredArmPose(startPose, loweringSide) armPoseName = 'lowered_arm_pose' self.ikPlanner.ikServer.sendPoseToServer(armPose, armPoseName) loweringSideJoints = [] if (loweringSide == 'left'): loweringSideJoints += self.ikPlanner.leftArmJoints else: loweringSideJoints += self.ikPlanner.rightArmJoints reachingSideJoints = [] if (side == 'left'): reachingSideJoints += self.ikPlanner.leftArmJoints else: reachingSideJoints += self.ikPlanner.rightArmJoints armPostureConstraint = self.ikPlanner.createPostureConstraint(armPoseName, loweringSideJoints) armPostureConstraint.tspan = np.array([1.0, 1.0]) self.constraintSet.constraints.append(armPostureConstraint) self.constraintSet.runIk() #armPose = self.getRaisedArmPose(startPose, side) #armPoseName = 'raised_arm_pose' #self.ikPlanner.ikServer.sendPoseToServer(armPose, armPoseName) #armPostureConstraint = self.ikPlanner.createPostureConstraint(armPoseName, reachingSideJoints) #armPostureConstraint.tspan = np.array([0.5, 0.5]) #self.constraintSet.constraints.append(armPostureConstraint) print 'planning reach to' plan = self.constraintSet.runIkTraj() self.addPlan(plan) 
def planReachToTableObjectCollisionFree(self, side ='left'): # Hard-coded demonstration of collision reaching to object on table # Using RRT Connect if (self.lockBase is True): if (self.lockBack is False): print "Currently the combination Base Fixed, Back Free doesn't work" print "setting to Base Free, Back Free for collision planning" self.lockBase=False self.lockBack=False frameObj = om.findObjectByName( 'table goal frame') startPose = self.getPlanningStartPose() self.constraintSet = self.ikPlanner.planEndEffectorGoal(startPose, side, frameObj.transform, lockBase=self.lockBase, lockBack=self.lockBack) self.constraintSet.runIk() print 'planning reach to planReachToTableObjectCollisionFree' self.constraintSet.ikParameters.usePointwise = False self.constraintSet.ikParameters.useCollision = True self.teleopPanel.endEffectorTeleop.updateCollisionEnvironment() plan = self.constraintSet.runIkTraj() self.addPlan(plan) def planTouchTableObject(self, side='left'): obj, frame = self.getNextTableObject(side) startPose = self.getPlanningStartPose() if self.ikPlanner.fixedBaseArm: # includes distance hack and currently uses reachDist instead of touchDist (TODO!) f = transformUtils.frameFromPositionAndRPY( np.array(frame.transform.GetPosition())-np.array([self.reachDist+.05,0,-0.03]), [0,0,-90] ) f.PreMultiply() f.RotateY(90) f.Update() item = vis.FrameItem('reach_item', f, self.view) self.constraintSet = self.ikPlanner.planEndEffectorGoal(startPose, side, f, lockBase=False, lockBack=True) else: self.constraintSet = self.ikPlanner.planGraspOrbitReachPlan(startPose, side, frame, dist=0.05, lockBase=self.lockBase, lockBack=self.lockBack) self.constraintSet.constraints[-1].tspan = [-np.inf, np.inf] self.constraintSet.constraints[-2].tspan = [-np.inf, np.inf] self.constraintSet.runIk() print 'planning touch' plan = self.constraintSet.runIkTraj() self.addPlan(plan) def planLiftTableObject(self, side='left'): startPose = self.getPlanningStartPose() self.constraintSet = self.ikPlanner.planEndEffectorDelta(startPose, side, [0.0, 0.0, 0.15]) if not self.ikPlanner.fixedBaseArm: self.constraintSet.constraints[-1].tspan[1] = 1.0 endPose, info = self.constraintSet.runIk() if not self.ikPlanner.fixedBaseArm: endPose = self.getRaisedArmPose(endPose, side) reachingSideJoints = [] if (side == 'left'): reachingSideJoints += self.ikPlanner.leftArmJoints else: reachingSideJoints += self.ikPlanner.rightArmJoints endPoseName = 'raised_arm_end_pose' self.ikPlanner.ikServer.sendPoseToServer(endPose, endPoseName) postureConstraint = self.ikPlanner.createPostureConstraint(endPoseName, reachingSideJoints) postureConstraint.tspan = np.array([2.0, 2.0]) self.constraintSet.constraints.append(postureConstraint) #postureConstraint = self.ikPlanner.createPostureConstraint('q_nom', robotstate.matchJoints('.*_leg_kny')) #postureConstraint.tspan = np.array([2.0, 2.0]) #self.constraintSet.constraints.append(postureConstraint) #postureConstraint = self.ikPlanner.createPostureConstraint('q_nom', robotstate.matchJoints('back')) #postureConstraint.tspan = np.array([2.0, 2.0]) #self.constraintSet.constraints.append(postureConstraint) print 'planning lift' plan = self.constraintSet.runIkTraj() self.addPlan(plan) ### End Planning Functions #################################################################### ########## Glue Functions ##################################################################### def teleportRobotToStanceFrame(self, frame): self.sensorJointController.setPose('q_nom') stancePosition = frame.GetPosition() stanceOrientation = 
frame.GetOrientation() q = self.sensorJointController.q.copy() q[:2] = [stancePosition[0], stancePosition[1]] q[5] = math.radians(stanceOrientation[2]) self.sensorJointController.setPose('EST_ROBOT_STATE', q) def getHandDriver(self, side): assert side in ('left', 'right') return self.lhandDriver if side == 'left' else self.rhandDriver def openHand(self, side): #self.getHandDriver(side).sendOpen() self.getHandDriver(side).sendCustom(0.0, 100.0, 100.0, 0) def closeHand(self, side): self.getHandDriver(side).sendCustom(100.0, 100.0, 100.0, 0) def sendNeckPitchLookDown(self): self.multisenseDriver.setNeckPitch(40) def sendNeckPitchLookForward(self): self.multisenseDriver.setNeckPitch(15) def waitForAtlasBehaviorAsync(self, behaviorName): assert behaviorName in self.atlasDriver.getBehaviorMap().values() while self.atlasDriver.getCurrentBehaviorName() != behaviorName: yield def printAsync(self, s): yield print s def optionalUserPrompt(self, message): if not self.optionalUserPromptEnabled: return yield result = raw_input(message) if result != 'y': raise Exception('user abort.') def requiredUserPrompt(self, message): if not self.requiredUserPromptEnabled: return yield result = raw_input(message) if result != 'y': raise Exception('user abort.') def delay(self, delayTimeInSeconds): yield t = SimpleTimer() while t.elapsed() < delayTimeInSeconds: yield def waitForCleanLidarSweepAsync(self): currentRevolution = self.multisenseDriver.displayedRevolution desiredRevolution = currentRevolution + 2 while self.multisenseDriver.displayedRevolution < desiredRevolution: yield def getEstimatedRobotStatePose(self): return self.sensorJointController.getPose('EST_ROBOT_STATE') def getPlanningStartPose(self): if self.planFromCurrentRobotState: return self.getEstimatedRobotStatePose() else: if self.plans: return robotstate.convertStateMessageToDrakePose(self.plans[-1].plan[-1]) else: return self.getEstimatedRobotStatePose() def cleanupFootstepPlans(self): om.removeFromObjectModel(om.findObjectByName('walking goal')) om.removeFromObjectModel(om.findObjectByName('footstep plan')) self.footstepPlan = None def playSequenceNominal(self): assert None not in self.plans self.planPlaybackFunction(self.plans) def commitManipPlan(self): self.manipPlanner.commitManipPlan(self.plans[-1]) def commitFootstepPlan(self): self.footstepPlanner.commitFootstepPlan(self.footstepPlan) def waitForPlanExecution(self, plan): planElapsedTime = planplayback.PlanPlayback.getPlanElapsedTime(plan) return self.delay(planElapsedTime + 1.0) def animateLastPlan(self): plan = self.plans[-1] if not self.visOnly: self.commitManipPlan() return self.waitForPlanExecution(plan) def onRobotModelChanged(self, model): for linkName in self.frameSyncs.keys(): t = self.playbackRobotModel.getLinkFrame(linkName) vis.updateFrame(t, '%s frame' % linkName, scale=0.2, visible=False, parent='planning') def createCollisionPlanningScene(self, scene=0, loadPerception=True, moveRobot=False): self.createCollisionPlanningSceneMain(self.sceneID,loadPerception,moveRobot) def createCollisionPlanningSceneMain(self, scene=0, loadPerception=True, moveRobot=False): om.removeFromObjectModel(om.findObjectByName('affordances')) om.removeFromObjectModel(om.findObjectByName('segmentation')) if (self.sceneID is not None): # use variable if one exists scene = self.sceneID if (scene == 4): filename = os.path.expanduser('~/drc-testing-data/ihmc_table/ihmc_table.vtp') polyData = ioUtils.readPolyData( filename ) vis.showPolyData( polyData,'scene') self.segmentIhmcScene() relativeStance = 
transformUtils.frameFromPositionAndRPY([-0.6, 0, 0],[0,0,0]) relativeReachGoal = transformUtils.frameFromPositionAndRPY([-0.19,0.4,0.16],[90,90,0]) self.computeTableStanceFrame(relativeStance) self.computeCollisionGoalFrame(relativeReachGoal) if (moveRobot): self.moveRobotToTableStanceFrame() return elif (scene == 0): pose = (array([ 1.20, 0. , 0.8]), array([ 1., 0., 0., 0.])) desc = dict(classname='BoxAffordanceItem', Name='table', uuid=newUUID(), pose=pose, Color=[0.66, 0.66, 0.66], Dimensions=[0.5,1,0.06]) obj = self.affordanceManager.newAffordanceFromDescription(desc) pose = (array([ 1.20, 0.5 , 0.4]), array([ 1., 0., 0., 0.])) desc = dict(classname='BoxAffordanceItem', Name='scene0-leg1', uuid=newUUID(), pose=pose, Color=[0.66, 0.66, 0.66], Dimensions=[0.5,0.05,0.8]) obj = self.affordanceManager.newAffordanceFromDescription(desc) pose = (array([ 1.20, -0.5 , 0.4]), array([ 1., 0., 0., 0.])) desc = dict(classname='BoxAffordanceItem', Name='scene0-leg2', uuid=newUUID(), pose=pose, Color=[0.66, 0.66, 0.66], Dimensions=[0.5,0.05,0.8]) obj = self.affordanceManager.newAffordanceFromDescription(desc) pose = (array([ 1.05, 0.3 , 0.98]), array([ 1., 0., 0., 0.])) desc = dict(classname='BoxAffordanceItem', Name='scene0-object1', uuid=newUUID(), pose=pose, Color=[0.9, 0.9, 0.1], Dimensions=[0.08,0.08,0.24]) obj1 = self.affordanceManager.newAffordanceFromDescription(desc) pose = (array([ 1.25, 0.1 , 0.98]), array([ 1., 0., 0., 0.])) desc = dict(classname='BoxAffordanceItem', Name='scene0-object2', uuid=newUUID(), pose=pose, Color=[0.0, 0.9, 0.0], Dimensions=[0.07,0.07,0.25]) obj2 = self.affordanceManager.newAffordanceFromDescription(desc) pose = (array([ 1.25, -0.1 , 0.95]), array([ 1., 0., 0., 0.])) desc = dict(classname='CylinderAffordanceItem', Name='scene0-object3', uuid=newUUID(), pose=pose, Color=[0.0, 0.9, 0.0], Radius=0.035, Length = 0.22) obj3 = self.affordanceManager.newAffordanceFromDescription(desc) pose = (array([ 1.05, -0.2 , 0.95]), array([ 1., 0., 0., 0.])) desc = dict(classname='CylinderAffordanceItem', Name='scene0-object4', uuid=newUUID(), pose=pose, Color=[0.9, 0.1, 0.1], Radius=0.045, Length = 0.22) obj4 = self.affordanceManager.newAffordanceFromDescription(desc) self.clusterObjects = [obj1,obj2, obj3, obj4] relativeStance = transformUtils.frameFromPositionAndRPY([-0.58, 0, 0],[0,0,0]) relativeReachGoal = transformUtils.frameFromPositionAndRPY([-0.15,0.4,0.2],[90,90,0]) self.computeTableStanceFrame(relativeStance) self.computeCollisionGoalFrame(relativeReachGoal) elif (scene == 1): pose = (array([-0.98873106, 1.50393395, 0.91420001]), array([ 0.49752312, 0. , 0. , 0.86745072])) desc = dict(classname='BoxAffordanceItem', Name='table', uuid=newUUID(), pose=pose, Color=[0.66, 0.66, 0.66], Dimensions=[0.5,1,0.06]) obj = self.affordanceManager.newAffordanceFromDescription(desc) pose = (array([-0.98873106, 1.50393395, 0.57]), array([ 0.49752312, 0. , 0. , 0.86745072])) desc = dict(classname='BoxAffordanceItem', Name='scene1-object1', uuid=newUUID(), pose=pose, Color=[0.005, 0.005, 0.3], Dimensions=[0.05,0.05,0.14]) obj1 = self.affordanceManager.newAffordanceFromDescription(desc) self.clusterObjects = [obj1] relativeStance = transformUtils.frameFromPositionAndRPY([-0.6, 0, 0],[0,0,0]) relativeReachGoal = transformUtils.frameFromPositionAndRPY([0,0.1,-0.35],[90,90,0]) self.computeTableStanceFrame(relativeStance) self.computeCollisionGoalFrame(relativeReachGoal) elif (scene == 2): pose = (array([ 0.49374956, 1.51828255, 0.84852654]), array([ 0.86198582, 0. , 0. 
, 0.50693238])) desc = dict(classname='BoxAffordanceItem', Name='table', uuid=newUUID(), pose=pose, Color=[0.66, 0.66, 0.66], Dimensions=[0.5,1,0.06]) obj = self.affordanceManager.newAffordanceFromDescription(desc) pose = (array([ 0.57555491, 1.6445656 , 0.93993633]), array([ 0.86280979, 0. , 0. , 0.50552871])) desc = dict(classname='BoxAffordanceItem', Name='scene2-object', uuid=newUUID(), pose=pose, Color=[0.005, 0.005, 0.3], Dimensions=[0.05,0.05,0.12]) obj1 = self.affordanceManager.newAffordanceFromDescription(desc) pose = (array([ 0.38458635, 1.32625758, 1.37444768]), array([ 0.86314205, 0. , 0. , 0.50496119])) desc = dict(classname='BoxAffordanceItem', Name='scene2-wall1', uuid=newUUID(), pose=pose, Color=[0.66, 0.66, 0.66], Dimensions=[0.05,1.0,0.4]) obj = self.affordanceManager.newAffordanceFromDescription(desc) pose = (array([ 0.08282192, 1.49589397, 1.02518917]), array([ 0.86314205, 0. , 0. , 0.50496119])) desc = dict(classname='BoxAffordanceItem', Name='scene2-wall2', uuid=newUUID(), pose=pose, Color=[0.66, 0.66, 0.66], Dimensions=[0.05,0.3,0.29]) obj = self.affordanceManager.newAffordanceFromDescription(desc) pose = (array([ 0.69532105, 1.15157858, 1.02518917]), array([ 0.86314205, 0. , 0. , 0.50496119])) desc = dict(classname='BoxAffordanceItem', Name='scene2-wall3', uuid=newUUID(), pose=pose, Color=[0.66, 0.66, 0.66], Dimensions=[0.05,0.3,0.29]) obj = self.affordanceManager.newAffordanceFromDescription(desc) self.clusterObjects = [obj1] relativeStance = transformUtils.frameFromPositionAndRPY([-0.65, -0.3, 0],[0,0,0]) relativeReachGoal = transformUtils.frameFromPositionAndRPY([0.15,0.07,0.14],[90,90,0]) self.computeTableStanceFrame(relativeStance) self.computeCollisionGoalFrame(relativeReachGoal) elif (scene == 3): pose = (array([-0.69, -1.50, 0.92]), array([-0.707106781, 0. , 0. , 0.707106781 ])) desc = dict(classname='BoxAffordanceItem', Name='table', uuid=newUUID(), pose=pose, Color=[0.66, 0.66, 0.66], Dimensions=[0.5,1,0.06]) obj = self.affordanceManager.newAffordanceFromDescription(desc) pose = (array([-1.05, -1.10, 0.95]), array([-0.707106781, 0. , 0. , 0.707106781 ])) desc = dict(classname='BoxAffordanceItem', Name='scene3-edge1', uuid=newUUID(), pose=pose, Color=[0.66, 0.66, 0.66], Dimensions=[0.1,0.3,0.05]) obj = self.affordanceManager.newAffordanceFromDescription(desc) pose = (array([-0.35, -1.10, 0.95]), array([-0.707106781, 0. , 0. , 0.707106781 ])) desc = dict(classname='BoxAffordanceItem', Name='scene3-edge2', uuid=newUUID(), pose=pose, Color=[0.66, 0.66, 0.66], Dimensions=[0.1,0.3,0.05]) obj = self.affordanceManager.newAffordanceFromDescription(desc) pose = (array([-0.6803156 , -1.1826616 , 1.31299839]), array([-0.707106781, 0. , 0. 
, 0.707106781 ])) desc = dict(classname='BoxAffordanceItem', Name='scene3-edge3', uuid=newUUID(), pose=pose, Color=[0.66, 0.66, 0.66], Dimensions=[0.14,1.0,0.07]) obj = self.affordanceManager.newAffordanceFromDescription(desc) pose = (array([ -0.7, -1.5 , 1.03]), array([ 1., 0., 0., 0.])) desc = dict(classname='BoxAffordanceItem', Name='scene3-object1', uuid=newUUID(), pose=pose, Color=[0.9, 0.9, 0.1], Dimensions=[0.05,0.05,0.14]) obj1 = self.affordanceManager.newAffordanceFromDescription(desc) self.clusterObjects = [obj1] relativeStance = transformUtils.frameFromPositionAndRPY([-0.7, -0.1, 0],[0,0,0]) relativeReachGoal = transformUtils.frameFromPositionAndRPY([0.0,0.07,0.14],[90,90,0]) self.computeTableStanceFrame(relativeStance) self.computeCollisionGoalFrame(relativeReachGoal) self.userFitBin() self.onSegmentBin( np.array([ 0.62, -1.33, 0.80]), np.array([ 0.89, -0.87, 0.57]) ) self.computeBinStanceFrame() if (moveRobot): self.moveRobotToTableStanceFrame() if (loadPerception): filename = os.path.expanduser('~/drc-testing-data/ihmc_table/'+str(scene)+'.vtp') pd = ioUtils.readPolyData( filename ) vis.showPolyData(pd,'scene') ######### Setup collision environment #################### def prepCollisionEnvironment(self): assert len(self.clusterObjects) for obj in self.clusterObjects: self.addCollisionObject(obj) def addCollisionObject(self, obj): if om.getOrCreateContainer('affordances').findChild(obj.getProperty('Name') + ' affordance'): return # Affordance has been created previously frame = obj.findChild(obj.getProperty('Name') + ' frame') (origin, quat) = transformUtils.poseFromTransform(frame.transform) (xaxis, yaxis, zaxis) = transformUtils.getAxesFromTransform(frame.transform) # TODO: move this into transformUtils as getAxisDimensions or so box = obj.findChild(obj.getProperty('Name') + ' box') box_np = vtkNumpy.getNumpyFromVtk(box.polyData, 'Points') box_min = np.amin(box_np, 0) box_max = np.amax(box_np, 0) xwidth = np.linalg.norm(box_max[0]-box_min[0]) ywidth = np.linalg.norm(box_max[1]-box_min[1]) zwidth = np.linalg.norm(box_max[2]-box_min[2]) name = obj.getProperty('Name') + ' affordance' boxAffordance = segmentation.createBlockAffordance(origin, xaxis, yaxis, zaxis, xwidth, ywidth, zwidth, name, parent='affordances') boxAffordance.setSolidColor(obj.getProperty('Color')) boxAffordance.setProperty('Alpha', 0.3) ######### Nominal Plans and Execution ################################################################# def prepGetSceneFrame(self, createNewObj=False): if createNewObj: objScene = vis.showPolyData(self.getInputPointCloud(), 'scene', colorByName='rgb_colors') else: objScene = vis.updatePolyData(self.getInputPointCloud(), 'scene', colorByName='rgb_colors') def prepKukaTestDemoSequence(self, inputFile='~/drc-testing-data/tabletop/kinect_collision_environment.vtp'): filename = os.path.expanduser(inputFile) scene = ioUtils.readPolyData(filename) vis.showPolyData(scene,"scene") self.prepKukaLabScene() def prepKukaLabScene(self): self.userFitTable() self.onSegmentTable( np.array([ 0.91544128, 0.06092263, 0.14906664]), np.array([ 0.73494804, -0.21896157, 0.13435645]) ) self.userFitBin() # TODO: actually fit bin, put bin in picture. self.onSegmentBin( np.array([-0.02, 2.43, 0.61 ]), np.array([-0.40, 2.79, 0.61964661]) ) # TODO: fix bin location self.segmentTableObjects() # Plan sequence self.plans = [] def prepTestDemoSequence(self): ''' Running this function should launch a full planning sequence to pick to objects, walk and drop. 
Requires footstep footstepPlanner ''' filename = os.path.expanduser('~/drc-testing-data/tabletop/table-and-bin-scene.vtp') scene = ioUtils.readPolyData(filename) vis.showPolyData(scene,"scene") #stanceFrame = transformUtils.frameFromPositionAndRPY([0, 0, 0], [0, 0, 123.0]) #self.teleportRobotToStanceFrame(stanceFrame) self.userFitTable() self.onSegmentTable( np.array([-1.72105646, 2.73210716, 0.79449952]), np.array([-1.67336452, 2.63351011, 0.78698605]) ) self.userFitBin() self.onSegmentBin( np.array([-0.02, 2.43, 0.61 ]), np.array([-0.40, 2.79, 0.61964661]) ) relativeStance = transformUtils.frameFromPositionAndRPY([-0.65, 0, 0],[0,0,0]) self.computeTableStanceFrame(relativeStance) self.computeBinStanceFrame() # Actually plan the sequence: #self.demoSequence() def segmentIhmcScene(self): self.userFitBin() self.onSegmentBin( np.array([ 0.62, -1.33, 0.80]), np.array([ 0.89, -0.87, 0.57]) ) self.userFitTable() self.onSegmentTable( np.array([ 1.11, 0.11, 0.85]), np.array([ 0.97, 0.044, 0.84]) ) self.segmentTableObjects() self.computeBinStanceFrame() def planSequence(self): self.useFootstepPlanner = True self.cleanupFootstepPlans() self.planFromCurrentRobotState = False self.segmentTableObjects() self.plans = [] # Go home self.planWalkToStance(om.findObjectByName('start stance frame').transform) # Pick Objects from table: self.planWalkToStance(om.findObjectByName('table stance frame').transform) if (self.graspingHand == 'both'): self.planSequenceTablePick('left') self.planSequenceTablePick('right') else: self.planSequenceTablePick(self.graspingHand) # Go home self.planWalkToStance(om.findObjectByName('start stance frame').transform) # Go to Bin self.planWalkToStance(om.findObjectByName('bin stance frame').transform) # Drop into the Bin: if (self.graspingHand == 'both'): self.planDropPostureRaise('left') self.dropTableObject('left') self.planDropPostureLower('left') self.planDropPostureRaise('right') self.dropTableObject('right') self.planDropPostureLower('right') else: self.planDropPostureRaise(self.graspingHand) self.dropTableObject(self.graspingHand) self.planDropPostureLower(self.graspingHand) # Go home self.planWalkToStance(om.findObjectByName('start stance frame').transform) def planSequenceTablePick(self, side): self.planPreGrasp(side) if self.ikPlanner.fixedBaseArm: self.planLowerArm(side) self.planReachToTableObject(side) if not self.ikPlanner.fixedBaseArm: self.planTouchTableObject(side) # TODO: distance is handled by reach, hence ignore self.graspTableObject(side) self.planLiftTableObject(side) def autonomousExecute(self): ''' Use global variable self.useDevelopment to switch between simulation and real robot execution ''' #self.ikPlanner.ikServer.usePointwise = True #self.ikPlanner.ikServer.maxDegreesPerSecond = 20 taskQueue = AsyncTaskQueue() #self.addTasksToQueueInit(taskQueue) # Go home if not self.ikPlanner.fixedBaseArm: self.addTasksToQueueWalking(taskQueue, om.findObjectByName('start stance frame').transform, 'Walk to Start') for _ in self.clusterObjects: # Pick Objects from table: if not self.ikPlanner.fixedBaseArm: self.addTasksToQueueWalking(taskQueue, om.findObjectByName('table stance frame').transform, 'Walk to Table') taskQueue.addTask(self.printAsync('Pick with Left Arm')) self.addTasksToQueueTablePick(taskQueue, 'left') #taskQueue.addTask(self.printAsync('Pick with Right Arm')) #self.addTasksToQueueTablePick(taskQueue, 'right') # Go home if not self.ikPlanner.fixedBaseArm: self.addTasksToQueueWalking(taskQueue, om.findObjectByName('start stance frame').transform, 'Walk 
to Start') # Go to Bin if not self.ikPlanner.fixedBaseArm: self.addTasksToQueueWalking(taskQueue, om.findObjectByName('bin stance frame').transform, 'Walk to Bin') # Drop into the Bin: taskQueue.addTask(self.printAsync('Drop from Left Arm')) self.addTasksToQueueDropIntoBin(taskQueue, 'left') #taskQueue.addTask(self.printAsync('Drop from Right Arm')) #self.addTasksToQueueDropIntoBin(taskQueue, 'right') # Go home if not self.ikPlanner.fixedBaseArm: self.addTasksToQueueWalking(taskQueue, om.findObjectByName('start stance frame').transform, 'Walk to Start') taskQueue.addTask(self.printAsync('done!')) return taskQueue def addTasksToQueueInit(self, taskQueue): taskQueue.addTask(self.printAsync('user fit table')) taskQueue.addTask(self.userFitTable) taskQueue.addTask(self.waitForTableFit) taskQueue.addTask(self.printAsync('user fit bin')) taskQueue.addTask(self.userFitBin) taskQueue.addTask(self.waitForBinFit) if not self.ikPlanner.fixedBaseArm: taskQueue.addTask( om.findObjectByName('table stance frame').transform ) taskQueue.addTask(self.computeBinStanceFrame) def addTasksToQueueTablePick(self, taskQueue, side): taskQueue.addTask(self.requiredUserPrompt('continue? y/n: ')) taskQueue.addTask(functools.partial(self.planPreGrasp, side)) taskQueue.addTask(self.animateLastPlan) taskQueue.addTask(self.requiredUserPrompt('continue? y/n: ')) taskQueue.addTask(functools.partial(self.planReachToTableObject, side)) taskQueue.addTask(self.animateLastPlan) if not self.ikPlanner.fixedBaseArm: # TODO: distance is handled by reach, hence ignore taskQueue.addTask(self.requiredUserPrompt('continue? y/n: ')) taskQueue.addTask(functools.partial(self.planTouchTableObject, side)) taskQueue.addTask(self.animateLastPlan) taskQueue.addTask(self.requiredUserPrompt('continue? y/n: ')) taskQueue.addTask(functools.partial(self.closeHand, side)) taskQueue.addTask(functools.partial(self.graspTableObject, side)) taskQueue.addTask(self.requiredUserPrompt('continue? y/n: ')) taskQueue.addTask(functools.partial(self.planLiftTableObject, side)) taskQueue.addTask(self.animateLastPlan) def addTasksToQueueDropIntoBin(self, taskQueue, side): taskQueue.addTask(self.requiredUserPrompt('continue? y/n: ')) taskQueue.addTask(functools.partial(self.planDropPostureRaise, side)) taskQueue.addTask(self.animateLastPlan) taskQueue.addTask(functools.partial(self.openHand, side)) taskQueue.addTask(functools.partial(self.dropTableObject, side)) taskQueue.addTask(self.requiredUserPrompt('continue? y/n: ')) if not self.ikPlanner.fixedBaseArm: taskQueue.addTask(functools.partial(self.planDropPostureLower, side)) else: taskQueue.addTask(functools.partial(self.planPreGrasp, side)) taskQueue.addTask(self.animateLastPlan) def addTasksToQueueWalking(self, taskQueue, stanceTransform, message): taskQueue.addTask(self.printAsync(message)) taskQueue.addTask( functools.partial(self.planWalkToStance, stanceTransform )) taskQueue.addTask(self.optionalUserPrompt('Send footstep plan. continue? y/n: ')) taskQueue.addTask(self.commitFootstepPlan) #taskQueue.addTask(self.animateLastPlan) # ought to wait until arrival, currently doesnt wait the right amount of time taskQueue.addTask(self.requiredUserPrompt('Have you arrived? 
y/n: ')) ''' Tabledemo Image Fit for live-stream of webcam ''' class TableImageFitter(ImageBasedAffordanceFit): def __init__(self, tableDemo): ImageBasedAffordanceFit.__init__(self, numberOfPoints=1) self.tableDemo = tableDemo def fit(self, polyData, points): pass ''' Table Task Panel ''' class TableTaskPanel(TaskUserPanel): def __init__(self, tableDemo): TaskUserPanel.__init__(self, windowTitle='Table Task') self.tableDemo = tableDemo self.tableDemo.planFromCurrentRobotState = True self.addDefaultProperties() self.addButtons() self.addTasks() self.fitter = TableImageFitter(self.tableDemo) self.initImageView(self.fitter.imageView, activateAffordanceUpdater=False) def addButtons(self): self.addManualSpacer() self.addManualButton('Lower arm', functools.partial(self.tableDemo.planLowerArm, self.tableDemo.graspingHand)) self.addManualSpacer() self.addManualButton('Raise arm', self.tableDemo.planPreGrasp) self.addManualSpacer() self.addManualButton('Commit Manip', self.tableDemo.commitManipPlan) self.addManualSpacer() self.addManualButton('Open Hand', functools.partial(self.tableDemo.openHand, self.tableDemo.graspingHand)) self.addManualSpacer() self.addManualButton('Close Hand', functools.partial(self.tableDemo.closeHand, self.tableDemo.graspingHand)) def addDefaultProperties(self): self.params.addProperty('Hand', 0, attributes=om.PropertyAttributes(enumNames=['Left', 'Right'])) self.params.addProperty('Base', 1, attributes=om.PropertyAttributes(enumNames=['Fixed', 'Free'])) if self.tableDemo.ikPlanner.fixedBaseArm: self.params.addProperty('Back', 0, attributes=om.PropertyAttributes(enumNames=['Fixed', 'Free'])) else: self.params.addProperty('Back', 1, attributes=om.PropertyAttributes(enumNames=['Fixed', 'Free'])) # Hand control for Kuka LWR / Schunk SDH if self.tableDemo.ikPlanner.fixedBaseArm: self.params.addProperty('Hand Engaged (Powered)', False) # If we're dealing with humanoids, offer the scene selector if not self.tableDemo.ikPlanner.fixedBaseArm: self.params.addProperty('Scene', 4, attributes=om.PropertyAttributes(enumNames=['Objects on table','Object below table','Object through slot','Object at depth','Objects on table (fit)'])) # Init values as above self.tableDemo.graspingHand = self.getSide() self.tableDemo.lockBase = self.getLockBase() self.tableDemo.lockBack = self.getLockBack() if self.tableDemo.ikPlanner.fixedBaseArm: self.handEngaged = self.getHandEngaged() # WARNING: does not check current state [no status message] def getSide(self): return self.params.getPropertyEnumValue('Hand').lower() def getLockBase(self): return True if self.params.getPropertyEnumValue('Base') == 'Fixed' else False def getLockBack(self): return True if self.params.getPropertyEnumValue('Back') == 'Fixed' else False def getHandEngaged(self): return self.params.getProperty('Hand Engaged (Powered)') def onPropertyChanged(self, propertySet, propertyName): propertyName = str(propertyName) if propertyName == 'Hand': self.tableDemo.graspingHand = self.getSide() self.taskTree.removeAllTasks() self.addTasks() elif propertyName == 'Base': self.tableDemo.lockBase = self.getLockBase() elif propertyName == 'Back': self.tableDemo.lockBack = self.getLockBack() elif propertyName == 'Hand Engaged (Powered)': if self.handEngaged: # was engaged, hence deactivate self.tableDemo.getHandDriver(self.getSide()).sendDeactivate() # deactivate hand else: # was disenaged, hence activate self.tableDemo.getHandDriver(self.getSide()).sendActivate() # activate hand self.handEngaged = self.getHandEngaged() elif propertyName == 
'Scene': self.tableDemo.sceneID = self.params.getProperty('Scene') def addTasks(self): # some helpers def addTask(task, parent=None): self.taskTree.onAddTask(task, copy=False, parent=parent) def addFunc(func, name, parent=None, confirm=False): addTask(rt.CallbackTask(callback=func, name=name), parent=parent) if confirm: addTask(rt.UserPromptTask(name='Confirm execution has finished', message='Continue when plan finishes.'), parent=parent) def addManipulation(func, name, parent=None, confirm=True): group = self.taskTree.addGroup(name, parent=parent) addFunc(func, name='plan motion', parent=group) addTask(rt.CheckPlanInfo(name='check manip plan info'), parent=group) addFunc(v.commitManipPlan, name='execute manip plan', parent=group) addTask(rt.WaitForManipulationPlanExecution(name='wait for manip execution'), parent=group) if confirm: addTask(rt.UserPromptTask(name='Confirm execution has finished', message='Continue when plan finishes.'), parent=group) v = self.tableDemo self.taskTree.removeAllTasks() # graspingHand is 'left', side is 'Left' side = self.params.getPropertyEnumValue('Hand') ############### # add the tasks # pre-prep if v.ikPlanner.fixedBaseArm: if not v.useDevelopment: addManipulation(functools.partial(v.planPostureFromDatabase, 'roomMapping', 'p3_down', side='left'), 'go to pre-mapping pose') # TODO: mapping # prep prep = self.taskTree.addGroup('Preparation') if v.ikPlanner.fixedBaseArm: addTask(rt.OpenHand(name='open hand', side=side), parent=prep) if v.useDevelopment: addFunc(v.prepKukaTestDemoSequence, 'prep from file', parent=prep) else: # get one frame from camera, segment on there addFunc(v.prepGetSceneFrame, 'capture scene frame', parent=prep) addFunc(v.prepKukaLabScene, 'prep kuka lab scene', parent=prep) else: addTask(rt.CloseHand(name='close grasp hand', side=side), parent=prep) addTask(rt.CloseHand(name='close left hand', side='Left'), parent=prep) addTask(rt.CloseHand(name='close right hand', side='Right'), parent=prep) addFunc(v.createCollisionPlanningScene, 'prep from file', parent=prep) # walk if not v.ikPlanner.fixedBaseArm: walk = self.taskTree.addGroup('Approach Table') addTask(rt.RequestFootstepPlan(name='plan walk to table', stanceFrameName='table stance frame'), parent=walk) addTask(rt.UserPromptTask(name='approve footsteps', message='Please approve footstep plan.'), parent=walk) addTask(rt.CommitFootstepPlan(name='walk to table', planName='table grasp stance footstep plan'), parent=walk) addTask(rt.SetNeckPitch(name='set neck position', angle=35), parent=walk) addTask(rt.WaitForWalkExecution(name='wait for walking'), parent=walk) # lift object if v.ikPlanner.fixedBaseArm: addManipulation(functools.partial(v.planPreGrasp, v.graspingHand ), name='raise arm') addManipulation(functools.partial(v.planReachToTableObject, v.graspingHand), name='reach') addManipulation(functools.partial(v.planTouchTableObject, v.graspingHand), name='touch') else: # Collision Free - Marco et al. 
addManipulation(functools.partial(v.planReachToTableObjectCollisionFree, v.graspingHand), name='reach') addFunc(functools.partial(v.graspTableObject, side=v.graspingHand), 'grasp', parent='reach', confirm=True) addManipulation(functools.partial(v.planLiftTableObject, v.graspingHand), name='lift object') # walk to start if not v.ikPlanner.fixedBaseArm: walkToStart = self.taskTree.addGroup('Walk to Start') addTask(rt.RequestFootstepPlan(name='plan walk to start', stanceFrameName='start stance frame'), parent=walkToStart) addTask(rt.UserPromptTask(name='approve footsteps', message='Please approve footstep plan.'), parent=walkToStart) addTask(rt.CommitFootstepPlan(name='walk to start', planName='start stance footstep plan'), parent=walkToStart) addTask(rt.WaitForWalkExecution(name='wait for walking'), parent=walkToStart) # walk to bin if not v.ikPlanner.fixedBaseArm: walkToBin = self.taskTree.addGroup('Walk to Bin') addTask(rt.RequestFootstepPlan(name='plan walk to bin', stanceFrameName='bin stance frame'), parent=walkToBin) addTask(rt.UserPromptTask(name='approve footsteps', message='Please approve footstep plan.'), parent=walkToBin) addTask(rt.CommitFootstepPlan(name='walk to start', planName='bin stance footstep plan'), parent=walkToBin) addTask(rt.WaitForWalkExecution(name='wait for walking'), parent=walkToBin) # drop in bin addManipulation(functools.partial(v.planDropPostureRaise, v.graspingHand), name='drop: raise arm') # seems to ignore arm side? addFunc(functools.partial(v.dropTableObject, side=v.graspingHand), 'drop', parent='drop: release', confirm=True) addManipulation(functools.partial(v.planDropPostureLower, v.graspingHand), name='drop: lower arm')
rdeits/director
src/python/ddapp/tabledemo.py
Python
bsd-3-clause
58,028
[ "VTK" ]
b2d4c0309c15b46b6fccaa7d3a425c635493079166d0d02a8aad763d41cedc36
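The `sortClustersOnTable` helper in the tabledemo file above orders segmented clusters by projecting each cluster origin onto the table frame's (negated) y-axis and sorting on that dot product. Below is a minimal standalone sketch of that projection-and-sort idea using plain numpy; the frame objects and object model are replaced by hypothetical arrays, so this is an illustration of the geometry, not the director API.

```python
import numpy as np

def sort_points_along_axis(points, frame_origin, axis):
    """Sort 3D points by their signed distance along `axis`, measured from `frame_origin`.

    Mirrors the dot-product ordering used by sortClustersOnTable: each point is
    projected onto the (unit) axis and the points are returned in ascending order
    of that projection.
    """
    points = np.asarray(points, dtype=float)
    axis = np.asarray(axis, dtype=float)
    axis = axis / np.linalg.norm(axis)           # ensure the axis is unit length
    dists = np.dot(points - frame_origin, axis)  # signed distance of each point along the axis
    order = np.argsort(dists)
    return points[order], dists[order]

# Hypothetical cluster origins and a table frame whose y-axis points right to left:
origins = [[0.2, 0.9, 0.8], [0.1, 0.4, 0.8], [0.3, 0.7, 0.8]]
table_origin = np.array([0.0, 0.0, 0.8])
table_y_axis = np.array([0.0, 1.0, 0.0])

sorted_pts, sorted_d = sort_points_along_axis(origins, table_origin, -table_y_axis)
print(sorted_pts)  # points ordered by increasing projection onto -table_y_axis
```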
# Copyright (C) 2012,2013,2015,2016 # Max Planck Institute for Polymer Research # Copyright (C) 2008,2009,2010,2011 # Max-Planck-Institute for Polymer Research & Fraunhofer SCAI # # This file is part of ESPResSo++. # # ESPResSo++ is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # ESPResSo++ is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. r""" ************************** espressopp.FixedTripleList ************************** .. function:: espressopp.FixedTripleList(storage) :param storage: :type storage: .. function:: espressopp.FixedTripleList.add(pid1, pid2, pid3) :param pid1: :param pid2: :param pid3: :type pid1: :type pid2: :type pid3: :rtype: .. function:: espressopp.FixedTripleList.addTriples(triplelist) :param triplelist: :type triplelist: :rtype: .. function:: espressopp.FixedTripleList.getTriples() :rtype: .. function:: espressopp.FixedTripleList.size() :rtype: .. function:: espressopp.FixedTripleList.remove() remove the FixedPairList and disconnect """ from espressopp import pmi import _espressopp import espressopp from espressopp.esutil import cxxinit class FixedTripleListLocal(_espressopp.FixedTripleList): def __init__(self, storage): if pmi.workerIsActive(): cxxinit(self, _espressopp.FixedTripleList, storage) def add(self, pid1, pid2, pid3): if pmi.workerIsActive(): return self.cxxclass.add(self, pid1, pid2, pid3) def addTriples(self, triplelist): """ Each processor takes the broadcasted triplelist and adds those triples whose first particle is owned by this processor. """ if pmi.workerIsActive(): for triple in triplelist: pid1, pid2, pid3 = triple self.cxxclass.add(self, pid1, pid2, pid3) def size(self): if pmi.workerIsActive(): return self.cxxclass.size(self) def remove(self): if pmi.workerIsActive(): self.cxxclass.remove(self) ''' def addTriples(self, triplelist): """ Each processor takes the broadcasted triplelist and adds those triples whose first particle is owned by this processor. """ if pmi.workerIsActive(): for triple in triplelist: pid1, pid2, pid3 = triple self.cxxclass.add(self, pid1, pid2, pid3) ''' def getTriples(self): if pmi.workerIsActive(): triples = self.cxxclass.getTriples(self) return triples if pmi.isController: class FixedTripleList(metaclass=pmi.Proxy): pmiproxydefs = dict( cls = 'espressopp.FixedTripleListLocal', localcall = [ "add" ], pmicall = [ "addTriples","remove" ], pmiinvoke = ["getTriples", "size"] )
espressopp/espressopp
src/FixedTripleList.py
Python
gpl-3.0
3,576
[ "ESPResSo" ]
5f954f76d329829fa3d90c639f2d612ab5774c86fd33c8ee0a29f706216b7e55
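For context, here is a short usage sketch of the `FixedTripleList` wrapper documented above. It only uses the calls shown in the module (`FixedTripleList(storage)`, `addTriples`, `getTriples`, `size`); the storage object and particle ids are hypothetical placeholders, and in a real script the ESPResSo++ system, storage and particles would have to be set up first.

```python
import espressopp

def make_angle_triples(storage, chain_pids):
    """Build a FixedTripleList covering consecutive particle triples of one chain.

    `storage` is assumed to be an already-configured espressopp storage object
    (a domain decomposition attached to a system with the particles added).
    """
    ftl = espressopp.FixedTripleList(storage)
    # Consecutive triples (p0, p1, p2), (p1, p2, p3), ... along the chain.
    triples = [tuple(chain_pids[i:i + 3]) for i in range(len(chain_pids) - 2)]
    ftl.addTriples(triples)  # each worker keeps the triples whose first particle it owns
    return ftl

# Example call (assuming `storage` exists and particles 1..5 have been added):
# ftl = make_angle_triples(storage, chain_pids=[1, 2, 3, 4, 5])
# print(ftl.size(), ftl.getTriples())
```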
from fitfunctionbase import FitFunctionBase from fitparameters import * from fithandles import * __all__ = [ 'FitFuncGaussian', 'FitFuncPseudoVoigt', 'FitFuncBoltzmann2', 'FitFuncConstant', 'FitFuncLine', 'FitFuncHeaviside', 'FitFuncRectangularWindow' ] class FitFuncGaussian(FitFunctionBase): name = 'gaussian' label = 'Gaussian' expr = 'a*exp(-(x-b)**2/(2*c**2))' expr_excel = '%(a)s*exp(-((%(x)s-%(b)s)^2)/(2*(%(c)s^2)))' expr_latex = r'y=a\exp\left[-\frac{(x-b)^2}{2c^2}\right]' parameters = [ ('a', 'Max height (at x=b)'), ('b', 'Center'), ('c', 'Standard deviation') ] def __init__(self, view): super().__init__(view) r = view.viewRect() x1, x2, y1, y2 = r.left(), r.right(), r.top(), r.bottom() self.addParam(FitParam('a', y2*0.6)) self.addParam(FitParam('b', (x1 + x2)/2)) self.addParam(FitParam('c', (x2 - x1)*0.1)) self.addParam(self.eval('Area', 'sqrt(2*pi)*a*c', None)) half = self.eval('half', 'a/2', None) x1 = self.eval('x1', 'b+c*sqrt(2*log(2))', self.c) self.addHandle(FitHandlePosition(view, self.b, self.a)) self.addHandle(FitHandleLine(view, self.b, half, x1, half)) class FitFuncPseudoVoigt(FitFunctionBase): name = 'pseudovoigt' label = 'PseudoVoigt' expr = 'a*(m*2/pi*w/(4*(x-x0)**2+w**2) + (1-m)*2*sqrt(ln(2))/(sqrt(pi)*w)*exp(-4*ln(2)/(w**2)*(x-x0)**2))' expr_excel = '%(a)s*(%(m)s*2/PI()*%(w)s/(4*((%(x)s-%(x0)s)^2)+%(w)s^2) + (1-%(m)s)*2*sqrt(ln(2))/(sqrt(PI())*%(w)s)*exp(-4*ln(2)/(%(w)s^2)*((%(x)s-%(x0)s)^2)))' expr_latex = r'y=a\left\{ m\frac{2}{\pi}\frac{w}{4(x-x_0)^2 + w^2} + (1-m)\frac{2\sqrt{\ln2}}{\sqrt{\pi}w}\exp\left[-\frac{4\ln2}{w^2}(x-x_0)^2\right] \right\}' parameters = [ ('a', 'Area'), ('x0', 'Center'), ('w', 'HWHM'), ('m', 'Mix ratio'), ('h', 'Max height at x=x0; h=a(2m/(PI*w)+(2*sqrt(ln2)*(1-m))/(sqrt(PI)*w))') ] def __init__(self, view): super().__init__(view) r = view.viewRect() x1, x2, y1, y2 = r.left(), r.right(), r.top(), r.bottom() self.addParam(FitParam('a', (x2-x1)*(y2-y1)*0.2)) self.addParam(FitParam('x0', (x1 + x2)/2)) self.addParam(FitParam('w', (x2 - x1)*0.1)) self.addParam(FitParam('m', 0.1)) self.addParam(self.eval('h', 'a*((2*m)/(pi*w)+(2*sqrt(ln(2))*(1-m))/(sqrt(pi)*w))', self.a)) self.addHandle(FitHandlePosition(view, self.x0, self.h)) half = self.eval('half', 'h/2', None) x1 = self.eval2('x1', 'x0+w/2', ''' w = 2*(x1-x0) a = pi*_h*w/2/( sqrt(pi*ln(2)) - (sqrt(pi*ln(2)) - 1)*m ) ''') self.addHandle(FitHandleLine(view, self.x0, half, x1, half)) class FitFuncBoltzmann2(FitFunctionBase): name = 'boltzmann2' label = 'Boltzmann 2' expr = '(a1*x+b1)/(1+exp((x-x0)/dx)) + (a2*x+b2)*(1-1/(1+exp((x-x0)/dx)))' expr_latex = r'y=(a_1x+b_1)\frac{1}{1+\exp[(x-x_0)/dx]} + (a_2x+b_2)\left\{1 - \frac{1}{1+\exp[(x-x_0)/dx]}\right\}' parameters = [ ('a1', 'Slope of line 1'), ('b1', 'Y-intercept of line 1'), ('a2', 'Slope of line 2'), ('b2', 'Y-intercept of line 2'), ('x0', 'Center'), ('dx', 'Transition factor') ] def __init__(self, view): super().__init__(view) r = view.viewRect() x1, x2, y1, y2 = r.left(), r.right(), r.top(), r.bottom() self.addParam(FitParam('a1', 0)) self.addParam(FitParam('b1', 0)) self.addParam(FitParam('a2', 0)) self.addParam(FitParam('b2', y2*0.8)) self.addParam(FitParam('x0', (x1+x2)/2)) self.addParam(FitParam('dx', 1)) y0 = '(a1*x0+b1)/2 + (a2*x0+b2)/2' y0 = self.eval('y0', y0, None) x1 = self.eval('x1', 'x0+2*dx', self.dx) self.addHandle(FitHandleLine(view, self.x0, y0, x1, y0)) self.addHandle(FitHandlePosition(view, self.x0, y0)) self.addParam(FitParam('cx1', self.x0.value(), hidden=True)) self.addParam(FitParam('cy1', self.b1.value(), 
hidden=True)) self.a1.valueChanged.connect(lambda: self.setB(1)) self.b1.valueChanged.connect(lambda: self.setcy(1)) self.cx1.valueChanged.connect(lambda: self.setB(1)) self.cy1.valueChanged.connect(lambda: self.setB(1)) self.addHandle(FitHandleGradient(view, self.cx1, self.cy1, self.a1, 50, False)) self.addHandle(FitHandlePosition(view, self.cx1, self.cy1)) self.addParam(FitParam('cx2', self.x0.value(), hidden=True)) self.addParam(FitParam('cy2', self.b2.value(), hidden=True)) self.a2.valueChanged.connect(lambda: self.setB(2)) self.b2.valueChanged.connect(lambda: self.setcy(2)) self.cx2.valueChanged.connect(lambda: self.setB(2)) self.cy2.valueChanged.connect(lambda: self.setB(2)) self.addHandle(FitHandleGradient(view, self.cx2, self.cy2, self.a2, 50)) self.addHandle(FitHandlePosition(view, self.cx2, self.cy2)) def setB(self, num): cx = getattr(self, 'cx%d' % num) cy = getattr(self, 'cy%d' % num) a = getattr(self, 'a%d' % num) b = getattr(self, 'b%d' % num) b.setValue(cy.value() - a.value()*cx.value()) def setcy(self, num): cx = getattr(self, 'cx%d' % num) cy = getattr(self, 'cy%d' % num) a = getattr(self, 'a%d' % num) b = getattr(self, 'b%d' % num) cy.setValue(a.value()*cx.value()+b.value()) class FitFuncConstant(FitFunctionBase): name = 'constant' label = 'Constant' expr = 'y0' expr_latex = r'y=y_0' parameters = [('y0', '')] def __init__(self, view): super().__init__(view) r = view.viewRect() x1, x2, y1, y2 = r.left(), r.right(), r.top(), r.bottom() self.addParam(FitParam('y0', y2*0.8)) self.addParam(FitParam('x0', x1, hidden=True)) self.addHandle(FitHandlePosition(view, self.x0, self.y0)) class FitFuncLine(FitFunctionBase): name = 'line' label = 'Line' expr = 'a*x+b' expr_latex = r'ax+b' parameters = [ ('a', None), ('b', None) ] def __init__(self, view): super().__init__(view) r = view.viewRect() x1, x2, y1, y2 = r.left(), r.right(), r.top(), r.bottom() self.addParam(FitParam('a', (y2-y1)/(x2-x1))) self.addParam(FitParam('b', y1-self.a.value()*x1)) cx = (x1+x2)/2 cy = self.a.value()*cx+self.b.value() self.addParam(FitParam('cx', cx, hidden=True)) self.addParam(FitParam('cy', cy, hidden=True)) self.a.valueChanged.connect(self.setB) self.b.valueChanged.connect(self.setcy) self.cx.valueChanged.connect(self.setB) self.cy.valueChanged.connect(self.setB) self.addHandle(FitHandleGradient(view, self.cx, self.cy, self.a, 50)) self.addHandle(FitHandlePosition(view, self.cx, self.cy)) def setB(self): self.b.setValue(self.cy.value() - self.a.value()*self.cx.value()) def setcy(self): self.cy.setValue(self.a.value()*self.cx.value()+self.b.value()) class FitFuncHeaviside(FitFunctionBase): name = 'heaviside' label = 'Heaviside' expr = 'a*heaviside(x-x0, 1)' expr_latex = r''' y=a\cdot\begin{cases} 0 & (x < x_0) \\ 1 & (x_0 \le x) \end{cases} ''' parameters = [ ('a', None), ('x0', None) ] def __init__(self, view): super().__init__(view) r = view.viewRect() x1, x2, y1, y2 = r.left(), r.right(), r.top(), r.bottom() self.addParam(FitParam('a', y2*0.8)) self.addParam(FitParam('x0', (x1 + x2)/2)) self.addHandle(FitHandlePosition(view, self.x0, self.a)) class FitFuncRectangularWindow(FitFunctionBase): name = 'rectangularwinow' label = 'Rectangular window' expr = 'a*heaviside(x-x0, 1)*heaviside(-(x-x1), 1)' expr_latex = r''' y=a\cdot\begin{cases} 0 & (x < x_0) \\ 1 & (x_0 \le x \le x_1) \\ 0 & (x_0 < x) \end{cases} ''' parameters = [ ('a', None), ('x0', None), ('x1', None) ] def __init__(self, view): super().__init__(view) r = view.viewRect() x1, x2, y1, y2 = r.left(), r.right(), r.top(), r.bottom() 
        self.addParam(FitParam('a', y2*0.8))
        self.addParam(FitParam('x0', x1 + (x2-x1)*0.2))
        self.addParam(FitParam('x1', x2 - (x2-x1)*0.2))
        self.addHandle(FitHandlePosition(view, self.x0, self.a))
        self.addHandle(FitHandlePosition(view, self.x1, self.a))
takumak/tuna
src/fitfunctions.py
Python
mit
8,012
[ "Gaussian" ]
c0a2f53677df75a27276c402b8ff04c39dff7af001a72b19aa1fd62ae126f228
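The `FitFuncGaussian` class above declares its expression as `a*exp(-(x-b)**2/(2*c**2))` and derives the peak area as `sqrt(2*pi)*a*c`. The snippet below is a small numpy sketch of those two formulas, independent of the GUI handle machinery; the numerical cross-check is my own addition, not taken from the file.

```python
import numpy as np

def gaussian(x, a, b, c):
    """a*exp(-(x-b)**2/(2*c**2)): height `a` at x=b, standard deviation `c`."""
    return a * np.exp(-(x - b) ** 2 / (2.0 * c ** 2))

def gaussian_area(a, c):
    """Closed-form area under the Gaussian, matching the derived 'Area' parameter above."""
    return np.sqrt(2.0 * np.pi) * a * c

# Numerical cross-check of the area formula on a dense grid:
x = np.linspace(-50.0, 50.0, 200001)
a, b, c = 2.0, 1.5, 3.0
numeric = np.trapz(gaussian(x, a, b, c), x)
print(numeric, gaussian_area(a, c))  # both should be close to ~15.04
```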
# Copyright 2013-2020 Lawrence Livermore National Security, LLC and other # Spack Project Developers. See the top-level COPYRIGHT file for details. # # SPDX-License-Identifier: (Apache-2.0 OR MIT) from spack import * class RRsamtools(RPackage): """Binary alignment (BAM), FASTA, variant call (BCF), and tabix file import. This package provides an interface to the 'samtools', 'bcftools', and 'tabix' utilities for manipulating SAM (Sequence Alignment / Map), FASTA, binary variant call (BCF) and compressed indexed tab-delimited (tabix) files.""" homepage = "https://bioconductor.org/packages/Rsamtools" git = "https://git.bioconductor.org/packages/Rsamtools.git" version('2.2.1', commit='f10084658b4c9744961fcacd79c0ae9a7a40cd30') version('2.0.3', commit='17d254cc026574d20db67474260944bf60befd70') version('1.34.1', commit='0ec1d45c7a14b51d019c3e20c4aa87c6bd2b0d0c') version('1.32.3', commit='0aa3f134143b045aa423894de81912becf64e4c2') version('1.30.0', commit='61b365fe3762e796b3808cec7238944b7f68d7a6') version('1.28.0', commit='dfa5b6abef68175586f21add7927174786412472') depends_on('r-genomeinfodb@1.1.3:', type=('build', 'run')) depends_on('r-genomicranges@1.21.6:', type=('build', 'run')) depends_on('r-biostrings@2.37.1:', type=('build', 'run')) depends_on('r-biocgenerics@0.1.3:', type=('build', 'run')) depends_on('r-s4vectors@0.13.8:', type=('build', 'run')) depends_on('r-iranges@2.3.7:', type=('build', 'run')) depends_on('r-xvector@0.15.1:', type=('build', 'run')) depends_on('r-zlibbioc', type=('build', 'run')) depends_on('r-bitops', type=('build', 'run')) depends_on('r-biocparallel', type=('build', 'run')) depends_on('r-genomicranges@1.31.8:', when='@1.32.3:', type=('build', 'run')) depends_on('r-biostrings@2.47.6:', when='@1.32.3:', type=('build', 'run')) depends_on('r-biocgenerics@0.25.1:', when='@1.32.3:', type=('build', 'run')) depends_on('r-s4vectors@0.17.25:', when='@1.32.3:', type=('build', 'run')) depends_on('r-iranges@2.13.12:', when='@1.32.3:', type=('build', 'run')) depends_on('r-xvector@0.19.7:', when='@1.32.3:', type=('build', 'run')) depends_on('r-rhtslib@1.16.3', when='@2.0.3', type=('build', 'run')) depends_on('r-rhtslib@1.17.7:', when='@2.2.1:', type=('build', 'run')) depends_on('gmake', type='build')
iulian787/spack
var/spack/repos/builtin/packages/r-rsamtools/package.py
Python
lgpl-2.1
2,411
[ "Bioconductor" ]
a4b456d40c05e368d10a05f9b133e64ab7e09d83fafb6381330519c5e8729bc7
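The r-rsamtools package above pins different dependency versions per package version via the `when=` argument of `depends_on`. Below is a stripped-down, hypothetical Spack package showing just that pattern; the package name, URLs, versions and commit hashes are made up for illustration, while the directives (`version`, `depends_on`, `when=`, `type=`) are used exactly as in the package above.

```python
from spack import *


class RExamplePkg(RPackage):
    """Hypothetical Bioconductor-style package used only to illustrate
    version-conditional dependencies."""

    homepage = "https://example.org/r-example-pkg"
    git      = "https://example.org/r-example-pkg.git"

    version('2.0.0', commit='0000000000000000000000000000000000000000')
    version('1.0.0', commit='1111111111111111111111111111111111111111')

    # Applies to every version of this package:
    depends_on('r-s4vectors@0.13.8:', type=('build', 'run'))

    # Tightened constraint that only applies from 2.0.0 onwards:
    depends_on('r-s4vectors@0.17.25:', when='@2.0.0:', type=('build', 'run'))

    # Non-R build tool dependency, as in the package above:
    depends_on('gmake', type='build')
```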
#!/usr/bin/env python # -*- coding: utf-8 -*- # Licensed to the Apache Software Foundation (ASF) under one or more # contributor license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright ownership. # The ASF licenses this file to You under the Apache License, Version 2.0 # (the "License"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Direct interaction In this example we start an interaction in a direct channel and then create a group channel to follow-up. Following commands are used in this example: - command: start - this is used in a direct channel to start a new transaction - command: input - reflect data that has been captured so far - command: close - this is used in a group channel to destroy it Multiple questions are adressed in this example: - How to engage with a bot in a direct channel? When the end user invites the bot in a direct channel, a first transaction starts immediately. Later on, the command ``start`` is used on each new transaction. In essence, this command resets and restarts the underlying state machine, so the design is really generic. In this example we use a simple state machine derived from ``Input`` for this purpose. For more sophisticated situations, you could consider ``Menu``, ``Sequence`` and ``Steps`` as well. Or write your own state machine if needed. - How to ask for data and manage the capture? Shellbot provides with mask and with regex expressions to validate information provided by end users. The state machines also provide tip and help information, or give up on time-out. - How to list participants of a channel? Here we retrieve the address of end user in direct channel, and add him to the participants of the new group channel. Look for ``bot.list_participants()`` in the code below. - How to store data that has been captured? State machines coming with shellbot save captured data in the store attached to each bot. In the example below, the state machine is configured to use the key ``order.id``. This is done in a specialized list of key-value pairs named ``input``. - How to display data that has been captured from the end user? Type the command ``input`` and that's it. - How to populate a bot store? When a bot is created, shellbot initializes it with content from the context. First, shellbot looks for generic key ``bot.store``. Second, shellbot also consider the key ``store.<channel_id>`` for content that is specific to one bot. Here we use the second mechanism so that input captured in a direct channel is replicated to the store of the group channel. - How to retrieve attachments from a channel? This capablity is required to replicate a document from the direct channel to the group channel. Attachments are listed from ``bot.space.list_message()``, with the addition of flag ``with_attachment``. Based on this, attachments can be downloaded on local computer, and uploaded as updates of the group channel. Look at the code, it is rather self-explanatory. 
To run this script you have to provide a custom configuration, or set environment variables instead:: - ``CISCO_SPARK_BOT_TOKEN`` - Received from Cisco Spark on bot registration - ``SERVER_URL`` - Public link used by Cisco Spark to reach your server The token is specific to your run-time, please visit Cisco Spark for Developers to get more details: https://developer.ciscospark.com/ For example, if you run this script under Linux or macOs with support from ngrok for exposing services to the Internet:: export CISCO_SPARK_BOT_TOKEN="<token id from Cisco Spark for Developers>" export SERVER_URL="http://1a107f21.ngrok.io" python direct.py """ import logging import os import time from shellbot import Engine, Context from shellbot.machines import MachineFactory, Input class MyInput(Input): # transition from direct channel to group channel def on_stop(self): team_title = 'shellbot environment' title = 'Follow-up in group room #{}'.format( self.bot.store.increment('group.count')) self.bot.say(u"Switching to a group channel:") logging.debug(u"- prevent racing conditions from webhooks") self.bot.engine.set('listener.lock', 'on') self.bot.say(u"- creating channel '{}'...".format(title)) channel = self.bot.space.create(title=title, ex_team=team_title) self.bot.say(u"- pushing input data to the group channel...") label = "store.{}".format(channel.id) self.bot.engine.set( label, {'from': self.bot.id, 'input': self.bot.recall('input')}, ) self.bot.say(u"- replicating documents to the group channel...") for counter, message in enumerate( self.bot.space.list_messages(id=self.bot.id, quantity=1, with_attachment=True)): if not counter: self.bot.space.post_message(channel.id, text="Documents gathered so far:") self.bot.say(u"- replicating document #{}...".format(counter+1)) name = self.bot.space.name_attachment(message.url) logging.debug(u"- attachment: {}".format(name)) downloaded = self.bot.space.download_attachment(message.url) self.bot.space.post_message(id=channel.id, text=name, file=downloaded) self.bot.say(u"- adding participants to the group channel...") participants = self.bot.space.list_participants(self.bot.id) for person in self.bot.engine.get('space.participants', []): participants.add(person) self.bot.space.add_participants(id=channel.id, persons=participants) self.bot.space.post_message( channel.id, content="Use command ``input`` to view data gathered so far.") logging.debug(u"- releasing listener lock") self.bot.engine.set('listener.lock', 'off') self.bot.say(u"- done") self.bot.say(content=(u"Please go to the new channel for group " u"interactions. 
Come back here and type " u"``start`` for a new sequence.")) class MyMachineFactory(MachineFactory): # provide machines to direct channels def get_machine_for_direct_channel(self, bot): return MyInput(bot=bot, question="PO number please?", mask="9999A", on_retry="PO number should have 4 digits and a letter", on_answer="Ok, PO number has been noted: {}", on_cancel="Ok, forget about the PO number", key='order.id') def get_machine_for_group_channel(self, bot): return None def get_default_machine(self, bot): return None if __name__ == '__main__': Context.set_logger() engine = Engine(type='spark', # use Cisco Spark and setup the environment commands=['shellbot.commands.input', 'shellbot.commands.start', 'shellbot.commands.close', ], machine_factory=MyMachineFactory()) os.environ['CHAT_ROOM_TITLE'] = '*dummy' engine.configure() # ensure that all components are ready print(u"Go to Cisco Spark and engage with the bot in a direct channel") engine.run() # until Ctl-C
bernard357/shellbot
examples/direct.py
Python
apache-2.0
8,139
[ "VisIt" ]
5ef782bac972f509daba92afe89bbce251af8d4c8849c177f7480e366c314e12
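The shellbot example above validates the PO number against the mask `9999A` ("4 digits and a letter"). Shellbot performs this check internally; the snippet below is only a plain-Python approximation of that particular mask for testing input strings outside the bot, and the mapping of mask characters to regex classes is an assumption on my part.

```python
import re

# Assumed mask semantics: '9' matches one digit, 'A' matches one letter.
_MASK_TOKENS = {'9': r'\d', 'A': r'[A-Za-z]'}

def matches_mask(value, mask):
    """Return True if `value` matches the character mask, e.g. '1234Z' vs '9999A'."""
    pattern = ''.join(_MASK_TOKENS.get(ch, re.escape(ch)) for ch in mask)
    return re.fullmatch(pattern, value.strip()) is not None

assert matches_mask('1234Z', '9999A')
assert not matches_mask('12A4Z', '9999A')   # digit expected in third position
assert not matches_mask('12345', '9999A')   # letter expected at the end
```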
# -*- coding: utf-8 -*- # Copyright 2022 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # import os import mock import grpc from grpc.experimental import aio import math import pytest from proto.marshal.rules.dates import DurationRule, TimestampRule from google.api_core import client_options from google.api_core import exceptions as core_exceptions from google.api_core import future from google.api_core import gapic_v1 from google.api_core import grpc_helpers from google.api_core import grpc_helpers_async from google.api_core import operation from google.api_core import operation_async # type: ignore from google.api_core import operations_v1 from google.api_core import path_template from google.auth import credentials as ga_credentials from google.auth.exceptions import MutualTLSChannelError from google.cloud.dialogflowcx_v3.services.agents import AgentsAsyncClient from google.cloud.dialogflowcx_v3.services.agents import AgentsClient from google.cloud.dialogflowcx_v3.services.agents import pagers from google.cloud.dialogflowcx_v3.services.agents import transports from google.cloud.dialogflowcx_v3.types import advanced_settings from google.cloud.dialogflowcx_v3.types import agent from google.cloud.dialogflowcx_v3.types import agent as gcdc_agent from google.cloud.dialogflowcx_v3.types import flow from google.longrunning import operations_pb2 from google.oauth2 import service_account from google.protobuf import field_mask_pb2 # type: ignore from google.protobuf import struct_pb2 # type: ignore import google.auth def client_cert_source_callback(): return b"cert bytes", b"key bytes" # If default endpoint is localhost, then default mtls endpoint will be the same. # This method modifies the default endpoint so the client can produce a different # mtls endpoint for endpoint testing purposes. 
def modify_default_endpoint(client): return ( "foo.googleapis.com" if ("localhost" in client.DEFAULT_ENDPOINT) else client.DEFAULT_ENDPOINT ) def test__get_default_mtls_endpoint(): api_endpoint = "example.googleapis.com" api_mtls_endpoint = "example.mtls.googleapis.com" sandbox_endpoint = "example.sandbox.googleapis.com" sandbox_mtls_endpoint = "example.mtls.sandbox.googleapis.com" non_googleapi = "api.example.com" assert AgentsClient._get_default_mtls_endpoint(None) is None assert AgentsClient._get_default_mtls_endpoint(api_endpoint) == api_mtls_endpoint assert ( AgentsClient._get_default_mtls_endpoint(api_mtls_endpoint) == api_mtls_endpoint ) assert ( AgentsClient._get_default_mtls_endpoint(sandbox_endpoint) == sandbox_mtls_endpoint ) assert ( AgentsClient._get_default_mtls_endpoint(sandbox_mtls_endpoint) == sandbox_mtls_endpoint ) assert AgentsClient._get_default_mtls_endpoint(non_googleapi) == non_googleapi @pytest.mark.parametrize("client_class", [AgentsClient, AgentsAsyncClient,]) def test_agents_client_from_service_account_info(client_class): creds = ga_credentials.AnonymousCredentials() with mock.patch.object( service_account.Credentials, "from_service_account_info" ) as factory: factory.return_value = creds info = {"valid": True} client = client_class.from_service_account_info(info) assert client.transport._credentials == creds assert isinstance(client, client_class) assert client.transport._host == "dialogflow.googleapis.com:443" @pytest.mark.parametrize( "transport_class,transport_name", [ (transports.AgentsGrpcTransport, "grpc"), (transports.AgentsGrpcAsyncIOTransport, "grpc_asyncio"), ], ) def test_agents_client_service_account_always_use_jwt(transport_class, transport_name): with mock.patch.object( service_account.Credentials, "with_always_use_jwt_access", create=True ) as use_jwt: creds = service_account.Credentials(None, None, None) transport = transport_class(credentials=creds, always_use_jwt_access=True) use_jwt.assert_called_once_with(True) with mock.patch.object( service_account.Credentials, "with_always_use_jwt_access", create=True ) as use_jwt: creds = service_account.Credentials(None, None, None) transport = transport_class(credentials=creds, always_use_jwt_access=False) use_jwt.assert_not_called() @pytest.mark.parametrize("client_class", [AgentsClient, AgentsAsyncClient,]) def test_agents_client_from_service_account_file(client_class): creds = ga_credentials.AnonymousCredentials() with mock.patch.object( service_account.Credentials, "from_service_account_file" ) as factory: factory.return_value = creds client = client_class.from_service_account_file("dummy/file/path.json") assert client.transport._credentials == creds assert isinstance(client, client_class) client = client_class.from_service_account_json("dummy/file/path.json") assert client.transport._credentials == creds assert isinstance(client, client_class) assert client.transport._host == "dialogflow.googleapis.com:443" def test_agents_client_get_transport_class(): transport = AgentsClient.get_transport_class() available_transports = [ transports.AgentsGrpcTransport, ] assert transport in available_transports transport = AgentsClient.get_transport_class("grpc") assert transport == transports.AgentsGrpcTransport @pytest.mark.parametrize( "client_class,transport_class,transport_name", [ (AgentsClient, transports.AgentsGrpcTransport, "grpc"), (AgentsAsyncClient, transports.AgentsGrpcAsyncIOTransport, "grpc_asyncio"), ], ) @mock.patch.object( AgentsClient, "DEFAULT_ENDPOINT", modify_default_endpoint(AgentsClient) 
) @mock.patch.object( AgentsAsyncClient, "DEFAULT_ENDPOINT", modify_default_endpoint(AgentsAsyncClient) ) def test_agents_client_client_options(client_class, transport_class, transport_name): # Check that if channel is provided we won't create a new one. with mock.patch.object(AgentsClient, "get_transport_class") as gtc: transport = transport_class(credentials=ga_credentials.AnonymousCredentials()) client = client_class(transport=transport) gtc.assert_not_called() # Check that if channel is provided via str we will create a new one. with mock.patch.object(AgentsClient, "get_transport_class") as gtc: client = client_class(transport=transport_name) gtc.assert_called() # Check the case api_endpoint is provided. options = client_options.ClientOptions(api_endpoint="squid.clam.whelk") with mock.patch.object(transport_class, "__init__") as patched: patched.return_value = None client = client_class(transport=transport_name, client_options=options) patched.assert_called_once_with( credentials=None, credentials_file=None, host="squid.clam.whelk", scopes=None, client_cert_source_for_mtls=None, quota_project_id=None, client_info=transports.base.DEFAULT_CLIENT_INFO, always_use_jwt_access=True, ) # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is # "never". with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}): with mock.patch.object(transport_class, "__init__") as patched: patched.return_value = None client = client_class(transport=transport_name) patched.assert_called_once_with( credentials=None, credentials_file=None, host=client.DEFAULT_ENDPOINT, scopes=None, client_cert_source_for_mtls=None, quota_project_id=None, client_info=transports.base.DEFAULT_CLIENT_INFO, always_use_jwt_access=True, ) # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is # "always". with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}): with mock.patch.object(transport_class, "__init__") as patched: patched.return_value = None client = client_class(transport=transport_name) patched.assert_called_once_with( credentials=None, credentials_file=None, host=client.DEFAULT_MTLS_ENDPOINT, scopes=None, client_cert_source_for_mtls=None, quota_project_id=None, client_info=transports.base.DEFAULT_CLIENT_INFO, always_use_jwt_access=True, ) # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT has # unsupported value. with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "Unsupported"}): with pytest.raises(MutualTLSChannelError): client = client_class(transport=transport_name) # Check the case GOOGLE_API_USE_CLIENT_CERTIFICATE has unsupported value. 
with mock.patch.dict( os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "Unsupported"} ): with pytest.raises(ValueError): client = client_class(transport=transport_name) # Check the case quota_project_id is provided options = client_options.ClientOptions(quota_project_id="octopus") with mock.patch.object(transport_class, "__init__") as patched: patched.return_value = None client = client_class(client_options=options, transport=transport_name) patched.assert_called_once_with( credentials=None, credentials_file=None, host=client.DEFAULT_ENDPOINT, scopes=None, client_cert_source_for_mtls=None, quota_project_id="octopus", client_info=transports.base.DEFAULT_CLIENT_INFO, always_use_jwt_access=True, ) @pytest.mark.parametrize( "client_class,transport_class,transport_name,use_client_cert_env", [ (AgentsClient, transports.AgentsGrpcTransport, "grpc", "true"), ( AgentsAsyncClient, transports.AgentsGrpcAsyncIOTransport, "grpc_asyncio", "true", ), (AgentsClient, transports.AgentsGrpcTransport, "grpc", "false"), ( AgentsAsyncClient, transports.AgentsGrpcAsyncIOTransport, "grpc_asyncio", "false", ), ], ) @mock.patch.object( AgentsClient, "DEFAULT_ENDPOINT", modify_default_endpoint(AgentsClient) ) @mock.patch.object( AgentsAsyncClient, "DEFAULT_ENDPOINT", modify_default_endpoint(AgentsAsyncClient) ) @mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "auto"}) def test_agents_client_mtls_env_auto( client_class, transport_class, transport_name, use_client_cert_env ): # This tests the endpoint autoswitch behavior. Endpoint is autoswitched to the default # mtls endpoint, if GOOGLE_API_USE_CLIENT_CERTIFICATE is "true" and client cert exists. # Check the case client_cert_source is provided. Whether client cert is used depends on # GOOGLE_API_USE_CLIENT_CERTIFICATE value. with mock.patch.dict( os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env} ): options = client_options.ClientOptions( client_cert_source=client_cert_source_callback ) with mock.patch.object(transport_class, "__init__") as patched: patched.return_value = None client = client_class(client_options=options, transport=transport_name) if use_client_cert_env == "false": expected_client_cert_source = None expected_host = client.DEFAULT_ENDPOINT else: expected_client_cert_source = client_cert_source_callback expected_host = client.DEFAULT_MTLS_ENDPOINT patched.assert_called_once_with( credentials=None, credentials_file=None, host=expected_host, scopes=None, client_cert_source_for_mtls=expected_client_cert_source, quota_project_id=None, client_info=transports.base.DEFAULT_CLIENT_INFO, always_use_jwt_access=True, ) # Check the case ADC client cert is provided. Whether client cert is used depends on # GOOGLE_API_USE_CLIENT_CERTIFICATE value. 
with mock.patch.dict( os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env} ): with mock.patch.object(transport_class, "__init__") as patched: with mock.patch( "google.auth.transport.mtls.has_default_client_cert_source", return_value=True, ): with mock.patch( "google.auth.transport.mtls.default_client_cert_source", return_value=client_cert_source_callback, ): if use_client_cert_env == "false": expected_host = client.DEFAULT_ENDPOINT expected_client_cert_source = None else: expected_host = client.DEFAULT_MTLS_ENDPOINT expected_client_cert_source = client_cert_source_callback patched.return_value = None client = client_class(transport=transport_name) patched.assert_called_once_with( credentials=None, credentials_file=None, host=expected_host, scopes=None, client_cert_source_for_mtls=expected_client_cert_source, quota_project_id=None, client_info=transports.base.DEFAULT_CLIENT_INFO, always_use_jwt_access=True, ) # Check the case client_cert_source and ADC client cert are not provided. with mock.patch.dict( os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env} ): with mock.patch.object(transport_class, "__init__") as patched: with mock.patch( "google.auth.transport.mtls.has_default_client_cert_source", return_value=False, ): patched.return_value = None client = client_class(transport=transport_name) patched.assert_called_once_with( credentials=None, credentials_file=None, host=client.DEFAULT_ENDPOINT, scopes=None, client_cert_source_for_mtls=None, quota_project_id=None, client_info=transports.base.DEFAULT_CLIENT_INFO, always_use_jwt_access=True, ) @pytest.mark.parametrize("client_class", [AgentsClient, AgentsAsyncClient]) @mock.patch.object( AgentsClient, "DEFAULT_ENDPOINT", modify_default_endpoint(AgentsClient) ) @mock.patch.object( AgentsAsyncClient, "DEFAULT_ENDPOINT", modify_default_endpoint(AgentsAsyncClient) ) def test_agents_client_get_mtls_endpoint_and_cert_source(client_class): mock_client_cert_source = mock.Mock() # Test the case GOOGLE_API_USE_CLIENT_CERTIFICATE is "true". with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "true"}): mock_api_endpoint = "foo" options = client_options.ClientOptions( client_cert_source=mock_client_cert_source, api_endpoint=mock_api_endpoint ) api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source( options ) assert api_endpoint == mock_api_endpoint assert cert_source == mock_client_cert_source # Test the case GOOGLE_API_USE_CLIENT_CERTIFICATE is "false". with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "false"}): mock_client_cert_source = mock.Mock() mock_api_endpoint = "foo" options = client_options.ClientOptions( client_cert_source=mock_client_cert_source, api_endpoint=mock_api_endpoint ) api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source( options ) assert api_endpoint == mock_api_endpoint assert cert_source is None # Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "never". with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}): api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source() assert api_endpoint == client_class.DEFAULT_ENDPOINT assert cert_source is None # Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "always". 
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}): api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source() assert api_endpoint == client_class.DEFAULT_MTLS_ENDPOINT assert cert_source is None # Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "auto" and default cert doesn't exist. with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "true"}): with mock.patch( "google.auth.transport.mtls.has_default_client_cert_source", return_value=False, ): api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source() assert api_endpoint == client_class.DEFAULT_ENDPOINT assert cert_source is None # Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "auto" and default cert exists. with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "true"}): with mock.patch( "google.auth.transport.mtls.has_default_client_cert_source", return_value=True, ): with mock.patch( "google.auth.transport.mtls.default_client_cert_source", return_value=mock_client_cert_source, ): ( api_endpoint, cert_source, ) = client_class.get_mtls_endpoint_and_cert_source() assert api_endpoint == client_class.DEFAULT_MTLS_ENDPOINT assert cert_source == mock_client_cert_source @pytest.mark.parametrize( "client_class,transport_class,transport_name", [ (AgentsClient, transports.AgentsGrpcTransport, "grpc"), (AgentsAsyncClient, transports.AgentsGrpcAsyncIOTransport, "grpc_asyncio"), ], ) def test_agents_client_client_options_scopes( client_class, transport_class, transport_name ): # Check the case scopes are provided. options = client_options.ClientOptions(scopes=["1", "2"],) with mock.patch.object(transport_class, "__init__") as patched: patched.return_value = None client = client_class(client_options=options, transport=transport_name) patched.assert_called_once_with( credentials=None, credentials_file=None, host=client.DEFAULT_ENDPOINT, scopes=["1", "2"], client_cert_source_for_mtls=None, quota_project_id=None, client_info=transports.base.DEFAULT_CLIENT_INFO, always_use_jwt_access=True, ) @pytest.mark.parametrize( "client_class,transport_class,transport_name,grpc_helpers", [ (AgentsClient, transports.AgentsGrpcTransport, "grpc", grpc_helpers), ( AgentsAsyncClient, transports.AgentsGrpcAsyncIOTransport, "grpc_asyncio", grpc_helpers_async, ), ], ) def test_agents_client_client_options_credentials_file( client_class, transport_class, transport_name, grpc_helpers ): # Check the case credentials file is provided. 
options = client_options.ClientOptions(credentials_file="credentials.json") with mock.patch.object(transport_class, "__init__") as patched: patched.return_value = None client = client_class(client_options=options, transport=transport_name) patched.assert_called_once_with( credentials=None, credentials_file="credentials.json", host=client.DEFAULT_ENDPOINT, scopes=None, client_cert_source_for_mtls=None, quota_project_id=None, client_info=transports.base.DEFAULT_CLIENT_INFO, always_use_jwt_access=True, ) def test_agents_client_client_options_from_dict(): with mock.patch( "google.cloud.dialogflowcx_v3.services.agents.transports.AgentsGrpcTransport.__init__" ) as grpc_transport: grpc_transport.return_value = None client = AgentsClient(client_options={"api_endpoint": "squid.clam.whelk"}) grpc_transport.assert_called_once_with( credentials=None, credentials_file=None, host="squid.clam.whelk", scopes=None, client_cert_source_for_mtls=None, quota_project_id=None, client_info=transports.base.DEFAULT_CLIENT_INFO, always_use_jwt_access=True, ) @pytest.mark.parametrize( "client_class,transport_class,transport_name,grpc_helpers", [ (AgentsClient, transports.AgentsGrpcTransport, "grpc", grpc_helpers), ( AgentsAsyncClient, transports.AgentsGrpcAsyncIOTransport, "grpc_asyncio", grpc_helpers_async, ), ], ) def test_agents_client_create_channel_credentials_file( client_class, transport_class, transport_name, grpc_helpers ): # Check the case credentials file is provided. options = client_options.ClientOptions(credentials_file="credentials.json") with mock.patch.object(transport_class, "__init__") as patched: patched.return_value = None client = client_class(client_options=options, transport=transport_name) patched.assert_called_once_with( credentials=None, credentials_file="credentials.json", host=client.DEFAULT_ENDPOINT, scopes=None, client_cert_source_for_mtls=None, quota_project_id=None, client_info=transports.base.DEFAULT_CLIENT_INFO, always_use_jwt_access=True, ) # test that the credentials from file are saved and used as the credentials. with mock.patch.object( google.auth, "load_credentials_from_file", autospec=True ) as load_creds, mock.patch.object( google.auth, "default", autospec=True ) as adc, mock.patch.object( grpc_helpers, "create_channel" ) as create_channel: creds = ga_credentials.AnonymousCredentials() file_creds = ga_credentials.AnonymousCredentials() load_creds.return_value = (file_creds, None) adc.return_value = (creds, None) client = client_class(client_options=options, transport=transport_name) create_channel.assert_called_with( "dialogflow.googleapis.com:443", credentials=file_creds, credentials_file=None, quota_project_id=None, default_scopes=( "https://www.googleapis.com/auth/cloud-platform", "https://www.googleapis.com/auth/dialogflow", ), scopes=None, default_host="dialogflow.googleapis.com", ssl_credentials=None, options=[ ("grpc.max_send_message_length", -1), ("grpc.max_receive_message_length", -1), ], ) @pytest.mark.parametrize("request_type", [agent.ListAgentsRequest, dict,]) def test_list_agents(request_type, transport: str = "grpc"): client = AgentsClient( credentials=ga_credentials.AnonymousCredentials(), transport=transport, ) # Everything is optional in proto3 as far as the runtime is concerned, # and we are mocking out the actual API, so just send an empty request. request = request_type() # Mock the actual call within the gRPC stub, and fake the request. 
with mock.patch.object(type(client.transport.list_agents), "__call__") as call: # Designate an appropriate return value for the call. call.return_value = agent.ListAgentsResponse( next_page_token="next_page_token_value", ) response = client.list_agents(request) # Establish that the underlying gRPC stub method was called. assert len(call.mock_calls) == 1 _, args, _ = call.mock_calls[0] assert args[0] == agent.ListAgentsRequest() # Establish that the response is the type that we expect. assert isinstance(response, pagers.ListAgentsPager) assert response.next_page_token == "next_page_token_value" def test_list_agents_empty_call(): # This test is a coverage failsafe to make sure that totally empty calls, # i.e. request == None and no flattened fields passed, work. client = AgentsClient( credentials=ga_credentials.AnonymousCredentials(), transport="grpc", ) # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object(type(client.transport.list_agents), "__call__") as call: client.list_agents() call.assert_called() _, args, _ = call.mock_calls[0] assert args[0] == agent.ListAgentsRequest() @pytest.mark.asyncio async def test_list_agents_async( transport: str = "grpc_asyncio", request_type=agent.ListAgentsRequest ): client = AgentsAsyncClient( credentials=ga_credentials.AnonymousCredentials(), transport=transport, ) # Everything is optional in proto3 as far as the runtime is concerned, # and we are mocking out the actual API, so just send an empty request. request = request_type() # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object(type(client.transport.list_agents), "__call__") as call: # Designate an appropriate return value for the call. call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( agent.ListAgentsResponse(next_page_token="next_page_token_value",) ) response = await client.list_agents(request) # Establish that the underlying gRPC stub method was called. assert len(call.mock_calls) _, args, _ = call.mock_calls[0] assert args[0] == agent.ListAgentsRequest() # Establish that the response is the type that we expect. assert isinstance(response, pagers.ListAgentsAsyncPager) assert response.next_page_token == "next_page_token_value" @pytest.mark.asyncio async def test_list_agents_async_from_dict(): await test_list_agents_async(request_type=dict) def test_list_agents_field_headers(): client = AgentsClient(credentials=ga_credentials.AnonymousCredentials(),) # Any value that is part of the HTTP/1.1 URI should be sent as # a field header. Set these to a non-empty value. request = agent.ListAgentsRequest() request.parent = "parent/value" # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object(type(client.transport.list_agents), "__call__") as call: call.return_value = agent.ListAgentsResponse() client.list_agents(request) # Establish that the underlying gRPC stub method was called. assert len(call.mock_calls) == 1 _, args, _ = call.mock_calls[0] assert args[0] == request # Establish that the field header was sent. _, _, kw = call.mock_calls[0] assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"] @pytest.mark.asyncio async def test_list_agents_field_headers_async(): client = AgentsAsyncClient(credentials=ga_credentials.AnonymousCredentials(),) # Any value that is part of the HTTP/1.1 URI should be sent as # a field header. Set these to a non-empty value. 
request = agent.ListAgentsRequest() request.parent = "parent/value" # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object(type(client.transport.list_agents), "__call__") as call: call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( agent.ListAgentsResponse() ) await client.list_agents(request) # Establish that the underlying gRPC stub method was called. assert len(call.mock_calls) _, args, _ = call.mock_calls[0] assert args[0] == request # Establish that the field header was sent. _, _, kw = call.mock_calls[0] assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"] def test_list_agents_flattened(): client = AgentsClient(credentials=ga_credentials.AnonymousCredentials(),) # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object(type(client.transport.list_agents), "__call__") as call: # Designate an appropriate return value for the call. call.return_value = agent.ListAgentsResponse() # Call the method with a truthy value for each flattened field, # using the keyword arguments to the method. client.list_agents(parent="parent_value",) # Establish that the underlying call was made with the expected # request object values. assert len(call.mock_calls) == 1 _, args, _ = call.mock_calls[0] arg = args[0].parent mock_val = "parent_value" assert arg == mock_val def test_list_agents_flattened_error(): client = AgentsClient(credentials=ga_credentials.AnonymousCredentials(),) # Attempting to call a method with both a request object and flattened # fields is an error. with pytest.raises(ValueError): client.list_agents( agent.ListAgentsRequest(), parent="parent_value", ) @pytest.mark.asyncio async def test_list_agents_flattened_async(): client = AgentsAsyncClient(credentials=ga_credentials.AnonymousCredentials(),) # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object(type(client.transport.list_agents), "__call__") as call: # Designate an appropriate return value for the call. call.return_value = agent.ListAgentsResponse() call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( agent.ListAgentsResponse() ) # Call the method with a truthy value for each flattened field, # using the keyword arguments to the method. response = await client.list_agents(parent="parent_value",) # Establish that the underlying call was made with the expected # request object values. assert len(call.mock_calls) _, args, _ = call.mock_calls[0] arg = args[0].parent mock_val = "parent_value" assert arg == mock_val @pytest.mark.asyncio async def test_list_agents_flattened_error_async(): client = AgentsAsyncClient(credentials=ga_credentials.AnonymousCredentials(),) # Attempting to call a method with both a request object and flattened # fields is an error. with pytest.raises(ValueError): await client.list_agents( agent.ListAgentsRequest(), parent="parent_value", ) def test_list_agents_pager(transport_name: str = "grpc"): client = AgentsClient( credentials=ga_credentials.AnonymousCredentials, transport=transport_name, ) # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object(type(client.transport.list_agents), "__call__") as call: # Set the response to a series of pages. 
call.side_effect = ( agent.ListAgentsResponse( agents=[agent.Agent(), agent.Agent(), agent.Agent(),], next_page_token="abc", ), agent.ListAgentsResponse(agents=[], next_page_token="def",), agent.ListAgentsResponse(agents=[agent.Agent(),], next_page_token="ghi",), agent.ListAgentsResponse(agents=[agent.Agent(), agent.Agent(),],), RuntimeError, ) metadata = () metadata = tuple(metadata) + ( gapic_v1.routing_header.to_grpc_metadata((("parent", ""),)), ) pager = client.list_agents(request={}) assert pager._metadata == metadata results = [i for i in pager] assert len(results) == 6 assert all(isinstance(i, agent.Agent) for i in results) def test_list_agents_pages(transport_name: str = "grpc"): client = AgentsClient( credentials=ga_credentials.AnonymousCredentials, transport=transport_name, ) # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object(type(client.transport.list_agents), "__call__") as call: # Set the response to a series of pages. call.side_effect = ( agent.ListAgentsResponse( agents=[agent.Agent(), agent.Agent(), agent.Agent(),], next_page_token="abc", ), agent.ListAgentsResponse(agents=[], next_page_token="def",), agent.ListAgentsResponse(agents=[agent.Agent(),], next_page_token="ghi",), agent.ListAgentsResponse(agents=[agent.Agent(), agent.Agent(),],), RuntimeError, ) pages = list(client.list_agents(request={}).pages) for page_, token in zip(pages, ["abc", "def", "ghi", ""]): assert page_.raw_page.next_page_token == token @pytest.mark.asyncio async def test_list_agents_async_pager(): client = AgentsAsyncClient(credentials=ga_credentials.AnonymousCredentials,) # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object( type(client.transport.list_agents), "__call__", new_callable=mock.AsyncMock ) as call: # Set the response to a series of pages. call.side_effect = ( agent.ListAgentsResponse( agents=[agent.Agent(), agent.Agent(), agent.Agent(),], next_page_token="abc", ), agent.ListAgentsResponse(agents=[], next_page_token="def",), agent.ListAgentsResponse(agents=[agent.Agent(),], next_page_token="ghi",), agent.ListAgentsResponse(agents=[agent.Agent(), agent.Agent(),],), RuntimeError, ) async_pager = await client.list_agents(request={},) assert async_pager.next_page_token == "abc" responses = [] async for response in async_pager: responses.append(response) assert len(responses) == 6 assert all(isinstance(i, agent.Agent) for i in responses) @pytest.mark.asyncio async def test_list_agents_async_pages(): client = AgentsAsyncClient(credentials=ga_credentials.AnonymousCredentials,) # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object( type(client.transport.list_agents), "__call__", new_callable=mock.AsyncMock ) as call: # Set the response to a series of pages. 
call.side_effect = ( agent.ListAgentsResponse( agents=[agent.Agent(), agent.Agent(), agent.Agent(),], next_page_token="abc", ), agent.ListAgentsResponse(agents=[], next_page_token="def",), agent.ListAgentsResponse(agents=[agent.Agent(),], next_page_token="ghi",), agent.ListAgentsResponse(agents=[agent.Agent(), agent.Agent(),],), RuntimeError, ) pages = [] async for page_ in (await client.list_agents(request={})).pages: pages.append(page_) for page_, token in zip(pages, ["abc", "def", "ghi", ""]): assert page_.raw_page.next_page_token == token @pytest.mark.parametrize("request_type", [agent.GetAgentRequest, dict,]) def test_get_agent(request_type, transport: str = "grpc"): client = AgentsClient( credentials=ga_credentials.AnonymousCredentials(), transport=transport, ) # Everything is optional in proto3 as far as the runtime is concerned, # and we are mocking out the actual API, so just send an empty request. request = request_type() # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object(type(client.transport.get_agent), "__call__") as call: # Designate an appropriate return value for the call. call.return_value = agent.Agent( name="name_value", display_name="display_name_value", default_language_code="default_language_code_value", supported_language_codes=["supported_language_codes_value"], time_zone="time_zone_value", description="description_value", avatar_uri="avatar_uri_value", start_flow="start_flow_value", security_settings="security_settings_value", enable_stackdriver_logging=True, enable_spell_correction=True, ) response = client.get_agent(request) # Establish that the underlying gRPC stub method was called. assert len(call.mock_calls) == 1 _, args, _ = call.mock_calls[0] assert args[0] == agent.GetAgentRequest() # Establish that the response is the type that we expect. assert isinstance(response, agent.Agent) assert response.name == "name_value" assert response.display_name == "display_name_value" assert response.default_language_code == "default_language_code_value" assert response.supported_language_codes == ["supported_language_codes_value"] assert response.time_zone == "time_zone_value" assert response.description == "description_value" assert response.avatar_uri == "avatar_uri_value" assert response.start_flow == "start_flow_value" assert response.security_settings == "security_settings_value" assert response.enable_stackdriver_logging is True assert response.enable_spell_correction is True def test_get_agent_empty_call(): # This test is a coverage failsafe to make sure that totally empty calls, # i.e. request == None and no flattened fields passed, work. client = AgentsClient( credentials=ga_credentials.AnonymousCredentials(), transport="grpc", ) # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object(type(client.transport.get_agent), "__call__") as call: client.get_agent() call.assert_called() _, args, _ = call.mock_calls[0] assert args[0] == agent.GetAgentRequest() @pytest.mark.asyncio async def test_get_agent_async( transport: str = "grpc_asyncio", request_type=agent.GetAgentRequest ): client = AgentsAsyncClient( credentials=ga_credentials.AnonymousCredentials(), transport=transport, ) # Everything is optional in proto3 as far as the runtime is concerned, # and we are mocking out the actual API, so just send an empty request. request = request_type() # Mock the actual call within the gRPC stub, and fake the request. 
with mock.patch.object(type(client.transport.get_agent), "__call__") as call: # Designate an appropriate return value for the call. call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( agent.Agent( name="name_value", display_name="display_name_value", default_language_code="default_language_code_value", supported_language_codes=["supported_language_codes_value"], time_zone="time_zone_value", description="description_value", avatar_uri="avatar_uri_value", start_flow="start_flow_value", security_settings="security_settings_value", enable_stackdriver_logging=True, enable_spell_correction=True, ) ) response = await client.get_agent(request) # Establish that the underlying gRPC stub method was called. assert len(call.mock_calls) _, args, _ = call.mock_calls[0] assert args[0] == agent.GetAgentRequest() # Establish that the response is the type that we expect. assert isinstance(response, agent.Agent) assert response.name == "name_value" assert response.display_name == "display_name_value" assert response.default_language_code == "default_language_code_value" assert response.supported_language_codes == ["supported_language_codes_value"] assert response.time_zone == "time_zone_value" assert response.description == "description_value" assert response.avatar_uri == "avatar_uri_value" assert response.start_flow == "start_flow_value" assert response.security_settings == "security_settings_value" assert response.enable_stackdriver_logging is True assert response.enable_spell_correction is True @pytest.mark.asyncio async def test_get_agent_async_from_dict(): await test_get_agent_async(request_type=dict) def test_get_agent_field_headers(): client = AgentsClient(credentials=ga_credentials.AnonymousCredentials(),) # Any value that is part of the HTTP/1.1 URI should be sent as # a field header. Set these to a non-empty value. request = agent.GetAgentRequest() request.name = "name/value" # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object(type(client.transport.get_agent), "__call__") as call: call.return_value = agent.Agent() client.get_agent(request) # Establish that the underlying gRPC stub method was called. assert len(call.mock_calls) == 1 _, args, _ = call.mock_calls[0] assert args[0] == request # Establish that the field header was sent. _, _, kw = call.mock_calls[0] assert ("x-goog-request-params", "name=name/value",) in kw["metadata"] @pytest.mark.asyncio async def test_get_agent_field_headers_async(): client = AgentsAsyncClient(credentials=ga_credentials.AnonymousCredentials(),) # Any value that is part of the HTTP/1.1 URI should be sent as # a field header. Set these to a non-empty value. request = agent.GetAgentRequest() request.name = "name/value" # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object(type(client.transport.get_agent), "__call__") as call: call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(agent.Agent()) await client.get_agent(request) # Establish that the underlying gRPC stub method was called. assert len(call.mock_calls) _, args, _ = call.mock_calls[0] assert args[0] == request # Establish that the field header was sent. _, _, kw = call.mock_calls[0] assert ("x-goog-request-params", "name=name/value",) in kw["metadata"] def test_get_agent_flattened(): client = AgentsClient(credentials=ga_credentials.AnonymousCredentials(),) # Mock the actual call within the gRPC stub, and fake the request. 
with mock.patch.object(type(client.transport.get_agent), "__call__") as call: # Designate an appropriate return value for the call. call.return_value = agent.Agent() # Call the method with a truthy value for each flattened field, # using the keyword arguments to the method. client.get_agent(name="name_value",) # Establish that the underlying call was made with the expected # request object values. assert len(call.mock_calls) == 1 _, args, _ = call.mock_calls[0] arg = args[0].name mock_val = "name_value" assert arg == mock_val def test_get_agent_flattened_error(): client = AgentsClient(credentials=ga_credentials.AnonymousCredentials(),) # Attempting to call a method with both a request object and flattened # fields is an error. with pytest.raises(ValueError): client.get_agent( agent.GetAgentRequest(), name="name_value", ) @pytest.mark.asyncio async def test_get_agent_flattened_async(): client = AgentsAsyncClient(credentials=ga_credentials.AnonymousCredentials(),) # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object(type(client.transport.get_agent), "__call__") as call: # Designate an appropriate return value for the call. call.return_value = agent.Agent() call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(agent.Agent()) # Call the method with a truthy value for each flattened field, # using the keyword arguments to the method. response = await client.get_agent(name="name_value",) # Establish that the underlying call was made with the expected # request object values. assert len(call.mock_calls) _, args, _ = call.mock_calls[0] arg = args[0].name mock_val = "name_value" assert arg == mock_val @pytest.mark.asyncio async def test_get_agent_flattened_error_async(): client = AgentsAsyncClient(credentials=ga_credentials.AnonymousCredentials(),) # Attempting to call a method with both a request object and flattened # fields is an error. with pytest.raises(ValueError): await client.get_agent( agent.GetAgentRequest(), name="name_value", ) @pytest.mark.parametrize("request_type", [gcdc_agent.CreateAgentRequest, dict,]) def test_create_agent(request_type, transport: str = "grpc"): client = AgentsClient( credentials=ga_credentials.AnonymousCredentials(), transport=transport, ) # Everything is optional in proto3 as far as the runtime is concerned, # and we are mocking out the actual API, so just send an empty request. request = request_type() # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object(type(client.transport.create_agent), "__call__") as call: # Designate an appropriate return value for the call. call.return_value = gcdc_agent.Agent( name="name_value", display_name="display_name_value", default_language_code="default_language_code_value", supported_language_codes=["supported_language_codes_value"], time_zone="time_zone_value", description="description_value", avatar_uri="avatar_uri_value", start_flow="start_flow_value", security_settings="security_settings_value", enable_stackdriver_logging=True, enable_spell_correction=True, ) response = client.create_agent(request) # Establish that the underlying gRPC stub method was called. assert len(call.mock_calls) == 1 _, args, _ = call.mock_calls[0] assert args[0] == gcdc_agent.CreateAgentRequest() # Establish that the response is the type that we expect. 
assert isinstance(response, gcdc_agent.Agent) assert response.name == "name_value" assert response.display_name == "display_name_value" assert response.default_language_code == "default_language_code_value" assert response.supported_language_codes == ["supported_language_codes_value"] assert response.time_zone == "time_zone_value" assert response.description == "description_value" assert response.avatar_uri == "avatar_uri_value" assert response.start_flow == "start_flow_value" assert response.security_settings == "security_settings_value" assert response.enable_stackdriver_logging is True assert response.enable_spell_correction is True def test_create_agent_empty_call(): # This test is a coverage failsafe to make sure that totally empty calls, # i.e. request == None and no flattened fields passed, work. client = AgentsClient( credentials=ga_credentials.AnonymousCredentials(), transport="grpc", ) # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object(type(client.transport.create_agent), "__call__") as call: client.create_agent() call.assert_called() _, args, _ = call.mock_calls[0] assert args[0] == gcdc_agent.CreateAgentRequest() @pytest.mark.asyncio async def test_create_agent_async( transport: str = "grpc_asyncio", request_type=gcdc_agent.CreateAgentRequest ): client = AgentsAsyncClient( credentials=ga_credentials.AnonymousCredentials(), transport=transport, ) # Everything is optional in proto3 as far as the runtime is concerned, # and we are mocking out the actual API, so just send an empty request. request = request_type() # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object(type(client.transport.create_agent), "__call__") as call: # Designate an appropriate return value for the call. call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( gcdc_agent.Agent( name="name_value", display_name="display_name_value", default_language_code="default_language_code_value", supported_language_codes=["supported_language_codes_value"], time_zone="time_zone_value", description="description_value", avatar_uri="avatar_uri_value", start_flow="start_flow_value", security_settings="security_settings_value", enable_stackdriver_logging=True, enable_spell_correction=True, ) ) response = await client.create_agent(request) # Establish that the underlying gRPC stub method was called. assert len(call.mock_calls) _, args, _ = call.mock_calls[0] assert args[0] == gcdc_agent.CreateAgentRequest() # Establish that the response is the type that we expect. assert isinstance(response, gcdc_agent.Agent) assert response.name == "name_value" assert response.display_name == "display_name_value" assert response.default_language_code == "default_language_code_value" assert response.supported_language_codes == ["supported_language_codes_value"] assert response.time_zone == "time_zone_value" assert response.description == "description_value" assert response.avatar_uri == "avatar_uri_value" assert response.start_flow == "start_flow_value" assert response.security_settings == "security_settings_value" assert response.enable_stackdriver_logging is True assert response.enable_spell_correction is True @pytest.mark.asyncio async def test_create_agent_async_from_dict(): await test_create_agent_async(request_type=dict) def test_create_agent_field_headers(): client = AgentsClient(credentials=ga_credentials.AnonymousCredentials(),) # Any value that is part of the HTTP/1.1 URI should be sent as # a field header. Set these to a non-empty value. 
request = gcdc_agent.CreateAgentRequest() request.parent = "parent/value" # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object(type(client.transport.create_agent), "__call__") as call: call.return_value = gcdc_agent.Agent() client.create_agent(request) # Establish that the underlying gRPC stub method was called. assert len(call.mock_calls) == 1 _, args, _ = call.mock_calls[0] assert args[0] == request # Establish that the field header was sent. _, _, kw = call.mock_calls[0] assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"] @pytest.mark.asyncio async def test_create_agent_field_headers_async(): client = AgentsAsyncClient(credentials=ga_credentials.AnonymousCredentials(),) # Any value that is part of the HTTP/1.1 URI should be sent as # a field header. Set these to a non-empty value. request = gcdc_agent.CreateAgentRequest() request.parent = "parent/value" # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object(type(client.transport.create_agent), "__call__") as call: call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gcdc_agent.Agent()) await client.create_agent(request) # Establish that the underlying gRPC stub method was called. assert len(call.mock_calls) _, args, _ = call.mock_calls[0] assert args[0] == request # Establish that the field header was sent. _, _, kw = call.mock_calls[0] assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"] def test_create_agent_flattened(): client = AgentsClient(credentials=ga_credentials.AnonymousCredentials(),) # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object(type(client.transport.create_agent), "__call__") as call: # Designate an appropriate return value for the call. call.return_value = gcdc_agent.Agent() # Call the method with a truthy value for each flattened field, # using the keyword arguments to the method. client.create_agent( parent="parent_value", agent=gcdc_agent.Agent(name="name_value"), ) # Establish that the underlying call was made with the expected # request object values. assert len(call.mock_calls) == 1 _, args, _ = call.mock_calls[0] arg = args[0].parent mock_val = "parent_value" assert arg == mock_val arg = args[0].agent mock_val = gcdc_agent.Agent(name="name_value") assert arg == mock_val def test_create_agent_flattened_error(): client = AgentsClient(credentials=ga_credentials.AnonymousCredentials(),) # Attempting to call a method with both a request object and flattened # fields is an error. with pytest.raises(ValueError): client.create_agent( gcdc_agent.CreateAgentRequest(), parent="parent_value", agent=gcdc_agent.Agent(name="name_value"), ) @pytest.mark.asyncio async def test_create_agent_flattened_async(): client = AgentsAsyncClient(credentials=ga_credentials.AnonymousCredentials(),) # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object(type(client.transport.create_agent), "__call__") as call: # Designate an appropriate return value for the call. call.return_value = gcdc_agent.Agent() call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gcdc_agent.Agent()) # Call the method with a truthy value for each flattened field, # using the keyword arguments to the method. response = await client.create_agent( parent="parent_value", agent=gcdc_agent.Agent(name="name_value"), ) # Establish that the underlying call was made with the expected # request object values. 
assert len(call.mock_calls) _, args, _ = call.mock_calls[0] arg = args[0].parent mock_val = "parent_value" assert arg == mock_val arg = args[0].agent mock_val = gcdc_agent.Agent(name="name_value") assert arg == mock_val @pytest.mark.asyncio async def test_create_agent_flattened_error_async(): client = AgentsAsyncClient(credentials=ga_credentials.AnonymousCredentials(),) # Attempting to call a method with both a request object and flattened # fields is an error. with pytest.raises(ValueError): await client.create_agent( gcdc_agent.CreateAgentRequest(), parent="parent_value", agent=gcdc_agent.Agent(name="name_value"), ) @pytest.mark.parametrize("request_type", [gcdc_agent.UpdateAgentRequest, dict,]) def test_update_agent(request_type, transport: str = "grpc"): client = AgentsClient( credentials=ga_credentials.AnonymousCredentials(), transport=transport, ) # Everything is optional in proto3 as far as the runtime is concerned, # and we are mocking out the actual API, so just send an empty request. request = request_type() # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object(type(client.transport.update_agent), "__call__") as call: # Designate an appropriate return value for the call. call.return_value = gcdc_agent.Agent( name="name_value", display_name="display_name_value", default_language_code="default_language_code_value", supported_language_codes=["supported_language_codes_value"], time_zone="time_zone_value", description="description_value", avatar_uri="avatar_uri_value", start_flow="start_flow_value", security_settings="security_settings_value", enable_stackdriver_logging=True, enable_spell_correction=True, ) response = client.update_agent(request) # Establish that the underlying gRPC stub method was called. assert len(call.mock_calls) == 1 _, args, _ = call.mock_calls[0] assert args[0] == gcdc_agent.UpdateAgentRequest() # Establish that the response is the type that we expect. assert isinstance(response, gcdc_agent.Agent) assert response.name == "name_value" assert response.display_name == "display_name_value" assert response.default_language_code == "default_language_code_value" assert response.supported_language_codes == ["supported_language_codes_value"] assert response.time_zone == "time_zone_value" assert response.description == "description_value" assert response.avatar_uri == "avatar_uri_value" assert response.start_flow == "start_flow_value" assert response.security_settings == "security_settings_value" assert response.enable_stackdriver_logging is True assert response.enable_spell_correction is True def test_update_agent_empty_call(): # This test is a coverage failsafe to make sure that totally empty calls, # i.e. request == None and no flattened fields passed, work. client = AgentsClient( credentials=ga_credentials.AnonymousCredentials(), transport="grpc", ) # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object(type(client.transport.update_agent), "__call__") as call: client.update_agent() call.assert_called() _, args, _ = call.mock_calls[0] assert args[0] == gcdc_agent.UpdateAgentRequest() @pytest.mark.asyncio async def test_update_agent_async( transport: str = "grpc_asyncio", request_type=gcdc_agent.UpdateAgentRequest ): client = AgentsAsyncClient( credentials=ga_credentials.AnonymousCredentials(), transport=transport, ) # Everything is optional in proto3 as far as the runtime is concerned, # and we are mocking out the actual API, so just send an empty request. 
request = request_type() # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object(type(client.transport.update_agent), "__call__") as call: # Designate an appropriate return value for the call. call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( gcdc_agent.Agent( name="name_value", display_name="display_name_value", default_language_code="default_language_code_value", supported_language_codes=["supported_language_codes_value"], time_zone="time_zone_value", description="description_value", avatar_uri="avatar_uri_value", start_flow="start_flow_value", security_settings="security_settings_value", enable_stackdriver_logging=True, enable_spell_correction=True, ) ) response = await client.update_agent(request) # Establish that the underlying gRPC stub method was called. assert len(call.mock_calls) _, args, _ = call.mock_calls[0] assert args[0] == gcdc_agent.UpdateAgentRequest() # Establish that the response is the type that we expect. assert isinstance(response, gcdc_agent.Agent) assert response.name == "name_value" assert response.display_name == "display_name_value" assert response.default_language_code == "default_language_code_value" assert response.supported_language_codes == ["supported_language_codes_value"] assert response.time_zone == "time_zone_value" assert response.description == "description_value" assert response.avatar_uri == "avatar_uri_value" assert response.start_flow == "start_flow_value" assert response.security_settings == "security_settings_value" assert response.enable_stackdriver_logging is True assert response.enable_spell_correction is True @pytest.mark.asyncio async def test_update_agent_async_from_dict(): await test_update_agent_async(request_type=dict) def test_update_agent_field_headers(): client = AgentsClient(credentials=ga_credentials.AnonymousCredentials(),) # Any value that is part of the HTTP/1.1 URI should be sent as # a field header. Set these to a non-empty value. request = gcdc_agent.UpdateAgentRequest() request.agent.name = "agent.name/value" # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object(type(client.transport.update_agent), "__call__") as call: call.return_value = gcdc_agent.Agent() client.update_agent(request) # Establish that the underlying gRPC stub method was called. assert len(call.mock_calls) == 1 _, args, _ = call.mock_calls[0] assert args[0] == request # Establish that the field header was sent. _, _, kw = call.mock_calls[0] assert ("x-goog-request-params", "agent.name=agent.name/value",) in kw["metadata"] @pytest.mark.asyncio async def test_update_agent_field_headers_async(): client = AgentsAsyncClient(credentials=ga_credentials.AnonymousCredentials(),) # Any value that is part of the HTTP/1.1 URI should be sent as # a field header. Set these to a non-empty value. request = gcdc_agent.UpdateAgentRequest() request.agent.name = "agent.name/value" # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object(type(client.transport.update_agent), "__call__") as call: call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gcdc_agent.Agent()) await client.update_agent(request) # Establish that the underlying gRPC stub method was called. assert len(call.mock_calls) _, args, _ = call.mock_calls[0] assert args[0] == request # Establish that the field header was sent. 
_, _, kw = call.mock_calls[0] assert ("x-goog-request-params", "agent.name=agent.name/value",) in kw["metadata"] def test_update_agent_flattened(): client = AgentsClient(credentials=ga_credentials.AnonymousCredentials(),) # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object(type(client.transport.update_agent), "__call__") as call: # Designate an appropriate return value for the call. call.return_value = gcdc_agent.Agent() # Call the method with a truthy value for each flattened field, # using the keyword arguments to the method. client.update_agent( agent=gcdc_agent.Agent(name="name_value"), update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]), ) # Establish that the underlying call was made with the expected # request object values. assert len(call.mock_calls) == 1 _, args, _ = call.mock_calls[0] arg = args[0].agent mock_val = gcdc_agent.Agent(name="name_value") assert arg == mock_val arg = args[0].update_mask mock_val = field_mask_pb2.FieldMask(paths=["paths_value"]) assert arg == mock_val def test_update_agent_flattened_error(): client = AgentsClient(credentials=ga_credentials.AnonymousCredentials(),) # Attempting to call a method with both a request object and flattened # fields is an error. with pytest.raises(ValueError): client.update_agent( gcdc_agent.UpdateAgentRequest(), agent=gcdc_agent.Agent(name="name_value"), update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]), ) @pytest.mark.asyncio async def test_update_agent_flattened_async(): client = AgentsAsyncClient(credentials=ga_credentials.AnonymousCredentials(),) # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object(type(client.transport.update_agent), "__call__") as call: # Designate an appropriate return value for the call. call.return_value = gcdc_agent.Agent() call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gcdc_agent.Agent()) # Call the method with a truthy value for each flattened field, # using the keyword arguments to the method. response = await client.update_agent( agent=gcdc_agent.Agent(name="name_value"), update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]), ) # Establish that the underlying call was made with the expected # request object values. assert len(call.mock_calls) _, args, _ = call.mock_calls[0] arg = args[0].agent mock_val = gcdc_agent.Agent(name="name_value") assert arg == mock_val arg = args[0].update_mask mock_val = field_mask_pb2.FieldMask(paths=["paths_value"]) assert arg == mock_val @pytest.mark.asyncio async def test_update_agent_flattened_error_async(): client = AgentsAsyncClient(credentials=ga_credentials.AnonymousCredentials(),) # Attempting to call a method with both a request object and flattened # fields is an error. with pytest.raises(ValueError): await client.update_agent( gcdc_agent.UpdateAgentRequest(), agent=gcdc_agent.Agent(name="name_value"), update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]), ) @pytest.mark.parametrize("request_type", [agent.DeleteAgentRequest, dict,]) def test_delete_agent(request_type, transport: str = "grpc"): client = AgentsClient( credentials=ga_credentials.AnonymousCredentials(), transport=transport, ) # Everything is optional in proto3 as far as the runtime is concerned, # and we are mocking out the actual API, so just send an empty request. request = request_type() # Mock the actual call within the gRPC stub, and fake the request. 
with mock.patch.object(type(client.transport.delete_agent), "__call__") as call: # Designate an appropriate return value for the call. call.return_value = None response = client.delete_agent(request) # Establish that the underlying gRPC stub method was called. assert len(call.mock_calls) == 1 _, args, _ = call.mock_calls[0] assert args[0] == agent.DeleteAgentRequest() # Establish that the response is the type that we expect. assert response is None def test_delete_agent_empty_call(): # This test is a coverage failsafe to make sure that totally empty calls, # i.e. request == None and no flattened fields passed, work. client = AgentsClient( credentials=ga_credentials.AnonymousCredentials(), transport="grpc", ) # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object(type(client.transport.delete_agent), "__call__") as call: client.delete_agent() call.assert_called() _, args, _ = call.mock_calls[0] assert args[0] == agent.DeleteAgentRequest() @pytest.mark.asyncio async def test_delete_agent_async( transport: str = "grpc_asyncio", request_type=agent.DeleteAgentRequest ): client = AgentsAsyncClient( credentials=ga_credentials.AnonymousCredentials(), transport=transport, ) # Everything is optional in proto3 as far as the runtime is concerned, # and we are mocking out the actual API, so just send an empty request. request = request_type() # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object(type(client.transport.delete_agent), "__call__") as call: # Designate an appropriate return value for the call. call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None) response = await client.delete_agent(request) # Establish that the underlying gRPC stub method was called. assert len(call.mock_calls) _, args, _ = call.mock_calls[0] assert args[0] == agent.DeleteAgentRequest() # Establish that the response is the type that we expect. assert response is None @pytest.mark.asyncio async def test_delete_agent_async_from_dict(): await test_delete_agent_async(request_type=dict) def test_delete_agent_field_headers(): client = AgentsClient(credentials=ga_credentials.AnonymousCredentials(),) # Any value that is part of the HTTP/1.1 URI should be sent as # a field header. Set these to a non-empty value. request = agent.DeleteAgentRequest() request.name = "name/value" # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object(type(client.transport.delete_agent), "__call__") as call: call.return_value = None client.delete_agent(request) # Establish that the underlying gRPC stub method was called. assert len(call.mock_calls) == 1 _, args, _ = call.mock_calls[0] assert args[0] == request # Establish that the field header was sent. _, _, kw = call.mock_calls[0] assert ("x-goog-request-params", "name=name/value",) in kw["metadata"] @pytest.mark.asyncio async def test_delete_agent_field_headers_async(): client = AgentsAsyncClient(credentials=ga_credentials.AnonymousCredentials(),) # Any value that is part of the HTTP/1.1 URI should be sent as # a field header. Set these to a non-empty value. request = agent.DeleteAgentRequest() request.name = "name/value" # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object(type(client.transport.delete_agent), "__call__") as call: call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None) await client.delete_agent(request) # Establish that the underlying gRPC stub method was called. 
assert len(call.mock_calls) _, args, _ = call.mock_calls[0] assert args[0] == request # Establish that the field header was sent. _, _, kw = call.mock_calls[0] assert ("x-goog-request-params", "name=name/value",) in kw["metadata"] def test_delete_agent_flattened(): client = AgentsClient(credentials=ga_credentials.AnonymousCredentials(),) # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object(type(client.transport.delete_agent), "__call__") as call: # Designate an appropriate return value for the call. call.return_value = None # Call the method with a truthy value for each flattened field, # using the keyword arguments to the method. client.delete_agent(name="name_value",) # Establish that the underlying call was made with the expected # request object values. assert len(call.mock_calls) == 1 _, args, _ = call.mock_calls[0] arg = args[0].name mock_val = "name_value" assert arg == mock_val def test_delete_agent_flattened_error(): client = AgentsClient(credentials=ga_credentials.AnonymousCredentials(),) # Attempting to call a method with both a request object and flattened # fields is an error. with pytest.raises(ValueError): client.delete_agent( agent.DeleteAgentRequest(), name="name_value", ) @pytest.mark.asyncio async def test_delete_agent_flattened_async(): client = AgentsAsyncClient(credentials=ga_credentials.AnonymousCredentials(),) # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object(type(client.transport.delete_agent), "__call__") as call: # Designate an appropriate return value for the call. call.return_value = None call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None) # Call the method with a truthy value for each flattened field, # using the keyword arguments to the method. response = await client.delete_agent(name="name_value",) # Establish that the underlying call was made with the expected # request object values. assert len(call.mock_calls) _, args, _ = call.mock_calls[0] arg = args[0].name mock_val = "name_value" assert arg == mock_val @pytest.mark.asyncio async def test_delete_agent_flattened_error_async(): client = AgentsAsyncClient(credentials=ga_credentials.AnonymousCredentials(),) # Attempting to call a method with both a request object and flattened # fields is an error. with pytest.raises(ValueError): await client.delete_agent( agent.DeleteAgentRequest(), name="name_value", ) @pytest.mark.parametrize("request_type", [agent.ExportAgentRequest, dict,]) def test_export_agent(request_type, transport: str = "grpc"): client = AgentsClient( credentials=ga_credentials.AnonymousCredentials(), transport=transport, ) # Everything is optional in proto3 as far as the runtime is concerned, # and we are mocking out the actual API, so just send an empty request. request = request_type() # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object(type(client.transport.export_agent), "__call__") as call: # Designate an appropriate return value for the call. call.return_value = operations_pb2.Operation(name="operations/spam") response = client.export_agent(request) # Establish that the underlying gRPC stub method was called. assert len(call.mock_calls) == 1 _, args, _ = call.mock_calls[0] assert args[0] == agent.ExportAgentRequest() # Establish that the response is the type that we expect. assert isinstance(response, future.Future) def test_export_agent_empty_call(): # This test is a coverage failsafe to make sure that totally empty calls, # i.e. 
request == None and no flattened fields passed, work. client = AgentsClient( credentials=ga_credentials.AnonymousCredentials(), transport="grpc", ) # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object(type(client.transport.export_agent), "__call__") as call: client.export_agent() call.assert_called() _, args, _ = call.mock_calls[0] assert args[0] == agent.ExportAgentRequest() @pytest.mark.asyncio async def test_export_agent_async( transport: str = "grpc_asyncio", request_type=agent.ExportAgentRequest ): client = AgentsAsyncClient( credentials=ga_credentials.AnonymousCredentials(), transport=transport, ) # Everything is optional in proto3 as far as the runtime is concerned, # and we are mocking out the actual API, so just send an empty request. request = request_type() # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object(type(client.transport.export_agent), "__call__") as call: # Designate an appropriate return value for the call. call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( operations_pb2.Operation(name="operations/spam") ) response = await client.export_agent(request) # Establish that the underlying gRPC stub method was called. assert len(call.mock_calls) _, args, _ = call.mock_calls[0] assert args[0] == agent.ExportAgentRequest() # Establish that the response is the type that we expect. assert isinstance(response, future.Future) @pytest.mark.asyncio async def test_export_agent_async_from_dict(): await test_export_agent_async(request_type=dict) def test_export_agent_field_headers(): client = AgentsClient(credentials=ga_credentials.AnonymousCredentials(),) # Any value that is part of the HTTP/1.1 URI should be sent as # a field header. Set these to a non-empty value. request = agent.ExportAgentRequest() request.name = "name/value" # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object(type(client.transport.export_agent), "__call__") as call: call.return_value = operations_pb2.Operation(name="operations/op") client.export_agent(request) # Establish that the underlying gRPC stub method was called. assert len(call.mock_calls) == 1 _, args, _ = call.mock_calls[0] assert args[0] == request # Establish that the field header was sent. _, _, kw = call.mock_calls[0] assert ("x-goog-request-params", "name=name/value",) in kw["metadata"] @pytest.mark.asyncio async def test_export_agent_field_headers_async(): client = AgentsAsyncClient(credentials=ga_credentials.AnonymousCredentials(),) # Any value that is part of the HTTP/1.1 URI should be sent as # a field header. Set these to a non-empty value. request = agent.ExportAgentRequest() request.name = "name/value" # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object(type(client.transport.export_agent), "__call__") as call: call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( operations_pb2.Operation(name="operations/op") ) await client.export_agent(request) # Establish that the underlying gRPC stub method was called. assert len(call.mock_calls) _, args, _ = call.mock_calls[0] assert args[0] == request # Establish that the field header was sent. 
_, _, kw = call.mock_calls[0] assert ("x-goog-request-params", "name=name/value",) in kw["metadata"] @pytest.mark.parametrize("request_type", [agent.RestoreAgentRequest, dict,]) def test_restore_agent(request_type, transport: str = "grpc"): client = AgentsClient( credentials=ga_credentials.AnonymousCredentials(), transport=transport, ) # Everything is optional in proto3 as far as the runtime is concerned, # and we are mocking out the actual API, so just send an empty request. request = request_type() # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object(type(client.transport.restore_agent), "__call__") as call: # Designate an appropriate return value for the call. call.return_value = operations_pb2.Operation(name="operations/spam") response = client.restore_agent(request) # Establish that the underlying gRPC stub method was called. assert len(call.mock_calls) == 1 _, args, _ = call.mock_calls[0] assert args[0] == agent.RestoreAgentRequest() # Establish that the response is the type that we expect. assert isinstance(response, future.Future) def test_restore_agent_empty_call(): # This test is a coverage failsafe to make sure that totally empty calls, # i.e. request == None and no flattened fields passed, work. client = AgentsClient( credentials=ga_credentials.AnonymousCredentials(), transport="grpc", ) # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object(type(client.transport.restore_agent), "__call__") as call: client.restore_agent() call.assert_called() _, args, _ = call.mock_calls[0] assert args[0] == agent.RestoreAgentRequest() @pytest.mark.asyncio async def test_restore_agent_async( transport: str = "grpc_asyncio", request_type=agent.RestoreAgentRequest ): client = AgentsAsyncClient( credentials=ga_credentials.AnonymousCredentials(), transport=transport, ) # Everything is optional in proto3 as far as the runtime is concerned, # and we are mocking out the actual API, so just send an empty request. request = request_type() # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object(type(client.transport.restore_agent), "__call__") as call: # Designate an appropriate return value for the call. call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( operations_pb2.Operation(name="operations/spam") ) response = await client.restore_agent(request) # Establish that the underlying gRPC stub method was called. assert len(call.mock_calls) _, args, _ = call.mock_calls[0] assert args[0] == agent.RestoreAgentRequest() # Establish that the response is the type that we expect. assert isinstance(response, future.Future) @pytest.mark.asyncio async def test_restore_agent_async_from_dict(): await test_restore_agent_async(request_type=dict) def test_restore_agent_field_headers(): client = AgentsClient(credentials=ga_credentials.AnonymousCredentials(),) # Any value that is part of the HTTP/1.1 URI should be sent as # a field header. Set these to a non-empty value. request = agent.RestoreAgentRequest() request.name = "name/value" # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object(type(client.transport.restore_agent), "__call__") as call: call.return_value = operations_pb2.Operation(name="operations/op") client.restore_agent(request) # Establish that the underlying gRPC stub method was called. assert len(call.mock_calls) == 1 _, args, _ = call.mock_calls[0] assert args[0] == request # Establish that the field header was sent. 
_, _, kw = call.mock_calls[0] assert ("x-goog-request-params", "name=name/value",) in kw["metadata"] @pytest.mark.asyncio async def test_restore_agent_field_headers_async(): client = AgentsAsyncClient(credentials=ga_credentials.AnonymousCredentials(),) # Any value that is part of the HTTP/1.1 URI should be sent as # a field header. Set these to a non-empty value. request = agent.RestoreAgentRequest() request.name = "name/value" # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object(type(client.transport.restore_agent), "__call__") as call: call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( operations_pb2.Operation(name="operations/op") ) await client.restore_agent(request) # Establish that the underlying gRPC stub method was called. assert len(call.mock_calls) _, args, _ = call.mock_calls[0] assert args[0] == request # Establish that the field header was sent. _, _, kw = call.mock_calls[0] assert ("x-goog-request-params", "name=name/value",) in kw["metadata"] @pytest.mark.parametrize("request_type", [agent.ValidateAgentRequest, dict,]) def test_validate_agent(request_type, transport: str = "grpc"): client = AgentsClient( credentials=ga_credentials.AnonymousCredentials(), transport=transport, ) # Everything is optional in proto3 as far as the runtime is concerned, # and we are mocking out the actual API, so just send an empty request. request = request_type() # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object(type(client.transport.validate_agent), "__call__") as call: # Designate an appropriate return value for the call. call.return_value = agent.AgentValidationResult(name="name_value",) response = client.validate_agent(request) # Establish that the underlying gRPC stub method was called. assert len(call.mock_calls) == 1 _, args, _ = call.mock_calls[0] assert args[0] == agent.ValidateAgentRequest() # Establish that the response is the type that we expect. assert isinstance(response, agent.AgentValidationResult) assert response.name == "name_value" def test_validate_agent_empty_call(): # This test is a coverage failsafe to make sure that totally empty calls, # i.e. request == None and no flattened fields passed, work. client = AgentsClient( credentials=ga_credentials.AnonymousCredentials(), transport="grpc", ) # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object(type(client.transport.validate_agent), "__call__") as call: client.validate_agent() call.assert_called() _, args, _ = call.mock_calls[0] assert args[0] == agent.ValidateAgentRequest() @pytest.mark.asyncio async def test_validate_agent_async( transport: str = "grpc_asyncio", request_type=agent.ValidateAgentRequest ): client = AgentsAsyncClient( credentials=ga_credentials.AnonymousCredentials(), transport=transport, ) # Everything is optional in proto3 as far as the runtime is concerned, # and we are mocking out the actual API, so just send an empty request. request = request_type() # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object(type(client.transport.validate_agent), "__call__") as call: # Designate an appropriate return value for the call. call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( agent.AgentValidationResult(name="name_value",) ) response = await client.validate_agent(request) # Establish that the underlying gRPC stub method was called. 
assert len(call.mock_calls) _, args, _ = call.mock_calls[0] assert args[0] == agent.ValidateAgentRequest() # Establish that the response is the type that we expect. assert isinstance(response, agent.AgentValidationResult) assert response.name == "name_value" @pytest.mark.asyncio async def test_validate_agent_async_from_dict(): await test_validate_agent_async(request_type=dict) def test_validate_agent_field_headers(): client = AgentsClient(credentials=ga_credentials.AnonymousCredentials(),) # Any value that is part of the HTTP/1.1 URI should be sent as # a field header. Set these to a non-empty value. request = agent.ValidateAgentRequest() request.name = "name/value" # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object(type(client.transport.validate_agent), "__call__") as call: call.return_value = agent.AgentValidationResult() client.validate_agent(request) # Establish that the underlying gRPC stub method was called. assert len(call.mock_calls) == 1 _, args, _ = call.mock_calls[0] assert args[0] == request # Establish that the field header was sent. _, _, kw = call.mock_calls[0] assert ("x-goog-request-params", "name=name/value",) in kw["metadata"] @pytest.mark.asyncio async def test_validate_agent_field_headers_async(): client = AgentsAsyncClient(credentials=ga_credentials.AnonymousCredentials(),) # Any value that is part of the HTTP/1.1 URI should be sent as # a field header. Set these to a non-empty value. request = agent.ValidateAgentRequest() request.name = "name/value" # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object(type(client.transport.validate_agent), "__call__") as call: call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( agent.AgentValidationResult() ) await client.validate_agent(request) # Establish that the underlying gRPC stub method was called. assert len(call.mock_calls) _, args, _ = call.mock_calls[0] assert args[0] == request # Establish that the field header was sent. _, _, kw = call.mock_calls[0] assert ("x-goog-request-params", "name=name/value",) in kw["metadata"] @pytest.mark.parametrize("request_type", [agent.GetAgentValidationResultRequest, dict,]) def test_get_agent_validation_result(request_type, transport: str = "grpc"): client = AgentsClient( credentials=ga_credentials.AnonymousCredentials(), transport=transport, ) # Everything is optional in proto3 as far as the runtime is concerned, # and we are mocking out the actual API, so just send an empty request. request = request_type() # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object( type(client.transport.get_agent_validation_result), "__call__" ) as call: # Designate an appropriate return value for the call. call.return_value = agent.AgentValidationResult(name="name_value",) response = client.get_agent_validation_result(request) # Establish that the underlying gRPC stub method was called. assert len(call.mock_calls) == 1 _, args, _ = call.mock_calls[0] assert args[0] == agent.GetAgentValidationResultRequest() # Establish that the response is the type that we expect. assert isinstance(response, agent.AgentValidationResult) assert response.name == "name_value" def test_get_agent_validation_result_empty_call(): # This test is a coverage failsafe to make sure that totally empty calls, # i.e. request == None and no flattened fields passed, work. 
client = AgentsClient( credentials=ga_credentials.AnonymousCredentials(), transport="grpc", ) # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object( type(client.transport.get_agent_validation_result), "__call__" ) as call: client.get_agent_validation_result() call.assert_called() _, args, _ = call.mock_calls[0] assert args[0] == agent.GetAgentValidationResultRequest() @pytest.mark.asyncio async def test_get_agent_validation_result_async( transport: str = "grpc_asyncio", request_type=agent.GetAgentValidationResultRequest ): client = AgentsAsyncClient( credentials=ga_credentials.AnonymousCredentials(), transport=transport, ) # Everything is optional in proto3 as far as the runtime is concerned, # and we are mocking out the actual API, so just send an empty request. request = request_type() # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object( type(client.transport.get_agent_validation_result), "__call__" ) as call: # Designate an appropriate return value for the call. call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( agent.AgentValidationResult(name="name_value",) ) response = await client.get_agent_validation_result(request) # Establish that the underlying gRPC stub method was called. assert len(call.mock_calls) _, args, _ = call.mock_calls[0] assert args[0] == agent.GetAgentValidationResultRequest() # Establish that the response is the type that we expect. assert isinstance(response, agent.AgentValidationResult) assert response.name == "name_value" @pytest.mark.asyncio async def test_get_agent_validation_result_async_from_dict(): await test_get_agent_validation_result_async(request_type=dict) def test_get_agent_validation_result_field_headers(): client = AgentsClient(credentials=ga_credentials.AnonymousCredentials(),) # Any value that is part of the HTTP/1.1 URI should be sent as # a field header. Set these to a non-empty value. request = agent.GetAgentValidationResultRequest() request.name = "name/value" # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object( type(client.transport.get_agent_validation_result), "__call__" ) as call: call.return_value = agent.AgentValidationResult() client.get_agent_validation_result(request) # Establish that the underlying gRPC stub method was called. assert len(call.mock_calls) == 1 _, args, _ = call.mock_calls[0] assert args[0] == request # Establish that the field header was sent. _, _, kw = call.mock_calls[0] assert ("x-goog-request-params", "name=name/value",) in kw["metadata"] @pytest.mark.asyncio async def test_get_agent_validation_result_field_headers_async(): client = AgentsAsyncClient(credentials=ga_credentials.AnonymousCredentials(),) # Any value that is part of the HTTP/1.1 URI should be sent as # a field header. Set these to a non-empty value. request = agent.GetAgentValidationResultRequest() request.name = "name/value" # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object( type(client.transport.get_agent_validation_result), "__call__" ) as call: call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( agent.AgentValidationResult() ) await client.get_agent_validation_result(request) # Establish that the underlying gRPC stub method was called. assert len(call.mock_calls) _, args, _ = call.mock_calls[0] assert args[0] == request # Establish that the field header was sent. 
_, _, kw = call.mock_calls[0] assert ("x-goog-request-params", "name=name/value",) in kw["metadata"] def test_get_agent_validation_result_flattened(): client = AgentsClient(credentials=ga_credentials.AnonymousCredentials(),) # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object( type(client.transport.get_agent_validation_result), "__call__" ) as call: # Designate an appropriate return value for the call. call.return_value = agent.AgentValidationResult() # Call the method with a truthy value for each flattened field, # using the keyword arguments to the method. client.get_agent_validation_result(name="name_value",) # Establish that the underlying call was made with the expected # request object values. assert len(call.mock_calls) == 1 _, args, _ = call.mock_calls[0] arg = args[0].name mock_val = "name_value" assert arg == mock_val def test_get_agent_validation_result_flattened_error(): client = AgentsClient(credentials=ga_credentials.AnonymousCredentials(),) # Attempting to call a method with both a request object and flattened # fields is an error. with pytest.raises(ValueError): client.get_agent_validation_result( agent.GetAgentValidationResultRequest(), name="name_value", ) @pytest.mark.asyncio async def test_get_agent_validation_result_flattened_async(): client = AgentsAsyncClient(credentials=ga_credentials.AnonymousCredentials(),) # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object( type(client.transport.get_agent_validation_result), "__call__" ) as call: # Designate an appropriate return value for the call. call.return_value = agent.AgentValidationResult() call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( agent.AgentValidationResult() ) # Call the method with a truthy value for each flattened field, # using the keyword arguments to the method. response = await client.get_agent_validation_result(name="name_value",) # Establish that the underlying call was made with the expected # request object values. assert len(call.mock_calls) _, args, _ = call.mock_calls[0] arg = args[0].name mock_val = "name_value" assert arg == mock_val @pytest.mark.asyncio async def test_get_agent_validation_result_flattened_error_async(): client = AgentsAsyncClient(credentials=ga_credentials.AnonymousCredentials(),) # Attempting to call a method with both a request object and flattened # fields is an error. with pytest.raises(ValueError): await client.get_agent_validation_result( agent.GetAgentValidationResultRequest(), name="name_value", ) def test_credentials_transport_error(): # It is an error to provide credentials and a transport instance. transport = transports.AgentsGrpcTransport( credentials=ga_credentials.AnonymousCredentials(), ) with pytest.raises(ValueError): client = AgentsClient( credentials=ga_credentials.AnonymousCredentials(), transport=transport, ) # It is an error to provide a credentials file and a transport instance. transport = transports.AgentsGrpcTransport( credentials=ga_credentials.AnonymousCredentials(), ) with pytest.raises(ValueError): client = AgentsClient( client_options={"credentials_file": "credentials.json"}, transport=transport, ) # It is an error to provide an api_key and a transport instance. 
transport = transports.AgentsGrpcTransport( credentials=ga_credentials.AnonymousCredentials(), ) options = client_options.ClientOptions() options.api_key = "api_key" with pytest.raises(ValueError): client = AgentsClient(client_options=options, transport=transport,) # It is an error to provide an api_key and a credential. options = mock.Mock() options.api_key = "api_key" with pytest.raises(ValueError): client = AgentsClient( client_options=options, credentials=ga_credentials.AnonymousCredentials() ) # It is an error to provide scopes and a transport instance. transport = transports.AgentsGrpcTransport( credentials=ga_credentials.AnonymousCredentials(), ) with pytest.raises(ValueError): client = AgentsClient( client_options={"scopes": ["1", "2"]}, transport=transport, ) def test_transport_instance(): # A client may be instantiated with a custom transport instance. transport = transports.AgentsGrpcTransport( credentials=ga_credentials.AnonymousCredentials(), ) client = AgentsClient(transport=transport) assert client.transport is transport def test_transport_get_channel(): # A client may be instantiated with a custom transport instance. transport = transports.AgentsGrpcTransport( credentials=ga_credentials.AnonymousCredentials(), ) channel = transport.grpc_channel assert channel transport = transports.AgentsGrpcAsyncIOTransport( credentials=ga_credentials.AnonymousCredentials(), ) channel = transport.grpc_channel assert channel @pytest.mark.parametrize( "transport_class", [transports.AgentsGrpcTransport, transports.AgentsGrpcAsyncIOTransport,], ) def test_transport_adc(transport_class): # Test default credentials are used if not provided. with mock.patch.object(google.auth, "default") as adc: adc.return_value = (ga_credentials.AnonymousCredentials(), None) transport_class() adc.assert_called_once() def test_transport_grpc_default(): # A client should use the gRPC transport by default. client = AgentsClient(credentials=ga_credentials.AnonymousCredentials(),) assert isinstance(client.transport, transports.AgentsGrpcTransport,) def test_agents_base_transport_error(): # Passing both a credentials object and credentials_file should raise an error with pytest.raises(core_exceptions.DuplicateCredentialArgs): transport = transports.AgentsTransport( credentials=ga_credentials.AnonymousCredentials(), credentials_file="credentials.json", ) def test_agents_base_transport(): # Instantiate the base transport. with mock.patch( "google.cloud.dialogflowcx_v3.services.agents.transports.AgentsTransport.__init__" ) as Transport: Transport.return_value = None transport = transports.AgentsTransport( credentials=ga_credentials.AnonymousCredentials(), ) # Every method on the transport should just blindly # raise NotImplementedError. 
methods = ( "list_agents", "get_agent", "create_agent", "update_agent", "delete_agent", "export_agent", "restore_agent", "validate_agent", "get_agent_validation_result", ) for method in methods: with pytest.raises(NotImplementedError): getattr(transport, method)(request=object()) with pytest.raises(NotImplementedError): transport.close() # Additionally, the LRO client (a property) should # also raise NotImplementedError with pytest.raises(NotImplementedError): transport.operations_client def test_agents_base_transport_with_credentials_file(): # Instantiate the base transport with a credentials file with mock.patch.object( google.auth, "load_credentials_from_file", autospec=True ) as load_creds, mock.patch( "google.cloud.dialogflowcx_v3.services.agents.transports.AgentsTransport._prep_wrapped_messages" ) as Transport: Transport.return_value = None load_creds.return_value = (ga_credentials.AnonymousCredentials(), None) transport = transports.AgentsTransport( credentials_file="credentials.json", quota_project_id="octopus", ) load_creds.assert_called_once_with( "credentials.json", scopes=None, default_scopes=( "https://www.googleapis.com/auth/cloud-platform", "https://www.googleapis.com/auth/dialogflow", ), quota_project_id="octopus", ) def test_agents_base_transport_with_adc(): # Test the default credentials are used if credentials and credentials_file are None. with mock.patch.object(google.auth, "default", autospec=True) as adc, mock.patch( "google.cloud.dialogflowcx_v3.services.agents.transports.AgentsTransport._prep_wrapped_messages" ) as Transport: Transport.return_value = None adc.return_value = (ga_credentials.AnonymousCredentials(), None) transport = transports.AgentsTransport() adc.assert_called_once() def test_agents_auth_adc(): # If no credentials are provided, we should use ADC credentials. with mock.patch.object(google.auth, "default", autospec=True) as adc: adc.return_value = (ga_credentials.AnonymousCredentials(), None) AgentsClient() adc.assert_called_once_with( scopes=None, default_scopes=( "https://www.googleapis.com/auth/cloud-platform", "https://www.googleapis.com/auth/dialogflow", ), quota_project_id=None, ) @pytest.mark.parametrize( "transport_class", [transports.AgentsGrpcTransport, transports.AgentsGrpcAsyncIOTransport,], ) def test_agents_transport_auth_adc(transport_class): # If credentials and host are not provided, the transport class should use # ADC credentials. with mock.patch.object(google.auth, "default", autospec=True) as adc: adc.return_value = (ga_credentials.AnonymousCredentials(), None) transport_class(quota_project_id="octopus", scopes=["1", "2"]) adc.assert_called_once_with( scopes=["1", "2"], default_scopes=( "https://www.googleapis.com/auth/cloud-platform", "https://www.googleapis.com/auth/dialogflow", ), quota_project_id="octopus", ) @pytest.mark.parametrize( "transport_class,grpc_helpers", [ (transports.AgentsGrpcTransport, grpc_helpers), (transports.AgentsGrpcAsyncIOTransport, grpc_helpers_async), ], ) def test_agents_transport_create_channel(transport_class, grpc_helpers): # If credentials and host are not provided, the transport class should use # ADC credentials. 
with mock.patch.object( google.auth, "default", autospec=True ) as adc, mock.patch.object( grpc_helpers, "create_channel", autospec=True ) as create_channel: creds = ga_credentials.AnonymousCredentials() adc.return_value = (creds, None) transport_class(quota_project_id="octopus", scopes=["1", "2"]) create_channel.assert_called_with( "dialogflow.googleapis.com:443", credentials=creds, credentials_file=None, quota_project_id="octopus", default_scopes=( "https://www.googleapis.com/auth/cloud-platform", "https://www.googleapis.com/auth/dialogflow", ), scopes=["1", "2"], default_host="dialogflow.googleapis.com", ssl_credentials=None, options=[ ("grpc.max_send_message_length", -1), ("grpc.max_receive_message_length", -1), ], ) @pytest.mark.parametrize( "transport_class", [transports.AgentsGrpcTransport, transports.AgentsGrpcAsyncIOTransport], ) def test_agents_grpc_transport_client_cert_source_for_mtls(transport_class): cred = ga_credentials.AnonymousCredentials() # Check ssl_channel_credentials is used if provided. with mock.patch.object(transport_class, "create_channel") as mock_create_channel: mock_ssl_channel_creds = mock.Mock() transport_class( host="squid.clam.whelk", credentials=cred, ssl_channel_credentials=mock_ssl_channel_creds, ) mock_create_channel.assert_called_once_with( "squid.clam.whelk:443", credentials=cred, credentials_file=None, scopes=None, ssl_credentials=mock_ssl_channel_creds, quota_project_id=None, options=[ ("grpc.max_send_message_length", -1), ("grpc.max_receive_message_length", -1), ], ) # Check if ssl_channel_credentials is not provided, then client_cert_source_for_mtls # is used. with mock.patch.object(transport_class, "create_channel", return_value=mock.Mock()): with mock.patch("grpc.ssl_channel_credentials") as mock_ssl_cred: transport_class( credentials=cred, client_cert_source_for_mtls=client_cert_source_callback, ) expected_cert, expected_key = client_cert_source_callback() mock_ssl_cred.assert_called_once_with( certificate_chain=expected_cert, private_key=expected_key ) def test_agents_host_no_port(): client = AgentsClient( credentials=ga_credentials.AnonymousCredentials(), client_options=client_options.ClientOptions( api_endpoint="dialogflow.googleapis.com" ), ) assert client.transport._host == "dialogflow.googleapis.com:443" def test_agents_host_with_port(): client = AgentsClient( credentials=ga_credentials.AnonymousCredentials(), client_options=client_options.ClientOptions( api_endpoint="dialogflow.googleapis.com:8000" ), ) assert client.transport._host == "dialogflow.googleapis.com:8000" def test_agents_grpc_transport_channel(): channel = grpc.secure_channel("http://localhost/", grpc.local_channel_credentials()) # Check that channel is used if provided. transport = transports.AgentsGrpcTransport( host="squid.clam.whelk", channel=channel, ) assert transport.grpc_channel == channel assert transport._host == "squid.clam.whelk:443" assert transport._ssl_channel_credentials == None def test_agents_grpc_asyncio_transport_channel(): channel = aio.secure_channel("http://localhost/", grpc.local_channel_credentials()) # Check that channel is used if provided. transport = transports.AgentsGrpcAsyncIOTransport( host="squid.clam.whelk", channel=channel, ) assert transport.grpc_channel == channel assert transport._host == "squid.clam.whelk:443" assert transport._ssl_channel_credentials == None # Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are # removed from grpc/grpc_asyncio transport constructor. 
@pytest.mark.parametrize( "transport_class", [transports.AgentsGrpcTransport, transports.AgentsGrpcAsyncIOTransport], ) def test_agents_transport_channel_mtls_with_client_cert_source(transport_class): with mock.patch( "grpc.ssl_channel_credentials", autospec=True ) as grpc_ssl_channel_cred: with mock.patch.object( transport_class, "create_channel" ) as grpc_create_channel: mock_ssl_cred = mock.Mock() grpc_ssl_channel_cred.return_value = mock_ssl_cred mock_grpc_channel = mock.Mock() grpc_create_channel.return_value = mock_grpc_channel cred = ga_credentials.AnonymousCredentials() with pytest.warns(DeprecationWarning): with mock.patch.object(google.auth, "default") as adc: adc.return_value = (cred, None) transport = transport_class( host="squid.clam.whelk", api_mtls_endpoint="mtls.squid.clam.whelk", client_cert_source=client_cert_source_callback, ) adc.assert_called_once() grpc_ssl_channel_cred.assert_called_once_with( certificate_chain=b"cert bytes", private_key=b"key bytes" ) grpc_create_channel.assert_called_once_with( "mtls.squid.clam.whelk:443", credentials=cred, credentials_file=None, scopes=None, ssl_credentials=mock_ssl_cred, quota_project_id=None, options=[ ("grpc.max_send_message_length", -1), ("grpc.max_receive_message_length", -1), ], ) assert transport.grpc_channel == mock_grpc_channel assert transport._ssl_channel_credentials == mock_ssl_cred # Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are # removed from grpc/grpc_asyncio transport constructor. @pytest.mark.parametrize( "transport_class", [transports.AgentsGrpcTransport, transports.AgentsGrpcAsyncIOTransport], ) def test_agents_transport_channel_mtls_with_adc(transport_class): mock_ssl_cred = mock.Mock() with mock.patch.multiple( "google.auth.transport.grpc.SslCredentials", __init__=mock.Mock(return_value=None), ssl_credentials=mock.PropertyMock(return_value=mock_ssl_cred), ): with mock.patch.object( transport_class, "create_channel" ) as grpc_create_channel: mock_grpc_channel = mock.Mock() grpc_create_channel.return_value = mock_grpc_channel mock_cred = mock.Mock() with pytest.warns(DeprecationWarning): transport = transport_class( host="squid.clam.whelk", credentials=mock_cred, api_mtls_endpoint="mtls.squid.clam.whelk", client_cert_source=None, ) grpc_create_channel.assert_called_once_with( "mtls.squid.clam.whelk:443", credentials=mock_cred, credentials_file=None, scopes=None, ssl_credentials=mock_ssl_cred, quota_project_id=None, options=[ ("grpc.max_send_message_length", -1), ("grpc.max_receive_message_length", -1), ], ) assert transport.grpc_channel == mock_grpc_channel def test_agents_grpc_lro_client(): client = AgentsClient( credentials=ga_credentials.AnonymousCredentials(), transport="grpc", ) transport = client.transport # Ensure that we have a api-core operations client. assert isinstance(transport.operations_client, operations_v1.OperationsClient,) # Ensure that subsequent calls to the property send the exact same object. assert transport.operations_client is transport.operations_client def test_agents_grpc_lro_async_client(): client = AgentsAsyncClient( credentials=ga_credentials.AnonymousCredentials(), transport="grpc_asyncio", ) transport = client.transport # Ensure that we have a api-core operations client. assert isinstance(transport.operations_client, operations_v1.OperationsAsyncClient,) # Ensure that subsequent calls to the property send the exact same object. 
assert transport.operations_client is transport.operations_client def test_agent_path(): project = "squid" location = "clam" agent = "whelk" expected = "projects/{project}/locations/{location}/agents/{agent}".format( project=project, location=location, agent=agent, ) actual = AgentsClient.agent_path(project, location, agent) assert expected == actual def test_parse_agent_path(): expected = { "project": "octopus", "location": "oyster", "agent": "nudibranch", } path = AgentsClient.agent_path(**expected) # Check that the path construction is reversible. actual = AgentsClient.parse_agent_path(path) assert expected == actual def test_agent_validation_result_path(): project = "cuttlefish" location = "mussel" agent = "winkle" expected = "projects/{project}/locations/{location}/agents/{agent}/validationResult".format( project=project, location=location, agent=agent, ) actual = AgentsClient.agent_validation_result_path(project, location, agent) assert expected == actual def test_parse_agent_validation_result_path(): expected = { "project": "nautilus", "location": "scallop", "agent": "abalone", } path = AgentsClient.agent_validation_result_path(**expected) # Check that the path construction is reversible. actual = AgentsClient.parse_agent_validation_result_path(path) assert expected == actual def test_environment_path(): project = "squid" location = "clam" agent = "whelk" environment = "octopus" expected = "projects/{project}/locations/{location}/agents/{agent}/environments/{environment}".format( project=project, location=location, agent=agent, environment=environment, ) actual = AgentsClient.environment_path(project, location, agent, environment) assert expected == actual def test_parse_environment_path(): expected = { "project": "oyster", "location": "nudibranch", "agent": "cuttlefish", "environment": "mussel", } path = AgentsClient.environment_path(**expected) # Check that the path construction is reversible. actual = AgentsClient.parse_environment_path(path) assert expected == actual def test_flow_path(): project = "winkle" location = "nautilus" agent = "scallop" flow = "abalone" expected = "projects/{project}/locations/{location}/agents/{agent}/flows/{flow}".format( project=project, location=location, agent=agent, flow=flow, ) actual = AgentsClient.flow_path(project, location, agent, flow) assert expected == actual def test_parse_flow_path(): expected = { "project": "squid", "location": "clam", "agent": "whelk", "flow": "octopus", } path = AgentsClient.flow_path(**expected) # Check that the path construction is reversible. actual = AgentsClient.parse_flow_path(path) assert expected == actual def test_flow_validation_result_path(): project = "oyster" location = "nudibranch" agent = "cuttlefish" flow = "mussel" expected = "projects/{project}/locations/{location}/agents/{agent}/flows/{flow}/validationResult".format( project=project, location=location, agent=agent, flow=flow, ) actual = AgentsClient.flow_validation_result_path(project, location, agent, flow) assert expected == actual def test_parse_flow_validation_result_path(): expected = { "project": "winkle", "location": "nautilus", "agent": "scallop", "flow": "abalone", } path = AgentsClient.flow_validation_result_path(**expected) # Check that the path construction is reversible. 
actual = AgentsClient.parse_flow_validation_result_path(path) assert expected == actual def test_security_settings_path(): project = "squid" location = "clam" security_settings = "whelk" expected = "projects/{project}/locations/{location}/securitySettings/{security_settings}".format( project=project, location=location, security_settings=security_settings, ) actual = AgentsClient.security_settings_path(project, location, security_settings) assert expected == actual def test_parse_security_settings_path(): expected = { "project": "octopus", "location": "oyster", "security_settings": "nudibranch", } path = AgentsClient.security_settings_path(**expected) # Check that the path construction is reversible. actual = AgentsClient.parse_security_settings_path(path) assert expected == actual def test_common_billing_account_path(): billing_account = "cuttlefish" expected = "billingAccounts/{billing_account}".format( billing_account=billing_account, ) actual = AgentsClient.common_billing_account_path(billing_account) assert expected == actual def test_parse_common_billing_account_path(): expected = { "billing_account": "mussel", } path = AgentsClient.common_billing_account_path(**expected) # Check that the path construction is reversible. actual = AgentsClient.parse_common_billing_account_path(path) assert expected == actual def test_common_folder_path(): folder = "winkle" expected = "folders/{folder}".format(folder=folder,) actual = AgentsClient.common_folder_path(folder) assert expected == actual def test_parse_common_folder_path(): expected = { "folder": "nautilus", } path = AgentsClient.common_folder_path(**expected) # Check that the path construction is reversible. actual = AgentsClient.parse_common_folder_path(path) assert expected == actual def test_common_organization_path(): organization = "scallop" expected = "organizations/{organization}".format(organization=organization,) actual = AgentsClient.common_organization_path(organization) assert expected == actual def test_parse_common_organization_path(): expected = { "organization": "abalone", } path = AgentsClient.common_organization_path(**expected) # Check that the path construction is reversible. actual = AgentsClient.parse_common_organization_path(path) assert expected == actual def test_common_project_path(): project = "squid" expected = "projects/{project}".format(project=project,) actual = AgentsClient.common_project_path(project) assert expected == actual def test_parse_common_project_path(): expected = { "project": "clam", } path = AgentsClient.common_project_path(**expected) # Check that the path construction is reversible. actual = AgentsClient.parse_common_project_path(path) assert expected == actual def test_common_location_path(): project = "whelk" location = "octopus" expected = "projects/{project}/locations/{location}".format( project=project, location=location, ) actual = AgentsClient.common_location_path(project, location) assert expected == actual def test_parse_common_location_path(): expected = { "project": "oyster", "location": "nudibranch", } path = AgentsClient.common_location_path(**expected) # Check that the path construction is reversible. 
actual = AgentsClient.parse_common_location_path(path) assert expected == actual def test_client_with_default_client_info(): client_info = gapic_v1.client_info.ClientInfo() with mock.patch.object( transports.AgentsTransport, "_prep_wrapped_messages" ) as prep: client = AgentsClient( credentials=ga_credentials.AnonymousCredentials(), client_info=client_info, ) prep.assert_called_once_with(client_info) with mock.patch.object( transports.AgentsTransport, "_prep_wrapped_messages" ) as prep: transport_class = AgentsClient.get_transport_class() transport = transport_class( credentials=ga_credentials.AnonymousCredentials(), client_info=client_info, ) prep.assert_called_once_with(client_info) @pytest.mark.asyncio async def test_transport_close_async(): client = AgentsAsyncClient( credentials=ga_credentials.AnonymousCredentials(), transport="grpc_asyncio", ) with mock.patch.object( type(getattr(client.transport, "grpc_channel")), "close" ) as close: async with client: close.assert_not_called() close.assert_called_once() def test_transport_close(): transports = { "grpc": "_grpc_channel", } for transport, close_name in transports.items(): client = AgentsClient( credentials=ga_credentials.AnonymousCredentials(), transport=transport ) with mock.patch.object( type(getattr(client.transport, close_name)), "close" ) as close: with client: close.assert_not_called() close.assert_called_once() def test_client_ctx(): transports = [ "grpc", ] for transport in transports: client = AgentsClient( credentials=ga_credentials.AnonymousCredentials(), transport=transport ) # Test client calls underlying transport. with mock.patch.object(type(client.transport), "close") as close: close.assert_not_called() with client: pass close.assert_called() @pytest.mark.parametrize( "client_class,transport_class", [ (AgentsClient, transports.AgentsGrpcTransport), (AgentsAsyncClient, transports.AgentsGrpcAsyncIOTransport), ], ) def test_api_key_credentials(client_class, transport_class): with mock.patch.object( google.auth._default, "get_api_key_credentials", create=True ) as get_api_key_credentials: mock_cred = mock.Mock() get_api_key_credentials.return_value = mock_cred options = client_options.ClientOptions() options.api_key = "api_key" with mock.patch.object(transport_class, "__init__") as patched: patched.return_value = None client = client_class(client_options=options) patched.assert_called_once_with( credentials=mock_cred, credentials_file=None, host=client.DEFAULT_ENDPOINT, scopes=None, client_cert_source_for_mtls=None, quota_project_id=None, client_info=transports.base.DEFAULT_CLIENT_INFO, always_use_jwt_access=True, )
repo_name: googleapis/python-dialogflow-cx
path: tests/unit/gapic/dialogflowcx_v3/test_agents.py
language: Python
license: apache-2.0
size: 124793
keyword: [ "Octopus" ]
text_hash: e88634f5b214d82f4e03e2578743d662d9fdb72efd45ec4b1cb2a59e26b26f20
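The generated test suite in the record above repeatedly round-trips resource-path helpers (for example agent_path and parse_agent_path): a path is built from components, parsed back, and the parsed dict is compared with the original inputs. The following self-contained sketch illustrates that build/parse round-trip pattern in plain Python; the template string, regular expression, and helper names here are assumptions made for illustration and are not the generated Dialogflow CX client's actual implementation.

import re

# Hypothetical stand-ins for the generated agent_path/parse_agent_path helpers.
AGENT_TEMPLATE = "projects/{project}/locations/{location}/agents/{agent}"
AGENT_PATTERN = re.compile(
    r"^projects/(?P<project>[^/]+)/locations/(?P<location>[^/]+)/agents/(?P<agent>[^/]+)$"
)


def agent_path(project, location, agent):
    """Build a fully qualified agent resource name from its components."""
    return AGENT_TEMPLATE.format(project=project, location=location, agent=agent)


def parse_agent_path(path):
    """Invert agent_path(); return an empty dict if the path does not match."""
    match = AGENT_PATTERN.match(path)
    return match.groupdict() if match else {}


def test_agent_path_round_trip():
    expected = {"project": "octopus", "location": "oyster", "agent": "nudibranch"}
    path = agent_path(**expected)
    # Parsing the built path should recover exactly the original components.
    assert parse_agent_path(path) == expected


if __name__ == "__main__":
    test_agent_path_round_trip()
    print("round trip ok")

Run directly with python, the script only prints on success; the same build-then-parse assertion style is what the record's test_parse_*_path tests rely on.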
# This file is part of ts_wep. # # Developed for the LSST Telescope and Site Systems. # This product includes software developed by the LSST Project # (https://www.lsst.org). # See the COPYRIGHT file at the top-level directory of this distribution # for details of code ownership. # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see <https://www.gnu.org/licenses/>. import os import numpy as np from scipy.signal import correlate import lsst.utils.tests from lsst.afw import image as afwImage from lsst.daf import butler as dafButler from lsst.ts.wep.task.DonutStamps import DonutStamps from lsst.ts.wep.task.EstimateZernikesBase import ( EstimateZernikesBaseTask, EstimateZernikesBaseConfig, ) from lsst.ts.wep.task.CombineZernikesMeanTask import CombineZernikesMeanTask from lsst.ts.wep.task.CombineZernikesSigmaClipTask import CombineZernikesSigmaClipTask from lsst.ts.wep.Utility import ( getModulePath, runProgram, DefocalType, writePipetaskCmd, writeCleanUpRepoCmd, ) class TestEstimateZernikesBase(lsst.utils.tests.TestCase): @classmethod def setUpClass(cls): """ Generate donutCatalog needed for task. """ moduleDir = getModulePath() cls.testDataDir = os.path.join(moduleDir, "tests", "testData") testPipelineConfigDir = os.path.join(cls.testDataDir, "pipelineConfigs") cls.repoDir = os.path.join(cls.testDataDir, "gen3TestRepo") cls.runName = "run1" # Check that run doesn't already exist due to previous improper cleanup butler = dafButler.Butler(cls.repoDir) registry = butler.registry collectionsList = list(registry.queryCollections()) if cls.runName in collectionsList: cleanUpCmd = writeCleanUpRepoCmd(cls.repoDir, cls.runName) runProgram(cleanUpCmd) collections = "refcats,LSSTCam/calib,LSSTCam/raw/all" instrument = "lsst.obs.lsst.LsstCam" cls.cameraName = "LSSTCam" pipelineYaml = os.path.join(testPipelineConfigDir, "testBasePipeline.yaml") pipeCmd = writePipetaskCmd( cls.repoDir, cls.runName, instrument, collections, pipelineYaml=pipelineYaml ) runProgram(pipeCmd) @classmethod def tearDownClass(cls): cleanUpCmd = writeCleanUpRepoCmd(cls.repoDir, cls.runName) runProgram(cleanUpCmd) def setUp(self): self.config = EstimateZernikesBaseConfig() self.task = EstimateZernikesBaseTask(config=self.config, name="Base Task") self.butler = dafButler.Butler(self.repoDir) self.registry = self.butler.registry self.dataIdExtra = { "instrument": "LSSTCam", "detector": 94, "exposure": 4021123106001, "visit": 4021123106001, } self.dataIdIntra = { "instrument": "LSSTCam", "detector": 94, "exposure": 4021123106002, "visit": 4021123106002, } def _generateTestExposures(self): # Generate donut template template = self.task.getTemplate("R22_S11", DefocalType.Extra) correlatedImage = correlate(template, template) maxIdx = np.argmax(correlatedImage) maxLoc = np.unravel_index(maxIdx, np.shape(correlatedImage)) templateCenter = np.array(maxLoc) - self.task.donutTemplateSize / 2 # Make donut centered in exposure initCutoutSize = ( self.task.donutTemplateSize + self.task.initialCutoutPadding * 
2 ) centeredArr = np.zeros((initCutoutSize, initCutoutSize), dtype=np.float32) centeredArr[ self.task.initialCutoutPadding : -self.task.initialCutoutPadding, self.task.initialCutoutPadding : -self.task.initialCutoutPadding, ] += template centeredImage = afwImage.ImageF(initCutoutSize, initCutoutSize) centeredImage.array = centeredArr centeredExp = afwImage.ExposureF(initCutoutSize, initCutoutSize) centeredExp.setImage(centeredImage) centerCoord = ( self.task.initialCutoutPadding + templateCenter[1], self.task.initialCutoutPadding + templateCenter[0], ) # Make new donut that needs to be shifted by 20 pixels # from the edge of the exposure offCenterArr = np.zeros((initCutoutSize, initCutoutSize), dtype=np.float32) offCenterArr[ : self.task.donutTemplateSize - 20, : self.task.donutTemplateSize - 20 ] = template[20:, 20:] offCenterImage = afwImage.ImageF(initCutoutSize, initCutoutSize) offCenterImage.array = offCenterArr offCenterExp = afwImage.ExposureF(initCutoutSize, initCutoutSize) offCenterExp.setImage(offCenterImage) # Center coord value 20 pixels closer than template center # due to stamp overrunning the edge of the exposure. offCenterCoord = templateCenter - 20 return centeredExp, centerCoord, template, offCenterExp, offCenterCoord def testValidateConfigs(self): self.assertEqual(self.task.donutTemplateSize, 160) self.assertEqual(self.task.donutStampSize, 160) self.assertEqual(self.task.initialCutoutPadding, 40) self.assertEqual(type(self.task.combineZernikes), CombineZernikesSigmaClipTask) self.config.donutTemplateSize = 120 self.config.donutStampSize = 120 self.config.initialCutoutPadding = 290 self.config.combineZernikes.retarget(CombineZernikesMeanTask) self.task = EstimateZernikesBaseTask(config=self.config, name="Base Task") self.assertEqual(self.task.donutTemplateSize, 120) self.assertEqual(self.task.donutStampSize, 120) self.assertEqual(self.task.initialCutoutPadding, 290) self.assertEqual(type(self.task.combineZernikes), CombineZernikesMeanTask) def testGetTemplate(self): extra_template = self.task.getTemplate("R22_S11", DefocalType.Extra) self.assertEqual( np.shape(extra_template), (self.config.donutTemplateSize, self.config.donutTemplateSize), ) self.config.donutTemplateSize = 180 self.task = EstimateZernikesBaseTask(config=self.config, name="Base Task") intra_template = self.task.getTemplate("R22_S11", DefocalType.Intra) self.assertEqual(np.shape(intra_template), (180, 180)) def testShiftCenter(self): centerUpperLimit = self.task.shiftCenter(190.0, 200.0, 20.0) self.assertEqual(centerUpperLimit, 180.0) centerLowerLimit = self.task.shiftCenter(10.0, 0.0, 20.0) self.assertEqual(centerLowerLimit, 20.0) centerNoChangeUpper = self.task.shiftCenter(100.0, 200.0, 20.0) self.assertEqual(centerNoChangeUpper, 100.0) centerNoChangeLower = self.task.shiftCenter(100.0, 200.0, 20.0) self.assertEqual(centerNoChangeLower, 100.0) def testCalculateFinalCentroid(self): ( centeredExp, centerCoord, template, offCenterExp, offCenterCoord, ) = self._generateTestExposures() centerX, centerY, cornerX, cornerY = self.task.calculateFinalCentroid( centeredExp, template, centerCoord[0], centerCoord[1] ) # For centered donut final center and final corner should be # half stamp width apart self.assertEqual(centerX, centerCoord[0]) self.assertEqual(centerY, centerCoord[1]) self.assertEqual(cornerX, centerCoord[0] - self.task.donutStampSize / 2) self.assertEqual(cornerY, centerCoord[1] - self.task.donutStampSize / 2) centerX, centerY, cornerX, cornerY = self.task.calculateFinalCentroid( offCenterExp, 
template, centerCoord[0], centerCoord[1] ) # For donut stamp that would go off the top corner of the exposure # then the stamp should start at (0, 0) instead self.assertAlmostEqual(centerX, offCenterCoord[0]) self.assertAlmostEqual(centerY, offCenterCoord[1]) # Corner of image should be 0, 0 self.assertEqual(cornerX, 0) self.assertEqual(cornerY, 0) def testCutOutStamps(self): exposure = self.butler.get( "postISRCCD", dataId=self.dataIdExtra, collections=[self.runName] ) donutCatalog = self.butler.get( "donutCatalog", dataId=self.dataIdExtra, collections=[self.runName] ) donutStamps = self.task.cutOutStamps( exposure, donutCatalog, DefocalType.Extra, self.cameraName ) self.assertTrue(len(donutStamps), 4) stampCentroid = donutStamps[0].centroid_position stampBBox = lsst.geom.Box2I( lsst.geom.Point2I(stampCentroid.getX() - 80, stampCentroid.getY() - 80), lsst.geom.Extent2I(160), ) expCutOut = exposure[stampBBox].image.array np.testing.assert_array_equal(donutStamps[0].stamp_im.image.array, expCutOut) def testEstimateZernikes(self): extraExposure = self.butler.get( "postISRCCD", dataId=self.dataIdExtra, collections=[self.runName] ) intraExposure = self.butler.get( "postISRCCD", dataId=self.dataIdIntra, collections=[self.runName] ) donutCatalog = self.butler.get( "donutCatalog", dataId=self.dataIdExtra, collections=[self.runName] ) donutStampsExtra = self.task.cutOutStamps( extraExposure, donutCatalog, DefocalType.Extra, self.cameraName ) donutStampsIntra = self.task.cutOutStamps( intraExposure, donutCatalog, DefocalType.Intra, self.cameraName ) zernCoeff = self.task.estimateZernikes(donutStampsExtra, donutStampsIntra) self.assertEqual(np.shape(zernCoeff), (len(donutStampsExtra), 19)) def testEstimateCornerZernikes(self): """ Test the rotated corner sensors (R04 and R40) and make sure no changes upstream in obs_lsst have created issues in Zernike estimation. 
""" donutStampDir = os.path.join(self.testDataDir, "donutImg", "donutStamps") # Test R04 donutStampsExtra = DonutStamps.readFits( os.path.join(donutStampDir, "R04_SW0_donutStamps.fits") ) donutStampsIntra = DonutStamps.readFits( os.path.join(donutStampDir, "R04_SW1_donutStamps.fits") ) zernCoeffAllR04 = self.task.estimateZernikes(donutStampsExtra, donutStampsIntra) zernCoeffAvgR04 = self.task.combineZernikes.run( zernCoeffAllR04 ).combinedZernikes trueZernCoeffR04 = np.array( [ -0.71201408, 1.12248525, 0.77794367, -0.04085477, -0.05272933, 0.16054277, 0.081405, -0.04382461, -0.04830676, -0.06218882, 0.10246469, 0.0197683, 0.007953, 0.00668697, -0.03570788, -0.03020376, 0.0039522, 0.04793133, -0.00804605, ] ) # Make sure the total rms error is less than 0.5 microns off # from the OPD truth as a sanity check self.assertLess( np.sqrt(np.sum(np.square(zernCoeffAvgR04 - trueZernCoeffR04))), 0.5 ) # Test R40 donutStampsExtra = DonutStamps.readFits( os.path.join(donutStampDir, "R40_SW0_donutStamps.fits") ) donutStampsIntra = DonutStamps.readFits( os.path.join(donutStampDir, "R40_SW1_donutStamps.fits") ) zernCoeffAllR40 = self.task.estimateZernikes(donutStampsExtra, donutStampsIntra) zernCoeffAvgR40 = self.task.combineZernikes.run( zernCoeffAllR40 ).combinedZernikes trueZernCoeffR40 = np.array( [ -0.6535694, 1.00838499, 0.55968811, -0.08899825, 0.00173607, 0.04133107, -0.10913093, -0.04363778, -0.03149601, -0.04941225, 0.09980538, 0.03704486, -0.00210766, 0.01737253, 0.01727539, 0.01278011, 0.01212878, 0.03876888, -0.00559142, ] ) # Make sure the total rms error is less than 0.5 microns off # from the OPD truth as a sanity check self.assertLess( np.sqrt(np.sum(np.square(zernCoeffAvgR40 - trueZernCoeffR40))), 0.5 ) def testGetCombinedZernikes(self): testArr = np.zeros((2, 19)) testArr[1] += 2.0 combinedZernikesStruct = self.task.getCombinedZernikes(testArr) np.testing.assert_array_equal( combinedZernikesStruct.combinedZernikes, np.ones(19) ) np.testing.assert_array_equal( combinedZernikesStruct.flags, np.zeros(len(testArr)) )
lsst-ts/ts_wep
tests/task/test_estimateZernikesBase.py
Python
gpl-3.0
13,756
[ "VisIt" ]
07663f764a26f57874d3d794688df521768dc08b2a08cb37df7ed8c59e6c11cd
#!/usr/bin/env python """ Utilities for using M2Crypto SSL with DIRAC. """ import os from M2Crypto import SSL, m2 from DIRAC.Core.Security import Locations from DIRAC.Core.Security.m2crypto.X509Chain import X509Chain # Default ciphers to use if unspecified # Cipher line should be as readable as possible, sorry pylint # pylint: disable=line-too-long DEFAULT_SSL_CIPHERS = "AES256-GCM-SHA384:AES256-SHA256:AES256-SHA:CAMELLIA256-SHA:AES128-GCM-SHA256:AES128-SHA256:AES128-SHA:HIGH:MEDIUM:RSA:!3DES:!RC4:!aNULL:!MD5:!SEED:!IDEA" # noqa # Verify depth of peer certs VERIFY_DEPTH = 50 def __loadM2SSLCTXHostcert(ctx): """ Load hostcert & key from the default location and set them as the credentials for SSL context ctx. Returns None. """ certKeyTuple = Locations.getHostCertificateAndKeyLocation() if not certKeyTuple: raise RuntimeError("Hostcert/key location not set") hostcert, hostkey = certKeyTuple if not os.path.isfile(hostcert): raise RuntimeError("Hostcert file (%s) is missing" % hostcert) if not os.path.isfile(hostkey): raise RuntimeError("Hostkey file (%s) is missing" % hostkey) # Make sure we never stall on a password prompt if the hostkey has a password # by specifying a blank string. ctx.load_cert(hostcert, hostkey, callback=lambda: "") def __loadM2SSLCTXProxy(ctx, proxyPath=None): """ Load proxy from proxyPath (or default location if not specified) and set it as the certificate & key to use for this SSL context. Returns None. """ if not proxyPath: proxyPath = Locations.getProxyLocation() if not proxyPath: raise RuntimeError("Proxy location not set") if not os.path.isfile(proxyPath): raise RuntimeError("Proxy file (%s) is missing" % proxyPath) # See __loadM2SSLCTXHostcert for description of why lambda is needed. ctx.load_cert_chain(proxyPath, proxyPath, callback=lambda: "") def getM2SSLContext(ctx=None, **kwargs): """ Gets an M2Crypto.SSL.Context configured using the standard DIRAC connection keywords from kwargs. The keywords are: - clientMode: Boolean, if False hostcerts are always used. If True a proxy is used unless other flags are set. - useCertificates: Boolean, Set to true to use hostcerts in client mode. - proxyString: String, no-longer supported, used to allow a literal proxy string to be provided. - proxyLocation: String, Path to file to use as proxy, defaults to usual location(s) if not set. - skipCACheck: Boolean, if True, don't verify peer certificates. - sslMethod: String, List of SSL algorithms to enable in OpenSSL style cipher format, e.g. "SSLv3:TLSv1". - sslCiphers: String, OpenSSL style cipher string of ciphers to allow on this connection. If an existing context "ctx" is provided, it is just reconfigured with the selected arguments. Returns the new or updated context. """ if not ctx: ctx = SSL.Context() # Set certificates for connection # CHRIS: I think clientMode was just an internal of pyGSI implementation # if kwargs.get('clientMode', False) and not kwargs.get('useCertificates', False): # if not kwargs.get('useCertificates', False): if kwargs.get('bServerMode', False) or kwargs.get('useCertificates', False): # Server mode always uses hostcert __loadM2SSLCTXHostcert(ctx) else: # Client mode has a choice of possible options if kwargs.get('proxyString', None): # We don't support this any more, there is no easy way # to convert a proxy string to something usable by M2Crypto SSL # Try writing it to a temp file and use proxyLocation instead? 
raise RuntimeError("Proxy string no longer supported.") else: # Use normal proxy __loadM2SSLCTXProxy(ctx, proxyPath=kwargs.get('proxyLocation', None)) # Set peer verification if kwargs.get('skipCACheck', False): # Don't validate peer, but still request creds ctx.set_verify(SSL.verify_fail_if_no_peer_cert, VERIFY_DEPTH) else: # Do validate peer ctx.set_verify(SSL.verify_peer | SSL.verify_fail_if_no_peer_cert, VERIFY_DEPTH) # Set CA location caPath = Locations.getCAsLocation() if not caPath: raise RuntimeError("Failed to find CA location") if not os.path.isdir(caPath): raise RuntimeError("CA path (%s) is not a valid directory" % caPath) ctx.load_verify_locations(capath=caPath) # Other parameters sslMethod = kwargs.get('sslMethod', None) if sslMethod: # Pylint can't see the m2 constants due to the way the library is loaded # We just have to disable that warning for the next bit... # pylint: disable=no-member methods = [('SSLv2', m2.SSL_OP_NO_SSLv2), ('SSLv3', m2.SSL_OP_NO_SSLv3), ('TLSv1', m2.SSL_OP_NO_TLSv1)] allowed_methods = sslMethod.split(':') # If a method isn't explicitly allowed, set the flag to disable it... for method, method_flag in methods: if method not in allowed_methods: ctx.set_options(method_flag) # SSL_OP_NO_SSLv2, SSL_OP_NO_SSLv3, SSL_OP_NO_TLSv1 ciphers = kwargs.get('sslCiphers', DEFAULT_SSL_CIPHERS) ctx.set_cipher_list(ciphers) # log the debug messages # ctx.set_info_callback() return ctx def getM2PeerInfo(conn): """ Gets the details of the current peer as a standard dict. The peer details are obtained from the supplied M2 SSL Connection obj "conn". The details returned are those from ~X509Chain.getCredentials: DN - Full peer DN as string x509Chain - Full chain of peer isProxy - Boolean, True if chain ends with proxy isLimitedProxy - Boolean, True if chain ends with limited proxy group - String, DIRAC group for this peer, if known Returns a dict of details. """ chain = X509Chain.generateX509ChainFromSSLConnection(conn) creds = chain.getCredentials() if not creds['OK']: raise RuntimeError("Failed to get SSL peer info (%s)." % creds['Message']) peer = creds['Value'] peer['x509Chain'] = chain isProxy = chain.isProxy() if not isProxy['OK']: raise RuntimeError("Failed to get SSL peer isProxy (%s)." % isProxy['Message']) peer['isProxy'] = isProxy['Value'] if peer['isProxy']: peer['DN'] = creds['Value']['identity'] else: peer['DN'] = creds['Value']['subject'] isLimited = chain.isLimitedProxy() if not isLimited['OK']: raise RuntimeError("Failed to get SSL peer isProxy (%s)." % isLimited['Message']) peer['isLimitedProxy'] = isLimited['Value'] return peer
petricm/DIRAC
Core/DISET/private/Transports/SSL/M2Utils.py
Python
gpl-3.0
6,737
[ "DIRAC" ]
565d1ffbad439b773341892c747f48acc76340cee2ba22fb267ca2c6b82358c9
# coding: utf-8 # Copyright (c) Pymatgen Development Team. # Distributed under the terms of the MIT License. from __future__ import division, unicode_literals from __future__ import absolute_import, print_function """ This module provides classes used to enumerate surface sites and to find adsorption sites on slabs """ import numpy as np from six.moves import range from pymatgen import Structure, Lattice, vis import tempfile import sys import subprocess import itertools import os from monty.serialization import loadfn from scipy.spatial import Delaunay import warnings from pymatgen.core.operations import SymmOp from pymatgen.symmetry.analyzer import SpacegroupAnalyzer from pymatgen.symmetry.analyzer import generate_full_symmops from pymatgen.util.coord import in_coord_list, in_coord_list_pbc from pymatgen.core.sites import PeriodicSite from pymatgen.analysis.structure_analyzer import VoronoiCoordFinder from pymatgen.core.surface import generate_all_slabs from pymatgen.analysis.structure_matcher import StructureMatcher from matplotlib import patches from matplotlib.path import Path __author__ = "Joseph Montoya" __copyright__ = "Copyright 2016, The Materials Project" __version__ = "0.1" __maintainer__ = "Joseph Montoya" __credits__ = "Richard Tran" __email__ = "montoyjh@lbl.gov" __status__ = "Development" __date__ = "December 2, 2015" class AdsorbateSiteFinder(object): """ This class finds adsorbate sites on slabs and generates adsorbate structures according to user-defined criteria. The algorithm for finding sites is essentially as follows: 1. Determine "surface sites" by finding those within a height threshold along the miller index of the highest site 2. Create a network of surface sites using the Delaunay triangulation of the surface sites 3. Assign on-top, bridge, and hollow adsorption sites at the nodes, edges, and face centers of the Del. Triangulation 4. Generate structures from a molecule positioned at these sites """ def __init__(self, slab, selective_dynamics=False, height=0.9, mi_vec=None): """ Create an AdsorbateSiteFinder object. Args: slab (Slab): slab object for which to find adsorbate sites selective_dynamics (bool): flag for whether to assign non-surface sites as fixed for selective dynamics height (float): height criteria for selection of surface sites mi_vec (3-D array-like): vector corresponding to the vector concurrent with the miller index, this enables use with slabs that have been reoriented, but the miller vector must be supplied manually top_surface (bool): Which surface to adsorb, True for the surface above the center of mass, False for the surface below center of mass """ # get surface normal from miller index if mi_vec: self.mvec = mi_vec else: self.mvec = get_mi_vec(slab) slab = self.assign_site_properties(slab, height) if selective_dynamics: slab = self.assign_selective_dynamics(slab) self.slab = slab @classmethod def from_bulk_and_miller(cls, structure, miller_index, min_slab_size=8.0, min_vacuum_size=10.0, max_normal_search=None, center_slab=True, selective_dynamics=False, undercoord_threshold=0.09): """ This method constructs the adsorbate site finder from a bulk structure and a miller index, which allows the surface sites to be determined from the difference in bulk and slab coordination, as opposed to the height threshold. 
Args: structure (Structure): structure from which slab input to the ASF is constructed miller_index (3-tuple or list): miller index to be used min_slab_size (float): min slab size for slab generation min_vacuum_size (float): min vacuum size for slab generation max_normal_search (int): max normal search for slab generation center_slab (bool): whether to center slab in slab generation selective dynamics (bool): whether to assign surface sites to selective dynamics undercoord_threshold (float): threshold of "undercoordation" to use for the assignment of surface sites. Default is 0.1, for which surface sites will be designated if they are 10% less coordinated than their bulk counterpart """ # TODO: for some reason this works poorly with primitive cells # may want to switch the coordination algorithm eventually vcf_bulk = VoronoiCoordFinder(structure) bulk_coords = [len(vcf_bulk.get_coordinated_sites(n, tol=0.05)) for n in range(len(structure))] struct = structure.copy(site_properties={'bulk_coordinations': bulk_coords}) slabs = generate_all_slabs(struct, max_index=max(miller_index), min_slab_size=min_slab_size, min_vacuum_size=min_vacuum_size, max_normal_search=max_normal_search, center_slab=center_slab) slab_dict = {slab.miller_index: slab for slab in slabs} if miller_index not in slab_dict: raise ValueError("Miller index not in slab dict") this_slab = slab_dict[miller_index] vcf_surface = VoronoiCoordFinder(this_slab, allow_pathological=True) surf_props, undercoords = [], [] this_mi_vec = get_mi_vec(this_slab) mi_mags = [np.dot(this_mi_vec, site.coords) for site in this_slab] average_mi_mag = np.average(mi_mags) for n, site in enumerate(this_slab): bulk_coord = this_slab.site_properties['bulk_coordinations'][n] slab_coord = len(vcf_surface.get_coordinated_sites(n, tol=0.05)) mi_mag = np.dot(this_mi_vec, site.coords) undercoord = (bulk_coord - slab_coord) / bulk_coord undercoords += [undercoord] if undercoord > undercoord_threshold and mi_mag > average_mi_mag: surf_props += ['surface'] else: surf_props += ['subsurface'] new_site_properties = {'surface_properties': surf_props, 'undercoords': undercoords} new_slab = this_slab.copy(site_properties=new_site_properties) return cls(new_slab, selective_dynamics) def find_surface_sites_by_height(self, slab, height=0.9, xy_tol=0.05): """ This method finds surface sites by determining which sites are within a threshold value in height from the topmost site in a list of sites Args: site_list (list): list of sites from which to select surface sites height (float): threshold in angstroms of distance from topmost site in slab along the slab c-vector to include in surface site determination xy_tol (float): if supplied, will remove any sites which are within a certain distance in the miller plane. Returns: list of sites selected to be within a threshold of the highest """ # Get projection of coordinates along the miller index m_projs = np.array([np.dot(site.coords, self.mvec) for site in slab.sites]) # Mask based on window threshold along the miller index. 
mask = (m_projs - np.amax(m_projs)) >= -height surf_sites = [slab.sites[n] for n in np.where(mask)[0]] if xy_tol: # sort surface sites by height surf_sites = [s for (h, s) in zip(m_projs[mask], surf_sites)] surf_sites.reverse() unique_sites, unique_perp_fracs = [], [] for site in surf_sites: this_perp = site.coords - np.dot(site.coords, self.mvec) this_perp_frac = slab.lattice.get_fractional_coords(this_perp) if not in_coord_list_pbc(unique_perp_fracs, this_perp_frac): unique_sites.append(site) unique_perp_fracs.append(this_perp_frac) surf_sites = unique_sites return surf_sites def assign_site_properties(self, slab, height=0.9): """ Assigns site properties. """ if 'surface_properties' in slab.site_properties.keys(): return slab else: surf_sites = self.find_surface_sites_by_height(slab, height) surf_props = ['surface' if site in surf_sites else 'subsurface' for site in slab.sites] return slab.copy( site_properties={'surface_properties': surf_props}) def get_extended_surface_mesh(self, repeat=(5, 5, 1)): """ Gets an extended surface mesh for to use for adsorption site finding by constructing supercell of surface sites Args: repeat (3-tuple): repeat for getting extended surface mesh """ surf_str = Structure.from_sites(self.surface_sites) surf_str.make_supercell(repeat) return surf_str @property def surface_sites(self): """ convenience method to return a list of surface sites """ return [site for site in self.slab.sites if site.properties['surface_properties'] == 'surface'] def subsurface_sites(self): """ convenience method to return list of subsurface sites """ return [site for site in self.slab.sites if site.properties['surface_properties'] == 'subsurface'] def find_adsorption_sites(self, distance=2.0, put_inside=True, symm_reduce=1e-2, near_reduce=1e-2, positions=['ontop', 'bridge', 'hollow'], no_obtuse_hollow=True): """ Finds surface sites according to the above algorithm. Returns a list of corresponding cartesian coordinates. Args: distance (float): distance from the coordinating ensemble of atoms along the miller index for the site (i. e. 
the distance from the slab itself) put_inside (bool): whether to put the site inside the cell symm_reduce (float): symm reduction threshold near_reduce (float): near reduction threshold positions (list): which positions to include in the site finding "ontop": sites on top of surface sites "bridge": sites at edges between surface sites in Delaunay triangulation of surface sites in the miller plane "hollow": sites at centers of Delaunay triangulation faces "subsurface": subsurface positions projected into miller plane no_obtuse_hollow (bool): flag to indicate whether to include obtuse triangular ensembles in hollow sites """ ads_sites = {k: [] for k in positions} if 'ontop' in positions: ads_sites['ontop'] = [s.coords for s in self.surface_sites] if 'subsurface' in positions: # Get highest site ref = self.slab.sites[np.argmax(self.slab.cart_coords[:, 2])] # Project diff between highest site and subs site into miller ss_sites = [self.mvec * np.dot(ref.coords - s.coords, self.mvec) + s.coords for s in self.subsurface_sites()] ads_sites['subsurface'] = ss_sites if 'bridge' in positions or 'hollow' in positions: mesh = self.get_extended_surface_mesh() sop = get_rot(self.slab) dt = Delaunay([sop.operate(m.coords)[:2] for m in mesh]) # TODO: refactor below to properly account for >3-fold for v in dt.simplices: if -1 not in v: dots = [] for i_corner, i_opp in zip(range(3), ((1, 2), (0, 2), (0, 1))): corner, opp = v[i_corner], [v[o] for o in i_opp] vecs = [mesh[d].coords - mesh[corner].coords for d in opp] vecs = [vec / np.linalg.norm(vec) for vec in vecs] dots.append(np.dot(*vecs)) # Add bridge sites at midpoints of edges of D. Tri if 'bridge' in positions: ads_sites["bridge"].append( self.ensemble_center(mesh, opp)) # Prevent addition of hollow sites in obtuse triangles obtuse = no_obtuse_hollow and (np.array(dots) < 1e-5).any() # Add hollow sites at centers of D. Tri faces if 'hollow' in positions and not obtuse: ads_sites['hollow'].append( self.ensemble_center(mesh, v)) ads_sites['all'] = sum(ads_sites.values(), []) for key, sites in ads_sites.items(): # Pare off outer sites for bridge/hollow if key in ['bridge', 'hollow']: frac_coords = [self.slab.lattice.get_fractional_coords(ads_site) for ads_site in sites] frac_coords = [frac_coord for frac_coord in frac_coords if (frac_coord[0] > 1 and frac_coord[0] < 4 and frac_coord[1] > 1 and frac_coord[1] < 4)] sites = [self.slab.lattice.get_cartesian_coords(frac_coord) for frac_coord in frac_coords] if near_reduce: sites = self.near_reduce(sites, threshold=near_reduce) if put_inside: sites = [put_coord_inside(self.slab.lattice, coord) for coord in sites] if symm_reduce: sites = self.symm_reduce(sites, threshold=symm_reduce) sites = [site + distance * self.mvec for site in sites] ads_sites[key] = sites return ads_sites def symm_reduce(self, coords_set, threshold=1e-6): """ Reduces the set of adsorbate sites by finding removing symmetrically equivalent duplicates Args: coords_set: coordinate set in cartesian coordinates threshold: tolerance for distance equivalence, used as input to in_coord_list_pbc for dupl. 
checking """ surf_sg = SpacegroupAnalyzer(self.slab, 0.1) symm_ops = surf_sg.get_symmetry_operations() unique_coords = [] # Convert to fractional coords_set = [self.slab.lattice.get_fractional_coords(coords) for coords in coords_set] for coords in coords_set: incoord = False for op in symm_ops: if in_coord_list_pbc(unique_coords, op.operate(coords), atol=threshold): incoord = True break if not incoord: unique_coords += [coords] # convert back to cartesian return [self.slab.lattice.get_cartesian_coords(coords) for coords in unique_coords] def near_reduce(self, coords_set, threshold=1e-4): """ Prunes coordinate set for coordinates that are within threshold Args: coords_set (Nx3 array-like): list or array of coordinates threshold (float): threshold value for distance """ unique_coords = [] coords_set = [self.slab.lattice.get_fractional_coords(coords) for coords in coords_set] for coord in coords_set: if not in_coord_list_pbc(unique_coords, coord, threshold): unique_coords += [coord] return [self.slab.lattice.get_cartesian_coords(coords) for coords in unique_coords] def ensemble_center(self, site_list, indices, cartesian=True): """ Finds the center of an ensemble of sites selected from a list of sites. Helper method for the find_adsorption_sites algorithm. Args: site_list (list of sites): list of sites indices (list of ints): list of ints from which to select sites from site list cartesian (bool): whether to get average fractional or cartesian coordinate """ if cartesian: return np.average([site_list[i].coords for i in indices], axis=0) else: return np.average([site_list[i].frac_coords for i in indices], axis=0) def add_adsorbate(self, molecule, ads_coord, repeat=None, reorient=True): """ Adds an adsorbate at a particular coordinate. Adsorbate represented by a Molecule object, and is positioned relative to the input adsorbate coordinate. Args: molecule (Molecule): molecule object representing the adsorbate ads_coord (array): coordinate of adsorbate position repeat (3-tuple or list): input for making a supercell of slab prior to placing the adsorbate reorient (bool): flag on whether to reorient the molecule to have its z-axis concurrent with miller index """ if reorient: # Reorient the molecule along slab m_index sop = get_rot(self.slab) molecule.apply_operation(sop.inverse) struct = self.slab.copy() if repeat: struct.make_supercell(repeat) if 'surface_properties' in struct.site_properties.keys(): molecule.add_site_property("surface_properties", ["adsorbate"] * molecule.num_sites) if 'selective_dynamics' in struct.site_properties.keys(): molecule.add_site_property("selective_dynamics", [[True, True, True]] * molecule.num_sites) for site in molecule: struct.append(site.specie, ads_coord + site.coords, coords_are_cartesian=True, properties=site.properties) return struct def assign_selective_dynamics(self, slab): """ Helper function to assign selective dynamics site_properties based on surface, subsurface site properties Args: slab (Slab): slab for which to assign selective dynamics """ sd_list = [] sd_list = [[False, False, False] if site.properties['surface_properties'] == 'subsurface' else [True, True, True] for site in slab.sites] new_sp = slab.site_properties new_sp['selective_dynamics'] = sd_list return slab.copy(site_properties=new_sp) def generate_adsorption_structures(self, molecule, repeat=None, min_lw=5.0, reorient=True, find_args={}): """ Function that generates all adsorption structures for a given molecular adsorbate. 
Can take repeat argument or minimum length/width of precursor slab as an input Args: molecule (Molecule): molecule corresponding to adsorbate repeat (3-tuple or list): repeat argument for supercell generation min_lw (float): minimum length and width of the slab, only used if repeat is None reorient (bool): flag on whether or not to reorient adsorbate along the miller index find_args (dict): dictionary of arguments to be passed to the call to self.find_adsorption_sites, e.g. {"distance":2.0} """ if repeat is None: xrep = np.ceil(min_lw / np.linalg.norm(self.slab.lattice.matrix[0])) yrep = np.ceil(min_lw / np.linalg.norm(self.slab.lattice.matrix[1])) repeat = [xrep, yrep, 1] structs = [] for coords in self.find_adsorption_sites(**find_args)['all']: structs.append(self.add_adsorbate( molecule, coords, repeat=repeat, reorient=reorient)) return structs def adsorb_both_surfaces(self, molecule, repeat=None, min_lw=5.0, reorient=True, find_args={}): """ Function that generates all adsorption structures for a given molecular adsorbate on both surfaces of a slab. This is useful for calculating surface energy where both surfaces need to be equivalent or if we want to calculate nonpolar systems. Args: molecule (Molecule): molecule corresponding to adsorbate repeat (3-tuple or list): repeat argument for supercell generation min_lw (float): minimum length and width of the slab, only used if repeat is None reorient (bool): flag on whether or not to reorient adsorbate along the miller index find_args (dict): dictionary of arguments to be passed to the call to self.find_adsorption_sites, e.g. {"distance":2.0} """ # Get the adsorbed surfaces first adslabs = self.generate_adsorption_structures(molecule, repeat=repeat, min_lw=min_lw, reorient=reorient, find_args=find_args) new_adslabs = [] for adslab in adslabs: # Find the adsorbate sites and indices in each slab symmetric, adsorbates, indices = False, [], [] for i, site in enumerate(adslab.sites): if site.surface_properties == "adsorbate": adsorbates.append(site) indices.append(i) # Start with the clean slab adslab.remove_sites(indices) slab = adslab.copy() # For each site, we add it back to the slab along with a # symmetrically equivalent position on the other side of # the slab using symmetry operations for adsorbate in adsorbates: p2 = adslab.get_symmetric_site(adsorbate.frac_coords) slab.append(adsorbate.specie, p2, properties={"surface_properties": "adsorbate"}) slab.append(adsorbate.specie, adsorbate.frac_coords, properties={"surface_properties": "adsorbate"}) new_adslabs.append(slab) return new_adslabs def generate_substitution_structures(self, atom, target_species=[], sub_both_sides=False, range_tol=1e-2, dist_from_surf=0): """ Function that performs substitution-type doping on the surface and returns all possible configurations where one dopant is substituted per surface. Can substitute one surface or both. 
Args: atom (str): atom corresponding to substitutional dopant sub_both_sides (bool): If true, substitute an equivalent site on the other surface target_species (list): List of specific species to substitute range_tol (float): Find viable substitution sites at a specific distance from the surface +- this tolerance dist_from_surf (float): Distance from the surface to find viable substitution sites, defaults to 0 to substitute at the surface """ # Get symmetrized structure in case we want to substitue both sides sym_slab = SpacegroupAnalyzer(self.slab).get_symmetrized_structure() # Define a function for substituting a site def substitute(site, i): slab = self.slab.copy() props = self.slab.site_properties if sub_both_sides: # Find an equivalent site on the other surface eq_indices = [indices for indices in sym_slab.equivalent_indices if i in indices][0] for ii in eq_indices: if "%.6f" % (sym_slab[ii].frac_coords[2]) != \ "%.6f" % (site.frac_coords[2]): props["surface_properties"][ii] = "substitute" slab.replace(ii, atom) props["surface_properties"][i] = "substitute" slab.replace(i, atom) slab.add_site_property("surface_properties", props["surface_properties"]) return slab # Get all possible substitution sites substituted_slabs = [] # Sort sites so that we can define a range relative to the position of the # surface atoms, i.e. search for sites above (below) the bottom (top) surface sorted_sites = sorted(sym_slab, key=lambda site: site.frac_coords[2]) if sorted_sites[0].surface_properties == "surface": d = sorted_sites[0].frac_coords[2] + dist_from_surf else: d = sorted_sites[-1].frac_coords[2] - dist_from_surf for i, site in enumerate(sym_slab): if d - range_tol < site.frac_coords[2] < d + range_tol: if target_species and site.species_string in target_species: substituted_slabs.append(substitute(site, i)) elif not target_species: substituted_slabs.append(substitute(site, i)) matcher = StructureMatcher() return [s[0] for s in matcher.group_structures(substituted_slabs)] def get_mi_vec(slab): """ Convenience function which returns the unit vector aligned with the miller index. """ mvec = np.cross(slab.lattice.matrix[0], slab.lattice.matrix[1]) return mvec / np.linalg.norm(mvec) def get_rot(slab): """ Gets the transformation to rotate the z axis into the miller index """ new_z = get_mi_vec(slab) a, b, c = slab.lattice.matrix new_x = a / np.linalg.norm(a) new_y = np.cross(new_z, new_x) x, y, z = np.eye(3) rot_matrix = np.array([np.dot(*el) for el in itertools.product([x, y, z], [new_x, new_y, new_z])]).reshape(3, 3) rot_matrix = np.transpose(rot_matrix) sop = SymmOp.from_rotation_and_translation(rot_matrix) return sop def put_coord_inside(lattice, cart_coordinate): """ converts a cartesian coordinate such that it is inside the unit cell. """ fc = lattice.get_fractional_coords(cart_coordinate) return lattice.get_cartesian_coords([c - np.floor(c) for c in fc]) def reorient_z(structure): """ reorients a structure such that the z axis is concurrent with the normal to the A-B plane """ struct = structure.copy() sop = get_rot(struct) struct.apply_operation(sop) return struct # Get color dictionary colors = loadfn(os.path.join(os.path.dirname(vis.__file__), "ElementColorSchemes.yaml")) color_dict = {el: [j / 256.001 for j in colors["Jmol"][el]] for el in colors["Jmol"].keys()} def plot_slab(slab, ax, scale=0.8, repeat=5, window=1.5, draw_unit_cell=True, decay=0.2, adsorption_sites=True): """ Function that helps visualize the slab in a 2-D plot, for convenient viewing of output of AdsorbateSiteFinder. 
Args: slab (slab): Slab object to be visualized ax (axes): matplotlib axes with which to visualize scale (float): radius scaling for sites repeat (int): number of repeating unit cells to visualize window (float): window for setting the axes limits, is essentially a fraction of the unit cell limits draw_unit_cell (bool): flag indicating whether or not to draw cell decay (float): how the alpha-value decays along the z-axis """ orig_slab = slab.copy() slab = reorient_z(slab) orig_cell = slab.lattice.matrix.copy() if repeat: slab.make_supercell([repeat, repeat, 1]) coords = np.array(sorted(slab.cart_coords, key=lambda x: x[2])) sites = sorted(slab.sites, key=lambda x: x.coords[2]) alphas = 1 - decay * (np.max(coords[:, 2]) - coords[:, 2]) alphas = alphas.clip(min=0) corner = [0, 0, slab.lattice.get_fractional_coords(coords[-1])[-1]] corner = slab.lattice.get_cartesian_coords(corner)[:2] verts = orig_cell[:2, :2] lattsum = verts[0] + verts[1] # Draw circles at sites and stack them accordingly for n, coord in enumerate(coords): r = sites[n].specie.atomic_radius * scale ax.add_patch(patches.Circle(coord[:2] - lattsum * (repeat // 2), r, color='w', zorder=2 * n)) color = color_dict[sites[n].species_string] ax.add_patch(patches.Circle(coord[:2] - lattsum * (repeat // 2), r, facecolor=color, alpha=alphas[n], edgecolor='k', lw=0.3, zorder=2 * n + 1)) # Adsorption sites if adsorption_sites: asf = AdsorbateSiteFinder(orig_slab) ads_sites = asf.find_adsorption_sites()['all'] sop = get_rot(orig_slab) ads_sites = [sop.operate(ads_site)[:2].tolist() for ads_site in ads_sites] ax.plot(*zip(*ads_sites), color='k', marker='x', markersize=10, mew=1, linestyle='', zorder=10000) # Draw unit cell if draw_unit_cell: verts = np.insert(verts, 1, lattsum, axis=0).tolist() verts += [[0., 0.]] verts = [[0., 0.]] + verts codes = [Path.MOVETO, Path.LINETO, Path.LINETO, Path.LINETO, Path.CLOSEPOLY] verts = [(np.array(vert) + corner).tolist() for vert in verts] path = Path(verts, codes) patch = patches.PathPatch(path, facecolor='none', lw=2, alpha=0.5, zorder=2 * n + 2) ax.add_patch(patch) ax.set_aspect("equal") center = corner + lattsum / 2. extent = np.max(lattsum) lim_array = [center - extent * window, center + extent * window] x_lim = [ele[0] for ele in lim_array] y_lim = [ele[1] for ele in lim_array] ax.set_xlim(x_lim) ax.set_ylim(y_lim) return ax
johnson1228/pymatgen
pymatgen/analysis/adsorption.py
Python
mit
30,855
[ "Jmol", "pymatgen" ]
89c045a3128766f9df1d1274d0531aa95f63b1bce04c16da60f28a47ffd459ed
# (C) 2016, Markus Wildi, wildi.markus@bluewin.ch # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 2, or (at your option) # any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software Foundation, # Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. # # Or visit http://www.gnu.org/licenses/gpl.html. # ''' Basic services for connection to HTTPD ''' __author__ = 'wildi.markus@bluewin.ch' import os,sys import configparser import psycopg2 import crypt import pwd from random import choice from string import ascii_letters def create_cfg(httpd_connect_string=None, user_name=None,passwd=None,pth_cfg=None,lg=None): lg.error('>>>>>>>> {}'.format(pth_cfg)) if not os.path.isfile(pth_cfg): cfgf = open(pth_cfg, 'w') cfg = configparser.ConfigParser() cfg.add_section('proxy') cfg.set('proxy', 'host', httpd_connect_string) cfg.set('proxy', 'user', user_name) cfg.set('proxy', 'passwd', passwd) cfg.write(cfgf) cfgf.close() os.chmod(pth_cfg,0o600) else: lg.warn('not over writing: {}'.format(pth_cfg)) def delete_cfg(pth_cfg=None,lg=None): try: os.unlink(pth_cfg) except: pass def create_pgsql(user_name=None,passwd=None,lg=None): db_user_name=pwd.getpwuid(os.getuid())[0] db_name='stars' prt_slt=''.join(choice(ascii_letters) for i in range(8)) pwhash = crypt.crypt(passwd, '$6$' + prt_slt) conn = psycopg2.connect('dbname={} user={} password={}'.format(db_name, db_user_name, '')) crsr = conn.cursor() # usr_login|usr_tmp|usr_email|usr_id|usr_execute_permission|usr_passwd|allowed_devices try: crsr.execute('INSERT INTO users VALUES (\'{}\', null, \'{}\', 2, \'t\', \'{}\', \'C0 T0 HTTPD\');'.format(user_name,'unittest@example.com',pwhash[0:98])) except psycopg2.IntegrityError as e: lg.error('create_pgsql: user or email address already exists: {}'.format(user_name)) return conn.commit() lg.debug('create_pgsql: {}'.format(crsr.statusmessage)) crsr.close() conn.close() def delete_pgsql(user_name=None,lg=None): db_user_name=pwd.getpwuid(os.getuid())[0] db_name='stars' conn = psycopg2.connect('dbname={} user={} password={}'.format(db_name, db_user_name, '')) crsr = conn.cursor() crsr.execute('DELETE FROM users WHERE usr_login=\'{}\' ;'.format(user_name)) result=crsr.rowcount conn.commit() crsr.close() conn.close() #lg.debug('delete_pgsql: {}'.format(crsr.statusmessage)) if result == 1: lg.info('user: {} deleted from database'.format(user_name)) elif result == 0: pass else: lg.warn('delete user: {} manually, result: {}'.format(user_name, result))
RTS2/rts2
scripts/u_point/u_point/httpd_connection.py
Python
lgpl-3.0
3,250
[ "VisIt" ]
3f8cd8036a6a95e23c965751bb48e89e95951e7d9e2f6d3a5305496c65dd420f
import scipy as sp import numpy as np import pylab as plt from scipy.integrate import odeint class HodgkinHuxley(): """Full Hodgkin-Huxley Model implemented in Python""" C_m = 1.0 """membrane capacitance, in uF/cm^2""" g_Na = 120.0 """Sodium (Na) maximum conductances, in mS/cm^2""" g_K = 36.0 """Postassium (K) maximum conductances, in mS/cm^2""" g_L = 0.3 """Leak maximum conductances, in mS/cm^2""" E_Na = 50.0 """Sodium (Na) Nernst reversal potentials, in mV""" E_K = -77.0 """Postassium (K) Nernst reversal potentials, in mV""" E_L = -54.387 """Leak Nernst reversal potentials, in mV""" t = np.arange(0.0, 450.0, 0.01) """ The time to integrate over """ def alpha_m(self, V): """Channel gating kinetics. Functions of membrane voltage""" return 0.1*(V+40.0)/(1.0 - np.exp(-(V+40.0) / 10.0)) def beta_m(self, V): """Channel gating kinetics. Functions of membrane voltage""" return 4.0*np.exp(-(V+65.0) / 18.0) def alpha_h(self, V): """Channel gating kinetics. Functions of membrane voltage""" return 0.07*np.exp(-(V+65.0) / 20.0) def beta_h(self, V): """Channel gating kinetics. Functions of membrane voltage""" return 1.0/(1.0 + np.exp(-(V+35.0) / 10.0)) def alpha_n(self, V): """Channel gating kinetics. Functions of membrane voltage""" return 0.01*(V+55.0)/(1.0 - np.exp(-(V+55.0) / 10.0)) def beta_n(self, V): """Channel gating kinetics. Functions of membrane voltage""" return 0.125*np.exp(-(V+65) / 80.0) def I_Na(self, V, m, h): """ Membrane current (in uA/cm^2) Sodium (Na = element name) | :param V: | :param m: | :param h: | :return: """ return self.g_Na * m**3 * h * (V - self.E_Na) def I_K(self, V, n): """ Membrane current (in uA/cm^2) Potassium (K = element name) | :param V: | :param h: | :return: """ return self.g_K * n**4 * (V - self.E_K) # Leak def I_L(self, V): """ Membrane current (in uA/cm^2) Leak | :param V: | :param h: | :return: """ return self.g_L * (V - self.E_L) def I_inj(self, t): """ External Current | :param t: time | :return: step up to 10 uA/cm^2 at t>100 | step down to 0 uA/cm^2 at t>200 | step up to 35 uA/cm^2 at t>300 | step down to 0 uA/cm^2 at t>400 """ return 10*(t>100) - 10*(t>200) + 35*(t>300) - 35*(t>400) @staticmethod def dALLdt(X, t, self): """ Integrate | :param X: | :param t: | :return: calculate membrane potential & activation variables """ V, m, h, n = X dVdt = (self.I_inj(t) - self.I_Na(V, m, h) - self.I_K(V, n) - self.I_L(V)) / self.C_m dmdt = self.alpha_m(V)*(1.0-m) - self.beta_m(V)*m dhdt = self.alpha_h(V)*(1.0-h) - self.beta_h(V)*h dndt = self.alpha_n(V)*(1.0-n) - self.beta_n(V)*n return dVdt, dmdt, dhdt, dndt def Main(self): """ Main demo for the Hodgkin Huxley neuron model """ X = odeint(self.dALLdt, [-65, 0.05, 0.6, 0.32], self.t, args=(self,)) V = X[:,0] m = X[:,1] h = X[:,2] n = X[:,3] ina = self.I_Na(V, m, h) ik = self.I_K(V, n) il = self.I_L(V) plt.figure() ax1 = plt.subplot(4,1,1) plt.title('Hodgkin-Huxley Neuron') plt.plot(self.t, V, 'k') plt.ylabel('V (mV)') plt.subplot(4,1,2, sharex = ax1) plt.plot(self.t, ina, 'c', label='$I_{Na}$') plt.plot(self.t, ik, 'y', label='$I_{K}$') plt.plot(self.t, il, 'm', label='$I_{L}$') plt.ylabel('Current') plt.legend() plt.subplot(4,1,3, sharex = ax1) plt.plot(self.t, m, 'r', label='m') plt.plot(self.t, h, 'g', label='h') plt.plot(self.t, n, 'b', label='n') plt.ylabel('Gating Value') plt.legend() plt.subplot(4,1,4, sharex = ax1) i_inj_values = [self.I_inj(t) for t in self.t] plt.plot(self.t, i_inj_values, 'k') plt.xlabel('t (ms)') plt.ylabel('$I_{inj}$ ($\\mu{A}/cm^2$)') plt.ylim(-1, 40) plt.tight_layout() plt.show() if 
__name__ == '__main__': runner = HodgkinHuxley() runner.Main()
openworm/hodgkin_huxley_tutorial
Tutorial/Source/HodgkinHuxley.py
Python
lgpl-3.0
4,594
[ "NEURON" ]
f9b420a9d4e498124cceaf5ad9896166651529cce948547a6e71a8685a2b9f62
#!/usr/bin/env python """ Remove the given file replica or a list of file replicas from the File Catalog This script should be used with great care as it may leave dark data in the storage! Use dirac-dms-remove-replicas instead """ import os from DIRAC import exit as dexit from DIRAC.Core.Base.Script import Script from DIRAC import gLogger @Script() def main(): # Registering arguments will automatically add their description to the help menu Script.registerArgument(("LocalFile: Path to local file containing LFNs", "LFN: Logical File Names")) Script.registerArgument(" SE: Storage element") Script.parseCommandLine() from DIRAC.ConfigurationSystem.Client.Helpers.Operations import Operations allowUsers = Operations().getValue("DataManagement/AllowUserReplicaManagement", False) from DIRAC.Core.Security.ProxyInfo import getProxyInfo res = getProxyInfo() if not res["OK"]: gLogger.fatal("Can't get proxy info", res["Message"]) dexit(1) properties = res["Value"].get("groupProperties", []) if not allowUsers: if "FileCatalogManagement" not in properties: gLogger.error("You need to use a proxy from a group with FileCatalogManagement") dexit(5) from DIRAC.DataManagementSystem.Client.DataManager import DataManager dm = DataManager() # parseCommandLine show help when mandatory arguments are not specified or incorrect argument inputFileName, storageElementName = Script.getPositionalArgs(group=True) if os.path.exists(inputFileName): inputFile = open(inputFileName, "r") string = inputFile.read() lfns = [lfn.strip() for lfn in string.splitlines()] inputFile.close() else: lfns = [inputFileName] res = dm.removeReplicaFromCatalog(storageElementName, lfns) if not res["OK"]: print(res["Message"]) dexit(0) for lfn in sorted(res["Value"]["Failed"]): message = res["Value"]["Failed"][lfn] print("Failed to remove %s replica of %s: %s" % (storageElementName, lfn, message)) print("Successfully remove %d catalog replicas at %s" % (len(res["Value"]["Successful"]), storageElementName)) if __name__ == "__main__": main()
DIRACGrid/DIRAC
src/DIRAC/DataManagementSystem/scripts/dirac_dms_remove_catalog_replicas.py
Python
gpl-3.0
2,255
[ "DIRAC" ]
5a55a26d19dafaaa9985b512765582656df61e55c97df1cb50b8167462ec52be
""" Implementation of Harwell-Boeing read/write. At the moment not the full Harwell-Boeing format is supported. Supported features are: - assembled, non-symmetric, real matrices - integer for pointer/indices - exponential format for float values, and int format """ # TODO: # - Add more support (symmetric/complex matrices, non-assembled matrices ?) # XXX: reading is reasonably efficient (>= 85 % is in numpy.fromstring), but # takes a lot of memory. Being faster would require compiled code. # write is not efficient. Although not a terribly exciting task, # having reusable facilities to efficiently read/write fortran-formatted files # would be useful outside this module. import warnings import numpy as np from scipy.sparse import csc_matrix from scipy.io.harwell_boeing._fortran_format_parser import \ FortranFormatParser, IntFormat, ExpFormat __all__ = ["MalformedHeader", "hb_read", "hb_write", "HBInfo", "HBFile", "HBMatrixType"] class MalformedHeader(Exception): pass class LineOverflow(Warning): pass def _nbytes_full(fmt, nlines): """Return the number of bytes to read to get every full lines for the given parsed fortran format.""" return (fmt.repeat * fmt.width + 1) * (nlines - 1) class HBInfo(object): @classmethod def from_data(cls, m, title="Default title", key="0", mxtype=None, fmt=None): """Create a HBInfo instance from an existing sparse matrix. Parameters ---------- m : sparse matrix the HBInfo instance will derive its parameters from m title : str Title to put in the HB header key : str Key mxtype : HBMatrixType type of the input matrix fmt : dict not implemented Returns ------- hb_info : HBInfo instance """ m = m.tocsc(copy=False) pointer = m.indptr indices = m.indices values = m.data nrows, ncols = m.shape nnon_zeros = m.nnz if fmt is None: # +1 because HB use one-based indexing (Fortran), and we will write # the indices /pointer as such pointer_fmt = IntFormat.from_number(np.max(pointer+1)) indices_fmt = IntFormat.from_number(np.max(indices+1)) if values.dtype.kind in np.typecodes["AllFloat"]: values_fmt = ExpFormat.from_number(-np.max(np.abs(values))) elif values.dtype.kind in np.typecodes["AllInteger"]: values_fmt = IntFormat.from_number(-np.max(np.abs(values))) else: raise NotImplementedError("type %s not implemented yet" % values.dtype.kind) else: raise NotImplementedError("fmt argument not supported yet.") if mxtype is None: if not np.isrealobj(values): raise ValueError("Complex values not supported yet") if values.dtype.kind in np.typecodes["AllInteger"]: tp = "integer" elif values.dtype.kind in np.typecodes["AllFloat"]: tp = "real" else: raise NotImplementedError("type %s for values not implemented" % values.dtype) mxtype = HBMatrixType(tp, "unsymmetric", "assembled") else: raise ValueError("mxtype argument not handled yet.") def _nlines(fmt, size): nlines = size // fmt.repeat if nlines * fmt.repeat != size: nlines += 1 return nlines pointer_nlines = _nlines(pointer_fmt, pointer.size) indices_nlines = _nlines(indices_fmt, indices.size) values_nlines = _nlines(values_fmt, values.size) total_nlines = pointer_nlines + indices_nlines + values_nlines return cls(title, key, total_nlines, pointer_nlines, indices_nlines, values_nlines, mxtype, nrows, ncols, nnon_zeros, pointer_fmt.fortran_format, indices_fmt.fortran_format, values_fmt.fortran_format) @classmethod def from_file(cls, fid): """Create a HBInfo instance from a file object containing a matrix in the HB format. Parameters ---------- fid : file-like matrix File or file-like object containing a matrix in the HB format. 
Returns ------- hb_info : HBInfo instance """ # First line line = fid.readline().strip("\n") if not len(line) > 72: raise ValueError("Expected at least 72 characters for first line, " "got: \n%s" % line) title = line[:72] key = line[72:] # Second line line = fid.readline().strip("\n") if not len(line.rstrip()) >= 56: raise ValueError("Expected at least 56 characters for second line, " "got: \n%s" % line) total_nlines = _expect_int(line[:14]) pointer_nlines = _expect_int(line[14:28]) indices_nlines = _expect_int(line[28:42]) values_nlines = _expect_int(line[42:56]) rhs_nlines = line[56:72].strip() if rhs_nlines == '': rhs_nlines = 0 else: rhs_nlines = _expect_int(rhs_nlines) if not rhs_nlines == 0: raise ValueError("Only files without right hand side supported for " "now.") # Third line line = fid.readline().strip("\n") if not len(line) >= 70: raise ValueError("Expected at least 72 character for third line, got:\n" "%s" % line) mxtype_s = line[:3].upper() if not len(mxtype_s) == 3: raise ValueError("mxtype expected to be 3 characters long") mxtype = HBMatrixType.from_fortran(mxtype_s) if mxtype.value_type not in ["real", "integer"]: raise ValueError("Only real or integer matrices supported for " "now (detected %s)" % mxtype) if not mxtype.structure == "unsymmetric": raise ValueError("Only unsymmetric matrices supported for " "now (detected %s)" % mxtype) if not mxtype.storage == "assembled": raise ValueError("Only assembled matrices supported for now") if not line[3:14] == " " * 11: raise ValueError("Malformed data for third line: %s" % line) nrows = _expect_int(line[14:28]) ncols = _expect_int(line[28:42]) nnon_zeros = _expect_int(line[42:56]) nelementals = _expect_int(line[56:70]) if not nelementals == 0: raise ValueError("Unexpected value %d for nltvl (last entry of line 3)" % nelementals) # Fourth line line = fid.readline().strip("\n") ct = line.split() if not len(ct) == 3: raise ValueError("Expected 3 formats, got %s" % ct) return cls(title, key, total_nlines, pointer_nlines, indices_nlines, values_nlines, mxtype, nrows, ncols, nnon_zeros, ct[0], ct[1], ct[2], rhs_nlines, nelementals) def __init__(self, title, key, total_nlines, pointer_nlines, indices_nlines, values_nlines, mxtype, nrows, ncols, nnon_zeros, pointer_format_str, indices_format_str, values_format_str, right_hand_sides_nlines=0, nelementals=0): """Do not use this directly, but the class ctrs (from_* functions).""" self.title = title self.key = key if title is None: title = "No Title" if len(title) > 72: raise ValueError("title cannot be > 72 characters") if key is None: key = "|No Key" if len(key) > 8: warnings.warn("key is > 8 characters (key is %s)" % key, LineOverflow) self.total_nlines = total_nlines self.pointer_nlines = pointer_nlines self.indices_nlines = indices_nlines self.values_nlines = values_nlines parser = FortranFormatParser() pointer_format = parser.parse(pointer_format_str) if not isinstance(pointer_format, IntFormat): raise ValueError("Expected int format for pointer format, got %s" % pointer_format) indices_format = parser.parse(indices_format_str) if not isinstance(indices_format, IntFormat): raise ValueError("Expected int format for indices format, got %s" % indices_format) values_format = parser.parse(values_format_str) if isinstance(values_format, ExpFormat): if mxtype.value_type not in ["real", "complex"]: raise ValueError("Inconsistency between matrix type %s and " "value type %s" % (mxtype, values_format)) values_dtype = np.float64 elif isinstance(values_format, IntFormat): if mxtype.value_type 
not in ["integer"]: raise ValueError("Inconsistency between matrix type %s and " "value type %s" % (mxtype, values_format)) # XXX: fortran int -> dtype association ? values_dtype = int else: raise ValueError("Unsupported format for values %r" % (values_format,)) self.pointer_format = pointer_format self.indices_format = indices_format self.values_format = values_format self.pointer_dtype = np.int32 self.indices_dtype = np.int32 self.values_dtype = values_dtype self.pointer_nlines = pointer_nlines self.pointer_nbytes_full = _nbytes_full(pointer_format, pointer_nlines) self.indices_nlines = indices_nlines self.indices_nbytes_full = _nbytes_full(indices_format, indices_nlines) self.values_nlines = values_nlines self.values_nbytes_full = _nbytes_full(values_format, values_nlines) self.nrows = nrows self.ncols = ncols self.nnon_zeros = nnon_zeros self.nelementals = nelementals self.mxtype = mxtype def dump(self): """Gives the header corresponding to this instance as a string.""" header = [self.title.ljust(72) + self.key.ljust(8)] header.append("%14d%14d%14d%14d" % (self.total_nlines, self.pointer_nlines, self.indices_nlines, self.values_nlines)) header.append("%14s%14d%14d%14d%14d" % (self.mxtype.fortran_format.ljust(14), self.nrows, self.ncols, self.nnon_zeros, 0)) pffmt = self.pointer_format.fortran_format iffmt = self.indices_format.fortran_format vffmt = self.values_format.fortran_format header.append("%16s%16s%20s" % (pffmt.ljust(16), iffmt.ljust(16), vffmt.ljust(20))) return "\n".join(header) def _expect_int(value, msg=None): try: return int(value) except ValueError as e: if msg is None: msg = "Expected an int, got %s" raise ValueError(msg % value) from e def _read_hb_data(content, header): # XXX: look at a way to reduce memory here (big string creation) ptr_string = "".join([content.read(header.pointer_nbytes_full), content.readline()]) ptr = np.fromstring(ptr_string, dtype=int, sep=' ') ind_string = "".join([content.read(header.indices_nbytes_full), content.readline()]) ind = np.fromstring(ind_string, dtype=int, sep=' ') val_string = "".join([content.read(header.values_nbytes_full), content.readline()]) val = np.fromstring(val_string, dtype=header.values_dtype, sep=' ') try: return csc_matrix((val, ind-1, ptr-1), shape=(header.nrows, header.ncols)) except ValueError as e: raise e def _write_data(m, fid, header): m = m.tocsc(copy=False) def write_array(f, ar, nlines, fmt): # ar_nlines is the number of full lines, n is the number of items per # line, ffmt the fortran format pyfmt = fmt.python_format pyfmt_full = pyfmt * fmt.repeat # for each array to write, we first write the full lines, and special # case for partial line full = ar[:(nlines - 1) * fmt.repeat] for row in full.reshape((nlines-1, fmt.repeat)): f.write(pyfmt_full % tuple(row) + "\n") nremain = ar.size - full.size if nremain > 0: f.write((pyfmt * nremain) % tuple(ar[ar.size - nremain:]) + "\n") fid.write(header.dump()) fid.write("\n") # +1 is for Fortran one-based indexing write_array(fid, m.indptr+1, header.pointer_nlines, header.pointer_format) write_array(fid, m.indices+1, header.indices_nlines, header.indices_format) write_array(fid, m.data, header.values_nlines, header.values_format) class HBMatrixType(object): """Class to hold the matrix type.""" # q2f* translates qualified names to Fortran character _q2f_type = { "real": "R", "complex": "C", "pattern": "P", "integer": "I", } _q2f_structure = { "symmetric": "S", "unsymmetric": "U", "hermitian": "H", "skewsymmetric": "Z", "rectangular": "R" } _q2f_storage = { 
"assembled": "A", "elemental": "E", } _f2q_type = dict([(j, i) for i, j in _q2f_type.items()]) _f2q_structure = dict([(j, i) for i, j in _q2f_structure.items()]) _f2q_storage = dict([(j, i) for i, j in _q2f_storage.items()]) @classmethod def from_fortran(cls, fmt): if not len(fmt) == 3: raise ValueError("Fortran format for matrix type should be 3 " "characters long") try: value_type = cls._f2q_type[fmt[0]] structure = cls._f2q_structure[fmt[1]] storage = cls._f2q_storage[fmt[2]] return cls(value_type, structure, storage) except KeyError as e: raise ValueError("Unrecognized format %s" % fmt) from e def __init__(self, value_type, structure, storage="assembled"): self.value_type = value_type self.structure = structure self.storage = storage if value_type not in self._q2f_type: raise ValueError("Unrecognized type %s" % value_type) if structure not in self._q2f_structure: raise ValueError("Unrecognized structure %s" % structure) if storage not in self._q2f_storage: raise ValueError("Unrecognized storage %s" % storage) @property def fortran_format(self): return self._q2f_type[self.value_type] + \ self._q2f_structure[self.structure] + \ self._q2f_storage[self.storage] def __repr__(self): return "HBMatrixType(%s, %s, %s)" % \ (self.value_type, self.structure, self.storage) class HBFile(object): def __init__(self, file, hb_info=None): """Create a HBFile instance. Parameters ---------- file : file-object StringIO work as well hb_info : HBInfo, optional Should be given as an argument for writing, in which case the file should be writable. """ self._fid = file if hb_info is None: self._hb_info = HBInfo.from_file(file) else: #raise IOError("file %s is not writable, and hb_info " # "was given." % file) self._hb_info = hb_info @property def title(self): return self._hb_info.title @property def key(self): return self._hb_info.key @property def type(self): return self._hb_info.mxtype.value_type @property def structure(self): return self._hb_info.mxtype.structure @property def storage(self): return self._hb_info.mxtype.storage def read_matrix(self): return _read_hb_data(self._fid, self._hb_info) def write_matrix(self, m): return _write_data(m, self._fid, self._hb_info) def hb_read(path_or_open_file): """Read HB-format file. Parameters ---------- path_or_open_file : path-like or file-like If a file-like object, it is used as-is. Otherwise, it is opened before reading. Returns ------- data : scipy.sparse.csc_matrix instance The data read from the HB file as a sparse matrix. Notes ----- At the moment not the full Harwell-Boeing format is supported. Supported features are: - assembled, non-symmetric, real matrices - integer for pointer/indices - exponential format for float values, and int format Examples -------- We can read and write a harwell-boeing format file: >>> from scipy.io.harwell_boeing import hb_read, hb_write >>> from scipy.sparse import csr_matrix, eye >>> data = csr_matrix(eye(3)) # create a sparse matrix >>> hb_write("data.hb", data) # write a hb file >>> print(hb_read("data.hb")) # read a hb file (0, 0) 1.0 (1, 1) 1.0 (2, 2) 1.0 """ def _get_matrix(fid): hb = HBFile(fid) return hb.read_matrix() if hasattr(path_or_open_file, 'read'): return _get_matrix(path_or_open_file) else: with open(path_or_open_file) as f: return _get_matrix(f) def hb_write(path_or_open_file, m, hb_info=None): """Write HB-format file. Parameters ---------- path_or_open_file : path-like or file-like If a file-like object, it is used as-is. Otherwise, it is opened before writing. 
m : sparse-matrix the sparse matrix to write hb_info : HBInfo contains the meta-data for write Returns ------- None Notes ----- At the moment not the full Harwell-Boeing format is supported. Supported features are: - assembled, non-symmetric, real matrices - integer for pointer/indices - exponential format for float values, and int format Examples -------- We can read and write a harwell-boeing format file: >>> from scipy.io.harwell_boeing import hb_read, hb_write >>> from scipy.sparse import csr_matrix, eye >>> data = csr_matrix(eye(3)) # create a sparse matrix >>> hb_write("data.hb", data) # write a hb file >>> print(hb_read("data.hb")) # read a hb file (0, 0) 1.0 (1, 1) 1.0 (2, 2) 1.0 """ m = m.tocsc(copy=False) if hb_info is None: hb_info = HBInfo.from_data(m) def _set_matrix(fid): hb = HBFile(fid, hb_info) return hb.write_matrix(m) if hasattr(path_or_open_file, 'write'): return _set_matrix(path_or_open_file) else: with open(path_or_open_file, 'w') as f: return _set_matrix(f)
nmayorov/scipy
scipy/io/harwell_boeing/hb.py
Python
bsd-3-clause
19,254
[ "exciting" ]
60c55f2fdefce25a91e54a60a9881e27cac67addaac15fbdb11ed2a426451d86
#! /usr/bin/env python import os import sys import argparse import numpy as np import settings from optimize.gradient_optimizer import GradientOptimizer, FindParams from caffevis.caffevis_helper import check_force_backward_true, read_label_file LR_POLICY_CHOICES = ('constant', 'progress', 'progress01') def get_parser(): parser = argparse.ArgumentParser(description='Script to find, with or without regularization, images that cause high or low activations of specific neurons in a network via numerical optimization. Settings are read from settings.py, overridden in settings_local.py, and may be further overridden on the command line.', formatter_class=lambda prog: argparse.ArgumentDefaultsHelpFormatter(prog, width=100) ) # Network and data options parser.add_argument('--caffe-root', type = str, default = settings.caffevis_caffe_root, help = 'Path to caffe root directory.') parser.add_argument('--deploy-proto', type = str, default = settings.caffevis_deploy_prototxt, help = 'Path to caffe network prototxt.') parser.add_argument('--net-weights', type = str, default = settings.caffevis_network_weights, help = 'Path to caffe network weights.') parser.add_argument('--mean', type = str, default = repr(settings.caffevis_data_mean), help = '''Mean. The mean may be None, a tuple of one mean value per channel, or a string specifying the path to a mean image to load. Because of the multiple datatypes supported, this argument must be specified as a string that evaluates to a valid Python object. For example: "None", "(10,20,30)", and "'mean.npy'" are all valid values. Note that to specify a string path to a mean file, it must be passed with quotes, which usually entails passing it with double quotes in the shell! Alternately, just provide the mean in settings_local.py.''') parser.add_argument('--channel-swap-to-rgb', type = str, default = '(2,1,0)', help = 'Permutation to apply to channels to change to RGB space for plotting. Hint: (0,1,2) if your network is trained for RGB, (2,1,0) if it is trained for BGR.') parser.add_argument('--data-size', type = str, default = '(227,227)', help = 'Size of network input.') #### FindParams # Where to start parser.add_argument('--start-at', type = str, default = 'mean_plus_rand', choices = ('mean_plus_rand', 'randu', 'mean'), help = 'How to generate x0, the initial point used in optimization.') parser.add_argument('--rand-seed', type = int, default = 0, help = 'Random seed used for generating the start-at image (use different seeds to generate different images).') # What to optimize parser.add_argument('--push-layer', type = str, default = 'fc8', help = 'Name of layer that contains the desired neuron whose value is optimized.') parser.add_argument('--push-channel', type = int, default = '130', help = 'Channel number for desired neuron whose value is optimized (channel for conv, neuron index for FC).') parser.add_argument('--push-spatial', type = str, default = 'None', help = 'Which spatial location to push for conv layers. For FC layers, set this to None. For conv layers, set it to a tuple, e.g. when using `--push-layer conv5` on AlexNet, --push-spatial (6,6) will maximize the center unit of the 13x13 spatial grid.') parser.add_argument('--push-dir', type = float, default = 1, help = 'Which direction to push the activation of the selected neuron, that is, the value used to begin backprop. For example, use 1 to maximize the selected neuron activation and -1 to minimize it.') # Use regularization? 
parser.add_argument('--decay', type = float, default = 0, help = 'Amount of L2 decay to use.') parser.add_argument('--blur-radius', type = float, default = 0, help = 'Radius in pixels of blur to apply after each BLUR_EVERY steps. If 0, perform no blurring. Blur sizes between 0 and 0.3 work poorly.') parser.add_argument('--blur-every', type = int, default = 0, help = 'Blur every BLUR_EVERY steps. If 0, perform no blurring.') parser.add_argument('--small-val-percentile', type = float, default = 0, help = 'Induce sparsity by setting pixels with absolute value under SMALL_VAL_PERCENTILE percentile to 0. Not discussed in paper. 0 to disable.') parser.add_argument('--small-norm-percentile', type = float, default = 0, help = 'Induce sparsity by setting pixels with norm under SMALL_NORM_PERCENTILE percentile to 0. \\theta_{n_pct} from the paper. 0 to disable.') parser.add_argument('--px-benefit-percentile', type = float, default = 0, help = 'Induce sparsity by setting pixels with contribution under PX_BENEFIT_PERCENTILE percentile to 0. Mentioned briefly in paper but not used. 0 to disable.') parser.add_argument('--px-abs-benefit-percentile', type = float, default = 0, help = 'Induce sparsity by setting pixels with contribution under PX_BENEFIT_PERCENTILE percentile to 0. \\theta_{c_pct} from the paper. 0 to disable.') # How much to optimize parser.add_argument('--lr-policy', type = str, default = 'constant', choices = LR_POLICY_CHOICES, help = 'Learning rate policy. See description in lr-params.') parser.add_argument('--lr-params', type = str, default = '{"lr": 1}', help = 'Learning rate params, specified as a string that evalutes to a Python dict. Params that must be provided dependon which lr-policy is selected. The "constant" policy requires the "lr" key and uses the constant given learning rate. The "progress" policy requires the "max_lr" and "desired_prog" keys and scales the learning rate such that the objective function will change by an amount equal to DESIRED_PROG under a linear objective assumption, except the LR is limited to MAX_LR. The "progress01" policy requires the "max_lr", "early_prog", and "late_prog_mult" keys and is tuned for optimizing neurons with outputs in the [0,1] range, e.g. neurons on a softmax layer. Under this policy optimization slows down as the output approaches 1 (see code for details).') parser.add_argument('--max-iter', type = int, default = 500, help = 'Number of iterations of the optimization loop.') # Where to save results parser.add_argument('--output-prefix', type = str, default = 'optimize_results/opt', help = 'Output path and filename prefix (default: optimize_results/opt)') parser.add_argument('--output-template', type = str, default = '%(p.push_layer)s_%(p.push_channel)04d_%(p.rand_seed)d', help = 'Output filename template; see code for details (default: "%%(p.push_layer)s_%%(p.push_channel)04d_%%(p.rand_seed)d"). ' 'The default output-prefix and output-template produce filenames like "optimize_results/opt_prob_0278_0_best_X.jpg"') parser.add_argument('--brave', action = 'store_true', help = 'Allow overwriting existing results files. Default: off, i.e. 
cowardly refuse to overwrite existing files.') parser.add_argument('--skipbig', action = 'store_true', help = 'Skip outputting large *info_big.pkl files (contains pickled version of x0, last x, best x, first x that attained max on the specified layer.') return parser def parse_and_validate_lr_params(parser, lr_policy, lr_params): assert lr_policy in LR_POLICY_CHOICES try: lr_params = eval(lr_params) except (SyntaxError,NameError) as _: err = 'Tried to parse the following lr_params value\n%s\nas a Python expression, but it failed. lr_params should evaluate to a valid Python dict.' % lr_params parser.error(err) if lr_policy == 'constant': if not 'lr' in lr_params: parser.error('Expected lr_params to be dict with at least "lr" key, but dict is %s' % repr(lr_params)) elif lr_policy == 'progress': if not ('max_lr' in lr_params and 'desired_prog' in lr_params): parser.error('Expected lr_params to be dict with at least "max_lr" and "desired_prog" keys, but dict is %s' % repr(lr_params)) elif lr_policy == 'progress01': if not ('max_lr' in lr_params and 'early_prog' in lr_params and 'late_prog_mult' in lr_params): parser.error('Expected lr_params to be dict with at least "max_lr", "early_prog", and "late_prog_mult" keys, but dict is %s' % repr(lr_params)) return lr_params def parse_and_validate_push_spatial(parser, push_spatial): '''Returns tuple of length 2.''' try: push_spatial = eval(push_spatial) except (SyntaxError,NameError) as _: err = 'Tried to parse the following push_spatial value\n%s\nas a Python expression, but it failed. push_spatial should be a valid Python expression.' % push_spatial parser.error(err) if push_spatial == None: push_spatial = (0,0) # Convert to tuple format elif isinstance(push_spatial, tuple) and len(push_spatial) == 2: pass else: err = 'push_spatial should be None or a valid tuple of indices of length 2, but it is: %s' % push_spatial parser.error(err) return push_spatial def main(): parser = get_parser() args = parser.parse_args() # Finish parsing args channel_swap_to_rgb = eval(args.channel_swap_to_rgb) assert isinstance(channel_swap_to_rgb, tuple) and len(channel_swap_to_rgb) > 0, 'channel_swap_to_rgb should be a tuple' data_size = eval(args.data_size) assert isinstance(data_size, tuple) and len(data_size) == 2, 'data_size should be a length 2 tuple' #channel_swap_inv = tuple([net_channel_swap.index(ii) for ii in range(len(net_channel_swap))]) lr_params = parse_and_validate_lr_params(parser, args.lr_policy, args.lr_params) push_spatial = parse_and_validate_push_spatial(parser, args.push_spatial) # Load mean data_mean = eval(args.mean) if isinstance(data_mean, basestring): # If the mean is given as a filename, load the file try: data_mean = np.load(data_mean) except IOError: print '\n\nCound not load mean file:', data_mean print 'To fetch a default model and mean file, use:\n' print ' $ cd models/caffenet-yos/' print ' $ cp ./fetch.sh\n\n' print 'Or to use your own mean, change caffevis_data_mean in settings_local.py or override by running with `--mean MEAN_FILE` (see --help).\n' raise # Crop center region (e.g. 227x227) if mean is larger (e.g. 
256x256) excess_h = data_mean.shape[1] - data_size[0] excess_w = data_mean.shape[2] - data_size[1] assert excess_h >= 0 and excess_w >= 0, 'mean should be at least as large as %s' % repr(data_size) data_mean = data_mean[:, (excess_h/2):(excess_h/2+data_size[0]), (excess_w/2):(excess_w/2+data_size[1])] elif data_mean is None: pass else: # The mean has been given as a value or a tuple of values data_mean = np.array(data_mean) # Promote to shape C,1,1 while len(data_mean.shape) < 3: data_mean = np.expand_dims(data_mean, -1) print 'Using mean:', repr(data_mean) # Load network sys.path.insert(0, os.path.join(args.caffe_root, 'python')) import caffe net = caffe.Classifier( args.deploy_proto, args.net_weights, mean = data_mean, raw_scale = 1.0, ) check_force_backward_true(settings.caffevis_deploy_prototxt) labels = None if settings.caffevis_labels: labels = read_label_file(settings.caffevis_labels) optimizer = GradientOptimizer(net, data_mean, labels = labels, label_layers = settings.caffevis_label_layers, channel_swap_to_rgb = channel_swap_to_rgb) params = FindParams( start_at = args.start_at, rand_seed = args.rand_seed, push_layer = args.push_layer, push_channel = args.push_channel, push_spatial = push_spatial, push_dir = args.push_dir, decay = args.decay, blur_radius = args.blur_radius, blur_every = args.blur_every, small_val_percentile = args.small_val_percentile, small_norm_percentile = args.small_norm_percentile, px_benefit_percentile = args.px_benefit_percentile, px_abs_benefit_percentile = args.px_abs_benefit_percentile, lr_policy = args.lr_policy, lr_params = lr_params, max_iter = args.max_iter, ) prefix_template = '%s_%s_' % (args.output_prefix, args.output_template) im = optimizer.run_optimize(params, prefix_template = prefix_template, brave = args.brave, skipbig = args.skipbig) if __name__ == '__main__': main()
yosinski/deep-visualization-toolbox
optimize_image.py
Python
mit
13,238
[ "NEURON" ]
3c873e21ad0f2b5c51e74d9868b1c6fd1893f151baff83ddcd982ed47fa54dd5
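A brief, hedged usage sketch for the optimizer script in the row above: it exercises get_parser() and the two validation helpers with an explicit "progress" learning-rate policy. It assumes the file is importable as a module named optimize_image and that settings.py / settings_local.py supply the defaults the parser reads; the layer, channel, and learning-rate values are illustrative, not taken from the original.

# Hedged sketch, not part of the original file. Assumes optimize_image.py is
# importable as `optimize_image` and that settings.py provides the defaults
# referenced inside get_parser().
from optimize_image import (get_parser, parse_and_validate_lr_params,
                            parse_and_validate_push_spatial)

parser = get_parser()
# Illustrative run: maximize unit 278 of layer fc8 with the "progress" policy.
args = parser.parse_args([
    '--push-layer', 'fc8',
    '--push-channel', '278',
    '--lr-policy', 'progress',
    '--lr-params', '{"max_lr": 100.0, "desired_prog": 2.0}',
    '--max-iter', '500',
])
lr_params = parse_and_validate_lr_params(parser, args.lr_policy, args.lr_params)
push_spatial = parse_and_validate_push_spatial(parser, args.push_spatial)
assert lr_params == {'max_lr': 100.0, 'desired_prog': 2.0}
assert push_spatial == (0, 0)   # the default '--push-spatial None' maps to location (0,0)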
# -*- encoding: utf-8 -*- # Copyright 2013 Red Hat, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. """ In the Watcher system, an :ref:`Audit <audit_definition>` is a request for optimizing a :ref:`Cluster <cluster_definition>`. The optimization is done in order to satisfy one :ref:`Goal <goal_definition>` on a given :ref:`Cluster <cluster_definition>`. For each :ref:`Audit <audit_definition>`, the Watcher system generates an :ref:`Action Plan <action_plan_definition>`. To see the life-cycle and description of an :ref:`Audit <audit_definition>` states, visit :ref:`the Audit State machine <audit_state_machine>`. """ import datetime from dateutil import tz import pecan from pecan import rest import wsme from wsme import types as wtypes from wsme import utils as wutils import wsmeext.pecan as wsme_pecan from oslo_log import log from watcher._i18n import _ from watcher.api.controllers import base from watcher.api.controllers import link from watcher.api.controllers.v1 import collection from watcher.api.controllers.v1 import types from watcher.api.controllers.v1 import utils as api_utils from watcher.common import exception from watcher.common import policy from watcher.common import utils from watcher.decision_engine import rpcapi from watcher import objects LOG = log.getLogger(__name__) def _get_object_by_value(context, class_name, value): if utils.is_uuid_like(value) or utils.is_int_like(value): return class_name.get(context, value) else: return class_name.get_by_name(context, value) def hide_fields_in_newer_versions(obj): """This method hides fields that were added in newer API versions. Certain node fields were introduced at certain API versions. These fields are only made available when the request's API version matches or exceeds the versions when these fields were introduced. 
""" if not api_utils.allow_start_end_audit_time(): obj.start_time = wtypes.Unset obj.end_time = wtypes.Unset if not api_utils.allow_force(): obj.force = wtypes.Unset class AuditPostType(wtypes.Base): name = wtypes.wsattr(wtypes.text, mandatory=False) audit_template_uuid = wtypes.wsattr(types.uuid, mandatory=False) goal = wtypes.wsattr(wtypes.text, mandatory=False) strategy = wtypes.wsattr(wtypes.text, mandatory=False) audit_type = wtypes.wsattr(wtypes.text, mandatory=True) state = wtypes.wsattr(wtypes.text, readonly=True, default=objects.audit.State.PENDING) parameters = wtypes.wsattr({wtypes.text: types.jsontype}, mandatory=False, default={}) interval = wtypes.wsattr(types.interval_or_cron, mandatory=False) scope = wtypes.wsattr(types.jsontype, readonly=True) auto_trigger = wtypes.wsattr(bool, mandatory=False) hostname = wtypes.wsattr(wtypes.text, readonly=True, mandatory=False) start_time = wtypes.wsattr(datetime.datetime, mandatory=False) end_time = wtypes.wsattr(datetime.datetime, mandatory=False) force = wtypes.wsattr(bool, mandatory=False) def as_audit(self, context): audit_type_values = [val.value for val in objects.audit.AuditType] if self.audit_type not in audit_type_values: raise exception.AuditTypeNotFound(audit_type=self.audit_type) if (self.audit_type == objects.audit.AuditType.ONESHOT.value and self.interval not in (wtypes.Unset, None)): raise exception.AuditIntervalNotAllowed(audit_type=self.audit_type) if (self.audit_type == objects.audit.AuditType.CONTINUOUS.value and self.interval in (wtypes.Unset, None)): raise exception.AuditIntervalNotSpecified( audit_type=self.audit_type) if self.audit_template_uuid and self.goal: raise exception.Invalid('Either audit_template_uuid ' 'or goal should be provided.') if (self.audit_type == objects.audit.AuditType.ONESHOT.value and (self.start_time not in (wtypes.Unset, None) or self.end_time not in (wtypes.Unset, None))): raise exception.AuditStartEndTimeNotAllowed( audit_type=self.audit_type) if not api_utils.allow_start_end_audit_time(): for field in ('start_time', 'end_time'): if getattr(self, field) not in (wtypes.Unset, None): raise exception.NotAcceptable() # If audit_template_uuid was provided, we will provide any # variables not included in the request, but not override # those variables that were included. 
if self.audit_template_uuid: try: audit_template = objects.AuditTemplate.get( context, self.audit_template_uuid) except exception.AuditTemplateNotFound: raise exception.Invalid( message=_('The audit template UUID or name specified is ' 'invalid')) at2a = { 'goal': 'goal_id', 'strategy': 'strategy_id', 'scope': 'scope', } to_string_fields = set(['goal', 'strategy']) for k in at2a: if not getattr(self, k): try: at_attr = getattr(audit_template, at2a[k]) if at_attr and (k in to_string_fields): at_attr = str(at_attr) setattr(self, k, at_attr) except AttributeError: pass # Note: If audit name was not provided, used a default name if not self.name: if self.strategy: strategy = _get_object_by_value(context, objects.Strategy, self.strategy) self.name = "%s-%s" % (strategy.name, datetime.datetime.utcnow().isoformat()) elif self.audit_template_uuid: audit_template = objects.AuditTemplate.get( context, self.audit_template_uuid) self.name = "%s-%s" % (audit_template.name, datetime.datetime.utcnow().isoformat()) else: goal = _get_object_by_value(context, objects.Goal, self.goal) self.name = "%s-%s" % (goal.name, datetime.datetime.utcnow().isoformat()) # No more than 63 characters if len(self.name) > 63: LOG.warning("Audit: %s length exceeds 63 characters", self.name) self.name = self.name[0:63] return Audit( name=self.name, audit_type=self.audit_type, parameters=self.parameters, goal_id=self.goal, strategy_id=self.strategy, interval=self.interval, scope=self.scope, auto_trigger=self.auto_trigger, start_time=self.start_time, end_time=self.end_time, force=self.force) class AuditPatchType(types.JsonPatchType): @staticmethod def mandatory_attrs(): return ['/audit_template_uuid', '/type'] @staticmethod def validate(patch): def is_new_state_none(p): return p.path == '/state' and p.op == 'replace' and p.value is None serialized_patch = {'path': patch.path, 'op': patch.op, 'value': patch.value} if (patch.path in AuditPatchType.mandatory_attrs() or is_new_state_none(patch)): msg = _("%(field)s can't be updated.") raise exception.PatchError( patch=serialized_patch, reason=msg % dict(field=patch.path)) return types.JsonPatchType.validate(patch) class Audit(base.APIBase): """API representation of an audit. This class enforces type checking and value constraints, and converts between the internal object model and the API representation of an audit. 
""" _goal_uuid = None _goal_name = None _strategy_uuid = None _strategy_name = None def _get_goal(self, value): if value == wtypes.Unset: return None goal = None try: if utils.is_uuid_like(value) or utils.is_int_like(value): goal = objects.Goal.get( pecan.request.context, value) else: goal = objects.Goal.get_by_name( pecan.request.context, value) except exception.GoalNotFound: pass if goal: self.goal_id = goal.id return goal def _get_goal_uuid(self): return self._goal_uuid def _set_goal_uuid(self, value): if value and self._goal_uuid != value: self._goal_uuid = None goal = self._get_goal(value) if goal: self._goal_uuid = goal.uuid def _get_goal_name(self): return self._goal_name def _set_goal_name(self, value): if value and self._goal_name != value: self._goal_name = None goal = self._get_goal(value) if goal: self._goal_name = goal.name def _get_strategy(self, value): if value == wtypes.Unset: return None strategy = None try: if utils.is_uuid_like(value) or utils.is_int_like(value): strategy = objects.Strategy.get( pecan.request.context, value) else: strategy = objects.Strategy.get_by_name( pecan.request.context, value) except exception.StrategyNotFound: pass if strategy: self.strategy_id = strategy.id return strategy def _get_strategy_uuid(self): return self._strategy_uuid def _set_strategy_uuid(self, value): if value and self._strategy_uuid != value: self._strategy_uuid = None strategy = self._get_strategy(value) if strategy: self._strategy_uuid = strategy.uuid def _get_strategy_name(self): return self._strategy_name def _set_strategy_name(self, value): if value and self._strategy_name != value: self._strategy_name = None strategy = self._get_strategy(value) if strategy: self._strategy_name = strategy.name uuid = types.uuid """Unique UUID for this audit""" name = wtypes.text """Name of this audit""" audit_type = wtypes.text """Type of this audit""" state = wtypes.text """This audit state""" goal_uuid = wtypes.wsproperty( wtypes.text, _get_goal_uuid, _set_goal_uuid, mandatory=True) """Goal UUID the audit refers to""" goal_name = wtypes.wsproperty( wtypes.text, _get_goal_name, _set_goal_name, mandatory=False) """The name of the goal this audit refers to""" strategy_uuid = wtypes.wsproperty( wtypes.text, _get_strategy_uuid, _set_strategy_uuid, mandatory=False) """Strategy UUID the audit refers to""" strategy_name = wtypes.wsproperty( wtypes.text, _get_strategy_name, _set_strategy_name, mandatory=False) """The name of the strategy this audit refers to""" parameters = {wtypes.text: types.jsontype} """The strategy parameters for this audit""" links = wtypes.wsattr([link.Link], readonly=True) """A list containing a self link and associated audit links""" interval = wtypes.wsattr(wtypes.text, mandatory=False) """Launch audit periodically (in seconds)""" scope = wtypes.wsattr(types.jsontype, mandatory=False) """Audit Scope""" auto_trigger = wtypes.wsattr(bool, mandatory=False, default=False) """Autoexecute action plan once audit is succeeded""" next_run_time = wtypes.wsattr(datetime.datetime, mandatory=False) """The next time audit launch""" hostname = wtypes.wsattr(wtypes.text, mandatory=False) """Hostname the audit is running on""" start_time = wtypes.wsattr(datetime.datetime, mandatory=False) """The start time for continuous audit launch""" end_time = wtypes.wsattr(datetime.datetime, mandatory=False) """The end time that stopping continuous audit""" force = wsme.wsattr(bool, mandatory=False, default=False) """Allow Action Plan of this Audit be executed in parallel with other Action Plan""" def 
__init__(self, **kwargs): self.fields = [] fields = list(objects.Audit.fields) for k in fields: # Skip fields we do not expose. if not hasattr(self, k): continue self.fields.append(k) setattr(self, k, kwargs.get(k, wtypes.Unset)) self.fields.append('goal_id') self.fields.append('strategy_id') fields.append('goal_uuid') setattr(self, 'goal_uuid', kwargs.get('goal_id', wtypes.Unset)) fields.append('goal_name') setattr(self, 'goal_name', kwargs.get('goal_id', wtypes.Unset)) fields.append('strategy_uuid') setattr(self, 'strategy_uuid', kwargs.get('strategy_id', wtypes.Unset)) fields.append('strategy_name') setattr(self, 'strategy_name', kwargs.get('strategy_id', wtypes.Unset)) @staticmethod def _convert_with_links(audit, url, expand=True): if not expand: audit.unset_fields_except(['uuid', 'name', 'audit_type', 'state', 'goal_uuid', 'interval', 'scope', 'strategy_uuid', 'goal_name', 'strategy_name', 'auto_trigger', 'next_run_time']) audit.links = [link.Link.make_link('self', url, 'audits', audit.uuid), link.Link.make_link('bookmark', url, 'audits', audit.uuid, bookmark=True) ] return audit @classmethod def convert_with_links(cls, rpc_audit, expand=True): audit = Audit(**rpc_audit.as_dict()) hide_fields_in_newer_versions(audit) return cls._convert_with_links(audit, pecan.request.host_url, expand) @classmethod def sample(cls, expand=True): sample = cls(uuid='27e3153e-d5bf-4b7e-b517-fb518e17f34c', name='My Audit', audit_type='ONESHOT', state='PENDING', created_at=datetime.datetime.utcnow(), deleted_at=None, updated_at=datetime.datetime.utcnow(), interval='7200', scope=[], auto_trigger=False, next_run_time=datetime.datetime.utcnow(), start_time=datetime.datetime.utcnow(), end_time=datetime.datetime.utcnow()) sample.goal_id = '7ae81bb3-dec3-4289-8d6c-da80bd8001ae' sample.strategy_id = '7ae81bb3-dec3-4289-8d6c-da80bd8001ff' return cls._convert_with_links(sample, 'http://localhost:9322', expand) class AuditCollection(collection.Collection): """API representation of a collection of audits.""" audits = [Audit] """A list containing audits objects""" def __init__(self, **kwargs): super(AuditCollection, self).__init__() self._type = 'audits' @staticmethod def convert_with_links(rpc_audits, limit, url=None, expand=False, **kwargs): collection = AuditCollection() collection.audits = [Audit.convert_with_links(p, expand) for p in rpc_audits] collection.next = collection.get_next(limit, url=url, **kwargs) return collection @classmethod def sample(cls): sample = cls() sample.audits = [Audit.sample(expand=False)] return sample class AuditsController(rest.RestController): """REST controller for Audits.""" def __init__(self): super(AuditsController, self).__init__() self.dc_client = rpcapi.DecisionEngineAPI() from_audits = False """A flag to indicate if the requests to this controller are coming from the top-level resource Audits.""" _custom_actions = { 'detail': ['GET'], } def _get_audits_collection(self, marker, limit, sort_key, sort_dir, expand=False, resource_url=None, goal=None, strategy=None): additional_fields = ["goal_uuid", "goal_name", "strategy_uuid", "strategy_name"] api_utils.validate_sort_key( sort_key, list(objects.Audit.fields) + additional_fields) limit = api_utils.validate_limit(limit) api_utils.validate_sort_dir(sort_dir) marker_obj = None if marker: marker_obj = objects.Audit.get_by_uuid(pecan.request.context, marker) filters = {} if goal: if utils.is_uuid_like(goal): filters['goal_uuid'] = goal else: # TODO(michaelgugino): add method to get goal by name. 
filters['goal_name'] = goal if strategy: if utils.is_uuid_like(strategy): filters['strategy_uuid'] = strategy else: # TODO(michaelgugino): add method to get goal by name. filters['strategy_name'] = strategy need_api_sort = api_utils.check_need_api_sort(sort_key, additional_fields) sort_db_key = (sort_key if not need_api_sort else None) audits = objects.Audit.list(pecan.request.context, limit, marker_obj, sort_key=sort_db_key, sort_dir=sort_dir, filters=filters) audits_collection = AuditCollection.convert_with_links( audits, limit, url=resource_url, expand=expand, sort_key=sort_key, sort_dir=sort_dir) if need_api_sort: api_utils.make_api_sort(audits_collection.audits, sort_key, sort_dir) return audits_collection @wsme_pecan.wsexpose(AuditCollection, types.uuid, int, wtypes.text, wtypes.text, wtypes.text, wtypes.text) def get_all(self, marker=None, limit=None, sort_key='id', sort_dir='asc', goal=None, strategy=None): """Retrieve a list of audits. :param marker: pagination marker for large data sets. :param limit: maximum number of resources to return in a single result. :param sort_key: column to sort results by. Default: id. :param sort_dir: direction to sort. "asc" or "desc". Default: asc. :param goal: goal UUID or name to filter by :param strategy: strategy UUID or name to filter by """ context = pecan.request.context policy.enforce(context, 'audit:get_all', action='audit:get_all') return self._get_audits_collection(marker, limit, sort_key, sort_dir, goal=goal, strategy=strategy) @wsme_pecan.wsexpose(AuditCollection, wtypes.text, types.uuid, int, wtypes.text, wtypes.text) def detail(self, goal=None, marker=None, limit=None, sort_key='id', sort_dir='asc'): """Retrieve a list of audits with detail. :param goal: goal UUID or name to filter by :param marker: pagination marker for large data sets. :param limit: maximum number of resources to return in a single result. :param sort_key: column to sort results by. Default: id. :param sort_dir: direction to sort. "asc" or "desc". Default: asc. """ context = pecan.request.context policy.enforce(context, 'audit:detail', action='audit:detail') # NOTE(lucasagomes): /detail should only work agaist collections parent = pecan.request.path.split('/')[:-1][-1] if parent != "audits": raise exception.HTTPNotFound expand = True resource_url = '/'.join(['audits', 'detail']) return self._get_audits_collection(marker, limit, sort_key, sort_dir, expand, resource_url, goal=goal) @wsme_pecan.wsexpose(Audit, wtypes.text) def get_one(self, audit): """Retrieve information about the given audit. :param audit: UUID or name of an audit. """ if self.from_audits: raise exception.OperationNotPermitted context = pecan.request.context rpc_audit = api_utils.get_resource('Audit', audit) policy.enforce(context, 'audit:get', rpc_audit, action='audit:get') return Audit.convert_with_links(rpc_audit) @wsme_pecan.wsexpose(Audit, body=AuditPostType, status_code=201) def post(self, audit_p): """Create a new audit. :param audit_p: an audit within the request body. 
""" context = pecan.request.context policy.enforce(context, 'audit:create', action='audit:create') audit = audit_p.as_audit(context) if self.from_audits: raise exception.OperationNotPermitted if not audit._goal_uuid: raise exception.Invalid( message=_('A valid goal_id or audit_template_id ' 'must be provided')) strategy_uuid = audit.strategy_uuid no_schema = True if strategy_uuid is not None: # validate parameter when predefined strategy in audit template strategy = objects.Strategy.get(pecan.request.context, strategy_uuid) schema = strategy.parameters_spec if schema: # validate input parameter with default value feedback no_schema = False utils.StrictDefaultValidatingDraft4Validator(schema).validate( audit.parameters) if no_schema and audit.parameters: raise exception.Invalid(_('Specify parameters but no predefined ' 'strategy for audit, or no ' 'parameter spec in predefined strategy')) audit_dict = audit.as_dict() # convert local time to UTC time start_time_value = audit_dict.get('start_time') end_time_value = audit_dict.get('end_time') if start_time_value: audit_dict['start_time'] = start_time_value.replace( tzinfo=tz.tzlocal()).astimezone( tz.tzutc()).replace(tzinfo=None) if end_time_value: audit_dict['end_time'] = end_time_value.replace( tzinfo=tz.tzlocal()).astimezone( tz.tzutc()).replace(tzinfo=None) new_audit = objects.Audit(context, **audit_dict) new_audit.create() # Set the HTTP Location Header pecan.response.location = link.build_url('audits', new_audit.uuid) # trigger decision-engine to run the audit if new_audit.audit_type == objects.audit.AuditType.ONESHOT.value: self.dc_client.trigger_audit(context, new_audit.uuid) return Audit.convert_with_links(new_audit) @wsme.validate(types.uuid, [AuditPatchType]) @wsme_pecan.wsexpose(Audit, wtypes.text, body=[AuditPatchType]) def patch(self, audit, patch): """Update an existing audit. :param audit: UUID or name of an audit. :param patch: a json PATCH document to apply to this audit. 
""" if self.from_audits: raise exception.OperationNotPermitted context = pecan.request.context audit_to_update = api_utils.get_resource( 'Audit', audit, eager=True) policy.enforce(context, 'audit:update', audit_to_update, action='audit:update') try: audit_dict = audit_to_update.as_dict() initial_state = audit_dict['state'] new_state = api_utils.get_patch_value(patch, 'state') if not api_utils.check_audit_state_transition( patch, initial_state): error_message = _("State transition not allowed: " "(%(initial_state)s -> %(new_state)s)") raise exception.PatchError( patch=patch, reason=error_message % dict( initial_state=initial_state, new_state=new_state)) patch_path = api_utils.get_patch_key(patch, 'path') if patch_path in ('start_time', 'end_time'): patch_value = api_utils.get_patch_value(patch, patch_path) # convert string format to UTC time new_patch_value = wutils.parse_isodatetime( patch_value).replace( tzinfo=tz.tzlocal()).astimezone( tz.tzutc()).replace(tzinfo=None) api_utils.set_patch_value(patch, patch_path, new_patch_value) audit = Audit(**api_utils.apply_jsonpatch(audit_dict, patch)) except api_utils.JSONPATCH_EXCEPTIONS as e: raise exception.PatchError(patch=patch, reason=e) # Update only the fields that have changed for field in objects.Audit.fields: try: patch_val = getattr(audit, field) except AttributeError: # Ignore fields that aren't exposed in the API continue if patch_val == wtypes.Unset: patch_val = None if audit_to_update[field] != patch_val: audit_to_update[field] = patch_val audit_to_update.save() return Audit.convert_with_links(audit_to_update) @wsme_pecan.wsexpose(None, wtypes.text, status_code=204) def delete(self, audit): """Delete an audit. :param audit: UUID or name of an audit. """ context = pecan.request.context audit_to_delete = api_utils.get_resource( 'Audit', audit, eager=True) policy.enforce(context, 'audit:delete', audit_to_delete, action='audit:delete') initial_state = audit_to_delete.state new_state = objects.audit.State.DELETED if not objects.audit.AuditStateTransitionManager( ).check_transition(initial_state, new_state): raise exception.DeleteError( state=initial_state) audit_to_delete.soft_delete()
stackforge/watcher
watcher/api/controllers/v1/audit.py
Python
apache-2.0
27,964
[ "VisIt" ]
51896800536228bd0965f4e44f6ce880ae1a1514ee881bd92eaa35eda4d45fb6
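A small, hedged sketch of the local-time-to-UTC normalisation that AuditsController.post() in the row above applies to start_time and end_time. The wall-clock value is a made-up example; the tz calls are the same dateutil ones the controller uses.

# Hedged sketch of the start_time/end_time normalisation in AuditsController.post().
# The datetime value below is illustrative.
import datetime
from dateutil import tz

local_start = datetime.datetime(2023, 1, 1, 9, 0, 0)            # naive local time from the request body
utc_start = local_start.replace(tzinfo=tz.tzlocal()).astimezone(
    tz.tzutc()).replace(tzinfo=None)                             # naive UTC, as stored on the Audit object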
# Mantid Repository : https://github.com/mantidproject/mantid # # Copyright &copy; 2018 ISIS Rutherford Appleton Laboratory UKRI, # NScD Oak Ridge National Laboratory, European Spallation Source # & Institut Laue - Langevin # SPDX - License - Identifier: GPL - 3.0 + #pylint: disable=no-init from __future__ import (absolute_import, division, print_function) import systemtesting from mantid.simpleapi import * #---------------------------------------------------------------------- class ConvertToMDworkflow(systemtesting.MantidSystemTest): """ """ tolerance = 1e-5 def runTest(self): # let's load test event workspace, which has been already preprocessed and available in Mantid Test folder WS_Name='CNCS_7860_event' Load(Filename=WS_Name,OutputWorkspace=WS_Name) # this workspace has been obtained from an inelastic experiment with input energy Ei = 3. # Usually this energy is stored in workspace # but if it is not, we have to provide it for inelastic conversion to work. AddSampleLog(Workspace=WS_Name,LogName='Ei',LogText='3.0',LogType='Number') # disable multithreaded splitting as BoxID-s are assigned in random manner # AddSampleLog(Workspace=WS_Name,LogName='NUM_THREADS',LogText='0',LogType='Number') # # set up target ws name and remove target workspace with the same name which can occasionally exist. RezWS = 'WS_4D' try: DeleteWorkspace(RezWS) except ValueError: print("Target ws ",RezWS," not found in analysis data service\n") # #---> Start loop over contributing files for i in range(0,20,5): # the following operations simulate different workspaces, obtained from experiment using rotating crystal; # For real experiment we usually just load these workspaces from nxspe files with proper Psi values defined there # and have to set up ub matrix SourceWS = 'SourcePart'+str(i) # ws emulation begin ----> CloneWorkspace(InputWorkspace=WS_Name,OutputWorkspace=SourceWS) # using scattering on a crystal with cubic lattice and 1,0,0 direction along the beam. 
SetUB(Workspace=SourceWS,a='1.4165',b='1.4165',c='1.4165',u='1,0,0',v='0,1,0') # rotated by proper number of degrees around axis Y AddSampleLog(Workspace=SourceWS,LogName='Psi',LogText=str(i)+'.0',LogType='Number Series') SetGoniometer(Workspace=SourceWS,Axis0='Psi,0,1,0,1') # ws emulation, end --------------------------------------------------------------------------------------- ConvertToMD(InputWorkspace=SourceWS,OutputWorkspace=RezWS,QDimensions='Q3D',QConversionScales='HKL', OverwriteExisting=0,dEAnalysisMode='Direct',MinValues='-3,-3,-3,-1',MaxValues='3,3,3,3', SplitInto="20,20,1,1") # delete source workspace from memory; DeleteWorkspace(SourceWS) def validate(self): """Returns the name of the workspace & file to compare""" self.tolerance = 1e-5 #self.disableChecking.append('SpectraMap') #self.disableChecking.append('Instrument') result = 'WS_4D' reference = "ConvertToMDSample.nxs" valNames = [result,reference] Load(Filename=reference,OutputWorkspace=valNames[1]) checker = AlgorithmManager.create("CompareMDWorkspaces") checker.setLogging(True) checker.setPropertyValue("Workspace1",result) checker.setPropertyValue("Workspace2",valNames[1]) checker.setPropertyValue("Tolerance", str(self.tolerance)) checker.setPropertyValue("IgnoreBoxID", "1") checker.setPropertyValue("CheckEvents", "1") checker.execute() if checker.getPropertyValue("Equals") != "1": print(" Workspaces do not match, result: ",checker.getPropertyValue("Result")) print(self.__class__.__name__) SaveMD(InputWorkspace=valNames[0],Filename=self.__class__.__name__+'-mismatch.nxs') return False return True
mganeva/mantid
Testing/SystemTests/tests/analysis/ConvertToMDworkflow.py
Python
gpl-3.0
4,182
[ "CRYSTAL" ]
7d98829dc3b12ae9b1d4e9d6870d45289303ad17f6c1bfa6ea407c9ae0141d00
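The core of the system test above is its accumulation pattern: repeated ConvertToMD calls with OverwriteExisting=0 merge each rotated-crystal contribution into one 4D MD workspace. A hedged sketch of just that pattern follows; the input workspace names are hypothetical and are assumed to already carry the Ei log, UB matrix, and goniometer setting, as prepared in runTest().

# Hedged sketch (not part of the test): accumulate several pre-prepared
# workspaces into one MD workspace, mirroring the parameters used in runTest().
from mantid.simpleapi import ConvertToMD

for ws_name in ['run_psi0', 'run_psi5', 'run_psi10']:   # hypothetical input workspace names
    ConvertToMD(InputWorkspace=ws_name, OutputWorkspace='WS_4D',
                QDimensions='Q3D', QConversionScales='HKL',
                OverwriteExisting=0,                     # merge into WS_4D instead of replacing it
                dEAnalysisMode='Direct',
                MinValues='-3,-3,-3,-1', MaxValues='3,3,3,3',
                SplitInto='20,20,1,1')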
# Contributed by Eric E Monson

import vtk


def main():
    colors = vtk.vtkNamedColors()
    colors.SetColor('bkg', [0.2, 0.3, 0.7, 1.0])

    # create a rendering window and renderer
    ren = vtk.vtkRenderer()
    renWin = vtk.vtkRenderWindow()
    renWin.AddRenderer(ren)

    # create a renderwindowinteractor
    iren = vtk.vtkRenderWindowInteractor()
    iren.SetRenderWindow(renWin)

    cube = vtk.vtkCubeSource()
    cube.SetXLength(200)
    cube.SetYLength(200)
    cube.SetZLength(200)
    cube.Update()

    cm = vtk.vtkPolyDataMapper()
    cm.SetInputConnection(cube.GetOutputPort())

    ca = vtk.vtkActor()
    ca.SetMapper(cm)
    ca.GetProperty().SetColor(colors.GetColor3d("BurlyWood"))

    # assign actor to the renderer
    ren.AddActor(ca)
    ren.SetBackground(colors.GetColor3d('bkg'))

    axesActor = vtk.vtkAnnotatedCubeActor()
    axesActor.SetXPlusFaceText('R')
    axesActor.SetXMinusFaceText('L')
    axesActor.SetYMinusFaceText('H')
    axesActor.SetYPlusFaceText('F')
    axesActor.SetZMinusFaceText('P')
    axesActor.SetZPlusFaceText('A')
    axesActor.GetTextEdgesProperty().SetColor(colors.GetColor3d("Yellow"))
    axesActor.GetTextEdgesProperty().SetLineWidth(2)
    axesActor.GetCubeProperty().SetColor(colors.GetColor3d("Blue"))

    axes = vtk.vtkOrientationMarkerWidget()
    axes.SetOrientationMarker(axesActor)
    axes.SetInteractor(iren)
    axes.EnabledOn()
    axes.InteractiveOn()
    ren.ResetCamera()

    # enable user interface interactor
    iren.Initialize()
    renWin.Render()
    ren.GetActiveCamera().SetPosition(-151.5, 540.1, 364.0)
    ren.GetActiveCamera().SetViewUp(0.2, 0.6, -0.8)
    renWin.Render()
    iren.Start()


if __name__ == '__main__':
    main()
lorensen/VTKExamples
src/Python/Widgets/OrientationMarkerWidget.py
Python
apache-2.0
1,724
[ "VTK" ]
e3206fa4245a7d4df278d5bab6faea8b87d006444ec90dec678e32781408ef9d
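As a hedged variation on the example above, the same vtkOrientationMarkerWidget wiring also works with a plain vtkAxesActor, and SetViewport() pins the marker to a corner of the window. All calls below are standard VTK API; the helper name and viewport values are my own illustrative choices, not part of the original example.

# Hedged sketch: vtkOrientationMarkerWidget with a plain axes actor instead of
# the annotated cube. Helper name and viewport values are illustrative.
import vtk

def add_axes_marker(iren):
    axes_actor = vtk.vtkAxesActor()
    widget = vtk.vtkOrientationMarkerWidget()
    widget.SetOrientationMarker(axes_actor)
    widget.SetInteractor(iren)
    widget.SetViewport(0.0, 0.0, 0.2, 0.2)   # lower-left 20% of the render window
    widget.EnabledOn()
    widget.InteractiveOn()
    return widget   # keep the returned widget alive; it is not owned by the interactor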
try: paraview.simple except: from paraview.simple import * def RequestDataDescription(datadescription): "Callback to populate the request for current timestep" if datadescription.GetForceOutput() == True: for i in range(datadescription.GetNumberOfInputDescriptions()): datadescription.GetInputDescription(i).AllFieldsOn() datadescription.GetInputDescription(i).GenerateMeshOn() return timestep = datadescription.GetTimeStep() input_name = 'input' if (timestep % 1 == 0) : datadescription.GetInputDescriptionByName(input_name).AllFieldsOn() datadescription.GetInputDescriptionByName(input_name).GenerateMeshOn() else: datadescription.GetInputDescriptionByName(input_name).AllFieldsOff() datadescription.GetInputDescriptionByName(input_name).GenerateMeshOff() def DoCoProcessing(datadescription): "Callback to do co-processing for current timestep" cp_writers = [] cp_views = [] timestep = datadescription.GetTimeStep() RenderView1 = CreateView( CreateRenderView, "np-2_%t.png", 1, 0, 1, cp_views ) RenderView1.LightSpecularColor = [1.0, 1.0, 1.0] RenderView1.KeyLightAzimuth = 10.0 RenderView1.UseTexturedBackground = 0 RenderView1.UseLight = 1 RenderView1.CameraPosition = [217.11071921506289, 369.10961277586267, 116.87368903495434] RenderView1.FillLightKFRatio = 3.0 RenderView1.Background2 = [0.0, 0.0, 0.16470588235294117] RenderView1.FillLightAzimuth = -10.0 RenderView1.LODResolution = 50.0 RenderView1.BackgroundTexture = [] RenderView1.InteractionMode = '3D' RenderView1.StencilCapable = 1 RenderView1.LightIntensity = 1.0 RenderView1.CameraFocalPoint = [23.825137927181057, 0.0099511400572356041, 33.274249608756051] RenderView1.ImageReductionFactor = 2 RenderView1.CameraViewAngle = 30.0 RenderView1.CameraParallelScale = 109.98522628062371 RenderView1.EyeAngle = 2.0 RenderView1.HeadLightKHRatio = 3.0 RenderView1.StereoRender = 0 RenderView1.KeyLightIntensity = 0.75 RenderView1.BackLightAzimuth = 110.0 RenderView1.OrientationAxesInteractivity = 0 RenderView1.UseInteractiveRenderingForSceenshots = 0 RenderView1.UseOffscreenRendering = 0 RenderView1.Background = [0.0, 0.0, 0.0] RenderView1.UseOffscreenRenderingForScreenshots = 0 RenderView1.NonInteractiveRenderDelay = 2 RenderView1.CenterOfRotation = [63.5, 63.5, 63.5] RenderView1.CameraParallelProjection = 0 RenderView1.CompressorConfig = 'vtkSquirtCompressor 0 3' RenderView1.HeadLightWarmth = 0.5 RenderView1.MaximumNumberOfPeels = 4 RenderView1.LightDiffuseColor = [1.0, 1.0, 1.0] RenderView1.StereoType = 'Red-Blue' RenderView1.DepthPeeling = 1 RenderView1.BackLightKBRatio = 3.5 RenderView1.StereoCapableWindow = 1 RenderView1.CameraViewUp = [-0.10216114394387266, -0.16855412123856731, 0.98038390892628069] RenderView1.LightType = 'HeadLight' RenderView1.LightAmbientColor = [1.0, 1.0, 1.0] RenderView1.RemoteRenderThreshold = 3.0 RenderView1.CacheKey = 0.0 RenderView1.UseCache = 0 RenderView1.KeyLightElevation = 50.0 RenderView1.CenterAxesVisibility = 0 RenderView1.MaintainLuminance = 0 RenderView1.StillRenderImageReductionFactor = 1 RenderView1.BackLightWarmth = 0.5 RenderView1.FillLightElevation = -75.0 RenderView1.MultiSamples = 0 RenderView1.FillLightWarmth = 0.40000000000000002 RenderView1.AlphaBitPlanes = 1 RenderView1.LightSwitch = 1 RenderView1.OrientationAxesVisibility = 1 RenderView1.CameraClippingRange = [150.26173427523366, 592.80491988083509] RenderView1.BackLightElevation = 0.0 RenderView1.ViewTime = 0.0 RenderView1.OrientationAxesOutlineColor = [1.0, 1.0, 1.0] RenderView1.LODThreshold = 18.199999999999999 RenderView1.CollectGeometryThreshold = 
100.0 RenderView1.UseGradientBackground = 0 RenderView1.KeyLightWarmth = 0.59999999999999998 RenderView1.OrientationAxesLabelColor = [1.0, 1.0, 1.0] test2_0_0_vti = CreateProducer( datadescription, "input" ) Calculator1 = Calculator( guiName="Calculator1", Function='mag(realtempx*iHat+realtempy*jHat+realtempz*kHat)', ReplacementValue=0.0, ResultArrayName='Result', ReplaceInvalidResults=1, AttributeMode='point_data', CoordinateResults=0 ) a1_realtempx_PiecewiseFunction = CreatePiecewiseFunction( Points=[0.0, 0.0, 0.5, 0.0, 1.0, 1.0, 0.5, 0.0] ) a1_realtempy_PiecewiseFunction = CreatePiecewiseFunction( Points=[0.0, 0.0, 0.5, 0.0, 1.0, 1.0, 0.5, 0.0] ) a1_realtempz_PiecewiseFunction = CreatePiecewiseFunction( Points=[0.0, 0.0, 0.5, 0.0, 1.0, 1.0, 0.5, 0.0] ) a1_Result_PiecewiseFunction = CreatePiecewiseFunction( Points=[5.1558018820268853e-15, 0.58799999952316284, 0.5, 0.60829490423202515, 0.76975244283676147, 0.0, 0.5, 0.0, 0.86602500000000004, 0.0, 0.5, 0.0, 0.95262794416286289, 0.0, 0.5, 0.39072847366333008, 1.7320507764816284, 0.71999996900558472, 0.5, 0.0] ) a1_realtempx_PVLookupTable = GetLookupTableForArray( "realtempx", 1, Discretize=1, RGBPoints=[-1.5000000000000338, 0.23000000000000001, 0.29899999999999999, 0.754, 1.5000000000000338, 0.70599999999999996, 0.016, 0.14999999999999999], UseLogScale=0, VectorComponent=0, NanColor=[0.25, 0.0, 0.0], NumberOfTableValues=256, ColorSpace='Diverging', VectorMode='Magnitude', HSVWrap=0, ScalarRangeInitialized=1.0, LockScalarRange=0 ) a1_realtempy_PVLookupTable = GetLookupTableForArray( "realtempy", 1, Discretize=1, RGBPoints=[-1.5000000000000393, 0.23000000000000001, 0.29899999999999999, 0.754, 1.5000000000000409, 0.70599999999999996, 0.016, 0.14999999999999999], UseLogScale=0, VectorComponent=0, NanColor=[0.25, 0.0, 0.0], NumberOfTableValues=256, ColorSpace='Diverging', VectorMode='Magnitude', HSVWrap=0, ScalarRangeInitialized=1.0, LockScalarRange=0 ) a1_realtempz_PVLookupTable = GetLookupTableForArray( "realtempz", 1, Discretize=1, RGBPoints=[-1.7320508075688363, 0.23000000000000001, 0.29899999999999999, 0.754, 1.7320508075688372, 0.70599999999999996, 0.016, 0.14999999999999999], UseLogScale=0, VectorComponent=0, NanColor=[0.25, 0.0, 0.0], NumberOfTableValues=256, ColorSpace='Diverging', VectorMode='Magnitude', HSVWrap=0, ScalarRangeInitialized=1.0, LockScalarRange=0 ) a1_Result_PVLookupTable = GetLookupTableForArray( "Result", 1, Discretize=1, RGBPoints=[5.1558017812958882e-15, 0.0, 1.0, 1.0, 0.77942286340597955, 0.015686274509803921, 0.0, 0.92156862745098034, 0.86602540378442117, 0.22745098039215686, 0.0, 0.38823529411764707, 0.95262794416286289, 0.88627450980392153, 0.0, 0.058823529411764705, 1.7320508075688372, 1.0, 1.0, 0.0], UseLogScale=0, VectorComponent=0, NanColor=[1.0, 1.0, 0.0], NumberOfTableValues=256, ColorSpace='RGB', VectorMode='Magnitude', HSVWrap=0, ScalarRangeInitialized=1.0, LockScalarRange=0 ) SetActiveSource(test2_0_0_vti) DataRepresentation1 = Show() DataRepresentation1.CubeAxesZAxisVisibility = 1 DataRepresentation1.SelectionPointLabelColor = [0.5, 0.5, 0.5] DataRepresentation1.SelectionPointFieldDataArrayName = 'realtempx' DataRepresentation1.SuppressLOD = 0 DataRepresentation1.CubeAxesXGridLines = 0 DataRepresentation1.CubeAxesYAxisTickVisibility = 1 DataRepresentation1.CubeAxesColor = [1.0, 1.0, 1.0] DataRepresentation1.Position = [0.0, 0.0, 0.0] DataRepresentation1.BackfaceRepresentation = 'Follow Frontface' DataRepresentation1.SelectionOpacity = 1.0 DataRepresentation1.SelectionPointLabelShadow = 0 
DataRepresentation1.CubeAxesYGridLines = 0 DataRepresentation1.CubeAxesZAxisRange = [0.0, 1.0] DataRepresentation1.OrientationMode = 'Direction' DataRepresentation1.Source.TipResolution = 6 DataRepresentation1.ScaleMode = 'No Data Scaling Off' DataRepresentation1.Diffuse = 1.0 DataRepresentation1.SelectionUseOutline = 0 DataRepresentation1.CubeAxesZTitle = 'Z-Axis' DataRepresentation1.Specular = 0.10000000000000001 DataRepresentation1.SelectionVisibility = 1 DataRepresentation1.InterpolateScalarsBeforeMapping = 1 DataRepresentation1.CubeAxesZAxisTickVisibility = 1 DataRepresentation1.Origin = [0.0, 0.0, 0.0] DataRepresentation1.CubeAxesVisibility = 0 DataRepresentation1.Scale = [1.0, 1.0, 1.0] DataRepresentation1.SelectionCellLabelJustification = 'Left' DataRepresentation1.DiffuseColor = [1.0, 1.0, 1.0] DataRepresentation1.Shade = 0 DataRepresentation1.SelectionCellLabelOpacity = 1.0 DataRepresentation1.CubeAxesInertia = 1 DataRepresentation1.Source = "Arrow" DataRepresentation1.Source.Invert = 0 DataRepresentation1.Masking = 0 DataRepresentation1.Opacity = 1.0 DataRepresentation1.LineWidth = 1.0 DataRepresentation1.MeshVisibility = 0 DataRepresentation1.Visibility = 0 DataRepresentation1.SelectionCellLabelFontSize = 18 DataRepresentation1.CubeAxesCornerOffset = 0.0 DataRepresentation1.SelectionPointLabelJustification = 'Left' DataRepresentation1.SelectionPointLabelVisibility = 0 DataRepresentation1.SelectOrientationVectors = '' DataRepresentation1.CubeAxesTickLocation = 'Inside' DataRepresentation1.BackfaceDiffuseColor = [1.0, 1.0, 1.0] DataRepresentation1.CubeAxesYAxisVisibility = 1 DataRepresentation1.SelectionPointLabelFontFamily = 'Arial' DataRepresentation1.Source.ShaftResolution = 6 DataRepresentation1.CubeAxesFlyMode = 'Closest Triad' DataRepresentation1.SelectScaleArray = '' DataRepresentation1.CubeAxesYTitle = 'Y-Axis' DataRepresentation1.ColorAttributeType = 'POINT_DATA' DataRepresentation1.SpecularPower = 100.0 DataRepresentation1.Texture = [] DataRepresentation1.SelectionCellLabelShadow = 0 DataRepresentation1.AmbientColor = [1.0, 1.0, 1.0] DataRepresentation1.MapScalars = 1 DataRepresentation1.PointSize = 2.0 DataRepresentation1.Source.TipLength = 0.34999999999999998 DataRepresentation1.SelectionCellLabelFormat = '' DataRepresentation1.Scaling = 0 DataRepresentation1.StaticMode = 0 DataRepresentation1.SelectionCellLabelColor = [0.0, 1.0, 0.0] DataRepresentation1.SliceMode = 'XY Plane' DataRepresentation1.Source.TipRadius = 0.10000000000000001 DataRepresentation1.EdgeColor = [0.0, 0.0, 0.50000762951094835] DataRepresentation1.CubeAxesXAxisTickVisibility = 1 DataRepresentation1.SelectionCellLabelVisibility = 0 DataRepresentation1.NonlinearSubdivisionLevel = 1 DataRepresentation1.CubeAxesXAxisRange = [0.0, 1.0] DataRepresentation1.Representation = 'Surface' DataRepresentation1.CubeAxesYAxisRange = [0.0, 1.0] DataRepresentation1.CustomBounds = [0.0, 1.0, 0.0, 1.0, 0.0, 1.0] DataRepresentation1.Orientation = [0.0, 0.0, 0.0] DataRepresentation1.CubeAxesEnableCustomAxisRange = 0 DataRepresentation1.CubeAxesXTitle = 'X-Axis' DataRepresentation1.ScalarOpacityUnitDistance = 1.7320508075688776 DataRepresentation1.BackfaceOpacity = 1.0 DataRepresentation1.SelectionPointLabelFontSize = 18 DataRepresentation1.SelectionCellFieldDataArrayName = 'vtkOriginalCellIds' DataRepresentation1.SelectionColor = [1.0, 0.0, 1.0] DataRepresentation1.Ambient = 0.0 DataRepresentation1.VolumeRenderingMode = 'Smart' DataRepresentation1.CubeAxesXAxisMinorTickVisibility = 1 DataRepresentation1.ScaleFactor = 
12.700000000000001 DataRepresentation1.BackfaceAmbientColor = [1.0, 1.0, 1.0] DataRepresentation1.Slice = 0 DataRepresentation1.Source.ShaftRadius = 0.029999999999999999 DataRepresentation1.ScalarOpacityFunction = a1_realtempz_PiecewiseFunction DataRepresentation1.SelectMaskArray = '' DataRepresentation1.SelectionLineWidth = 2.0 DataRepresentation1.CubeAxesZAxisMinorTickVisibility = 1 DataRepresentation1.CubeAxesXAxisVisibility = 1 DataRepresentation1.Interpolation = 'Gouraud' DataRepresentation1.SelectionCellLabelFontFamily = 'Arial' DataRepresentation1.SelectionCellLabelItalic = 0 DataRepresentation1.CubeAxesYAxisMinorTickVisibility = 1 DataRepresentation1.InterpolationType = 'Linear' DataRepresentation1.CubeAxesZGridLines = 0 DataRepresentation1.SelectionPointLabelFormat = '' DataRepresentation1.SelectionPointLabelOpacity = 1.0 DataRepresentation1.Pickable = 1 DataRepresentation1.CustomBoundsActive = [0, 0, 0] DataRepresentation1.SelectionRepresentation = 'Wireframe' DataRepresentation1.SelectionPointLabelBold = 0 DataRepresentation1.ColorArrayName = 'realtempz' DataRepresentation1.SelectionPointLabelItalic = 0 DataRepresentation1.AllowSpecularHighlightingWithScalarColoring = 0 DataRepresentation1.SpecularColor = [1.0, 1.0, 1.0] DataRepresentation1.LookupTable = a1_realtempz_PVLookupTable DataRepresentation1.SelectionPointSize = 5.0 DataRepresentation1.SelectionCellLabelBold = 0 DataRepresentation1.Orient = 0 SetActiveSource(Calculator1) DataRepresentation2 = Show() DataRepresentation2.CubeAxesZAxisVisibility = 1 DataRepresentation2.SelectionPointLabelColor = [0.5, 0.5, 0.5] DataRepresentation2.SelectionPointFieldDataArrayName = 'Result' DataRepresentation2.SuppressLOD = 0 DataRepresentation2.CubeAxesXGridLines = 0 DataRepresentation2.CubeAxesYAxisTickVisibility = 1 DataRepresentation2.CubeAxesColor = [1.0, 1.0, 1.0] DataRepresentation2.Position = [0.0, 0.0, 0.0] DataRepresentation2.BackfaceRepresentation = 'Follow Frontface' DataRepresentation2.SelectionOpacity = 1.0 DataRepresentation2.SelectionPointLabelShadow = 0 DataRepresentation2.CubeAxesYGridLines = 0 DataRepresentation2.CubeAxesZAxisRange = [0.0, 1.0] DataRepresentation2.OrientationMode = 'Direction' DataRepresentation2.Source.TipResolution = 6 DataRepresentation2.ScaleMode = 'No Data Scaling Off' DataRepresentation2.Diffuse = 1.0 DataRepresentation2.SelectionUseOutline = 0 DataRepresentation2.CubeAxesZTitle = 'Z-Axis' DataRepresentation2.Specular = 0.10000000000000001 DataRepresentation2.SelectionVisibility = 1 DataRepresentation2.InterpolateScalarsBeforeMapping = 1 DataRepresentation2.CubeAxesZAxisTickVisibility = 1 DataRepresentation2.Origin = [0.0, 0.0, 0.0] DataRepresentation2.CubeAxesVisibility = 0 DataRepresentation2.Scale = [1.0, 1.0, 1.0] DataRepresentation2.SelectionCellLabelJustification = 'Left' DataRepresentation2.DiffuseColor = [1.0, 1.0, 1.0] DataRepresentation2.Shade = 0 DataRepresentation2.SelectionCellLabelOpacity = 1.0 DataRepresentation2.CubeAxesInertia = 1 DataRepresentation2.Source = "Arrow" DataRepresentation2.Source.Invert = 0 DataRepresentation2.Masking = 0 DataRepresentation2.Opacity = 1.0 DataRepresentation2.LineWidth = 1.0 DataRepresentation2.MeshVisibility = 0 DataRepresentation2.Visibility = 1 DataRepresentation2.SelectionCellLabelFontSize = 18 DataRepresentation2.CubeAxesCornerOffset = 0.0 DataRepresentation2.SelectionPointLabelJustification = 'Left' DataRepresentation2.SelectionPointLabelVisibility = 0 DataRepresentation2.SelectOrientationVectors = '' DataRepresentation2.CubeAxesTickLocation = 
'Inside' DataRepresentation2.BackfaceDiffuseColor = [1.0, 1.0, 1.0] DataRepresentation2.CubeAxesYAxisVisibility = 1 DataRepresentation2.SelectionPointLabelFontFamily = 'Arial' DataRepresentation2.Source.ShaftResolution = 6 DataRepresentation2.CubeAxesFlyMode = 'Closest Triad' DataRepresentation2.SelectScaleArray = '' DataRepresentation2.CubeAxesYTitle = 'Y-Axis' DataRepresentation2.ColorAttributeType = 'POINT_DATA' DataRepresentation2.SpecularPower = 100.0 DataRepresentation2.Texture = [] DataRepresentation2.SelectionCellLabelShadow = 0 DataRepresentation2.AmbientColor = [1.0, 1.0, 1.0] DataRepresentation2.MapScalars = 1 DataRepresentation2.PointSize = 2.0 DataRepresentation2.Source.TipLength = 0.34999999999999998 DataRepresentation2.SelectionCellLabelFormat = '' DataRepresentation2.Scaling = 0 DataRepresentation2.StaticMode = 0 DataRepresentation2.SelectionCellLabelColor = [0.0, 1.0, 0.0] DataRepresentation2.SliceMode = 'XY Plane' DataRepresentation2.Source.TipRadius = 0.10000000000000001 DataRepresentation2.EdgeColor = [0.0, 0.0, 0.50000762951094835] DataRepresentation2.CubeAxesXAxisTickVisibility = 1 DataRepresentation2.SelectionCellLabelVisibility = 0 DataRepresentation2.NonlinearSubdivisionLevel = 1 DataRepresentation2.CubeAxesXAxisRange = [0.0, 1.0] DataRepresentation2.Representation = 'Volume' DataRepresentation2.CubeAxesYAxisRange = [0.0, 1.0] DataRepresentation2.CustomBounds = [0.0, 1.0, 0.0, 1.0, 0.0, 1.0] DataRepresentation2.Orientation = [0.0, 0.0, 0.0] DataRepresentation2.CubeAxesEnableCustomAxisRange = 0 DataRepresentation2.CubeAxesXTitle = 'X-Axis' DataRepresentation2.ScalarOpacityUnitDistance = 1.7320508075688776 DataRepresentation2.BackfaceOpacity = 1.0 DataRepresentation2.SelectionPointLabelFontSize = 18 DataRepresentation2.SelectionCellFieldDataArrayName = 'vtkOriginalCellIds' DataRepresentation2.SelectionColor = [1.0, 0.0, 1.0] DataRepresentation2.Ambient = 0.0 DataRepresentation2.VolumeRenderingMode = 'Smart' DataRepresentation2.CubeAxesXAxisMinorTickVisibility = 1 DataRepresentation2.ScaleFactor = 12.700000000000001 DataRepresentation2.BackfaceAmbientColor = [1.0, 1.0, 1.0] DataRepresentation2.Slice = 0 DataRepresentation2.Source.ShaftRadius = 0.029999999999999999 DataRepresentation2.ScalarOpacityFunction = a1_Result_PiecewiseFunction DataRepresentation2.SelectMaskArray = '' DataRepresentation2.SelectionLineWidth = 2.0 DataRepresentation2.CubeAxesZAxisMinorTickVisibility = 1 DataRepresentation2.CubeAxesXAxisVisibility = 1 DataRepresentation2.Interpolation = 'Gouraud' DataRepresentation2.SelectionCellLabelFontFamily = 'Arial' DataRepresentation2.SelectionCellLabelItalic = 0 DataRepresentation2.CubeAxesYAxisMinorTickVisibility = 1 DataRepresentation2.InterpolationType = 'Linear' DataRepresentation2.CubeAxesZGridLines = 0 DataRepresentation2.SelectionPointLabelFormat = '' DataRepresentation2.SelectionPointLabelOpacity = 1.0 DataRepresentation2.Pickable = 1 DataRepresentation2.CustomBoundsActive = [0, 0, 0] DataRepresentation2.SelectionRepresentation = 'Wireframe' DataRepresentation2.SelectionPointLabelBold = 0 DataRepresentation2.ColorArrayName = 'Result' DataRepresentation2.SelectionPointLabelItalic = 0 DataRepresentation2.AllowSpecularHighlightingWithScalarColoring = 0 DataRepresentation2.SpecularColor = [1.0, 1.0, 1.0] DataRepresentation2.LookupTable = a1_Result_PVLookupTable DataRepresentation2.SelectionPointSize = 5.0 DataRepresentation2.SelectionCellLabelBold = 0 DataRepresentation2.Orient = 0 for writer in cp_writers: if timestep % writer.cpFrequency == 0 or 
datadescription.GetForceOutput() == True: writer.FileName = writer.cpFileName.replace("%t", str(timestep)) writer.UpdatePipeline() if False : # rescale data range import math for view in cp_views: if timestep % view.cpFrequency == 0 or datadescription.GetForceOutput() == True: reps = view.Representations for rep in reps: if hasattr(rep, 'Visibility') and rep.Visibility == 1 and hasattr(rep, 'MapScalars') and rep.MapScalars != '': input = rep.Input input.UpdatePipeline() #make sure range is up-to-date lut = rep.LookupTable if lut == None: continue if rep.ColorAttributeType == 'POINT_DATA': datainformation = input.GetPointDataInformation() elif rep.ColorAttributeType == 'CELL_DATA': datainformation = input.GetCellDataInformation() else: print 'something strange with color attribute type', rep.ColorAttributeType if lut.VectorMode != 'Magnitude' or datainformation.GetArray(rep.ColorArrayName).GetNumberOfComponents() == 1: datarange = datainformation.GetArray(rep.ColorArrayName).GetRange(lut.VectorComponent) else: datarange = [0,0] for i in range(datainformation.GetArray(rep.ColorArrayName).GetNumberOfComponents()): for j in range(2): datarange[j] += datainformation.GetArray(rep.ColorArrayName).GetRange(i)[j]*datainformation.GetArray(rep.ColorArrayName).GetRange(i)[j] datarange[0] = math.sqrt(datarange[0]) datarange[1] = math.sqrt(datarange[1]) rgbpoints = lut.RGBPoints.GetData() numpts = len(rgbpoints)/4 minvalue = min(datarange[0], rgbpoints[0]) maxvalue = max(datarange[1], rgbpoints[(numpts-1)*4]) if minvalue != rgbpoints[0] or maxvalue != rgbpoints[(numpts-1)*4]: # rescale all of the points oldrange = rgbpoints[(numpts-1)*4] - rgbpoints[0] newrange = maxvalue - minvalue newrgbpoints = list(rgbpoints) for v in range(numpts): newrgbpoints[v*4] = minvalue+(rgbpoints[v*4] - rgbpoints[0])*newrange/oldrange lut.RGBPoints.SetData(newrgbpoints) for view in cp_views: if timestep % view.cpFrequency == 0 or datadescription.GetForceOutput() == True: fname = view.cpFileName fname = fname.replace("%t", str(timestep)) if view.cpFitToScreen != 0: if view.IsA("vtkSMRenderViewProxy") == True: view.ResetCamera() elif view.IsA("vtkSMContextViewProxy") == True: view.ResetDisplay() else: print ' do not know what to do with a ', view.GetClassName() WriteImage(fname, view, Magnification=view.cpMagnification) # explicitly delete the proxies -- we do it this way to avoid problems with prototypes tobedeleted = GetNextProxyToDelete() while tobedeleted != None: Delete(tobedeleted) tobedeleted = GetNextProxyToDelete() def GetNextProxyToDelete(): proxyiterator = servermanager.ProxyIterator() for proxy in proxyiterator: group = proxyiterator.GetGroup() if group.find("prototypes") != -1: continue if group != 'timekeeper' and group.find("pq_helper_proxies") == -1 : return proxy return None def CreateProducer(datadescription, gridname): "Creates a producer proxy for the grid" if not datadescription.GetInputDescriptionByName(gridname): raise RuntimeError, "Simulation input name '%s' does not exist" % gridname grid = datadescription.GetInputDescriptionByName(gridname).GetGrid() producer = PVTrivialProducer() producer.GetClientSideObject().SetOutput(grid) if grid.IsA("vtkImageData") == True or grid.IsA("vtkStructuredGrid") == True or grid.IsA("vtkRectilinearGrid") == True: extent = datadescription.GetInputDescriptionByName(gridname).GetWholeExtent() producer.WholeExtent= [ extent[0], extent[1], extent[2], extent[3], extent[4], extent[5] ] producer.UpdatePipeline() return producer def CreateWriter(proxy_ctor, filename, freq, 
cp_writers): writer = proxy_ctor() writer.FileName = filename writer.add_attribute("cpFrequency", freq) writer.add_attribute("cpFileName", filename) cp_writers.append(writer) return writer def CreateView(proxy_ctor, filename, freq, fittoscreen, magnification, cp_views): view = proxy_ctor() view.add_attribute("cpFileName", filename) view.add_attribute("cpFrequency", freq) view.add_attribute("cpFileName", filename) view.add_attribute("cpFitToScreen", fittoscreen) view.add_attribute("cpMagnification", magnification) cp_views.append(view) return view
openmichigan/PSNM
NavierStokes/Programs/NavierStokes3dFortranMPIParaView/node-based/pipeline_images.py
Python
bsd-2-clause
24,636
[ "ParaView" ]
f579e542030ed17153c4d277f21e5ea3644a1e41ad69e88c84566b2828e8fbb0
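The rescaling branch in the pipeline script above remaps a lookup table's RGB points onto a widened data range before rendering. A minimal standalone paraphrase of that remapping, assuming the same flat [value, r, g, b, ...] layout; function and variable names here are illustrative, not from the script:

def rescale_rgb_points(rgbpoints, datarange):
    # rgbpoints is a flat list: [value, r, g, b, value, r, g, b, ...]
    numpts = len(rgbpoints) // 4
    old_min = rgbpoints[0]
    old_max = rgbpoints[(numpts - 1) * 4]
    new_min = min(datarange[0], old_min)
    new_max = max(datarange[1], old_max)
    if new_min == old_min and new_max == old_max:
        return rgbpoints  # table already covers the data
    old_range = old_max - old_min
    new_range = new_max - new_min
    rescaled = list(rgbpoints)
    for v in range(numpts):
        # same linear remap as the script: old_min -> new_min, old_max -> new_max
        rescaled[v * 4] = new_min + (rgbpoints[v * 4] - old_min) * new_range / old_range
    return rescaled

# a two-point table spanning [0, 1] widened to cover data reaching 4.0
print(rescale_rgb_points([0.0, 0, 0, 1, 1.0, 1, 0, 0], [0.0, 4.0]))

The original additionally folds multi-component arrays into a magnitude before computing datarange; that step is omitted in this sketch.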
#By /u/TachyonNZ # Van Doorn by /u/net_goblin #TEXT-COM, V0.1 import random as rd import time as tm ######################################################################## # constants # ######################################################################## SEX_FEMALE = 'f' SEX_MALE = 'm' XCOM_MALE_FIRSTNAME = [ 'Bob', 'Grant', 'Dylan', 'Fletcher', 'Daniel', 'Kav', 'Jackson', 'Alex', 'Tim', 'Peter', 'Jeb', 'Bill', 'Rune', 'Jeff', 'Lee', 'Iago', 'Dan', 'John', 'Isaac', 'Pedro', 'Juan', 'Rico', 'David', 'Andrew', 'Wilson', 'James', 'Richard', 'Rocky', 'Adam', 'Bear', 'Paul', 'Guy', 'Sid', 'Murray' ] XCOM_FEMALE_FIRSTNAME = [ 'Becks', 'Kate', 'Annetta', 'Violet', 'Kim', 'Iko', 'Megan', 'Shelly', 'Kim', 'Nina', 'Olga', 'Katherina', 'Anya', 'Suzie', 'Rebecca', 'Joanna', 'Patricia', 'Maria', 'Judith', 'Carmen', 'Isabel', 'Ana', 'Laura', 'Sara', 'Emma', 'Rachael', 'Ingrid', 'Nicole', 'Chelsea', 'Chell' ] XCOM_LASTNAME = [ 'Meier', 'Durant', 'Lee', 'Kerman', 'Nilsen', 'Fox', 'Vern Dern', 'Beagle', 'Green', 'Wolf', 'Grills', 'Red', 'Taa', 'Tank', 'Beardly', 'Sherman', 'Herman', 'Nerman', 'Nuton', 'Peterson', 'Clarke', 'French', 'Clark', 'Hayes', 'Munroe', ] # <http://www.ufopaedia.org/index.php?title=Nicknames_%28EU2012%29> XCOM_UNISEX_NICKNAMES_ASSAULT = [ 'All Day', 'Android', 'Blitz', 'Bonzai', 'Boomer', 'Caper', 'Chops', 'Cobra', 'Coney', 'Close Range', 'D.O.A.', 'DJ', 'Desperado', 'Devil Dog', 'Dice', 'Double Down', 'Geronimo', 'Gonzo', 'Gunner', 'Hardcore', 'Hazard', 'Loco', 'Mad Dog', 'Mustang', 'Pitbull', 'Psycho', 'Rabid', 'Rhino', 'Red Fox', 'Septic', 'Sheriff', 'Shotsy', 'Smash', 'Socks', 'Spitfire', 'Tombstone', 'Trips', 'Twitch', 'Vandal', 'Wardog', 'Werewolf', 'Wildchild', 'Wolverine', 'Zilch', 'Zap' ] XCOM_MALE_NICKNAMES_ASSAULT = [ 'Bull', 'Cash', 'Cowboy', 'Duke', 'Mad Man', 'Nitro', 'Rascal', 'Spike', 'Viking' ] XCOM_FEMALE_NICKNAMES_ASSAULT = [ 'All In', 'Freestyle', 'Wednesday', ] XCOM_UNISEX_NICKNAMES_HEAVY = [ '98', 'Arcade', 'Boom Boom', 'Brick', 'Casino', 'Collateral', 'Crash', 'Crater', 'Diesel', 'Disco', 'Doomsday', 'Dozer', 'Flash', 'Hulk', 'Leaded', 'Lights Out', 'Nova', 'Nuke', 'Painbringer', 'Prototype', 'Richter', 'Road Block', 'Seismic', 'Sledge', 'Smokey', 'Strobe', 'Terra', 'Tectonic', 'Thunder' ] XCOM_MALE_NICKNAMES_HEAVY = [ 'Buster', 'Kingpin', 'Kong', 'Mack', 'Moose', 'Nero', 'Odin', 'Papa Bear', 'Tank', 'Yeti' ] XCOM_FEMALE_NICKNAMES_HEAVY = [ 'Big Momma', 'Mama Bear' ] XCOM_UNISEX_NICKNAMES_SNIPER = [ 'Alpha', 'Checkmate', 'Claymore', 'Cyclops', 'Deadbolt', 'Demon', 'Drifter', 'Echo', 'Emo', 'Enigma', 'Garrote', 'Ghost', 'Hex', 'Ice', 'Lockdown', 'Long Shot', 'Longbow', 'Low Rider', 'Nightmare', 'Nix', 'Omega', 'Shadow', 'Snapsight' 'Snake Eyes', 'Solo', 'Specter', 'Spider', 'Stalker', 'Vampire', 'Xeno', 'Zero', 'Zulu' ] XCOM_MALE_NICKNAMES_SNIPER = [ 'Godfather', 'Loki', 'Pharaoh', 'Ranger', 'Slim', 'Walker', 'Warlock', 'Zed', 'Zeus' ] XCOM_FEMALE_NICKNAMES_SNIPER = [ 'Athena', 'Baroness', 'Black Widow', 'Lady Grey', 'Raven', 'Witchy' ] XCOM_UNISEX_NICKNAMES_SUPPORT = [ 'Angel', 'Axle', 'Bonus', 'Cargo', 'Carrier', 'Combo', 'Congo', 'Doc', 'Fast Lane', 'Missionary', 'Pox', 'Prophet', 'Rogue', 'Saturn', 'Scarecrow', 'Scotch', 'Sentinel', 'Shield', 'Skinner', 'Smokes', 'Stacks', 'Strings', 'Vita', 'Voodoo', 'Whiskey' ] XCOM_MALE_NICKNAMES_SUPPORT = [ 'Ace', 'Atlas', 'Bishop', 'Deacon', 'Freud', 'Hitch', 'Magic Man', 'Mr. 
Clean', 'Padre', 'Pops', 'Romeo', 'Santa' ] XCOM_FEMALE_NICKNAMES_SUPPORT = [ 'Cookie', 'Gypsy', 'Kitty', 'Pixie', 'Vixen' ] XCOM_MALE_NICKNAMES_MEC = [ 'Big Daddy', 'Bolts', 'Caliban', 'Chip', 'Clank', 'Data', 'Deep Teal', 'Forklift', 'Golem', 'Marvin', 'Murphy', 'Olivaw', 'Ratchet', 'Robby', 'Ryle', 'Stick', 'Sputnik', 'Talos', 'Tik-Tok', 'Tin Can', 'Vulcan' ] XCOM_FEMALE_NICKNAMES_MEC = [ 'Beeps', 'Big Mommy', 'Freya', 'Friday', 'Gadget', 'Gizmo', 'Hadaly', 'Iris', 'Maya', 'Molly', 'Number Six', 'Orianna', 'Rosie', 'Vanessa', 'Vesta', ] BRADFORD_RETORTS = [ 'CLOSE RANGE?!', 'WHAT HAVE YOU DONE?!', 'COMMANDER!', "WE'RE PICKING UP MULTIPLE CONTACTS!", 'CURRENT ENEMY STATUS AT THE SITE IS UNKNOWN!' ] VAN_DOORN_RETORTS = [ "I'm the Ops team!", "Only fair if I have all the fun.", "Get down there!", "Come on! I won't go down without a fight.", # "Thank God you're here. I'm still breathing, but I can't say the same for a lot of my boys. Let's get out of here before any more of those things show up.", "I don't know what outfit you're from, but I haven't seen gear like that before.", "Come on!", "I won't go down without a fight!", "It's looking bad...for you!", "I owe you one." ] #Soldier names VAN_DOORN = "Van Doorn" sectoidfName = ["Glip","Gleep","Glup","Glorp","Gloop","Glop","Glump","Glerp","Glurp","Glarp"] sectoidlName = ["Glop","Glarp","Glupple","Glorple","Gloopley","Glopperson","Glep","Glommery"] thinfName = ["T.","P.","H.","Z.","K.","A.","F.","X.","P.","L.","W.","S.","V."] thinlName = ["Hinman","Alium","Van Doom","Lmao","Notanalien","Anderson","Smith","Human","Clark","Warzonager","Iper","Thinmint","Mint","Spear","Infiltrator"] floaterfName = ["Dirk","Ferdinand","Frederick","Algernon","Angus","King","Cornelius","Francis","Christopher","Gustav","Richard","Ivan","Yuri","Vlad"] floaterlName = ["Meyer","Mleadeer","Peters","Prince","Vos","Wolf","Schwarz","Frank","Miller","Anderssen","Slavolav","Stroganov","Costarov"] #Aliem names mutonfName = ["Pooter","Dave","Holk","Billy","Tim","Jeffery","Leeroy","Jimmy","Hank"] mutonlName = ["Von Mooter","The Muton","Hugan","Jankins","Jefferson","Higgins","Jenkins"] RANK_ROOKIE = 0 RANK_SQUADDIE = 1 RANK_CORPORAL = 2 RANK_SERGEANT = 3 RANK_LIEUTENANT = 4 RANK_CAPTAIN = 5 RANK_MAJOR = 6 RANK_COLONEL = 7 RANK_GENERAL = 8 # only for Van Doorn RANK_CENTRAL_OFFICER = 9 # only for Bradford XCOM_RANKS = [ 'Rookie', 'Squaddie', 'Corporal', 'Sergeant', 'Lieutenant', 'Captain', 'Major', 'Colonel', # Special ranks 'General', 'Central Officer' ] ALIEN_RANKS = [ "Peon", "Guard", "Soldier", "Trooper", "Warrior", "Officer", "Commander", "Elite", "Uber" ] #when a soldier shoots at an alien retort = ("Suck on this!","Eat this!","Pick on someone your own size!","Take this!","Welcome to Earth!","AAGGHH!!!","HYAAA!") #human weapons + items drops = {0:"Frag Grenade",1:"Nano-Serum",2:"Alien Grenade",3:"Light Plasma Rifle",4:"Plasma Rifle"} #aliem weapons, items and powers apowers = {0: "Mindfray",1: "Psi Boost"} ######### # Cover # ######### COVER_FLANKED = -20 COVER_NONE = 0 COVER_FULL = 40 COVER_HALF = 20 ####### # Map # ####### NUMBER_OF_ROOMS = 31 ######################################################################## # Legacy stuff to be removed # ######################################################################## # to be removed pod = [] room = [[]] roomNo = -0 def p(spk,q): #print with speaker and possibly delay if spk != 0: print(str(spk)+': "'+str(q)+'"') else: print(q) #s(len(q)/50) #if uncommented, this will add delay to all instances of 'print' from this def def 
s(t): tm.sleep(t) #go to sleep for t seconds ######################################################################## # weapon classes # ######################################################################## class Weapon: '''Base class for all weapon classes''' def __init__(self, name, damage, clip_size): self.name = name self.damage = damage self.clip_size = clip_size self.ammo = clip_size def get_sound(self): '''Sound interface method''' pass def reload(self): self.ammo = self.clip_size s(0.5) def shoot(self): s(0.5) print(self.get_sound()) s(0.5) if (self.ammo == 0): print('Out of ammo') return 0 self.ammo -= 1 return self.damage + rd.randrange(-1, 2) class AlienWeapon(Weapon): '''Base class for alien weapons''' def __init__(self, name, damage, clip_size, elerium, fragments): super().__init__(name, damage, clip_size) self.elerium = elerium self.fragments = fragments def get_materials(self): '''Returns the amount of elerium and weapon fragments retrieved from this weapon''' return self.elerium, self.fragments class BallisticPistol(Weapon): def __init__(self): super().__init__('Ballistic Pistol', 2, 10) def get_sound(self): return '*Dak*' class Autopistol(Weapon): def __init__(self): super().__init__('Autopistol', 2, 10) def get_sound(self): return '*Dakdakdak*' class PlasmaPistol(AlienWeapon): def __init__(self): super().__init__('Plasma Pistol', 2, 10,1,1) def get_sound(self): return '*Whap*' class AlloyPistol(Weapon): def __init__(self): super().__init__('Alloy Pistol', 4, 10) def get_sound(self): return '*Kchak!*' class BallisticCarbine(Weapon): def __init__(self): super().__init__('Ballistic Carbine', 2, 3) def get_sound(self): return '*Dakkadakkadakka*' class BallisticRifle(Weapon): def __init__(self): super().__init__('Ballistic Rifle', 3, 4) def get_sound(self): return '*Dakkadakkadakka*' class LaserCarbine(Weapon): def __init__(self): super().__init__('Beam Carbine', 3, 999) def get_sound(self): return '*Zzzaaaaaap!*' class LaserRifle(Weapon): def __init__(self): super().__init__('Beam Rifle', 4, 999) def get_sound(self): return '*Zzzaaaaaap!*' class PlasmaCarbine(AlienWeapon): def __init__(self): super().__init__('Light Plasma Rifle', 4, 4, 1, 2) def get_sound(self): return '*Whap-whap-whap*' class PlasmaRifle(AlienWeapon): def __init__(self): super().__init__('Plasma Rifle', 6, 5, 2, 4) def get_sound(self): return '*Whap-whap-whap*' class BradfordsPistol(Weapon): def __init__(self): super().__init__("Bradford's Pistol", 5, 999) def get_sound(self): return '*Dak*' ######################################################################## # item classes # ######################################################################## class Item: '''Base class for items Items with an `use_ap_costs` of 0 are passive items. 
''' def __init__(self, name, use_ap_costs, effect_descr, key = ""): self.name = name self.use_ap_costs = use_ap_costs self.effect_descr = effect_descr self.action_key = key def use(self, soldier): '''Interface method for active items''' pass class Explosive(Item): def __init__(self, name, use_ap_costs, damage, sound_descr, key): super().__init__(name, use_ap_costs, '{} dmg'.format(damage), key) self.damage = damage self.sound_descr = sound_descr def use(self, soldier): global alloy global elerium global fragments global meld global roomNo p(0, self.sound_descr) #the grenade only affects some of the aliens in the room, but is guaranteed to hit at least 1 #it's not a bug, it's a feature affected = room[roomNo] for i in range(len(affected) + 1): try: alien = affected[i] alien.hp -= self.damage alien.cover = COVER_NONE fragments += getLoot(alien)[0] elerium += getLoot(alien)[1] meld += getLoot(alien)[2] alloy += getLoot(alien)[3] alien.check_death() except (IndexError): i = 0 #reset the loop class Medkit(Item): def __init__(self): super().__init__('Nano-Serum', 10, '+4 HP', 's') def use(self, soldier): print("HP restored.") soldier.hp += 4 # XCOM items ITEM_SCOPE = Item('Scope', 0, 'Increase aim') ITEM_FRAG_GRENADE = Explosive('Frag Grenade', 10, 2, 'BAM!', 'g') ITEM_ALLOY_PLATING = Item('Alloy Plating', 0, 'Increase defense') ITEM_MEDKIT = Medkit() # Alien items # Alien grenade is also available to XCOM ITEM_ALIEN_GRENADE = Explosive('Alien Grenade', 15, 4, '**BLAM**!', 'G') ######################################################################## # unit classes # ######################################################################## class Unit: def __init__(self, hp, aim, mobility, nrank, firstname, lastname, armour, weapon, items, mods): self.hp = hp self.aim = aim self.mobility = mobility self.nrank = nrank self.firstname = firstname self.lastname = lastname self.armour = armour self.weapon = weapon self.items = items self.mods = mods self.cover = 0 self.on_overwatch = False self.alive = True def _handle_overwatch(self, target): ''' Generic overwatch handler which shoots at the target ''' chance = self.aim_at(target) return self.shoot_at(target, chance,10) def aim_at(self, target): hit_chance = self.aim - target.cover if ITEM_SCOPE in self.items: hit_chance += 10 # Carbines get an aim bonus if type(self.weapon) is BallisticCarbine \ or type(soldier.weapon) is LaserCarbine \ or type(self.weapon) is PlasmaCarbine: hit_chance += 10 if hit_chance < 0: hit_chance = 5 if hit_chance > 100: hit_chance = 95 return hit_chance def check_death(self): ''' Check for unit death and call death handler, if unit is dead. If the unit is not dead, `False` is returned and nothing happens, else the `_handle_death` method implemented by the subclass is called. ''' if self.hp <= 0: self._handle_death() return True return False def overwatch(self, target): ''' Perform overwatch reaction if unit is on overwatch. If the unit is on overwatch, the `_handle_overwatch` method is called (overwrite this to customize the overwatch handling) and `True` is returned, else `False` is returned and nothing happens. ''' if self.on_overwatch: self.on_overwatch = False p(0, str(self) + ' reacts!') self._handle_overwatch(target) return True return False def reload(self): self.weapon.reload() def shoot_at(self, target, chance, situation_modificator=0): ''' Perform an attack at the target Returns `True` if the target was hit, `False` otherwise. If the target was hit, hit points are discounted and the death check is performed. 
''' damage = self.weapon.shoot() if rd.randrange(0, 100) < chance: p(0, str(damage) + ' damage!') target.hp -= damage target.check_death() return True else: p(0, ' Missed!') return False class Soldier(Unit): def __init__(self, sid, sex, hp, aim, mobility, rank, firstname, lastname, armour, weapon, items, mods): super().__init__(hp, aim, mobility, rank, firstname, lastname, armour, weapon, items, mods) self.ap = 0 self.sid = sid self.sex = sex self.xp = 0 self.aimpenalty = 0 self.nickname = None self.mods = [] self.hunkerbonus = 0 def __str__(self): middle = ' ' if self.nickname: middle = " '" + self.nickname + "' " return XCOM_RANKS[self.nrank] + middle + self.lastname def _handle_death(self): soldier.alive = False p(0, str(soldier) + ' was killed!') if not soldier.lastname == "Bradford": p("Bradford", "Commander, our unit was killed.") p("Bradford", "We were able to recover some materials, however.") print("Fragments:", fragments) print("Elerium:", elerium) print("Meld:", meld) print("Alloy:", alloy) print('Total Score: ' + str(fragments + elerium + meld + alloy \ + soldier.xp + roomNo)) else: p("Council Member", "Commander...you 'volunteered' your Central Officer to fight on the front lines.") p("Council Member","This was a foolish endeavour, and as a result, you lost him.") print('Monthly Rating: F') p("Council Member", "We have negotiated...a deal with the aliens, and so...your services are no longer required.") p("Council Member", "We are...terminating the XCOM Project, effective...immediately.") quit def _handle_overwatch(self, target): if super()._handle_overwatch(target) == False: p(spk, self.get_overwatch_miss_retort()) def get_overwatch_confirmation(self): return 'Got it, on Overwatch.' def get_overwatch_miss_retort(self): return 'Shot failed to connect!' def get_reposition_confirmation(self): return 'Moving to Full cover!' def get_retort(self): return rd.choice(retort) def print_summary(self): middle = ' ' if self.nickname: middle = " '" + self.nickname + "' " p(0, XCOM_RANKS[self.nrank] + ' ' + self.firstname + middle \ + self.lastname + ' - ' + str(self.hp) + ' HP' + ' - ' \ + str(self.aim) + ' Aim'+ ' - ' +str(self.mobility) + ' AP') p(0, 'Items: ' + self.weapon.name + ', ' + self.items[0].name + ', ' \ + self.items[1].name) #we define the aliens here. 
they are initialised as sectoids but this can be changed with the definitions, such #as thinman(), to convert the alien to a thinman class Alien(Unit): def __init__(self, alien_id, species, hp, aim, mobility, nrank, firstname,\ lastname, armour, weapon, items, mods): super().__init__(hp, aim, mobility, nrank, firstname, lastname, \ armour, weapon, items, mods) self.aid = alien_id self.species = species #gives us names for when we reference the alien in game def __str__(self): return '(' + self.species + ') ' + ALIEN_RANKS[self.nrank] + ' ' \ + self.firstname + " " + self.lastname def _handle_death(self): #kills, loots and removes the alien from the game p(0, str(self) + ' died!') getLoot(self) drop() checkXP() self.alive = False room[roomNo].pop(room[roomNo].index(self)) def refresh(self): self.hp += self.nrank * round(rd.random() * 2) self.aim += self.nrank * round(rd.random() * 2) SPECIES_RANK_FUNC = 0 SPECIES_HP_BASE = 1 SPECIES_AIM_RANGE = 2 SPECIES_MOBILITY_RANGE = 3 SPECIES_WEAPON_CLASS = 4 SPECIES_FIRSTNAME = 5 SPECIES_LASTNAME = 6 # Species data table, used to construct aliens with the `create_alien` # function with keywords # rank function, hp base, aim base range, mobility base range, # primary weapon class, firstname table, lastname table ALIEN_SPECIES = { 'Sectoid': [ lambda nroom: round(rd.randrange(round(nroom / 20), 2)), 2, (50, 75), (9, 13), PlasmaPistol, sectoidfName, sectoidlName ], 'Thinman': [ lambda nroom: round(rd.randrange(round(nroom / 20), 2)), 3, (60, 80), (12, 15), PlasmaCarbine, thinfName, thinlName ], 'Floater': [ lambda nroom: round(rd.randrange(round(nroom / 12), 3)), 4, (50, 70), (12, 15), PlasmaCarbine, floaterfName, floaterlName ], 'Muton': [ lambda nroom: round(rd.randrange(round(nroom / 12), 3)), 8, (50, 60), (10, 12), PlasmaRifle, mutonfName, mutonlName ] } def create_alien(alien_id, room_index, species, **kwargs): ''' Create a alien with random stats or stats supplied by keywords Returns a new alien with the stats read from kwargs, or if the stat is not contained in there, random stats according to the `ALIEN_SPECIES` table. 
''' if species not in ALIEN_SPECIES: raise Exception('Unknown alien species') # the rank may be used for other values, so it is set differently nrank = 0 if not 'nrank' in kwargs: nrank = ALIEN_SPECIES[species][SPECIES_RANK_FUNC](room_index) kwargs['nrank'] = nrank else: nrank = kwargs['nrank'] if not 'hp' in kwargs: kwargs['hp'] = ALIEN_SPECIES[species][SPECIES_HP_BASE] + nrank if not 'aim' in kwargs: kwargs['aim'] = \ rd.randrange(*ALIEN_SPECIES[species][SPECIES_AIM_RANGE]) + nrank if not 'mobility' in kwargs: kwargs['mobility'] = \ rd.randrange(*ALIEN_SPECIES[species][SPECIES_MOBILITY_RANGE]) \ + nrank if not 'firstname' in kwargs: kwargs['firstname'] = \ rd.choice(ALIEN_SPECIES[species][SPECIES_FIRSTNAME]) if not 'lastname' in kwargs: kwargs['lastname'] = \ rd.choice(ALIEN_SPECIES[species][SPECIES_LASTNAME]) if not 'armour' in kwargs: kwargs['armour'] = 'BDY' if not 'weapon' in kwargs: kwargs['weapon'] = ALIEN_SPECIES[species][SPECIES_WEAPON_CLASS]() if not 'items' in kwargs: kwargs['items'] = [rd.choice([ITEM_ALIEN_GRENADE, ITEM_ALLOY_PLATING, ITEM_SCOPE]) ] if not 'mods' in kwargs: kwargs['mods'] = [] return Alien(alien_id, species, **kwargs) ########### # actions # ########### class Action: '''Base class for actions''' def __init__(self, soldier, name, ap_costs, ends_turn, key = ''): self.soldier = soldier self.name = name self.ap_costs = ap_costs self.ends_turn = ends_turn self.action_key = key def __str__(self): return self.name def perform(self): '''Interface method to perform action''' pass def _calc_ap(self): '''Should be the first thing executed by the perform functions''' if self.soldier.ap < self.ap_costs: raise Exception("Not enough AP to perform action '{}'". format(self.name)) if self.ends_turn: self.soldier.ap = 0 else: self.soldier.ap -= self.ap_costs class AdvanceAction(Action): def __init__(self, soldier): super().__init__(soldier, 'Advance', 1, True, 'a') def perform(self): global roomNo global fragments global elerium global meld global alloy self._calc_ap() roomNo += 1 if not "Drop Zone" in room[roomNo]: checkspot(roomNo) scatter(roomNo) else: p(spk,"Reached an access point, Commander. 
Requesting additional goods!") p(spk,"We only have a short time before the aliens close it off!") ap = 60 while ap != 0: print("Fragments:",fragments) print("Elerium:",elerium) print("Meld:",meld) print("Alloy:",alloy) sel = displayShop(ap) if sel == "AimBonus": soldier.mods.append("Aim") soldier.aim += 5 meld -= 15 ap -= 60 print("Depth Perception Insta-Genemod applied!") elif sel == "HPBonus": soldier.mods.append("HP") soldier.hp += 5 meld -= 20 ap -= 60 print("Muscle Regeneration Insta-Genemod applied!") elif sel == "APBonus": soldier.mods.append("HP") soldier.mobility += 2 meld -=15 ap -= 60 print("Micro Servomotors Augment inserted!") elif sel == "NadeBonus": soldier.mods.append("Nade") soldier.item.append(0) soldier.item.append(0) meld -= 20 ap -= 60 print("Grenade Launcher Augment inserted!") elif sel == "LaserRifle": soldier.weapon = LaserRifle() fragments -= 40 elerium -= 20 ap -= 40 print("Beam Rifle fabricated!") elif sel == "LaserCarbine": soldier.weapon = LaserCarbine() fragments -= 20 elerium -= 10 ap -= 40 print("Beam Carbine fabricated!") elif sel == "Frag": soldier.items.append(ITEM_FRAG_GRENADE) alloy -= 4 fragments -= 20 ap -= 20 print("Frag Grenade fabricated!") elif sel == "Meds": soldier.items.append(ITEM_MEDKIT) meld -= 10 fragments -= 10 ap -= 20 print("Nano Serum fabricated!") elif sel == "Reload": soldier.weapon.ammo = soldier.weapon.clip_size ap -= 20 print("Weapon reloaded!") elif sel == "Heal": soldier.hp += 1 ap -= 20 print("Healed 1HP!") elif sel == "Advance": ap = 0 s(.5) s(.5) p(spk,"All out of time! I'll have to keep moving!") s(.5) roomNo += 1 checkspot(roomNo) scatter(roomNo) class ReloadAdvanceAction(Action): def __init__(self, soldier): super().__init__(soldier,'Reload + Advance', 9, True, 'd') def perform(self): reload_action.perform() advance_action.perform() class EndTurnAction(Action): def __init__(self, soldier): super().__init__(soldier, 'End turn', 0, True, 'e') def perform(self): self._calc_ap() class FireAction(Action): def __init__(self, soldier, target): super().__init__(soldier, 'Fire', 6, False) self.target = target self.hit_chance = soldier.aim_at(target) def __str__(self): return '(~{} dmg) Fire {} at {} - {} HP - ({}%)'.\ format(soldier.weapon.damage, soldier.weapon.name, self.target,\ self.target.hp, self.hit_chance) def perform(self): global alloy global elerium global fragments global meld self._calc_ap() p(spk, self.soldier.get_retort()) if self.soldier.shoot_at(self.target, self.hit_chance): fragments += getLoot(self.target)[0] elerium += getLoot(self.target)[1] meld += getLoot(self.target)[2] alloy += getLoot(self.target)[3] class HunkerDownAction(Action): def __init__(self, soldier): super().__init__(soldier, 'Hunker down', 1, True, 'h') def perform(self): self._calc_ap() if soldier.cover == COVER_HALF or soldier.cover == COVER_FULL: soldier.hunkerbonus += 20 p(spk, 'Taking cover!') s(.5) class OverwatchAction(Action): def __init__(self, soldier): super().__init__(soldier, 'Overwatch', 6, True, 'o') def perform(self): self._calc_ap() p(spk, self.soldier.get_overwatch_confirmation()) s(.5) self.soldier.ap = 0 # TODO check if this is necessary self.soldier.on_overwatch = True class ReloadAction(Action): def __init__(self, soldier): super().__init__(soldier, 'Reload', 8, False, 'r') def perform(self): self._calc_ap() soldier.reload() class RepositionAction(Action): def __init__(self, soldier): super().__init__(soldier, 'Reposition', 3, False, 'p') def perform(self): self._calc_ap() # if any aliens are on overwatch, check and be shot 
at if they are check_for_alien_overwatch() self.soldier.cover = 40 # ?! p(spk, self.soldier.get_reposition_confirmation()) s(.5) #chance to flank an alien if rd.randrange(0, 100) < 50: alien = rd.choice(room[roomNo]) p(0, str(alien) + ' is flanked!') alien.cover = COVER_FLANKED class UseItemAction(Action): def __init__(self, soldier, item): super().__init__(soldier, 'Use ' + item.name, item.use_ap_costs, False) self.item = item self.action_key = item.action_key def __str__(self): return '({}) ({} AP) {}'.format(self.item.effect_descr, \ self.item.use_ap_costs, self.name) def perform(self): self._calc_ap() items = self.soldier.items del items[items.index(self.item)] self.item.use(self.soldier) ######################################################################## # globals # ######################################################################## # Global variables to prevent duplicate hero soldiers have_bradford = False have_vdoorn = False # Global stat counters fragments = 0 elerium = 0 meld = 0 alloy = 0 ######################################################################## # functions # ######################################################################## def get_action_input(prompt, actions): '''Get validated input, and return the chosen action''' if prompt[-1] != ' ': prompt += ' ' while True: instr = input(prompt) for action in actions: if action.action_key == instr: return action print("'" + instr + "' is not a valid action") # Legacy, can probably be removed def get_int_input(prompt, vmin, vmax): '''Get a range checked integer from the player.''' if prompt[-1] != ' ': prompt += ' ' while True: instr = input(prompt) if instr.isdigit(): val = int(instr) if val < vmin: print("That's not an option.") elif vmax < val: print("That's not an option.") else: return val else: print("Please enter the number of the action.") def create_soldier(sid): global have_bradford global have_vdoorn items = [(rd.choice([ITEM_FRAG_GRENADE, ITEM_MEDKIT, ITEM_SCOPE])), \ (rd.choice([ITEM_FRAG_GRENADE, ITEM_MEDKIT]))] mobility = rd.randrange(11, 16) armour = 'BDY' mods = [] if rd.randrange(1,100) < 5: if rd.randrange(0, 2) == 0: if not have_bradford: bradford = Soldier(sid, SEX_MALE, 6, 100, mobility, \ RANK_CENTRAL_OFFICER, '', 'Bradford', \ armour, BradfordsPistol(), items, mods) bradford.get_overwatch_confirmation = \ lambda: 'Keep your eyes peeled!' bradford.get_overwatch_miss_retort = \ lambda: 'How did I miss that?!' bradford.get_reposition_confirmation = \ lambda: "Moving to...wait...that's CLOSE RANGE!" bradford.get_retort = lambda: rd.choice(BRADFORD_RETORTS) return bradford if not have_vdoorn: van_doorn = Soldier(sid, SEX_MALE, 6, 80, mobility, RANK_GENERAL, \ 'Peter', VAN_DOORN, armour, BallisticRifle(), \ items, mods) van_doorn.get_overwatch_confirmation = \ lambda: 'You coming down here or what?' van_doorn.get_reposition_confirmation = \ lambda: "Come on! I won't go down without a fight." van_doorn.get_retort = lambda: rd.choice(VAN_DOORN_RETORTS) return van_doorn weapon = None if rd.randrange(0, 2) == 0: weapon = BallisticRifle() else: weapon = BallisticCarbine() sex = None if rd.randrange(0, 2) == 0: sex = SEX_FEMALE else: sex = SEX_MALE name = None if sex == SEX_FEMALE: name = rd.choice(XCOM_FEMALE_FIRSTNAME) else: name = rd.choice(XCOM_MALE_FIRSTNAME) return Soldier(sid, sex, rd.randrange(3, 6), rd.randrange(50, 75), \ mobility, RANK_ROOKIE, name, rd.choice(XCOM_LASTNAME), \ armour, weapon, items, mods) #scatters the aliens in a room, some won't find any cover. 
def scatter(roomNo): cover = ["Full","Full","Full","Half","Half","Half","Half","Half","Half","No"] covernumber = [40,40,40,20,20,20,20,20,20,-10] for i in range(len(room[roomNo])): room[roomNo][i].cover = rd.choice(covernumber) if not room[roomNo][i].cover == -10: p(0, str(room[roomNo][i]) + ' moves to ' \ + cover[covernumber.index(room[roomNo][i].cover)] + ' cover!') s(.5) else: p(0, str(room[roomNo][i]) + " can't find any cover!") s(.5) print() #could probably be merged in with scatter(). Tells you that you've seen an alien def checkspot(roomNo): for i in range(len(room[roomNo])): p(0, str(room[roomNo][i]) + ' spotted!') s(.5) def prompt_player(actions): for index, action in enumerate(actions): ap_str = '' ap_costs = action.ap_costs if ap_costs > 0: ap_str = ' (' + str(ap_costs) + ' AP) ' action_key = action.action_key if len(action_key) < 1: action_key = str(index + 1) actions[index].action_key = action_key print('[' + action_key + '] ' + ap_str + str(action)) return get_action_input('> ', actions) reload_action = "" advance_action = "" #ah, the player's turn. def playerTurn(): global reload_action global advance_action soldier.ap = soldier.mobility soldier.on_overwatch = False soldier.hunkerbonus = 0 # currently redundant and inefficient advance_action = AdvanceAction(soldier) end_turn_action = EndTurnAction(soldier) hunker_down_action = HunkerDownAction(soldier) overwatch_action = OverwatchAction(soldier) reload_action = ReloadAction(soldier) reposition_action = RepositionAction(soldier) reload_advance_action = ReloadAdvanceAction(soldier) #maybe just have these as def's instead of classes? # while the player has spare action points left while soldier.ap > 0 and soldier.alive == True: # displays stats p(0, 'HP: ' + str(soldier.hp) + '\tAP: ' + str(soldier.ap)+ '\tAmmo: ' + str(soldier.weapon.ammo)) if soldier.cover >= 40: p(0, str(soldier) + ' is in FULL cover.') elif soldier.cover <= 20: p(0, str(soldier) + ' is in HALF cover.') actions = [] if len(room[roomNo]) == 0: actions.append(advance_action) if soldier.ap >= reload_action.ap_costs and soldier.weapon.ammo < soldier.weapon.clip_size: actions.append(reload_action) actions.append(reload_advance_action) actions.append(end_turn_action) else: if soldier.weapon.ammo > 0: if soldier.ap >= 6: # TODO make the ap_cost a static member for i in range(len(room[roomNo])): alien = room[roomNo][i] actions.append(FireAction(soldier, alien)) if soldier.ap >= overwatch_action.ap_costs: actions.append(overwatch_action) if soldier.weapon.ammo < soldier.weapon.clip_size \ and soldier.ap >= reload_action.ap_costs: actions.append(reload_action) for item in soldier.items: if item.use_ap_costs > 0 and soldier.ap >= item.use_ap_costs: actions.append(UseItemAction(soldier, item)) if soldier.ap >= reposition_action.ap_costs: actions.append(reposition_action) if soldier.cover > COVER_NONE \ and soldier.ap >= hunker_down_action.ap_costs: actions.append(hunker_down_action) actions.append(end_turn_action) prompt_player(actions).perform() #ends turn by default def displayShop(ap): global fragments global elerium global meld global alloy options = [] print("Time: "+str(ap)) p(0, 'HP: ' + str(soldier.hp) + '\tAP: ' + str(soldier.ap)+ '\tAmmo: ' + str(soldier.weapon.ammo)) if ap == 60: if meld >= 15: if not "Aim" in soldier.mods: options.append("AimBonus") p(len(options),"(60 Time) (15m) Insta-Genemod: Depth Perception (+5 aim)") if not "AP" in soldier.mods: options.append("APBonus") p(len(options),"(60 Time) (15m) Micro-Augment: Reflex Servomotors (+2 AP)") 
options.append("") if meld >= 20: if not "HP" in soldier.mods: options.append("HPBonus") p(len(options),"(60 Time) (20m) Insta-Genemod: Muscle Regeneration (+5 HP)") if not "Nade" in soldier.mods: options.append("NadeBonus") p(len(options),"(60 Time) (20m) Micro-Augment: Grenade Launcher (+2 Frag Grenades)") if ap >= 50: if not type(soldier.weapon) is LaserRifle() and elerium >= 20 and fragments >= 40: options.append("LaserRifle") p(len(options),"(40 Time) (20e) (40f) Get Laser Rifle") print(" (~4dmg), infinite ammo") if not type(soldier.weapon) is LaserCarbine() and elerium >= 10 and fragments >= 30: options.append("LaserCarbine") p(len(options),"(40 Time) (10e) (30f) Get Laser Carbine") print(" (~3dmg), infinite ammo, +10% aim") if ap >= 30: if meld >= 10 and fragments >= 10: options.append("Meds") p(len(options),"(30 Time) (10m) (10f) Get Nano Serum") if alloy >= 4 and fragments >= 20: options.append("Frag") p(len(options),"(30 Time) (20f) (4a) Get Frag Grenade") if ap >= 20: if meld >= 5: options.append("Heal") p(len(options),"(20 Time) (5m) Recuperate (+1 HP)") options.append("Reload") p(len(options),"(20 Time) Reload Weapon") options.append("Skip") p(len(options),"("+str(ap)+" Time) Advance (Skip this Drop Zone)") selection = get_int_input('> ', 1, len(options) - 1) print('selected option ' + str(selection)) return options[selection - 1] def check_for_alien_overwatch(): for i in range(len(room[roomNo])): alium = room[roomNo][i] alium.overwatch(soldier) def fire(alium,cthplayer): alium.on_overwatch == False if alium.alive == True: if cthplayer > 0: p(0, str(alium) + ' fires at ' + str(soldier) + ' (' + str(cthplayer) + '%)'+'('+alium.weapon.name+")") alium.shoot_at(soldier, cthplayer) else: if rd.randrange(0,100) < 80: ow(alium) else: if ITEM_ALIEN_GRENADE in alium.items: nade(alium) def nade(alium): alium.on_overwatch == False if ITEM_ALIEN_GRENADE not in alium.items: raise Exception('No grenade in inventory') if alium.alive == True: p(0, str(alium) + ' uses Alien Grenade!') s(.5) p(0, '**BLAM!**') s(.5) del alium.items[alium.items.index(ITEM_ALIEN_GRENADE)] #sets the aliens item to 'none', no more grenades for you p(0, '3 damage!') soldier.cover = 20 soldier.hp -= 3 def ow(alium): if alium.alive == True: p(0, str(alium) + ' went on overwatch!') alium.on_overwatch = True def move(alium,cover): if alium.alive == True: s(.5) if cover == 40: p(0, str(alium) + ' runs to Full cover!') #if an alien has no cover, it will run to full cover. 
same goes if it's flanked elif cover == 20: p(0, str(alium) + ' runs to Half cover!') s(.5) soldier.overwatch(alium) alium.on_overwatch = False alium.cover = cover def alienTurn(soldier): for i in range(len(room[roomNo])): try: alium = room[roomNo][i] except ( Exception ): i = 0 #because something may have happened that causes an index error if alium.alive == True and soldier.alive == True: cthplayer = (alium.aim - soldier.cover) - soldier.hunkerbonus if ITEM_SCOPE in alium.items: cthplayer += 20 if alium.cover < 20: if rd.randrange(0,100) < 80: move(alium,40) elif rd.randrange(0,100) < 40: fire(alium,cthplayer) else: move(alium,20) elif alium.cover < 40: if cthplayer > 50 + rd.randrange(0,20): fire(alium,cthplayer) elif rd.randrange(0,100) < 20: if ITEM_ALIEN_GRENADE in alium.items: nade(alium) else: fire(alium,cthplayer) elif rd.randrange(0,100) < 20: if rd.randrange(0,100) < 50: move(alium,40) else: move(alium,20) #randomly moves to different cover sometimes else: if rd.randrange(0,100) < 20: ow(alium) else: fire(alium,cthplayer) else: if cthplayer > 30 + rd.randrange(0,20): fire(alium,cthplayer) elif rd.randrange(0,100) < 80: move(alium,20) else: ow(alium) s(.5) #levels up def checkXP(): was_promoted = False if soldier.xp >= 25 and soldier.nrank < RANK_SQUADDIE: soldier.nrank = RANK_SQUADDIE soldier.hp += 1 soldier.aim += 2 soldier.mobility += 1 drop() drop() was_promoted = True elif soldier.xp >= 100 and soldier.nrank < RANK_CORPORAL: soldier.nrank = RANK_CORPORAL soldier.hp += 1 soldier.aim += 2 soldier.mobility += 1 drop() drop() was_promoted = True elif soldier.xp >= 300 and soldier.nrank < RANK_SERGEANT: nicknames = XCOM_UNISEX_NICKNAMES_ASSAULT \ + XCOM_UNISEX_NICKNAMES_HEAVY \ + XCOM_UNISEX_NICKNAMES_SNIPER \ + XCOM_UNISEX_NICKNAMES_SUPPORT if soldier.sex == SEX_FEMALE: nicknames += XCOM_FEMALE_NICKNAMES_ASSAULT \ + XCOM_FEMALE_NICKNAMES_HEAVY \ + XCOM_FEMALE_NICKNAMES_MEC \ + XCOM_FEMALE_NICKNAMES_SNIPER \ + XCOM_FEMALE_NICKNAMES_SUPPORT else: nicknames += XCOM_MALE_NICKNAMES_ASSAULT \ + XCOM_MALE_NICKNAMES_HEAVY \ + XCOM_MALE_NICKNAMES_MEC \ + XCOM_MALE_NICKNAMES_SNIPER \ + XCOM_MALE_NICKNAMES_SUPPORT soldier.nickname = rd.choice(nicknames) p(0, XCOM_RANKS[soldier.nrank] + ' ' + soldier.firstname + ' ' \ + soldier.lastname + " earned the nickname '" + soldier.nickname \ + "'") soldier.nrank = RANK_SERGEANT soldier.hp += 2 soldier.aim += 1 soldier.mobility += 1 drop() drop() was_promoted = True elif soldier.xp >= 900 and soldier.nrank < RANK_LIEUTENANT: soldier.nrank = RANK_LIEUTENANT soldier.hp += 1 soldier.aim += 1 drop() drop() was_promoted = True elif soldier.xp >= 1500 and soldier.nrank < RANK_CAPTAIN: soldier.nrank = RANK_CAPTAIN soldier.hp += 2 soldier.aim += 1 drop() drop() drop() drop() was_promoted = True elif soldier.xp >= 2000 and soldier.nrank < RANK_MAJOR: soldier.nrank = RANK_MAJOR soldier.hp += 1 soldier.aim += 1 soldier.mobility += 1 drop() drop() drop() was_promoted = True elif soldier.xp >= 3000 and soldier.nrank < RANK_COLONEL: soldier.nrank = RANK_COLONEL soldier.hp += 1 soldier.aim += 1 drop() drop() drop() drop() drop() drop() was_promoted = True if was_promoted: p(0, str(soldier) + ' was promoted to ' + XCOM_RANKS[soldier.nrank]) #gets some sweet sweet loot from those aliens def getLoot(alium): fragments = 0 elerium = 0 meld = 0 alloy = 0 soldier.xp += alium.nrank * abs(alium.hp) fragments += abs(alium.hp) elerium += alium.nrank meld += 2 * alium.nrank if ITEM_ALIEN_GRENADE in alium.items: elerium += 2 elif ITEM_ALLOY_PLATING in alium.items: alloy += 2 
elif ITEM_SCOPE in alium.items: fragments += 2 e, f = alium.weapon.get_materials() elerium += e fragments += f return [fragments, elerium, meld, alloy] def drop(): itemdrop = rd.randrange(0,5) if rd.randrange(1,100) <= 5: p(spk,"Recovered a "+drops[itemdrop]+"!") if itemdrop == 0: soldier.items.append(ITEM_FRAG_GRENADE) elif itemdrop == 1: soldier.items.append(ITEM_MEDKIT) elif itemdrop == 5: soldier.items.append(ITEM_ALIEN_GRENADE) elif itemdrop == 3: soldier.weapon = PlasmaCarbine() elif itemdrop == 4: soldier.weapon = PlasmaRifle() def create_map(scripted_levels): # the first room is empty, since the player starts there options = ['Sectoid', 'Thinman', 'Floater', 'Muton'] the_map = [[]] for i in range(1, NUMBER_OF_ROOMS): if i in scripted_levels: the_map.append(scripted_levels[i]) else: pod = [] # more aliens per room the further along you are for j in range(3 + rd.randrange(-2, 2 + round(i / 10))): # determine alien species species = options[0] nrank = 0 if 3 < i and i < 10: species = rd.choice(options[:2]) elif 10 <= i and i < 20: species = rd.choice(options) else: species = rd.choice(options[2:]) # determine rank # if species == 'Sectoid': maxrank = 4 if species == 'Thinman': maxrank = 5 elif species == 'Floater': maxrank = 6 elif species == 'Muton': maxrank = 8 nrank = rd.randrange(round(i / (NUMBER_OF_ROOMS \ / (maxrank - 1))), maxrank) alien = create_alien(j, i, species, nrank=nrank) pod.append(alien) the_map.append(pod) return the_map def dump_map(the_map): for index, location in enumerate(the_map): print('#{}:'.format(index)) for pod in location: print('{}'.format(pod)) # def main(): p("Bradford", "Welcome Commander. We've discovered an Alien Base, and it's your job to send someone out to deal with it.") p("Bradford", "Choose a soldier from the 3 below to go on the mission.") barracks = [] #generates soldiers for i in range(3): x = create_soldier(i) barracks.append(x) #displays a list of the soldiers for i in range(len(barracks)): p(0,str(i+1)+": ") barracks[i].print_summary() p(0,"") #forces you to pick only one soldier soldier = barracks[get_int_input('# ', 1, 3) - 1] spk = soldier.firstname + " " + soldier.lastname if soldier.lastname == "Bradford": p(spk, "What? There must have been a mistake on the sheet, Commander! You can't send --") elif soldier.lastname == VAN_DOORN: p(spk, "I'm the Ops team?") else: p(spk, "Ready for duty, Commander!") scripted_levels = { 1: [create_alien(1, 1, 'Sectoid', nrank=0)], 2: [ create_alien(1, 2, 'Sectoid', nrank=0), create_alien(1, 2, 'Sectoid', nrank=0) ], 3: [ create_alien(1, 3, 'Sectoid', nrank=0), create_alien(1, 3, 'Sectoid', nrank=1) ], 5: ["Drop Zone"], 10: ["Drop Zone"], 15: ["Drop Zone"], 20: ["Drop Zone"], 30: [create_alien(1, 1, 'Muton', nrank=8, hp=50)] } room = create_map(scripted_levels) # dump_map(room) roomNo = 0 #game loop, runs until your soldier is killed while soldier.alive == True: try: old_room = roomNo playerTurn() p(0, str(soldier) + ' is out of AP!') # Aliens are not allowed to act after the room was changed, # because they already scattered when the player entered the new # room. Also, there is no need for an alien turn if there are # no more aliens in the room. if soldier.alive == True and old_room == roomNo \ and len(room[roomNo]) > 0: print() print("--------------Alien Activity!--------------") print() s(1) alienTurn(soldier) print() print("--------------XCOM Turn--------------") print() except ( ValueError or IndexError ): pass if roomNo == NUMBER_OF_ROOMS: print("You have won the game!") break
voidref/textcom
textcom.py
Python
mit
52,947
[ "CASINO", "MOOSE" ]
257a35e6f93677a283ab2a22958a21464b24408757b1237a72ff4dec792c9205
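The combat loop in textcom.py above resolves every shot from a single hit-chance number: the shooter's aim minus the target's cover, plus small bonuses for a scope or a carbine, clamped so a shot is never impossible or certain. A self-contained sketch of that rule with illustrative names; the constants mirror the COVER_* values in the record:

import random

COVER_NONE, COVER_HALF, COVER_FULL = 0, 20, 40

def hit_chance(aim, target_cover, has_scope=False, uses_carbine=False):
    chance = aim - target_cover
    if has_scope:
        chance += 10       # scope bonus, as in aim_at()
    if uses_carbine:
        chance += 10       # carbine aim bonus
    if chance < 0:
        chance = 5         # never impossible
    if chance > 100:
        chance = 95        # never guaranteed
    return chance

def shot_hits(chance):
    # same roll as shoot_at(): uniform 0-99 compared against the chance
    return random.randrange(0, 100) < chance

# e.g. 65 aim against full cover, with a scope fitted
print(hit_chance(65, COVER_FULL, has_scope=True))  # 35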
# coding=utf8 # # Copyright 2013 Dreamlab Onet.pl # # This library is free software; you can redistribute it and/or # modify it under the terms of the GNU Lesser General Public # License as published by the Free Software Foundation; # version 3.0. # This library is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU # Lesser General Public License for more details. # You should have received a copy of the GNU Lesser General Public # License along with this library; if not, visit # # http://www.gnu.org/licenses/lgpl.txt # import sys import logging import socket from SocketServer import StreamRequestHandler from SocketServer import TCPServer logger = logging.getLogger("rmock.pop3") class Pop3ServerImpl(object): def connect(self): pass def login(self, user, password): pass def uidl(self): pass def retr(self, uid): pass def top(self, uid): pass def dele(self, uid): pass def quit(self): pass class Pop3ProtocolError(Exception): def __init__(self, message): Exception.__init__(self, message) self.message = message class Pop3RequestHandler(StreamRequestHandler): def handle(self): self.quitted = False self.impl = self.server.impl self.username = None self.password = None self.logged_in = False self.uid_mapping = {} welcome = self.impl.connect() self._send_single_response(welcome) while not self.quitted: try: request = self.rfile.readline() except socket.error, e: logger.info('socket error: %s', e) return if request is None: break request = request.strip() funcname, params = self._parse_request(request) logger.debug("request: %s %s", funcname, params) handler_function = getattr(self, '_handle_%s' % funcname, None) if handler_function is not None: try: handler_function(params) except Pop3ProtocolError, e: logger.error("pop3 protocol error: %s", e) self._send_error_response(e.message) except Exception: logger.exception("internal error") self._send_error_response("internal error") else: logger.warning("invalid fuction: %s", funcname) self._send_error_response('unknown command: %s' % funcname) def _handle_connect(self, params): welcome = self.impl.connect() self._send_single_response(welcome) def _handle_user(self, params): #<command name> <key> <flags> <exptime> <bytes> [noreply]\r\n self.username, = self._checked_get_params(params, 1) self._send_single_response('') def _handle_pass(self, params): #<command name> <key> <flags> <exptime> <bytes> [noreply]\r\n self.password, = self._checked_get_params(params, 1) self.logged_in = self.impl.login(self.username, self.password) if not self.logged_in: raise Pop3ProtocolError("auth error") result = 'connected :) ' self._send_single_response(result) def _handle_uidl(self, params): self._verify_logged_in() uids = self.impl.uidl() uids_nr = list(enumerate(uids, 1)) self.uid_mapping = {str(nr): uid for (nr, uid) in uids_nr} if len(params) > 0: nr = params[0] line = self._make_uid_line(nr, self.uid_mapping[nr]) self._send_single_response(line) else: lines = [self._make_uid_line(nr, uid) for (nr, uid) in uids_nr] self._send_multi_response('', lines) def _handle_retr(self, params): self._verify_logged_in() nr, = self._checked_get_params(params, 1) uid = self._get_uid_for_number(nr) content = self.impl.retr(uid) header = '%s octets' % (len(content)) self._send_multi_response(header, content) def _handle_top(self, params): self._verify_logged_in() nr, = self._checked_get_params(params, 1) uid = self._get_uid_for_number(nr) content = 
self.impl.top(uid) self._send_multi_response('', content) def _handle_dele(self, params): self._verify_logged_in() nr, = self._checked_get_params(params, 1) uid = self._get_uid_for_number(nr) content = self.impl.dele(uid) self._send_single_response('deleted') def _handle_quit(self, params): response = self.impl.quit() self._send_single_response(response) self.quitted = True def _verify_logged_in(self): if not self.logged_in: raise Pop3ProtocolError("please log in") def _checked_get_params(self, params, count): if len(params) < count: raise Pop3ProtocolError("not enough params") return params[:count] def _get_uid_for_number(self, nr): if not self.uid_mapping: logger.warning("no uidl before retr") self._handle_uidl([]) try: uid = self.uid_mapping[nr] except KeyError: raise Pop3ProtocolError('no such message') return uid def _make_uid_line(self, nr, uid): return ' '.join([str(nr), uid]) def _send_get_response(self, key, value): #VALUE <key> <flags> <bytes> [<cas unique>]\r\n if value: self._send_response('VALUE %s 0 %s' % (key, len(value))) self._send_response(value) def _send_response(self, message): logger.debug("response: %s", message) self.wfile.write(message + "\r\n") def _send_single_response(self, message): self._send_response('+OK %s' % message) def _send_multi_response(self, message, content): self._send_response('+OK %s' % message) if isinstance(content, basestring): content = content.splitlines() for line in content: self._send_response(line) self._send_response('.') def _send_error_response(self, message): self._send_response('-ERR %s' % message) def _parse_request(self, request): parts = request.split() if not parts: return '', [] return parts[0].lower(), parts[1:] class Pop3Server(TCPServer): #TODO: make ipv6 code more generic address_family = socket.AF_INET6 allow_reuse_address = True def __init__(self, impl, port): TCPServer.__init__(self, ('', port), Pop3RequestHandler) self.impl = impl def server_bind(self): # Override this method to be sure v6only is false: we want to # listen to both IPv4 and IPv6! self.socket.setsockopt(socket.IPPROTO_IPV6, socket.IPV6_V6ONLY, False) TCPServer.server_bind(self) def handle_error(self, request, client_address): etype = sys.exc_info()[0] if etype is SystemExit: raise logger.debug("pop3 server error", exc_info=True)
tikan/rmock
src/rmock/runners/pop3/server.py
Python
lgpl-3.0
7,734
[ "VisIt" ]
60949d0d553b314a1e781581a8bf802b3f572908baa1bb26044907df6d4a1b0b
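The request handler in the rmock POP3 server above frames every reply the same way: single-line replies start with "+OK", errors with "-ERR", and multi-line replies end with a bare "." line, all CRLF-terminated. A standalone sketch of that framing; the helper names are illustrative, not the handler's own:

def single_response(message):
    return "+OK %s\r\n" % message

def error_response(message):
    return "-ERR %s\r\n" % message

def multi_response(header, lines):
    out = ["+OK %s" % header]
    out.extend(lines)
    out.append(".")            # lone dot terminates a multi-line reply
    return "\r\n".join(out) + "\r\n"

# e.g. a UIDL listing for two messages
print(multi_response("", ["1 abc123", "2 def456"]), end="")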
#!/usr/bin/env python # Copyright (c) 2012 The Chromium Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. """Makes sure that all files contain proper licensing information.""" import optparse import os.path import subprocess import sys def PrintUsage(): print """Usage: python checklicenses.py [--root <root>] [tocheck] --root Specifies the repository root. This defaults to "../.." relative to the script file. This will be correct given the normal location of the script in "<root>/tools/checklicenses". --ignore-suppressions Ignores path-specific license whitelist. Useful when trying to remove a suppression/whitelist entry. tocheck Specifies the directory, relative to root, to check. This defaults to "." so it checks everything. Examples: python checklicenses.py python checklicenses.py --root ~/chromium/src third_party""" WHITELISTED_LICENSES = [ 'Apache (v2.0)', 'Apache (v2.0) BSD (2 clause)', 'Apache (v2.0) GPL (v2)', 'Apple MIT', # https://fedoraproject.org/wiki/Licensing/Apple_MIT_License 'APSL (v2)', 'APSL (v2) BSD (4 clause)', 'BSD', 'BSD (2 clause)', 'BSD (2 clause) ISC', 'BSD (2 clause) MIT/X11 (BSD like)', 'BSD (3 clause)', 'BSD (3 clause) GPL (v2)', 'BSD (3 clause) ISC', 'BSD (3 clause) LGPL (v2 or later)', 'BSD (3 clause) LGPL (v2.1 or later)', 'BSD (3 clause) MIT/X11 (BSD like)', 'BSD (4 clause)', 'BSD-like', # TODO(phajdan.jr): Make licensecheck not print BSD-like twice. 'BSD-like MIT/X11 (BSD like)', 'BSL (v1.0)', 'GPL (v2) LGPL (v2.1 or later)', 'GPL (v2 or later) with Bison parser exception', 'GPL (v2 or later) with libtool exception', 'GPL (v3 or later) with Bison parser exception', 'GPL with Bison parser exception', 'ISC', 'LGPL (unversioned/unknown version)', 'LGPL (v2)', 'LGPL (v2 or later)', 'LGPL (v2.1)', 'LGPL (v2.1 or later)', 'LGPL (v3 or later)', 'MIT/X11 (BSD like)', 'MPL (v1.0) LGPL (v2 or later)', 'MPL (v1.1)', 'MPL (v1.1) BSD (3 clause) GPL (v2) LGPL (v2.1 or later)', 'MPL (v1.1) BSD (3 clause) LGPL (v2.1 or later)', 'MPL (v1.1) BSD-like', 'MPL (v1.1) BSD-like GPL (unversioned/unknown version)', 'MPL (v1.1) BSD-like GPL (v2) LGPL (v2.1 or later)', 'MPL (v1.1) GPL (v2)', 'MPL (v1.1) GPL (v2) LGPL (v2 or later)', 'MPL (v1.1) GPL (v2) LGPL (v2.1 or later)', 'MPL (v1.1) GPL (unversioned/unknown version)', 'MPL (v1.1) LGPL (v2 or later)', 'MPL (v1.1) LGPL (v2.1 or later)', 'MPL (v2.0)', 'Ms-PL', 'Public domain', 'Public domain BSD', 'Public domain BSD (3 clause)', 'Public domain BSD-like', 'Public domain LGPL (v2.1 or later)', 'libpng', 'zlib/libpng', 'SGI Free Software License B', 'University of Illinois/NCSA Open Source License (BSD like)', ] PATH_SPECIFIC_WHITELISTED_LICENSES = { 'base/hash.cc': [ # http://crbug.com/98100 'UNKNOWN', ], 'base/third_party/icu': [ # http://crbug.com/98087 'UNKNOWN', ], # http://code.google.com/p/google-breakpad/issues/detail?id=450 'breakpad/src': [ 'UNKNOWN', ], 'chrome/common/extensions/docs/examples': [ # http://crbug.com/98092 'UNKNOWN', ], 'chrome/test/data/gpu/vt': [ 'UNKNOWN', ], 'chrome/test/data/layout_tests/LayoutTests': [ 'UNKNOWN', ], 'courgette/third_party/bsdiff_create.cc': [ # http://crbug.com/98095 'UNKNOWN', ], 'data/mozilla_js_tests': [ 'UNKNOWN', ], 'data/page_cycler': [ 'UNKNOWN', 'GPL (v2 or later)', ], 'data/tab_switching': [ 'UNKNOWN', ], 'native_client': [ # http://crbug.com/98099 'UNKNOWN', ], 'native_client/toolchain': [ 'BSD GPL (v2 or later)', 'BSD (2 clause) GPL (v2 or later)', 'BSD (3 clause) GPL (v2 or later)', 'BSL (v1.0) 
GPL', 'BSL (v1.0) GPL (v3.1)', 'GPL', 'GPL (unversioned/unknown version)', 'GPL (v2)', 'GPL (v2 or later)', 'GPL (v3.1)', 'GPL (v3 or later)', ], 'net/tools/spdyshark': [ 'GPL (v2 or later)', 'UNKNOWN', ], 'third_party/WebKit': [ 'UNKNOWN', ], 'third_party/WebKit/Websites/webkit.org/blog/wp-content/plugins/' 'akismet/akismet.php': [ 'GPL (v2 or later)' ], 'third_party/WebKit/Source/JavaScriptCore/tests/mozilla': [ 'GPL', 'GPL (v2 or later)', 'GPL (unversioned/unknown version)', ], 'third_party/active_doc': [ # http://crbug.com/98113 'UNKNOWN', ], # http://code.google.com/p/angleproject/issues/detail?id=217 'third_party/angle': [ 'UNKNOWN', ], 'third_party/bsdiff/mbsdiff.cc': [ 'UNKNOWN', ], 'third_party/bzip2': [ 'UNKNOWN', ], # http://crbug.com/222828 # http://bugs.python.org/issue17514 'third_party/chromite/third_party/argparse.py': [ 'UNKNOWN', ], # Not used. http://crbug.com/156020 # Using third_party/cros_dbus_cplusplus/cros_dbus_cplusplus.gyp instead. 'third_party/cros_dbus_cplusplus/source/autogen.sh': [ 'UNKNOWN', ], # Included in the source tree but not built. http://crbug.com/156020 'third_party/cros_dbus_cplusplus/source/examples': [ 'UNKNOWN', ], 'third_party/devscripts': [ 'GPL (v2 or later)', ], 'third_party/expat/files/lib': [ # http://crbug.com/98121 'UNKNOWN', ], 'third_party/ffmpeg': [ 'GPL', 'GPL (v2)', 'GPL (v2 or later)', 'UNKNOWN', # http://crbug.com/98123 ], 'third_party/findbugs/doc': [ # http://crbug.com/157206 'UNKNOWN', ], 'third_party/freetype2': [ # http://crbug.com/177319 'UNKNOWN', ], 'third_party/gles2_book': [ # http://crbug.com/98130 'UNKNOWN', ], 'third_party/gles2_conform/GTF_ES': [ # http://crbug.com/98131 'UNKNOWN', ], 'third_party/harfbuzz': [ # http://crbug.com/98133 'UNKNOWN', ], 'third_party/hunspell': [ # http://crbug.com/98134 'UNKNOWN', ], 'third_party/hyphen/hyphen.tex': [ # http://crbug.com/157375 'UNKNOWN', ], 'third_party/iccjpeg': [ # http://crbug.com/98137 'UNKNOWN', ], 'third_party/icu': [ # http://crbug.com/98301 'UNKNOWN', ], 'third_party/jemalloc': [ # http://crbug.com/98302 'UNKNOWN', ], 'third_party/JSON': [ 'Perl', # Build-only. # License missing upstream on 3 minor files. 'UNKNOWN', # https://rt.cpan.org/Public/Bug/Display.html?id=85915 ], 'third_party/lcov': [ # http://crbug.com/98304 'UNKNOWN', ], 'third_party/lcov/contrib/galaxy/genflat.pl': [ 'GPL (v2 or later)', ], 'third_party/libevent': [ # http://crbug.com/98309 'UNKNOWN', ], 'third_party/libjingle/source/talk': [ # http://crbug.com/98310 'UNKNOWN', ], 'third_party/libjingle/source_internal/talk': [ # http://crbug.com/98310 'UNKNOWN', ], 'third_party/libjpeg': [ # http://crbug.com/98313 'UNKNOWN', ], 'third_party/libjpeg_turbo': [ # http://crbug.com/98314 'UNKNOWN', ], 'third_party/libpng': [ # http://crbug.com/98318 'UNKNOWN', ], # The following files lack license headers, but are trivial. 
'third_party/libusb/src/libusb/os/poll_posix.h': [ 'UNKNOWN', ], 'third_party/libusb/src/libusb/version.h': [ 'UNKNOWN', ], 'third_party/libusb/src/autogen.sh': [ 'UNKNOWN', ], 'third_party/libusb/src/config.h': [ 'UNKNOWN', ], 'third_party/libusb/src/msvc/config.h': [ 'UNKNOWN', ], 'third_party/libvpx/source': [ # http://crbug.com/98319 'UNKNOWN', ], 'third_party/libvpx/source/libvpx/examples/includes': [ 'GPL (v2 or later)', ], 'third_party/libxml': [ 'UNKNOWN', ], 'third_party/libxslt': [ 'UNKNOWN', ], 'third_party/lzma_sdk': [ 'UNKNOWN', ], 'third_party/mesa/src': [ 'GPL (v2)', 'GPL (v3 or later)', 'MIT/X11 (BSD like) GPL (v3 or later) with Bison parser exception', 'UNKNOWN', # http://crbug.com/98450 ], 'third_party/modp_b64': [ 'UNKNOWN', ], 'third_party/npapi/npspy/extern/java': [ 'GPL (unversioned/unknown version)', ], 'third_party/openmax_dl/dl' : [ 'Khronos Group', ], 'third_party/openssl': [ # http://crbug.com/98451 'UNKNOWN', ], 'third_party/ots/tools/ttf-checksum.py': [ # http://code.google.com/p/ots/issues/detail?id=2 'UNKNOWN', ], 'third_party/molokocacao': [ # http://crbug.com/98453 'UNKNOWN', ], 'third_party/npapi/npspy': [ 'UNKNOWN', ], 'third_party/ocmock/OCMock': [ # http://crbug.com/98454 'UNKNOWN', ], 'third_party/ply/__init__.py': [ 'UNKNOWN', ], 'third_party/protobuf': [ # http://crbug.com/98455 'UNKNOWN', ], # http://crbug.com/222831 # https://bitbucket.org/eliben/pyelftools/issue/12 'third_party/pyelftools': [ 'UNKNOWN', ], 'third_party/pylib': [ 'UNKNOWN', ], 'third_party/scons-2.0.1/engine/SCons': [ # http://crbug.com/98462 'UNKNOWN', ], 'third_party/simplejson': [ 'UNKNOWN', ], 'third_party/skia': [ # http://crbug.com/98463 'UNKNOWN', ], 'third_party/snappy/src': [ # http://crbug.com/98464 'UNKNOWN', ], 'third_party/smhasher/src': [ # http://crbug.com/98465 'UNKNOWN', ], 'third_party/speech-dispatcher/libspeechd.h': [ 'GPL (v2 or later)', ], 'third_party/sqlite': [ 'UNKNOWN', ], # https://code.google.com/p/colorama/issues/detail?id=44 'tools/swarm_client/third_party/colorama': [ 'UNKNOWN', ], # https://github.com/kennethreitz/requests/issues/1610 'tools/swarm_client/third_party/requests': [ 'UNKNOWN', ], 'third_party/swig/Lib/linkruntime.c': [ # http://crbug.com/98585 'UNKNOWN', ], 'third_party/talloc': [ 'GPL (v3 or later)', 'UNKNOWN', # http://crbug.com/98588 ], 'third_party/tcmalloc': [ 'UNKNOWN', # http://crbug.com/98589 ], 'third_party/tlslite': [ 'UNKNOWN', ], 'third_party/webdriver': [ # http://crbug.com/98590 'UNKNOWN', ], 'third_party/webrtc': [ # http://crbug.com/98592 'UNKNOWN', ], 'third_party/xdg-utils': [ # http://crbug.com/98593 'UNKNOWN', ], 'third_party/yasm/source': [ # http://crbug.com/98594 'UNKNOWN', ], 'third_party/zlib/contrib/minizip': [ 'UNKNOWN', ], 'third_party/zlib/trees.h': [ 'UNKNOWN', ], 'tools/dromaeo_benchmark_runner/dromaeo_benchmark_runner.py': [ 'UNKNOWN', ], 'tools/emacs': [ # http://crbug.com/98595 'UNKNOWN', ], 'tools/grit/grit/node/custom/__init__.py': [ 'UNKNOWN', ], 'tools/gyp/test': [ 'UNKNOWN', ], 'tools/histograms': [ 'UNKNOWN', ], 'tools/memory_watcher': [ 'UNKNOWN', ], 'tools/playback_benchmark': [ 'UNKNOWN', ], 'tools/python/google/__init__.py': [ 'UNKNOWN', ], 'tools/site_compare': [ 'UNKNOWN', ], 'tools/stats_viewer/Properties/AssemblyInfo.cs': [ 'UNKNOWN', ], 'tools/symsrc/pefile.py': [ 'UNKNOWN', ], 'v8/test/cctest': [ # http://crbug.com/98597 'UNKNOWN', ], 'webkit/data/ico_decoder': [ 'UNKNOWN', ], } def check_licenses(options, args): # Figure out which directory we have to check. 
if len(args) == 0: # No directory to check specified, use the repository root. start_dir = options.base_directory elif len(args) == 1: # Directory specified. Start here. It's supposed to be relative to the # base directory. start_dir = os.path.abspath(os.path.join(options.base_directory, args[0])) else: # More than one argument, we don't handle this. PrintUsage() return 1 print "Using base directory:", options.base_directory print "Checking:", start_dir print licensecheck_path = os.path.abspath(os.path.join(options.base_directory, 'third_party', 'devscripts', 'licensecheck.pl')) licensecheck = subprocess.Popen([licensecheck_path, '-l', '100', '-r', start_dir], stdout=subprocess.PIPE, stderr=subprocess.PIPE) stdout, stderr = licensecheck.communicate() if options.verbose: print '----------- licensecheck stdout -----------' print stdout print '--------- end licensecheck stdout ---------' if licensecheck.returncode != 0 or stderr: print '----------- licensecheck stderr -----------' print stderr print '--------- end licensecheck stderr ---------' print "\nFAILED\n" return 1 success = True for line in stdout.splitlines(): filename, license = line.split(':', 1) filename = os.path.relpath(filename.strip(), options.base_directory) # All files in the build output directory are generated one way or another. # There's no need to check them. if filename.startswith('out/') or filename.startswith('sconsbuild/'): continue # For now we're just interested in the license. license = license.replace('*No copyright*', '').strip() # Skip generated files. if 'GENERATED FILE' in license: continue if license in WHITELISTED_LICENSES: continue if not options.ignore_suppressions: found_path_specific = False for prefix in PATH_SPECIFIC_WHITELISTED_LICENSES: if (filename.startswith(prefix) and license in PATH_SPECIFIC_WHITELISTED_LICENSES[prefix]): found_path_specific = True break if found_path_specific: continue print "'%s' has non-whitelisted license '%s'" % (filename, license) success = False if success: print "\nSUCCESS\n" return 0 else: print "\nFAILED\n" print "Please read", print "http://www.chromium.org/developers/adding-3rd-party-libraries" print "for more info how to handle the failure." print print "Please respect OWNERS of checklicenses.py. Changes violating" print "this requirement may be reverted." return 1 def main(): default_root = os.path.abspath( os.path.join(os.path.dirname(__file__), '..', '..')) option_parser = optparse.OptionParser() option_parser.add_option('--root', default=default_root, dest='base_directory', help='Specifies the repository root. This defaults ' 'to "../.." relative to the script file, which ' 'will normally be the repository root.') option_parser.add_option('-v', '--verbose', action='store_true', default=False, help='Print debug logging') option_parser.add_option('--ignore-suppressions', action='store_true', default=False, help='Ignore path-specific license whitelist.') options, args = option_parser.parse_args() return check_licenses(options, args) if '__main__' == __name__: sys.exit(main())
mogoweb/chromium-crosswalk
tools/checklicenses/checklicenses.py
Python
bsd-3-clause
16,031
[ "Galaxy" ]
66fdca772868ce3fd44b74a695fd41bfd916ba6fb3ada2509f0d7123f32e1d7e
# -*- coding: utf-8 -*- # This file is part of Shuup. # # Copyright (c) 2012-2017, Shoop Commerce Ltd. All rights reserved. # # This source code is licensed under the OSL-3.0 license found in the # LICENSE file in the root directory of this source tree. import os import time import pytest from django.core.urlresolvers import reverse from shuup.testing.browser_utils import ( click_element, wait_until_appeared, wait_until_appeared_xpath, wait_until_condition ) from shuup.testing.factories import ( create_product, create_random_person, get_default_shop, get_default_supplier ) from shuup.testing.utils import initialize_admin_browser_test pytestmark = pytest.mark.skipif(os.environ.get("SHUUP_BROWSER_TESTS", "0") != "1", reason="No browser tests run.") def create_contacts(shop): for i in range(0, 200): contact = create_random_person() contact.save() def create_products(shop): supplier = get_default_supplier() for i in range(0, 200): sku = "sku-%d" % i create_product(sku, shop, supplier, default_price=i) # used in settings list_view_settings = { "contact": { "page_header": "Contacts", "default_column_count": 7, "addable_fields": [(1, "Account Manager")], "creator": create_contacts, "test_pagination": True }, "shop_product": { "page_header": "Shop Products", "default_column_count": 6, "addable_fields": [(11, "Gtin"), (6, "Default Price")], "creator": create_products, "test_pagination": False }, "permission_group": { "page_header": "Permission Groups", "default_column_count": 1, "addable_fields": [(2, "Permissions"), (1, "Id")], # use reverse order due idx "creator": None, "test_pagination": False } } @pytest.mark.browser @pytest.mark.djangodb @pytest.mark.parametrize("visit_type", list_view_settings.keys()) def test_list_views(browser, admin_user, live_server, settings, visit_type): shop = get_default_shop() creator = list_view_settings[visit_type].get("creator", None) if creator and callable(creator): creator(shop) initialize_admin_browser_test(browser, live_server, settings) _visit_list_view(browser, live_server, visit_type) if list_view_settings[visit_type].get("test_pagination", False): _test_pagination(browser) _set_settings(browser, visit_type) def _visit_list_view(browser, live_server, list_view_name): url = reverse("shuup_admin:%s.list" % list_view_name) browser.visit("%s%s" % (live_server, url)) wait_until_condition(browser, lambda x: x.is_text_present(list_view_settings[list_view_name]["page_header"])) wait_until_appeared(browser, ".picotable-item-info") def _test_pagination(browser): ellipses = u"\u22ef" items = _get_pagination_content(browser) _assert_pagination_content(items, ["Previous", "1", "2", "3", ellipses, "11", "Next"]) _goto_page(browser, 3) items = _get_pagination_content(browser) _assert_pagination_content(items, ["Previous", "1", "2", "3", "4", "5", ellipses, "11", "Next"]) _goto_page(browser, 5) items = _get_pagination_content(browser) _assert_pagination_content(items, ["Previous", "1", ellipses, "3", "4", "5", "6", "7", ellipses, "11", "Next"]) _goto_page(browser, 7) items = _get_pagination_content(browser) _assert_pagination_content(items, ["Previous", "1", ellipses, "5", "6", "7", "8", "9", ellipses, "11", "Next"]) _goto_page(browser, 9) items = _get_pagination_content(browser) _assert_pagination_content(items, ["Previous", "1", ellipses, "7", "8", "9", "10", "11", "Next"]) _goto_page(browser, 11) items = _get_pagination_content(browser) _assert_pagination_content(items, ["Previous", "1", ellipses, "9", "10", "11", "Next"]) def _get_pagination_content(browser): 
pagination = browser.find_by_css(".pagination")[0] return pagination.find_by_tag("a") def _assert_pagination_content(items, content): assert [item.text for item in items] == content def _goto_page(browser, page_number): click_element(browser, "a[rel='%s']" % page_number) wait_until_appeared(browser, "li.active a[rel='%s']" % page_number) def _click_item(items, value): index = [item.text for item in items].index(value) items[index].click() time.sleep(0.5) # Wait mithril for a half sec def _set_settings(browser, setting_type): used_settings = list_view_settings[setting_type] default_column_count = used_settings["default_column_count"] addable_fields = used_settings["addable_fields"] # not selected by default for idx, text in addable_fields: assert not browser.is_text_present(text) #shuup_tests/browser/front/test_category_view.py settings_xpath = "(//a[contains(text(),'Settings')])[2]" # go to settings browser.find_by_xpath(settings_xpath).click() # select settings for idx, (index_key, text) in enumerate(addable_fields): expected_index = default_column_count + 1 + idx assert browser.is_text_present(text) browser.find_by_xpath("//ul[@id='source-sortable']/li[%d]/button" % index_key).first.click() wait_until_appeared_xpath(browser, "//ul[@id='target-sortable']/li[%d]/button" % expected_index) # browser.find_by_css(".btn.btn-xs.btn-success.btn-add-sortable").first.click() # save settings browser.find_by_css(".btn.btn-success").first.click() wait_until_appeared(browser, ".picotable-item-info") for idx, text in addable_fields: wait_until_condition(browser, lambda x: x.is_text_present(text)) # go back to settings browser.find_by_xpath(settings_xpath).click() wait_until_appeared_xpath(browser, "//a[contains(text(),'Reset Defaults')]") # reset to defaults browser.find_by_xpath("//a[contains(text(),'Reset Defaults')]").click() # wait wait_until_appeared(browser, ".picotable-item-info") # not selected by default for idx, text in addable_fields: assert not browser.is_text_present(text)
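# A minimal extension sketch (the view name, header, and column values are hypothetical):
# adding another admin list view to this parametrized test only requires a new entry in
# list_view_settings, mirroring the keys consumed by test_list_views and _set_settings:
#
# list_view_settings["carrier"] = {
#     "page_header": "Carriers",            # text waited for by _visit_list_view
#     "default_column_count": 3,            # columns shown before settings are changed
#     "addable_fields": [(2, "Enabled")],   # (source-sortable index, column label) pairs
#     "creator": None,                      # optional callable that seeds test data
#     "test_pagination": False,
# }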
suutari-ai/shoop
shuup_tests/browser/admin/test_picotable.py
Python
agpl-3.0
6,128
[ "VisIt" ]
7561096832ea45d639cf524ffd87a5b70b74fa0dfbde1b0956f56be920b420db
# encoding: utf-8 """ DEPRECATED Instead use netCDF4.num2date. cf.py - classes around CF compliant files The cf module is made for reading CF-compliant datasets, knowing data, its structure, units and conversions between units afterwards. Dependencies: ============= numpy netcdftime (packaged in netcdf4-python) """ __docformat__ = "restructuredtext en" import numpy as np import netcdftime import octant.io class time (np.ndarray): """Return time object from netCDF file Parameters ---------- nc : netCDF3/4 object or filename Time information will be read from this netCDF3/4 file. name : string, optional The name of the variable. units : string, optional The name of the variable units. calendar : string, optional A string representing the calendar to use. See netcdftime documentation for possible values. Returns ------- nctime : ndarray A subclass of numpy.ndarray with values equal to the time variable in the netCDF file referenced with nc. """ _unit2sec={'seconds' : 1.0, 'minutes' : 60.0, 'hours' : 3600.0, 'days' : 3600.0*24.0, 'weeks' : 3600.0*24.0*7.0, 'years' : 3600.0*24.0*365.242198781} #ref to udunits _sec2unit={'seconds' : 1.0, 'minutes' : 1.0/60.0, 'hours' : 1.0/3600.0, 'days' : 1.0/(24.0*3600.0)} def __new__(self, ncfile, name='time', units=None, calendar='standard'): raise DeprecationWarning, 'Use netCDF4.num2date instead. Wrapper function in octant.roms.nc_time' self._nc = octant.io.Dataset(ncfile) data = self._nc.variables[name][:] data = data.view(time) if units == None: units = self._nc.variables[name].units data.utime = netcdftime.utime(units, calendar=calendar) return data def __array_finalize__(self, obj): self.utime = getattr(obj, 'utime', {}) def arg_nearest_date(self, dateo): """Return index of date nearest to query date. Parameters ---------- dateo : datetime object The query date Returns ------- idx : integer The index of the date closest to dateo. If two dates are equidistant, the smaller is returned. """ to = self.utime.date2num(dateo) return np.min(np.where(np.abs(self-to) == \ np.min(np.abs(self-to)))[0]) def nearest_date(self, dateo): """Return the nearest date to query date. Parameters ---------- dateo : datetime object The query date Returns ------- nearest_date : datetime object A datetime object of the date closest to dateo. If two dates are equidistant, the smaller is returned. """ idx = np.where(np.abs(self.dates-dateo) == \ np.min(np.abs(self.dates-dateo)))[0] idx = np.min(idx) return self.dates[idx] def arg_nearest(self, to, units=None): """Return index of time nearest to query time. Parameters ---------- to : float The query time. units : string, optional The units of the reference time. Defaults to the reference time string 'units' in the netcdf object. Returns ------- idx : integer The index of the time closest to to. If two times are equidistant, the smaller is returned. """ if units is not None: to *= self._unit2sec[units] * self._sec2unit[self.utime.units] return np.min(np.where(np.abs(self-to) == np.min(np.abs(self-to)))[0]) def nearest(self, to, units=None): """Return the time nearest to the query time. Parameters ---------- to : float The query time. units : string, optional The units of the reference time. Defaults to the reference time string 'units' in the netcdf object. Returns ------- nearest : float The time value closest to to. If two times are equidistant, the smaller is returned. 
""" if units is not None: to *= self._unit2sec[units] * self._sec2unit[self.utime.units] idx = np.where(np.abs(self-to) == np.min(np.abs(self-to)))[0] idx = np.min(idx) return self[idx] def get_seconds(self): fac = self._unit2sec[self.utime.units] * self._sec2unit['seconds'] return self*fac def get_minutes(self): fac = self._unit2sec[self.utime.units] * self._sec2unit['minutes'] return self*fac def get_hours(self): fac = self._unit2sec[self.utime.units] * self._sec2unit['hours'] return self*fac def get_days(self): fac = self._unit2sec[self.utime.units] * self._sec2unit['days'] return np.asarray(self,dtype='float64')*fac def get_jd(self): utime = netcdftime.utime('days since 0001-01-01 00:00:00', \ calendar='proleptic_gregorian') return utime.date2num(self.dates) def get_dates(self): return np.array([self.utime.num2date(tval) for tval in self]) jd = property(get_jd, None, doc="Julian day, for plotting in pylab") seconds = property(get_seconds, None, doc="seconds") minutes = property(get_minutes, None, doc="minutes") hours = property(get_hours, None, doc="hours") days = property(get_days, None, doc="days") dates = property(get_dates, None, doc="datetime objects") if __name__ == '__main__': import octant.test octant.test.cf_test()
kthyng/octant
octant/cf.py
Python
bsd-3-clause
5,955
[ "NetCDF" ]
f442d53e77dae938bf18b53c4dab0cd68d280630fe1d87c43135268294c2f7a5
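# A minimal replacement sketch for the deprecated octant.cf.time class above, following
# its own advice to use netCDF4.num2date (the file name and variable name are hypothetical):
#
# import netCDF4
# nc = netCDF4.Dataset('ocean_his.nc')
# tvar = nc.variables['time']
# dates = netCDF4.num2date(tvar[:], units=tvar.units,
#                          calendar=getattr(tvar, 'calendar', 'standard'))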
#!/usr/bin/env python # -*- coding: UTF-8 -*- """ Syntenic path assembly. """ import sys import logging from string import maketrans from itertools import groupby, combinations from jcvi.formats.blast import BlastSlow, Blast from jcvi.formats.sizes import Sizes from jcvi.utils.iter import pairwise from jcvi.utils.range import range_intersect from jcvi.algorithms.graph import BiGraph, BiEdge from jcvi.apps.base import OptionParser, ActionDispatcher def main(): actions = ( ("bed", "convert ANCHORS file to BED format"), ('fromblast', 'Generate path from BLAST file'), ('happy', 'Make graph from happy mapping data'), ('partition', 'Make individual graphs partitioned by happy mapping'), ('merge', 'Merge multiple graphs together and visualize'), ('connect', 'connect contigs using long reads'), ) p = ActionDispatcher(actions) p.dispatch(globals()) def bed(args): """ %prog bed anchorsfile Convert ANCHORS file to BED format. """ from collections import defaultdict from jcvi.compara.synteny import AnchorFile, check_beds from jcvi.formats.bed import Bed, BedLine from jcvi.formats.base import get_number p = OptionParser(bed.__doc__) p.add_option("--switch", default=False, action="store_true", help="Switch reference and aligned map elements") p.add_option("--scale", type="float", help="Scale the aligned map distance by factor") p.set_beds() p.set_outfile() opts, args = p.parse_args(args) if len(args) != 1: sys.exit(not p.print_help()) anchorsfile, = args switch = opts.switch scale = opts.scale ac = AnchorFile(anchorsfile) pairs = defaultdict(list) for a, b, block_id in ac.iter_pairs(): pairs[a].append(b) qbed, sbed, qorder, sorder, is_self = check_beds(anchorsfile, p, opts) bd = Bed() for q in qbed: qseqid, qstart, qend, qaccn = q.seqid, q.start, q.end, q.accn if qaccn not in pairs: continue for s in pairs[qaccn]: si, s = sorder[s] sseqid, sstart, send, saccn = s.seqid, s.start, s.end, s.accn if switch: qseqid, sseqid = sseqid, qseqid qstart, sstart = sstart, qstart qend, send = send, qend qaccn, saccn = saccn, qaccn if scale: sstart /= scale try: newsseqid = get_number(sseqid) except ValueError: raise ValueError, "`{0}` is on `{1}` with no number to extract".\ format(saccn, sseqid) bedline = "\t".join(str(x) for x in (qseqid, qstart - 1, qend, "{0}:{1}".format(newsseqid, sstart))) bd.append(BedLine(bedline)) bd.print_to_file(filename=opts.outfile, sorted=True) def happy_nodes(row, prefix=None): row = row.translate(None, "[](){}+-") scfs = [x.strip() for x in row.split(":")] if prefix: scfs = [prefix + x for x in scfs] return scfs def happy_edges(row, prefix=None): """ Convert a row in HAPPY file and yield edges. """ trans = maketrans("[](){}", " ") row = row.strip().strip("+") row = row.translate(trans) scfs = [x.strip("+") for x in row.split(":")] for a, b in pairwise(scfs): oa = '<' if a.strip()[0] == '-' else '>' ob = '<' if b.strip()[0] == '-' else '>' is_uncertain = a[-1] == ' ' or b[0] == ' ' a = a.strip().strip('-') b = b.strip().strip('-') if prefix: a = prefix + a b = prefix + b e = BiEdge(a, b, oa, ob) yield e, is_uncertain def partition(args): """ %prog partition happy.txt synteny.graph Select edges from another graph and merge it with the certain edges built from the HAPPY mapping data. 
""" allowed_format = ("png", "ps") p = OptionParser(partition.__doc__) p.add_option("--prefix", help="Add prefix to the name [default: %default]") p.add_option("--namestart", default=0, type="int", help="Use a shorter name, starting index [default: %default]") p.add_option("--format", default="png", choices=allowed_format, help="Generate image of format [default: %default]") opts, args = p.parse_args(args) if len(args) != 2: sys.exit(not p.print_help()) happyfile, graphfile = args bg = BiGraph() bg.read(graphfile, color="red") prefix = opts.prefix fp = open(happyfile) for i, row in enumerate(fp): nns = happy_nodes(row, prefix=prefix) nodes = set(nns) edges = happy_edges(row, prefix=prefix) small_graph = BiGraph() for e, is_uncertain in edges: if is_uncertain: e.color = "gray" small_graph.add_edge(e) for (u, v), e in bg.edges.items(): # Grab edge if both vertices are on the same line if u in nodes and v in nodes: uv = (str(u), str(v)) if uv in small_graph.edges: e = small_graph.edges[uv] e.color = "blue" # supported by both evidences else: small_graph.add_edge(e) print >> sys.stderr, small_graph pngfile = "A{0:02d}.{1}".format(i + 1, opts.format) telomeres = (nns[0], nns[-1]) small_graph.draw(pngfile, namestart=opts.namestart, nodehighlight=telomeres, dpi=72) legend = ["Edge colors:"] legend.append("[BLUE] Experimental + Synteny") legend.append("[BLACK] Experimental certain") legend.append("[GRAY] Experimental uncertain") legend.append("[RED] Synteny only") legend.append("Rectangle nodes are telomeres.") print >> sys.stderr, "\n".join(legend) def merge(args): """ %prog merge graphs Merge multiple graphs together and visualize. """ p = OptionParser(merge.__doc__) p.add_option("--colorlist", default="black,red,pink,blue,green", help="The color palette [default: %default]") opts, args = p.parse_args(args) if len(args) < 1: sys.exit(not p.print_help()) colorlist = opts.colorlist.split(",") assert len(colorlist) >= len(args), "Need more colors in --colorlist" g = BiGraph() for a, c in zip(args, colorlist): g.read(a, color=c) g.draw("merged.png") def happy(args): """ %prog happy happy.txt Make bi-directed graph from HAPPY mapping data. JCVI encodes uncertainties in the order of the contigs / scaffolds. : separates scaffolds + means telomere (though the telomere repeats may not show because the telomere-adjacent sequence is missing) - means that the scaffold is in reverse orientation to that shown in the 2003 TIGR scaffolds. Ambiguities are represented as follows, using Paul Dear.s description: [ ] means undetermined orientation. error quite possible (70% confidence?) ( ) means uncertain orientation. small chance of error (90% confidence?) { } means uncertain order. Example: +-8254707:8254647:-8254690:{[8254694]:[8254713]:[8254531]:[8254797]}:8254802:8254788+ """ p = OptionParser(happy.__doc__) p.add_option("--prefix", help="Add prefix to the name [default: %default]") opts, args = p.parse_args(args) if len(args) != 1: sys.exit(not p.print_help()) happyfile, = args certain = "certain.graph" uncertain = "uncertain.graph" fw1 = open(certain, "w") fw2 = open(uncertain, "w") fp = open(happyfile) for row in fp: for e, is_uncertain in happy_edges(row, prefix=opts.prefix): fw = fw2 if is_uncertain else fw1 print >> fw, e logging.debug("Edges written to `{0}`".format(",".join((certain, uncertain)))) def fromblast(args): """ %prog fromblast blastfile subject.fasta Generate path from BLAST file. 
If multiple subjects map to the same query, an edge is constructed between them (with the link provided by the query). The BLAST file MUST be filtered, chained, supermapped. """ from jcvi.formats.blast import sort from jcvi.utils.range import range_distance p = OptionParser(fromblast.__doc__) p.add_option("--clique", default=False, action="store_true", help="Populate clique instead of linear path [default: %default]") p.add_option("--maxdist", default=100000, type="int", help="Create edge within certain distance [default: %default]") p.set_verbose(help="Print verbose reports to stdout") opts, args = p.parse_args(args) if len(args) != 2: sys.exit(not p.print_help()) blastfile, subjectfasta = args clique = opts.clique maxdist = opts.maxdist sort([blastfile, "--query"]) blast = BlastSlow(blastfile, sorted=True) g = BiGraph() for query, blines in groupby(blast, key=lambda x: x.query): blines = list(blines) iterator = combinations(blines, 2) if clique else pairwise(blines) for a, b in iterator: asub, bsub = a.subject, b.subject if asub == bsub: continue arange = (a.query, a.qstart, a.qstop, "+") brange = (b.query, b.qstart, b.qstop, "+") dist, oo = range_distance(arange, brange, distmode="ee") if dist > maxdist: continue atag = ">" if a.orientation == "+" else "<" btag = ">" if b.orientation == "+" else "<" g.add_edge(BiEdge(asub, bsub, atag, btag)) graph_to_agp(g, blastfile, subjectfasta, verbose=opts.verbose) def graph_to_agp(g, blastfile, subjectfasta, verbose=False): from jcvi.formats.agp import order_to_agp logging.debug(str(g)) g.write("graph.txt") #g.draw("graph.pdf") paths = [] for path in g.iter_paths(): m, oo = g.path(path) if len(oo) == 1: # Singleton path continue paths.append(oo) if verbose: print m print oo npaths = len(paths) ntigs = sum(len(x) for x in paths) logging.debug("Graph decomposed to {0} paths with {1} components.".\ format(npaths, ntigs)) agpfile = blastfile + ".agp" sizes = Sizes(subjectfasta) fwagp = open(agpfile, "w") scaffolded = set() for i, oo in enumerate(paths): ctgorder = [(str(ctg), ("+" if strand else "-")) \ for ctg, strand in oo] scaffolded |= set(ctg for ctg, strand in ctgorder) object = "pmol_{0:04d}".format(i) order_to_agp(object, ctgorder, sizes.mapping, fwagp) # Get the singletons as well nsingletons = 0 for ctg, size in sizes.iter_sizes(): if ctg in scaffolded: continue ctgorder = [(ctg, "+")] object = ctg order_to_agp(object, ctgorder, sizes.mapping, fwagp) nsingletons += 1 logging.debug("Written {0} unscaffolded singletons.".format(nsingletons)) fwagp.close() logging.debug("AGP file written to `{0}`.".format(agpfile)) def connect(args): """ %prog connect assembly.fasta read_mapping.blast Connect contigs using long reads. 
""" p = OptionParser(connect.__doc__) p.add_option("--clip", default=2000, type="int", help="Only consider end of contigs [default: %default]") opts, args = p.parse_args(args) if len(args) != 2: sys.exit(not p.print_help()) fastafile, blastfile = args clip = opts.clip sizes = Sizes(fastafile).mapping blast = Blast(blastfile) blasts = [] for b in blast: seqid = b.subject size = sizes[seqid] start, end = b.sstart, b.sstop cstart, cend = min(size, clip), max(0, size - clip) if start > cstart and end < cend: continue blasts.append(b) key = lambda x: x.query blasts.sort(key=key) g = BiGraph() for query, bb in groupby(blasts, key=key): bb = sorted(bb, key=lambda x: x.qstart) nsubjects = len(set(x.subject for x in bb)) if nsubjects == 1: continue print "\n".join(str(x) for x in bb) for a, b in pairwise(bb): astart, astop = a.qstart, a.qstop bstart, bstop = b.qstart, b.qstop if a.subject == b.subject: continue arange = astart, astop brange = bstart, bstop ov = range_intersect(arange, brange) alen = astop - astart + 1 blen = bstop - bstart + 1 if ov: ostart, ostop = ov ov = ostop - ostart + 1 print ov, alen, blen if ov and (ov > alen / 2 or ov > blen / 2): print "Too much overlap ({0})".format(ov) continue asub = a.subject bsub = b.subject atag = ">" if a.orientation == "+" else "<" btag = ">" if b.orientation == "+" else "<" e = BiEdge(asub, bsub, atag, btag) g.add_edge(e) print "=" * 5, e graph_to_agp(g, blastfile, fastafile, verbose=False) if __name__ == '__main__': main()
sgordon007/jcvi_062915
assembly/syntenypath.py
Python
bsd-2-clause
13,323
[ "BLAST" ]
6e370bd51da2d78a720b92277cefd040f91ecae162b07ecc2b554aa5fda5c9f0
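# A minimal usage sketch for the HAPPY parsing helpers in syntenypath.py above (the row
# string is a shortened, hypothetical variant of the example in the happy() docstring,
# and the prefix is made up):
#
# row = "+-8254707:8254647:-8254690:{[8254694]:[8254713]}:8254802+"
# for edge, is_uncertain in happy_edges(row, prefix="scf"):
#     print edge, "(uncertain)" if is_uncertain else "(certain)"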
import common import string import os import re import test_db_util import shutil import logging import time import tempfile import tarfile import urllib import galaxy.webapps.tool_shed.util.hgweb_config import galaxy.model.tool_shed_install as galaxy_model import galaxy.util from base.tool_shed_util import repository_installation_timeout from base.twilltestcase import TwillTestCase from galaxy.util.json import loads from galaxy.web import security from tool_shed.util.encoding_util import tool_shed_encode from tool_shed.util import shed_util_common as suc from tool_shed.util import hg_util from tool_shed.util import xml_util from galaxy import eggs eggs.require( 'mercurial' ) eggs.require( 'twill' ) from mercurial.util import Abort from mercurial import commands from mercurial import hg from mercurial import ui import twill.commands as tc log = logging.getLogger( __name__ ) class ShedTwillTestCase( TwillTestCase ): def setUp( self ): # Security helper self.security = security.SecurityHelper( id_secret='changethisinproductiontoo' ) self.history_id = None self.hgweb_config_dir = os.environ.get( 'TEST_HG_WEB_CONFIG_DIR' ) self.hgweb_config_manager = galaxy.webapps.tool_shed.util.hgweb_config.HgWebConfigManager() self.hgweb_config_manager.hgweb_config_dir = self.hgweb_config_dir self.tool_shed_test_tmp_dir = os.environ.get( 'TOOL_SHED_TEST_TMP_DIR', None) self.host = os.environ.get( 'TOOL_SHED_TEST_HOST' ) self.port = os.environ.get( 'TOOL_SHED_TEST_PORT' ) self.url = "http://%s:%s" % ( self.host, self.port ) self.galaxy_host = os.environ.get( 'GALAXY_TEST_HOST' ) self.galaxy_port = os.environ.get( 'GALAXY_TEST_PORT' ) self.galaxy_url = "http://%s:%s" % ( self.galaxy_host, self.galaxy_port ) self.shed_tool_data_table_conf = os.environ.get( 'TOOL_SHED_TEST_TOOL_DATA_TABLE_CONF' ) self.file_dir = os.environ.get( 'TOOL_SHED_TEST_FILE_DIR', None ) self.tool_shed_test_file = None self.tool_data_path = os.environ.get( 'GALAXY_TEST_TOOL_DATA_PATH' ) self.shed_tool_conf = os.environ.get( 'GALAXY_TEST_SHED_TOOL_CONF' ) self.test_db_util = test_db_util # TODO: Figure out a way to alter these attributes during tests. self.galaxy_tool_dependency_dir = os.environ.get( 'GALAXY_TEST_TOOL_DEPENDENCY_DIR' ) self.shed_tools_dict = {} def add_repository_review_component( self, **kwd ): url = '/repository_review/create_component?operation=create' self.visit_url( url ) self.submit_form( 1, 'create_component_button', **kwd ) def assign_admin_role( self, repository, user ): # As elsewhere, twill limits the possibility of submitting the form, this time due to not executing the javascript # attached to the role selection form. Visit the action url directly with the necessary parameters. 
url = '/repository/manage_repository_admins?id=%s&in_users=%d&manage_role_associations_button=Save' % \ ( self.security.encode_id( repository.id ), user.id ) self.visit_url( url ) self.check_for_strings( strings_displayed=[ 'Role', 'has been associated' ] ) def browse_category( self, category, strings_displayed=[], strings_not_displayed=[] ): url = '/repository/browse_valid_categories?sort=name&operation=valid_repositories_by_category&id=%s' % \ self.security.encode_id( category.id ) self.visit_url( url ) self.check_for_strings( strings_displayed, strings_not_displayed ) def browse_component_review( self, review, strings_displayed=[], strings_not_displayed=[] ): url = '/repository_review/browse_review?id=%s' % self.security.encode_id( review.id ) self.visit_url( url ) self.check_for_strings( strings_displayed, strings_not_displayed ) def browse_custom_datatypes( self, strings_displayed=[], strings_not_displayed=[] ): url = '/repository/browse_datatypes' self.visit_url( url ) self.check_for_strings( strings_displayed, strings_not_displayed ) def browse_repository( self, repository, strings_displayed=[], strings_not_displayed=[] ): url = '/repository/browse_repository?id=%s' % self.security.encode_id( repository.id ) self.visit_url( url ) self.check_for_strings( strings_displayed, strings_not_displayed ) def browse_repository_dependencies( self, strings_displayed=[], strings_not_displayed=[] ): url = '/repository/browse_repository_dependencies' self.visit_url( url ) self.check_for_strings( strings_displayed, strings_not_displayed ) def browse_tool_shed( self, url, strings_displayed=[], strings_not_displayed=[] ): self.visit_galaxy_url( '/admin_toolshed/browse_tool_shed?tool_shed_url=%s' % url ) self.check_for_strings( strings_displayed, strings_not_displayed ) def browse_tool_dependencies( self, strings_displayed=[], strings_not_displayed=[] ): url = '/repository/browse_tool_dependencies' self.visit_url( url ) self.check_for_strings( strings_displayed, strings_not_displayed ) def browse_tools( self, strings_displayed=[], strings_not_displayed=[] ): url = '/repository/browse_tools' self.visit_url( url ) self.check_for_strings( strings_displayed, strings_not_displayed ) def check_count_of_metadata_revisions_associated_with_repository( self, repository, metadata_count ): self.check_repository_changelog( repository ) self.check_string_count_in_page( 'Repository metadata is associated with this change set.', metadata_count ) def check_exported_repository_dependency( self, dependency_filename, repository_name, repository_owner ): root, error_message = xml_util.parse_xml( dependency_filename ) for elem in root.findall( 'repository' ): if 'changeset_revision' in elem: raise AssertionError( 'Exported repository %s with owner %s has a dependency with a defined changeset revision.' % \ ( repository_name, repository_owner ) ) if 'toolshed' in elem: raise AssertionError( 'Exported repository %s with owner %s has a dependency with a defined tool shed.' 
% \ ( repository_name, repository_owner ) ) def check_for_valid_tools( self, repository, strings_displayed=[], strings_not_displayed=[] ): strings_displayed.append( 'Valid tools' ) self.display_manage_repository_page( repository, strings_displayed, strings_not_displayed ) def check_galaxy_repository_db_status( self, repository_name, owner, expected_status ): installed_repository = test_db_util.get_installed_repository_by_name_owner( repository_name, owner ) assert installed_repository.status == expected_status, 'Status in database is %s, expected %s' % \ ( installed_repository.status, expected_status ) def check_galaxy_repository_tool_panel_section( self, repository, expected_tool_panel_section ): metadata = repository.metadata assert 'tools' in metadata, 'Tools not found in repository metadata: %s' % metadata tool_metadata = metadata[ 'tools' ] # If integrated_tool_panel.xml is to be tested, this test method will need to be enhanced to handle tools # from the same repository in different tool panel sections. Getting the first tool guid is ok, because # currently all tools contained in a single repository will be loaded into the same tool panel section. if repository.status in [ galaxy_model.ToolShedRepository.installation_status.UNINSTALLED, galaxy_model.ToolShedRepository.installation_status.DEACTIVATED ]: tool_panel_section = self.get_tool_panel_section_from_repository_metadata( metadata ) else: tool_panel_section = self.get_tool_panel_section_from_api( metadata ) assert tool_panel_section == expected_tool_panel_section, 'Expected to find tool panel section *%s*, but instead found *%s*\nMetadata: %s\n' % \ ( expected_tool_panel_section, tool_panel_section, metadata ) def check_installed_repository_tool_dependencies( self, installed_repository, strings_displayed=[], strings_not_displayed=[], dependencies_installed=False ): # Tool dependencies are not being installed in these functional tests. If this is changed, the test method will also need to be updated. if not dependencies_installed: strings_displayed.append( 'Missing tool dependencies' ) else: strings_displayed.append( 'Tool dependencies' ) if dependencies_installed: strings_displayed.append( 'Installed' ) else: strings_displayed.append( 'Never installed' ) url = '/admin_toolshed/manage_repository?id=%s' % self.security.encode_id( installed_repository.id ) self.visit_galaxy_url( url ) self.check_for_strings( strings_displayed, strings_not_displayed ) def check_manifest( self, manifest_filepath, owner=None ): root, error_message = xml_util.parse_xml( manifest_filepath ) for elem in root.findall( 'repository' ): repository_name = elem.get( 'name' ) manifest_owner = elem.get( 'username' ) if owner is not None: assert manifest_owner == owner, 'Expected repository %s to be owned by %s, but found %s' % \ ( elem.get( 'name' ), owner, manifest_owner ) toolshed = elem.get( 'toolshed' ) changeset_revision = elem.get( 'changeset_revision' ) assert toolshed is None, 'Repository definition %s has a tool shed attribute %s.' % ( repository_name, toolshed ) assert changeset_revision is None, 'Repository definition %s specifies a changeset revision %s.' 
% \ ( repository_name, changeset_revision ) repository_archive = elem.find( 'archive' ).text filepath, filename = os.path.split( manifest_filepath ) repository_path = os.path.join( filepath, repository_archive ) self.verify_repository_in_capsule( repository_path, repository_name, owner ) def check_repository_changelog( self, repository, strings_displayed=[], strings_not_displayed=[] ): url = '/repository/view_changelog?id=%s' % self.security.encode_id( repository.id ) self.visit_url( url ) self.check_for_strings( strings_displayed, strings_not_displayed ) def check_repository_dependency( self, repository, depends_on_repository, depends_on_changeset_revision=None, changeset_revision=None ): strings_displayed = [ depends_on_repository.name, depends_on_repository.user.username ] if depends_on_changeset_revision: strings_displayed.append( depends_on_changeset_revision ) self.display_manage_repository_page( repository, changeset_revision=changeset_revision, strings_displayed=strings_displayed ) def check_repository_metadata( self, repository, tip_only=True ): if tip_only: assert self.tip_has_metadata( repository ) and len( self.get_repository_metadata_revisions( repository ) ) == 1, \ 'Repository tip is not a metadata revision: Repository tip - %s, metadata revisions - %s.' else: assert len( self.get_repository_metadata_revisions( repository ) ) > 0, \ 'Repository tip is not a metadata revision: Repository tip - %s, metadata revisions - %s.' % \ ( self.get_repository_tip( repository ), ', '.join( self.get_repository_metadata_revisions( repository ) ) ) def check_repository_tools_for_changeset_revision( self, repository, changeset_revision, tool_metadata_strings_displayed=[], tool_page_strings_displayed=[] ): ''' Loop through each tool dictionary in the repository metadata associated with the received changeset_revision. For each of these, check for a tools attribute, and load the tool metadata page if it exists, then display that tool's page. ''' test_db_util.refresh( repository ) repository_metadata = self.get_repository_metadata_by_changeset_revision( repository, changeset_revision ) metadata = repository_metadata.metadata if 'tools' not in metadata: raise AssertionError( 'No tools in %s revision %s.' 
% ( repository.name, changeset_revision ) ) for tool_dict in metadata[ 'tools' ]: tool_id = tool_dict[ 'id' ] tool_xml = tool_dict[ 'tool_config' ] url = '/repository/view_tool_metadata?repository_id=%s&changeset_revision=%s&tool_id=%s' % \ ( self.security.encode_id( repository.id ), changeset_revision, tool_id ) self.visit_url( url ) self.check_for_strings( tool_metadata_strings_displayed ) self.load_display_tool_page( repository, tool_xml_path=tool_xml, changeset_revision=changeset_revision, strings_displayed=tool_page_strings_displayed, strings_not_displayed=[] ) def check_repository_invalid_tools_for_changeset_revision( self, repository, changeset_revision, strings_displayed=[], strings_not_displayed=[] ): '''Load the invalid tool page for each invalid tool associated with this changeset revision and verify the received error messages.''' repository_metadata = self.get_repository_metadata_by_changeset_revision( repository, changeset_revision ) metadata = repository_metadata.metadata assert 'invalid_tools' in metadata, 'Metadata for changeset revision %s does not define invalid tools' % changeset_revision for tool_xml in metadata[ 'invalid_tools' ]: self.load_invalid_tool_page( repository, tool_xml=tool_xml, changeset_revision=changeset_revision, strings_displayed=strings_displayed, strings_not_displayed=strings_not_displayed ) def check_string_count_in_page( self, pattern, min_count, max_count=None ): """Checks the number of 'pattern' occurrences in the current browser page""" page = self.last_page() pattern_count = page.count( pattern ) if max_count is None: max_count = min_count # The number of occurrences of pattern in the page should be between min_count # and max_count, so show error if pattern_count is less than min_count or greater # than max_count. if pattern_count < min_count or pattern_count > max_count: fname = self.write_temp_file( page ) errmsg = "%i occurrences of '%s' found (min. %i, max. 
%i).\npage content written to '%s' " % \ ( pattern_count, pattern, min_count, max_count, fname ) raise AssertionError( errmsg ) def clone_repository( self, repository, destination_path ): url = '%s/repos/%s/%s' % ( self.url, repository.user.username, repository.name ) success, message = hg_util.clone_repository( url, destination_path, self.get_repository_tip( repository ) ) assert success is True, message def commit_and_push( self, repository, hgrepo, options, username, password ): url = 'http://%s:%s@%s:%s/repos/%s/%s' % ( username, password, self.host, self.port, repository.user.username, repository.name ) commands.commit( ui.ui(), hgrepo, **options ) try: commands.push( ui.ui(), hgrepo, dest=url ) except Abort, e: message = e if 'authorization failed' in message: return False else: raise return True def create_category( self, **kwd ): category = test_db_util.get_category_by_name( kwd[ 'name' ] ) if category is None: self.visit_url( '/admin/manage_categories?operation=create' ) self.submit_form( form_no=1, button="create_category_button", **kwd ) category = test_db_util.get_category_by_name( kwd[ 'name' ] ) return category def create_repository_dependency( self, repository=None, repository_tuples=[], filepath=None, prior_installation_required=False, complex=False, package=None, version=None, strings_displayed=[], strings_not_displayed=[] ): repository_names = [] if complex: filename = 'tool_dependencies.xml' self.generate_complex_dependency_xml( filename=filename, filepath=filepath, repository_tuples=repository_tuples, package=package, version=version ) else: for toolshed_url, name, owner, changeset_revision in repository_tuples: repository_names.append( name ) dependency_description = '%s depends on %s.' % ( repository.name, ', '.join( repository_names ) ) filename = 'repository_dependencies.xml' self.generate_simple_dependency_xml( repository_tuples=repository_tuples, filename=filename, filepath=filepath, dependency_description=dependency_description, prior_installation_required=prior_installation_required ) self.upload_file( repository, filename=filename, filepath=filepath, valid_tools_only=False, uncompress_file=False, remove_repo_files_not_in_tar=False, commit_message='Uploaded dependency on %s.' 
% ', '.join( repository_names ), strings_displayed=[], strings_not_displayed=[] ) def create_repository_review( self, repository, review_contents_dict, changeset_revision=None, copy_from=None): strings_displayed = [] if not copy_from: strings_displayed.append( 'Begin your review' ) strings_not_displayed = [] kwd = dict() if not changeset_revision: changeset_revision = self.get_repository_tip( repository ) url = '/repository_review/create_review?changeset_revision=%s&id=%s' % ( changeset_revision, self.security.encode_id( repository.id ) ) self.visit_url( url ) self.check_for_strings( strings_displayed, strings_not_displayed ) strings_displayed = [] if copy_from: old_changeset_revision, review_id = copy_from strings_displayed = [ 'You have elected to create a new review', 'Select previous revision', changeset_revision ] self.check_for_strings( strings_displayed ) strings_displayed = [] url = '/repository_review/create_review?changeset_revision=%s&id=%s&previous_review_id=%s' % \ ( self.get_repository_tip( repository ), self.security.encode_id( repository.id ), self.security.encode_id( review_id ) ) self.visit_url( url ) self.fill_review_form( review_contents_dict, strings_displayed, strings_not_displayed ) def create_user_in_galaxy( self, cntrller='user', email='test@bx.psu.edu', password='testuser', username='admin-user', redirect='' ): self.visit_galaxy_url( "/user/create?cntrller=%s&use_panels=False" % cntrller ) self.submit_form( '1', 'create_user_button', email=email, password=password, confirm=password, username=username, redirect=redirect ) previously_created = False username_taken = False invalid_username = False try: self.check_page_for_string( "Created new user account" ) except: try: # May have created the account in a previous test run... self.check_page_for_string( "User with that email already exists" ) previously_created = True except: try: self.check_page_for_string( 'Public name is taken; please choose another' ) username_taken = True except: try: # Note that we're only checking if the usr name is >< 4 chars here... self.check_page_for_string( 'Public name must be at least 4 characters in length' ) invalid_username = True except: pass return previously_created, username_taken, invalid_username def deactivate_repository( self, installed_repository, strings_displayed=[], strings_not_displayed=[] ): url = '/admin_toolshed/deactivate_or_uninstall_repository?id=%s' % self.security.encode_id( installed_repository.id ) self.visit_galaxy_url( url ) self.check_for_strings( strings_displayed, strings_not_displayed ) form = tc.browser.get_form( 'deactivate_or_uninstall_repository' ) kwd = self.set_form_value( form, {}, 'remove_from_disk', False ) tc.submit( 'deactivate_or_uninstall_repository_button' ) strings_displayed = [ 'The repository named', 'has been deactivated' ] self.check_for_strings( strings_displayed, strings_not_displayed=[] ) def delete_files_from_repository( self, repository, filenames=[], strings_displayed=[ 'were deleted from the repository' ], strings_not_displayed=[] ): files_to_delete = [] basepath = self.get_repo_path( repository ) repository_files = self.get_repository_file_list( base_path=basepath, current_path=None ) # Verify that the files to delete actually exist in the repository. for filename in repository_files: if filename in filenames: files_to_delete.append( os.path.join( basepath, filename ) ) self.browse_repository( repository ) # Twill sets hidden form fields to read-only by default. We need to write to this field. 
form = tc.browser.get_form( 'select_files_to_delete' ) form.find_control( "selected_files_to_delete" ).readonly = False tc.fv( "2", "selected_files_to_delete", ','.join( files_to_delete ) ) tc.submit( 'select_files_to_delete_button' ) self.check_for_strings( strings_displayed, strings_not_displayed ) def delete_repository( self, repository ): repository_id = self.security.encode_id( repository.id ) self.visit_url( '/admin/browse_repositories' ) url = '/admin/browse_repositories?operation=Delete&id=%s' % repository_id strings_displayed = [ 'Deleted 1 repository', repository.name ] strings_not_displayed = [] self.visit_url( url ) self.check_for_strings( strings_displayed, strings_not_displayed ) def display_all_workflows( self, strings_displayed=[], strings_not_displayed=[] ): url = '/workflow' self.visit_galaxy_url( url ) self.check_for_strings( strings_displayed, strings_not_displayed ) def display_galaxy_browse_repositories_page( self, strings_displayed=[], strings_not_displayed=[] ): url = '/admin_toolshed/browse_repositories' self.visit_galaxy_url( url ) self.check_for_strings( strings_displayed, strings_not_displayed ) def display_installed_manage_data_manager_page( self, installed_repository, data_manager_names=None, strings_displayed=[], strings_not_displayed=[] ): data_managers = installed_repository.metadata.get( 'data_manager', {} ).get( 'data_managers', {} ) if data_manager_names: if not isinstance( data_manager_names, list ): data_manager_names = [data_manager_names] for data_manager_name in data_manager_names: assert data_manager_name in data_managers, "The requested Data Manager '%s' was not found in repository metadata." % data_manager_name else: data_manager_name = data_managers.keys() for data_manager_name in data_manager_names: url = '/data_manager/manage_data_manager?id=%s' % data_managers[data_manager_name]['guid'] self.visit_galaxy_url( url ) self.check_for_strings( strings_displayed, strings_not_displayed ) def display_installed_repository_manage_page( self, installed_repository, strings_displayed=[], strings_not_displayed=[] ): url = '/admin_toolshed/manage_repository?id=%s' % self.security.encode_id( installed_repository.id ) self.visit_galaxy_url( url ) strings_displayed.append( str( installed_repository.installed_changeset_revision ) ) self.check_for_strings( strings_displayed, strings_not_displayed ) def display_installed_workflow_image( self, repository, workflow_name, strings_displayed=[], strings_not_displayed=[] ): url = '/admin_toolshed/generate_workflow_image?repository_id=%s&workflow_name=%s' % \ ( self.security.encode_id( repository.id ), tool_shed_encode( workflow_name ) ) self.visit_galaxy_url( url ) self.check_for_strings( strings_displayed, strings_not_displayed ) def display_manage_repository_page( self, repository, changeset_revision=None, strings_displayed=[], strings_not_displayed=[] ): base_url = '/repository/manage_repository?id=%s' % self.security.encode_id( repository.id ) if changeset_revision: url = '%s&changeset_revision=%s' % ( base_url, changeset_revision ) else: changeset_revision = self.get_repository_tip( repository ) url = base_url self.visit_url( url ) self.check_for_strings( strings_displayed, strings_not_displayed ) def display_repository_clone_page( self, owner_name, repository_name, strings_displayed=[], strings_not_displayed=[] ): url = '/repos/%s/%s' % ( owner_name, repository_name ) self.visit_url( url ) self.check_for_strings( strings_displayed, strings_not_displayed ) def display_repository_file_contents( self, repository, 
filename, filepath=None, strings_displayed=[], strings_not_displayed=[] ): '''Find a file in the repository and display the contents.''' basepath = self.get_repo_path( repository ) repository_file_list = [] if filepath: relative_path = os.path.join( basepath, filepath ) else: relative_path = basepath repository_file_list = self.get_repository_file_list( base_path=relative_path, current_path=None ) assert filename in repository_file_list, 'File %s not found in the repository under %s.' % ( filename, relative_path ) url = '/repository/get_file_contents?file_path=%s' % os.path.join( relative_path, filename ) self.visit_url( url ) self.check_for_strings( strings_displayed, strings_not_displayed ) def display_reviewed_repositories_owned_by_user( self, strings_displayed=[], strings_not_displayed=[] ): url = '/repository_review/reviewed_repositories_i_own' self.visit_url( url ) self.check_for_strings( strings_displayed, strings_not_displayed ) def display_repository_reviews_by_user( self, user, strings_displayed=[], strings_not_displayed=[] ): url = '/repository_review/repository_reviews_by_user?id=%s' % self.security.encode_id( user.id ) self.visit_url( url ) self.check_for_strings( strings_displayed, strings_not_displayed ) def edit_repository_categories( self, repository, categories_to_add=[], categories_to_remove=[], restore_original=True ): url = '/repository/manage_repository?id=%s' % self.security.encode_id( repository.id ) self.visit_url( url ) strings_displayed = [] strings_not_displayed = [] for category in categories_to_add: tc.fv( "2", "category_id", '+%s' % category) strings_displayed.append( "selected>%s" % category ) for category in categories_to_remove: tc.fv( "2", "category_id", '-%s' % category) strings_not_displayed.append( "selected>%s" % category ) tc.submit( "manage_categories_button" ) self.check_for_strings( strings_displayed, strings_not_displayed ) if restore_original: strings_displayed = [] strings_not_displayed = [] for category in categories_to_remove: tc.fv( "2", "category_id", '+%s' % category) strings_displayed.append( "selected>%s" % category ) for category in categories_to_add: tc.fv( "2", "category_id", '-%s' % category) strings_not_displayed.append( "selected>%s" % category ) tc.submit( "manage_categories_button" ) self.check_for_strings( strings_displayed, strings_not_displayed ) def edit_repository_information( self, repository, revert=True, **kwd ): url = '/repository/manage_repository?id=%s' % self.security.encode_id( repository.id ) self.visit_url( url ) original_information = dict( repo_name=repository.name, description=repository.description, long_description=repository.long_description ) strings_displayed = [] strings_not_displayed = [] for input_elem_name in [ 'repo_name', 'description', 'long_description', 'repository_type' ]: if input_elem_name in kwd: tc.fv( "edit_repository", input_elem_name, kwd[ input_elem_name ] ) strings_displayed.append( self.escape_html( kwd[ input_elem_name ] ) ) tc.submit( "edit_repository_button" ) self.check_for_strings( strings_displayed ) if revert: strings_displayed = [] for input_elem_name in [ 'repo_name', 'description', 'long_description' ]: tc.fv( "edit_repository", input_elem_name, original_information[ input_elem_name ] ) strings_displayed.append( self.escape_html( original_information[ input_elem_name ] ) ) tc.submit( "edit_repository_button" ) self.check_for_strings( strings_displayed ) def enable_email_alerts( self, repository, strings_displayed=[], strings_not_displayed=[] ): repository_id = 
self.security.encode_id( repository.id ) params = dict( operation='Receive email alerts', id=repository_id ) self.visit_url( '/repository/browse_repositories', params ) self.check_for_strings( strings_displayed ) def escape_html( self, string, unescape=False ): html_entities = [ ('&', 'X' ), ( "'", '&#39;' ), ( '"', '&#34;' ) ] for character, replacement in html_entities: if unescape: string = string.replace( replacement, character ) else: string = string.replace( character, replacement ) return string def export_capsule( self, repository ): url = '/repository/export?repository_id=%s&changeset_revision=%s' % \ ( self.security.encode_id( repository.id ), self.get_repository_tip( repository ) ) self.visit_url( url ) self.submit_form( 'export_repository', 'export_repository_button' ) fd, capsule_filename = tempfile.mkstemp() os.close( fd ) file( capsule_filename, 'w' ).write( self.last_page() ) return capsule_filename def fill_review_form( self, review_contents_dict, strings_displayed=[], strings_not_displayed=[] ): kwd = dict() changed = False for label, contents in review_contents_dict.items(): if contents: changed = True kwd[ '%s__ESEP__comment' % label ] = contents[ 'comment' ] kwd[ '%s__ESEP__rating' % label ] = contents[ 'rating' ] if 'private' in contents: kwd[ '%s__ESEP__private' % label ] = contents[ 'private' ] kwd[ '%s__ESEP__approved' % label ] = contents[ 'approved' ] else: kwd[ '%s__ESEP__approved' % label ] = 'not_applicable' self.check_for_strings( strings_displayed, strings_not_displayed ) self.submit_form( 1, 'Workflows__ESEP__review_button', **kwd ) if changed: strings_displayed.append( 'Reviews were saved' ) self.check_for_strings( strings_displayed, strings_not_displayed ) def galaxy_login( self, email='test@bx.psu.edu', password='testuser', username='admin-user', redirect='' ): previously_created, username_taken, invalid_username = \ self.create_user_in_galaxy( email=email, password=password, username=username, redirect=redirect ) if previously_created: self.visit_galaxy_url( "/user/login?use_panels=False" ) self.submit_form( '1', 'login_button', email=email, redirect=redirect, password=password ) def galaxy_logout( self ): self.visit_galaxy_url( "/user/logout" ) self.check_page_for_string( "You have been logged out" ) def generate_complex_dependency_xml( self, filename, filepath, repository_tuples, package, version ): file_path = os.path.join( filepath, filename ) dependency_entries = [] template = string.Template( common.new_repository_dependencies_line ) for toolshed_url, name, owner, changeset_revision in repository_tuples: dependency_entries.append( template.safe_substitute( toolshed_url=toolshed_url, owner=owner, repository_name=name, changeset_revision=changeset_revision, prior_installation_required='' ) ) if not os.path.exists( filepath ): os.makedirs( filepath ) dependency_template = string.Template( common.complex_repository_dependency_template ) repository_dependency_xml = dependency_template.safe_substitute( package=package, version=version, dependency_lines='\n'.join( dependency_entries ) ) # Save the generated xml to the specified location. 
file( file_path, 'w' ).write( repository_dependency_xml ) def generate_simple_dependency_xml( self, repository_tuples, filename, filepath, dependency_description='', complex=False, package=None, version=None, prior_installation_required=False ): if not os.path.exists( filepath ): os.makedirs( filepath ) dependency_entries = [] if prior_installation_required: prior_installation_value = ' prior_installation_required="True"' else: prior_installation_value = '' for toolshed_url, name, owner, changeset_revision in repository_tuples: template = string.Template( common.new_repository_dependencies_line ) dependency_entries.append( template.safe_substitute( toolshed_url=toolshed_url, owner=owner, repository_name=name, changeset_revision=changeset_revision, prior_installation_required=prior_installation_value ) ) if dependency_description: description = ' description="%s"' % dependency_description else: description = dependency_description template_parser = string.Template( common.new_repository_dependencies_xml ) repository_dependency_xml = template_parser.safe_substitute( description=description, dependency_lines='\n'.join( dependency_entries ) ) # Save the generated xml to the specified location. full_path = os.path.join( filepath, filename ) file( full_path, 'w' ).write( repository_dependency_xml ) def generate_temp_path( self, test_script_path, additional_paths=[] ): temp_path = os.path.join( self.tool_shed_test_tmp_dir, test_script_path, os.sep.join( additional_paths ) ) if not os.path.exists( temp_path ): os.makedirs( temp_path ) return temp_path def get_datatypes_count( self ): url = '/api/datatypes?upload_only=false' self.visit_galaxy_url( url ) html = self.last_page() datatypes = loads( html ) return len( datatypes ) def get_env_sh_path( self, tool_dependency_name, tool_dependency_version, repository ): '''Return the absolute path to an installed repository's env.sh file.''' env_sh_path = os.path.join( self.get_tool_dependency_path( tool_dependency_name, tool_dependency_version, repository ), 'env.sh' ) return env_sh_path def get_filename( self, filename, filepath=None ): if filepath is not None: return os.path.abspath( os.path.join( filepath, filename ) ) else: return os.path.abspath( os.path.join( self.file_dir, filename ) ) def get_hg_repo( self, path ): return hg.repository( ui.ui(), path ) def get_last_reviewed_revision_by_user( self, user, repository ): changelog_tuples = self.get_repository_changelog_tuples( repository ) reviews = test_db_util.get_reviews_ordered_by_changeset_revision( repository.id, changelog_tuples, reviewer_user_id = user.id ) if reviews: last_review = reviews[ -1 ] else: last_review = None return last_review def get_tool_dependency_path( self, tool_dependency_name, tool_dependency_version, repository ): '''Return the absolute path for an installed tool dependency.''' return os.path.join( self.galaxy_tool_dependency_dir, tool_dependency_name, tool_dependency_version, repository.owner, repository.name, repository.installed_changeset_revision ) def get_or_create_repository( self, owner=None, strings_displayed=[], strings_not_displayed=[], **kwd ): repository = test_db_util.get_repository_by_name_and_owner( kwd[ 'name' ], owner ) if repository is None: self.visit_url( '/repository/create_repository' ) self.submit_form( 1, 'create_repository_button', **kwd ) self.check_for_strings( strings_displayed, strings_not_displayed ) repository = test_db_util.get_repository_by_name_and_owner( kwd[ 'name' ], owner ) return repository def get_repo_path( self, repository ): # An 
entry in the hgweb.config file looks something like: repos/test/mira_assembler = database/community_files/000/repo_123 lhs = "repos/%s/%s" % ( repository.user.username, repository.name ) try: return self.hgweb_config_manager.get_entry( lhs ) except: raise Exception( "Entry for repository %s missing in hgweb config file %s." % ( lhs, self.hgweb_config_manager.hgweb_config ) ) def get_repository_changelog_tuples( self, repository ): repo = self.get_hg_repo( self.get_repo_path( repository ) ) changelog_tuples = [] for changeset in repo.changelog: ctx = repo.changectx( changeset ) changelog_tuples.append( ( ctx.rev(), repo.changectx( changeset ) ) ) return changelog_tuples def get_repository_datatypes_count( self, repository ): metadata = self.get_repository_metadata( repository )[0].metadata if 'datatypes' not in metadata: return 0 else: return len( metadata[ 'datatypes' ] ) def get_repository_file_list( self, base_path, current_path=None ): '''Recursively load repository folder contents and append them to a list. Similar to os.walk but via /repository/open_folder.''' if current_path is None: request_param_path = base_path else: request_param_path = os.path.join( base_path, current_path ) # Get the current folder's contents. url = '/repository/open_folder?folder_path=%s' % request_param_path self.visit_url( url ) file_list = loads( self.last_page() ) returned_file_list = [] if current_path is not None: returned_file_list.append( current_path ) # Loop through the json dict returned by /repository/open_folder. for file_dict in file_list: if file_dict[ 'isFolder' ]: # This is a folder. Get the contents of the folder and append it to the list, # prefixed with the path relative to the repository root, if any. if current_path is None: returned_file_list.extend( self.get_repository_file_list( base_path=base_path, current_path=file_dict[ 'title' ] ) ) else: sub_path = os.path.join( current_path, file_dict[ 'title' ] ) returned_file_list.extend( self.get_repository_file_list( base_path=base_path, current_path=sub_path ) ) else: # This is a regular file, prefix the filename with the current path and append it to the list. 
if current_path is not None: returned_file_list.append( os.path.join( current_path, file_dict[ 'title' ] ) ) else: returned_file_list.append( file_dict[ 'title' ] ) return returned_file_list def get_repository_metadata( self, repository ): return [ metadata_revision for metadata_revision in repository.metadata_revisions ] def get_repository_metadata_by_changeset_revision( self, repository, changeset_revision ): return test_db_util.get_repository_metadata_for_changeset_revision( repository.id, changeset_revision ) def get_repository_metadata_revisions( self, repository ): return [ str( repository_metadata.changeset_revision ) for repository_metadata in repository.metadata_revisions ] def get_repository_tip( self, repository ): repo = self.get_hg_repo( self.get_repo_path( repository ) ) return str( repo.changectx( repo.changelog.tip() ) ) def get_sniffers_count( self ): url = '/api/datatypes/sniffers' self.visit_galaxy_url( url ) html = self.last_page() sniffers = loads( html ) return len( sniffers ) def get_tools_from_repository_metadata( self, repository, include_invalid=False ): '''Get a list of valid and (optionally) invalid tool dicts from the repository metadata.''' valid_tools = [] invalid_tools = [] for repository_metadata in repository.metadata_revisions: if 'tools' in repository_metadata.metadata: valid_tools.append( dict( tools=repository_metadata.metadata[ 'tools' ], changeset_revision=repository_metadata.changeset_revision ) ) if include_invalid and 'invalid_tools' in repository_metadata.metadata: invalid_tools.append( dict( tools=repository_metadata.metadata[ 'invalid_tools' ], changeset_revision=repository_metadata.changeset_revision ) ) return valid_tools, invalid_tools def get_tool_panel_section_from_api( self, metadata ): tool_metadata = metadata[ 'tools' ] tool_guid = urllib.quote_plus( tool_metadata[ 0 ][ 'guid' ], safe='' ) api_url = '/%s' % '/'.join( [ 'api', 'tools', tool_guid ] ) self.visit_galaxy_url( api_url ) tool_dict = loads( self.last_page() ) tool_panel_section = tool_dict[ 'panel_section_name' ] return tool_panel_section def get_tool_panel_section_from_repository_metadata( self, metadata ): tool_metadata = metadata[ 'tools' ] tool_guid = tool_metadata[ 0 ][ 'guid' ] assert 'tool_panel_section' in metadata, 'Tool panel section not found in metadata: %s' % metadata tool_panel_section_metadata = metadata[ 'tool_panel_section' ] # tool_section_dict = dict( tool_config=guids_and_configs[ guid ], # id=section_id, # name=section_name, # version=section_version ) # This dict is appended to tool_panel_section_metadata[ tool_guid ] tool_panel_section = tool_panel_section_metadata[ tool_guid ][ 0 ][ 'name' ] return tool_panel_section def grant_role_to_user( self, user, role ): strings_displayed = [ self.security.encode_id( role.id ), role.name ] strings_not_displayed = [] self.visit_url( '/admin/roles' ) self.check_for_strings( strings_displayed, strings_not_displayed ) params = dict( operation='manage users and groups', id=self.security.encode_id( role.id ) ) url = '/admin/roles' self.visit_url( url, params ) strings_displayed = [ common.test_user_1_email, common.test_user_2_email ] self.check_for_strings( strings_displayed, strings_not_displayed ) # As elsewhere, twill limits the possibility of submitting the form, this time due to not executing the javascript # attached to the role selection form. Visit the action url directly with the necessary parameters. 
params = dict( id=self.security.encode_id( role.id ), in_users=user.id, operation='manage users and groups', role_members_edit_button='Save' ) url = '/admin/manage_users_and_groups_for_role' self.visit_url( url, params ) strings_displayed = [ "Role '%s' has been updated" % role.name ] self.check_for_strings( strings_displayed, strings_not_displayed ) def grant_write_access( self, repository, usernames=[], strings_displayed=[], strings_not_displayed=[], post_submit_strings_displayed=[], post_submit_strings_not_displayed=[] ): self.display_manage_repository_page( repository ) self.check_for_strings( strings_displayed, strings_not_displayed ) for username in usernames: tc.fv( "user_access", "allow_push", '+%s' % username ) tc.submit( 'user_access_button' ) self.check_for_strings( post_submit_strings_displayed, post_submit_strings_not_displayed ) def import_capsule( self, filename, strings_displayed=[], strings_not_displayed=[], strings_displayed_after_submit=[], strings_not_displayed_after_submit=[] ): url = '/repository/upload_capsule' self.visit_url( url ) tc.formfile( 'upload_capsule', 'file_data', filename ) tc.submit( 'upload_capsule_button' ) self.check_for_strings( strings_displayed, strings_not_displayed ) self.submit_form( 'import_capsule', 'import_capsule_button' ) self.check_for_strings( strings_displayed_after_submit, strings_not_displayed_after_submit ) def import_workflow( self, repository, workflow_name, strings_displayed=[], strings_not_displayed=[] ): url = '/admin_toolshed/import_workflow?repository_id=%s&workflow_name=%s' % \ ( self.security.encode_id( repository.id ), tool_shed_encode( workflow_name ) ) self.visit_galaxy_url( url ) if workflow_name not in strings_displayed: strings_displayed.append( workflow_name ) self.check_for_strings( strings_displayed, strings_not_displayed ) def initiate_installation_process( self, install_tool_dependencies=False, install_repository_dependencies=True, no_changes=True, new_tool_panel_section_label=None ): html = self.last_page() # Since the installation process is by necessity asynchronous, we have to get the parameters to 'manually' initiate the # installation process. This regex will return the tool shed repository IDs in group(1), the encoded_kwd parameter in # group(2), and the reinstalling flag in group(3) and pass them to the manage_repositories method in the Galaxy # admin_toolshed controller. install_parameters = re.search( 'initiate_repository_installation\( "([^"]+)", "([^"]+)", "([^"]+)" \);', html ) if install_parameters: iri_ids = install_parameters.group(1) # In some cases, the returned iri_ids are of the form: "[u'<encoded id>', u'<encoded id>']" # This regex ensures that non-hex characters are stripped out of the list, so that galaxy.util.listify/decode_id # will handle them correctly. It's safe to pass the cleaned list to manage_repositories, because it can parse # comma-separated values. 
repository_ids = str( iri_ids ) repository_ids = re.sub( '[^a-fA-F0-9,]+', '', repository_ids ) encoded_kwd = install_parameters.group(2) reinstalling = install_parameters.group(3) url = '/admin_toolshed/manage_repositories?operation=install&tool_shed_repository_ids=%s&encoded_kwd=%s&reinstalling=%s' % \ ( ','.join( galaxy.util.listify( repository_ids ) ), encoded_kwd, reinstalling ) self.visit_galaxy_url( url ) return galaxy.util.listify( repository_ids ) def install_repositories_from_search_results( self, repositories, install_tool_dependencies=False, strings_displayed=[], strings_not_displayed=[], **kwd ): ''' Normally, it would be possible to check the appropriate boxes in the search results, and click the install button. This works in a browser, but Twill manages to lose the 'toolshedgalaxyurl' cookie between one page and the next, so it's necessary to work around this by explicitly visiting the prepare_for_install method on the Galaxy side. ''' url = '/admin_toolshed/prepare_for_install?tool_shed_url=%s&repository_ids=%s&changeset_revisions=%s' % \ ( self.url, ','.join( self.security.encode_id( repository.id ) for repository in repositories ), \ ','.join( self.get_repository_tip( repository ) for repository in repositories ) ) self.visit_galaxy_url( url ) self.check_for_strings( strings_displayed, strings_not_displayed ) if 'install_tool_dependencies' in self.last_page(): form = tc.browser.get_form( 'select_tool_panel_section' ) checkbox = form.find_control( id="install_tool_dependencies" ) checkbox.disabled = False if install_tool_dependencies: checkbox.selected = True kwd[ 'install_tool_dependencies' ] = 'True' else: checkbox.selected = False kwd[ 'install_tool_dependencies' ] = 'False' self.submit_form( 1, 'select_tool_panel_section_button', **kwd ) repository_ids = self.initiate_installation_process() self.wait_for_repository_installation( repository_ids ) def install_repository( self, name, owner, category_name, install_tool_dependencies=False, install_repository_dependencies=True, changeset_revision=None, strings_displayed=[], strings_not_displayed=[], preview_strings_displayed=[], post_submit_strings_displayed=[], new_tool_panel_section_label=None, includes_tools_for_display_in_tool_panel=True, **kwd ): self.browse_tool_shed( url=self.url ) self.browse_category( test_db_util.get_category_by_name( category_name ) ) self.preview_repository_in_tool_shed( name, owner, strings_displayed=preview_strings_displayed ) repository = test_db_util.get_repository_by_name_and_owner( name, owner ) repository_id = self.security.encode_id( repository.id ) if changeset_revision is None: changeset_revision = self.get_repository_tip( repository ) url = '/repository/install_repositories_by_revision?changeset_revisions=%s&repository_ids=%s&galaxy_url=%s' % \ ( changeset_revision, repository_id, self.galaxy_url ) self.visit_url( url ) self.check_for_strings( strings_displayed, strings_not_displayed ) # This section is tricky, due to the way twill handles form submission. The tool dependency checkbox needs to # be hacked in through tc.browser, putting the form field in kwd doesn't work. form = tc.browser.get_form( 'select_tool_panel_section' ) if form is None: form = tc.browser.get_form( 'select_shed_tool_panel_config' ) assert form is not None, 'Could not find form select_shed_tool_panel_config or select_tool_panel_section.' 
kwd = self.set_form_value( form, kwd, 'install_tool_dependencies', install_tool_dependencies ) kwd = self.set_form_value( form, kwd, 'install_repository_dependencies', install_repository_dependencies ) kwd = self.set_form_value( form, kwd, 'shed_tool_conf', self.shed_tool_conf ) if new_tool_panel_section_label is not None: kwd = self.set_form_value( form, kwd, 'new_tool_panel_section_label', new_tool_panel_section_label ) submit_button_control = form.find_control( type='submit' ) assert submit_button_control is not None, 'No submit button found for form %s.' % form.attrs.get( 'id' ) self.submit_form( form.attrs.get( 'id' ), str( submit_button_control.name ), **kwd ) self.check_for_strings( post_submit_strings_displayed, strings_not_displayed ) repository_ids = self.initiate_installation_process( new_tool_panel_section_label=new_tool_panel_section_label ) log.debug( 'Waiting for the installation of repository IDs: %s' % str( repository_ids ) ) self.wait_for_repository_installation( repository_ids ) def load_citable_url( self, username, repository_name, changeset_revision, encoded_user_id, encoded_repository_id, strings_displayed=[], strings_not_displayed=[], strings_displayed_in_iframe=[], strings_not_displayed_in_iframe=[] ): url = '%s/view/%s' % ( self.url, username ) # If repository name is passed in, append that to the url. if repository_name: url += '/%s' % repository_name if changeset_revision: # Changeset revision should never be provided unless repository name also is. assert repository_name is not None, 'Changeset revision is present, but repository name is not - aborting.' url += '/%s' % changeset_revision self.visit_url( url ) self.check_for_strings( strings_displayed, strings_not_displayed ) # Now load the page that should be displayed inside the iframe and check for strings. 
if encoded_repository_id: url = '/repository/view_repository?id=%s&operation=view_or_manage_repository' % encoded_repository_id if changeset_revision: url += '&changeset_revision=%s' % changeset_revision self.visit_url( url ) self.check_for_strings( strings_displayed_in_iframe, strings_not_displayed_in_iframe ) elif encoded_user_id: url = '/repository/browse_repositories?user_id=%s&operation=repositories_by_user' % encoded_user_id self.visit_url( url ) self.check_for_strings( strings_displayed_in_iframe, strings_not_displayed_in_iframe ) def load_changeset_in_tool_shed( self, repository_id, changeset_revision, strings_displayed=[], strings_not_displayed=[] ): url = '/repository/view_changeset?ctx_str=%s&id=%s' % ( changeset_revision, repository_id ) self.visit_url( url ) self.check_for_strings( strings_displayed, strings_not_displayed ) def load_checkable_revisions( self, strings_displayed=[], strings_not_displayed=[] ): params = urllib.urlencode( dict( do_not_test='false', downloadable='true', includes_tools='true', malicious='false', missing_test_components='false', skip_tool_test='false' ) ) api_url = '%s?%s' % ( '/'.join( [ self.url, 'api', 'repository_revisions' ] ), params ) self.visit_url( api_url ) self.check_for_strings( strings_displayed, strings_not_displayed ) def load_display_tool_page( self, repository, tool_xml_path, changeset_revision, strings_displayed=[], strings_not_displayed=[] ): url = '/repository/display_tool?repository_id=%s&tool_config=%s&changeset_revision=%s' % \ ( self.security.encode_id( repository.id ), tool_xml_path, changeset_revision ) self.visit_url( url ) self.check_for_strings( strings_displayed, strings_not_displayed ) def load_galaxy_tool_migrations_page( self, strings_displayed=[], strings_not_displayed=[] ): url = '/admin/review_tool_migration_stages' self.visit_galaxy_url( url ) self.check_for_strings( strings_displayed, strings_not_displayed ) def load_invalid_tool_page( self, repository, tool_xml, changeset_revision, strings_displayed=[], strings_not_displayed=[] ): url = '/repository/load_invalid_tool?repository_id=%s&tool_config=%s&changeset_revision=%s' % \ ( self.security.encode_id( repository.id ), tool_xml, changeset_revision ) self.visit_url( url ) self.check_for_strings( strings_displayed, strings_not_displayed ) def load_page_for_installed_tool( self, tool_guid, strings_displayed=[], strings_not_displayed=[] ): url = '/tool_runner?tool_id=%s' % tool_guid self.visit_galaxy_url( url ) self.check_for_strings( strings_displayed, strings_not_displayed ) def load_workflow_image_in_tool_shed( self, repository, workflow_name, changeset_revision=None, strings_displayed=[], strings_not_displayed=[] ): if not changeset_revision: changeset_revision = self.get_repository_tip( repository ) metadata = self.get_repository_metadata_by_changeset_revision( repository, changeset_revision ) if not metadata: raise AssertionError( 'Metadata not found for changeset revision %s.' 
% changeset_revision ) url = '/repository/generate_workflow_image?repository_metadata_id=%s&workflow_name=%s' % \ ( self.security.encode_id( metadata.id ), tool_shed_encode( workflow_name ) ) self.visit_url( url ) self.check_for_strings( strings_displayed, strings_not_displayed ) def manage_review_components( self, strings_displayed=[], strings_not_displayed=[] ): url = '/repository_review/manage_components' self.visit_url( url ) self.check_for_strings( strings_displayed, strings_not_displayed ) def preview_repository_in_tool_shed( self, name, owner, changeset_revision=None, strings_displayed=[], strings_not_displayed=[] ): repository = test_db_util.get_repository_by_name_and_owner( name, owner ) if not changeset_revision: changeset_revision = self.get_repository_tip( repository ) self.visit_url( '/repository/preview_tools_in_changeset?repository_id=%s&changeset_revision=%s' % \ ( self.security.encode_id( repository.id ), changeset_revision ) ) self.check_for_strings( strings_displayed, strings_not_displayed ) def preview_workflow_in_tool_shed( self, repository_name, owner, workflow_name, strings_displayed=[], strings_not_displayed=[] ): repository = test_db_util.get_repository_by_name_and_owner( repository_name, owner ) metadata = self.get_repository_metadata( repository ) url = '/repository/view_workflow?workflow_name=%s&repository_metadata_id=%s' % \ ( tool_shed_encode( workflow_name ), self.security.encode_id( metadata[0].id ) ) self.visit_url( url ) self.check_for_strings( strings_displayed, strings_not_displayed ) def reactivate_repository( self, installed_repository ): params = dict( operation='activate or reinstall', id=self.security.encode_id( installed_repository.id ) ) url = '/admin_toolshed/browse_repositories' self.visit_galaxy_url( url, params ) strings_displayed = [ installed_repository.name, 'repository has been activated' ] self.check_for_strings( strings_displayed, [] ) def reinstall_repository( self, installed_repository, install_repository_dependencies=True, install_tool_dependencies=False, no_changes=True, new_tool_panel_section_label='', strings_displayed=[], strings_not_displayed=[] ): url = '/admin_toolshed/reselect_tool_panel_section?id=%s' % self.security.encode_id( installed_repository.id ) self.visit_galaxy_url( url ) self.check_for_strings( strings_displayed, strings_not_displayed=[] ) # Build the url that will simulate a filled-out form being submitted. Due to a limitation in twill, the reselect_tool_panel_section # form doesn't get parsed correctly. encoded_repository_id = self.security.encode_id( installed_repository.id ) params = dict( id=encoded_repository_id, no_changes=no_changes, new_tool_panel_section_label=new_tool_panel_section_label ) doseq = False if install_repository_dependencies: params[ 'install_repository_dependencies' ] = [ 'True', 'True' ] doseq = True else: params[ 'install_repository_dependencies' ] = False if install_tool_dependencies: params[ 'install_tool_dependencies' ] = [ 'True', 'True' ] doseq = True else: params[ 'install_tool_dependencies' ] = False url = '/admin_toolshed/reinstall_repository' self.visit_galaxy_url( url, params=params, doseq=doseq ) # Manually initiate the install process, as with installing a repository. See comments in the # initiate_installation_process method for details. 
repository_ids = self.initiate_installation_process( install_tool_dependencies, install_repository_dependencies, no_changes, new_tool_panel_section_label ) # Finally, wait until all repositories are in a final state (either Error or Installed) before returning. self.wait_for_repository_installation( repository_ids ) def repository_is_new( self, repository ): repo = self.get_hg_repo( self.get_repo_path( repository ) ) tip_ctx = repo.changectx( repo.changelog.tip() ) return tip_ctx.rev() < 0 def reset_installed_repository_metadata( self, repository ): url = '/admin_toolshed/reset_repository_metadata?id=%s' % self.security.encode_id( repository.id ) self.visit_galaxy_url( url ) self.check_for_strings( [ 'Metadata has been reset' ] ) def reset_metadata_on_selected_repositories( self, repository_ids ): self.visit_url( '/admin/reset_metadata_on_selected_repositories_in_tool_shed' ) kwd = dict( repository_ids=repository_ids ) self.submit_form( form_no=1, button="reset_metadata_on_selected_repositories_button", **kwd ) def reset_metadata_on_selected_installed_repositories( self, repository_ids ): self.visit_galaxy_url( '/admin_toolshed/reset_metadata_on_selected_installed_repositories' ) kwd = dict( repository_ids=repository_ids ) self.submit_form( form_no=1, button="reset_metadata_on_selected_repositories_button", **kwd ) def reset_repository_metadata( self, repository ): url = '/repository/reset_all_metadata?id=%s' % self.security.encode_id( repository.id ) self.visit_url( url ) self.check_for_strings( [ 'All repository metadata has been reset.' ] ) def repair_installed_repository( self, repository ): repository_id = self.security.encode_id( repository.id ) url = '/admin_toolshed/repair_repository?id=%s' % repository_id self.visit_galaxy_url( url ) self.submit_form( 'repair_repository', 'repair_repository_button' ) def review_repository( self, repository, review_contents_dict, user=None, changeset_revision=None ): strings_displayed = [] strings_not_displayed = [] kwd = dict() if not changeset_revision: changeset_revision = self.get_repository_tip( repository ) if user: review = test_db_util.get_repository_review_by_user_id_changeset_revision( user.id, repository.id, changeset_revision ) url = '/repository_review/edit_review?id=%s' % self.security.encode_id( review.id ) self.visit_url( url ) self.fill_review_form( review_contents_dict, strings_displayed, strings_not_displayed ) def revoke_write_access( self, repository, username ): url = '/repository/manage_repository?user_access_button=Remove&id=%s&remove_auth=%s' % \ ( self.security.encode_id( repository.id ), username ) self.visit_url( url ) def search_for_valid_tools( self, search_fields={}, exact_matches=False, strings_displayed=[], strings_not_displayed=[], from_galaxy=False ): if from_galaxy: galaxy_url = '?galaxy_url=%s' % self.galaxy_url else: galaxy_url = '' for field_name, search_string in search_fields.items(): url = '/repository/find_tools%s' % galaxy_url self.visit_url( url ) tc.fv( "1", "exact_matches", exact_matches ) tc.fv( "1", field_name, search_string ) tc.submit() self.check_for_strings( strings_displayed, strings_not_displayed ) def send_message_to_repository_owner( self, repository, message, strings_displayed=[], strings_not_displayed=[], post_submit_strings_displayed=[], post_submit_strings_not_displayed=[] ): url = '/repository/contact_owner?id=%s' % self.security.encode_id( repository.id ) self.visit_url( url ) self.check_for_strings( strings_displayed, strings_not_displayed ) tc.fv( 1, 'message', message ) tc.submit() 
self.check_for_strings( post_submit_strings_displayed, post_submit_strings_not_displayed ) def set_form_value( self, form, kwd, field_name, field_value ): ''' Set the form field field_name to field_value if it exists, and return the provided dict containing that value. If the field does not exist in the provided form, return a dict without that index. ''' form_id = form.attrs.get( 'id' ) controls = [ control for control in form.controls if str( control.name ) == field_name ] if len( controls ) > 0: log.debug( 'Setting field %s of form %s to %s.' % ( field_name, form_id, str( field_value ) ) ) tc.formvalue( form_id, field_name, str( field_value ) ) kwd[ field_name ] = str( field_value ) else: if field_name in kwd: log.debug( 'No field %s in form %s, discarding from return value.' % ( str( control ), str( form_id ) ) ) del( kwd[ field_name ] ) return kwd def set_repository_deprecated( self, repository, set_deprecated=True, strings_displayed=[], strings_not_displayed=[] ): url = '/repository/deprecate?id=%s&mark_deprecated=%s' % ( self.security.encode_id( repository.id ), set_deprecated ) self.visit_url( url ) self.check_for_strings( strings_displayed, strings_not_displayed ) def set_repository_malicious( self, repository, set_malicious=True, strings_displayed=[], strings_not_displayed=[] ): self.display_manage_repository_page( repository ) tc.fv( "malicious", "malicious", set_malicious ) tc.submit( "malicious_button" ) self.check_for_strings( strings_displayed, strings_not_displayed ) def set_skip_tool_tsts_flag( self, repository, flag_value, reason, changeset_revision=None ): if changeset_revision is None: changeset_revision = self.get_repository_tip( repository ) self.display_manage_repository_page( repository, changeset_revision=changeset_revision ) form = tc.browser.get_form( 'skip_tool_tests' ) assert form is not None, 'Could not find form skip_tool_tests.' 
for control in form.controls: control_name = str( control.name ) if control_name == 'skip_tool_tests' and control.type == 'checkbox': checkbox = control.get() checkbox.selected = flag_value elif control_name == 'skip_tool_tests_comment': tc.browser.clicked( form, control ) tc.formvalue( 'skip_tool_tests', control_name, reason ) kwd = dict() self.submit_form( 'skip_tool_tests', 'skip_tool_tests_button', **kwd ) if flag_value is True: self.check_for_strings( strings_displayed=[ 'Tools in this revision will not be tested by the automated test framework' ] ) else: self.check_for_strings( strings_displayed=[ 'Tools in this revision will be tested by the automated test framework' ] ) def tip_has_metadata( self, repository ): tip = self.get_repository_tip( repository ) return test_db_util.get_repository_metadata_by_repository_id_changeset_revision( repository.id, tip ) def undelete_repository( self, repository ): repository_id = self.security.encode_id( repository.id ) url = '/admin/browse_repositories?operation=Undelete&id=%s' % repository_id strings_displayed = [ 'Undeleted 1 repository', repository.name ] strings_not_displayed = [] self.visit_url( url ) self.check_for_strings( strings_displayed, strings_not_displayed ) def uninstall_repository( self, installed_repository, strings_displayed=[], strings_not_displayed=[] ): url = '/admin_toolshed/deactivate_or_uninstall_repository?id=%s' % self.security.encode_id( installed_repository.id ) self.visit_galaxy_url( url ) self.check_for_strings( strings_displayed, strings_not_displayed ) form = tc.browser.get_form( 'deactivate_or_uninstall_repository' ) kwd = self.set_form_value( form, {}, 'remove_from_disk', True ) tc.submit( 'deactivate_or_uninstall_repository_button' ) strings_displayed = [ 'The repository named', 'has been uninstalled' ] self.check_for_strings( strings_displayed, strings_not_displayed=[] ) def update_installed_repository( self, installed_repository, strings_displayed=[], strings_not_displayed=[] ): url = '/admin_toolshed/check_for_updates?id=%s' % self.security.encode_id( installed_repository.id ) self.visit_galaxy_url( url ) self.check_for_strings( strings_displayed, strings_not_displayed ) def update_tool_shed_status( self ): url = '/admin_toolshed/update_tool_shed_status_for_installed_repository?all_installed_repositories=True' self.visit_galaxy_url( url ) def upload_file( self, repository, filename, filepath, valid_tools_only, uncompress_file, remove_repo_files_not_in_tar, commit_message, strings_displayed=[], strings_not_displayed=[] ): removed_message = 'files were removed from the repository' if remove_repo_files_not_in_tar: if not self.repository_is_new( repository ): if removed_message not in strings_displayed: strings_displayed.append( removed_message ) else: if removed_message not in strings_not_displayed: strings_not_displayed.append( removed_message ) self.visit_url( '/upload/upload?repository_id=%s' % self.security.encode_id( repository.id ) ) if valid_tools_only: strings_displayed.extend( [ 'has been successfully', 'uploaded to the repository.' 
] ) tc.formfile( "1", "file_data", self.get_filename( filename, filepath ) ) if uncompress_file: tc.fv( 1, 'uncompress_file', 'Yes' ) else: tc.fv( 1, 'uncompress_file', 'No' ) if not self.repository_is_new( repository ): if remove_repo_files_not_in_tar: tc.fv( 1, 'remove_repo_files_not_in_tar', 'Yes' ) else: tc.fv( 1, 'remove_repo_files_not_in_tar', 'No' ) tc.fv( 1, 'commit_message', commit_message ) tc.submit( "upload_button" ) self.check_for_strings( strings_displayed, strings_not_displayed ) # Uncomment this if it becomes necessary to wait for an asynchronous process to complete after submitting an upload. #for i in range( 5 ): # try: # self.check_for_strings( strings_displayed, strings_not_displayed ) # break # except Exception, e: # if i == 4: # raise e # else: # time.sleep( 1 ) # continue def upload_url( self, repository, url, filepath, valid_tools_only, uncompress_file, remove_repo_files_not_in_tar, commit_message, strings_displayed=[], strings_not_displayed=[] ): removed_message = 'files were removed from the repository' if remove_repo_files_not_in_tar: if not self.repository_is_new( repository ): if removed_message not in strings_displayed: strings_displayed.append( removed_message ) else: if removed_message not in strings_not_displayed: strings_not_displayed.append( removed_message ) self.visit_url( '/upload/upload?repository_id=%s' % self.security.encode_id( repository.id ) ) if valid_tools_only: strings_displayed.extend( [ 'has been successfully', 'uploaded to the repository.' ] ) tc.fv( "1", "url", url ) if uncompress_file: tc.fv( 1, 'uncompress_file', 'Yes' ) else: tc.fv( 1, 'uncompress_file', 'No' ) if not self.repository_is_new( repository ): if remove_repo_files_not_in_tar: tc.fv( 1, 'remove_repo_files_not_in_tar', 'Yes' ) else: tc.fv( 1, 'remove_repo_files_not_in_tar', 'No' ) tc.fv( 1, 'commit_message', commit_message ) tc.submit( "upload_button" ) self.check_for_strings( strings_displayed, strings_not_displayed ) def verify_capsule_contents( self, capsule_filepath, owner ): tar_object = tarfile.open( capsule_filepath, 'r:*' ) extraction_path = tempfile.mkdtemp() tar_object.extractall( extraction_path ) for root, dirs, files in os.walk( extraction_path ): if 'manifest.xml' in files: self.check_manifest( os.path.join( root, 'manifest.xml' ), owner=owner ) shutil.rmtree( extraction_path ) def verify_installed_repositories( self, installed_repositories=[], uninstalled_repositories=[] ): for repository_name, repository_owner in installed_repositories: galaxy_repository = test_db_util.get_installed_repository_by_name_owner( repository_name, repository_owner ) if galaxy_repository: assert galaxy_repository.status == 'Installed', \ 'Repository %s should be installed, but is %s' % ( repository_name, galaxy_repository.status ) def verify_installed_repository_metadata_unchanged( self, name, owner ): installed_repository = test_db_util.get_installed_repository_by_name_owner( name, owner ) metadata = installed_repository.metadata self.reset_installed_repository_metadata( installed_repository ) new_metadata = installed_repository.metadata assert metadata == new_metadata, 'Metadata for installed repository %s differs after metadata reset.' 
% name def verify_installed_repository_no_tool_panel_section( self, repository ): '''Verify that there is no 'tool_panel_section' entry in the repository metadata.''' metadata = repository.metadata assert 'tool_panel_section' not in metadata, 'Tool panel section incorrectly found in metadata: %s' % metadata def verify_installed_repository_data_table_entries( self, required_data_table_entries ): # The value of the received required_data_table_entries will be something like: [ 'sam_fa_indexes' ] data_tables, error_message = xml_util.parse_xml( self.shed_tool_data_table_conf ) found = False # With the tool shed, the "path" attribute that is hard-coded into the tool_data_tble_conf.xml # file is ignored. This is because the tool shed requires the directory location to which this # path points to be empty except when a specific tool is loaded. The default location for this # directory configured for the tool shed is <Galaxy root>/shed-tool-data. When a tool is loaded # in the tool shed, all contained .loc.sample files are copied to this directory and the # ToolDataTableManager parses and loads the files in the same way that Galaxy does with a very # important exception. When the tool shed loads a tool and parses and loads the copied ,loc.sample # files, the ToolDataTableManager is already instantiated, and so its add_new_entries_from_config_file() # method is called and the tool_data_path parameter is used to over-ride the hard-coded "tool-data" # directory that Galaxy always uses. # # Tool data table xml structure: # <tables> # <table comment_char="#" name="sam_fa_indexes"> # <columns>line_type, value, path</columns> # <file path="tool-data/sam_fa_indices.loc" /> # </table> # </tables> required_data_table_entry = None for table_elem in data_tables.findall( 'table' ): # The value of table_elem will be something like: <table comment_char="#" name="sam_fa_indexes"> for required_data_table_entry in required_data_table_entries: # The value of required_data_table_entry will be something like: 'sam_fa_indexes' if 'name' in table_elem.attrib and table_elem.attrib[ 'name' ] == required_data_table_entry: found = True # We're processing something like: sam_fa_indexes file_elem = table_elem.find( 'file' ) # We have something like: <file path="tool-data/sam_fa_indices.loc" /> # The "path" attribute of the "file" tag is the location that Galaxy always uses because the # Galaxy ToolDataTableManager was implemented in such a way that the hard-coded path is used # rather than allowing the location to be a configurable setting like the tool shed requires. file_path = file_elem.get( 'path', None ) # The value of file_path will be something like: "tool-data/all_fasta.loc" assert file_path is not None, 'The "path" attribute is missing for the %s entry.' % name # The following test is probably not necesary, but the tool-data directory should exist! galaxy_tool_data_dir, loc_file_name = os.path.split( file_path ) assert galaxy_tool_data_dir is not None, 'The hard-coded Galaxy tool-data directory is missing for the %s entry.' % name assert os.path.exists( galaxy_tool_data_dir ), 'The Galaxy tool-data directory does not exist.' # Make sure the loc_file_name was correctly copied into the configured directory location. configured_file_location = os.path.join( self.tool_data_path, loc_file_name ) assert os.path.isfile( configured_file_location ), 'The expected copied file "%s" is missing.' 
% configured_file_location # We've found the value of the required_data_table_entry in data_tables, which is the parsed # shed_tool_data_table_conf.xml, so all is well! break if found: break # We better have an entry like: <table comment_char="#" name="sam_fa_indexes"> in our parsed data_tables # or we know that the repository was not correctly installed! assert found, 'No entry for %s in %s.' % ( required_data_table_entry, self.shed_tool_data_table_conf ) def verify_repository_in_capsule( self, repository_archive, repository_name, repository_owner ): repository_extraction_dir = tempfile.mkdtemp() repository_tar_object = tarfile.open( repository_archive, 'r:*' ) repository_tar_object.extractall( repository_extraction_dir ) for root, dirs, files in os.walk( repository_extraction_dir ): for filename in files: if filename in [ 'tool_dependencies.xml', 'repository_dependencies.xml' ]: dependency_filepath = os.path.join( root, filename ) self.check_exported_repository_dependency( dependency_filepath, repository_name, repository_owner ) shutil.rmtree( repository_extraction_dir ) def verify_repository_reviews( self, repository, reviewer=None, strings_displayed=[], strings_not_displayed=[] ): changeset_revision = self.get_repository_tip( repository ) # Verify that the currently logged in user has a repository review for the specified repository, reviewer, and changeset revision. strings_displayed=[ repository.name, reviewer.username ] self.display_reviewed_repositories_owned_by_user( strings_displayed=strings_displayed ) # Verify that the reviewer has reviewed the specified repository's changeset revision. strings_displayed=[ repository.name, repository.description ] self.display_repository_reviews_by_user( reviewer, strings_displayed=strings_displayed ) # Load the review and check for the components passed in strings_displayed. review = test_db_util.get_repository_review_by_user_id_changeset_revision( reviewer.id, repository.id, changeset_revision ) self.browse_component_review( review, strings_displayed=strings_displayed ) def verify_tool_metadata_for_installed_repository( self, installed_repository, strings_displayed=[], strings_not_displayed=[] ): repository_id = self.security.encode_id( installed_repository.id ) for tool in installed_repository.metadata[ 'tools' ]: strings = list( strings_displayed ) strings.extend( [ tool[ 'id' ], tool[ 'description' ], tool[ 'version' ], tool[ 'guid' ], tool[ 'name' ] ] ) params = dict( repository_id=repository_id, tool_id=tool[ 'id' ] ) url = '/admin_toolshed/view_tool_metadata' self.visit_galaxy_url( url, params ) self.check_for_strings( strings, strings_not_displayed ) def verify_unchanged_repository_metadata( self, repository ): old_metadata = dict() new_metadata = dict() for metadata in self.get_repository_metadata( repository ): old_metadata[ metadata.changeset_revision ] = metadata.metadata self.reset_repository_metadata( repository ) for metadata in self.get_repository_metadata( repository ): new_metadata[ metadata.changeset_revision ] = metadata.metadata # Python's dict comparison recursively compares sorted key => value pairs and returns true if any key or value differs, # or if the number of keys differs. assert old_metadata == new_metadata, 'Metadata changed after reset on repository %s.' 
% repository.name def view_installed_workflow( self, repository, workflow_name, strings_displayed=[], strings_not_displayed=[] ): url = '/admin_toolshed/view_workflow?repository_id=%s&workflow_name=%s' % \ ( self.security.encode_id( repository.id ), tool_shed_encode( workflow_name ) ) self.visit_galaxy_url( url ) self.check_for_strings( strings, strings_not_displayed ) def visit_galaxy_url( self, url, params=None, doseq=False ): url = '%s%s' % ( self.galaxy_url, url ) self.visit_url( url, params=params, doseq=doseq ) def wait_for_repository_installation( self, repository_ids ): final_states = [ galaxy_model.ToolShedRepository.installation_status.ERROR, galaxy_model.ToolShedRepository.installation_status.INSTALLED ] # Wait until all repositories are in a final state before returning. This ensures that subsequent tests # are running against an installed repository, and not one that is still in the process of installing. if repository_ids: for repository_id in repository_ids: galaxy_repository = test_db_util.get_installed_repository_by_id( self.security.decode_id( repository_id ) ) timeout_counter = 0 while galaxy_repository.status not in final_states: test_db_util.ga_refresh( galaxy_repository ) timeout_counter = timeout_counter + 1 # This timeout currently defaults to 10 minutes. if timeout_counter > repository_installation_timeout: raise AssertionError( 'Repository installation timed out, %d seconds elapsed, repository state is %s.' % \ ( timeout_counter, galaxy_repository.status ) ) break time.sleep( 1 )
mikel-egana-aranguren/SADI-Galaxy-Docker
galaxy-dist/test/tool_shed/base/twilltestcase.py
Python
gpl-3.0
89,289
[ "Galaxy", "VisIt" ]
dee101fefc6cedd1a041250c2183f4e5181c09152ef0da26c60de9bf635f5c8b
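The wait_for_repository_installation method in the file above polls each repository's database record once per second until it reaches the Installed or Error state, raising an assertion error once the timeout (about ten minutes by default) is exceeded. Below is a minimal, self-contained sketch of that polling pattern, not part of the Galaxy test framework; refresh_status, the state names, and the timeout value are illustrative placeholders.

import time

def wait_until_final(refresh_status, final_states=('Installed', 'Error'),
                     timeout=600, interval=1):
    """Poll refresh_status() until it returns a state in final_states or the timeout expires."""
    elapsed = 0
    status = refresh_status()
    while status not in final_states:
        if elapsed >= timeout:
            raise AssertionError('Installation timed out after %d seconds; '
                                 'last status was %s.' % (elapsed, status))
        time.sleep(interval)
        elapsed += interval
        status = refresh_status()
    return status

A caller would pass something like wait_until_final(lambda: repository.status) so that the loop re-reads the current status on every iteration, mirroring the refresh-then-check structure of the original method.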
"""The SOINN regressor.""" from . import isoinn2 from .kde import density from .__gaussian_custom import norm_pdf_multivariate from numpy import array,diag,matrix import time from pygraph.classes.graph import graph from pygraph.algorithms.accessibility import connected_components import itertools from copy import deepcopy from .gks import GKS class ISOINNregressor: """Regression interface based on SSL-GKS and SOINN. smooth can be set to None or real number, normally falls in [-1,0]. If set to None, SSL will be employed to estimate its value. response_dimension is integer, means the number of response variables. K is integer which is the number of neurons for kernel smoothing, larger K means little details but more smoothed predictions. The rest of the parameters are GNG training parameters.""" Pis = [] #:Distribution of the neuron populations. bands = [] #:Bandwidth for visualization. nodes = [] #:Weights of the neurons. sigmax = 0 ux = [] uy = [] gr = 0 #:Topology structure of neurons. counts = 0 standard_deviation = 0 smooth = -0.4 #:Smooth parameter for kernel smoothing, if set to None, SSL smooth parameter selection will be employed. reg_model = None __res_dimension = 1 __global = False __inn_parameter_list = [] K = 10 #:Number of neurons selected for kernel smoothing. def __init__(self, smooth = None, response_dimension = 1, K = 10, age_max = 200, nn_lambda = 60, alpha = 10,del_noise = True): isoinn2.set_parameter(age_max,nn_lambda,alpha,0,del_noise) self.__inn_parameter_list = [age_max,nn_lambda,alpha,0,del_noise] self.smooth = smooth self.__res_dimension = 1 self.K = K def fit(self, X, y): """X is array or list, each element is numpy array. Y is array or list containing the response varaible values.""" #print 'training with bandwidth calculation, please wait...' timecost = time.time() t = 0 for i in range(len(y)): n_point = array(list(X[i]) + list([y[i]])) if t == 0: EX = n_point EX2 = n_point ** 2 else: count = float(t) EX = (EX*count/(count + 1.0)) + (n_point/(count + 1.0)) EX2 = (EX2*count/(count + 1.0)) + ((n_point ** 2)/(count + 1.0)) t += 1 isoinn2.step(n_point,0,t) isoinn2.step(array([]),0,-1) #print 'time cost',time.time() - timecost standard_deviation = (EX2 - EX ** 2) ** 0.5 self.standard_deviation = standard_deviation if self.smooth == None: self.bands = standard_deviation * (len(isoinn2.setN) ** (-0.2)) else: self.bands = standard_deviation * (len(isoinn2.setN) ** (self.smooth)) Pis = isoinn2.accumulated self.counts = isoinn2.accumulated self.Pis = array(Pis) / float(sum(array(Pis)))#distribution of the clusters self.nodes = deepcopy(isoinn2.setN) self.sigmax = matrix(diag(array(self.bands)[0:-1]**2)) for each in self.nodes: self.ux.append(each[0:-1]) self.uy.append(each[-1]) self.uy = array(self.uy) self.gr = isoinn2.gr self.reg_model = GKS(self.nodes, self.counts, standard_deviation**2, self.__res_dimension, self.smooth, self.K) def predict(self, data): """This method returns the predictions the variable data. data should be within the same data space to X in the fit method. 
When smooth parameter is set to None, an SSL procedure will be employed to estimate it.""" if self.smooth == None: isoinn2.set_parameter(self.__inn_parameter_list[0],self.__inn_parameter_list[1],self.__inn_parameter_list[2],self.__inn_parameter_list[3],self.__inn_parameter_list[4]) t = 0 for i in range(len(data)): n_point = array(data[i]) if t == 0: EX = n_point EX2 = n_point ** 2 else: count = float(t) EX = (EX*count/(count + 1.0)) + (n_point/(count + 1.0)) EX2 = (EX2*count/(count + 1.0)) + ((n_point ** 2)/(count + 1.0)) t += 1 isoinn2.step(n_point,0,t) isoinn2.step(array([]),0,-1) return self.reg_model.responses(data, isoinn2.setN) else: return self.reg_model.responses(data) def draw_density(self, resolution = 0.05): """Draws the density contour of any regressor instance. It can only be called after calling the fit method, and only work in 2d case. resolution is a postitive real number definining the detail level of drawing. A smaller resolution number will generate more detailed drawings.""" from numpy import mgrid,zeros from copy import deepcopy the_d = density(self.nodes,array(self.counts),self.standard_deviation) dx, dy = resolution, resolution # generate 2 2d grids for the x & y bounds y, x = mgrid[slice(0, 1 + dy, dy),slice(0, 1 + dx, dx)] t=deepcopy(x[0]) z = zeros(shape = (len(x[0]),len(y[0]))) z1= zeros(shape = (len(x[0]),len(y[0]))) print('Please wait...') for i in range(len(t)): for j in range(len(t)): input_point = array([t[i],t[j]]) z[j][i] = the_d.estimate(input_point) if not ((input_point - array([0.5,0.2])).any()): print(i,j) print('drawing...') import matplotlib.pyplot as plt from matplotlib.colors import BoundaryNorm from matplotlib.ticker import MaxNLocator z = z[:-1, :-1] levels = MaxNLocator(nbins=15).bin_boundaries(z.min(), z.max()) cmap = plt.get_cmap('PiYG') plt.contourf(x[:-1, :-1] + dx / 2., y[:-1, :-1] + dy / 2., z, levels=levels, cmap=cmap) plt.colorbar() plt.title('Density estimation by SOINN') plt.show() if __name__ == '__main__': from .utils import csv_reader r = csv_reader('reg_intro.csv') X,y = r.separate_label() the_reg = ISOINNregressor(smooth = -0.4, K = 15) the_reg.fit(X,y) # the_reg.draw_density() test_x = [] draw_x = [] for i in range(50): test_x.append(array([i/50.0])) draw_x.append(i/50.0) test_y = the_reg.predict(test_x) import matplotlib.pyplot as plt fig = plt.figure() ax = fig.add_subplot(111) ax.plot(draw_x,test_y,'k-') plt.axis('off') plt.show()
sbxzy/pygks
pygks/reg_inn.py
Python
bsd-3-clause
6,646
[ "NEURON" ]
4c21dfcdebd25facdc03baca95d067e059f3b1e158183c10959cdb45a4372a5d
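The fit method in reg_inn.py above keeps running estimates of E[X] and E[X**2] as each training point arrives, then sets the kernel bandwidth to the per-dimension standard deviation scaled by the number of retained neurons raised to the smooth parameter. A small sketch of that arithmetic, detached from the SOINN machinery and using only numpy, is given below; the function and variable names are illustrative rather than part of the package.

import numpy as np

def running_moments(points):
    """Incrementally maintain E[X] and E[X**2] over a stream of equal-length vectors."""
    ex = ex2 = None
    for t, x in enumerate(points):
        x = np.asarray(x, dtype=float)
        if t == 0:
            ex, ex2 = x.copy(), x ** 2
        else:
            # Same update as in fit(): weight the old estimate by t/(t+1) and the new point by 1/(t+1).
            ex = ex * t / (t + 1.0) + x / (t + 1.0)
            ex2 = ex2 * t / (t + 1.0) + (x ** 2) / (t + 1.0)
    return ex, ex2

def kernel_bandwidth(ex, ex2, n_neurons, smooth=-0.4):
    """Per-dimension standard deviation scaled by n_neurons ** smooth."""
    std = np.sqrt(ex2 - ex ** 2)
    return std * (n_neurons ** smooth)

With smooth = -0.2 this reduces to the n**(-1/5) scaling the class falls back to when the smooth parameter is left as None.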
#!/usr/bin/env python # PyCal - Python web calendar # # Copyright (C) 2004 Ray Osborn # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU Lesser General Public License as published by # the Free Software Foundation; either version 2 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public License # along with this program; if not, write to the Free Software # Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. # # $Id: SendRequest.py,v 1.1 2004/08/25 04:00:30 osborn Exp $ # """ CGI script to send an email request to approve an event. """ from pycal.PyCal import * from pycal.Editor import Editor from pycal.Event import Event from pycal.GetModule import GetEditors, GetSupervisors from pycal.PrintModule import AdminPage, LoginPage, ErrorPage from pycal.CGImodule import CGIlogin, CGIgetForm from pycal.CGImodule import SendEmail from pycal.Utilities import FormatDate, FormatTime import pycal.HTML as HTML def main(): try: form = CGIgetForm() user = CGIlogin(form) if form.has_key("ID"): e = Event(form["ID"]) if form.has_key("cancel"): print e.EventView() return if user in GetEditors(): name = Editor(user).name email = Editor(user).email if email: emailLink = "<%s>" % email else: emailLink = "" mailto = [] for supervisor in GetSupervisors(): mailto.append(Editor(supervisor).email) mailto.append(calendarEmail) subject = "%s Event Request" % calendarAbbr if form.has_key("message"): message = "Additional Message:\n%s" % form["message"] else: message = "" if form.has_key("prefix"): prefix = form["prefix"] else: prefix = "A change to" if e.status == "Approved": script = "EditEvent.py" else: script = "ViewEvent.py" text="""\ %s the following %s event has been requested: Title: %s Date: %s Time: %s to %s Location: %s Resource: %s Category: %s Requested by: %s %s Please visit the following URL to approve or modify the requested event: <%s/%s?ID=%s> %s """ % (prefix, calendarAbbr, e.title, FormatDate(e.start, day=True), FormatTime(e.reservation["start"]), FormatTime(e.reservation["end"]), ", ".join(e.locations), ", ".join(e.resources), ", ".join(e.categories), name, emailLink, cgiURL, script, e.ID, message) SendEmail(mailto, subject, text, cc=email) message = \ "Requested information has been sent to the %s Administration" \ % calendarAbbr print e.EventView(message) else: print LoginPage(script="SendRequest.py", form=form) except CalendarError, errorText: print ErrorPage(errorText) if __name__ == "__main__": main()
rayosborn/pycal
scripts/SendRequest.py
Python
lgpl-3.0
3,403
[ "VisIt" ]
9bffefb64fd37672b4a3b4444dc95de1b8df2461adda409934c8247857162114
import os import sys import argparse from itertools import groupby def fasta_iter(fasta_name): fh = open(fasta_name) faiter = (x[1] for x in groupby(fh, lambda line: line[0] == ">")) for header in faiter: header = next(header)[1:].strip() seq = "".join(s.strip() for s in next(faiter)) yield header, seq def main(): parser = argparse.ArgumentParser(description="Splits a BLAST file and splits query sequence file") parser.add_argument("-q","--query_file",help="A fasta file of query sequences",required=True) parser.add_argument("-b","--blast_file", help="BLAST output file with output format",required=True) parser.add_argument("-t","--threshold",help="Number of query sequences per BLAST chunk", default=1000, required=False) parser.add_argument("-p","--prefix", help="Output prefix",default = '',required=False) args = parser.parse_args() current = "None" threshold = int(args.threshold) counter = 0 current_num = 0 queries = {} file_name = str(args.prefix) +'myseq' with open(args.blast_file) as f: for line in f: valname = line.strip().split('\t')[0] if current == "None": fw = open(''.join([file_name,'_',str(current_num),'.blast.out']), 'w') current = valname queries[valname] = ''.join([file_name,'_',str(current_num)]) counter += 1 if valname == current: fw.write(''.join([line.strip(),'\n'])) else: if counter < threshold: current = valname queries[valname] = ''.join([file_name,'_',str(current_num)]) fw.write(''.join([line.strip(),'\n'])) counter += 1 else: current_num += 1 fw.close() fw = open(''.join([file_name,'_',str(current_num),'.blast.out']), 'w') fw.write(''.join([line.strip(),'\n'])) counter = 1 current = valname queries[valname] = ''.join([file_name,'_',str(current_num)]) query_files = list(set([queries[key] for key in queries])) file_handles = [open(''.join([filename,'.fasta']),'w') for filename in query_files] handle_map = {query_files[n]: file_handles[n] for n in range(len(query_files))} if args.query_file.lower().endswith(('.fasta', '.fa', '.fna')): fiter = fasta_iter(args.query_file) for ff in fiter: if ff[0] not in queries: continue file_handler = handle_map[queries[ff[0]]] file_handler.write(''.join(['>',ff[0],'\n',ff[1],'\n'])) else: print ("Need a fasta file for reads") if __name__ == '__main__': main()
shahnidhi/outlier_in_BLAST_hits
utils/split_merge.py
Python
mit
2,837
[ "BLAST" ]
4e309f471a2b95dc96c379457919d0d867da958b02a851b5c062941a053346ae
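split_merge.py above chunks a tab-delimited BLAST output by query and then writes each chunk's query sequences to a matching FASTA file; the key building block is fasta_iter, which uses itertools.groupby to alternate between header groups (lines starting with '>') and the sequence-line groups that follow them. A slightly tidied, self-contained variant of that iterator with a usage example follows; queries.fasta is a placeholder file name, not one used by the script.

from itertools import groupby

def fasta_iter(path):
    """Yield (header, sequence) pairs from a FASTA file.

    groupby splits the file into alternating runs of header lines and
    sequence lines, so consuming the groups pairwise recovers each record.
    """
    with open(path) as fh:
        blocks = (group for _, group in groupby(fh, key=lambda line: line.startswith('>')))
        for header_block in blocks:
            header = next(header_block)[1:].strip()
            sequence = ''.join(line.strip() for line in next(blocks))
            yield header, sequence

if __name__ == '__main__':
    # Placeholder input purely for illustration.
    for name, seq in fasta_iter('queries.fasta'):
        print(name, len(seq))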
#!/usr/bin/env python import vtk from vtk.test import Testing from vtk.util.misc import vtkGetDataRoot VTK_DATA_ROOT = vtkGetDataRoot() # Create renderer stuff # ren1 = vtk.vtkRenderer() renWin = vtk.vtkRenderWindow() renWin.AddRenderer(ren1) iren = vtk.vtkRenderWindowInteractor() iren.SetRenderWindow(renWin) # create pipeline # cow = vtk.vtkBYUReader() cow.SetGeometryFileName("" + str(VTK_DATA_ROOT) + "/Data/Viewpoint/cow.g") cowMapper = vtk.vtkPolyDataMapper() cowMapper.SetInputConnection(cow.GetOutputPort()) cowActor = vtk.vtkActor() cowActor.SetMapper(cowMapper) cowActor.GetProperty().SetDiffuseColor(0.9608,0.8706,0.7020) cowAxesSource = vtk.vtkAxes() cowAxesSource.SetScaleFactor(10) cowAxesSource.SetOrigin(0,0,0) cowAxesMapper = vtk.vtkPolyDataMapper() cowAxesMapper.SetInputConnection(cowAxesSource.GetOutputPort()) cowAxes = vtk.vtkActor() cowAxes.SetMapper(cowAxesMapper) ren1.AddActor(cowAxes) cowAxes.VisibilityOff() # Add the actors to the renderer, set the background and size # ren1.AddActor(cowActor) ren1.SetBackground(0.1,0.2,0.4) renWin.SetSize(320,240) ren1.ResetCamera() ren1.GetActiveCamera().Azimuth(0) ren1.GetActiveCamera().Dolly(1.4) ren1.ResetCameraClippingRange() cowAxes.VisibilityOn() renWin.Render() # render the image # # prevent the tk window from showing up then start the event loop # def RotateX (__vtk__temp0=0,__vtk__temp1=0): cowActor.SetOrientation(0,0,0) ren1.ResetCameraClippingRange() renWin.Render() renWin.Render() renWin.EraseOff() i = 1 while i <= 6: cowActor.RotateX(60) renWin.Render() renWin.Render() i = i + 1 renWin.EraseOn() def RotateY (__vtk__temp0=0,__vtk__temp1=0): cowActor.SetOrientation(0,0,0) ren1.ResetCameraClippingRange() renWin.Render() renWin.Render() renWin.EraseOff() i = 1 while i <= 6: cowActor.RotateY(60) renWin.Render() renWin.Render() i = i + 1 renWin.EraseOn() def RotateZ (__vtk__temp0=0,__vtk__temp1=0): cowActor.SetOrientation(0,0,0) ren1.ResetCameraClippingRange() renWin.Render() renWin.Render() renWin.EraseOff() i = 1 while i <= 6: cowActor.RotateZ(60) renWin.Render() renWin.Render() i = i + 1 renWin.EraseOn() def RotateXY (__vtk__temp0=0,__vtk__temp1=0): cowActor.SetOrientation(0,0,0) cowActor.RotateX(60) ren1.ResetCameraClippingRange() renWin.Render() renWin.Render() renWin.EraseOff() i = 1 while i <= 6: cowActor.RotateY(60) renWin.Render() renWin.Render() i = i + 1 renWin.EraseOn() RotateX() RotateY() RotateZ() RotateXY() renWin.EraseOff() # --- end of script --
hlzz/dotfiles
graphics/VTK-7.0.0/Rendering/Core/Testing/Python/rotations.py
Python
bsd-3-clause
2,846
[ "VTK" ]
a87ae6e5f873063369a69c4fd02d8e25922cce38dced91cb83392852831efe4c
# -*- coding: utf-8 -*- """ *************************************************************************** translate.py --------------------- Date : August 2012 Copyright : (C) 2012 by Victor Olaya Email : volayaf at gmail dot com *************************************************************************** * * * This program is free software; you can redistribute it and/or modify * * it under the terms of the GNU General Public License as published by * * the Free Software Foundation; either version 2 of the License, or * * (at your option) any later version. * * * *************************************************************************** """ __author__ = 'Victor Olaya' __date__ = 'August 2012' __copyright__ = '(C) 2012, Victor Olaya' # This will get replaced with a git SHA1 when you do a git archive __revision__ = '$Format:%H$' import os from qgis.PyQt.QtGui import QIcon from qgis.core import (QgsProcessingException, QgsProcessingParameterRasterLayer, QgsProcessingParameterEnum, QgsProcessingParameterString, QgsProcessingParameterBoolean, QgsProcessingOutputRasterLayer) from processing.algs.gdal.GdalAlgorithm import GdalAlgorithm from processing.algs.gdal.GdalUtils import GdalUtils pluginPath = os.path.split(os.path.split(os.path.dirname(__file__))[0])[0] class gdaladdo(GdalAlgorithm): INPUT = 'INPUT' LEVELS = 'LEVELS' CLEAN = 'CLEAN' RESAMPLING = 'RESAMPLING' FORMAT = 'FORMAT' OUTPUT = 'OUTPUT' def __init__(self): super().__init__() def initAlgorithm(self, config=None): self.methods = ((self.tr('Nearest neighbour'), 'nearest'), (self.tr('Average'), 'average'), (self.tr('Gaussian'), 'gauss'), (self.tr('Cubic convolution.'), 'cubic'), (self.tr('B-Spline convolution'), 'cubicspline'), (self.tr('Lanczos windowed sinc'), 'lanczos'), (self.tr('Average MP'), 'average_mp'), (self.tr('Average in mag/phase space'), 'average_magphase'), (self.tr('Mode'), 'mode')) self.formats = (self.tr('Internal (if possible)'), self.tr('External (GTiff .ovr)'), self.tr('External (ERDAS Imagine .aux)')) self.addParameter(QgsProcessingParameterRasterLayer(self.INPUT, self.tr('Input layer'))) self.addParameter(QgsProcessingParameterString(self.LEVELS, self.tr('Overview levels'), defaultValue='2 4 8 16')) self.addParameter(QgsProcessingParameterBoolean(self.CLEAN, self.tr('Remove all existing overviews'), defaultValue=False)) params = [] params.append(QgsProcessingParameterEnum(self.RESAMPLING, self.tr('Resampling method'), options=[i[0] for i in self.methods], allowMultiple=False, defaultValue=0)) params.append(QgsProcessingParameterEnum(self.FORMAT, self.tr('Overviews format'), options=self.formats, allowMultiple=False, defaultValue=0)) self.addOutput(QgsProcessingOutputRasterLayer(self.OUTPUT, self.tr('Pyramidized'))) def name(self): return 'overviews' def displayName(self): return self.tr('Build overviews (pyramids)') def group(self): return self.tr('Raster miscellaneous') def groupId(self): return 'rastermiscellaneous' def icon(self): return QIcon(os.path.join(pluginPath, 'images', 'gdaltools', 'raster-overview.png')) def commandName(self): return 'gdaladdo' def getConsoleCommands(self, parameters, context, feedback, executing=True): inLayer = self.parameterAsRasterLayer(parameters, self.INPUT, context) if inLayer is None: raise QgsProcessingException(self.invalidRasterError(parameters, self.INPUT)) fileName = inLayer.source() arguments = [] arguments.append(fileName) arguments.append('-r') arguments.append(self.methods[self.parameterAsEnum(parameters, self.RESAMPLING, context)][1]) ovrFormat = 
self.parameterAsEnum(parameters, self.FORMAT, context) if ovrFormat == 1: arguments.append('-ro') elif ovrFormat == 2: arguments.extend('--config USE_RRD YES'.split(' ')) if self.parameterAsBool(parameters, self.CLEAN, context): arguments.append('-clean') arguments.extend(self.parameterAsString(parameters, self.LEVELS, context).split(' ')) self.setOutputValue(self.OUTPUT, fileName) return [self.commandName(), GdalUtils.escapeAndJoin(arguments)]
dwadler/QGIS
python/plugins/processing/algs/gdal/gdaladdo.py
Python
gpl-2.0
5,646
[ "Gaussian" ]
b42c5aa5525064eea650c3c90b1935ebcf859d89212054dcc7eaa121fbdb07e4
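getConsoleCommands in gdaladdo.py above assembles a gdaladdo command line from the dialog parameters: the raster path, -r with the chosen resampling method, -ro or --config USE_RRD YES for the external overview formats, -clean to remove existing overviews, and the space-separated level list. A hedged sketch of driving the same utility directly via subprocess is shown below; the file name, defaults, and flag combination are illustrative and assume the gdaladdo executable is available on the PATH.

import subprocess

def build_overviews(raster_path, levels=(2, 4, 8, 16), resampling='average',
                    external=False, clean=False):
    """Invoke gdaladdo to build overview pyramids for raster_path."""
    cmd = ['gdaladdo', '-r', resampling]
    if external:
        cmd.append('-ro')        # write external .ovr overviews instead of internal ones
    if clean:
        cmd.append('-clean')     # remove all existing overviews
    cmd.append(raster_path)
    cmd.extend(str(level) for level in levels)
    subprocess.check_call(cmd)

# Hypothetical usage: build_overviews('dem.tif', resampling='nearest')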
"""This is a python library, extending [Mininet](http://mininet.org), in order to support emulation of (complex) IP networks. As such it provides new classes, such as Routers, auto-configures all properties not set by the user, such as IP addresses or router configuration files, ...""" # This module has a hard dependency against mininet, check here that it is # actually installed. This will yield a better (?) error message than just a raw # ImportError nested somewhere ... try: import mininet # noqa except ImportError as e: import sys sys.stderr.write('Failed to import mininet!\n' 'Using the mininet module requires mininet to be ' 'installed.\n' 'Visit www.mininet.org to learn how to do so.\n') sys.exit(1) # Define global constants MIN_IGP_METRIC = 1 OSPF_DEFAULT_AREA = '0.0.0.0' DEBUG_FLAG = False
oliviertilmans/ipmininet
ipmininet/__init__.py
Python
gpl-2.0
892
[ "VisIt" ]
62ee95fbaaf50cac8a735367b1122bdd6e48bc65142b055773fcc942fd7859ff
import expr from itertools import repeat from context import Symbol, Scope from type_objects import List, Dict, Unknown, Function, NoneType, Instance from util import type_intersection from evaluate import UnknownValue def get_token(node): return node.__class__.__name__ class NullEvaluator(object): # for recursion def evaluate(self, argument_scope): _ = argument_scope return Unknown(), UnknownValue() # "arguments" parameter is node.args for FunctionDef or Lambda class FunctionSignature(object): def __init__(self, name=None, arguments=None, context=None, decorator_list=[]): self.name = name if arguments is None: self.names = [] self.types = [] self.default_types = [] self.annotated_types = [] self.vararg_name = None self.kwarg_name = None self.min_count = 0 return # only for copy constructor below assert context is not None self.names = [arg.id for arg in arguments.args] self.min_count = len(arguments.args) - len(arguments.defaults) default_types = [expr.expression_type(d, context) for d in arguments.defaults] self.default_types = ([Unknown()] * self.min_count) + default_types self.annotated_types = self._get_annotated_types( decorator_list, context) self.types = [annotated if annotated != Unknown() else default for annotated, default in zip(self.annotated_types, self.default_types)] self.vararg_name = arguments.vararg self.kwarg_name = arguments.kwarg def __hash__(self): return hash(tuple(self.types)) def __contains__(self, name): return name in self.names def __len__(self): return len(self.names) def __getitem__(self, index): return zip(self.names, self.types)[index] @classmethod def copy_without_first_argument(cls, other_arguments): arguments = cls() arguments.names = other_arguments.names[1:] arguments.types = other_arguments.types[1:] arguments.default_types = other_arguments.default_types[1:] arguments.annotated_types = other_arguments.annotated_types[1:] arguments.min_count = max(0, other_arguments.min_count - 1) arguments.vararg_name = other_arguments.vararg_name arguments.kwarg_name = other_arguments.kwarg_name return arguments def constrain_type(self, name, type_): for i, arg_name in enumerate(self.names): if arg_name == name: intersection = type_intersection(type_, self.types[i]) if intersection is not None: self.types[i] = intersection return intersection else: return None def constrain_types(self, constraints): for name in self.names: if name in constraints: self.constrain_type(name, constraints[name]) def get_list(self): return zip(self.names, self.types) def get_dict(self): return dict(self.get_list()) def _get_annotated_types(self, decorator_list, context): types_decorator = [d for d in decorator_list if get_token(d) == 'Call' and d.func.id == 'types'] return ([expr.expression_type(arg, context) for arg in types_decorator[0].args] if len(types_decorator) == 1 else repeat(Unknown())) def generic_scope(self): scope = Scope() if self.name is not None: # for recursive calls function_type = Function(self, Unknown(), NullEvaluator()) scope.add(Symbol(self.name, function_type)) for name, argtype in self.get_dict().items(): scope.add(Symbol(name, argtype)) if self.vararg_name: scope.add(Symbol(self.vararg_name, List(Unknown()))) if self.kwarg_name: scope.add(Symbol(self.kwarg_name, Dict(Unknown(), Unknown()))) return scope def __str__(self): vararg = (', {0}: Tuple'.format(self.vararg_name) if self.vararg_name else '') kwarg = (', {0}: Dict'.format(self.kwarg_name) if self.kwarg_name else '') return (', '.join(name + ': ' + str(argtype) for name, argtype in zip(self.names, 
self.types)) + vararg + kwarg) # we only generate warnings on the first pass through a function definition # the FunctionEvaluator is only to evaluate the type and static value of # function calls class FunctionEvaluator(object): def __init__(self, body, visitor): self._body = body self._visitor = visitor self._recursion_block = False def _evaluate(self, argument_scope): visitor = self._visitor visitor.begin_scope() visitor.merge_scope(argument_scope) if isinstance(self._body, list): for stmt in self._body: visitor.visit(stmt) else: visitor.visit(self._body) return visitor.end_scope() def evaluate(self, argument_scope): if self._recursion_block: return Unknown(), UnknownValue() self._recursion_block = True if self._body is None: return NoneType(), None scope = self._evaluate(argument_scope) self._recursion_block = False return_type = scope.get_type() or NoneType() if return_type != NoneType(): return_value = scope.get_value() or UnknownValue() else: return_value = None return return_type, return_value class ClassEvaluator(object): def __init__(self, class_object): self._class_object = class_object def evaluate(self, argument_scope): # argument_scope does not contain "self" parameter at this point # because we create the "self" instance inside this method instance = Instance(self._class_object.name, Scope()) class_attributes = self._class_object.attributes for name in class_attributes.names(): symbol_type = class_attributes.get_type(name) if isinstance(symbol_type, Function): function_type = Function( symbol_type.signature, symbol_type.return_type, symbol_type.evaluator, instance) instance.attributes.add(Symbol(name, function_type)) # Note: error checking for arguments passed in has already been # handled because the signature for the class object is loaded # based on the signature of the __init__ function init_function_type = instance.attributes.get_type('__init__') if init_function_type is not None: symbol = Symbol(init_function_type.signature.names[0], instance) argument_scope.add(symbol) init_function_type.evaluator.evaluate(argument_scope) instance.initialized = True return instance, UnknownValue() # problem: where are we going to check for errors in the function call? def construct_function_type(functiondef_node, visitor, instance=None): name = getattr(functiondef_node, 'name', None) signature = FunctionSignature(name, functiondef_node.args, visitor.context()) body = functiondef_node.body first_visitor = (visitor if instance is None or (name == '__init__' and not instance.initialized) or (name != '__init__' and instance.initialized) else visitor.clone()) first_evaluator = FunctionEvaluator(body, first_visitor) first_visitor.context().clear_constraints() argument_scope = signature.generic_scope() if instance is not None: self_symbol = Symbol(signature.names[0], instance) argument_scope.add(self_symbol) return_type, _ = first_evaluator.evaluate(argument_scope) signature.constrain_types(first_visitor.context().get_constraints()) evaluator = FunctionEvaluator(body, visitor.clone()) return Function(signature, return_type, evaluator)
clark800/pystarch
backend/function.py
Python
mit
8,254
[ "VisIt" ]
b72d1649aea2782080f0403f1ac8ade88c0b1a2d093871b55383faa10fdc1d3d
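Editorial note: the FunctionSignature.constrain_type method in the file above narrows a parameter's inferred type by intersecting it with each new constraint. Below is a minimal, self-contained sketch of that narrowing idea; SimpleType, UNKNOWN, simple_intersection and MiniSignature are hypothetical stand-ins written for this note (they are not part of pystarch, whose real type_intersection handles many more cases).

class SimpleType:
    def __init__(self, name):
        self.name = name
    def __eq__(self, other):
        return isinstance(other, SimpleType) and self.name == other.name
    def __repr__(self):
        return self.name

UNKNOWN = SimpleType('Unknown')

def simple_intersection(a, b):
    # Unknown is compatible with anything; otherwise the types must agree exactly.
    if a == UNKNOWN:
        return b
    if b == UNKNOWN:
        return a
    return a if a == b else None

class MiniSignature:
    def __init__(self, names):
        self.names = list(names)
        self.types = [UNKNOWN for _ in names]

    def constrain_type(self, name, type_):
        # Mirror of constrain_type above: narrow the stored type, or return
        # None when the new constraint contradicts what is already known.
        for i, arg_name in enumerate(self.names):
            if arg_name == name:
                narrowed = simple_intersection(type_, self.types[i])
                if narrowed is not None:
                    self.types[i] = narrowed
                return narrowed
        return None

sig = MiniSignature(['x', 'y'])
sig.constrain_type('x', SimpleType('Int'))
sig.constrain_type('y', SimpleType('Str'))
print(list(zip(sig.names, sig.types)))             # [('x', Int), ('y', Str)]
print(sig.constrain_type('x', SimpleType('Str')))  # None: contradicts Int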
#!/usr/bin/env python ################################################## ## DEPENDENCIES import sys import os import os.path try: import builtins as builtin except ImportError: import __builtin__ as builtin from os.path import getmtime, exists import time import types from Cheetah.Version import MinCompatibleVersion as RequiredCheetahVersion from Cheetah.Version import MinCompatibleVersionTuple as RequiredCheetahVersionTuple from Cheetah.Template import Template from Cheetah.DummyTransaction import * from Cheetah.NameMapper import NotFound, valueForName, valueFromSearchList, valueFromFrameOrSearchList from Cheetah.CacheRegion import CacheRegion import Cheetah.Filters as Filters import Cheetah.ErrorCatchers as ErrorCatchers ################################################## ## MODULE CONSTANTS VFFSL=valueFromFrameOrSearchList VFSL=valueFromSearchList VFN=valueForName currentTime=time.time __CHEETAH_version__ = '2.4.4' __CHEETAH_versionTuple__ = (2, 4, 4, 'development', 0) __CHEETAH_genTime__ = 1447321436.274228 __CHEETAH_genTimestamp__ = 'Thu Nov 12 18:43:56 2015' __CHEETAH_src__ = '/home/knuth/openpli-oe-core/build/tmp/work/fusionhd-oe-linux/enigma2-plugin-extensions-openwebif/1+gitAUTOINC+5837c87afc-r0/git/plugin/controllers/views/web/timercleanup.tmpl' __CHEETAH_srcLastModified__ = 'Thu Nov 12 18:43:41 2015' __CHEETAH_docstring__ = 'Autogenerated by Cheetah: The Python-Powered Template Engine' if __CHEETAH_versionTuple__ < RequiredCheetahVersionTuple: raise AssertionError( 'This template was compiled with Cheetah version' ' %s. Templates compiled before version %s must be recompiled.'%( __CHEETAH_version__, RequiredCheetahVersion)) ################################################## ## CLASSES class timercleanup(Template): ################################################## ## CHEETAH GENERATED METHODS def __init__(self, *args, **KWs): super(timercleanup, self).__init__(*args, **KWs) if not self._CHEETAH__instanceInitialized: cheetahKWArgs = {} allowedKWs = 'searchList namespaces filter filtersLib errorCatcher'.split() for k,v in KWs.items(): if k in allowedKWs: cheetahKWArgs[k] = v self._initCheetahInstance(**cheetahKWArgs) def respond(self, trans=None): ## CHEETAH: main method generated for this template if (not trans and not self._CHEETAH__isBuffering and not callable(self.transaction)): trans = self.transaction # is None unless self.awake() was called if not trans: trans = DummyTransaction() _dummyTrans = True else: _dummyTrans = False write = trans.response().write SL = self._CHEETAH__searchList _filter = self._CHEETAH__currentFilter ######################################## ## START - generated method body _orig_filter_47832864 = _filter filterName = u'WebSafe' if self._CHEETAH__filters.has_key("WebSafe"): _filter = self._CHEETAH__currentFilter = self._CHEETAH__filters[filterName] else: _filter = self._CHEETAH__currentFilter = \ self._CHEETAH__filters[filterName] = getattr(self._CHEETAH__filtersLib, filterName)(self).filter write(u'''<?xml version="1.0" encoding="UTF-8"?> <e2simplexmlresult> \t<e2state>''') _v = VFFSL(SL,"result",True) # u'$result' on line 4, col 11 if _v is not None: write(_filter(_v, rawExpr=u'$result')) # from line 4, col 11. write(u'''</e2state> \t<e2statetext>''') _v = VFFSL(SL,"message",True) # u'$message' on line 5, col 15 if _v is not None: write(_filter(_v, rawExpr=u'$message')) # from line 5, col 15. 
write(u'''</e2statetext>\t </e2simplexmlresult> ''') _filter = self._CHEETAH__currentFilter = _orig_filter_47832864 ######################################## ## END - generated method body return _dummyTrans and trans.response().getvalue() or "" ################################################## ## CHEETAH GENERATED ATTRIBUTES _CHEETAH__instanceInitialized = False _CHEETAH_version = __CHEETAH_version__ _CHEETAH_versionTuple = __CHEETAH_versionTuple__ _CHEETAH_genTime = __CHEETAH_genTime__ _CHEETAH_genTimestamp = __CHEETAH_genTimestamp__ _CHEETAH_src = __CHEETAH_src__ _CHEETAH_srcLastModified = __CHEETAH_srcLastModified__ _mainCheetahMethod_for_timercleanup= 'respond' ## END CLASS DEFINITION if not hasattr(timercleanup, '_initCheetahAttributes'): templateAPIClass = getattr(timercleanup, '_CHEETAH_templateClass', Template) templateAPIClass._addCheetahPlumbingCodeToClass(timercleanup) # CHEETAH was developed by Tavis Rudd and Mike Orr # with code, advice and input from many other volunteers. # For more information visit http://www.CheetahTemplate.org/ ################################################## ## if run from command line: if __name__ == '__main__': from Cheetah.TemplateCmdLineIface import CmdLineIface CmdLineIface(templateObj=timercleanup()).run()
pli3/e2-openwbif
plugin/controllers/views/web/timercleanup.py
Python
gpl-2.0
5,192
[ "VisIt" ]
6a6ac8ef263f4141786c585fda1b1ff5e6de42559dd493ec37cdd793f17e7721
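Editorial note: the file above is machine-generated output of the Cheetah compiler for timercleanup.tmpl; the substance is just the respond() body that writes the $result and $message placeholders into a small XML document. As a hedged illustration (not taken from the OpenWebif sources), the snippet below compiles an equivalent template string at runtime with the documented Template API of the Cheetah (or Cheetah3) package; the XML shape and the result/message names are simply mirrored from the generated code, and the WebSafe output filter applied by the generated class is skipped here for brevity.

from Cheetah.Template import Template  # requires the Cheetah / Cheetah3 package

source = '''<?xml version="1.0" encoding="UTF-8"?>
<e2simplexmlresult>
\t<e2state>$result</e2state>
\t<e2statetext>$message</e2statetext>
</e2simplexmlresult>
'''

# searchList supplies the namespaces in which $result and $message are resolved.
tmpl = Template(source, searchList=[{'result': True, 'message': 'Timer cleanup finished'}])
print(tmpl)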
# Copyright (c) 2015 Rackspace, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. # Root domains can not be used as a service domains, as most of the DNS # providers does not allow setting a CNAME on a root domain. This file contains # regualr expressions used for checking if a domain is a root domain or not. # regex for a generic country code based root domain generic_cc_tld = r'''([^.]+\.(ac|biz|co|com|edu|gov|id|int|ltd|me|mil|mod| my|name|net|nhs|nic|nom|or|org|plc|sch|web)\.(ac|ad|ae|af|ag|ai|al|am| an|ao|aq|ar|as|at|au|aw|ax|az|ba|bb|bd|be|bf|bg|bh|bi|bj|bm|bn|bo|br| bs|bt|bv|bw|by|bz|ca|cc|cd|cf|cg|ch|ci|ck|cl|cm|cn|co|cr|cu|cv|cw|cx| cy|cz|de|dj|dk|dm|do|dz|ec|ee|eg|er|es|et|eu|fi|fj|fk|fm|fo|fr|ga|gd| ge|gf|gg|gh|gi|gl|gm|gn|gp|gq|gr|gs|gt|gu|gw|gy|hk|hm|hn|hr|ht|hu|id| ie|il|im|in|io|iq|ir|is|it|je|jm|jo|jp|ke|kg|kh|ki|km|kn|kp|kr|kw|ky| kz|la|lb|lc|li|lk|lr|ls|lt|lu|lv|ly|ma|mc|md|me|mg|mh|mk|ml|mm|mn|mo| mp|mq|mr|ms|mt|mu|mv|mw|mx|my|mz|na|nc|ne|nf|ng|ni|nl|no|np|nr|nu|nz| om|pa|pe|pf|pg|ph|pk|pl|pm|pn|pr|ps|pt|pw|py|qa|re|ro|rs|ru|rw|sa|sb| sc|sd|se|sg|sh|si|sk|sl|sm|sn|so|sr|st|su|sv|sx|sy|sz|tc|td|tf|tg|th| tj|tk|tl|tm|tn|to|tp|tr|tt|tv|tw|tz|ua|ug|uk|us|uy|uz|va|vc|ve|vg|vi| vn|vu|wf|ws|ye|yt|za|zm|zw))$''' # edge cases regexs for country codes based root domain australia_tld = r'''([^.]+\.(act|asn|com|csiro|edu|gov|id|net|nsw|nt|org|oz| qld|sa|tas|vic|wa)\.au)$''' austria_tld = r'''([^.]+\.(ac|co|gv|or|priv)\.at)$''' france_tld = r'''([^.]+\.(aeroport|avocat|avoues|cci|chambagri| chirurgiens-dentistes|experts-comptables|geometre-expert|greta| huissier-justice|medecin|notaires|pharmacien|port|veterinaire)\.fr)$''' hungary_tld = r'''([^.]+\.(co|2000|erotika|jogasz|sex|video|info|agrar|film| konyvelo|shop|org|bolt|forum|lakas|suli|priv|casino|games|media|szex| sport|city|hotel|news|tozsde|tm|erotica|ingatlan|reklam|utazas)\ .hu)$''' russia_tld = r'''([^.]+\.(ac|com|edu|int|net|org|pp|gov|mil|test|adygeya| bashkiria|ulan-ude|buryatia|dagestan|nalchik|kalmykia|kchr|ptz|karelia| komi|mari-el|joshkar-ola|mari|mordovia|yakutia|vladikavkaz|kazan| tatarstan|tuva|udmurtia|izhevsk|udm|khakassia|grozny|chuvashia|altai| kuban|krasnoyarsk|marine|vladivostok|stavropol|stv|khabarovsk|khv|amur| arkhangelsk|astrakhan|belgorod|bryansk|vladimir|volgograd|tsaritsyn| vologda|voronezh|vrn|cbg|ivanovo|irkutsk|koenig|kaluga|kamchatka| kemerovo|kirov|vyatka|kostroma|kurgan|kursk|lipetsk|magadan|mosreg| murmansk|nnov|nov|nsk|novosibirsk|omsk|orenburg|oryol|penza|perm|pskov| rnd|ryazan|samara|saratov|sakhalin|yuzhno-sakhalinsk|yekaterinburg| e-burg|smolensk|tambov|tver|tomsk|tsk|tom|tula|tyumen|simbirsk| chelyabinsk|chel|chita|yaroslavl|msk|spb|bir|jar|palana|dudinka|surgut| chukotka|yamal|amursk|baikal|cmw|fareast|jamal|kms|k-uralsk|kustanai| kuzbass|magnitka|mytis|nakhodka|nkz|norilsk|snz|oskol|pyatigorsk| rubtsovsk|syzran|vdonskzgrad)\.ru)$''' south_africa_tld = r'''([^.]+\.(ac|gov|law|mil|net|nom|school)\.za)$''' spain_tld = r'''([^.]+\.(gob|nom|org)\.es)$''' turkey_tld = 
r'''([^.]+\.(av|bbs|bel|biz|com|dr|edu|gen|gov|info|k12|kep| name|net|org|pol|tel|tsk|tv|web)\.tr)$''' uk_tld = r'''([^.]+\.(ac|co|gov|ltd|me|mod|net|nhs|org|plc|police|sch) \.uk)$''' usa_tld = r'''([^.]+\.(al|ak|az|ar|as|ca|co|ct|de|dc|fl|ga|gu|hi|id|il|in| ia|ks|ky|la|me|md|ma|mi|mn|mp|ms|mo|mt|ne|nv|nh|nj|nm|ny|nc|nd|oh|ok| or|pa|pr|ri|sc|sd|tn|tx|um|ut|vt|va|vi|wa|wv|wi|wy)\.us)$''' # regexs for two, three and four segments two_segments = r'''^[^.]+\.[^.]+$''' three_segments = r'''^[^.]+\.[^.]+\.[^.]+$''' four_or_more_segments = r'''^[^.]+\.[^.]+\.[^.]+\.[^.]'''
obulpathi/poppy
poppy/transport/validators/root_domain_regexes.py
Python
apache-2.0
4,243
[ "CASINO" ]
491e3522aeb830961a4f238c88a8b7830355d33a21e565a1ee18312def2cfacb
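Editorial note: to make the intent of these patterns concrete, here is a small, self-contained sketch of how such regexes can be combined into a root-domain check. The is_root_domain helper and the trimmed generic_cc_tld_demo pattern are hypothetical illustrations written for this note (poppy's own validators are not reproduced); re.VERBOSE is used so that the whitespace inside a multi-line pattern is ignored, which is one plausible way to compile the originals above.

import re

# Trimmed stand-in for the generic country-code pattern above.
generic_cc_tld_demo = r'''([^.]+\.(ac|co|com|edu|gov|net|org)\.
    (au|br|in|jp|nz|uk|za))$'''

two_segments = r'^[^.]+\.[^.]+$'

def is_root_domain(domain):
    """Return True if `domain` has no service subdomain in front of its TLD."""
    if re.match(two_segments, domain):
        return True                      # e.g. example.com
    if re.search(generic_cc_tld_demo, domain, re.VERBOSE) and domain.count('.') == 2:
        return True                      # e.g. example.co.uk
    return False

print(is_root_domain('example.com'))        # True
print(is_root_domain('www.example.com'))    # False
print(is_root_domain('example.co.uk'))      # True
print(is_root_domain('cdn.example.co.uk'))  # False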
import sys import argparse import time import random import cPickle import gzip import copy import codecs import operator import multiprocessing as mp from nltk.corpus import wordnet as wn import theano, numpy from theano import tensor as T from event_ae import EventAE from process_data import DataProcessor argparser = argparse.ArgumentParser(description="Run Selectional Preference AutoencoDEr") argparser.add_argument('train_file', metavar="TRAIN-FILE", type=str, help="File containing n-tuples to train on") argparser.add_argument('word_types', metavar="WORD-TYPES", type=str, help="String showing the WordNet POS types of the words in the train file, separated by _. Eg. a_n for adj-noun, v_n_n for verb-noun-noun etc.") argparser.add_argument('--pt_rep', type=str, help="File containing pretrained embeddings") argparser.add_argument('--change_word_rep', help="Make changes to word representations (Default is False)", action='store_true') argparser.set_defaults(change_word_rep=False) argparser.add_argument('--word_dim', type=int, help="Dimensionality of word representations", default=50) argparser.add_argument('--concept_dim', type=int, help="Dimensionality of concept representations", default=50) argparser.add_argument('--write_model_freq', type=int, help="Frequency at which the model will be written to disk", default=1) argparser.add_argument('--num_slots', type=int, help="Number of slots in the input", default=2) argparser.add_argument('--hyp_hidden_size', type=int, help="Hidden layer size in hypernymy if a NN factor is selected", default=20) argparser.add_argument('--wc_hidden_size', type=int, help="Hidden layer size in word-concept preferences if a NN factor is selected", default=20) argparser.add_argument('--wc_lr_wp_rank', type=int, help="Rank of the low rank weights for word-concept preferences if low rank weighted inner product is chosen", default=10) argparser.add_argument('--cc_hidden_size', type=int, help="Hidden layer size in concept-concept preferences if a NN factor is selected", default=20) argparser.add_argument('--cc_lr_wp_rank', type=int, help="Rank of the low rank weights for concept-concept preferences if low rank weighted inner product is chosen", default=10) argparser.add_argument('--lr', type=float, help="Learning rate", default=0.01) argparser.add_argument('--max_iter', type=int, help="Maximum number of iterations", default=100) argparser.add_argument('--vocab_file', type=str, help="Word vocabulary file", default="vocab.txt") argparser.add_argument('--ont_file', type=str, help="Concept vocabulary file", default="ont.txt") argparser.add_argument('--parallel', type=int, help="Number of processors to run on (default 1)", default=1) argparser.add_argument('--use_relaxation', help="Ignore inter-concept preferences and optimize", action='store_true') argparser.set_defaults(use_relaxation=False) argparser.add_argument('--no_hyp', help="Ignore hypernymy links", action='store_true') argparser.set_defaults(no_hyp=False) argparser.add_argument('--use_em', help="Use EM (Default is False)", action='store_true') argparser.set_defaults(use_em=False) argparser.add_argument('--use_nce', help="Use NCE for estimating encoding probability. 
(Default is False)", action='store_true') argparser.set_defaults(use_nce=False) argparser.add_argument('--hyp_model_type', type=str, help="Hypernymy model (weighted_prod, linlayer, tanhlayer)", default="weighted_prod") argparser.add_argument('--wc_pref_model_type', type=str, help="Word-concept preference model (weighted_prod, linlayer, tanhlayer)", default="tanhlayer") argparser.add_argument('--cc_pref_model_type', type=str, help="Concept-concept preference model (weighted_prod, linlayer, tanhlayer)", default="tanhlayer") argparser.add_argument('--rec_model_type', type=str, help="Reconstruction model (gaussian, multinomial)", default="gaussian") args = argparser.parse_args() pred_arg_pos = args.word_types.split("_") learning_rate = args.lr use_pretrained_wordrep = False if args.pt_rep: use_pretrained_wordrep = True pt_word_rep = {l.split()[0]: numpy.asarray([float(f) for f in l.strip().split()[1:]]) for l in gzip.open(args.pt_rep)} dp = DataProcessor(pred_arg_pos) x_data, y_s_data, w_ind, c_ind, w_h_map, w_oov, c_oov = dp.make_data(args.train_file, relaxed=args.use_relaxation) rev_w_ind = {ind:word for word, ind in w_ind.items()} rev_c_ind = {ind:concept for concept, ind in c_ind.items()} init_hyp_strengths = None if args.rec_model_type == "multinomial": init_hyp_strengths = numpy.zeros((len(c_ind), len(w_ind))) for word in w_h_map: word_ind = w_ind[word] if word in w_ind else 0 for concept in w_h_map[word]: concept_ind = c_ind[concept] if concept in c_ind else 0 init_hyp_strengths[concept_ind][word_ind] = 1.0 if len(w_oov) != 0: print >>sys.stderr, "Regarding %d words as OOV"%(len(w_oov)) if len(c_oov) != 0: print >>sys.stderr, "Regarding %d concepts as OOV"%(len(c_oov)) if not args.use_relaxation: num_slots = len(x_data[0]) else: num_slots = len(x_data[0]) - 1 num_args = num_slots - 1 wc_hidden_sizes = [args.wc_hidden_size] * num_slots cc_hidden_sizes = [args.cc_hidden_size] * num_args vocab_file = codecs.open(args.vocab_file, "w", "utf-8") for w, ind in w_ind.items(): print >>vocab_file, w, ind vocab_file.close() ont_file = codecs.open(args.ont_file, "w", "utf-8") for c, ind in c_ind.items(): print >>ont_file, c, ind ont_file.close() train_data = zip(x_data, y_s_data) sanity_test_data = random.sample(train_data, min(len(train_data)/10, 20)) num_cands = sum([len(y_s_d) for _, y_s_d in train_data]) print >>sys.stderr, "Read training data. 
Average number of concept candidates per point: %f"%(float(num_cands)/len(train_data)) vocab_size = len(w_ind) ont_size = len(c_ind) comp_starttime = time.time() event_ae = EventAE(num_args, vocab_size, ont_size, args.hyp_hidden_size, wc_hidden_sizes, cc_hidden_sizes, word_dim=args.word_dim, concept_dim=args.concept_dim, word_rep_param=args.change_word_rep, hyp_model_type=args.hyp_model_type, wc_pref_model_type=args.wc_pref_model_type, cc_pref_model_type=args.cc_pref_model_type, rec_model_type=args.rec_model_type, init_hyp_strengths=init_hyp_strengths, relaxed=args.use_relaxation, no_hyp=args.no_hyp, wc_lr_wp_rank=args.wc_lr_wp_rank, cc_lr_wp_rank=args.cc_lr_wp_rank) if use_pretrained_wordrep: print >>sys.stderr, "Using pretrained word representations from %s"%(args.pt_rep) num_words_covered = 0 init_wordrep = event_ae.vocab_rep.get_value() for word in w_ind: if word in pt_word_rep: init_wordrep[w_ind[word]] = pt_word_rep[word] num_words_covered += 1 event_ae.vocab_rep.set_value(init_wordrep) print >>sys.stderr, "\tcoverage for word representation is %f"%(float(num_words_covered)/len(w_ind)) """num_syns_covered = 0 rep_lemmas_props = [] init_ontrep = event_ae.ont_rep.get_value() for syn in c_ind: if len(syn.split(".")) == 3: wn_syn = wn.synset(syn) syn_lemmas = wn_syn.lemma_names() lemma_reps = [] for lemma in syn_lemmas: if lemma in pt_word_rep: lemma_reps.append(pt_word_rep[lemma]) if len(lemma_reps) == 0: rep_lemmas_props.append(0) else: rep_lemmas_props.append(float(len(lemma_reps))/len(syn_lemmas)) init_ontrep[c_ind[syn]] = numpy.mean(lemma_reps, axis=0) num_syns_covered += 1 else: if syn in pt_word_rep: init_ontrep[c_ind[syn]] = pt_word_rep[syn] num_syns_covered += 1 event_ae.ont_rep.set_value(init_ontrep) print >>sys.stderr, "\tcoverage for ontology representations is %f"%(float(num_syns_covered)/len(c_ind)) print >>sys.stderr, "\taverage lemma coverage per synset is %f"%(sum(rep_lemmas_props)/len(rep_lemmas_props))""" if args.parallel != 1: eaes = [copy.deepcopy(event_ae) for _ in range(args.parallel)] if args.use_relaxation: part_train_funcs = [[eae.get_relaxed_train_func(learning_rate, s) for s in range(num_slots)] for eae in eaes] else: part_train_funcs = [eae.get_train_func(learning_rate) for eae in eaes] else: if args.use_relaxation: train_funcs = [event_ae.get_relaxed_train_func(learning_rate, s) for s in range(num_slots)] post_score_funcs = [event_ae.get_relaxed_posterior_func(s) for s in range(num_slots)] else: train_func = event_ae.get_train_func(learning_rate, em=args.use_em, nce=args.use_nce) post_score_func = event_ae.get_posterior_func() comp_endtime = time.time() print >>sys.stderr, "Theano compilation took %d seconds"%(comp_endtime - comp_starttime) def synchronize_param(): raise NotImplementedError, "wcp param averaging should be changed to deal with the new dict structure" all_repr_params = [ [param.get_value() for param in eae.repr_params] for eae in eaes ] avg_repr_params = [numpy.mean(param, axis=0) for param in zip(*all_repr_params)] event_ae.set_repr_params(avg_repr_params) for eae in eaes: eae.set_repr_params(avg_repr_params) if not no_hyp: all_hyp_params = [ [param.get_value() for param in eae.hyp_model.get_params()] for eae in eaes ] avg_hyp_params = [numpy.mean(param, axis=0) for param in zip(*all_hyp_params)] event_ae.hyp_model.set_params(avg_hyp_params) for eae in eaes: eae.hyp_model.set_params(avg_hyp_params) #TODO: Fix averaging wcp_params given the new dict structure all_worker_model_wcp_params = [ [[param.get_value() for param in 
wcp_model.get_params()] for wcp_model in eae.wc_pref_models] for eae in eaes] for model_num, all_model_wcp_params in enumerate(zip(*all_worker_model_wcp_params)): avg_wcp_model_params = [] for param_num, all_wcp_params in enumerate(zip(*all_model_wcp_params)): # Averaging over all worker params avg_wcp_model_params.append(numpy.mean(all_wcp_params, axis=0)) event_ae.wc_pref_models[model_num].set_params(avg_wcp_model_params) for eae in eaes: eae.wc_pref_models[model_num].set_params(avg_wcp_model_params) if not args.use_relaxation: all_worker_model_ccp_params = [ [[param.get_value() for param in ccp_model.get_params()] for ccp_model in eae.cc_pref_models] for eae in eaes] for model_num, all_model_ccp_params in enumerate(zip(*all_worker_model_ccp_params)): avg_ccp_model_params = [] for param_num, all_ccp_params in enumerate(zip(*all_model_ccp_params)): # Averaging over all worker params avg_ccp_model_params.append(numpy.mean(all_ccp_params, axis=0)) event_ae.cc_pref_models[model_num].set_params(avg_ccp_model_params) for eae in eaes: eae.cc_pref_models[model_num].set_params(avg_ccp_model_params) all_rec_params = [ [param.get_value() for param in eae.rec_params] for eae in eaes ] avg_rec_params = [numpy.mean(param, axis=0) for param in zip(*all_rec_params)] event_ae.set_rec_params(avg_rec_params) for eae in eaes: eae.set_rec_params(avg_rec_params) def get_mle_y(x_datum, y_s_datum): max_score = -float("inf") best_y = [] for y_datum in y_s_datum: if args.use_relaxation: s = x_datum[-1] pscore_func = post_score_funcs[s] score = pscore_func(numpy.asarray(x_datum[:-1], dtype='int32'), numpy.asarray(y_datum, dtype='int32')) else: global post_score_func score = post_score_func(numpy.asarray(x_datum, dtype='int32'), numpy.asarray(y_datum, dtype='int32')) if score > max_score: max_score = score best_y = y_datum return best_y, max_score def train_on_data(part_train_func, train_data_part, proc_ind, costs=None): pool_costs = False if costs is None: costs = [] pool_costs = True for dt_ind, (x_datum, y_s_datum) in enumerate(train_data_part): if args.use_relaxation: cost = part_train_func[x_datum[-1]](numpy.asarray(x_datum[:-1], dtype='int32'), numpy.asarray(y_s_datum, dtype='int32')) else: cost = part_train_func(numpy.asarray(x_datum, dtype='int32'), numpy.asarray(y_s_datum, dtype='int32')) if pool_costs: costs.append(cost) else: costs.put(cost) if dt_ind % 1000 == 0: print "Process %d trained on %d points"%(proc_ind, dt_ind) if pool_costs: return costs print >>sys.stderr, "Starting training" for num_iter in range(args.max_iter): costs = [] times = [] random.shuffle(train_data) epoch_starttime = time.time() if args.parallel != 1: train_data_parts = [] chunk_size = len(train_data) / args.parallel for chunk_start_ind in range(0, len(train_data), chunk_size): train_data_parts.append(train_data[chunk_start_ind : chunk_start_ind + chunk_size]) print >>sys.stderr, "Starting %d processes each with %d points to train"%(args.parallel, chunk_size) costs = [] pool = mp.Pool(processes=args.parallel) results = [pool.apply_async(train_on_data, args=(part_train_funcs[i], train_data_parts[i], i)) for i in range(args.parallel)] for r in results: costs.extend(r.get()) else: for i, (x_datum, y_s_datum) in enumerate(train_data): inst_starttime = time.time() if args.use_relaxation: train_func = train_funcs[x_datum[-1]] cost = train_func(numpy.asarray(x_datum[:-1], dtype='int32'), numpy.asarray(y_s_datum, dtype='int32')) else: cost = train_func(numpy.asarray(x_datum, dtype='int32'), numpy.asarray(y_s_datum, dtype='int32')) 
times.append(time.time() - inst_starttime) costs.append(cost) if (i+1) % 1000 == 0: print >>sys.stderr, "Processed %d points. Average cost till now is %r"%(i, sum(costs)/len(costs)) print >>sys.stderr, "\tAverage time per point till now is %f sec"%(float(sum(times))/len(times)) avg_cost = sum(costs)/len(costs) epoch_endtime = time.time() print >>sys.stderr, "Finished iteration %d.\n\tAverage train cost: %f\n\tTime %d sec"%(num_iter + 1, avg_cost, epoch_endtime-epoch_starttime) if args.parallel != 1: print >>sys.stderr, "Synchronizing param" synchronize_param() print >>sys.stderr, "Done synchronizing. Dumping param." if (num_iter+1) % args.write_model_freq == 0: repr_param_out = open("repr_params_%d.pkl"%(num_iter + 1), "wb") #repr_params = [param.get_value() for param in event_ae.repr_params] repr_params = [event_ae.vocab_rep.get_value(), event_ae.ont_rep.get_value()] cPickle.dump(repr_params, repr_param_out) repr_param_out.close() if not args.no_hyp: hyp_param_out = open("hyp_params_%d.pkl"%(num_iter + 1), "wb") hyp_params = [param.get_value() for param in event_ae.hyp_model.get_params()] cPickle.dump(hyp_params, hyp_param_out) hyp_param_out.close() wcp_param_out = open("wcp_params_%d.pkl"%(num_iter + 1), "wb") wcp_params = [{ i: [param.get_value() for param in wcp_models[i].get_params()] for i in wcp_models} for wcp_models in event_ae.wc_pref_models] cPickle.dump(wcp_params, wcp_param_out) wcp_param_out.close() if not args.use_relaxation: ccp_param_out = open("ccp_params_%d.pkl"%(num_iter + 1), "wb") ccp_params = [[param.get_value() for param in ccp_model.get_params()] for ccp_model in event_ae.cc_pref_models] cPickle.dump(ccp_params, ccp_param_out) ccp_param_out.close() rec_param_out = open("rec_params_%d.pkl"%(num_iter + 1), "wb") rec_params = [param.get_value() for param in event_ae.rec_params] cPickle.dump(rec_params, rec_param_out) rec_param_out.close() print >>sys.stderr, "Sanity test output:" for (x_datum, y_s_datum) in sanity_test_data: x_words = [rev_w_ind[x_ind] for x_ind in x_datum[:-1]] if args.use_relaxation else [rev_w_ind[x_ind] for x_ind in x_datum] best_y_ind, best_score = get_mle_y(x_datum, y_s_datum) if args.use_relaxation: y_concepts = [rev_c_ind[best_y_ind]] else: y_concepts = [rev_c_ind[ind] for ind in best_y_ind] print >>sys.stderr, "x: %s"%(" ".join(x_words)) print >>sys.stderr, "y: %s"%(" ".join(y_concepts))
pdasigi/spade
run_event_ae.py
Python
gpl-2.0
15,910
[ "Gaussian" ]
c7cd548b5597f1e0fd2d0d53f4090f3cdc5739c1ee5d32cb5a5428f30d8dace3
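Editorial note: one detail of the training script above that benefits from a small illustration is synchronize_param: when --parallel > 1 each worker trains its own deep copy of the model, and after an epoch the corresponding parameter arrays are element-wise averaged and written back into every copy. A minimal numpy-only sketch of that averaging step follows; average_params is a hypothetical helper written for this note, not part of spade.

import numpy as np

def average_params(all_worker_params):
    """all_worker_params: one list of parameter arrays per worker, all aligned."""
    return [np.mean(per_param, axis=0) for per_param in zip(*all_worker_params)]

worker_a = [np.array([1.0, 2.0]), np.array([[1.0]])]
worker_b = [np.array([3.0, 4.0]), np.array([[3.0]])]

averaged = average_params([worker_a, worker_b])
print(averaged[0])   # [2. 3.]
print(averaged[1])   # [[2.]]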
# Copyright 2020 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """timecast/series/random.py""" from typing import Tuple import jax.numpy as jnp import numpy as np def generate(n: int = 1000, loc: float = 0.0, scale: float = 1.0) -> Tuple[np.ndarray, np.ndarray]: """ Description: outputs a timeline randomly distributed i.i.d. from gaussian with mean `loc`, standard deviation `scale` """ X = np.random.normal(loc=loc, scale=scale, size=(n + 1)) return jnp.asarray(X)[:-1].reshape(-1, 1), jnp.asarray(X)[1:].reshape(-1, 1)
google/timecast
timecast/series/random.py
Python
apache-2.0
1,062
[ "Gaussian" ]
2a9cb8a10d79b3444ff7dd4a06f66946b6fe2200813b92225f43e7393e0406e3
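Editorial note: generate() above returns the same Gaussian series twice, offset by one step, so the second array is the one-step-ahead target for the first. A small numpy-only sketch of the same construction (jax is dropped here purely for portability; generate_np is a stand-in written for this note, not the timecast function):

import numpy as np

def generate_np(n=1000, loc=0.0, scale=1.0, seed=0):
    rng = np.random.default_rng(seed)
    series = rng.normal(loc=loc, scale=scale, size=n + 1)
    return series[:-1].reshape(-1, 1), series[1:].reshape(-1, 1)

X, y = generate_np(n=5)
print(X.shape, y.shape)              # (5, 1) (5, 1)
print(np.allclose(X[1:], y[:-1]))    # True: targets are the inputs shifted by one step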
# coding=utf8 # # Copyright 2013 Dreamlab Onet.pl # # This library is free software; you can redistribute it and/or # modify it under the terms of the GNU Lesser General Public # License as published by the Free Software Foundation; # version 3.0. # This library is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU # Lesser General Public License for more details. # You should have received a copy of the GNU Lesser General Public # License along with this library; if not, visit # # http://www.gnu.org/licenses/lgpl.txt # import functools from inspect import isclass from rmock.config import get_config class Patch(object): """ Decorator or context manager which injects a properly configured mock into the current context. If Patch is used as a method decorator, then a mock is created before each method call, injected as the first parameter, and then finalized after the method returns. If it is used as a class decorator, then a mock is created at class definition time and reused across all test method calls, being reset before each call. By default the mock object is passed as the first argument to each method. If you want the mock to be stored in a class-level attribute instead, pass the attribute name as the `classvar` argument to the `Patch` __init__ method. This class can also be used as a context manager - in this case the mock object is created and returned from the __enter__ method and finalized in the __exit__ method. At the moment the `Patch` decorator does not work for free (non-method) functions; this should be considered a bug and will be fixed in the future. For now, you can use the context manager around the whole function body instead. Additionally, because of the way class decorators work in Python, subclasses of a class decorated with `Patch` will not have their test methods wrapped with reset_mock calls. The simple workaround for this problem is to manually call reset_mock in a setup method in the base class. Note that this behaviour may change in the future. 
""" def __init__(self, run_manager, *mock_args, **mock_kwargs): """ Initialize Patch object with given attributes The `args` and `kwargs` and generally the same """ self.run_manager = run_manager self.mock_args = mock_args self.mock_kwargs = mock_kwargs self._context_manager_mock = None self.classvar = mock_kwargs.pop('classvar', None) def __call__(self, test_object): if isclass(test_object): test_class = test_object mock = self.run_manager.run(*self.mock_args, **self.mock_kwargs) if self.classvar is not None: setattr(test_class, self.classvar, mock) for function_name, test_function in self._list_test_functions(test_class): wrapped = self._make_function_wrapper(test_function, mock) setattr(test_class, function_name, wrapped) return test_class else: if self.classvar is not None: raise TypeError("classvar supported only for class decorator") test_function = test_object @functools.wraps(test_function) def wrapped(function_self, *args, **kwargs): mock = self.run_manager.run(*self.mock_args, **self.mock_kwargs) try: return test_function(function_self, mock, *args, **kwargs) finally: mock.finalize() return wrapped def _make_function_wrapper(self, test_function, mock): @functools.wraps(test_function) def wrapped(function_self, *args, **kwargs): func_args = list(args) if self.classvar is None: func_args.insert(0, mock) func_kwargs = kwargs try: return test_function(function_self, *func_args, **func_kwargs) finally: mock.reset_mock() mock.start_server() return wrapped def _list_test_functions(self, test_class): test_method_prefix = get_config()['test_method_prefix'] for attr in dir(test_class): if not attr.startswith(test_method_prefix): continue attr_value = getattr(test_class, attr) if not hasattr(attr_value, "__call__"): continue yield attr, attr_value def __enter__(self): self._context_manager_mock = self.run_manager.run(*self.mock_args, **self.mock_kwargs) return self._context_manager_mock def __exit__(self, *args): if self._context_manager_mock: self._context_manager_mock.finalize() self._context_manager_mock = None
tikan/rmock
src/rmock/core/patch.py
Python
lgpl-3.0
5,184
[ "VisIt" ]
a3efccbf671ab25bee56b61015b9ccecf960d9c126783c15086eacc443722015
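Editorial note: to make the method-decorator flow described in the Patch docstring concrete without depending on rmock itself, here is a self-contained sketch. FakeRunManager, FakeMock and patch_method are hypothetical stand-ins that reproduce only the create-inject-finalize control flow of Patch.__call__ for a plain test method; they are not the rmock API.

import functools

class FakeMock:
    def __init__(self):
        self.finalized = False
    def finalize(self):
        self.finalized = True

class FakeRunManager:
    def run(self, *args, **kwargs):
        return FakeMock()

def patch_method(run_manager, *mock_args, **mock_kwargs):
    """Simplified analogue of Patch applied to a single test method."""
    def decorator(test_function):
        @functools.wraps(test_function)
        def wrapped(self, *args, **kwargs):
            mock = run_manager.run(*mock_args, **mock_kwargs)
            try:
                return test_function(self, mock, *args, **kwargs)
            finally:
                mock.finalize()     # always clean up, even if the test raises
        return wrapped
    return decorator

class MyTest:
    @patch_method(FakeRunManager())
    def test_something(self, mock):
        assert isinstance(mock, FakeMock)
        return 'ok'

print(MyTest().test_something())   # 'ok' -- the injected mock was finalized afterwards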
# Built-in import itertools as itt # Common import numpy as np from scipy.interpolate import BSpline import matplotlib.pyplot as plt import matplotlib.lines as mlines import matplotlib.patches as mpatches import matplotlib.colors as mplcol import matplotlib.gridspec as gridspec from matplotlib.axes._axes import Axes from mpl_toolkits.mplot3d import Axes3D # tofu from tofu.version import __version__ from . import _def as _def _GITHUB = 'https://github.com/ToFuProject/tofu/issues' _WINTIT = 'tofu-%s report issues / requests at %s'%(__version__, _GITHUB) _QUIVERCOLOR = plt.cm.viridis(np.linspace(0, 1, 3)) _QUIVERCOLOR = np.array([[1., 0., 0., 1.], [0., 1., 0., 1.], [0., 0., 1., 1.]]) _QUIVERCOLOR = mplcol.ListedColormap(_QUIVERCOLOR) _RAYS_NPTS = 10 # Generic def _check_projdax_mpl( dax=None, proj=None, dmargin=None, fs=None, wintit=None, ): # ---------------------- # Check inputs if proj is None: proj = 'all' assert isinstance(proj, str) proj = proj.lower() lproj = ['cross', 'hor', '3d', 'im'] assert proj in lproj + ['all'] # ---------------------- # Check dax lc = [ dax is None, issubclass(dax.__class__, Axes), isinstance(dax, dict), isinstance(dax, list), ] assert any(lc) if lc[0]: dax = dict.fromkeys(proj) elif lc[1]: assert len(proj) == 1 dax = {proj[0]: dax} elif lc[2]: lcax = [ dax.get(pp) is None or issubclass(dax.get(pp).__class__, Axes) for pp in proj ] if not all(lcax): msg = ( "Wrong key or axes in dax:\n" + " - proj = {}".format(proj) + " - dax = {}".format(dax) ) raise Exception(msg) else: assert len(dax) == 2 assert all( [ax is None or issubclass(ax.__class__, Axes) for ax in dax] ) dax = {'cross': dax[0], 'hor': dax[1]} # Populate with default axes if necessary if proj == 'cross' and dax['cross'] is None: dax['cross'] = _def.Plot_LOSProj_DefAxes( 'cross', fs=fs, dmargin=dmargin, wintit=wintit, ) elif proj == 'hor' and dax['hor'] is None: dax['hor'] = _def.Plot_LOSProj_DefAxes( 'hor', fs=fs, dmargin=dmargin, wintit=wintit, ) elif proj == '3d' and dax['3d'] is None: dax['3d'] = _def.Plot_3D_plt_Tor_DefAxes( fs=fs, dmargin=dmargin, wintit=wintit, ) elif proj == 'im' and dax['im'] is None: dax['im'] = _def.Plot_CrystIm( fs=fs, dmargin=dmargin, wintit=wintit, ) elif proj == 'all' and any([dax.get(k0) is None for k0 in lproj]): dax = _def.Plot_AllCryst( fs=fs, dmargin=dmargin, wintit=wintit, ) for kk in lproj: dax[kk] = dax.get(kk, None) return dax # ################################################################# # ################################################################# # Generic geometry plot # ################################################################# # ################################################################# def _CrystalBragg_plot_check( cryst=None, dcryst=None, det=None, ddet=None, res=None, element=None, color=None, pts_summit=None, pts1=None, pts2=None, xi=None, xj=None, rays_color=None, rays_npts=None, dleg=None, draw=True, use_non_parallelism=None, wintit=None, tit=None, ): # plotting assert type(draw) is bool, "Arg draw must be a bool !" 
assert cryst is None or cryst.__class__.__name__ == 'CrystalBragg' if wintit is None: wintit = _WINTIT if tit is None: tit = False if dleg is None: dleg = _def.TorLegd # rays color if rays_color is None: if pts_summit is None: rays_color = 'k' else: rays_color = 'pts' if pts_summit.shape[2] > 1 else 'lamb' if rays_color in ['pts', 'lamb']: pass else: try: rays_color = mplcol.to_rgba(rays_color) except Exception as err: msg = ( "Arg rays_color must be either:\n" + "\t- 'pts': color by pts of origin\n" + "\t- 'lamb': color by wavelength\n" + "\t- a matplotlib color (e.g.: 'k', (0.1, 0.2, 0.1), ...)\n" ) raise Exception(msg) # rays_npts if rays_npts is None: rays_npts = _RAYS_NPTS assert rays_npts >= 2, "Arg rays_npts must be a int >= 2" # elements lelement = ['s', 'c', 'r', 'o', 'v'] if element is None: element = 'oscrv' c0 = ( isinstance(element, str) and all([ss in lelement for ss in element.lower()]) ) if not c0: msg = ("Arg element must be str contain some of the following:\n" + "\t- 'o': outline\n" + "\t- 'c': center (of curvature sphere)\n" + "\t- 's': summit (geometrical center of crystal piece)\n" + "\t- 'r': rowland circle (along e1 direction)\n" + "\t- 'v': local unit vectors\n" + "You provided:\n{}".format(element)) raise Exception(msg) element = element.lower() # cryst if element != '' and cryst is None: msg = ( "cryst cannot be None if element contains any of:\n" + "\t- {}\n".format(lelement) + "You provided: {}".format(element) ) raise Exception(msg) # vectors and outline if cryst is not None: nout, e1, e2, use_non_parallelism = cryst.get_unit_vectors( use_non_parallelism=use_non_parallelism, ) nin = -nout outline = cryst.sample_outline_plot( res=res, use_non_parallelism=use_non_parallelism, ) # det if det is None: det = False if det is not False and any([ss in element for ss in 'ocv']): c0 = isinstance(det, dict) and 'cent' in det.keys() if c0 and 'o' in element: c0 = c0 and all( [ss in det.keys() for ss in ['outline', 'ei', 'ej']] ) if c0 and 'v' in element: c0 = c0 and all([ss in det.keys() for ss in ['nout', 'ei', 'ej']]) if not c0: msg = ("Arg det must be a dict with keys:\n" + "\t- 'cent': center of the detector\n" + "\t- 'nout': outward unit vector (normal to surface)\n" + "\t- 'ei': first local coordinate unit vector\n" + "\t- 'ej': second coordinate unit vector\n" + "\t- 'outline': 2d local coordinates of outline\n" + "\tAll (except outline) are 3d cartesian coordinates\n" + "You provided:\n{}".format(det)) raise Exception(msg) # pts lc = [pts_summit is not None, pts1 is not None, pts2 is not None] c0 = ( (not any(lc)) or all(lc) ) if not c0: msg = ( "pts_summit and pts1 and pts2 must be:\n" + "\t- all None\n" + "\t- all np.ndarray of same shape, with shape[0] == 3\n" + " You provided:\n" + "\t- pts_summit: {}\n".format(pts_summit) + "\t- pts1: {}".format(pts1) + "\t- pts2: {}".format(pts2) ) raise Exception(msg) if pts_summit is not None: if not (pts_summit.shape == pts1.shape == pts2.shape): msg = ( "Args pts_summit, pts1 and pts2 must have the same shape!\n" + " You provided:\n" + "\t- pts_summit.shape: {}\n".format(pts_summit.shape) + "\t- pts1.shape: {}\n".format(pts1.shape) + "\t- pts2.shape: {}\n".format(pts2.shape) ) raise Exception(msg) if rays_color in ['pts', 'lamb'] and pts_summit is not None: if pts_summit.ndim not in [4, 5]: msg = ( "For pts-wise or lambda-wise coloring, " + "input pts_summit must be 4d np.ndarray of shape " + "(3, nlamb, npts, ndtheta)\n" + " You provided:\n" + "\t- pts_summit.shape = {}".format(pts_summit.shape) ) raise Exception(msg) # 
rays rays = None if pts_summit is not None: # pts.shape = (3, nlamb, npts, ndtheta) # rays.shape = (3, nlamb, npts, ndtheta, 2, 2*rays_npts) shape = np.r_[pts1.shape, 1] k = np.linspace(0, 1, rays_npts) rays = np.concatenate(( pts1[..., None] + k*(pts_summit-pts1)[..., None], pts_summit[..., None] + k[1:]*(pts2-pts_summit)[..., None], np.full(shape, np.nan), ), axis=-1) nlamb, npts, ndtheta, _, nk = rays.shape[1:] if rays_color in ['pts', 'lamb']: if rays_color == 'lamb': rays = rays.reshape(3, nlamb, npts*ndtheta*nk*2).swapaxes(1, 2) elif rays_color == 'pts': rays = rays.swapaxes(1, 2).reshape( 3, npts, nlamb*ndtheta*nk*2, ).swapaxes(1, 2) else: rays = rays.reshape(3, nlamb*npts*ndtheta*nk*2, order='C') # xi, xj lc = [xi is not None, xj is not None] c0 = ( np.sum(lc) in [0, 2] and ( not any(lc) or (lc[0] and xi.shape == xj.shape == pts1.shape[1:]) ) ) if not c0: msg = ( "Args xi, xj must be either both None of 2 array of same shape!\n" + " Provided:\n\t- xi:\n{}\n\t- xj:\n{}".format(xi, xj) ) raise Exception(msg) if lc[0]: if rays_color in ['pts', 'lamb']: if rays_color == 'lamb': xi = xi.reshape(nlamb, npts*ndtheta*2).T xj = xj.reshape(nlamb, npts*ndtheta*2).T elif rays_color == 'pts': xi = xi.swapaxes(0, 1).reshape(npts, nlamb*ndtheta*2).T xj = xj.swapaxes(0, 1).reshape(npts, nlamb*ndtheta*2).T else: xi = xi.ravel() xj = xj.ravel() # dict for plotting if color is None: color = False lkd = ['outline', 'cent', 'summit', 'rowland', 'vectors'] # Avoid passing default by reference if dcryst is None: dcryst = dict({k0: dict(v0) for k0, v0 in _def._CRYSTAL_PLOT_DDICT.items()}) else: dcryst = dict({k0: dict(v0) for k0, v0 in dcryst.items()}) for k0 in lkd: if dcryst.get(k0) is None: dcryst[k0] = dict(_def._CRYSTAL_PLOT_DDICT[k0]) if dcryst[k0].get('color') is None: if cryst is not None and cryst._dmisc.get('color') is not None: dcryst[k0]['color'] = cryst._dmisc['color'] if color is not False: dcryst[k0]['color'] = color if ddet is None: # Avoid passing default by reference ddet = dict({k0: dict(v0) for k0, v0 in _def._DET_PLOT_DDICT.items()}) else: ddet = dict({k0: dict(v0) for k0, v0 in ddet.items()}) for k0 in lkd: if ddet.get(k0) is None: ddet[k0] = dict(_def._DET_PLOT_DDICT[k0]) if color is not False: ddet[k0]['color'] = color return ( dcryst, det, ddet, nout, nin, e1, e2, xi, xj, outline, element, color, rays, rays_color, rays_npts, dleg, wintit, ) def CrystalBragg_plot( cryst=None, dcryst=None, det=None, ddet=None, dax=None, proj=None, res=None, element=None, color=None, pts_summit=None, pts1=None, pts2=None, xi=None, xj=None, rays_color=None, rays_npts=None, dleg=None, draw=True, fs=None, dmargin=None, use_non_parallelism=None, wintit=None, tit=None, ): # --------------------- # Check / format inputs ( dcryst, det, ddet, nout, nin, e1, e2, xi, xj, outline, element, color, rays, rays_color, rays_npts, dleg, wintit, ) = _CrystalBragg_plot_check( cryst=cryst, dcryst=dcryst, det=det, ddet=ddet, res=res, element=element, color=color, pts_summit=pts_summit, pts1=pts1, pts2=pts2, xi=xi, xj=xj, rays_color=rays_color, rays_npts=rays_npts, dleg=dleg, draw=draw, use_non_parallelism=use_non_parallelism, wintit=wintit, tit=tit, ) # --------------------- # call plotting functions dax = _CrystalBragg_plot( cryst=cryst, dcryst=dcryst, det=det, ddet=ddet, nout=nout, nin=nin, e1=e1, e2=e2, outline=outline, proj=proj, dax=dax, element=element, rays=rays, rays_color=rays_color, rays_npts=rays_npts, xi=xi, xj=xj, draw=draw, dmargin=dmargin, fs=fs, wintit=wintit, ) # recompute the ax.dataLim ax0 = None for 
kk, vv in dax.items(): if vv is None: continue dax[kk].relim() dax[kk].autoscale_view() if dleg is not False: dax[kk].legend(**dleg) ax0 = vv # set title if tit != False: ax0.figure.suptitle(tit) if draw: ax0.figure.canvas.draw() return dax def _CrystalBragg_plot( cryst=None, dcryst=None, det=None, ddet=None, nout=None, nin=None, e1=None, e2=None, outline=None, proj=None, dax=None, element=None, rays=None, rays_color=None, rays_npts=None, xi=None, xj=None, quiver_cmap=None, draw=True, dmargin=None, fs=None, wintit=None, ): # --------------------- # Check / format inputs if 'v' in element and quiver_cmap is None: quiver_cmap = _QUIVERCOLOR # --------------------- # Prepare axe and data dax = _check_projdax_mpl( dax=dax, proj=proj, dmargin=dmargin, fs=fs, wintit=wintit, ) if 's' in element or 'v' in element: summ = cryst._dgeom['summit'] if 'c' in element: cent = cryst._dgeom['center'] if 'r' in element: ang = np.linspace(0, 2.*np.pi, 200) rr = 0.5*cryst._dgeom['rcurve'] row = cryst._dgeom['summit'] + rr*nin row = (row[:, None] + rr*(np.cos(ang)[None, :]*nin[:, None] + np.sin(ang)[None, :]*e1[:, None])) # --------------------- # plot cross = dax.get('cross') is not None hor = dax.get('hor') is not None d3 = dax.get('3d') is not None im = dax.get('im') is not None if 'o' in element: if cross: dax['cross'].plot( np.hypot(outline[0, :], outline[1, :]), outline[2, :], label=cryst.Id.NameLTX+' outline', **dcryst['outline'], ) if hor: dax['hor'].plot( outline[0, :], outline[1, :], label=cryst.Id.NameLTX+' outline', **dcryst['outline'], ) if d3: dax['3d'].plot( outline[0, :], outline[1, :], outline[2, :], label=cryst.Id.NameLTX+' outline', **dcryst['outline'], ) if 's' in element: if cross: dax['cross'].plot( np.hypot(summ[0], summ[1]), summ[2], label=cryst.Id.NameLTX+" summit", **dcryst['summit'], ) if hor: dax['hor'].plot( summ[0], summ[1], label=cryst.Id.NameLTX+" summit", **dcryst['summit'], ) if d3: dax['3d'].plot( summ[0:1], summ[1:2], summ[2:3], label=cryst.Id.NameLTX+" summit", **dcryst['summit'], ) if 'c' in element: if cross: dax['cross'].plot( np.hypot(cent[0], cent[1]), cent[2], label=cryst.Id.NameLTX+" center", **dcryst['cent'], ) if hor: dax['hor'].plot( cent[0], cent[1], label=cryst.Id.NameLTX+" center", **dcryst['cent'], ) if d3: dax['3d'].plot( cent[0:1], cent[1:2], cent[2:3], label=cryst.Id.NameLTX+" center", **dcryst['cent'], ) if 'r' in element: if cross: dax['cross'].plot( np.hypot(row[0, :], row[1, :]), row[2, :], label=cryst.Id.NameLTX+' rowland', **dcryst['rowland'], ) if hor: dax['hor'].plot( row[0, :], row[1, :], label=cryst.Id.NameLTX+' rowland', **dcryst['rowland'], ) if d3: dax['3d'].plot( row[0, :], row[1, :], row[2, :], label=cryst.Id.NameLTX+' rowland', **dcryst['rowland'], ) if 'v' in element: p0 = np.repeat(summ[:,None], 3, axis=1) v = np.concatenate((nout[:, None], e1[:, None], e2[:, None]), axis=1) if cross: pr = np.hypot(p0[0, :], p0[1, :]) vr = np.hypot(p0[0, :]+v[0, :], p0[1, :]+v[1, :]) - pr dax['cross'].quiver( pr, p0[2, :], vr, v[2, :], np.r_[0., 0.5, 1.], cmap=quiver_cmap, angles='xy', scale_units='xy', label=cryst.Id.NameLTX+" unit vect", **dcryst['vectors'], ) if hor: dax['hor'].quiver( p0[0, :], p0[1, :], v[0, :], v[1, :], np.r_[0., 0.5, 1.], cmap=quiver_cmap, angles='xy', scale_units='xy', label=cryst.Id.NameLTX+" unit vect", **dcryst['vectors'], ) if d3: dax['3d'].quiver( p0[0, :], p0[1, :], p0[2, :], v[0, :], v[1, :], v[2, :], np.r_[0., 0.5, 1.], length=0.1, normalize=True, cmap=quiver_cmap, label=cryst.Id.NameLTX+" unit vect", **dcryst['vectors'], ) 
# ------------- # Detector if det is not False: if det.get('cent') is not None and 'c' in element: if cross: dax['cross'].plot( np.hypot(det['cent'][0], det['cent'][1]), det['cent'][2], label="det_cent", **ddet['cent'], ) if hor: dax['hor'].plot( det['cent'][0], det['cent'][1], label="det_cent", **ddet['cent'], ) if d3: dax['3d'].plot( det['cent'][0:1], det['cent'][1:2], det['cent'][2:3], label="det_cent", **ddet['cent'], ) if det.get('nout') is not None and 'v' in element: assert det.get('ei') is not None and det.get('ej') is not None p0 = np.repeat(det['cent'][:, None], 3, axis=1) v = np.concatenate((det['nout'][:, None], det['ei'][:, None], det['ej'][:, None]), axis=1) if cross: pr = np.hypot(p0[0, :], p0[1, :]) vr = np.hypot(p0[0, :]+v[0, :], p0[1, :]+v[1, :]) - pr dax['cross'].quiver( pr, p0[2, :], vr, v[2, :], np.r_[0., 0.5, 1.], cmap=quiver_cmap, angles='xy', scale_units='xy', label="det unit vect", **ddet['vectors'], ) if hor: dax['hor'].quiver( p0[0, :], p0[1, :], v[0, :], v[1, :], np.r_[0., 0.5, 1.], cmap=quiver_cmap, angles='xy', scale_units='xy', label="det unit vect", **ddet['vectors'], ) if d3: dax['3d'].quiver( p0[0, :], p0[1, :], p0[2, :], v[0, :], v[1, :], v[2, :], np.r_[0., 0.5, 1.], length=0.1, normalize=True, cmap=quiver_cmap, label="det unit vect", **ddet['vectors'], ) if det.get('outline') is not None and 'o' in element: det_out = ( det['outline'][0:1, :]*det['ei'][:, None] + det['outline'][1:2, :]*det['ej'][:, None] + det['cent'][:, None] ) if cross: dax['cross'].plot( np.hypot(det_out[0, :], det_out[1, :]), det_out[2, :], label='det outline', **ddet['outline'], ) if hor: dax['hor'].plot( det_out[0, :], det_out[1, :], label='det outline', **ddet['outline'], ) if d3: dax['3d'].plot( det_out[0, :], det_out[1, :], det_out[2, :], label='det outline', **ddet['outline'], ) if im: dax['im'].plot( det['outline'][0, :], det['outline'][1, :], label='det outline', **ddet['outline'], ) # ------------- # rays if rays is not None: if rays_color in ['pts', 'lamb']: if cross: dax['cross'].set_prop_cycle(None) dax['cross'].plot( np.hypot(rays[0, :, :], rays[1, :, :]), rays[2, :, :], lw=1., ls='-', ) if hor: dax['hor'].set_prop_cycle(None) dax['hor'].plot( rays[0, :, :], rays[1, :, :], lw=1., ls='-', ) if d3: dax['3d'].set_prop_cycle(None) for ii in range(rays.shape[2]): dax['3d'].plot( rays[0, :, ii], rays[1, :, ii], rays[2, :, ii], lw=1., ls='-', ) if im: dax['3d'].set_prop_cycle(None) dax['im'].plot( xi, xj, ls='None', marker='.', ms=6, ) else: if cross: dax['cross'].set_prop_cycle(None) dax['cross'].plot( np.hypot(rays[0, :], rays[1, :]), rays[2, :], color=rays_color, lw=1., ls='-', ) if hor: dax['hor'].set_prop_cycle(None) dax['hor'].plot( rays[0, :], rays[1, :], color=rays_color, lw=1., ls='-', ) if d3: dax['3d'].set_prop_cycle(None) dax['3d'].plot( rays[0, :], rays[1, :], rays[2, :], color=rays_color, lw=1., ls='-', ) if im: dax['3d'].set_prop_cycle(None) dax['im'].plot( xi, xj, ls='None', marker='.', ms=6, ) return dax # ################################################################# # ################################################################# # Rocking curve plot # ################################################################# # ################################################################# def CrystalBragg_plot_rockingcurve( func=None, bragg=None, lamb=None, sigma=None, npts=None, ang_units=None, axtit=None, color=None, legend=None, fs=None, ax=None, ): # Prepare if legend is None: legend = True if color is None: color = 'k' if ang_units is None: ang_units = 
'deg' if axtit is None: axtit = 'Rocking curve' if sigma is None: sigma = 0.005*np.pi/180. if npts is None: npts = 1000 angle = bragg + 3.*sigma*np.linspace(-1, 1, npts) curve = func(angle) lab = r"$\lambda = {:9.6} A$".format(lamb*1.e10) if ang_units == 'deg': angle = angle*180/np.pi bragg = bragg*180/np.pi # Plot if ax is None: if fs is None: fs = (8, 6) fig = plt.figure(figsize=fs) ax = fig.add_axes([0.1, 0.1, 0.8, 0.8]) ax.set_title(axtit, size=12) ax.set_xlabel('angle ({})'.format(ang_units)) ax.set_ylabel('reflectivity (adim.)') ax.plot(angle, curve, ls='-', lw=1., c=color, label=lab) ax.axvline(bragg, ls='--', lw=1, c=color) if legend is not False: ax.legend() return ax # ################################################################# # ################################################################# # Bragg diffraction plot # ################################################################# # ################################################################# # Deprecated ? re-use ? def CrystalBragg_plot_approx_detector_params(Rrow, bragg, d, Z, frame_cent, nn): R = 2.*Rrow L = 2.*R ang = np.linspace(0., 2.*np.pi, 100) fig = plt.figure() ax = fig.add_axes([0.1,0.1,0.8,0.8], aspect='equal') ax.axvline(0, ls='--', c='k') ax.plot(Rrow*np.cos(ang), Rrow + Rrow*np.sin(ang), c='r') ax.plot(R*np.cos(ang), R + R*np.sin(ang), c='b') ax.plot(L*np.cos(bragg)*np.r_[-1,0,1], L*np.sin(bragg)*np.r_[1,0,1], c='k') ax.plot([0, d*np.cos(bragg)], [Rrow, d*np.sin(bragg)], c='r') ax.plot([0, d*np.cos(bragg)], [Z, d*np.sin(bragg)], 'g') ax.plot([0, L/10*nn[1]], [Z, Z+L/10*nn[2]], c='g') ax.plot(frame_cent[1]*np.cos(2*bragg-np.pi), Z + frame_cent[1]*np.sin(2*bragg-np.pi), c='k', marker='o', ms=10) ax.set_xlabel(r'y') ax.set_ylabel(r'z') ax.legend(loc='upper left', bbox_to_anchor=(1.05, 1.), frameon=False) return ax def CrystalBragg_plot_xixj_from_braggangle(bragg=None, xi=None, xj=None, data=None, ax=None): if ax is None: fig = plt.figure() ax = fig.add_axes([0.1,0.1,0.8,0.8], aspect='equal') for ii in range(len(bragg)): deg ='{0:07.3f}'.format(bragg[ii]*180/np.pi) ax.plot(xi[:,ii], xj[:,ii], '.', label='bragg %s'%deg) ax.set_xlabel(r'xi') ax.set_ylabel(r'yi') ax.legend(loc='upper left', bbox_to_anchor=(1.05, 1.), frameon=False) return ax def CrystalBragg_plot_braggangle_from_xixj(xi=None, xj=None, bragg=None, angle=None, ax=None, plot=None, braggunits='rad', angunits='rad', leg=None, colorbar=None, fs=None, wintit=None, tit=None, **kwdargs): # Check inputs if isinstance(plot, bool): plot = 'contour' if fs is None: fs = (6, 6) if wintit is None: wintit = _WINTIT if tit is None: tit = False if colorbar is None: colorbar = True if leg is None: leg = False if leg is True: leg = {} # Prepare axes if ax is None: fig = plt.figure(figsize=fs) ax = fig.add_axes([0.1, 0.1, 0.8, 0.8], aspect='equal', adjustable='box') dobj = {'phi': {'ax': ax}, 'bragg': {'ax': ax}} dobj['bragg']['kwdargs'] = dict(kwdargs) dobj['phi']['kwdargs'] = dict(kwdargs) dobj['phi']['kwdargs']['cmap'] = plt.cm.seismic # Clear cmap if colors provided if 'colors' in kwdargs.keys(): if 'cmap' in dobj['bragg']['kwdargs'].keys(): del dobj['bragg']['kwdargs']['cmap'] if 'cmap' in dobj['phi']['kwdargs'].keys(): del dobj['phi']['kwdargs']['cmap'] # Plot if plot == 'contour': if 'levels' in kwdargs.keys(): lvls = kwdargs['levels'] del kwdargs['levels'] obj0 = dobj['bragg']['ax'].contour(xi, xj, bragg, lvls, **dobj['bragg']['kwdargs']) obj1 = dobj['phi']['ax'].contour(xi, xj, angle, lvls, **dobj['phi']['kwdargs']) else: obj0 = 
dobj['bragg']['ax'].contour(xi, xj, bragg, **dobj['bragg']['kwdargs']) obj1 = dobj['phi']['ax'].contour(xi, xj, angle, **dobj['phi']['kwdargs']) elif plot == 'imshow': extent = (xi.min(), xi.max(), xj.min(), xj.max()) obj0 = dobj['bragg']['ax'].imshow(bragg, extent=extent, aspect='equal', adjustable='datalim', **dobj['bragg']['kwdargs']) obj1 = dobj['phi']['ax'].imshow(angle, extent=extent, aspect='equal', adjustable='datalim', **dobj['phi']['kwdargs']) elif plot == 'pcolor': obj0 = dobj['bragg']['ax'].pcolor(xi, xj, bragg, **dobj['bragg']['kwdargs']) obj1 = dobj['phi']['ax'].pcolor(xi, xj, angle, **dobj['phi']['kwdargs']) dobj['bragg']['obj'] = obj0 dobj['phi']['obj'] = obj1 # Post polish for k0 in set(dobj.keys()): dobj[k0]['ax'].set_xlabel(r'xi (m)') dobj[k0]['ax'].set_ylabel(r'xj (m)') if colorbar is True: cax0 = plt.colorbar(dobj['bragg']['obj'], ax=dobj['bragg']['ax']) cax1 = plt.colorbar(dobj['phi']['obj'], ax=dobj['phi']['ax']) cax0.ax.set_title(r'$\theta_{bragg}$' + '\n' + r'($%s$)' % braggunits) cax1.ax.set_title(r'$ang$' + '\n' + r'($%s$)' % angunits) if leg is not False: ax.legend(**leg) if wintit is not False: ax.figure.canvas.set_window_title(wintit) if tit is not False: ax.figure.suptitle(tit, size=10, weight='bold', ha='right') return ax def CrystalBragg_plot_line_tracing_on_det( lamb, xi, xj, xi_err, xj_err, det=None, johann=None, rocking=None, ax=None, dleg=None, fs=None, dmargin=None, wintit=None, tit=None, ): # Check inputs # ------------ if dleg is None: dleg = {'loc': 'upper right', 'bbox_to_anchor': (0.93, 0.8)} if fs is None: fs = (6, 8) if dmargin is None: dmargin = {'left': 0.05, 'right': 0.99, 'bottom': 0.06, 'top': 0.92, 'wspace': None, 'hspace': 0.4} if wintit is None: wintit = _WINTIT if tit is None: tit = "line tracing" if johann is True: tit += " - johann error" if rocking is True: tit += " - rocking curve" plot_err = johann is True or rocking is True # Plot # ------------ if ax is None: fig = plt.figure(figsize=fs) gs = gridspec.GridSpec(1, 1, **dmargin) ax = fig.add_subplot(gs[0, 0], aspect='equal', adjustable='datalim') if wintit is not False: fig.canvas.set_window_title(wintit) if tit is not False: fig.suptitle(tit, size=14, weight='bold') if det.get('outline') is not None: ax.plot( det['outline'][0, :], det['outline'][1, :], ls='-', lw=1., c='k', ) for l in range(lamb.size): lab = r'$\lambda$'+' = {:6.3f} A'.format(lamb[l]*1.e10) l0, = ax.plot(xi[l, :], xj[l, :], ls='-', lw=1., label=lab) if plot_err: ax.plot( xi_err[l, ...], xj_err[l, ...], ls='None', lw=1., c=l0.get_color(), ms=4, marker='.', ) if dleg is not False: ax.legend(**dleg) return ax def CrystalBragg_plot_johannerror( xi, xj, lamb, phi, err_lamb, err_phi, err_lamb_units=None, err_phi_units=None, cmap=None, vmin=None, vmax=None, fs=None, dmargin=None, wintit=None, tit=None, angunits=None, ): # Check inputs # ------------ if fs is None: fs = (14, 8) if cmap is None: cmap = plt.cm.viridis if dmargin is None: dmargin = {'left': 0.05, 'right': 0.99, 'bottom': 0.06, 'top': 0.92, 'wspace': None, 'hspace': 0.4} if angunits is None: angunits = 'rad' assert angunits in ['deg', 'rad'] if angunits == 'deg': # bragg = bragg*180./np.pi phi = phi*180./np.pi err_phi = err_phi*180./np.pi err_phi_units = angunits if wintit is None: wintit = _WINTIT if tit is None: tit = False # pre-compute # ------------ # extent extent = (xi.min(), xi.max(), xj.min(), xj.max()) # Plot # ------------ fig = plt.figure(figsize=fs) gs = gridspec.GridSpec(1, 3, **dmargin) ax0 = fig.add_subplot(gs[0, 0], aspect='equal') # 
adjustable='datalim') ax1 = fig.add_subplot( gs[0, 1], aspect='equal', sharex=ax0, sharey=ax0, ) ax2 = fig.add_subplot( gs[0, 2], aspect='equal', sharex=ax0, sharey=ax0, ) ax0.set_title('Iso-lamb and iso-phi at crystal summit') ax1.set_title(f'Focalization error on lamb ({err_lamb_units})') ax2.set_title(f'Focalization error on phi ({err_phi_units})') ax0.contour(xi, xj, lamb.T, 10, cmap=cmap) ax0.contour(xi, xj, phi.T, 10, cmap=cmap, ls='--') imlamb = ax1.imshow( err_lamb.T, extent=extent, aspect='equal', origin='lower', interpolation='nearest', vmin=vmin, vmax=vmax, ) imphi = ax2.imshow( err_phi.T, extent=extent, aspect='equal', origin='lower', interpolation='nearest', vmin=vmin, vmax=vmax, ) plt.colorbar(imlamb, ax=ax1) plt.colorbar(imphi, ax=ax2) if wintit is not False: fig.canvas.set_window_title(wintit) if tit is not False: fig.suptitle(tit, size=14, weight='bold') return [ax0, ax1, ax2] def CrystalBragg_plot_focal_error_summed( cryst=None, dcryst=None, lamb=None, bragg=None, error_lambda=None, ddist=None, di=None, ddist0=None, di0=None, dj0=None, dtheta0=None, dpsi0=None, tilt0=None, angle_nout=None, det_ref=None, units=None, plot_dets=None, nsort=None, tangent_to_rowland=None, use_non_parallelism=None, pts=None, test_lamb_interv=None, contour=None, fs=None, cmap=None, vmin=None, vmax=None, ax=None, ): if cmap is None: # cmap = 'RdYlBu' cmap = plt.cm.viridis if nsort is None: nsort = 5 if contour is None: errmin = np.nanmin(error_lambda) contour = [errmin + (np.nanmax(error_lambda) - errmin)/50.] if fs is None: fs = (6, 8) if ax is None: fig = plt.figure(figsize=fs) gs = gridspec.GridSpec(1, 1) ax = fig.add_subplot(gs[0, 0]) ax.set_title('Mean focalization error\non detector') ax.set_xlabel('ddist (m)') ax.set_ylabel('di (m)') # plot error map function(ddist, di) extent = (ddist.min(), ddist.max(), di.min(), di.max()) errmap = ax.imshow( error_lambda, cmap=cmap, vmin=vmin, vmax=vmax, origin='lower', extent=extent, interpolation='nearest', ) cbar = plt.colorbar( errmap, label=f"error on lambda ({units})", orientation="vertical", ) ax.contour( ddist, di, error_lambda, contour, colors='w', linewstyles='-', linewidths=1., ) ax.contour( ddist, di, test_lamb_interv, contour, colors='yellow', linewstyles='-', linewidths=1., ) # Computing detector with exact position of det_ref if det_ref: dpsi0bis = float(dpsi0) if tangent_to_rowland: dpsi0bis = dpsi0 - angle_nout detector_comp = cryst.get_detector_approx( ddist=ddist0, di=di0, dj=dj0, dtheta=dtheta0, dpsi=dpsi0bis, tilt=tilt0, lamb=lamb, use_non_parallelism=use_non_parallelism, tangent_to_rowland=False, ) detector_comp['outline'] = det_ref['outline'] ax.plot( ddist0, di0, marker='x', ls='None', color='w', ) if plot_dets: indsort = np.argsort(np.ravel(error_lambda)) inddist = indsort % ddist.size inddi = indsort // ddist.size # plot nbr of dets on map "mean focalization error = f(ddist, di)" ax.plot( ddist[inddist[:nsort]], di[inddi[:nsort]], marker='x', ls='None', color='r', ) # plot dets geometry with CrystalBragg_plot() if det_ref is not None: dax = cryst.plot( det=det_ref, pts=pts, color='black', ) dax = cryst.plot( det=detector_comp, pts=pts, color='blue', element='ocv', dax=dax, ) msg = ( "Parameters of reference detector:\n" + "Center position in (x, y, z): ({})\n".format( np.round(detector_comp['cent'], decimals=4) ) + "Translations (ddist, di, dj): ({}, {}, {}) [m]\n".format( ddist0, di0, dj0, ) + "Rotations (dtheta, dpsi, tilt): ({}, {}, {}) [rad]\n".format( dtheta0, dpsi0, tilt0, ) ) print(msg) det = {} for ii in range(nsort): 
det[ii] = cryst.get_detector_approx( ddist=ddist[inddist[ii]], di=di[inddi[ii]], tangent_to_rowland=tangent_to_rowland, ) det[ii]['outline'] = det_ref['outline'] dax = cryst.plot( det=det[ii], pts=pts, color='red', element='oc', dax=dax, ) print( "det: {}\n".format(det[ii]) + "\t ddist: {}\n".format(ddist[inddist[ii]]) + "\t di: {}\n".format(di[inddi[ii]]) ) return ax # ################################################################# # ################################################################# # Ray tracing plot # ################################################################# # ################################################################# # To be clarified def CrystalBragg_plot_raytracing_from_lambpts(xi=None, xj=None, lamb=None, xi_bounds=None, xj_bounds=None, pts=None, ptscryst=None, ptsdet=None, det_cent=None, det_nout=None, det_ei=None, det_ej=None, cryst=None, proj=None, fs=None, ax=None, dmargin=None, wintit=None, tit=None, legend=None, draw=None): # Check assert xi.shape == xj.shape and xi.ndim == 3 assert (isinstance(proj, list) and all([pp in ['det', '2d', '3d'] for pp in proj])) if legend is None or legend is True: legend = dict(bbox_to_anchor=(1.02, 1.), loc='upper left', ncol=1, mode="expand", borderaxespad=0., prop={'size': 6}) if wintit is None: wintit = _WINTIT if draw is None: draw = True # Prepare nlamb, npts, ndtheta = xi.shape det = np.array([[xi_bounds[0], xi_bounds[1], xi_bounds[1], xi_bounds[0], xi_bounds[0]], [xj_bounds[0], xj_bounds[0], xj_bounds[1], xj_bounds[1], xj_bounds[0]]]) lcol = ['r', 'g', 'b', 'm', 'y', 'c'] lm = ['+', 'o', 'x', 's'] lls = ['-', '--', ':', '-.'] ncol, nm, nls = len(lcol), len(lm), len(lls) if '2d' in proj or '3d' in proj: pts = np.repeat(np.repeat(pts[:, None, :], nlamb, axis=1)[..., None], ndtheta, axis=-1)[..., None] ptsall = np.concatenate( ( pts, ptscryst[..., None], ptsdet[..., None], np.full((3, nlamb, npts, ndtheta, 1), np.nan), ), axis=-1, ).reshape((3, nlamb, npts, ndtheta*4)) del pts, ptscryst, ptsdet if '2d' in proj: R = np.hypot(ptsall[0, ...], ptsall[1, ...]) # -------- # Plot lax = [] if 'det' in proj: # Prepare if ax is None: if fs is None: fsi = (8, 6) else: fsi = fs if dmargin is None: dmargini = {'left': 0.1, 'right': 0.8, 'bottom': 0.1, 'top': 0.9, 'wspace': None, 'hspace': 0.4} else: dmargini = dmargin if tit is None: titi = False else: titi = tit fig = plt.figure(figsize=fsi) gs = gridspec.GridSpec(1, 1, **dmargini) axi = fig.add_subplot( gs[0, 0], aspect='equal', adjustable='datalim', ) axi.set_xlabel(r'$x_i$ (m)') axi.set_ylabel(r'$x_j$ (m)') else: axi = ax # plot axi.plot(det[0, :], det[1, :], ls='-', lw=1., c='k') for pp in range(npts): for ll in range(nlamb): lab = ( r'pts {} - '.format(pp) + r'$\lambda$' + ' = {:6.3f} A'.format(lamb[ll]*1.e10) ) axi.plot( xi[ll, pp, :], xj[ll, pp, :], ls='None', marker=lm[ll % nm], c=lcol[pp % ncol], label=lab, ) # decorate if legend is not False: axi.legend(**legend) if wintit is not False: axi.figure.canvas.set_window_title(wintit) if titi is not False: axi.figure.suptitle(titi, size=14, weight='bold') if draw: axi.figure.canvas.draw() lax.append(axi) if '2d' in proj: # Prepare if tit is None: titi = False else: titi = tit # plot dax = cryst.plot(lax=ax, proj='all', det_cent=det_cent, det_nout=det_nout, det_ei=det_ei, det_ej=det_ej, draw=False) for pp in range(npts): for ll in range(nlamb): lab = (r'pts {} - '.format(pp) + r'$\lambda$'+' = {:6.3f} A'.format(lamb[ll]*1.e10)) dax['cross'].plot( R[ll, pp, :], ptsall[2, ll, pp, :], ls=lls[ll % nls], color=lcol[pp % 
ncol], label=lab, ) dax['hor'].plot( ptsall[0, ll, pp, :], ptsall[1, ll, pp, :], ls=lls[ll % nls], color=lcol[pp % ncol], label=lab, ) # decorate if legend is not False: dax['cross'].legend(**legend) if wintit is not False: dax['cross'].figure.canvas.set_window_title(wintit) if titi is not False: dax['cross'].figure.suptitle(titi, size=14, weight='bold') if draw: dax['cross'].figure.canvas.draw() lax.append(dax['cross']) lax.append(dax['hor']) return lax
Didou09/tofu
tofu/geom/_plot_optics.py
Python
mit
45,222
[ "CRYSTAL" ]
79e5b8162cbea22937797b0d3db9f012295c7374b1a87f401ea2c82a21693c0a
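The tofu plotting helpers in the record above switch between contour, imshow and pcolor branches when drawing the Bragg-angle and phi maps on the detector plane. The snippet below is a minimal standalone sketch of the imshow branch only; the grids and maps (xi, xj, bragg, angle) are random placeholders standing in for the function arguments, and it is not code from the tofu repository.

# Minimal sketch of the imshow branch used by the Bragg/phi plotting helper above
# (illustrative placeholders only, not tofu code).
import numpy as np
import matplotlib.pyplot as plt

xi = np.linspace(-0.1, 0.1, 200)          # detector horizontal coordinate (m)
xj = np.linspace(-0.05, 0.05, 100)        # detector vertical coordinate (m)
bragg = np.random.rand(xj.size, xi.size)  # placeholder Bragg-angle map
angle = np.random.rand(xj.size, xi.size)  # placeholder phi map

extent = (xi.min(), xi.max(), xj.min(), xj.max())
fig, (ax_bragg, ax_phi) = plt.subplots(1, 2, figsize=(10, 4))
im0 = ax_bragg.imshow(bragg, extent=extent, origin='lower', aspect='equal')
im1 = ax_phi.imshow(angle, extent=extent, origin='lower', aspect='equal')
for ax in (ax_bragg, ax_phi):
    ax.set_xlabel(r'xi (m)')
    ax.set_ylabel(r'xj (m)')
plt.colorbar(im0, ax=ax_bragg)
plt.colorbar(im1, ax=ax_phi)
plt.show()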
# $Id$ # # Copyright (C) 2001-2006 greg Landrum and Rational Discovery LLC # # @@ All Rights Reserved @@ # This file is part of the RDKit. # The contents are covered by the terms of the BSD license # which is included in the file license.txt, found at the root # of the RDKit source tree. # """testing code inspired by old bugs The bugs were in the OELib code, so these are maybe no longer relevant... but tests are tests """ from rdkit import RDConfig import unittest, os from rdkit.six.moves import cPickle from rdkit import Chem from rdkit.Chem import AllChem def feq(n1, n2, tol=1e-4): return abs(n1 - n2) <= tol class TestCase(unittest.TestCase): def testBug12a(self): from rdkit.Chem import MolSurf inD = [ ('OC(=O)[CH](CC1=CC=CC=C1)C2=CC=CC=C2', 37.3), ('OC(=O)C(C1=CC=CC=C1)(C2=CC=CC=C2)C3=CC=CC=C3', 37.3), ('CCC(CC)(CC)[CH](OC(=O)C1=C(C=CC=C1)C(O)=O)C2=CC=CC=C2', 63.6), ('C[C](O)([CH](C(O)=O)C1=CC=CC=C1)C2=CC=CC=C2', 57.53), ('C[CH]([CH](C(O)=O)C1=CC=CC=C1)C2=CC=CC=C2', 37.3), ('OC(=O)CBr', 37.3), ('OC(=O)CCl', 37.3), ('OC(=O)C=CC(=O)C1=CC=CC=C1', 54.37), ('NC1=C(C=CC=C1)C(O)=O', 63.32), ('OC(=O)C1=CC=CC=C1', 37.3), ('CN(C)C(=N)NC1=NC(=C2C=C(Cl)C=CC2=N1)C.O[N+]([O-])=O', 128.27), ('CCN(CC)C(=N)NC1=NC(=C2C=C(Cl)C=CC2=N1)C.O[N+]([O-])=O', 128.27), ('ON(O)NC(=N)NN=C1C(=O)NC2=C1C=CC=C2', 133.07), ('NC1=CC=C(C=C1)C=NNC(=N)NN(O)O', 129.99), ('CC(=O)NC1=CC=C(C=C1)C=NNC(=N)NN(O)O', 133.07), ('COC1=CC=C(C=C1)C=NNC(=N)NN(O)O', 113.2), ('ON(O)NC(=N)NN=CC1=CC=CC=C1', 103.97), ('ON(O)NC(=N)NN=CC=CC1=CC=CC=C1', 103.97), ('ON(O)NC(=N)NN=CC1=C(Cl)C=C(Cl)C=C1', 103.97), ('CC(C)=CCCC(C)=CC=NNC(=N)NN(O)O', 103.97), ('CN(C)C1=CC=C(C=C1)C=NNC(=N)NN(O)O', 107.21), ('ON(O)NC(=N)NN=CC1=CC=CO1', 117.11), ('ON(O)NC(=N)NN=CC1=CC=C(O)C=C1', 124.2), ('CC(C)C1=CC=C(C=C1)C=NNC(=N)NN(O)O', 103.97), ('COC1=C(C=CC=C1)C=NNC(=N)NN(O)O', 113.2), ('ON(O)NC(=N)NN=CC1=C(C=CC=C1)[N+]([O-])=O', 147.11), ('ON(O)NC(=N)NN=CC1=CC=C(C=C1)[N+]([O-])=O', 147.11), ('ON(O)NC(=N)NN=CC1=C(O)C=CC(=C1)[N+]([O-])=O', 167.34), ('ON(O)NC(=N)NN=CC1=CC=NC=C1', 116.86), ('ON(O)NC(=N)NN=CC1=CC=CC=N1', 116.86), ('ON(O)NC(=N)NN=CC1=CC=CN=C1', 116.86), ] for smi, val in inD: mol = Chem.MolFromSmiles(smi) v = MolSurf.TPSA(mol) assert feq(v, val), 'bad TPSA (%f != %f) for smiles: %s' % (v, val, smi) def testBug12b(self): """ failures for Bug12 which are actually related to Bug14 """ from rdkit.Chem import MolSurf inD = [('[O-][N+](=O)C1=CNC(=N)S1', 82.78), ] for smi, val in inD: mol = Chem.MolFromSmiles(smi) v = MolSurf.TPSA(mol) assert feq(v, val), 'bad TPSA (%f != %f) for smiles: %s' % (v, val, smi) def testBug14(self): """ """ smi = '[O-][N+](=O)C1=CNC(=N)S1' mol = Chem.MolFromSmiles(smi) at = mol.GetAtomWithIdx(5) assert at.GetHybridization() == Chem.HybridizationType.SP2, 'bad hyb' assert at.GetTotalNumHs() == 1, 'bad H count' mol = Chem.MolFromSmiles(smi) at = mol.GetAtomWithIdx(5) assert at.GetTotalNumHs() == 1, 'bad H count' assert at.GetHybridization() == Chem.HybridizationType.SP2, 'bad hyb' def testGithub112(self): """ problems with AllChem.GetBestRMS() and molecules with Hs """ m0 = Chem.MolFromMolFile( os.path.join(RDConfig.RDCodeDir, 'Chem', 'test_data', 'github112_tgt.mol'), removeHs=False) m1 = Chem.MolFromMolFile( os.path.join(RDConfig.RDCodeDir, 'Chem', 'test_data', 'github112_qry.mol'), removeHs=False) rms = AllChem.GetBestRMS(m0, m1) self.assertAlmostEqual(rms, 0.456, 3) if __name__ == '__main__': unittest.main()
rvianello/rdkit
rdkit/Chem/UnitTestOldBugs.py
Python
bsd-3-clause
3,815
[ "RDKit" ]
d689a72185421b19449e67cf1547e46bff436733ca5365001078f903513844aa
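The RDKit regression test in the record above compares computed TPSA values against a table of reference numbers. A minimal standalone sketch of one such check is shown below; the SMILES string (benzoic acid) and the expected value are taken from the test's own data table.

# One TPSA check pulled out of testBug12a (minimal sketch; SMILES and expected
# value come from the test's reference table).
from rdkit import Chem
from rdkit.Chem import MolSurf

smi, expected = 'OC(=O)C1=CC=CC=C1', 37.3   # benzoic acid
mol = Chem.MolFromSmiles(smi)
tpsa = MolSurf.TPSA(mol)
assert abs(tpsa - expected) <= 1e-4, 'bad TPSA (%f != %f) for smiles: %s' % (tpsa, expected, smi)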
# -*- coding: utf-8 -*- """Factories for the OSF models, including an abstract ModularOdmFactory. Example usage: :: >>> from tests.factories import UserFactory >>> user1 = UserFactory() >>> user1.username fred0@example.com >>> user2 = UserFactory() fred1@example.com Factory boy docs: http://factoryboy.readthedocs.org/ """ import datetime import functools from django.utils import timezone from factory import base, Sequence, SubFactory, post_generation, LazyAttribute import mock from mock import patch, Mock from modularodm import Q from modularodm.exceptions import NoResultsFound from framework.auth import User, Auth from framework.auth.utils import impute_names_model, impute_names from framework.guid.model import Guid from framework.mongo import StoredObject from framework.sessions.model import Session from tests.base import fake from tests.base import get_default_metaschema from website.addons import base as addons_base from addons.wiki.models import NodeWikiPage from website.oauth.models import ( ApiOAuth2Application, ApiOAuth2PersonalToken, ExternalAccount, ExternalProvider ) from website.preprints.model import PreprintProvider, PreprintService from website.project.model import ( Comment, DraftRegistration, MetaSchema, Node, NodeLog, Pointer, PrivateLink, Tag, WatchConfig, AlternativeCitation, ensure_schemas, Institution ) from website.project.sanctions import ( Embargo, RegistrationApproval, Retraction, Sanction, ) from website.project.taxonomies import Subject from website.notifications.model import NotificationSubscription, NotificationDigest from website.archiver.model import ArchiveTarget, ArchiveJob from website.identifiers.model import Identifier from website.archiver import ARCHIVER_SUCCESS from website.project.licenses import NodeLicense, NodeLicenseRecord, ensure_licenses from website.util import permissions from website.files.models.osfstorage import OsfStorageFile from website.exceptions import InvalidSanctionApprovalToken ensure_licenses = functools.partial(ensure_licenses, warn=False) # TODO: This is a hack. Check whether FactoryBoy can do this better def save_kwargs(**kwargs): for value in kwargs.itervalues(): if isinstance(value, StoredObject) and not value._is_loaded: value.save() def FakerAttribute(provider, **kwargs): """Attribute that lazily generates a value using the Faker library. Example: :: class UserFactory(ModularOdmFactory): name = FakerAttribute('name') """ fake_gen = getattr(fake, provider) if not fake_gen: raise ValueError('{0!r} is not a valid faker provider.'.format(provider)) return LazyAttribute(lambda x: fake_gen(**kwargs)) class ModularOdmFactory(base.Factory): """Base factory for modular-odm objects. 
""" class Meta: abstract = True @classmethod def _build(cls, target_class, *args, **kwargs): """Build an object without saving it.""" save_kwargs(**kwargs) return target_class(*args, **kwargs) @classmethod def _create(cls, target_class, *args, **kwargs): save_kwargs(**kwargs) instance = target_class(*args, **kwargs) instance.save() return instance class PreprintProviderFactory(ModularOdmFactory): class Meta: model = PreprintProvider abstract = False def __init__(self, provider_id, provider_name): super(PreprintProviderFactory, self).__init() self._id = provider_id self.name = provider_name self.save() class UserFactory(ModularOdmFactory): class Meta: model = User abstract = False username = Sequence(lambda n: 'fred{0}@mail.com'.format(n)) # Don't use post generation call to set_password because # It slows down the tests dramatically password = 'password' fullname = Sequence(lambda n: 'Freddie Mercury{0}'.format(n)) is_registered = True is_claimed = True date_confirmed = timezone.now() merged_by = None email_verifications = {} verification_key = None verification_key_v2 = {} @post_generation def set_names(self, create, extracted): parsed = impute_names_model(self.fullname) for key, value in parsed.items(): setattr(self, key, value) if create: self.save() @post_generation def set_emails(self, create, extracted): if self.username not in self.emails: self.emails.append(self.username) self.save() class AuthUserFactory(UserFactory): """A user that automatically has an api key, for quick authentication. Example: :: user = AuthUserFactory() res = self.app.get(url, auth=user.auth) # user is "logged in" """ @post_generation def add_auth(self, create, extracted): self.set_password('password', notify=False) self.save() self.auth = (self.username, 'password') class TagFactory(ModularOdmFactory): class Meta: model = Tag _id = Sequence(lambda n: 'scientastic-{}'.format(n)) class ApiOAuth2ApplicationFactory(ModularOdmFactory): class Meta: model = ApiOAuth2Application owner = SubFactory(UserFactory) name = Sequence(lambda n: 'Example OAuth2 Application #{}'.format(n)) home_url = 'ftp://ftp.ncbi.nlm.nimh.gov/' callback_url = 'http://example.uk' class ApiOAuth2PersonalTokenFactory(ModularOdmFactory): class Meta: model = ApiOAuth2PersonalToken owner = SubFactory(UserFactory) scopes = 'osf.full_write osf.full_read' name = Sequence(lambda n: 'Example OAuth2 Personal Token #{}'.format(n)) class PrivateLinkFactory(ModularOdmFactory): class Meta: model = PrivateLink name = "link" key = Sequence(lambda n: 'foobar{}'.format(n)) anonymous = False creator = SubFactory(AuthUserFactory) class AbstractNodeFactory(ModularOdmFactory): class Meta: model = Node title = 'The meaning of life' description = 'The meaning of life is 42.' 
creator = SubFactory(AuthUserFactory) class ProjectFactory(AbstractNodeFactory): type = 'osf.node' category = 'project' class CollectionFactory(ProjectFactory): is_collection = True class BookmarkCollectionFactory(CollectionFactory): is_bookmark_collection = True class NodeFactory(AbstractNodeFactory): category = 'hypothesis' parent = SubFactory(ProjectFactory) class PreprintProviderFactory(ModularOdmFactory): name = 'OSFArxiv' description = 'Preprint service for the OSF' class Meta: model = PreprintProvider @classmethod def _create(cls, target_class, name=None, description=None, *args, **kwargs): provider = target_class(*args, **kwargs) provider.name = name provider.description = description provider.save() return provider class PreprintFactory(ModularOdmFactory): creator = None category = 'project' doi = Sequence(lambda n: '10.12345/0{}'.format(n)) provider = SubFactory(PreprintProviderFactory) external_url = 'http://hello.org' class Meta: model = PreprintService @classmethod def _create(cls, target_class, project=None, is_public=True, filename='preprint_file.txt', provider=None, doi=None, external_url=None, is_published=True, subjects=None, finish=True, *args, **kwargs): save_kwargs(**kwargs) user = None if project: user = project.creator user = kwargs.get('user') or kwargs.get('creator') or user or UserFactory() kwargs['creator'] = user # Original project to be converted to a preprint project = project or AbstractNodeFactory(*args, **kwargs) if user._id not in project.permissions: project.add_contributor( contributor=user, permissions=permissions.CREATOR_PERMISSIONS, log=False, save=False ) project.save() project.reload() file = OsfStorageFile.create( is_file=True, node=project, path='/{}'.format(filename), name=filename, materialized_path='/{}'.format(filename)) file.save() preprint = target_class(node=project, provider=provider) auth = Auth(project.creator) if finish: preprint.set_primary_file(file, auth=auth) subjects = subjects or [[SubjectFactory()._id]] preprint.set_subjects(subjects, auth=auth) preprint.set_published(is_published, auth=auth) if not preprint.is_published: project._has_abandoned_preprint = True project.preprint_article_doi = doi project.save() preprint.save() return preprint class SubjectFactory(ModularOdmFactory): text = Sequence(lambda n: 'Example Subject #{}'.format(n)) class Meta: model = Subject @classmethod def _create(cls, target_class, text=None, parents=[], *args, **kwargs): try: subject = Subject.find_one(Q('text', 'eq', text)) except NoResultsFound: subject = target_class(*args, **kwargs) subject.text = text subject.save() subject.parents.add(*parents) subject.save() return subject class RegistrationFactory(AbstractNodeFactory): creator = None # Default project is created if not provided category = 'project' @classmethod def _build(cls, target_class, *args, **kwargs): raise Exception('Cannot build registration without saving.') @classmethod def _create(cls, target_class, project=None, is_public=False, schema=None, data=None, archive=False, embargo=None, registration_approval=None, retraction=None, *args, **kwargs): save_kwargs(**kwargs) user = None if project: user = project.creator user = kwargs.get('user') or kwargs.get('creator') or user or UserFactory() kwargs['creator'] = user # Original project to be registered project = project or target_class(*args, **kwargs) if user._id not in project.permissions: project.add_contributor( contributor=user, permissions=permissions.CREATOR_PERMISSIONS, log=False, save=False ) project.save() # Default registration 
parameters schema = schema or get_default_metaschema() data = data or {'some': 'data'} auth = Auth(user=user) register = lambda: project.register_node( schema=schema, auth=auth, data=data ) def add_approval_step(reg): if embargo: reg.embargo = embargo elif registration_approval: reg.registration_approval = registration_approval elif retraction: reg.retraction = retraction else: reg.require_approval(reg.creator) reg.save() reg.sanction.add_authorizer(reg.creator, reg) reg.sanction.save() with patch('framework.celery_tasks.handlers.enqueue_task'): reg = register() add_approval_step(reg) if not archive: with patch.object(reg.archive_job, 'archive_tree_finished', Mock(return_value=True)): reg.archive_job.status = ARCHIVER_SUCCESS reg.archive_job.save() reg.sanction.state = Sanction.APPROVED reg.sanction.save() ArchiveJob( src_node=project, dst_node=reg, initiator=user, ) if is_public: reg.is_public = True reg.save() return reg class WithdrawnRegistrationFactory(AbstractNodeFactory): @classmethod def _create(cls, *args, **kwargs): registration = kwargs.pop('registration', None) registration.is_public = True user = kwargs.pop('user', registration.creator) registration.retract_registration(user) withdrawal = registration.retraction for token in withdrawal.approval_state.values(): try: withdrawal.approve_retraction(user, token['approval_token']) withdrawal.save() return withdrawal except InvalidSanctionApprovalToken: continue class ForkFactory(ModularOdmFactory): class Meta: model = Node @classmethod def _create(cls, *args, **kwargs): project = kwargs.pop('project', None) user = kwargs.pop('user', project.creator) title = kwargs.pop('title', None) fork = project.fork_node(auth=Auth(user), title=title) fork.save() return fork class PointerFactory(ModularOdmFactory): class Meta: model = Pointer node = SubFactory(NodeFactory) class NodeLogFactory(ModularOdmFactory): class Meta: model = NodeLog action = 'file_added' user = SubFactory(UserFactory) class WatchConfigFactory(ModularOdmFactory): class Meta: model = WatchConfig node = SubFactory(NodeFactory) class SanctionFactory(ModularOdmFactory): class Meta: abstract = True @classmethod def _create(cls, target_class, initiated_by=None, approve=False, *args, **kwargs): user = kwargs.get('user') or UserFactory() kwargs['initiated_by'] = initiated_by or user sanction = ModularOdmFactory._create(target_class, *args, **kwargs) reg_kwargs = { 'creator': user, 'user': user, sanction.SHORT_NAME: sanction } RegistrationFactory(**reg_kwargs) if not approve: sanction.state = Sanction.UNAPPROVED sanction.save() return sanction class RetractionFactory(SanctionFactory): class Meta: model = Retraction user = SubFactory(UserFactory) class EmbargoFactory(SanctionFactory): class Meta: model = Embargo user = SubFactory(UserFactory) class RegistrationApprovalFactory(SanctionFactory): class Meta: model = RegistrationApproval user = SubFactory(UserFactory) class EmbargoTerminationApprovalFactory(ModularOdmFactory): FACTORY_STRATEGY = base.CREATE_STRATEGY @classmethod def create(cls, registration=None, user=None, embargo=None, *args, **kwargs): if registration: if not user: user = registration.creator else: user = user or AuthUserFactory() if not embargo: embargo = EmbargoFactory(initiated_by=user) registration = embargo._get_registration() else: registration = RegistrationFactory(creator=user, user=user, embargo=embargo) with mock.patch('website.project.sanctions.Sanction.is_approved', mock.Mock(return_value=True)): with 
mock.patch('website.project.sanctions.TokenApprovableSanction.ask', mock.Mock()): approval = registration.request_embargo_termination(Auth(user)) return approval class NodeWikiFactory(ModularOdmFactory): class Meta: model = NodeWikiPage page_name = 'home' content = 'Some content' version = 1 user = SubFactory(UserFactory) node = SubFactory(NodeFactory) @post_generation def set_node_keys(self, create, extracted): self.node.wiki_pages_current[self.page_name] = self._id if self.node.wiki_pages_versions.get(self.page_name, None): self.node.wiki_pages_versions[self.page_name].append(self._id) else: self.node.wiki_pages_versions[self.page_name] = [self._id] self.node.save() class UnregUserFactory(ModularOdmFactory): """Factory for an unregistered user. Uses User.create_unregistered() to create an instance. """ class Meta: model = User abstract = False email = Sequence(lambda n: "brian{0}@queen.com".format(n)) fullname = Sequence(lambda n: "Brian May{0}".format(n)) @classmethod def _build(cls, target_class, *args, **kwargs): '''Build an object without saving it.''' return target_class.create_unregistered(*args, **kwargs) @classmethod def _create(cls, target_class, *args, **kwargs): instance = target_class.create_unregistered(*args, **kwargs) instance.save() return instance class UnconfirmedUserFactory(ModularOdmFactory): """Factory for a user that has not yet confirmed their primary email address (username). """ class Meta: model = User username = Sequence(lambda n: 'roger{0}@queen.com'.format(n)) fullname = Sequence(lambda n: 'Roger Taylor{0}'.format(n)) password = 'killerqueen' @classmethod def _build(cls, target_class, username, password, fullname): '''Build an object without saving it.''' return target_class.create_unconfirmed( username=username, password=password, fullname=fullname ) @classmethod def _create(cls, target_class, username, password, fullname): instance = target_class.create_unconfirmed( username=username, password=password, fullname=fullname ) instance.save() return instance class AuthFactory(base.Factory): class Meta: model = Auth user = SubFactory(UserFactory) class ProjectWithAddonFactory(ProjectFactory): """Factory for a project that has an addon. The addon will be added to both the Node and the creator records. :: p = ProjectWithAddonFactory(addon='github') p.get_addon('github') # => github node settings object p.creator.get_addon('github') # => github user settings object """ # TODO: Should use mock addon objects @classmethod def _build(cls, target_class, addon='s3', *args, **kwargs): '''Build an object without saving it.''' instance = ProjectFactory._build(target_class, *args, **kwargs) auth = Auth(user=instance.creator) instance.add_addon(addon, auth) instance.creator.add_addon(addon) return instance @classmethod def _create(cls, target_class, addon='s3', *args, **kwargs): instance = ProjectFactory._create(target_class, *args, **kwargs) auth = Auth(user=instance.creator) instance.add_addon(addon, auth) instance.creator.add_addon(addon) instance.save() return instance # Deprecated unregistered user factory, used mainly for testing migration class DeprecatedUnregUser(object): '''A dummy "model" for an unregistered user.''' def __init__(self, nr_name, nr_email): self.nr_name = nr_name self.nr_email = nr_email def to_dict(self): return {"nr_name": self.nr_name, "nr_email": self.nr_email} class DeprecatedUnregUserFactory(base.Factory): """Generates a dictonary represenation of an unregistered user, in the format expected by the OSF. 
:: >>> from tests.factories import UnregUserFactory >>> UnregUserFactory() {'nr_name': 'Tom Jones0', 'nr_email': 'tom0@example.com'} >>> UnregUserFactory() {'nr_name': 'Tom Jones1', 'nr_email': 'tom1@example.com'} """ class Meta: model = DeprecatedUnregUser nr_name = Sequence(lambda n: "Tom Jones{0}".format(n)) nr_email = Sequence(lambda n: "tom{0}@mail.com".format(n)) @classmethod def _create(cls, target_class, *args, **kwargs): return target_class(*args, **kwargs).to_dict() _build = _create class CommentFactory(ModularOdmFactory): class Meta: model = Comment content = Sequence(lambda n: 'Comment {0}'.format(n)) is_public = True @classmethod def _build(cls, target_class, *args, **kwargs): node = kwargs.pop('node', None) or NodeFactory() user = kwargs.pop('user', None) or node.creator target = kwargs.pop('target', None) or Guid.load(node._id) content = kwargs.pop('content', None) or 'Test comment.' instance = target_class( node=node, user=user, target=target, content=content, *args, **kwargs ) if isinstance(target.referent, target_class): instance.root_target = target.referent.root_target else: instance.root_target = target return instance @classmethod def _create(cls, target_class, *args, **kwargs): node = kwargs.pop('node', None) or NodeFactory() user = kwargs.pop('user', None) or node.creator target = kwargs.pop('target', None) or Guid.load(node._id) content = kwargs.pop('content', None) or 'Test comment.' instance = target_class( node=node, user=user, target=target, content=content, *args, **kwargs ) if isinstance(target.referent, target_class): instance.root_target = target.referent.root_target else: instance.root_target = target instance.save() return instance class InstitutionFactory(ProjectFactory): default_institution_attributes = { '_id': fake.md5, 'name': fake.company, 'logo_name': fake.file_name, 'auth_url': fake.url, 'domains': lambda: [fake.url()], 'email_domains': lambda: [fake.domain_name()], } def _build(cls, target_class, *args, **kwargs): inst = ProjectFactory._build(target_class) for inst_attr, node_attr in Institution.attribute_map.items(): default = cls.default_institution_attributes.get(inst_attr) if callable(default): default = default() setattr(inst, node_attr, kwargs.pop(inst_attr, default)) for key, val in kwargs.items(): setattr(inst, key, val) return Institution(inst) @classmethod def _create(cls, target_class, *args, **kwargs): inst = ProjectFactory._build(target_class) for inst_attr, node_attr in Institution.attribute_map.items(): default = cls.default_institution_attributes.get(inst_attr) if callable(default): default = default() setattr(inst, node_attr, kwargs.pop(inst_attr, default)) for key, val in kwargs.items(): setattr(inst, key, val) inst.save() return Institution(inst) class NotificationSubscriptionFactory(ModularOdmFactory): class Meta: model = NotificationSubscription class NotificationDigestFactory(ModularOdmFactory): class Meta: model = NotificationDigest class ExternalAccountFactory(ModularOdmFactory): class Meta: model = ExternalAccount provider = 'mock2' provider_id = Sequence(lambda n: 'user-{0}'.format(n)) provider_name = 'Fake Provider' display_name = Sequence(lambda n: 'user-{0}'.format(n)) class SessionFactory(ModularOdmFactory): class Meta: model = Session @classmethod def _build(cls, target_class, *args, **kwargs): user = kwargs.pop('user', None) instance = target_class(*args, **kwargs) if user: instance.data['auth_user_username'] = user.username instance.data['auth_user_id'] = user._primary_key instance.data['auth_user_fullname'] = 
user.fullname return instance @classmethod def _create(cls, target_class, *args, **kwargs): instance = cls._build(target_class, *args, **kwargs) instance.save() return instance class MockOAuth2Provider(ExternalProvider): name = "Mock OAuth 2.0 Provider" short_name = "mock2" client_id = "mock2_client_id" client_secret = "mock2_client_secret" auth_url_base = "https://mock2.com/auth" callback_url = "https://mock2.com/callback" auto_refresh_url = "https://mock2.com/callback" refresh_time = 300 expiry_time = 9001 def handle_callback(self, response): return { 'provider_id': 'mock_provider_id' } class MockAddonNodeSettings(addons_base.AddonNodeSettingsBase): pass class MockAddonUserSettings(addons_base.AddonUserSettingsBase): pass class MockAddonUserSettingsMergeable(addons_base.AddonUserSettingsBase): def merge(self): pass class MockOAuthAddonUserSettings(addons_base.AddonOAuthUserSettingsBase): oauth_provider = MockOAuth2Provider class MockOAuthAddonNodeSettings(addons_base.AddonOAuthNodeSettingsBase): oauth_provider = MockOAuth2Provider folder_id = 'foo' folder_name = 'Foo' folder_path = '/Foo' class ArchiveTargetFactory(ModularOdmFactory): class Meta: model = ArchiveTarget class ArchiveJobFactory(ModularOdmFactory): class Meta: model = ArchiveJob class AlternativeCitationFactory(ModularOdmFactory): class Meta: model = AlternativeCitation @classmethod def _create(cls, target_class, *args, **kwargs): name = kwargs.get('name') text = kwargs.get('text') instance = target_class( name=name, text=text ) instance.save() return instance class DraftRegistrationFactory(ModularOdmFactory): class Meta: model = DraftRegistration @classmethod def _create(cls, *args, **kwargs): branched_from = kwargs.get('branched_from') initiator = kwargs.get('initiator') registration_schema = kwargs.get('registration_schema') registration_metadata = kwargs.get('registration_metadata') if not branched_from: project_params = {} if initiator: project_params['creator'] = initiator branched_from = ProjectFactory(**project_params) initiator = branched_from.creator try: registration_schema = registration_schema or MetaSchema.find()[0] except IndexError: ensure_schemas() registration_metadata = registration_metadata or {} draft = DraftRegistration.create_from_node( branched_from, user=initiator, schema=registration_schema, data=registration_metadata, ) return draft class NodeLicenseRecordFactory(ModularOdmFactory): class Meta: model = NodeLicenseRecord @classmethod def _create(cls, *args, **kwargs): try: NodeLicense.find_one( Q('name', 'eq', 'No license') ) except NoResultsFound: ensure_licenses() kwargs['node_license'] = kwargs.get( 'node_license', NodeLicense.find_one( Q('name', 'eq', 'No license') ) ) return super(NodeLicenseRecordFactory, cls)._create(*args, **kwargs) class IdentifierFactory(ModularOdmFactory): class Meta: model = Identifier referent = SubFactory(RegistrationFactory) value = Sequence(lambda n: 'carp:/2460{}'.format(n)) @classmethod def _create(cls, *args, **kwargs): kwargs['category'] = kwargs.get('category', 'carpid') return super(IdentifierFactory, cls)._create(*args, **kwargs) def render_generations_from_parent(parent, creator, num_generations): current_gen = parent for generation in xrange(0, num_generations): next_gen = NodeFactory( parent=current_gen, creator=creator, title=fake.sentence(), description=fake.paragraph() ) current_gen = next_gen return current_gen def render_generations_from_node_structure_list(parent, creator, node_structure_list): new_parent = None for node_number in node_structure_list: 
if isinstance(node_number, list): render_generations_from_node_structure_list(new_parent or parent, creator, node_number) else: new_parent = render_generations_from_parent(parent, creator, node_number) return new_parent def create_fake_user(): email = fake.email() name = fake.name() parsed = impute_names(name) user = UserFactory( username=email, fullname=name, is_registered=True, is_claimed=True, date_registered=fake.date_time(), emails=[email], **parsed ) user.set_password('faker123') user.save() return user def create_fake_project(creator, n_users, privacy, n_components, name, n_tags, presentation_name, is_registration): auth = Auth(user=creator) project_title = name if name else fake.sentence() if not is_registration: project = ProjectFactory( title=project_title, description=fake.paragraph(), creator=creator ) else: project = RegistrationFactory( title=project_title, description=fake.paragraph(), creator=creator ) project.set_privacy(privacy) for _ in range(n_users): contrib = create_fake_user() project.add_contributor(contrib, auth=auth) if isinstance(n_components, int): for _ in range(n_components): NodeFactory( project=project, title=fake.sentence(), description=fake.paragraph(), creator=creator ) elif isinstance(n_components, list): render_generations_from_node_structure_list(project, creator, n_components) for _ in range(n_tags): project.add_tag(fake.word(), auth=auth) if presentation_name is not None: project.add_tag(presentation_name, auth=auth) project.add_tag('poster', auth=auth) project.save() return project
monikagrabowska/osf.io
tests/factories.py
Python
apache-2.0
30,109
[ "Brian" ]
a51379265e771aa1a71e7dae49a933ad31e6cce5ea42f3166469a2ab515af60c
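The factory module in the record above is meant to be consumed from tests. Below is a minimal usage sketch in the spirit of create_fake_project(); it assumes an initialized OSF test environment (for example inside an OsfTestCase), and the titles and tag are placeholders rather than values from the original module.

# Minimal factory usage sketch (assumes an initialized OSF test context,
# e.g. inside an OsfTestCase; titles and tag below are placeholders).
from framework.auth import Auth
from tests.factories import UserFactory, ProjectFactory, NodeFactory

creator = UserFactory()
auth = Auth(user=creator)

# Build a project, add a second contributor and a child component,
# mirroring the calls made in create_fake_project().
project = ProjectFactory(title='Example project', creator=creator)
contributor = UserFactory()
project.add_contributor(contributor, auth=auth)
component = NodeFactory(parent=project, creator=creator, title='Example component')
project.add_tag('example', auth=auth)
project.save()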
#!/usr/bin/env python #pylint: disable=missing-docstring ################################################################# # DO NOT MODIFY THIS HEADER # # MOOSE - Multiphysics Object Oriented Simulation Environment # # # # (c) 2010 Battelle Energy Alliance, LLC # # ALL RIGHTS RESERVED # # # # Prepared by Battelle Energy Alliance, LLC # # Under Contract No. DE-AC07-05ID14517 # # With the U. S. Department of Energy # # # # See COPYRIGHT for full restrictions # ################################################################# import chigger moose = chigger.annotations.ImageAnnotation(filename='../../../chigger/logos/moose.png', opacity=0.5, scale=0.5, position=[0.5, 0.75]) window = chigger.RenderWindow(moose, size=[400,400], test=True) window.write('image_annotation.png') window.start()
Chuban/moose
python/chigger/tests/annotations/image_annotation.py
Python
lgpl-2.1
1,225
[ "MOOSE" ]
e2a80881446f1c46eacf45c95ed982ed6a65451a9a88ae317719ec1308bbafbe
# Copyright 2015 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Primitive Neural Net (NN) Operations. ## Notes on padding Several neural network operations, such as `tf.nn.conv2d` and `tf.nn.max_pool2d`, take a `padding` parameter, which controls how the input is padded before running the operation. The input is padded by inserting values (typically zeros) before and after the tensor in each spatial dimension. The `padding` parameter can either be the string `'VALID'`, which means use no padding, or `'SAME'` which adds padding according to a formula which is described below. Certain ops also allow the amount of padding per dimension to be explicitly specified by passing a list to `padding`. In the case of convolutions, the input is padded with zeros. In case of pools, the padded input values are ignored. For example, in a max pool, the sliding window ignores padded values, which is equivalent to the padded values being `-infinity`. ### `'VALID'` padding Passing `padding='VALID'` to an op causes no padding to be used. This causes the output size to typically be smaller than the input size, even when the stride is one. In the 2D case, the output size is computed as: ```python out_height = ceil((in_height - filter_height + 1) / stride_height) out_width = ceil((in_width - filter_width + 1) / stride_width) ``` The 1D and 3D cases are similar. Note `filter_height` and `filter_width` refer to the filter size after dilations (if any) for convolutions, and refer to the window size for pools. ### `'SAME'` padding With `'SAME'` padding, padding is applied to each spatial dimension. When the strides are 1, the input is padded such that the output size is the same as the input size. In the 2D case, the output size is computed as: ```python out_height = ceil(in_height / stride_height) out_width = ceil(in_width / stride_width) ``` The amount of padding used is the smallest amount that results in the output size. The formula for the total amount of padding per dimension is: ```python if (in_height % strides[1] == 0): pad_along_height = max(filter_height - stride_height, 0) else: pad_along_height = max(filter_height - (in_height % stride_height), 0) if (in_width % strides[2] == 0): pad_along_width = max(filter_width - stride_width, 0) else: pad_along_width = max(filter_width - (in_width % stride_width), 0) ``` Finally, the padding on the top, bottom, left and right are: ```python pad_top = pad_along_height // 2 pad_bottom = pad_along_height - pad_top pad_left = pad_along_width // 2 pad_right = pad_along_width - pad_left ``` Note that the division by 2 means that there might be cases when the padding on both sides (top vs bottom, right vs left) are off by one. In this case, the bottom and right sides always get the one additional padded pixel. For example, when pad_along_height is 5, we pad 2 pixels at the top and 3 pixels at the bottom. 
Note that this is different from existing libraries such as PyTorch and Caffe, which explicitly specify the number of padded pixels and always pad the same number of pixels on both sides. Here is an example of `'SAME'` padding: >>> in_height = 5 >>> filter_height = 3 >>> stride_height = 2 >>> >>> in_width = 2 >>> filter_width = 2 >>> stride_width = 1 >>> >>> inp = tf.ones((2, in_height, in_width, 2)) >>> filter = tf.ones((filter_height, filter_width, 2, 2)) >>> strides = [stride_height, stride_width] >>> output = tf.nn.conv2d(inp, filter, strides, padding='SAME') >>> output.shape[1] # output_height: ceil(5 / 2) 3 >>> output.shape[2] # output_width: ceil(2 / 1) 2 ### Explicit padding Certain ops, like `tf.nn.conv2d`, also allow a list of explicit padding amounts to be passed to the `padding` parameter. This list is in the same format as what is passed to `tf.pad`, except the padding must be a nested list, not a tensor. For example, in the 2D case, the list is in the format `[[0, 0], [pad_top, pad_bottom], [pad_left, pad_right], [0, 0]]` when `data_format` is its default value of `'NHWC'`. The two `[0, 0]` pairs indicate the batch and channel dimensions have no padding, which is required, as only spatial dimensions can have padding. For example: >>> inp = tf.ones((1, 3, 3, 1)) >>> filter = tf.ones((2, 2, 1, 1)) >>> strides = [1, 1] >>> padding = [[0, 0], [1, 2], [0, 1], [0, 0]] >>> output = tf.nn.conv2d(inp, filter, strides, padding=padding) >>> tuple(output.shape) (1, 5, 3, 1) >>> # Equivalently, tf.pad can be used, since convolutions pad with zeros. >>> inp = tf.pad(inp, padding) >>> # 'VALID' means to use no padding in conv2d (we already padded inp) >>> output2 = tf.nn.conv2d(inp, filter, strides, padding='VALID') >>> tf.debugging.assert_equal(output, output2) """ import functools import numbers import numpy as np from tensorflow.python.eager import context from tensorflow.python.framework import config from tensorflow.python.framework import constant_op from tensorflow.python.framework import dtypes from tensorflow.python.framework import errors_impl from tensorflow.python.framework import graph_util from tensorflow.python.framework import ops from tensorflow.python.framework import random_seed from tensorflow.python.framework import tensor_shape from tensorflow.python.framework import tensor_util from tensorflow.python.ops import array_ops from tensorflow.python.ops import check_ops from tensorflow.python.ops import gen_math_ops from tensorflow.python.ops import gen_nn_ops from tensorflow.python.ops import math_ops from tensorflow.python.ops import random_ops from tensorflow.python.ops import stateless_random_ops from tensorflow.python.ops import variables as variables_lib # go/tf-wildcard-import # pylint: disable=wildcard-import from tensorflow.python.ops.gen_nn_ops import * # pylint: enable=wildcard-import from tensorflow.python.platform import device_context from tensorflow.python.util import deprecation from tensorflow.python.util import dispatch from tensorflow.python.util.compat import collections_abc from tensorflow.python.util.deprecation import deprecated_args from tensorflow.python.util.deprecation import deprecated_argument_lookup from tensorflow.python.util.tf_export import tf_export # Aliases for some automatically-generated names. local_response_normalization = gen_nn_ops.lrn # pylint: disable=protected-access # pylint: disable=g-classes-have-attributes # Acceptable channels last formats (robust to H, W, D order). 
_CHANNELS_LAST_FORMATS = frozenset({ "NWC", "NHC", "NHWC", "NWHC", "NDHWC", "NDWHC", "NHDWC", "NHWDC", "NWDHC", "NWHDC" }) def _get_sequence(value, n, channel_index, name): """Formats a value input for gen_nn_ops.""" # Performance is fast-pathed for common cases: # `None`, `list`, `tuple` and `int`. if value is None: return [1] * (n + 2) # Always convert `value` to a `list`. if isinstance(value, list): pass elif isinstance(value, tuple): value = list(value) elif isinstance(value, int): value = [value] elif not isinstance(value, collections_abc.Sized): value = [value] else: value = list(value) # Try casting to a list. len_value = len(value) # Fully specified, including batch and channel dims. if len_value == n + 2: return value # Apply value to spatial dims only. if len_value == 1: value = value * n # Broadcast to spatial dimensions. elif len_value != n: raise ValueError(f"{name} should be of length 1, {n} or {n + 2}. " f"Received: {name}={value} of length {len_value}") # Add batch and channel dims (always 1). if channel_index == 1: return [1, 1] + value else: return [1] + value + [1] def _non_atrous_convolution( input, # pylint: disable=redefined-builtin filter, # pylint: disable=redefined-builtin padding, data_format=None, # pylint: disable=redefined-builtin strides=None, name=None): """Computes sums of N-D convolutions (actually cross correlation). It is required that 1 <= N <= 3. This is used to implement the more generic `convolution` function, which extends the interface of this function with a `dilation_rate` parameter. Args: input: Rank N+2 tensor of type T of shape `[batch_size] + input_spatial_shape + [in_channels]` if `data_format` does not start with `"NC"`, or `[batch_size, in_channels] + input_spatial_shape` if `data_format` starts with `"NC"`. filter: Rank N+2 tensor of type T of shape `filter_spatial_shape + [in_channels, out_channels]`. Rank of either `input` or `filter` must be known. padding: Padding method to use, must be either "VALID" or "SAME". data_format: A string or None. Specifies whether the channel dimension of the `input` and output is the last dimension (default, or if `data_format` does not start with "NC"), or the second dimension (if `data_format` starts with "NC"). For N=1, the valid values are "NWC" (default) and "NCW". For N=2, the valid values are "NHWC" (default) and "NCHW". For N=3, the valid values are "NDHWC" (default) and "NCDHW". strides: Sequence of N positive integers, defaults to `[1] * N`. name: Name prefix to use. Returns: Rank N+2 tensor of type T of shape `[batch_size] + output_spatial_shape + [out_channels]`, where if padding == "SAME": output_spatial_shape = input_spatial_shape if padding == "VALID": output_spatial_shape = input_spatial_shape - filter_spatial_shape + 1. Raises: ValueError: if ranks are incompatible. """ with ops.name_scope(name, "non_atrous_convolution", [input, filter]) as scope: input = ops.convert_to_tensor(input, name="input") # pylint: disable=redefined-builtin input_shape = input.shape filter = ops.convert_to_tensor(filter, name="filter") # pylint: disable=redefined-builtin filter_shape = filter.shape op = _NonAtrousConvolution( input_shape, filter_shape=filter_shape, padding=padding, data_format=data_format, strides=strides, name=scope) return op(input, filter) class _NonAtrousConvolution: """Helper class for _non_atrous_convolution. Note that this class assumes that shapes of input and filter passed to `__call__` are compatible with `input_shape` and filter_shape passed to the constructor. 
Args: input_shape: static input shape, i.e. input.shape. filter_shape: static filter shape, i.e. filter.shape. padding: see _non_atrous_convolution. data_format: see _non_atrous_convolution. strides: see _non_atrous_convolution. name: see _non_atrous_convolution. num_batch_dims: (Optional.) The number of batch dimensions in the input; if not provided, the default of `1` is used. """ def __init__( self, input_shape, filter_shape, padding, data_format=None, strides=None, name=None, num_batch_dims=1): # filter shape is always rank num_spatial_dims + 2 # and num_spatial_dims == input_shape.ndims - num_batch_dims - 1 if input_shape.ndims is not None: filter_shape = filter_shape.with_rank( input_shape.ndims - num_batch_dims + 1) self.padding = padding self.name = name # input shape is == num_spatial_dims + num_batch_dims + 1 # and filter_shape is always rank num_spatial_dims + 2 if filter_shape.ndims is not None: input_shape = input_shape.with_rank( filter_shape.ndims + num_batch_dims - 1) if input_shape.ndims is None: raise ValueError( "Rank of convolution must be known. " f"Received: input_shape={input_shape} of rank {input_shape.rank}") if input_shape.ndims < 3 or input_shape.ndims - num_batch_dims + 1 > 5: raise ValueError( "`input_shape.rank - num_batch_dims + 1` must be at least 3 and at " f"most 5. Received: input_shape.rank={input_shape.rank} and " f"num_batch_dims={num_batch_dims}") conv_dims = input_shape.ndims - num_batch_dims - 1 if strides is None: strides = [1] * conv_dims elif len(strides) != conv_dims: raise ValueError( f"`len(strides)` should be {conv_dims}. " f"Received: strides={strides} of length {len(strides)}") if conv_dims == 1: # conv1d uses the 2-d data format names if data_format is None: data_format = "NWC" elif data_format not in {"NCW", "NWC", "NCHW", "NHWC"}: raise ValueError("`data_format` must be 'NWC' or 'NCW'. " f"Received: data_format={data_format}") self.strides = strides[0] self.data_format = data_format self.conv_op = self._conv1d elif conv_dims == 2: if data_format is None or data_format == "NHWC": data_format = "NHWC" strides = [1] + list(strides) + [1] elif data_format == "NCHW": strides = [1, 1] + list(strides) else: raise ValueError("`data_format` must be 'NHWC' or 'NCHW'. " f"Received: data_format={data_format}") self.strides = strides self.data_format = data_format self.conv_op = conv2d elif conv_dims == 3: if data_format is None or data_format == "NDHWC": strides = [1] + list(strides) + [1] elif data_format == "NCDHW": strides = [1, 1] + list(strides) else: raise ValueError("`data_format` must be 'NDHWC' or 'NCDHW'. " f"Received: data_format={data_format}") self.strides = strides self.data_format = data_format self.conv_op = _conv3d_expanded_batch # Note that we need this adapter since argument names for conv1d don't match # those for gen_nn_ops.conv2d and gen_nn_ops.conv3d. # pylint: disable=redefined-builtin def _conv1d(self, input, filter, strides, padding, data_format, name): return conv1d( value=input, filters=filter, stride=strides, padding=padding, data_format=data_format, name=name) # pylint: enable=redefined-builtin def __call__(self, inp, filter): # pylint: disable=redefined-builtin return self.conv_op( input=inp, filter=filter, strides=self.strides, padding=self.padding, data_format=self.data_format, name=self.name) def squeeze_batch_dims(inp, op, inner_rank, name=None): """Returns `unsqueeze_batch(op(squeeze_batch(inp)))`. 
Where `squeeze_batch` reshapes `inp` to shape `[prod(inp.shape[:-inner_rank])] + inp.shape[-inner_rank:]` and `unsqueeze_batch` does the reverse reshape but on the output. Args: inp: A tensor with dims `batch_shape + inner_shape` where `inner_shape` is length `inner_rank`. op: A callable that takes a single input tensor and returns a single. output tensor. inner_rank: A python integer. name: A string. Returns: `unsqueeze_batch_op(squeeze_batch(inp))`. """ with ops.name_scope(name, "squeeze_batch_dims", [inp]): inp = ops.convert_to_tensor(inp, name="input") shape = inp.shape inner_shape = shape[-inner_rank:] if not inner_shape.is_fully_defined(): inner_shape = array_ops.shape(inp)[-inner_rank:] batch_shape = shape[:-inner_rank] if not batch_shape.is_fully_defined(): batch_shape = array_ops.shape(inp)[:-inner_rank] if isinstance(inner_shape, tensor_shape.TensorShape): inp_reshaped = array_ops.reshape(inp, [-1] + inner_shape.as_list()) else: inp_reshaped = array_ops.reshape( inp, array_ops.concat(([-1], inner_shape), axis=-1)) out_reshaped = op(inp_reshaped) out_inner_shape = out_reshaped.shape[-inner_rank:] if not out_inner_shape.is_fully_defined(): out_inner_shape = array_ops.shape(out_reshaped)[-inner_rank:] out = array_ops.reshape( out_reshaped, array_ops.concat((batch_shape, out_inner_shape), axis=-1)) out.set_shape(inp.shape[:-inner_rank] + out.shape[-inner_rank:]) return out @tf_export("nn.dilation2d", v1=[]) @dispatch.add_dispatch_support def dilation2d_v2( input, # pylint: disable=redefined-builtin filters, # pylint: disable=redefined-builtin strides, padding, data_format, dilations, name=None): """Computes the grayscale dilation of 4-D `input` and 3-D `filters` tensors. The `input` tensor has shape `[batch, in_height, in_width, depth]` and the `filters` tensor has shape `[filter_height, filter_width, depth]`, i.e., each input channel is processed independently of the others with its own structuring function. The `output` tensor has shape `[batch, out_height, out_width, depth]`. The spatial dimensions of the output tensor depend on the `padding` algorithm. We currently only support the default "NHWC" `data_format`. In detail, the grayscale morphological 2-D dilation is the max-sum correlation (for consistency with `conv2d`, we use unmirrored filters): output[b, y, x, c] = max_{dy, dx} input[b, strides[1] * y + rates[1] * dy, strides[2] * x + rates[2] * dx, c] + filters[dy, dx, c] Max-pooling is a special case when the filter has size equal to the pooling kernel size and contains all zeros. Note on duality: The dilation of `input` by the `filters` is equal to the negation of the erosion of `-input` by the reflected `filters`. Args: input: A `Tensor`. Must be one of the following types: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `int64`, `bfloat16`, `uint16`, `half`, `uint32`, `uint64`. 4-D with shape `[batch, in_height, in_width, depth]`. filters: A `Tensor`. Must have the same type as `input`. 3-D with shape `[filter_height, filter_width, depth]`. strides: A list of `ints` that has length `>= 4`. The stride of the sliding window for each dimension of the input tensor. Must be: `[1, stride_height, stride_width, 1]`. padding: A `string` from: `"SAME", "VALID"`. The type of padding algorithm to use. See [here](https://www.tensorflow.org/api_docs/python/tf/nn#notes_on_padding_2) for more information. data_format: A `string`, only `"NHWC"` is currently supported. dilations: A list of `ints` that has length `>= 4`. The input stride for atrous morphological dilation. 
Must be: `[1, rate_height, rate_width, 1]`. name: A name for the operation (optional). Returns: A `Tensor`. Has the same type as `input`. """ if data_format != "NHWC": raise ValueError("`data_format` values other than 'NHWC' are not " f"supported. Received: data_format={data_format}") return gen_nn_ops.dilation2d(input=input, filter=filters, strides=strides, rates=dilations, padding=padding, name=name) @tf_export(v1=["nn.dilation2d"]) @dispatch.add_dispatch_support def dilation2d_v1( # pylint: disable=missing-docstring input, # pylint: disable=redefined-builtin filter=None, # pylint: disable=redefined-builtin strides=None, rates=None, padding=None, name=None, filters=None, dilations=None): filter = deprecated_argument_lookup("filters", filters, "filter", filter) rates = deprecated_argument_lookup("dilations", dilations, "rates", rates) return gen_nn_ops.dilation2d(input, filter, strides, rates, padding, name) dilation2d_v1.__doc__ = gen_nn_ops.dilation2d.__doc__ @tf_export("nn.with_space_to_batch") @dispatch.add_dispatch_support def with_space_to_batch( input, # pylint: disable=redefined-builtin dilation_rate, padding, op, filter_shape=None, spatial_dims=None, data_format=None): """Performs `op` on the space-to-batch representation of `input`. This has the effect of transforming sliding window operations into the corresponding "atrous" operation in which the input is sampled at the specified `dilation_rate`. In the special case that `dilation_rate` is uniformly 1, this simply returns: op(input, num_spatial_dims, padding) Otherwise, it returns: batch_to_space_nd( op(space_to_batch_nd(input, adjusted_dilation_rate, adjusted_paddings), num_spatial_dims, "VALID") adjusted_dilation_rate, adjusted_crops), where: adjusted_dilation_rate is an int64 tensor of shape [max(spatial_dims)], adjusted_{paddings,crops} are int64 tensors of shape [max(spatial_dims), 2] defined as follows: We first define two int64 tensors `paddings` and `crops` of shape `[num_spatial_dims, 2]` based on the value of `padding` and the spatial dimensions of the `input`: If `padding = "VALID"`, then: paddings, crops = required_space_to_batch_paddings( input_shape[spatial_dims], dilation_rate) If `padding = "SAME"`, then: dilated_filter_shape = filter_shape + (filter_shape - 1) * (dilation_rate - 1) paddings, crops = required_space_to_batch_paddings( input_shape[spatial_dims], dilation_rate, [(dilated_filter_shape - 1) // 2, dilated_filter_shape - 1 - (dilated_filter_shape - 1) // 2]) Because `space_to_batch_nd` and `batch_to_space_nd` assume that the spatial dimensions are contiguous starting at the second dimension, but the specified `spatial_dims` may not be, we must adjust `dilation_rate`, `paddings` and `crops` in order to be usable with these operations. For a given dimension, if the block size is 1, and both the starting and ending padding and crop amounts are 0, then space_to_batch_nd effectively leaves that dimension alone, which is what is needed for dimensions not part of `spatial_dims`. Furthermore, `space_to_batch_nd` and `batch_to_space_nd` handle this case efficiently for any number of leading and trailing dimensions. For 0 <= i < len(spatial_dims), we assign: adjusted_dilation_rate[spatial_dims[i] - 1] = dilation_rate[i] adjusted_paddings[spatial_dims[i] - 1, :] = paddings[i, :] adjusted_crops[spatial_dims[i] - 1, :] = crops[i, :] All unassigned values of `adjusted_dilation_rate` default to 1, while all unassigned values of `adjusted_paddings` and `adjusted_crops` default to 0. 
Note in the case that `dilation_rate` is not uniformly 1, specifying "VALID" padding is equivalent to specifying `padding = "SAME"` with a filter_shape of `[1]*N`. Advanced usage. Note the following optimization: A sequence of `with_space_to_batch` operations with identical (not uniformly 1) `dilation_rate` parameters and "VALID" padding net = with_space_to_batch(net, dilation_rate, "VALID", op_1) ... net = with_space_to_batch(net, dilation_rate, "VALID", op_k) can be combined into a single `with_space_to_batch` operation as follows: def combined_op(converted_input, num_spatial_dims, _): result = op_1(converted_input, num_spatial_dims, "VALID") ... result = op_k(result, num_spatial_dims, "VALID") net = with_space_to_batch(net, dilation_rate, "VALID", combined_op) This eliminates the overhead of `k-1` calls to `space_to_batch_nd` and `batch_to_space_nd`. Similarly, a sequence of `with_space_to_batch` operations with identical (not uniformly 1) `dilation_rate` parameters, "SAME" padding, and odd filter dimensions net = with_space_to_batch(net, dilation_rate, "SAME", op_1, filter_shape_1) ... net = with_space_to_batch(net, dilation_rate, "SAME", op_k, filter_shape_k) can be combined into a single `with_space_to_batch` operation as follows: def combined_op(converted_input, num_spatial_dims, _): result = op_1(converted_input, num_spatial_dims, "SAME") ... result = op_k(result, num_spatial_dims, "SAME") net = with_space_to_batch(net, dilation_rate, "VALID", combined_op) Args: input: Tensor of rank > max(spatial_dims). dilation_rate: int32 Tensor of *known* shape [num_spatial_dims]. padding: str constant equal to "VALID" or "SAME" op: Function that maps (input, num_spatial_dims, padding) -> output filter_shape: If padding = "SAME", specifies the shape of the convolution kernel/pooling window as an integer Tensor of shape [>=num_spatial_dims]. If padding = "VALID", filter_shape is ignored and need not be specified. spatial_dims: Monotonically increasing sequence of `num_spatial_dims` integers (which are >= 1) specifying the spatial dimensions of `input` and output. Defaults to: `range(1, num_spatial_dims+1)`. data_format: A string or None. Specifies whether the channel dimension of the `input` and output is the last dimension (default, or if `data_format` does not start with "NC"), or the second dimension (if `data_format` starts with "NC"). For N=1, the valid values are "NWC" (default) and "NCW". For N=2, the valid values are "NHWC" (default) and "NCHW". For N=3, the valid values are "NDHWC" (default) and "NCDHW". Returns: The output Tensor as described above, dimensions will vary based on the op provided. Raises: ValueError: if `padding` is invalid or the arguments are incompatible. ValueError: if `spatial_dims` are invalid. """ input = ops.convert_to_tensor(input, name="input") # pylint: disable=redefined-builtin input_shape = input.shape def build_op(num_spatial_dims, padding): return lambda inp, _: op(inp, num_spatial_dims, padding) new_op = _WithSpaceToBatch( input_shape, dilation_rate, padding, build_op, filter_shape=filter_shape, spatial_dims=spatial_dims, data_format=data_format) return new_op(input, None) class _WithSpaceToBatch: """Helper class for with_space_to_batch. Note that this class assumes that shapes of input and filter passed to `__call__` are compatible with `input_shape`, `filter_shape`, and `spatial_dims` passed to the constructor. Arguments input_shape: static shape of input. i.e. input.shape. dilation_rate: see `with_space_to_batch`. padding: see `with_space_to_batch`. 
build_op: Function that maps (num_spatial_dims, paddings) -> (function that maps (input, filter) -> output). filter_shape: see `with_space_to_batch`. spatial_dims: `see with_space_to_batch`. data_format: see `with_space_to_batch`. num_batch_dims: (Optional). Number of batch dims in `input_shape`. """ def __init__(self, input_shape, dilation_rate, padding, build_op, filter_shape=None, spatial_dims=None, data_format=None, num_batch_dims=1): """Helper class for _with_space_to_batch.""" dilation_rate = ops.convert_to_tensor( dilation_rate, dtypes.int32, name="dilation_rate") if dilation_rate.shape.ndims not in (None, 1): raise ValueError( "`dilation_rate.shape.rank` must be 1. Received: " f"dilation_rate={dilation_rate} of rank {dilation_rate.shape.rank}") if not dilation_rate.shape.is_fully_defined(): raise ValueError( "`dilation_rate.shape` must be fully defined. Received: " f"dilation_rate={dilation_rate} with shape " f"{dilation_rate.shape}") num_spatial_dims = dilation_rate.shape.dims[0].value if data_format is not None and data_format.startswith("NC"): starting_spatial_dim = num_batch_dims + 1 else: starting_spatial_dim = num_batch_dims if spatial_dims is None: spatial_dims = range(starting_spatial_dim, num_spatial_dims + starting_spatial_dim) orig_spatial_dims = list(spatial_dims) spatial_dims = sorted(set(int(x) for x in orig_spatial_dims)) if spatial_dims != orig_spatial_dims or any(x < 1 for x in spatial_dims): raise ValueError( "`spatial_dims` must be a monotonically increasing sequence of " f"positive integers. Received: spatial_dims={orig_spatial_dims}") if data_format is not None and data_format.startswith("NC"): expected_input_rank = spatial_dims[-1] else: expected_input_rank = spatial_dims[-1] + 1 try: input_shape.with_rank_at_least(expected_input_rank) except ValueError: raise ValueError( f"`input.shape.rank` must be at least {expected_input_rank}. " f"Received: input.shape={input_shape} with rank {input_shape.rank}") const_rate = tensor_util.constant_value(dilation_rate) rate_or_const_rate = dilation_rate if const_rate is not None: rate_or_const_rate = const_rate if np.any(const_rate < 1): raise ValueError( "`dilation_rate` must be positive. " f"Received: dilation_rate={const_rate}") if np.all(const_rate == 1): self.call = build_op(num_spatial_dims, padding) return padding, explicit_paddings = convert_padding(padding) # We have two padding contributions. The first is used for converting "SAME" # to "VALID". The second is required so that the height and width of the # zero-padded value tensor are multiples of rate. # Padding required to reduce to "VALID" convolution if padding == "SAME": if filter_shape is None: raise ValueError( "`filter_shape` must be specified for `padding='SAME'`. 
" f"Received: filter_shape={filter_shape} and padding={padding}") filter_shape = ops.convert_to_tensor(filter_shape, name="filter_shape") const_filter_shape = tensor_util.constant_value(filter_shape) if const_filter_shape is not None: filter_shape = const_filter_shape self.base_paddings = _with_space_to_batch_base_paddings( const_filter_shape, num_spatial_dims, rate_or_const_rate) else: self.num_spatial_dims = num_spatial_dims self.rate_or_const_rate = rate_or_const_rate self.base_paddings = None elif padding == "VALID": self.base_paddings = np.zeros([num_spatial_dims, 2], np.int32) elif padding == "EXPLICIT": base_paddings = (np.array(explicit_paddings) .reshape([num_spatial_dims + 2, 2])) # Remove batch and channel dimensions if data_format is not None and data_format.startswith("NC"): self.base_paddings = base_paddings[2:] else: self.base_paddings = base_paddings[1:-1] else: raise ValueError("`padding` must be one of 'SAME' or 'VALID'. " f"Received: padding={padding}") self.input_shape = input_shape self.spatial_dims = spatial_dims self.dilation_rate = dilation_rate self.data_format = data_format self.op = build_op(num_spatial_dims, "VALID") self.call = self._with_space_to_batch_call def _with_space_to_batch_call(self, inp, filter): # pylint: disable=redefined-builtin """Call functionality for with_space_to_batch.""" # Handle input whose shape is unknown during graph creation. input_spatial_shape = None input_shape = self.input_shape spatial_dims = self.spatial_dims if input_shape.ndims is not None: input_shape_list = input_shape.as_list() input_spatial_shape = [input_shape_list[i] for i in spatial_dims] if input_spatial_shape is None or None in input_spatial_shape: input_shape_tensor = array_ops.shape(inp) input_spatial_shape = array_ops.stack( [input_shape_tensor[i] for i in spatial_dims]) base_paddings = self.base_paddings if base_paddings is None: # base_paddings could not be computed at build time since static filter # shape was not fully defined. filter_shape = array_ops.shape(filter) base_paddings = _with_space_to_batch_base_paddings( filter_shape, self.num_spatial_dims, self.rate_or_const_rate) paddings, crops = array_ops.required_space_to_batch_paddings( input_shape=input_spatial_shape, base_paddings=base_paddings, block_shape=self.dilation_rate) dilation_rate = _with_space_to_batch_adjust(self.dilation_rate, 1, spatial_dims) paddings = _with_space_to_batch_adjust(paddings, 0, spatial_dims) crops = _with_space_to_batch_adjust(crops, 0, spatial_dims) input_converted = array_ops.space_to_batch_nd( input=inp, block_shape=dilation_rate, paddings=paddings) result = self.op(input_converted, filter) result_converted = array_ops.batch_to_space_nd( input=result, block_shape=dilation_rate, crops=crops) # Recover channel information for output shape if channels are not last. if self.data_format is not None and self.data_format.startswith("NC"): if not result_converted.shape.dims[1].value and filter is not None: output_shape = result_converted.shape.as_list() output_shape[1] = filter.shape[-1] result_converted.set_shape(output_shape) return result_converted def __call__(self, inp, filter): # pylint: disable=redefined-builtin return self.call(inp, filter) def _with_space_to_batch_base_paddings(filter_shape, num_spatial_dims, rate_or_const_rate): """Helper function to compute base_paddings.""" # Spatial dimensions of the filters and the upsampled filters in which we # introduce (rate - 1) zeros between consecutive filter values. 
filter_spatial_shape = filter_shape[:num_spatial_dims] pad_extra_shape = (filter_spatial_shape - 1) * rate_or_const_rate # When full_padding_shape is odd, we pad more at end, following the same # convention as conv2d. pad_extra_start = pad_extra_shape // 2 pad_extra_end = pad_extra_shape - pad_extra_start base_paddings = array_ops.stack( [[pad_extra_start[i], pad_extra_end[i]] for i in range(num_spatial_dims)]) return base_paddings def _with_space_to_batch_adjust(orig, fill_value, spatial_dims): """Returns an `adjusted` version of `orig` based on `spatial_dims`. Tensor of the same type as `orig` and with shape `[max(spatial_dims), ...]` where: adjusted[spatial_dims[i] - 1, ...] = orig[i, ...] for 0 <= i < len(spatial_dims), and adjusted[j, ...] = fill_value for j != spatial_dims[i] - 1 for some i. If `orig` is a constant value, then the result will be a constant value. Args: orig: Tensor of rank > max(spatial_dims). fill_value: Numpy scalar (of same data type as `orig) specifying the fill value for non-spatial dimensions. spatial_dims: See with_space_to_batch. Returns: `adjusted` tensor. """ fill_dims = orig.get_shape().as_list()[1:] dtype = orig.dtype.as_numpy_dtype parts = [] const_orig = tensor_util.constant_value(orig) const_or_orig = const_orig if const_orig is not None else orig prev_spatial_dim = 0 i = 0 while i < len(spatial_dims): start_i = i start_spatial_dim = spatial_dims[i] if start_spatial_dim > 1: # Fill in any gap from the previous spatial dimension (or dimension 1 if # this is the first spatial dimension) with `fill_value`. parts.append( np.full( [start_spatial_dim - 1 - prev_spatial_dim] + fill_dims, fill_value, dtype=dtype)) # Find the largest value of i such that: # [spatial_dims[start_i], ..., spatial_dims[i]] # == [start_spatial_dim, ..., start_spatial_dim + i - start_i], # i.e. the end of a contiguous group of spatial dimensions. while (i + 1 < len(spatial_dims) and spatial_dims[i + 1] == spatial_dims[i] + 1): i += 1 parts.append(const_or_orig[start_i:i + 1]) prev_spatial_dim = spatial_dims[i] i += 1 if const_orig is not None: return np.concatenate(parts) else: return array_ops.concat(parts, 0) def _get_strides_and_dilation_rate(num_spatial_dims, strides, dilation_rate): """Helper function for verifying strides and dilation_rate arguments. This is used by `convolution` and `pool`. Args: num_spatial_dims: int strides: Optional. List of N ints >= 1. Defaults to `[1]*N`. If any value of strides is > 1, then all values of dilation_rate must be 1. dilation_rate: Optional. List of N ints >= 1. Defaults to `[1]*N`. If any value of dilation_rate is > 1, then all values of strides must be 1. Returns: Normalized (strides, dilation_rate) as int32 numpy arrays of shape [num_spatial_dims]. Raises: ValueError: if the parameters are invalid. """ if dilation_rate is None: dilation_rate = [1] * num_spatial_dims elif len(dilation_rate) != num_spatial_dims: raise ValueError(f"`len(dilation_rate)` should be {num_spatial_dims}. " f"Received: dilation_rate={dilation_rate} of length " f"{len(dilation_rate)}") dilation_rate = np.array(dilation_rate, dtype=np.int32) if np.any(dilation_rate < 1): raise ValueError("all values of `dilation_rate` must be positive. " f"Received: dilation_rate={dilation_rate}") if strides is None: strides = [1] * num_spatial_dims elif len(strides) != num_spatial_dims: raise ValueError(f"`len(strides)` should be {num_spatial_dims}. 
" f"Received: strides={strides} of length {len(strides)}") strides = np.array(strides, dtype=np.int32) if np.any(strides < 1): raise ValueError("all values of `strides` must be positive. " f"Received: strides={strides}") if np.any(strides > 1) and np.any(dilation_rate > 1): raise ValueError( "`strides > 1` not supported in conjunction with `dilation_rate > 1`. " f"Received: strides={strides} and dilation_rate={dilation_rate}") return strides, dilation_rate @tf_export(v1=["nn.convolution"]) @dispatch.add_dispatch_support def convolution( input, # pylint: disable=redefined-builtin filter, # pylint: disable=redefined-builtin padding, strides=None, dilation_rate=None, name=None, data_format=None, filters=None, dilations=None): # pylint: disable=g-doc-args """Computes sums of N-D convolutions (actually cross-correlation). This also supports either output striding via the optional `strides` parameter or atrous convolution (also known as convolution with holes or dilated convolution, based on the French word "trous" meaning holes in English) via the optional `dilation_rate` parameter. Currently, however, output striding is not supported for atrous convolutions. Specifically, in the case that `data_format` does not start with "NC", given a rank (N+2) `input` Tensor of shape [num_batches, input_spatial_shape[0], ..., input_spatial_shape[N-1], num_input_channels], a rank (N+2) `filter` Tensor of shape [spatial_filter_shape[0], ..., spatial_filter_shape[N-1], num_input_channels, num_output_channels], an optional `dilation_rate` tensor of shape N (defaults to `[1]*N`) specifying the filter upsampling/input downsampling rate, and an optional list of N `strides` (defaults to `[1]*N`), this computes for each N-D spatial output position `(x[0], ..., x[N-1])`: ``` output[b, x[0], ..., x[N-1], k] = sum_{z[0], ..., z[N-1], q} filter[z[0], ..., z[N-1], q, k] * padded_input[b, x[0]*strides[0] + dilation_rate[0]*z[0], ..., x[N-1]*strides[N-1] + dilation_rate[N-1]*z[N-1], q] ``` where b is the index into the batch, k is the output channel number, q is the input channel number, and z is the N-D spatial offset within the filter. Here, `padded_input` is obtained by zero padding the input using an effective spatial filter shape of `(spatial_filter_shape-1) * dilation_rate + 1` and output striding `strides`. In the case that `data_format` does start with `"NC"`, the `input` and output (but not the `filter`) are simply transposed as follows: ```python convolution(input, data_format, **kwargs) = tf.transpose(convolution(tf.transpose(input, [0] + range(2,N+2) + [1]), **kwargs), [0, N+1] + range(1, N+1)) ``` It is required that 1 <= N <= 3. Args: input: An (N+2)-D `Tensor` of type `T`, of shape `[batch_size] + input_spatial_shape + [in_channels]` if data_format does not start with "NC" (default), or `[batch_size, in_channels] + input_spatial_shape` if data_format starts with "NC". filter: An (N+2)-D `Tensor` with the same type as `input` and shape `spatial_filter_shape + [in_channels, out_channels]`. padding: A string, either `"VALID"` or `"SAME"`. The padding algorithm. `"valid"` means no padding. `"same"` results in padding evenly to the left/right or up/down of the input such that output has the same height/width dimension as the input when the strides are 1. See [here](https://www.tensorflow.org/api_docs/python/tf/nn#notes_on_padding_2) for more information. strides: Optional. Sequence of N ints >= 1. Specifies the output stride. Defaults to `[1]*N`. 
If any value of strides is > 1, then all values of dilation_rate must be 1. dilation_rate: Optional. Sequence of N ints >= 1. Specifies the filter upsampling/input downsampling rate. In the literature, the same parameter is sometimes called `input stride` or `dilation`. The effective filter size used for the convolution will be `spatial_filter_shape + (spatial_filter_shape - 1) * (rate - 1)`, obtained by inserting (dilation_rate[i]-1) zeros between consecutive elements of the original filter in each spatial dimension i. If any value of dilation_rate is > 1, then all values of strides must be 1. name: Optional name for the returned tensor. data_format: A string or None. Specifies whether the channel dimension of the `input` and output is the last dimension (default, or if `data_format` does not start with "NC"), or the second dimension (if `data_format` starts with "NC"). For N=1, the valid values are "NWC" (default) and "NCW". For N=2, the valid values are "NHWC" (default) and "NCHW". For N=3, the valid values are "NDHWC" (default) and "NCDHW". Returns: A `Tensor` with the same type as `input` of shape `[batch_size] + output_spatial_shape + [out_channels]` if data_format is None or does not start with "NC", or `[batch_size, out_channels] + output_spatial_shape` if data_format starts with "NC", where `output_spatial_shape` depends on the value of `padding`. If padding == "SAME": output_spatial_shape[i] = ceil(input_spatial_shape[i] / strides[i]) If padding == "VALID": output_spatial_shape[i] = ceil((input_spatial_shape[i] - (spatial_filter_shape[i]-1) * dilation_rate[i]) / strides[i]). Raises: ValueError: If input/output depth does not match `filter` shape, if padding is other than `"VALID"` or `"SAME"`, or if data_format is invalid. """ filter = deprecated_argument_lookup("filters", filters, "filter", filter) dilation_rate = deprecated_argument_lookup( "dilations", dilations, "dilation_rate", dilation_rate) return convolution_internal( input, filter, strides=strides, padding=padding, data_format=data_format, dilations=dilation_rate, name=name) @tf_export("nn.convolution", v1=[]) @dispatch.add_dispatch_support def convolution_v2( # pylint: disable=missing-docstring input, # pylint: disable=redefined-builtin filters, strides=None, padding="VALID", data_format=None, dilations=None, name=None): return convolution_internal( input, # pylint: disable=redefined-builtin filters, strides=strides, padding=padding, data_format=data_format, dilations=dilations, name=name) convolution_v2.__doc__ = deprecation.rewrite_argument_docstring( deprecation.rewrite_argument_docstring( convolution.__doc__, "dilation_rate", "dilations"), "filter", "filters") def convolution_internal( input, # pylint: disable=redefined-builtin filters, strides=None, padding="VALID", data_format=None, dilations=None, name=None, call_from_convolution=True, num_spatial_dims=None): """Internal function which performs rank agnostic convolution. Args: input: See `convolution`. filters: See `convolution`. strides: See `convolution`. padding: See `convolution`. data_format: See `convolution`. dilations: See `convolution`. name: See `convolution`. call_from_convolution: See `convolution`. num_spatial_dims: (Optional.). It is a integer describing the rank of the spatial dimensions. For `1-D`, `2-D` and `3-D` convolutions, the value of `num_spatial_dims` is `1`, `2`, and `3`, respectively. This argument is only required to disambiguate the rank of `batch_shape` when `filter_shape.ndims is None` and `len(batch_shape) > 1`. 
For backwards compatibility, if `num_spatial_dims is None` and `filter_shape.ndims is None`, then `len(batch_shape)` is assumed to be `1` (i.e., the input is expected to be `[batch_size, num_channels] + input_spatial_shape` or `[batch_size] + input_spatial_shape + [num_channels]`. Returns: A tensor of shape and dtype matching that of `input`. Raises: ValueError: If input and filter both have unknown shapes, or if `num_spatial_dims` is provided and incompatible with the value estimated from `filters.shape`. """ if (not isinstance(filters, variables_lib.Variable) and not tensor_util.is_tf_type(filters)): with ops.name_scope("convolution_internal", None, [filters, input]): filters = ops.convert_to_tensor(filters, name='filters') if (not isinstance(input, ops.Tensor) and not tensor_util.is_tf_type(input)): with ops.name_scope("convolution_internal", None, [filters, input]): input = ops.convert_to_tensor(input, name="input") filters_rank = filters.shape.rank inputs_rank = input.shape.rank if num_spatial_dims is None: if filters_rank: num_spatial_dims = filters_rank - 2 elif inputs_rank: num_spatial_dims = inputs_rank - 2 else: raise ValueError( "When `num_spatial_dims` is not set, one of `input.shape.rank` or " "`filters.shape.rank` must be known. " f"Received: input.shape={input.shape} of rank {inputs_rank} and " f"filters.shape={filters.shape} of rank {filters_rank}") elif filters_rank and filters_rank - 2 != num_spatial_dims: raise ValueError( "`filters.shape.rank - 2` should equal `num_spatial_dims`. Received: " f"filters.shape={filters.shape} of rank {filters_rank} and " f"num_spatial_dims={num_spatial_dims}") if inputs_rank: num_batch_dims = inputs_rank - num_spatial_dims - 1 # Channel dimension. else: num_batch_dims = 1 # By default, assume single batch dimension. if num_spatial_dims not in {1, 2, 3}: raise ValueError( "`num_spatial_dims` must be 1, 2, or 3. " f"Received: num_spatial_dims={num_spatial_dims}.") if data_format is None or data_format in _CHANNELS_LAST_FORMATS: channel_index = num_batch_dims + num_spatial_dims else: channel_index = num_batch_dims if dilations is None: dilations = _get_sequence(dilations, num_spatial_dims, channel_index, "dilations") is_dilated_conv = False else: dilations = _get_sequence(dilations, num_spatial_dims, channel_index, "dilations") is_dilated_conv = any(i != 1 for i in dilations) strides = _get_sequence(strides, num_spatial_dims, channel_index, "strides") has_tpu_context = device_context.enclosing_tpu_context() is not None if name: default_name = None elif not has_tpu_context or call_from_convolution: default_name = "convolution" elif num_spatial_dims == 2: # Most common case. default_name = "Conv2D" elif num_spatial_dims == 3: default_name = "Conv3D" else: default_name = "conv1d" with ops.name_scope(name, default_name, [input, filters]) as name: # Fast path for TPU or if no dilation, as gradient only supported on TPU # for dilations. if not is_dilated_conv or has_tpu_context: if num_spatial_dims == 2: # Most common case. 
op = _conv2d_expanded_batch elif num_spatial_dims == 3: op = _conv3d_expanded_batch else: op = conv1d return op( input, filters, strides, padding=padding, data_format=data_format, dilations=dilations, name=name) else: if channel_index == 1: strides = strides[2:] dilations = dilations[2:] else: strides = strides[1:-1] dilations = dilations[1:-1] op = Convolution( tensor_shape.as_shape(input.shape), tensor_shape.as_shape(filters.shape), padding, strides=strides, dilation_rate=dilations, name=name, data_format=data_format, num_spatial_dims=num_spatial_dims) return op(input, filters) class Convolution: """Helper class for convolution. Note that this class assumes that shapes of input and filter passed to `__call__` are compatible with `input_shape`, `filter_shape`, and `num_spatial_dims` passed to the constructor. Arguments input_shape: static shape of input. i.e. input.shape. Its length is `batch_shape + input_spatial_shape + [num_channels]` if `data_format` does not start with `NC`, or `batch_shape + [num_channels] + input_spatial_shape` if `data_format` starts with `NC`. filter_shape: static shape of the filter. i.e. filter.shape. padding: The padding algorithm, must be "SAME" or "VALID". strides: see convolution. dilation_rate: see convolution. name: see convolution. data_format: A string or `None`. Specifies whether the channel dimension of the `input` and output is the last dimension (if `data_format` is `None` or does not start with `NC`), or the first post-batch dimension (i.e. if `data_format` starts with `NC`). num_spatial_dims: (Usually optional.) Python integer, the rank of the spatial and channel dimensions. For `1-D`, `2-D` and `3-D` convolutions, the value of `num_spatial_dims` is `1`, `2`, and `3`, respectively. This argument is only required to disambiguate the rank of `batch_shape` when `filter_shape.ndims is None` and `len(batch_shape) > 1`. For backwards compatibility, if `num_spatial_dims is None` and `filter_shape.ndims is None`, then `len(batch_shape)` is assumed to be `1` (i.e., the input is expected to be `[batch_size, num_channels] + input_spatial_shape` or `[batch_size] + input_spatial_shape + [num_channels]`. """ def __init__(self, input_shape, filter_shape, padding, strides=None, dilation_rate=None, name=None, data_format=None, num_spatial_dims=None): """Helper function for convolution.""" num_batch_dims = None filter_shape = tensor_shape.as_shape(filter_shape) input_shape = tensor_shape.as_shape(input_shape) if filter_shape.ndims is not None: if (num_spatial_dims is not None and filter_shape.ndims != num_spatial_dims + 2): raise ValueError( "`filters.shape.rank` must be `num_spatial_dims + 2`. Received: " f"filters.shape={filter_shape} of rank {filter_shape.rank} and " f"num_spatial_dims={num_spatial_dims}") else: num_spatial_dims = filter_shape.ndims - 2 if input_shape.ndims is not None and num_spatial_dims is not None: num_batch_dims = input_shape.ndims - num_spatial_dims - 1 if num_spatial_dims is None: num_spatial_dims = input_shape.ndims - 2 else: if input_shape.ndims is not None: if input_shape.ndims < num_spatial_dims + 2: raise ValueError( "`input.shape.rank` must be >= than `num_spatial_dims + 2`. " f"Received: input.shape={input_shape} of rank {input_shape.rank} " f"and num_spatial_dims={num_spatial_dims}") else: if num_batch_dims is None: num_batch_dims = input_shape.ndims - num_spatial_dims - 1 if num_spatial_dims is None: raise ValueError( "When `num_spatial_dims` is not set, one of `input.shape.rank` or " "`filters.shape.rank` must be known. 
" f"Received: input.shape={input_shape} of rank {input_shape.rank} and " f"`filters.shape={filter_shape}` of rank {filter_shape.rank}") if num_batch_dims is None: num_batch_dims = 1 if num_batch_dims < 1: raise ValueError( f"Batch dims should be >= 1, but found {num_batch_dims}. " "Batch dims was estimated as " "`input.shape.rank - num_spatial_dims - 1` and `num_spatial_dims` " "was either provided or estimated as `filters.shape.rank - 2`. " f"Received: input.shape={input_shape} of rank {input_shape.rank}, " f"filters.shape={filter_shape} of rank {filter_shape.rank}, and " f"num_spatial_dims={num_spatial_dims}") if data_format is None or not data_format.startswith("NC"): input_channels_dim = tensor_shape.dimension_at_index( input_shape, num_spatial_dims + num_batch_dims) spatial_dims = range(num_batch_dims, num_spatial_dims + num_batch_dims) else: input_channels_dim = tensor_shape.dimension_at_index( input_shape, num_batch_dims) spatial_dims = range( num_batch_dims + 1, num_spatial_dims + num_batch_dims + 1) filter_dim = tensor_shape.dimension_at_index(filter_shape, num_spatial_dims) if not (input_channels_dim % filter_dim).is_compatible_with(0): raise ValueError( "The number of input channels is not divisible by the corresponding " f"number of output filters. Received: input.shape={input_shape} with " f"{input_channels_dim} channels and filters.shape={filter_shape} " f"with {filter_dim} output filters.") strides, dilation_rate = _get_strides_and_dilation_rate( num_spatial_dims, strides, dilation_rate) self.input_shape = input_shape self.filter_shape = filter_shape self.data_format = data_format self.strides = strides self.padding = padding self.name = name self.dilation_rate = dilation_rate self.num_batch_dims = num_batch_dims self.num_spatial_dims = num_spatial_dims self.conv_op = _WithSpaceToBatch( input_shape, dilation_rate=dilation_rate, padding=padding, build_op=self._build_op, filter_shape=filter_shape, spatial_dims=spatial_dims, data_format=data_format, num_batch_dims=num_batch_dims) def _build_op(self, _, padding): return _NonAtrousConvolution( self.input_shape, filter_shape=self.filter_shape, padding=padding, data_format=self.data_format, strides=self.strides, name=self.name, num_batch_dims=self.num_batch_dims) def __call__(self, inp, filter): # pylint: disable=redefined-builtin # TPU convolution supports dilations greater than 1. if device_context.enclosing_tpu_context() is not None: return convolution_internal( inp, filter, strides=self.strides, padding=self.padding, data_format=self.data_format, dilations=self.dilation_rate, name=self.name, call_from_convolution=False, num_spatial_dims=self.num_spatial_dims) else: return self.conv_op(inp, filter) @tf_export(v1=["nn.pool"]) @dispatch.add_dispatch_support def pool( input, # pylint: disable=redefined-builtin window_shape, pooling_type, padding, dilation_rate=None, strides=None, name=None, data_format=None, dilations=None): """Performs an N-D pooling operation. In the case that `data_format` does not start with "NC", computes for 0 <= b < batch_size, 0 <= x[i] < output_spatial_shape[i], 0 <= c < num_channels: ``` output[b, x[0], ..., x[N-1], c] = REDUCE_{z[0], ..., z[N-1]} input[b, x[0] * strides[0] - pad_before[0] + dilation_rate[0]*z[0], ... x[N-1]*strides[N-1] - pad_before[N-1] + dilation_rate[N-1]*z[N-1], c], ``` where the reduction function REDUCE depends on the value of `pooling_type`, and pad_before is defined based on the value of `padding` as described in the "returns" section of `tf.nn.convolution` for details. 
The reduction never includes out-of-bounds positions. In the case that `data_format` starts with `"NC"`, the `input` and output are simply transposed as follows: ```python pool(input, data_format, **kwargs) = tf.transpose(pool(tf.transpose(input, [0] + range(2,N+2) + [1]), **kwargs), [0, N+1] + range(1, N+1)) ``` Args: input: Tensor of rank N+2, of shape `[batch_size] + input_spatial_shape + [num_channels]` if data_format does not start with "NC" (default), or `[batch_size, num_channels] + input_spatial_shape` if data_format starts with "NC". Pooling happens over the spatial dimensions only. window_shape: Sequence of N ints >= 1. pooling_type: Specifies pooling operation, must be "AVG" or "MAX". padding: The padding algorithm, must be "SAME" or "VALID". See the "returns" section of `tf.nn.convolution` for details. dilation_rate: Optional. Dilation rate. List of N ints >= 1. Defaults to `[1]*N`. If any value of dilation_rate is > 1, then all values of strides must be 1. strides: Optional. Sequence of N ints >= 1. Defaults to `[1]*N`. If any value of strides is > 1, then all values of dilation_rate must be 1. name: Optional. Name of the op. data_format: A string or None. Specifies whether the channel dimension of the `input` and output is the last dimension (default, or if `data_format` does not start with "NC"), or the second dimension (if `data_format` starts with "NC"). For N=1, the valid values are "NWC" (default) and "NCW". For N=2, the valid values are "NHWC" (default) and "NCHW". For N=3, the valid values are "NDHWC" (default) and "NCDHW". dilations: Alias for dilation_rate Returns: Tensor of rank N+2, of shape [batch_size] + output_spatial_shape + [num_channels] if data_format is None or does not start with "NC", or [batch_size, num_channels] + output_spatial_shape if data_format starts with "NC", where `output_spatial_shape` depends on the value of padding: If padding = "SAME": output_spatial_shape[i] = ceil(input_spatial_shape[i] / strides[i]) If padding = "VALID": output_spatial_shape[i] = ceil((input_spatial_shape[i] - (window_shape[i] - 1) * dilation_rate[i]) / strides[i]). Raises: ValueError: if arguments are invalid. """ dilation_rate = deprecated_argument_lookup( "dilations", dilations, "dilation_rate", dilation_rate) # pylint: enable=line-too-long with ops.name_scope(name, "%s_pool" % (pooling_type.lower()), [input]) as scope: input = ops.convert_to_tensor(input, name="input") # pylint: disable=redefined-builtin num_spatial_dims = len(window_shape) if num_spatial_dims < 1 or num_spatial_dims > 3: raise ValueError("`len(window_shape)` must be 1, 2, or 3. Received: " f"window_shape={window_shape} of length " f"{len(window_shape)}") input.get_shape().with_rank(num_spatial_dims + 2) strides, dilation_rate = _get_strides_and_dilation_rate( num_spatial_dims, strides, dilation_rate) if padding == "SAME" and np.any(dilation_rate > 1): raise ValueError( "pooling with 'SAME' padding is not implemented for " f"`dilation_rate` > 1. Received: padding={padding} and " f"dilation_rate={dilation_rate}") if np.any(strides > window_shape): raise ValueError( "`strides` > `window_shape` not supported due to inconsistency " f"between CPU and GPU implementations. 
Received: strides={strides} " f"and window_shape={window_shape}") pooling_ops = { ("MAX", 1): max_pool, ("MAX", 2): max_pool, ("MAX", 3): max_pool3d, # pylint: disable=undefined-variable ("AVG", 1): avg_pool, ("AVG", 2): avg_pool, ("AVG", 3): avg_pool3d, # pylint: disable=undefined-variable } op_key = (pooling_type, num_spatial_dims) if op_key not in pooling_ops: raise ValueError( f"{num_spatial_dims}-D {pooling_type} pooling is not supported.") if data_format is None or not data_format.startswith("NC"): adjusted_window_shape = [1] + list(window_shape) + [1] adjusted_strides = [1] + list(strides) + [1] spatial_dims = range(1, num_spatial_dims + 1) else: adjusted_window_shape = [1, 1] + list(window_shape) adjusted_strides = [1, 1] + list(strides) spatial_dims = range(2, num_spatial_dims + 2) if num_spatial_dims == 1: if data_format is None or data_format == "NWC": data_format_kwargs = dict(data_format="NHWC") elif data_format == "NCW": data_format_kwargs = dict(data_format="NCHW") else: raise ValueError("data_format must be either 'NWC' or 'NCW'. " f"Received: data_format={data_format}") adjusted_window_shape = [1] + adjusted_window_shape adjusted_strides = [1] + adjusted_strides else: data_format_kwargs = dict(data_format=data_format) def op(converted_input, _, converted_padding): # pylint: disable=missing-docstring if num_spatial_dims == 1: converted_input = array_ops.expand_dims(converted_input, spatial_dims[0]) result = pooling_ops[op_key]( converted_input, adjusted_window_shape, adjusted_strides, converted_padding, name=scope, **data_format_kwargs) if num_spatial_dims == 1: result = array_ops.squeeze(result, [spatial_dims[0]]) return result return with_space_to_batch( input=input, dilation_rate=dilation_rate, padding=padding, op=op, spatial_dims=spatial_dims, filter_shape=window_shape) @tf_export("nn.pool", v1=[]) @dispatch.add_dispatch_support def pool_v2( input, # pylint: disable=redefined-builtin window_shape, pooling_type, strides=None, padding="VALID", data_format=None, dilations=None, name=None): # pylint: disable=line-too-long """Performs an N-D pooling operation. In the case that `data_format` does not start with "NC", computes for 0 <= b < batch_size, 0 <= x[i] < output_spatial_shape[i], 0 <= c < num_channels: ``` output[b, x[0], ..., x[N-1], c] = REDUCE_{z[0], ..., z[N-1]} input[b, x[0] * strides[0] - pad_before[0] + dilation_rate[0]*z[0], ... x[N-1]*strides[N-1] - pad_before[N-1] + dilation_rate[N-1]*z[N-1], c], ``` where the reduction function REDUCE depends on the value of `pooling_type`, and pad_before is defined based on the value of `padding` as described in the "returns" section of `tf.nn.convolution` for details. The reduction never includes out-of-bounds positions. In the case that `data_format` starts with `"NC"`, the `input` and output are simply transposed as follows: ```python pool(input, data_format, **kwargs) = tf.transpose(pool(tf.transpose(input, [0] + range(2,N+2) + [1]), **kwargs), [0, N+1] + range(1, N+1)) ``` Args: input: Tensor of rank N+2, of shape `[batch_size] + input_spatial_shape + [num_channels]` if data_format does not start with "NC" (default), or `[batch_size, num_channels] + input_spatial_shape` if data_format starts with "NC". Pooling happens over the spatial dimensions only. window_shape: Sequence of N ints >= 1. pooling_type: Specifies pooling operation, must be "AVG" or "MAX". strides: Optional. Sequence of N ints >= 1. Defaults to `[1]*N`. If any value of strides is > 1, then all values of dilation_rate must be 1. 
padding: The padding algorithm, must be "SAME" or "VALID". Defaults to "SAME". See [here](https://www.tensorflow.org/api_docs/python/tf/nn#notes_on_padding_2) for more information. data_format: A string or None. Specifies whether the channel dimension of the `input` and output is the last dimension (default, or if `data_format` does not start with "NC"), or the second dimension (if `data_format` starts with "NC"). For N=1, the valid values are "NWC" (default) and "NCW". For N=2, the valid values are "NHWC" (default) and "NCHW". For N=3, the valid values are "NDHWC" (default) and "NCDHW". dilations: Optional. Dilation rate. List of N ints >= 1. Defaults to `[1]*N`. If any value of dilation_rate is > 1, then all values of strides must be 1. name: Optional. Name of the op. Returns: Tensor of rank N+2, of shape [batch_size] + output_spatial_shape + [num_channels] if data_format is None or does not start with "NC", or [batch_size, num_channels] + output_spatial_shape if data_format starts with "NC", where `output_spatial_shape` depends on the value of padding: If padding = "SAME": output_spatial_shape[i] = ceil(input_spatial_shape[i] / strides[i]) If padding = "VALID": output_spatial_shape[i] = ceil((input_spatial_shape[i] - (window_shape[i] - 1) * dilation_rate[i]) / strides[i]). Raises: ValueError: if arguments are invalid. """ return pool( input=input, window_shape=window_shape, pooling_type=pooling_type, padding=padding, dilation_rate=dilations, strides=strides, name=name, data_format=data_format) @tf_export("nn.atrous_conv2d") @dispatch.add_dispatch_support def atrous_conv2d(value, filters, rate, padding, name=None): """Atrous convolution (a.k.a. convolution with holes or dilated convolution). This function is a simpler wrapper around the more general `tf.nn.convolution`, and exists only for backwards compatibility. You can use `tf.nn.convolution` to perform 1-D, 2-D, or 3-D atrous convolution. Computes a 2-D atrous convolution, also known as convolution with holes or dilated convolution, given 4-D `value` and `filters` tensors. If the `rate` parameter is equal to one, it performs regular 2-D convolution. If the `rate` parameter is greater than one, it performs convolution with holes, sampling the input values every `rate` pixels in the `height` and `width` dimensions. This is equivalent to convolving the input with a set of upsampled filters, produced by inserting `rate - 1` zeros between two consecutive values of the filters along the `height` and `width` dimensions, hence the name atrous convolution or convolution with holes (the French word trous means holes in English). More specifically: ``` output[batch, height, width, out_channel] = sum_{dheight, dwidth, in_channel} ( filters[dheight, dwidth, in_channel, out_channel] * value[batch, height + rate*dheight, width + rate*dwidth, in_channel] ) ``` Atrous convolution allows us to explicitly control how densely to compute feature responses in fully convolutional networks. Used in conjunction with bilinear interpolation, it offers an alternative to `conv2d_transpose` in dense prediction tasks such as semantic image segmentation, optical flow computation, or depth estimation. It also allows us to effectively enlarge the field of view of filters without increasing the number of parameters or the amount of computation. For a description of atrous convolution and how it can be used for dense feature extraction, please see: (Chen et al., 2015). The same operation is investigated further in (Yu et al., 2016). 
Previous works that effectively use atrous convolution in different ways are, among others, (Sermanet et al., 2014) and (Giusti et al., 2013). Atrous convolution is also closely related to the so-called noble identities in multi-rate signal processing. There are many different ways to implement atrous convolution (see the refs above). The implementation here reduces ```python atrous_conv2d(value, filters, rate, padding=padding) ``` to the following three operations: ```python paddings = ... net = space_to_batch(value, paddings, block_size=rate) net = conv2d(net, filters, strides=[1, 1, 1, 1], padding="VALID") crops = ... net = batch_to_space(net, crops, block_size=rate) ``` Advanced usage. Note the following optimization: A sequence of `atrous_conv2d` operations with identical `rate` parameters, 'SAME' `padding`, and filters with odd heights/ widths: ```python net = atrous_conv2d(net, filters1, rate, padding="SAME") net = atrous_conv2d(net, filters2, rate, padding="SAME") ... net = atrous_conv2d(net, filtersK, rate, padding="SAME") ``` can be equivalently performed cheaper in terms of computation and memory as: ```python pad = ... # padding so that the input dims are multiples of rate net = space_to_batch(net, paddings=pad, block_size=rate) net = conv2d(net, filters1, strides=[1, 1, 1, 1], padding="SAME") net = conv2d(net, filters2, strides=[1, 1, 1, 1], padding="SAME") ... net = conv2d(net, filtersK, strides=[1, 1, 1, 1], padding="SAME") net = batch_to_space(net, crops=pad, block_size=rate) ``` because a pair of consecutive `space_to_batch` and `batch_to_space` ops with the same `block_size` cancel out when their respective `paddings` and `crops` inputs are identical. Args: value: A 4-D `Tensor` of type `float`. It needs to be in the default "NHWC" format. Its shape is `[batch, in_height, in_width, in_channels]`. filters: A 4-D `Tensor` with the same type as `value` and shape `[filter_height, filter_width, in_channels, out_channels]`. `filters`' `in_channels` dimension must match that of `value`. Atrous convolution is equivalent to standard convolution with upsampled filters with effective height `filter_height + (filter_height - 1) * (rate - 1)` and effective width `filter_width + (filter_width - 1) * (rate - 1)`, produced by inserting `rate - 1` zeros along consecutive elements across the `filters`' spatial dimensions. rate: A positive int32. The stride with which we sample input values across the `height` and `width` dimensions. Equivalently, the rate by which we upsample the filter values by inserting zeros across the `height` and `width` dimensions. In the literature, the same parameter is sometimes called `input stride` or `dilation`. padding: A string, either `'VALID'` or `'SAME'`. The padding algorithm. See [here](https://www.tensorflow.org/api_docs/python/tf/nn#notes_on_padding_2) for more information. name: Optional name for the returned tensor. Returns: A `Tensor` with the same type as `value`. Output shape with `'VALID'` padding is: [batch, height - 2 * (filter_width - 1), width - 2 * (filter_height - 1), out_channels]. Output shape with `'SAME'` padding is: [batch, height, width, out_channels]. Raises: ValueError: If input/output depth does not match `filters`' shape, or if padding is other than `'VALID'` or `'SAME'`. 
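  For illustration (the random tensors below are assumptions made for the
  example, not requirements of the op), an atrous convolution with `rate=2`
  matches `tf.nn.convolution` with `dilations=2`, since this function is a thin
  wrapper around that more general op:

  ```python
  value = tf.random.normal([1, 8, 8, 3])
  filters = tf.random.normal([3, 3, 3, 16])
  y1 = tf.nn.atrous_conv2d(value, filters, rate=2, padding="SAME")
  y2 = tf.nn.convolution(value, filters, padding="SAME", dilations=2)
  # y1 and y2 agree; both have shape [1, 8, 8, 16].
  ```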
References: Multi-Scale Context Aggregation by Dilated Convolutions: [Yu et al., 2016](https://arxiv.org/abs/1511.07122) ([pdf](https://arxiv.org/pdf/1511.07122.pdf)) Semantic Image Segmentation with Deep Convolutional Nets and Fully Connected CRFs: [Chen et al., 2015](http://arxiv.org/abs/1412.7062) ([pdf](https://arxiv.org/pdf/1412.7062)) OverFeat - Integrated Recognition, Localization and Detection using Convolutional Networks: [Sermanet et al., 2014](https://arxiv.org/abs/1312.6229) ([pdf](https://arxiv.org/pdf/1312.6229.pdf)) Fast Image Scanning with Deep Max-Pooling Convolutional Neural Networks: [Giusti et al., 2013] (https://ieeexplore.ieee.org/abstract/document/6738831) ([pdf](https://arxiv.org/pdf/1302.1700.pdf)) """ return convolution( input=value, filter=filters, padding=padding, dilation_rate=np.broadcast_to(rate, (2,)), name=name) def convert_padding(padding, expected_length=4): """Converts Python padding to C++ padding for ops which take EXPLICIT padding. Args: padding: the `padding` argument for a Python op which supports EXPLICIT padding. expected_length: Expected number of entries in the padding list when explicit padding is used. Returns: (padding, explicit_paddings) pair, which should be passed as attributes to a C++ op. Raises: ValueError: If padding is invalid. """ explicit_paddings = [] if padding == "EXPLICIT": raise ValueError("'EXPLICIT' is not a valid value for `padding`. To use " "explicit padding, `padding` must be a list.") if isinstance(padding, (list, tuple)): for i, dim_paddings in enumerate(padding): if not isinstance(dim_paddings, (list, tuple)): raise ValueError("When `padding` is a list, each element of `padding` " "must be a list/tuple of size 2. Received: " f"padding={padding} with element at index {i} of type " f"{type(dim_paddings)}") if len(dim_paddings) != 2: raise ValueError("When `padding` is a list, each element of `padding` " "must be a list/tuple of size 2. Received: " f"padding={padding} with element at index {i} of size " f"{len(dim_paddings)}") explicit_paddings.extend(dim_paddings) if len(padding) != expected_length: raise ValueError( f"When padding is a list, it must be of size {expected_length}. " f"Received: padding={padding} of size {len(padding)}") padding = "EXPLICIT" return padding, explicit_paddings @tf_export(v1=["nn.conv1d"]) @dispatch.add_dispatch_support @deprecation.deprecated_arg_values( None, "`NCHW` for data_format is deprecated, use `NCW` instead", warn_once=True, data_format="NCHW") @deprecation.deprecated_arg_values( None, "`NHWC` for data_format is deprecated, use `NWC` instead", warn_once=True, data_format="NHWC") def conv1d( value=None, filters=None, stride=None, padding=None, use_cudnn_on_gpu=None, data_format=None, name=None, input=None, # pylint: disable=redefined-builtin dilations=None): r"""Computes a 1-D convolution of input with rank `>=3` and a `3-D` filter. Given an input tensor of shape `batch_shape + [in_width, in_channels]` if `data_format` is `"NWC"`, or `batch_shape + [in_channels, in_width]` if `data_format` is `"NCW"`, and a filter / kernel tensor of shape `[filter_width, in_channels, out_channels]`, this op reshapes the arguments to pass them to `conv2d` to perform the equivalent convolution operation. Internally, this op reshapes the input tensors and invokes `tf.nn.conv2d`. 
For example, if `data_format` does not start with "NC", a tensor of shape `batch_shape + [in_width, in_channels]` is reshaped to `batch_shape + [1, in_width, in_channels]`, and the filter is reshaped to `[1, filter_width, in_channels, out_channels]`. The result is then reshaped back to `batch_shape + [out_width, out_channels]` \(where out_width is a function of the stride and padding as in conv2d\) and returned to the caller. Args: value: A Tensor of rank at least 3. Must be of type `float16`, `float32`, or `float64`. filters: A Tensor of rank at least 3. Must have the same type as `value`. stride: An int or list of `ints` that has length `1` or `3`. The number of entries by which the filter is moved right at each step. padding: 'SAME' or 'VALID' use_cudnn_on_gpu: An optional `bool`. Defaults to `True`. data_format: An optional `string` from `"NWC", "NCW"`. Defaults to `"NWC"`, the data is stored in the order of `batch_shape + [in_width, in_channels]`. The `"NCW"` format stores data as `batch_shape + [in_channels, in_width]`. name: A name for the operation (optional). input: Alias for value. dilations: An int or list of `ints` that has length `1` or `3` which defaults to 1. The dilation factor for each dimension of input. If set to k > 1, there will be k-1 skipped cells between each filter element on that dimension. Dilations in the batch and depth dimensions must be 1. Returns: A `Tensor`. Has the same type as input. Raises: ValueError: if `data_format` is invalid. """ value = deprecation.deprecated_argument_lookup("input", input, "value", value) with ops.name_scope(name, "conv1d", [value, filters]) as name: # Reshape the input tensor to batch_shape + [1, in_width, in_channels] if data_format is None or data_format == "NHWC" or data_format == "NWC": data_format = "NHWC" spatial_start_dim = -3 channel_index = 2 elif data_format == "NCHW" or data_format == "NCW": data_format = "NCHW" spatial_start_dim = -2 channel_index = 1 else: raise ValueError("`data_format` must be 'NWC' or 'NCW'. " f"Received: data_format={data_format}") strides = [1] + _get_sequence(stride, 1, channel_index, "stride") dilations = [1] + _get_sequence(dilations, 1, channel_index, "dilations") value = array_ops.expand_dims(value, spatial_start_dim) filters = array_ops.expand_dims(filters, 0) if value.shape.ndims in (4, 3, 2, 1, 0, None): result = gen_nn_ops.conv2d( value, filters, strides, padding, use_cudnn_on_gpu=use_cudnn_on_gpu, data_format=data_format, dilations=dilations, name=name) else: result = squeeze_batch_dims( value, functools.partial( gen_nn_ops.conv2d, filter=filters, strides=strides, padding=padding, use_cudnn_on_gpu=use_cudnn_on_gpu, data_format=data_format, dilations=dilations, ), inner_rank=3, name=name) return array_ops.squeeze(result, [spatial_start_dim]) @tf_export("nn.conv1d", v1=[]) @dispatch.add_dispatch_support def conv1d_v2( input, # pylint: disable=redefined-builtin filters, stride, padding, data_format="NWC", dilations=None, name=None): r"""Computes a 1-D convolution given 3-D input and filter tensors. Given an input tensor of shape `batch_shape + [in_width, in_channels]` if `data_format` is `"NWC"`, or `batch_shape + [in_channels, in_width]` if `data_format` is `"NCW"`, and a filter / kernel tensor of shape `[filter_width, in_channels, out_channels]`, this op reshapes the arguments to pass them to `conv2d` to perform the equivalent convolution operation. Internally, this op reshapes the input tensors and invokes `tf.nn.conv2d`. 
For example, if `data_format` does not start with `"NC"`, a tensor of shape `batch_shape + [in_width, in_channels]` is reshaped to `batch_shape + [1, in_width, in_channels]`, and the filter is reshaped to `[1, filter_width, in_channels, out_channels]`. The result is then reshaped back to `batch_shape + [out_width, out_channels]` \(where out_width is a function of the stride and padding as in conv2d\) and returned to the caller. Args: input: A Tensor of rank at least 3. Must be of type `float16`, `float32`, or `float64`. filters: A Tensor of rank at least 3. Must have the same type as `input`. stride: An int or list of `ints` that has length `1` or `3`. The number of entries by which the filter is moved right at each step. padding: 'SAME' or 'VALID'. See [here](https://www.tensorflow.org/api_docs/python/tf/nn#notes_on_padding_2) for more information. data_format: An optional `string` from `"NWC", "NCW"`. Defaults to `"NWC"`, the data is stored in the order of `batch_shape + [in_width, in_channels]`. The `"NCW"` format stores data as `batch_shape + [in_channels, in_width]`. dilations: An int or list of `ints` that has length `1` or `3` which defaults to 1. The dilation factor for each dimension of input. If set to k > 1, there will be k-1 skipped cells between each filter element on that dimension. Dilations in the batch and depth dimensions must be 1. name: A name for the operation (optional). Returns: A `Tensor`. Has the same type as input. Raises: ValueError: if `data_format` is invalid. """ return conv1d( input, # pylint: disable=redefined-builtin filters, stride, padding, use_cudnn_on_gpu=True, data_format=data_format, name=name, dilations=dilations) @tf_export("nn.conv1d_transpose") @dispatch.add_dispatch_support def conv1d_transpose( input, # pylint: disable=redefined-builtin filters, output_shape, strides, padding="SAME", data_format="NWC", dilations=None, name=None): """The transpose of `conv1d`. This operation is sometimes called "deconvolution" after (Zeiler et al., 2010), but is actually the transpose (gradient) of `conv1d` rather than an actual deconvolution. Args: input: A 3-D `Tensor` of type `float` and shape `[batch, in_width, in_channels]` for `NWC` data format or `[batch, in_channels, in_width]` for `NCW` data format. filters: A 3-D `Tensor` with the same type as `input` and shape `[filter_width, output_channels, in_channels]`. `filter`'s `in_channels` dimension must match that of `input`. output_shape: A 1-D `Tensor`, containing three elements, representing the output shape of the deconvolution op. strides: An int or list of `ints` that has length `1` or `3`. The number of entries by which the filter is moved right at each step. padding: A string, either `'VALID'` or `'SAME'`. The padding algorithm. See [here](https://www.tensorflow.org/api_docs/python/tf/nn#notes_on_padding_2) for more information. data_format: A string. `'NWC'` and `'NCW'` are supported. dilations: An int or list of `ints` that has length `1` or `3` which defaults to 1. The dilation factor for each dimension of input. If set to k > 1, there will be k-1 skipped cells between each filter element on that dimension. Dilations in the batch and depth dimensions must be 1. name: Optional name for the returned tensor. Returns: A `Tensor` with the same type as `input`. Raises: ValueError: If input/output depth does not match `filter`'s shape, if `output_shape` is not at 3-element vector, if `padding` is other than `'VALID'` or `'SAME'`, or if `data_format` is invalid. 
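  A minimal shape sketch (the concrete sizes below are assumptions for
  illustration, not requirements of the op):

  ```python
  x = tf.random.normal([2, 10, 8])   # [batch, in_width, in_channels]
  f = tf.random.normal([3, 16, 8])   # [filter_width, output_channels, in_channels]
  y = tf.nn.conv1d_transpose(x, f, output_shape=[2, 20, 16], strides=2,
                             padding="SAME")
  print(y.shape)  # (2, 20, 16)
  ```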
References: Deconvolutional Networks: [Zeiler et al., 2010] (https://ieeexplore.ieee.org/abstract/document/5539957) ([pdf] (http://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.232.4023&rep=rep1&type=pdf)) """ with ops.name_scope(name, "conv1d_transpose", [input, filters, output_shape]) as name: # The format could be either NWC or NCW, map to NHWC or NCHW if data_format is None or data_format == "NWC": data_format = "NHWC" spatial_start_dim = 1 channel_index = 2 elif data_format == "NCW": data_format = "NCHW" spatial_start_dim = 2 channel_index = 1 else: raise ValueError("`data_format` must be 'NWC' or 'NCW'. " f"Received: data_format={data_format}") # Reshape the input tensor to [batch, 1, in_width, in_channels] strides = [1] + _get_sequence(strides, 1, channel_index, "stride") dilations = [1] + _get_sequence(dilations, 1, channel_index, "dilations") input = array_ops.expand_dims(input, spatial_start_dim) filters = array_ops.expand_dims(filters, 0) output_shape = list(output_shape) if not isinstance( output_shape, ops.Tensor) else output_shape output_shape = array_ops.concat([output_shape[: spatial_start_dim], [1], output_shape[spatial_start_dim:]], 0) result = gen_nn_ops.conv2d_backprop_input( input_sizes=output_shape, filter=filters, out_backprop=input, strides=strides, padding=padding, data_format=data_format, dilations=dilations, name=name) return array_ops.squeeze(result, spatial_start_dim) @tf_export("nn.conv2d", v1=[]) @dispatch.add_dispatch_support def conv2d_v2(input, # pylint: disable=redefined-builtin filters, strides, padding, data_format="NHWC", dilations=None, name=None): # pylint: disable=line-too-long r"""Computes a 2-D convolution given `input` and 4-D `filters` tensors. The `input` tensor may have rank `4` or higher, where shape dimensions `[:-3]` are considered batch dimensions (`batch_shape`). Given an input tensor of shape `batch_shape + [in_height, in_width, in_channels]` and a filter / kernel tensor of shape `[filter_height, filter_width, in_channels, out_channels]`, this op performs the following: 1. Flattens the filter to a 2-D matrix with shape `[filter_height * filter_width * in_channels, output_channels]`. 2. Extracts image patches from the input tensor to form a *virtual* tensor of shape `[batch, out_height, out_width, filter_height * filter_width * in_channels]`. 3. For each patch, right-multiplies the filter matrix and the image patch vector. In detail, with the default NHWC format, output[b, i, j, k] = sum_{di, dj, q} input[b, strides[1] * i + di, strides[2] * j + dj, q] * filter[di, dj, q, k] Must have `strides[0] = strides[3] = 1`. For the most common case of the same horizontal and vertical strides, `strides = [1, stride, stride, 1]`. Usage Example: >>> x_in = np.array([[ ... [[2], [1], [2], [0], [1]], ... [[1], [3], [2], [2], [3]], ... [[1], [1], [3], [3], [0]], ... [[2], [2], [0], [1], [1]], ... [[0], [0], [3], [1], [2]], ]]) >>> kernel_in = np.array([ ... [ [[2, 0.1]], [[3, 0.2]] ], ... [ [[0, 0.3]],[[1, 0.4]] ], ]) >>> x = tf.constant(x_in, dtype=tf.float32) >>> kernel = tf.constant(kernel_in, dtype=tf.float32) >>> tf.nn.conv2d(x, kernel, strides=[1, 1, 1, 1], padding='VALID') <tf.Tensor: shape=(1, 4, 4, 2), dtype=float32, numpy=..., dtype=float32)> Args: input: A `Tensor`. Must be one of the following types: `half`, `bfloat16`, `float32`, `float64`. A Tensor of rank at least 4. The dimension order is interpreted according to the value of `data_format`; with the all-but-inner-3 dimensions acting as batch dimensions. See below for details. 
filters: A `Tensor`. Must have the same type as `input`. A 4-D tensor of shape `[filter_height, filter_width, in_channels, out_channels]` strides: An int or list of `ints` that has length `1`, `2` or `4`. The stride of the sliding window for each dimension of `input`. If a single value is given it is replicated in the `H` and `W` dimension. By default the `N` and `C` dimensions are set to 1. The dimension order is determined by the value of `data_format`, see below for details. padding: Either the `string` `"SAME"` or `"VALID"` indicating the type of padding algorithm to use, or a list indicating the explicit paddings at the start and end of each dimension. See [here](https://www.tensorflow.org/api_docs/python/tf/nn#notes_on_padding_2) for more information. When explicit padding is used and data_format is `"NHWC"`, this should be in the form `[[0, 0], [pad_top, pad_bottom], [pad_left, pad_right], [0, 0]]`. When explicit padding used and data_format is `"NCHW"`, this should be in the form `[[0, 0], [0, 0], [pad_top, pad_bottom], [pad_left, pad_right]]`. data_format: An optional `string` from: `"NHWC", "NCHW"`. Defaults to `"NHWC"`. Specify the data format of the input and output data. With the default format "NHWC", the data is stored in the order of: `batch_shape + [height, width, channels]`. Alternatively, the format could be "NCHW", the data storage order of: `batch_shape + [channels, height, width]`. dilations: An int or list of `ints` that has length `1`, `2` or `4`, defaults to 1. The dilation factor for each dimension of`input`. If a single value is given it is replicated in the `H` and `W` dimension. By default the `N` and `C` dimensions are set to 1. If set to k > 1, there will be k-1 skipped cells between each filter element on that dimension. The dimension order is determined by the value of `data_format`, see above for details. Dilations in the batch and depth dimensions if a 4-d tensor must be 1. name: A name for the operation (optional). Returns: A `Tensor`. Has the same type as `input` and the same outer batch shape. """ # pylint: enable=line-too-long return conv2d(input, # pylint: disable=redefined-builtin filters, strides, padding, use_cudnn_on_gpu=True, data_format=data_format, dilations=dilations, name=name) @tf_export(v1=["nn.conv2d"]) @dispatch.add_dispatch_support def conv2d( # pylint: disable=redefined-builtin,dangerous-default-value input, filter=None, strides=None, padding=None, use_cudnn_on_gpu=True, data_format="NHWC", dilations=[1, 1, 1, 1], name=None, filters=None): r"""Computes a 2-D convolution given 4-D `input` and `filter` tensors. Given an input tensor of shape `[batch, in_height, in_width, in_channels]` and a filter / kernel tensor of shape `[filter_height, filter_width, in_channels, out_channels]`, this op performs the following: 1. Flattens the filter to a 2-D matrix with shape `[filter_height * filter_width * in_channels, output_channels]`. 2. Extracts image patches from the input tensor to form a *virtual* tensor of shape `[batch, out_height, out_width, filter_height * filter_width * in_channels]`. 3. For each patch, right-multiplies the filter matrix and the image patch vector. In detail, with the default NHWC format, output[b, i, j, k] = sum_{di, dj, q} input[b, strides[1] * i + di, strides[2] * j + dj, q] * filter[di, dj, q, k] Must have `strides[0] = strides[3] = 1`. For the most common case of the same horizontal and vertical strides, `strides = [1, stride, stride, 1]`. Args: input: A `Tensor`. 
Must be one of the following types: `half`, `bfloat16`, `float32`, `float64`. A 4-D tensor. The dimension order is interpreted according to the value of `data_format`, see below for details. filter: A `Tensor`. Must have the same type as `input`. A 4-D tensor of shape `[filter_height, filter_width, in_channels, out_channels]` strides: An int or list of `ints` that has length `1`, `2` or `4`. The stride of the sliding window for each dimension of `input`. If a single value is given it is replicated in the `H` and `W` dimension. By default the `N` and `C` dimensions are set to 1. The dimension order is determined by the value of `data_format`, see below for details. padding: Either the `string` `"SAME"` or `"VALID"` indicating the type of padding algorithm to use, or a list indicating the explicit paddings at the start and end of each dimension. When explicit padding is used and data_format is `"NHWC"`, this should be in the form `[[0, 0], [pad_top, pad_bottom], [pad_left, pad_right], [0, 0]]`. When explicit padding used and data_format is `"NCHW"`, this should be in the form `[[0, 0], [0, 0], [pad_top, pad_bottom], [pad_left, pad_right]]`. use_cudnn_on_gpu: An optional `bool`. Defaults to `True`. data_format: An optional `string` from: `"NHWC", "NCHW"`. Defaults to `"NHWC"`. Specify the data format of the input and output data. With the default format "NHWC", the data is stored in the order of: [batch, height, width, channels]. Alternatively, the format could be "NCHW", the data storage order of: [batch, channels, height, width]. dilations: An int or list of `ints` that has length `1`, `2` or `4`, defaults to 1. The dilation factor for each dimension of`input`. If a single value is given it is replicated in the `H` and `W` dimension. By default the `N` and `C` dimensions are set to 1. If set to k > 1, there will be k-1 skipped cells between each filter element on that dimension. The dimension order is determined by the value of `data_format`, see above for details. Dilations in the batch and depth dimensions if a 4-d tensor must be 1. name: A name for the operation (optional). filters: Alias for filter. Returns: A `Tensor`. Has the same type as `input`. """ filter = deprecation.deprecated_argument_lookup( "filters", filters, "filter", filter) padding, explicit_paddings = convert_padding(padding) if data_format is None: data_format = "NHWC" channel_index = 1 if data_format.startswith("NC") else 3 strides = _get_sequence(strides, 2, channel_index, "strides") dilations = _get_sequence(dilations, 2, channel_index, "dilations") shape = input.shape # shape object may lack ndims, e.g., if input is an np.ndarray. In that case, # we fall back to len(shape). ndims = getattr(shape, "ndims", -1) if ndims == -1: ndims = len(shape) if ndims in (4, 3, 2, 1, 0, None): # We avoid calling squeeze_batch_dims to reduce extra python function # call slowdown in eager mode. This branch doesn't require reshapes. 
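# Fast path: hand the call straight to the fused kernel. At this point
# `convert_padding` above has already normalized `padding`: if the caller
# passed explicit per-dimension pads as a list, `padding` is now the string
# "EXPLICIT" and `explicit_paddings` carries the flattened pad values;
# otherwise `explicit_paddings` is empty.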
return gen_nn_ops.conv2d( input, filter=filter, strides=strides, padding=padding, use_cudnn_on_gpu=use_cudnn_on_gpu, explicit_paddings=explicit_paddings, data_format=data_format, dilations=dilations, name=name) return squeeze_batch_dims( input, functools.partial( gen_nn_ops.conv2d, filter=filter, strides=strides, padding=padding, use_cudnn_on_gpu=use_cudnn_on_gpu, explicit_paddings=explicit_paddings, data_format=data_format, dilations=dilations), inner_rank=3, name=name) @tf_export(v1=["nn.conv2d_backprop_filter"]) @dispatch.add_dispatch_support def conv2d_backprop_filter( # pylint: disable=redefined-builtin,dangerous-default-value input, filter_sizes, out_backprop, strides, padding, use_cudnn_on_gpu=True, data_format="NHWC", dilations=[1, 1, 1, 1], name=None): r"""Computes the gradients of convolution with respect to the filter. Args: input: A `Tensor`. Must be one of the following types: `half`, `bfloat16`, `float32`, `float64`. 4-D with shape `[batch, in_height, in_width, in_channels]`. filter_sizes: A `Tensor` of type `int32`. An integer vector representing the tensor shape of `filter`, where `filter` is a 4-D `[filter_height, filter_width, in_channels, out_channels]` tensor. out_backprop: A `Tensor`. Must have the same type as `input`. 4-D with shape `[batch, out_height, out_width, out_channels]`. Gradients w.r.t. the output of the convolution. strides: A list of `ints`. The stride of the sliding window for each dimension of the input of the convolution. Must be in the same order as the dimension specified with format. padding: Either the `string` `"SAME"` or `"VALID"` indicating the type of padding algorithm to use, or a list indicating the explicit paddings at the start and end of each dimension. When explicit padding is used and data_format is `"NHWC"`, this should be in the form `[[0, 0], [pad_top, pad_bottom], [pad_left, pad_right], [0, 0]]`. When explicit padding used and data_format is `"NCHW"`, this should be in the form `[[0, 0], [0, 0], [pad_top, pad_bottom], [pad_left, pad_right]]`. use_cudnn_on_gpu: An optional `bool`. Defaults to `True`. data_format: An optional `string` from: `"NHWC", "NCHW"`. Defaults to `"NHWC"`. Specify the data format of the input and output data. With the default format "NHWC", the data is stored in the order of: [batch, in_height, in_width, in_channels]. Alternatively, the format could be "NCHW", the data storage order of: [batch, in_channels, in_height, in_width]. dilations: An optional list of `ints`. Defaults to `[1, 1, 1, 1]`. 1-D tensor of length 4. The dilation factor for each dimension of `input`. If set to k > 1, there will be k-1 skipped cells between each filter element on that dimension. The dimension order is determined by the value of `data_format`, see above for details. Dilations in the batch and depth dimensions must be 1. name: A name for the operation (optional). Returns: A `Tensor`. Has the same type as `input`. """ padding, explicit_paddings = convert_padding(padding) return gen_nn_ops.conv2d_backprop_filter( input, filter_sizes, out_backprop, strides, padding, use_cudnn_on_gpu, explicit_paddings, data_format, dilations, name) @tf_export(v1=["nn.conv2d_backprop_input"]) @dispatch.add_dispatch_support def conv2d_backprop_input( # pylint: disable=redefined-builtin,dangerous-default-value input_sizes, filter=None, out_backprop=None, strides=None, padding=None, use_cudnn_on_gpu=True, data_format="NHWC", dilations=[1, 1, 1, 1], name=None, filters=None): r"""Computes the gradients of convolution with respect to the input. 
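The same underlying kernel also powers `tf.nn.conv2d_transpose`. A minimal shape-level sketch (an illustration only; assumes eager execution and the v1 endpoint `tf.compat.v1.nn.conv2d_backprop_input`):

>>> filt = tf.random.normal([3, 3, 3, 8])        # [fh, fw, in_channels, out_channels]
>>> grad_out = tf.random.normal([1, 4, 4, 8])    # gradient w.r.t. the conv output
>>> grad_in = tf.compat.v1.nn.conv2d_backprop_input(
...     input_sizes=[1, 4, 4, 3], filter=filt, out_backprop=grad_out,
...     strides=[1, 1, 1, 1], padding="SAME")
>>> grad_in.shape
TensorShape([1, 4, 4, 3])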
Args: input_sizes: A `Tensor` of type `int32`. An integer vector representing the shape of `input`, where `input` is a 4-D `[batch, height, width, channels]` tensor. filter: A `Tensor`. Must be one of the following types: `half`, `bfloat16`, `float32`, `float64`. 4-D with shape `[filter_height, filter_width, in_channels, out_channels]`. out_backprop: A `Tensor`. Must have the same type as `filter`. 4-D with shape `[batch, out_height, out_width, out_channels]`. Gradients w.r.t. the output of the convolution. strides: A list of `ints`. The stride of the sliding window for each dimension of the input of the convolution. Must be in the same order as the dimension specified with format. padding: Either the `string` `"SAME"` or `"VALID"` indicating the type of padding algorithm to use, or a list indicating the explicit paddings at the start and end of each dimension. When explicit padding is used and data_format is `"NHWC"`, this should be in the form `[[0, 0], [pad_top, pad_bottom], [pad_left, pad_right], [0, 0]]`. When explicit padding used and data_format is `"NCHW"`, this should be in the form `[[0, 0], [0, 0], [pad_top, pad_bottom], [pad_left, pad_right]]`. use_cudnn_on_gpu: An optional `bool`. Defaults to `True`. data_format: An optional `string` from: `"NHWC", "NCHW"`. Defaults to `"NHWC"`. Specify the data format of the input and output data. With the default format "NHWC", the data is stored in the order of: [batch, in_height, in_width, in_channels]. Alternatively, the format could be "NCHW", the data storage order of: [batch, in_channels, in_height, in_width]. dilations: An optional list of `ints`. Defaults to `[1, 1, 1, 1]`. 1-D tensor of length 4. The dilation factor for each dimension of `input`. If set to k > 1, there will be k-1 skipped cells between each filter element on that dimension. The dimension order is determined by the value of `data_format`, see above for details. Dilations in the batch and depth dimensions must be 1. name: A name for the operation (optional). filters: Alias for filter. Returns: A `Tensor`. Has the same type as `filter`. """ filter = deprecation.deprecated_argument_lookup( "filters", filters, "filter", filter) padding, explicit_paddings = convert_padding(padding) return gen_nn_ops.conv2d_backprop_input( input_sizes, filter, out_backprop, strides, padding, use_cudnn_on_gpu, explicit_paddings, data_format, dilations, name) @tf_export(v1=["nn.conv2d_transpose"]) @dispatch.add_dispatch_support def conv2d_transpose( value=None, filter=None, # pylint: disable=redefined-builtin output_shape=None, strides=None, padding="SAME", data_format="NHWC", name=None, input=None, # pylint: disable=redefined-builtin filters=None, dilations=None): """The transpose of `conv2d`. This operation is sometimes called "deconvolution" after (Zeiler et al., 2010), but is really the transpose (gradient) of `conv2d` rather than an actual deconvolution. Args: value: A 4-D `Tensor` of type `float` and shape `[batch, height, width, in_channels]` for `NHWC` data format or `[batch, in_channels, height, width]` for `NCHW` data format. filter: A 4-D `Tensor` with the same type as `value` and shape `[height, width, output_channels, in_channels]`. `filter`'s `in_channels` dimension must match that of `value`. output_shape: A 1-D `Tensor` representing the output shape of the deconvolution op. strides: An int or list of `ints` that has length `1`, `2` or `4`. The stride of the sliding window for each dimension of `input`. If a single value is given it is replicated in the `H` and `W` dimension. 
By default the `N` and `C` dimensions are set to 1. The dimension order is determined by the value of `data_format`, see below for details. padding: A string, either `'VALID'` or `'SAME'`. The padding algorithm. See the "returns" section of `tf.nn.convolution` for details. data_format: A string. 'NHWC' and 'NCHW' are supported. name: Optional name for the returned tensor. input: Alias for value. filters: Alias for filter. dilations: An int or list of `ints` that has length `1`, `2` or `4`, defaults to 1. The dilation factor for each dimension of `input`. If a single value is given it is replicated in the `H` and `W` dimension. By default the `N` and `C` dimensions are set to 1. If set to k > 1, there will be k-1 skipped cells between each filter element on that dimension. The dimension order is determined by the value of `data_format`, see above for details. Dilations in the batch and depth dimensions of a 4-d tensor must be 1. Returns: A `Tensor` with the same type as `value`. Raises: ValueError: If input/output depth does not match `filter`'s shape, or if padding is other than `'VALID'` or `'SAME'`. References: Deconvolutional Networks: [Zeiler et al., 2010] (https://ieeexplore.ieee.org/abstract/document/5539957) ([pdf] (http://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.232.4023&rep=rep1&type=pdf)) """ value = deprecated_argument_lookup("input", input, "value", value) filter = deprecated_argument_lookup("filters", filters, "filter", filter) with ops.name_scope(name, "conv2d_transpose", [value, filter, output_shape]) as name: return conv2d_transpose_v2( value, filter, output_shape, strides, padding=padding, data_format=data_format, dilations=dilations, name=name) @tf_export("nn.conv2d_transpose", v1=[]) @dispatch.add_dispatch_support def conv2d_transpose_v2( input, # pylint: disable=redefined-builtin filters, # pylint: disable=redefined-builtin output_shape, strides, padding="SAME", data_format="NHWC", dilations=None, name=None): """The transpose of `conv2d`. This operation is sometimes called "deconvolution" after (Zeiler et al., 2010), but is really the transpose (gradient) of `conv2d` rather than an actual deconvolution. Args: input: A 4-D `Tensor` of type `float` and shape `[batch, height, width, in_channels]` for `NHWC` data format or `[batch, in_channels, height, width]` for `NCHW` data format. filters: A 4-D `Tensor` with the same type as `input` and shape `[height, width, output_channels, in_channels]`. `filters`' `in_channels` dimension must match that of `input`. output_shape: A 1-D `Tensor` representing the output shape of the deconvolution op. strides: An int or list of `ints` that has length `1`, `2` or `4`. The stride of the sliding window for each dimension of `input`. If a single value is given it is replicated in the `H` and `W` dimension. By default the `N` and `C` dimensions are set to 1. The dimension order is determined by the value of `data_format`, see below for details. padding: Either the `string` `"SAME"` or `"VALID"` indicating the type of padding algorithm to use, or a list indicating the explicit paddings at the start and end of each dimension. See [here](https://www.tensorflow.org/api_docs/python/tf/nn#notes_on_padding_2) for more information. When explicit padding is used and data_format is `"NHWC"`, this should be in the form `[[0, 0], [pad_top, pad_bottom], [pad_left, pad_right], [0, 0]]`. When explicit padding is used and data_format is `"NCHW"`, this should be in the form `[[0, 0], [0, 0], [pad_top, pad_bottom], [pad_left, pad_right]]`. 
data_format: A string. 'NHWC' and 'NCHW' are supported. dilations: An int or list of `ints` that has length `1`, `2` or `4`, defaults to 1. The dilation factor for each dimension of`input`. If a single value is given it is replicated in the `H` and `W` dimension. By default the `N` and `C` dimensions are set to 1. If set to k > 1, there will be k-1 skipped cells between each filter element on that dimension. The dimension order is determined by the value of `data_format`, see above for details. Dilations in the batch and depth dimensions if a 4-d tensor must be 1. name: Optional name for the returned tensor. Returns: A `Tensor` with the same type as `input`. Raises: ValueError: If input/output depth does not match `filter`'s shape, or if padding is other than `'VALID'` or `'SAME'`. References: Deconvolutional Networks: [Zeiler et al., 2010] (https://ieeexplore.ieee.org/abstract/document/5539957) ([pdf] (http://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.232.4023&rep=rep1&type=pdf)) """ with ops.name_scope(name, "conv2d_transpose", [input, filter, output_shape]) as name: if data_format is None: data_format = "NHWC" channel_index = 1 if data_format.startswith("NC") else 3 strides = _get_sequence(strides, 2, channel_index, "strides") dilations = _get_sequence(dilations, 2, channel_index, "dilations") padding, explicit_paddings = convert_padding(padding) return gen_nn_ops.conv2d_backprop_input( input_sizes=output_shape, filter=filters, out_backprop=input, strides=strides, padding=padding, explicit_paddings=explicit_paddings, data_format=data_format, dilations=dilations, name=name) def _conv2d_expanded_batch( input, # pylint: disable=redefined-builtin filters, strides, padding, data_format, dilations, name): """Helper function for `convolution_internal`; handles expanded batches.""" # Try really hard to avoid modifying the legacy name scopes - return early. input_rank = input.shape.rank if input_rank is None or input_rank < 5: # We avoid calling squeeze_batch_dims to reduce extra python function # call slowdown in eager mode. This branch doesn't require reshapes. return gen_nn_ops.conv2d( input, filter=filters, strides=strides, padding=padding, data_format=data_format, dilations=dilations, name=name) return squeeze_batch_dims( input, functools.partial( gen_nn_ops.conv2d, filter=filters, strides=strides, padding=padding, data_format=data_format, dilations=dilations), inner_rank=3, name=name) @tf_export("nn.atrous_conv2d_transpose") @dispatch.add_dispatch_support def atrous_conv2d_transpose(value, filters, output_shape, rate, padding, name=None): """The transpose of `atrous_conv2d`. This operation is sometimes called "deconvolution" after (Zeiler et al., 2010), but is really the transpose (gradient) of `atrous_conv2d` rather than an actual deconvolution. Args: value: A 4-D `Tensor` of type `float`. It needs to be in the default `NHWC` format. Its shape is `[batch, in_height, in_width, in_channels]`. filters: A 4-D `Tensor` with the same type as `value` and shape `[filter_height, filter_width, out_channels, in_channels]`. `filters`' `in_channels` dimension must match that of `value`. Atrous convolution is equivalent to standard convolution with upsampled filters with effective height `filter_height + (filter_height - 1) * (rate - 1)` and effective width `filter_width + (filter_width - 1) * (rate - 1)`, produced by inserting `rate - 1` zeros along consecutive elements across the `filters`' spatial dimensions. 
output_shape: A 1-D `Tensor` of shape representing the output shape of the deconvolution op, of form `[batch, out_height, out_width, out_channels]`. rate: A positive int32. The stride with which we sample input values across the `height` and `width` dimensions. Equivalently, the rate by which we upsample the filter values by inserting zeros across the `height` and `width` dimensions. In the literature, the same parameter is sometimes called `input stride` or `dilation`. padding: A string, either `'VALID'` or `'SAME'`. The padding algorithm. See [here](https://www.tensorflow.org/api_docs/python/tf/nn#notes_on_padding_2) for more information. name: Optional name for the returned tensor. Returns: A `Tensor` with the same type as `value`. Raises: ValueError: If input/output depth does not match `filters`' shape, or if padding is other than `'VALID'` or `'SAME'`, or if the `rate` is less than one, or if the output_shape is not a tensor with 4 elements. References: Deconvolutional Networks: [Zeiler et al., 2010] (https://ieeexplore.ieee.org/abstract/document/5539957) ([pdf] (http://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.232.4023&rep=rep1&type=pdf)) """ with ops.name_scope(name, "atrous_conv2d_transpose", [value, filters, output_shape]) as name: value = ops.convert_to_tensor(value, name="value") filters = ops.convert_to_tensor(filters, name="filters") if not value.get_shape().dims[3].is_compatible_with(filters.get_shape()[3]): raise ValueError( "`value` channel count must be compatible with `filters` input " f"channel count. Received: value.shape={value.get_shape()} with " f"channel count {value.get_shape()[3]} and " f"filters.shape={filters.get_shape()} with input channel count " f"{filters.get_shape()[3]}.") if rate < 1: raise ValueError(f"`rate` cannot be less than one. Received: rate={rate}") if rate == 1: return conv2d_transpose( value, filters, output_shape, strides=[1, 1, 1, 1], padding=padding, data_format="NHWC") output_shape_ = ops.convert_to_tensor(output_shape, name="output_shape") if not output_shape_.get_shape().is_compatible_with( tensor_shape.TensorShape([4])): raise ValueError("`output_shape` must have shape (4,). " f"Received: output_shape={output_shape_.get_shape()}") if isinstance(output_shape, tuple): output_shape = list(output_shape) if isinstance(output_shape, (list, np.ndarray)): # output_shape's shape should be == [4] if reached this point. if not filters.get_shape().dims[2].is_compatible_with(output_shape[3]): raise ValueError( "`output_shape` channel count must be compatible with `filters` " f"output channel count. Received: output_shape={output_shape} with " f"channel count {output_shape[3]} and " f"filters.shape={filters.get_shape()} with output channel count " f"{filters.get_shape()[3]}.") # We have two padding contributions. The first is used for converting "SAME" # to "VALID". The second is required so that the height and width of the # zero-padded value tensor are multiples of rate. # Padding required to reduce to "VALID" convolution if padding == "SAME": # Handle filters whose shape is unknown during graph creation. if filters.get_shape().is_fully_defined(): filter_shape = filters.get_shape().as_list() else: filter_shape = array_ops.shape(filters) filter_height, filter_width = filter_shape[0], filter_shape[1] # Spatial dimensions of the filters and the upsampled filters in which we # introduce (rate - 1) zeros between consecutive filter values. 
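# For example (illustrative numbers only): a 3x3 filter with rate=2 has an
# effective size of 3 + (3 - 1) * (2 - 1) = 5 along each spatial dimension,
# so pad_height = pad_width = 4 below, split as 2 on the top/left and
# 2 on the bottom/right.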
filter_height_up = filter_height + (filter_height - 1) * (rate - 1) filter_width_up = filter_width + (filter_width - 1) * (rate - 1) pad_height = filter_height_up - 1 pad_width = filter_width_up - 1 # When pad_height (pad_width) is odd, we pad more to bottom (right), # following the same convention as conv2d(). pad_top = pad_height // 2 pad_bottom = pad_height - pad_top pad_left = pad_width // 2 pad_right = pad_width - pad_left elif padding == "VALID": pad_top = 0 pad_bottom = 0 pad_left = 0 pad_right = 0 else: raise ValueError("`padding` must be either 'VALID' or 'SAME'. " f"Received: padding={padding}") in_height = output_shape[1] + pad_top + pad_bottom in_width = output_shape[2] + pad_left + pad_right # More padding so that rate divides the height and width of the input. pad_bottom_extra = (rate - in_height % rate) % rate pad_right_extra = (rate - in_width % rate) % rate # The paddings argument to space_to_batch is just the extra padding # component. space_to_batch_pad = [[0, pad_bottom_extra], [0, pad_right_extra]] value = array_ops.space_to_batch( input=value, paddings=space_to_batch_pad, block_size=rate) input_sizes = [ rate * rate * output_shape[0], (in_height + pad_bottom_extra) // rate, (in_width + pad_right_extra) // rate, output_shape[3] ] value = gen_nn_ops.conv2d_backprop_input( input_sizes=input_sizes, filter=filters, out_backprop=value, strides=[1, 1, 1, 1], padding="VALID", data_format="NHWC") # The crops argument to batch_to_space includes both padding components. batch_to_space_crop = [[pad_top, pad_bottom + pad_bottom_extra], [pad_left, pad_right + pad_right_extra]] return array_ops.batch_to_space( input=value, crops=batch_to_space_crop, block_size=rate) @tf_export(v1=["nn.depthwise_conv2d_native"]) @dispatch.add_dispatch_support @deprecation.deprecated_endpoints("nn.depthwise_conv2d_native") def depthwise_conv2d_native( # pylint: disable=redefined-builtin,dangerous-default-value input, filter, strides, padding, data_format="NHWC", dilations=[1, 1, 1, 1], name=None): r"""Computes a 2-D depthwise convolution. Given an input tensor of shape `[batch, in_height, in_width, in_channels]` and a filter / kernel tensor of shape `[filter_height, filter_width, in_channels, channel_multiplier]`, containing `in_channels` convolutional filters of depth 1, `depthwise_conv2d` applies a different filter to each input channel (expanding from 1 channel to `channel_multiplier` channels for each), then concatenates the results together. Thus, the output has `in_channels * channel_multiplier` channels. ``` for k in 0..in_channels-1 for q in 0..channel_multiplier-1 output[b, i, j, k * channel_multiplier + q] = sum_{di, dj} input[b, strides[1] * i + di, strides[2] * j + dj, k] * filter[di, dj, k, q] ``` Must have `strides[0] = strides[3] = 1`. For the most common case of the same horizontal and vertical strides, `strides = [1, stride, stride, 1]`. Args: input: A `Tensor`. Must be one of the following types: `half`, `bfloat16`, `float32`, `float64`. filter: A `Tensor`. Must have the same type as `input`. strides: A list of `ints`. 1-D of length 4. The stride of the sliding window for each dimension of `input`. padding: Controls how to pad the image before applying the convolution. Can be the string `"SAME"` or `"VALID"` indicating the type of padding algorithm to use, or a list indicating the explicit paddings at the start and end of each dimension. 
When explicit padding is used and data_format is `"NHWC"`, this should be in the form `[[0, 0], [pad_top, pad_bottom], [pad_left, pad_right], [0, 0]]`. When explicit padding used and data_format is `"NCHW"`, this should be in the form `[[0, 0], [0, 0], [pad_top, pad_bottom], [pad_left, pad_right]]`. data_format: An optional `string` from: `"NHWC", "NCHW"`. Defaults to `"NHWC"`. Specify the data format of the input and output data. With the default format "NHWC", the data is stored in the order of: [batch, height, width, channels]. Alternatively, the format could be "NCHW", the data storage order of: [batch, channels, height, width]. dilations: An optional list of `ints`. Defaults to `[1, 1, 1, 1]`. 1-D tensor of length 4. The dilation factor for each dimension of `input`. If set to k > 1, there will be k-1 skipped cells between each filter element on that dimension. The dimension order is determined by the value of `data_format`, see above for details. Dilations in the batch and depth dimensions must be 1. name: A name for the operation (optional). Returns: A `Tensor`. Has the same type as `input`. """ padding, explicit_paddings = convert_padding(padding) return gen_nn_ops.depthwise_conv2d_native( input, filter, strides, padding, explicit_paddings=explicit_paddings, data_format=data_format, dilations=dilations, name=name) @tf_export( "nn.depthwise_conv2d_backprop_input", v1=[ "nn.depthwise_conv2d_native_backprop_input", "nn.depthwise_conv2d_backprop_input" ]) @dispatch.add_dispatch_support @deprecation.deprecated_endpoints("nn.depthwise_conv2d_native_backprop_input") def depthwise_conv2d_native_backprop_input( # pylint: disable=redefined-builtin,dangerous-default-value input_sizes, filter, out_backprop, strides, padding, data_format="NHWC", dilations=[1, 1, 1, 1], name=None): r"""Computes the gradients of depthwise convolution with respect to the input. Args: input_sizes: A `Tensor` of type `int32`. An integer vector representing the shape of `input`, based on `data_format`. For example, if `data_format` is 'NHWC' then `input` is a 4-D `[batch, height, width, channels]` tensor. filter: A `Tensor`. Must be one of the following types: `half`, `bfloat16`, `float32`, `float64`. 4-D with shape `[filter_height, filter_width, in_channels, depthwise_multiplier]`. out_backprop: A `Tensor`. Must have the same type as `filter`. 4-D with shape based on `data_format`. For example, if `data_format` is 'NHWC' then out_backprop shape is `[batch, out_height, out_width, out_channels]`. Gradients w.r.t. the output of the convolution. strides: A list of `ints`. The stride of the sliding window for each dimension of the input of the convolution. padding: Controls how to pad the image before applying the convolution. Can be the string `"SAME"` or `"VALID"` indicating the type of padding algorithm to use, or a list indicating the explicit paddings at the start and end of each dimension. See [here](https://www.tensorflow.org/api_docs/python/tf/nn#notes_on_padding_2) for more information. When explicit padding is used and data_format is `"NHWC"`, this should be in the form `[[0, 0], [pad_top, pad_bottom], [pad_left, pad_right], [0, 0]]`. When explicit padding used and data_format is `"NCHW"`, this should be in the form `[[0, 0], [0, 0], [pad_top, pad_bottom], [pad_left, pad_right]]`. data_format: An optional `string` from: `"NHWC", "NCHW"`. Defaults to `"NHWC"`. Specify the data format of the input and output data. 
With the default format "NHWC", the data is stored in the order of: [batch, height, width, channels]. Alternatively, the format could be "NCHW", the data storage order of: [batch, channels, height, width]. dilations: An optional list of `ints`. Defaults to `[1, 1, 1, 1]`. 1-D tensor of length 4. The dilation factor for each dimension of `input`. If set to k > 1, there will be k-1 skipped cells between each filter element on that dimension. The dimension order is determined by the value of `data_format`, see above for details. Dilations in the batch and depth dimensions must be 1. name: A name for the operation (optional). Returns: A `Tensor`. Has the same type as `filter`. """ padding, explicit_paddings = convert_padding(padding) return gen_nn_ops.depthwise_conv2d_native_backprop_input( input_sizes, filter, out_backprop, strides, padding, explicit_paddings=explicit_paddings, data_format=data_format, dilations=dilations, name=name) @tf_export( "nn.depthwise_conv2d_backprop_filter", v1=[ "nn.depthwise_conv2d_native_backprop_filter", "nn.depthwise_conv2d_backprop_filter" ]) @dispatch.add_dispatch_support @deprecation.deprecated_endpoints("nn.depthwise_conv2d_native_backprop_filter") def depthwise_conv2d_native_backprop_filter( # pylint: disable=redefined-builtin,dangerous-default-value input, filter_sizes, out_backprop, strides, padding, data_format="NHWC", dilations=[1, 1, 1, 1], name=None): r"""Computes the gradients of depthwise convolution with respect to the filter. Args: input: A `Tensor`. Must be one of the following types: `half`, `bfloat16`, `float32`, `float64`. 4-D with shape based on `data_format`. For example, if `data_format` is 'NHWC' then `input` is a 4-D `[batch, in_height, in_width, in_channels]` tensor. filter_sizes: A `Tensor` of type `int32`. An integer vector representing the tensor shape of `filter`, where `filter` is a 4-D `[filter_height, filter_width, in_channels, depthwise_multiplier]` tensor. out_backprop: A `Tensor`. Must have the same type as `input`. 4-D with shape based on `data_format`. For example, if `data_format` is 'NHWC' then out_backprop shape is `[batch, out_height, out_width, out_channels]`. Gradients w.r.t. the output of the convolution. strides: A list of `ints`. The stride of the sliding window for each dimension of the input of the convolution. padding: Controls how to pad the image before applying the convolution. Can be the string `"SAME"` or `"VALID"` indicating the type of padding algorithm to use, or a list indicating the explicit paddings at the start and end of each dimension. See [here](https://www.tensorflow.org/api_docs/python/tf/nn#notes_on_padding_2) for more information. When explicit padding is used and data_format is `"NHWC"`, this should be in the form `[[0, 0], [pad_top, pad_bottom], [pad_left, pad_right], [0, 0]]`. When explicit padding used and data_format is `"NCHW"`, this should be in the form `[[0, 0], [0, 0], [pad_top, pad_bottom], [pad_left, pad_right]]`. data_format: An optional `string` from: `"NHWC", "NCHW"`. Defaults to `"NHWC"`. Specify the data format of the input and output data. With the default format "NHWC", the data is stored in the order of: [batch, height, width, channels]. Alternatively, the format could be "NCHW", the data storage order of: [batch, channels, height, width]. dilations: An optional list of `ints`. Defaults to `[1, 1, 1, 1]`. 1-D tensor of length 4. The dilation factor for each dimension of `input`. If set to k > 1, there will be k-1 skipped cells between each filter element on that dimension. 
The dimension order is determined by the value of `data_format`, see above for details. Dilations in the batch and depth dimensions must be 1. name: A name for the operation (optional). Returns: A `Tensor`. Has the same type as `input`. """ padding, explicit_paddings = convert_padding(padding) return gen_nn_ops.depthwise_conv2d_native_backprop_filter( input, filter_sizes, out_backprop, strides, padding, explicit_paddings=explicit_paddings, data_format=data_format, dilations=dilations, name=name) def _conv3d_expanded_batch( input, # pylint: disable=redefined-builtin filter, # pylint: disable=redefined-builtin strides, padding, data_format, dilations=None, name=None): """Helper function for `conv3d`; handles expanded batches.""" shape = input.shape # shape object may lack ndims, e.g., if input is an np.ndarray. In that case, # we fall back to len(shape). ndims = getattr(shape, "ndims", -1) if ndims == -1: ndims = len(shape) if ndims in (5, 4, 3, 2, 1, 0, None): # We avoid calling squeeze_batch_dims to reduce extra python function # call slowdown in eager mode. This branch doesn't require reshapes. return gen_nn_ops.conv3d( input, filter, strides, padding, data_format=data_format, dilations=dilations, name=name) else: return squeeze_batch_dims( input, functools.partial( gen_nn_ops.conv3d, filter=filter, strides=strides, padding=padding, data_format=data_format, dilations=dilations), inner_rank=4, name=name) @tf_export("nn.conv3d", v1=[]) @dispatch.add_dispatch_support def conv3d_v2(input, # pylint: disable=redefined-builtin,missing-docstring filters, strides, padding, data_format="NDHWC", dilations=None, name=None): if dilations is None: dilations = [1, 1, 1, 1, 1] return _conv3d_expanded_batch(input, filters, strides, padding, data_format, dilations, name) @tf_export(v1=["nn.conv3d"]) @dispatch.add_dispatch_support def conv3d_v1( # pylint: disable=missing-docstring,dangerous-default-value input, # pylint: disable=redefined-builtin filter=None, # pylint: disable=redefined-builtin strides=None, padding=None, data_format="NDHWC", dilations=[1, 1, 1, 1, 1], name=None, filters=None): filter = deprecated_argument_lookup("filters", filters, "filter", filter) return gen_nn_ops.conv3d( input, filter, strides, padding, data_format, dilations, name) conv3d_v2.__doc__ = deprecation.rewrite_argument_docstring( gen_nn_ops.conv3d.__doc__, "filter", "filters") conv3d_v1.__doc__ = gen_nn_ops.conv3d.__doc__ @tf_export(v1=["nn.conv3d_transpose"]) @dispatch.add_dispatch_support def conv3d_transpose( value, filter=None, # pylint: disable=redefined-builtin output_shape=None, strides=None, padding="SAME", data_format="NDHWC", name=None, input=None, # pylint: disable=redefined-builtin filters=None, dilations=None): """The transpose of `conv3d`. This operation is sometimes called "deconvolution" after (Zeiler et al., 2010), but is really the transpose (gradient) of `conv3d` rather than an actual deconvolution. Args: value: A 5-D `Tensor` of type `float` and shape `[batch, depth, height, width, in_channels]`. filter: A 5-D `Tensor` with the same type as `value` and shape `[depth, height, width, output_channels, in_channels]`. `filter`'s `in_channels` dimension must match that of `value`. output_shape: A 1-D `Tensor` representing the output shape of the deconvolution op. strides: A list of ints. The stride of the sliding window for each dimension of the input tensor. padding: A string, either `'VALID'` or `'SAME'`. The padding algorithm. See the "returns" section of `tf.nn.convolution` for details. 
data_format: A string, either `'NDHWC'` or `'NCDHW'` specifying the layout of the input and output tensors. Defaults to `'NDHWC'`. name: Optional name for the returned tensor. input: Alias of value. filters: Alias of filter. dilations: An int or list of `ints` that has length `1`, `3` or `5`, defaults to 1. The dilation factor for each dimension of `input`. If a single value is given it is replicated in the `D`, `H` and `W` dimension. By default the `N` and `C` dimensions are set to 1. If set to k > 1, there will be k-1 skipped cells between each filter element on that dimension. The dimension order is determined by the value of `data_format`, see above for details. Dilations in the batch and depth dimensions of a 5-d tensor must be 1. Returns: A `Tensor` with the same type as `value`. Raises: ValueError: If input/output depth does not match `filter`'s shape, or if padding is other than `'VALID'` or `'SAME'`. References: Deconvolutional Networks: [Zeiler et al., 2010] (https://ieeexplore.ieee.org/abstract/document/5539957) ([pdf] (http://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.232.4023&rep=rep1&type=pdf)) """ filter = deprecated_argument_lookup("filters", filters, "filter", filter) value = deprecated_argument_lookup("input", input, "value", value) return conv3d_transpose_v2( value, filter, output_shape, strides, padding=padding, data_format=data_format, dilations=dilations, name=name) @tf_export("nn.conv3d_transpose", v1=[]) @dispatch.add_dispatch_support def conv3d_transpose_v2(input, # pylint: disable=redefined-builtin filters, output_shape, strides, padding="SAME", data_format="NDHWC", dilations=None, name=None): """The transpose of `conv3d`. This operation is sometimes called "deconvolution" after (Zeiler et al., 2010), but is really the transpose (gradient) of `conv3d` rather than an actual deconvolution. Args: input: A 5-D `Tensor` of type `float` and shape `[batch, depth, height, width, in_channels]` for `NDHWC` data format or `[batch, in_channels, depth, height, width]` for `NCDHW` data format. filters: A 5-D `Tensor` with the same type as `input` and shape `[depth, height, width, output_channels, in_channels]`. `filters`' `in_channels` dimension must match that of `input`. output_shape: A 1-D `Tensor` representing the output shape of the deconvolution op. strides: An int or list of `ints` that has length `1`, `3` or `5`. The stride of the sliding window for each dimension of `input`. If a single value is given it is replicated in the `D`, `H` and `W` dimension. By default the `N` and `C` dimensions are set to 1. The dimension order is determined by the value of `data_format`, see below for details. padding: A string, either `'VALID'` or `'SAME'`. The padding algorithm. See [here](https://www.tensorflow.org/api_docs/python/tf/nn#notes_on_padding_2) for more information. data_format: A string. 'NDHWC' and 'NCDHW' are supported. dilations: An int or list of `ints` that has length `1`, `3` or `5`, defaults to 1. The dilation factor for each dimension of `input`. If a single value is given it is replicated in the `D`, `H` and `W` dimension. By default the `N` and `C` dimensions are set to 1. If set to k > 1, there will be k-1 skipped cells between each filter element on that dimension. The dimension order is determined by the value of `data_format`, see above for details. Dilations in the batch and depth dimensions of a 5-d tensor must be 1. name: Optional name for the returned tensor. Returns: A `Tensor` with the same type as `input`. 
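For example, a minimal shape-level sketch (an illustration only; assumes eager execution):

>>> x = tf.random.normal([1, 2, 2, 2, 3])
>>> filt = tf.random.normal([2, 2, 2, 6, 3])   # [d, h, w, output_channels, in_channels]
>>> y = tf.nn.conv3d_transpose(x, filt, output_shape=[1, 4, 4, 4, 6], strides=2)
>>> y.shape
TensorShape([1, 4, 4, 4, 6])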
References: Deconvolutional Networks: [Zeiler et al., 2010] (https://ieeexplore.ieee.org/abstract/document/5539957) ([pdf] (http://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.232.4023&rep=rep1&type=pdf)) """ with ops.name_scope(name, "conv3d_transpose", [input, filters, output_shape]) as name: if data_format is None: data_format = "NDHWC" channel_index = 1 if data_format.startswith("NC") else 4 strides = _get_sequence(strides, 3, channel_index, "strides") dilations = _get_sequence(dilations, 3, channel_index, "dilations") return gen_nn_ops.conv3d_backprop_input_v2( input_sizes=output_shape, filter=filters, out_backprop=input, strides=strides, padding=padding, data_format=data_format, dilations=dilations, name=name) CONV_TRANSPOSE_OPS = ( conv1d_transpose, conv2d_transpose_v2, conv3d_transpose_v2, ) @tf_export("nn.conv_transpose") @dispatch.add_dispatch_support def conv_transpose(input, # pylint: disable=redefined-builtin filters, output_shape, strides, padding="SAME", data_format=None, dilations=None, name=None): """The transpose of `convolution`. This operation is sometimes called "deconvolution" after (Zeiler et al., 2010), but is really the transpose (gradient) of `convolution` rather than an actual deconvolution. Args: input: An N+2 dimensional `Tensor` of shape `[batch_size] + input_spatial_shape + [in_channels]` if data_format does not start with "NC" (default), or `[batch_size, in_channels] + input_spatial_shape` if data_format starts with "NC". It must be one of the following types: `half`, `bfloat16`, `float32`, `float64`. filters: An N+2 dimensional `Tensor` with the same type as `input` and shape `spatial_filter_shape + [out_channels, in_channels]`. output_shape: A 1-D `Tensor` representing the output shape of the deconvolution op. strides: An int or list of `ints` that has length `1`, `N` or `N+2`. The stride of the sliding window for each dimension of `input`. If a single value is given it is replicated in the spatial dimensions. By default the `N` and `C` dimensions are set to 1. The dimension order is determined by the value of `data_format`, see below for details. padding: A string, either `'VALID'` or `'SAME'`. The padding algorithm. See [here](https://www.tensorflow.org/api_docs/python/tf/nn#notes_on_padding_2) for more information. data_format: A string or None. Specifies whether the channel dimension of the `input` and output is the last dimension (default, or if `data_format` does not start with "NC"), or the second dimension (if `data_format` starts with "NC"). For N=1, the valid values are "NWC" (default) and "NCW". For N=2, the valid values are "NHWC" (default) and "NCHW". For N=3, the valid values are "NDHWC" (default) and "NCDHW". dilations: An int or list of `ints` that has length `1`, `N` or `N+2`, defaults to 1. The dilation factor for each dimension of `input`. If a single value is given it is replicated in the spatial dimensions. By default the `N` and `C` dimensions are set to 1. If set to k > 1, there will be k-1 skipped cells between each filter element on that dimension. The dimension order is determined by the value of `data_format`, see above for details. name: A name for the operation (optional). If not specified "conv_transpose" is used. Returns: A `Tensor` with the same type as `input`. 
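For example, the length of `output_shape` selects the underlying op: a 4-element `output_shape` dispatches to `conv2d_transpose_v2`. A minimal shape-level sketch (an illustration only; assumes eager execution):

>>> x = tf.random.normal([1, 4, 4, 3])
>>> filt = tf.random.normal([3, 3, 8, 3])   # [h, w, out_channels, in_channels]
>>> y = tf.nn.conv_transpose(x, filt, output_shape=[1, 8, 8, 8], strides=2)
>>> y.shape
TensorShape([1, 8, 8, 8])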
References: Deconvolutional Networks: [Zeiler et al., 2010] (https://ieeexplore.ieee.org/abstract/document/5539957) ([pdf] (http://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.232.4023&rep=rep1&type=pdf)) """ with ops.name_scope(name, "conv_transpose", [input, filters, output_shape]) as name: if tensor_util.is_tf_type(output_shape): n = output_shape.shape[0] - 2 elif isinstance(output_shape, collections_abc.Sized): n = len(output_shape) - 2 else: raise ValueError("`output_shape` must be a tensor or sized collection. " f"Received: output_shape={output_shape}") if not 1 <= n <= 3: raise ValueError( f"`output_shape` must be of length 3, 4 or 5. " f"Received: output_shape={output_shape} of length {n + 2}.") op = CONV_TRANSPOSE_OPS[n-1] return op( input, filters, output_shape, strides, padding=padding, data_format=data_format, dilations=dilations, name=name) @tf_export("nn.bias_add") @dispatch.add_dispatch_support def bias_add(value, bias, data_format=None, name=None): """Adds `bias` to `value`. This is (mostly) a special case of `tf.add` where `bias` is restricted to 1-D. Broadcasting is supported, so `value` may have any number of dimensions. Unlike `tf.add`, the type of `bias` is allowed to differ from `value` in the case where both types are quantized. Args: value: A `Tensor` with type `float`, `double`, `int64`, `int32`, `uint8`, `int16`, `int8`, `complex64`, or `complex128`. bias: A 1-D `Tensor` with size matching the channel dimension of `value`. Must be the same type as `value` unless `value` is a quantized type, in which case a different quantized type may be used. data_format: A string. 'N...C' and 'NC...' are supported. If `None` (the default) is specified then 'N...C' is assumed. name: A name for the operation (optional). Returns: A `Tensor` with the same type as `value`. Raises: ValueError if data format is unrecognized, if `value` has less than two dimensions when `data_format` is 'N...C'/`None` or `value` has less than three dimensions when `data_format` is `NC..`, if `bias` does not have exactly one dimension (is not a vector), or if the size of `bias` does not match the size of the channel dimension of `value`. """ with ops.name_scope(name, "BiasAdd", [value, bias]) as name: if data_format is not None: if data_format.startswith("NC"): data_format = "NCHW" elif data_format.startswith("N") and data_format.endswith("C"): data_format = "NHWC" else: raise ValueError("`data_format` must be of the form `N...C` or " f"`NC...`. Received: data_format={data_format}") if not context.executing_eagerly(): value = ops.convert_to_tensor(value, name="input") bias = ops.convert_to_tensor(bias, dtype=value.dtype, name="bias") return gen_nn_ops.bias_add(value, bias, data_format=data_format, name=name) def bias_add_v1(value, bias, name=None): """Adds `bias` to `value`. This is a deprecated version of bias_add and will soon be removed. This is (mostly) a special case of `tf.add` where `bias` is restricted to 1-D. Broadcasting is supported, so `value` may have any number of dimensions. Unlike `tf.add`, the type of `bias` is allowed to differ from `value` in the case where both types are quantized. Args: value: A `Tensor` with type `float`, `double`, `int64`, `int32`, `uint8`, `int16`, `int8`, `complex64`, or `complex128`. bias: A 1-D `Tensor` with size matching the last dimension of `value`. Must be the same type as `value` unless `value` is a quantized type, in which case a different quantized type may be used. name: A name for the operation (optional). 
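For example, the bias is broadcast across the leading dimensions (an illustration only; shown with the non-deprecated `tf.nn.bias_add`, assuming eager execution):

>>> value = tf.reshape(tf.range(6.0), [2, 3])
>>> tf.nn.bias_add(value, tf.constant([1.0, 2.0, 3.0])).numpy()
array([[1., 3., 5.],
       [4., 6., 8.]], dtype=float32)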
Returns: A `Tensor` with the same type as `value`. """ with ops.name_scope(name, "BiasAddV1", [value, bias]) as name: value = ops.convert_to_tensor(value, name="input") bias = ops.convert_to_tensor(bias, dtype=value.dtype, name="bias") return gen_nn_ops.bias_add_v1(value, bias, name=name) @tf_export(v1=["nn.crelu"]) @dispatch.add_dispatch_support def crelu(features, name=None, axis=-1): """Computes Concatenated ReLU. Concatenates a ReLU which selects only the positive part of the activation with a ReLU which selects only the *negative* part of the activation. Note that as a result this non-linearity doubles the depth of the activations. Source: [Understanding and Improving Convolutional Neural Networks via Concatenated Rectified Linear Units. W. Shang, et al.](https://arxiv.org/abs/1603.05201) Args: features: A `Tensor` with type `float`, `double`, `int32`, `int64`, `uint8`, `int16`, or `int8`. name: A name for the operation (optional). axis: The axis that the output values are concatenated along. Default is -1. Returns: A `Tensor` with the same type as `features`. References: Understanding and Improving Convolutional Neural Networks via Concatenated Rectified Linear Units: [Shang et al., 2016](http://proceedings.mlr.press/v48/shang16) ([pdf](http://proceedings.mlr.press/v48/shang16.pdf)) """ with ops.name_scope(name, "CRelu", [features]) as name: features = ops.convert_to_tensor(features, name="features") c = array_ops.concat([features, -features], axis, name=name) # pylint: disable=invalid-unary-operand-type return gen_nn_ops.relu(c) @tf_export("nn.crelu", v1=[]) @dispatch.add_dispatch_support def crelu_v2(features, axis=-1, name=None): return crelu(features, name=name, axis=axis) crelu_v2.__doc__ = crelu.__doc__ @tf_export("nn.relu6") @dispatch.register_unary_elementwise_api @dispatch.add_dispatch_support def relu6(features, name=None): """Computes Rectified Linear 6: `min(max(features, 0), 6)`. In comparison with `tf.nn.relu`, relu6 activation functions have shown to empirically perform better under low-precision conditions (e.g. fixed point inference) by encouraging the model to learn sparse features earlier. Source: [Convolutional Deep Belief Networks on CIFAR-10: Krizhevsky et al., 2010](http://www.cs.utoronto.ca/~kriz/conv-cifar10-aug2010.pdf). For example: >>> x = tf.constant([-3.0, -1.0, 0.0, 6.0, 10.0], dtype=tf.float32) >>> y = tf.nn.relu6(x) >>> y.numpy() array([0., 0., 0., 6., 6.], dtype=float32) Args: features: A `Tensor` with type `float`, `double`, `int32`, `int64`, `uint8`, `int16`, or `int8`. name: A name for the operation (optional). Returns: A `Tensor` with the same type as `features`. References: Convolutional Deep Belief Networks on CIFAR-10: Krizhevsky et al., 2010 ([pdf](http://www.cs.utoronto.ca/~kriz/conv-cifar10-aug2010.pdf)) """ with ops.name_scope(name, "Relu6", [features]) as name: features = ops.convert_to_tensor(features, name="features") return gen_nn_ops.relu6(features, name=name) @tf_export("nn.leaky_relu") @dispatch.register_unary_elementwise_api @dispatch.add_dispatch_support def leaky_relu(features, alpha=0.2, name=None): """Compute the Leaky ReLU activation function. Source: [Rectifier Nonlinearities Improve Neural Network Acoustic Models. AL Maas, AY Hannun, AY Ng - Proc. ICML, 2013] (https://ai.stanford.edu/~amaas/papers/relu_hybrid_icml2013_final.pdf). Args: features: A `Tensor` representing preactivation values. Must be one of the following types: `float16`, `float32`, `float64`, `int32`, `int64`. alpha: Slope of the activation function at x < 0. 
name: A name for the operation (optional). Returns: The activation value. References: Rectifier Nonlinearities Improve Neural Network Acoustic Models: [Maas et al., 2013] (http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.693.1422) ([pdf] (http://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.693.1422&rep=rep1&type=pdf)) """ with ops.name_scope(name, "LeakyRelu", [features, alpha]) as name: features = ops.convert_to_tensor(features, name="features") if features.dtype.is_integer: features = math_ops.cast(features, dtypes.float32) if isinstance(alpha, np.ndarray): alpha = alpha.item() return gen_nn_ops.leaky_relu(features, alpha=alpha, name=name) @tf_export("nn.gelu", v1=[]) @dispatch.register_unary_elementwise_api @dispatch.add_dispatch_support def gelu(features, approximate=False, name=None): """Compute the Gaussian Error Linear Unit (GELU) activation function. Gaussian error linear unit (GELU) computes `x * P(X <= x)`, where `P(X) ~ N(0, 1)`. The (GELU) nonlinearity weights inputs by their value, rather than gates inputs by their sign as in ReLU. For example: >>> x = tf.constant([-3.0, -1.0, 0.0, 1.0, 3.0], dtype=tf.float32) >>> y = tf.nn.gelu(x) >>> y.numpy() array([-0.00404951, -0.15865529, 0. , 0.8413447 , 2.9959507 ], dtype=float32) >>> y = tf.nn.gelu(x, approximate=True) >>> y.numpy() array([-0.00363752, -0.15880796, 0. , 0.841192 , 2.9963627 ], dtype=float32) Args: features: A `Tensor` representing preactivation values. approximate: An optional `bool`. Defaults to `False`. Whether to enable approximation. name: A name for the operation (optional). Returns: A `Tensor` with the same type as `features`. References: [Gaussian Error Linear Units (GELUs)](https://arxiv.org/abs/1606.08415). """ with ops.name_scope(name, "Gelu", [features]): features = ops.convert_to_tensor(features, name="features") if approximate: coeff = math_ops.cast(0.044715, features.dtype) return 0.5 * features * ( 1.0 + math_ops.tanh(0.7978845608028654 * (features + coeff * math_ops.pow(features, 3)))) else: return 0.5 * features * (1.0 + math_ops.erf( features / math_ops.cast(1.4142135623730951, features.dtype))) def _flatten_outer_dims(logits): """Flattens logits' outer dimensions and keep its last dimension.""" rank = array_ops.rank(logits) last_dim_size = array_ops.slice( array_ops.shape(logits), [math_ops.subtract(rank, 1)], [1]) output = array_ops.reshape(logits, array_ops.concat([[-1], last_dim_size], 0)) # Set output shape if known. if not context.executing_eagerly(): shape = logits.get_shape() if shape is not None and shape.dims is not None: shape = shape.as_list() product = 1 product_valid = True for d in shape[:-1]: if d is None: product_valid = False break else: product *= d if product_valid: output_shape = [product, shape[-1]] output.set_shape(output_shape) return output def _wrap_2d_function(inputs, compute_op, dim=-1, name=None): """Helper function for ops that accept and return 2d inputs of same shape. It reshapes and transposes the inputs into a 2-D Tensor and then invokes the given function. The output would be transposed and reshaped back. If the given function returns a tuple of tensors, each of them will be transposed and reshaped. Args: inputs: A non-empty `Tensor`. Must be one of the following types: `half`, `float32`, `float64`. compute_op: The function to wrap. Must accept the input tensor as its first arugment, and a second keyword argument `name`. dim: The dimension softmax would be performed on. The default is -1 which indicates the last dimension. 
name: A name for the operation (optional). Returns: A `Tensor`. Has the same shape as inputs. If compute_op returns multiple tensors, each of them have the same shape as the input. Raises: InvalidArgumentError: if `inputs` is empty or `dim` is beyond the last dimension of `inputs`. """ def _swap_axis(input_tensor, dim_index, last_index, name=None): """Swaps logits's dim_index and last_index.""" return array_ops.transpose( input_tensor, array_ops.concat([ math_ops.range(dim_index), [last_index], math_ops.range(dim_index + 1, last_index), [dim_index] ], 0), name=name) inputs = ops.convert_to_tensor(inputs) # We need its original shape for shape inference. shape = inputs.get_shape() is_last_dim = (dim == -1) or (dim == shape.ndims - 1) if is_last_dim: return compute_op(inputs, name=name) dim_val = dim if isinstance(dim, ops.Tensor): dim_val = tensor_util.constant_value(dim) if dim_val is not None and not -shape.ndims <= dim_val < shape.ndims: raise errors_impl.InvalidArgumentError( None, None, f"`dim` must be in the range [{-shape.ndims}, {shape.ndims}) where " f"{shape.ndims} is the number of dimensions in the input. " f"Received: dim={dim_val}") # If dim is not the last dimension, we have to do a transpose so that we can # still perform the op on its last dimension. # In case dim is negative (and is not last dimension -1), add shape.ndims ndims = array_ops.rank(inputs) if not isinstance(dim, ops.Tensor): if dim < 0: dim += ndims else: dim = array_ops.where(math_ops.less(dim, 0), dim + ndims, dim) # Swap logits' dimension of dim and its last dimension. input_rank = array_ops.rank(inputs) dim_axis = dim % shape.ndims inputs = _swap_axis(inputs, dim_axis, math_ops.subtract(input_rank, 1)) # Do the actual call on its last dimension. def fix_output(output): output = _swap_axis( output, dim_axis, math_ops.subtract(input_rank, 1), name=name) # Make shape inference work since transpose may erase its static shape. output.set_shape(shape) return output outputs = compute_op(inputs) if isinstance(outputs, tuple): return tuple(fix_output(output) for output in outputs) else: return fix_output(outputs) @tf_export("nn.softmax", "math.softmax", v1=[]) @dispatch.add_dispatch_support def softmax_v2(logits, axis=None, name=None): """Computes softmax activations. Used for multi-class predictions. The sum of all outputs generated by softmax is 1. This function performs the equivalent of ```python softmax = tf.exp(logits) / tf.reduce_sum(tf.exp(logits), axis, keepdims=True) ``` Example usage: >>> softmax = tf.nn.softmax([-1, 0., 1.]) >>> softmax <tf.Tensor: shape=(3,), dtype=float32, numpy=array([0.09003057, 0.24472848, 0.66524094], dtype=float32)> >>> sum(softmax) <tf.Tensor: shape=(), dtype=float32, numpy=1.0> Args: logits: A non-empty `Tensor`. Must be one of the following types: `half`, `float32`, `float64`. axis: The dimension softmax would be performed on. The default is -1 which indicates the last dimension. name: A name for the operation (optional). Returns: A `Tensor`. Has the same type and shape as `logits`. Raises: InvalidArgumentError: if `logits` is empty or `axis` is beyond the last dimension of `logits`. 
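The `axis` argument selects the dimension that is normalized. For a `[batch, classes]` tensor the default `axis=-1` makes each row sum to 1, while `axis=0` would normalize each column instead; a small sketch (an illustration only; assumes eager execution):

>>> logits = tf.constant([[2.0, 1.0], [0.0, 1.0]])
>>> row_probs = tf.nn.softmax(logits)          # each row sums to 1
>>> col_probs = tf.nn.softmax(logits, axis=0)  # each column sums to 1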
""" if axis is None: axis = -1 return _wrap_2d_function(logits, gen_nn_ops.softmax, axis, name) @tf_export(v1=["nn.softmax", "math.softmax"]) @dispatch.add_dispatch_support @deprecation.deprecated_args(None, "dim is deprecated, use axis instead", "dim") def softmax(logits, axis=None, name=None, dim=None): axis = deprecation.deprecated_argument_lookup("axis", axis, "dim", dim) if axis is None: axis = -1 return _wrap_2d_function(logits, gen_nn_ops.softmax, axis, name) softmax.__doc__ = softmax_v2.__doc__ @tf_export(v1=["nn.log_softmax", "math.log_softmax"]) @dispatch.register_unary_elementwise_api @dispatch.add_dispatch_support @deprecation.deprecated_args(None, "dim is deprecated, use axis instead", "dim") def log_softmax(logits, axis=None, name=None, dim=None): """Computes log softmax activations. For each batch `i` and class `j` we have logsoftmax = logits - log(reduce_sum(exp(logits), axis)) Args: logits: A non-empty `Tensor`. Must be one of the following types: `half`, `float32`, `float64`. axis: The dimension softmax would be performed on. The default is -1 which indicates the last dimension. name: A name for the operation (optional). dim: Deprecated alias for `axis`. Returns: A `Tensor`. Has the same type as `logits`. Same shape as `logits`. Raises: InvalidArgumentError: if `logits` is empty or `axis` is beyond the last dimension of `logits`. """ axis = deprecation.deprecated_argument_lookup("axis", axis, "dim", dim) if axis is None: axis = -1 return _wrap_2d_function(logits, gen_nn_ops.log_softmax, axis, name) @tf_export("nn.log_softmax", "math.log_softmax", v1=[]) @dispatch.add_dispatch_support def log_softmax_v2(logits, axis=None, name=None): """Computes log softmax activations. For each batch `i` and class `j` we have logsoftmax = logits - log(reduce_sum(exp(logits), axis)) Args: logits: A non-empty `Tensor`. Must be one of the following types: `half`, `float32`, `float64`. axis: The dimension softmax would be performed on. The default is -1 which indicates the last dimension. name: A name for the operation (optional). Returns: A `Tensor`. Has the same type as `logits`. Same shape as `logits`. Raises: InvalidArgumentError: if `logits` is empty or `axis` is beyond the last dimension of `logits`. """ if axis is None: axis = -1 return _wrap_2d_function(logits, gen_nn_ops.log_softmax, axis, name) def _ensure_xent_args(name, sentinel, labels, logits): # Make sure that all arguments were passed as named arguments. if sentinel is not None: raise ValueError( f"Only call {name} with named arguments (labels=..., logits=..., ...). " f"Received unnamed argument: {sentinel}") if labels is None or logits is None: raise ValueError("Both `labels` and `logits` must be provided. " f"Received: labels={labels} and logits={logits}") @tf_export("nn.softmax_cross_entropy_with_logits", v1=[]) @dispatch.add_dispatch_support def softmax_cross_entropy_with_logits_v2(labels, logits, axis=-1, name=None): """Computes softmax cross entropy between `logits` and `labels`. Measures the probability error in discrete classification tasks in which the classes are mutually exclusive (each entry is in exactly one class). For example, each CIFAR-10 image is labeled with one and only one label: an image can be a dog or a truck, but not both. **NOTE:** While the classes are mutually exclusive, their probabilities need not be. All that is required is that each row of `labels` is a valid probability distribution. If they are not, the computation of the gradient will be incorrect. 
If using exclusive `labels` (wherein one and only one class is true at a time), see `sparse_softmax_cross_entropy_with_logits`. Usage: >>> logits = [[4.0, 2.0, 1.0], [0.0, 5.0, 1.0]] >>> labels = [[1.0, 0.0, 0.0], [0.0, 0.8, 0.2]] >>> tf.nn.softmax_cross_entropy_with_logits(labels=labels, logits=logits) <tf.Tensor: shape=(2,), dtype=float32, numpy=array([0.16984604, 0.82474494], dtype=float32)> **WARNING:** This op expects unscaled logits, since it performs a `softmax` on `logits` internally for efficiency. Do not call this op with the output of `softmax`, as it will produce incorrect results. A common use case is to have logits and labels of shape `[batch_size, num_classes]`, but higher dimensions are supported, with the `axis` argument specifying the class dimension. `logits` and `labels` must have the same dtype (either `float16`, `float32`, or `float64`). Backpropagation will happen into both `logits` and `labels`. To disallow backpropagation into `labels`, pass label tensors through `tf.stop_gradient` before feeding it to this function. **Note that to avoid confusion, it is required to pass only named arguments to this function.** Args: labels: Each vector along the class dimension should hold a valid probability distribution e.g. for the case in which labels are of shape `[batch_size, num_classes]`, each row of `labels[i]` must be a valid probability distribution. logits: Per-label activations, typically a linear output. These activation energies are interpreted as unnormalized log probabilities. axis: The class dimension. Defaulted to -1 which is the last dimension. name: A name for the operation (optional). Returns: A `Tensor` that contains the softmax cross entropy loss. Its type is the same as `logits` and its shape is the same as `labels` except that it does not have the last dimension of `labels`. """ return softmax_cross_entropy_with_logits_v2_helper( labels=labels, logits=logits, axis=axis, name=name) @tf_export(v1=["nn.softmax_cross_entropy_with_logits_v2"]) @dispatch.add_dispatch_support @deprecated_args(None, "dim is deprecated, use axis instead", "dim") def softmax_cross_entropy_with_logits_v2_helper( labels, logits, axis=None, name=None, dim=None): """Computes softmax cross entropy between `logits` and `labels`. Measures the probability error in discrete classification tasks in which the classes are mutually exclusive (each entry is in exactly one class). For example, each CIFAR-10 image is labeled with one and only one label: an image can be a dog or a truck, but not both. **NOTE:** While the classes are mutually exclusive, their probabilities need not be. All that is required is that each row of `labels` is a valid probability distribution. If they are not, the computation of the gradient will be incorrect. If using exclusive `labels` (wherein one and only one class is true at a time), see `sparse_softmax_cross_entropy_with_logits`. **WARNING:** This op expects unscaled logits, since it performs a `softmax` on `logits` internally for efficiency. Do not call this op with the output of `softmax`, as it will produce incorrect results. A common use case is to have logits and labels of shape `[batch_size, num_classes]`, but higher dimensions are supported, with the `axis` argument specifying the class dimension. `logits` and `labels` must have the same dtype (either `float16`, `float32`, or `float64`). Backpropagation will happen into both `logits` and `labels`. 
To disallow backpropagation into `labels`, pass label tensors through `tf.stop_gradient` before feeding it to this function. **Note that to avoid confusion, it is required to pass only named arguments to this function.** Args: labels: Each vector along the class dimension should hold a valid probability distribution e.g. for the case in which labels are of shape `[batch_size, num_classes]`, each row of `labels[i]` must be a valid probability distribution. logits: Unscaled log probabilities. axis: The class dimension. Defaulted to -1 which is the last dimension. name: A name for the operation (optional). dim: Deprecated alias for axis. Returns: A `Tensor` that contains the softmax cross entropy loss. Its type is the same as `logits` and its shape is the same as `labels` except that it does not have the last dimension of `labels`. """ # TODO(pcmurray) Raise an error when the labels do not sum to 1. Note: This # could break users who call this with bad labels, but disregard the bad # results. axis = deprecated_argument_lookup("axis", axis, "dim", dim) del dim if axis is None: axis = -1 with ops.name_scope(name, "softmax_cross_entropy_with_logits", [logits, labels]) as name: logits = ops.convert_to_tensor(logits, name="logits") labels = ops.convert_to_tensor(labels, name="labels") convert_to_float32 = ( logits.dtype == dtypes.float16 or logits.dtype == dtypes.bfloat16) precise_logits = math_ops.cast( logits, dtypes.float32) if convert_to_float32 else logits # labels and logits must be of the same type labels = math_ops.cast(labels, precise_logits.dtype) input_rank = array_ops.rank(precise_logits) # For shape inference. shape = logits.get_shape() # Move the dim to the end if dim is not the last dimension. if axis != -1: def _move_dim_to_end(tensor, dim_index, rank): return array_ops.transpose( tensor, array_ops.concat([ math_ops.range(dim_index), math_ops.range(dim_index + 1, rank), [dim_index] ], 0)) precise_logits = _move_dim_to_end(precise_logits, axis, input_rank) labels = _move_dim_to_end(labels, axis, input_rank) input_shape = array_ops.shape(precise_logits) # Make precise_logits and labels into matrices. precise_logits = _flatten_outer_dims(precise_logits) labels = _flatten_outer_dims(labels) # Do the actual op computation. if config.is_op_determinism_enabled(): log_probs = log_softmax_v2(precise_logits) cost = -math_ops.reduce_sum(labels * log_probs, axis=1) else: # The second output tensor contains the gradients. We use it in # CrossEntropyGrad() in nn_grad but not here. cost, unused_backprop = gen_nn_ops.softmax_cross_entropy_with_logits( precise_logits, labels, name=name) # The output cost shape should be the input minus axis. output_shape = array_ops.slice(input_shape, [0], [math_ops.subtract(input_rank, 1)]) cost = array_ops.reshape(cost, output_shape) # Make shape inference work since reshape and transpose may erase its static # shape. if not context.executing_eagerly( ) and shape is not None and shape.dims is not None: shape = shape.as_list() del shape[axis] cost.set_shape(shape) if convert_to_float32: return math_ops.cast(cost, logits.dtype) else: return cost _XENT_DEPRECATION = """ Future major versions of TensorFlow will allow gradients to flow into the labels input on backprop by default. See `tf.nn.softmax_cross_entropy_with_logits_v2`. 
""" @tf_export(v1=["nn.softmax_cross_entropy_with_logits"]) @dispatch.add_dispatch_support @deprecation.deprecated(date=None, instructions=_XENT_DEPRECATION) def softmax_cross_entropy_with_logits( _sentinel=None, # pylint: disable=invalid-name labels=None, logits=None, dim=-1, name=None, axis=None): """Computes softmax cross entropy between `logits` and `labels`. Measures the probability error in discrete classification tasks in which the classes are mutually exclusive (each entry is in exactly one class). For example, each CIFAR-10 image is labeled with one and only one label: an image can be a dog or a truck, but not both. **NOTE:** While the classes are mutually exclusive, their probabilities need not be. All that is required is that each row of `labels` is a valid probability distribution. If they are not, the computation of the gradient will be incorrect. If using exclusive `labels` (wherein one and only one class is true at a time), see `sparse_softmax_cross_entropy_with_logits`. **WARNING:** This op expects unscaled logits, since it performs a `softmax` on `logits` internally for efficiency. Do not call this op with the output of `softmax`, as it will produce incorrect results. A common use case is to have logits and labels of shape `[batch_size, num_classes]`, but higher dimensions are supported, with the `dim` argument specifying the class dimension. Backpropagation will happen only into `logits`. To calculate a cross entropy loss that allows backpropagation into both `logits` and `labels`, see `tf.nn.softmax_cross_entropy_with_logits_v2`. **Note that to avoid confusion, it is required to pass only named arguments to this function.** Args: _sentinel: Used to prevent positional parameters. Internal, do not use. labels: Each vector along the class dimension should hold a valid probability distribution e.g. for the case in which labels are of shape `[batch_size, num_classes]`, each row of `labels[i]` must be a valid probability distribution. logits: Per-label activations, typically a linear output. These activation energies are interpreted as unnormalized log probabilities. dim: The class dimension. Defaulted to -1 which is the last dimension. name: A name for the operation (optional). axis: Alias for dim. Returns: A `Tensor` that contains the softmax cross entropy loss. Its type is the same as `logits` and its shape is the same as `labels` except that it does not have the last dimension of `labels`. """ dim = deprecated_argument_lookup("axis", axis, "dim", dim) _ensure_xent_args("softmax_cross_entropy_with_logits", _sentinel, labels, logits) with ops.name_scope(name, "softmax_cross_entropy_with_logits_sg", [logits, labels]) as name: labels = array_ops.stop_gradient(labels, name="labels_stop_gradient") return softmax_cross_entropy_with_logits_v2( labels=labels, logits=logits, axis=dim, name=name) def _sparse_softmax_cross_entropy_with_rank_2_logits(logits, labels, name): if config.is_op_determinism_enabled(): # TODO(duncanriach): Implement a GPU-deterministic version of this op at # the C++/CUDA level. # The actual op functionality log_probs = log_softmax_v2(logits) cost = math_ops.negative(array_ops.gather(log_probs, labels, batch_dims=1)) # Force the output to be NaN when the corresponding label is invalid. # Without the selective gradient gating provided by the following code, # backprop into the actual op functionality above, when there are invalid # labels, leads to corruption of the gradients associated with valid labels. 
# TODO(duncanriach): Uncover the source of the aforementioned corruption. nan_tensor = constant_op.constant(float("Nan"), dtype=logits.dtype) cost_all_nans = array_ops.broadcast_to(nan_tensor, array_ops.shape(cost)) class_count = math_ops.cast(array_ops.shape(logits)[-1], labels.dtype) cost = array_ops.where( math_ops.logical_or( math_ops.less(labels, 0), math_ops.greater_equal(labels, class_count)), cost_all_nans, cost) else: # The second output tensor contains the gradients. We use it in # _CrossEntropyGrad() in nn_grad but not here. cost, _ = gen_nn_ops.sparse_softmax_cross_entropy_with_logits( logits, labels, name=name) return cost @tf_export(v1=["nn.sparse_softmax_cross_entropy_with_logits"]) @dispatch.add_dispatch_support def sparse_softmax_cross_entropy_with_logits( _sentinel=None, # pylint: disable=invalid-name labels=None, logits=None, name=None): """Computes sparse softmax cross entropy between `logits` and `labels`. Measures the probability error in discrete classification tasks in which the classes are mutually exclusive (each entry is in exactly one class). For example, each CIFAR-10 image is labeled with one and only one label: an image can be a dog or a truck, but not both. **NOTE:** For this operation, the probability of a given label is considered exclusive. That is, soft classes are not allowed, and the `labels` vector must provide a single specific index for the true class for each row of `logits` (each minibatch entry). For soft softmax classification with a probability distribution for each entry, see `softmax_cross_entropy_with_logits_v2`. **WARNING:** This op expects unscaled logits, since it performs a `softmax` on `logits` internally for efficiency. Do not call this op with the output of `softmax`, as it will produce incorrect results. A common use case is to have logits of shape `[batch_size, num_classes]` and have labels of shape `[batch_size]`, but higher dimensions are supported, in which case the `dim`-th dimension is assumed to be of size `num_classes`. `logits` must have the dtype of `float16`, `float32`, or `float64`, and `labels` must have the dtype of `int32` or `int64`. **Note that to avoid confusion, it is required to pass only named arguments to this function.** Args: _sentinel: Used to prevent positional parameters. Internal, do not use. labels: `Tensor` of shape `[d_0, d_1, ..., d_{r-1}]` (where `r` is rank of `labels` and result) and dtype `int32` or `int64`. Each entry in `labels` must be an index in `[0, num_classes)`. Other values will raise an exception when this op is run on CPU, and return `NaN` for corresponding loss and gradient rows on GPU. logits: Per-label activations (typically a linear output) of shape `[d_0, d_1, ..., d_{r-1}, num_classes]` and dtype `float16`, `float32`, or `float64`. These activation energies are interpreted as unnormalized log probabilities. name: A name for the operation (optional). Returns: A `Tensor` of the same shape as `labels` and of the same type as `logits` with the softmax cross entropy loss. Raises: ValueError: If logits are scalars (need to have rank >= 1) or if the rank of the labels is not equal to the rank of the logits minus one. """ _ensure_xent_args("sparse_softmax_cross_entropy_with_logits", _sentinel, labels, logits) # TODO(pcmurray) Raise an error when the label is not an index in # [0, num_classes). Note: This could break users who call this with bad # labels, but disregard the bad results. # Reshape logits and labels to rank 2. 
with ops.name_scope(name, "SparseSoftmaxCrossEntropyWithLogits", [labels, logits]): labels = ops.convert_to_tensor(labels) logits = ops.convert_to_tensor(logits) precise_logits = math_ops.cast(logits, dtypes.float32) if (dtypes.as_dtype( logits.dtype) == dtypes.float16) else logits # Store label shape for result later. labels_static_shape = labels.get_shape() labels_shape = array_ops.shape(labels) static_shapes_fully_defined = ( labels_static_shape.is_fully_defined() and logits.get_shape()[:-1].is_fully_defined()) if logits.get_shape().ndims is not None and logits.get_shape().ndims == 0: raise ValueError( f"`logits` cannot be a scalar. Received logits={logits}`") if logits.get_shape().ndims is not None and ( labels_static_shape.ndims is not None and labels_static_shape.ndims != logits.get_shape().ndims - 1): raise ValueError( "`labels.shape.rank` must equal `logits.shape.rank - 1`. " f"Received: labels.shape={labels_static_shape} of rank " f"{labels_static_shape.rank} and logits.shape={logits.get_shape()} " f"of rank {logits.get_shape().rank}") if (static_shapes_fully_defined and labels_static_shape != logits.get_shape()[:-1]): raise ValueError( "`labels.shape` must equal `logits.shape` except for " f"the last dimension. Received: labels.shape={labels_static_shape} " f"and logits.shape={logits.get_shape()}") # Check if no reshapes are required. if logits.get_shape().ndims == 2: cost = _sparse_softmax_cross_entropy_with_rank_2_logits( precise_logits, labels, name=name) if logits.dtype == dtypes.float16: return math_ops.cast(cost, dtypes.float16) else: return cost # Perform a check of the dynamic shapes if the static shapes are not fully # defined. shape_checks = [] if not static_shapes_fully_defined: shape_checks.append( check_ops.assert_equal( array_ops.shape(labels), array_ops.shape(logits)[:-1])) with ops.control_dependencies(shape_checks): # Reshape logits to 2 dim, labels to 1 dim. num_classes = array_ops.shape(logits)[array_ops.rank(logits) - 1] precise_logits = array_ops.reshape(precise_logits, [-1, num_classes]) labels = array_ops.reshape(labels, [-1]) cost = _sparse_softmax_cross_entropy_with_rank_2_logits( precise_logits, labels, name=name) cost = array_ops.reshape(cost, labels_shape) cost.set_shape(labels_static_shape) if logits.dtype == dtypes.float16: return math_ops.cast(cost, dtypes.float16) else: return cost @tf_export("nn.sparse_softmax_cross_entropy_with_logits", v1=[]) @dispatch.add_dispatch_support def sparse_softmax_cross_entropy_with_logits_v2(labels, logits, name=None): """Computes sparse softmax cross entropy between `logits` and `labels`. Measures the probability error in discrete classification tasks in which the classes are mutually exclusive (each entry is in exactly one class). For example, each CIFAR-10 image is labeled with one and only one label: an image can be a dog or a truck, but not both. Note: For this operation, the probability of a given label is considered exclusive. That is, soft classes are not allowed, and the `labels` vector must provide a single specific index for the true class for each row of `logits` (each minibatch entry). For soft softmax classification with a probability distribution for each entry, see `softmax_cross_entropy_with_logits_v2`. Warning: This op expects unscaled logits, since it performs a `softmax` on `logits` internally for efficiency. Do not call this op with the output of `softmax`, as it will produce incorrect results. 
A common use case is to have logits of shape `[batch_size, num_classes]` and have labels of shape `[batch_size]`, but higher dimensions are supported, in which case the `dim`-th dimension is assumed to be of size `num_classes`. `logits` must have the dtype of `float16`, `float32`, or `float64`, and `labels` must have the dtype of `int32` or `int64`. >>> logits = tf.constant([[2., -5., .5, -.1], ... [0., 0., 1.9, 1.4], ... [-100., 100., -100., -100.]]) >>> labels = tf.constant([0, 3, 1]) >>> tf.nn.sparse_softmax_cross_entropy_with_logits( ... labels=labels, logits=logits).numpy() array([0.29750752, 1.1448325 , 0. ], dtype=float32) To avoid confusion, passing only named arguments to this function is recommended. Args: labels: `Tensor` of shape `[d_0, d_1, ..., d_{r-1}]` (where `r` is rank of `labels` and result) and dtype `int32` or `int64`. Each entry in `labels` must be an index in `[0, num_classes)`. Other values will raise an exception when this op is run on CPU, and return `NaN` for corresponding loss and gradient rows on GPU. logits: Unscaled log probabilities of shape `[d_0, d_1, ..., d_{r-1}, num_classes]` and dtype `float16`, `float32`, or `float64`. name: A name for the operation (optional). Returns: A `Tensor` of the same shape as `labels` and of the same type as `logits` with the softmax cross entropy loss. Raises: ValueError: If logits are scalars (need to have rank >= 1) or if the rank of the labels is not equal to the rank of the logits minus one. """ return sparse_softmax_cross_entropy_with_logits( labels=labels, logits=logits, name=name) @tf_export("nn.avg_pool", v1=["nn.avg_pool_v2"]) @dispatch.add_dispatch_support def avg_pool_v2(input, ksize, strides, padding, data_format=None, name=None): # pylint: disable=redefined-builtin """Performs the avg pooling on the input. Each entry in `output` is the mean of the corresponding size `ksize` window in `value`. Args: input: Tensor of rank N+2, of shape `[batch_size] + input_spatial_shape + [num_channels]` if `data_format` does not start with "NC" (default), or `[batch_size, num_channels] + input_spatial_shape` if data_format starts with "NC". Pooling happens over the spatial dimensions only. ksize: An int or list of `ints` that has length `1`, `N` or `N+2`. The size of the window for each dimension of the input tensor. strides: An int or list of `ints` that has length `1`, `N` or `N+2`. The stride of the sliding window for each dimension of the input tensor. padding: A string, either `'VALID'` or `'SAME'`. The padding algorithm. See [here](https://www.tensorflow.org/api_docs/python/tf/nn#notes_on_padding_2) for more information. data_format: A string. Specifies the channel dimension. For N=1 it can be either "NWC" (default) or "NCW", for N=2 it can be either "NHWC" (default) or "NCHW" and for N=3 either "NDHWC" (default) or "NCDHW". name: Optional name for the operation. Returns: A `Tensor` of format specified by `data_format`. The average pooled output tensor. """ if input.shape is not None: n = len(input.shape) - 2 elif data_format is not None: n = len(data_format) - 2 else: raise ValueError( "`input` must have a static shape or `data_format` must be given. " f"Received: input.shape={input.shape} and " f"data_format={data_format}") if not 1 <= n <= 3: raise ValueError( f"`input.shape.rank` must be 3, 4 or 5. 
Received: " f"input.shape={input.shape} of rank {n + 2}.") if data_format is None: channel_index = n + 1 else: channel_index = 1 if data_format.startswith("NC") else n + 1 ksize = _get_sequence(ksize, n, channel_index, "ksize") strides = _get_sequence(strides, n, channel_index, "strides") avg_pooling_ops = { 1: avg_pool1d, 2: gen_nn_ops.avg_pool, 3: gen_nn_ops.avg_pool3d } op = avg_pooling_ops[n] return op( input, ksize=ksize, strides=strides, padding=padding, data_format=data_format, name=name) @tf_export(v1=["nn.avg_pool", "nn.avg_pool2d"]) @dispatch.add_dispatch_support def avg_pool(value, ksize, strides, padding, data_format="NHWC", name=None, input=None): # pylint: disable=redefined-builtin """Performs the average pooling on the input. Each entry in `output` is the mean of the corresponding size `ksize` window in `value`. Args: value: A 4-D `Tensor` of shape `[batch, height, width, channels]` and type `float32`, `float64`, `qint8`, `quint8`, or `qint32`. ksize: An int or list of `ints` that has length `1`, `2` or `4`. The size of the window for each dimension of the input tensor. strides: An int or list of `ints` that has length `1`, `2` or `4`. The stride of the sliding window for each dimension of the input tensor. padding: A string, either `'VALID'` or `'SAME'`. The padding algorithm. See the "returns" section of `tf.nn.convolution` for details. data_format: A string. 'NHWC' and 'NCHW' are supported. name: Optional name for the operation. input: Alias for value. Returns: A `Tensor` with the same type as `value`. The average pooled output tensor. """ with ops.name_scope(name, "AvgPool", [value]) as name: value = deprecation.deprecated_argument_lookup( "input", input, "value", value) if data_format is None: data_format = "NHWC" channel_index = 1 if data_format.startswith("NC") else 3 ksize = _get_sequence(ksize, 2, channel_index, "ksize") strides = _get_sequence(strides, 2, channel_index, "strides") return gen_nn_ops.avg_pool( value, ksize=ksize, strides=strides, padding=padding, data_format=data_format, name=name) @tf_export("nn.avg_pool2d", v1=[]) @dispatch.add_dispatch_support def avg_pool2d(input, ksize, strides, padding, data_format="NHWC", name=None): # pylint: disable=redefined-builtin """Performs the average pooling on the input. Each entry in `output` is the mean of the corresponding size `ksize` window in `value`. Args: input: A 4-D `Tensor` of shape `[batch, height, width, channels]` and type `float32`, `float64`, `qint8`, `quint8`, or `qint32`. ksize: An int or list of `ints` that has length `1`, `2` or `4`. The size of the window for each dimension of the input tensor. strides: An int or list of `ints` that has length `1`, `2` or `4`. The stride of the sliding window for each dimension of the input tensor. padding: A string, either `'VALID'` or `'SAME'`. The padding algorithm. See [here](https://www.tensorflow.org/api_docs/python/tf/nn#notes_on_padding_2) for more information. data_format: A string. 'NHWC' and 'NCHW' are supported. name: Optional name for the operation. Returns: A `Tensor` with the same type as `value`. The average pooled output tensor. 
""" with ops.name_scope(name, "AvgPool2D", [input]) as name: if data_format is None: data_format = "NHWC" channel_index = 1 if data_format.startswith("NC") else 3 ksize = _get_sequence(ksize, 2, channel_index, "ksize") strides = _get_sequence(strides, 2, channel_index, "strides") return gen_nn_ops.avg_pool( input, ksize=ksize, strides=strides, padding=padding, data_format=data_format, name=name) @tf_export("nn.avg_pool1d") @dispatch.add_dispatch_support def avg_pool1d(input, ksize, strides, padding, data_format="NWC", name=None): # pylint: disable=redefined-builtin """Performs the average pooling on the input. Each entry in `output` is the mean of the corresponding size `ksize` window in `value`. Note internally this op reshapes and uses the underlying 2d operation. Args: input: A 3-D `Tensor` of the format specified by `data_format`. ksize: An int or list of `ints` that has length `1` or `3`. The size of the window for each dimension of the input tensor. strides: An int or list of `ints` that has length `1` or `3`. The stride of the sliding window for each dimension of the input tensor. padding: A string, either `'VALID'` or `'SAME'`. The padding algorithm. See [here](https://www.tensorflow.org/api_docs/python/tf/nn#notes_on_padding_2) for more information. data_format: An optional string from: "NWC", "NCW". Defaults to "NWC". name: A name for the operation (optional). Returns: A `Tensor` of format specified by `data_format`. The max pooled output tensor. """ with ops.name_scope(name, "AvgPool1D", [input]) as name: if data_format is None: data_format = "NWC" channel_index = 1 if data_format.startswith("NC") else 2 ksize = [1] + _get_sequence(ksize, 1, channel_index, "ksize") strides = [1] + _get_sequence(strides, 1, channel_index, "strides") expanding_dim = 1 if data_format == "NWC" else 2 data_format = "NHWC" if data_format == "NWC" else "NCHW" input = array_ops.expand_dims_v2(input, expanding_dim) result = gen_nn_ops.avg_pool( input, ksize=ksize, strides=strides, padding=padding, data_format=data_format, name=name) return array_ops.squeeze(result, expanding_dim) @tf_export("nn.avg_pool3d") @dispatch.add_dispatch_support def avg_pool3d(input, ksize, strides, padding, data_format="NDHWC", name=None): # pylint: disable=redefined-builtin """Performs the average pooling on the input. Each entry in `output` is the mean of the corresponding size `ksize` window in `value`. Args: input: A 5-D `Tensor` of shape `[batch, depth, height, width, channels]` and type `float32`, `float64`, `qint8`, `quint8`, or `qint32`. ksize: An int or list of `ints` that has length `1`, `3` or `5`. The size of the window for each dimension of the input tensor. strides: An int or list of `ints` that has length `1`, `3` or `5`. The stride of the sliding window for each dimension of the input tensor. padding: A string, either `'VALID'` or `'SAME'`. The padding algorithm. See [here](https://www.tensorflow.org/api_docs/python/tf/nn#notes_on_padding_2) for more information. data_format: A string. 'NDHWC' and 'NCDHW' are supported. name: Optional name for the operation. Returns: A `Tensor` with the same type as `value`. The average pooled output tensor. 
""" with ops.name_scope(name, "AvgPool3D", [input]) as name: if data_format is None: data_format = "NDHWC" channel_index = 1 if data_format.startswith("NC") else 3 ksize = _get_sequence(ksize, 3, channel_index, "ksize") strides = _get_sequence(strides, 3, channel_index, "strides") return gen_nn_ops.avg_pool3d( input, ksize=ksize, strides=strides, padding=padding, data_format=data_format, name=name) # pylint: disable=redefined-builtin @tf_export("nn.max_pool", v1=["nn.max_pool_v2"]) @dispatch.add_dispatch_support def max_pool_v2(input, ksize, strides, padding, data_format=None, name=None): """Performs max pooling on the input. For a given window of `ksize`, takes the maximum value within that window. Used for reducing computation and preventing overfitting. Consider an example of pooling with 2x2, non-overlapping windows: >>> matrix = tf.constant([ ... [0, 0, 1, 7], ... [0, 2, 0, 0], ... [5, 2, 0, 0], ... [0, 0, 9, 8], ... ]) >>> reshaped = tf.reshape(matrix, (1, 4, 4, 1)) >>> tf.nn.max_pool(reshaped, ksize=2, strides=2, padding="SAME") <tf.Tensor: shape=(1, 2, 2, 1), dtype=int32, numpy= array([[[[2], [7]], [[5], [9]]]], dtype=int32)> We can adjust the window size using the `ksize` parameter. For example, if we were to expand the window to 3: >>> tf.nn.max_pool(reshaped, ksize=3, strides=2, padding="SAME") <tf.Tensor: shape=(1, 2, 2, 1), dtype=int32, numpy= array([[[[5], [7]], [[9], [9]]]], dtype=int32)> We've now picked up two additional large numbers (5 and 9) in two of the pooled spots. Note that our windows are now overlapping, since we're still moving by 2 units on each iteration. This is causing us to see the same 9 repeated twice, since it is part of two overlapping windows. We can adjust how far we move our window with each iteration using the `strides` parameter. Updating this to the same value as our window size eliminates the overlap: >>> tf.nn.max_pool(reshaped, ksize=3, strides=3, padding="SAME") <tf.Tensor: shape=(1, 2, 2, 1), dtype=int32, numpy= array([[[[2], [7]], [[5], [9]]]], dtype=int32)> Because the window does not neatly fit into our input, padding is added around the edges, giving us the same result as when we used a 2x2 window. We can skip padding altogether and simply drop the windows that do not fully fit into our input by instead passing `"VALID"` to the `padding` argument: >>> tf.nn.max_pool(reshaped, ksize=3, strides=3, padding="VALID") <tf.Tensor: shape=(1, 1, 1, 1), dtype=int32, numpy=array([[[[5]]]], dtype=int32)> Now we've grabbed the largest value in the 3x3 window starting from the upper- left corner. Since no other windows fit in our input, they are dropped. Args: input: Tensor of rank N+2, of shape `[batch_size] + input_spatial_shape + [num_channels]` if `data_format` does not start with "NC" (default), or `[batch_size, num_channels] + input_spatial_shape` if data_format starts with "NC". Pooling happens over the spatial dimensions only. ksize: An int or list of `ints` that has length `1`, `N` or `N+2`. The size of the window for each dimension of the input tensor. strides: An int or list of `ints` that has length `1`, `N` or `N+2`. The stride of the sliding window for each dimension of the input tensor. padding: Either the `string` `"SAME"` or `"VALID"` indicating the type of padding algorithm to use, or a list indicating the explicit paddings at the start and end of each dimension. See [here](https://www.tensorflow.org/api_docs/python/tf/nn#notes_on_padding_2) for more information. 
When explicit padding is used and data_format is `"NHWC"`, this should be in the form `[[0, 0], [pad_top, pad_bottom], [pad_left, pad_right], [0, 0]]`. When explicit padding used and data_format is `"NCHW"`, this should be in the form `[[0, 0], [0, 0], [pad_top, pad_bottom], [pad_left, pad_right]]`. When using explicit padding, the size of the paddings cannot be greater than the sliding window size. data_format: A string. Specifies the channel dimension. For N=1 it can be either "NWC" (default) or "NCW", for N=2 it can be either "NHWC" (default) or "NCHW" and for N=3 either "NDHWC" (default) or "NCDHW". name: Optional name for the operation. Returns: A `Tensor` of format specified by `data_format`. The max pooled output tensor. """ if input.shape is not None: n = len(input.shape) - 2 elif data_format is not None: n = len(data_format) - 2 else: raise ValueError( "`input` must have a static shape or a data format must be given. " f"Received: input.shape={input.shape} and " f"data_format={data_format}") if not 1 <= n <= 3: raise ValueError( f"`input.shape.rank` must be 3, 4 or 5. Received: " f"input.shape={input.shape} of rank {n + 2}.") if data_format is None: channel_index = n + 1 else: channel_index = 1 if data_format.startswith("NC") else n + 1 if isinstance(padding, (list, tuple)) and data_format == "NCHW_VECT_C": raise ValueError("`data_format='NCHW_VECT_C'` is not supported with " f"explicit padding. Received: padding={padding}") ksize = _get_sequence(ksize, n, channel_index, "ksize") strides = _get_sequence(strides, n, channel_index, "strides") if (isinstance(padding, (list, tuple)) and n == 3): raise ValueError("Explicit padding is not supported with an input " f"tensor of rank 5. Received: padding={padding}") max_pooling_ops = { 1: max_pool1d, 2: max_pool2d, 3: gen_nn_ops.max_pool3d } op = max_pooling_ops[n] return op( input, ksize=ksize, strides=strides, padding=padding, data_format=data_format, name=name) # pylint: enable=redefined-builtin @tf_export(v1=["nn.max_pool"]) @dispatch.add_dispatch_support def max_pool(value, ksize, strides, padding, data_format="NHWC", name=None, input=None): # pylint: disable=redefined-builtin """Performs the max pooling on the input. Args: value: A 4-D `Tensor` of the format specified by `data_format`. ksize: An int or list of `ints` that has length `1`, `2` or `4`. The size of the window for each dimension of the input tensor. strides: An int or list of `ints` that has length `1`, `2` or `4`. The stride of the sliding window for each dimension of the input tensor. padding: Either the `string` `"SAME"` or `"VALID"` indicating the type of padding algorithm to use, or a list indicating the explicit paddings at the start and end of each dimension. When explicit padding is used and data_format is `"NHWC"`, this should be in the form `[[0, 0], [pad_top, pad_bottom], [pad_left, pad_right], [0, 0]]`. When explicit padding used and data_format is `"NCHW"`, this should be in the form `[[0, 0], [0, 0], [pad_top, pad_bottom], [pad_left, pad_right]]`. When using explicit padding, the size of the paddings cannot be greater than the sliding window size. data_format: A string. 'NHWC', 'NCHW' and 'NCHW_VECT_C' are supported. name: Optional name for the operation. input: Alias for value. Returns: A `Tensor` of format specified by `data_format`. The max pooled output tensor. 
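
  A minimal illustrative sketch of calling this op through the v1 endpoint
  (`tf.compat.v1.nn.max_pool`); with a single 2x2 window the result is simply
  the largest input value:

  ```python
  value = tf.reshape(tf.constant([[1., 2.], [3., 4.]]), [1, 2, 2, 1])
  y = tf.compat.v1.nn.max_pool(value, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1],
                               padding="VALID")
  # y has shape [1, 1, 1, 1] and contains 4.0.
  ```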
""" value = deprecation.deprecated_argument_lookup("input", input, "value", value) with ops.name_scope(name, "MaxPool", [value]) as name: if data_format is None: data_format = "NHWC" channel_index = 1 if data_format.startswith("NC") else 3 ksize = _get_sequence(ksize, 2, channel_index, "ksize") strides = _get_sequence(strides, 2, channel_index, "strides") if isinstance(padding, (list, tuple)) and data_format == "NCHW_VECT_C": raise ValueError("`data_format='NCHW_VECT_C'` is not supported with " f"explicit padding. Received: padding={padding}") padding, explicit_paddings = convert_padding(padding) if ((np.isscalar(ksize) and ksize == 0) or (isinstance(ksize, (list, tuple, np.ndarray)) and any(v == 0 for v in ksize))): raise ValueError(f"`ksize` cannot be zero. Received: ksize={ksize}") return gen_nn_ops.max_pool( value, ksize=ksize, strides=strides, padding=padding, explicit_paddings=explicit_paddings, data_format=data_format, name=name) # pylint: disable=redefined-builtin @tf_export("nn.max_pool1d") @dispatch.add_dispatch_support def max_pool1d(input, ksize, strides, padding, data_format="NWC", name=None): """Performs the max pooling on the input. Note internally this op reshapes and uses the underlying 2d operation. Args: input: A 3-D `Tensor` of the format specified by `data_format`. ksize: An int or list of `ints` that has length `1` or `3`. The size of the window for each dimension of the input tensor. strides: An int or list of `ints` that has length `1` or `3`. The stride of the sliding window for each dimension of the input tensor. padding: Either the `string` `"SAME"` or `"VALID"` indicating the type of padding algorithm to use, or a list indicating the explicit paddings at the start and end of each dimension. See [here](https://www.tensorflow.org/api_docs/python/tf/nn#notes_on_padding_2) for more information. When explicit padding is used and data_format is `"NWC"`, this should be in the form `[[0, 0], [pad_left, pad_right], [0, 0]]`. When explicit padding used and data_format is `"NCW"`, this should be in the form `[[0, 0], [0, 0], [pad_left, pad_right]]`. When using explicit padding, the size of the paddings cannot be greater than the sliding window size. data_format: An optional string from: "NWC", "NCW". Defaults to "NWC". name: A name for the operation (optional). Returns: A `Tensor` of format specified by `data_format`. The max pooled output tensor. """ with ops.name_scope(name, "MaxPool1d", [input]) as name: if isinstance(padding, (list, tuple)) and data_format == "NCHW_VECT_C": raise ValueError("`data_format='NCHW_VECT_C'` is not supported with " f"explicit padding. 
Received: padding={padding}")
    if data_format is None:
      data_format = "NWC"
    channel_index = 1 if data_format.startswith("NC") else 2
    ksize = [1] + _get_sequence(ksize, 1, channel_index, "ksize")
    strides = [1] + _get_sequence(strides, 1, channel_index, "strides")
    padding, explicit_paddings = convert_padding(padding, 3)
    if padding == "EXPLICIT":
      explicit_paddings = [0, 0] + explicit_paddings

    expanding_dim = 1 if data_format == "NWC" else 2
    data_format = "NHWC" if data_format == "NWC" else "NCHW"

    input = array_ops.expand_dims_v2(input, expanding_dim)
    result = gen_nn_ops.max_pool(
        input,
        ksize=ksize,
        strides=strides,
        padding=padding,
        explicit_paddings=explicit_paddings,
        data_format=data_format,
        name=name)
    return array_ops.squeeze(result, expanding_dim)
# pylint: enable=redefined-builtin


# pylint: disable=redefined-builtin
@tf_export("nn.max_pool2d")
@dispatch.add_dispatch_support
def max_pool2d(input, ksize, strides, padding, data_format="NHWC", name=None):
  """Performs max pooling on 2D spatial data such as images.

  This is a more specific version of `tf.nn.max_pool` where the input tensor
  is 4D, representing 2D spatial data such as images. Using either API is
  equivalent.

  Downsamples the input images along their spatial dimensions (height and
  width) by taking the maximum over an input window defined by `ksize`.
  The window is shifted by `strides` along each dimension.

  For example, for `strides=(2, 2)` and `padding=VALID`, windows that extend
  outside of the input are not included in the output:

  >>> x = tf.constant([[1., 2., 3., 4.],
  ...                  [5., 6., 7., 8.],
  ...                  [9., 10., 11., 12.]])
  >>> # Add the `batch` and `channels` dimensions.
  >>> x = x[tf.newaxis, :, :, tf.newaxis]
  >>> result = tf.nn.max_pool2d(x, ksize=(2, 2), strides=(2, 2),
  ...                           padding="VALID")
  >>> result[0, :, :, 0]
  <tf.Tensor: shape=(1, 2), dtype=float32, numpy=
  array([[6., 8.]], dtype=float32)>

  With `padding=SAME`, we get:

  >>> x = tf.constant([[1., 2., 3., 4.],
  ...                  [5., 6., 7., 8.],
  ...                  [9., 10., 11., 12.]])
  >>> x = x[tf.newaxis, :, :, tf.newaxis]
  >>> result = tf.nn.max_pool2d(x, ksize=(2, 2), strides=(2, 2),
  ...                           padding='SAME')
  >>> result[0, :, :, 0]
  <tf.Tensor: shape=(2, 2), dtype=float32, numpy=
  array([[ 6.,  8.],
         [10., 12.]], dtype=float32)>

  We can also specify padding explicitly. The following example adds width-1
  padding on all sides (top, bottom, left, right):

  >>> x = tf.constant([[1., 2., 3., 4.],
  ...                  [5., 6., 7., 8.],
  ...                  [9., 10., 11., 12.]])
  >>> x = x[tf.newaxis, :, :, tf.newaxis]
  >>> result = tf.nn.max_pool2d(x, ksize=(2, 2), strides=(2, 2),
  ...                           padding=[[0, 0], [1, 1], [1, 1], [0, 0]])
  >>> result[0, :, :, 0]
  <tf.Tensor: shape=(2, 3), dtype=float32, numpy=
  array([[ 1.,  3.,  4.],
         [ 9., 11., 12.]], dtype=float32)>

  For more examples and detail, see `tf.nn.max_pool`.

  Args:
    input: A 4-D `Tensor` of the format specified by `data_format`.
    ksize: An int or list of `ints` that has length `1`, `2` or `4`. The size
      of the window for each dimension of the input tensor. If only one
      integer is specified, then we apply the same window for all 4 dims. If
      two are provided then we use those for H, W dimensions and keep N, C
      dimension window size = 1.
    strides: An int or list of `ints` that has length `1`, `2` or `4`. The
      stride of the sliding window for each dimension of the input tensor.
      If only one integer is specified, we apply the same stride to all 4
      dims. If two are provided we use those for the H, W dimensions and keep
      N, C of stride = 1.
padding: Either the `string` `"SAME"` or `"VALID"` indicating the type of padding algorithm to use, or a list indicating the explicit paddings at the start and end of each dimension. See [here](https://www.tensorflow.org/api_docs/python/tf/nn#notes_on_padding_2) for more information. When explicit padding is used and data_format is `"NHWC"`, this should be in the form `[[0, 0], [pad_top, pad_bottom], [pad_left, pad_right], [0, 0]]`. When explicit padding used and data_format is `"NCHW"`, this should be in the form `[[0, 0], [0, 0], [pad_top, pad_bottom], [pad_left, pad_right]]`. When using explicit padding, the size of the paddings cannot be greater than the sliding window size. data_format: A string. 'NHWC', 'NCHW' and 'NCHW_VECT_C' are supported. name: Optional name for the operation. Returns: A `Tensor` of format specified by `data_format`. The max pooled output tensor. """ with ops.name_scope(name, "MaxPool2d", [input]) as name: if data_format is None: data_format = "NHWC" channel_index = 1 if data_format.startswith("NC") else 3 ksize = _get_sequence(ksize, 2, channel_index, "ksize") strides = _get_sequence(strides, 2, channel_index, "strides") if isinstance(padding, (list, tuple)) and data_format == "NCHW_VECT_C": raise ValueError("`data_format='NCHW_VECT_C'` is not supported with " f"explicit padding. Received: padding={padding}") padding, explicit_paddings = convert_padding(padding) return gen_nn_ops.max_pool( input, ksize=ksize, strides=strides, padding=padding, explicit_paddings=explicit_paddings, data_format=data_format, name=name) # pylint: enable=redefined-builtin # pylint: disable=redefined-builtin @tf_export("nn.max_pool3d") @dispatch.add_dispatch_support def max_pool3d(input, ksize, strides, padding, data_format="NDHWC", name=None): """Performs the max pooling on the input. Args: input: A 5-D `Tensor` of the format specified by `data_format`. ksize: An int or list of `ints` that has length `1`, `3` or `5`. The size of the window for each dimension of the input tensor. strides: An int or list of `ints` that has length `1`, `3` or `5`. The stride of the sliding window for each dimension of the input tensor. padding: A string, either `'VALID'` or `'SAME'`. The padding algorithm. See [here](https://www.tensorflow.org/api_docs/python/tf/nn#notes_on_padding_2) for more information. data_format: An optional string from: "NDHWC", "NCDHW". Defaults to "NDHWC". The data format of the input and output data. With the default format "NDHWC", the data is stored in the order of: [batch, in_depth, in_height, in_width, in_channels]. Alternatively, the format could be "NCDHW", the data storage order is: [batch, in_channels, in_depth, in_height, in_width]. name: A name for the operation (optional). Returns: A `Tensor` of format specified by `data_format`. The max pooled output tensor. 
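
  A minimal illustrative sketch (one 2x2x2 window, so the output is the
  largest of the eight input values):

  ```python
  x = tf.reshape(tf.range(1., 9.), [1, 2, 2, 2, 1])  # values 1.0 ... 8.0
  y = tf.nn.max_pool3d(x, ksize=2, strides=2, padding="VALID")
  # y has shape [1, 1, 1, 1, 1] and contains 8.0.
  ```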
""" with ops.name_scope(name, "MaxPool3D", [input]) as name: if data_format is None: data_format = "NDHWC" channel_index = 1 if data_format.startswith("NC") else 4 ksize = _get_sequence(ksize, 3, channel_index, "ksize") strides = _get_sequence(strides, 3, channel_index, "strides") return gen_nn_ops.max_pool3d( input, ksize=ksize, strides=strides, padding=padding, data_format=data_format, name=name) # pylint: enable=redefined-builtin @tf_export("nn.max_pool_with_argmax", v1=[]) @dispatch.add_dispatch_support def max_pool_with_argmax_v2( input, # pylint: disable=redefined-builtin ksize, strides, padding, data_format="NHWC", output_dtype=dtypes.int64, include_batch_in_index=False, name=None): """Performs max pooling on the input and outputs both max values and indices. The indices in `argmax` are flattened, so that a maximum value at position `[b, y, x, c]` becomes flattened index: `(y * width + x) * channels + c` if `include_batch_in_index` is False; `((b * height + y) * width + x) * channels + c` if `include_batch_in_index` is True. The indices returned are always in `[0, height) x [0, width)` before flattening, even if padding is involved and the mathematically correct answer is outside (either negative or too large). This is a bug, but fixing it is difficult to do in a safe backwards compatible way, especially due to flattening. Args: input: A `Tensor`. Must be one of the following types: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `int64`, `bfloat16`, `uint16`, `half`, `uint32`, `uint64`. 4-D with shape `[batch, height, width, channels]`. Input to pool over. ksize: An int or list of `ints` that has length `1`, `2` or `4`. The size of the window for each dimension of the input tensor. strides: An int or list of `ints` that has length `1`, `2` or `4`. The stride of the sliding window for each dimension of the input tensor. padding: A `string` from: `"SAME", "VALID"`. The type of padding algorithm to use. See [here](https://www.tensorflow.org/api_docs/python/tf/nn#notes_on_padding_2) for more information. data_format: An optional `string`, must be set to `"NHWC"`. Defaults to `"NHWC"`. Specify the data format of the input and output data. output_dtype: An optional `tf.DType` from: `tf.int32, tf.int64`. Defaults to `tf.int64`. The dtype of the returned argmax tensor. include_batch_in_index: An optional `boolean`. Defaults to `False`. Whether to include batch dimension in flattened index of `argmax`. name: A name for the operation (optional). Returns: A tuple of `Tensor` objects (output, argmax). output: A `Tensor`. Has the same type as `input`. argmax: A `Tensor` of type `output_dtype`. """ if data_format != "NHWC": raise ValueError("`data_format` values other than 'NHWC' are not " f"supported. Received: data_format={data_format}") ksize = _get_sequence(ksize, 2, 3, "ksize") strides = _get_sequence(strides, 2, 3, "strides") return gen_nn_ops.max_pool_with_argmax( input=input, ksize=ksize, strides=strides, padding=padding, Targmax=output_dtype, include_batch_in_index=include_batch_in_index, name=name) @tf_export(v1=["nn.max_pool_with_argmax"]) @dispatch.add_dispatch_support def max_pool_with_argmax_v1( # pylint: disable=missing-docstring,invalid-name input, # pylint: disable=redefined-builtin ksize, strides, padding, data_format="NHWC", Targmax=None, name=None, output_dtype=None, include_batch_in_index=False): if data_format != "NHWC": raise ValueError("`data_format` values other than 'NHWC' are not " f"supported. 
Received: data_format={data_format}") Targmax = deprecated_argument_lookup( "output_dtype", output_dtype, "Targmax", Targmax) if Targmax is None: Targmax = dtypes.int64 return gen_nn_ops.max_pool_with_argmax( input=input, ksize=ksize, strides=strides, padding=padding, Targmax=Targmax, include_batch_in_index=include_batch_in_index, name=name) max_pool_with_argmax_v1.__doc__ = gen_nn_ops.max_pool_with_argmax.__doc__ @ops.RegisterStatistics("Conv3D", "flops") def _calc_conv3d_flops(graph, node): """Calculates the compute resources needed for Conv3D.""" input_shape = graph_util.tensor_shape_from_node_def_name(graph, node.input[0]) input_shape.assert_is_fully_defined() filter_shape = graph_util.tensor_shape_from_node_def_name( graph, node.input[1]) filter_shape.assert_is_fully_defined() output_shape = graph_util.tensor_shape_from_node_def_name(graph, node.name) output_shape.assert_is_fully_defined() filter_time = int(filter_shape[0]) filter_height = int(filter_shape[1]) filter_width = int(filter_shape[2]) filter_in_depth = int(filter_shape[3]) output_count = np.prod(output_shape.as_list(), dtype=np.int64) return ops.OpStats("flops", (output_count * filter_in_depth * filter_time * filter_height * filter_width * 2)) @ops.RegisterStatistics("Conv2D", "flops") def _calc_conv_flops(graph, node): """Calculates the compute resources needed for Conv2D.""" input_shape = graph_util.tensor_shape_from_node_def_name(graph, node.input[0]) input_shape.assert_is_fully_defined() filter_shape = graph_util.tensor_shape_from_node_def_name( graph, node.input[1]) filter_shape.assert_is_fully_defined() output_shape = graph_util.tensor_shape_from_node_def_name(graph, node.name) output_shape.assert_is_fully_defined() filter_height = int(filter_shape[0]) filter_width = int(filter_shape[1]) filter_in_depth = int(filter_shape[2]) output_count = np.prod(output_shape.as_list(), dtype=np.int64) return ops.OpStats( "flops", (output_count * filter_in_depth * filter_height * filter_width * 2)) @ops.RegisterStatistics("DepthwiseConv2dNative", "flops") def _calc_depthwise_conv_flops(graph, node): """Calculates the compute resources needed for DepthwiseConv2dNative.""" input_shape = graph_util.tensor_shape_from_node_def_name(graph, node.input[0]) input_shape.assert_is_fully_defined() filter_shape = graph_util.tensor_shape_from_node_def_name( graph, node.input[1]) filter_shape.assert_is_fully_defined() output_shape = graph_util.tensor_shape_from_node_def_name(graph, node.name) output_shape.assert_is_fully_defined() filter_height = int(filter_shape[0]) filter_width = int(filter_shape[1]) output_count = np.prod(output_shape.as_list(), dtype=np.int64) return ops.OpStats("flops", (output_count * filter_height * filter_width * 2)) @ops.RegisterStatistics("BiasAdd", "flops") def _calc_bias_add_flops(graph, node): """Calculates the computing needed for BiasAdd.""" input_shape = graph_util.tensor_shape_from_node_def_name(graph, node.input[0]) input_shape.assert_is_fully_defined() input_count = np.prod(input_shape.as_list()) return ops.OpStats("flops", input_count) @tf_export(v1=["nn.xw_plus_b"]) @dispatch.add_dispatch_support def xw_plus_b(x, weights, biases, name=None): # pylint: disable=invalid-name """Computes matmul(x, weights) + biases. Args: x: a 2D tensor. Dimensions typically: batch, in_units weights: a 2D tensor. Dimensions typically: in_units, out_units biases: a 1D tensor. Dimensions: out_units name: A name for the operation (optional). If not specified "xw_plus_b" is used. 
  Returns:
    A 2-D Tensor computing matmul(x, weights) + biases.
    Dimensions typically: batch, out_units.
  """
  with ops.name_scope(name, "xw_plus_b", [x, weights, biases]) as name:
    x = ops.convert_to_tensor(x, name="x")
    weights = ops.convert_to_tensor(weights, name="weights")
    biases = ops.convert_to_tensor(biases, name="biases")
    mm = math_ops.matmul(x, weights)
    return bias_add(mm, biases, name=name)


def xw_plus_b_v1(x, weights, biases, name=None):
  """Computes matmul(x, weights) + biases.

  This is a deprecated version of `xw_plus_b` that will soon be removed.

  Args:
    x: a 2D tensor.  Dimensions typically: batch, in_units
    weights: a 2D tensor.  Dimensions typically: in_units, out_units
    biases: a 1D tensor.  Dimensions: out_units
    name: A name for the operation (optional).  If not specified
      "xw_plus_b_v1" is used.

  Returns:
    A 2-D Tensor computing matmul(x, weights) + biases.
    Dimensions typically: batch, out_units.
  """
  with ops.name_scope(name, "xw_plus_b_v1", [x, weights, biases]) as name:
    x = ops.convert_to_tensor(x, name="x")
    weights = ops.convert_to_tensor(weights, name="weights")
    biases = ops.convert_to_tensor(biases, name="biases")
    mm = math_ops.matmul(x, weights)
    return bias_add_v1(mm, biases, name=name)


def _get_noise_shape(x, noise_shape):
  # If noise_shape is None, return immediately.
  if noise_shape is None:
    return array_ops.shape(x)

  try:
    # Best effort to figure out the intended shape.
    # If not possible, let the op handle it.
    # In eager mode an exception will be raised.
    noise_shape_ = tensor_shape.as_shape(noise_shape)
  except (TypeError, ValueError):
    return noise_shape

  if x.shape.dims is not None and len(x.shape.dims) == len(noise_shape_.dims):
    new_dims = []
    for i, dim in enumerate(x.shape.dims):
      if noise_shape_.dims[i].value is None and dim.value is not None:
        new_dims.append(dim.value)
      else:
        new_dims.append(noise_shape_.dims[i].value)
    return tensor_shape.TensorShape(new_dims)

  return noise_shape


@tf_export(v1=["nn.dropout"])
@dispatch.add_dispatch_support
@deprecation.deprecated_args(None, "Please use `rate` instead of `keep_prob`. "
                             "Rate should be set to `rate = 1 - keep_prob`.",
                             "keep_prob")
def dropout(x, keep_prob=None, noise_shape=None, seed=None, name=None,
            rate=None):
  """Computes dropout.

  For each element of `x`, with probability `rate`, outputs `0`, and otherwise
  scales up the input by `1 / (1-rate)`. The scaling is such that the expected
  sum is unchanged.

  By default, each element is kept or dropped independently.  If `noise_shape`
  is specified, it must be
  [broadcastable](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)
  to the shape of `x`, and only dimensions with `noise_shape[i] == shape(x)[i]`
  will make independent decisions.  For example, if `shape(x) = [k, l, m, n]`
  and `noise_shape = [k, 1, 1, n]`, each batch and channel component will be
  kept independently and each row and column will be kept or not kept together.

  Args:
    x: A floating point tensor.
    keep_prob: (deprecated) A deprecated alias for `(1-rate)`.
    noise_shape: A 1-D integer `Tensor`, representing the shape for randomly
      generated keep/drop flags.
    seed: A Python integer. Used to create random seeds. See
      `tf.random.set_seed` for behavior.
    name: A name for this operation (optional).
    rate: A scalar `Tensor` with the same type as `x`. The probability that
      each element of `x` is discarded.

  Returns:
    A Tensor of the same shape as `x`.

  Raises:
    ValueError: If `rate` is not in `[0, 1)` or if `x` is not a floating
      point tensor.
  """
  try:
    rate_from_keep_prob = 1.
- keep_prob if keep_prob is not None else None except TypeError: raise ValueError("`keep_prob` must be a floating point number or Tensor. " f"Received: keep_prob={keep_prob}") rate = deprecation.deprecated_argument_lookup( "rate", rate, "keep_prob", rate_from_keep_prob) if rate is None: raise ValueError(f"`rate` must be provided. Received: rate={rate}") return dropout_v2(x, rate, noise_shape=noise_shape, seed=seed, name=name) @tf_export("nn.dropout", v1=[]) @dispatch.add_dispatch_support def dropout_v2(x, rate, noise_shape=None, seed=None, name=None): """Computes dropout: randomly sets elements to zero to prevent overfitting. Warning: You should consider using `tf.nn.experimental.stateless_dropout` instead of this function. The difference between `tf.nn.experimental.stateless_dropout` and this function is analogous to the difference between `tf.random.stateless_uniform` and `tf.random.uniform`. Please see [Random number generation](https://www.tensorflow.org/guide/random_numbers) guide for a detailed description of the various RNG systems in TF. As the guide states, legacy stateful RNG ops like `tf.random.uniform` and `tf.nn.dropout` are not deprecated yet but highly discouraged, because their states are hard to control. Note: The behavior of dropout has changed between TensorFlow 1.x and 2.x. When converting 1.x code, please use named arguments to ensure behavior stays consistent. See also: `tf.keras.layers.Dropout` for a dropout layer. [Dropout](https://arxiv.org/abs/1207.0580) is useful for regularizing DNN models. Inputs elements are randomly set to zero (and the other elements are rescaled). This encourages each node to be independently useful, as it cannot rely on the output of other nodes. More precisely: With probability `rate` elements of `x` are set to `0`. The remaining elements are scaled up by `1.0 / (1 - rate)`, so that the expected value is preserved. >>> tf.random.set_seed(0) >>> x = tf.ones([3,5]) >>> tf.nn.dropout(x, rate = 0.5, seed = 1).numpy() array([[2., 0., 0., 2., 2.], [2., 2., 2., 2., 2.], [2., 0., 2., 0., 2.]], dtype=float32) >>> tf.random.set_seed(0) >>> x = tf.ones([3,5]) >>> tf.nn.dropout(x, rate = 0.8, seed = 1).numpy() array([[0., 0., 0., 5., 5.], [0., 5., 0., 5., 0.], [5., 0., 5., 0., 5.]], dtype=float32) >>> tf.nn.dropout(x, rate = 0.0) == x <tf.Tensor: shape=(3, 5), dtype=bool, numpy= array([[ True, True, True, True, True], [ True, True, True, True, True], [ True, True, True, True, True]])> By default, each element is kept or dropped independently. If `noise_shape` is specified, it must be [broadcastable](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html) to the shape of `x`, and only dimensions with `noise_shape[i] == shape(x)[i]` will make independent decisions. This is useful for dropping whole channels from an image or sequence. For example: >>> tf.random.set_seed(0) >>> x = tf.ones([3,10]) >>> tf.nn.dropout(x, rate = 2/3, noise_shape=[1,10], seed=1).numpy() array([[0., 0., 0., 3., 3., 0., 3., 3., 3., 0.], [0., 0., 0., 3., 3., 0., 3., 3., 3., 0.], [0., 0., 0., 3., 3., 0., 3., 3., 3., 0.]], dtype=float32) Args: x: A floating point tensor. rate: A scalar `Tensor` with the same type as x. The probability that each element is dropped. For example, setting rate=0.1 would drop 10% of input elements. noise_shape: A 1-D integer `Tensor`, representing the shape for randomly generated keep/drop flags. seed: A Python integer. Used to create random seeds. See `tf.random.set_seed` for behavior. name: A name for this operation (optional). 
Returns: A Tensor of the same shape of `x`. Raises: ValueError: If `rate` is not in `[0, 1)` or if `x` is not a floating point tensor. `rate=1` is disallowed, because the output would be all zeros, which is likely not what was intended. """ uniform_sampler = functools.partial(random_ops.random_uniform, seed=seed) def dummy_rng_step(): random_seed.get_seed(seed) return _dropout(x=x, rate=rate, noise_shape=noise_shape, uniform_sampler=uniform_sampler, dummy_rng_step=dummy_rng_step, name=name, default_name="dropout") @tf_export("nn.experimental.stateless_dropout") @dispatch.add_dispatch_support def stateless_dropout(x, rate, seed, rng_alg=None, noise_shape=None, name=None): """Computes dropout: randomly sets elements to zero to prevent overfitting. [Dropout](https://arxiv.org/abs/1207.0580) is useful for regularizing DNN models. Inputs elements are randomly set to zero (and the other elements are rescaled). This encourages each node to be independently useful, as it cannot rely on the output of other nodes. More precisely: With probability `rate` elements of `x` are set to `0`. The remaining elements are scaled up by `1.0 / (1 - rate)`, so that the expected value is preserved. >>> x = tf.ones([3,5]) >>> tf.nn.experimental.stateless_dropout(x, rate=0.5, seed=[1, 0]) <tf.Tensor: shape=(3, 5), dtype=float32, numpy= array([[2., 0., 2., 0., 0.], [0., 0., 2., 0., 2.], [0., 0., 0., 0., 2.]], dtype=float32)> >>> x = tf.ones([3,5]) >>> tf.nn.experimental.stateless_dropout(x, rate=0.8, seed=[1, 0]) <tf.Tensor: shape=(3, 5), dtype=float32, numpy= array([[5., 0., 0., 0., 0.], [0., 0., 0., 0., 5.], [0., 0., 0., 0., 5.]], dtype=float32)> >>> tf.nn.experimental.stateless_dropout(x, rate=0.0, seed=[1, 0]) == x <tf.Tensor: shape=(3, 5), dtype=bool, numpy= array([[ True, True, True, True, True], [ True, True, True, True, True], [ True, True, True, True, True]])> This function is a stateless version of `tf.nn.dropout`, in the sense that no matter how many times you call this function, the same `seed` will lead to the same results, and different `seed` will lead to different results. >>> x = tf.ones([3,5]) >>> tf.nn.experimental.stateless_dropout(x, rate=0.8, seed=[1, 0]) <tf.Tensor: shape=(3, 5), dtype=float32, numpy= array([[5., 0., 0., 0., 0.], [0., 0., 0., 0., 5.], [0., 0., 0., 0., 5.]], dtype=float32)> >>> tf.nn.experimental.stateless_dropout(x, rate=0.8, seed=[1, 0]) <tf.Tensor: shape=(3, 5), dtype=float32, numpy= array([[5., 0., 0., 0., 0.], [0., 0., 0., 0., 5.], [0., 0., 0., 0., 5.]], dtype=float32)> >>> tf.nn.experimental.stateless_dropout(x, rate=0.8, seed=[2, 0]) <tf.Tensor: shape=(3, 5), dtype=float32, numpy= array([[5., 0., 0., 0., 0.], [0., 0., 0., 5., 0.], [0., 0., 0., 0., 0.]], dtype=float32)> >>> tf.nn.experimental.stateless_dropout(x, rate=0.8, seed=[2, 0]) <tf.Tensor: shape=(3, 5), dtype=float32, numpy= array([[5., 0., 0., 0., 0.], [0., 0., 0., 5., 0.], [0., 0., 0., 0., 0.]], dtype=float32)> Compare the above results to those of `tf.nn.dropout` below. The second time `tf.nn.dropout` is called with the same seed, it will give a different output. 
>>> tf.random.set_seed(0) >>> x = tf.ones([3,5]) >>> tf.nn.dropout(x, rate=0.8, seed=1) <tf.Tensor: shape=(3, 5), dtype=float32, numpy= array([[0., 0., 0., 5., 5.], [0., 5., 0., 5., 0.], [5., 0., 5., 0., 5.]], dtype=float32)> >>> tf.nn.dropout(x, rate=0.8, seed=1) <tf.Tensor: shape=(3, 5), dtype=float32, numpy= array([[0., 0., 0., 0., 0.], [0., 0., 0., 5., 0.], [0., 0., 0., 0., 0.]], dtype=float32)> >>> tf.nn.dropout(x, rate=0.8, seed=2) <tf.Tensor: shape=(3, 5), dtype=float32, numpy= array([[0., 0., 0., 0., 0.], [0., 5., 0., 5., 0.], [0., 0., 0., 0., 0.]], dtype=float32)> >>> tf.nn.dropout(x, rate=0.8, seed=2) <tf.Tensor: shape=(3, 5), dtype=float32, numpy= array([[0., 0., 0., 0., 0.], [5., 0., 5., 0., 5.], [0., 5., 0., 0., 5.]], dtype=float32)> The difference between this function and `tf.nn.dropout` is analogous to the difference between `tf.random.stateless_uniform` and `tf.random.uniform`. Please see [Random number generation](https://www.tensorflow.org/guide/random_numbers) guide for a detailed description of the various RNG systems in TF. As the guide states, legacy stateful RNG ops like `tf.random.uniform` and `tf.nn.dropout` are not deprecated yet but highly discouraged, because their states are hard to control. By default, each element is kept or dropped independently. If `noise_shape` is specified, it must be [broadcastable](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html) to the shape of `x`, and only dimensions with `noise_shape[i] == shape(x)[i]` will make independent decisions. This is useful for dropping whole channels from an image or sequence. For example: >>> x = tf.ones([3,10]) >>> tf.nn.experimental.stateless_dropout(x, rate=2/3, noise_shape=[1,10], ... seed=[1, 0]) <tf.Tensor: shape=(3, 10), dtype=float32, numpy= array([[3., 0., 0., 0., 0., 0., 0., 3., 0., 3.], [3., 0., 0., 0., 0., 0., 0., 3., 0., 3.], [3., 0., 0., 0., 0., 0., 0., 3., 0., 3.]], dtype=float32)> Args: x: A floating point tensor. rate: A scalar `Tensor` with the same type as x. The probability that each element is dropped. For example, setting rate=0.1 would drop 10% of input elements. seed: An integer tensor of shape `[2]`. The seed of the random numbers. rng_alg: The algorithm used to generate the random numbers (default to `"auto_select"`). See the `alg` argument of `tf.random.stateless_uniform` for the supported values. noise_shape: A 1-D integer `Tensor`, representing the shape for randomly generated keep/drop flags. name: A name for this operation. Returns: A Tensor of the same shape and dtype of `x`. Raises: ValueError: If `rate` is not in `[0, 1)` or if `x` is not a floating point tensor. `rate=1` is disallowed, because the output would be all zeros, which is likely not what was intended. """ uniform_sampler = functools.partial( stateless_random_ops.stateless_random_uniform, seed=seed, alg=rng_alg) def dummy_rng_step(): pass return _dropout(x=x, rate=rate, noise_shape=noise_shape, uniform_sampler=uniform_sampler, dummy_rng_step=dummy_rng_step, name=name, default_name="stateless_dropout") def _dropout(x, rate, noise_shape, uniform_sampler, dummy_rng_step, name, default_name): """Shared implementation of the various dropout functions. Args: x: same as the namesake in `dropout_v2`. rate: same as the namesake in `dropout_v2`. noise_shape: same as the namesake in `dropout_v2`. uniform_sampler: a callable of signature `(shape, dtype) -> Tensor`, used to generate a tensor of uniformly-distributed random numbers, of the given shape and dtype. 
dummy_rng_step: a callable of signature `() -> None`, to make a dummy RNG call in the fast path. In the fast path where rate is 0, we don't need to generate random numbers, but some samplers still require you to make an RNG call, to make sure that RNG states won't depend on whether the fast path is taken. name: same as the namesake in `dropout_v2`. default_name: a default name in case `name` is `None`. Returns: A Tensor of the same shape and dtype of `x`. """ with ops.name_scope(name, default_name, [x]) as name: is_rate_number = isinstance(rate, numbers.Real) if is_rate_number and (rate < 0 or rate >= 1): raise ValueError("`rate` must be a scalar tensor or a float in the " f"range [0, 1). Received: rate={rate}") x = ops.convert_to_tensor(x, name="x") x_dtype = x.dtype if not x_dtype.is_floating: raise ValueError( "`x.dtype` must be a floating point tensor as `x` will be " f"scaled. Received: x_dtype={x_dtype}") if is_rate_number and rate == 0: # Fast-path: Return the input immediately if rate is non-tensor & is `0`. # We trigger this after all error checking # and after `x` has been converted to a tensor, to prevent inconsistent # tensor conversions/error raising if rate is changed to/from 0. # # We also explicitly call `dummy_rng_step` to make sure # we don't change the random number generation behavior of # stateful random ops by entering a fastpath, # despite not generating a random tensor in the fastpath dummy_rng_step() return x is_executing_eagerly = context.executing_eagerly() if not tensor_util.is_tf_type(rate): if is_rate_number: keep_prob = 1 - rate scale = 1 / keep_prob scale = ops.convert_to_tensor(scale, dtype=x_dtype) ret = gen_math_ops.mul(x, scale) else: raise ValueError( f"`rate` must be a scalar or scalar tensor. Received: rate={rate}") else: rate.get_shape().assert_has_rank(0) rate_dtype = rate.dtype if rate_dtype != x_dtype: if not rate_dtype.is_compatible_with(x_dtype): raise ValueError( "`x.dtype` must be compatible with `rate.dtype`. " f"Received: x.dtype={x_dtype} and rate.dtype={rate_dtype}") rate = gen_math_ops.cast(rate, x_dtype, name="rate") one_tensor = constant_op.constant(1, dtype=x_dtype) ret = gen_math_ops.real_div(x, gen_math_ops.sub(one_tensor, rate)) noise_shape = _get_noise_shape(x, noise_shape) # Sample a uniform distribution on [0.0, 1.0) and select values larger # than or equal to `rate`. random_tensor = uniform_sampler(shape=noise_shape, dtype=x_dtype) keep_mask = random_tensor >= rate ret = gen_math_ops.mul(ret, gen_math_ops.cast(keep_mask, x_dtype)) if not is_executing_eagerly: ret.set_shape(x.get_shape()) return ret @tf_export("math.top_k", "nn.top_k") @dispatch.add_dispatch_support def top_k(input, k=1, sorted=True, name=None): # pylint: disable=redefined-builtin """Finds values and indices of the `k` largest entries for the last dimension. If the input is a vector (rank=1), finds the `k` largest entries in the vector and outputs their values and indices as vectors. Thus `values[j]` is the `j`-th largest entry in `input`, and its index is `indices[j]`. >>> result = tf.math.top_k([1, 2, 98, 1, 1, 99, 3, 1, 3, 96, 4, 1], ... k=3) >>> result.values.numpy() array([99, 98, 96], dtype=int32) >>> result.indices.numpy() array([5, 2, 9], dtype=int32) For matrices (resp. higher rank input), computes the top `k` entries in each row (resp. vector along the last dimension). 
Thus, >>> input = tf.random.normal(shape=(3,4,5,6)) >>> k = 2 >>> values, indices = tf.math.top_k(input, k=k) >>> values.shape.as_list() [3, 4, 5, 2] >>> >>> values.shape == indices.shape == input.shape[:-1] + [k] True The indices can be used to `gather` from a tensor who's shape matches `input`. >>> gathered_values = tf.gather(input, indices, batch_dims=-1) >>> assert tf.reduce_all(gathered_values == values) If two elements are equal, the lower-index element appears first. >>> result = tf.math.top_k([1, 1, 0, 1, 0, 1, 0, 0, 1, 0, 0, 0, 1, 0], ... k=3) >>> result.indices.numpy() array([0, 1, 3], dtype=int32) Args: input: 1-D or higher `Tensor` with last dimension at least `k`. k: 0-D `int32` `Tensor`. Number of top elements to look for along the last dimension (along each row for matrices). sorted: If true the resulting `k` elements will be sorted by the values in descending order. name: Optional name for the operation. Returns: A tuple with two named fields: values: The `k` largest elements along each last dimensional slice. indices: The indices of `values` within the last dimension of `input`. """ return gen_nn_ops.top_kv2(input, k=k, sorted=sorted, name=name) def nth_element(input, n, reverse=False, name=None): # pylint: disable=redefined-builtin r"""Finds values of the `n`-th smallest value for the last dimension. Note that n is zero-indexed. If the input is a vector (rank-1), finds the entries which is the nth-smallest value in the vector and outputs their values as scalar tensor. For matrices (resp. higher rank input), computes the entries which is the nth-smallest value in each row (resp. vector along the last dimension). Thus, values.shape = input.shape[:-1] Args: input: 1-D or higher `Tensor` with last dimension at least `n+1`. n: A `Tensor` of type `int32`. 0-D. Position of sorted vector to select along the last dimension (along each row for matrices). Valid range of n is `[0, input.shape[:-1])` reverse: An optional `bool`. Defaults to `False`. When set to True, find the nth-largest value in the vector and vice versa. name: A name for the operation (optional). Returns: A `Tensor`. Has the same type as `input`. The `n`-th order statistic along each last dimensional slice. """ return gen_nn_ops.nth_element(input, n, reverse=reverse, name=name) @tf_export(v1=["nn.fractional_max_pool"]) @dispatch.add_dispatch_support @deprecation.deprecated(date=None, instructions="`seed2` and `deterministic` " "args are deprecated. Use fractional_max_pool_v2.") def fractional_max_pool(value, pooling_ratio, pseudo_random=False, overlapping=False, deterministic=False, seed=0, seed2=0, name=None): # pylint: disable=redefined-builtin r"""Performs fractional max pooling on the input. This is a deprecated version of `fractional_max_pool`. Fractional max pooling is slightly different than regular max pooling. In regular max pooling, you downsize an input set by taking the maximum value of smaller N x N subsections of the set (often 2x2), and try to reduce the set by a factor of N, where N is an integer. Fractional max pooling, as you might expect from the word "fractional", means that the overall reduction ratio N does not have to be an integer. The sizes of the pooling regions are generated randomly but are fairly uniform. For example, let's look at the height dimension, and the constraints on the list of rows that will be pool boundaries. First we define the following: 1. input_row_length : the number of rows from the input set 2. output_row_length : which will be smaller than the input 3. 
alpha = input_row_length / output_row_length : our reduction ratio 4. K = floor(alpha) 5. row_pooling_sequence : this is the result list of pool boundary rows Then, row_pooling_sequence should satisfy: 1. a[0] = 0 : the first value of the sequence is 0 2. a[end] = input_row_length : the last value of the sequence is the size 3. K <= (a[i+1] - a[i]) <= K+1 : all intervals are K or K+1 size 4. length(row_pooling_sequence) = output_row_length+1 Args: value: A `Tensor`. 4-D with shape `[batch, height, width, channels]`. pooling_ratio: A list of `floats` that has length >= 4. Pooling ratio for each dimension of `value`, currently only supports row and col dimension and should be >= 1.0. For example, a valid pooling ratio looks like [1.0, 1.44, 1.73, 1.0]. The first and last elements must be 1.0 because we don't allow pooling on batch and channels dimensions. 1.44 and 1.73 are pooling ratio on height and width dimensions respectively. pseudo_random: An optional `bool`. Defaults to `False`. When set to `True`, generates the pooling sequence in a pseudorandom fashion, otherwise, in a random fashion. Check (Graham, 2015) for difference between pseudorandom and random. overlapping: An optional `bool`. Defaults to `False`. When set to `True`, it means when pooling, the values at the boundary of adjacent pooling cells are used by both cells. For example: `index 0 1 2 3 4` `value 20 5 16 3 7` If the pooling sequence is [0, 2, 4], then 16, at index 2 will be used twice. The result would be [20, 16] for fractional max pooling. deterministic: An optional `bool`. Deprecated; use `fractional_max_pool_v2` instead. seed: An optional `int`. Defaults to `0`. If set to be non-zero, the random number generator is seeded by the given seed. Otherwise it is seeded by a random seed. seed2: An optional `int`. Deprecated; use `fractional_max_pool_v2` instead. name: A name for the operation (optional). Returns: A tuple of `Tensor` objects (`output`, `row_pooling_sequence`, `col_pooling_sequence`). output: Output `Tensor` after fractional max pooling. Has the same type as `value`. row_pooling_sequence: A `Tensor` of type `int64`. col_pooling_sequence: A `Tensor` of type `int64`. Raises: ValueError: If op determinism is enabled and either the seeds are not set or the "deterministic" argument is False. References: Fractional Max-Pooling: [Graham, 2015](https://arxiv.org/abs/1412.6071) ([pdf](https://arxiv.org/pdf/1412.6071.pdf)) """ if config.is_op_determinism_enabled() and (not seed or not seed2 or not deterministic): raise ValueError( f'tf.compat.v1.nn.fractional_max_pool requires "seed" and ' f'"seed2" to be non-zero and "deterministic" to be true when op ' f"determinism is enabled. Please pass in such values, e.g. by passing" f'"seed=1, seed2=1, deterministic=True". Got: seed={seed}, ' f'seed2={seed2}, deterministic={deterministic}') return gen_nn_ops.fractional_max_pool(value, pooling_ratio, pseudo_random, overlapping, deterministic, seed, seed2, name) @tf_export("nn.fractional_max_pool", v1=[]) @dispatch.add_dispatch_support def fractional_max_pool_v2(value, pooling_ratio, pseudo_random=False, overlapping=False, seed=0, name=None): # pylint: disable=redefined-builtin r"""Performs fractional max pooling on the input. Fractional max pooling is slightly different than regular max pooling. In regular max pooling, you downsize an input set by taking the maximum value of smaller N x N subsections of the set (often 2x2), and try to reduce the set by a factor of N, where N is an integer. 
Fractional max pooling, as you might expect from the word "fractional", means that the overall reduction ratio N does not have to be an integer. The sizes of the pooling regions are generated randomly but are fairly uniform. For example, let's look at the height dimension, and the constraints on the list of rows that will be pool boundaries. First we define the following: 1. input_row_length : the number of rows from the input set 2. output_row_length : which will be smaller than the input 3. alpha = input_row_length / output_row_length : our reduction ratio 4. K = floor(alpha) 5. row_pooling_sequence : this is the result list of pool boundary rows Then, row_pooling_sequence should satisfy: 1. a[0] = 0 : the first value of the sequence is 0 2. a[end] = input_row_length : the last value of the sequence is the size 3. K <= (a[i+1] - a[i]) <= K+1 : all intervals are K or K+1 size 4. length(row_pooling_sequence) = output_row_length+1 Args: value: A `Tensor`. 4-D with shape `[batch, height, width, channels]`. pooling_ratio: An int or list of `ints` that has length `1`, `2` or `4`. Pooling ratio for each dimension of `value`, currently only supports row and col dimension and should be >= 1.0. For example, a valid pooling ratio looks like [1.0, 1.44, 1.73, 1.0]. The first and last elements must be 1.0 because we don't allow pooling on batch and channels dimensions. 1.44 and 1.73 are pooling ratio on height and width dimensions respectively. pseudo_random: An optional `bool`. Defaults to `False`. When set to `True`, generates the pooling sequence in a pseudorandom fashion, otherwise, in a random fashion. Check paper (Graham, 2015) for difference between pseudorandom and random. overlapping: An optional `bool`. Defaults to `False`. When set to `True`, it means when pooling, the values at the boundary of adjacent pooling cells are used by both cells. For example: `index 0 1 2 3 4` `value 20 5 16 3 7` If the pooling sequence is [0, 2, 4], then 16, at index 2 will be used twice. The result would be [20, 16] for fractional max pooling. seed: An optional `int`. Defaults to `0`. If set to be non-zero, the random number generator is seeded by the given seed. Otherwise it is seeded by a random seed. name: A name for the operation (optional). Returns: A tuple of `Tensor` objects (`output`, `row_pooling_sequence`, `col_pooling_sequence`). output: Output `Tensor` after fractional max pooling. Has the same type as `value`. row_pooling_sequence: A `Tensor` of type `int64`. col_pooling_sequence: A `Tensor` of type `int64`. Raises: ValueError: If no seed is specified and op determinism is enabled. References: Fractional Max-Pooling: [Graham, 2015](https://arxiv.org/abs/1412.6071) ([pdf](https://arxiv.org/pdf/1412.6071.pdf)) """ if (isinstance(pooling_ratio, (list, tuple))): if (pooling_ratio[0] != 1.0 or pooling_ratio[-1] != 1.0): raise ValueError( "`pooling_ratio` should have first and last elements with value 1.0. " f"Received: pooling_ratio={pooling_ratio}") for element in pooling_ratio: if element < 1.0: raise ValueError( f"`pooling_ratio` elements should be >= 1.0. " f"Received: pooling_ratio={pooling_ratio}") elif (isinstance(pooling_ratio, (int, float))): if pooling_ratio < 1.0: raise ValueError( "`pooling_ratio` should be >= 1.0. " f"Received: pooling_ratio={pooling_ratio}") else: raise ValueError( "`pooling_ratio` should be an int or a list of ints. 
" f"Received: pooling_ratio={pooling_ratio}") pooling_ratio = _get_sequence(pooling_ratio, 2, 3, "pooling_ratio") if seed == 0: if config.is_op_determinism_enabled(): raise ValueError( f"tf.nn.fractional_max_pool requires a non-zero seed to be passed in " f"when determinism is enabled, but got seed={seed}. Please pass in a " f'non-zero seed, e.g. by passing "seed=1".') return gen_nn_ops.fractional_max_pool(value, pooling_ratio, pseudo_random, overlapping, deterministic=False, seed=0, seed2=0, name=name) else: seed1, seed2 = random_seed.get_seed(seed) return gen_nn_ops.fractional_max_pool(value, pooling_ratio, pseudo_random, overlapping, deterministic=True, seed=seed1, seed2=seed2, name=name) @tf_export(v1=["nn.fractional_avg_pool"]) @dispatch.add_dispatch_support @deprecation.deprecated(date=None, instructions="`seed2` and `deterministic` " "args are deprecated. Use fractional_avg_pool_v2.") def fractional_avg_pool(value, pooling_ratio, pseudo_random=False, overlapping=False, deterministic=False, seed=0, seed2=0, name=None): # pylint: disable=redefined-builtin r"""Performs fractional average pooling on the input. This is a deprecated version of `fractional_avg_pool`. Fractional average pooling is similar to Fractional max pooling in the pooling region generation step. The only difference is that after pooling regions are generated, a mean operation is performed instead of a max operation in each pooling region. Args: value: A `Tensor`. 4-D with shape `[batch, height, width, channels]`. pooling_ratio: A list of `floats` that has length >= 4. Pooling ratio for each dimension of `value`, currently only supports row and col dimension and should be >= 1.0. For example, a valid pooling ratio looks like [1.0, 1.44, 1.73, 1.0]. The first and last elements must be 1.0 because we don't allow pooling on batch and channels dimensions. 1.44 and 1.73 are pooling ratio on height and width dimensions respectively. pseudo_random: An optional `bool`. Defaults to `False`. When set to `True`, generates the pooling sequence in a pseudorandom fashion, otherwise, in a random fashion. Check paper (Graham, 2015) for difference between pseudorandom and random. overlapping: An optional `bool`. Defaults to `False`. When set to `True`, it means when pooling, the values at the boundary of adjacent pooling cells are used by both cells. For example: `index 0 1 2 3 4` `value 20 5 16 3 7` If the pooling sequence is [0, 2, 4], then 16, at index 2 will be used twice. The result would be [20, 16] for fractional avg pooling. deterministic: An optional `bool`. Deprecated; use `fractional_avg_pool_v2` instead. seed: An optional `int`. Defaults to `0`. If set to be non-zero, the random number generator is seeded by the given seed. Otherwise it is seeded by a random seed. seed2: An optional `int`. Deprecated; use `fractional_avg_pool_v2` instead. name: A name for the operation (optional). Returns: A tuple of `Tensor` objects (`output`, `row_pooling_sequence`, `col_pooling_sequence`). output: Output `Tensor` after fractional avg pooling. Has the same type as `value`. row_pooling_sequence: A `Tensor` of type `int64`. col_pooling_sequence: A `Tensor` of type `int64`. 
References: Fractional Max-Pooling: [Graham, 2015](https://arxiv.org/abs/1412.6071) ([pdf](https://arxiv.org/pdf/1412.6071.pdf)) """ return gen_nn_ops.fractional_avg_pool(value, pooling_ratio, pseudo_random, overlapping, deterministic, seed, seed2, name=name) @tf_export("nn.fractional_avg_pool", v1=[]) @dispatch.add_dispatch_support def fractional_avg_pool_v2(value, pooling_ratio, pseudo_random=False, overlapping=False, seed=0, name=None): # pylint: disable=redefined-builtin r"""Performs fractional average pooling on the input. Fractional average pooling is similar to Fractional max pooling in the pooling region generation step. The only difference is that after pooling regions are generated, a mean operation is performed instead of a max operation in each pooling region. Args: value: A `Tensor`. 4-D with shape `[batch, height, width, channels]`. pooling_ratio: A list of `floats` that has length >= 4. Pooling ratio for each dimension of `value`, currently only supports row and col dimension and should be >= 1.0. For example, a valid pooling ratio looks like [1.0, 1.44, 1.73, 1.0]. The first and last elements must be 1.0 because we don't allow pooling on batch and channels dimensions. 1.44 and 1.73 are pooling ratio on height and width dimensions respectively. pseudo_random: An optional `bool`. Defaults to `False`. When set to `True`, generates the pooling sequence in a pseudorandom fashion, otherwise, in a random fashion. Check paper (Graham, 2015) for difference between pseudorandom and random. overlapping: An optional `bool`. Defaults to `False`. When set to `True`, it means when pooling, the values at the boundary of adjacent pooling cells are used by both cells. For example: `index 0 1 2 3 4` `value 20 5 16 3 7` If the pooling sequence is [0, 2, 4], then 16, at index 2 will be used twice. The result would be [20, 16] for fractional avg pooling. seed: An optional `int`. Defaults to `0`. If set to be non-zero, the random number generator is seeded by the given seed. Otherwise it is seeded by a random seed. name: A name for the operation (optional). Returns: A tuple of `Tensor` objects (`output`, `row_pooling_sequence`, `col_pooling_sequence`). output: Output `Tensor` after fractional avg pooling. Has the same type as `value`. row_pooling_sequence: A `Tensor` of type `int64`. col_pooling_sequence: A `Tensor` of type `int64`. 
References: Fractional Max-Pooling: [Graham, 2015](https://arxiv.org/abs/1412.6071) ([pdf](https://arxiv.org/pdf/1412.6071.pdf)) """ if seed == 0: return gen_nn_ops.fractional_avg_pool(value, pooling_ratio, pseudo_random, overlapping, deterministic=False, seed=0, seed2=0, name=name) else: seed1, seed2 = random_seed.get_seed(seed) return gen_nn_ops.fractional_avg_pool(value, pooling_ratio, pseudo_random, overlapping, deterministic=True, seed=seed1, seed2=seed2, name=name) @ops.RegisterStatistics("Dilation2D", "flops") def _calc_dilation2d_flops(graph, node): """Calculates the compute resources needed for Dilation2D.""" input_shape = graph_util.tensor_shape_from_node_def_name(graph, node.input[0]) input_shape.assert_is_fully_defined() filter_shape = graph_util.tensor_shape_from_node_def_name( graph, node.input[1]) filter_shape.assert_is_fully_defined() output_shape = graph_util.tensor_shape_from_node_def_name(graph, node.name) output_shape.assert_is_fully_defined() filter_height = int(filter_shape[0]) filter_width = int(filter_shape[1]) output_count = np.prod(output_shape.as_list(), dtype=np.int64) return ops.OpStats("flops", (output_count * filter_height * filter_width * 2)) @tf_export(v1=["nn.erosion2d"]) @dispatch.add_dispatch_support def erosion2d(value, kernel, strides, rates, padding, name=None): """Computes the grayscale erosion of 4-D `value` and 3-D `kernel` tensors. The `value` tensor has shape `[batch, in_height, in_width, depth]` and the `kernel` tensor has shape `[kernel_height, kernel_width, depth]`, i.e., each input channel is processed independently of the others with its own structuring function. The `output` tensor has shape `[batch, out_height, out_width, depth]`. The spatial dimensions of the output tensor depend on the `padding` algorithm. We currently only support the default "NHWC" `data_format`. In detail, the grayscale morphological 2-D erosion is given by: output[b, y, x, c] = min_{dy, dx} value[b, strides[1] * y - rates[1] * dy, strides[2] * x - rates[2] * dx, c] - kernel[dy, dx, c] Duality: The erosion of `value` by the `kernel` is equal to the negation of the dilation of `-value` by the reflected `kernel`. Args: value: A `Tensor`. 4-D with shape `[batch, in_height, in_width, depth]`. kernel: A `Tensor`. Must have the same type as `value`. 3-D with shape `[kernel_height, kernel_width, depth]`. strides: A list of `ints` that has length `>= 4`. 1-D of length 4. The stride of the sliding window for each dimension of the input tensor. Must be: `[1, stride_height, stride_width, 1]`. rates: A list of `ints` that has length `>= 4`. 1-D of length 4. The input stride for atrous morphological dilation. Must be: `[1, rate_height, rate_width, 1]`. padding: A `string` from: `"SAME", "VALID"`. The type of padding algorithm to use. name: A name for the operation (optional). If not specified "erosion2d" is used. Returns: A `Tensor`. Has the same type as `value`. 4-D with shape `[batch, out_height, out_width, depth]`. Raises: ValueError: If the `value` depth does not match `kernel`' shape, or if padding is other than `'VALID'` or `'SAME'`. """ with ops.name_scope(name, "erosion2d", [value, kernel]) as name: # Reduce erosion to dilation by duality. 
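# Concretely (restating the duality from the docstring): erosion2d(value,
# kernel) == -dilation2d(-value, reverse(kernel)). reverse_v2(kernel, [0, 1])
# reflects the structuring element in both spatial axes, and negating both
# the input and the dilation output turns dilation's max-plus reduction into
# erosion's min-minus reduction, which is what the return below computes.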
return math_ops.negative( gen_nn_ops.dilation2d( input=math_ops.negative(value), filter=array_ops.reverse_v2(kernel, [0, 1]), strides=strides, rates=rates, padding=padding, name=name)) @tf_export("nn.erosion2d", v1=[]) @dispatch.add_dispatch_support def erosion2d_v2(value, filters, strides, padding, data_format, dilations, name=None): """Computes the grayscale erosion of 4-D `value` and 3-D `filters` tensors. The `value` tensor has shape `[batch, in_height, in_width, depth]` and the `filters` tensor has shape `[filters_height, filters_width, depth]`, i.e., each input channel is processed independently of the others with its own structuring function. The `output` tensor has shape `[batch, out_height, out_width, depth]`. The spatial dimensions of the output tensor depend on the `padding` algorithm. We currently only support the default "NHWC" `data_format`. In detail, the grayscale morphological 2-D erosion is given by: output[b, y, x, c] = min_{dy, dx} value[b, strides[1] * y - dilations[1] * dy, strides[2] * x - dilations[2] * dx, c] - filters[dy, dx, c] Duality: The erosion of `value` by the `filters` is equal to the negation of the dilation of `-value` by the reflected `filters`. Args: value: A `Tensor`. 4-D with shape `[batch, in_height, in_width, depth]`. filters: A `Tensor`. Must have the same type as `value`. 3-D with shape `[filters_height, filters_width, depth]`. strides: A list of `ints` that has length `>= 4`. 1-D of length 4. The stride of the sliding window for each dimension of the input tensor. Must be: `[1, stride_height, stride_width, 1]`. padding: A `string` from: `"SAME", "VALID"`. The type of padding algorithm to use. See [here](https://www.tensorflow.org/api_docs/python/tf/nn#notes_on_padding_2) for more information. data_format: A `string`, only `"NHWC"` is currently supported. dilations: A list of `ints` that has length `>= 4`. 1-D of length 4. The input stride for atrous morphological dilation. Must be: `[1, rate_height, rate_width, 1]`. name: A name for the operation (optional). If not specified "erosion2d" is used. Returns: A `Tensor`. Has the same type as `value`. 4-D with shape `[batch, out_height, out_width, depth]`. Raises: ValueError: If the `value` depth does not match `filters`' shape, or if padding is other than `'VALID'` or `'SAME'`. """ if data_format != "NHWC": raise ValueError("`data_format` values other than 'NHWC' are not " f"supported. Received: data_format={data_format}") with ops.name_scope(name, "erosion2d", [value, filters]) as name: # Reduce erosion to dilation by duality. return math_ops.negative( gen_nn_ops.dilation2d( input=math_ops.negative(value), filter=array_ops.reverse_v2(filters, [0, 1]), strides=strides, rates=dilations, padding=padding, name=name)) @tf_export(v1=["math.in_top_k", "nn.in_top_k"]) @dispatch.add_dispatch_support def in_top_k(predictions, targets, k, name=None): r"""Says whether the targets are in the top `K` predictions. This outputs a `batch_size` bool array, an entry `out[i]` is `true` if the prediction for the target class is finite (not inf, -inf, or nan) and among the top `k` predictions among all predictions for example `i`. Note that the behavior of `InTopK` differs from the `TopK` op in its handling of ties; if multiple classes have the same prediction value and straddle the top-`k` boundary, all of those classes are considered to be in the top `k`. 
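For example, given the tie rule above, `predictions = [[0.5, 0.5, 0.1]]`, `targets = [1]` and `k = 1` yield `[True]`: classes 0 and 1 tie at the top-1 boundary, so both are treated as being in the top `k`.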
More formally, let \\(predictions_i\\) be the predictions for all classes for example `i`, \\(targets_i\\) be the target class for example `i`, \\(out_i\\) be the output for example `i`, $$out_i = predictions_{i, targets_i} \in TopKIncludingTies(predictions_i)$$ Args: predictions: A `Tensor` of type `float32`. A `batch_size` x `classes` tensor. targets: A `Tensor`. Must be one of the following types: `int32`, `int64`. A `batch_size` vector of class ids. k: An `int`. Number of top elements to look at for computing precision. name: A name for the operation (optional). Returns: A `Tensor` of type `bool`. Computed Precision at `k` as a `bool Tensor`. """ with ops.name_scope(name, "in_top_k"): return gen_nn_ops.in_top_kv2(predictions, targets, k, name=name) @tf_export("math.in_top_k", "nn.in_top_k", v1=[]) @dispatch.add_dispatch_support def in_top_k_v2(targets, predictions, k, name=None): return in_top_k(predictions, targets, k, name) in_top_k_v2.__doc__ = in_top_k.__doc__ tf_export(v1=["nn.quantized_avg_pool"])( dispatch.add_dispatch_support(gen_nn_ops.quantized_avg_pool)) tf_export(v1=["nn.quantized_conv2d"])( dispatch.add_dispatch_support(gen_nn_ops.quantized_conv2d)) tf_export(v1=["nn.quantized_relu_x"])( dispatch.add_dispatch_support(gen_nn_ops.quantized_relu_x)) tf_export(v1=["nn.quantized_max_pool"])( dispatch.add_dispatch_support(gen_nn_ops.quantized_max_pool)) @tf_export("nn.isotonic_regression", v1=[]) @dispatch.add_dispatch_support def isotonic_regression(inputs, decreasing=True, axis=-1): r"""Solves isotonic regression problems along the given axis. For each vector x, the problem solved is $$\argmin_{y_1 >= y_2 >= ... >= y_n} \sum_i (x_i - y_i)^2.$$ As the solution is component-wise constant, a second tensor is returned that encodes the segments. The problems are solved over the given axis. Consider the following example, where we solve a batch of two problems. The first input is [3, 1, 2], while the second [1, 3, 4] (as the axis is 1). >>> x = tf.constant([[3, 1, 2], [1, 3, 4]], dtype=tf.float32) >>> y, segments = tf.nn.isotonic_regression(x, axis=1) >>> y # The solution. <tf.Tensor: shape=(2, 3), dtype=float32, numpy= array([[3. , 1.5 , 1.5 ], [2.6666667, 2.6666667, 2.6666667]], dtype=float32)> Note that the first solution has two blocks [2] and [1.5, 1.5]. The second solution is constant, and thus has a single segment. These segments are exactly what the second returned tensor encodes: >>> segments <tf.Tensor: shape=(2, 3), dtype=int32, numpy= array([[0, 1, 1], [0, 0, 0]], dtype=int32)> Args: inputs: A tensor holding the inputs. decreasing: If set to False, the inequalities in the optimizing constrained are flipped. axis: The axis along which the problems should be solved. Returns: output: The solutions, same shape as type as the input. segments: An int32 tensor, same shape as the input indicating the segments that have the same value. Specifically, those positions that have the same value correspond to the same segment. These values start at zero, and are monotonously increasing for each solution. 
""" type_promotions = { # Float types get mapped to themselves, int8/16 to float32, rest to double dtypes.float32: dtypes.float32, dtypes.half: dtypes.half, dtypes.bfloat16: dtypes.bfloat16, dtypes.int8: dtypes.float32, dtypes.int16: dtypes.float32, } inputs = ops.convert_to_tensor(inputs) try: output_dtype = type_promotions[inputs.dtype] except KeyError: output_dtype = dtypes.float64 def compute_on_matrix(matrix, name=None): iso_fn = functools.partial( gen_nn_ops.isotonic_regression, output_dtype=output_dtype, name=name) if decreasing: return iso_fn(matrix) else: output, segments = iso_fn(-matrix) return -output, segments return _wrap_2d_function(inputs, compute_on_matrix, axis) # Register elementwise ops that don't have Python wrappers. # Unary elementwise ops. dispatch.register_unary_elementwise_api(gen_nn_ops.elu) dispatch.register_unary_elementwise_api(gen_nn_ops.relu) dispatch.register_unary_elementwise_api(gen_nn_ops.selu) dispatch.register_unary_elementwise_api(gen_nn_ops.softsign)
Intel-Corporation/tensorflow
tensorflow/python/ops/nn_ops.py
Python
apache-2.0
262,706
[ "Gaussian" ]
8260d50589767c9ecb2bdd5bc19ed0cd09467cb039dbc97ecda4bbf79c89cc51
# coding: utf-8 # Copyright (c) Pymatgen Development Team. # Distributed under the terms of the MIT License. from __future__ import division, unicode_literals, print_function """ This module implements plotter for DOS and band structure. """ __author__ = "Shyue Ping Ong, Geoffroy Hautier" __copyright__ = "Copyright 2012, The Materials Project" __version__ = "0.1" __maintainer__ = "Shyue Ping Ong" __email__ = "shyuep@gmail.com" __date__ = "May 1, 2012" import logging import math import itertools from collections import OrderedDict import numpy as np from monty.json import jsanitize from pymatgen.electronic_structure.core import Spin from pymatgen.electronic_structure.bandstructure import BandStructureSymmLine logger = logging.getLogger('BSPlotter') class DosPlotter(object): """ Class for plotting DOSs. Note that the interface is extremely flexible given that there are many different ways in which people want to view DOS. The typical usage is:: # Initializes plotter with some optional args. Defaults are usually # fine, plotter = DosPlotter() # Adds a DOS with a label. plotter.add_dos("Total DOS", dos) # Alternatively, you can add a dict of DOSs. This is the typical # form returned by CompleteDos.get_spd/element/others_dos(). plotter.add_dos_dict({"dos1": dos1, "dos2": dos2}) plotter.add_dos_dict(complete_dos.get_spd_dos()) Args: zero_at_efermi: Whether to shift all Dos to have zero energy at the fermi energy. Defaults to True. stack: Whether to plot the DOS as a stacked area graph key_sort_func: function used to sort the dos_dict keys. sigma: A float specifying a standard deviation for Gaussian smearing the DOS for nicer looking plots. Defaults to None for no smearing. """ def __init__(self, zero_at_efermi=True, stack=False, sigma=None): self.zero_at_efermi = zero_at_efermi self.stack = stack self.sigma = sigma self._doses = OrderedDict() def add_dos(self, label, dos): """ Adds a dos for plotting. Args: label: label for the DOS. Must be unique. dos: Dos object """ energies = dos.energies - dos.efermi if self.zero_at_efermi \ else dos.energies densities = dos.get_smeared_densities(self.sigma) if self.sigma \ else dos.densities efermi = dos.efermi self._doses[label] = {'energies': energies, 'densities': densities, 'efermi': efermi} def add_dos_dict(self, dos_dict, key_sort_func=None): """ Add a dictionary of doses, with an optional sorting function for the keys. Args: dos_dict: dict of {label: Dos} key_sort_func: function used to sort the dos_dict keys. """ if key_sort_func: keys = sorted(dos_dict.keys(), key=key_sort_func) else: keys = dos_dict.keys() for label in keys: self.add_dos(label, dos_dict[label]) def get_dos_dict(self): """ Returns the added doses as a json-serializable dict. Note that if you have specified smearing for the DOS plot, the densities returned will be the smeared densities, not the original densities. Returns: Dict of dos data. Generally of the form, {label: {'energies':.., 'densities': {'up':...}, 'efermi':efermi}} """ return jsanitize(self._doses) def get_plot(self, xlim=None, ylim=None): """ Get a matplotlib plot showing the DOS. Args: xlim: Specifies the x-axis limits. Set to None for automatic determination. ylim: Specifies the y-axis limits. 
""" import prettyplotlib as ppl from prettyplotlib import brewer2mpl from pymatgen.util.plotting_utils import get_publication_quality_plot ncolors = max(3, len(self._doses)) ncolors = min(9, ncolors) colors = brewer2mpl.get_map('Set1', 'qualitative', ncolors).mpl_colors y = None alldensities = [] allenergies = [] plt = get_publication_quality_plot(12, 8) # Note that this complicated processing of energies is to allow for # stacked plots in matplotlib. for key, dos in self._doses.items(): energies = dos['energies'] densities = dos['densities'] if not y: y = {Spin.up: np.zeros(energies.shape), Spin.down: np.zeros(energies.shape)} newdens = {} for spin in [Spin.up, Spin.down]: if spin in densities: if self.stack: y[spin] += densities[spin] newdens[spin] = y[spin].copy() else: newdens[spin] = densities[spin] allenergies.append(energies) alldensities.append(newdens) keys = list(self._doses.keys()) keys.reverse() alldensities.reverse() allenergies.reverse() allpts = [] for i, key in enumerate(keys): x = [] y = [] for spin in [Spin.up, Spin.down]: if spin in alldensities[i]: densities = list(int(spin) * alldensities[i][spin]) energies = list(allenergies[i]) if spin == Spin.down: energies.reverse() densities.reverse() x.extend(energies) y.extend(densities) allpts.extend(list(zip(x, y))) if self.stack: plt.fill(x, y, color=colors[i % ncolors], label=str(key)) else: ppl.plot(x, y, color=colors[i % ncolors], label=str(key), linewidth=3) if not self.zero_at_efermi: ylim = plt.ylim() ppl.plot([self._doses[key]['efermi'], self._doses[key]['efermi']], ylim, color=colors[i % ncolors], linestyle='--', linewidth=2) if xlim: plt.xlim(xlim) if ylim: plt.ylim(ylim) else: xlim = plt.xlim() relevanty = [p[1] for p in allpts if xlim[0] < p[0] < xlim[1]] plt.ylim((min(relevanty), max(relevanty))) if self.zero_at_efermi: ylim = plt.ylim() plt.plot([0, 0], ylim, 'k--', linewidth=2) plt.xlabel('Energies (eV)') plt.ylabel('Density of states') plt.legend() leg = plt.gca().get_legend() ltext = leg.get_texts() # all the text.Text instance in the legend plt.setp(ltext, fontsize=30) plt.tight_layout() return plt def save_plot(self, filename, img_format="eps", xlim=None, ylim=None): """ Save matplotlib plot to a file. Args: filename: Filename to write to. img_format: Image format to use. Defaults to EPS. xlim: Specifies the x-axis limits. Set to None for automatic determination. ylim: Specifies the y-axis limits. """ plt = self.get_plot(xlim, ylim) plt.savefig(filename, format=img_format) def show(self, xlim=None, ylim=None): """ Show the plot using matplotlib. Args: xlim: Specifies the x-axis limits. Set to None for automatic determination. ylim: Specifies the y-axis limits. """ plt = self.get_plot(xlim, ylim) plt.show() class BSPlotter(object): """ Class to plot or get data to facilitate the plot of band structure objects. Args: bs: A BandStructureSymmLine object. """ def __init__(self, bs): if not isinstance(bs, BandStructureSymmLine): raise ValueError( "BSPlotter only works with BandStructureSymmLine objects. 
" "A BandStructure object (on a uniform grid for instance and " "not along symmetry lines won't work)") self._bs = bs # TODO: come with an intelligent way to cut the highest unconverged # bands self._nb_bands = self._bs._nb_bands def _maketicks(self, plt): """ utility private method to add ticks to a band structure """ ticks = self.get_ticks() # Sanitize only plot the uniq values uniq_d = [] uniq_l = [] temp_ticks = list(zip(ticks['distance'], ticks['label'])) for i in range(len(temp_ticks)): if i == 0: uniq_d.append(temp_ticks[i][0]) uniq_l.append(temp_ticks[i][1]) logger.debug("Adding label {l} at {d}".format( l=temp_ticks[i][0], d=temp_ticks[i][1])) else: if temp_ticks[i][1] == temp_ticks[i - 1][1]: logger.debug("Skipping label {i}".format( i=temp_ticks[i][1])) else: logger.debug("Adding label {l} at {d}".format( l=temp_ticks[i][0], d=temp_ticks[i][1])) uniq_d.append(temp_ticks[i][0]) uniq_l.append(temp_ticks[i][1]) logger.debug("Unique labels are %s" % list(zip(uniq_d, uniq_l))) plt.gca().set_xticks(uniq_d) plt.gca().set_xticklabels(uniq_l) for i in range(len(ticks['label'])): if ticks['label'][i] is not None: # don't print the same label twice if i != 0: if ticks['label'][i] == ticks['label'][i - 1]: logger.debug("already print label... " "skipping label {i}".format( i=ticks['label'][i])) else: logger.debug("Adding a line at {d}" " for label {l}".format( d=ticks['distance'][i], l=ticks['label'][i])) plt.axvline(ticks['distance'][i], color='k') else: logger.debug("Adding a line at {d} for label {l}".format( d=ticks['distance'][i], l=ticks['label'][i])) plt.axvline(ticks['distance'][i], color='k') return plt def bs_plot_data(self, zero_to_efermi=True): """ Get the data nicely formatted for a plot Args: zero_to_efermi: Automatically subtract off the Fermi energy from the eigenvalues and plot. Returns: A dict of the following format: ticks: A dict with the 'distances' at which there is a kpoint (the x axis) and the labels (None if no label) energy: A dict storing bands for spin up and spin down data [{Spin:[band_index][k_point_index]}] as a list (one element for each branch) of energy for each kpoint. The data is stored by branch to facilitate the plotting vbm: A list of tuples (distance,energy) marking the vbms. The energies are shifted with respect to the fermi level is the option has been selected. cbm: A list of tuples (distance,energy) marking the cbms. The energies are shifted with respect to the fermi level is the option has been selected. lattice: The reciprocal lattice. zero_energy: This is the energy used as zero for the plot. band_gap:A string indicating the band gap and its nature (empty if it's a metal). is_metal: True if the band structure is metallic (i.e., there is at least one band crossing the fermi level). 
""" distance = [] energy = [] if self._bs.is_metal(): zero_energy = self._bs.efermi else: zero_energy = self._bs.get_vbm()['energy'] if not zero_to_efermi: zero_energy = 0.0 for b in self._bs._branches: if self._bs.is_spin_polarized: energy.append({str(Spin.up): [], str(Spin.down): []}) else: energy.append({str(Spin.up): []}) distance.append([self._bs._distance[j] for j in range(b['start_index'], b['end_index'] + 1)]) ticks = self.get_ticks() for i in range(self._nb_bands): energy[-1][str(Spin.up)].append( [self._bs._bands[Spin.up][i][j] - zero_energy for j in range(b['start_index'], b['end_index'] + 1)]) if self._bs.is_spin_polarized: for i in range(self._nb_bands): energy[-1][str(Spin.down)].append( [self._bs._bands[Spin.down][i][j] - zero_energy for j in range(b['start_index'], b['end_index'] + 1)]) vbm = self._bs.get_vbm() cbm = self._bs.get_cbm() vbm_plot = [] cbm_plot = [] for index in cbm['kpoint_index']: cbm_plot.append((self._bs._distance[index], cbm['energy'] - zero_energy if zero_to_efermi else cbm['energy'])) for index in vbm['kpoint_index']: vbm_plot.append((self._bs._distance[index], vbm['energy'] - zero_energy if zero_to_efermi else vbm['energy'])) bg = self._bs.get_band_gap() direct = "Indirect" if bg['direct']: direct = "Direct" return {'ticks': ticks, 'distances': distance, 'energy': energy, 'vbm': vbm_plot, 'cbm': cbm_plot, 'lattice': self._bs._lattice_rec.as_dict(), 'zero_energy': zero_energy, 'is_metal': self._bs.is_metal(), 'band_gap': "{} {} bandgap = {}".format(direct, bg['transition'], bg['energy']) if not self._bs.is_metal() else ""} def get_plot(self, zero_to_efermi=True, ylim=None, smooth=False, vbm_cbm_marker=False): """ get a matplotlib object for the bandstructure plot. Blue lines are up spin, red lines are down spin. Args: zero_to_efermi: Automatically subtract off the Fermi energy from the eigenvalues and plot (E-Ef). ylim: Specify the y-axis (energy) limits; by default None let the code choose. 
It is vbm-4 and cbm+4 if insulator efermi-10 and efermi+10 if metal smooth: interpolates the bands by a spline cubic """ from pymatgen.util.plotting_utils import get_publication_quality_plot plt = get_publication_quality_plot(12, 8) from matplotlib import rc import scipy.interpolate as scint rc('text', usetex=True) # main internal config options e_min = -4 e_max = 4 if self._bs.is_metal(): e_min = -10 e_max = 10 #band_linewidth = 3 band_linewidth = 1 data = self.bs_plot_data(zero_to_efermi) if not smooth: for d in range(len(data['distances'])): for i in range(self._nb_bands): plt.plot(data['distances'][d], [data['energy'][d][str(Spin.up)][i][j] for j in range(len(data['distances'][d]))], 'b-', linewidth=band_linewidth) if self._bs.is_spin_polarized: plt.plot(data['distances'][d], [data['energy'][d][str(Spin.down)][i][j] for j in range(len(data['distances'][d]))], 'r--', linewidth=band_linewidth) else: for d in range(len(data['distances'])): for i in range(self._nb_bands): tck = scint.splrep( data['distances'][d], [data['energy'][d][str(Spin.up)][i][j] for j in range(len(data['distances'][d]))]) step = (data['distances'][d][-1] - data['distances'][d][0]) / 1000 plt.plot([x * step + data['distances'][d][0] for x in range(1000)], [scint.splev(x * step + data['distances'][d][0], tck, der=0) for x in range(1000)], 'b-', linewidth=band_linewidth) if self._bs.is_spin_polarized: tck = scint.splrep( data['distances'][d], [data['energy'][d][str(Spin.down)][i][j] for j in range(len(data['distances'][d]))]) step = (data['distances'][d][-1] - data['distances'][d][0]) / 1000 plt.plot([x * step + data['distances'][d][0] for x in range(1000)], [scint.splev( x * step + data['distances'][d][0], tck, der=0) for x in range(1000)], 'r--', linewidth=band_linewidth) self._maketicks(plt) # Main X and Y Labels plt.xlabel(r'$\mathrm{Wave\ Vector}$', fontsize=30) ylabel = r'$\mathrm{E\ -\ E_f\ (eV)}$' if zero_to_efermi \ else r'$\mathrm{Energy\ (eV)}$' plt.ylabel(ylabel, fontsize=30) # Draw Fermi energy, only if not the zero if not zero_to_efermi: ef = self._bs.efermi plt.axhline(ef, linewidth=2, color='k') # X range (K) # last distance point x_max = data['distances'][-1][-1] plt.xlim(0, x_max) if ylim is None: if self._bs.is_metal(): # Plot A Metal if zero_to_efermi: plt.ylim(e_min, e_max) else: plt.ylim(self._bs.efermi + e_min, self._bs._efermi + e_max) else: if vbm_cbm_marker: for cbm in data['cbm']: plt.scatter(cbm[0], cbm[1], color='r', marker='o', s=100) for vbm in data['vbm']: plt.scatter(vbm[0], vbm[1], color='g', marker='o', s=100) plt.ylim(data['vbm'][0][1] + e_min, data['cbm'][0][1] + e_max) else: plt.ylim(ylim) plt.tight_layout() return plt def show(self, zero_to_efermi=True, ylim=None, smooth=False): """ Show the plot using matplotlib. Args: zero_to_efermi: Automatically subtract off the Fermi energy from the eigenvalues and plot (E-Ef). ylim: Specify the y-axis (energy) limits; by default None let the code choose. It is vbm-4 and cbm+4 if insulator efermi-10 and efermi+10 if metal smooth: interpolates the bands by a spline cubic """ plt = self.get_plot(zero_to_efermi, ylim, smooth) plt.show() def save_plot(self, filename, img_format="eps", ylim=None, zero_to_efermi=True, smooth=False): """ Save matplotlib plot to a file. Args: filename: Filename to write to. img_format: Image format to use. Defaults to EPS. ylim: Specifies the y-axis limits. 
""" plt = self.get_plot(ylim=ylim, zero_to_efermi=zero_to_efermi, smooth=smooth) plt.savefig(filename, format=img_format) plt.close() def get_ticks(self): """ Get all ticks and labels for a band structure plot. Returns: A dict with 'distance': a list of distance at which ticks should be set and 'label': a list of label for each of those ticks. """ tick_distance = [] tick_labels = [] previous_label = self._bs._kpoints[0].label previous_branch = self._bs._branches[0]['name'] for i, c in enumerate(self._bs._kpoints): if c.label is not None: tick_distance.append(self._bs._distance[i]) this_branch = None for b in self._bs._branches: if b['start_index'] <= i <= b['end_index']: this_branch = b['name'] break if c.label != previous_label \ and previous_branch != this_branch: label1 = c.label if label1.startswith("\\") or label1.find("_") != -1: label1 = "$" + label1 + "$" label0 = previous_label if label0.startswith("\\") or label0.find("_") != -1: label0 = "$" + label0 + "$" tick_labels.pop() tick_distance.pop() tick_labels.append(label0 + "$\mid$" + label1) else: if c.label.startswith("\\") or c.label.find("_") != -1: tick_labels.append("$" + c.label + "$") else: tick_labels.append(c.label) previous_label = c.label previous_branch = this_branch return {'distance': tick_distance, 'label': tick_labels} def plot_compare(self, other_plotter): """ plot two band structure for comparison. One is in red the other in blue (no difference in spins). The two band structures need to be defined on the same symmetry lines! and the distance between symmetry lines is the one of the band structure used to build the BSPlotter Args: another band structure object defined along the same symmetry lines Returns: a matplotlib object with both band structures """ # TODO: add exception if the band structures are not compatible plt = self.get_plot() data_orig = self.bs_plot_data() data = other_plotter.bs_plot_data() band_linewidth = 3 for i in range(other_plotter._nb_bands): plt.plot(data_orig['distances'], [e for e in data['energy'][str(Spin.up)][i]], 'r-', linewidth=band_linewidth) if other_plotter._bs.is_spin_polarized: plt.plot(data_orig['distances'], [e for e in data['energy'][str(Spin.down)][i]], 'r-', linewidth=band_linewidth) return plt def plot_brillouin(self): """ plot the Brillouin zone """ import matplotlib as mpl import matplotlib.pyplot as plt from mpl_toolkits.mplot3d import Axes3D mpl.rcParams['legend.fontsize'] = 10 fig = plt.figure() ax = Axes3D(fig) vec1 = self._bs.lattice.matrix[0] vec2 = self._bs.lattice.matrix[1] vec3 = self._bs.lattice.matrix[2] # make the grid max_x = -1000 max_y = -1000 max_z = -1000 min_x = 1000 min_y = 1000 min_z = 1000 list_k_points = [] for i in [-1, 0, 1]: for j in [-1, 0, 1]: for k in [-1, 0, 1]: list_k_points.append(i * vec1 + j * vec2 + k * vec3) if list_k_points[-1][0] > max_x: max_x = list_k_points[-1][0] if list_k_points[-1][1] > max_y: max_y = list_k_points[-1][1] if list_k_points[-1][2] > max_z: max_z = list_k_points[-1][0] if list_k_points[-1][0] < min_x: min_x = list_k_points[-1][0] if list_k_points[-1][1] < min_y: min_y = list_k_points[-1][1] if list_k_points[-1][2] < min_z: min_z = list_k_points[-1][0] vertex = _qvertex_target(list_k_points, 13) lines = get_lines_voronoi(vertex) for i in range(len(lines)): vertex1 = lines[i]['start'] vertex2 = lines[i]['end'] ax.plot([vertex1[0], vertex2[0]], [vertex1[1], vertex2[1]], [vertex1[2], vertex2[2]], color='k') for b in self._bs._branches: vertex1 = self._bs.kpoints[b['start_index']].cart_coords vertex2 = 
self._bs.kpoints[b['end_index']].cart_coords ax.plot([vertex1[0], vertex2[0]], [vertex1[1], vertex2[1]], [vertex1[2], vertex2[2]], color='r', linewidth=3) for k in self._bs.kpoints: if k.label: label = k.label if k.label.startswith("\\") or k.label.find("_") != -1: label = "$" + k.label + "$" off = 0.01 ax.text(k.cart_coords[0] + off, k.cart_coords[1] + off, k.cart_coords[2] + off, label, color='b', size='25') ax.scatter([k.cart_coords[0]], [k.cart_coords[1]], [k.cart_coords[2]], color='b') # make ticklabels and ticklines invisible for a in ax.w_xaxis.get_ticklines() + ax.w_xaxis.get_ticklabels(): a.set_visible(False) for a in ax.w_yaxis.get_ticklines() + ax.w_yaxis.get_ticklabels(): a.set_visible(False) for a in ax.w_zaxis.get_ticklines() + ax.w_zaxis.get_ticklabels(): a.set_visible(False) ax.grid(False) plt.show() ax.axis("off") class BSPlotterProjected(BSPlotter): """ Class to plot or get data to facilitate the plot of band structure objects projected along orbitals, elements or sites. Args: bs: A BandStructureSymmLine object with projections. """ def __init__(self, bs): if len(bs._projections) == 0: raise ValueError("try to plot projections" " on a band structure without any") super(BSPlotterProjected, self).__init__(bs) def _get_projections_by_branches(self, dictio): proj = self._bs.get_projections_on_elts_and_orbitals(dictio) proj_br = [] print(len(proj[Spin.up])) print(len(proj[Spin.up][0])) for c in proj[Spin.up][0]: print(c) for b in self._bs._branches: print(b) if self._bs.is_spin_polarized: proj_br.append( {str(Spin.up): [[] for l in range(self._nb_bands)], str(Spin.down): [[] for l in range(self._nb_bands)]}) else: proj_br.append( {str(Spin.up): [[] for l in range(self._nb_bands)]}) print((len(proj_br[-1][str(Spin.up)]), self._nb_bands)) for i in range(self._nb_bands): for j in range(b['start_index'], b['end_index'] + 1): proj_br[-1][str(Spin.up)][i].append( {e: {o: proj[Spin.up][i][j][e][o] for o in proj[Spin.up][i][j][e]} for e in proj[Spin.up][i][j]}) if self._bs.is_spin_polarized: for b in self._bs._branches: for i in range(self._nb_bands): for j in range(b['start_index'], b['end_index'] + 1): proj_br[-1][str(Spin.down)][i].append( {e: {o: proj[Spin.down][i][j][e][o] for o in proj[Spin.down][i][j][e]} for e in proj[Spin.down][i][j]}) return proj_br def get_projected_plots_dots(self, dictio, zero_to_efermi=True, ylim=None, vbm_cbm_marker=False): """ Method returning a plot composed of subplots along different elements and orbitals. Args: dictio: The element and orbitals you want a projection on. The format is {Element:[Orbitals]} for instance {'Cu':['d','s'],'O':['p']} will give projections for Cu on d and s orbitals and on oxygen p. Returns: a pylab object with different subfigures for each projection The blue and red colors are for spin up and spin down. The bigger the red or blue dot in the band structure the higher character for the corresponding element and orbital. 
""" from pymatgen.util.plotting_utils import get_publication_quality_plot band_linewidth = 1.0 fig_number = sum([len(v) for v in dictio.values()]) proj = self._get_projections_by_branches(dictio) data = self.bs_plot_data(zero_to_efermi) plt = get_publication_quality_plot(12, 8) e_min = -4 e_max = 4 if self._bs.is_metal(): e_min = -10 e_max = 10 count = 1 for el in dictio: for o in dictio[el]: plt.subplot(100 * math.ceil(fig_number / 2) + 20 + count) self._maketicks(plt) for b in range(len(data['distances'])): for i in range(self._nb_bands): plt.plot(data['distances'][b], [data['energy'][b][str(Spin.up)][i][j] for j in range(len(data['distances'][b]))], 'b-', linewidth=band_linewidth) if self._bs.is_spin_polarized: plt.plot(data['distances'][b], [data['energy'][b][str(Spin.down)][i][j] for j in range(len(data['distances'][b]))], 'r--', linewidth=band_linewidth) for j in range( len(data['energy'][b][str(Spin.up)][i])): plt.plot(data['distances'][b][j], data['energy'][b][str(Spin.down)][i][ j], 'ro', markersize= proj[b][str(Spin.down)][i][j][str(el)][ o] * 15.0) for j in range(len(data['energy'][b][str(Spin.up)][i])): plt.plot(data['distances'][b][j], data['energy'][b][str(Spin.up)][i][j], 'bo', markersize= proj[b][str(Spin.up)][i][j][str(el)][ o] * 15.0) if ylim is None: if self._bs.is_metal(): if zero_to_efermi: plt.ylim(e_min, e_max) else: plt.ylim(self._bs.efermi + e_min, self._bs._efermi + e_max) else: if vbm_cbm_marker: for cbm in data['cbm']: plt.scatter(cbm[0], cbm[1], color='r', marker='o', s=100) for vbm in data['vbm']: plt.scatter(vbm[0], vbm[1], color='g', marker='o', s=100) plt.ylim(data['vbm'][0][1] + e_min, data['cbm'][0][1] + e_max) else: plt.ylim(ylim) plt.title(str(el) + " " + str(o)) count += 1 return plt def get_elt_projected_plots(self, zero_to_efermi=True, ylim=None, vbm_cbm_marker=False): """ Method returning a plot composed of subplots along different elements Returns: a pylab object with different subfigures for each projection The blue and red colors are for spin up and spin down The bigger the red or blue dot in the band structure the higher character for the corresponding element and orbital """ band_linewidth = 1.0 proj = self._get_projections_by_branches({e.symbol: ['s', 'p', 'd'] for e in self._bs._structure.composition.elements}) data = self.bs_plot_data(zero_to_efermi) from pymatgen.util.plotting_utils import get_publication_quality_plot plt = get_publication_quality_plot(12, 8) e_min = -4 e_max = 4 if self._bs.is_metal(): e_min = -10 e_max = 10 count = 1 for el in self._bs._structure.composition.elements: plt.subplot(220 + count) self._maketicks(plt) for b in range(len(data['distances'])): for i in range(self._nb_bands): plt.plot(data['distances'][b], [data['energy'][b][str(Spin.up)][i][j] for j in range(len(data['distances'][b]))], 'b-', linewidth=band_linewidth) if self._bs.is_spin_polarized: plt.plot(data['distances'][b], [data['energy'][b][str(Spin.down)][i][j] for j in range(len(data['distances'][b]))], 'r--', linewidth=band_linewidth) for j in range(len(data['energy'][b][str(Spin.up)][i])): plt.plot(data['distances'][b][j], data['energy'][b][str(Spin.down)][i][j], 'ro', markersize=sum([proj[b][str(Spin.down)][i][ j][str(el)][o] for o in proj[b] [str(Spin.down)][i][j][ str(el)]]) * 15.0) for j in range(len(data['energy'][b][str(Spin.up)][i])): plt.plot(data['distances'][b][j], data['energy'][b][str(Spin.up)][i][j], 'bo', markersize=sum( [proj[b][str(Spin.up)][i][j][str(el)][o] for o in proj[b] [str(Spin.up)][i][j][str(el)]]) * 15.0) if ylim is None: if 
self._bs.is_metal(): if zero_to_efermi: plt.ylim(e_min, e_max) else: plt.ylim(self._bs.efermi + e_min, self._bs._efermi + e_max) else: if vbm_cbm_marker: for cbm in data['cbm']: plt.scatter(cbm[0], cbm[1], color='r', marker='o', s=100) for vbm in data['vbm']: plt.scatter(vbm[0], vbm[1], color='g', marker='o', s=100) plt.ylim(data['vbm'][0][1] + e_min, data['cbm'][0][1] + e_max) else: plt.ylim(ylim) plt.title(str(el)) count += 1 return plt def get_elt_projected_plots_color(self, zero_to_efermi=True, elt_ordered=None): """ returns a pylab plot object with one plot where the band structure line color depends on the character of the band (along different elements). Each element is associated with red, green or blue and the corresponding rgb color depending on the character of the band is used. The method can only deal with binary and ternary compounds spin up and spin down are differientiated by a '-' and a '--' line Args: elt_ordered: A list of Element ordered. The first one is red, second green, last blue Returns: a pylab object """ band_linewidth = 3.0 if len(self._bs._structure.composition.elements) > 3: raise ValueError if elt_ordered is None: elt_ordered = self._bs._structure.composition.elements proj = self._get_projections_by_branches( {e.symbol: ['s', 'p', 'd'] for e in self._bs._structure.composition.elements}) data = self.bs_plot_data(zero_to_efermi) from pymatgen.util.plotting_utils import get_publication_quality_plot plt = get_publication_quality_plot(12, 8) spins = [Spin.up] if self._bs.is_spin_polarized: spins = [Spin.up, Spin.down] self._maketicks(plt) for s in spins: for b in range(len(data['distances'])): for i in range(self._nb_bands): for j in range(len(data['energy'][b][str(s)][i]) - 1): sum_e = 0.0 for el in elt_ordered: sum_e = sum_e + \ sum([proj[b][str(s)][i][j][str(el)][o] for o in proj[b][str(s)][i][j][str(el)]]) if sum_e == 0.0: color = [0.0] * len(elt_ordered) else: color = [sum([proj[b][str(s)][i][j][str(el)][o] for o in proj[b][str(s)][i][j][str(el)]]) / sum_e for el in elt_ordered] if len(color) == 2: color.append(0.0) color[2] = color[1] color[1] = 0.0 sign = '-' if s == Spin.down: sign = '--' plt.plot([data['distances'][b][j], data['distances'][b][j + 1]], [data['energy'][b][str(s)][i][j], data['energy'][b][str(s)][i][j + 1]], sign, color=color, linewidth=band_linewidth) plt.ylim(data['vbm'][0][1] - 4.0, data['cbm'][0][1] + 2.0) return plt def _qvertex_target(data, index): """ Input data should be in the form of a list of a list of floats. index is the index of the targeted point Returns the vertices of the voronoi construction around this target point. 
""" from pyhull import qvoronoi output = qvoronoi("p QV" + str(index), data) output.pop(0) output.pop(0) return [[float(i) for i in row.split()] for row in output] def get_lines_voronoi(data): from pyhull import qconvex output = qconvex("o", data) nb_points = int(output[1].split(" ")[0]) list_lines = [] list_points = [] for i in range(2, 2 + nb_points): list_points.append([float(c) for c in output[i].strip().split()]) facets = [] for i in range(2 + nb_points, len(output)): if output[i] != '': tmp = output[i].strip().split(" ") facets.append([int(tmp[j]) for j in range(1, len(tmp))]) for i in range(len(facets)): for line in itertools.combinations(facets[i], 2): for j in range(len(facets)): if i != j and line[0] in facets[j] and line[1] in facets[j]: # check if the two facets i and j are not coplanar vector1 = np.array(list_points[facets[j][0]]) \ - np.array(list_points[facets[j][1]]) vector2 = np.array(list_points[facets[j][0]]) \ - np.array(list_points[facets[j][2]]) n1 = np.cross(vector1, vector2) vector1 = np.array(list_points[facets[i][0]]) \ - np.array(list_points[facets[i][1]]) vector2 = np.array(list_points[facets[i][0]]) \ - np.array(list_points[facets[i][2]]) n2 = np.cross(vector1, vector2) dot = math.fabs(np.dot(n1, n2) / (np.linalg.norm(n1) * np.linalg.norm(n2))) if 1.05 > dot > 0.95: continue list_lines.append({'start': list_points[line[0]], 'end': list_points[line[1]]}) break return list_lines
migueldiascosta/pymatgen
pymatgen/electronic_structure/plotter.py
Python
mit
42,465
[ "Gaussian", "pymatgen" ]
e14d4f46c83e14bd10ea34a01b9810f82df22b1b70e680f0902529f20b0ac082
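A hedged usage sketch for the BSPlotterProjected record above; the vasprun.xml path, the Cu/O projection choice, and the modern pymatgen import paths are illustrative assumptions, not taken from the file:

from pymatgen.io.vasp.outputs import Vasprun
from pymatgen.electronic_structure.plotter import BSPlotterProjected

# Parse a line-mode VASP run that was written with orbital projections (LORBIT).
run = Vasprun("vasprun.xml", parse_projected_eigen=True)
bs = run.get_band_structure(line_mode=True)

plotter = BSPlotterProjected(bs)
# One subplot per element/orbital pair, using the {Element: [orbitals]}
# format described in the get_projected_plots_dots docstring.
plt = plotter.get_projected_plots_dots({"Cu": ["d", "s"], "O": ["p"]})
plt.show()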
# -*- coding: utf-8 -*- import numpy as np import pymoskito as pm import matplotlib as mpl import matplotlib.patches import matplotlib.transforms from . import settings as st HKS41K100 = '#0b2a51' HKS44K100 = '#0059a3' HKS44K80 = '#346FB2' HKS36K100 = '#512947' HKS33K100 = '#811a78' HKS57K100 = '#007a47' HKS65K100 = '#22ad36' HKS07K100 = '#e87b14' HKS07K80 = '#ef9c51' try: import vtk class BallBeamVisualizer(pm.VtkVisualizer): def __init__(self, renderer): pm.VtkVisualizer.__init__(self, renderer) # -------- add the beam ---- # geometry self.beam = vtk.vtkCubeSource() self.beam.SetXLength(st.visBeamLength) self.beam.SetYLength(st.visBeamWidth) self.beam.SetZLength(st.visBeamDepth) # mapper self.beamMapper = vtk.vtkPolyDataMapper() self.beamMapper.SetInputConnection(self.beam.GetOutputPort()) # actor self.beamActor = vtk.vtkLODActor() self.beamActor.SetMapper(self.beamMapper) # make it look nice self.beamProp = self.beamActor.GetProperty() self.beamProp.SetColor(101 / 255, 123 / 255, 131 / 255) self.ren.AddActor(self.beamActor) # -------- add the ball ---- # geometry self.ball = vtk.vtkSphereSource() self.ball.SetRadius(st.visR) self.ball.SetThetaResolution(20) self.ball.SetPhiResolution(20) # mapper self.ballMapper = vtk.vtkPolyDataMapper() self.ballMapper.SetInputConnection(self.ball.GetOutputPort()) # actor self.ballActor = vtk.vtkLODActor() self.ballActor.SetMapper(self.ballMapper) # make it look nice self.ballProp = self.ballActor.GetProperty() self.ballProp.SetColor(255 / 255, 255 / 255, 0) self.ballProp.SetAmbient(0.2) self.ballProp.SetDiffuse(0.8) self.ballProp.SetSpecular(0.5) self.ballProp.SetSpecularPower(0.5) self.ren.AddActor(self.ballActor) # add background self.ren.GradientBackgroundOn() self.ren.SetBackground(228 / 255, 232 / 255, 213 / 255) self.ren.SetBackground2(38 / 255, 139 / 255, 210 / 255) # apply some sane initial state self.update_scene(np.array([0, 0, 0, 0])) # get everybody into the frame self.ren.ResetCamera() self.ren.GetActiveCamera().Zoom(1.7) # save this view self.save_camera_pose() @staticmethod def calc_positions(x): """ Calculate stationary vectors and rot. 
matrices for bodies """ # beam t_beam = np.array([[np.cos(x[2]), -np.sin(x[2]), 0], [np.sin(x[2]), np.cos(x[2]), 0], [0, 0, 1]]) r_beam0 = np.array([0, -st.visR - st.visBeamWidth / 2, 0]) r_beam = np.dot(t_beam, r_beam0) # ball r_ball0 = np.array([x[0], 0, 0]) r_ball = np.dot(t_beam, r_ball0) phi = x[0] / st.visR t_ball = np.array([[np.cos(phi), -np.sin(phi), 0], [np.sin(phi), np.cos(phi), 0], [0, 0, 1]]) return [r_beam, t_beam, r_ball, t_ball] @staticmethod def set_body_state(actor, r, t): poke = vtk.vtkMatrix4x4() for i in range(3): for n in range(3): poke.SetElement(i, n, t[i, n]) poke.SetElement(i, 3, r[i]) actor.PokeMatrix(poke) def update_scene(self, x): """ update the body states """ r_beam, t_beam, r_ball, t_ball = self.calc_positions(x) self.set_body_state(self.beamActor, r_beam, t_beam) self.set_body_state(self.ballActor, r_ball, t_ball) pm.register_visualizer(BallBeamVisualizer) except ImportError as e: vtk = None print("BallBeam Visualizer:") print(e) print("VTK Visualization not available.") class MplBallBeamVisualizer(pm.MplVisualizer): def __init__(self, q_widget, q_layout): pm.MplVisualizer.__init__(self, q_widget, q_layout) self.axes.set_xlim(st.x_min_plot, st.x_max_plot) self.axes.set_ylim(st.y_min_plot, st.y_max_plot) self.axes.set_aspect("equal") self.ball_base = mpl.patches.Circle( xy=[0, 0], radius=st.visR, color=HKS44K100, zorder=1) self.ball_highlight = mpl.patches.Wedge( center=[0, 0], r=st.visR, theta1=0, theta2=90, color=HKS07K100, zorder=2) self.beam = mpl.patches.Rectangle( xy=[-st.visBeamLength/2, -(st.visR + st.visBeamWidth)], width=st.visBeamLength, height=st.visBeamWidth, color=HKS41K100, zorder=0) self.axes.add_patch(self.ball_base) self.axes.add_patch(self.ball_highlight) self.axes.add_patch(self.beam) def update_scene(self, x): x_ball, dx_ball, theta_beam, dtheta_beam = x theta_ball = -x_ball / st.visR t_beam = (mpl.transforms.Affine2D().rotate_around(0, 0, theta_beam) + self.axes.transData) t_ball = (mpl.transforms.Affine2D().rotate_around(0, 0, theta_ball) + mpl.transforms.Affine2D().translate(x_ball, 0) + t_beam) # ball self.ball_base.set_transform(t_ball) self.ball_highlight.set_transform(t_ball) # beam self.beam.set_y(-(st.visR + st.visBeamWidth)) self.beam.set_transform(t_beam) self.canvas.draw() pm.register_visualizer(MplBallBeamVisualizer)
cklb/PyMoskito
pymoskito/examples/ballbeam/visualization.py
Python
bsd-3-clause
5,996
[ "VTK" ]
8fad5414703ab088973fc8351b9cf2b42a859f0418eafa0c19b0487772ffb52f
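The calc_positions helper above encodes the ball-and-beam kinematics: a rotation matrix for the beam angle, the ball centre carried along with the beam, and a rolling angle phi = x / r. A minimal standalone check of that arithmetic (the radius value is an illustrative stand-in for st.visR):

import numpy as np

vis_r = 0.01                                  # assumed ball radius, stands in for st.visR
x = np.array([0.2, 0.0, np.pi / 6, 0.0])      # [ball position, ball velocity, beam angle, beam rate]

t_beam = np.array([[np.cos(x[2]), -np.sin(x[2]), 0.0],
                   [np.sin(x[2]),  np.cos(x[2]), 0.0],
                   [0.0,           0.0,          1.0]])
r_ball = t_beam.dot(np.array([x[0], 0.0, 0.0]))   # ball centre rotated with the beam
phi = x[0] / vis_r                                # rolling-without-slipping ball rotation
print(r_ball, phi)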
#!/usr/bin/env python import os import re import sys import numpy as np from chrom import * from ..util import util as u from ..util.elements import ELEMENTS # # NWChem output file # class NWChem(Chrom): '''A class to parse NWChem td logfiles.''' def __init__(self, outfile): # Inherit from Chrom Chrom.__init__(self, outfile) # Fill in attributes self.type = self.__class__.__name__ self.get_data() def get_data(self): '''Fills in class attributes with the data from td logfile.''' # # Molecular Properties # parsed_data = self.extract() # Geometrical properties self.Z_atoms = parsed_data[0] self.atoms = parsed_data[1] self.coords = parsed_data[2] self.natoms = len(self.atoms) self.com = self.calc_com(self.atoms, self.coords) # Electronic properties self.energies = parsed_data[3] self.ntran = len(self.energies) self.f_osc = parsed_data[4] self.mu_len = parsed_data[5] self.r_vel = None self.r_len = None self.mu_vel = None self.mag = None self.trchgs = np.zeros((self.natoms, self.ntran)) pass def extract(self): '''Parses G09 td logfile for geometric and electronic properties.''' with open(self.file) as f: structure = [] energies = [] oscillators = [] mu_len = [] for line in f: # # Reoriented geometry, overwrite previous # if "XYZ format geometry" in line: structure = [] line = u.skiplines(f, 3) data = line.split() while len(data) == 4: atom = data[0] atom_x = float(data[1]) atom_y = float(data[2]) atom_z = float(data[3]) structure.append([atom, atom_x, atom_y, atom_z]) data = next(f).split() # # Excitation Energies # pattern = re.compile('\s+Root\s+\d+\s+singlet') if pattern.search(line): energy = float(line.split()[-2]) energies.append(energy) line = u.skiplines(f, 1) while '-------------' not in line: # # Transition Electric Dipole Moment # if 'Transition Moments X' in line: mu_x = float(line.split()[3]) mu_y = float(line.split()[5]) mu_z = float(line.split()[7]) mu_len.append([mu_x, mu_y, mu_z]) # # Oscillator Strength # if 'Dipole Oscillator Strength' in line: oscillators.append(float(line.split()[-1])) # Update line value for while cycle line = u.skiplines(f) Z_atoms = [ ELEMENTS[x[0]].number for x in structure ] atoms = [ x[0] for x in structure ] coords = np.array([ x[1:] for x in structure ]) energies = np.array(energies) oscillators = np.array(oscillators) mu_len = np.array(mu_len) return Z_atoms, atoms, coords, energies, oscillators, mu_len if __name__ == '__main__': a = NWChem("../data/NWChem/nwchem_td.out") a.save_es_analysis(2, 25, 31) a.save_visdip(center=1) pass
dpadula85/ExSPy
stable/QM_parser/parser/nwchem.py
Python
gpl-3.0
3,785
[ "NWChem" ]
9465e11c391b87af75748e92f26d0bda9687a4616263a7ab0640cc51bb5e406a
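Besides the __main__ block already in the record above, a hedged sketch of inspecting the parsed attributes directly (the logfile path is illustrative):

spec = NWChem("nwchem_td.out")     # hypothetical NWChem TDDFT logfile
print(spec.natoms, spec.ntran)     # atoms in the geometry, excited states parsed
print(spec.energies)               # excitation energies read from the "Root ... singlet" lines
print(spec.f_osc)                  # dipole oscillator strengths
print(spec.mu_len)                 # transition electric dipole moments (length gauge)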
#!/usr/bin/python # -*- coding: utf-8 -*- # # (c) 2015, Brian Coca <bcoca@ansible.com> # # This file is part of Ansible # # Ansible is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Ansible is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Ansible. If not, see <http://www.gnu.org/licenses/> DOCUMENTATION = ''' --- module: svc author: Brian Coca version_added: short_description: Manage daemontools services. description: - Controls daemontools services on remote hosts using the svc utility. options: name: required: true description: - Name of the service to manage. state: required: false choices: [ started, stopped, restarted, reloaded, once ] description: - C(Started)/C(stopped) are idempotent actions that will not run commands unless necessary. C(restarted) will always bounce the svc (svc -t) and C(killed) will always bounce the svc (svc -k). C(reloaded) will send a sigusr1 (svc -u). C(once) will run a normally downed svc once (svc -o), not really an idempotent operation. downed: required: false choices: [ "yes", "no" ] default: no description: - Should a 'down' file exist or not, if it exists it disables auto startup. defaults to no. Downed does not imply stopped. enabled: required: false choices: [ "yes", "no" ] description: - Wheater the service is enabled or not, if disabled it also implies stopped. Make note that a service can be enabled and downed (no auto restart). service_dir: required: false default: /service description: - directory svscan watches for services service_src: required: false description: - directory where services are defined, the source of symlinks to service_dir. 
''' EXAMPLES = ''' # Example action to start svc dnscache, if not running - svc: name=dnscache state=started # Example action to stop svc dnscache, if running - svc: name=dnscache state=stopped # Example action to kill svc dnscache, in all cases - svc : name=dnscache state=killed # Example action to restart svc dnscache, in all cases - svc : name=dnscache state=restarted # Example action to reload svc dnscache, in all cases - svc: name=dnscache state=reloaded # Example using alt svc directory location - svc: name=dnscache state=reloaded service_dir=/var/service ''' import platform import shlex def _load_dist_subclass(cls, *args, **kwargs): ''' Used for derivative implementations ''' subclass = None distro = kwargs['module'].params['distro'] # get the most specific superclass for this platform if distro is not None: for sc in cls.__subclasses__(): if sc.distro is not None and sc.distro == distro: subclass = sc if subclass is None: subclass = cls return super(cls, subclass).__new__(subclass) class Svc(object): """ Main class that handles daemontools, can be subclassed and overriden in case we want to use a 'derivative' like encore, s6, etc """ #def __new__(cls, *args, **kwargs): # return _load_dist_subclass(cls, args, kwargs) def __init__(self, module): self.extra_paths = [ '/command', '/usr/local/bin' ] self.report_vars = ['state', 'enabled', 'downed', 'svc_full', 'src_full', 'pid', 'duration', 'full_state'] self.module = module self.name = module.params['name'] self.service_dir = module.params['service_dir'] self.service_src = module.params['service_src'] self.enabled = None self.downed = None self.full_state = None self.state = None self.pid = None self.duration = None self.svc_cmd = module.get_bin_path('svc', opt_dirs=self.extra_paths) self.svstat_cmd = module.get_bin_path('svstat', opt_dirs=self.extra_paths) self.svc_full = '/'.join([ self.service_dir, self.name ]) self.src_full = '/'.join([ self.service_src, self.name ]) self.enabled = os.path.lexists(self.svc_full) if self.enabled: self.downed = os.path.lexists('%s/down' % self.svc_full) self.get_status() else: self.downed = os.path.lexists('%s/down' % self.src_full) self.state = 'stopped' def enable(self): if os.path.exists(self.src_full): try: os.symlink(self.src_full, self.svc_full) except OSError, e: self.module.fail_json(path=self.src_full, msg='Error while linking: %s' % str(e)) else: self.module.fail_json(msg="Could not find source for service to enable (%s)." 
% self.src_full) def disable(self): try: os.unlink(self.svc_full) except OSError, e: self.module.fail_json(path=self.svc_full, msg='Error while unlinking: %s' % str(e)) self.execute_command([self.svc_cmd,'-dx',self.src_full]) src_log = '%s/log' % self.src_full if os.path.exists(src_log): self.execute_command([self.svc_cmd,'-dx',src_log]) def get_status(self): (rc, out, err) = self.execute_command([self.svstat_cmd, self.svc_full]) if err is not None and err: self.full_state = self.state = err else: self.full_state = out m = re.search('\(pid (\d+)\)', out) if m: self.pid = m.group(1) m = re.search('(\d+) seconds', out) if m: self.duration = m.group(1) if re.search(' up ', out): self.state = 'start' elif re.search(' down ', out): self.state = 'stopp' else: self.state = 'unknown' return if re.search(' want ', out): self.state += 'ing' else: self.state += 'ed' def start(self): return self.execute_command([self.svc_cmd, '-u', self.svc_full]) def stopp(self): return self.stop() def stop(self): return self.execute_command([self.svc_cmd, '-d', self.svc_full]) def once(self): return self.execute_command([self.svc_cmd, '-o', self.svc_full]) def reload(self): return self.execute_command([self.svc_cmd, '-1', self.svc_full]) def restart(self): return self.execute_command([self.svc_cmd, '-t', self.svc_full]) def kill(self): return self.execute_command([self.svc_cmd, '-k', self.svc_full]) def execute_command(self, cmd): try: (rc, out, err) = self.module.run_command(' '.join(cmd)) except Exception, e: self.module.fail_json(msg="failed to execute: %s" % str(e)) return (rc, out, err) def report(self): self.get_status() states = {} for k in self.report_vars: states[k] = self.__dict__[k] return states # =========================================== # Main control flow def main(): module = AnsibleModule( argument_spec = dict( name = dict(required=True), state = dict(choices=['started', 'stopped', 'restarted', 'killed', 'reloaded', 'once']), enabled = dict(required=False, type='bool', choices=BOOLEANS), downed = dict(required=False, type='bool', choices=BOOLEANS), dist = dict(required=False, default='daemontools'), service_dir = dict(required=False, default='/service'), service_src = dict(required=False, default='/etc/service'), ), supports_check_mode=True, ) state = module.params['state'] enabled = module.params['enabled'] downed = module.params['downed'] svc = Svc(module) changed = False orig_state = svc.report() if enabled is not None and enabled != svc.enabled: changed = True if not module.check_mode: try: if enabled: svc.enable() else: svc.disable() except (OSError, IOError), e: module.fail_json(msg="Could change service link: %s" % str(e)) if state is not None and state != svc.state: changed = True if not module.check_mode: getattr(svc,state[:-2])() if downed is not None and downed != svc.downed: changed = True if not module.check_mode: d_file = "%s/down" % svc.svc_full try: if downed: open(d_file, "a").close() else: os.unlink(d_file) except (OSError, IOError), e: module.fail_json(msg="Could change downed file: %s " % (str(e))) module.exit_json(changed=changed, svc=svc.report()) # this is magic, not normal python include from ansible.module_utils.basic import * main()
lberruti/ansible-modules-extras
system/svc.py
Python
gpl-3.0
9,616
[ "Brian" ]
8eb470e115185593ba3b304b5cd7a0c1bb4220def4d6ec549cf46b38c1b294f1
# Copyright 2015 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Layers for regularization models via the addition of noise. """ from __future__ import absolute_import from __future__ import division from __future__ import print_function import numpy as np from tensorflow.contrib.keras.python.keras import backend as K from tensorflow.contrib.keras.python.keras.engine import Layer class GaussianNoise(Layer): """Apply additive zero-centered Gaussian noise. This is useful to mitigate overfitting (you could see it as a form of random data augmentation). Gaussian Noise (GS) is a natural choice as corruption process for real valued inputs. As it is a regularization layer, it is only active at training time. Arguments: stddev: float, standard deviation of the noise distribution. Input shape: Arbitrary. Use the keyword argument `input_shape` (tuple of integers, does not include the samples axis) when using this layer as the first layer in a model. Output shape: Same shape as input. """ def __init__(self, stddev, **kwargs): super(GaussianNoise, self).__init__(**kwargs) self.supports_masking = True self.stddev = stddev def call(self, inputs, training=None): def noised(): return inputs + K.random_normal( shape=K.shape(inputs), mean=0., stddev=self.stddev) return K.in_train_phase(noised, inputs, training=training) def get_config(self): config = {'stddev': self.stddev} base_config = super(GaussianNoise, self).get_config() return dict(list(base_config.items()) + list(config.items())) class GaussianDropout(Layer): """Apply multiplicative 1-centered Gaussian noise. As it is a regularization layer, it is only active at training time. Arguments: rate: float, drop probability (as with `Dropout`). The multiplicative noise will have standard deviation `sqrt(rate / (1 - rate))`. Input shape: Arbitrary. Use the keyword argument `input_shape` (tuple of integers, does not include the samples axis) when using this layer as the first layer in a model. Output shape: Same shape as input. References: - [Dropout: A Simple Way to Prevent Neural Networks from Overfitting Srivastava, Hinton, et al. 2014](http://www.cs.toronto.edu/~rsalakhu/papers/srivastava14a.pdf) """ def __init__(self, rate, **kwargs): super(GaussianDropout, self).__init__(**kwargs) self.supports_masking = True self.rate = rate def call(self, inputs, training=None): if 0 < self.rate < 1: def noised(): stddev = np.sqrt(self.rate / (1.0 - self.rate)) return inputs * K.random_normal( shape=K.shape(inputs), mean=1.0, stddev=stddev) return K.in_train_phase(noised, inputs, training=training) return inputs def get_config(self): config = {'rate': self.rate} base_config = super(GaussianDropout, self).get_config() return dict(list(base_config.items()) + list(config.items()))
unnikrishnankgs/va
venv/lib/python3.5/site-packages/tensorflow/contrib/keras/python/keras/layers/noise.py
Python
bsd-2-clause
3,671
[ "Gaussian" ]
56614cfc48d79705e850735065b3c8c5b6db2e7aaaa28ca2d19dd8398aeec034
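GaussianDropout above scales its multiplicative noise by sqrt(rate / (1 - rate)), so rate = 0.5 gives unit standard deviation. A quick numeric check of that relation, independent of the Keras backend:

import numpy as np

for rate in (0.1, 0.3, 0.5):
    stddev = np.sqrt(rate / (1.0 - rate))   # noise scale used inside GaussianDropout.call
    print(rate, round(float(stddev), 4))    # 0.1 -> 0.3333, 0.3 -> 0.6547, 0.5 -> 1.0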
class VehicleInfo(object): def __init__(self): """ make_target: option passed to make to create binaries. Usually sitl, and "-debug" may be appended if -D is passed to sim_vehicle.py default_params_filename: filename of default parameters file. Taken to be relative to autotest dir. extra_mavlink_cmds: extra parameters that will be passed to mavproxy """ self.options = { "ArduCopter": { "default_frame": "quad", "frames": { # COPTER "+": { "waf_target": "bin/arducopter", "default_params_filename": "default_params/copter.parm", }, "quad": { "model": "+", "waf_target": "bin/arducopter", "default_params_filename": "default_params/copter.parm", }, "X": { "waf_target": "bin/arducopter", "default_params_filename": "default_params/copter.parm", # this param set FRAME doesn't actually work because mavproxy # won't set a parameter unless it knows of it, and the # param fetch happens asynchronously "extra_mavlink_cmds": "param fetch frame; param set FRAME 1;", }, "hexa": { "make_target": "sitl", "waf_target": "bin/arducopter", "default_params_filename": ["default_params/copter.parm", "default_params/copter-hexa.parm" ], }, "octa-quad": { "make_target": "sitl", "waf_target": "bin/arducopter", "default_params_filename": ["default_params/copter.parm", "default_params/copter-octaquad.parm" ], }, "octa": { "make_target": "sitl", "waf_target": "bin/arducopter", "default_params_filename": ["default_params/copter.parm", "default_params/copter-octa.parm" ], }, "tri": { "make_target": "sitl", "waf_target": "bin/arducopter", "default_params_filename": ["default_params/copter.parm", "default_params/copter-tri.parm" ], }, "y6": { "make_target": "sitl", "waf_target": "bin/arducopter", "default_params_filename": ["default_params/copter.parm", "default_params/copter-y6.parm" ], }, "firefly": { "waf_target": "bin/arduplane", "default_params_filename": "default_params/firefly.parm", }, "dodeca-hexa": { "make_target": "sitl", "waf_target": "bin/arducopter", "default_params_filename": ["default_params/copter.parm", "default_params/copter-dodecahexa.parm" ], }, # SIM "IrisRos": { "waf_target": "bin/arducopter", "default_params_filename": "default_params/copter.parm", }, "gazebo-iris": { "waf_target": "bin/arducopter", "default_params_filename": ["default_params/copter.parm", "default_params/gazebo-iris.parm"], }, # HELICOPTER "heli": { "make_target": "sitl-heli", "waf_target": "bin/arducopter-heli", "default_params_filename": "default_params/copter-heli.parm", }, "heli-dual": { "make_target": "sitl-heli-dual", "waf_target": "bin/arducopter-heli", "default_params_filename": "default_params/copter-heli-dual.parm", }, "heli-compound": { "make_target": "sitl-heli-compound", "waf_target": "bin/arducopter-heli", }, "singlecopter": { "make_target": "sitl", "waf_target": "bin/arducopter", "default_params_filename": "default_params/copter-single.parm", }, "coaxcopter": { "make_target": "sitl", "waf_target": "bin/arducopter", "default_params_filename": "default_params/copter-coax.parm", }, "calibration": { "extra_mavlink_cmds": "module load sitl_calibration;", }, }, }, "ArduPlane": { "default_frame": "jsbsim", "frames": { # PLANE "quadplane-tilttri": { "make_target": "sitl", "waf_target": "bin/arduplane", "default_params_filename": "default_params/quadplane-tilttri.parm", }, "quadplane-tilttrivec": { "make_target": "sitl", "waf_target": "bin/arduplane", "default_params_filename": "default_params/quadplane-tilttrivec.parm", }, "quadplane-tri": { "make_target": "sitl", "waf_target": "bin/arduplane", "default_params_filename": 
"default_params/quadplane-tri.parm", }, "quadplane-cl84" : { "make_target" : "sitl", "waf_target" : "bin/arduplane", "default_params_filename": "default_params/quadplane-cl84.parm", }, "quadplane": { "waf_target": "bin/arduplane", "default_params_filename": "default_params/quadplane.parm", }, "plane-elevon": { "waf_target": "bin/arduplane", "default_params_filename": "default_params/plane-elevons.parm", }, "plane-vtail": { "waf_target": "bin/arduplane", "default_params_filename": "default_params/plane-vtail.parm", }, "plane-tailsitter": { "waf_target": "bin/arduplane", "default_params_filename": "default_params/plane-tailsitter.parm", }, "plane": { "waf_target": "bin/arduplane", "default_params_filename": "default_params/plane.parm", }, "gazebo-zephyr": { "waf_target": "bin/arduplane", "default_params_filename": "default_params/gazebo-zephyr.parm", }, "last_letter": { "waf_target": "bin/arduplane", }, "CRRCSim": { "waf_target": "bin/arduplane", }, "jsbsim": { "waf_target": "bin/arduplane", "default_params_filename": "default_params/plane-jsbsim.parm", }, "calibration": { "extra_mavlink_cmds": "module load sitl_calibration;", }, }, }, "APMrover2": { "default_frame": "rover", "frames": { # ROVER "rover": { "waf_target": "bin/ardurover", "default_params_filename": "default_params/rover.parm", }, "rover-skid": { "waf_target": "bin/ardurover", "default_params_filename": "default_params/rover-skid.parm", }, "gazebo-rover": { "waf_target": "bin/ardurover", "default_params_filename": "default_params/rover-skid.parm", }, "calibration": { "extra_mavlink_cmds": "module load sitl_calibration;", }, }, }, "ArduSub": { "default_frame": "vectored", "frames": { "vectored": { "waf_target": "bin/ardusub", "default_params_filename": "default_params/sub.parm", }, }, }, "AntennaTracker": { "default_frame": "tracker", "frames": { "tracker": { "waf_target": "bin/antennatracker", }, }, }, } def default_frame(self, vehicle): return self.options[vehicle]["default_frame"] def default_waf_target(self, vehicle): """Returns a waf target based on vehicle type, which is often determined by which directory the user is in""" default_frame = self.default_frame(vehicle) return self.options[vehicle]["frames"][default_frame]["waf_target"] def options_for_frame(self, frame, vehicle, opts): """Return informatiom about how to sitl for frame e.g. build-type==sitl""" ret = None frames = self.options[vehicle]["frames"] if frame in frames: ret = self.options[vehicle]["frames"][frame] else: for p in ["octa", "tri", "y6", "firefly", "heli", "gazebo", "last_letter", "jsbsim", "quadplane", "plane-elevon", "plane-vtail", "plane"]: if frame.startswith(p): ret = self.options[vehicle]["frames"][p] break if ret is None: if frame.endswith("-heli"): ret = self.options[vehicle]["frames"]["heli"] if ret is None: print("WARNING: no config for frame (%s)" % frame) ret = {} if "model" not in ret: ret["model"] = frame if "sitl-port" not in ret: ret["sitl-port"] = True if opts.model is not None: ret["model"] = opts.model if (ret["model"].find("xplane") != -1 or ret["model"].find("flightaxis") != -1): ret["sitl-port"] = False if "make_target" not in ret: ret["make_target"] = "sitl" if "waf_target" not in ret: ret["waf_target"] = self.default_waf_target(vehicle) if opts.build_target is not None: ret["make_target"] = opts.build_target ret["waf_target"] = opts.build_target return ret
peterbarker/ardupilot-1
Tools/autotest/pysim/vehicleinfo.py
Python
gpl-3.0
10,252
[ "Firefly" ]
79f23e0aedacda262ae05f6a577d860c3b4c8bd8c49be3593021e3758f10b4bc
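options_for_frame above resolves a frame name to its build and model settings, falling back through known prefixes and filling in defaults. A hedged lookup sketch; DummyOpts is a hypothetical stand-in for the option object sim_vehicle.py normally supplies:

class DummyOpts(object):
    model = None           # no --model override
    build_target = None    # no --build-target override

vinfo = VehicleInfo()
print(vinfo.default_frame("ArduCopter"))      # "quad"
print(vinfo.default_waf_target("ArduPlane"))  # "bin/arduplane"

frame_opts = vinfo.options_for_frame("gazebo-iris", "ArduCopter", DummyOpts())
print(frame_opts["waf_target"], frame_opts["make_target"])   # "bin/arducopter", "sitl"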
#! /usr/bin/env python import random,sys import re import math import collections import numpy as np import time import operator from scipy.io import mmread, mmwrite from random import randint from sklearn import cross_validation from sklearn import linear_model from sklearn.grid_search import GridSearchCV from sklearn import preprocessing as pp from sklearn.svm import SVR from sklearn.ensemble import GradientBoostingRegressor, GradientBoostingClassifier from sklearn.ensemble import RandomForestRegressor, RandomForestClassifier from sklearn.ensemble import ExtraTreesRegressor from sklearn.decomposition import ProbabilisticPCA, KernelPCA from sklearn.decomposition import NMF from sklearn.pipeline import Pipeline from sklearn.svm import LinearSVC from sklearn.linear_model import LogisticRegression, Ridge, Lasso, ElasticNet import scipy.stats as stats from sklearn import tree from sklearn.feature_selection import f_regression from sklearn.metrics import precision_recall_curve from sklearn.metrics import auc, f1_score from sklearn.gaussian_process import GaussianProcess import features # working directory dir = '.' label_index = 770 # load train data def load_train_fs(): # In the validation process, the training data was randomly shuffled firstly. # For the prediction process, there is no need to shuffle the dataset. # Owing to out of memory problem, Gaussian process only use part of training data, the prediction of gaussian process # may be a little different from the model,which the training data was shuffled. train_fs = np.genfromtxt(open(dir + '/train_v2_100.csv','rb'), delimiter=',', skip_header=1) col_mean = stats.nanmean(train_fs, axis=0) inds = np.where(np.isnan(train_fs)) train_fs[inds] = np.take(col_mean, inds[1]) train_fs[np.isinf(train_fs)] = 0 return train_fs # load test data def load_test_fs(): test_fs = np.genfromtxt(open(dir + '/test_v2_100.csv','rb'), delimiter=',', skip_header = 1) col_mean = stats.nanmean(test_fs, axis=0) inds = np.where(np.isnan(test_fs)) test_fs[inds] = np.take(col_mean, inds[1]) test_fs[np.isinf(test_fs)] = 0 return test_fs # extract features from test data def test_type(test_fs): x_Test = test_fs[:,range(1, label_index)] return x_Test # extract features from train data def train_type(train_fs): count=0 train_x_temp=[] default_count=0 non_default_count = 0 while(default_count<5 or non_default_count<5): randline=random.choice(train_fs) if randline[-1]==0 and non_default_count < 5: non_default_count+=1 train_x_temp.append(randline) elif randline[-1] !=0 and default_count < 5: default_count+=1 train_x_temp.append(randline) train_x = train_x_temp[:,range(1, label_index)] train_y= train_x_temp[:,-1] print len(train_y) return train_x, train_y # transform the loss to the binary form def toLabels(train_y): labels = np.zeros(len(train_y)) labels[train_y>0] = 1 return labels # generate the output file based to the predictions def output_preds(preds): out_file = dir + '/output.csv' fs = open(out_file,'w') fs.write('id,loss\n') for i in range(len(preds)): if preds[i] > 100: preds[i] = 100 elif preds[i] < 0: preds[i] = 0 strs = str(i+105472) + ',' + str(np.float(preds[i])) fs.write(strs + '\n'); fs.close() return # get the top feature indexes by invoking f_regression def getTopFeatures(train_x, train_y, n_features=100): for i in range(len(f_val)): if math.isnan(f_val[i]): f_val[i] = 0.0 f_val_dict[i] = f_val[i] if math.isnan(p_val[i]): p_val[i] = 0.0 p_val_dict[i] = p_val[i] sorted_f = sorted(f_val_dict.iteritems(), key=operator.itemgetter(1),reverse=True) sorted_p = 
sorted(p_val_dict.iteritems(), key=operator.itemgetter(1),reverse=True) feature_indexs = [] for i in range(0,n_features): feature_indexs.append(sorted_f[i][0]) # print len(feature_indexs) return feature_indexs # generate the new data, based on which features are generated, and used def get_data(train_x, feature_indexs, feature_minus_pair_list=[], feature_plus_pair_list=[], feature_mul_pair_list=[], feature_divide_pair_list = [], feature_pair_sub_mul_list=[], feature_pair_plus_mul_list = [],feature_pair_sub_divide_list = [], feature_minus2_pair_list = [],feature_mul2_pair_list=[], feature_sub_square_pair_list=[], feature_square_sub_pair_list=[],feature_square_plus_pair_list=[]): sub_train_x = train_x[:,feature_indexs] for i in range(len(feature_minus_pair_list)): ind_i = feature_minus_pair_list[i][0] ind_j = feature_minus_pair_list[i][1] sub_train_x = np.column_stack((sub_train_x, train_x[:,ind_i]-train_x[:,ind_j])) for i in range(len(feature_plus_pair_list)): ind_i = feature_plus_pair_list[i][0] ind_j = feature_plus_pair_list[i][1] sub_train_x = np.column_stack((sub_train_x, train_x[:,ind_i] + train_x[:,ind_j])) for i in range(len(feature_mul_pair_list)): ind_i = feature_mul_pair_list[i][0] ind_j = feature_mul_pair_list[i][1] sub_train_x = np.column_stack((sub_train_x, train_x[:,ind_i] * train_x[:,ind_j])) for i in range(len(feature_divide_pair_list)): ind_i = feature_divide_pair_list[i][0] ind_j = feature_divide_pair_list[i][1] sub_train_x = np.column_stack((sub_train_x, train_x[:,ind_i] / train_x[:,ind_j])) for i in range(len(feature_pair_sub_mul_list)): ind_i = feature_pair_sub_mul_list[i][0] ind_j = feature_pair_sub_mul_list[i][1] ind_k = feature_pair_sub_mul_list[i][2] sub_train_x = np.column_stack((sub_train_x, (train_x[:,ind_i]-train_x[:,ind_j]) * train_x[:,ind_k])) return sub_train_x # use gbm classifier to predict whether the loan defaults or not def gbc_classify(train_x, train_y): feature_indexs = getTopFeatures(train_x, train_y) sub_x_Train = get_data(train_x, feature_indexs[:16], features.feature_pair_sub_list ,features.feature_pair_plus_list, features.feature_pair_mul_list, features.feature_pair_divide_list[:20], features.feature_pair_sub_mul_list[:20]) labels = toLabels(train_y) gbc = GradientBoostingClassifier(n_estimators=3000, max_depth=8) gbc.fit(sub_x_Train, labels) return gbc # use svm to predict the loss, based on the result of gbm classifier def gbc_svr_predict_part(gbc, train_x, train_y, test_x, feature_pair_sub_list, feature_pair_plus_list, feature_pair_mul_list, feature_pair_divide_list, feature_pair_sub_mul_list, feature_pair_sub_list_sf, feature_pair_plus_list2): feature_indexs = getTopFeatures(train_x, train_y) sub_x_Train = get_data(train_x, feature_indexs[:16], feature_pair_sub_list ,feature_pair_plus_list, feature_pair_mul_list, feature_pair_divide_list[:20], feature_pair_sub_mul_list[:20]) sub_x_Test = get_data(test_x, feature_indexs[:16], feature_pair_sub_list ,feature_pair_plus_list, feature_pair_mul_list, feature_pair_divide_list[:20], feature_pair_sub_mul_list[:20]) pred_labels = gbc.predict(sub_x_Test) pred_probs = gbc.predict_proba(sub_x_Test)[:,1] ind_test = np.where(pred_probs>0.55)[0] ind_train = np.where(train_y > 0)[0] ind_train0 = np.where(train_y == 0)[0] preds_all = np.zeros([len(sub_x_Test)]) flag = (sub_x_Test[:,16] >= 1) ind_tmp0 = np.where(flag)[0] ind_tmp = np.where(~flag)[0] sub_x_Train = get_data(train_x, feature_indexs[:100], feature_pair_sub_list_sf ,feature_pair_plus_list2[:100], feature_pair_mul_list[:40], feature_pair_divide_list, 
feature_pair_sub_mul_list) sub_x_Test = get_data(test_x, feature_indexs[:100], feature_pair_sub_list_sf ,feature_pair_plus_list2[:100], feature_pair_mul_list[:40], feature_pair_divide_list, feature_pair_sub_mul_list) sub_x_Train[:,101] = np.log(1-sub_x_Train[:,101]) sub_x_Test[ind_tmp,101] = np.log(1-sub_x_Test[ind_tmp,101]) scaler = pp.StandardScaler() scaler.fit(sub_x_Train) sub_x_Train = scaler.transform(sub_x_Train) sub_x_Test[ind_tmp] = scaler.transform(sub_x_Test[ind_tmp]) svr = SVR(C=16, kernel='rbf', gamma = 0.000122) svr.fit(sub_x_Train[ind_train], np.log(train_y[ind_train])) preds = svr.predict(sub_x_Test[ind_test]) preds_all[ind_test] = np.power(np.e, preds) preds_all[ind_tmp0] = 0 return preds_all # use gbm regression to predict the loss, based on the result of gbm classifier def gbc_gbr_predict_part(gbc, train_x, train_y, test_x, feature_pair_sub_list, feature_pair_plus_list, feature_pair_mul_list, feature_pair_divide_list, feature_pair_sub_mul_list, feature_pair_sub_list2): feature_indexs = getTopFeatures(train_x, train_y) sub_x_Train = get_data(train_x, feature_indexs[:16], feature_pair_sub_list ,feature_pair_plus_list, feature_pair_mul_list, feature_pair_divide_list[:20],feature_pair_sub_mul_list[:20]) sub_x_Test = get_data(test_x, feature_indexs[:16], feature_pair_sub_list ,feature_pair_plus_list, feature_pair_mul_list, feature_pair_divide_list[:20], feature_pair_sub_mul_list[:20]) pred_labels = gbc.predict(sub_x_Test) pred_probs = gbc.predict_proba(sub_x_Test)[:,1] ind_test = np.where(pred_probs>0.55)[0] ind_train = np.where(train_y > 0)[0] ind_train0 = np.where(train_y == 0)[0] preds_all = np.zeros([len(sub_x_Test)]) flag = (sub_x_Test[:,16] >= 1) ind_tmp0 = np.where(flag)[0] ind_tmp = np.where(~flag)[0] sub_x_Train = get_data(train_x, feature_indexs[:16], feature_pair_sub_list2[:70] ,feature_pair_plus_list, feature_pair_mul_list, feature_pair_divide_list, feature_pair_sub_mul_list) sub_x_Test = get_data(test_x, feature_indexs[:16], feature_pair_sub_list2[:70] ,feature_pair_plus_list, feature_pair_mul_list, feature_pair_divide_list, feature_pair_sub_mul_list) scaler = pp.StandardScaler() scaler.fit(sub_x_Train) sub_x_Train = scaler.transform(sub_x_Train) sub_x_Test[ind_tmp] = scaler.transform(sub_x_Test[ind_tmp]) gbr1000 = GradientBoostingRegressor(n_estimators=1300, max_depth=4, subsample=0.5, learning_rate=0.05) gbr1000.fit(sub_x_Train[ind_train], np.log(train_y[ind_train])) preds = gbr1000.predict(sub_x_Test[ind_test]) preds_all[ind_test] = np.power(np.e, preds) preds_all[ind_tmp0] = 0 return preds_all # predict the loss based on the Gaussian process regressor, which has been trained def gp_predict(clf, x_Test): size = len(x_Test) part_size = 3000 cnt = (size-1) / part_size + 1 preds = [] for i in range(cnt): if i < cnt - 1: pred_part = clf.predict(x_Test[i*part_size: (i+1) * part_size]) else: pred_part = clf.predict(x_Test[i*part_size: size]) preds.extend(pred_part) return np.power(np.e,preds) # train the gaussian process regressor def gbc_gp_predict_part(sub_x_Train, train_y, sub_x_Test_part): #Owing to out of memory, the model was trained by part of training data #Attention, this part was trained on the ram of more than 96G sub_x_Train[:,16] = np.log(1-sub_x_Train[:,16]) scaler = pp.StandardScaler() scaler.fit(sub_x_Train) sub_x_Train = scaler.transform(sub_x_Train) ind_train = np.where(train_y>0)[0] part_size= int(0.7 * len(ind_train)) gp = GaussianProcess(theta0=1e-3, thetaL=1e-5, thetaU=10, corr= 'absolute_exponential') gp.fit(sub_x_Train[ind_train[:part_size]], 
np.log(train_y[ind_train[:part_size]])) flag = (sub_x_Test_part[:,16] >= 1) ind_tmp0 = np.where(flag)[0] ind_tmp = np.where(~flag)[0] sub_x_Test_part[ind_tmp,16] = np.log(1-sub_x_Test_part[ind_tmp,16]) sub_x_Test_part[ind_tmp] = scaler.transform(sub_x_Test_part[ind_tmp]) gp_preds_tmp = gp_predict(gp, sub_x_Test_part[ind_tmp]) gp_preds = np.zeros(len(sub_x_Test_part)) gp_preds[ind_tmp] = gp_preds_tmp return gp_preds # use gbm classifier to predict whether the loan defaults or not, then invoke the function gbc_gp_predict_part def gbc_gp_predict(train_x, train_y, test_x): feature_indexs = getTopFeatures(train_x, train_y) sub_x_Train = get_data(train_x, feature_indexs[:16], features.feature_pair_sub_list ,features.feature_pair_plus_list, features.feature_pair_mul_list, features.feature_pair_divide_list[:20]) sub_x_Test = get_data(test_x, feature_indexs[:16], features.feature_pair_sub_list ,features.feature_pair_plus_list, features.feature_pair_mul_list, features.feature_pair_divide_list[:20]) labels = toLabels(train_y) gbc = GradientBoostingClassifier(n_estimators=3000, max_depth=9) gbc.fit(sub_x_Train, labels) pred_probs = gbc.predict_proba(sub_x_Test)[:,1] ind_test = np.where(pred_probs>0.55)[0] gp_preds_part = gbc_gp_predict_part(sub_x_Train, train_y, sub_x_Test[ind_test]) gp_preds = np.zeros(len(test_x)) gp_preds[ind_test] = gp_preds_part return gp_preds # invoke the function gbc_svr_predict_part def gbc_svr_predict(gbc, train_x, train_y, test_x): svr_preds = gbc_svr_predict_part(gbc, train_x, train_y, test_x, features.feature_pair_sub_list, features.feature_pair_plus_list, features.feature_pair_mul_list, features.feature_pair_divide_list, features.feature_pair_sub_mul_list, features.feature_pair_sub_list_sf, features.feature_pair_plus_list2) return svr_preds # invoke the function gbc_gbr_predict_part def gbc_gbr_predict(gbc, train_x, train_y, test_x): gbr_preds = gbc_gbr_predict_part(gbc, train_x, train_y, test_x, features.feature_pair_sub_list, features.feature_pair_plus_list, features.feature_pair_mul_list, features.feature_pair_divide_list, features.feature_pair_sub_mul_list, features.feature_pair_sub_list2) return gbr_preds # the main function if __name__ == '__main__': train_fs = load_train_fs() test_fs = load_test_fs() train_x, train_y = train_type(train_fs) test_x = test_type(test_fs) gbc = gbc_classify(train_x, train_y) svr_preds = gbc_svr_predict(gbc, train_x, train_y, test_x) gbr_preds = gbc_gbr_predict(gbc, train_x, train_y, test_x) gp_preds = gbc_gp_predict(train_x, train_y, test_x) preds_all = svr_preds * 0.4 + gp_preds * 0.25 + gbr_preds * 0.35 output_preds(preds_all)
Goodideax/CS249
predict_ws.py
Python
bsd-3-clause
14,972
[ "Gaussian" ]
1708884fd4355f26d5e4bf3193d42d3ea0b5c7a98011e9d994d3dad42a846373
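As written, getTopFeatures in the record above reads f_val, p_val and the two ranking dicts without ever computing them (the f_regression call is missing), so it would raise a NameError. A hedged, self-contained sketch of the ranking step it appears to intend:

import numpy as np
from sklearn.feature_selection import f_regression

def top_feature_indexes(train_x, train_y, n_features=100):
    # Score each column against the target; NaN scores count as 0.0,
    # mirroring the NaN handling in the original function.
    f_val, p_val = f_regression(train_x, train_y)
    f_val = np.nan_to_num(f_val)
    order = np.argsort(f_val)[::-1]     # largest F-statistic first
    return list(order[:n_features])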
# Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)

# MakefilePackage, version, depends_on, FileFilter, make, mkdirp and install
# are provided by the Spack package namespace; without this import the
# module would fail with a NameError.
from spack import *

import os


class Ioapi(MakefilePackage):
    """Models-3/EDSS Input/Output Applications Programming Interface."""

    homepage = "https://www.cmascenter.org/ioapi/"
    url = "https://www.cmascenter.org/ioapi/download/ioapi-3.2.tar.gz"

    version('3.2', sha256='56771ff0053d47f2445e00ff369bca7bfc484325a2816b2c648744e523134fe9')

    depends_on('netcdf-c@4:')
    depends_on('netcdf-fortran@4:')
    depends_on('sed', type='build')

    def edit(self, spec, prefix):
        # No default Makefile bundled; edit the template.
        os.symlink('Makefile.template', 'Makefile')
        # The makefile uses stubborn assignments of = instead of ?= so
        # edit the makefile instead of using environmental variables.
        makefile = FileFilter('Makefile')
        makefile.filter('^BASEDIR.*', 'BASEDIR = ' + self.build_directory)
        makefile.filter('^INSTALL.*', 'INSTALL = ' + prefix)
        makefile.filter('^BININST.*', 'BININST = ' + prefix.bin)
        makefile.filter('^LIBINST.*', 'LIBINST = ' + prefix.lib)

    def install(self, spec, prefix):
        make('install')
        # Install the header files.
        mkdirp(prefix.include.fixed132)
        install('ioapi/*.EXT', prefix.include)
        # Install the header files for CMAQ and SMOKE in the
        # non-standard -ffixed-line-length-132 format.
        install('ioapi/fixed_src/*.EXT', prefix.include.fixed132)
iulian787/spack
var/spack/repos/builtin/packages/ioapi/package.py
Python
lgpl-2.1
1,598
[ "NetCDF" ]
06ee3e9170cd00c4125fe99a7290a38bbe759c459345809bd866ab8b995babb1
# -*- coding: utf-8 -*- # Authors: Alexandre Gramfort <alexandre.gramfort@inria.fr> # Mathieu Blondel <mathieu@mblondel.org> # Robert Layton <robertlayton@gmail.com> # Andreas Mueller <amueller@ais.uni-bonn.de> # Philippe Gervais <philippe.gervais@inria.fr> # Lars Buitinck <larsmans@gmail.com> # Joel Nothman <joel.nothman@gmail.com> # License: BSD 3 clause import itertools import numpy as np from scipy.spatial import distance from scipy.sparse import csr_matrix from scipy.sparse import issparse from ..utils import check_array from ..utils import gen_even_slices from ..utils import gen_batches from ..utils.fixes import partial from ..utils.extmath import row_norms, safe_sparse_dot from ..preprocessing import normalize from ..externals.joblib import Parallel from ..externals.joblib import delayed from ..externals.joblib.parallel import cpu_count from .pairwise_fast import _chi2_kernel_fast, _sparse_manhattan # Utility Functions def _return_float_dtype(X, Y): """ 1. If dtype of X and Y is float32, then dtype float32 is returned. 2. Else dtype float is returned. """ if not issparse(X) and not isinstance(X, np.ndarray): X = np.asarray(X) if Y is None: Y_dtype = X.dtype elif not issparse(Y) and not isinstance(Y, np.ndarray): Y = np.asarray(Y) Y_dtype = Y.dtype else: Y_dtype = Y.dtype if X.dtype == Y_dtype == np.float32: dtype = np.float32 else: dtype = np.float return X, Y, dtype def check_pairwise_arrays(X, Y): """ Set X and Y appropriately and checks inputs If Y is None, it is set as a pointer to X (i.e. not a copy). If Y is given, this does not happen. All distance metrics should use this function first to assert that the given parameters are correct and safe to use. Specifically, this function first ensures that both X and Y are arrays, then checks that they are at least two dimensional while ensuring that their elements are floats. Finally, the function checks that the size of the second dimension of the two arrays is equal. Parameters ---------- X : {array-like, sparse matrix}, shape (n_samples_a, n_features) Y : {array-like, sparse matrix}, shape (n_samples_b, n_features) Returns ------- safe_X : {array-like, sparse matrix}, shape (n_samples_a, n_features) An array equal to X, guaranteed to be a numpy array. safe_Y : {array-like, sparse matrix}, shape (n_samples_b, n_features) An array equal to Y if Y was not None, guaranteed to be a numpy array. If Y was None, safe_Y will be a pointer to X. """ X, Y, dtype = _return_float_dtype(X, Y) if Y is X or Y is None: X = Y = check_array(X, accept_sparse='csr', dtype=dtype) else: X = check_array(X, accept_sparse='csr', dtype=dtype) Y = check_array(Y, accept_sparse='csr', dtype=dtype) if X.shape[1] != Y.shape[1]: raise ValueError("Incompatible dimension for X and Y matrices: " "X.shape[1] == %d while Y.shape[1] == %d" % ( X.shape[1], Y.shape[1])) return X, Y def check_paired_arrays(X, Y): """ Set X and Y appropriately and checks inputs for paired distances All paired distance metrics should use this function first to assert that the given parameters are correct and safe to use. Specifically, this function first ensures that both X and Y are arrays, then checks that they are at least two dimensional while ensuring that their elements are floats. Finally, the function checks that the size of the dimensions of the two arrays are equal. 
Parameters ---------- X : {array-like, sparse matrix}, shape (n_samples_a, n_features) Y : {array-like, sparse matrix}, shape (n_samples_b, n_features) Returns ------- safe_X : {array-like, sparse matrix}, shape (n_samples_a, n_features) An array equal to X, guaranteed to be a numpy array. safe_Y : {array-like, sparse matrix}, shape (n_samples_b, n_features) An array equal to Y if Y was not None, guaranteed to be a numpy array. If Y was None, safe_Y will be a pointer to X. """ X, Y = check_pairwise_arrays(X, Y) if X.shape != Y.shape: raise ValueError("X and Y should be of same shape. They were " "respectively %r and %r long." % (X.shape, Y.shape)) return X, Y # Pairwise distances def euclidean_distances(X, Y=None, Y_norm_squared=None, squared=False): """ Considering the rows of X (and Y=X) as vectors, compute the distance matrix between each pair of vectors. For efficiency reasons, the euclidean distance between a pair of row vector x and y is computed as:: dist(x, y) = sqrt(dot(x, x) - 2 * dot(x, y) + dot(y, y)) This formulation has two advantages over other ways of computing distances. First, it is computationally efficient when dealing with sparse data. Second, if x varies but y remains unchanged, then the right-most dot product `dot(y, y)` can be pre-computed. However, this is not the most precise way of doing this computation, and the distance matrix returned by this function may not be exactly symmetric as required by, e.g., ``scipy.spatial.distance`` functions. Parameters ---------- X : {array-like, sparse matrix}, shape (n_samples_1, n_features) Y : {array-like, sparse matrix}, shape (n_samples_2, n_features) Y_norm_squared : array-like, shape (n_samples_2, ), optional Pre-computed dot-products of vectors in Y (e.g., ``(Y**2).sum(axis=1)``) squared : boolean, optional Return squared Euclidean distances. Returns ------- distances : {array, sparse matrix}, shape (n_samples_1, n_samples_2) Examples -------- >>> from sklearn.metrics.pairwise import euclidean_distances >>> X = [[0, 1], [1, 1]] >>> # distance between rows of X >>> euclidean_distances(X, X) array([[ 0., 1.], [ 1., 0.]]) >>> # get distance to origin >>> euclidean_distances(X, [[0, 0]]) array([[ 1. ], [ 1.41421356]]) See also -------- paired_distances : distances betweens pairs of elements of X and Y. """ # should not need X_norm_squared because if you could precompute that as # well as Y, then you should just pre-compute the output and not even # call this function. X, Y = check_pairwise_arrays(X, Y) if Y_norm_squared is not None: YY = check_array(Y_norm_squared) if YY.shape != (1, Y.shape[0]): raise ValueError( "Incompatible dimensions for Y and Y_norm_squared") else: YY = row_norms(Y, squared=True)[np.newaxis, :] if X is Y: # shortcut in the common case euclidean_distances(X, X) XX = YY.T else: XX = row_norms(X, squared=True)[:, np.newaxis] distances = safe_sparse_dot(X, Y.T, dense_output=True) distances *= -2 distances += XX distances += YY np.maximum(distances, 0, out=distances) if X is Y: # Ensure that distances between vectors and themselves are set to 0.0. # This may not be the case due to floating point rounding errors. distances.flat[::distances.shape[0] + 1] = 0.0 return distances if squared else np.sqrt(distances, out=distances) def pairwise_distances_argmin_min(X, Y, axis=1, metric="euclidean", batch_size=500, metric_kwargs=None): """Compute minimum distances between one point and a set of points. 
This function computes for each row in X, the index of the row of Y which is closest (according to the specified distance). The minimal distances are also returned. This is mostly equivalent to calling: (pairwise_distances(X, Y=Y, metric=metric).argmin(axis=axis), pairwise_distances(X, Y=Y, metric=metric).min(axis=axis)) but uses much less memory, and is faster for large arrays. Parameters ---------- X, Y : {array-like, sparse matrix} Arrays containing points. Respective shapes (n_samples1, n_features) and (n_samples2, n_features) batch_size : integer To reduce memory consumption over the naive solution, data are processed in batches, comprising batch_size rows of X and batch_size rows of Y. The default value is quite conservative, but can be changed for fine-tuning. The larger the number, the larger the memory usage. metric : string or callable, default 'euclidean' metric to use for distance computation. Any metric from scikit-learn or scipy.spatial.distance can be used. If metric is a callable function, it is called on each pair of instances (rows) and the resulting value recorded. The callable should take two arrays as input and return one value indicating the distance between them. This works for Scipy's metrics, but is less efficient than passing the metric name as a string. Distance matrices are not supported. Valid values for metric are: - from scikit-learn: ['cityblock', 'cosine', 'euclidean', 'l1', 'l2', 'manhattan'] - from scipy.spatial.distance: ['braycurtis', 'canberra', 'chebyshev', 'correlation', 'dice', 'hamming', 'jaccard', 'kulsinski', 'mahalanobis', 'matching', 'minkowski', 'rogerstanimoto', 'russellrao', 'seuclidean', 'sokalmichener', 'sokalsneath', 'sqeuclidean', 'yule'] See the documentation for scipy.spatial.distance for details on these metrics. metric_kwargs : dict, optional Keyword arguments to pass to specified metric function. axis : int, optional, default 1 Axis along which the argmin and distances are to be computed. Returns ------- argmin : numpy.ndarray Y[argmin[i], :] is the row in Y that is closest to X[i, :]. distances : numpy.ndarray distances[i] is the distance between the i-th row in X and the argmin[i]-th row in Y. 
See also -------- sklearn.metrics.pairwise_distances sklearn.metrics.pairwise_distances_argmin """ dist_func = None if metric in PAIRWISE_DISTANCE_FUNCTIONS: dist_func = PAIRWISE_DISTANCE_FUNCTIONS[metric] elif not callable(metric) and not isinstance(metric, str): raise ValueError("'metric' must be a string or a callable") X, Y = check_pairwise_arrays(X, Y) if metric_kwargs is None: metric_kwargs = {} if axis == 0: X, Y = Y, X # Allocate output arrays indices = np.empty(X.shape[0], dtype=np.intp) values = np.empty(X.shape[0]) values.fill(np.infty) for chunk_x in gen_batches(X.shape[0], batch_size): X_chunk = X[chunk_x, :] for chunk_y in gen_batches(Y.shape[0], batch_size): Y_chunk = Y[chunk_y, :] if dist_func is not None: if metric == 'euclidean': # special case, for speed d_chunk = safe_sparse_dot(X_chunk, Y_chunk.T, dense_output=True) d_chunk *= -2 d_chunk += row_norms(X_chunk, squared=True)[:, np.newaxis] d_chunk += row_norms(Y_chunk, squared=True)[np.newaxis, :] np.maximum(d_chunk, 0, d_chunk) else: d_chunk = dist_func(X_chunk, Y_chunk, **metric_kwargs) else: d_chunk = pairwise_distances(X_chunk, Y_chunk, metric=metric, **metric_kwargs) # Update indices and minimum values using chunk min_indices = d_chunk.argmin(axis=1) min_values = d_chunk[np.arange(chunk_x.stop - chunk_x.start), min_indices] flags = values[chunk_x] > min_values indices[chunk_x][flags] = min_indices[flags] + chunk_y.start values[chunk_x][flags] = min_values[flags] if metric == "euclidean" and not metric_kwargs.get("squared", False): np.sqrt(values, values) return indices, values def pairwise_distances_argmin(X, Y, axis=1, metric="euclidean", batch_size=500, metric_kwargs=None): """Compute minimum distances between one point and a set of points. This function computes for each row in X, the index of the row of Y which is closest (according to the specified distance). This is mostly equivalent to calling: pairwise_distances(X, Y=Y, metric=metric).argmin(axis=axis) but uses much less memory, and is faster for large arrays. This function works with dense 2D arrays only. Parameters ========== X : array-like Arrays containing points. Respective shapes (n_samples1, n_features) and (n_samples2, n_features) Y : array-like Arrays containing points. Respective shapes (n_samples1, n_features) and (n_samples2, n_features) batch_size : integer To reduce memory consumption over the naive solution, data are processed in batches, comprising batch_size rows of X and batch_size rows of Y. The default value is quite conservative, but can be changed for fine-tuning. The larger the number, the larger the memory usage. metric : string or callable metric to use for distance computation. Any metric from scikit-learn or scipy.spatial.distance can be used. If metric is a callable function, it is called on each pair of instances (rows) and the resulting value recorded. The callable should take two arrays as input and return one value indicating the distance between them. This works for Scipy's metrics, but is less efficient than passing the metric name as a string. Distance matrices are not supported. 
Valid values for metric are: - from scikit-learn: ['cityblock', 'cosine', 'euclidean', 'l1', 'l2', 'manhattan'] - from scipy.spatial.distance: ['braycurtis', 'canberra', 'chebyshev', 'correlation', 'dice', 'hamming', 'jaccard', 'kulsinski', 'mahalanobis', 'matching', 'minkowski', 'rogerstanimoto', 'russellrao', 'seuclidean', 'sokalmichener', 'sokalsneath', 'sqeuclidean', 'yule'] See the documentation for scipy.spatial.distance for details on these metrics. metric_kwargs : dict keyword arguments to pass to specified metric function. axis : int, optional, default 1 Axis along which the argmin and distances are to be computed. Returns ======= argmin : numpy.ndarray Y[argmin[i], :] is the row in Y that is closest to X[i, :]. See also ======== sklearn.metrics.pairwise_distances sklearn.metrics.pairwise_distances_argmin_min """ if metric_kwargs is None: metric_kwargs = {} return pairwise_distances_argmin_min(X, Y, axis, metric, batch_size, metric_kwargs)[0] def manhattan_distances(X, Y=None, sum_over_features=True, size_threshold=5e8): """ Compute the L1 distances between the vectors in X and Y. With sum_over_features equal to False it returns the componentwise distances. Parameters ---------- X : array_like An array with shape (n_samples_X, n_features). Y : array_like, optional An array with shape (n_samples_Y, n_features). sum_over_features : bool, default=True If True the function returns the pairwise distance matrix else it returns the componentwise L1 pairwise-distances. Not supported for sparse matrix inputs. size_threshold : int, default=5e8 Unused parameter. Returns ------- D : array If sum_over_features is False shape is (n_samples_X * n_samples_Y, n_features) and D contains the componentwise L1 pairwise-distances (ie. absolute difference), else shape is (n_samples_X, n_samples_Y) and D contains the pairwise L1 distances. Examples -------- >>> from sklearn.metrics.pairwise import manhattan_distances >>> manhattan_distances(3, 3)#doctest:+ELLIPSIS array([[ 0.]]) >>> manhattan_distances(3, 2)#doctest:+ELLIPSIS array([[ 1.]]) >>> manhattan_distances(2, 3)#doctest:+ELLIPSIS array([[ 1.]]) >>> manhattan_distances([[1, 2], [3, 4]],\ [[1, 2], [0, 3]])#doctest:+ELLIPSIS array([[ 0., 2.], [ 4., 4.]]) >>> import numpy as np >>> X = np.ones((1, 2)) >>> y = 2 * np.ones((2, 2)) >>> manhattan_distances(X, y, sum_over_features=False)#doctest:+ELLIPSIS array([[ 1., 1.], [ 1., 1.]]...) """ X, Y = check_pairwise_arrays(X, Y) if issparse(X) or issparse(Y): if not sum_over_features: raise TypeError("sum_over_features=%r not supported" " for sparse matrices" % sum_over_features) X = csr_matrix(X, copy=False) Y = csr_matrix(Y, copy=False) D = np.zeros((X.shape[0], Y.shape[0])) _sparse_manhattan(X.data, X.indices, X.indptr, Y.data, Y.indices, Y.indptr, X.shape[1], D) return D if sum_over_features: return distance.cdist(X, Y, 'cityblock') D = X[:, np.newaxis, :] - Y[np.newaxis, :, :] D = np.abs(D, D) return D.reshape((-1, X.shape[1])) def cosine_distances(X, Y=None): """ Compute cosine distance between samples in X and Y. Cosine distance is defined as 1.0 minus the cosine similarity. Parameters ---------- X : array_like, sparse matrix with shape (n_samples_X, n_features). Y : array_like, sparse matrix (optional) with shape (n_samples_Y, n_features). Returns ------- distance matrix : array An array with shape (n_samples_X, n_samples_Y). 
See also -------- sklearn.metrics.pairwise.cosine_similarity scipy.spatial.distance.cosine (dense matrices only) """ # 1.0 - cosine_similarity(X, Y) without copy S = cosine_similarity(X, Y) S *= -1 S += 1 return S # Paired distances def paired_euclidean_distances(X, Y): """ Computes the paired euclidean distances between X and Y Parameters ---------- X : array-like, shape (n_samples, n_features) Y : array-like, shape (n_samples, n_features) Returns ------- distances : ndarray (n_samples, ) """ X, Y = check_paired_arrays(X, Y) return row_norms(X - Y) def paired_manhattan_distances(X, Y): """Compute the L1 distances between the vectors in X and Y. Parameters ---------- X : array-like, shape (n_samples, n_features) Y : array-like, shape (n_samples, n_features) Returns ------- distances : ndarray (n_samples, ) """ X, Y = check_paired_arrays(X, Y) diff = X - Y if issparse(diff): diff.data = np.abs(diff.data) return np.squeeze(np.array(diff.sum(axis=1))) else: return np.abs(diff).sum(axis=-1) def paired_cosine_distances(X, Y): """ Computes the paired cosine distances between X and Y Parameters ---------- X : array-like, shape (n_samples, n_features) Y : array-like, shape (n_samples, n_features) Returns ------- distances : ndarray, shape (n_samples, ) Notes ------ The cosine distance is equivalent to the half the squared euclidean distance if each sample is normalized to unit norm """ X, Y = check_paired_arrays(X, Y) return .5 * row_norms(normalize(X) - normalize(Y), squared=True) PAIRED_DISTANCES = { 'cosine': paired_cosine_distances, 'euclidean': paired_euclidean_distances, 'l2': paired_euclidean_distances, 'l1': paired_manhattan_distances, 'manhattan': paired_manhattan_distances, 'cityblock': paired_manhattan_distances, } def paired_distances(X, Y, metric="euclidean", **kwds): """ Computes the paired distances between X and Y. Computes the distances between (X[0], Y[0]), (X[1], Y[1]), etc... Parameters ---------- X : ndarray (n_samples, n_features) Array 1 for distance computation. Y : ndarray (n_samples, n_features) Array 2 for distance computation. metric : string or callable The metric to use when calculating distance between instances in a feature array. If metric is a string, it must be one of the options specified in PAIRED_DISTANCES, including "euclidean", "manhattan", or "cosine". Alternatively, if metric is a callable function, it is called on each pair of instances (rows) and the resulting value recorded. The callable should take two arrays from X as input and return a value indicating the distance between them. Returns ------- distances : ndarray (n_samples, ) Examples -------- >>> from sklearn.metrics.pairwise import paired_distances >>> X = [[0, 1], [1, 1]] >>> Y = [[0, 1], [2, 1]] >>> paired_distances(X, Y) array([ 0., 1.]) See also -------- pairwise_distances : pairwise distances. """ if metric in PAIRED_DISTANCES: func = PAIRED_DISTANCES[metric] return func(X, Y) elif callable(metric): # Check the matrix first (it is usually done by the metric) X, Y = check_paired_arrays(X, Y) distances = np.zeros(len(X)) for i in range(len(X)): distances[i] = metric(X[i], Y[i]) return distances else: raise ValueError('Unknown distance %s' % metric) # Kernels def linear_kernel(X, Y=None): """ Compute the linear kernel between X and Y. 
Parameters ---------- X : array of shape (n_samples_1, n_features) Y : array of shape (n_samples_2, n_features) Returns ------- Gram matrix : array of shape (n_samples_1, n_samples_2) """ X, Y = check_pairwise_arrays(X, Y) return safe_sparse_dot(X, Y.T, dense_output=True) def polynomial_kernel(X, Y=None, degree=3, gamma=None, coef0=1): """ Compute the polynomial kernel between X and Y:: K(X, Y) = (gamma <X, Y> + coef0)^degree Parameters ---------- X : ndarray of shape (n_samples_1, n_features) Y : ndarray of shape (n_samples_2, n_features) coef0 : int, default 1 degree : int, default 3 Returns ------- Gram matrix : array of shape (n_samples_1, n_samples_2) """ X, Y = check_pairwise_arrays(X, Y) if gamma is None: gamma = 1.0 / X.shape[1] K = safe_sparse_dot(X, Y.T, dense_output=True) K *= gamma K += coef0 K **= degree return K def sigmoid_kernel(X, Y=None, gamma=None, coef0=1): """ Compute the sigmoid kernel between X and Y:: K(X, Y) = tanh(gamma <X, Y> + coef0) Parameters ---------- X : ndarray of shape (n_samples_1, n_features) Y : ndarray of shape (n_samples_2, n_features) coef0 : int, default 1 Returns ------- Gram matrix: array of shape (n_samples_1, n_samples_2) """ X, Y = check_pairwise_arrays(X, Y) if gamma is None: gamma = 1.0 / X.shape[1] K = safe_sparse_dot(X, Y.T, dense_output=True) K *= gamma K += coef0 np.tanh(K, K) # compute tanh in-place return K def rbf_kernel(X, Y=None, gamma=None): """ Compute the rbf (gaussian) kernel between X and Y:: K(x, y) = exp(-gamma ||x-y||^2) for each pair of rows x in X and y in Y. Parameters ---------- X : array of shape (n_samples_X, n_features) Y : array of shape (n_samples_Y, n_features) gamma : float Returns ------- kernel_matrix : array of shape (n_samples_X, n_samples_Y) """ X, Y = check_pairwise_arrays(X, Y) if gamma is None: gamma = 1.0 / X.shape[1] K = euclidean_distances(X, Y, squared=True) K *= -gamma np.exp(K, K) # exponentiate K in-place return K def cosine_similarity(X, Y=None): """Compute cosine similarity between samples in X and Y. Cosine similarity, or the cosine kernel, computes similarity as the normalized dot product of X and Y: K(X, Y) = <X, Y> / (||X||*||Y||) On L2-normalized data, this function is equivalent to linear_kernel. Parameters ---------- X : array_like, sparse matrix with shape (n_samples_X, n_features). Y : array_like, sparse matrix (optional) with shape (n_samples_Y, n_features). Returns ------- kernel matrix : array An array with shape (n_samples_X, n_samples_Y). """ # to avoid recursive import X, Y = check_pairwise_arrays(X, Y) X_normalized = normalize(X, copy=True) if X is Y: Y_normalized = X_normalized else: Y_normalized = normalize(Y, copy=True) K = safe_sparse_dot(X_normalized, Y_normalized.T, dense_output=True) return K def additive_chi2_kernel(X, Y=None): """Computes the additive chi-squared kernel between observations in X and Y The chi-squared kernel is computed between each pair of rows in X and Y. X and Y have to be non-negative. This kernel is most commonly applied to histograms. The chi-squared kernel is given by:: k(x, y) = -Sum [(x - y)^2 / (x + y)] It can be interpreted as a weighted difference per entry. Notes ----- As the negative of a distance, this kernel is only conditionally positive definite. Parameters ---------- X : array-like of shape (n_samples_X, n_features) Y : array of shape (n_samples_Y, n_features) Returns ------- kernel_matrix : array of shape (n_samples_X, n_samples_Y) References ---------- * Zhang, J. and Marszalek, M. and Lazebnik, S. and Schmid, C. 
Local features and kernels for classification of texture and object categories: A comprehensive study International Journal of Computer Vision 2007 http://eprints.pascal-network.org/archive/00002309/01/Zhang06-IJCV.pdf See also -------- chi2_kernel : The exponentiated version of the kernel, which is usually preferable. sklearn.kernel_approximation.AdditiveChi2Sampler : A Fourier approximation to this kernel. """ if issparse(X) or issparse(Y): raise ValueError("additive_chi2 does not support sparse matrices.") X, Y = check_pairwise_arrays(X, Y) if (X < 0).any(): raise ValueError("X contains negative values.") if Y is not X and (Y < 0).any(): raise ValueError("Y contains negative values.") result = np.zeros((X.shape[0], Y.shape[0]), dtype=X.dtype) _chi2_kernel_fast(X, Y, result) return result def chi2_kernel(X, Y=None, gamma=1.): """Computes the exponential chi-squared kernel X and Y. The chi-squared kernel is computed between each pair of rows in X and Y. X and Y have to be non-negative. This kernel is most commonly applied to histograms. The chi-squared kernel is given by:: k(x, y) = exp(-gamma Sum [(x - y)^2 / (x + y)]) It can be interpreted as a weighted difference per entry. Parameters ---------- X : array-like of shape (n_samples_X, n_features) Y : array of shape (n_samples_Y, n_features) gamma : float, default=1. Scaling parameter of the chi2 kernel. Returns ------- kernel_matrix : array of shape (n_samples_X, n_samples_Y) References ---------- * Zhang, J. and Marszalek, M. and Lazebnik, S. and Schmid, C. Local features and kernels for classification of texture and object categories: A comprehensive study International Journal of Computer Vision 2007 http://eprints.pascal-network.org/archive/00002309/01/Zhang06-IJCV.pdf See also -------- additive_chi2_kernel : The additive version of this kernel sklearn.kernel_approximation.AdditiveChi2Sampler : A Fourier approximation to the additive version of this kernel. """ K = additive_chi2_kernel(X, Y) K *= gamma return np.exp(K, K) # Helper functions - distance PAIRWISE_DISTANCE_FUNCTIONS = { # If updating this dictionary, update the doc in both distance_metrics() # and also in pairwise_distances()! 'cityblock': manhattan_distances, 'cosine': cosine_distances, 'euclidean': euclidean_distances, 'l2': euclidean_distances, 'l1': manhattan_distances, 'manhattan': manhattan_distances, } def distance_metrics(): """Valid metrics for pairwise_distances. This function simply returns the valid pairwise distance metrics. It exists to allow for a description of the mapping for each of the valid strings. 
The valid distance metrics, and the function they map to, are: ============ ==================================== metric Function ============ ==================================== 'cityblock' metrics.pairwise.manhattan_distances 'cosine' metrics.pairwise.cosine_distances 'euclidean' metrics.pairwise.euclidean_distances 'l1' metrics.pairwise.manhattan_distances 'l2' metrics.pairwise.euclidean_distances 'manhattan' metrics.pairwise.manhattan_distances ============ ==================================== """ return PAIRWISE_DISTANCE_FUNCTIONS def _parallel_pairwise(X, Y, func, n_jobs, **kwds): """Break the pairwise matrix in n_jobs even slices and compute them in parallel""" if n_jobs < 0: n_jobs = max(cpu_count() + 1 + n_jobs, 1) if Y is None: Y = X if n_jobs == 1: # Special case to avoid picklability checks in delayed return func(X, Y, **kwds) # TODO: in some cases, backend='threading' may be appropriate fd = delayed(func) ret = Parallel(n_jobs=n_jobs, verbose=0)( fd(X, Y[s], **kwds) for s in gen_even_slices(Y.shape[0], n_jobs)) return np.hstack(ret) def _pairwise_callable(X, Y, metric, **kwds): """Handle the callable case for pairwise_{distances,kernels} """ X, Y = check_pairwise_arrays(X, Y) if X is Y: # Only calculate metric for upper triangle out = np.zeros((X.shape[0], Y.shape[0]), dtype='float') iterator = itertools.combinations(range(X.shape[0]), 2) for i, j in iterator: out[i, j] = metric(X[i], Y[j], **kwds) # Make symmetric # NB: out += out.T will produce incorrect results out = out + out.T # Calculate diagonal # NB: nonzero diagonals are allowed for both metrics and kernels for i in range(X.shape[0]): x = X[i] out[i, i] = metric(x, x, **kwds) else: # Calculate all cells out = np.empty((X.shape[0], Y.shape[0]), dtype='float') iterator = itertools.product(range(X.shape[0]), range(Y.shape[0])) for i, j in iterator: out[i, j] = metric(X[i], Y[j], **kwds) return out _VALID_METRICS = ['euclidean', 'l2', 'l1', 'manhattan', 'cityblock', 'braycurtis', 'canberra', 'chebyshev', 'correlation', 'cosine', 'dice', 'hamming', 'jaccard', 'kulsinski', 'mahalanobis', 'matching', 'minkowski', 'rogerstanimoto', 'russellrao', 'seuclidean', 'sokalmichener', 'sokalsneath', 'sqeuclidean', 'yule', "wminkowski"] def pairwise_distances(X, Y=None, metric="euclidean", n_jobs=1, **kwds): """ Compute the distance matrix from a vector array X and optional Y. This method takes either a vector array or a distance matrix, and returns a distance matrix. If the input is a vector array, the distances are computed. If the input is a distances matrix, it is returned instead. This method provides a safe way to take a distance matrix as input, while preserving compatibility with many other algorithms that take a vector array. If Y is given (default is None), then the returned matrix is the pairwise distance between the arrays from both X and Y. Valid values for metric are: - From scikit-learn: ['cityblock', 'cosine', 'euclidean', 'l1', 'l2', 'manhattan']. These metrics support sparse matrix inputs. - From scipy.spatial.distance: ['braycurtis', 'canberra', 'chebyshev', 'correlation', 'dice', 'hamming', 'jaccard', 'kulsinski', 'mahalanobis', 'matching', 'minkowski', 'rogerstanimoto', 'russellrao', 'seuclidean', 'sokalmichener', 'sokalsneath', 'sqeuclidean', 'yule'] See the documentation for scipy.spatial.distance for details on these metrics. These metrics do not support sparse matrix inputs. 
Note that in the case of 'cityblock', 'cosine' and 'euclidean' (which are valid scipy.spatial.distance metrics), the scikit-learn implementation will be used, which is faster and has support for sparse matrices (except for 'cityblock'). For a verbose description of the metrics from scikit-learn, see the __doc__ of the sklearn.pairwise.distance_metrics function. Parameters ---------- X : array [n_samples_a, n_samples_a] if metric == "precomputed", or, \ [n_samples_a, n_features] otherwise Array of pairwise distances between samples, or a feature array. Y : array [n_samples_b, n_features] A second feature array only if X has shape [n_samples_a, n_features]. metric : string, or callable The metric to use when calculating distance between instances in a feature array. If metric is a string, it must be one of the options allowed by scipy.spatial.distance.pdist for its metric parameter, or a metric listed in pairwise.PAIRWISE_DISTANCE_FUNCTIONS. If metric is "precomputed", X is assumed to be a distance matrix. Alternatively, if metric is a callable function, it is called on each pair of instances (rows) and the resulting value recorded. The callable should take two arrays from X as input and return a value indicating the distance between them. n_jobs : int The number of jobs to use for the computation. This works by breaking down the pairwise matrix into n_jobs even slices and computing them in parallel. If -1 all CPUs are used. If 1 is given, no parallel computing code is used at all, which is useful for debugging. For n_jobs below -1, (n_cpus + 1 + n_jobs) are used. Thus for n_jobs = -2, all CPUs but one are used. `**kwds` : optional keyword parameters Any further parameters are passed directly to the distance function. If using a scipy.spatial.distance metric, the parameters are still metric dependent. See the scipy docs for usage examples. Returns ------- D : array [n_samples_a, n_samples_a] or [n_samples_a, n_samples_b] A distance matrix D such that D_{i, j} is the distance between the ith and jth vectors of the given matrix X, if Y is None. If Y is not None, then D_{i, j} is the distance between the ith array from X and the jth array from Y. """ if (metric not in _VALID_METRICS and not callable(metric) and metric != "precomputed"): raise ValueError("Unknown metric %s. " "Valid metrics are %s, or 'precomputed', or a " "callable" % (metric, _VALID_METRICS)) if metric == "precomputed": return X elif metric in PAIRWISE_DISTANCE_FUNCTIONS: func = PAIRWISE_DISTANCE_FUNCTIONS[metric] elif callable(metric): func = partial(_pairwise_callable, metric=metric, **kwds) else: if issparse(X) or issparse(Y): raise TypeError("scipy distance metrics do not" " support sparse matrices.") X, Y = check_pairwise_arrays(X, Y) if n_jobs == 1 and X is Y: return distance.squareform(distance.pdist(X, metric=metric, **kwds)) func = partial(distance.cdist, metric=metric, **kwds) return _parallel_pairwise(X, Y, func, n_jobs, **kwds) # Helper functions - distance PAIRWISE_KERNEL_FUNCTIONS = { # If updating this dictionary, update the doc in both distance_metrics() # and also in pairwise_distances()! 'additive_chi2': additive_chi2_kernel, 'chi2': chi2_kernel, 'linear': linear_kernel, 'polynomial': polynomial_kernel, 'poly': polynomial_kernel, 'rbf': rbf_kernel, 'sigmoid': sigmoid_kernel, 'cosine': cosine_similarity, } def kernel_metrics(): """ Valid metrics for pairwise_kernels This function simply returns the valid pairwise distance metrics. 
It exists, however, to allow for a verbose description of the mapping for each of the valid strings. The valid distance metrics, and the function they map to, are: =============== ======================================== metric Function =============== ======================================== 'additive_chi2' sklearn.pairwise.additive_chi2_kernel 'chi2' sklearn.pairwise.chi2_kernel 'linear' sklearn.pairwise.linear_kernel 'poly' sklearn.pairwise.polynomial_kernel 'polynomial' sklearn.pairwise.polynomial_kernel 'rbf' sklearn.pairwise.rbf_kernel 'sigmoid' sklearn.pairwise.sigmoid_kernel 'cosine' sklearn.pairwise.cosine_similarity =============== ======================================== """ return PAIRWISE_KERNEL_FUNCTIONS KERNEL_PARAMS = { "additive_chi2": (), "chi2": (), "cosine": (), "exp_chi2": frozenset(["gamma"]), "linear": (), "poly": frozenset(["gamma", "degree", "coef0"]), "polynomial": frozenset(["gamma", "degree", "coef0"]), "rbf": frozenset(["gamma"]), "sigmoid": frozenset(["gamma", "coef0"]), } def pairwise_kernels(X, Y=None, metric="linear", filter_params=False, n_jobs=1, **kwds): """Compute the kernel between arrays X and optional array Y. This method takes either a vector array or a kernel matrix, and returns a kernel matrix. If the input is a vector array, the kernels are computed. If the input is a kernel matrix, it is returned instead. This method provides a safe way to take a kernel matrix as input, while preserving compatibility with many other algorithms that take a vector array. If Y is given (default is None), then the returned matrix is the pairwise kernel between the arrays from both X and Y. Valid values for metric are:: ['rbf', 'sigmoid', 'polynomial', 'poly', 'linear', 'cosine'] Parameters ---------- X : array [n_samples_a, n_samples_a] if metric == "precomputed", or, \ [n_samples_a, n_features] otherwise Array of pairwise kernels between samples, or a feature array. Y : array [n_samples_b, n_features] A second feature array only if X has shape [n_samples_a, n_features]. metric : string, or callable The metric to use when calculating kernel between instances in a feature array. If metric is a string, it must be one of the metrics in pairwise.PAIRWISE_KERNEL_FUNCTIONS. If metric is "precomputed", X is assumed to be a kernel matrix. Alternatively, if metric is a callable function, it is called on each pair of instances (rows) and the resulting value recorded. The callable should take two arrays from X as input and return a value indicating the distance between them. n_jobs : int The number of jobs to use for the computation. This works by breaking down the pairwise matrix into n_jobs even slices and computing them in parallel. If -1 all CPUs are used. If 1 is given, no parallel computing code is used at all, which is useful for debugging. For n_jobs below -1, (n_cpus + 1 + n_jobs) are used. Thus for n_jobs = -2, all CPUs but one are used. filter_params: boolean Whether to filter invalid parameters or not. `**kwds` : optional keyword parameters Any further parameters are passed directly to the kernel function. Returns ------- K : array [n_samples_a, n_samples_a] or [n_samples_a, n_samples_b] A kernel matrix K such that K_{i, j} is the kernel between the ith and jth vectors of the given matrix X, if Y is None. If Y is not None, then K_{i, j} is the kernel between the ith array from X and the jth array from Y. Notes ----- If metric is 'precomputed', Y is ignored and X is returned. 
""" if metric == "precomputed": return X elif metric in PAIRWISE_KERNEL_FUNCTIONS: if filter_params: kwds = dict((k, kwds[k]) for k in kwds if k in KERNEL_PARAMS[metric]) func = PAIRWISE_KERNEL_FUNCTIONS[metric] elif callable(metric): func = partial(_pairwise_callable, metric=metric, **kwds) else: raise ValueError("Unknown kernel %r" % metric) return _parallel_pairwise(X, Y, func, n_jobs, **kwds)
DonBeo/scikit-learn
sklearn/metrics/pairwise.py
Python
bsd-3-clause
41,696
[ "Gaussian" ]
b611389f0d4d3f3e817d161e987c710b3c80df1d10ede4f219c26c95380de0b7
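A minimal usage sketch of the pairwise helpers defined in the scikit-learn file above; the toy arrays are invented for illustration and are not taken from the repository.

# Illustrative sketch only: shows how pairwise_distances_argmin_min relates to
# the full pairwise_distances matrix, as described in the docstrings above.
import numpy as np
from sklearn.metrics.pairwise import (
    pairwise_distances, pairwise_distances_argmin_min, rbf_kernel)

X = np.array([[0.0, 0.0], [1.0, 1.0]])            # toy data (assumption)
Y = np.array([[0.0, 1.0], [2.0, 2.0], [5.0, 5.0]])

# For each row of X: index of the closest row of Y, and the distance to it.
argmin, dist = pairwise_distances_argmin_min(X, Y, metric='euclidean')

# Equivalent, but more memory-hungry, computation via the full distance matrix.
D = pairwise_distances(X, Y, metric='euclidean')
assert np.array_equal(argmin, D.argmin(axis=1))
assert np.allclose(dist, D.min(axis=1))

# RBF (Gaussian) kernel between the same points; gamma defaults to 1 / n_features.
K = rbf_kernel(X, Y, gamma=0.5)

The batched implementation avoids materialising the full (n_samples1, n_samples2) distance matrix, which is the point of preferring pairwise_distances_argmin_min on large inputs.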
# # # File to test current configuration of CA1Pyramidal cell project. # # To execute this type of file, type '..\..\..\nC.bat -python XXX.py' (Windows) # or '../../../nC.sh -python XXX.py' (Linux/Mac). Note: you may have to update the # NC_HOME and NC_MAX_MEMORY variables in nC.bat/nC.sh # # Author: Padraig Gleeson # # This file has been developed as part of the neuroConstruct project # This work has been funded by the Medical Research Council and the # Wellcome Trust # # import sys import os try: from java.io import File except ImportError: print "Note: this file should be run using ..\\..\\..\\nC.bat -python XXX.py' or '../../../nC.sh -python XXX.py'" print "See http://www.neuroconstruct.org/docs/python.html for more details" quit() sys.path.append(os.environ["NC_HOME"]+"/pythonNeuroML/nCUtils") import ncutils as nc # Many useful functions such as SimManager.runMultipleSims found here projFile = File(os.getcwd(), "../LarkumEtAl2009.ncx") ############## Main settings ################## simConfigs = [] simConfigs.append("test_IClamp") #simConfigs.append("CA1Cell") simDt = 0.025 simulators = ["NEURON"] numConcurrentSims = 1 varTimestepNeuron = False plotSims = True plotVoltageOnly = True runInBackground = True analyseSims = True verbose = True ############################################# def testAll(argv=None): if argv is None: argv = sys.argv print "Loading project from "+ projFile.getCanonicalPath() simManager = nc.SimulationManager(projFile, numConcurrentSims = numConcurrentSims, verbose = verbose) simManager.runMultipleSims(simConfigs = simConfigs, simDt = simDt, simulators = simulators, runInBackground = runInBackground, varTimestepNeuron = varTimestepNeuron) simManager.reloadSims(plotVoltageOnly = plotVoltageOnly, plotSims = plotSims, analyseSims = analyseSims) # Times from ModelDB version at dt 0.025... spikeTimesToCheck = {'pyr_group_0': [61.6, 71.05, 84.675, 133.475, 143.975, 190.625, 200.825, 251.75, 262.35, 310.975, 321.675, 369.125, 379.9, 426.275, 437.125]} spikeTimeAccuracy = 0.1 report = simManager.checkSims(spikeTimesToCheck = spikeTimesToCheck, spikeTimeAccuracy = spikeTimeAccuracy) print report # Times recorded from nC mod based impl times = [61.625, 71.075, 84.7, 133.5, 144.0, 190.65, 200.85, 251.775, 262.4, 311.0, 321.7, 369.15, 379.925, 426.3, 437.15] spikeTimesToCheck = {'pyr_group_0': times, 'pyrCML_group_0': times} spikeTimeAccuracy = 0.1 report = simManager.checkSims(spikeTimesToCheck = spikeTimesToCheck, spikeTimeAccuracy = spikeTimeAccuracy) print report return report if __name__ == "__main__": testAll()
pgleeson/TestArea
models/LarkumEtAl2009/pythonScripts/RunTestsModelDB.py
Python
gpl-2.0
3,310
[ "NEURON" ]
0cb5ad6ef7e80713163ea4f74fc7a42c411f2fe72810b0649df050e057d6b624
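The neuroConstruct test above delegates the pass/fail decision to SimulationManager.checkSims. A hedged, standalone sketch of the kind of tolerance comparison it relies on (assumed behaviour, not the actual ncutils implementation), using the first few spike times quoted in the script:

# Illustrative sketch only: NOT the ncutils code, just the comparison the test
# depends on (same number of spikes, each within spikeTimeAccuracy of expected).
def spike_times_match(expected, recorded, accuracy=0.1):
    """Return True if both trains have the same length and every recorded
    spike time is within `accuracy` ms of the corresponding expected one."""
    if len(expected) != len(recorded):
        return False
    return all(abs(e - r) <= accuracy for e, r in zip(expected, recorded))

expected = [61.6, 71.05, 84.675]    # ModelDB reference run (from the script)
recorded = [61.625, 71.075, 84.7]   # nC mod-based implementation (from the script)
print(spike_times_match(expected, recorded, accuracy=0.1))  # True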
# Copyright (c) 2003-2012 LOGILAB S.A. (Paris, FRANCE). # Copyright (c) 2009-2010 Arista Networks, Inc. # http://www.logilab.fr/ -- mailto:contact@logilab.fr # This program is free software; you can redistribute it and/or modify it under # the terms of the GNU General Public License as published by the Free Software # Foundation; either version 2 of the License, or (at your option) any later # version. # # This program is distributed in the hope that it will be useful, but WITHOUT # ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS # FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. # # You should have received a copy of the GNU General Public License along with # this program; if not, write to the Free Software Foundation, Inc., # 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. """basic checker for Python code""" from logilab import astng from logilab.common.ureports import Table from logilab.astng import are_exclusive from pylint.interfaces import IASTNGChecker from pylint.reporters import diff_string from pylint.checkers import BaseChecker, EmptyReport from pylint.checkers.utils import check_messages, clobber_in_except, is_inside_except import re # regex for class/function/variable/constant name CLASS_NAME_RGX = re.compile('[A-Z_][a-zA-Z0-9]+$') MOD_NAME_RGX = re.compile('(([a-z_][a-z0-9_]*)|([A-Z][a-zA-Z0-9]+))$') CONST_NAME_RGX = re.compile('(([A-Z_][A-Z0-9_]*)|(__.*__))$') COMP_VAR_RGX = re.compile('[A-Za-z_][A-Za-z0-9_]*$') DEFAULT_NAME_RGX = re.compile('[a-z_][a-z0-9_]{2,30}$') # do not require a doc string on system methods NO_REQUIRED_DOC_RGX = re.compile('__.*__') del re def in_loop(node): """return True if the node is inside a kind of for loop""" parent = node.parent while parent is not None: if isinstance(parent, (astng.For, astng.ListComp, astng.SetComp, astng.DictComp, astng.GenExpr)): return True parent = parent.parent return False def in_nested_list(nested_list, obj): """return true if the object is an element of <nested_list> or of a nested list """ for elmt in nested_list: if isinstance(elmt, (list, tuple)): if in_nested_list(elmt, obj): return True elif elmt == obj: return True return False def report_by_type_stats(sect, stats, old_stats): """make a report of * percentage of different types documented * percentage of different types with a bad name """ # percentage of different types documented and/or with a bad name nice_stats = {} for node_type in ('module', 'class', 'method', 'function'): try: total = stats[node_type] except KeyError: raise EmptyReport() nice_stats[node_type] = {} if total != 0: try: documented = total - stats['undocumented_'+node_type] percent = (documented * 100.) / total nice_stats[node_type]['percent_documented'] = '%.2f' % percent except KeyError: nice_stats[node_type]['percent_documented'] = 'NC' try: percent = (stats['badname_'+node_type] * 100.) 
/ total nice_stats[node_type]['percent_badname'] = '%.2f' % percent except KeyError: nice_stats[node_type]['percent_badname'] = 'NC' lines = ('type', 'number', 'old number', 'difference', '%documented', '%badname') for node_type in ('module', 'class', 'method', 'function'): new = stats[node_type] old = old_stats.get(node_type, None) if old is not None: diff_str = diff_string(old, new) else: old, diff_str = 'NC', 'NC' lines += (node_type, str(new), str(old), diff_str, nice_stats[node_type].get('percent_documented', '0'), nice_stats[node_type].get('percent_badname', '0')) sect.append(Table(children=lines, cols=6, rheaders=1)) def redefined_by_decorator(node): """return True if the object is a method redefined via decorator. For example: @property def x(self): return self._x @x.setter def x(self, value): self._x = value """ if node.decorators: for decorator in node.decorators.nodes: if (isinstance(decorator, astng.Getattr) and getattr(decorator.expr, 'name', None) == node.name): return True return False class _BasicChecker(BaseChecker): __implements__ = IASTNGChecker name = 'basic' class BasicErrorChecker(_BasicChecker): msgs = { 'E0100': ('__init__ method is a generator', 'init-is-generator', 'Used when the special class method __init__ is turned into a ' 'generator by a yield in its body.'), 'E0101': ('Explicit return in __init__', 'return-in-init', 'Used when the special class method __init__ has an explicit \ return value.'), 'E0102': ('%s already defined line %s', 'function-redefined', 'Used when a function / class / method is redefined.'), 'E0103': ('%r not properly in loop', 'not-in-loop', 'Used when break or continue keywords are used outside a loop.'), 'E0104': ('Return outside function', 'return-outside-function', 'Used when a "return" statement is found outside a function or ' 'method.'), 'E0105': ('Yield outside function', 'yield-outside-function', 'Used when a "yield" statement is found outside a function or ' 'method.'), 'E0106': ('Return with argument inside generator', 'return-arg-in-generator', 'Used when a "return" statement with an argument is found ' 'outside in a generator function or method (e.g. 
with some ' '"yield" statements).'), 'E0107': ("Use of the non-existent %s operator", 'nonexistent-operator', "Used when you attempt to use the C-style pre-increment or" "pre-decrement operator -- and ++, which doesn't exist in Python."), } def __init__(self, linter): _BasicChecker.__init__(self, linter) @check_messages('E0102') def visit_class(self, node): self._check_redefinition('class', node) @check_messages('E0100', 'E0101', 'E0102', 'E0106') def visit_function(self, node): if not redefined_by_decorator(node): self._check_redefinition(node.is_method() and 'method' or 'function', node) # checks for max returns, branch, return in __init__ returns = node.nodes_of_class(astng.Return, skip_klass=(astng.Function, astng.Class)) if node.is_method() and node.name == '__init__': if node.is_generator(): self.add_message('E0100', node=node) else: values = [r.value for r in returns] if [v for v in values if not (v is None or (isinstance(v, astng.Const) and v.value is None) or (isinstance(v, astng.Name) and v.name == 'None'))]: self.add_message('E0101', node=node) elif node.is_generator(): # make sure we don't mix non-None returns and yields for retnode in returns: if isinstance(retnode.value, astng.Const) and \ retnode.value.value is not None: self.add_message('E0106', node=node, line=retnode.fromlineno) @check_messages('E0104') def visit_return(self, node): if not isinstance(node.frame(), astng.Function): self.add_message('E0104', node=node) @check_messages('E0105') def visit_yield(self, node): if not isinstance(node.frame(), astng.Function): self.add_message('E0105', node=node) @check_messages('E0103') def visit_continue(self, node): self._check_in_loop(node, 'continue') @check_messages('E0103') def visit_break(self, node): self._check_in_loop(node, 'break') @check_messages('E0107') def visit_unaryop(self, node): """check use of the non-existent ++ adn -- operator operator""" if ((node.op in '+-') and isinstance(node.operand, astng.UnaryOp) and (node.operand.op == node.op)): self.add_message('E0107', node=node, args=node.op*2) def _check_in_loop(self, node, node_name): """check that a node is inside a for or while loop""" _node = node.parent while _node: if isinstance(_node, (astng.For, astng.While)): break _node = _node.parent else: self.add_message('E0103', node=node, args=node_name) def _check_redefinition(self, redeftype, node): """check for redefinition of a function / method / class name""" defined_self = node.parent.frame()[node.name] if defined_self is not node and not are_exclusive(node, defined_self): self.add_message('E0102', node=node, args=(redeftype, defined_self.fromlineno)) class BasicChecker(_BasicChecker): """checks for : * doc strings * modules / classes / functions / methods / arguments / variables name * number of arguments, local variables, branches, returns and statements in functions, methods * required module attributes * dangerous default values as arguments * redefinition of function / method / class * uses of the global statement """ __implements__ = IASTNGChecker name = 'basic' msgs = { 'W0101': ('Unreachable code', 'unreachable', 'Used when there is some code behind a "return" or "raise" \ statement, which will never be accessed.'), 'W0102': ('Dangerous default value %s as argument', 'dangerous-default-value', 'Used when a mutable value as list or dictionary is detected in \ a default value for an argument.'), 'W0104': ('Statement seems to have no effect', 'pointless-statement', 'Used when a statement doesn\'t have (or at least seems to) \ any effect.'), 'W0105': 
('String statement has no effect', 'pointless-string-statement', 'Used when a string is used as a statement (which of course \ has no effect). This is a particular case of W0104 with its \ own message so you can easily disable it if you\'re using \ those strings as documentation, instead of comments.'), 'W0106': ('Expression "%s" is assigned to nothing', 'expression-not-assigned', 'Used when an expression that is not a function call is assigned\ to nothing. Probably something else was intended.'), 'W0108': ('Lambda may not be necessary', 'unnecessary-lambda', 'Used when the body of a lambda expression is a function call \ on the same argument list as the lambda itself; such lambda \ expressions are in all but a few cases replaceable with the \ function being called in the body of the lambda.'), 'W0109': ("Duplicate key %r in dictionary", 'duplicate-key', "Used when a dictionary expression binds the same key multiple \ times."), 'W0122': ('Use of the exec statement', 'exec-statement', 'Used when you use the "exec" statement, to discourage its \ usage. That doesn\'t mean you can not use it !'), 'W0141': ('Used builtin function %r', 'bad-builtin', 'Used when a black listed builtin function is used (see the ' 'bad-function option). Usual black listed functions are the ones ' 'like map, or filter , where Python offers now some cleaner ' 'alternative like list comprehension.'), 'W0142': ('Used * or ** magic', 'star-args', 'Used when a function or method is called using `*args` or ' '`**kwargs` to dispatch arguments. This doesn\'t improve ' 'readability and should be used with care.'), 'W0150': ("%s statement in finally block may swallow exception", 'lost-exception', "Used when a break or a return statement is found inside the \ finally clause of a try...finally block: the exceptions raised \ in the try clause will be silently swallowed instead of being \ re-raised."), 'W0199': ('Assert called on a 2-uple. 
Did you mean \'assert x,y\'?', 'assert-on-tuple', 'A call of assert on a tuple will always evaluate to true if ' 'the tuple is not empty, and will always evaluate to false if ' 'it is.'), 'C0121': ('Missing required attribute "%s"', # W0103 'missing-module-attribute', 'Used when an attribute required for modules is missing.'), } options = (('required-attributes', {'default' : (), 'type' : 'csv', 'metavar' : '<attributes>', 'help' : 'Required attributes for module, separated by a ' 'comma'} ), ('bad-functions', {'default' : ('map', 'filter', 'apply', 'input'), 'type' :'csv', 'metavar' : '<builtin function names>', 'help' : 'List of builtins function names that should not be ' 'used, separated by a comma'} ), ) reports = ( ('RP0101', 'Statistics by type', report_by_type_stats), ) def __init__(self, linter): _BasicChecker.__init__(self, linter) self.stats = None self._tryfinallys = None def open(self): """initialize visit variables and statistics """ self._tryfinallys = [] self.stats = self.linter.add_stats(module=0, function=0, method=0, class_=0) def visit_module(self, node): """check module name, docstring and required arguments """ self.stats['module'] += 1 for attr in self.config.required_attributes: if attr not in node: self.add_message('C0121', node=node, args=attr) def visit_class(self, node): """check module name, docstring and redefinition increment branch counter """ self.stats['class'] += 1 @check_messages('W0104', 'W0105') def visit_discard(self, node): """check for various kind of statements without effect""" expr = node.value if isinstance(expr, astng.Const) and isinstance(expr.value, basestring): # treat string statement in a separated message self.add_message('W0105', node=node) return # ignore if this is : # * a direct function call # * the unique child of a try/except body # * a yield (which are wrapped by a discard node in _ast XXX) # warn W0106 if we have any underlying function call (we can't predict # side effects), else W0104 if (isinstance(expr, (astng.Yield, astng.CallFunc)) or (isinstance(node.parent, astng.TryExcept) and node.parent.body == [node])): return if any(expr.nodes_of_class(astng.CallFunc)): self.add_message('W0106', node=node, args=expr.as_string()) else: self.add_message('W0104', node=node) @check_messages('W0108') def visit_lambda(self, node): """check whether or not the lambda is suspicious """ # if the body of the lambda is a call expression with the same # argument list as the lambda itself, then the lambda is # possibly unnecessary and at least suspicious. if node.args.defaults: # If the arguments of the lambda include defaults, then a # judgment cannot be made because there is no way to check # that the defaults defined by the lambda are the same as # the defaults defined by the function called in the body # of the lambda. return call = node.body if not isinstance(call, astng.CallFunc): # The body of the lambda must be a function call expression # for the lambda to be unnecessary. return # XXX are lambda still different with astng >= 0.18 ? # *args and **kwargs need to be treated specially, since they # are structured differently between the lambda and the function # call (in the lambda they appear in the args.args list and are # indicated as * and ** by two bits in the lambda's flags, but # in the function call they are omitted from the args list and # are indicated by separate attributes on the function call node). 
ordinary_args = list(node.args.args) if node.args.kwarg: if (not call.kwargs or not isinstance(call.kwargs, astng.Name) or node.args.kwarg != call.kwargs.name): return elif call.kwargs: return if node.args.vararg: if (not call.starargs or not isinstance(call.starargs, astng.Name) or node.args.vararg != call.starargs.name): return elif call.starargs: return # The "ordinary" arguments must be in a correspondence such that: # ordinary_args[i].name == call.args[i].name. if len(ordinary_args) != len(call.args): return for i in xrange(len(ordinary_args)): if not isinstance(call.args[i], astng.Name): return if node.args.args[i].name != call.args[i].name: return self.add_message('W0108', line=node.fromlineno, node=node) def visit_function(self, node): """check function name, docstring, arguments, redefinition, variable names, max locals """ self.stats[node.is_method() and 'method' or 'function'] += 1 # check for dangerous default values as arguments for default in node.args.defaults: try: value = default.infer().next() except astng.InferenceError: continue if isinstance(value, (astng.Dict, astng.List)): if value is default: msg = default.as_string() else: msg = '%s (%s)' % (default.as_string(), value.as_string()) self.add_message('W0102', node=node, args=(msg,)) if value.qname() == '__builtin__.set': if isinstance(default, astng.CallFunc): msg = default.as_string() else: msg = '%s (%s)' % (default.as_string(), value.qname()) self.add_message('W0102', node=node, args=(msg,)) @check_messages('W0101', 'W0150') def visit_return(self, node): """1 - check is the node has a right sibling (if so, that's some unreachable code) 2 - check is the node is inside the finally clause of a try...finally block """ self._check_unreachable(node) # Is it inside final body of a try...finally bloc ? self._check_not_in_finally(node, 'return', (astng.Function,)) @check_messages('W0101') def visit_continue(self, node): """check is the node has a right sibling (if so, that's some unreachable code) """ self._check_unreachable(node) @check_messages('W0101', 'W0150') def visit_break(self, node): """1 - check is the node has a right sibling (if so, that's some unreachable code) 2 - check is the node is inside the finally clause of a try...finally block """ # 1 - Is it right sibling ? self._check_unreachable(node) # 2 - Is it inside final body of a try...finally bloc ? self._check_not_in_finally(node, 'break', (astng.For, astng.While,)) @check_messages('W0101') def visit_raise(self, node): """check is the node has a right sibling (if so, that's some unreachable code) """ self._check_unreachable(node) @check_messages('W0122') def visit_exec(self, node): """just print a warning on exec statements""" self.add_message('W0122', node=node) @check_messages('W0141', 'W0142') def visit_callfunc(self, node): """visit a CallFunc node -> check if this is not a blacklisted builtin call and check for * or ** use """ if isinstance(node.func, astng.Name): name = node.func.name # ignore the name if it's not a builtin (i.e. 
not defined in the # locals nor globals scope) if not (name in node.frame() or name in node.root()): if name in self.config.bad_functions: self.add_message('W0141', node=node, args=name) if node.starargs or node.kwargs: scope = node.scope() if isinstance(scope, astng.Function): toprocess = [(n, vn) for (n, vn) in ((node.starargs, scope.args.vararg), (node.kwargs, scope.args.kwarg)) if n] if toprocess: for cfnode, fargname in toprocess[:]: if getattr(cfnode, 'name', None) == fargname: toprocess.remove((cfnode, fargname)) if not toprocess: return # W0142 can be skipped self.add_message('W0142', node=node.func) @check_messages('W0199') def visit_assert(self, node): """check the use of an assert statement on a tuple.""" if node.fail is None and isinstance(node.test, astng.Tuple) and \ len(node.test.elts) == 2: self.add_message('W0199', line=node.fromlineno, node=node) @check_messages('W0109') def visit_dict(self, node): """check duplicate key in dictionary""" keys = set() for k, _ in node.items: if isinstance(k, astng.Const): key = k.value if key in keys: self.add_message('W0109', node=node, args=key) keys.add(key) def visit_tryfinally(self, node): """update try...finally flag""" self._tryfinallys.append(node) def leave_tryfinally(self, node): """update try...finally flag""" self._tryfinallys.pop() def _check_unreachable(self, node): """check unreachable code""" unreach_stmt = node.next_sibling() if unreach_stmt is not None: self.add_message('W0101', node=unreach_stmt) def _check_not_in_finally(self, node, node_name, breaker_classes=()): """check that a node is not inside a finally clause of a try...finally statement. If we found before a try...finally bloc a parent which its type is in breaker_classes, we skip the whole check.""" # if self._tryfinallys is empty, we're not a in try...finally bloc if not self._tryfinallys: return # the node could be a grand-grand...-children of the try...finally _parent = node.parent _node = node while _parent and not isinstance(_parent, breaker_classes): if hasattr(_parent, 'finalbody') and _node in _parent.finalbody: self.add_message('W0150', node=node, args=node_name) return _node = _parent _parent = _node.parent class NameChecker(_BasicChecker): msgs = { 'C0102': ('Black listed name "%s"', 'blacklisted-name', 'Used when the name is listed in the black list (unauthorized \ names).'), 'C0103': ('Invalid name "%s" for type %s (should match %s)', 'invalid-name', 'Used when the name doesn\'t match the regular expression \ associated to its type (constant, variable, class...).'), } options = (('module-rgx', {'default' : MOD_NAME_RGX, 'type' :'regexp', 'metavar' : '<regexp>', 'help' : 'Regular expression which should only match correct ' 'module names'} ), ('const-rgx', {'default' : CONST_NAME_RGX, 'type' :'regexp', 'metavar' : '<regexp>', 'help' : 'Regular expression which should only match correct ' 'module level names'} ), ('class-rgx', {'default' : CLASS_NAME_RGX, 'type' :'regexp', 'metavar' : '<regexp>', 'help' : 'Regular expression which should only match correct ' 'class names'} ), ('function-rgx', {'default' : DEFAULT_NAME_RGX, 'type' :'regexp', 'metavar' : '<regexp>', 'help' : 'Regular expression which should only match correct ' 'function names'} ), ('method-rgx', {'default' : DEFAULT_NAME_RGX, 'type' :'regexp', 'metavar' : '<regexp>', 'help' : 'Regular expression which should only match correct ' 'method names'} ), ('attr-rgx', {'default' : DEFAULT_NAME_RGX, 'type' :'regexp', 'metavar' : '<regexp>', 'help' : 'Regular expression which should only match 
correct ' 'instance attribute names'} ), ('argument-rgx', {'default' : DEFAULT_NAME_RGX, 'type' :'regexp', 'metavar' : '<regexp>', 'help' : 'Regular expression which should only match correct ' 'argument names'}), ('variable-rgx', {'default' : DEFAULT_NAME_RGX, 'type' :'regexp', 'metavar' : '<regexp>', 'help' : 'Regular expression which should only match correct ' 'variable names'} ), ('inlinevar-rgx', {'default' : COMP_VAR_RGX, 'type' :'regexp', 'metavar' : '<regexp>', 'help' : 'Regular expression which should only match correct ' 'list comprehension / generator expression variable \ names'} ), # XXX use set ('good-names', {'default' : ('i', 'j', 'k', 'ex', 'Run', '_'), 'type' :'csv', 'metavar' : '<names>', 'help' : 'Good variable names which should always be accepted,' ' separated by a comma'} ), ('bad-names', {'default' : ('foo', 'bar', 'baz', 'toto', 'tutu', 'tata'), 'type' :'csv', 'metavar' : '<names>', 'help' : 'Bad variable names which should always be refused, ' 'separated by a comma'} ), ) def open(self): self.stats = self.linter.add_stats(badname_module=0, badname_class=0, badname_function=0, badname_method=0, badname_attr=0, badname_const=0, badname_variable=0, badname_inlinevar=0, badname_argument=0) @check_messages('C0102', 'C0103') def visit_module(self, node): self._check_name('module', node.name.split('.')[-1], node) @check_messages('C0102', 'C0103') def visit_class(self, node): self._check_name('class', node.name, node) for attr, anodes in node.instance_attrs.iteritems(): self._check_name('attr', attr, anodes[0]) @check_messages('C0102', 'C0103') def visit_function(self, node): self._check_name(node.is_method() and 'method' or 'function', node.name, node) # check arguments name args = node.args.args if args is not None: self._recursive_check_names(args, node) @check_messages('C0102', 'C0103') def visit_assname(self, node): """check module level assigned names""" frame = node.frame() ass_type = node.ass_type() if isinstance(ass_type, (astng.Comprehension, astng.Comprehension)): self._check_name('inlinevar', node.name, node) elif isinstance(frame, astng.Module): if isinstance(ass_type, astng.Assign) and not in_loop(ass_type): self._check_name('const', node.name, node) elif isinstance(ass_type, astng.ExceptHandler): self._check_name('variable', node.name, node) elif isinstance(frame, astng.Function): # global introduced variable aren't in the function locals if node.name in frame: self._check_name('variable', node.name, node) def _recursive_check_names(self, args, node): """check names in a possibly recursive list <arg>""" for arg in args: if isinstance(arg, astng.AssName): self._check_name('argument', arg.name, node) else: self._recursive_check_names(arg.elts, node) def _check_name(self, node_type, name, node): """check for a name using the type's regexp""" if is_inside_except(node): clobbering, _ = clobber_in_except(node) if clobbering: return if name in self.config.good_names: return if name in self.config.bad_names: self.stats['badname_' + node_type] += 1 self.add_message('C0102', node=node, args=name) return regexp = getattr(self.config, node_type + '_rgx') if regexp.match(name) is None: type_label = {'inlinedvar': 'inlined variable', 'const': 'constant', 'attr': 'attribute', }.get(node_type, node_type) self.add_message('C0103', node=node, args=(name, type_label, regexp.pattern)) self.stats['badname_' + node_type] += 1 class DocStringChecker(_BasicChecker): msgs = { 'C0111': ('Missing docstring', # W0131 'missing-docstring', 'Used when a module, function, class or method 
has no docstring.\ Some special methods like __init__ doesn\'t necessary require a \ docstring.'), 'C0112': ('Empty docstring', # W0132 'empty-docstring', 'Used when a module, function, class or method has an empty \ docstring (it would be too easy ;).'), } options = (('no-docstring-rgx', {'default' : NO_REQUIRED_DOC_RGX, 'type' : 'regexp', 'metavar' : '<regexp>', 'help' : 'Regular expression which should only match ' 'functions or classes name which do not require a ' 'docstring'} ), ) def open(self): self.stats = self.linter.add_stats(undocumented_module=0, undocumented_function=0, undocumented_method=0, undocumented_class=0) def visit_module(self, node): self._check_docstring('module', node) def visit_class(self, node): if self.config.no_docstring_rgx.match(node.name) is None: self._check_docstring('class', node) def visit_function(self, node): if self.config.no_docstring_rgx.match(node.name) is None: ftype = node.is_method() and 'method' or 'function' if isinstance(node.parent.frame(), astng.Class): overridden = False # check if node is from a method overridden by its ancestor for ancestor in node.parent.frame().ancestors(): if node.name in ancestor and \ isinstance(ancestor[node.name], astng.Function): overridden = True break if not overridden: self._check_docstring(ftype, node) else: self._check_docstring(ftype, node) def _check_docstring(self, node_type, node): """check the node has a non empty docstring""" docstring = node.doc if docstring is None: self.stats['undocumented_'+node_type] += 1 self.add_message('C0111', node=node) elif not docstring.strip(): self.stats['undocumented_'+node_type] += 1 self.add_message('C0112', node=node) class PassChecker(_BasicChecker): """check is the pass statement is really necessary""" msgs = {'W0107': ('Unnecessary pass statement', 'unnecessary-pass', 'Used when a "pass" statement that can be avoided is ' 'encountered.)'), } def visit_pass(self, node): if len(node.parent.child_sequence(node)) > 1: self.add_message('W0107', node=node) def register(linter): """required method to auto register this checker""" linter.register_checker(BasicErrorChecker(linter)) linter.register_checker(BasicChecker(linter)) linter.register_checker(NameChecker(linter)) linter.register_checker(DocStringChecker(linter)) linter.register_checker(PassChecker(linter))
tkaitchuck/nupic
external/common/lib/python2.6/site-packages/pylint/checkers/base.py
Python
gpl-3.0
34,609
[ "VisIt" ]
e6dc6e547d1e02bef1838e9be72177c1da48e67db9b32973aab703eba66594c6
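For context, a small invented module of the sort these pylint checkers are aimed at; the comments name the message IDs the corresponding lines would be expected to raise, although the exact report depends on which checkers and options are enabled.

# Illustrative lint target only (not part of the checker file above).
def append_item(item, bucket=[]):   # W0102: dangerous default value [] as argument
    bucket                          # W0104: statement seems to have no effect
    bucket.append(item)
    return bucket

CONFIG = {'a': 1, 'a': 2}           # W0109: duplicate key 'a' in dictionary

x = 1                               # C0103: "x" fails the module-level constant regex
++x                                 # E0107: use of the non-existent ++ operator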
import pygad as pg import matplotlib.pyplot as plt import matplotlib.gridspec as gridspec import numpy as np from scipy import stats import utils import glob import matplotlib.cm as cm import matplotlib.mlab as mlab from multiprocessing import Pool filename = __file__ n, start, end = 50, -6, -1 bins=np.logspace(start, end, n) def plot(args): halo = args[0] definition = args[1] print args path = '/ptmp/mpa/naab/REFINED/%s/SF_X/4x-2phase/out/snap_%s_4x_???' % (halo, halo) max = int(sorted(glob.glob(path))[-1][-3:]) su, hu, gu = pg.prepare_zoom('/ptmp/mpa/naab/REFINED/%s/SF_X/4x-2phase/out/snap_%s_4x_%s' % (halo, halo, max), gas_trace='/u/mihac/data/%s/4x-2phase/gastrace_%s' % (halo, definition), star_form=None) sb, hb, gb = pg.prepare_zoom('/ptmp/mpa/naab/REFINED/%s/SF_X/4x-2phase/out/snap_%s_4x_%s' % (halo, halo, max), gas_trace='/u/mihac/data/%s/4x-2phase/gastrace_%s' % (halo, 'halo'), star_form=None) ball_IDs = set(sb.gas['ID'][sb.gas['num_recycled'] > -1]) disk_IDs = set(su.gas['ID'][su.gas['num_recycled'] > -1]) ID_overlapp = ball_IDs.intersection(disk_IDs) overlapp_mask = pg.IDMask(list(ID_overlapp)) tracked_ball = sb[overlapp_mask] tracked_disk = su[overlapp_mask] halo_z = tracked_ball.gas['metals_at_infall'] / tracked_ball.gas['mass_at_infall'] disk_z = tracked_disk.gas['metals_at_infall'] / tracked_disk.gas['mass_at_infall'] halo_z_1st_infall = [x[0] for x in halo_z] disk_z_1st_infall = [x[0] for x in disk_z] mass_at_halo_infall, edges_h, count = stats.binned_statistic(halo_z_1st_infall, tracked_ball.gas['mass_at_infall'][:, 0], statistic='sum', bins=bins) mass_at_disc_infall, edges_d, count = stats.binned_statistic(disk_z_1st_infall, tracked_disk.gas['mass_at_infall'][:, 0], statistic='sum', bins=bins) disk_infall_avg, edges_h, count = stats.binned_statistic(halo_z_1st_infall, disk_z_1st_infall, statistic='mean', bins=bins) x, y = bins, bins X, Y = np.meshgrid(x[:-1], y[:-1]) Z = np.histogram2d(disk_z_1st_infall, halo_z_1st_infall, bins=bins)[0] f = plt.figure(figsize=(15, 15)) gs = gridspec.GridSpec(4, 4) ax1 = plt.subplot(gs[:-1, 1:]) ax2 = plt.subplot(gs[:-1, 0]) ax3 = plt.subplot(gs[-1, 1:]) ax1.scatter(halo_z_1st_infall, disk_z_1st_infall, alpha=.1, edgecolor=None) ax1.set_xlabel(r'$z_{halo\ infall}$') ax1.set_ylabel(r'$z_{galaxy\ infall}$') ax1.set_xlim((1e-6, 1e-1)) ax1.set_ylim((1e-4, 1e-1)) ax1.set_xscale('log') ax1.set_yscale('log') ax1.plot([0, 1e10], [0, 1e10], c='r', label='1:1') cont = ax1.contour(X, Y, Z, np.linspace(0, np.percentile(Z, 99), 10)) ax1.step(edges_h[:-1], disk_infall_avg, where='mid', color='k', lw=2.5, label='average') ax1.legend(loc='upper right') ax2.barh(edges_d[:-1], mass_at_disc_infall / 1e9, height=np.diff(edges_d), log=True, align='edge') ax2.set_yscale('log') ax2.set_xscale('linear') ax2_xlim = ax2.get_xlim()[::-1] ax2_xlim[1] = 0 ax2.set_xlim(ax2_xlim) ax2.set_ylim(ax1.get_ylim()) ax2.set_xlabel(r'$Mass\ [10^{9}\ M_\odot]$') ax2.tick_params(labelleft='off') ax3.bar(edges_h[:-1], mass_at_halo_infall / 1e9, width=np.diff(edges_h), align='edge', log=True) ax3.set_xscale('log') ax3.set_yscale('linear') ax3_ylim = ax3.get_ylim()[::-1] ax3_ylim[1] = 0 ax3.set_ylim(ax3_ylim) ax3.set_xlim(ax1.get_xlim()) ax3.set_ylabel(r'$Mass\ [10^{9}\ M_\odot]$') ax3.tick_params(labelbottom='off') f.tight_layout() plt.subplots_adjust(top=0.93) f.suptitle('%s - %s' % (halo, definition), fontsize=32) plt.savefig(filename.split("/")[-1][:-3] + '_' + halo + '_' + definition + ".png", bbox_inches='tight') p = Pool(8) p.map(plot, utils.combinations)
Migelo/mpa_garching
1/z_gal_vs_z_halo.py
Python
mit
3,868
[ "Galaxy" ]
3058496d2e5a52c154e2b8e80c61dc3752f38a9baee1bbf00df16072ee9718c8
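The plotting script above builds its mass-weighted marginal histograms with scipy's binned_statistic over logarithmically spaced metallicity bins. A minimal sketch of that pattern with invented toy values:

# Illustrative sketch only: toy data, not from the simulation output used above.
import numpy as np
from scipy import stats

bins = np.logspace(-6, -1, 50)              # same binning as the script
z = np.array([1e-5, 3e-5, 2e-4, 5e-3])      # metallicity at first infall (toy)
mass = np.array([1e8, 2e8, 5e8, 1e9])       # particle masses (toy)

mass_per_bin, edges, _ = stats.binned_statistic(
    z, mass, statistic='sum', bins=bins)
# mass_per_bin[i] is the total mass of the particles whose metallicity falls in
# the i-th bin; the script plots these sums as the marginal bar charts.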
#!/usr/bin/env python """ dirac-rss-query-dtcache Select/Add/Delete a new DownTime entry for a given Site or Service. Usage: dirac-rss-query-dtcache [option] <query> Queries: [select|add|delete] Options: --downtimeID= The ID of the downtime --element= Element (Site, Service) affected by the downtime --name= Name of the element --startDate= Starting date of the downtime --endDate= Ending date of the downtime --severity= Severity of the downtime (Warning, Outage) --description= Description of the downtime --link= URL of the downtime announcement --ongoing To force "select" to return the ongoing downtimes Verbosity: -o LogLevel=LEVEL NOTICE by default, levels available: INFO, DEBUG, VERBOSE.. """ from DIRAC import gConfig, gLogger, exit as DIRACExit, S_OK, version from DIRAC.Core.Base import Script from DIRAC.ResourceStatusSystem.Client import ResourceManagementClient from DIRAC.Core.Utilities import Time from DIRAC.Core.Utilities.PrettyPrint import printTable import re import datetime from DIRAC.ResourceStatusSystem.Utilities import Utils ResourceManagementClient = getattr(Utils.voimport( 'DIRAC.ResourceStatusSystem.Client.ResourceManagementClient' ),'ResourceManagementClient') __RCSID__ = '$Id:$' subLogger = None switchDict = {} def registerSwitches(): ''' Registers all switches that can be used while calling the script from the command line interface. ''' switches = ( ( 'downtimeID=', 'ID of the downtime' ), ( 'element=', 'Element (Site, Service) affected by the downtime' ), ( 'name=', 'Name of the element' ), ( 'startDate=', 'Starting date of the downtime' ), ( 'endDate=', 'Ending date of the downtime' ), ( 'severity=', 'Severity of the downtime (Warning, Outage)' ), ( 'description=', 'Description of the downtime' ), ( 'link=', 'URL of the downtime announcement' ), ( 'ongoing', 'To force "select" to return the ongoing downtimes' ) ) for switch in switches: Script.registerSwitch( '', switch[ 0 ], switch[ 1 ] ) def registerUsageMessage(): ''' Takes the script __doc__ and adds the DIRAC version to it ''' usageMessage = 'DIRAC version: %s \n' % version usageMessage += __doc__ Script.setUsageMessage( usageMessage ) def parseSwitches(): ''' Parses the arguments passed by the user ''' Script.parseCommandLine( ignoreErrors = True ) args = Script.getPositionalArgs() if len( args ) == 0: error( "Missing mandatory 'query' argument" ) elif not args[0].lower() in ( 'select', 'add', 'delete' ): error( "Missing mandatory argument" ) else: query = args[0].lower() switches = dict( Script.getUnprocessedSwitches() ) # Default values switches.setdefault( 'downtimeID', None ) switches.setdefault( 'element', None ) switches.setdefault( 'name', None ) switches.setdefault( 'startDate', None ) switches.setdefault( 'endDate', None ) switches.setdefault( 'severity', None ) switches.setdefault( 'description', None ) switches.setdefault( 'link', None ) if query in ( 'add', 'delete' ) and switches['downtimeID'] is None: error( "'downtimeID' switch is mandatory for '%s' but found missing" % query ) if query in ( 'add', 'delete' ) and 'ongoing' in switches: error( "'ongoing' switch can be used only with 'select'" ) subLogger.debug( "The switches used are:" ) map( subLogger.debug, switches.iteritems() ) return ( args, switches ) #............................................................................... 
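# Illustrative note (hypothetical command line, not part of the original file):
# with the switches registered above, an invocation such as
#   dirac-rss-query-dtcache select --element=Site --severity=Outage --ongoing
# produces args == ['select'] and, after the setdefault calls in parseSwitches,
# a switches dict in which 'element' is 'Site', 'severity' is 'Outage',
# 'ongoing' is present, and the remaining keys (downtimeID, name, startDate,
# endDate, description, link) default to None; run(), defined further below,
# then dispatches the 'select' query to select().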
# UTILS: for filtering 'select' output def filterDate( selectOutput, start, end ): ''' Selects all the downtimes that meet the constraints of 'start' and 'end' dates ''' downtimes = selectOutput downtimesFiltered = [] if start is not None: try: start = Time.fromString( start ) except: error( "datetime formt is incorrect, pls try [%Y-%m-%d[ %H:%M:%S]]" ) start = Time.toEpoch( start ) if end is not None: try: end = Time.fromString( end ) except: error( "datetime formt is incorrect, pls try [%Y-%m-%d[ %H:%M:%S]]" ) end = Time.toEpoch( end ) if start is not None and end is not None: for dt in downtimes: dtStart = Time.toEpoch( dt[ 'startDate' ] ) dtEnd = Time.toEpoch( dt[ 'endDate' ] ) if ( dtStart >= start ) and ( dtEnd <= end ): downtimesFiltered.append( dt ) elif start is not None and end is None: for dt in downtimes: dtStart = Time.toEpoch( dt[ 'startDate' ] ) if dtStart >= start: downtimesFiltered.append( dt ) elif start is None and end is not None: for dt in downtimes: dtEnd = Time.toEpoch( dt[ 'endDate' ] ) if dtEnd <= end: downtimesFiltered.append( dt ) else: downtimesFiltered = downtimes return downtimesFiltered def filterOngoing( selectOutput ): ''' Selects all the ongoing downtimes ''' downtimes = selectOutput downtimesFiltered = [] currentDate = Time.toEpoch( Time.dateTime() ) for dt in downtimes: dtStart = Time.toEpoch( dt[ 'startDate' ] ) dtEnd = Time.toEpoch( dt[ 'endDate' ] ) if ( dtStart <= currentDate ) and ( dtEnd >= currentDate ): downtimesFiltered.append( dt ) return downtimesFiltered def filterDescription( selectOutput, description ): ''' Selects all the downtimes that match 'description' ''' downtimes = selectOutput downtimesFiltered = [] if description is not None: for dt in downtimes: if description in dt[ 'description' ]: downtimesFiltered.append( dt ) else: downtimesFiltered = downtimes return downtimesFiltered #............................................................................... # Utils: for formatting query output and notifications def error( msg ): ''' Format error messages ''' subLogger.error( "\nERROR:" ) subLogger.error( "\t" + msg ) subLogger.error( "\tPlease, check documentation below" ) Script.showHelp() DIRACExit( 1 ) def confirm( query, matches ): ''' Format confirmation messages ''' subLogger.notice( "\nNOTICE: '%s' request successfully executed ( matches' number: %s )! \n" % ( query, matches ) ) def tabularPrint( table ): columns_names = table[0].keys() records = [] for row in table: record = [] for k,v in row.items(): if type( v ) == datetime.datetime: record.append( Time.toString( v ) ) elif v is None: record.append( '' ) else: record.append( v ) records.append( record ) output = printTable( columns_names, records, numbering = False, columnSeparator = " | ", printOut = False ) subLogger.notice( output ) #............................................................................... def select( switchDict ): ''' Given the switches, request a query 'select' on the ResourceManagementDB that gets from DowntimeCache all rows that match the parameters given. 
''' rmsClient = ResourceManagementClient() meta = { 'columns' : [ 'downtimeID', 'element', 'name', 'startDate', 'endDate', 'severity', 'description', 'link', 'dateEffective' ] } result = { 'output': None, 'successful': None, 'message': None, 'match': None } output = rmsClient.selectDowntimeCache( downtimeID = switchDict[ 'downtimeID' ], element = switchDict[ 'element' ], name = switchDict[ 'name' ], #startDate = switchDict[ 'startDate' ], #endDate = switchDict[ 'endDate' ], severity = switchDict[ 'severity' ], #description = switchDict[ 'description' ], #link = switchDict[ 'link' ], #dateEffective = switchDict[ 'dateEffective' ], meta = meta ) result['output'] = [ dict( zip( output[ 'Columns' ], dt ) ) for dt in output[ 'Value' ] ] if 'ongoing' in switchDict: result['output'] = filterOngoing( result['output'] ) else: result['output'] = filterDate( result['output'], switchDict[ 'startDate' ], switchDict[ 'endDate' ] ) result['output'] = filterDescription( result['output'], switchDict[ 'description' ] ) result['match'] = len( result['output'] ) result['successful'] = output['OK'] result['message'] = output['Message'] if 'Message' in output else None return result def add( switchDict ): ''' Given the switches, request a query 'addOrModify' on the ResourceManagementDB that inserts or updates-if-duplicated from DowntimeCache. ''' rmsClient = ResourceManagementClient() result = { 'output': None, 'successful': None, 'message': None, 'match': None } output = rmsClient.addOrModifyDowntimeCache( downtimeID = switchDict[ 'downtimeID' ], element = switchDict[ 'element' ], name = switchDict[ 'name' ], startDate = switchDict[ 'startDate' ], endDate = switchDict[ 'endDate' ], severity = switchDict[ 'severity' ], description = switchDict[ 'description' ], link = switchDict[ 'link' ] #dateEffective = switchDict[ 'dateEffective' ] ) result['match'] = int( output['Value'] ) result['successful'] = output['OK'] result['message'] = output['Message'] if 'Message' in output else None return result def delete( switchDict ): ''' Given the switches, request a query 'delete' on the ResourceManagementDB that deletes from DowntimeCache all rows that match the parameters given. ''' rmsClient = ResourceManagementClient() result = { 'output': None, 'successful': None, 'message': None, 'match': None } output = rmsClient.deleteDowntimeCache( downtimeID = switchDict[ 'downtimeID' ], element = switchDict[ 'element' ], name = switchDict[ 'name' ], startDate = switchDict[ 'startDate' ], endDate = switchDict[ 'endDate' ], severity = switchDict[ 'severity' ], description = switchDict[ 'description' ], link = switchDict[ 'link' ] #dateEffective = switchDict[ 'dateEffective' ] ) result['match'] = int( output['Value'] ) result['successful'] = output['OK'] result['message'] = output['Message'] if 'Message' in output else None return result #............................................................................... def run( args, switchDict ): ''' Main function of the script ''' query = args[0] # it exectues the query request: e.g. if it's a 'select' it executes 'select()' # the same if it is add, delete result = eval( query + '( switchDict )' ) if result[ 'successful' ]: if query == 'select' and result['match'] > 0: tabularPrint( result[ 'output' ] ) confirm( query, result['match'] ) else: error( result[ 'message' ] ) #............................................................................... 
if __name__ == "__main__": subLogger = gLogger.getSubLogger( __file__ ) #Script initialization registerSwitches() registerUsageMessage() args, switchDict = parseSwitches() #Run script run( args, switchDict ) #Bye DIRACExit( 0 ) ################################################################################ #EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF
vmendez/DIRAC
ResourceStatusSystem/scripts/dirac-rss-query-dtcache.py
Python
gpl-3.0
12,482
[ "DIRAC" ]
19428c2d8f748128e1564e4f08ba2fad2146e89ec73a08b290654eb399744349
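The 'select' query above filters its results in Python after the database call (filterDate, filterOngoing). The following is a minimal standard-library sketch of the same containment semantics, keeping only downtimes fully inside the requested window; to_epoch is a hypothetical stand-in for DIRAC's Time.toEpoch helper, not the real API.

from datetime import datetime

def to_epoch(dt):
    # Simplified stand-in for DIRAC's Time.toEpoch helper.
    return (dt - datetime(1970, 1, 1)).total_seconds()

def filter_window(downtimes, start=None, end=None):
    # Keep downtimes fully contained in [start, end], mirroring filterDate above.
    kept = []
    for dt in downtimes:
        dt_start, dt_end = to_epoch(dt['startDate']), to_epoch(dt['endDate'])
        if start is not None and dt_start < to_epoch(start):
            continue
        if end is not None and dt_end > to_epoch(end):
            continue
        kept.append(dt)
    return kept

downtimes = [
    {'startDate': datetime(2015, 1, 10), 'endDate': datetime(2015, 1, 12)},
    {'startDate': datetime(2015, 2, 1),  'endDate': datetime(2015, 2, 5)},
]
print(filter_window(downtimes, start=datetime(2015, 1, 1), end=datetime(2015, 1, 31)))
# only the January downtime survives the filter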
#!/usr/bin/env python # # Copyright 2013 The Chromium Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. """Convert Android xml resources to API 14 compatible. There are two reasons that we cannot just use API 17 attributes, so we are generating another set of resources by this script. 1. paddingStart attribute can cause a crash on Galaxy Tab 2. 2. There is a bug that paddingStart does not override paddingLeft on JB-MR1. This is fixed on JB-MR2. b/8654490 Therefore, this resource generation script can be removed when we drop the support for JB-MR1. Please refer to http://crbug.com/235118 for the details. """ import codecs import optparse import os import re import shutil import sys import xml.dom.minidom as minidom from util import build_utils # Note that we are assuming 'android:' is an alias of # the namespace 'http://schemas.android.com/apk/res/android'. GRAVITY_ATTRIBUTES = ('android:gravity', 'android:layout_gravity') # Almost all the attributes that has "Start" or "End" in # its name should be mapped. ATTRIBUTES_TO_MAP = {'paddingStart' : 'paddingLeft', 'drawableStart' : 'drawableLeft', 'layout_alignStart' : 'layout_alignLeft', 'layout_marginStart' : 'layout_marginLeft', 'layout_alignParentStart' : 'layout_alignParentLeft', 'layout_toStartOf' : 'layout_toLeftOf', 'paddingEnd' : 'paddingRight', 'drawableEnd' : 'drawableRight', 'layout_alignEnd' : 'layout_alignRight', 'layout_marginEnd' : 'layout_marginRight', 'layout_alignParentEnd' : 'layout_alignParentRight', 'layout_toEndOf' : 'layout_toRightOf'} ATTRIBUTES_TO_MAP = dict(['android:' + k, 'android:' + v] for k, v in ATTRIBUTES_TO_MAP.iteritems()) ATTRIBUTES_TO_MAP_REVERSED = dict([v, k] for k, v in ATTRIBUTES_TO_MAP.iteritems()) def IterateXmlElements(node): """minidom helper function that iterates all the element nodes. 
Iteration order is pre-order depth-first.""" if node.nodeType == node.ELEMENT_NODE: yield node for child_node in node.childNodes: for child_node_element in IterateXmlElements(child_node): yield child_node_element def ParseAndReportErrors(filename): try: return minidom.parse(filename) except Exception: # pylint: disable=broad-except import traceback traceback.print_exc() sys.stderr.write('Failed to parse XML file: %s\n' % filename) sys.exit(1) def AssertNotDeprecatedAttribute(name, value, filename): """Raises an exception if the given attribute is deprecated.""" msg = None if name in ATTRIBUTES_TO_MAP_REVERSED: msg = '{0} should use {1} instead of {2}'.format(filename, ATTRIBUTES_TO_MAP_REVERSED[name], name) elif name in GRAVITY_ATTRIBUTES and ('left' in value or 'right' in value): msg = '{0} should use start/end instead of left/right for {1}'.format( filename, name) if msg: msg += ('\nFor background, see: http://android-developers.blogspot.com/' '2013/03/native-rtl-support-in-android-42.html\n' 'If you have a legitimate need for this attribute, discuss with ' 'kkimlabs@chromium.org or newt@chromium.org') raise Exception(msg) def WriteDomToFile(dom, filename): """Write the given dom to filename.""" build_utils.MakeDirectory(os.path.dirname(filename)) with codecs.open(filename, 'w', 'utf-8') as f: dom.writexml(f, '', ' ', '\n', encoding='utf-8') def HasStyleResource(dom): """Return True if the dom is a style resource, False otherwise.""" root_node = IterateXmlElements(dom).next() return bool(root_node.nodeName == 'resources' and list(root_node.getElementsByTagName('style'))) def ErrorIfStyleResourceExistsInDir(input_dir): """If a style resource is in input_dir, raises an exception.""" for input_filename in build_utils.FindInDirectory(input_dir, '*.xml'): dom = ParseAndReportErrors(input_filename) if HasStyleResource(dom): # Allow style file in third_party to exist in non-v17 directories so long # as they do not contain deprecated attributes. if not 'third_party' in input_dir or ( GenerateV14StyleResourceDom(dom, input_filename)): raise Exception('error: style file ' + input_filename + ' should be under ' + input_dir + '-v17 directory. Please refer to ' 'http://crbug.com/243952 for the details.') def GenerateV14LayoutResourceDom(dom, filename, assert_not_deprecated=True): """Convert layout resource to API 14 compatible layout resource. Args: dom: Parsed minidom object to be modified. filename: Filename that the DOM was parsed from. assert_not_deprecated: Whether deprecated attributes (e.g. paddingLeft) will cause an exception to be thrown. Returns: True if dom is modified, False otherwise. """ is_modified = False # Iterate all the elements' attributes to find attributes to convert. for element in IterateXmlElements(dom): for name, value in list(element.attributes.items()): # Convert any API 17 Start/End attributes to Left/Right attributes. # For example, from paddingStart="10dp" to paddingLeft="10dp" # Note: gravity attributes are not necessary to convert because # start/end values are backward-compatible. Explained at # https://plus.sandbox.google.com/+RomanNurik/posts/huuJd8iVVXY?e=Showroom if name in ATTRIBUTES_TO_MAP: element.setAttribute(ATTRIBUTES_TO_MAP[name], value) del element.attributes[name] is_modified = True elif assert_not_deprecated: AssertNotDeprecatedAttribute(name, value, filename) return is_modified def GenerateV14StyleResourceDom(dom, filename, assert_not_deprecated=True): """Convert style resource to API 14 compatible style resource. 
Args: dom: Parsed minidom object to be modified. filename: Filename that the DOM was parsed from. assert_not_deprecated: Whether deprecated attributes (e.g. paddingLeft) will cause an exception to be thrown. Returns: True if dom is modified, False otherwise. """ is_modified = False for style_element in dom.getElementsByTagName('style'): for item_element in style_element.getElementsByTagName('item'): name = item_element.attributes['name'].value value = item_element.childNodes[0].nodeValue if name in ATTRIBUTES_TO_MAP: item_element.attributes['name'].value = ATTRIBUTES_TO_MAP[name] is_modified = True elif assert_not_deprecated: AssertNotDeprecatedAttribute(name, value, filename) return is_modified def GenerateV14LayoutResource(input_filename, output_v14_filename, output_v17_filename): """Convert API 17 layout resource to API 14 compatible layout resource. It's mostly a simple replacement, s/Start/Left s/End/Right, on the attribute names. If the generated resource is identical to the original resource, don't do anything. If not, write the generated resource to output_v14_filename, and copy the original resource to output_v17_filename. """ dom = ParseAndReportErrors(input_filename) is_modified = GenerateV14LayoutResourceDom(dom, input_filename) if is_modified: # Write the generated resource. WriteDomToFile(dom, output_v14_filename) # Copy the original resource. build_utils.MakeDirectory(os.path.dirname(output_v17_filename)) shutil.copy2(input_filename, output_v17_filename) def GenerateV14StyleResource(input_filename, output_v14_filename): """Convert API 17 style resources to API 14 compatible style resource. Write the generated style resource to output_v14_filename. It's mostly a simple replacement, s/Start/Left s/End/Right, on the attribute names. """ dom = ParseAndReportErrors(input_filename) GenerateV14StyleResourceDom(dom, input_filename) # Write the generated resource. WriteDomToFile(dom, output_v14_filename) def GenerateV14LayoutResourcesInDir(input_dir, output_v14_dir, output_v17_dir): """Convert layout resources to API 14 compatible resources in input_dir.""" for input_filename in build_utils.FindInDirectory(input_dir, '*.xml'): rel_filename = os.path.relpath(input_filename, input_dir) output_v14_filename = os.path.join(output_v14_dir, rel_filename) output_v17_filename = os.path.join(output_v17_dir, rel_filename) GenerateV14LayoutResource(input_filename, output_v14_filename, output_v17_filename) def GenerateV14StyleResourcesInDir(input_dir, output_v14_dir): """Convert style resources to API 14 compatible resources in input_dir.""" for input_filename in build_utils.FindInDirectory(input_dir, '*.xml'): rel_filename = os.path.relpath(input_filename, input_dir) output_v14_filename = os.path.join(output_v14_dir, rel_filename) GenerateV14StyleResource(input_filename, output_v14_filename) def ParseArgs(): """Parses command line options. Returns: An options object as from optparse.OptionsParser.parse_args() """ parser = optparse.OptionParser() parser.add_option('--res-dir', help='directory containing resources ' 'used to generate v14 compatible resources') parser.add_option('--res-v14-compatibility-dir', help='output directory into which ' 'v14 compatible resources will be generated') parser.add_option('--stamp', help='File to touch on success') options, args = parser.parse_args() if args: parser.error('No positional arguments should be given.') # Check that required options have been provided. 
required_options = ('res_dir', 'res_v14_compatibility_dir') build_utils.CheckOptions(options, parser, required=required_options) return options def GenerateV14Resources(res_dir, res_v14_dir): for name in os.listdir(res_dir): if not os.path.isdir(os.path.join(res_dir, name)): continue dir_pieces = name.split('-') resource_type = dir_pieces[0] qualifiers = dir_pieces[1:] api_level_qualifier_index = -1 api_level_qualifier = '' for index, qualifier in enumerate(qualifiers): if re.match('v[0-9]+$', qualifier): api_level_qualifier_index = index api_level_qualifier = qualifier break # Android pre-v17 API doesn't support RTL. Skip. if 'ldrtl' in qualifiers: continue input_dir = os.path.abspath(os.path.join(res_dir, name)) # We also need to copy the original v17 resource to *-v17 directory # because the generated v14 resource will hide the original resource. output_v14_dir = os.path.join(res_v14_dir, name) output_v17_dir = os.path.join(res_v14_dir, name + '-v17') # We only convert layout resources under layout*/, xml*/, # and style resources under values*/. if resource_type in ('layout', 'xml'): if not api_level_qualifier: GenerateV14LayoutResourcesInDir(input_dir, output_v14_dir, output_v17_dir) elif resource_type == 'values': if api_level_qualifier == 'v17': output_qualifiers = qualifiers[:] del output_qualifiers[api_level_qualifier_index] output_v14_dir = os.path.join(res_v14_dir, '-'.join([resource_type] + output_qualifiers)) GenerateV14StyleResourcesInDir(input_dir, output_v14_dir) elif not api_level_qualifier: ErrorIfStyleResourceExistsInDir(input_dir) def main(): options = ParseArgs() res_v14_dir = options.res_v14_compatibility_dir build_utils.DeleteDirectory(res_v14_dir) build_utils.MakeDirectory(res_v14_dir) GenerateV14Resources(options.res_dir, res_v14_dir) if options.stamp: build_utils.Touch(options.stamp) if __name__ == '__main__': sys.exit(main())
Teamxrtc/webrtc-streaming-node
third_party/webrtc/src/chromium/src/build/android/gyp/generate_v14_compatible_resources.py
Python
mit
12,231
[ "Galaxy" ]
3bb4f5ac1b7b12927c56e94f23c8d0f6cd43f7d393a4843bbe9cc0e9720d8040
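The conversion above boils down to rewriting API 17 start/end attributes as their left/right equivalents on each XML element. Here is a small self-contained sketch of that rewrite with xml.dom.minidom; it uses only a two-entry subset of the real mapping table and a made-up layout snippet.

import xml.dom.minidom as minidom

# Subset of the mapping table used by the script above.
ATTRIBUTES_TO_MAP = {
    'android:paddingStart': 'android:paddingLeft',
    'android:layout_marginEnd': 'android:layout_marginRight',
}

xml_text = ('<LinearLayout xmlns:android="http://schemas.android.com/apk/res/android" '
            'android:paddingStart="10dp" android:layout_marginEnd="4dp"/>')

dom = minidom.parseString(xml_text)
for element in dom.getElementsByTagName('LinearLayout'):
    for name, value in list(element.attributes.items()):
        if name in ATTRIBUTES_TO_MAP:
            # Replace the API 17 start/end attribute with its left/right equivalent.
            element.setAttribute(ATTRIBUTES_TO_MAP[name], value)
            del element.attributes[name]

print(dom.documentElement.toxml())
# paddingStart / layout_marginEnd come out rewritten as paddingLeft / layout_marginRight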
# BREADTH-FIRST-SEARCH

# Visit an initial vertex, and then visit every one of its
# neighbours. Only when every neighbour vertex has been
# visited does the search move to a neighbour of a neighbour.
# Each time the search comes to a new vertex it visits all of
# its neighbours before visiting any that are further from
# the start.

# The algorithm
# ------------------------------------------------------
# create queue
# add v to the back of the queue
# ITERATE while queue is not empty
#     remove u from the front of the queue
#     set the colour of u to black
#     ITERATE over all w that are neighbours of u
#         IF the colour of w is white
#             set the colour of w to grey
#             add w to the back of the queue
# ------------------------------------------------------

def bfs(v, g):
    queue = []
    queue.append(v)
    while len(queue) > 0:
        u = queue.pop(0)
        g[u]['colour'] = 'black'
        for w in g[u]['neighbours']:
            if g[w]['colour'] == 'white':
                g[w]['colour'] = 'grey'
                queue.append(w)

# The idea is that we keep a todo list of vertices that we are
# aware of but have not visited yet. The list is a queue so
# the next vertex to visit will always be at the front of
# the queue. As in DFS, we colour the vertices, but in BFS
# there are three colours:
# - black for a vertex that has been visited
# - grey for a vertex waiting to be visited
# - white for a vertex we have not reached yet

# While the todo list is not empty, we repeat these steps:
# - Remove the vertex at the front of the queue
# - Locate its neighbours. Those coloured white are
#   coloured grey and added to the back of the queue

def chatty_bfs(vertex, graph):
    g = graph
    v = vertex
    queue = []
    queue.append(v)
    while len(queue) > 0:
        u = queue.pop(0)
        g[u]['colour'] = 'black'
        print( "Visited: ", u )
        for w in g[u]['neighbours']:
            if g[w]['colour'] == 'white':
                g[w]['colour'] = 'grey'
                queue.append(w)
    return g

graph1 = {
    1 : { 'colour' : 'white', 'neighbours' : [2, 3, 4] },
    2 : { 'colour' : 'white', 'neighbours' : [1, 4, 5] },
    3 : { 'colour' : 'white', 'neighbours' : [1, 4] },
    4 : { 'colour' : 'white', 'neighbours' : [1, 2, 3] },
    5 : { 'colour' : 'white', 'neighbours' : [2] }
}

print( graph1 )
print( chatty_bfs(3, graph1) )
melayev/algods
bfs.py
Python
mit
2,241
[ "VisIt" ]
e4a6e0b896a2119577596ecdcd21931b202f922ea505295b681a26567e6b9c31
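As a follow-up to the file above, here is a small variant (not part of the original) that records the visit order and swaps the list-based queue for collections.deque, so removing from the front is O(1) instead of O(n).

from collections import deque

def bfs_order(start, graph):
    # Same traversal as chatty_bfs above, but returns the visit order and
    # keeps its own colour map instead of mutating the graph.
    colour = {v: 'white' for v in graph}
    order = []
    queue = deque([start])
    colour[start] = 'grey'
    while queue:
        u = queue.popleft()
        colour[u] = 'black'
        order.append(u)
        for w in graph[u]['neighbours']:
            if colour[w] == 'white':
                colour[w] = 'grey'
                queue.append(w)
    return order

graph1 = {
    1: {'neighbours': [2, 3, 4]},
    2: {'neighbours': [1, 4, 5]},
    3: {'neighbours': [1, 4]},
    4: {'neighbours': [1, 2, 3]},
    5: {'neighbours': [2]},
}
print(bfs_order(3, graph1))  # [3, 1, 4, 2, 5]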
# # Gramps - a GTK+/GNOME based genealogy program # # Copyright (C) 2007 Douglas S. Blank # Copyright (C) 2000-2007 Donald N. Allingham # Copyright (C) 2008 Raphael Ackerman # Copyright (C) 2008 Brian G. Matherly # Copyright (C) 2011 Tim G L Lyons # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 2 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software # Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA # # $Id$ "Import from CSV Spreadsheet" #------------------------------------------------------------------------- # # Standard Python Modules # #------------------------------------------------------------------------- import time import csv import codecs #------------------------------------------------------------------------ # # Set up logging # #------------------------------------------------------------------------ import logging LOG = logging.getLogger(".ImportCSV") #------------------------------------------------------------------------- # # GRAMPS modules # #------------------------------------------------------------------------- from gramps.gen.ggettext import sgettext as _ from gramps.gen.ggettext import ngettext from gramps.gen.lib import ChildRef, Citation, Event, EventRef, EventType, Family, FamilyRelType, Name, NameType, Note, NoteType, Person, Place, Source, Surname from gramps.gen.db import DbTxn from gramps.gen.plug.utils import OpenFileOrStdin from gramps.gen.datehandler import parser as _dp from gramps.gen.utils.string import gender as gender_map from gramps.gen.utils.id import create_id from gramps.gui.utils import ProgressMeter from gramps.gen.lib.eventroletype import EventRoleType #------------------------------------------------------------------------- # # Support Functions # #------------------------------------------------------------------------- def get_primary_event_ref_from_type(dbase, person, event_name): """ >>> get_primary_event_ref_from_type(dbase, Person(), "Baptism"): """ for ref in person.event_ref_list: if ref.get_role() == EventRoleType.PRIMARY: event = dbase.get_event_from_handle(ref.ref) if event and event.type.is_type(event_name): return ref return None #------------------------------------------------------------------------- # # Encoding support for CSV, from http://docs.python.org/lib/csv-examples.html # #------------------------------------------------------------------------- class UTF8Recoder(object): """ Iterator that reads an encoded stream and reencodes the input to UTF-8 """ def __init__(self, stream, encoding): self.reader = codecs.getreader(encoding)(stream) def __iter__(self): return self def next(self): "Encode the next line of the file." return self.reader.next().encode("utf-8") class UnicodeReader(object): """ A CSV reader which will iterate over lines in the CSV file, which is encoded in the given encoding. 
""" def __init__(self, csvfile, encoding="utf-8", **kwds): self.first_row = True csvfile = UTF8Recoder(csvfile, encoding) self.reader = csv.reader(csvfile, **kwds) def next(self): "Read the next line of the file." row = self.reader.next() rowlist = [unicode(s, "utf-8") for s in row] # Add check for Byte Order Mark (Windows, Notepad probably): if self.first_row: if len(rowlist) > 0 and rowlist[0].startswith(u"\ufeff"): rowlist[0] = rowlist[0][1:] self.first_row = False return rowlist def __iter__(self): return self #------------------------------------------------------------------------- # # Support and main functions # #------------------------------------------------------------------------- def rd(line_number, row, col, key, default = None): """ Return Row data by column name """ if key in col: if col[key] >= len(row): LOG.warn("missing '%s, on line %d" % (key, line_number)) return default retval = row[col[key]].strip() if retval == "": return default else: return retval else: return default def importData(dbase, filename, user): """Function called by Gramps to import data on persons in CSV format.""" parser = CSVParser(dbase, user) try: with OpenFileOrStdin(filename, 'b') as filehandle: parser.parse(filehandle) except EnvironmentError, err: user.notify_error(_("%s could not be opened\n") % filename, str(err)) return return None # This module doesn't provide info about what got imported. #------------------------------------------------------------------------- # # CSV Parser # #------------------------------------------------------------------------- class CSVParser(object): """Class to read data in CSV format from a file object.""" def __init__(self, dbase, user): self.db = dbase self.user = user self.trans = None self.lineno = 0 self.index = 0 self.fam_count = 0 self.indi_count = 0 self.pref = {} # person ref, internal to this sheet self.fref = {} # family ref, internal to this sheet column2label = { "surname": ("Lastname", "Surname", _("Surname"), "lastname", "last_name", "surname", _("surname")), "firstname": ("Firstname", "Given name", _("Given name"), "Given", _("Given"), "firstname", "first_name", "given_name", "given name", _("given name"), "given", _("given")), "callname": ("Callname", "Call name", _("Call name"), "Call", _("Call"), "callname", "call_name", "call name", "call", _("call")), "title": ("Title", _("Person|Title"), "title", _("Person|title")), "prefix": ("Prefix", _("Prefix"), "prefix", _("prefix")), "suffix": ("Suffix", _("Suffix"), "suffix", _("suffix")), "gender": ("Gender", _("Gender"), "gender", _("gender")), "source": ("Source", _("Source"), "source", _("source")), "note": ("Note", _("Note"), "note", _("note")), "birthplace": ("Birthplace", "Birth place", _("Birth place"), "birthplace", "birth_place", "birth place", _("birth place")), "birthdate": ("Birthdate", "Birth date", _("Birth date"), "birthdate", "birth_date", "birth date", _("birth date")), "birthsource": ("Birthsource", "Birth source", _("Birth source"), "birthsource", "birth_source", "birth source", _("birth source")), "baptismplace": ("Baptismplace", "Baptism place", _("Baptism place"), "baptismplace", "baptism place", _("baptism place")), "baptismdate": ("Baptismdate", "Baptism date", _("Baptism date"), "baptismdate", "baptism date", _("baptism date")), "baptismsource": ("Baptismsource", "Baptism source", _("Baptism source"), "baptismsource", "baptism source", _("baptism source")), "burialplace": ("Burialplace", "Burial place", _("Burial place"), "burialplace", "burial place", _("burial place")), 
"burialdate": ("Burialdate", "Burial date", _("Burial date"), "burialdate", "burial date", _("burial date")), "burialsource": ("Burialsource", "Burial source", _("Burial source"), "burialsource", "burial source", _("burial source")), "deathplace": ("Deathplace", "Death place", _("Death place"), "deathplace", "death_place", "death place", _("death place")), "deathdate": ("Deathdate", "Death date", _("Death date"), "deathdate", "death_date", "death date", _("death date")), "deathsource": ("Deathsource", "Death source", _("Death source"), "deathsource", "death_source", "death source", _("death source")), "deathcause": ("Deathcause", "Death cause", _("Death cause"), "deathcause", "death_cause", "death cause", _("death cause")), "grampsid": ("Grampsid", "ID", "Gramps id", _("Gramps ID"), "grampsid", "id", "gramps_id", "gramps id", _("Gramps id")), "person": ("Person", _("Person"), "person", _("person")), # ---------------------------------- "child": ("Child", _("Child"), "child", _("child")), "family": ("Family", _("Family"), "family", _("family")), # ---------------------------------- "wife": ("Mother", _("Mother"), "Wife", _("Wife"), "Parent2", _("Parent2"), "mother", _("mother"), "wife", _("wife"), "parent2", _("parent2")), "husband": ("Father", _("Father"), "Husband", _("Husband"), "Parent1", _("Parent1"), "father", _("father"), "husband", _("husband"), "parent1", _("parent1")), "marriage": ("Marriage", _("Marriage"), "marriage", _("marriage")), "date": ("Date", _("Date"), "date", _("date")), "place": ("Place", _("Place"), "place", _("place")), } lab2col_dict = [] for key in column2label.keys(): for val in column2label[key]: lab2col_dict.append((val, key)) self.label2column = dict(lab2col_dict) def cleanup_column_name(self, column): """Handle column aliases for CSV spreadsheet import and SQL.""" return self.label2column.get(column, column) def read_csv(self, filehandle): "Read the data from the file and return it as a list." reader = UnicodeReader(filehandle) try: data = [[r.strip() for r in row] for row in reader] except csv.Error, err: self.user.notify_error(_('format error: line %(line)d: %(zero)s') % { 'line' : reader.reader.line_num, 'zero' : err } ) return None return data def lookup(self, type_, id_): """ Return the object of type type_ with id id_ from db or previously stored value. """ if id_ is None: return None if type_ == "family": if id_.startswith("[") and id_.endswith("]"): id_ = self.db.fid2user_format(id_[1:-1]) db_lookup = self.db.get_family_from_gramps_id(id_) if db_lookup is None: return self.lookup(type_, id_) else: return db_lookup elif id_.lower() in self.fref: return self.fref[id_.lower()] else: return None elif type_ == "person": if id_.startswith("[") and id_.endswith("]"): id_ = self.db.id2user_format(id_[1:-1]) db_lookup = self.db.get_person_from_gramps_id(id_) if db_lookup is None: return self.lookup(type_, id_) else: return db_lookup elif id_.lower() in self.pref: return self.pref[id_.lower()] else: return None else: LOG.warn("invalid lookup type in CSV import: '%s'" % type_) return None def storeup(self, type_, id_, object_): "Store object object_ of type type_ in a dictionary under key id_." 
if id_.startswith("[") and id_.endswith("]"): id_ = id_[1:-1] #return # do not store gramps people; go look them up if type_ == "person": id_ = self.db.id2user_format(id_) self.pref[id_.lower()] = object_ elif type_ == "family": id_ = self.db.fid2user_format(id_) self.fref[id_.lower()] = object_ else: LOG.warn("invalid storeup type in CSV import: '%s'" % type_) def parse(self, filehandle): """ Prepare the database and parse the input file. :param filehandle: open file handle positioned at start of the file """ data = self.read_csv(filehandle) progress = ProgressMeter(_('CSV Import')) progress.set_pass(_('Reading data...'), 1) progress.set_pass(_('Importing data...'), len(data)) tym = time.time() self.db.disable_signals() with DbTxn(_("CSV import"), self.db, batch=True) as self.trans: self._parse_csv_data(data, progress) self.db.enable_signals() self.db.request_rebuild() tym = time.time() - tym msg = ngettext('Import Complete: %d second', 'Import Complete: %d seconds', tym ) % tym LOG.debug(msg) LOG.debug("New Families: %d" % self.fam_count) LOG.debug("New Individuals: %d" % self.indi_count) progress.close() def _parse_csv_data(self, data, progress=None): """Parse each line of the input data and act accordingly.""" self.lineno = 0 self.index = 0 self.fam_count = 0 self.indi_count = 0 self.pref = {} # person ref, internal to this sheet self.fref = {} # family ref, internal to this sheet header = None line_number = 0 for row in data: if progress is not None: progress.step() line_number += 1 if "".join(row) == "": # no blanks are allowed inside a table header = None # clear headers, ready for next "table" continue ###################################### if header is None: header = [self.cleanup_column_name(r) for r in row] col = {} count = 0 for key in header: col[key] = count count += 1 continue # three different kinds of data: person, family, and marriage if (("marriage" in header) or ("husband" in header) or ("wife" in header)): self._parse_marriage(line_number, row, col) elif "family" in header: self._parse_family(line_number, row, col) elif "surname" in header: self._parse_person(line_number, row, col) else: LOG.warn("ignoring line %d" % line_number) return None def _parse_marriage(self, line_number, row, col): "Parse the content of a Marriage,Husband,Wife line." 
marriage_ref = rd(line_number, row, col, "marriage") husband = rd(line_number, row, col, "husband") wife = rd(line_number, row, col, "wife") marriagedate = rd(line_number, row, col, "date") marriageplace = rd(line_number, row, col, "place") marriagesource = rd(line_number, row, col, "source") note = rd(line_number, row, col, "note") wife = self.lookup("person", wife) husband = self.lookup("person", husband) if husband is None and wife is None: # might have children, so go ahead and add LOG.warn("no parents on line %d; adding family anyway" % line_number) family = self.get_or_create_family(marriage_ref, husband, wife) # adjust gender, if not already provided if husband: # this is just a guess, if unknown if husband.get_gender() == Person.UNKNOWN: husband.set_gender(Person.MALE) self.db.commit_person(husband, self.trans) if wife: # this is just a guess, if unknown if wife.get_gender() == Person.UNKNOWN: wife.set_gender(Person.FEMALE) self.db.commit_person(wife, self.trans) if marriage_ref: self.storeup("family", marriage_ref.lower(), family) if marriagesource: # add, if new new, marriagesource = self.get_or_create_source(marriagesource) if marriageplace: # add, if new new, marriageplace = self.get_or_create_place(marriageplace) if marriagedate: marriagedate = _dp.parse(marriagedate) if marriagedate or marriageplace or marriagesource or note: # add, if new; replace, if different new, marriage = self.get_or_create_event(family, EventType.MARRIAGE, marriagedate, marriageplace, marriagesource) if new: mar_ref = EventRef() mar_ref.set_reference_handle(marriage.get_handle()) family.add_event_ref(mar_ref) self.db.commit_family(family, self.trans) # only add note to event: # append notes, if previous notes if note: previous_notes_list = marriage.get_note_list() updated_note = False for note_handle in previous_notes_list: previous_note = self.db.get_note_from_handle( note_handle) if previous_note.type == NoteType.EVENT: previous_text = previous_note.get() if note not in previous_text: note = previous_text + "\n" + note previous_note.set(note) self.db.commit_note(previous_note, self.trans) updated_note = True break if not updated_note: # add new note here new_note = Note() new_note.handle = create_id() new_note.type.set(NoteType.EVENT) new_note.set(note) self.db.add_note(new_note, self.trans) marriage.add_note(new_note.handle) self.db.commit_event(marriage, self.trans) def _parse_family(self, line_number, row, col): "Parse the content of a family line" family_ref = rd(line_number, row, col, "family") if family_ref is None: LOG.warn("no family reference found for family on line %d" % line_number) return # required child = rd(line_number, row, col, "child") source = rd(line_number, row, col, "source") note = rd(line_number, row, col, "note") gender = rd(line_number, row, col, "gender") child = self.lookup("person", child) family = self.lookup("family", family_ref) if family is None: LOG.warn("no matching family reference found for family " "on line %d" % line_number) return if child is None: LOG.warn("no matching child reference found for family " "on line %d" % line_number) return # is this child already in this family? 
If so, don't add LOG.debug("children: %s", [ref.ref for ref in family.get_child_ref_list()]) LOG.debug("looking for: %s", child.get_handle()) if child.get_handle() not in [ref.ref for ref in family.get_child_ref_list()]: # add child to family LOG.debug(" adding child [%s] to family [%s]", child.get_gramps_id(), family.get_gramps_id()) childref = ChildRef() childref.set_reference_handle(child.get_handle()) family.add_child_ref( childref) self.db.commit_family(family, self.trans) child.add_parent_family_handle(family.get_handle()) if gender: # replace gender = gender.lower() if gender == gender_map[Person.MALE].lower(): gender = Person.MALE elif gender == gender_map[Person.FEMALE].lower(): gender = Person.FEMALE else: gender = Person.UNKNOWN child.set_gender(gender) if source: # add, if new dummy_new, source = self.get_or_create_source(source) self.find_and_set_citation(child, source) # put note on child if note: # append notes, if previous notes previous_notes_list = child.get_note_list() updated_note = False for note_handle in previous_notes_list: previous_note = self.db.get_note_from_handle(note_handle) if previous_note.type == NoteType.PERSON: previous_text = previous_note.get() if note not in previous_text: note = previous_text + "\n" + note previous_note.set(note) self.db.commit_note(previous_note, self.trans) updated_note = True break if not updated_note: # add new note here new_note = Note() new_note.handle = create_id() new_note.type.set(NoteType.PERSON) new_note.set(note) self.db.add_note(new_note, self.trans) child.add_note(new_note.handle) self.db.commit_person(child, self.trans) def _parse_person(self, line_number, row, col): "Parse the content of a Person line." surname = rd(line_number, row, col, "surname") firstname = rd(line_number, row, col, "firstname", "") callname = rd(line_number, row, col, "callname") title = rd(line_number, row, col, "title") prefix = rd(line_number, row, col, "prefix") suffix = rd(line_number, row, col, "suffix") gender = rd(line_number, row, col, "gender") source = rd(line_number, row, col, "source") note = rd(line_number, row, col, "note") birthplace = rd(line_number, row, col, "birthplace") birthdate = rd(line_number, row, col, "birthdate") birthsource = rd(line_number, row, col, "birthsource") baptismplace = rd(line_number, row, col, "baptismplace") baptismdate = rd(line_number, row, col, "baptismdate") baptismsource = rd(line_number, row, col, "baptismsource") burialplace = rd(line_number, row, col, "burialplace") burialdate = rd(line_number, row, col, "burialdate") burialsource = rd(line_number, row, col, "burialsource") deathplace = rd(line_number, row, col, "deathplace") deathdate = rd(line_number, row, col, "deathdate") deathsource = rd(line_number, row, col, "deathsource") deathcause = rd(line_number, row, col, "deathcause") grampsid = rd(line_number, row, col, "grampsid") person_ref = rd(line_number, row, col, "person") ######################################################### # if this person already exists, don't create them person = self.lookup("person", person_ref) if person is None: if surname is None: LOG.warn("empty surname for new person on line %d" % line_number) surname = "" # new person person = self.create_person() name = Name() name.set_type(NameType(NameType.BIRTH)) name.set_first_name(firstname) surname_obj = Surname() surname_obj.set_surname(surname) name.add_surname(surname_obj) person.set_primary_name(name) else: name = person.get_primary_name() ######################################################### if person_ref is 
not None: self.storeup("person", person_ref, person) # replace if surname is not None: name.get_primary_surname().set_surname(surname) if firstname is not None: name.set_first_name(firstname) if callname is not None: name.set_call_name(callname) if title is not None: name.set_title(title) if prefix is not None: name.get_primary_surname().set_prefix(prefix) name.group_as = '' # HELP? what should I do here? if suffix is not None: name.set_suffix(suffix) if note is not None: # append notes, if previous notes previous_notes_list = person.get_note_list() updated_note = False for note_handle in previous_notes_list: previous_note = self.db.get_note_from_handle(note_handle) if previous_note.type == NoteType.PERSON: previous_text = previous_note.get() if note not in previous_text: note = previous_text + "\n" + note previous_note.set(note) self.db.commit_note(previous_note, self.trans) updated_note = True break if not updated_note: # add new note here new_note = Note() new_note.handle = create_id() new_note.type.set(NoteType.PERSON) new_note.set(note) self.db.add_note(new_note, self.trans) person.add_note(new_note.handle) if grampsid is not None: person.gramps_id = grampsid elif person_ref is not None: if person_ref.startswith("[") and person_ref.endswith("]"): person.gramps_id = self.db.id2user_format(person_ref[1:-1]) if (person.get_gender() == Person.UNKNOWN and gender is not None): gender = gender.lower() if gender == gender_map[Person.MALE].lower(): gender = Person.MALE elif gender == gender_map[Person.FEMALE].lower(): gender = Person.FEMALE else: gender = Person.UNKNOWN person.set_gender(gender) ######################################################### # add if new, replace if different # Birth: if birthdate is not None: birthdate = _dp.parse(birthdate) if birthplace is not None: new, birthplace = self.get_or_create_place(birthplace) if birthsource is not None: new, birthsource = self.get_or_create_source(birthsource) if birthdate or birthplace or birthsource: new, birth = self.get_or_create_event(person, EventType.BIRTH, birthdate, birthplace, birthsource) birth_ref = person.get_birth_ref() if birth_ref is None: # new birth_ref = EventRef() birth_ref.set_reference_handle( birth.get_handle()) person.set_birth_ref( birth_ref) # Baptism: if baptismdate is not None: baptismdate = _dp.parse(baptismdate) if baptismplace is not None: new, baptismplace = self.get_or_create_place(baptismplace) if baptismsource is not None: new, baptismsource = self.get_or_create_source(baptismsource) if baptismdate or baptismplace or baptismsource: new, baptism = self.get_or_create_event(person, EventType.BAPTISM, baptismdate, baptismplace, baptismsource) baptism_ref = get_primary_event_ref_from_type(self.db, person, "Baptism") if baptism_ref is None: # new baptism_ref = EventRef() baptism_ref.set_reference_handle( baptism.get_handle()) person.add_event_ref( baptism_ref) # Death: if deathdate is not None: deathdate = _dp.parse(deathdate) if deathplace is not None: new, deathplace = self.get_or_create_place(deathplace) if deathsource is not None: new, deathsource = self.get_or_create_source(deathsource) if deathdate or deathplace or deathsource or deathcause: new, death = self.get_or_create_event(person, EventType.DEATH, deathdate, deathplace, deathsource) if deathcause: death.set_description(deathcause) self.db.commit_event(death, self.trans) death_ref = person.get_death_ref() if death_ref is None: # new death_ref = EventRef() death_ref.set_reference_handle(death.get_handle()) person.set_death_ref(death_ref) # Burial: 
if burialdate is not None: burialdate = _dp.parse(burialdate) if burialplace is not None: new, burialplace = self.get_or_create_place(burialplace) if burialsource is not None: new, burialsource = self.get_or_create_source(burialsource) if burialdate or burialplace or burialsource: new, burial = self.get_or_create_event(person, EventType.BURIAL, burialdate, burialplace, burialsource) burial_ref = get_primary_event_ref_from_type(self.db, person, "Burial") if burial_ref is None: # new burial_ref = EventRef() burial_ref.set_reference_handle( burial.get_handle()) person.add_event_ref( burial_ref) if source: # add, if new new, source = self.get_or_create_source(source) self.find_and_set_citation(person, source) self.db.commit_person(person, self.trans) def get_or_create_family(self, family_ref, husband, wife): "Return the family object for the give family ID." # if a gramps_id and exists: LOG.debug("get_or_create_family") if family_ref.startswith("[") and family_ref.endswith("]"): id_ = self.db.fid2user_format(family_ref[1:-1]) family = self.db.get_family_from_gramps_id(id_) if family: # don't delete, only add fam_husband_handle = family.get_father_handle() fam_wife_handle = family.get_mother_handle() if husband: if husband.get_handle() != fam_husband_handle: # this husband is not the same old one! Add him! family.set_father_handle(husband.get_handle()) if wife: if wife.get_handle() != fam_wife_handle: # this wife is not the same old one! Add her! family.set_mother_handle(wife.get_handle()) LOG.debug(" returning existing family") return family # if not, create one: family = Family() # was marked with a gramps_id, but didn't exist, so we'll use it: if family_ref.startswith("[") and family_ref.endswith("]"): id_ = self.db.fid2user_format(family_ref[1:-1]) family.set_gramps_id(id_) # add it: family.set_handle(self.db.create_id()) if husband: family.set_father_handle(husband.get_handle()) husband.add_family_handle(family.get_handle()) if wife: family.set_mother_handle(wife.get_handle()) wife.add_family_handle(family.get_handle()) if husband and wife: family.set_relationship(FamilyRelType.MARRIED) self.db.add_family(family, self.trans) if husband: self.db.commit_person(husband, self.trans) if wife: self.db.commit_person(wife, self.trans) self.fam_count += 1 return family def get_or_create_event(self, object_, type_, date=None, place=None, source=None): """ Add or find a type event on object """ # first, see if it exists LOG.debug("get_or_create_event") ref_list = object_.get_event_ref_list() LOG.debug("refs: %s", ref_list) # look for a match, and possible correction for ref in ref_list: event = self.db.get_event_from_handle(ref.ref) LOG.debug(" compare event type %s == %s", int(event.get_type()), type_) if int(event.get_type()) == type_: # Match! 
Let's update if date: event.set_date_object(date) if place: event.set_place_handle(place.get_handle()) if source: self.find_and_set_citation(event, source) self.db.commit_event(event, self.trans) LOG.debug(" returning existing event") return (0, event) # else create it: LOG.debug(" creating event") event = Event() if type_: event.set_type(EventType(type_)) if date: event.set_date_object(date) if place: event.set_place_handle(place.get_handle()) if source: self.find_and_set_citation(event, source) self.db.add_event(event, self.trans) return (1, event) def create_person(self): """ Used to create a new person we know doesn't exist """ person = Person() self.db.add_person(person, self.trans) self.indi_count += 1 return person def get_or_create_place(self, place_name): "Return the requested place object tuple-packed with a new indicator." LOG.debug("get_or_create_place: looking for: %s", place_name) for place_handle in self.db.iter_place_handles(): place = self.db.get_place_from_handle(place_handle) if place.get_title() == place_name: return (0, place) place = Place() place.set_title(place_name) self.db.add_place(place, self.trans) return (1, place) def get_or_create_source(self, source_text): "Return the requested source object tuple-packed with a new indicator." source_list = self.db.get_source_handles(sort_handles=False) LOG.debug("get_or_create_source: list: %s", source_list) LOG.debug("get_or_create_source: looking for: %s", source_text) for source_handle in source_list: source = self.db.get_source_from_handle(source_handle) if source.get_title() == source_text: LOG.debug(" returning existing source") return (0, source) LOG.debug(" creating source") source = Source() source.set_title(source_text) self.db.add_source(source, self.trans) return (1, source) def find_and_set_citation(self, obj, source): # look for the source in the existing citations for the object LOG.debug("find_and_set_citation: looking for source: %s", source.get_gramps_id()) for citation_handle in obj.get_citation_list(): citation = self.db.get_citation_from_handle(citation_handle) LOG.debug("find_and_set_citation: existing citation: %s", citation.get_gramps_id()) poss_source = self.db.get_source_from_handle( citation.get_reference_handle()) LOG.debug(" compare source %s == %s", source.get_gramps_id(), poss_source.get_gramps_id()) if poss_source.get_handle() == source.get_handle(): # The source is already cited LOG.debug(" source already cited") return # we couldn't find an appropriate citation, so we have to create one. citation = Citation() LOG.debug(" creating citation") citation.set_reference_handle(source.get_handle()) self.db.add_citation(citation, self.trans) LOG.debug(" created citation, citation %s %s" % (citation, citation.get_gramps_id())) obj.add_citation(citation.get_handle())
arunkgupta/gramps
gramps/plugins/import/importcsv.py
Python
gpl-2.0
36,989
[ "Brian" ]
3b1f9b00e2eb78aa556da125db567e498eeae96ecba9629f2700ae4601c728d2
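The importer above accepts many spellings for each CSV column and normalises them before dispatching on 'surname', 'family', 'marriage', and so on. Below is a much-reduced sketch of that header normalisation; the alias table is a tiny illustrative subset, not the importer's full localised table, and the CSV text is made up.

import csv
import io

# Tiny stand-in for the importer's header normalisation: map the accepted
# column labels onto the canonical keys that the parser dispatches on.
COLUMN_ALIASES = {
    'surname': ('Surname', 'Lastname', 'last_name'),
    'firstname': ('Given name', 'Firstname', 'given'),
    'birthdate': ('Birth date', 'Birthdate', 'birth_date'),
}
LABEL_TO_COLUMN = {alias: key for key, aliases in COLUMN_ALIASES.items() for alias in aliases}

csv_text = u'\ufeffSurname,Given name,Birth date\nDoe,Jane,1901-02-03\n'
# Strip a leading byte order mark up front (UnicodeReader does this on the first cell).
reader = csv.reader(io.StringIO(csv_text.lstrip(u'\ufeff')))
rows = list(reader)

header = [LABEL_TO_COLUMN.get(label, label) for label in rows[0]]
col = {key: i for i, key in enumerate(header)}
print(header)                      # ['surname', 'firstname', 'birthdate']
print(rows[1][col['birthdate']])   # '1901-02-03'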
#!/usr/bin/env python

import numpy as np
import scipy.io as io
import nengo
import pyopencl as cl
from nengo_ocl import sim_ocl
from nengo.utils.numpy import rmse, norm
import matplotlib.pyplot as plt
from nengo.utils.logging import logger


class _InputImage(object):
    """Structure of the input image to the model

    Parameters
    ----------
    height: height of the image
    width: width of the image
    height_k: height of the kernel image
    width_k: width of the kernel image
    """
    def __init__(self, height, width, height_k, width_k):
        self.height = height
        self.width = width
        self.height_k = height_k
        self.width_k = width_k

img = _InputImage(240, 320, 4, 4)


def OclSimulator(network):
    ctx = cl.create_some_context()
    return sim_ocl.Simulator(network, context=ctx)


def get_directions():
    verticalPieces = img.height / img.height_k
    horizontalPieces = img.width / img.width_k
    directions = []
    for i in range(0, verticalPieces):
        for j in range(0, horizontalPieces):
            iny = np.array(range(j*img.width_k, (j+1)*img.width_k))
            inxy = iny*img.height + i*img.height_k
            idx = np.array([], dtype=int)
            for z in range(0, img.height_k):
                vix = inxy + z*np.ones(img.width_k, dtype=np.int)
                idx = np.append(idx, vix)
            temp_dirs = np.zeros(img.height*img.width, dtype=np.int)
            temp_dirs[idx] = 1
            directions.append(temp_dirs)
    return directions


def mt_model(Simulator, nl):
    mat = io.loadmat('/home/matallanas/Documents/smoothPursuit/LKPYR/flow-vector.mat')
    speed = mat['Vx']
    s2 = speed[0:240, 0:320]
    # Flatten to a 76800-element vector; the original call passed 1 as the
    # (string-valued) 'order' argument of np.reshape, which is invalid.
    s2 = np.reshape(s2, 240*320)
    l = s2.shape
    print l
    print speed
    #"""A network that represents sin(t)."""
    N = 768000
    mt = nengo.Network(label='mt_model')
    with mt:
        input = nengo.Node(output=s2, dimensions=76800)
        mt_neurons = nengo.Ensemble(nl(N), radius=20, dimensions=76800)
        nengo.Connection(input, mt_neurons)
        in_p = nengo.Probe(input, 'output')
        mt_p = nengo.Probe(mt_neurons, 'decoded_output', synapse=0.02)
    sim = Simulator(mt)
    sim.run(5.0)
    print sim.data[mt_p]
    #t = sim.trange()
    #plt.plot(t, sim.data[in_p], label='Input')
    #plt.plot(t, sim.data[A_p], label='Neuron approximation, pstc=0.02')
    #plt.legend(loc=0)
    #plt.show()
    #target = np.sin(np.arange(5000) / 1000.)
    #target.shape = (-1, 1)
    #logger.debug("[New API] input RMSE: %f", rmse(target, sim.data[in_p]))
    #logger.debug("[New API] A RMSE: %f", rmse(target, sim.data[A_p]))
    #assert rmse(target, sim.data[in_p]) < 0.001
    #assert rmse(target, sim.data[A_p]) < 0.1

mt_model(OclSimulator, nengo.LIF)

#encoders = get_directions()
#print encoders[0]
#print np.nonzero(encoders[0]>0.5)
#print np.nonzero(encoders[1]>0.5)
matallanas/smoothPursuit
code/Nengo/mt_model.py
Python
gpl-3.0
2,756
[ "NEURON" ]
01bbc63eeb740b9388240ff169eab06ad5a01cbaac33fbe15ccdded62ecec1f1
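get_directions() in the file above partitions a 240x320 field into 4x4 tiles and builds one flat 0/1 mask per tile. Here is a simpler row-major sketch of the same idea; note that the original appears to use column-major-style index arithmetic, so the resulting masks are not bit-for-bit identical, and the names below are illustrative only.

import numpy as np

height, width = 240, 320
height_k, width_k = 4, 4

def block_mask(i, j):
    # Flat boolean mask selecting the 4x4 tile at block row i, block column j
    # of a row-major (height, width) image.
    mask = np.zeros((height, width), dtype=bool)
    mask[i*height_k:(i+1)*height_k, j*width_k:(j+1)*width_k] = True
    return mask.ravel()

masks = [block_mask(i, j)
         for i in range(height // height_k)
         for j in range(width // width_k)]
print(len(masks), masks[0].sum())  # 4800 blocks, 16 pixels each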
__author__ = "HarshaRani" __credits__ = ["Upi Lab"] __license__ = "GPL3" __version__ = "1.0.0" __maintainer__ = "HarshaRani" __email__ = "hrani@ncbs.res.in" __status__ = "Development" __updated__ = "Oct 26 2018" ''' 2018 Oct 26: xfer molecules are not put into screen Sep 28: to zoom the kkit co-ordinates a factor of w=1000 and h=800 is multipled here 2017 Oct 18: moved some function to kkitUtil getxyCord, etc function are added ''' import collections from moose import * import numpy as np from moose import wildcardFind,element,PoolBase,CplxEnzBase,Annotator,exists from networkx.drawing.nx_agraph import graphviz_layout import numpy as np import networkx as nx from kkitUtil import getRandColor,colorCheck,findCompartment, findGroup, findGroup_compt, mooseIsInstance from PyQt4.QtGui import QColor import re import moose._moose as moose def getxyCord(xcord,ycord,list1): for item in list1: # if isinstance(item,Function): # objInfo = element(item.parent).path+'/info' # else: # objInfo = item.path+'/info' if not isinstance(item,Function): objInfo = item.path+'/info' xcord.append(xyPosition(objInfo,'x')) ycord.append(xyPosition(objInfo,'y')) def xyPosition(objInfo,xory): try: return(float(element(objInfo).getField(xory))) except ValueError: return (float(0)) ''' def mooseIsInstance(melement, classNames): return element(melement).__class__.__name__ in classNames def findCompartment(melement): while not mooseIsInstance(melement, ["CubeMesh", "CyclMesh"]): melement = melement.parent return melement def findGroup(melement): while not mooseIsInstance(melement, ["Neutral"]): melement = melement.parent return melement def findGroup_compt(melement): while not (mooseIsInstance(melement, ["Neutral","CubeMesh", "CyclMesh"])): melement = melement.parent return melement ''' def populateMeshEntry(meshEntry,parent,types,obj): #print " parent ",parent, "types ",types, " obj ",obj try: value = meshEntry[element(parent.path)][types] except KeyError: # Key is not present meshEntry[element(parent.path)].update({types :[element(obj)]}) else: mlist = meshEntry[element(parent.path)][types] mlist.append(element(obj)) def updateMeshObj(modelRoot): print " updateMeshObj " meshEntry = {} if meshEntry: meshEntry.clear() else: meshEntry = {} objPar = collections.OrderedDict() for compt in wildcardFind(modelRoot+'/##[ISA=ChemCompt]'): groupColor = [] try: value = meshEntry[element(compt)] except KeyError: # Compt is not present meshEntry[element(compt)] = {} objPar[element(compt)] = element('/') for grp in wildcardFind(compt.path+'/##[TYPE=Neutral]'): test = [x for x in wildcardFind(element(grp).path+'/#') if x.className in ["Pool","Reac","Enz"]] grp_cmpt = findGroup_compt(grp.parent) try: value = meshEntry[element(grp)] except KeyError: # Grp is not present meshEntry[element(grp)] = {} objPar[element(grp)] = element(grp_cmpt) for compt in wildcardFind(modelRoot+'/##[ISA=ChemCompt]'): for m in wildcardFind(compt.path+'/##[ISA=PoolBase]'): grp_cmpt = findGroup_compt(m) if isinstance(element(grp_cmpt),Neutral): if isinstance(element(m.parent),EnzBase): populateMeshEntry(meshEntry,grp_cmpt,"cplx",m) else: populateMeshEntry(meshEntry,grp_cmpt,"pool",m) else: if isinstance(element(m.parent),EnzBase): populateMeshEntry(meshEntry,compt,"cplx",m) else: populateMeshEntry(meshEntry,compt,"pool",m) for r in wildcardFind(compt.path+'/##[ISA=ReacBase]'): rgrp_cmpt = findGroup_compt(r) if isinstance(element(rgrp_cmpt),Neutral): populateMeshEntry(meshEntry,rgrp_cmpt,"reaction",r) else: populateMeshEntry(meshEntry,compt,"reaction",r) for e in 
wildcardFind(compt.path+'/##[ISA=EnzBase]'): egrp_cmpt = findGroup_compt(e) if isinstance(element(egrp_cmpt),Neutral): populateMeshEntry(meshEntry,egrp_cmpt,"enzyme",e) else: populateMeshEntry(meshEntry,compt,"enzyme",e) for f in wildcardFind(compt.path+'/##[ISA=Function]'): fgrp_cmpt = findGroup_compt(f) if isinstance(element(fgrp_cmpt),Neutral): populateMeshEntry(meshEntry,fgrp_cmpt,"function",f) else: populateMeshEntry(meshEntry,compt,"function",f) for t in wildcardFind(compt.path+'/##[ISA=StimulusTable]'): tgrp_cmpt = findGroup_compt(t) if isinstance(element(tgrp_cmpt),Neutral): populateMeshEntry(meshEntry,tgrp_cmpt,"stimTab",t) else: populateMeshEntry(meshEntry,compt,"stimTab",t) return(objPar,meshEntry) def setupMeshObj(modelRoot): ''' Setup compartment and its members pool,reaction,enz cplx under self.meshEntry dictionaries \ self.meshEntry with "key" as compartment, value is key2:list where key2 represents moose object type,list of objects of a perticular type e.g self.meshEntry[meshEnt] = { 'reaction': reaction_list,'enzyme':enzyme_list,'pool':poollist,'cplx': cplxlist } ''' xmin = 0.0 xmax = 1.0 ymin = 0.0 ymax = 1.0 positionInfoExist = True meshEntry = {} if meshEntry: meshEntry.clear() else: meshEntry = {} xcord = [] ycord = [] n = 1 objPar = collections.OrderedDict() for compt in wildcardFind(modelRoot+'/##[ISA=ChemCompt]'): groupColor = [] try: value = meshEntry[element(compt)] except KeyError: # Compt is not present meshEntry[element(compt)] = {} objPar[element(compt)] = element('/') for grp in wildcardFind(compt.path+'/##[TYPE=Neutral]'): test = [x for x in wildcardFind(element(grp).path+'/#') if x.className in ["Pool","Reac","Enz"]] #if len(test) >1: grpinfo = Annotator(element(grp).path+'/info') validatecolor = colorCheck(grpinfo.color,"bg") validatedgrpcolor = str(QColor(validatecolor).name()) groupColor.append(validatedgrpcolor) grp_cmpt = findGroup_compt(grp.parent) try: value = meshEntry[element(grp)] except KeyError: # Grp is not present meshEntry[element(grp)] = {} objPar[element(grp)] = element(grp_cmpt) # if n > 1: # validatecolor = colorCheck(grpinfo.color,"bg") # validatedgrpcolor = str(QColor(validatecolor).name()) # if validatedgrpcolor in groupColor: # print " inside " # c = getRandColor() # print " c ",c, c.name() # grpinfo.color = str(c.name()) # groupColor.append(str(c.name())) # print " groupColor ",grpinfo,grpinfo.color, groupColor # n =n +1 for compt in wildcardFind(modelRoot+'/##[ISA=ChemCompt]'): for m in wildcardFind(compt.path+'/##[ISA=PoolBase]'): if not re.search("xfer",m.name): grp_cmpt = findGroup_compt(m) xcord.append(xyPosition(m.path+'/info','x')) ycord.append(xyPosition(m.path+'/info','y')) if isinstance(element(grp_cmpt),Neutral): if isinstance(element(m.parent),EnzBase): populateMeshEntry(meshEntry,grp_cmpt,"cplx",m) else: populateMeshEntry(meshEntry,grp_cmpt,"pool",m) else: if isinstance(element(m.parent),EnzBase): populateMeshEntry(meshEntry,compt,"cplx",m) else: populateMeshEntry(meshEntry,compt,"pool",m) for r in wildcardFind(compt.path+'/##[ISA=ReacBase]'): rgrp_cmpt = findGroup_compt(r) xcord.append(xyPosition(r.path+'/info','x')) ycord.append(xyPosition(r.path+'/info','y')) if isinstance(element(rgrp_cmpt),Neutral): populateMeshEntry(meshEntry,rgrp_cmpt,"reaction",r) else: populateMeshEntry(meshEntry,compt,"reaction",r) for e in wildcardFind(compt.path+'/##[ISA=EnzBase]'): egrp_cmpt = findGroup_compt(e) xcord.append(xyPosition(e.path+'/info','x')) ycord.append(xyPosition(e.path+'/info','y')) if 
isinstance(element(egrp_cmpt),Neutral): populateMeshEntry(meshEntry,egrp_cmpt,"enzyme",e) else: populateMeshEntry(meshEntry,compt,"enzyme",e) for f in wildcardFind(compt.path+'/##[ISA=Function]'): fgrp_cmpt = findGroup_compt(f) if isinstance(element(fgrp_cmpt),Neutral): populateMeshEntry(meshEntry,fgrp_cmpt,"function",f) else: populateMeshEntry(meshEntry,compt,"function",f) for t in wildcardFind(compt.path+'/##[ISA=StimulusTable]'): tgrp_cmpt = findGroup_compt(t) xcord.append(xyPosition(t.path+'/info','x')) ycord.append(xyPosition(t.path+'/info','y')) if isinstance(element(tgrp_cmpt),Neutral): populateMeshEntry(meshEntry,tgrp_cmpt,"stimTab",t) else: populateMeshEntry(meshEntry,compt,"stimTab",t) xmin = min(xcord) xmax = max(xcord) ymin = min(ycord) ymax = max(ycord) positionInfoExist = not(len(np.nonzero(xcord)[0]) == 0 and len(np.nonzero(ycord)[0]) == 0) return(objPar,meshEntry,xmin,xmax,ymin,ymax,positionInfoExist) ''' def setupMeshObj(modelRoot): # Setup compartment and its members pool,reaction,enz cplx under self.meshEntry dictionaries \ # self.meshEntry with "key" as compartment, # value is key2:list where key2 represents moose object type,list of objects of a perticular type # e.g self.meshEntry[meshEnt] = { 'reaction': reaction_list,'enzyme':enzyme_list,'pool':poollist,'cplx': cplxlist } meshEntry = {} if meshEntry: meshEntry.clear() else: meshEntry = {} xcord = [] ycord = [] meshEntryWildcard = '/##[ISA=ChemCompt]' if modelRoot != '/': meshEntryWildcard = modelRoot+meshEntryWildcard for meshEnt in wildcardFind(meshEntryWildcard): mollist = [] realist = [] enzlist = [] cplxlist = [] tablist = [] funclist = [] mol_cpl = wildcardFind(meshEnt.path+'/##[ISA=PoolBase]') funclist = wildcardFind(meshEnt.path+'/##[ISA=Function]') enzlist = wildcardFind(meshEnt.path+'/##[ISA=EnzBase]') realist = wildcardFind(meshEnt.path+'/##[ISA=ReacBase]') tablist = wildcardFind(meshEnt.path+'/##[ISA=StimulusTable]') if mol_cpl or funclist or enzlist or realist or tablist: for m in mol_cpl: if isinstance(element(m.parent),CplxEnzBase): cplxlist.append(m) elif isinstance(element(m),moose.PoolBase): mollist.append(m) meshEntry[meshEnt] = {'enzyme':enzlist, 'reaction':realist, 'pool':mollist, 'cplx':cplxlist, 'table':tablist, 'function':funclist } for mert in [mollist,enzlist,realist,tablist]: for merts in mert: objInfo = merts.path+'/info' if exists(objInfo): xcord.append(element(objInfo).x) ycord.append(element(objInfo).y) return(meshEntry,xcord,ycord) def sizeHint(self): return QtCore.QSize(800,400) ''' def setupItem(modelPath,cntDict): # This function collects information of what is connected to what. \ # eg. 
substrate and product connectivity to reaction's and enzyme's \ # sumtotal connectivity to its pool are collected #print " setupItem" sublist = [] prdlist = [] zombieType = ['ReacBase','EnzBase','Function','StimulusTable'] for baseObj in zombieType: path = '/##[ISA='+baseObj+']' if modelPath != '/': path = modelPath+path if ( (baseObj == 'ReacBase') or (baseObj == 'EnzBase')): for items in wildcardFind(path): sublist = [] prdlist = [] uniqItem,countuniqItem = countitems(items,'subOut') subNo = uniqItem for sub in uniqItem: sublist.append((element(sub),'s',countuniqItem[sub])) uniqItem,countuniqItem = countitems(items,'prd') prdNo = uniqItem if (len(subNo) == 0 or len(prdNo) == 0): print ("Substrate Product is empty ",path, " ",items) for prd in uniqItem: prdlist.append((element(prd),'p',countuniqItem[prd])) if (baseObj == 'CplxEnzBase') : uniqItem,countuniqItem = countitems(items,'toEnz') for enzpar in uniqItem: sublist.append((element(enzpar),'t',countuniqItem[enzpar])) uniqItem,countuniqItem = countitems(items,'cplxDest') for cplx in uniqItem: prdlist.append((element(cplx),'cplx',countuniqItem[cplx])) if (baseObj == 'EnzBase'): uniqItem,countuniqItem = countitems(items,'enzDest') for enzpar in uniqItem: sublist.append((element(enzpar),'t',countuniqItem[enzpar])) cntDict[items] = sublist,prdlist elif baseObj == 'Function': for items in wildcardFind(path): sublist = [] prdlist = [] item = items.path+'/x[0]' uniqItem,countuniqItem = countitems(item,'input') for funcpar in uniqItem: sublist.append((element(funcpar),'sts',countuniqItem[funcpar])) uniqItem,countuniqItem = countitems(items,'valueOut') for funcpar in uniqItem: prdlist.append((element(funcpar),'stp',countuniqItem[funcpar])) cntDict[items] = sublist,prdlist else: for tab in wildcardFind(path): tablist = [] uniqItem,countuniqItem = countitems(tab,'output') for tabconnect in uniqItem: tablist.append((element(tabconnect),'tab',countuniqItem[tabconnect])) cntDict[tab] = tablist def countitems(mitems,objtype): items = [] items = element(mitems).neighbors[objtype] uniqItems = set(items) #countuniqItemsauto = Counter(items) countuniqItems = dict((i, items.count(i)) for i in items) return(uniqItems,countuniqItems) def recalculatecoordinatesforKkit(mObjlist,xcord,ycord): positionInfoExist = not(len(np.nonzero(xcord)[0]) == 0 \ and len(np.nonzero(ycord)[0]) == 0) if positionInfoExist: #Here all the object has been taken now recalculate and reassign back x and y co-ordinates xmin = min(xcord) xmax = max(xcord) ymin = min(ycord) ymax = max(ycord) for merts in mObjlist: objInfo = merts.path+'/info' if moose.exists(objInfo): Ix = (xyPosition(objInfo,'x')-xmin)/(xmax-xmin) Iy = (ymin-xyPosition(objInfo,'y'))/(ymax-ymin) element(objInfo).x = Ix*1000 element(objInfo).y = Iy*800 def xyPosition(objInfo,xory): try: return(float(element(objInfo).getField(xory))) except ValueError: return (float(0)) def autoCoordinates(meshEntry,srcdesConnection): G = nx.Graph() for cmpt,memb in meshEntry.items(): if memb in ["enzyme"]: for enzObj in find_index(memb,'enzyme'): #G.add_node(enzObj.path) G.add_node(enzObj.path,label='',shape='ellipse',color='',style='filled',fontname='Helvetica',fontsize=12,fontcolor='blue') for cmpt,memb in meshEntry.items(): #if memb.has_key if memb in ["pool","cplx","reaction"]: for poolObj in find_index(memb,'pool'): #G.add_node(poolObj.path) G.add_node(poolObj.path,label = poolObj.name,shape = 'box',color = '',style = 'filled',fontname = 'Helvetica',fontsize = 9,fontcolor = 'blue') for cplxObj in find_index(memb,'cplx'): 
G.add_node(cplxObj.path) G.add_node(cplxObj.path,label = cplxObj.name,shape = 'box',color = '',style = 'filled',fontname = 'Helvetica',fontsize = 12,fontcolor = 'blue') #G.add_edge((cplxObj.parent).path,cplxObj.path) for reaObj in find_index(memb,'reaction'): #G.add_node(reaObj.path) G.add_node(reaObj.path,label='',shape='circle',color='') for inn,out in srcdesConnection.items(): if (inn.className =='ZombieReac'): arrowcolor = 'green' elif(inn.className =='ZombieEnz'): arrowcolor = 'red' else: arrowcolor = 'blue' if isinstance(out,tuple): if len(out[0])== 0: print (inn.className + ':' +inn.name + " doesn't have input message") else: for items in (items for items in out[0] ): G.add_edge(element(items[0]).path,inn.path) if len(out[1]) == 0: print (inn.className + ':' + inn.name + "doesn't have output mssg") else: for items in (items for items in out[1] ): G.add_edge(inn.path,element(items[0]).path) elif isinstance(out,list): if len(out) == 0: print ("Func pool doesn't have sumtotal") else: for items in (items for items in out ): G.add_edge(element(items[0]).path,inn.path) position = graphviz_layout(G) xcord, ycord = [],[] for item in position.items(): xy = item[1] xroundoff = round(xy[0],0) yroundoff = round(xy[1],0) xcord.append(xroundoff) ycord.append(yroundoff) xmin = min(xcord) xmax = max(xcord) ymin = min(ycord) ymax = max(ycord) for item in position.items(): xy = item[1] anno = Annotator(item[0]+'/info') Ax = (xy[0]-xmin)/(xmax-xmin) Ay = (xy[1]-ymin)/(ymax-ymin) #anno.x = round(Ax,1) #anno.y = round(Ay,1) #not roundingoff to max and min the co-ordinates for bigger model would overlay the co-ordinates anno.x = xy[0] anno.y = xy[1] def find_index(value, key): """ Value.get(key) to avoid expection which would raise if empty value in dictionary for a given key """ if value.get(key) != None: return value.get(key) else: raise ValueError('no dict with the key found')
BhallaLab/moose
moose-gui/plugins/kkitOrdinateUtil.py
Python
gpl-3.0
19,921
[ "MOOSE" ]
9a960ffe3e9a77241fbb1bbefdb59c091c5922b3ca7d9551cc93afab0732852f
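The kkit layout helpers in the record above either reuse saved (x, y) positions from each object's info field or, when none exist, fall back to networkx's graphviz_layout; in both cases the positions are rescaled onto a fixed 1000x800 canvas (the w/h factors mentioned in the file's changelog). Below is a minimal standalone sketch of that rescaling step only; the helper name and the plain (x, y) tuples are illustrative and carry no MOOSE dependency.

# Hypothetical standalone helper mirroring recalculatecoordinatesforKkit:
# normalise raw layout coordinates onto a fixed kkit canvas (w=1000, h=800).
def rescale_to_kkit_canvas(points, width=1000, height=800):
    """points: iterable of (x, y) tuples; returns a list of rescaled (x, y)."""
    xs = [p[0] for p in points]
    ys = [p[1] for p in points]
    xmin, xmax = min(xs), max(xs)
    ymin, ymax = min(ys), max(ys)
    rescaled = []
    for x, y in points:
        # Same normalisation as the original code: x maps left-to-right,
        # y uses (ymin - y) so the vertical axis is flipped as in kkit.
        ix = float(x - xmin) / (xmax - xmin) if xmax != xmin else 0.0
        iy = float(ymin - y) / (ymax - ymin) if ymax != ymin else 0.0
        rescaled.append((ix * width, iy * height))
    return rescaled

if __name__ == '__main__':
    print(rescale_to_kkit_canvas([(0.0, 0.0), (5.0, 2.0), (10.0, 4.0)]))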
# $HeadURL $ ''' ResourceManagementHandler Module that allows users to access the ResourceManagementDB remotely. ''' from DIRAC import gConfig, S_OK, gLogger from DIRAC.Core.DISET.RequestHandler import RequestHandler from DIRAC.ResourceStatusSystem.Utilities import Synchronizer, Utils ResourceManagementDB = getattr(Utils.voimport( 'DIRAC.ResourceStatusSystem.DB.ResourceManagementDB' ),'ResourceManagementDB') __RCSID__ = '$Id: $' db = False def initializeResourceManagementHandler( _serviceInfo ): ''' Handler initialization, where we set the ResourceManagementDB as global db. ''' global db db = ResourceManagementDB() # Regenerates DB tables if needed db._checkTable() syncObject = Synchronizer.Synchronizer() gConfig.addListenerToNewVersionEvent( syncObject.sync ) return S_OK() ################################################################################ class ResourceManagementHandler( RequestHandler ): ''' The ResourceManagementHandler exposes the DB front-end functions through a XML-RPC server, functionalities inherited from :class:`DIRAC.Core.DISET.Reques\ tHandler.RequestHandler` According to the ResourceManagementDB philosophy, only functions of the type: - insert - update - select - delete are exposed. If you need anything more complicated, either look for it on the :class:`ResourceManagementClient`, or code it yourself. This way the DB and the Service are kept clean and tidied. To can use this service on this way, but you MUST NOT DO IT. Use it through the :class:`ResourceManagementClient`. If offers in the worst case as good perfor\ mance as the :class:`ResourceManagementHandler`, if not better. >>> from DIRAC.Core.DISET.RPCClient import RPCCLient >>> server = RPCCLient("ResourceStatus/ResourceManagement") ''' def __init__( self, *args, **kwargs ): super( ResourceManagementHandler, self ).__init__( *args, **kwargs ) @staticmethod def __logResult( methodName, result ): ''' Method that writes to log error messages ''' if not result[ 'OK' ]: gLogger.error( '%s%s' % ( methodName, result[ 'Message' ] ) ) @staticmethod def setDatabase( database ): ''' This method let us inherit from this class and overwrite the database object without having problems with the global variables. :Parameters: **database** - `MySQL` database used by this handler :return: None ''' global db db = database types_insert = [ dict, dict ] def export_insert( self, params, meta ): ''' This method is a bridge to access :class:`ResourceManagementDB` remotely. It does not add neither processing nor validation. If you need to know more about this method, you must keep reading on the database documentation. :Parameters: **params** - `dict` arguments for the mysql query ( must match table columns ! ). **meta** - `dict` metadata for the mysql query. It must contain, at least, `table` key with the proper table name. :return: S_OK() || S_ERROR() ''' gLogger.info( 'insert: %s %s' % ( params, meta ) ) res = db.insert( params, meta ) self.__logResult( 'insert', res ) return res types_update = [ dict, dict ] def export_update( self, params, meta ): ''' This method is a bridge to access :class:`ResourceManagementDB` remotely. It does not add neither processing nor validation. If you need to know more about this method, you must keep reading on the database documentation. :Parameters: **params** - `dict` arguments for the mysql query ( must match table columns ! ). **meta** - `dict` metadata for the mysql query. It must contain, at least, `table` key with the proper table name. 
:return: S_OK() || S_ERROR() ''' gLogger.info( 'update: %s %s' % ( params, meta ) ) res = db.update( params, meta ) self.__logResult( 'update', res ) return res types_select = [ dict, dict ] def export_select( self, params, meta ): ''' This method is a bridge to access :class:`ResourceManagementDB` remotely. It does not add neither processing nor validation. If you need to know more\ about this method, you must keep reading on the database documentation. :Parameters: **params** - `dict` arguments for the mysql query ( must match table columns ! ). **meta** - `dict` metadata for the mysql query. It must contain, at least, `table` key with the proper table name. :return: S_OK() || S_ERROR() ''' gLogger.info( 'select: %s %s' % ( params, meta ) ) res = db.select( params, meta ) self.__logResult( 'select', res ) return res types_delete = [ dict, dict ] def export_delete( self, params, meta ): ''' This method is a bridge to access :class:`ResourceManagementDB` remotely.\ It does not add neither processing nor validation. If you need to know more \ about this method, you must keep reading on the database documentation. :Parameters: **params** - `dict` arguments for the mysql query ( must match table columns ! ). **meta** - `dict` metadata for the mysql query. It must contain, at least, `table` key with the proper table name. :return: S_OK() || S_ERROR() ''' gLogger.info( 'delete: %s %s' % ( params, meta ) ) res = db.delete( params, meta ) self.__logResult( 'delete', res ) return res types_addOrModify = [ dict, dict ] def export_addOrModify( self, params, meta ): ''' This method is a bridge to access :class:`ResourceManagementDB` remotely. It does not add neither processing nor validation. If you need to know more about this method, you must keep reading on the database documentation. :Parameters: **args** - `tuple` arguments for the mysql query ( must match table columns ! ). **kwargs** - `dict` metadata for the mysql query. It must contain, at least, `table` key with the proper table name. :return: S_OK() || S_ERROR() ''' gLogger.info( 'addOrModify: %s %s' % ( params, meta ) ) res = db.addOrModify( params, meta ) self.__logResult( 'addOrModify', res ) return res types_addIfNotThere = [ dict, dict ] def export_addIfNotThere( self, params, meta ): ''' This method is a bridge to access :class:`ResourceManagementDB` remotely. It does not add neither processing nor validation. If you need to know more about this method, you must keep reading on the database documentation. :Parameters: **args** - `tuple` arguments for the mysql query ( must match table columns ! ). **kwargs** - `dict` metadata for the mysql query. It must contain, at least, `table` key with the proper table name. :return: S_OK() || S_ERROR() ''' gLogger.info( 'addIfNotThere: %s %s' % ( params, meta ) ) res = db.addIfNotThere( params, meta ) self.__logResult( 'addIfNotThere', res ) return res ################################################################################ #EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF
calancha/DIRAC
ResourceStatusSystem/Service/ResourceManagementHandler.py
Python
gpl-3.0
7,554
[ "DIRAC" ]
cf1051d090679ff363c0d21d83b2f43dff20f2b984645158f660de9c8fec2995
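The handler's docstring recommends going through ResourceManagementClient, but for completeness here is a rough sketch of calling the exposed endpoints directly over DISET RPC, following the (typo-corrected) import shown in that docstring. The table and column names below are placeholders, not a real schema.

# Sketch only: direct RPC access to the service, as hinted at in the class
# docstring.  In production code, use ResourceManagementClient instead.
from DIRAC.Core.DISET.RPCClient import RPCClient

server = RPCClient('ResourceStatus/ResourceManagement')

# Every exposed method takes (params, meta); meta must at least carry the
# target table name.  'SomeTable' and 'Name' are placeholder values.
result = server.select({'Name': 'SomeResource'}, {'table': 'SomeTable'})
if result['OK']:
    print(result['Value'])
else:
    print('select failed: %s' % result['Message'])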
# Copyright (C) 2012,2013
#   Max Planck Institute for Polymer Research
# Copyright (C) 2008,2009,2010,2011
#   Max-Planck-Institute for Polymer Research & Fraunhofer SCAI
#
# This file is part of ESPResSo++.
#
# ESPResSo++ is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ESPResSo++ is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.

"""
************************************
**Version** - Object
************************************

Return version information of espresso module

Example:

>>> version = espresso.Version()
>>> print "Name = ", version.name
>>> print "Major version number = ", version.major
>>> print "Minor version number = ", version.minor
>>> print "Mercurial(hg) revision = ", version.hgrevision
>>> print "boost version = ", version.boostversion
>>> print "Patchlevel = ", version.patchlevel
>>> print "Compilation date = ", version.date
>>> print "Compilation time = ", version.time

to print a full version info string:

>>> print version.info()
"""

from espresso import pmi
from espresso.esutil import cxxinit
import _espresso
import mpi4py.MPI as MPI

class VersionLocal(_espresso.Version):
    def __init__(self):
        'Local Version object'
        if pmi._PMIComm and pmi._PMIComm.isActive():
            if pmi._MPIcomm.rank in pmi._PMIComm.getMPIcpugroup():
                cxxinit(self, _espresso.Version)
            else:
                pass
        else:
            cxxinit(self, _espresso.Version)

if pmi.isController:
    class Version(object):
        __metaclass__ = pmi.Proxy
        pmiproxydefs = dict(
            cls = 'espresso.VersionLocal',
            pmiproperty = ['major', 'minor', 'hgrevision', 'boostversion',
                           'patchlevel', 'date', 'time', 'name'],
            pmicall = ['info']
        )
BackupTheBerlios/espressopp
src/Version.py
Python
gpl-3.0
2,358
[ "ESPResSo" ]
199a641dbf7b58bd76f449a4647a743df87de6f3f6402975b2af25bcbe27def0
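The Version.py record above is a small instance of the ESPResSo++ PMI pattern: a *Local class wraps the C++ object on every worker, and a Proxy-metaclass class on the controller forwards properties and method calls. The sketch below repeats that pattern for a made-up Hello object; _espresso.Hello, its message property and greet method do not exist in the code base and are only there to show where each piece goes.

# Minimal sketch of the same PMI controller/worker split for a hypothetical
# class.  All names marked below are invented for illustration.
from espresso import pmi
from espresso.esutil import cxxinit
import _espresso

class HelloLocal(_espresso.Hello):          # worker-side wrapper (hypothetical)
    def __init__(self):
        cxxinit(self, _espresso.Hello)      # construct the C++ object locally

if pmi.isController:
    class Hello(object):                    # controller-side facade
        __metaclass__ = pmi.Proxy
        pmiproxydefs = dict(
            cls = 'espresso.HelloLocal',    # class instantiated on each worker
            pmiproperty = ['message'],      # attributes mirrored transparently
            pmicall = ['greet']             # methods broadcast to all workers
        )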
#!/usr/bin/python # -------------------------- IMPORTS -------------------------- # import numpy as np import random as rn import scipy import skimage.util import skimage.transform # -------------------------- CROP -------------------------- # # # image: image to be cropped, scale: scale factor, keep_original_size: keep image's original shape def crop(image, scale=0.8, keep_original_size=True): ''' Parameters: image: NumPy array of size NxMxC scale: float number between 0 and 1 keep_original_size: boolean ''' size_x, size_y, num_channels = image.shape if (scale < 1): max_x = int(round(size_x*scale)) max_y = int(round(size_y*scale)) dif_x = size_x - max_x dif_y = size_y - max_y bias_x = int(round(dif_x/2)) bias_y = int(round(dif_y/2)) image = image[bias_x:bias_x+max_x, bias_y:bias_y+max_y, :] if (keep_original_size): image = scipy.misc.imresize(image, (size_x,size_y,num_channels)) return image # -------------------------- METHOS TO RANDOMLY APPLY TRANSFORMATIONS -------------------------- # def apply_random_rotation(image): ''' Parameters: image: NumPy array of size NxMxC ''' ang = rn.randint(0, 360) return rotate(n_image, angle=ang) def apply_random_noise(image): ''' Parameters: image: NumPy array of size NxMxC ''' noise_mode = [] noise_mode.append('pepper') noise_mode.append('s&p') noise_mode.append('salt') #noise_mode.append('speckle') #noise_mode.append('gaussian') return (skimage.util.random_noise(image, mode=noise_mode[rn.randint(0,len(noise_mode)-1)]) * 255).astype(np.uint8) def apply_random_cropping(image): ''' Parameters: image: NumPy array of size NxMxC ''' i = rn.random() if (i >= 0.75): # Crop if the random scale is >= 75%, we don't want to lose too much info return crop(image, scale=i) else: return image def apply_random_vertical_flip(image): ''' Parameters: image: NumPy array of size NxMxC ''' i = rn.random() if (i >= 0.5): # We do this to don't flip ALL the times (50/50 prob of flipping) return np.flipud(image) #cv.flip(image,1) else: return image def apply_random_horizontal_flip(image): ''' Parameters: image: NumPy array of size NxMxC ''' i = rn.random() if (i >= 0.5): # We do this to flip just some very rare times (flip with 25% prob) return np.fliplr(image) #cv.flip(image,0) else: return image # MAIN METHOD def apply_random_transformations(image): ''' This method applies all the transformations using the default parameters. Parameters: image: NumPy array of size NxMxC ''' image = apply_random_noise((image/255.0).astype(float)) image = apply_random_cropping(image) image = apply_random_vertical_flip(image) image = apply_random_horizontal_flip(image) return image def augment_single_data(image, labels, num_new): ''' Augments a single image with its labels. Parameters: image: NumPy array of size NxMxC labels: integer that defines the class of the image num_new: integer that defines the number of new samples ''' n = image.shape[0] m = image.shape[1] c = image.shape[2] new_images = np.empty([num_new, n, m, c], dtype=float) new_labels = np.zeros([num_new], dtype=int) for i in range(0, num_new): new_images[i,:,:,:] = apply_random_transformations(image) new_labels[i] = labels return new_images, new_labels # -------------------------- Same as before but for one-hot encoding labels -------------------------- # def augment_single_data_one_hot(image, labels, num_new): ''' Augments a single image with its labels in one-hot encoding. 
Parameters: image: NumPy array of size NxMxC labels: NumPy array of size [1,N] -> label in one-hot encoding num_new: integer that defines the number of new samples ''' n = image.shape[0] m = image.shape[1] c = image.shape[2] new_images = np.empty([num_new, n, m, c], dtype=float) new_labels = np.zeros([num_new, labels.shape[0]], dtype=int) for i in range(0, num_new): new_images[i,:,:,:] = apply_random_transformations(image) new_labels[i,:] = labels return new_images, new_labels # -------------------------- Taking off the original dataset, and keep only the augmentated samples -------------------------- # # Returns an augmented dataset without the original values def augment_data_one_hot(dataset, labels, num_new): ''' Augments dataset with its labels in one-hot encoding. Parameters: dataset: NumPy array of size ZxNxMxC. Thus, we have Z images of dimensions NxMxC. labels: NumPy array of size [1,N] -> label in one-hot encoding num_new: integer that defines the number of new samples ''' n = dataset.shape[0] new_dataset = [] new_labels = [] for i in range(n): feat, lab = augment_single_data_one_hot(dataset[i,:,:,:], labels[i,:], num_new, labels.shape[1]) for c in range(feat.shape[0]): new_dataset.append(feat[c,:,:,:]) new_labels.append(lab[c,:]) return np.asarray(new_dataset), np.asarray(new_labels) # -------------------------- METHODS TO BALANCE IMBALANCED DATASET -------------------------- # def get_diff_binary_classes(target): ''' Obtain the difference of number of instances between the two classes of the dataset. It only works for binary classes. ''' unique, counts = np.unique(target, return_counts=True) z = dict(zip(unique, counts)) majority_class_index = np.argmax(counts) minority_class_index = np.argmin(counts) difference = np.absolute(counts[0] - counts[1]) return minority_class_index, majority_class_index, counts, difference # This method balances an imbalanced dataset (usually the train set, although it # can be used for an entire dataset). It only works for binary classes. def balance_dataset(dataset, target): ''' Balance an unbalanced dataset (usually the train set, although it can be used for the entire dataset). It only works for binary classes. ''' # Obtain info about the classes (index of majority class, of minority class, etc.) min_class_idx, maj_class_idx, counts, difference = get_diff_binary_classes(target) print 'Num. of instances for each class before augmentation: ', counts # Augment each instance of the dataset (of the minority class) # until reaching the same number of instances of the majority class new_dataset = dataset new_target = target counter = 0 i = 0 while (i < target.shape[0]): if (target[i] == min_class_idx): feature, label = augment_single_data(dataset[i,:,:,:], target[i], 1) new_dataset = np.concatenate((new_dataset, feature), axis=0) new_target = np.concatenate((new_target, label), axis=0) counter = counter + 1 if (i == target.shape[0]-1) and (counter != difference): i = 0 if counter == difference: break i = i + 1 # Check if train and trainl are balanced min_class_idx, maj_class_idx, counts, difference = get_diff_binary_classes(new_target) print 'Num. of instances for each class after augmentation: ', counts # Return the balanced dataset return new_dataset, new_target #
nmarceloromero/data_augment
augment.py
Python
mit
6,909
[ "Gaussian" ]
4a9ebf4a30b22de7fa2a8cf70eb8bd54654f46d1463da890f67a26c5726d5d9d
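As a quick usage sketch for the augmentation module above, and assuming the file is importable as augment, the snippet below produces five randomly transformed copies of a single toy image; the 64x64x3 random array simply stands in for a real photograph and the label value is arbitrary.

import numpy as np
import augment  # assumes augment.py from this repository is on the path

# A toy RGB "image"; any NumPy array of shape NxMxC works.
image = (np.random.rand(64, 64, 3) * 255).astype(np.uint8)

# Produce five randomly transformed copies, all labelled with class 1.
new_images, new_labels = augment.augment_single_data(image, labels=1, num_new=5)
print(new_images.shape)  # (5, 64, 64, 3)
print(new_labels)        # [1 1 1 1 1]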
#!/usr/bin/env python # # Copyright 2007 Google Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # """Tool for deploying apps to an app server. Currently, the application only uploads new appversions. To do this, it first walks the directory tree rooted at the path the user specifies, adding all the files it finds to a list. It then uploads the application configuration (app.yaml) to the server using HTTP, followed by uploading each of the files. It then commits the transaction with another request. The bulk of this work is handled by the AppVersionUpload class, which exposes methods to add to the list of files, fetch a list of modified files, upload files, and commit or rollback the transaction. """ import calendar import datetime import errno import getpass import hashlib import logging import mimetypes import optparse import os import random import re import sys import tempfile import time import urllib import urllib2 import google import yaml #from google.appengine.cron import groctimespecification from google.appengine.api import appinfo from google.appengine.api import appinfo_includes from google.appengine.api import backendinfo #from google.appengine.api import croninfo #from google.appengine.api import dosinfo #from google.appengine.api import queueinfo from google.appengine.api import validation from google.appengine.api import yaml_errors from google.appengine.api import yaml_object #from google.appengine.datastore import datastore_index from google.appengine.tools import appengine_rpc #from google.appengine.tools import bulkloader MAX_FILES_TO_CLONE = 100 LIST_DELIMITER = '\n' TUPLE_DELIMITER = '|' BACKENDS_ACTION = 'backends' VERSION_FILE = '../../VERSION' UPDATE_CHECK_TIMEOUT = 3 NAG_FILE = '.appcfg_nag' MAX_LOG_LEVEL = 4 MAX_BATCH_SIZE = 3200000 MAX_BATCH_COUNT = 100 MAX_BATCH_FILE_SIZE = 200000 BATCH_OVERHEAD = 500 verbosity = 1 PREFIXED_BY_ADMIN_CONSOLE_RE = '^(?:admin-console)(.*)' SDK_PRODUCT = 'appcfg_py' DAY = 24*3600 SUNDAY = 6 SUPPORTED_RUNTIMES = ('go', 'python', 'python27', 'php') MB = 1024 * 1024 DEFAULT_RESOURCE_LIMITS = { 'max_file_size': 32 * MB, 'max_blob_size': 32 * MB, 'max_total_file_size': 150 * MB, 'max_file_count': 10000, } def PrintUpdate(msg): """Print a message to stderr. If 'verbosity' is greater than 0, print the message. Args: msg: The string to print. """ if verbosity > 0: print >>sys.stderr, msg def StatusUpdate(msg): """Print a status message to stderr.""" PrintUpdate(msg) def ErrorUpdate(msg): """Print an error message to stderr.""" PrintUpdate(msg) def _PrintErrorAndExit(stream, msg, exit_code=2): """Prints the given error message and exists the program. Args: stream: The stream (e.g. StringIO or file) to write the message to. msg: The error message to display as a string. exit_code: The integer code to pass to sys.exit(). """ stream.write(msg) sys.exit(exit_code) def GetMimeTypeIfStaticFile(config, filename): """Looks up the mime type for 'filename'. Uses the handlers in 'config' to determine if the file should be treated as a static file. 
Args: config: The app.yaml object to check the filename against. filename: The name of the file. Returns: The mime type string. For example, 'text/plain' or 'image/gif'. None if this is not a static file. """ for handler in config.handlers: handler_type = handler.GetHandlerType() if handler_type in ('static_dir', 'static_files'): if handler_type == 'static_dir': regex = os.path.join(re.escape(handler.GetHandler()), '.*') else: regex = handler.upload if re.match(regex, filename): if handler.mime_type is not None: return handler.mime_type else: guess = mimetypes.guess_type(filename)[0] if guess is None: default = 'application/octet-stream' print >>sys.stderr, ('Could not guess mimetype for %s. Using %s.' % (filename, default)) return default return guess return None def LookupErrorBlob(config, filename): """Looks up the mime type and error_code for 'filename'. Uses the error handlers in 'config' to determine if the file should be treated as an error blob. Args: config: The app.yaml object to check the filename against. filename: The name of the file. Returns: A tuple of (mime_type, error_code), or (None, None) if this is not an error blob. For example, ('text/plain', default) or ('image/gif', timeout) or (None, None). """ if not config.error_handlers: return (None, None) for error_handler in config.error_handlers: if error_handler.file == filename: error_code = error_handler.error_code if not error_code: error_code = 'default' if error_handler.mime_type is not None: return (error_handler.mime_type, error_code) else: guess = mimetypes.guess_type(filename)[0] if guess is None: default = 'application/octet-stream' print >>sys.stderr, ('Could not guess mimetype for %s. Using %s.' % (filename, default)) return (default, error_code) return (guess, error_code) return (None, None) def BuildClonePostBody(file_tuples): """Build the post body for the /api/clone{files,blobs,errorblobs} urls. Args: file_tuples: A list of tuples. Each tuple should contain the entries appropriate for the endpoint in question. Returns: A string containing the properly delimited tuples. """ file_list = [] for tup in file_tuples: path = tup[0] tup = tup[1:] file_list.append(TUPLE_DELIMITER.join([path] + list(tup))) return LIST_DELIMITER.join(file_list) def GetRemoteResourceLimits(rpcserver): """Get the resource limit as reported by the admin console. Get the resource limits by querying the admin_console/appserver. The actual limits returned depends on the server we are talking to and could be missing values we expect or include extra values. Args: rpcserver: The RPC server to use. Returns: A dictionary. """ try: yaml_data = rpcserver.Send('/api/appversion/getresourcelimits') except urllib2.HTTPError, err: if err.code != 404: raise return {} return yaml.safe_load(yaml_data) def GetResourceLimits(rpcserver): """Gets the resource limits. Gets the resource limits that should be applied to apps. Any values that the server does not know about will have their default value reported (although it is also possible for the server to report values we don't know about). Args: rpcserver: The RPC server to use. Returns: A dictionary. """ resource_limits = DEFAULT_RESOURCE_LIMITS.copy() resource_limits.update(GetRemoteResourceLimits(rpcserver)) return resource_limits class NagFile(validation.Validated): """A validated YAML class to represent the user's nag preferences. Attributes: timestamp: The timestamp of the last nag. opt_in: True if the user wants to check for updates on dev_appserver start. False if not. 
May be None if we have not asked the user yet. """ ATTRIBUTES = { 'timestamp': validation.TYPE_FLOAT, 'opt_in': validation.Optional(validation.TYPE_BOOL), } @staticmethod def Load(nag_file): """Load a single NagFile object where one and only one is expected. Args: nag_file: A file-like object or string containing the yaml data to parse. Returns: A NagFile instance. """ return yaml_object.BuildSingleObject(NagFile, nag_file) def GetVersionObject(isfile=os.path.isfile, open_fn=open): """Gets the version of the SDK by parsing the VERSION file. Args: isfile: used for testing. open_fn: Used for testing. Returns: A Yaml object or None if the VERSION file does not exist. """ return None version_filename = os.path.join(os.path.dirname(google.appengine.__file__), VERSION_FILE) if not isfile(version_filename): logging.error('Could not find version file at %s', version_filename) return None version_fh = open_fn(version_filename, 'r') try: version = yaml.safe_load(version_fh) finally: version_fh.close() return version def RetryWithBackoff(callable_func, retry_notify_func, initial_delay=1, backoff_factor=2, max_delay=60, max_tries=20): """Calls a function multiple times, backing off more and more each time. Args: callable_func: A function that performs some operation that should be retried a number of times up on failure. Signature: () -> (done, value) If 'done' is True, we'll immediately return (True, value) If 'done' is False, we'll delay a bit and try again, unless we've hit the 'max_tries' limit, in which case we'll return (False, value). retry_notify_func: This function will be called immediately before the next retry delay. Signature: (value, delay) -> None 'value' is the value returned by the last call to 'callable_func' 'delay' is the retry delay, in seconds initial_delay: Initial delay after first try, in seconds. backoff_factor: Delay will be multiplied by this factor after each try. max_delay: Maximum delay, in seconds. max_tries: Maximum number of tries (the first one counts). Returns: What the last call to 'callable_func' returned, which is of the form (done, value). If 'done' is True, you know 'callable_func' returned True before we ran out of retries. If 'done' is False, you know 'callable_func' kept returning False and we ran out of retries. Raises: Whatever the function raises--an exception will immediately stop retries. """ delay = initial_delay num_tries = 0 while True: done, opaque_value = callable_func() num_tries += 1 if done: return True, opaque_value if num_tries >= max_tries: return False, opaque_value retry_notify_func(opaque_value, delay) time.sleep(delay) delay = min(delay * backoff_factor, max_delay) def _VersionList(release): """Parse a version string into a list of ints. Args: release: The 'release' version, e.g. '1.2.4'. (Due to YAML parsing this may also be an int or float.) Returns: A list of ints corresponding to the parts of the version string between periods. Example: '1.2.4' -> [1, 2, 4] '1.2.3.4' -> [1, 2, 3, 4] Raises: ValueError if not all the parts are valid integers. """ return [int(part) for part in str(release).split('.')] class UpdateCheck(object): """Determines if the local SDK is the latest version. Nags the user when there are updates to the SDK. As the SDK becomes more out of date, the language in the nagging gets stronger. We store a little yaml file in the user's home directory so that we nag the user only once a week. The yaml file has the following field: 'timestamp': Last time we nagged the user in seconds since the epoch. 
Attributes: rpcserver: An AbstractRpcServer instance used to check for the latest SDK. config: The app's AppInfoExternal. Needed to determine which api_version the app is using. """ def __init__(self, rpcserver, config, isdir=os.path.isdir, isfile=os.path.isfile, open_fn=open): """Create a new UpdateCheck. Args: rpcserver: The AbstractRpcServer to use. config: The yaml object that specifies the configuration of this application. isdir: Replacement for os.path.isdir (for testing). isfile: Replacement for os.path.isfile (for testing). open_fn: Replacement for the open builtin (for testing). """ self.rpcserver = rpcserver self.config = config self.isdir = isdir self.isfile = isfile self.open = open_fn @staticmethod def MakeNagFilename(): """Returns the filename for the nag file for this user.""" user_homedir = os.path.expanduser('~/') if not os.path.isdir(user_homedir): drive, unused_tail = os.path.splitdrive(os.__file__) if drive: os.environ['HOMEDRIVE'] = drive return os.path.expanduser('~/' + NAG_FILE) def _ParseVersionFile(self): """Parse the local VERSION file. Returns: A Yaml object or None if the file does not exist. """ return GetVersionObject(isfile=self.isfile, open_fn=self.open) def CheckSupportedVersion(self): """Determines if the app's api_version is supported by the SDK. Uses the api_version field from the AppInfoExternal to determine if the SDK supports that api_version. Raises: sys.exit if the api_version is not supported. """ version = self._ParseVersionFile() if version is None: logging.error('Could not determine if the SDK supports the api_version ' 'requested in app.yaml.') return if self.config.api_version not in version['api_versions']: logging.critical('The api_version specified in app.yaml (%s) is not ' 'supported by this release of the SDK. The supported ' 'api_versions are %s.', self.config.api_version, version['api_versions']) sys.exit(1) def CheckForUpdates(self): """Queries the server for updates and nags the user if appropriate. Queries the server for the latest SDK version at the same time reporting the local SDK version. The server will respond with a yaml document containing the fields: 'release': The name of the release (e.g. 1.2). 'timestamp': The time the release was created (YYYY-MM-DD HH:MM AM/PM TZ). 'api_versions': A list of api_version strings (e.g. ['1', 'beta']). We will nag the user with increasing severity if: - There is a new release. - There is a new release with a new api_version. - There is a new release that does not support the api_version named in self.config. 
""" version = self._ParseVersionFile() if version is None: logging.info('Skipping update check') return logging.info('Checking for updates to the SDK.') try: response = self.rpcserver.Send('/api/updatecheck', timeout=UPDATE_CHECK_TIMEOUT, release=version['release'], timestamp=version['timestamp'], api_versions=version['api_versions'], runtime=self.config.runtime) except urllib2.URLError, e: logging.info('Update check failed: %s', e) return latest = yaml.safe_load(response) if version['release'] == latest['release']: logging.info('The SDK is up to date.') return try: this_release = _VersionList(version['release']) except ValueError: logging.warn('Could not parse this release version (%r)', version['release']) else: try: advertised_release = _VersionList(latest['release']) except ValueError: logging.warn('Could not parse advertised release version (%r)', latest['release']) else: if this_release > advertised_release: logging.info('This SDK release is newer than the advertised release.') return api_versions = latest['api_versions'] if self.config.api_version not in api_versions: self._Nag( 'The api version you are using (%s) is obsolete! You should\n' 'upgrade your SDK and test that your code works with the new\n' 'api version.' % self.config.api_version, latest, version, force=True) return if self.config.api_version != api_versions[len(api_versions) - 1]: self._Nag( 'The api version you are using (%s) is deprecated. You should\n' 'upgrade your SDK to try the new functionality.' % self.config.api_version, latest, version) return self._Nag('There is a new release of the SDK available.', latest, version) def _ParseNagFile(self): """Parses the nag file. Returns: A NagFile if the file was present else None. """ nag_filename = UpdateCheck.MakeNagFilename() if self.isfile(nag_filename): fh = self.open(nag_filename, 'r') try: nag = NagFile.Load(fh) finally: fh.close() return nag return None def _WriteNagFile(self, nag): """Writes the NagFile to the user's nag file. If the destination path does not exist, this method will log an error and fail silently. Args: nag: The NagFile to write. """ nagfilename = UpdateCheck.MakeNagFilename() try: fh = self.open(nagfilename, 'w') try: fh.write(nag.ToYAML()) finally: fh.close() except (OSError, IOError), e: logging.error('Could not write nag file to %s. Error: %s', nagfilename, e) def _Nag(self, msg, latest, version, force=False): """Prints a nag message and updates the nag file's timestamp. Because we don't want to nag the user everytime, we store a simple yaml document in the user's home directory. If the timestamp in this doc is over a week old, we'll nag the user. And when we nag the user, we update the timestamp in this doc. Args: msg: The formatted message to print to the user. latest: The yaml document received from the server. version: The local yaml version document. force: If True, always nag the user, ignoring the nag file. 
""" nag = self._ParseNagFile() if nag and not force: last_nag = datetime.datetime.fromtimestamp(nag.timestamp) if datetime.datetime.now() - last_nag < datetime.timedelta(weeks=1): logging.debug('Skipping nag message') return if nag is None: nag = NagFile() nag.timestamp = time.time() self._WriteNagFile(nag) print '****************************************************************' print msg print '-----------' print 'Latest SDK:' print yaml.dump(latest) print '-----------' print 'Your SDK:' print yaml.dump(version) print '-----------' print 'Please visit http://code.google.com/appengine for the latest SDK' print '****************************************************************' def AllowedToCheckForUpdates(self, input_fn=raw_input): """Determines if the user wants to check for updates. On startup, the dev_appserver wants to check for updates to the SDK. Because this action reports usage to Google when the user is not otherwise communicating with Google (e.g. pushing a new app version), the user must opt in. If the user does not have a nag file, we will query the user and save the response in the nag file. Subsequent calls to this function will re-use that response. Args: input_fn: used to collect user input. This is for testing only. Returns: True if the user wants to check for updates. False otherwise. """ nag = self._ParseNagFile() if nag is None: nag = NagFile() nag.timestamp = time.time() if nag.opt_in is None: answer = input_fn('Allow dev_appserver to check for updates on startup? ' '(Y/n): ') answer = answer.strip().lower() if answer == 'n' or answer == 'no': print ('dev_appserver will not check for updates on startup. To ' 'change this setting, edit %s' % UpdateCheck.MakeNagFilename()) nag.opt_in = False else: print ('dev_appserver will check for updates on startup. To change ' 'this setting, edit %s' % UpdateCheck.MakeNagFilename()) nag.opt_in = True self._WriteNagFile(nag) return nag.opt_in class IndexDefinitionUpload(object): """Provides facilities to upload index definitions to the hosting service.""" def __init__(self, rpcserver, config, definitions): """Creates a new DatastoreIndexUpload. Args: rpcserver: The RPC server to use. Should be an instance of HttpRpcServer or TestRpcServer. config: The AppInfoExternal object derived from the app.yaml file. definitions: An IndexDefinitions object. """ self.rpcserver = rpcserver self.config = config self.definitions = definitions def DoUpload(self): """Uploads the index definitions.""" StatusUpdate('Uploading index definitions.') self.rpcserver.Send('/api/datastore/index/add', app_id=self.config.application, version=self.config.version, payload=self.definitions.ToYAML()) class CronEntryUpload(object): """Provides facilities to upload cron entries to the hosting service.""" def __init__(self, rpcserver, config, cron): """Creates a new CronEntryUpload. Args: rpcserver: The RPC server to use. Should be an instance of a subclass of AbstractRpcServer config: The AppInfoExternal object derived from the app.yaml file. cron: The CronInfoExternal object loaded from the cron.yaml file. 
""" self.rpcserver = rpcserver self.config = config self.cron = cron def DoUpload(self): """Uploads the cron entries.""" StatusUpdate('Uploading cron entries.') self.rpcserver.Send('/api/cron/update', app_id=self.config.application, version=self.config.version, payload=self.cron.ToYAML()) class QueueEntryUpload(object): """Provides facilities to upload task queue entries to the hosting service.""" def __init__(self, rpcserver, config, queue): """Creates a new QueueEntryUpload. Args: rpcserver: The RPC server to use. Should be an instance of a subclass of AbstractRpcServer config: The AppInfoExternal object derived from the app.yaml file. queue: The QueueInfoExternal object loaded from the queue.yaml file. """ self.rpcserver = rpcserver self.config = config self.queue = queue def DoUpload(self): """Uploads the task queue entries.""" StatusUpdate('Uploading task queue entries.') self.rpcserver.Send('/api/queue/update', app_id=self.config.application, version=self.config.version, payload=self.queue.ToYAML()) class DosEntryUpload(object): """Provides facilities to upload dos entries to the hosting service.""" def __init__(self, rpcserver, config, dos): """Creates a new DosEntryUpload. Args: rpcserver: The RPC server to use. Should be an instance of a subclass of AbstractRpcServer. config: The AppInfoExternal object derived from the app.yaml file. dos: The DosInfoExternal object loaded from the dos.yaml file. """ self.rpcserver = rpcserver self.config = config self.dos = dos def DoUpload(self): """Uploads the dos entries.""" StatusUpdate('Uploading DOS entries.') self.rpcserver.Send('/api/dos/update', app_id=self.config.application, version=self.config.version, payload=self.dos.ToYAML()) class DefaultVersionSet(object): """Provides facilities to set the default (serving) version.""" def __init__(self, rpcserver, config): """Creates a new DefaultVersionSet. Args: rpcserver: The RPC server to use. Should be an instance of a subclass of AbstractRpcServer. config: The AppInfoExternal object derived from the app.yaml file. """ self.rpcserver = rpcserver self.config = config def SetVersion(self): """Sets the default version.""" StatusUpdate('Setting default version to %s.' % (self.config.version,)) self.rpcserver.Send('/api/appversion/setdefault', app_id=self.config.application, version=self.config.version) class IndexOperation(object): """Provide facilities for writing Index operation commands.""" def __init__(self, rpcserver, config): """Creates a new IndexOperation. Args: rpcserver: The RPC server to use. Should be an instance of HttpRpcServer or TestRpcServer. config: appinfo.AppInfoExternal configuration object. """ self.rpcserver = rpcserver self.config = config def DoDiff(self, definitions): """Retrieve diff file from the server. Args: definitions: datastore_index.IndexDefinitions as loaded from users index.yaml file. Returns: A pair of datastore_index.IndexDefinitions objects. The first record is the set of indexes that are present in the index.yaml file but missing from the server. The second record is the set of indexes that are present on the server but missing from the index.yaml file (indicating that these indexes should probably be vacuumed). """ StatusUpdate('Fetching index definitions diff.') response = self.rpcserver.Send('/api/datastore/index/diff', app_id=self.config.application, payload=definitions.ToYAML()) return datastore_index.ParseMultipleIndexDefinitions(response) def DoDelete(self, definitions): """Delete indexes from the server. 
Args: definitions: Index definitions to delete from datastore. Returns: A single datstore_index.IndexDefinitions containing indexes that were not deleted, probably because they were already removed. This may be normal behavior as there is a potential race condition between fetching the index-diff and sending deletion confirmation through. """ StatusUpdate('Deleting selected index definitions.') response = self.rpcserver.Send('/api/datastore/index/delete', app_id=self.config.application, payload=definitions.ToYAML()) return datastore_index.ParseIndexDefinitions(response) class VacuumIndexesOperation(IndexOperation): """Provide facilities to request the deletion of datastore indexes.""" def __init__(self, rpcserver, config, force, confirmation_fn=raw_input): """Creates a new VacuumIndexesOperation. Args: rpcserver: The RPC server to use. Should be an instance of HttpRpcServer or TestRpcServer. config: appinfo.AppInfoExternal configuration object. force: True to force deletion of indexes, else False. confirmation_fn: Function used for getting input form user. """ super(VacuumIndexesOperation, self).__init__(rpcserver, config) self.force = force self.confirmation_fn = confirmation_fn def GetConfirmation(self, index): """Get confirmation from user to delete an index. This method will enter an input loop until the user provides a response it is expecting. Valid input is one of three responses: y: Confirm deletion of index. n: Do not delete index. a: Delete all indexes without asking for further confirmation. If the user enters nothing at all, the default action is to skip that index and do not delete. If the user selects 'a', as a side effect, the 'force' flag is set. Args: index: Index to confirm. Returns: True if user enters 'y' or 'a'. False if user enter 'n'. """ while True: print 'This index is no longer defined in your index.yaml file.' print print index.ToYAML() print confirmation = self.confirmation_fn( 'Are you sure you want to delete this index? (N/y/a): ') confirmation = confirmation.strip().lower() if confirmation == 'y': return True elif confirmation == 'n' or not confirmation: return False elif confirmation == 'a': self.force = True return True else: print 'Did not understand your response.' def DoVacuum(self, definitions): """Vacuum indexes in datastore. This method will query the server to determine which indexes are not being used according to the user's local index.yaml file. Once it has made this determination, it confirms with the user which unused indexes should be deleted. Once confirmation for each index is receives, it deletes those indexes. Because another user may in theory delete the same indexes at the same time as the user, there is a potential race condition. In this rare cases, some of the indexes previously confirmed for deletion will not be found. The user is notified which indexes these were. Args: definitions: datastore_index.IndexDefinitions as loaded from users index.yaml file. """ unused_new_indexes, notused_indexes = self.DoDiff(definitions) deletions = datastore_index.IndexDefinitions(indexes=[]) if notused_indexes.indexes is not None: for index in notused_indexes.indexes: if self.force or self.GetConfirmation(index): deletions.indexes.append(index) if deletions.indexes: not_deleted = self.DoDelete(deletions) if not_deleted.indexes: not_deleted_count = len(not_deleted.indexes) if not_deleted_count == 1: warning_message = ('An index was not deleted. 
Most likely this is ' 'because it no longer exists.\n\n') else: warning_message = ('%d indexes were not deleted. Most likely this ' 'is because they no longer exist.\n\n' % not_deleted_count) for index in not_deleted.indexes: warning_message += index.ToYAML() logging.warning(warning_message) class LogsRequester(object): """Provide facilities to export request logs.""" def __init__(self, rpcserver, config, output_file, num_days, append, severity, end, vhost, include_vhost, include_all=None, time_func=time.time): """Constructor. Args: rpcserver: The RPC server to use. Should be an instance of HttpRpcServer or TestRpcServer. config: appinfo.AppInfoExternal configuration object. output_file: Output file name. num_days: Number of days worth of logs to export; 0 for all available. append: True if appending to an existing file. severity: App log severity to request (0-4); None for no app logs. end: date object representing last day of logs to return. vhost: The virtual host of log messages to get. None for all hosts. include_vhost: If true, the virtual host is included in log messages. include_all: If true, we add to the log message everything we know about the request. time_func: Method that return a timestamp representing now (for testing). """ self.rpcserver = rpcserver self.config = config self.output_file = output_file self.append = append self.num_days = num_days self.severity = severity self.vhost = vhost self.include_vhost = include_vhost self.include_all = include_all self.version_id = self.config.version + '.1' self.sentinel = None self.write_mode = 'w' if self.append: self.sentinel = FindSentinel(self.output_file) self.write_mode = 'a' self.skip_until = False now = PacificDate(time_func()) if end < now: self.skip_until = end else: end = now self.valid_dates = None if self.num_days: start = end - datetime.timedelta(self.num_days - 1) self.valid_dates = (start, end) def DownloadLogs(self): """Download the requested logs. This will write the logs to the file designated by self.output_file, or to stdout if the filename is '-'. Multiple roundtrips to the server may be made. """ StatusUpdate('Downloading request logs for %s %s.' % (self.config.application, self.version_id)) tf = tempfile.TemporaryFile() last_offset = None try: while True: try: new_offset = self.RequestLogLines(tf, last_offset) if not new_offset or new_offset == last_offset: break last_offset = new_offset except KeyboardInterrupt: StatusUpdate('Keyboard interrupt; saving data downloaded so far.') break StatusUpdate('Copying request logs to %r.' % self.output_file) if self.output_file == '-': of = sys.stdout else: try: of = open(self.output_file, self.write_mode) except IOError, err: StatusUpdate('Can\'t write %r: %s.' % (self.output_file, err)) sys.exit(1) try: line_count = CopyReversedLines(tf, of) finally: of.flush() if of is not sys.stdout: of.close() finally: tf.close() StatusUpdate('Copied %d records.' % line_count) def RequestLogLines(self, tf, offset): """Make a single roundtrip to the server. Args: tf: Writable binary stream to which the log lines returned by the server are written, stripped of headers, and excluding lines skipped due to self.sentinel or self.valid_dates filtering. offset: Offset string for a continued request; None for the first. Returns: The offset string to be used for the next request, if another request should be issued; or None, if not. 
""" logging.info('Request with offset %r.', offset) kwds = {'app_id': self.config.application, 'version': self.version_id, 'limit': 1000, } if offset: kwds['offset'] = offset if self.severity is not None: kwds['severity'] = str(self.severity) if self.vhost is not None: kwds['vhost'] = str(self.vhost) if self.include_vhost is not None: kwds['include_vhost'] = str(self.include_vhost) if self.include_all is not None: kwds['include_all'] = str(self.include_all) response = self.rpcserver.Send('/api/request_logs', payload=None, **kwds) response = response.replace('\r', '\0') lines = response.splitlines() logging.info('Received %d bytes, %d records.', len(response), len(lines)) offset = None if lines and lines[0].startswith('#'): match = re.match(r'^#\s*next_offset=(\S+)\s*$', lines[0]) del lines[0] if match: offset = match.group(1) if lines and lines[-1].startswith('#'): del lines[-1] valid_dates = self.valid_dates sentinel = self.sentinel skip_until = self.skip_until len_sentinel = None if sentinel: len_sentinel = len(sentinel) for line in lines: if (sentinel and line.startswith(sentinel) and line[len_sentinel : len_sentinel+1] in ('', '\0')): return None linedate = DateOfLogLine(line) if not linedate: continue if skip_until: if linedate > skip_until: continue else: self.skip_until = skip_until = False if valid_dates and not valid_dates[0] <= linedate <= valid_dates[1]: return None tf.write(line + '\n') if not lines: return None return offset def DateOfLogLine(line): """Returns a date object representing the log line's timestamp. Args: line: a log line string. Returns: A date object representing the timestamp or None if parsing fails. """ m = re.compile(r'[^[]+\[(\d+/[A-Za-z]+/\d+):[^\d]*').match(line) if not m: return None try: return datetime.date(*time.strptime(m.group(1), '%d/%b/%Y')[:3]) except ValueError: return None def PacificDate(now): """For a UTC timestamp, return the date in the US/Pacific timezone. Args: now: A posix timestamp giving current UTC time. Returns: A date object representing what day it is in the US/Pacific timezone. """ return datetime.date(*time.gmtime(PacificTime(now))[:3]) def PacificTime(now): """Helper to return the number of seconds between UTC and Pacific time. This is needed to compute today's date in Pacific time (more specifically: Mountain View local time), which is how request logs are reported. (Google servers always report times in Mountain View local time, regardless of where they are physically located.) This takes (post-2006) US DST into account. Pacific time is either 8 hours or 7 hours west of UTC, depending on whether DST is in effect. Since 2007, US DST starts on the Second Sunday in March March, and ends on the first Sunday in November. (Reference: http://aa.usno.navy.mil/faq/docs/daylight_time.php.) Note that the server doesn't report its local time (the HTTP Date header uses UTC), and the client's local time is irrelevant. Args: now: A posix timestamp giving current UTC time. Returns: A pseudo-posix timestamp giving current Pacific time. Passing this through time.gmtime() will produce a tuple in Pacific local time. """ now -= 8*3600 if IsPacificDST(now): now += 3600 return now def IsPacificDST(now): """Helper for PacificTime to decide whether now is Pacific DST (PDT). Args: now: A pseudo-posix timestamp giving current time in PST. Returns: True if now falls within the range of DST, False otherwise. 
""" pst = time.gmtime(now) year = pst[0] assert year >= 2007 begin = calendar.timegm((year, 3, 8, 2, 0, 0, 0, 0, 0)) while time.gmtime(begin).tm_wday != SUNDAY: begin += DAY end = calendar.timegm((year, 11, 1, 2, 0, 0, 0, 0, 0)) while time.gmtime(end).tm_wday != SUNDAY: end += DAY return begin <= now < end def CopyReversedLines(instream, outstream, blocksize=2**16): r"""Copy lines from input stream to output stream in reverse order. As a special feature, null bytes in the input are turned into newlines followed by tabs in the output, but these 'sub-lines' separated by null bytes are not reversed. E.g. If the input is 'A\0B\nC\0D\n', the output is 'C\n\tD\nA\n\tB\n'. Args: instream: A seekable stream open for reading in binary mode. outstream: A stream open for writing; doesn't have to be seekable or binary. blocksize: Optional block size for buffering, for unit testing. Returns: The number of lines copied. """ line_count = 0 instream.seek(0, 2) last_block = instream.tell() // blocksize spillover = '' for iblock in xrange(last_block + 1, -1, -1): instream.seek(iblock * blocksize) data = instream.read(blocksize) lines = data.splitlines(True) lines[-1:] = ''.join(lines[-1:] + [spillover]).splitlines(True) if lines and not lines[-1].endswith('\n'): lines[-1] += '\n' lines.reverse() if lines and iblock > 0: spillover = lines.pop() if lines: line_count += len(lines) data = ''.join(lines).replace('\0', '\n\t') outstream.write(data) return line_count def FindSentinel(filename, blocksize=2**16): """Return the sentinel line from the output file. Args: filename: The filename of the output file. (We'll read this file.) blocksize: Optional block size for buffering, for unit testing. Returns: The contents of the last line in the file that doesn't start with a tab, with its trailing newline stripped; or None if the file couldn't be opened or no such line could be found by inspecting the last 'blocksize' bytes of the file. """ if filename == '-': StatusUpdate('Can\'t combine --append with output to stdout.') sys.exit(2) try: fp = open(filename, 'rb') except IOError, err: StatusUpdate('Append mode disabled: can\'t read %r: %s.' % (filename, err)) return None try: fp.seek(0, 2) fp.seek(max(0, fp.tell() - blocksize)) lines = fp.readlines() del lines[:1] sentinel = None for line in lines: if not line.startswith('\t'): sentinel = line if not sentinel: StatusUpdate('Append mode disabled: can\'t find sentinel in %r.' % filename) return None return sentinel.rstrip('\n') finally: fp.close() class UploadBatcher(object): """Helper to batch file uploads.""" def __init__(self, what, rpcserver, params): """Constructor. Args: what: Either 'file' or 'blob' or 'errorblob' indicating what kind of objects this batcher uploads. Used in messages and URLs. rpcserver: The RPC server. params: A dictionary object containing URL params to add to HTTP requests. """ assert what in ('file', 'blob', 'errorblob'), repr(what) self.what = what self.params = params self.rpcserver = rpcserver self.single_url = '/api/appversion/add' + what self.batch_url = self.single_url + 's' self.batching = True self.batch = [] self.batch_size = 0 def SendBatch(self): """Send the current batch on its way. If successful, resets self.batch and self.batch_size. Raises: HTTPError with code=404 if the server doesn't support batching. """ boundary = 'boundary' parts = [] for path, payload, mime_type in self.batch: while boundary in payload: boundary += '%04x' % random.randint(0, 0xffff) assert len(boundary) < 80, 'Unexpected error, please try again.' 
part = '\n'.join(['', 'X-Appcfg-File: %s' % urllib.quote(path), 'X-Appcfg-Hash: %s' % _Hash(payload), 'Content-Type: %s' % mime_type, 'Content-Length: %d' % len(payload), 'Content-Transfer-Encoding: 8bit', '', payload, ]) parts.append(part) parts.insert(0, 'MIME-Version: 1.0\n' 'Content-Type: multipart/mixed; boundary="%s"\n' '\n' 'This is a message with multiple parts in MIME format.' % boundary) parts.append('--\n') delimiter = '\n--%s' % boundary payload = delimiter.join(parts) logging.info('Uploading batch of %d %ss to %s with boundary="%s".', len(self.batch), self.what, self.batch_url, boundary) self.rpcserver.Send(self.batch_url, payload=payload, content_type='message/rfc822', **self.params) self.batch = [] self.batch_size = 0 def SendSingleFile(self, path, payload, mime_type): """Send a single file on its way.""" logging.info('Uploading %s %s (%s bytes, type=%s) to %s.', self.what, path, len(payload), mime_type, self.single_url) self.rpcserver.Send(self.single_url, payload=payload, content_type=mime_type, path=path, **self.params) def Flush(self): """Flush the current batch. This first attempts to send the batch as a single request; if that fails because the server doesn't support batching, the files are sent one by one, and self.batching is reset to False. At the end, self.batch and self.batch_size are reset. """ if not self.batch: return try: self.SendBatch() except urllib2.HTTPError, err: if err.code != 404: raise logging.info('Old server detected; turning off %s batching.', self.what) self.batching = False for path, payload, mime_type in self.batch: self.SendSingleFile(path, payload, mime_type) self.batch = [] self.batch_size = 0 def AddToBatch(self, path, payload, mime_type): """Batch a file, possibly flushing first, or perhaps upload it directly. Args: path: The name of the file. payload: The contents of the file. mime_type: The MIME Content-type of the file, or None. If mime_type is None, application/octet-stream is substituted. """ if not mime_type: mime_type = 'application/octet-stream' size = len(payload) if size <= MAX_BATCH_FILE_SIZE: if (len(self.batch) >= MAX_BATCH_COUNT or self.batch_size + size > MAX_BATCH_SIZE): self.Flush() if self.batching: logging.info('Adding %s %s (%s bytes, type=%s) to batch.', self.what, path, size, mime_type) self.batch.append((path, payload, mime_type)) self.batch_size += size + BATCH_OVERHEAD return self.SendSingleFile(path, payload, mime_type) def _FormatHash(h): """Return a string representation of a hash. The hash is a sha1 hash. It is computed both for files that need to be pushed to App Engine and for data payloads of requests made to App Engine. Args: h: The hash Returns: The string representation of the hash. """ return '%s_%s_%s_%s_%s' % (h[0:8], h[8:16], h[16:24], h[24:32], h[32:40]) def _Hash(content): """Compute the sha1 hash of the content. Args: content: The data to hash as a string. Returns: The string representation of the hash. """ h = hashlib.sha1(content).hexdigest() return _FormatHash(h) def _HashFromFileHandle(file_handle): """Compute the hash of the content of the file pointed to by file_handle. Args: file_handle: File-like object which provides seek, read and tell. Returns: The string representation of the hash. """ pos = file_handle.tell() content_hash = _Hash(file_handle.read()) file_handle.seek(pos, 0) return content_hash def EnsureDir(path): """Makes sure that a directory exists at the given path. If a directory already exists at that path, nothing is done. 
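# Quick check (illustrative only): _Hash/_FormatHash above render a SHA-1 hex
# digest as five underscore-separated 8-character groups; for an empty payload:
import hashlib

def grouped_sha1(content):
  h = hashlib.sha1(content).hexdigest()
  return '%s_%s_%s_%s_%s' % (h[0:8], h[8:16], h[16:24], h[24:32], h[32:40])

assert grouped_sha1(b'') == 'da39a3ee_5e6b4b0d_3255bfef_95601890_afd80709'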
Otherwise, try to create a directory at that path with os.makedirs. If that fails, propagate the resulting OSError exception. Args: path: The path that you want to refer to a directory. """ try: os.makedirs(path) except OSError, exc: if not (exc.errno == errno.EEXIST and os.path.isdir(path)): raise def DoDownloadApp(rpcserver, out_dir, app_id, app_version): """Downloads the files associated with a particular app version. Args: rpcserver: The RPC server to use to download. out_dir: The directory the files should be downloaded to. app_id: The app ID of the app whose files we want to download. app_version: The version number we want to download. Can be: - None: We'll download the latest default version. - <major>: We'll download the latest minor version. - <major>/<minor>: We'll download that exact version. """ StatusUpdate('Fetching file list...') url_args = {'app_id': app_id} if app_version is not None: url_args['version_match'] = app_version result = rpcserver.Send('/api/files/list', **url_args) StatusUpdate('Fetching files...') lines = result.splitlines() if len(lines) < 1: logging.error('Invalid response from server: empty') return full_version = lines[0] file_lines = lines[1:] current_file_number = 0 num_files = len(file_lines) num_errors = 0 for line in file_lines: parts = line.split('|', 2) if len(parts) != 3: logging.error('Invalid response from server: expecting ' '"<id>|<size>|<path>", found: "%s"\n', line) return current_file_number += 1 file_id, size_str, path = parts try: size = int(size_str) except ValueError: logging.error('Invalid file list entry from server: invalid size: ' '"%s"', size_str) return StatusUpdate('[%d/%d] %s' % (current_file_number, num_files, path)) def TryGet(): """A request to /api/files/get which works with the RetryWithBackoff.""" try: contents = rpcserver.Send('/api/files/get', app_id=app_id, version=full_version, id=file_id) return True, contents except urllib2.HTTPError, exc: if exc.code == 503: return False, exc else: raise def PrintRetryMessage(_, delay): StatusUpdate('Server busy. Will try again in %d seconds.' % delay) success, contents = RetryWithBackoff(TryGet, PrintRetryMessage) if not success: logging.error('Unable to download file "%s".', path) num_errors += 1 continue if len(contents) != size: logging.error('File "%s": server listed as %d bytes but served ' '%d bytes.', path, size, len(contents)) num_errors += 1 full_path = os.path.join(out_dir, path) if os.path.exists(full_path): logging.error('Unable to create file "%s": path conflicts with ' 'an existing file or directory', path) num_errors += 1 continue full_dir = os.path.dirname(full_path) try: EnsureDir(full_dir) except OSError, exc: logging.error('Couldn\'t create directory "%s": %s', full_dir, exc) num_errors += 1 continue try: out_file = open(full_path, 'wb') except IOError, exc: logging.error('Couldn\'t open file "%s": %s', full_path, exc) num_errors += 1 continue try: try: out_file.write(contents) except IOError, exc: logging.error('Couldn\'t write to file "%s": %s', full_path, exc) num_errors += 1 continue finally: out_file.close() if num_errors > 0: logging.error('Number of errors: %d. See output for details.', num_errors) class AppVersionUpload(object): """Provides facilities to upload a new appversion to the hosting service. Attributes: rpcserver: The AbstractRpcServer to use for the upload. config: The AppInfoExternal object derived from the app.yaml file. app_id: The application string from 'config'. version: The version string from 'config', or an alternate version override. 
backend: The backend to update, if any. files: A dictionary of files to upload to the rpcserver, mapping path to hash of the file contents. in_transaction: True iff a transaction with the server has started. An AppVersionUpload can do only one transaction at a time. deployed: True iff the Deploy method has been called. started: True iff the StartServing method has been called. """ def __init__(self, rpcserver, config, version=None, backend=None, error_fh=None): """Creates a new AppVersionUpload. Args: rpcserver: The RPC server to use. Should be an instance of HttpRpcServer or TestRpcServer. config: An AppInfoExternal object that specifies the configuration for this application. version: If specified, overrides the app version specified in config. backend: If specified, indicates the update applies to the given backend. The backend name must match an entry in the backends: stanza. error_fh: Unexpected HTTPErrors are printed to this file handle. """ self.rpcserver = rpcserver self.config = config self.app_id = self.config.application self.backend = backend self.error_fh = error_fh or sys.stderr if version: self.version = version else: self.version = self.config.version self.params = {} if self.app_id: self.params['app_id'] = self.app_id if self.backend: self.params['backend'] = self.backend elif self.version: self.params['version'] = self.version self.files = {} self.all_files = set() self.in_transaction = False self.deployed = False self.started = False self.batching = True self.file_batcher = UploadBatcher('file', self.rpcserver, self.params) self.blob_batcher = UploadBatcher('blob', self.rpcserver, self.params) self.errorblob_batcher = UploadBatcher('errorblob', self.rpcserver, self.params) def Send(self, url, payload=''): """Sends a request to the server, with common params.""" logging.info('Send: %s, params=%s', url, self.params) return self.rpcserver.Send(url, payload=payload, **self.params) def AddFile(self, path, file_handle): """Adds the provided file to the list to be pushed to the server. Args: path: The path the file should be uploaded as. file_handle: A stream containing data to upload. """ assert not self.in_transaction, 'Already in a transaction.' assert file_handle is not None reason = appinfo.ValidFilename(path) if reason: logging.error(reason) return content_hash = _HashFromFileHandle(file_handle) self.files[path] = content_hash self.all_files.add(path) def Describe(self): """Returns a string describing the object being updated.""" result = 'app: %s' % self.app_id if self.backend: result += ', backend: %s' % self.backend elif self.version: result += ', version: %s' % self.version return result def Begin(self): """Begins the transaction, returning a list of files that need uploading. All calls to AddFile must be made before calling Begin(). Returns: A list of pathnames for files that should be uploaded using UploadFile() before Commit() can be called. """ assert not self.in_transaction, 'Already in a transaction.' 
self.Send('/api/appversion/create', payload=self.config.ToYAML()) self.in_transaction = True files_to_clone = [] blobs_to_clone = [] errorblobs = {} for path, content_hash in self.files.iteritems(): match_found = False mime_type = GetMimeTypeIfStaticFile(self.config, path) if mime_type is not None: blobs_to_clone.append((path, content_hash, mime_type)) match_found = True (mime_type, unused_error_code) = LookupErrorBlob(self.config, path) if mime_type is not None: errorblobs[path] = content_hash match_found = True if not match_found: files_to_clone.append((path, content_hash)) files_to_upload = {} def CloneFiles(url, files, file_type): """Sends files to the given url. Args: url: the server URL to use. files: a list of files file_type: the type of the files """ if not files: return StatusUpdate('Cloning %d %s file%s.' % (len(files), file_type, len(files) != 1 and 's' or '')) for i in xrange(0, len(files), MAX_FILES_TO_CLONE): if i > 0 and i % MAX_FILES_TO_CLONE == 0: StatusUpdate('Cloned %d files.' % i) chunk = files[i:min(len(files), i + MAX_FILES_TO_CLONE)] result = self.Send(url, payload=BuildClonePostBody(chunk)) if result: files_to_upload.update(dict( (f, self.files[f]) for f in result.split(LIST_DELIMITER))) CloneFiles('/api/appversion/cloneblobs', blobs_to_clone, 'static') CloneFiles('/api/appversion/clonefiles', files_to_clone, 'application') logging.debug('Files to upload: %s', files_to_upload) for (path, content_hash) in errorblobs.iteritems(): files_to_upload[path] = content_hash self.files = files_to_upload return sorted(files_to_upload.iterkeys()) def UploadFile(self, path, file_handle): """Uploads a file to the hosting service. Must only be called after Begin(). The path provided must be one of those that were returned by Begin(). Args: path: The path the file is being uploaded as. file_handle: A file-like object containing the data to upload. Raises: KeyError: The provided file is not amongst those to be uploaded. """ assert self.in_transaction, 'Begin() must be called before UploadFile().' if path not in self.files: raise KeyError('File \'%s\' is not in the list of files to be uploaded.' % path) del self.files[path] match_found = False mime_type = GetMimeTypeIfStaticFile(self.config, path) payload = file_handle.read() if mime_type is not None: self.blob_batcher.AddToBatch(path, payload, mime_type) match_found = True (mime_type, error_code) = LookupErrorBlob(self.config, path) if mime_type is not None: self.errorblob_batcher.AddToBatch(error_code, payload, mime_type) match_found = True if not match_found: self.file_batcher.AddToBatch(path, payload, None) def Precompile(self): """Handle bytecode precompilation.""" StatusUpdate('Compilation starting.') files = [] if self.config.runtime == 'go': for f in self.all_files: if f.endswith('.go') and not self.config.nobuild_files.match(f): files.append(f) while True: if files: StatusUpdate('Compilation: %d files left.' % len(files)) files = self.PrecompileBatch(files) if not files: break StatusUpdate('Compilation completed.') def PrecompileBatch(self, files): """Precompile a batch of files. Args: files: Either an empty list (for the initial request) or a list of files to be precompiled. Returns: Either an empty list (if no more files need to be precompiled) or a list of files to be precompiled subsequently. 
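# Illustrative only: CloneFiles above walks its list in fixed-size slices of
# MAX_FILES_TO_CLONE entries per request. The slicing pattern in isolation:
def chunks(items, size):
  """Yields consecutive slices of at most `size` items."""
  for i in range(0, len(items), size):
    yield items[i:i + size]

assert list(chunks([1, 2, 3, 4, 5], 2)) == [[1, 2], [3, 4], [5]]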
""" payload = LIST_DELIMITER.join(files) response = self.Send('/api/appversion/precompile', payload=payload) if not response: return [] return response.split(LIST_DELIMITER) def Commit(self): """Commits the transaction, making the new app version available. All the files returned by Begin() must have been uploaded with UploadFile() before Commit() can be called. This tries the new 'deploy' method; if that fails it uses the old 'commit'. Returns: An appinfo.AppInfoSummary if one was returned from the Deploy, None otherwise. Raises: Exception: Some required files were not uploaded. """ assert self.in_transaction, 'Begin() must be called before Commit().' if self.files: raise Exception('Not all required files have been uploaded.') def PrintRetryMessage(_, delay): StatusUpdate('Will check again in %s seconds.' % delay) app_summary = None try: app_summary = self.Deploy() success, unused_contents = RetryWithBackoff( lambda: (self.IsReady(), None), PrintRetryMessage, 1, 2, 60, 20) if not success: logging.warning('Version still not ready to serve, aborting.') raise Exception('Version not ready.') result = self.StartServing() if not result: self.in_transaction = False else: success, unused_contents = RetryWithBackoff( lambda: (self.IsServing(), None), PrintRetryMessage, 1, 1, 1, 60) if not success: logging.warning('Version still not serving, aborting.') raise Exception('Version not ready.') self.in_transaction = False except urllib2.HTTPError, e: if e.code != 404: raise StatusUpdate('Closing update.') self.Send('/api/appversion/commit') self.in_transaction = False return app_summary def Deploy(self): """Deploys the new app version but does not make it default. All the files returned by Begin() must have been uploaded with UploadFile() before Deploy() can be called. Returns: An appinfo.AppInfoSummary if one was returned from the Deploy, None otherwise. Raises: Exception: Some required files were not uploaded. """ assert self.in_transaction, 'Begin() must be called before Deploy().' if self.files: raise Exception('Not all required files have been uploaded.') StatusUpdate('Starting deployment.') result = self.Send('/api/appversion/deploy') self.deployed = True if result: return yaml_object.BuildSingleObject(appinfo.AppInfoSummary, result) else: return None def IsReady(self): """Check if the new app version is ready to serve traffic. Raises: Exception: Deploy has not yet been called. Returns: True if the server returned the app is ready to serve. """ assert self.deployed, 'Deploy() must be called before IsReady().' StatusUpdate('Checking if deployment succeeded.') result = self.Send('/api/appversion/isready') return result == '1' def StartServing(self): """Start serving with the newly created version. Raises: Exception: Deploy has not yet been called. Returns: The response body, as a string. """ assert self.deployed, 'Deploy() must be called before StartServing().' StatusUpdate('Deployment successful.') self.params['willcheckserving'] = '1' result = self.Send('/api/appversion/startserving') del self.params['willcheckserving'] self.started = True return result def IsServing(self): """Check if the new app version is serving. Raises: Exception: Deploy has not yet been called. Returns: True if the deployed app version is serving. """ assert self.started, 'StartServing() must be called before IsServing().' 
StatusUpdate('Checking if updated app version is serving.') result = self.Send('/api/appversion/isserving') return result == '1' def Rollback(self): """Rolls back the transaction if one is in progress.""" if not self.in_transaction: return StatusUpdate('Rolling back the update.') self.Send('/api/appversion/rollback') self.in_transaction = False self.files = {} def DoUpload(self, paths, max_size, openfunc): """Uploads a new appversion with the given config and files to the server. Args: paths: An iterator that yields the relative paths of the files to upload. max_size: The maximum size file to upload. openfunc: A function that takes a path and returns a file-like object. Returns: An appinfo.AppInfoSummary if one was returned from the server, None otherwise. """ logging.info('Reading app configuration.') StatusUpdate('\nStarting update of %s' % self.Describe()) path = '' try: StatusUpdate('Scanning files on local disk.') num_files = 0 for path in paths: file_handle = openfunc(path) try: file_length = GetFileLength(file_handle) if file_length > max_size: logging.error('Ignoring file \'%s\': Too long ' '(max %d bytes, file is %d bytes)', path, max_size, file_length) else: logging.info('Processing file \'%s\'', path) self.AddFile(path, file_handle) finally: file_handle.close() num_files += 1 if num_files % 500 == 0: StatusUpdate('Scanned %d files.' % num_files) except KeyboardInterrupt: logging.info('User interrupted. Aborting.') raise except EnvironmentError, e: logging.error('An error occurred processing file \'%s\': %s. Aborting.', path, e) raise app_summary = None try: missing_files = self.Begin() if missing_files: StatusUpdate('Uploading %d files and blobs.' % len(missing_files)) num_files = 0 for missing_file in missing_files: file_handle = openfunc(missing_file) try: self.UploadFile(missing_file, file_handle) finally: file_handle.close() num_files += 1 if num_files % 500 == 0: StatusUpdate('Processed %d out of %s.' % (num_files, len(missing_files))) self.file_batcher.Flush() self.blob_batcher.Flush() self.errorblob_batcher.Flush() StatusUpdate('Uploaded %d files and blobs' % num_files) if (self.config.derived_file_type and appinfo.PYTHON_PRECOMPILED in self.config.derived_file_type): try: self.Precompile() except urllib2.HTTPError, e: ErrorUpdate('Error %d: --- begin server output ---\n' '%s\n--- end server output ---' % (e.code, e.read().rstrip('\n'))) if e.code == 422 or self.config.runtime == 'go': raise print >>self.error_fh, ( 'Precompilation failed. Your app can still serve but may ' 'have reduced startup performance. You can retry the update ' 'later to retry the precompilation step.') app_summary = self.Commit() StatusUpdate('Completed update of %s' % self.Describe()) except KeyboardInterrupt: logging.info('User interrupted. Aborting.') self.Rollback() raise except urllib2.HTTPError, err: logging.info('HTTP Error (%s)', err) self.Rollback() raise except: logging.exception('An unexpected error occurred. Aborting.') self.Rollback() raise logging.info('Done!') return app_summary def FileIterator(base, skip_files, runtime, separator=os.path.sep): """Walks a directory tree, returning all the files. Follows symlinks. Args: base: The base path to search for files under. skip_files: A regular expression object for files/directories to skip. separator: Path separator used by the running system's platform. runtime: The name of the runtime e.g. "python". If "python27" then .pyc files with matching .py files will be skipped. Yields: Paths of files found, relative to base. 
""" dirs = [''] while dirs: current_dir = dirs.pop() entries = set(os.listdir(os.path.join(base, current_dir))) for entry in sorted(entries): name = os.path.join(current_dir, entry) fullname = os.path.join(base, name) if separator == '\\': name = name.replace('\\', '/') if runtime == 'python27' and not skip_files.match(name): root, extension = os.path.splitext(entry) if extension == '.pyc' and (root + '.py') in entries: logging.warning('Ignoring file \'%s\': Cannot upload both ' '<filename>.py and <filename>.pyc', name) continue if os.path.isfile(fullname): if skip_files.match(name): logging.info('Ignoring file \'%s\': File matches ignore regex.', name) else: yield name elif os.path.isdir(fullname): if skip_files.match(name): logging.info( 'Ignoring directory \'%s\': Directory matches ignore regex.', name) else: dirs.append(name) def GetFileLength(fh): """Returns the length of the file represented by fh. This function is capable of finding the length of any seekable stream, unlike os.fstat, which only works on file streams. Args: fh: The stream to get the length of. Returns: The length of the stream. """ pos = fh.tell() fh.seek(0, 2) length = fh.tell() fh.seek(pos, 0) return length def GetUserAgent(get_version=GetVersionObject, get_platform=appengine_rpc.GetPlatformToken): """Determines the value of the 'User-agent' header to use for HTTP requests. If the 'APPCFG_SDK_NAME' environment variable is present, that will be used as the first product token in the user-agent. Args: get_version: Used for testing. get_platform: Used for testing. Returns: String containing the 'user-agent' header value, which includes the SDK version, the platform information, and the version of Python; e.g., 'appcfg_py/1.0.1 Darwin/9.2.0 Python/2.5.2'. """ product_tokens = [] sdk_name = os.environ.get('APPCFG_SDK_NAME') if sdk_name: product_tokens.append(sdk_name) else: version = get_version() if version is None: release = 'unknown' else: release = version['release'] product_tokens.append('%s/%s' % (SDK_PRODUCT, release)) product_tokens.append(get_platform()) python_version = '.'.join(str(i) for i in sys.version_info) product_tokens.append('Python/%s' % python_version) return ' '.join(product_tokens) def GetSourceName(get_version=GetVersionObject): """Gets the name of this source version.""" version = get_version() if version is None: release = 'unknown' else: release = version['release'] return 'Google-appcfg-%s' % (release,) class AppCfgApp(object): """Singleton class to wrap AppCfg tool functionality. This class is responsible for parsing the command line and executing the desired action on behalf of the user. Processing files and communicating with the server is handled by other classes. Attributes: actions: A dictionary mapping action names to Action objects. action: The Action specified on the command line. parser: An instance of optparse.OptionParser. options: The command line options parsed by 'parser'. argv: The original command line as a list. args: The positional command line args left over after parsing the options. raw_input_fn: Function used for getting raw user input, like email. password_input_fn: Function used for getting user password. error_fh: Unexpected HTTPErrors are printed to this file handle. Attributes for testing: parser_class: The class to use for parsing the command line. Because OptionsParser will exit the program when there is a parse failure, it is nice to subclass OptionsParser and catch the error before exiting. 
""" def __init__(self, argv, parser_class=optparse.OptionParser, rpc_server_class=appengine_rpc.HttpRpcServer, raw_input_fn=raw_input, password_input_fn=getpass.getpass, out_fh=sys.stdout, error_fh=sys.stderr, update_check_class=UpdateCheck, throttle_class=None, opener=open, file_iterator=FileIterator, time_func=time.time, wrap_server_error_message=True): """Initializer. Parses the cmdline and selects the Action to use. Initializes all of the attributes described in the class docstring. Prints help or error messages if there is an error parsing the cmdline. Args: argv: The list of arguments passed to this program. parser_class: Options parser to use for this application. rpc_server_class: RPC server class to use for this application. raw_input_fn: Function used for getting user email. password_input_fn: Function used for getting user password. out_fh: All normal output is printed to this file handle. error_fh: Unexpected HTTPErrors are printed to this file handle. update_check_class: UpdateCheck class (can be replaced for testing). throttle_class: A class to use instead of ThrottledHttpRpcServer (only used in the bulkloader). opener: Function used for opening files. file_iterator: Callable that takes (basepath, skip_files, file_separator) and returns a generator that yields all filenames in the file tree rooted at that path, skipping files that match the skip_files compiled regular expression. time_func: Function which provides the current time (can be replaced for testing). wrap_server_error_message: If true, the error messages from urllib2.HTTPError exceptions in Run() are wrapped with '--- begin server output ---' and '--- end server output ---', otherwise the error message is printed as is. """ self.parser_class = parser_class self.argv = argv self.rpc_server_class = rpc_server_class self.raw_input_fn = raw_input_fn self.password_input_fn = password_input_fn self.out_fh = out_fh self.error_fh = error_fh self.update_check_class = update_check_class self.throttle_class = throttle_class self.time_func = time_func self.wrap_server_error_message = wrap_server_error_message self.parser = self._GetOptionParser() for action in self.actions.itervalues(): action.options(self, self.parser) self.options, self.args = self.parser.parse_args(argv[1:]) if len(self.args) < 1: self._PrintHelpAndExit() if not self.options.allow_any_runtime: if self.options.runtime: if self.options.runtime not in SUPPORTED_RUNTIMES: _PrintErrorAndExit(self.error_fh, '"%s" is not a supported runtime\n' % self.options.runtime) else: appinfo.AppInfoExternal.ATTRIBUTES[appinfo.RUNTIME] = ( '|'.join(SUPPORTED_RUNTIMES)) action = self.args.pop(0) def RaiseParseError(actionname, action): self.parser, self.options = self._MakeSpecificParser(action) error_desc = action.error_desc if not error_desc: error_desc = "Expected a <directory> argument after '%s'." 
% ( actionname.split(' ')[0]) self.parser.error(error_desc) if action == BACKENDS_ACTION: if len(self.args) < 1: RaiseParseError(action, self.actions[BACKENDS_ACTION]) backend_action_first = BACKENDS_ACTION + ' ' + self.args[0] if backend_action_first in self.actions: self.args.pop(0) action = backend_action_first elif len(self.args) > 1: backend_directory_first = BACKENDS_ACTION + ' ' + self.args[1] if backend_directory_first in self.actions: self.args.pop(1) action = backend_directory_first if len(self.args) < 1 or action == BACKENDS_ACTION: RaiseParseError(action, self.actions[action]) if action not in self.actions: self.parser.error("Unknown action: '%s'\n%s" % (action, self.parser.get_description())) self.action = self.actions[action] if not self.action.uses_basepath or self.options.help: self.basepath = None else: if not self.args: RaiseParseError(action, self.action) self.basepath = self.args.pop(0) self.parser, self.options = self._MakeSpecificParser(self.action) if self.options.help: self._PrintHelpAndExit() if self.options.verbose == 2: logging.getLogger().setLevel(logging.INFO) elif self.options.verbose == 3: logging.getLogger().setLevel(logging.DEBUG) global verbosity verbosity = self.options.verbose self.opener = opener self.file_iterator = file_iterator def Run(self): """Executes the requested action. Catches any HTTPErrors raised by the action and prints them to stderr. Returns: 1 on error, 0 if successful. """ try: self.action(self) except urllib2.HTTPError, e: body = e.read() if self.wrap_server_error_message: error_format = ('Error %d: --- begin server output ---\n' '%s\n--- end server output ---') else: error_format = 'Error %d: %s' print >>self.error_fh, (error_format % (e.code, body.rstrip('\n'))) return 1 except yaml_errors.EventListenerError, e: print >>self.error_fh, ('Error parsing yaml file:\n%s' % e) return 1 return 0 def _GetActionDescriptions(self): """Returns a formatted string containing the short_descs for all actions.""" action_names = self.actions.keys() action_names.sort() desc = '' for action_name in action_names: desc += ' %s: %s\n' % (action_name, self.actions[action_name].short_desc) return desc def _GetOptionParser(self): """Creates an OptionParser with generic usage and description strings. Returns: An OptionParser instance. 
""" class Formatter(optparse.IndentedHelpFormatter): """Custom help formatter that does not reformat the description.""" def format_description(self, description): """Very simple formatter.""" return description + '\n' desc = self._GetActionDescriptions() desc = ('Action must be one of:\n%s' 'Use \'help <action>\' for a detailed description.') % desc parser = self.parser_class(usage='%prog [options] <action>', description=desc, formatter=Formatter(), conflict_handler='resolve') parser.add_option('-h', '--help', action='store_true', dest='help', help='Show the help message and exit.') parser.add_option('-q', '--quiet', action='store_const', const=0, dest='verbose', help='Print errors only.') parser.add_option('-v', '--verbose', action='store_const', const=2, dest='verbose', default=1, help='Print info level logs.') parser.add_option('--noisy', action='store_const', const=3, dest='verbose', help='Print all logs.') parser.add_option('-s', '--server', action='store', dest='server', default='appengine.google.com', metavar='SERVER', help='The App Engine server.') parser.add_option('--secure', action='store_true', dest='secure', default=True, help=optparse.SUPPRESS_HELP) parser.add_option('--insecure', action='store_false', dest='secure', help='Use HTTP when communicating with the server.') parser.add_option('-e', '--email', action='store', dest='email', metavar='EMAIL', default=None, help='The username to use. Will prompt if omitted.') parser.add_option('-H', '--host', action='store', dest='host', metavar='HOST', default=None, help='Overrides the Host header sent with all RPCs.') parser.add_option('--no_cookies', action='store_false', dest='save_cookies', default=True, help='Do not save authentication cookies to local disk.') parser.add_option('--skip_sdk_update_check', action='store_true', dest='skip_sdk_update_check', default=False, help='Do not check for SDK updates.') parser.add_option('--passin', action='store_true', dest='passin', default=False, help='Read the login password from stdin.') parser.add_option('-A', '--application', action='store', dest='app_id', help='Override application from app.yaml file.') parser.add_option('-V', '--version', action='store', dest='version', help='Override (major) version from app.yaml file.') parser.add_option('-r', '--runtime', action='store', dest='runtime', help='Override runtime from app.yaml file.') parser.add_option('-R', '--allow_any_runtime', action='store_true', dest='allow_any_runtime', default=False, help='Do not validate the runtime in app.yaml') return parser def _MakeSpecificParser(self, action): """Creates a new parser with documentation specific to 'action'. Args: action: An Action instance to be used when initializing the new parser. Returns: A tuple containing: parser: An instance of OptionsParser customized to 'action'. options: The command line options after re-parsing. """ parser = self._GetOptionParser() parser.set_usage(action.usage) parser.set_description('%s\n%s' % (action.short_desc, action.long_desc)) action.options(self, parser) options, unused_args = parser.parse_args(self.argv[1:]) return parser, options def _PrintHelpAndExit(self, exit_code=2): """Prints the parser's help message and exits the program. Args: exit_code: The integer code to pass to sys.exit(). """ self.parser.print_help() sys.exit(exit_code) def _GetRpcServer(self): """Returns an instance of an AbstractRpcServer. Returns: A new AbstractRpcServer, on which RPC calls can be made. 
""" def GetUserCredentials(): """Prompts the user for a username and password.""" email = self.options.email if email is None: email = self.raw_input_fn('Email: ') password_prompt = 'Password for %s: ' % email if self.options.passin: password = self.raw_input_fn(password_prompt) else: password = self.password_input_fn(password_prompt) return (email, password) StatusUpdate('Host: %s' % self.options.server) if self.options.host and self.options.host == 'localhost': email = self.options.email if email is None: email = 'test@example.com' logging.info('Using debug user %s. Override with --email', email) rpcserver = self.rpc_server_class( self.options.server, lambda: (email, 'password'), GetUserAgent(), GetSourceName(), host_override=self.options.host, save_cookies=self.options.save_cookies, secure=True) rpcserver.authenticated = True return rpcserver if self.options.passin: auth_tries = 1 else: auth_tries = 3 return self.rpc_server_class(self.options.server, GetUserCredentials, GetUserAgent(), GetSourceName(), host_override=self.options.host, save_cookies=self.options.save_cookies, auth_tries=auth_tries, account_type='HOSTED_OR_GOOGLE', secure=self.options.secure) def _FindYaml(self, basepath, file_name): """Find yaml files in application directory. Args: basepath: Base application directory. file_name: Filename without extension to search for. Returns: Path to located yaml file if one exists, else None. """ if not os.path.isdir(basepath): self.parser.error('Not a directory: %s' % basepath) alt_basepath = os.path.join(basepath, "WEB-INF", "appengine-generated") for yaml_basepath in (basepath, alt_basepath): for yaml_file in (file_name + '.yaml', file_name + '.yml'): yaml_path = os.path.join(yaml_basepath, yaml_file) if os.path.isfile(yaml_path): return yaml_path return None def _ParseAppYaml(self, basepath, includes=True): """Parses the app.yaml file. Args: basepath: the directory of the application. includes: if True builtins and includes will be parsed. Returns: An AppInfoExternal object. """ appyaml_filename = self._FindYaml(basepath, 'app') if appyaml_filename is None: self.parser.error('Directory does not contain an app.yaml ' 'configuration file.') fh = self.opener(appyaml_filename, 'r') try: if includes: appyaml = appinfo_includes.Parse(fh, self.opener) else: appyaml = appinfo.LoadSingleAppInfo(fh) finally: fh.close() orig_application = appyaml.application orig_version = appyaml.version if self.options.app_id: appyaml.application = self.options.app_id if self.options.version: appyaml.version = self.options.version if self.options.runtime: appyaml.runtime = self.options.runtime msg = 'Application: %s' % appyaml.application if appyaml.application != orig_application: msg += ' (was: %s)' % orig_application if self.action.function is 'Update': msg += '; version: %s' % appyaml.version if appyaml.version != orig_version: msg += ' (was: %s)' % orig_version StatusUpdate(msg) return appyaml def _ParseYamlFile(self, basepath, basename, parser): """Parses the a yaml file. Args: basepath: the directory of the application. basename: the base name of the file (with the '.yaml' stripped off). parser: the function or method used to parse the file. Returns: A single parsed yaml file or None if the file does not exist. """ file_name = self._FindYaml(basepath, basename) if file_name is not None: fh = self.opener(file_name, 'r') try: defns = parser(fh) finally: fh.close() return defns return None def _ParseBackendsYaml(self, basepath): """Parses the backends.yaml file. 
Args: basepath: the directory of the application. Returns: A BackendsInfoExternal object or None if the file does not exist. """ return self._ParseYamlFile(basepath, 'backends', backendinfo.LoadBackendInfo) def _ParseIndexYaml(self, basepath): """Parses the index.yaml file. Args: basepath: the directory of the application. Returns: A single parsed yaml file or None if the file does not exist. """ return None return self._ParseYamlFile(basepath, 'index', datastore_index.ParseIndexDefinitions) def _ParseCronYaml(self, basepath): """Parses the cron.yaml file. Args: basepath: the directory of the application. Returns: A CronInfoExternal object or None if the file does not exist. """ return None return self._ParseYamlFile(basepath, 'cron', croninfo.LoadSingleCron) def _ParseQueueYaml(self, basepath): """Parses the queue.yaml file. Args: basepath: the directory of the application. Returns: A CronInfoExternal object or None if the file does not exist. """ return None return self._ParseYamlFile(basepath, 'queue', queueinfo.LoadSingleQueue) def _ParseDosYaml(self, basepath): """Parses the dos.yaml file. Args: basepath: the directory of the application. Returns: A DosInfoExternal object or None if the file does not exist. """ return None return self._ParseYamlFile(basepath, 'dos', dosinfo.LoadSingleDos) def Help(self, action=None): """Prints help for a specific action. Args: action: If provided, print help for the action provided. Expects self.args[0], or 'action', to contain the name of the action in question. Exits the program after printing the help message. """ if not action: if len(self.args) > 1: self.args = [' '.join(self.args)] if len(self.args) != 1 or self.args[0] not in self.actions: self.parser.error('Expected a single action argument. ' ' Must be one of:\n' + self._GetActionDescriptions()) action = self.args[0] action = self.actions[action] self.parser, unused_options = self._MakeSpecificParser(action) self._PrintHelpAndExit(exit_code=0) def DownloadApp(self): """Downloads the given app+version.""" if len(self.args) != 1: self.parser.error('\"download_app\" expects one non-option argument, ' 'found ' + str(len(self.args)) + '.') out_dir = self.args[0] app_id = self.options.app_id if app_id is None: self.parser.error('You must specify an app ID via -A or --application.') app_version = self.options.version if os.path.exists(out_dir): if not os.path.isdir(out_dir): self.parser.error('Cannot download to path "%s": ' 'there\'s a file in the way.' % out_dir) elif os.listdir(out_dir): self.parser.error('Cannot download to path "%s": directory already ' 'exists and it isn\'t empty.' % out_dir) rpcserver = self._GetRpcServer() DoDownloadApp(rpcserver, out_dir, app_id, app_version) def UpdateVersion(self, rpcserver, basepath, appyaml, backend=None): """Updates and deploys a new appversion. Args: rpcserver: An AbstractRpcServer instance on which RPC calls can be made. basepath: The root directory of the version to update. appyaml: The AppInfoExternal object parsed from app.yaml backend: The name of the backend to update, if any. Returns: An appinfo.AppInfoSummary if one was returned from the Deploy, None otherwise. 
""" if self.options.precompilation: if not appyaml.derived_file_type: appyaml.derived_file_type = [] if appinfo.PYTHON_PRECOMPILED not in appyaml.derived_file_type: appyaml.derived_file_type.append(appinfo.PYTHON_PRECOMPILED) if self.options.skip_sdk_update_check: logging.info('Skipping update check') else: updatecheck = self.update_check_class(rpcserver, appyaml) updatecheck.CheckForUpdates() appversion = AppVersionUpload(rpcserver, appyaml, self.options.version, backend, self.error_fh) return appversion.DoUpload( self.file_iterator(basepath, appyaml.skip_files, appyaml.runtime), self.options.max_size, lambda path: self.opener(os.path.join(basepath, path), 'rb')) def Update(self): """Updates and deploys a new appversion and global app configs.""" if self.args: self.parser.error('Expected a single <directory> argument.') appyaml = self._ParseAppYaml(self.basepath, includes=True) rpcserver = self._GetRpcServer() self.UpdateVersion(rpcserver, self.basepath, appyaml) if self.options.backends: self.BackendsUpdate() index_defs = self._ParseIndexYaml(self.basepath) if index_defs: index_upload = IndexDefinitionUpload(rpcserver, appyaml, index_defs) try: index_upload.DoUpload() except urllib2.HTTPError, e: ErrorUpdate('Error %d: --- begin server output ---\n' '%s\n--- end server output ---' % (e.code, e.read().rstrip('\n'))) print >> self.error_fh, ( 'Your app was updated, but there was an error updating your ' 'indexes. Please retry later with appcfg.py update_indexes.') cron_yaml = self._ParseCronYaml(self.basepath) if cron_yaml: cron_upload = CronEntryUpload(rpcserver, appyaml, cron_yaml) cron_upload.DoUpload() queue_yaml = self._ParseQueueYaml(self.basepath) if queue_yaml: queue_upload = QueueEntryUpload(rpcserver, appyaml, queue_yaml) queue_upload.DoUpload() dos_yaml = self._ParseDosYaml(self.basepath) if dos_yaml: dos_upload = DosEntryUpload(rpcserver, appyaml, dos_yaml) dos_upload.DoUpload() def _UpdateOptions(self, parser): """Adds update-specific options to 'parser'. Args: parser: An instance of OptionsParser. """ parser.add_option('-S', '--max_size', type='int', dest='max_size', default=32000000, metavar='SIZE', help='Maximum size of a file to upload.') parser.add_option('--no_precompilation', action='store_false', dest='precompilation', default=True, help='Disable automatic Python precompilation.') parser.add_option('--backends', action='store_true', dest='backends', default=False, help='Update backends when performing appcfg update.') def VacuumIndexes(self): """Deletes unused indexes.""" if self.args: self.parser.error('Expected a single <directory> argument.') appyaml = self._ParseAppYaml(self.basepath) index_defs = self._ParseIndexYaml(self.basepath) if index_defs is None: index_defs = datastore_index.IndexDefinitions() rpcserver = self._GetRpcServer() vacuum = VacuumIndexesOperation(rpcserver, appyaml, self.options.force_delete) vacuum.DoVacuum(index_defs) def _VacuumIndexesOptions(self, parser): """Adds vacuum_indexes-specific options to 'parser'. Args: parser: An instance of OptionsParser. 
""" parser.add_option('-f', '--force', action='store_true', dest='force_delete', default=False, help='Force deletion without being prompted.') def UpdateCron(self): """Updates any new or changed cron definitions.""" if self.args: self.parser.error('Expected a single <directory> argument.') appyaml = self._ParseAppYaml(self.basepath) rpcserver = self._GetRpcServer() cron_yaml = self._ParseCronYaml(self.basepath) if cron_yaml: cron_upload = CronEntryUpload(rpcserver, appyaml, cron_yaml) cron_upload.DoUpload() def UpdateIndexes(self): """Updates indexes.""" if self.args: self.parser.error('Expected a single <directory> argument.') appyaml = self._ParseAppYaml(self.basepath) rpcserver = self._GetRpcServer() index_defs = self._ParseIndexYaml(self.basepath) if index_defs: index_upload = IndexDefinitionUpload(rpcserver, appyaml, index_defs) index_upload.DoUpload() def UpdateQueues(self): """Updates any new or changed task queue definitions.""" if self.args: self.parser.error('Expected a single <directory> argument.') appyaml = self._ParseAppYaml(self.basepath) rpcserver = self._GetRpcServer() queue_yaml = self._ParseQueueYaml(self.basepath) if queue_yaml: queue_upload = QueueEntryUpload(rpcserver, appyaml, queue_yaml) queue_upload.DoUpload() def UpdateDos(self): """Updates any new or changed dos definitions.""" if self.args: self.parser.error('Expected a single <directory> argument.') appyaml = self._ParseAppYaml(self.basepath) rpcserver = self._GetRpcServer() dos_yaml = self._ParseDosYaml(self.basepath) if dos_yaml: dos_upload = DosEntryUpload(rpcserver, appyaml, dos_yaml) dos_upload.DoUpload() def BackendsAction(self): """Placeholder; we never expect this action to be invoked.""" pass def BackendsYamlCheck(self, appyaml, backend=None): """Check the backends.yaml file is sane and which backends to update.""" if appyaml.backends: self.parser.error('Backends are not allowed in app.yaml.') backends_yaml = self._ParseBackendsYaml(self.basepath) appyaml.backends = backends_yaml.backends if not appyaml.backends: self.parser.error('No backends found in backends.yaml.') backends = [] for backend_entry in appyaml.backends: entry = backendinfo.LoadBackendEntry(backend_entry.ToYAML()) if entry.name in backends: self.parser.error('Duplicate entry for backend: %s.' % entry.name) else: backends.append(entry.name) backends_to_update = [] if backend: if backend in backends: backends_to_update = [backend] else: self.parser.error("Backend '%s' not found in backends.yaml." 
% backend) else: backends_to_update = backends return backends_to_update def BackendsUpdate(self): """Updates a backend.""" self.backend = None if len(self.args) == 1: self.backend = self.args[0] elif len(self.args) > 1: self.parser.error('Expected an optional <backend> argument.') appyaml = self._ParseAppYaml(self.basepath) rpcserver = self._GetRpcServer() backends_to_update = self.BackendsYamlCheck(appyaml, self.backend) for backend in backends_to_update: self.UpdateVersion(rpcserver, self.basepath, appyaml, backend) def BackendsList(self): """Lists all backends for an app.""" if self.args: self.parser.error('Expected no arguments.') appyaml = self._ParseAppYaml(self.basepath) rpcserver = self._GetRpcServer() response = rpcserver.Send('/api/backends/list', app_id=appyaml.application) print >> self.out_fh, response def BackendsRollback(self): """Does a rollback of an existing transaction on this backend.""" if len(self.args) != 1: self.parser.error('Expected a single <backend> argument.') self._Rollback(self.args[0]) def BackendsStart(self): """Starts a backend.""" if len(self.args) != 1: self.parser.error('Expected a single <backend> argument.') backend = self.args[0] appyaml = self._ParseAppYaml(self.basepath) rpcserver = self._GetRpcServer() response = rpcserver.Send('/api/backends/start', app_id=appyaml.application, backend=backend) print >> self.out_fh, response def BackendsStop(self): """Stops a backend.""" if len(self.args) != 1: self.parser.error('Expected a single <backend> argument.') backend = self.args[0] appyaml = self._ParseAppYaml(self.basepath) rpcserver = self._GetRpcServer() response = rpcserver.Send('/api/backends/stop', app_id=appyaml.application, backend=backend) print >> self.out_fh, response def BackendsDelete(self): """Deletes a backend.""" if len(self.args) != 1: self.parser.error('Expected a single <backend> argument.') backend = self.args[0] appyaml = self._ParseAppYaml(self.basepath) rpcserver = self._GetRpcServer() response = rpcserver.Send('/api/backends/delete', app_id=appyaml.application, backend=backend) print >> self.out_fh, response def BackendsConfigure(self): """Changes the configuration of an existing backend.""" if len(self.args) != 1: self.parser.error('Expected a single <backend> argument.') backend = self.args[0] appyaml = self._ParseAppYaml(self.basepath) backends_yaml = self._ParseBackendsYaml(self.basepath) rpcserver = self._GetRpcServer() response = rpcserver.Send('/api/backends/configure', app_id=appyaml.application, backend=backend, payload=backends_yaml.ToYAML()) print >> self.out_fh, response def Rollback(self): """Does a rollback of an existing transaction for this app version.""" if self.args: self.parser.error('Expected a single <directory> argument.') self._Rollback() def _Rollback(self, backend=None): """Does a rollback of an existing transaction. Args: backend: name of a backend to rollback, or None If a backend is specified the rollback will affect only that backend, if no backend is specified the rollback will affect the current app version. 
""" appyaml = self._ParseAppYaml(self.basepath) appversion = AppVersionUpload(self._GetRpcServer(), appyaml, self.options.version, backend) appversion.in_transaction = True appversion.Rollback() def SetDefaultVersion(self): """Sets the default version.""" if self.args: self.parser.error('Expected a single <directory> argument.') appyaml = self._ParseAppYaml(self.basepath) version_setter = DefaultVersionSet(self._GetRpcServer(), appyaml) version_setter.SetVersion() def RequestLogs(self): """Write request logs to a file.""" if len(self.args) != 1: self.parser.error( 'Expected a <directory> argument and an <output_file> argument.') if (self.options.severity is not None and not 0 <= self.options.severity <= MAX_LOG_LEVEL): self.parser.error( 'Severity range is 0 (DEBUG) through %s (CRITICAL).' % MAX_LOG_LEVEL) if self.options.num_days is None: self.options.num_days = int(not self.options.append) try: end_date = self._ParseEndDate(self.options.end_date) except (TypeError, ValueError): self.parser.error('End date must be in the format YYYY-MM-DD.') rpcserver = self._GetRpcServer() appyaml = self._ParseAppYaml(self.basepath) logs_requester = LogsRequester(rpcserver, appyaml, self.args[0], self.options.num_days, self.options.append, self.options.severity, end_date, self.options.vhost, self.options.include_vhost, self.options.include_all, time_func=self.time_func) logs_requester.DownloadLogs() @staticmethod def _ParseEndDate(date, time_func=time.time): """Translates an ISO 8601 date to a date object. Args: date: A date string as YYYY-MM-DD. time_func: time.time() function for testing. Returns: A date object representing the last day of logs to get. If no date is given, returns today in the US/Pacific timezone. """ if not date: return PacificDate(time_func()) return datetime.date(*[int(i) for i in date.split('-')]) def _RequestLogsOptions(self, parser): """Adds request_logs-specific options to 'parser'. Args: parser: An instance of OptionsParser. """ parser.add_option('-n', '--num_days', type='int', dest='num_days', action='store', default=None, help='Number of days worth of log data to get. ' 'The cut-off point is midnight US/Pacific. ' 'Use 0 to get all available logs. ' 'Default is 1, unless --append is also given; ' 'then the default is 0.') parser.add_option('-a', '--append', dest='append', action='store_true', default=False, help='Append to existing file.') parser.add_option('--severity', type='int', dest='severity', action='store', default=None, help='Severity of app-level log messages to get. ' 'The range is 0 (DEBUG) through 4 (CRITICAL). ' 'If omitted, only request logs are returned.') parser.add_option('--vhost', type='string', dest='vhost', action='store', default=None, help='The virtual host of log messages to get. ' 'If omitted, all log messages are returned.') parser.add_option('--include_vhost', dest='include_vhost', action='store_true', default=False, help='Include virtual host in log messages.') parser.add_option('--include_all', dest='include_all', action='store_true', default=None, help='Include everything in log messages.') parser.add_option('--end_date', dest='end_date', action='store', default='', help='End date (as YYYY-MM-DD) of period for log data. ' 'Defaults to today.') def CronInfo(self, now=None, output=sys.stdout): """Displays information about cron definitions. Args: now: used for testing. output: Used for testing. 
""" if self.args: self.parser.error('Expected a single <directory> argument.') if now is None: now = datetime.datetime.now() cron_yaml = self._ParseCronYaml(self.basepath) if cron_yaml and cron_yaml.cron: for entry in cron_yaml.cron: description = entry.description if not description: description = '<no description>' print >>output, '\n%s:\nURL: %s\nSchedule: %s' % (description, entry.url, entry.schedule) schedule = groctimespecification.GrocTimeSpecification(entry.schedule) matches = schedule.GetMatches(now, self.options.num_runs) for match in matches: print >>output, '%s, %s from now' % ( match.strftime('%Y-%m-%d %H:%M:%S'), match - now) def _CronInfoOptions(self, parser): """Adds cron_info-specific options to 'parser'. Args: parser: An instance of OptionsParser. """ parser.add_option('-n', '--num_runs', type='int', dest='num_runs', action='store', default=5, help='Number of runs of each cron job to display' 'Default is 5') def _CheckRequiredLoadOptions(self): """Checks that upload/download options are present.""" for option in ['filename']: if getattr(self.options, option) is None: self.parser.error('Option \'%s\' is required.' % option) if not self.options.url: self.parser.error('You must have google.appengine.ext.remote_api.handler ' 'assigned to an endpoint in app.yaml, or provide ' 'the url of the handler via the \'url\' option.') def InferRemoteApiUrl(self, appyaml): """Uses app.yaml to determine the remote_api endpoint. Args: appyaml: A parsed app.yaml file. Returns: The url of the remote_api endpoint as a string, or None """ handlers = appyaml.handlers handler_suffix = 'remote_api/handler.py' app_id = appyaml.application for handler in handlers: if hasattr(handler, 'script') and handler.script: if handler.script.endswith(handler_suffix): server = self.options.server url = handler.url if url.endswith('(/.*)?'): url = url[:-6] if server == 'appengine.google.com': return 'http://%s.appspot.com%s' % (app_id, url) else: match = re.match(PREFIXED_BY_ADMIN_CONSOLE_RE, server) if match: return 'http://%s%s%s' % (app_id, match.group(1), url) else: return 'http://%s%s' % (server, url) return None def RunBulkloader(self, arg_dict): """Invokes the bulkloader with the given keyword arguments. Args: arg_dict: Dictionary of arguments to pass to bulkloader.Run(). 
""" try: import sqlite3 except ImportError: logging.error('upload_data action requires SQLite3 and the python ' 'sqlite3 module (included in python since 2.5).') sys.exit(1) sys.exit(bulkloader.Run(arg_dict)) def _SetupLoad(self): """Performs common verification and set up for upload and download.""" if len(self.args) != 1 and not self.options.url: self.parser.error('Expected either --url or a single <directory> ' 'argument.') if len(self.args) == 1: self.basepath = self.args[0] appyaml = self._ParseAppYaml(self.basepath, includes=True) self.options.app_id = appyaml.application if not self.options.url: url = self.InferRemoteApiUrl(appyaml) if url is not None: self.options.url = url self._CheckRequiredLoadOptions() if self.options.batch_size < 1: self.parser.error('batch_size must be 1 or larger.') if verbosity == 1: logging.getLogger().setLevel(logging.INFO) self.options.debug = False else: logging.getLogger().setLevel(logging.DEBUG) self.options.debug = True def _MakeLoaderArgs(self): args = dict([(arg_name, getattr(self.options, arg_name, None)) for arg_name in ( 'url', 'filename', 'batch_size', 'kind', 'num_threads', 'bandwidth_limit', 'rps_limit', 'http_limit', 'db_filename', 'config_file', 'auth_domain', 'has_header', 'loader_opts', 'log_file', 'passin', 'email', 'debug', 'exporter_opts', 'mapper_opts', 'result_db_filename', 'mapper_opts', 'dry_run', 'dump', 'restore', 'namespace', 'create_config', )]) args['application'] = self.options.app_id args['throttle_class'] = self.throttle_class return args def PerformDownload(self, run_fn=None): """Performs a datastore download via the bulkloader. Args: run_fn: Function to invoke the bulkloader, used for testing. """ if run_fn is None: run_fn = self.RunBulkloader self._SetupLoad() StatusUpdate('Downloading data records.') args = self._MakeLoaderArgs() args['download'] = bool(args['config_file']) args['has_header'] = False args['map'] = False args['dump'] = not args['config_file'] args['restore'] = False args['create_config'] = False run_fn(args) def PerformUpload(self, run_fn=None): """Performs a datastore upload via the bulkloader. Args: run_fn: Function to invoke the bulkloader, used for testing. """ if run_fn is None: run_fn = self.RunBulkloader self._SetupLoad() StatusUpdate('Uploading data records.') args = self._MakeLoaderArgs() args['download'] = False args['map'] = False args['dump'] = False args['restore'] = not args['config_file'] args['create_config'] = False run_fn(args) def CreateBulkloadConfig(self, run_fn=None): """Create a bulkloader config via the bulkloader wizard. Args: run_fn: Function to invoke the bulkloader, used for testing. """ if run_fn is None: run_fn = self.RunBulkloader self._SetupLoad() StatusUpdate('Creating bulkloader configuration.') args = self._MakeLoaderArgs() args['download'] = False args['has_header'] = False args['map'] = False args['dump'] = False args['restore'] = False args['create_config'] = True run_fn(args) def _PerformLoadOptions(self, parser): """Adds options common to 'upload_data' and 'download_data'. Args: parser: An instance of OptionsParser. 
""" parser.add_option('--url', type='string', dest='url', action='store', help='The location of the remote_api endpoint.') parser.add_option('--batch_size', type='int', dest='batch_size', action='store', default=10, help='Number of records to post in each request.') parser.add_option('--bandwidth_limit', type='int', dest='bandwidth_limit', action='store', default=250000, help='The maximum bytes/second bandwidth for transfers.') parser.add_option('--rps_limit', type='int', dest='rps_limit', action='store', default=20, help='The maximum records/second for transfers.') parser.add_option('--http_limit', type='int', dest='http_limit', action='store', default=8, help='The maximum requests/second for transfers.') parser.add_option('--db_filename', type='string', dest='db_filename', action='store', help='Name of the progress database file.') parser.add_option('--auth_domain', type='string', dest='auth_domain', action='store', default='gmail.com', help='The name of the authorization domain to use.') parser.add_option('--log_file', type='string', dest='log_file', help='File to write bulkloader logs. If not supplied ' 'then a new log file will be created, named: ' 'bulkloader-log-TIMESTAMP.') parser.add_option('--dry_run', action='store_true', dest='dry_run', default=False, help='Do not execute any remote_api calls') parser.add_option('--namespace', type='string', dest='namespace', action='store', default='', help='Namespace to use when accessing datastore.') parser.add_option('--num_threads', type='int', dest='num_threads', action='store', default=10, help='Number of threads to transfer records with.') def _PerformUploadOptions(self, parser): """Adds 'upload_data' specific options to the 'parser' passed in. Args: parser: An instance of OptionsParser. """ self._PerformLoadOptions(parser) parser.add_option('--filename', type='string', dest='filename', action='store', help='The name of the file containing the input data.' ' (Required)') parser.add_option('--kind', type='string', dest='kind', action='store', help='The kind of the entities to store.') parser.add_option('--has_header', dest='has_header', action='store_true', default=False, help='Whether the first line of the input file should be' ' skipped') parser.add_option('--loader_opts', type='string', dest='loader_opts', help='A string to pass to the Loader.initialize method.') parser.add_option('--config_file', type='string', dest='config_file', action='store', help='Name of the configuration file.') def _PerformDownloadOptions(self, parser): """Adds 'download_data' specific options to the 'parser' passed in. Args: parser: An instance of OptionsParser. """ self._PerformLoadOptions(parser) parser.add_option('--filename', type='string', dest='filename', action='store', help='The name of the file where output data is to be' ' written. (Required)') parser.add_option('--kind', type='string', dest='kind', action='store', help='The kind of the entities to retrieve.') parser.add_option('--exporter_opts', type='string', dest='exporter_opts', help='A string to pass to the Exporter.initialize method.' ) parser.add_option('--result_db_filename', type='string', dest='result_db_filename', action='store', help='Database to write entities to for download.') parser.add_option('--config_file', type='string', dest='config_file', action='store', help='Name of the configuration file.') def _CreateBulkloadConfigOptions(self, parser): """Adds 'download_data' specific options to the 'parser' passed in. Args: parser: An instance of OptionsParser. 
""" self._PerformLoadOptions(parser) parser.add_option('--filename', type='string', dest='filename', action='store', help='The name of the file where the generated template' ' is to be written. (Required)') def ResourceLimitsInfo(self, output=None): """Outputs the current resource limits.""" resource_limits = GetResourceLimits(self._GetRpcServer()) for attr_name in sorted(resource_limits): print >>output, '%s: %s' % (attr_name, resource_limits[attr_name]) class Action(object): """Contains information about a command line action. Attributes: function: The name of a function defined on AppCfg or its subclasses that will perform the appropriate action. usage: A command line usage string. short_desc: A one-line description of the action. long_desc: A detailed description of the action. Whitespace and formatting will be preserved. error_desc: An error message to display when the incorrect arguments are given. options: A function that will add extra options to a given OptionParser object. uses_basepath: Does the action use a basepath/app-directory (and hence app.yaml). """ def __init__(self, function, usage, short_desc, long_desc='', error_desc=None, options=lambda obj, parser: None, uses_basepath=True): """Initializer for the class attributes.""" self.function = function self.usage = usage self.short_desc = short_desc self.long_desc = long_desc self.error_desc = error_desc self.options = options self.uses_basepath = uses_basepath def __call__(self, appcfg): """Invoke this Action on the specified AppCfg. This calls the function of the appropriate name on AppCfg, and respects polymophic overrides. Args: appcfg: The appcfg to use. Returns: The result of the function call. """ method = getattr(appcfg, self.function) return method() actions = { 'help': Action( function='Help', usage='%prog help <action>', short_desc='Print help for a specific action.', uses_basepath=False), 'update': Action( function='Update', usage='%prog [options] update <directory> [version]', options=_UpdateOptions, short_desc='Create or update an app version.', long_desc=""" Specify a directory that contains all of the files required by the app, and appcfg.py will create/update the app version referenced in the app.yaml file at the top level of that directory. appcfg.py will follow symlinks and recursively upload all files to the server. Temporary or source control files (e.g. foo~, .svn/*) will be skipped."""), 'download_app': Action( function='DownloadApp', usage='%prog [options] download_app -A app_id [ -V version ]' ' <out-dir>', short_desc='Download a previously-uploaded app.', long_desc=""" Download a previously-uploaded app to the specified directory. The app ID is specified by the \"-A\" option. 
The optional version is specified by the \"-V\" option.""", uses_basepath=False), 'update_cron': Action( function='UpdateCron', usage='%prog [options] update_cron <directory>', short_desc='Update application cron definitions.', long_desc=""" The 'update_cron' command will update any new, removed or changed cron definitions from the optional cron.yaml file."""), 'update_indexes': Action( function='UpdateIndexes', usage='%prog [options] update_indexes <directory>', short_desc='Update application indexes.', long_desc=""" The 'update_indexes' command will add additional indexes which are not currently in production as well as restart any indexes that were not completed."""), 'update_queues': Action( function='UpdateQueues', usage='%prog [options] update_queues <directory>', short_desc='Update application task queue definitions.', long_desc=""" The 'update_queue' command will update any new, removed or changed task queue definitions from the optional queue.yaml file."""), 'update_dos': Action( function='UpdateDos', usage='%prog [options] update_dos <directory>', short_desc='Update application dos definitions.', long_desc=""" The 'update_dos' command will update any new, removed or changed dos definitions from the optional dos.yaml file."""), 'backends': Action( function='BackendsAction', usage='%prog [options] backends <directory> <action>', short_desc='Perform a backend action.', long_desc=""" The 'backends' command will perform a backends action.""", error_desc="""\ Expected a <directory> and <action> argument."""), 'backends list': Action( function='BackendsList', usage='%prog [options] backends <directory> list', short_desc='List all backends configured for the app.', long_desc=""" The 'backends list' command will list all backends configured for the app."""), 'backends update': Action( function='BackendsUpdate', usage='%prog [options] backends <directory> update [<backend>]', options=_UpdateOptions, short_desc='Update one or more backends.', long_desc=""" The 'backends update' command updates one or more backends. This command updates backend configuration settings and deploys new code to the server. Any existing instances will stop and be restarted. Updates all backends, or a single backend if the <backend> argument is provided."""), 'backends rollback': Action( function='BackendsRollback', usage='%prog [options] backends <directory> rollback <backend>', short_desc='Roll back an update of a backend.', long_desc=""" The 'backends update' command requires a server-side transaction. 
Use 'backends rollback' if you experience an error during 'backends update' and want to start the update over again."""), 'backends start': Action( function='BackendsStart', usage='%prog [options] backends <directory> start <backend>', short_desc='Start a backend.', long_desc=""" The 'backends start' command will put a backend into the START state."""), 'backends stop': Action( function='BackendsStop', usage='%prog [options] backends <directory> stop <backend>', short_desc='Stop a backend.', long_desc=""" The 'backends start' command will put a backend into the STOP state."""), 'backends delete': Action( function='BackendsDelete', usage='%prog [options] backends <directory> delete <backend>', short_desc='Delete a backend.', long_desc=""" The 'backends delete' command will delete a backend."""), 'backends configure': Action( function='BackendsConfigure', usage='%prog [options] backends <directory> configure <backend>', short_desc='Reconfigure a backend without stopping it.', long_desc=""" The 'backends configure' command performs an online update of a backend, without stopping instances that are currently running. No code or handlers are updated, only certain configuration settings specified in backends.yaml. Valid settings are: instances, options: public, and options: failfast."""), 'vacuum_indexes': Action( function='VacuumIndexes', usage='%prog [options] vacuum_indexes <directory>', options=_VacuumIndexesOptions, short_desc='Delete unused indexes from application.', long_desc=""" The 'vacuum_indexes' command will help clean up indexes which are no longer in use. It does this by comparing the local index configuration with indexes that are actually defined on the server. If any indexes on the server do not exist in the index configuration file, the user is given the option to delete them."""), 'rollback': Action( function='Rollback', usage='%prog [options] rollback <directory>', short_desc='Rollback an in-progress update.', long_desc=""" The 'update' command requires a server-side transaction. Use 'rollback' if you experience an error during 'update' and want to begin a new update transaction."""), 'request_logs': Action( function='RequestLogs', usage='%prog [options] request_logs <directory> <output_file>', options=_RequestLogsOptions, short_desc='Write request logs in Apache common log format.', long_desc=""" The 'request_logs' command exports the request logs from your application to a file. It will write Apache common log format records ordered chronologically. 
If output file is '-' stdout will be written.""", error_desc="""\ Expected a <directory> and <output_file> arguments."""), 'cron_info': Action( function='CronInfo', usage='%prog [options] cron_info <directory>', options=_CronInfoOptions, short_desc='Display information about cron jobs.', long_desc=""" The 'cron_info' command will display the next 'number' runs (default 5) for each cron job defined in the cron.yaml file."""), 'upload_data': Action( function='PerformUpload', usage='%prog [options] upload_data <directory>', options=_PerformUploadOptions, short_desc='Upload data records to datastore.', long_desc=""" The 'upload_data' command translates input records into datastore entities and uploads them into your application's datastore.""", uses_basepath=False), 'download_data': Action( function='PerformDownload', usage='%prog [options] download_data <directory>', options=_PerformDownloadOptions, short_desc='Download entities from datastore.', long_desc=""" The 'download_data' command downloads datastore entities and writes them to file as CSV or developer defined format.""", uses_basepath=False), 'create_bulkloader_config': Action( function='CreateBulkloadConfig', usage='%prog [options] create_bulkload_config <directory>', options=_CreateBulkloadConfigOptions, short_desc='Create a bulkloader.yaml from a running application.', long_desc=""" The 'create_bulkloader_config' command creates a bulkloader.yaml configuration template for use with upload_data or download_data.""", uses_basepath=False), 'set_default_version': Action( function='SetDefaultVersion', usage='%prog [options] set_default_version <directory>', short_desc='Set the default (serving) version.', long_desc=""" The 'set_default_version' command sets the default (serving) version of the app. Defaults to using the version specified in app.yaml; use the --version flag to override this."""), 'resource_limits_info': Action( function='ResourceLimitsInfo', usage='%prog [options] resource_limits_info <directory>', short_desc='Get the resource limits.', long_desc=""" The 'resource_limits_info' command prints the current resource limits that are enforced.""", uses_basepath=False), } def main(argv): logging.basicConfig(format=('%(asctime)s %(levelname)s %(filename)s:' '%(lineno)s %(message)s ')) try: result = AppCfgApp(argv).Run() if result: sys.exit(result) except KeyboardInterrupt: StatusUpdate('Interrupted.') sys.exit(1) if __name__ == '__main__': main(sys.argv)
JerryXia/fastgoagent
goagent/server/uploader/google/appengine/tools/appcfg.py
Python
mit
127,429
[ "VisIt" ]
2eace6825a1e1d3b52f80da4da18d69dba608ffa222bedb3320d0d48721fb996
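The appcfg.py source above dispatches every command-line action through an Action object whose __call__ looks the named method up on the AppCfg instance with getattr, so subclass overrides are respected. The following is a minimal, self-contained sketch of that dispatch-table pattern; the ToolApp class and its two methods are hypothetical stand-ins for illustration, not part of the real appcfg.py.

class Action(object):
    """Sketch of the dispatch-table pattern used by appcfg.py above (illustrative)."""

    def __init__(self, function, short_desc=''):
        self.function = function      # name of a method on the app object
        self.short_desc = short_desc

    def __call__(self, app):
        # Resolve the method at call time so subclass overrides take effect.
        return getattr(app, self.function)()


class ToolApp(object):
    """Hypothetical stand-in for AppCfgApp; not the real class."""

    def Update(self):
        return 'updating application...'

    def Rollback(self):
        return 'rolling back in-progress update...'


actions = {
    'update': Action('Update', short_desc='Create or update an app version.'),
    'rollback': Action('Rollback', short_desc='Roll back an in-progress update.'),
}

app = ToolApp()
print(actions['update'](app))    # -> updating application...
print(actions['rollback'](app))  # -> rolling back in-progress update...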
# This file is part of cclib (http://cclib.github.io), a library for parsing # and interpreting the results of computational chemistry packages. # # Copyright (C) 2015, the cclib development team # # The library is free software, distributed under the terms of # the GNU Lesser General Public version 2.1 or later. You should have # received a copy of the license along with cclib. You can also access # the full license online at http://www.gnu.org/copyleft/lgpl.html. """Unit tests for parser logfileparser module.""" import io import os import sys import unittest # The structure of urllib changed in Python3. try: from urllib.request import urlopen except ImportError: from urllib import urlopen import numpy import cclib __filedir__ = os.path.dirname(__file__) __filepath__ = os.path.realpath(__filedir__) __datadir__ = os.path.join(__filepath__, "..", "..") class FileWrapperTest(unittest.TestCase): def test_file_seek(self): """Can we seek anywhere in a file object?""" fpath = os.path.join(__datadir__,"data/ADF/basicADF2007.01/dvb_gopt.adfout") with open(fpath, 'r') as fobject: wrapper = cclib.parser.logfileparser.FileWrapper(fobject) wrapper.seek(0, 0) self.assertEqual(wrapper.pos, 0) wrapper.seek(10, 0) self.assertEqual(wrapper.pos, 10) wrapper.seek(0, 2) self.assertEqual(wrapper.pos, wrapper.size) def test_url_seek(self): """Can we seek only to the end of an url stream?""" url = "https://raw.githubusercontent.com/cclib/cclib/master/data/ADF/basicADF2007.01/dvb_gopt.adfout" stream = urlopen(url) wrapper = cclib.parser.logfileparser.FileWrapper(stream) # Unfortunately, the behavior of this wrapper differs between Python 2 and 3, # so we need to diverge the assertions. We should try to keep the code as # consistent as possible, but the Errors raised are actually different. if sys.version_info[0] == "2": wrapper.seek(0, 2) self.assertEqual(wrapper.pos, wrapper.size) self.assertRaises(AttributeError, wrapper.seek, 0, 0) self.assertRaises(AttributeError, wrapper.seek, 0, 1) elif sys.version_info[0] == "3": wrapper.seek(0, 2) self.assertEqual(wrapper.pos, wrapper.size) self.assertRaises(io.UnsupportedOperation, wrapper.seek, 0, 0) self.assertRaises(io.UnsupportedOperation, wrapper.seek, 0, 1) class LogfileTest(unittest.TestCase): """Unit tests for the Logfile class.""" def test_float_basic(self): """Are floats converted from strings correctly?""" float = cclib.parser.logfileparser.Logfile('').float self.assertEqual(float("0.0"), 0.0) self.assertEqual(float("1.0"), 1.0) self.assertEqual(float("-1.0"), -1.0) def test_float_numeric_format(self): """Does numeric formatting get converted correctly?""" float = cclib.parser.logfileparser.Logfile('').float self.assertEqual(float("1.2345E+02"), 123.45) self.assertEqual(float("1.2345D+02"), 123.45) def test_float_stars(self): """Does the function return nan for stars?""" float = cclib.parser.logfileparser.Logfile('').float self.assertTrue(numpy.isnan(float("*"))) self.assertTrue(numpy.isnan(float("*****"))) def test_normalisesym_base_class_error(self): """Does this method return ERROR in base class?""" normalisesym = cclib.parser.logfileparser.Logfile('').normalisesym self.assertTrue("ERROR" in normalisesym("")) if __name__ == "__main__": unittest.main()
jchodera/cclib
test/parser/testlogfileparser.py
Python
lgpl-2.1
3,707
[ "ADF", "cclib" ]
4921b9e564727d262e2e76798a8b9495ce93a455b41b96ef34bc18a00cbca59b
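The cclib tests above pin down the behaviour of Logfile.float: plain and scientific notation parse normally, Fortran-style 'D' exponents are accepted, and fields of asterisks (overflowed output) become NaN. Below is a standalone sketch of a converter with that behaviour; it is written from the assertions in those tests, not copied from cclib's implementation.

import numpy


def parse_logfile_float(text):
    # Overflowed fixed-width fields are printed as asterisks; map them to NaN.
    if text and set(text) == set('*'):
        return numpy.nan
    # Fortran-based packages often print exponents with 'D' instead of 'E'.
    return float(text.replace('D', 'E').replace('d', 'e'))


assert parse_logfile_float('-1.0') == -1.0
assert parse_logfile_float('1.2345E+02') == 123.45
assert parse_logfile_float('1.2345D+02') == 123.45
assert numpy.isnan(parse_logfile_float('*****'))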
#!/usr/bin/env python #pylint: disable=missing-docstring ################################################################# # DO NOT MODIFY THIS HEADER # # MOOSE - Multiphysics Object Oriented Simulation Environment # # # # (c) 2010 Battelle Energy Alliance, LLC # # ALL RIGHTS RESERVED # # # # Prepared by Battelle Energy Alliance, LLC # # Under Contract No. DE-AC07-05ID14517 # # With the U. S. Department of Energy # # # # See COPYRIGHT for full restrictions # ################################################################# import chigger reader = chigger.exodus.ExodusReader('../input/step10_micro_out.e') mug = chigger.exodus.ExodusResult(reader, variable='phi') mug.update() p0 = (0, 0.05, 0) p1 = (0.1, 0.05, 0) sample = chigger.exodus.ExodusResultLineSampler(mug, point1=p0, point2=p1, resolution=200) sample.update() x = sample[0].getDistance() y = sample[0].getSample('phi') print x[98], y[98] line = chigger.graphs.Line(x, y, width=4, label='probe') graph = chigger.graphs.Graph(line, yaxis={'lim':[0,1]}, xaxis={'lim':[0,0.1]}) window = chigger.RenderWindow(graph, size=[600, 200], test=True) window.write('line_sample.png') window.start()
liuwenf/moose
python/chigger/tests/line_sample/line_sample.py
Python
lgpl-2.1
1,545
[ "MOOSE" ]
da244cc919943195d92daf87fdb1ee7e431e4b3c8f76e07952f22569018141d9
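The chigger script above samples the 'phi' variable along a line and renders it with chigger.graphs. If only the raw profile is wanted, the sampled distances and values can also be plotted directly with matplotlib. The sketch below re-creates the sampler using the same chigger calls that appear in the script and then plots the result; the plot styling and output filename are illustrative, not from the original test.

import chigger
import matplotlib.pyplot as plt

# Same sampler construction as in the script above.
reader = chigger.exodus.ExodusReader('../input/step10_micro_out.e')
mug = chigger.exodus.ExodusResult(reader, variable='phi')
mug.update()

sample = chigger.exodus.ExodusResultLineSampler(mug, point1=(0, 0.05, 0),
                                                point2=(0.1, 0.05, 0),
                                                resolution=200)
sample.update()

x = sample[0].getDistance()      # distance along the probe line
y = sample[0].getSample('phi')   # sampled 'phi' values

# Plot the profile with matplotlib instead of chigger.graphs.
plt.plot(x, y, linewidth=2, label='phi along probe')
plt.xlabel('distance')
plt.ylabel('phi')
plt.xlim(0, 0.1)
plt.ylim(0, 1)
plt.legend()
plt.savefig('line_sample_mpl.png')   # illustrative filename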
from scipy.ndimage import gaussian_filter


def smooth_data(data, SD):
    """Smooth data based on gaussian_filter

    Parameters
    ----------
    data: numpy array
        unsmoothed fMRI data
    SD: the standard deviation for gaussian smoothing

    Returns
    -------
    numpy array of same shape as input but smoothed
    """
    smoothed_data = gaussian_filter(data, [SD, SD, SD, 0])
    return smoothed_data
juanshishido/project-eta
code/utils/smoothing.py
Python
bsd-3-clause
391
[ "Gaussian" ]
e407e4afcbdf45385900e5546e074815bf3107089837b0d5489db92760b88951
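The smooth_data helper above smooths only the three spatial axes of a 4D fMRI array and leaves the time axis untouched (the trailing 0 in the sigma list). A small usage sketch with synthetic data follows; the array shape and SD value are made up for illustration.

import numpy as np
from scipy.ndimage import gaussian_filter


def smooth_data(data, SD):
    # Same helper as above: smooth x, y, z with sigma=SD, leave the time axis alone.
    return gaussian_filter(data, [SD, SD, SD, 0])


data = np.random.rand(64, 64, 30, 10)   # toy (x, y, z, time) volume
smoothed = smooth_data(data, SD=2)
print(smoothed.shape)                   # (64, 64, 30, 10), same shape as the input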
#!/usr/bin/env python # ---------------------------------------------------------------------------- # Copyright (c) 2013--, scikit-bio development team. # # Distributed under the terms of the Modified BSD License. # # The full license is in the file COPYING.txt, distributed with this software. # ---------------------------------------------------------------------------- from __future__ import absolute_import, division, print_function from unittest import TestCase, main from collections import Counter, defaultdict, OrderedDict try: from StringIO import StringIO except ImportError: # python3 system from io import StringIO import tempfile import numpy as np import numpy.testing as npt from scipy.spatial.distance import hamming from skbio import (NucleotideSequence, DNASequence, RNASequence, DNA, DistanceMatrix, Alignment, SequenceCollection) from skbio.alignment import (StockholmAlignment, SequenceCollectionError, StockholmParseError) class SequenceCollectionTests(TestCase): """Tests of the SequenceCollection class """ def setUp(self): """Initialize values to be used in tests """ self.d1 = DNASequence('GATTACA', id="d1") self.d2 = DNASequence('TTG', id="d2") self.d1_lower = DNASequence('gattaca', id="d1") self.d2_lower = DNASequence('ttg', id="d2") self.r1 = RNASequence('GAUUACA', id="r1") self.r2 = RNASequence('UUG', id="r2") self.r3 = RNASequence('U-----UGCC--', id="r3") self.i1 = DNASequence('GATXACA', id="i1") self.seqs1 = [self.d1, self.d2] self.seqs1_lower = [self.d1_lower, self.d2_lower] self.seqs2 = [self.r1, self.r2, self.r3] self.seqs3 = self.seqs1 + self.seqs2 self.seqs1_t = [('d1', 'GATTACA'), ('d2', 'TTG')] self.seqs2_t = [('r1', 'GAUUACA'), ('r2', 'UUG'), ('r3', 'U-----UGCC--')] self.seqs3_t = self.seqs1_t + self.seqs2_t self.s1 = SequenceCollection(self.seqs1) self.s1_lower = SequenceCollection(self.seqs1_lower) self.s2 = SequenceCollection(self.seqs2) self.s3 = SequenceCollection(self.seqs3) self.empty = SequenceCollection([]) self.invalid_s1 = SequenceCollection([self.i1]) def test_init(self): """Initialization functions as expected with varied input types """ SequenceCollection(self.seqs1) SequenceCollection(self.seqs2) SequenceCollection(self.seqs3) SequenceCollection([]) def test_init_fail(self): """initialization with sequences with overlapping ids fails """ s1 = [self.d1, self.d1] self.assertRaises(SequenceCollectionError, SequenceCollection, s1) def test_init_validate(self): """initialization with validation functions as expected """ SequenceCollection(self.seqs1, validate=True) SequenceCollection(self.seqs1, validate=True) # can't validate self.seqs2 as a DNASequence self.assertRaises(SequenceCollectionError, SequenceCollection, self.invalid_s1, validate=True) def test_from_fasta_records(self): """Initialization from list of tuples functions as expected """ SequenceCollection.from_fasta_records(self.seqs1_t, DNASequence) SequenceCollection.from_fasta_records(self.seqs2_t, RNASequence) SequenceCollection.from_fasta_records(self.seqs3_t, NucleotideSequence) def test_contains(self): """in operator functions as expected """ self.assertTrue('d1' in self.s1) self.assertTrue('r2' in self.s2) self.assertFalse('r2' in self.s1) def test_eq(self): """equality operator functions as expected """ self.assertTrue(self.s1 == self.s1) self.assertFalse(self.s1 == self.s2) # different objects can be equal self.assertTrue(self.s1 == SequenceCollection([self.d1, self.d2])) self.assertTrue(SequenceCollection([self.d1, self.d2]) == self.s1) # SequenceCollections with different number of 
sequences are not equal self.assertFalse(self.s1 == SequenceCollection([self.d1])) class FakeSequenceCollection(SequenceCollection): pass # SequenceCollections of different types are not equal self.assertFalse(self.s1 == FakeSequenceCollection([self.d1, self.d2])) self.assertFalse(self.s1 == Alignment([self.d1, self.d2])) # SequenceCollections with different sequences are not equal self.assertFalse(self.s1 == SequenceCollection([self.d1, self.r1])) def test_getitem(self): """getitem functions as expected """ self.assertEqual(self.s1[0], self.d1) self.assertEqual(self.s1[1], self.d2) self.assertEqual(self.s2[0], self.r1) self.assertEqual(self.s2[1], self.r2) self.assertRaises(IndexError, self.empty.__getitem__, 0) self.assertRaises(KeyError, self.empty.__getitem__, '0') def test_iter(self): """iter functions as expected """ s1_iter = iter(self.s1) count = 0 for actual, expected in zip(s1_iter, self.seqs1): count += 1 self.assertEqual(actual, expected) self.assertEqual(count, len(self.seqs1)) self.assertRaises(StopIteration, lambda: next(s1_iter)) def test_len(self): """len functions as expected """ self.assertEqual(len(self.s1), 2) self.assertEqual(len(self.s2), 3) self.assertEqual(len(self.s3), 5) self.assertEqual(len(self.empty), 0) def test_ne(self): """inequality operator functions as expected """ self.assertFalse(self.s1 != self.s1) self.assertTrue(self.s1 != self.s2) # SequenceCollections with different number of sequences are not equal self.assertTrue(self.s1 != SequenceCollection([self.d1])) class FakeSequenceCollection(SequenceCollection): pass # SequenceCollections of different types are not equal self.assertTrue(self.s1 != FakeSequenceCollection([self.d1, self.d2])) self.assertTrue(self.s1 != Alignment([self.d1, self.d2])) # SequenceCollections with different sequences are not equal self.assertTrue(self.s1 != SequenceCollection([self.d1, self.r1])) def test_repr(self): """repr functions as expected """ self.assertEqual(repr(self.s1), "<SequenceCollection: n=2; " "mean +/- std length=5.00 +/- 2.00>") self.assertEqual(repr(self.s2), "<SequenceCollection: n=3; " "mean +/- std length=7.33 +/- 3.68>") self.assertEqual(repr(self.s3), "<SequenceCollection: n=5; " "mean +/- std length=6.40 +/- 3.32>") self.assertEqual(repr(self.empty), "<SequenceCollection: n=0; " "mean +/- std length=0.00 +/- 0.00>") def test_reversed(self): """reversed functions as expected """ s1_iter = reversed(self.s1) count = 0 for actual, expected in zip(s1_iter, self.seqs1[::-1]): count += 1 self.assertEqual(actual, expected) self.assertEqual(count, len(self.seqs1)) self.assertRaises(StopIteration, lambda: next(s1_iter)) def test_k_word_frequencies(self): """k_word_frequencies functions as expected """ expected1 = defaultdict(int) expected1['A'] = 3 / 7. expected1['C'] = 1 / 7. expected1['G'] = 1 / 7. expected1['T'] = 2 / 7. expected2 = defaultdict(int) expected2['G'] = 1 / 3. expected2['T'] = 2 / 3. self.assertEqual(self.s1.k_word_frequencies(k=1), [expected1, expected2]) expected1 = defaultdict(int) expected1['GAT'] = 1 / 2. expected1['TAC'] = 1 / 2. expected2 = defaultdict(int) expected2['TTG'] = 1 / 1. 
self.assertEqual(self.s1.k_word_frequencies(k=3, overlapping=False), [expected1, expected2]) self.assertEqual(self.empty.k_word_frequencies(k=1), []) def test_str(self): """str functions as expected """ exp1 = ">d1\nGATTACA\n>d2\nTTG\n" self.assertEqual(str(self.s1), exp1) exp2 = ">r1\nGAUUACA\n>r2\nUUG\n>r3\nU-----UGCC--\n" self.assertEqual(str(self.s2), exp2) exp4 = "" self.assertEqual(str(self.empty), exp4) def test_distances(self): """distances functions as expected """ s1 = SequenceCollection([DNA("ACGT", "d1"), DNA("ACGG", "d2")]) expected = [[0, 0.25], [0.25, 0]] expected = DistanceMatrix(expected, ['d1', 'd2']) actual = s1.distances(hamming) self.assertEqual(actual, expected) # alt distance function provided def dumb_distance(s1, s2): return 42. expected = [[0, 42.], [42., 0]] expected = DistanceMatrix(expected, ['d1', 'd2']) actual = s1.distances(dumb_distance) self.assertEqual(actual, expected) def test_distribution_stats(self): """distribution_stats functions as expected """ actual1 = self.s1.distribution_stats() self.assertEqual(actual1[0], 2) self.assertAlmostEqual(actual1[1], 5.0, 3) self.assertAlmostEqual(actual1[2], 2.0, 3) actual2 = self.s2.distribution_stats() self.assertEqual(actual2[0], 3) self.assertAlmostEqual(actual2[1], 7.333, 3) self.assertAlmostEqual(actual2[2], 3.682, 3) actual3 = self.s3.distribution_stats() self.assertEqual(actual3[0], 5) self.assertAlmostEqual(actual3[1], 6.400, 3) self.assertAlmostEqual(actual3[2], 3.323, 3) actual4 = self.empty.distribution_stats() self.assertEqual(actual4[0], 0) self.assertEqual(actual4[1], 0.0) self.assertEqual(actual4[2], 0.0) def test_degap(self): """degap functions as expected """ expected = [(id_, seq.replace('.', '').replace('-', '')) for id_, seq in self.seqs2_t] expected = SequenceCollection.from_fasta_records(expected, RNASequence) actual = self.s2.degap() self.assertEqual(actual, expected) def test_get_seq(self): """getseq functions asexpected """ self.assertEqual(self.s1.get_seq('d1'), self.d1) self.assertEqual(self.s1.get_seq('d2'), self.d2) def test_ids(self): """ids functions as expected """ self.assertEqual(self.s1.ids(), ['d1', 'd2']) self.assertEqual(self.s2.ids(), ['r1', 'r2', 'r3']) self.assertEqual(self.s3.ids(), ['d1', 'd2', 'r1', 'r2', 'r3']) self.assertEqual(self.empty.ids(), []) def test_int_map(self): """int_map functions as expected """ expected1 = {"1": self.d1, "2": self.d2} expected2 = {"1": "d1", "2": "d2"} self.assertEqual(self.s1.int_map(), (expected1, expected2)) expected1 = {"h-1": self.d1, "h-2": self.d2} expected2 = {"h-1": "d1", "h-2": "d2"} self.assertEqual(self.s1.int_map(prefix='h-'), (expected1, expected2)) def test_is_empty(self): """is_empty functions as expected """ self.assertFalse(self.s1.is_empty()) self.assertFalse(self.s2.is_empty()) self.assertFalse(self.s3.is_empty()) self.assertTrue(self.empty.is_empty()) def test_is_valid(self): """is_valid functions as expected """ self.assertTrue(self.s1.is_valid()) self.assertTrue(self.s2.is_valid()) self.assertTrue(self.s3.is_valid()) self.assertTrue(self.empty.is_valid()) self.assertFalse(self.invalid_s1.is_valid()) def test_iteritems(self): """iteritems functions as expected """ self.assertEqual(list(self.s1.iteritems()), [(s.id, s) for s in self.s1]) def test_lower(self): """lower functions as expected """ self.assertEqual(self.s1.lower(), self.s1_lower) def test_sequence_count(self): """num_seqs functions as expected """ self.assertEqual(self.s1.sequence_count(), 2) self.assertEqual(self.s2.sequence_count(), 3) 
self.assertEqual(self.s3.sequence_count(), 5) self.assertEqual(self.empty.sequence_count(), 0) def test_sequence_lengths(self): """sequence_lengths functions as expected """ self.assertEqual(self.s1.sequence_lengths(), [7, 3]) self.assertEqual(self.s2.sequence_lengths(), [7, 3, 12]) self.assertEqual(self.s3.sequence_lengths(), [7, 3, 7, 3, 12]) self.assertEqual(self.empty.sequence_lengths(), []) def test_to_fasta(self): """to_fasta functions as expected """ exp1 = ">d1\nGATTACA\n>d2\nTTG\n" self.assertEqual(self.s1.to_fasta(), exp1) exp2 = ">r1\nGAUUACA\n>r2\nUUG\n>r3\nU-----UGCC--\n" self.assertEqual(self.s2.to_fasta(), exp2) def test_toFasta(self): exp = ">d1\nGATTACA\n>d2\nTTG\n" obs = npt.assert_warns(UserWarning, self.s1.toFasta) self.assertEqual(obs, exp) def test_upper(self): """upper functions as expected """ self.assertEqual(self.s1_lower.upper(), self.s1) class AlignmentTests(TestCase): def setUp(self): self.d1 = DNASequence('..ACC-GTTGG..', id="d1") self.d2 = DNASequence('TTACCGGT-GGCC', id="d2") self.d3 = DNASequence('.-ACC-GTTGC--', id="d3") self.r1 = RNASequence('UUAU-', id="r1") self.r2 = RNASequence('ACGUU', id="r2") self.seqs1 = [self.d1, self.d2, self.d3] self.seqs2 = [self.r1, self.r2] self.seqs1_t = [('d1', '..ACC-GTTGG..'), ('d2', 'TTACCGGT-GGCC'), ('d3', '.-ACC-GTTGC--')] self.seqs2_t = [('r1', 'UUAU-'), ('r2', 'ACGUU')] self.a1 = Alignment(self.seqs1) self.a2 = Alignment(self.seqs2) self.a3 = Alignment(self.seqs2, score=42.0, start_end_positions=[(0, 3), (5, 9)]) self.a4 = Alignment(self.seqs2, score=-42.0, start_end_positions=[(1, 4), (6, 10)]) self.empty = Alignment([]) def test_degap(self): """degap functions as expected """ expected = [(id_, seq.replace('.', '').replace('-', '')) for id_, seq in self.seqs1_t] expected = SequenceCollection.from_fasta_records(expected, DNASequence) actual = self.a1.degap() self.assertEqual(actual, expected) expected = [(id_, seq.replace('.', '').replace('-', '')) for id_, seq in self.seqs2_t] expected = SequenceCollection.from_fasta_records(expected, RNASequence) actual = self.a2.degap() self.assertEqual(actual, expected) def test_distances(self): """distances functions as expected """ expected = [[0, 6. / 13, 4. / 13], [6. / 13, 0, 7. / 13], [4. / 13, 7. / 13, 0]] expected = DistanceMatrix(expected, ['d1', 'd2', 'd3']) actual = self.a1.distances() self.assertEqual(actual, expected) # alt distance function provided def dumb_distance(s1, s2): return 42. 
expected = [[0, 42., 42.], [42., 0, 42.], [42., 42., 0]] expected = DistanceMatrix(expected, ['d1', 'd2', 'd3']) actual = self.a1.distances(dumb_distance) self.assertEqual(actual, expected) def test_score(self): self.assertEqual(self.a3.score(), 42.0) self.assertEqual(self.a4.score(), -42.0) def test_start_end_positions(self): self.assertEqual(self.a3.start_end_positions(), [(0, 3), (5, 9)]) self.assertEqual(self.a4.start_end_positions(), [(1, 4), (6, 10)]) def test_subalignment(self): """subalignment functions as expected """ # keep seqs by ids actual = self.a1.subalignment(seqs_to_keep=['d1', 'd3']) expected = Alignment([self.d1, self.d3]) self.assertEqual(actual, expected) # keep seqs by indices actual = self.a1.subalignment(seqs_to_keep=[0, 2]) expected = Alignment([self.d1, self.d3]) self.assertEqual(actual, expected) # keep seqs by ids (invert) actual = self.a1.subalignment(seqs_to_keep=['d1', 'd3'], invert_seqs_to_keep=True) expected = Alignment([self.d2]) self.assertEqual(actual, expected) # keep seqs by indices (invert) actual = self.a1.subalignment(seqs_to_keep=[0, 2], invert_seqs_to_keep=True) expected = Alignment([self.d2]) self.assertEqual(actual, expected) # keep positions actual = self.a1.subalignment(positions_to_keep=[0, 2, 3]) d1 = DNASequence('.AC', id="d1") d2 = DNASequence('TAC', id="d2") d3 = DNASequence('.AC', id="d3") expected = Alignment([d1, d2, d3]) self.assertEqual(actual, expected) # keep positions (invert) actual = self.a1.subalignment(positions_to_keep=[0, 2, 3], invert_positions_to_keep=True) d1 = DNASequence('.C-GTTGG..', id="d1") d2 = DNASequence('TCGGT-GGCC', id="d2") d3 = DNASequence('-C-GTTGC--', id="d3") expected = Alignment([d1, d2, d3]) self.assertEqual(actual, expected) # keep seqs and positions actual = self.a1.subalignment(seqs_to_keep=[0, 2], positions_to_keep=[0, 2, 3]) d1 = DNASequence('.AC', id="d1") d3 = DNASequence('.AC', id="d3") expected = Alignment([d1, d3]) self.assertEqual(actual, expected) # keep seqs and positions (invert) actual = self.a1.subalignment(seqs_to_keep=[0, 2], positions_to_keep=[0, 2, 3], invert_seqs_to_keep=True, invert_positions_to_keep=True) d2 = DNASequence('TCGGT-GGCC', id="d2") expected = Alignment([d2]) self.assertEqual(actual, expected) def test_subalignment_filter_out_everything(self): exp = Alignment([]) # no sequences obs = self.a1.subalignment(seqs_to_keep=None, invert_seqs_to_keep=True) self.assertEqual(obs, exp) # no positions obs = self.a1.subalignment(positions_to_keep=None, invert_positions_to_keep=True) self.assertEqual(obs, exp) def test_init_validate(self): """initialization with validation functions as expected """ Alignment(self.seqs1, validate=True) # invalid DNA character invalid_seqs1 = [self.d1, self.d2, self.d3, DNASequence('.-ACC-GTXGC--', id="i1")] self.assertRaises(SequenceCollectionError, Alignment, invalid_seqs1, validate=True) # invalid lengths (they're not all equal) invalid_seqs2 = [self.d1, self.d2, self.d3, DNASequence('.-ACC-GTGC--', id="i2")] self.assertRaises(SequenceCollectionError, Alignment, invalid_seqs2, validate=True) def test_is_valid(self): """is_valid functions as expected """ self.assertTrue(self.a1.is_valid()) self.assertTrue(self.a2.is_valid()) self.assertTrue(self.empty.is_valid()) # invalid because of length mismatch d1 = DNASequence('..ACC-GTTGG..', id="d1") d2 = DNASequence('TTACCGGT-GGC', id="d2") self.assertFalse(Alignment([d1, d2]).is_valid()) # invalid because of invalid charaters d1 = DNASequence('..ACC-GTXGG..', id="d1") d2 = DNASequence('TTACCGGT-GGCC', 
id="d2") self.assertFalse(Alignment([d1, d2]).is_valid()) def test_iter_positions(self): """iter_positions functions as expected """ actual = list(self.a2.iter_positions()) expected = [[RNASequence(j) for j in i] for i in ['UA', 'UC', 'AG', 'UU', '-U']] self.seqs2_t = [('r1', 'UUAU-'), ('r2', 'ACGUU')] self.assertEqual(actual, expected) actual = list(self.a2.iter_positions(constructor=str)) expected = [list('UA'), list('UC'), list('AG'), list('UU'), list('-U')] self.seqs2_t = [('r1', 'UUAU-'), ('r2', 'ACGUU')] self.assertEqual(actual, expected) def test_majority_consensus(self): """majority_consensus functions as expected """ d1 = DNASequence('TTT', id="d1") d2 = DNASequence('TT-', id="d2") d3 = DNASequence('TC-', id="d3") a1 = Alignment([d1, d2, d3]) self.assertEqual(a1.majority_consensus(), DNASequence('TT-')) d1 = DNASequence('T', id="d1") d2 = DNASequence('A', id="d2") a1 = Alignment([d1, d2]) self.assertTrue(a1.majority_consensus() in [DNASequence('T'), DNASequence('A')]) self.assertEqual(self.empty.majority_consensus(), '') def test_omit_gap_positions(self): """omitting gap positions functions as expected """ expected = self.a2 self.assertEqual(self.a2.omit_gap_positions(1.0), expected) self.assertEqual(self.a2.omit_gap_positions(0.51), expected) r1 = RNASequence('UUAU', id="r1") r2 = RNASequence('ACGU', id="r2") expected = Alignment([r1, r2]) self.assertEqual(self.a2.omit_gap_positions(0.49), expected) r1 = RNASequence('UUAU', id="r1") r2 = RNASequence('ACGU', id="r2") expected = Alignment([r1, r2]) self.assertEqual(self.a2.omit_gap_positions(0.0), expected) self.assertEqual(self.empty.omit_gap_positions(0.0), self.empty) self.assertEqual(self.empty.omit_gap_positions(0.49), self.empty) self.assertEqual(self.empty.omit_gap_positions(1.0), self.empty) def test_omit_gap_sequences(self): """omitting gap sequences functions as expected """ expected = self.a2 self.assertEqual(self.a2.omit_gap_sequences(1.0), expected) self.assertEqual(self.a2.omit_gap_sequences(0.20), expected) expected = Alignment([self.r2]) self.assertEqual(self.a2.omit_gap_sequences(0.19), expected) self.assertEqual(self.empty.omit_gap_sequences(0.0), self.empty) self.assertEqual(self.empty.omit_gap_sequences(0.2), self.empty) self.assertEqual(self.empty.omit_gap_sequences(1.0), self.empty) def test_position_counters(self): """position_counters functions as expected """ expected = [Counter({'U': 1, 'A': 1}), Counter({'U': 1, 'C': 1}), Counter({'A': 1, 'G': 1}), Counter({'U': 2}), Counter({'-': 1, 'U': 1})] self.assertEqual(self.a2.position_counters(), expected) self.assertEqual(self.empty.position_counters(), []) def test_position_frequencies(self): """computing position frequencies functions as expected """ expected = [defaultdict(int, {'U': 0.5, 'A': 0.5}), defaultdict(int, {'U': 0.5, 'C': 0.5}), defaultdict(int, {'A': 0.5, 'G': 0.5}), defaultdict(int, {'U': 1.0}), defaultdict(int, {'-': 0.5, 'U': 0.5})] self.assertEqual(self.a2.position_frequencies(), expected) self.assertEqual(self.empty.position_frequencies(), []) def test_position_entropies(self): """computing positional uncertainties functions as expected tested by calculating values as described in this post: http://stackoverflow.com/a/15476958/3424666 """ expected = [0.69314, 0.69314, 0.69314, 0.0, np.nan] np.testing.assert_almost_equal(self.a2.position_entropies(), expected, 5) expected = [1.0, 1.0, 1.0, 0.0, np.nan] np.testing.assert_almost_equal(self.a2.position_entropies(base=2), expected, 5) 
np.testing.assert_almost_equal(self.empty.position_entropies(base=2), []) def test_k_word_frequencies(self): """k_word_frequencies functions as expected """ expected = [defaultdict(int, {'U': 3 / 5, 'A': 1 / 5, '-': 1 / 5}), defaultdict(int, {'A': 1 / 5, 'C': 1 / 5, 'G': 1 / 5, 'U': 2 / 5})] actual = self.a2.k_word_frequencies(k=1) for a, e in zip(actual, expected): self.assertEqual(sorted(a), sorted(e), 5) np.testing.assert_almost_equal(sorted(a.values()), sorted(e.values()), 5) def test_sequence_length(self): """sequence_length functions as expected """ self.assertEqual(self.a1.sequence_length(), 13) self.assertEqual(self.a2.sequence_length(), 5) self.assertEqual(self.empty.sequence_length(), 0) def test_to_phylip(self): """to_phylip functions as expected """ d1 = DNASequence('..ACC-GTTGG..', id="d1") d2 = DNASequence('TTACCGGT-GGCC', id="d2") d3 = DNASequence('.-ACC-GTTGC--', id="d3") a = Alignment([d1, d2, d3]) phylip_str, id_map = a.to_phylip(map_labels=False) self.assertEqual(id_map, {'d1': 'd1', 'd3': 'd3', 'd2': 'd2'}) expected = "\n".join(["3 13", "d1 ..ACC-GTTGG..", "d2 TTACCGGT-GGCC", "d3 .-ACC-GTTGC--"]) self.assertEqual(phylip_str, expected) def test_to_phylip_map_labels(self): """to_phylip functions as expected with label mapping """ d1 = DNASequence('..ACC-GTTGG..', id="d1") d2 = DNASequence('TTACCGGT-GGCC', id="d2") d3 = DNASequence('.-ACC-GTTGC--', id="d3") a = Alignment([d1, d2, d3]) phylip_str, id_map = a.to_phylip(map_labels=True, label_prefix="s") self.assertEqual(id_map, {'s1': 'd1', 's3': 'd3', 's2': 'd2'}) expected = "\n".join(["3 13", "s1 ..ACC-GTTGG..", "s2 TTACCGGT-GGCC", "s3 .-ACC-GTTGC--"]) self.assertEqual(phylip_str, expected) def test_to_phylip_unequal_sequence_lengths(self): d1 = DNASequence('A-CT', id="d1") d2 = DNASequence('TTA', id="d2") d3 = DNASequence('.-AC', id="d3") a = Alignment([d1, d2, d3]) with self.assertRaises(SequenceCollectionError): a.to_phylip() def test_to_phylip_no_sequences(self): with self.assertRaises(SequenceCollectionError): Alignment([]).to_phylip() def test_to_phylip_no_positions(self): d1 = DNASequence('', id="d1") d2 = DNASequence('', id="d2") a = Alignment([d1, d2]) with self.assertRaises(SequenceCollectionError): a.to_phylip() def test_validate_lengths(self): """ """ self.assertTrue(self.a1._validate_lengths()) self.assertTrue(self.a2._validate_lengths()) self.assertTrue(self.empty._validate_lengths()) self.assertTrue(Alignment([ DNASequence('TTT', id="d1")])._validate_lengths()) self.assertFalse(Alignment([ DNASequence('TTT', id="d1"), DNASequence('TT', id="d2")])._validate_lengths()) class StockholmAlignmentTests(TestCase): """Tests for stockholmAlignment object""" def setUp(self): """Setup for stockholm tests.""" self.seqs = [DNASequence("ACC-G-GGTA", id="seq1"), DNASequence("TCC-G-GGCA", id="seq2")] self.GF = OrderedDict([ ("AC", "RF00360"), ("BM", ["cmbuild -F CM SEED", "cmsearch -Z 274931 -E 1000000"]), ("SQ", "9"), ("RT", ["TITLE1", "TITLE2"]), ("RN", ["[1]", "[2]"]), ("RA", ["Auth1;", "Auth2;"]), ("RL", ["J Mol Biol", "Cell"]), ("RM", ["11469857", "12007400"]), ('RN', ['[1]', '[2]']) ]) self.GS = {"AC": OrderedDict([("seq1", "111"), ("seq2", "222")])} self.GR = {"SS": OrderedDict([("seq1", "1110101111"), ("seq2", "0110101110")])} self.GC = {"SS_cons": "(((....)))"} self.st = StockholmAlignment(self.seqs, gc=self.GC, gf=self.GF, gs=self.GS, gr=self.GR) def test_retrieve_metadata(self): self.assertEqual(self.st.gc, self.GC) self.assertEqual(self.st.gf, self.GF) self.assertEqual(self.st.gs, self.GS) 
self.assertEqual(self.st.gr, self.GR) def test_from_file_alignment(self): """make sure can parse basic sto file with interleaved alignment""" sto = StringIO("# STOCKHOLM 1.0\n" "seq1 ACC-G\n" "seq2 TCC-G\n\n" "seq1 -GGTA\n" "seq2 -GGCA\n//") obs_sto = next(StockholmAlignment.from_file(sto, DNA)) exp_sto = StockholmAlignment(self.seqs) self.assertEqual(obs_sto, exp_sto) def test_from_file_GF(self): """Make sure GF lines are parsed correctly""" # remove rn line to make sure auto-added self.GF.pop("RN") sto = StringIO("# STOCKHOLM 1.0\n#=GF RN [1]\n#=GF RM 11469857\n" "#=GF RT TITLE1\n#=GF RA Auth1;\n#=GF RL J Mol Biol\n" "#=GF RN [2]\n#=GF RM 12007400\n#=GF RT TITLE2\n" "#=GF RA Auth2;\n#=GF RL Cell\n#=GF AC RF00360\n" "#=GF BM cmbuild -F CM SEED\n" "#=GF BM cmsearch -Z 274931 -E 1000000\n#=GF SQ 9\n" "seq1 ACC-G-GGTA\nseq2 TCC-G-GGCA\n//") obs_sto = next(StockholmAlignment.from_file(sto, DNA)) exp_sto = StockholmAlignment(self.seqs, self.GF, {}, {}, {}) self.assertEqual(obs_sto, exp_sto) def test_from_file_GC(self): """Make sure GC lines are parsed correctly""" sto = StringIO("# STOCKHOLM 1.0\n" "seq1 ACC-G-GGTA\nseq2 TCC-G-GGCA\n" "#=GC SS_cons (((....)))\n//") obs_sto = next(StockholmAlignment.from_file(sto, DNA)) exp_sto = StockholmAlignment(self.seqs, {}, {}, {}, self.GC) self.assertEqual(obs_sto, exp_sto) def test_from_file_GS(self): """Make sure GS lines are parsed correctly""" sto = StringIO("# STOCKHOLM 1.0\n#=GS seq2 AC 222\n#=GS seq1 AC 111\n" "seq1 ACC-G-GGTA\n" "seq2 TCC-G-GGCA\n//") obs_sto = next(StockholmAlignment.from_file(sto, DNA)) exp_sto = StockholmAlignment(self.seqs, {}, self.GS, {}, {}) self.assertEqual(obs_sto, exp_sto) def test_from_file_GR(self): """Make sure GR lines are parsed correctly""" sto = StringIO("# STOCKHOLM 1.0\nseq1 ACC-G\n" "#=GR seq1 SS 11101\nseq2 TCC-G\n" "#=GR seq2 SS 01101\n\nseq1 -GGTA\n" "#=GR seq1 SS 01111\nseq2 -GGCA\n" "#=GR seq2 SS 01110\n//") obs_sto = next(StockholmAlignment.from_file(sto, DNA)) exp_sto = StockholmAlignment(self.seqs, {}, {}, self.GR, {}) self.assertEqual(obs_sto, exp_sto) def test_from_file_multi(self): """Make sure yield works correctly with multi-alignment sto files""" sto = StringIO("# STOCKHOLM 1.0\n#=GS seq2 AC 222\n#=GS seq1 AC 111\n" "seq1 ACC-G-GGTA\n" "seq2 TCC-G-GGCA\n//\n" "# STOCKHOLM 1.0\nseq1 ACC-G-GGTA\n" "#=GR seq1 SS 1110101111\nseq2 TCC-G-GGCA\n" "#=GR seq2 SS 0110101110\n//") obs_sto = StockholmAlignment.from_file(sto, DNA) count = 0 for obs in obs_sto: if count == 0: exp_sto = StockholmAlignment(self.seqs, {}, self.GS, {}, {}) self.assertEqual(obs, exp_sto) elif count == 1: exp_sto = StockholmAlignment(self.seqs, {}, {}, self.GR, {}) self.assertEqual(obs, exp_sto) else: raise AssertionError("More than 2 sto alignments parsed!") count += 1 def test_parse_gf_multiline_nh(self): """Makes sure a multiline NH code is parsed correctly""" sto = ["#=GF TN MULTILINE TREE", "#=GF NH THIS IS FIRST", "#=GF NH THIS IS SECOND", "#=GF AC 1283394"] exp = {'TN': 'MULTILINE TREE', 'NH': 'THIS IS FIRST THIS IS SECOND', 'AC': '1283394'} self.assertEqual(self.st._parse_gf_info(sto), exp) def test_parse_gf_multiline_cc(self): """Makes sure a multiline CC code is parsed correctly""" sto = ["#=GF CC THIS IS FIRST", "#=GF CC THIS IS SECOND"] exp = {'CC': 'THIS IS FIRST THIS IS SECOND'} self.assertEqual(self.st._parse_gf_info(sto), exp) def test_parse_gf_info_nongf(self): """Makes sure error raised if non-GF line passed""" sto = ["#=GF AC BLAAAAAAAHHH", "#=GC HUH THIS SHOULD NOT BE HERE"] with 
self.assertRaises(StockholmParseError): self.st._parse_gf_info(sto) def test_parse_gf_info_malformed(self): """Makes sure error raised if too short a line passed""" sto = ["#=GF AC", "#=GF"] with self.assertRaises(StockholmParseError): self.st._parse_gf_info(sto) def test_parse_gc_info_nongf(self): """Makes sure error raised if non-GC line passed""" sto = ["#=GC AC BLAAAAAAAHHH", "#=GF HUH THIS SHOULD NOT BE HERE"] with self.assertRaises(StockholmParseError): self.st._parse_gf_info(sto) def test_parse_gc_info_strict_len(self): """Make sure error raised if GC lines bad length and strict parsing""" sto = ["#=GC SS_cons (((..)))"] with self.assertRaises(StockholmParseError): self.st._parse_gc_info(sto, seqlen=20, strict=True) def test_parse_gc_info_strict_duplicate(self): """Make sure error raised if GC lines repeated""" sto = ["#=GC SS_cons (((..)))", "#=GC SS_cons (((..)))"] with self.assertRaises(StockholmParseError): self.st._parse_gc_info(sto, seqlen=8, strict=True) def test_parse_gc_info_malformed(self): """Makes sure error raised if too short a line passed""" sto = ["#=GC AC BLAAAAAAAHHH", "#=GC"] with self.assertRaises(StockholmParseError): self.st._parse_gc_info(sto) def test_parse_gs_gr_info_mixed(self): """Makes sure error raised if mixed GS and GR lines passed""" sto = ["#=GS seq1 AC BLAAA", "#=GR seq2 HUH THIS SHOULD NOT BE HERE"] with self.assertRaises(StockholmParseError): self.st._parse_gs_gr_info(sto) def test_parse_gs_gr_info_malformed(self): """Makes sure error raised if too short a line passed""" sto = ["#=GS AC BLAAAAAAAHHH", "#=GS"] with self.assertRaises(StockholmParseError): self.st._parse_gs_gr_info(sto) def test_parse_gs_gr_info_strict(self): """Make sure error raised if GR lines bad length and strict parsing""" sto = ["#=GR seq1 SS 10101111", "#=GR seq2 SS 01101"] with self.assertRaises(StockholmParseError): self.st._parse_gs_gr_info(sto, seqlen=20, strict=True) def test_str(self): """ Make sure stockholm with all information contained is formatted correctly """ st = StockholmAlignment(self.seqs, gc=self.GC, gf=self.GF, gs=self.GS, gr=self.GR) obs = str(st) exp = ('# STOCKHOLM 1.0\n' '#=GF AC RF00360\n' '#=GF BM cmbuild -F CM SEED\n' '#=GF BM cmsearch -Z 274931 -E 1000000\n' '#=GF SQ 9\n' '#=GF RN [1]\n' '#=GF RM 11469857\n' '#=GF RT TITLE1\n' '#=GF RA Auth1;\n' '#=GF RL J Mol Biol\n' '#=GF RN [2]\n' '#=GF RM 12007400\n' '#=GF RT TITLE2\n' '#=GF RA Auth2;\n' '#=GF RL Cell\n' '#=GS seq1 AC 111\n' '#=GS seq2 AC 222\n' 'seq1 ACC-G-GGTA\n' '#=GR seq1 SS 1110101111\n' 'seq2 TCC-G-GGCA\n' '#=GR seq2 SS 0110101110\n' '#=GC SS_cons (((....)))\n//') self.assertEqual(obs, exp) def test_to_file(self): st = StockholmAlignment(self.seqs, gc=self.GC, gf=self.GF, gs=self.GS, gr=self.GR) with tempfile.NamedTemporaryFile('r+') as temp_file: st.to_file(temp_file) temp_file.flush() temp_file.seek(0) obs = temp_file.read() exp = ('# STOCKHOLM 1.0\n' '#=GF AC RF00360\n' '#=GF BM cmbuild -F CM SEED\n' '#=GF BM cmsearch -Z 274931 -E 1000000\n' '#=GF SQ 9\n' '#=GF RN [1]\n' '#=GF RM 11469857\n' '#=GF RT TITLE1\n' '#=GF RA Auth1;\n' '#=GF RL J Mol Biol\n' '#=GF RN [2]\n' '#=GF RM 12007400\n' '#=GF RT TITLE2\n' '#=GF RA Auth2;\n' '#=GF RL Cell\n' '#=GS seq1 AC 111\n' '#=GS seq2 AC 222\n' 'seq1 ACC-G-GGTA\n' '#=GR seq1 SS 1110101111\n' 'seq2 TCC-G-GGCA\n' '#=GR seq2 SS 0110101110\n' '#=GC SS_cons (((....)))\n//') self.assertEqual(obs, exp) def test_str_gc(self): """ Make sure stockholm with only GC information contained is formatted correctly """ st = StockholmAlignment(self.seqs, gc=self.GC, 
gf=None, gs=None, gr=None) obs = str(st) exp = ("# STOCKHOLM 1.0\nseq1 ACC-G-GGTA\n" "seq2 TCC-G-GGCA\n" "#=GC SS_cons (((....)))\n//") self.assertEqual(obs, exp) def test_str_gf(self): """ Make sure stockholm with only GF information contained is formatted correctly """ st = StockholmAlignment(self.seqs, gc=None, gf=self.GF, gs=None, gr=None) obs = str(st) exp = ('# STOCKHOLM 1.0\n' '#=GF AC RF00360\n' '#=GF BM cmbuild -F CM SEED\n' '#=GF BM cmsearch -Z 274931 -E 1000000\n' '#=GF SQ 9\n' '#=GF RN [1]\n' '#=GF RM 11469857\n' '#=GF RT TITLE1\n' '#=GF RA Auth1;\n' '#=GF RL J Mol Biol\n' '#=GF RN [2]\n' '#=GF RM 12007400\n' '#=GF RT TITLE2\n' '#=GF RA Auth2;\n' '#=GF RL Cell\n' 'seq1 ACC-G-GGTA\n' 'seq2 TCC-G-GGCA\n//') self.assertEqual(obs, exp) def test_str_gs(self): """ Make sure stockholm with only GS information contained is formatted correctly """ st = StockholmAlignment(self.seqs, gc=None, gf=None, gs=self.GS, gr=None) obs = str(st) exp = ('# STOCKHOLM 1.0\n' '#=GS seq1 AC 111\n' '#=GS seq2 AC 222\n' 'seq1 ACC-G-GGTA\n' 'seq2 TCC-G-GGCA\n//') self.assertEqual(obs, exp) def test_str_gr(self): """ Make sure stockholm with only GR information contained is formatted correctly """ st = StockholmAlignment(self.seqs, gc=None, gf=None, gs=None, gr=self.GR) obs = str(st) exp = ("# STOCKHOLM 1.0\nseq1 ACC-G-GGTA\n" "#=GR seq1 SS 1110101111\nseq2 TCC-G-GGCA\n" "#=GR seq2 SS 0110101110\n//") self.assertEqual(obs, exp) def test_str_trees(self): """ Make sure stockholm with trees printed correctly""" GF = OrderedDict({"NH": ["IMATREE", "IMATREETOO"], "TN": ["Tree2", "Tree1"]}) st = StockholmAlignment(self.seqs, gc=None, gf=GF, gs=None, gr=None) obs = str(st) exp = ("# STOCKHOLM 1.0\n#=GF TN Tree2\n#=GF NH IMATREE\n#=GF TN Tree1" "\n#=GF NH IMATREETOO\nseq1 ACC-G-GGTA\n" "seq2 TCC-G-GGCA\n//") self.assertEqual(obs, exp) if __name__ == "__main__": main()
JWDebelius/scikit-bio
skbio/alignment/tests/test_alignment.py
Python
bsd-3-clause
42,179
[ "scikit-bio" ]
e8ceccfa8cdd56921fc98cf1f35d7ce7b6c32e6990f028a99c5f7a884bf6801e
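The test module above exercises the Alignment and SequenceCollection API of that era of scikit-bio. The short usage sketch below is assembled only from calls that appear in those tests (DNASequence, Alignment, sequence_length, majority_consensus, distances) and therefore targets that same legacy version of skbio, not the current API.

from skbio import DNASequence, Alignment

d1 = DNASequence('..ACC-GTTGG..', id='d1')
d2 = DNASequence('TTACCGGT-GGCC', id='d2')
d3 = DNASequence('.-ACC-GTTGC--', id='d3')

aln = Alignment([d1, d2, d3])

print(aln.sequence_length())      # 13, the shared length of all three sequences
print(aln.majority_consensus())   # column-wise majority-rule consensus sequence
print(aln.distances())            # pairwise distance matrix (Hamming-style by default)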
# -*- coding: utf-8 -*-
#
# connections.py
#
# This file is part of NEST.
#
# Copyright (C) 2004 The NEST Initiative
#
# NEST is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# NEST is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with NEST.  If not, see <http://www.gnu.org/licenses/>.

# create connectivity figures for topology manual

import nest
import nest.topology as tp
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d.axes3d import Axes3D
import numpy as np


def beautify_layer(l, fig=plt.gcf(), xlabel=None, ylabel=None,
                   xlim=None, ylim=None, xticks=None, yticks=None,
                   dx=0, dy=0):
    """Assume either x and ylims/ticks given or none"""
    top = nest.GetStatus(l)[0]['topology']
    ctr = top['center']
    ext = top['extent']

    if xticks is None:
        if 'rows' in top:
            dx = float(ext[0]) / top['columns']
            dy = float(ext[1]) / top['rows']
            xticks = ctr[0] - ext[0] / 2. + dx / 2. + dx * np.arange(
                top['columns'])
            yticks = ctr[1] - ext[1] / 2. + dy / 2. + dy * np.arange(
                top['rows'])

    if xlim is None:
        xlim = [ctr[0] - ext[0] / 2. - dx / 2.,
                ctr[0] + ext[0] / 2. + dx / 2.]  # extra space so extent is visible
        ylim = [ctr[1] - ext[1] / 2. - dy / 2.,
                ctr[1] + ext[1] / 2. + dy / 2.]
    else:
        ext = [xlim[1] - xlim[0], ylim[1] - ylim[0]]

    ax = fig.gca()
    ax.set_xlim(xlim)
    ax.set_ylim(ylim)
    ax.set_aspect('equal', 'box')
    ax.set_xticks(xticks)
    ax.set_yticks(yticks)
    ax.grid(True)
    ax.set_xlabel(xlabel)
    ax.set_ylabel(ylabel)
    return


def conn_figure(fig, layer, connd, targets=None, showmask=True,
                showkern=False, xticks=range(-5, 6), yticks=range(-5, 6),
                xlim=[-5.5, 5.5], ylim=[-5.5, 5.5]):
    if targets is None:
        targets = ((tp.FindCenterElement(layer), 'red'),)

    tp.PlotLayer(layer, fig=fig, nodesize=60)
    for src, clr in targets:
        if showmask:
            mask = connd['mask']
        else:
            mask = None
        if showkern:
            kern = connd['kernel']
        else:
            kern = None
        tp.PlotTargets(src, layer, fig=fig, mask=mask, kernel=kern,
                       src_size=250, tgt_color=clr, tgt_size=20,
                       kernel_color='green')

    beautify_layer(layer, fig, xlim=xlim, ylim=ylim, xticks=xticks,
                   yticks=yticks, xlabel='', ylabel='')
    fig.gca().grid(False)


# -----------------------------------------------

# Simple connection

# { conn1 #}
l = tp.CreateLayer({'rows': 11, 'columns': 11, 'extent': [11., 11.],
                    'elements': 'iaf_neuron'})

conndict = {'connection_type': 'divergent',
            'mask': {'rectangular': {'lower_left': [-2., -1.],
                                     'upper_right': [2., 1.]}}}
tp.ConnectLayers(l, l, conndict)
# { end #}

fig = plt.figure()
fig.add_subplot(121)
conn_figure(fig, l, conndict,
            targets=((tp.FindCenterElement(l), 'red'),
                     (tp.FindNearestElement(l, [4., 5.]), 'yellow')))

# same another time, with periodic bcs
lpbc = tp.CreateLayer({'rows': 11, 'columns': 11, 'extent': [11., 11.],
                       'elements': 'iaf_neuron', 'edge_wrap': True})
tp.ConnectLayers(lpbc, lpbc, conndict)
fig.add_subplot(122)
conn_figure(fig, lpbc, conndict, showmask=False,
            targets=((tp.FindCenterElement(lpbc), 'red'),
                     (tp.FindNearestElement(lpbc, [4., 5.]), 'yellow')))

plt.savefig('../user_manual_figures/conn1.png', bbox_inches='tight')


# -----------------------------------------------

# free masks

def free_mask_fig(fig, loc, cdict):
    nest.ResetKernel()
    l = tp.CreateLayer({'rows': 11, 'columns': 11, 'extent': [11., 11.],
                        'elements': 'iaf_neuron'})
    tp.ConnectLayers(l, l, cdict)

    fig.add_subplot(loc)
    conn_figure(fig, l, cdict, xticks=range(-5, 6, 2),
                yticks=range(-5, 6, 2))


fig = plt.figure()

# { conn2r #}
conndict = {'connection_type': 'divergent',
            'mask': {'rectangular': {'lower_left': [-2., -1.],
                                     'upper_right': [2., 1.]}}}
# { end #}
free_mask_fig(fig, 231, conndict)

# { conn2ro #}
conndict = {'connection_type': 'divergent',
            'mask': {'rectangular': {'lower_left': [-2., -1.],
                                     'upper_right': [2., 1.]},
                     'anchor': [-1.5, -1.5]}}
# { end #}
free_mask_fig(fig, 234, conndict)

# { conn2c #}
conndict = {'connection_type': 'divergent',
            'mask': {'circular': {'radius': 2.0}}}
# { end #}
free_mask_fig(fig, 232, conndict)

# { conn2co #}
conndict = {'connection_type': 'divergent',
            'mask': {'circular': {'radius': 2.0},
                     'anchor': [-2.0, 0.0]}}
# { end #}
free_mask_fig(fig, 235, conndict)

# { conn2d #}
conndict = {'connection_type': 'divergent',
            'mask': {'doughnut': {'inner_radius': 1.5,
                                  'outer_radius': 3.}}}
# { end #}
free_mask_fig(fig, 233, conndict)

# { conn2do #}
conndict = {'connection_type': 'divergent',
            'mask': {'doughnut': {'inner_radius': 1.5,
                                  'outer_radius': 3.},
                     'anchor': [1.5, 1.5]}}
# { end #}
free_mask_fig(fig, 236, conndict)

plt.savefig('../user_manual_figures/conn2.png', bbox_inches='tight')


# -----------------------------------------------

# 3d masks

def conn_figure_3d(fig, layer, connd, targets=None, showmask=True,
                   showkern=False, xticks=range(-5, 6), yticks=range(-5, 6),
                   xlim=[-5.5, 5.5], ylim=[-5.5, 5.5]):
    if targets is None:
        targets = ((tp.FindCenterElement(layer), 'red'),)

    tp.PlotLayer(layer, fig=fig, nodesize=20, nodecolor=(.5, .5, 1.))
    for src, clr in targets:
        if showmask:
            mask = connd['mask']
        else:
            mask = None
        if showkern:
            kern = connd['kernel']
        else:
            kern = None
        tp.PlotTargets(src, layer, fig=fig, mask=mask, kernel=kern,
                       src_size=250, tgt_color=clr, tgt_size=60,
                       kernel_color='green')

    ax = fig.gca()
    ax.set_aspect('equal', 'box')
    plt.draw()


def free_mask_3d_fig(fig, loc, cdict):
    nest.ResetKernel()
    l = tp.CreateLayer(
        {'rows': 11, 'columns': 11, 'layers': 11, 'extent': [11., 11., 11.],
         'elements': 'iaf_neuron'})
    tp.ConnectLayers(l, l, cdict)

    fig.add_subplot(loc, projection='3d')
    conn_figure_3d(fig, l, cdict, xticks=range(-5, 6, 2),
                   yticks=range(-5, 6, 2))


fig = plt.figure()

# { conn_3d_a #}
conndict = {'connection_type': 'divergent',
            'mask': {'box': {'lower_left': [-2., -1., -1.],
                             'upper_right': [2., 1., 1.]}}}
# { end #}
free_mask_3d_fig(fig, 121, conndict)

# { conn_3d_b #}
conndict = {'connection_type': 'divergent',
            'mask': {'spherical': {'radius': 2.5}}}
# { end #}
free_mask_3d_fig(fig, 122, conndict)

plt.savefig('../user_manual_figures/conn_3d.png', bbox_inches='tight')


# -----------------------------------------------

# grid masks

def grid_mask_fig(fig, loc, cdict):
    nest.ResetKernel()
    l = tp.CreateLayer({'rows': 11, 'columns': 11, 'extent': [11., 11.],
                        'elements': 'iaf_neuron'})
    tp.ConnectLayers(l, l, cdict)

    fig.add_subplot(loc)
    conn_figure(fig, l, cdict, xticks=range(-5, 6, 2),
                yticks=range(-5, 6, 2), showmask=False)


fig = plt.figure()

# { conn3 #}
conndict = {'connection_type': 'divergent',
            'mask': {'grid': {'rows': 3, 'columns': 5}}}
# { end #}
grid_mask_fig(fig, 131, conndict)

# { conn3c #}
conndict = {'connection_type': 'divergent',
            'mask': {'grid': {'rows': 3, 'columns': 5},
                     'anchor': {'row': 1, 'column': 2}}}
# { end #}
grid_mask_fig(fig, 132, conndict)

# { conn3x #}
conndict = {'connection_type': 'divergent',
            'mask': {'grid': {'rows': 3, 'columns': 5},
                     'anchor': {'row': -1, 'column': 2}}}
# { end #}
grid_mask_fig(fig, 133, conndict)

plt.savefig('../user_manual_figures/conn3.png', bbox_inches='tight')


# -----------------------------------------------

# free masks

def kernel_fig(fig, loc, cdict, showkern=True):
    nest.ResetKernel()
    l = tp.CreateLayer({'rows': 11, 'columns': 11, 'extent': [11., 11.],
                        'elements': 'iaf_neuron'})
    tp.ConnectLayers(l, l, cdict)

    fig.add_subplot(loc)
    conn_figure(fig, l, cdict, xticks=range(-5, 6, 2),
                yticks=range(-5, 6, 2), showkern=showkern)


fig = plt.figure()

# { conn4cp #}
conndict = {'connection_type': 'divergent',
            'mask': {'circular': {'radius': 4.}},
            'kernel': 0.5}
# { end #}
kernel_fig(fig, 231, conndict)

# { conn4g #}
conndict = {'connection_type': 'divergent',
            'mask': {'circular': {'radius': 4.}},
            'kernel': {'gaussian': {'p_center': 1.0, 'sigma': 1.}}}
# { end #}
kernel_fig(fig, 232, conndict)

# { conn4gx #}
conndict = {'connection_type': 'divergent',
            'mask': {'circular': {'radius': 4.}, 'anchor': [1.5, 1.5]},
            'kernel': {'gaussian': {'p_center': 1.0, 'sigma': 1.,
                                    'anchor': [1.5, 1.5]}}}
# { end #}
kernel_fig(fig, 233, conndict)

plt.draw()

# { conn4cut #}
conndict = {'connection_type': 'divergent',
            'mask': {'circular': {'radius': 4.}},
            'kernel': {'gaussian': {'p_center': 1.0, 'sigma': 1.,
                                    'cutoff': 0.5}}}
# { end #}
kernel_fig(fig, 234, conndict)

# { conn42d #}
conndict = {'connection_type': 'divergent',
            'mask': {'circular': {'radius': 4.}},
            'kernel': {'gaussian2D': {'p_center': 1.0,
                                      'sigma_x': 1., 'sigma_y': 3.}}}
# { end #}
kernel_fig(fig, 235, conndict, showkern=False)

plt.savefig('../user_manual_figures/conn4.png', bbox_inches='tight')


# -----------------------------------------------

def wd_fig(fig, loc, ldict, cdict, what, rpos=None,
           xlim=[-1, 51], ylim=[0, 1], xticks=range(0, 51, 5),
           yticks=np.arange(0., 1.1, 0.2), clr='blue',
           label=''):
    nest.ResetKernel()
    l = tp.CreateLayer(ldict)
    tp.ConnectLayers(l, l, cdict)

    ax = fig.add_subplot(loc)

    if rpos is None:
        rn = nest.GetLeaves(l)[0][:1]  # first node
    else:
        rn = tp.FindNearestElement(l, rpos)

    conns = nest.GetConnections(rn)
    cstat = nest.GetStatus(conns)
    vals = np.array([sd[what] for sd in cstat])
    tgts = [sd['target'] for sd in cstat]
    locs = np.array(tp.GetPosition(tgts))
    ax.plot(locs[:, 0], vals, 'o', mec='none', mfc=clr, label=label)
    ax.set_xlim(xlim)
    ax.set_ylim(ylim)
    ax.set_xticks(xticks)
    ax.set_yticks(yticks)


fig = plt.figure()

# { conn5lin #}
ldict = {'rows': 1, 'columns': 51,
         'extent': [51., 1.], 'center': [25., 0.],
         'elements': 'iaf_neuron'}
cdict = {'connection_type': 'divergent',
         'mask': {'rectangular': {'lower_left': [-25.5, -0.5],
                                  'upper_right': [25.5, 0.5]}},
         'weights': {'linear': {'c': 1.0, 'a': -0.05, 'cutoff': 0.0}},
         'delays': {'linear': {'c': 0.1, 'a': 0.02}}}
# { end #}
wd_fig(fig, 311, ldict, cdict, 'weight', label='Weight')
wd_fig(fig, 311, ldict, cdict, 'delay', label='Delay', clr='red')
fig.gca().legend()

lpdict = {'rows': 1, 'columns': 51, 'extent': [51., 1.], 'center': [25., 0.],
          'elements': 'iaf_neuron', 'edge_wrap': True}
# { conn5linpbc #}
cdict = {'connection_type': 'divergent',
         'mask': {'rectangular': {'lower_left': [-25.5, -0.5],
                                  'upper_right': [25.5, 0.5]}},
         'weights': {'linear': {'c': 1.0, 'a': -0.05, 'cutoff': 0.0}},
         'delays': {'linear': {'c': 0.1, 'a': 0.02}}}
# { end #}
wd_fig(fig, 312, lpdict, cdict, 'weight', label='Weight')
wd_fig(fig, 312, lpdict, cdict, 'delay', label='Delay', clr='red')
fig.gca().legend()

cdict = {'connection_type': 'divergent',
         'mask': {'rectangular': {'lower_left': [-25.5, -0.5],
                                  'upper_right': [25.5, 0.5]}},
         'weights': {'linear': {'c': 1.0, 'a': -0.05, 'cutoff': 0.0}}}
wd_fig(fig, 313, ldict, cdict, 'weight', label='Linear',
       rpos=[25., 0.], clr='orange')

# { conn5exp #}
cdict = {'connection_type': 'divergent',
         'mask': {'rectangular': {'lower_left': [-25.5, -0.5],
                                  'upper_right': [25.5, 0.5]}},
         'weights': {'exponential': {'a': 1., 'tau': 5.}}}
# { end #}
wd_fig(fig, 313, ldict, cdict, 'weight', label='Exponential',
       rpos=[25., 0.])

# { conn5gauss #}
cdict = {'connection_type': 'divergent',
         'mask': {'rectangular': {'lower_left': [-25.5, -0.5],
                                  'upper_right': [25.5, 0.5]}},
         'weights': {'gaussian': {'p_center': 1., 'sigma': 5.}}}
# { end #}
wd_fig(fig, 313, ldict, cdict, 'weight', label='Gaussian',
       clr='green', rpos=[25., 0.])

# { conn5uniform #}
cdict = {'connection_type': 'divergent',
         'mask': {'rectangular': {'lower_left': [-25.5, -0.5],
                                  'upper_right': [25.5, 0.5]}},
         'weights': {'uniform': {'min': 0.2, 'max': 0.8}}}
# { end #}
wd_fig(fig, 313, ldict, cdict, 'weight', label='Uniform',
       clr='red', rpos=[25., 0.])

fig.gca().legend()

plt.savefig('../user_manual_figures/conn5.png', bbox_inches='tight')


# --------------------------------

def pn_fig(fig, loc, ldict, cdict,
           xlim=[0., .5], ylim=[0, 3.5], xticks=range(0, 51, 5),
           yticks=np.arange(0., 1.1, 0.2), clr='blue',
           label=''):
    nest.ResetKernel()
    l = tp.CreateLayer(ldict)
    tp.ConnectLayers(l, l, cdict)

    ax = fig.add_subplot(loc)

    rn = nest.GetLeaves(l)[0]
    conns = nest.GetConnections(rn)
    cstat = nest.GetStatus(conns)
    srcs = [sd['source'] for sd in cstat]
    tgts = [sd['target'] for sd in cstat]
    dist = np.array(tp.Distance(srcs, tgts))
    ax.hist(dist, bins=50, histtype='stepfilled', normed=True)
    r = np.arange(0., 0.51, 0.01)

    plt.plot(r, 2 * np.pi * r * (1 - 2 * r) * 12 / np.pi, 'r-', lw=3,
             zorder=-10)

    ax.set_xlim(xlim)
    ax.set_ylim(ylim)
    """ax.set_xticks(xticks)
    ax.set_yticks(yticks)"""
    # ax.set_aspect(100, 'box')
    ax.set_xlabel('Source-target distance d')
    ax.set_ylabel('Connection probability pconn(d)')


fig = plt.figure()

# { conn6 #}
pos = [[np.random.uniform(-1., 1.), np.random.uniform(-1., 1.)]
       for j in range(1000)]
ldict = {'positions': pos, 'extent': [2., 2.],
         'elements': 'iaf_neuron', 'edge_wrap': True}
cdict = {'connection_type': 'divergent',
         'mask': {'circular': {'radius': 1.0}},
         'kernel': {'linear': {'c': 1., 'a': -2., 'cutoff': 0.0}},
         'number_of_connections': 50,
         'allow_multapses': True, 'allow_autapses': False}
# { end #}
pn_fig(fig, 111, ldict, cdict)

plt.savefig('../user_manual_figures/conn6.png', bbox_inches='tight')


# -----------------------------

# { conn7 #}
nest.ResetKernel()
nest.CopyModel('iaf_neuron', 'pyr')
nest.CopyModel('iaf_neuron', 'in')
ldict = {'rows': 10, 'columns': 10, 'elements': ['pyr', 'in']}
cdict_p2i = {'connection_type': 'divergent',
             'mask': {'circular': {'radius': 0.5}},
             'kernel': 0.8,
             'sources': {'model': 'pyr'},
             'targets': {'model': 'in'}}
cdict_i2p = {'connection_type': 'divergent',
             'mask': {'rectangular': {'lower_left': [-0.2, -0.2],
                                      'upper_right': [0.2, 0.2]}},
             'sources': {'model': 'in'},
             'targets': {'model': 'pyr'}}
l = tp.CreateLayer(ldict)
tp.ConnectLayers(l, l, cdict_p2i)
tp.ConnectLayers(l, l, cdict_i2p)
# { end #}


# ----------------------------

# { conn8 #}
nest.ResetKernel()
nest.CopyModel('iaf_neuron', 'pyr')
nest.CopyModel('iaf_neuron', 'in')
nest.CopyModel('static_synapse', 'exc', {'weight': 2.0})
nest.CopyModel('static_synapse', 'inh', {'weight': -8.0})
ldict = {'rows': 10, 'columns': 10, 'elements': ['pyr', 'in']}
cdict_p2i = {'connection_type': 'divergent',
             'mask': {'circular': {'radius': 0.5}},
             'kernel': 0.8,
             'sources': {'model': 'pyr'},
             'targets': {'model': 'in'},
             'synapse_model': 'exc'}
cdict_i2p = {'connection_type': 'divergent',
             'mask': {'rectangular': {'lower_left': [-0.2, -0.2],
                                      'upper_right': [0.2, 0.2]}},
             'sources': {'model': 'in'},
             'targets': {'model': 'pyr'},
             'synapse_model': 'inh'}
l = tp.CreateLayer(ldict)
tp.ConnectLayers(l, l, cdict_p2i)
tp.ConnectLayers(l, l, cdict_i2p)
# { end #}


# ----------------------------

# { conn9 #}
nrns = tp.CreateLayer({'rows': 20, 'columns': 20, 'elements': 'iaf_neuron'})
stim = tp.CreateLayer({'rows': 1, 'columns': 1,
                       'elements': 'poisson_generator'})
cdict_stim = {'connection_type': 'divergent',
              'mask': {'circular': {'radius': 0.1},
                       'anchor': [0.2, 0.2]}}
tp.ConnectLayers(stim, nrns, cdict_stim)
# { end #}


# ----------------------------

# { conn10 #}
rec = tp.CreateLayer({'rows': 1, 'columns': 1,
                      'elements': 'spike_detector'})
cdict_rec = {'connection_type': 'convergent',
             'mask': {'circular': {'radius': 0.1},
                      'anchor': [-0.2, 0.2]}}
tp.ConnectLayers(nrns, rec, cdict_rec)
# { end #}
flinz/nest-simulator
topology/doc/user_manual_scripts/connections.py
Python
gpl-2.0
18,279
[ "Gaussian" ]
4d6ca28489343af62993d850eb1bb9a2e702b2ed43836b74247e20aad5105982